From 90313fd7f94a53d219e873eb8d2513e7938b220f Mon Sep 17 00:00:00 2001
From: Ian Chen <ychen.cs10@nycu.edu.tw>
Date: Fri, 24 Jan 2025 15:33:39 +0800
Subject: [PATCH] feat: add selftest for struct_ops

---
 selftest/struct-ops/Makefile | 95 +
 selftest/struct-ops/dep/include/.gitignore | 1 +
 .../arm/vmlinux-v6.13-rc1-g353b2a7de2a3.h | 137286 +++++++++++++
 .../arm64/vmlinux-v6.13-rc1-g353b2a7de2a3.h | 102528 ++++++++++
 .../mips/vmlinux-v6.13-rc1-g353b2a7de2a3.h | 105667 ++++++++++
 .../powerpc/vmlinux-v6.13-rc1-g353b2a7de2a3.h | 102312 ++++++++++
 .../riscv/vmlinux-v6.13-rc1-g353b2a7de2a3.h | 100680 ++++++++++
 .../s390/vmlinux-v6.13-rc1-g353b2a7de2a3.h | 115675 +++++++++++
 .../x86/vmlinux-v6.13-rc1-g353b2a7de2a3.h | 150792 +++++++++++++++
 .../dep/include/bpf-compat/gnu/stubs.h | 11 +
 .../struct-ops/dep/include/lib/sdt_task.h | 113 +
 .../dep/include/scx/bpf_arena_common.h | 207 +
 .../struct-ops/dep/include/scx/common.bpf.h | 628 +
 selftest/struct-ops/dep/include/scx/common.h | 81 +
 .../struct-ops/dep/include/scx/compat.bpf.h | 143 +
 selftest/struct-ops/dep/include/scx/compat.h | 187 +
 .../dep/include/scx/enums.autogen.bpf.h | 105 +
 .../dep/include/scx/enums.autogen.h | 41 +
 .../struct-ops/dep/include/scx/enums.bpf.h | 12 +
 selftest/struct-ops/dep/include/scx/enums.h | 27 +
 .../dep/include/scx/namespace.bpf.h | 22 +
 .../dep/include/scx/namespace_impl.bpf.h | 81 +
 .../struct-ops/dep/include/scx/ravg.bpf.h | 42 +
 .../dep/include/scx/ravg_impl.bpf.h | 357 +
 .../dep/include/scx/user_exit_info.h | 118 +
 selftest/struct-ops/go.mod | 7 +
 selftest/struct-ops/go.sum | 8 +
 selftest/struct-ops/main.bpf.c | 151 +
 selftest/struct-ops/main.go | 113 +
 selftest/struct-ops/run.sh | 22 +
 30 files changed, 817512 insertions(+)
 create mode 100644 selftest/struct-ops/Makefile
 create mode 100644 selftest/struct-ops/dep/include/.gitignore
 create mode 100644 selftest/struct-ops/dep/include/arch/arm/vmlinux-v6.13-rc1-g353b2a7de2a3.h
 create mode 100644 selftest/struct-ops/dep/include/arch/arm64/vmlinux-v6.13-rc1-g353b2a7de2a3.h
 create mode 100644 selftest/struct-ops/dep/include/arch/mips/vmlinux-v6.13-rc1-g353b2a7de2a3.h
 create mode 100644 selftest/struct-ops/dep/include/arch/powerpc/vmlinux-v6.13-rc1-g353b2a7de2a3.h
 create mode 100644 selftest/struct-ops/dep/include/arch/riscv/vmlinux-v6.13-rc1-g353b2a7de2a3.h
 create mode 100644 selftest/struct-ops/dep/include/arch/s390/vmlinux-v6.13-rc1-g353b2a7de2a3.h
 create mode 100644 selftest/struct-ops/dep/include/arch/x86/vmlinux-v6.13-rc1-g353b2a7de2a3.h
 create mode 100644 selftest/struct-ops/dep/include/bpf-compat/gnu/stubs.h
 create mode 100644 selftest/struct-ops/dep/include/lib/sdt_task.h
 create mode 100644 selftest/struct-ops/dep/include/scx/bpf_arena_common.h
 create mode 100644 selftest/struct-ops/dep/include/scx/common.bpf.h
 create mode 100644 selftest/struct-ops/dep/include/scx/common.h
 create mode 100644 selftest/struct-ops/dep/include/scx/compat.bpf.h
 create mode 100644 selftest/struct-ops/dep/include/scx/compat.h
 create mode 100644 selftest/struct-ops/dep/include/scx/enums.autogen.bpf.h
 create mode 100644 selftest/struct-ops/dep/include/scx/enums.autogen.h
 create mode 100644 selftest/struct-ops/dep/include/scx/enums.bpf.h
 create mode 100644 selftest/struct-ops/dep/include/scx/enums.h
 create mode 100644 selftest/struct-ops/dep/include/scx/namespace.bpf.h
 create mode 100644 selftest/struct-ops/dep/include/scx/namespace_impl.bpf.h
 create mode 100644 selftest/struct-ops/dep/include/scx/ravg.bpf.h
 create mode 100644 selftest/struct-ops/dep/include/scx/ravg_impl.bpf.h
 create mode 100644 selftest/struct-ops/dep/include/scx/user_exit_info.h
 create mode 100644 selftest/struct-ops/go.mod
 create mode 100644 selftest/struct-ops/go.sum
 create mode 100644 selftest/struct-ops/main.bpf.c
 create mode 100644 selftest/struct-ops/main.go
 create mode 100755 selftest/struct-ops/run.sh

diff --git a/selftest/struct-ops/Makefile b/selftest/struct-ops/Makefile
new file mode 100644
index 00000000..4263f2fd
--- /dev/null
+++ b/selftest/struct-ops/Makefile
@@ -0,0 +1,95 @@
+BASEDIR = $(abspath ../../)
+
+OUTPUT = ../../output
+
+LIBBPF_SRC = $(abspath ../../libbpf/src)
+LIBBPF_OBJ = $(abspath $(OUTPUT)/libbpf.a)
+
+CLANG = clang
+CC = $(CLANG)
+GO = go
+PKGCONFIG = pkg-config
+
+ARCH := $(shell uname -m | sed 's/x86_64/amd64/g; s/aarch64/arm64/g')
+
+# libbpf
+
+LIBBPF_OBJDIR = $(abspath ./$(OUTPUT)/libbpf)
+
+CFLAGS = -g -O2 -Wall -fpie -I$(abspath ../common)
+LDFLAGS =
+
+CGO_CFLAGS_STATIC = "-I$(abspath $(OUTPUT)) -I$(abspath ../common)"
+CGO_LDFLAGS_STATIC = "$(shell PKG_CONFIG_PATH=$(LIBBPF_OBJDIR) $(PKGCONFIG) --static --libs libbpf)"
+CGO_EXTLDFLAGS_STATIC = '-w -extldflags "-static"'
+SCX_FLAGS=-O2 -g -Wall -fpie -mcpu=v3 -mlittle-endian \
+-I ../../libbpf/src/usr/include -I ../../libbpf/include/uapi \
+-I dep/include -I dep/include/arch/x86 -I dep/include/bpf-compat -I dep/include/lib
+
+CGO_CFLAGS_DYN = "-I. -I/usr/include/"
+CGO_LDFLAGS_DYN = "$(shell $(PKGCONFIG) --shared --libs libbpf)"
+
+MAIN = main
+
+.PHONY: $(MAIN)
+.PHONY: $(MAIN).go
+.PHONY: $(MAIN).bpf.c
+
+all: $(MAIN)-static
+
+.PHONY: libbpfgo
+.PHONY: libbpfgo-static
+.PHONY: libbpfgo-dynamic
+
+## libbpfgo
+
+libbpfgo-static:
+	$(MAKE) -C $(BASEDIR) libbpfgo-static
+
+libbpfgo-dynamic:
+	$(MAKE) -C $(BASEDIR) libbpfgo-dynamic
+
+outputdir:
+	$(MAKE) -C $(BASEDIR) outputdir
+
+## test bpf dependency
+
+$(MAIN).bpf.o: $(MAIN).bpf.c
+	$(CLANG) -target bpf -D__TARGET_ARCH_$(ARCH) $(SCX_FLAGS) -I $(OUTPUT) -I $(abspath ../common) -c $< -o $@
+
+## test
+
+.PHONY: $(MAIN)-static
+.PHONY: $(MAIN)-dynamic
+
+$(MAIN)-static: libbpfgo-static | $(MAIN).bpf.o
+	CC=$(CLANG) \
+	CGO_CFLAGS=$(CGO_CFLAGS_STATIC) \
+	CGO_LDFLAGS=$(CGO_LDFLAGS_STATIC) \
+	GOOS=linux GOARCH=$(ARCH) \
+	$(GO) build \
+	-tags netgo -ldflags $(CGO_EXTLDFLAGS_STATIC) \
+	-o $(MAIN)-static ./$(MAIN).go
+
+$(MAIN)-dynamic: libbpfgo-dynamic | $(MAIN).bpf.o
+	CC=$(CLANG) \
+	CGO_CFLAGS=$(CGO_CFLAGS_DYN) \
+	CGO_LDFLAGS=$(CGO_LDFLAGS_DYN) \
+	$(GO) build -o ./$(MAIN)-dynamic ./$(MAIN).go
+
+## run
+
+.PHONY: run
+.PHONY: run-static
+.PHONY: run-dynamic
+
+run: run-static
+
+run-static: $(MAIN)-static
+	sudo ./run.sh $(MAIN)-static
+
+run-dynamic: $(MAIN)-dynamic
+	sudo ./run.sh $(MAIN)-dynamic
+
+clean:
+	rm -f *.o *-static *-dynamic
\ No newline at end of file
diff --git a/selftest/struct-ops/dep/include/.gitignore b/selftest/struct-ops/dep/include/.gitignore
new file mode 100644
index 00000000..0586962a
--- /dev/null
+++ b/selftest/struct-ops/dep/include/.gitignore
@@ -0,0 +1 @@
+vmlinux.h
diff --git a/selftest/struct-ops/dep/include/arch/arm/vmlinux-v6.13-rc1-g353b2a7de2a3.h b/selftest/struct-ops/dep/include/arch/arm/vmlinux-v6.13-rc1-g353b2a7de2a3.h
new file mode 100644
index 00000000..b9e05413
--- /dev/null
+++ b/selftest/struct-ops/dep/include/arch/arm/vmlinux-v6.13-rc1-g353b2a7de2a3.h
@@ -0,0 +1,137286 @@
+#ifndef __VMLINUX_H__
+#define __VMLINUX_H__
+
+#ifndef BPF_NO_PRESERVE_ACCESS_INDEX
+#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)
+#endif
+
+typedef signed char __s8;
+
+typedef unsigned char __u8;
+ +typedef short unsigned int __u16; + +typedef int __s32; + +typedef unsigned int __u32; + +typedef long long int __s64; + +typedef long long unsigned int __u64; + +typedef __s8 s8; + +typedef __u8 u8; + +typedef __u16 u16; + +typedef __s32 s32; + +typedef __u32 u32; + +typedef __s64 s64; + +typedef __u64 u64; + +enum { + false = 0, + true = 1, +}; + +typedef long int __kernel_long_t; + +typedef long unsigned int __kernel_ulong_t; + +typedef int __kernel_pid_t; + +typedef unsigned int __kernel_uid32_t; + +typedef unsigned int __kernel_gid32_t; + +typedef unsigned int __kernel_size_t; + +typedef int __kernel_ssize_t; + +typedef long long int __kernel_loff_t; + +typedef long long int __kernel_time64_t; + +typedef __kernel_long_t __kernel_clock_t; + +typedef int __kernel_timer_t; + +typedef int __kernel_clockid_t; + +typedef unsigned int __poll_t; + +typedef u32 __kernel_dev_t; + +typedef __kernel_dev_t dev_t; + +typedef short unsigned int umode_t; + +typedef __kernel_pid_t pid_t; + +typedef __kernel_clockid_t clockid_t; + +typedef _Bool bool; + +typedef __kernel_uid32_t uid_t; + +typedef __kernel_gid32_t gid_t; + +typedef __kernel_loff_t loff_t; + +typedef __kernel_size_t size_t; + +typedef __kernel_ssize_t ssize_t; + +typedef s32 int32_t; + +typedef u32 uint32_t; + +typedef s64 ktime_t; + +typedef u64 sector_t; + +typedef u64 blkcnt_t; + +typedef unsigned int gfp_t; + +typedef unsigned int fmode_t; + +typedef u32 phys_addr_t; + +typedef struct { + int counter; +} atomic_t; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct hlist_node; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +struct pt_regs { + long unsigned int uregs[18]; +}; + +struct perf_event; + +struct debug_info { + struct perf_event *hbp[32]; +}; + +struct thread_struct { + long unsigned int address; + long unsigned int trap_no; + long unsigned int error_code; + struct debug_info debug; +}; + +typedef struct { + s64 counter; +} atomic64_t; + +typedef atomic_t atomic_long_t; + +typedef int (*initcall_t)(); + +typedef void (*ctor_fn_t)(); + +struct lock_class_key {}; + +struct fs_context; + +struct fs_parameter_spec; + +struct dentry; + +struct super_block; + +struct module; + +struct file_system_type { + const char *name; + int fs_flags; + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); + void (*kill_sb)(struct super_block *); + struct module *owner; + struct file_system_type *next; + struct hlist_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[3]; + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; + struct lock_class_key i_mutex_dir_key; +}; + +struct __raw_tickets { + u16 owner; + u16 next; +}; + +typedef struct { + union { + u32 slock; + struct __raw_tickets tickets; + }; +} arch_spinlock_t; + +typedef struct { + u32 lock; +} arch_rwlock_t; + +struct lockdep_map {}; + +struct raw_spinlock { + arch_spinlock_t raw_lock; +}; + +typedef struct raw_spinlock raw_spinlock_t; + +struct ratelimit_state { + raw_spinlock_t lock; + int interval; + int burst; + int printed; + int missed; + unsigned int 
flags; + long unsigned int begin; +}; + +typedef u32 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +struct static_key_mod; + +struct static_key { + atomic_t enabled; + union { + long unsigned int type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; + +struct static_key_true { + struct static_key key; +}; + +struct static_key_false { + struct static_key key; +}; + +struct _ddebug { + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno: 18; + unsigned int class_id: 6; + unsigned int flags: 8; + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; + long: 32; +}; + +enum class_map_type { + DD_CLASS_TYPE_DISJOINT_BITS = 0, + DD_CLASS_TYPE_LEVEL_NUM = 1, + DD_CLASS_TYPE_DISJOINT_NAMES = 2, + DD_CLASS_TYPE_LEVEL_NAMES = 3, +}; + +struct ddebug_class_map { + struct list_head link; + struct module *mod; + const char *mod_name; + const char **class_names; + const int length; + const int base; + enum class_map_type map_type; +}; + +enum module_state { + MODULE_STATE_LIVE = 0, + MODULE_STATE_COMING = 1, + MODULE_STATE_GOING = 2, + MODULE_STATE_UNFORMED = 3, +}; + +struct refcount_struct { + atomic_t refs; +}; + +typedef struct refcount_struct refcount_t; + +struct kref { + refcount_t refcount; +}; + +struct kset; + +struct kobj_type; + +struct kernfs_node; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + const struct kobj_type *ktype; + struct kernfs_node *sd; + struct kref kref; + unsigned int state_initialized: 1; + unsigned int state_in_sysfs: 1; + unsigned int state_add_uevent_sent: 1; + unsigned int state_remove_uevent_sent: 1; + unsigned int uevent_suppress: 1; +}; + +struct module_param_attrs; + +struct completion; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +}; + +struct optimistic_spin_queue { + atomic_t tail; +}; + +struct mutex { + atomic_long_t owner; + raw_spinlock_t wait_lock; + struct optimistic_spin_queue osq; + struct list_head wait_list; +}; + +struct rb_node { + long unsigned int __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +struct latch_tree_node { + struct rb_node node[2]; +}; + +struct mod_tree_node { + struct module *mod; + struct latch_tree_node node; +}; + +struct module_memory { + void *base; + void *rw_copy; + bool is_rox; + unsigned int size; + struct mod_tree_node mtn; +}; + +struct elf32_shdr; + +struct plt_entries; + +struct mod_plt_sec { + struct elf32_shdr *plt; + struct plt_entries *plt_ent; + int plt_count; +}; + +struct unwind_table; + +struct mod_arch_specific { + struct list_head unwind_list; + struct unwind_table *init_table; + struct mod_plt_sec core; + struct mod_plt_sec init; +}; + +struct elf32_sym; + +typedef struct elf32_sym Elf32_Sym; + +struct mod_kallsyms { + Elf32_Sym *symtab; + unsigned int num_symtab; + char *strtab; + char *typetab; +}; + +struct _ddebug_info { + struct _ddebug *descs; + struct ddebug_class_map *classes; + unsigned int num_descs; + unsigned int num_classes; +}; + +struct module_attribute; + +struct kernel_symbol; + +struct kernel_param; + +struct exception_table_entry; + +struct bug_entry; + +struct module_sect_attrs; + +struct module_notes_attrs; + +struct tracepoint; + +typedef struct tracepoint * const tracepoint_ptr_t; 
+ +struct srcu_struct; + +struct bpf_raw_event_map; + +struct trace_event_call; + +struct trace_eval_map; + +struct error_injection_entry; + +struct module { + enum module_state state; + struct list_head list; + char name[60]; + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + const struct kernel_symbol *syms; + const s32 *crcs; + unsigned int num_syms; + struct mutex param_lock; + struct kernel_param *kp; + unsigned int num_kp; + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const s32 *gpl_crcs; + bool using_gplonly_symbols; + bool async_probe_requested; + unsigned int num_exentries; + struct exception_table_entry *extable; + int (*init)(); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct module_memory mem[7]; + struct mod_arch_specific arch; + long unsigned int taints; + unsigned int num_bugs; + struct list_head bug_list; + struct bug_entry *bug_table; + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + struct module_sect_attrs *sect_attrs; + struct module_notes_attrs *notes_attrs; + char *args; + void *percpu; + unsigned int percpu_size; + void *noinstr_text_start; + unsigned int noinstr_text_size; + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; + unsigned int num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; + unsigned int btf_data_size; + unsigned int btf_base_data_size; + void *btf_data; + void *btf_base_data; + struct jump_entry *jump_entries; + unsigned int num_jump_entries; + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; + struct trace_event_call **trace_events; + unsigned int num_trace_events; + struct trace_eval_map **trace_evals; + unsigned int num_trace_evals; + unsigned int num_ftrace_callsites; + long unsigned int *ftrace_callsites; + void *kprobes_text_start; + unsigned int kprobes_text_size; + long unsigned int *kprobe_blacklist; + unsigned int num_kprobe_blacklist; + struct list_head source_list; + struct list_head target_list; + void (*exit)(); + atomic_t refcnt; + ctor_fn_t *ctors; + unsigned int num_ctors; + struct error_injection_entry *ei_funcs; + unsigned int num_ei_funcs; + struct _ddebug_info dyndbg_info; + long: 32; + long: 32; + long: 32; +}; + +struct kernel_param_ops { + unsigned int flags; + int (*set)(const char *, const struct kernel_param *); + int (*get)(char *, const struct kernel_param *); + void (*free)(void *); +}; + +typedef unsigned int fop_flags_t; + +typedef void *fl_owner_t; + +struct file; + +struct kiocb; + +struct iov_iter; + +struct io_comp_batch; + +struct dir_context; + +struct poll_table_struct; + +struct vm_area_struct; + +struct inode; + +struct file_lock; + +struct pipe_inode_info; + +struct file_lease; + +struct seq_file; + +struct io_uring_cmd; + +struct file_operations { + struct module *owner; + fop_flags_t fop_flags; + loff_t (*llseek)(struct file *, loff_t, int); + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *, struct io_comp_batch *, unsigned int); + int (*iterate_shared)(struct file *, struct dir_context *); + __poll_t 
(*poll)(struct file *, struct poll_table_struct *); + long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int); + int (*mmap)(struct file *, struct vm_area_struct *); + int (*open)(struct inode *, struct file *); + int (*flush)(struct file *, fl_owner_t); + int (*release)(struct inode *, struct file *); + int (*fsync)(struct file *, loff_t, loff_t, int); + int (*fasync)(int, struct file *, int); + int (*lock)(struct file *, int, struct file_lock *); + long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*check_flags)(int); + int (*flock)(struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct file *); + int (*setlease)(struct file *, int, struct file_lease **, void **); + long int (*fallocate)(struct file *, int, loff_t, loff_t); + void (*show_fdinfo)(struct seq_file *, struct file *); + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); + loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int); + int (*fadvise)(struct file *, loff_t, loff_t, int); + int (*uring_cmd)(struct io_uring_cmd *, unsigned int); + int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int); +}; + +struct static_call_key { + void *func; +}; + +struct bug_entry { + long unsigned int bug_addr; + const char *file; + short unsigned int line; + short unsigned int flags; +}; + +struct codetag { + unsigned int flags; + unsigned int lineno; + const char *modname; + const char *function; + const char *filename; + long: 32; +}; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + +struct __kernel_timespec { + __kernel_time64_t tv_sec; + long long int tv_nsec; +}; + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct pollfd; + +struct restart_block { + long unsigned int arch_data; + long int (*fn)(struct restart_block *); + union { + struct { + u32 *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 *uaddr2; + long: 32; + } futex; + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec *rmtp; + struct old_timespec32 *compat_rmtp; + }; + long: 32; + u64 expires; + } nanosleep; + struct { + struct pollfd *ufds; + int nfds; + int has_timeout; + long unsigned int tv_sec; + long unsigned int tv_nsec; + } poll; + }; +}; + +struct cpu_context_save { + __u32 r4; + __u32 r5; + __u32 r6; + __u32 r7; + __u32 r8; + __u32 r9; + __u32 sl; + __u32 fp; + __u32 sp; + __u32 pc; + __u32 extra[2]; +}; + +struct fp_hard_struct { + unsigned int save[35]; +}; + +struct fp_soft_struct { + unsigned int save[35]; +}; + +union fp_state { + struct fp_hard_struct hard; + struct fp_soft_struct soft; +}; + +struct vfp_hard_struct { + __u64 fpregs[16]; + __u32 fpexc; + __u32 fpscr; + __u32 fpinst; + __u32 fpinst2; + __u32 cpu; + long: 32; +}; + +union vfp_state { + struct vfp_hard_struct hard; +}; + +struct thread_info { + long unsigned int flags; + int preempt_count; + __u32 cpu; + __u32 cpu_domain; + struct cpu_context_save cpu_context; + __u32 abi_syscall; + long unsigned int tp_value[2]; + long: 32; + union fp_state fpstate; + long: 
32; + union vfp_state vfpstate; +}; + +struct llist_node { + struct llist_node *next; +}; + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; +}; + +struct load_weight { + long unsigned int weight; + u32 inv_weight; +}; + +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + long unsigned int load_avg; + long unsigned int runnable_avg; + long unsigned int util_avg; + unsigned int util_est; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct cfs_rq; + +struct sched_entity { + struct load_weight load; + struct rb_node run_node; + long: 32; + u64 deadline; + u64 min_vruntime; + u64 min_slice; + struct list_head group_node; + unsigned char on_rq; + unsigned char sched_delayed; + unsigned char rel_deadline; + unsigned char custom_slice; + long: 32; + u64 exec_start; + u64 sum_exec_runtime; + u64 prev_sum_exec_runtime; + u64 vruntime; + s64 vlag; + u64 slice; + u64 nr_migrations; + int depth; + struct sched_entity *parent; + struct cfs_rq *cfs_rq; + struct cfs_rq *my_q; + long unsigned int runnable_weight; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_avg avg; +}; + +struct sched_rt_entity { + struct list_head run_list; + long unsigned int timeout; + long unsigned int watchdog_stamp; + unsigned int time_slice; + short unsigned int on_rq; + short unsigned int on_list; + struct sched_rt_entity *back; +}; + +struct timerqueue_node { + struct rb_node node; + long: 32; + ktime_t expires; +}; + +enum hrtimer_restart { + HRTIMER_NORESTART = 0, + HRTIMER_RESTART = 1, +}; + +struct hrtimer_clock_base; + +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; + long: 32; +}; + +struct sched_dl_entity; + +typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); + +struct task_struct; + +typedef struct task_struct * (*dl_server_pick_f)(struct sched_dl_entity *); + +struct rq; + +struct sched_dl_entity { + struct rb_node rb_node; + long: 32; + u64 dl_runtime; + u64 dl_deadline; + u64 dl_period; + u64 dl_bw; + u64 dl_density; + s64 runtime; + u64 deadline; + unsigned int flags; + unsigned int dl_throttled: 1; + unsigned int dl_yielded: 1; + unsigned int dl_non_contending: 1; + unsigned int dl_overrun: 1; + unsigned int dl_server: 1; + unsigned int dl_defer: 1; + unsigned int dl_defer_armed: 1; + unsigned int dl_defer_running: 1; + struct hrtimer dl_timer; + struct hrtimer inactive_timer; + struct rq *rq; + dl_server_has_tasks_f server_has_tasks; + dl_server_pick_f server_pick_task; + struct sched_dl_entity *pi_se; +}; + +struct scx_dsq_list_node { + struct list_head node; + u32 flags; + u32 priv; +}; + +struct scx_dispatch_q; + +struct cgroup; + +struct sched_ext_entity { + struct scx_dispatch_q *dsq; + struct scx_dsq_list_node dsq_list; + struct rb_node dsq_priq; + u32 dsq_seq; + u32 dsq_flags; + u32 flags; + u32 weight; + s32 sticky_cpu; + s32 holding_cpu; + u32 kf_mask; + struct task_struct *kf_tasks[2]; + atomic_long_t ops_state; + struct list_head runnable_node; + long unsigned int runnable_at; + long: 32; + u64 core_sched_at; + u64 ddsp_dsq_id; + u64 ddsp_enq_flags; + u64 slice; + u64 dsq_vtime; + bool disallow; + struct cgroup *cgrp_moving_from; + struct list_head tasks_node; 
+}; + +struct uclamp_se { + unsigned int value: 11; + unsigned int bucket_id: 3; + unsigned int active: 1; + unsigned int user_defined: 1; +}; + +struct sched_statistics {}; + +struct cpumask { + long unsigned int bits[128]; +}; + +typedef struct cpumask cpumask_t; + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; + u8 need_mb; + } b; + u32 s; +}; + +struct sched_info { + long unsigned int pcount; + long: 32; + long long unsigned int run_delay; + long long unsigned int last_arrival; + long long unsigned int last_queued; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +struct prev_cputime { + u64 utime; + u64 stime; + raw_spinlock_t lock; + long: 32; +}; + +struct seqcount { + unsigned int sequence; +}; + +typedef struct seqcount seqcount_t; + +enum vtime_state { + VTIME_INACTIVE = 0, + VTIME_IDLE = 1, + VTIME_SYS = 2, + VTIME_USER = 3, + VTIME_GUEST = 4, +}; + +struct vtime { + seqcount_t seqcount; + long: 32; + long long unsigned int starttime; + enum vtime_state state; + unsigned int cpu; + u64 utime; + u64 stime; + u64 gtime; +}; + +struct rb_root { + struct rb_node *rb_node; +}; + +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +struct posix_cputimers { + struct posix_cputimer_base bases[3]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +struct sem_undo_list; + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +struct sysv_shm { + struct list_head shm_clist; +}; + +typedef struct { + long unsigned int sig[2]; +} sigset_t; + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +struct seccomp_filter; + +struct seccomp { + int mode; + atomic_t filter_count; + struct seccomp_filter *filter; +}; + +struct syscall_user_dispatch {}; + +struct spinlock { + union { + struct raw_spinlock rlock; + }; +}; + +typedef struct spinlock spinlock_t; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_io_accounting { + u64 rchar; + u64 wchar; + u64 syscr; + u64 syscw; + u64 read_bytes; + u64 write_bytes; + u64 cancelled_write_bytes; +}; + +typedef struct { + long unsigned int bits[1]; +} nodemask_t; + +struct seqcount_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_spinlock seqcount_spinlock_t; + +struct tlbflush_unmap_batch {}; + +struct page; + +struct page_frag { + struct page *page; + __u16 offset; + __u16 size; +}; + +struct kmap_ctrl {}; + +struct timer_list { + struct hlist_node entry; + long unsigned int expires; + void (*function)(struct timer_list *); + u32 flags; +}; + +struct llist_head { + struct llist_node *first; +}; + +struct sched_class; + +struct task_group; + +struct rcu_node; + +struct mm_struct; + +struct address_space; + +struct pid; + +struct cred; + +struct key; + +struct nameidata; + +struct fs_struct; + +struct files_struct; + +struct io_uring_task; + +struct nsproxy; + +struct signal_struct; + +struct sighand_struct; + +struct rt_mutex_waiter; + +struct bio_list; + +struct blk_plug; + +struct reclaim_state; + +struct io_context; + +struct capture_control; + +struct kernel_siginfo; + +typedef struct kernel_siginfo kernel_siginfo_t; + +struct css_set; + +struct robust_list_head; + +struct futex_pi_state; + +struct perf_event_context; + +struct rseq; + +struct task_delay_info; + +struct mem_cgroup; + +struct obj_cgroup; + +struct 
gendisk; + +struct uprobe_task; + +struct vm_struct; + +struct bpf_local_storage; + +struct bpf_run_ctx; + +struct bpf_net_context; + +struct task_struct { + struct thread_info thread_info; + unsigned int __state; + unsigned int saved_state; + void *stack; + refcount_t usage; + unsigned int flags; + unsigned int ptrace; + int on_cpu; + struct __call_single_node wake_entry; + unsigned int wakee_flips; + long unsigned int wakee_flip_decay_ts; + struct task_struct *last_wakee; + int recent_used_cpu; + int wake_cpu; + int on_rq; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_entity se; + struct sched_rt_entity rt; + long: 32; + struct sched_dl_entity dl; + struct sched_dl_entity *dl_server; + long: 32; + struct sched_ext_entity scx; + const struct sched_class *sched_class; + struct rb_node core_node; + long unsigned int core_cookie; + unsigned int core_occupation; + struct task_group *sched_task_group; + struct uclamp_se uclamp_req[2]; + struct uclamp_se uclamp[2]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_statistics stats; + unsigned int btrace_seq; + unsigned int policy; + long unsigned int max_allowed_capacity; + int nr_cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t *user_cpus_ptr; + cpumask_t cpus_mask; + void *migration_pending; + short unsigned int migration_disabled; + short unsigned int migration_flags; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; + long unsigned int rcu_tasks_nvcsw; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; + int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_exit_cpu; + struct list_head rcu_tasks_exit_list; + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + struct list_head trc_holdout_list; + struct list_head trc_blkd_node; + int trc_blkd_cpu; + struct sched_info sched_info; + struct list_head tasks; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct address_space *faults_disabled_mapping; + int exit_state; + int exit_code; + int exit_signal; + int pdeath_signal; + long unsigned int jobctl; + unsigned int personality; + unsigned int sched_reset_on_fork: 1; + unsigned int sched_contributes_to_load: 1; + unsigned int sched_migrated: 1; + long: 29; + unsigned int sched_remote_wakeup: 1; + unsigned int sched_rt_mutex: 1; + unsigned int in_execve: 1; + unsigned int in_iowait: 1; + unsigned int no_cgroup_migration: 1; + unsigned int frozen: 1; + unsigned int use_memdelay: 1; + unsigned int in_memstall: 1; + unsigned int in_eventfd: 1; + unsigned int in_thrashing: 1; + long unsigned int atomic_flags; + struct restart_block restart_block; + pid_t pid; + pid_t tgid; + struct task_struct *real_parent; + struct task_struct *parent; + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + struct list_head ptraced; + struct list_head ptrace_entry; + struct pid *thread_pid; + struct hlist_node pid_links[4]; + struct list_head thread_node; + struct completion *vfork_done; + int *set_child_tid; + int *clear_child_tid; + void *worker_private; + u64 utime; + u64 stime; + u64 gtime; + struct prev_cputime 
prev_cputime; + struct vtime vtime; + atomic_t tick_dep_mask; + long unsigned int nvcsw; + long unsigned int nivcsw; + long: 32; + u64 start_time; + u64 start_boottime; + long unsigned int min_flt; + long unsigned int maj_flt; + struct posix_cputimers posix_cputimers; + const struct cred *ptracer_cred; + const struct cred *real_cred; + const struct cred *cred; + struct key *cached_requested_key; + char comm[16]; + struct nameidata *nameidata; + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; + long unsigned int last_switch_count; + long unsigned int last_switch_time; + struct fs_struct *fs; + struct files_struct *files; + struct io_uring_task *io_uring; + struct nsproxy *nsproxy; + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + sigset_t saved_sigmask; + struct sigpending pending; + long unsigned int sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + struct callback_head *task_works; + struct seccomp seccomp; + struct syscall_user_dispatch syscall_dispatch; + long: 32; + u64 parent_exec_id; + u64 self_exec_id; + spinlock_t alloc_lock; + raw_spinlock_t pi_lock; + struct wake_q_node wake_q; + struct rb_root_cached pi_waiters; + struct task_struct *pi_top_task; + struct rt_mutex_waiter *pi_blocked_on; + void *journal_info; + struct bio_list *bio_list; + struct blk_plug *plug; + struct reclaim_state *reclaim_state; + struct io_context *io_context; + struct capture_control *capture_control; + long unsigned int ptrace_message; + kernel_siginfo_t *last_siginfo; + long: 32; + struct task_io_accounting ioac; + unsigned int psi_flags; + long: 32; + u64 acct_rss_mem1; + u64 acct_vm_mem1; + u64 acct_timexpd; + nodemask_t mems_allowed; + seqcount_spinlock_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + struct css_set *cgroups; + struct list_head cg_list; + struct robust_list_head *robust_list; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; + u8 perf_recursion[4]; + struct perf_event_context *perf_event_ctxp; + struct mutex perf_event_mutex; + struct list_head perf_event_list; + struct rseq *rseq; + u32 rseq_len; + u32 rseq_sig; + long unsigned int rseq_event_mask; + int mm_cid; + int last_mm_cid; + int migrate_from_cpu; + int mm_cid_active; + struct callback_head cid_work; + struct tlbflush_unmap_batch tlb_ubc; + struct pipe_inode_info *splice_pipe; + struct page_frag task_frag; + struct task_delay_info *delays; + int nr_dirtied; + int nr_dirtied_pause; + long unsigned int dirty_paused_when; + u64 timer_slack_ns; + u64 default_timer_slack_ns; + unsigned int kasan_depth; + int curr_ret_stack; + int curr_ret_depth; + long unsigned int *ret_stack; + long long unsigned int ftrace_timestamp; + long long unsigned int ftrace_sleeptime; + atomic_t trace_overrun; + atomic_t tracing_graph_pause; + long unsigned int trace_recursion; + unsigned int memcg_nr_pages_over_high; + struct mem_cgroup *active_memcg; + struct obj_cgroup *objcg; + struct gendisk *throttle_disk; + struct uprobe_task *utask; + struct kmap_ctrl kmap_ctrl; + struct callback_head rcu; + refcount_t rcu_users; + int pagefault_disabled; + struct task_struct *oom_reaper_list; + struct timer_list oom_reaper_timer; + struct vm_struct *stack_vm_area; + refcount_t stack_refcount; + struct bpf_local_storage *bpf_storage; + struct bpf_run_ctx *bpf_ctx; + struct bpf_net_context *bpf_net_context; + struct llist_head kretprobe_instances; + struct thread_struct thread; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct page_pool; + +struct dev_pagemap; + +struct page { + long unsigned int flags; + union { + struct { + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + struct list_head buddy_list; + struct list_head pcp_list; + }; + struct address_space *mapping; + union { + long unsigned int index; + long unsigned int share; + }; + long unsigned int private; + }; + struct { + long unsigned int pp_magic; + struct page_pool *pp; + long unsigned int _pp_mapping_pad; + long unsigned int dma_addr; + atomic_long_t pp_ref_count; + }; + struct { + long unsigned int compound_head; + }; + struct { + struct dev_pagemap *pgmap; + void *zone_device_data; + }; + struct callback_head callback_head; + }; + union { + unsigned int page_type; + atomic_t _mapcount; + }; + atomic_t _refcount; + long unsigned int memcg_data; +}; + +typedef u32 pteval_t; + +typedef pteval_t pgprot_t; + +typedef long unsigned int vm_flags_t; + +struct vm_userfaultfd_ctx {}; + +struct vma_lock; + +struct anon_vma; + +struct vm_operations_struct; + +struct vm_area_struct { + union { + struct { + long unsigned int vm_start; + long unsigned int vm_end; + }; + struct callback_head vm_rcu; + }; + struct mm_struct *vm_mm; + pgprot_t vm_page_prot; + union { + const vm_flags_t vm_flags; + vm_flags_t __vm_flags; + }; + bool detached; + int vm_lock_seq; + struct vma_lock *vm_lock; + struct { + struct rb_node rb; + long unsigned int rb_subtree_last; + } shared; + struct list_head anon_vma_chain; + struct anon_vma *anon_vma; + const struct vm_operations_struct *vm_ops; + long unsigned int vm_pgoff; + struct file *vm_file; + void *vm_private_data; + atomic_long_t swap_readahead_info; + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +}; + +typedef u32 pmdval_t; + +typedef pteval_t pte_t; + +typedef pmdval_t pmd_t; + +typedef pmdval_t pgd_t[2]; + +typedef struct page *pgtable_t; + +enum { + ___GFP_DMA_BIT = 0, + ___GFP_HIGHMEM_BIT = 1, + ___GFP_DMA32_BIT = 2, + ___GFP_MOVABLE_BIT = 3, + ___GFP_RECLAIMABLE_BIT = 4, + ___GFP_HIGH_BIT = 5, + ___GFP_IO_BIT = 6, + ___GFP_FS_BIT = 7, + ___GFP_ZERO_BIT = 8, + ___GFP_UNUSED_BIT = 9, + ___GFP_DIRECT_RECLAIM_BIT = 10, + ___GFP_KSWAPD_RECLAIM_BIT = 11, + ___GFP_WRITE_BIT = 12, + ___GFP_NOWARN_BIT = 13, + ___GFP_RETRY_MAYFAIL_BIT = 14, + ___GFP_NOFAIL_BIT = 15, + ___GFP_NORETRY_BIT = 16, + ___GFP_MEMALLOC_BIT = 17, + ___GFP_COMP_BIT = 18, + ___GFP_NOMEMALLOC_BIT = 19, + ___GFP_HARDWALL_BIT = 20, + ___GFP_THISNODE_BIT = 21, + ___GFP_ACCOUNT_BIT = 22, + ___GFP_ZEROTAGS_BIT = 23, + ___GFP_NO_OBJ_EXT_BIT = 24, + ___GFP_LAST_BIT = 25, +}; + +struct alloc_tag_counters { + u64 bytes; + u64 calls; +}; + +struct alloc_tag { + struct codetag ct; + struct alloc_tag_counters *counters; + long: 32; +}; + +enum pid_type { + PIDTYPE_PID = 0, + PIDTYPE_TGID = 1, + PIDTYPE_PGID = 2, + PIDTYPE_SID = 3, + PIDTYPE_MAX = 4, +}; + +struct xarray { + spinlock_t xa_lock; + gfp_t xa_flags; + void *xa_head; +}; + +struct idr { + struct xarray idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +struct proc_ns_operations; + +struct ns_common { + struct dentry *stashed; + const struct proc_ns_operations *ops; + unsigned int inum; + refcount_t count; +}; + +struct kmem_cache; + +struct fs_pin; + +struct user_namespace; + +struct ucounts; + +struct pid_namespace { + struct idr idr; + struct callback_head rcu; + unsigned int pid_allocated; + struct 
task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; + struct fs_pin *bacct; + struct user_namespace *user_ns; + struct ucounts *ucounts; + int reboot; + struct ns_common ns; + int memfd_noexec_scope; +}; + +typedef struct { + arch_rwlock_t raw_lock; +} rwlock_t; + +struct seqcount_raw_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; + +struct hrtimer_cpu_base; + +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_raw_spinlock_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(); + ktime_t offset; +}; + +struct rlimit { + __kernel_ulong_t rlim_cur; + __kernel_ulong_t rlim_max; +}; + +typedef void __signalfn_t(int); + +typedef __signalfn_t *__sighandler_t; + +typedef void __restorefn_t(); + +typedef __restorefn_t *__sigrestore_t; + +union sigval { + int sival_int; + void *sival_ptr; +}; + +typedef union sigval sigval_t; + +union __sifields { + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + } _kill; + struct { + __kernel_timer_t _tid; + int _overrun; + sigval_t _sigval; + int _sys_private; + } _timer; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + sigval_t _sigval; + } _rt; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + int _status; + __kernel_clock_t _utime; + __kernel_clock_t _stime; + } _sigchld; + struct { + void *_addr; + union { + int _trapno; + short int _addr_lsb; + struct { + char _dummy_bnd[4]; + void *_lower; + void *_upper; + } _addr_bnd; + struct { + char _dummy_pkey[4]; + __u32 _pkey; + } _addr_pkey; + struct { + long unsigned int _data; + __u32 _type; + __u32 _flags; + } _perf; + }; + } _sigfault; + struct { + long int _band; + int _fd; + } _sigpoll; + struct { + void *_call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; +}; + +struct kernel_siginfo { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; +}; + +struct sigaction { + __sighandler_t sa_handler; + long unsigned int sa_flags; + __sigrestore_t sa_restorer; + sigset_t sa_mask; +}; + +struct k_sigaction { + struct sigaction sa; +}; + +struct rseq { + __u32 cpu_id_start; + __u32 cpu_id; + __u64 rseq_cs; + __u32 flags; + __u32 node_id; + __u32 mm_cid; + char end[0]; + long: 32; +}; + +typedef struct { + seqcount_spinlock_t seqcount; + spinlock_t lock; +} seqlock_t; + +typedef struct { + uid_t val; +} kuid_t; + +typedef struct { + gid_t val; +} kgid_t; + +struct work_struct; + +typedef void (*work_func_t)(struct work_struct *); + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +}; + +struct rhash_head { + struct rhash_head *next; +}; + +struct scx_dispatch_q { + raw_spinlock_t lock; + struct list_head list; + struct rb_root priq; + u32 nr; + u32 seq; + u64 id; + struct rhash_head hash_node; + struct llist_node free_node; + struct callback_head rcu; +}; + +struct rq_flags; + +struct affinity_context; + +struct sched_class { + int uclamp_enabled; + void (*enqueue_task)(struct rq *, struct task_struct *, int); + bool (*dequeue_task)(struct rq *, struct task_struct *, int); + void (*yield_task)(struct rq *); + bool (*yield_to_task)(struct rq *, struct task_struct *); + void (*wakeup_preempt)(struct rq *, struct task_struct *, int); + int (*balance)(struct rq *, struct task_struct *, struct rq_flags *); + struct task_struct * (*pick_task)(struct rq *); + struct task_struct * 
(*pick_next_task)(struct rq *, struct task_struct *); + void (*put_prev_task)(struct rq *, struct task_struct *, struct task_struct *); + void (*set_next_task)(struct rq *, struct task_struct *, bool); + int (*select_task_rq)(struct task_struct *, int, int); + void (*migrate_task_rq)(struct task_struct *, int); + void (*task_woken)(struct rq *, struct task_struct *); + void (*set_cpus_allowed)(struct task_struct *, struct affinity_context *); + void (*rq_online)(struct rq *); + void (*rq_offline)(struct rq *); + struct rq * (*find_lock_rq)(struct task_struct *, struct rq *); + void (*task_tick)(struct rq *, struct task_struct *, int); + void (*task_fork)(struct task_struct *); + void (*task_dead)(struct task_struct *); + void (*switching_to)(struct rq *, struct task_struct *); + void (*switched_from)(struct rq *, struct task_struct *); + void (*switched_to)(struct rq *, struct task_struct *); + void (*reweight_task)(struct rq *, struct task_struct *, const struct load_weight *); + void (*prio_changed)(struct rq *, struct task_struct *, int); + unsigned int (*get_rr_interval)(struct rq *, struct task_struct *); + void (*update_curr)(struct rq *); + void (*task_change_group)(struct task_struct *); + int (*task_is_throttled)(struct task_struct *, int); +}; + +typedef struct {} lockdep_map_p; + +struct maple_tree { + union { + spinlock_t ma_lock; + lockdep_map_p ma_external_lock; + }; + unsigned int ma_flags; + void *ma_root; +}; + +struct rw_semaphore { + atomic_long_t count; + atomic_long_t owner; + struct optimistic_spin_queue osq; + raw_spinlock_t wait_lock; + struct list_head wait_list; +}; + +struct percpu_counter { + raw_spinlock_t lock; + long: 32; + s64 count; + struct list_head list; + s32 *counters; + long: 32; +}; + +typedef struct { + atomic64_t id; + atomic_t vmalloc_seq; + long unsigned int sigpage; +} mm_context_t; + +struct xol_area; + +struct uprobes_state { + struct xol_area *xol_area; +}; + +struct mm_cid; + +struct linux_binfmt; + +struct kioctx_table; + +struct mm_struct { + struct { + struct { + atomic_t mm_count; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct maple_tree mm_mt; + long unsigned int mmap_base; + long unsigned int mmap_legacy_base; + long unsigned int task_size; + pgd_t *pgd; + atomic_t membarrier_state; + atomic_t mm_users; + struct mm_cid *pcpu_cid; + long unsigned int mm_cid_next_scan; + unsigned int nr_cpus_allowed; + atomic_t max_nr_cid; + raw_spinlock_t cpus_allowed_lock; + atomic_long_t pgtables_bytes; + int map_count; + spinlock_t page_table_lock; + struct rw_semaphore mmap_lock; + struct list_head mmlist; + int mm_lock_seq; + long unsigned int hiwater_rss; + long unsigned int hiwater_vm; + long unsigned int total_vm; + long unsigned int locked_vm; + atomic64_t pinned_vm; + long unsigned int data_vm; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int def_flags; + seqcount_t write_protect_seq; + spinlock_t arg_lock; + long unsigned int start_code; + long unsigned int end_code; + long unsigned int start_data; + long unsigned int end_data; + long unsigned int start_brk; + long unsigned int brk; + long unsigned int start_stack; + long unsigned int arg_start; + long unsigned int arg_end; + long unsigned int env_start; + long unsigned int env_end; + long unsigned int saved_auxv[46]; + long: 32; + struct percpu_counter rss_stat[4]; + struct linux_binfmt *binfmt; + long: 32; + 
mm_context_t context; + long unsigned int flags; + spinlock_t ioctx_lock; + struct kioctx_table *ioctx_table; + struct task_struct *owner; + struct user_namespace *user_ns; + struct file *exe_file; + atomic_t tlb_flush_pending; + struct uprobes_state uprobes_state; + struct work_struct async_put_work; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + long unsigned int cpu_bitmap[0]; +}; + +typedef u32 errseq_t; + +struct address_space_operations; + +struct address_space { + struct inode *host; + struct xarray i_pages; + struct rw_semaphore invalidate_lock; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + struct rb_root_cached i_mmap; + long unsigned int nrpages; + long unsigned int writeback_index; + const struct address_space_operations *a_ops; + long unsigned int flags; + errseq_t wb_err; + spinlock_t i_private_lock; + struct list_head i_private_list; + struct rw_semaphore i_mmap_rwsem; + void *i_private_data; +}; + +struct wait_queue_head { + spinlock_t lock; + struct list_head head; +}; + +typedef struct wait_queue_head wait_queue_head_t; + +struct upid { + int nr; + struct pid_namespace *ns; +}; + +struct pid { + refcount_t count; + unsigned int level; + spinlock_t lock; + struct dentry *stashed; + u64 ino; + struct hlist_head tasks[4]; + struct hlist_head inodes; + wait_queue_head_t wait_pidfd; + struct callback_head rcu; + struct upid numbers[0]; +}; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + +typedef struct { + u64 val; +} kernel_cap_t; + +struct user_struct; + +struct group_info; + +struct cred { + atomic_long_t usage; + kuid_t uid; + kgid_t gid; + kuid_t suid; + kgid_t sgid; + kuid_t euid; + kgid_t egid; + kuid_t fsuid; + kgid_t fsgid; + unsigned int securebits; + kernel_cap_t cap_inheritable; + kernel_cap_t cap_permitted; + kernel_cap_t cap_effective; + kernel_cap_t cap_bset; + kernel_cap_t cap_ambient; + unsigned char jit_keyring; + struct key *session_keyring; + struct key *process_keyring; + struct key *thread_keyring; + struct key *request_key_auth; + struct user_struct *user; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct group_info *group_info; + union { + int non_rcu; + struct callback_head rcu; + }; + long: 32; +}; + +typedef int32_t key_serial_t; + +typedef __s64 time64_t; + +typedef uint32_t key_perm_t; + +struct key_type; + +struct key_tag; + +struct keyring_index_key { + long unsigned int hash; + union { + struct { + u16 desc_len; + char desc[2]; + }; + long unsigned int x; + }; + struct key_type *type; + struct key_tag *domain_tag; + const char *description; +}; + +union key_payload { + void *rcu_data0; + void *data[4]; +}; + +struct assoc_array_ptr; + +struct assoc_array { + struct assoc_array_ptr *root; + long unsigned int nr_leaves_on_tree; +}; + +struct key_user; + +struct key_restriction; + +struct key { + refcount_t usage; + key_serial_t serial; + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + struct rw_semaphore sem; + struct key_user *user; + void *security; + long: 32; + union { + time64_t expiry; + time64_t revoked_at; + }; + time64_t last_used_at; + kuid_t uid; + kgid_t gid; + key_perm_t perm; + short unsigned int quotalen; + short unsigned int datalen; + short int state; + long unsigned int flags; + union { + struct keyring_index_key index_key; + struct { 
+ long unsigned int hash; + long unsigned int len_desc; + struct key_type *type; + struct key_tag *domain_tag; + char *description; + }; + }; + union { + union key_payload payload; + struct { + struct list_head name_link; + struct assoc_array keys; + }; + }; + struct key_restriction *restrict_link; +}; + +struct uts_namespace; + +struct ipc_namespace; + +struct mnt_namespace; + +struct net; + +struct time_namespace; + +struct cgroup_namespace; + +struct nsproxy { + refcount_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct time_namespace *time_ns; + struct time_namespace *time_ns_for_children; + struct cgroup_namespace *cgroup_ns; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; +}; + +struct pacct_struct { + int ac_flag; + long int ac_exitcode; + long unsigned int ac_mem; + long: 32; + u64 ac_utime; + u64 ac_stime; + long unsigned int ac_minflt; + long unsigned int ac_majflt; +}; + +struct core_state; + +struct tty_struct; + +struct taskstats; + +struct signal_struct { + refcount_t sigcnt; + atomic_t live; + int nr_threads; + int quick_threads; + struct list_head thread_head; + wait_queue_head_t wait_chldexit; + struct task_struct *curr_target; + struct sigpending shared_pending; + struct hlist_head multiprocess; + int group_exit_code; + int notify_count; + struct task_struct *group_exec_task; + int group_stop_count; + unsigned int flags; + struct core_state *core_state; + unsigned int is_child_subreaper: 1; + unsigned int has_child_subreaper: 1; + unsigned int next_posix_timer_id; + struct hlist_head posix_timers; + struct hlist_head ignored_posix_timers; + long: 32; + struct hrtimer real_timer; + ktime_t it_real_incr; + struct cpu_itimer it[2]; + struct thread_group_cputimer cputimer; + struct posix_cputimers posix_cputimers; + struct pid *pids[4]; + atomic_t tick_dep_mask; + struct pid *tty_old_pgrp; + int leader; + struct tty_struct *tty; + seqlock_t stats_lock; + u64 utime; + u64 stime; + u64 cutime; + u64 cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + long unsigned int cnvcsw; + long unsigned int cnivcsw; + long unsigned int min_flt; + long unsigned int maj_flt; + long unsigned int cmin_flt; + long unsigned int cmaj_flt; + long unsigned int inblock; + long unsigned int oublock; + long unsigned int cinblock; + long unsigned int coublock; + long unsigned int maxrss; + long unsigned int cmaxrss; + struct task_io_accounting ioac; + long long unsigned int sum_sched_runtime; + struct rlimit rlim[16]; + struct pacct_struct pacct; + struct taskstats *stats; + bool oom_flag_origin; + short int oom_score_adj; + short int oom_score_adj_min; + struct mm_struct *oom_mm; + struct mutex cred_guard_mutex; + struct rw_semaphore exec_update_lock; + long: 32; +}; + +struct sighand_struct { + spinlock_t siglock; + refcount_t count; + wait_queue_head_t signalfd_wqh; + struct k_sigaction action[64]; +}; + +struct io_cq; + +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + short unsigned int ioprio; + spinlock_t lock; + struct xarray icq_tree; + struct io_cq *icq_hint; + struct hlist_head icq_list; + struct work_struct release_work; +}; + +enum uprobe_task_state { + UTASK_RUNNING = 0, 
+ UTASK_SSTEP = 1, + UTASK_SSTEP_ACK = 2, + UTASK_SSTEP_TRAPPED = 3, +}; + +struct arch_uprobe_task { + u32 backup; + long unsigned int saved_trap_no; +}; + +struct return_instance; + +struct uprobe; + +struct arch_uprobe; + +struct uprobe_task { + enum uprobe_task_state state; + unsigned int depth; + struct return_instance *return_instances; + union { + struct { + struct arch_uprobe_task autask; + long unsigned int vaddr; + }; + struct { + struct callback_head dup_xol_work; + long unsigned int dup_xol_addr; + }; + }; + struct uprobe *active_uprobe; + struct timer_list ri_timer; + long unsigned int xol_vaddr; + struct arch_uprobe *auprobe; +}; + +struct timespec64 { + time64_t tv_sec; + long int tv_nsec; + long: 32; +}; + +typedef u32 probes_opcode_t; + +struct arch_probes_insn; + +typedef void probes_insn_handler_t(probes_opcode_t, struct arch_probes_insn *, struct pt_regs *); + +typedef long unsigned int probes_check_cc(long unsigned int); + +typedef void probes_insn_singlestep_t(probes_opcode_t, struct arch_probes_insn *, struct pt_regs *); + +typedef void probes_insn_fn_t(); + +struct arch_probes_insn { + probes_opcode_t *insn; + probes_insn_handler_t *insn_handler; + probes_check_cc *insn_check_cc; + probes_insn_singlestep_t *insn_singlestep; + probes_insn_fn_t *insn_fn; + int stack_space; + long unsigned int register_usage_flags; + bool kprobe_direct_exec; +}; + +typedef u32 uprobe_opcode_t; + +struct arch_uprobe { + u8 insn[4]; + long unsigned int ixol[2]; + uprobe_opcode_t bpinsn; + bool simulate; + u32 pcreg; + void (*prehandler)(struct arch_uprobe *, struct arch_uprobe_task *, struct pt_regs *); + void (*posthandler)(struct arch_uprobe *, struct arch_uprobe_task *, struct pt_regs *); + struct arch_probes_insn asi; +}; + +enum hprobe_state { + HPROBE_LEASED = 0, + HPROBE_STABLE = 1, + HPROBE_GONE = 2, + HPROBE_CONSUMED = 3, +}; + +struct hprobe { + enum hprobe_state state; + int srcu_idx; + struct uprobe *uprobe; +}; + +struct return_consumer { + __u64 cookie; + __u64 id; +}; + +struct return_instance { + struct hprobe hprobe; + long unsigned int func; + long unsigned int stack; + long unsigned int orig_ret_vaddr; + bool chained; + int consumers_cnt; + struct return_instance *next; + struct callback_head rcu; + long: 32; + struct return_consumer consumers[0]; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct workqueue_struct; + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + struct workqueue_struct *wq; + int cpu; +}; + +struct vmem_altmap { + long unsigned int base_pfn; + const long unsigned int end_pfn; + const long unsigned int reserve; + long unsigned int free; + long unsigned int align; + long unsigned int alloc; + bool inaccessible; +}; + +struct percpu_ref_data; + +struct percpu_ref { + long unsigned int percpu_count_ptr; + struct percpu_ref_data *data; +}; + +enum memory_type { + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_COHERENT = 2, + MEMORY_DEVICE_FS_DAX = 3, + MEMORY_DEVICE_GENERIC = 4, + MEMORY_DEVICE_PCI_P2PDMA = 5, +}; + +struct range { + u64 start; + u64 end; +}; + +struct dev_pagemap_ops; + +struct dev_pagemap { + struct vmem_altmap altmap; + struct percpu_ref ref; + struct completion done; + enum memory_type type; + unsigned int flags; + long unsigned int vmemmap_shift; + const struct dev_pagemap_ops *ops; + void *owner; + int nr_range; + long: 32; + union { + struct range range; + struct { + struct {} __empty_ranges; + struct range ranges[0]; + }; + }; +}; + +typedef struct { + long unsigned int val; +} swp_entry_t; + 
+struct folio { + union { + struct { + long unsigned int flags; + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + }; + struct address_space *mapping; + long unsigned int index; + union { + void *private; + swp_entry_t swap; + }; + atomic_t _mapcount; + atomic_t _refcount; + long unsigned int memcg_data; + }; + struct page page; + }; + union { + struct { + long unsigned int _flags_1; + long unsigned int _head_1; + atomic_t _large_mapcount; + atomic_t _entire_mapcount; + atomic_t _nr_pages_mapped; + atomic_t _pincount; + }; + struct page __page_1; + }; + union { + struct { + long unsigned int _flags_2; + long unsigned int _head_2; + void *_hugetlb_subpool; + void *_hugetlb_cgroup; + void *_hugetlb_cgroup_rsvd; + void *_hugetlb_hwpoison; + }; + struct { + long unsigned int _flags_2a; + long unsigned int _head_2a; + struct list_head _deferred_list; + }; + struct page __page_2; + }; +}; + +typedef struct { + atomic_t refcnt; +} file_ref_t; + +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +}; + +struct file_ra_state { + long unsigned int start; + unsigned int size; + unsigned int async_size; + unsigned int ra_pages; + unsigned int mmap_miss; + long: 32; + loff_t prev_pos; +}; + +typedef struct { + long unsigned int v; +} freeptr_t; + +struct fown_struct; + +struct file { + file_ref_t f_ref; + spinlock_t f_lock; + fmode_t f_mode; + const struct file_operations *f_op; + struct address_space *f_mapping; + void *private_data; + struct inode *f_inode; + unsigned int f_flags; + unsigned int f_iocb_flags; + const struct cred *f_cred; + struct path f_path; + union { + struct mutex f_pos_lock; + u64 f_pipe; + }; + loff_t f_pos; + struct fown_struct *f_owner; + errseq_t f_wb_err; + errseq_t f_sb_err; + struct hlist_head *f_ep; + union { + struct callback_head f_task_work; + struct llist_node f_llist; + struct file_ra_state f_ra; + freeptr_t f_freeptr; + }; +}; + +struct vma_lock { + struct rw_semaphore lock; +}; + +typedef unsigned int vm_fault_t; + +struct vm_fault; + +struct vm_operations_struct { + void (*open)(struct vm_area_struct *); + void (*close)(struct vm_area_struct *); + int (*may_split)(struct vm_area_struct *, long unsigned int); + int (*mremap)(struct vm_area_struct *); + int (*mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int, long unsigned int); + vm_fault_t (*fault)(struct vm_fault *); + vm_fault_t (*huge_fault)(struct vm_fault *, unsigned int); + vm_fault_t (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int); + long unsigned int (*pagesize)(struct vm_area_struct *); + vm_fault_t (*page_mkwrite)(struct vm_fault *); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *); + int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int); + const char * (*name)(struct vm_area_struct *); + struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int); +}; + +struct mm_cid { + u64 time; + int cid; + int recent_cid; +}; + +enum fault_flag { + FAULT_FLAG_WRITE = 1, + FAULT_FLAG_MKWRITE = 2, + FAULT_FLAG_ALLOW_RETRY = 4, + FAULT_FLAG_RETRY_NOWAIT = 8, + FAULT_FLAG_KILLABLE = 16, + FAULT_FLAG_TRIED = 32, + FAULT_FLAG_USER = 64, + FAULT_FLAG_REMOTE = 128, + FAULT_FLAG_INSTRUCTION = 256, + FAULT_FLAG_INTERRUPTIBLE = 512, + FAULT_FLAG_UNSHARE = 1024, + FAULT_FLAG_ORIG_PTE_VALID = 2048, + FAULT_FLAG_VMA_LOCK = 4096, +}; + +typedef struct { + pgd_t pgd; +} p4d_t; + +typedef struct { + p4d_t p4d; +} pud_t; + +struct vm_fault { + const struct { + struct 
vm_area_struct *vma; + gfp_t gfp_mask; + long unsigned int pgoff; + long unsigned int address; + long unsigned int real_address; + }; + enum fault_flag flags; + pmd_t *pmd; + pud_t *pud; + union { + pte_t orig_pte; + pmd_t orig_pmd; + }; + struct page *cow_page; + struct page *page; + pte_t *pte; + spinlock_t *ptl; + pgtable_t prealloc_pte; +}; + +struct rcu_segcblist { + struct callback_head *head; + struct callback_head **tails[4]; + long unsigned int gp_seq[4]; + atomic_long_t len; + long int seglen[4]; + u8 flags; +}; + +struct srcu_node; + +struct srcu_data { + atomic_long_t srcu_lock_count[2]; + atomic_long_t srcu_unlock_count[2]; + int srcu_reader_flavor; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t lock; + struct rcu_segcblist srcu_cblist; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + bool srcu_cblist_invoking; + struct timer_list delay_work; + struct work_struct work; + struct callback_head srcu_barrier_head; + struct srcu_node *mynode; + long unsigned int grpmask; + int cpu; + struct srcu_struct *ssp; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct srcu_node { + spinlock_t lock; + long unsigned int srcu_have_cbs[4]; + long unsigned int srcu_data_have_cbs[4]; + long unsigned int srcu_gp_seq_needed_exp; + struct srcu_node *srcu_parent; + int grplo; + int grphi; +}; + +struct srcu_usage; + +struct srcu_struct { + unsigned int srcu_idx; + struct srcu_data *sda; + struct lockdep_map dep_map; + struct srcu_usage *srcu_sup; +}; + +struct srcu_usage { + struct srcu_node *node; + struct srcu_node *level[3]; + int srcu_size_state; + struct mutex srcu_cb_mutex; + spinlock_t lock; + struct mutex srcu_gp_mutex; + long unsigned int srcu_gp_seq; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + long unsigned int srcu_gp_start; + long unsigned int srcu_last_gp_end; + long unsigned int srcu_size_jiffies; + long unsigned int srcu_n_lock_retries; + long unsigned int srcu_n_exp_nodelay; + bool sda_is_static; + long unsigned int srcu_barrier_seq; + struct mutex srcu_barrier_mutex; + struct completion srcu_barrier_completion; + atomic_t srcu_barrier_cpu_cnt; + long unsigned int reschedule_jiffies; + long unsigned int reschedule_count; + struct delayed_work work; + struct srcu_struct *srcu_ssp; +}; + +typedef void percpu_ref_func_t(struct percpu_ref *); + +struct percpu_ref_data { + atomic_long_t count; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic: 1; + bool allow_reinit: 1; + struct callback_head rcu; + struct percpu_ref *ref; +}; + +struct processor { + void (*_data_abort)(long unsigned int); + long unsigned int (*_prefetch_abort)(long unsigned int); + void (*_proc_init)(); + void (*check_bugs)(); + void (*_proc_fin)(); + void (*reset)(long unsigned int, bool); + int (*_do_idle)(); + void (*dcache_clean_area)(void *, int); + void (*switch_mm)(phys_addr_t, struct mm_struct *); + void (*set_pte_ext)(pte_t *, pte_t, unsigned int); + unsigned int suspend_size; + void (*do_suspend)(void *); + void (*do_resume)(void *); +}; + +struct cpu_tlb_fns { + void (*flush_user_range)(long unsigned int, long unsigned int, struct vm_area_struct *); + void (*flush_kern_range)(long unsigned int, long unsigned int); + long unsigned int tlb_flags; +}; + +enum 
kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_DMA = 0, + KMALLOC_RANDOM_START = 0, + KMALLOC_RANDOM_END = 0, + KMALLOC_RECLAIM = 1, + KMALLOC_CGROUP = 2, + NR_KMALLOC_TYPES = 3, +}; + +struct list_lru_node; + +struct list_lru { + struct list_lru_node *node; + struct list_head list; + int shrinker_id; + bool memcg_aware; + struct xarray xa; +}; + +struct kernfs_root; + +struct kernfs_elem_dir { + long unsigned int subdirs; + struct rb_root children; + struct kernfs_root *root; + long unsigned int rev; +}; + +struct kernfs_elem_symlink { + struct kernfs_node *target_kn; +}; + +struct kernfs_ops; + +struct kernfs_open_node; + +struct kernfs_elem_attr { + const struct kernfs_ops *ops; + struct kernfs_open_node *open; + loff_t size; + struct kernfs_node *notify_next; + long: 32; +}; + +struct kernfs_iattrs; + +struct kernfs_node { + atomic_t count; + atomic_t active; + struct kernfs_node *parent; + const char *name; + struct rb_node rb; + const void *ns; + unsigned int hash; + short unsigned int flags; + umode_t mode; + union { + struct kernfs_elem_dir dir; + struct kernfs_elem_symlink symlink; + struct kernfs_elem_attr attr; + }; + u64 id; + void *priv; + struct kernfs_iattrs *iattr; + struct callback_head rcu; +}; + +struct kernfs_open_file; + +struct kernfs_ops { + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); + size_t atomic_write_len; + bool prealloc; + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); + loff_t (*llseek)(struct kernfs_open_file *, loff_t, int); +}; + +struct kernfs_open_file { + struct kernfs_node *kn; + struct file *file; + struct seq_file *seq_file; + void *priv; + struct mutex mutex; + struct mutex prealloc_mutex; + int event; + struct list_head list; + char *prealloc_buf; + size_t atomic_write_len; + bool mmapped: 1; + bool released: 1; + const struct vm_operations_struct *vm_ops; +}; + +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET = 1, + KOBJ_NS_TYPES = 2, +}; + +struct sock; + +struct kobj_ns_type_operations { + enum kobj_ns_type type; + bool (*current_may_mount)(); + void * (*grab_current_ns)(); + const void * (*netlink_ns)(struct sock *); + const void * (*initial_ns)(); + void (*drop_ns)(void *); +}; + +struct kstat { + u32 result_mask; + umode_t mode; + unsigned int nlink; + uint32_t blksize; + u64 attributes; + u64 attributes_mask; + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; + u64 blocks; + u64 mnt_id; + u32 dio_mem_align; + u32 dio_offset_align; + u64 change_cookie; + u64 subvol; + u32 atomic_write_unit_min; + u32 atomic_write_unit_max; + u32 atomic_write_segments_max; + long: 32; +}; + +struct attribute { + const char *name; + umode_t mode; +}; + +struct bin_attribute { + struct attribute attr; + size_t size; + void *private; + struct address_space * (*f_mapping)(); + ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*read_new)(struct file *, struct kobject *, const 
struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, loff_t, int); + int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *, struct vm_area_struct *); +}; + +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, struct attribute *, int); + umode_t (*is_bin_visible)(struct kobject *, const struct bin_attribute *, int); + size_t (*bin_size)(struct kobject *, const struct bin_attribute *, int); + struct attribute **attrs; + union { + struct bin_attribute **bin_attrs; + const struct bin_attribute * const *bin_attrs_new; + }; +}; + +struct sysfs_ops { + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); +}; + +struct kset_uevent_ops; + +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +}; + +struct kobj_type { + void (*release)(struct kobject *); + const struct sysfs_ops *sysfs_ops; + const struct attribute_group **default_groups; + const struct kobj_ns_type_operations * (*child_ns_type)(const struct kobject *); + const void * (*namespace)(const struct kobject *); + void (*get_ownership)(const struct kobject *, kuid_t *, kgid_t *); +}; + +struct kobj_uevent_env { + char *argv[3]; + char *envp[64]; + int envp_idx; + char buf[2048]; + int buflen; +}; + +struct kset_uevent_ops { + int (* const filter)(const struct kobject *); + const char * (* const name)(const struct kobject *); + int (* const uevent)(const struct kobject *, struct kobj_uevent_env *); +}; + +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active: 1; + unsigned int in_hrtirq: 1; + unsigned int hang_detected: 1; + unsigned int softirq_activated: 1; + unsigned int online: 1; + unsigned int nr_events; + short unsigned int nr_retries; + short unsigned int nr_hangs; + unsigned int max_hang_time; + ktime_t expires_next; + struct hrtimer *next_timer; + long: 32; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + long: 32; + struct hrtimer_clock_base clock_base[8]; +}; + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct tracepoint_ext { + int (*regfunc)(); + void (*unregfunc)(); + unsigned int faultable: 1; +}; + +struct tracepoint { + const char *name; + struct static_key_false key; + struct static_call_key *static_call_key; + void *static_call_tramp; + void *iterator; + void *probestub; + struct tracepoint_func *funcs; + struct tracepoint_ext *ext; +}; + +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; + u32 writable_size; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct shrink_control { + gfp_t gfp_mask; + int nid; + long unsigned int nr_to_scan; + long unsigned int nr_scanned; + struct mem_cgroup *memcg; +}; + +struct shrinker { + long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *); + long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *); + long int batch; + int seeks; + unsigned int flags; + refcount_t refcount; + struct completion done; + struct callback_head rcu; + void 
*private_data; + struct list_head list; + int id; + atomic_long_t *nr_deferred; +}; + +struct dev_pagemap_ops { + void (*page_free)(struct page *); + vm_fault_t (*migrate_to_ram)(struct vm_fault *); + int (*memory_failure)(struct dev_pagemap *, long unsigned int, long unsigned int, int); +}; + +struct hlist_bl_node; + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next; + struct hlist_bl_node **pprev; +}; + +struct lockref { + union { + __u64 lock_count; + struct { + spinlock_t lock; + int count; + }; + }; +}; + +struct qstr { + union { + struct { + u32 hash; + u32 len; + }; + u64 hash_len; + }; + const unsigned char *name; + long: 32; +}; + +struct dentry_operations; + +struct dentry { + unsigned int d_flags; + seqcount_spinlock_t d_seq; + struct hlist_bl_node d_hash; + struct dentry *d_parent; + long: 32; + struct qstr d_name; + struct inode *d_inode; + unsigned char d_iname[36]; + const struct dentry_operations *d_op; + struct super_block *d_sb; + long unsigned int d_time; + void *d_fsdata; + struct lockref d_lockref; + union { + struct list_head d_lru; + wait_queue_head_t *d_wait; + }; + struct hlist_node d_sib; + struct hlist_head d_children; + union { + struct hlist_node d_alias; + struct hlist_bl_node d_in_lookup_hash; + struct callback_head d_rcu; + } d_u; + long: 32; +}; + +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = 1, + WRITE_LIFE_SHORT = 2, + WRITE_LIFE_MEDIUM = 3, + WRITE_LIFE_LONG = 4, + WRITE_LIFE_EXTREME = 5, +} __attribute__((mode(byte))); + +struct posix_acl; + +struct inode_operations; + +struct bdi_writeback; + +struct file_lock_context; + +struct cdev; + +struct fsnotify_mark_connector; + +struct inode { + umode_t i_mode; + short unsigned int i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + long unsigned int i_ino; + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + time64_t i_atime_sec; + time64_t i_mtime_sec; + time64_t i_ctime_sec; + u32 i_atime_nsec; + u32 i_mtime_nsec; + u32 i_ctime_nsec; + u32 i_generation; + spinlock_t i_lock; + short unsigned int i_bytes; + u8 i_blkbits; + enum rw_hint i_write_hint; + blkcnt_t i_blocks; + seqcount_t i_size_seqcount; + u32 i_state; + struct rw_semaphore i_rwsem; + long unsigned int dirtied_when; + long unsigned int dirtied_time_when; + struct hlist_node i_hash; + struct list_head i_io_list; + struct bdi_writeback *i_wb; + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + struct list_head i_lru; + struct list_head i_sb_list; + struct list_head i_wb_list; + union { + struct hlist_head i_dentry; + struct callback_head i_rcu; + }; + long: 32; + atomic64_t i_version; + atomic64_t i_sequence; + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; + atomic_t i_readcount; + union { + const struct file_operations *i_fop; + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct cdev *i_cdev; + char *i_link; + unsigned int i_dir_seq; + }; + __u32 i_fsnotify_mask; + struct fsnotify_mark_connector *i_fsnotify_marks; + void *i_private; + long: 32; +}; + +enum d_real_type { + D_REAL_DATA = 0, + D_REAL_METADATA = 1, +}; + +struct dentry_operations { + int 
(*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, struct qstr *); + int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char * (*d_dname)(struct dentry *, char *, int); + struct vfsmount * (*d_automount)(struct path *); + int (*d_manage)(const struct path *, bool); + struct dentry * (*d_real)(struct dentry *, enum d_real_type); + long: 32; + long: 32; + long: 32; +}; + +struct mtd_info; + +typedef long long int qsize_t; + +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; + struct list_head dqi_dirty_list; + long unsigned int dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + long: 32; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; + void *dqi_priv; + long: 32; +}; + +struct quota_format_ops; + +struct quota_info { + unsigned int flags; + struct rw_semaphore dqio_sem; + struct inode *files[3]; + struct mem_dqinfo info[3]; + const struct quota_format_ops *ops[3]; + long: 32; +}; + +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + struct callback_head cb_head; +}; + +struct rcuwait { + struct task_struct *task; +}; + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int *read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +}; + +struct sb_writers { + short unsigned int frozen; + int freeze_kcount; + int freeze_ucount; + struct percpu_rw_semaphore rw_sem[3]; +}; + +typedef struct { + __u8 b[16]; +} uuid_t; + +struct super_operations; + +struct dquot_operations; + +struct quotactl_ops; + +struct export_operations; + +struct xattr_handler; + +struct block_device; + +struct backing_dev_info; + +struct fsnotify_sb_info; + +struct super_block { + struct list_head s_list; + dev_t s_dev; + unsigned char s_blocksize_bits; + long unsigned int s_blocksize; + long: 32; + loff_t s_maxbytes; + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + long unsigned int s_flags; + long unsigned int s_iflags; + long unsigned int s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; + const struct xattr_handler * const *s_xattr; + struct hlist_bl_head s_roots; + struct list_head s_mounts; + struct block_device *s_bdev; + struct file *s_bdev_file; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; + struct quota_info s_dquot; + struct sb_writers s_writers; + void *s_fs_info; + u32 s_time_gran; + time64_t s_time_min; + time64_t s_time_max; + u32 s_fsnotify_mask; + struct fsnotify_sb_info *s_fsnotify_info; + char s_id[32]; + uuid_t s_uuid; + u8 s_uuid_len; + char s_sysfs_name[37]; + unsigned int s_max_links; + struct mutex s_vfs_rename_mutex; + const char *s_subtype; + const struct dentry_operations *s_d_op; + struct shrinker *s_shrink; + atomic_long_t s_remove_count; + int s_readonly_remount; + errseq_t s_wb_err; + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + struct user_namespace *s_user_ns; + struct list_lru s_dentry_lru; + struct list_lru 
s_inode_lru; + struct callback_head rcu; + struct work_struct destroy_work; + struct mutex s_sync_lock; + int s_stack_depth; + long: 32; + spinlock_t s_inode_list_lock; + struct list_head s_inodes; + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct mnt_idmap; + +struct vfsmount { + struct dentry *mnt_root; + struct super_block *mnt_sb; + int mnt_flags; + struct mnt_idmap *mnt_idmap; +}; + +struct list_lru_one { + struct list_head list; + long int nr_items; + spinlock_t lock; +}; + +struct list_lru_node { + struct list_lru_one lru; + atomic_long_t nr_items; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum migrate_mode { + MIGRATE_ASYNC = 0, + MIGRATE_SYNC_LIGHT = 1, + MIGRATE_SYNC = 2, +}; + +struct exception_table_entry { + long unsigned int insn; + long unsigned int fixup; +}; + +struct key_tag { + struct callback_head rcu; + refcount_t usage; + bool removed; +}; + +typedef int (*request_key_actor_t)(struct key *, void *); + +struct key_preparsed_payload; + +struct key_match_data; + +struct kernel_pkey_params; + +struct kernel_pkey_query; + +struct key_type { + const char *name; + size_t def_datalen; + unsigned int flags; + int (*vet_description)(const char *); + int (*preparse)(struct key_preparsed_payload *); + void (*free_preparse)(struct key_preparsed_payload *); + int (*instantiate)(struct key *, struct key_preparsed_payload *); + int (*update)(struct key *, struct key_preparsed_payload *); + int (*match_preparse)(struct key_match_data *); + void (*match_free)(struct key_match_data *); + void (*revoke)(struct key *); + void (*destroy)(struct key *); + void (*describe)(const struct key *, struct seq_file *); + long int (*read)(const struct key *, char *, size_t); + request_key_actor_t request_key; + struct key_restriction * (*lookup_restriction)(const char *); + int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); + struct list_head link; + struct lock_class_key lock_class; +}; + +typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +struct user_struct { + refcount_t __count; + long: 32; + struct percpu_counter epoll_watches; + long unsigned int unix_inflight; + atomic_long_t pipe_bufs; + struct hlist_node uidhash_node; + kuid_t uid; + atomic_long_t locked_vm; + struct ratelimit_state ratelimit; + long: 32; +}; + +struct group_info { + refcount_t usage; + int ngroups; + kgid_t gid[0]; +}; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +struct request_queue; + +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct callback_head __rcu_head; + }; + unsigned int flags; +}; + +typedef struct { + uid_t val; +} vfsuid_t; + +typedef struct 
{ + gid_t val; +} vfsgid_t; + +struct wait_page_queue; + +struct kiocb { + struct file *ki_filp; + long: 32; + loff_t ki_pos; + void (*ki_complete)(struct kiocb *, long int); + void *private; + int ki_flags; + u16 ki_ioprio; + union { + struct wait_page_queue *ki_waitq; + ssize_t (*dio_complete)(void *); + }; + long: 32; +}; + +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + union { + kuid_t ia_uid; + vfsuid_t ia_vfsuid; + }; + union { + kgid_t ia_gid; + vfsgid_t ia_vfsgid; + }; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct file *ia_file; + long: 32; +}; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +enum quota_type { + USRQUOTA = 0, + GRPQUOTA = 1, + PRJQUOTA = 2, +}; + +struct kqid { + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; +}; + +struct mem_dqblk { + qsize_t dqb_bhardlimit; + qsize_t dqb_bsoftlimit; + qsize_t dqb_curspace; + qsize_t dqb_rsvspace; + qsize_t dqb_ihardlimit; + qsize_t dqb_isoftlimit; + qsize_t dqb_curinodes; + time64_t dqb_btime; + time64_t dqb_itime; +}; + +struct dquot { + struct hlist_node dq_hash; + struct list_head dq_inuse; + struct list_head dq_free; + struct list_head dq_dirty; + struct mutex dq_lock; + spinlock_t dq_dqb_lock; + atomic_t dq_count; + struct super_block *dq_sb; + struct kqid dq_id; + loff_t dq_off; + long unsigned int dq_flags; + long: 32; + struct mem_dqblk dq_dqb; +}; + +struct quota_format_type { + int qf_fmt_id; + const struct quota_format_ops *qf_ops; + struct module *qf_owner; + struct quota_format_type *qf_next; +}; + +struct quota_format_ops { + int (*check_quota_file)(struct super_block *, int); + int (*read_file_info)(struct super_block *, int); + int (*write_file_info)(struct super_block *, int); + int (*free_file_info)(struct super_block *, int); + int (*read_dqblk)(struct dquot *); + int (*commit_dqblk)(struct dquot *); + int (*release_dqblk)(struct dquot *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct dquot_operations { + int (*write_dquot)(struct dquot *); + struct dquot * (*alloc_dquot)(struct super_block *, int); + void (*destroy_dquot)(struct dquot *); + int (*acquire_dquot)(struct dquot *); + int (*release_dquot)(struct dquot *); + int (*mark_dirty)(struct dquot *); + int (*write_info)(struct super_block *, int); + qsize_t * (*get_reserved_space)(struct inode *); + int (*get_projid)(struct inode *, kprojid_t *); + int (*get_inode_usage)(struct inode *, qsize_t *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct qc_dqblk { + int d_fieldmask; + long: 32; + u64 d_spc_hardlimit; + u64 d_spc_softlimit; + u64 d_ino_hardlimit; + u64 d_ino_softlimit; + u64 d_space; + u64 d_ino_count; + s64 d_ino_timer; + s64 d_spc_timer; + int d_ino_warns; + int d_spc_warns; + u64 d_rt_spc_hardlimit; + u64 d_rt_spc_softlimit; + u64 d_rt_space; + s64 d_rt_spc_timer; + int d_rt_spc_warns; + long: 32; +}; + +struct qc_type_state { + unsigned int flags; + unsigned int spc_timelimit; + unsigned int ino_timelimit; + unsigned int rt_spc_timelimit; + unsigned int spc_warnlimit; + unsigned int ino_warnlimit; + unsigned int rt_spc_warnlimit; + long: 32; + long long unsigned int ino; + blkcnt_t blocks; + blkcnt_t nextents; +}; + +struct qc_state { + unsigned int s_incoredqs; + long: 32; + struct qc_type_state s_state[3]; +}; + +struct qc_info { + int i_fieldmask; + unsigned int i_flags; + unsigned int i_spc_timelimit; + unsigned int 
i_ino_timelimit; + unsigned int i_rt_spc_timelimit; + unsigned int i_spc_warnlimit; + unsigned int i_ino_warnlimit; + unsigned int i_rt_spc_warnlimit; +}; + +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int (*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int (*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct writeback_control; + +struct readahead_control; + +struct swap_info_struct; + +struct address_space_operations { + int (*writepage)(struct page *, struct writeback_control *); + int (*read_folio)(struct file *, struct folio *); + int (*writepages)(struct address_space *, struct writeback_control *); + bool (*dirty_folio)(struct address_space *, struct folio *); + void (*readahead)(struct readahead_control *); + int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, struct folio **, void **); + int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct folio *, void *); + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidate_folio)(struct folio *, size_t, size_t); + bool (*release_folio)(struct folio *, gfp_t); + void (*free_folio)(struct folio *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); + int (*migrate_folio)(struct address_space *, struct folio *, struct folio *, enum migrate_mode); + int (*launder_folio)(struct folio *); + bool (*is_partially_uptodate)(struct folio *, size_t, size_t); + void (*is_dirty_writeback)(struct folio *, bool *, bool *); + int (*error_remove_folio)(struct address_space *, struct folio *); + int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); + void (*swap_deactivate)(struct file *); + int (*swap_rw)(struct kiocb *, struct iov_iter *); +}; + +struct fiemap_extent_info; + +struct fileattr; + +struct offset_ctx; + +struct inode_operations { + struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); + const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); + int (*permission)(struct mnt_idmap *, struct inode *, int); + struct posix_acl * (*get_inode_acl)(struct inode *, int, bool); + int (*readlink)(struct dentry *, char *, int); + int (*create)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, bool); + int (*link)(struct dentry *, struct inode *, struct dentry *); + int (*unlink)(struct inode *, struct dentry *); + int (*symlink)(struct mnt_idmap *, struct inode *, struct dentry *, const char *); + int (*mkdir)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t); + int (*rmdir)(struct inode *, struct dentry *); + int (*mknod)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, dev_t); + int (*rename)(struct mnt_idmap *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); + int (*setattr)(struct mnt_idmap *, struct dentry *, struct iattr *); + int (*getattr)(struct mnt_idmap *, const struct path *, struct kstat *, u32, unsigned int); + ssize_t (*listxattr)(struct dentry 
*, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); + int (*update_time)(struct inode *, int); + int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); + int (*tmpfile)(struct mnt_idmap *, struct inode *, struct file *, umode_t); + struct posix_acl * (*get_acl)(struct mnt_idmap *, struct dentry *, int); + int (*set_acl)(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); + int (*fileattr_set)(struct mnt_idmap *, struct dentry *, struct fileattr *); + int (*fileattr_get)(struct dentry *, struct fileattr *); + struct offset_ctx * (*get_offset_ctx)(struct inode *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct fown_struct { + struct file *file; + rwlock_t lock; + struct pid *pid; + enum pid_type pid_type; + kuid_t uid; + kuid_t euid; + int signum; +}; + +enum freeze_holder { + FREEZE_HOLDER_KERNEL = 1, + FREEZE_HOLDER_USERSPACE = 2, + FREEZE_MAY_NEST = 4, +}; + +struct kstatfs; + +struct super_operations { + struct inode * (*alloc_inode)(struct super_block *); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + void (*dirty_inode)(struct inode *, int); + int (*write_inode)(struct inode *, struct writeback_control *); + int (*drop_inode)(struct inode *); + void (*evict_inode)(struct inode *); + void (*put_super)(struct super_block *); + int (*sync_fs)(struct super_block *, int); + int (*freeze_super)(struct super_block *, enum freeze_holder); + int (*freeze_fs)(struct super_block *); + int (*thaw_super)(struct super_block *, enum freeze_holder); + int (*unfreeze_fs)(struct super_block *); + int (*statfs)(struct dentry *, struct kstatfs *); + int (*remount_fs)(struct super_block *, int *, char *); + void (*umount_begin)(struct super_block *); + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); + long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); + long int (*free_cached_objects)(struct super_block *, struct shrink_control *); + void (*shutdown)(struct super_block *); +}; + +struct fid; + +struct iomap; + +struct export_operations { + int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); + struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); + struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); + int (*get_name)(struct dentry *, char *, struct dentry *); + struct dentry * (*get_parent)(struct dentry *); + int (*commit_metadata)(struct inode *); + int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); + int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); + int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); + long unsigned int flags; +}; + +struct xattr_handler { + const char *name; + const char *prefix; + int flags; + bool (*list)(struct dentry *); + int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); + int (*set)(const struct xattr_handler *, struct mnt_idmap *, struct dentry *, struct inode *, const char *, const void *, size_t, int); +}; + +typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); + +struct dir_context { + filldir_t actor; + long: 32; + loff_t pos; +}; + +struct offset_ctx { + struct maple_tree mt; + long 
unsigned int next_offset; +}; + +struct p_log; + +struct fs_parameter; + +struct fs_parse_result; + +typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *); + +struct fs_parameter_spec { + const char *name; + fs_param_type *type; + u8 opt; + short unsigned int flags; + const void *data; +}; + +typedef __u32 Elf32_Addr; + +typedef __u16 Elf32_Half; + +typedef __u32 Elf32_Off; + +typedef __u32 Elf32_Word; + +struct elf32_sym { + Elf32_Word st_name; + Elf32_Addr st_value; + Elf32_Word st_size; + unsigned char st_info; + unsigned char st_other; + Elf32_Half st_shndx; +}; + +struct elf32_shdr { + Elf32_Word sh_name; + Elf32_Word sh_type; + Elf32_Word sh_flags; + Elf32_Addr sh_addr; + Elf32_Off sh_offset; + Elf32_Word sh_size; + Elf32_Word sh_link; + Elf32_Word sh_info; + Elf32_Word sh_addralign; + Elf32_Word sh_entsize; +}; + +struct kparam_string; + +struct kparam_array; + +struct kernel_param { + const char *name; + struct module *mod; + const struct kernel_param_ops *ops; + const u16 perm; + s8 level; + u8 flags; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +struct kparam_array { + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +struct error_injection_entry { + long unsigned int addr; + int etype; +}; + +struct unwind_idx { + long unsigned int addr_offset; + long unsigned int insn; +}; + +struct unwind_table { + struct list_head list; + struct list_head mod_list; + const struct unwind_idx *start; + const struct unwind_idx *origin; + const struct unwind_idx *stop; + long unsigned int begin_addr; + long unsigned int end_addr; +}; + +struct plt_entries { + u32 ldr[16]; + u32 lit[16]; +}; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +struct kernel_symbol { + long unsigned int value; + const char *name; + const char *namespace; +}; + +struct nsset; + +struct proc_ns_operations { + const char *name; + const char *real_ns_name; + int type; + struct ns_common * (*get)(struct task_struct *); + void (*put)(struct ns_common *); + int (*install)(struct nsset *, struct ns_common *); + struct user_namespace * (*owner)(struct ns_common *); + struct ns_common * (*get_parent)(struct ns_common *); +}; + +struct l2x0_regs; + +struct outer_cache_fns { + void (*inv_range)(long unsigned int, long unsigned int); + void (*clean_range)(long unsigned int, long unsigned int); + void (*flush_range)(long unsigned int, long unsigned int); + void (*flush_all)(); + void (*disable)(); + void (*sync)(); + void (*resume)(); + void (*write_sec)(long unsigned int, unsigned int); + void (*configure)(const struct l2x0_regs *); +}; + +struct l2x0_regs { + long unsigned int phy_base; + long unsigned int aux_ctrl; + long unsigned int tag_latency; + long unsigned int data_latency; + long unsigned int filter_start; + long unsigned int filter_end; + long unsigned int prefetch_ctrl; + long unsigned int pwr_ctrl; + long unsigned int ctrl; + long unsigned int aux2_ctrl; +}; + +struct mpidr_hash { + u32 mask; + u32 shift_aff[3]; + u32 bits; +}; + +struct sleep_save_sp { + u32 
*save_ptr_stash; + u32 save_ptr_stash_phys; +}; + +struct wait_queue_entry; + +typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); + +struct wait_queue_entry { + unsigned int flags; + void *private; + wait_queue_func_t func; + struct list_head entry; +}; + +typedef struct wait_queue_entry wait_queue_entry_t; + +struct cacheline_padding { + char x[0]; +}; + +struct rcu_work { + struct work_struct work; + struct callback_head rcu; + struct workqueue_struct *wq; +}; + +struct task_cputime { + u64 stime; + u64 utime; + long long unsigned int sum_exec_runtime; +}; + +struct ucounts { + struct hlist_node node; + struct user_namespace *ns; + kuid_t uid; + atomic_t count; + atomic_long_t ucount[10]; + atomic_long_t rlimit[4]; +}; + +struct cgroup_subsys; + +struct cgroup_subsys_state { + struct cgroup *cgroup; + struct cgroup_subsys *ss; + struct percpu_ref refcnt; + struct list_head sibling; + struct list_head children; + struct list_head rstat_css_node; + int id; + unsigned int flags; + u64 serial_nr; + atomic_t online_cnt; + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + struct cgroup_subsys_state *parent; + int nr_descendants; +}; + +struct cgroup_file { + struct kernfs_node *kn; + long unsigned int notified_at; + struct timer_list notify_timer; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; + u64 forceidle_sum; + u64 ntime; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + struct bpf_prog_array *effective[28]; + struct hlist_head progs[28]; + u8 flags[28]; + struct list_head storages; + struct bpf_prog_array *inactive; + struct percpu_ref refcnt; + struct work_struct release_work; +}; + +struct cgroup_freezer_state { + bool freeze; + bool e_freeze; + int nr_frozen_descendants; + int nr_frozen_tasks; +}; + +struct cgroup_root; + +struct cgroup_rstat_cpu; + +struct psi_group; + +struct cgroup { + struct cgroup_subsys_state self; + long unsigned int flags; + int level; + int max_depth; + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + int nr_populated_csets; + int nr_populated_domain_children; + int nr_populated_threaded_children; + int nr_threaded_children; + struct kernfs_node *kn; + struct cgroup_file procs_file; + struct cgroup_file events_file; + struct cgroup_file psi_files[3]; + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + struct cgroup_subsys_state *subsys[13]; + int nr_dying_subsys[13]; + struct cgroup_root *root; + struct list_head cset_links; + struct list_head e_csets[13]; + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; + struct cgroup_rstat_cpu *rstat_cpu; + struct list_head rstat_css_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad_; + struct cgroup *rstat_flush_next; + long: 32; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; + struct list_head pidlists; + struct mutex pidlist_mutex; + wait_queue_head_t offline_waitq; + struct work_struct release_agent_work; + struct psi_group *psi; + struct cgroup_bpf bpf; + struct cgroup_freezer_state freezer; + struct bpf_local_storage *bpf_cgrp_storage; + struct cgroup *ancestors[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bio; + +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +struct reclaim_state { + long unsigned int reclaimed; +}; + 
+struct css_set { + struct cgroup_subsys_state *subsys[13]; + refcount_t refcount; + struct css_set *dom_cset; + struct cgroup *dfl_cgrp; + int nr_tasks; + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + struct list_head task_iters; + struct list_head e_cset_node[13]; + struct list_head threaded_csets; + struct list_head threaded_csets_node; + struct hlist_node hlist; + struct list_head cgrp_links; + struct list_head mg_src_preload_node; + struct list_head mg_dst_preload_node; + struct list_head mg_node; + struct cgroup *mg_src_cgrp; + struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + bool dead; + struct callback_head callback_head; +}; + +struct mem_cgroup_id { + int id; + refcount_t ref; +}; + +struct page_counter { + atomic_long_t usage; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + long unsigned int emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + long unsigned int elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + long unsigned int watermark; + long unsigned int local_watermark; + long unsigned int failcnt; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + bool protection_support; + long unsigned int min; + long unsigned int low; + long unsigned int high; + long unsigned int max; + struct page_counter *parent; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct vmpressure { + long unsigned int scanned; + long unsigned int reclaimed; + long unsigned int tree_scanned; + long unsigned int tree_reclaimed; + spinlock_t sr_lock; + struct list_head events; + struct mutex events_lock; + struct work_struct work; +}; + +struct fprop_global { + struct percpu_counter events; + unsigned int period; + seqcount_t sequence; +}; + +struct wb_domain { + spinlock_t lock; + long: 32; + struct fprop_global completions; + struct timer_list period_timer; + long unsigned int period_time; + long unsigned int dirty_limit_tstamp; + long unsigned int dirty_limit; +}; + +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; + +struct memcg_cgwb_frn { + u64 bdi_id; + int memcg_id; + long: 32; + u64 at; + struct wb_completion done; +}; + +struct memcg_vmstats; + +struct memcg_vmstats_percpu; + +struct mem_cgroup_per_node; + +struct mem_cgroup { + struct cgroup_subsys_state css; + struct mem_cgroup_id id; + long: 32; + long: 32; + struct page_counter memory; + union { + struct page_counter swap; + struct page_counter memsw; + }; + struct list_head memory_peaks; + struct list_head swap_peaks; + spinlock_t peaks_lock; + struct work_struct high_work; + long unsigned int zswap_max; + bool zswap_writeback; + struct vmpressure vmpressure; + bool oom_group; + int swappiness; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct cgroup_file swap_events_file; + struct memcg_vmstats *vmstats; + atomic_long_t memory_events[9]; + atomic_long_t memory_events_local[9]; + long unsigned int socket_pressure; + int kmemcg_id; + struct obj_cgroup *objcg; + struct obj_cgroup *orig_objcg; + struct list_head objcg_list; + struct memcg_vmstats_percpu *vmstats_percpu; + struct list_head cgwb_list; + struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[4]; + struct 
mem_cgroup_per_node *nodeinfo[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct callback_head rcu; + }; +}; + +struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; +}; + +struct uid_gid_map { + union { + struct { + struct uid_gid_extent extent[5]; + u32 nr_extents; + }; + struct { + struct uid_gid_extent *forward; + struct uid_gid_extent *reverse; + }; + }; +}; + +struct ctl_table; + +struct ctl_table_root; + +struct ctl_table_set; + +struct ctl_dir; + +struct ctl_node; + +struct ctl_table_header { + union { + struct { + const struct ctl_table *ctl_table; + int ctl_table_size; + int used; + int count; + int nreg; + }; + struct callback_head rcu; + }; + struct completion *unregistering; + const struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; + struct hlist_head inodes; + enum { + SYSCTL_TABLE_TYPE_DEFAULT = 0, + SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY = 1, + } type; +}; + +struct ctl_dir { + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct binfmt_misc; + +struct user_namespace { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + struct uid_gid_map projid_map; + struct user_namespace *parent; + int level; + kuid_t owner; + kgid_t group; + struct ns_common ns; + long unsigned int flags; + bool parent_could_setfcap; + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; + struct work_struct work; + struct ctl_table_set set; + struct ctl_table_header *sysctls; + struct ucounts *ucounts; + long int ucount_max[10]; + long int rlimit_max[4]; + struct binfmt_misc *binfmt_misc; +}; + +struct zswap_lruvec_state { + atomic_long_t nr_disk_swapins; +}; + +struct free_area { + struct list_head free_list[4]; + long unsigned int nr_free; +}; + +enum node_stat_item { + NR_LRU_BASE = 0, + NR_INACTIVE_ANON = 0, + NR_ACTIVE_ANON = 1, + NR_INACTIVE_FILE = 2, + NR_ACTIVE_FILE = 3, + NR_UNEVICTABLE = 4, + NR_SLAB_RECLAIMABLE_B = 5, + NR_SLAB_UNRECLAIMABLE_B = 6, + NR_ISOLATED_ANON = 7, + NR_ISOLATED_FILE = 8, + WORKINGSET_NODES = 9, + WORKINGSET_REFAULT_BASE = 10, + WORKINGSET_REFAULT_ANON = 10, + WORKINGSET_REFAULT_FILE = 11, + WORKINGSET_ACTIVATE_BASE = 12, + WORKINGSET_ACTIVATE_ANON = 12, + WORKINGSET_ACTIVATE_FILE = 13, + WORKINGSET_RESTORE_BASE = 14, + WORKINGSET_RESTORE_ANON = 14, + WORKINGSET_RESTORE_FILE = 15, + WORKINGSET_NODERECLAIM = 16, + NR_ANON_MAPPED = 17, + NR_FILE_MAPPED = 18, + NR_FILE_PAGES = 19, + NR_FILE_DIRTY = 20, + NR_WRITEBACK = 21, + NR_WRITEBACK_TEMP = 22, + NR_SHMEM = 23, + NR_SHMEM_THPS = 24, + NR_SHMEM_PMDMAPPED = 25, + NR_FILE_THPS = 26, + NR_FILE_PMDMAPPED = 27, + NR_ANON_THPS = 28, + NR_VMSCAN_WRITE = 29, + NR_VMSCAN_IMMEDIATE = 30, + NR_DIRTIED = 31, + NR_WRITTEN = 32, + NR_THROTTLED_WRITTEN = 33, + NR_KERNEL_MISC_RECLAIMABLE = 34, + NR_FOLL_PIN_ACQUIRED = 35, + NR_FOLL_PIN_RELEASED = 36, + NR_KERNEL_STACK_KB = 37, + NR_PAGETABLE = 38, + NR_SECONDARY_PAGETABLE = 39, + NR_SWAPCACHE = 40, + PGDEMOTE_KSWAPD = 41, + PGDEMOTE_DIRECT = 42, + PGDEMOTE_KHUGEPAGED = 43, + NR_VM_NODE_STAT_ITEMS = 44, +}; + +enum lru_list { + 
LRU_INACTIVE_ANON = 0, + LRU_ACTIVE_ANON = 1, + LRU_INACTIVE_FILE = 2, + LRU_ACTIVE_FILE = 3, + LRU_UNEVICTABLE = 4, + NR_LRU_LISTS = 5, +}; + +struct pglist_data; + +struct lruvec { + struct list_head lists[5]; + spinlock_t lru_lock; + long unsigned int anon_cost; + long unsigned int file_cost; + atomic_long_t nonresident_age; + long unsigned int refaults[2]; + long unsigned int flags; + struct pglist_data *pgdat; + struct zswap_lruvec_state zswap_lruvec_state; +}; + +struct per_cpu_pages; + +struct per_cpu_zonestat; + +struct zone { + long unsigned int _watermark[4]; + long unsigned int watermark_boost; + long unsigned int nr_reserved_highatomic; + long unsigned int nr_free_highatomic; + long int lowmem_reserve[2]; + struct pglist_data *zone_pgdat; + struct per_cpu_pages *per_cpu_pageset; + struct per_cpu_zonestat *per_cpu_zonestats; + int pageset_high_min; + int pageset_high_max; + int pageset_batch; + long unsigned int *pageblock_flags; + long unsigned int zone_start_pfn; + atomic_long_t managed_pages; + long unsigned int spanned_pages; + long unsigned int present_pages; + const char *name; + int initialized; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + struct free_area free_area[11]; + long unsigned int flags; + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + long unsigned int percpu_drift_mark; + long unsigned int compact_cached_free_pfn; + long unsigned int compact_cached_migrate_pfn[2]; + long unsigned int compact_init_migrate_pfn; + long unsigned int compact_init_free_pfn; + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; + bool compact_blockskip_flush; + bool contiguous; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad3_; + atomic_long_t vm_stat[10]; + atomic_long_t vm_numa_event[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct zoneref { + struct zone *zone; + int zone_idx; +}; + +struct zonelist { + struct zoneref _zonerefs[3]; +}; + +enum zone_type { + ZONE_NORMAL = 0, + ZONE_MOVABLE = 1, + __MAX_NR_ZONES = 2, +}; + +struct per_cpu_nodestat; + +struct pglist_data { + struct zone node_zones[2]; + struct zonelist node_zonelists[1]; + int nr_zones; + struct page *node_mem_map; + long unsigned int node_start_pfn; + long unsigned int node_present_pages; + long unsigned int node_spanned_pages; + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + wait_queue_head_t reclaim_wait[4]; + atomic_t nr_writeback_throttled; + long unsigned int nr_reclaim_start; + struct task_struct *kswapd; + int kswapd_order; + enum zone_type kswapd_highest_zoneidx; + int kswapd_failures; + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; + bool proactive_compact_trigger; + long unsigned int totalreserve_pages; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + struct lruvec __lruvec; + long unsigned int flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + struct per_cpu_nodestat *per_cpu_nodestats; + atomic_long_t vm_stat[44]; + long: 
32; + long: 32; + long: 32; +}; + +struct per_cpu_pages { + spinlock_t lock; + int count; + int high; + int high_min; + int high_max; + int batch; + u8 flags; + u8 alloc_factor; + short int free_count; + struct list_head lists[12]; + long: 32; +}; + +struct per_cpu_zonestat { + s8 vm_stat_diff[10]; + s8 stat_threshold; +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[44]; +}; + +struct shrinker_info_unit { + atomic_long_t nr_deferred[32]; + long unsigned int map[1]; +}; + +struct shrinker_info { + struct callback_head rcu; + int map_nr_max; + struct shrinker_info_unit *unit[0]; +}; + +typedef int proc_handler(const struct ctl_table *, int, void *, size_t *, loff_t *); + +struct ctl_table_poll; + +struct ctl_table { + const char *procname; + void *data; + int maxlen; + umode_t mode; + proc_handler *proc_handler; + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +}; + +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set * (*lookup)(struct ctl_table_root *); + void (*set_ownership)(struct ctl_table_header *, kuid_t *, kgid_t *); + int (*permissions)(struct ctl_table_header *, const struct ctl_table *); +}; + +struct taskstats { + __u16 version; + __u32 ac_exitcode; + __u8 ac_flag; + __u8 ac_nice; + long: 32; + __u64 cpu_count; + __u64 cpu_delay_total; + __u64 blkio_count; + __u64 blkio_delay_total; + __u64 swapin_count; + __u64 swapin_delay_total; + __u64 cpu_run_real_total; + __u64 cpu_run_virtual_total; + char ac_comm[32]; + __u8 ac_sched; + __u8 ac_pad[3]; + long: 32; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + long: 32; + __u64 ac_etime; + __u64 ac_utime; + __u64 ac_stime; + __u64 ac_minflt; + __u64 ac_majflt; + __u64 coremem; + __u64 virtmem; + __u64 hiwater_rss; + __u64 hiwater_vm; + __u64 read_char; + __u64 write_char; + __u64 read_syscalls; + __u64 write_syscalls; + __u64 read_bytes; + __u64 write_bytes; + __u64 cancelled_write_bytes; + __u64 nvcsw; + __u64 nivcsw; + __u64 ac_utimescaled; + __u64 ac_stimescaled; + __u64 cpu_scaled_run_real_total; + __u64 freepages_count; + __u64 freepages_delay_total; + __u64 thrashing_count; + __u64 thrashing_delay_total; + __u64 ac_btime64; + __u64 compact_count; + __u64 compact_delay_total; + __u32 ac_tgid; + long: 32; + __u64 ac_tgetime; + __u64 ac_exe_dev; + __u64 ac_exe_inode; + __u64 wpcopy_count; + __u64 wpcopy_delay_total; + __u64 irq_count; + __u64 irq_delay_total; +}; + +struct wait_page_queue { + struct folio *folio; + int bit_nr; + wait_queue_entry_t wait; +}; + +enum writeback_sync_modes { + WB_SYNC_NONE = 0, + WB_SYNC_ALL = 1, +}; + +struct folio_batch { + unsigned char nr; + unsigned char i; + bool percpu_pvec_drained; + struct folio *folios[31]; +}; + +struct swap_iocb; + +struct writeback_control { + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + enum writeback_sync_modes sync_mode; + unsigned int for_kupdate: 1; + unsigned int for_background: 1; + unsigned int tagged_writepages: 1; + unsigned int for_reclaim: 1; + unsigned int range_cyclic: 1; + unsigned int for_sync: 1; + unsigned int unpinned_netfs_wb: 1; + unsigned int no_cgroup_owner: 1; + struct swap_iocb **swap_plug; + struct list_head *list; + struct folio_batch fbatch; + long unsigned int index; + int saved_err; + struct bdi_writeback *wb; + struct inode *inode; + 
int wb_id; + int wb_lcand_id; + int wb_tcand_id; + size_t wb_bytes; + size_t wb_lcand_bytes; + size_t wb_tcand_bytes; +}; + +struct readahead_control { + struct file *file; + struct address_space *mapping; + struct file_ra_state *ra; + long unsigned int _index; + unsigned int _nr_pages; + unsigned int _batch_count; + bool _workingset; + long unsigned int _pflags; +}; + +struct iovec { + void *iov_base; + __kernel_size_t iov_len; +}; + +struct kvec; + +struct bio_vec; + +struct folio_queue; + +struct iov_iter { + u8 iter_type; + bool nofault; + bool data_source; + size_t iov_offset; + union { + struct iovec __ubuf_iovec; + struct { + union { + const struct iovec *__iov; + const struct kvec *kvec; + const struct bio_vec *bvec; + const struct folio_queue *folioq; + struct xarray *xarray; + void *ubuf; + }; + size_t count; + }; + }; + union { + long unsigned int nr_segs; + u8 folioq_slot; + loff_t xarray_start; + }; +}; + +struct swap_cluster_info; + +struct percpu_cluster; + +struct swap_info_struct { + struct percpu_ref users; + long unsigned int flags; + short int prio; + struct plist_node list; + signed char type; + unsigned int max; + unsigned char *swap_map; + long unsigned int *zeromap; + struct swap_cluster_info *cluster_info; + struct list_head free_clusters; + struct list_head full_clusters; + struct list_head nonfull_clusters[1]; + struct list_head frag_clusters[1]; + unsigned int frag_cluster_nr[1]; + unsigned int lowest_bit; + unsigned int highest_bit; + unsigned int pages; + unsigned int inuse_pages; + unsigned int cluster_next; + unsigned int cluster_nr; + unsigned int *cluster_next_cpu; + struct percpu_cluster *percpu_cluster; + struct rb_root swap_extent_root; + struct block_device *bdev; + struct file *swap_file; + struct completion comp; + spinlock_t lock; + spinlock_t cont_lock; + struct work_struct discard_work; + struct work_struct reclaim_work; + struct list_head discard_clusters; + struct plist_node avail_lists[0]; +}; + +struct fprop_local_percpu { + struct percpu_counter events; + unsigned int period; + raw_spinlock_t lock; +}; + +enum wb_reason { + WB_REASON_BACKGROUND = 0, + WB_REASON_VMSCAN = 1, + WB_REASON_SYNC = 2, + WB_REASON_PERIODIC = 3, + WB_REASON_LAPTOP_TIMER = 4, + WB_REASON_FS_FREE_SPACE = 5, + WB_REASON_FORKER_THREAD = 6, + WB_REASON_FOREIGN_FLUSH = 7, + WB_REASON_MAX = 8, +}; + +struct bdi_writeback { + struct backing_dev_info *bdi; + long unsigned int state; + long unsigned int last_old_flush; + struct list_head b_dirty; + struct list_head b_io; + struct list_head b_more_io; + struct list_head b_dirty_time; + spinlock_t list_lock; + atomic_t writeback_inodes; + long: 32; + struct percpu_counter stat[4]; + long unsigned int bw_time_stamp; + long unsigned int dirtied_stamp; + long unsigned int written_stamp; + long unsigned int write_bandwidth; + long unsigned int avg_write_bandwidth; + long unsigned int dirty_ratelimit; + long unsigned int balanced_dirty_ratelimit; + long: 32; + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + spinlock_t work_lock; + struct list_head work_list; + struct delayed_work dwork; + struct delayed_work bw_dwork; + struct list_head bdi_node; + struct percpu_ref refcnt; + long: 32; + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; + struct list_head memcg_node; + struct list_head blkcg_node; + struct list_head b_attached; + struct list_head offline_node; + union { + struct work_struct release_work; + 
struct callback_head rcu; + }; +}; + +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING = 1, + DL_DEV_DRIVER_BOUND = 2, + DL_DEV_UNBINDING = 3, +}; + +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + struct list_head defer_sync; + enum dl_dev_state status; +}; + +struct pm_message { + int event; +}; + +typedef struct pm_message pm_message_t; + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE = 1, + RPM_REQ_SUSPEND = 2, + RPM_REQ_AUTOSUSPEND = 3, + RPM_REQ_RESUME = 4, +}; + +enum rpm_status { + RPM_INVALID = -1, + RPM_ACTIVE = 0, + RPM_RESUMING = 1, + RPM_SUSPENDED = 2, + RPM_SUSPENDING = 3, +}; + +struct wakeup_source; + +struct wake_irq; + +struct pm_subsys_data; + +struct device; + +struct dev_pm_qos; + +struct dev_pm_info { + pm_message_t power_state; + bool can_wakeup: 1; + bool async_suspend: 1; + bool in_dpm_list: 1; + bool is_prepared: 1; + bool is_suspended: 1; + bool is_noirq_suspended: 1; + bool is_late_suspended: 1; + bool no_pm: 1; + bool early_init: 1; + bool direct_complete: 1; + u32 driver_flags; + spinlock_t lock; + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path: 1; + bool syscore: 1; + bool no_pm_callbacks: 1; + bool async_in_progress: 1; + bool must_resume: 1; + bool may_skip_resume: 1; + struct hrtimer suspend_timer; + u64 timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth: 3; + bool idle_notification: 1; + bool request_pending: 1; + bool deferred_resume: 1; + bool needs_force_resume: 1; + bool runtime_auto: 1; + bool ignore_children: 1; + bool no_callbacks: 1; + bool irq_safe: 1; + bool use_autosuspend: 1; + bool timer_autosuspends: 1; + bool memalloc_noio: 1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + enum rpm_status last_status; + int runtime_error; + int autosuspend_delay; + long: 32; + u64 last_busy; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; + struct pm_subsys_data *subsys_data; + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; + long: 32; +}; + +struct irq_domain; + +struct msi_device_data; + +struct dev_msi_info { + struct irq_domain *domain; + struct msi_device_data *data; +}; + +struct dev_archdata { + unsigned int dma_ops_setup: 1; +}; + +struct dev_iommu; + +enum device_removable { + DEVICE_REMOVABLE_NOT_SUPPORTED = 0, + DEVICE_REMOVABLE_UNKNOWN = 1, + DEVICE_FIXED = 2, + DEVICE_REMOVABLE = 3, +}; + +struct device_private; + +struct device_type; + +struct bus_type; + +struct device_driver; + +struct dev_pm_domain; + +struct dma_map_ops; + +struct bus_dma_region; + +struct device_dma_parameters; + +struct dma_coherent_mem; + +struct device_node; + +struct fwnode_handle; + +struct class; + +struct iommu_group; + +struct device_physical_location; + +struct device { + struct kobject kobj; + struct device *parent; + struct device_private *p; + const char *init_name; + const struct device_type *type; + const struct bus_type *bus; + struct device_driver *driver; + void *platform_data; + void *driver_data; + struct mutex mutex; + struct dev_links_info links; + long: 32; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + struct dev_msi_info msi; + const struct dma_map_ops *dma_ops; + u64 *dma_mask; + long: 32; + u64 coherent_dma_mask; + u64 bus_dma_limit; + const struct bus_dma_region *dma_range_map; + struct 
device_dma_parameters *dma_parms; + struct list_head dma_pools; + struct dma_coherent_mem *dma_mem; + struct dev_archdata archdata; + struct device_node *of_node; + struct fwnode_handle *fwnode; + dev_t devt; + u32 id; + spinlock_t devres_lock; + struct list_head devres_head; + const struct class *class; + const struct attribute_group **groups; + void (*release)(struct device *); + struct iommu_group *iommu_group; + struct dev_iommu *iommu; + struct device_physical_location *physical_location; + enum device_removable removable; + bool offline_disabled: 1; + bool offline: 1; + bool of_node_reused: 1; + bool state_synced: 1; + bool can_match: 1; + bool dma_coherent: 1; + bool dma_skip_sync: 1; + long: 32; +}; + +struct disk_stats; + +struct blk_holder_ops; + +struct partition_meta_info; + +struct block_device { + sector_t bd_start_sect; + sector_t bd_nr_sectors; + struct gendisk *bd_disk; + struct request_queue *bd_queue; + struct disk_stats *bd_stats; + long unsigned int bd_stamp; + atomic_t __bd_flags; + dev_t bd_dev; + struct address_space *bd_mapping; + atomic_t bd_openers; + spinlock_t bd_size_lock; + void *bd_claiming; + void *bd_holder; + const struct blk_holder_ops *bd_holder_ops; + struct mutex bd_holder_lock; + int bd_holders; + struct kobject *bd_holder_dir; + atomic_t bd_fsfreeze_count; + struct mutex bd_fsfreeze_mutex; + struct partition_meta_info *bd_meta_info; + int bd_writers; + long: 32; + struct device bd_device; +}; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; + struct list_head bdi_list; + long unsigned int ra_pages; + long unsigned int io_pages; + struct kref refcnt; + unsigned int capabilities; + unsigned int min_ratio; + unsigned int max_ratio; + unsigned int max_prop_frac; + atomic_long_t tot_write_bandwidth; + long unsigned int last_bdp_sleep; + struct bdi_writeback wb; + struct list_head wb_list; + struct xarray cgwb_tree; + struct mutex cgwb_release_mutex; + struct rw_semaphore wb_switch_rwsem; + wait_queue_head_t wb_waitq; + struct device *dev; + char dev_name[64]; + struct device *owner; + struct timer_list laptop_mode_wb_timer; + struct dentry *debug_dir; + long: 32; +}; + +struct seq_operations; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + long: 32; + loff_t index; + loff_t read_pos; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; + long: 32; +}; + +typedef __u32 blk_opf_t; + +typedef u8 blk_status_t; + +struct bvec_iter { + sector_t bi_sector; + unsigned int bi_size; + unsigned int bi_idx; + unsigned int bi_bvec_done; +}; + +typedef unsigned int blk_qc_t; + +typedef void bio_end_io_t(struct bio *); + +struct bio_issue { + u64 value; +}; + +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct blkcg_gq; + +struct bio_set; + +struct bio { + struct bio *bi_next; + struct block_device *bi_bdev; + blk_opf_t bi_opf; + short unsigned int bi_flags; + short unsigned int bi_ioprio; + enum rw_hint bi_write_hint; + blk_status_t bi_status; + atomic_t __bi_remaining; + struct bvec_iter bi_iter; + union { + blk_qc_t bi_cookie; + unsigned int __bi_nr_segments; + }; + bio_end_io_t *bi_end_io; + void *bi_private; + struct blkcg_gq *bi_blkg; + long: 32; + struct bio_issue bi_issue; + u64 bi_iocost_cost; + short unsigned int bi_vcnt; + short unsigned int bi_max_vecs; + atomic_t __bi_cnt; + struct bio_vec *bi_io_vec; + struct bio_set *bi_pool; + struct bio_vec bi_inline_vecs[0]; +}; + 
+struct seq_operations { + void * (*start)(struct seq_file *, loff_t *); + void (*stop)(struct seq_file *, void *); + void * (*next)(struct seq_file *, void *, loff_t *); + int (*show)(struct seq_file *, void *); +}; + +struct cgroup_namespace { + struct ns_common ns; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct css_set *root_cset; +}; + +enum dev_dma_attr { + DEV_DMA_NOT_SUPPORTED = 0, + DEV_DMA_NON_COHERENT = 1, + DEV_DMA_COHERENT = 2, +}; + +struct fwnode_operations; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; + struct device *dev; + struct list_head suppliers; + struct list_head consumers; + u8 flags; +}; + +struct fwnode_reference_args; + +struct fwnode_endpoint; + +struct fwnode_operations { + struct fwnode_handle * (*get)(struct fwnode_handle *); + void (*put)(struct fwnode_handle *); + bool (*device_is_available)(const struct fwnode_handle *); + const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); + bool (*device_dma_supported)(const struct fwnode_handle *); + enum dev_dma_attr (*device_get_dma_attr)(const struct fwnode_handle *); + bool (*property_present)(const struct fwnode_handle *, const char *); + int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); + int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); + const char * (*get_name)(const struct fwnode_handle *); + const char * (*get_name_prefix)(const struct fwnode_handle *); + struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); + struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); + int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); + struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *); + struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); + int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); + void * (*iomap)(struct fwnode_handle *, int); + int (*irq_get)(const struct fwnode_handle *, unsigned int); + int (*add_links)(struct fwnode_handle *); +}; + +struct fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct fwnode_handle *local_fwnode; +}; + +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[8]; +}; + +struct u64_stats_sync { + seqcount_t seq; +}; + +struct psi_group_cpu { + seqcount_t seq; + unsigned int tasks[4]; + u32 state_mask; + u32 times[7]; + long: 32; + u64 state_start; + u32 times_prev[14]; + long: 32; + long: 32; +}; + +struct psi_group { + struct psi_group *parent; + bool enabled; + struct mutex avgs_lock; + struct psi_group_cpu *pcpu; + u64 avg_total[6]; + u64 avg_last_update; + u64 avg_next_update; + struct delayed_work avgs_work; + struct list_head avg_triggers; + u32 avg_nr_triggers[6]; + long: 32; + u64 total[12]; + long unsigned int avg[18]; + struct task_struct *rtpoll_task; + struct timer_list rtpoll_timer; + wait_queue_head_t rtpoll_wait; + atomic_t rtpoll_wakeup; + atomic_t rtpoll_scheduled; + struct mutex rtpoll_trigger_lock; + struct list_head rtpoll_triggers; + u32 
rtpoll_nr_triggers[6]; + u32 rtpoll_states; + long: 32; + u64 rtpoll_min_period; + u64 rtpoll_total[6]; + u64 rtpoll_next_update; + u64 rtpoll_until; +}; + +struct cgroup_taskset; + +struct cftype; + +struct cgroup_subsys { + struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); + int (*css_online)(struct cgroup_subsys_state *); + void (*css_offline)(struct cgroup_subsys_state *); + void (*css_released)(struct cgroup_subsys_state *); + void (*css_free)(struct cgroup_subsys_state *); + void (*css_reset)(struct cgroup_subsys_state *); + void (*css_rstat_flush)(struct cgroup_subsys_state *, int); + int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*css_local_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*can_attach)(struct cgroup_taskset *); + void (*cancel_attach)(struct cgroup_taskset *); + void (*attach)(struct cgroup_taskset *); + void (*post_attach)(); + int (*can_fork)(struct task_struct *, struct css_set *); + void (*cancel_fork)(struct task_struct *, struct css_set *); + void (*fork)(struct task_struct *); + void (*exit)(struct task_struct *); + void (*release)(struct task_struct *); + void (*bind)(struct cgroup_subsys_state *); + bool early_init: 1; + bool implicit_on_dfl: 1; + bool threaded: 1; + int id; + const char *name; + const char *legacy_name; + struct cgroup_root *root; + struct idr css_idr; + struct list_head cfts; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + unsigned int depends_on; +}; + +struct cgroup_rstat_cpu { + struct u64_stats_sync bsync; + long: 32; + struct cgroup_base_stat bstat; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat subtree_bstat; + struct cgroup_base_stat last_subtree_bstat; + struct cgroup *updated_children; + struct cgroup *updated_next; +}; + +struct cgroup_root { + struct kernfs_root *kf_root; + unsigned int subsys_mask; + int hierarchy_id; + struct list_head root_list; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cgroup cgrp; + struct cgroup *cgrp_ancestor_storage; + atomic_t nr_cgrps; + unsigned int flags; + char release_agent_path[4096]; + char name[64]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct cftype { + char name[64]; + long unsigned int private; + size_t max_write_len; + unsigned int flags; + unsigned int file_offset; + struct cgroup_subsys *ss; + struct list_head node; + struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); + s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); + int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + struct lock_class_key lockdep_key; +}; + +struct dev_pm_ops { + int (*prepare)(struct device *); + void (*complete)(struct device *); + int (*suspend)(struct device *); + int (*resume)(struct device *); + int 
(*freeze)(struct device *); + int (*thaw)(struct device *); + int (*poweroff)(struct device *); + int (*restore)(struct device *); + int (*suspend_late)(struct device *); + int (*resume_early)(struct device *); + int (*freeze_late)(struct device *); + int (*thaw_early)(struct device *); + int (*poweroff_late)(struct device *); + int (*restore_early)(struct device *); + int (*suspend_noirq)(struct device *); + int (*resume_noirq)(struct device *); + int (*freeze_noirq)(struct device *); + int (*thaw_noirq)(struct device *); + int (*poweroff_noirq)(struct device *); + int (*restore_noirq)(struct device *); + int (*runtime_suspend)(struct device *); + int (*runtime_resume)(struct device *); + int (*runtime_idle)(struct device *); +}; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; + unsigned int clock_op_might_sleep; + struct mutex clock_mutex; + struct list_head clock_list; +}; + +struct wakeup_source { + const char *name; + int id; + struct list_head entry; + spinlock_t lock; + struct wake_irq *wakeirq; + struct timer_list timer; + long unsigned int timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + long unsigned int event_count; + long unsigned int active_count; + long unsigned int relax_count; + long unsigned int expire_count; + long unsigned int wakeup_count; + struct device *dev; + bool active: 1; + bool autosleep_enabled: 1; + long: 32; +}; + +struct dev_pm_domain { + struct dev_pm_ops ops; + int (*start)(struct device *); + void (*detach)(struct device *, bool); + int (*activate)(struct device *); + void (*sync)(struct device *); + void (*dismiss)(struct device *); + int (*set_performance_state)(struct device *, unsigned int); +}; + +struct bus_type { + const char *name; + const char *dev_name; + const struct attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + int (*match)(struct device *, const struct device_driver *); + int (*uevent)(const struct device *, struct kobj_uevent_env *); + int (*probe)(struct device *); + void (*sync_state)(struct device *); + void (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*online)(struct device *); + int (*offline)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + int (*num_vf)(struct device *); + int (*dma_configure)(struct device *); + void (*dma_cleanup)(struct device *); + const struct dev_pm_ops *pm; + bool need_parent_lock; +}; + +enum probe_type { + PROBE_DEFAULT_STRATEGY = 0, + PROBE_PREFER_ASYNCHRONOUS = 1, + PROBE_FORCE_SYNCHRONOUS = 2, +}; + +struct of_device_id; + +struct acpi_device_id; + +struct driver_private; + +struct device_driver { + const char *name; + const struct bus_type *bus; + struct module *owner; + const char *mod_name; + bool suppress_bind_attrs; + enum probe_type probe_type; + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + const struct dev_pm_ops *pm; + void (*coredump)(struct device *); + struct driver_private *p; +}; + +struct class { + const char *name; + const struct attribute_group **class_groups; + const 
struct attribute_group **dev_groups; + int (*dev_uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *); + void (*class_release)(const struct class *); + void (*dev_release)(struct device *); + int (*shutdown_pre)(struct device *); + const struct kobj_ns_type_operations *ns_type; + const void * (*namespace)(const struct device *); + void (*get_ownership)(const struct device *, kuid_t *, kgid_t *); + const struct dev_pm_ops *pm; +}; + +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *, kuid_t *, kgid_t *); + void (*release)(struct device *); + const struct dev_pm_ops *pm; +}; + +struct of_device_id { + char name[32]; + char type[32]; + char compatible[128]; + const void *data; +}; + +typedef long unsigned int kernel_ulong_t; + +struct acpi_device_id { + __u8 id[16]; + kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; +}; + +struct device_dma_parameters { + unsigned int max_segment_size; + unsigned int min_align_mask; + long unsigned int segment_boundary_mask; +}; + +enum device_physical_location_panel { + DEVICE_PANEL_TOP = 0, + DEVICE_PANEL_BOTTOM = 1, + DEVICE_PANEL_LEFT = 2, + DEVICE_PANEL_RIGHT = 3, + DEVICE_PANEL_FRONT = 4, + DEVICE_PANEL_BACK = 5, + DEVICE_PANEL_UNKNOWN = 6, +}; + +enum device_physical_location_vertical_position { + DEVICE_VERT_POS_UPPER = 0, + DEVICE_VERT_POS_CENTER = 1, + DEVICE_VERT_POS_LOWER = 2, +}; + +enum device_physical_location_horizontal_position { + DEVICE_HORI_POS_LEFT = 0, + DEVICE_HORI_POS_CENTER = 1, + DEVICE_HORI_POS_RIGHT = 2, +}; + +struct device_physical_location { + enum device_physical_location_panel panel; + enum device_physical_location_vertical_position vertical_position; + enum device_physical_location_horizontal_position horizontal_position; + bool dock; + bool lid; +}; + +typedef u32 dma_addr_t; + +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct sg_table; + +struct scatterlist; + +struct dma_map_ops { + void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int); + void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int); + struct page * (*alloc_pages_op)(struct device *, size_t, dma_addr_t *, enum dma_data_direction, gfp_t); + void (*free_pages)(struct device *, size_t, struct page *, dma_addr_t, enum dma_data_direction); + int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int); + int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int); + dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_single_for_device)(struct 
device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); + int (*dma_supported)(struct device *, u64); + u64 (*get_required_mask)(struct device *); + size_t (*max_mapping_size)(struct device *); + size_t (*opt_mapping_size)(); + long unsigned int (*get_merge_boundary)(struct device *); +}; + +struct bus_dma_region { + phys_addr_t cpu_start; + dma_addr_t dma_start; + u64 size; +}; + +struct blk_holder_ops { + void (*mark_dead)(struct block_device *, bool); + void (*sync)(struct block_device *); + int (*freeze)(struct block_device *); + int (*thaw)(struct block_device *); +}; + +typedef void *mempool_alloc_t(gfp_t, void *); + +typedef void mempool_free_t(void *, void *); + +struct mempool_s { + spinlock_t lock; + int min_nr; + int curr_nr; + void **elements; + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +typedef struct mempool_s mempool_t; + +struct bio_alloc_cache; + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + struct bio_alloc_cache *cache; + mempool_t bio_pool; + mempool_t bvec_pool; + unsigned int back_pad; + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; + struct hlist_node cpuhp_dead; +}; + +struct kvec { + void *iov_base; + size_t iov_len; +}; + +struct folio_queue { + struct folio_batch vec; + u8 orders[31]; + struct folio_queue *next; + struct folio_queue *prev; + long unsigned int marks; + long unsigned int marks2; + long unsigned int marks3; +}; + +enum memcg_memory_event { + MEMCG_LOW = 0, + MEMCG_HIGH = 1, + MEMCG_MAX = 2, + MEMCG_OOM = 3, + MEMCG_OOM_KILL = 4, + MEMCG_OOM_GROUP_KILL = 5, + MEMCG_SWAP_HIGH = 6, + MEMCG_SWAP_MAX = 7, + MEMCG_SWAP_FAIL = 8, + MEMCG_NR_MEMORY_EVENTS = 9, +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + atomic_t generation; +}; + +struct lruvec_stats_percpu; + +struct lruvec_stats; + +struct mem_cgroup_per_node { + struct mem_cgroup *memcg; + struct lruvec_stats_percpu *lruvec_stats_percpu; + struct lruvec_stats *lruvec_stats; + struct shrinker_info *shrinker_info; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + struct lruvec lruvec; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + long unsigned int lru_zone_size[10]; + struct mem_cgroup_reclaim_iter iter; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum page_memcg_data_flags { + MEMCG_DATA_OBJEXTS = 1, + MEMCG_DATA_KMEM = 2, + __NR_MEMCG_DATA_FLAGS = 4, +}; + +struct swap_cluster_info { + spinlock_t lock; + u16 count; + u8 flags; + u8 order; + struct list_head list; +}; + +struct percpu_cluster { + unsigned int next[1]; +}; + +struct pbe { + void *address; + void *orig_address; + struct pbe *next; +}; + +struct vm_unmapped_area_info { + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; + long unsigned int start_gap; +}; + 
+struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +struct vm_struct { + struct vm_struct *next; + void *addr; + long unsigned int size; + long unsigned int flags; + struct page **pages; + unsigned int nr_pages; + phys_addr_t phys_addr; + const void *caller; +}; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC = 0, + HRTIMER_BASE_REALTIME = 1, + HRTIMER_BASE_BOOTTIME = 2, + HRTIMER_BASE_TAI = 3, + HRTIMER_BASE_MONOTONIC_SOFT = 4, + HRTIMER_BASE_REALTIME_SOFT = 5, + HRTIMER_BASE_BOOTTIME_SOFT = 6, + HRTIMER_BASE_TAI_SOFT = 7, + HRTIMER_MAX_CLOCK_BASES = 8, +}; + +enum utf8_normalization { + UTF8_NFDI = 0, + UTF8_NFDICF = 1, + UTF8_NMAX = 2, +}; + +enum { + DQF_ROOT_SQUASH_B = 0, + DQF_SYS_FILE_B = 16, + DQF_PRIVATE = 17, +}; + +enum { + DQST_LOOKUPS = 0, + DQST_DROPS = 1, + DQST_READS = 2, + DQST_WRITES = 3, + DQST_CACHE_HITS = 4, + DQST_ALLOC_DQUOTS = 5, + DQST_FREE_DQUOTS = 6, + DQST_SYNCS = 7, + _DQST_DQSTAT_LAST = 8, +}; + +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_PAGEFAULT = 2, + SB_FREEZE_FS = 3, + SB_FREEZE_COMPLETE = 4, +}; + +struct proc_ops { + unsigned int proc_flags; + int (*proc_open)(struct inode *, struct file *); + ssize_t (*proc_read)(struct file *, char *, size_t, loff_t *); + ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*proc_write)(struct file *, const char *, size_t, loff_t *); + loff_t (*proc_lseek)(struct file *, loff_t, int); + int (*proc_release)(struct inode *, struct file *); + __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); + long int (*proc_ioctl)(struct file *, unsigned int, long unsigned int); + int (*proc_mmap)(struct file *, struct vm_area_struct *); + long unsigned int (*proc_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); +}; + +union offset_union { + long unsigned int un; + long int sn; +}; + +struct new_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; + char domainname[65]; +}; + +struct uts_namespace { + struct new_utsname name; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; +}; + +typedef int suspend_state_t; + +struct miscdevice { + int minor; + const char *name; + const struct file_operations *fops; + struct list_head list; + struct device *parent; + struct device *this_device; + const struct attribute_group **groups; + const char *nodename; + umode_t mode; +}; + +typedef s64 compat_loff_t; + +struct resume_swap_area { + __kernel_loff_t offset; + __u32 dev; +}; + +struct snapshot_handle { + unsigned int cur; + void *buffer; + int sync_read; +}; + +struct snapshot_data { + struct snapshot_handle handle; + int swap; + int mode; + bool frozen; + bool ready; + bool platform_support; + bool free_bitmaps; + dev_t dev; +}; + +struct compat_resume_swap_area { + compat_loff_t offset; + u32 dev; +}; + +typedef long unsigned int uintptr_t; + +typedef int (*cmp_func_t)(const void *, const void *); + +struct elf32_hdr { + unsigned char e_ident[16]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; + Elf32_Off e_phoff; + Elf32_Off e_shoff; + Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; +}; + +typedef struct elf32_hdr Elf32_Ehdr; + +typedef struct elf32_shdr Elf32_Shdr; + +enum mod_mem_type { + MOD_TEXT = 0, + MOD_DATA = 1, + MOD_RODATA = 
2, + MOD_RO_AFTER_INIT = 3, + MOD_INIT_TEXT = 4, + MOD_INIT_DATA = 5, + MOD_INIT_RODATA = 6, + MOD_MEM_NUM_TYPES = 7, + MOD_INVALID = -1, +}; + +struct load_info { + const char *name; + struct module *mod; + Elf32_Ehdr *hdr; + long unsigned int len; + Elf32_Shdr *sechdrs; + char *secstrings; + char *strtab; + long unsigned int symoffs; + long unsigned int stroffs; + long unsigned int init_typeoffs; + long unsigned int core_typeoffs; + bool sig_ok; + long unsigned int mod_kallsyms_init_off; + struct { + unsigned int sym; + unsigned int str; + unsigned int mod; + unsigned int vers; + unsigned int info; + unsigned int pcpu; + } index; +}; + +struct __va_list { + void *__ap; +}; + +typedef struct __va_list va_list; + +enum zone_stat_item { + NR_FREE_PAGES = 0, + NR_ZONE_LRU_BASE = 1, + NR_ZONE_INACTIVE_ANON = 1, + NR_ZONE_ACTIVE_ANON = 2, + NR_ZONE_INACTIVE_FILE = 3, + NR_ZONE_ACTIVE_FILE = 4, + NR_ZONE_UNEVICTABLE = 5, + NR_ZONE_WRITE_PENDING = 6, + NR_MLOCK = 7, + NR_BOUNCE = 8, + NR_FREE_CMA_PAGES = 9, + NR_VM_ZONE_STAT_ITEMS = 10, +}; + +enum tick_device_mode { + TICKDEV_MODE_PERIODIC = 0, + TICKDEV_MODE_ONESHOT = 1, +}; + +struct clock_event_device; + +struct tick_device { + struct clock_event_device *evtdev; + enum tick_device_mode mode; +}; + +enum vm_event_item { + PGPGIN = 0, + PGPGOUT = 1, + PSWPIN = 2, + PSWPOUT = 3, + PGALLOC_NORMAL = 4, + PGALLOC_MOVABLE = 5, + ALLOCSTALL_NORMAL = 6, + ALLOCSTALL_MOVABLE = 7, + PGSCAN_SKIP_NORMAL = 8, + PGSCAN_SKIP_MOVABLE = 9, + PGFREE = 10, + PGACTIVATE = 11, + PGDEACTIVATE = 12, + PGLAZYFREE = 13, + PGFAULT = 14, + PGMAJFAULT = 15, + PGLAZYFREED = 16, + PGREFILL = 17, + PGREUSE = 18, + PGSTEAL_KSWAPD = 19, + PGSTEAL_DIRECT = 20, + PGSTEAL_KHUGEPAGED = 21, + PGSCAN_KSWAPD = 22, + PGSCAN_DIRECT = 23, + PGSCAN_KHUGEPAGED = 24, + PGSCAN_DIRECT_THROTTLE = 25, + PGSCAN_ANON = 26, + PGSCAN_FILE = 27, + PGSTEAL_ANON = 28, + PGSTEAL_FILE = 29, + PGINODESTEAL = 30, + SLABS_SCANNED = 31, + KSWAPD_INODESTEAL = 32, + KSWAPD_LOW_WMARK_HIT_QUICKLY = 33, + KSWAPD_HIGH_WMARK_HIT_QUICKLY = 34, + PAGEOUTRUN = 35, + PGROTATED = 36, + DROP_PAGECACHE = 37, + DROP_SLAB = 38, + OOM_KILL = 39, + PGMIGRATE_SUCCESS = 40, + PGMIGRATE_FAIL = 41, + THP_MIGRATION_SUCCESS = 42, + THP_MIGRATION_FAIL = 43, + THP_MIGRATION_SPLIT = 44, + COMPACTMIGRATE_SCANNED = 45, + COMPACTFREE_SCANNED = 46, + COMPACTISOLATED = 47, + COMPACTSTALL = 48, + COMPACTFAIL = 49, + COMPACTSUCCESS = 50, + KCOMPACTD_WAKE = 51, + KCOMPACTD_MIGRATE_SCANNED = 52, + KCOMPACTD_FREE_SCANNED = 53, + UNEVICTABLE_PGCULLED = 54, + UNEVICTABLE_PGSCANNED = 55, + UNEVICTABLE_PGRESCUED = 56, + UNEVICTABLE_PGMLOCKED = 57, + UNEVICTABLE_PGMUNLOCKED = 58, + UNEVICTABLE_PGCLEARED = 59, + UNEVICTABLE_PGSTRANDED = 60, + SWAP_RA = 61, + SWAP_RA_HIT = 62, + SWPIN_ZERO = 63, + SWPOUT_ZERO = 64, + ZSWPIN = 65, + ZSWPOUT = 66, + ZSWPWB = 67, + NR_VM_EVENT_ITEMS = 68, +}; + +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED = 0, + CLOCK_EVT_STATE_SHUTDOWN = 1, + CLOCK_EVT_STATE_PERIODIC = 2, + CLOCK_EVT_STATE_ONESHOT = 3, + CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, +}; + +struct clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(long unsigned int, struct clock_event_device *); + int (*set_next_ktime)(ktime_t, struct clock_event_device *); + long: 32; + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_state state_use_accessors; + unsigned int features; + long unsigned int retries; + int (*set_state_periodic)(struct 
clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_oneshot_stopped)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); + void (*broadcast)(const struct cpumask *); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); + long unsigned int min_delta_ticks; + long unsigned int max_delta_ticks; + const char *name; + int rating; + int irq; + int bound_on; + const struct cpumask *cpumask; + struct list_head list; + struct module *owner; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct tick_sched { + long unsigned int flags; + unsigned int stalled_jiffies; + long unsigned int last_tick_jiffies; + long: 32; + struct hrtimer sched_timer; + ktime_t last_tick; + ktime_t next_tick; + long unsigned int idle_jiffies; + long: 32; + ktime_t idle_waketime; + unsigned int got_idle_tick; + seqcount_t idle_sleeptime_seq; + ktime_t idle_entrytime; + long unsigned int last_jiffies; + long: 32; + u64 timer_expires_base; + u64 timer_expires; + u64 next_timer; + ktime_t idle_expires; + long unsigned int idle_calls; + long unsigned int idle_sleeps; + ktime_t idle_exittime; + ktime_t idle_sleeptime; + ktime_t iowait_sleeptime; + atomic_t tick_dep_mask; + long unsigned int check_clocks; +}; + +struct timer_list_iter { + int cpu; + bool second_pass; + u64 now; +}; + +typedef short int __s16; + +typedef phys_addr_t resource_size_t; + +struct arch_hw_breakpoint_ctrl { + u32 __reserved: 9; + u32 mismatch: 1; + short: 6; + char: 3; + u32 len: 8; + u32 type: 2; + u32 privilege: 2; + u32 enabled: 1; +}; + +struct arch_hw_breakpoint { + u32 address; + u32 trigger; + struct arch_hw_breakpoint_ctrl step_ctrl; + struct arch_hw_breakpoint_ctrl ctrl; +}; + +enum perf_event_state { + PERF_EVENT_STATE_DEAD = -4, + PERF_EVENT_STATE_EXIT = -3, + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +typedef struct { + atomic64_t a; +} local64_t; + +struct perf_event_attr { + __u32 type; + __u32 size; + __u64 config; + union { + __u64 sample_period; + __u64 sample_freq; + }; + __u64 sample_type; + __u64 read_format; + __u64 disabled: 1; + __u64 inherit: 1; + __u64 pinned: 1; + __u64 exclusive: 1; + __u64 exclude_user: 1; + __u64 exclude_kernel: 1; + __u64 exclude_hv: 1; + __u64 exclude_idle: 1; + __u64 mmap: 1; + __u64 comm: 1; + __u64 freq: 1; + __u64 inherit_stat: 1; + __u64 enable_on_exec: 1; + __u64 task: 1; + __u64 watermark: 1; + __u64 precise_ip: 2; + __u64 mmap_data: 1; + __u64 sample_id_all: 1; + __u64 exclude_host: 1; + __u64 exclude_guest: 1; + __u64 exclude_callchain_kernel: 1; + __u64 exclude_callchain_user: 1; + __u64 mmap2: 1; + __u64 comm_exec: 1; + __u64 use_clockid: 1; + __u64 context_switch: 1; + __u64 write_backward: 1; + __u64 namespaces: 1; + __u64 ksymbol: 1; + __u64 bpf_event: 1; + __u64 aux_output: 1; + __u64 cgroup: 1; + __u64 text_poke: 1; + __u64 build_id: 1; + __u64 inherit_thread: 1; + __u64 remove_on_exec: 1; + __u64 sigtrap: 1; + __u64 __reserved_1: 26; + union { + __u32 wakeup_events; + __u32 wakeup_watermark; + }; + __u32 bp_type; + union { + __u64 bp_addr; + __u64 kprobe_func; + __u64 uprobe_path; + __u64 config1; + }; + union { + __u64 bp_len; + __u64 kprobe_addr; + __u64 probe_offset; + __u64 config2; + }; + 
__u64 branch_sample_type; + __u64 sample_regs_user; + __u32 sample_stack_user; + __s32 clockid; + __u64 sample_regs_intr; + __u32 aux_watermark; + __u16 sample_max_stack; + __u16 __reserved_2; + __u32 aux_sample_size; + union { + __u32 aux_action; + struct { + __u32 aux_start_paused: 1; + __u32 aux_pause: 1; + __u32 aux_resume: 1; + __u32 __reserved_3: 29; + }; + }; + __u64 sig_data; + __u64 config3; +}; + +struct hw_perf_event_extra { + u64 config; + unsigned int reg; + int alloc; + int idx; + long: 32; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head *next; +}; + +struct hw_perf_event { + union { + struct { + u64 config; + u64 last_tag; + long unsigned int config_base; + long unsigned int event_base; + int event_base_rdpmc; + int idx; + int last_cpu; + int flags; + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { + u64 aux_config; + unsigned int aux_paused; + long: 32; + }; + struct { + struct hrtimer hrtimer; + }; + struct { + struct list_head tp_list; + }; + struct { + u64 pwr_acc; + u64 ptsc; + }; + struct { + struct arch_hw_breakpoint info; + struct rhlist_head bp_list; + }; + struct { + u8 iommu_bank; + u8 iommu_cntr; + u16 padding; + long: 32; + u64 conf; + u64 conf1; + }; + }; + struct task_struct *target; + void *addr_filters; + long unsigned int addr_filters_gen; + int state; + local64_t prev_count; + u64 sample_period; + union { + struct { + u64 last_period; + local64_t period_left; + }; + struct { + u64 saved_metric; + u64 saved_slots; + }; + }; + u64 interrupts_seq; + u64 interrupts; + u64 freq_time_stamp; + u64 freq_count_stamp; +}; + +struct irq_work { + struct __call_single_node node; + void (*func)(struct irq_work *); + struct rcuwait irqwait; +}; + +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; +}; + +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); + +struct ftrace_ops; + +struct ftrace_regs; + +typedef void (*ftrace_func_t)(long unsigned int, long unsigned int, struct ftrace_ops *, struct ftrace_regs *); + +struct ftrace_hash; + +struct ftrace_ops_hash { + struct ftrace_hash *notrace_hash; + struct ftrace_hash *filter_hash; + struct mutex regex_lock; +}; + +enum ftrace_ops_cmd { + FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF = 0, + FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER = 1, + FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER = 2, +}; + +typedef int (*ftrace_ops_func_t)(struct ftrace_ops *, enum ftrace_ops_cmd); + +struct ftrace_ops { + ftrace_func_t func; + struct ftrace_ops *next; + long unsigned int flags; + void *private; + ftrace_func_t saved_func; + struct ftrace_ops_hash local_hash; + struct ftrace_ops_hash *func_hash; + struct ftrace_ops_hash old_hash; + long unsigned int trampoline; + long unsigned int trampoline_size; + struct list_head list; + struct list_head subop_list; + ftrace_ops_func_t ops_func; + struct ftrace_ops *managed; +}; + +struct pmu; + +struct perf_event_pmu_context; + +struct perf_buffer; + +struct fasync_struct; + +struct perf_addr_filter_range; + +struct bpf_prog; + +struct event_filter; + +struct perf_cgroup; + +struct perf_event { + struct list_head event_entry; + struct list_head sibling_list; + struct list_head active_list; + struct rb_node group_node; + long: 32; + u64 group_index; + struct list_head migrate_entry; + struct hlist_node hlist_entry; + struct list_head active_entry; + int nr_siblings; + int event_caps; + 
int group_caps; + unsigned int group_generation; + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; + enum perf_event_state state; + unsigned int attach_state; + long: 32; + local64_t count; + atomic64_t child_count; + u64 total_time_enabled; + u64 total_time_running; + u64 tstamp; + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + struct perf_event_context *ctx; + struct perf_event_pmu_context *pmu_ctx; + atomic_long_t refcount; + long: 32; + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + int oncpu; + int cpu; + struct list_head owner_entry; + struct task_struct *owner; + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_buffer *rb; + struct list_head rb_entry; + long unsigned int rcu_batches; + int rcu_pending; + wait_queue_head_t waitq; + struct fasync_struct *fasync; + unsigned int pending_wakeup; + unsigned int pending_kill; + unsigned int pending_disable; + long unsigned int pending_addr; + struct irq_work pending_irq; + struct irq_work pending_disable_irq; + struct callback_head pending_task; + unsigned int pending_work; + struct rcuwait pending_work_wait; + atomic_t event_limit; + struct perf_addr_filters_head addr_filters; + struct perf_addr_filter_range *addr_filter_ranges; + long unsigned int addr_filters_gen; + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); + struct callback_head callback_head; + struct pid_namespace *ns; + u64 id; + atomic64_t lost_samples; + u64 (*clock)(); + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; + struct bpf_prog *prog; + u64 bpf_cookie; + struct trace_event_call *tp_event; + struct event_filter *filter; + struct ftrace_ops ftrace_ops; + struct perf_cgroup *cgrp; + struct list_head sb_list; + __u32 orig_type; +}; + +struct pollfd { + int fd; + short int events; + short int revents; +}; + +typedef struct cpumask cpumask_var_t[1]; + +enum { + TASK_COMM_LEN = 16, +}; + +struct perf_event_groups { + struct rb_root tree; + long: 32; + u64 index; +}; + +typedef struct { + atomic_long_t a; +} local_t; + +struct perf_event_context { + raw_spinlock_t lock; + struct mutex mutex; + struct list_head pmu_ctx_list; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; + struct list_head event_list; + int nr_events; + int nr_user; + int is_active; + int nr_task_data; + int nr_stat; + int nr_freq; + int rotate_disable; + refcount_t refcount; + struct task_struct *task; + long: 32; + u64 time; + u64 timestamp; + u64 timeoffset; + struct perf_event_context *parent_ctx; + long: 32; + u64 parent_gen; + u64 generation; + int pin_count; + int nr_cgroups; + struct callback_head callback_head; + local_t nr_no_switch_fast; + long: 32; +}; + +struct bpf_run_ctx {}; + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; + struct file *fa_file; + struct callback_head fa_rcu; +}; + +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +}; + +struct trace_event_functions; + +struct trace_event { + struct hlist_node node; + int type; + struct trace_event_functions *funcs; +}; + +struct trace_event_class; + +struct trace_event_call { + struct list_head list; + struct trace_event_class *class; + union { + char *name; 
+ struct tracepoint *tp; + }; + struct trace_event event; + char *print_fmt; + union { + void *module; + atomic_t refcnt; + }; + void *data; + int flags; + int perf_refcount; + struct hlist_head *perf_events; + struct bpf_prog_array *prog_array; + int (*perf_perm)(struct trace_event_call *, struct perf_event *); +}; + +struct trace_eval_map { + const char *system; + const char *eval_string; + long unsigned int eval_value; +}; + +struct ftrace_regs {}; + +struct ftrace_hash { + long unsigned int size_bits; + struct hlist_head *buckets; + long unsigned int count; + long unsigned int flags; + struct callback_head rcu; +}; + +struct ftrace_graph_ent { + long unsigned int func; + int depth; +}; + +struct ftrace_graph_ret { + long unsigned int func; + int depth; + unsigned int overrun; + long long unsigned int calltime; + long long unsigned int rettime; +}; + +struct fgraph_ops; + +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *, struct fgraph_ops *); + +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *, struct fgraph_ops *); + +struct fgraph_ops { + trace_func_graph_ent_t entryfunc; + trace_func_graph_ret_t retfunc; + struct ftrace_ops ops; + void *private; + trace_func_graph_ent_t saved_func; + int idx; +}; + +struct seq_buf { + char *buffer; + size_t size; + size_t len; +}; + +struct trace_seq { + char buffer[8172]; + struct seq_buf seq; + size_t readpos; + int full; +}; + +typedef u32 phandle; + +struct property { + char *name; + int length; + void *value; + struct property *next; + struct bin_attribute attr; +}; + +struct device_node { + const char *name; + phandle phandle; + const char *full_name; + struct fwnode_handle fwnode; + struct property *properties; + struct property *deadprops; + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; + struct kobject kobj; + long unsigned int _flags; + void *data; +}; + +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, + PERF_COUNT_SW_MAX = 12, +}; + +union perf_mem_data_src { + __u64 val; + struct { + __u64 mem_op: 5; + __u64 mem_lvl: 14; + __u64 mem_snoop: 5; + __u64 mem_lock: 2; + __u64 mem_dtlb: 7; + __u64 mem_lvl_num: 4; + __u64 mem_remote: 1; + __u64 mem_snoopx: 2; + __u64 mem_blk: 3; + __u64 mem_hops: 3; + __u64 mem_rsvd: 18; + }; +}; + +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 mispred: 1; + __u64 predicted: 1; + __u64 in_tx: 1; + __u64 abort: 1; + __u64 cycles: 16; + __u64 type: 4; + __u64 spec: 2; + __u64 new_type: 4; + __u64 priv: 3; + __u64 reserved: 31; +}; + +union perf_sample_weight { + __u64 full; + struct { + __u32 var1_dw; + __u16 var2_w; + __u16 var3_w; + }; +}; + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; + long: 32; +}; + +struct bpf_cgroup_storage; + +struct bpf_prog_array_item { + struct bpf_prog *prog; + long: 32; + union { + struct bpf_cgroup_storage *cgroup_storage[2]; + u64 bpf_cookie; + }; +}; + +struct bpf_prog_array { + struct callback_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct bpf_insn { + __u8 code; + __u8 dst_reg: 4; + __u8 src_reg: 4; + __s16 off; + __s32 imm; +}; + +enum bpf_cgroup_iter_order { + 
BPF_CGROUP_ITER_ORDER_UNSPEC = 0, + BPF_CGROUP_ITER_SELF_ONLY = 1, + BPF_CGROUP_ITER_DESCENDANTS_PRE = 2, + BPF_CGROUP_ITER_DESCENDANTS_POST = 3, + BPF_CGROUP_ITER_ANCESTORS_UP = 4, +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, + BPF_MAP_TYPE_BLOOM_FILTER = 30, + BPF_MAP_TYPE_USER_RINGBUF = 31, + BPF_MAP_TYPE_CGRP_STORAGE = 32, + BPF_MAP_TYPE_ARENA = 33, + __MAX_BPF_MAP_TYPE = 34, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, + BPF_PROG_TYPE_NETFILTER = 32, + __MAX_BPF_PROG_TYPE = 33, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME 
= 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + BPF_PERF_EVENT = 41, + BPF_TRACE_KPROBE_MULTI = 42, + BPF_LSM_CGROUP = 43, + BPF_STRUCT_OPS = 44, + BPF_NETFILTER = 45, + BPF_TCX_INGRESS = 46, + BPF_TCX_EGRESS = 47, + BPF_TRACE_UPROBE_MULTI = 48, + BPF_CGROUP_UNIX_CONNECT = 49, + BPF_CGROUP_UNIX_SENDMSG = 50, + BPF_CGROUP_UNIX_RECVMSG = 51, + BPF_CGROUP_UNIX_GETPEERNAME = 52, + BPF_CGROUP_UNIX_GETSOCKNAME = 53, + BPF_NETKIT_PRIMARY = 54, + BPF_NETKIT_PEER = 55, + BPF_TRACE_KPROBE_SESSION = 56, + BPF_TRACE_UPROBE_SESSION = 57, + __MAX_BPF_ATTACH_TYPE = 58, +}; + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 inner_map_fd; + __u32 numa_node; + char map_name[16]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + __u64 map_extra; + __s32 value_type_btf_obj_fd; + __s32 map_token_fd; + }; + struct { + __u32 map_fd; + long: 32; + __u64 key; + union { + __u64 value; + __u64 next_key; + }; + __u64 flags; + }; + struct { + __u64 in_batch; + __u64 out_batch; + __u64 keys; + __u64 values; + __u32 count; + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 insns; + __u64 license; + __u32 log_level; + __u32 log_size; + __u64 log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[16]; + __u32 prog_ifindex; + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + union { + __u32 attach_prog_fd; + __u32 attach_btf_obj_fd; + }; + __u32 core_relo_cnt; + __u64 fd_array; + __u64 core_relos; + __u32 core_relo_rec_size; + __u32 log_true_size; + __s32 prog_token_fd; + long: 32; + }; + struct { + __u64 pathname; + __u32 bpf_fd; + __u32 file_flags; + __s32 path_fd; + long: 32; + }; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + }; + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __u64 data_in; + __u64 data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + __u64 ctx_in; + __u64 ctx_out; + __u32 flags; + __u32 cpu; + __u32 batch_size; + long: 32; + } test; + struct { + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 info; + } info; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 prog_ids; + union { + __u32 prog_cnt; + __u32 count; + }; + long: 32; + __u64 prog_attach_flags; + __u64 link_ids; + __u64 link_attach_flags; + __u64 revision; + } query; + struct { + __u64 name; + __u32 prog_fd; + long: 32; + __u64 cookie; + } raw_tracepoint; + struct { + __u64 btf; + __u64 btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + __u32 btf_log_true_size; + __u32 btf_flags; + __s32 btf_token_fd; + }; + struct { + __u32 pid; + __u32 
fd; + __u32 flags; + __u32 buf_len; + __u64 buf; + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + struct { + union { + __u32 prog_fd; + __u32 map_fd; + }; + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 flags; + union { + __u32 target_btf_id; + struct { + __u64 iter_info; + __u32 iter_info_len; + long: 32; + }; + struct { + __u64 bpf_cookie; + } perf_event; + struct { + __u32 flags; + __u32 cnt; + __u64 syms; + __u64 addrs; + __u64 cookies; + } kprobe_multi; + struct { + __u32 target_btf_id; + long: 32; + __u64 cookie; + } tracing; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + long: 32; + __u64 expected_revision; + } tcx; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 cnt; + __u32 flags; + __u32 pid; + long: 32; + } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + long: 32; + __u64 expected_revision; + } netkit; + }; + } link_create; + struct { + __u32 link_fd; + union { + __u32 new_prog_fd; + __u32 new_map_fd; + }; + __u32 flags; + union { + __u32 old_prog_fd; + __u32 old_map_fd; + }; + } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { + __u32 type; + } enable_stats; + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + struct { + __u32 prog_fd; + __u32 map_fd; + __u32 flags; + } prog_bind_map; + struct { + __u32 flags; + __u32 bpffs_fd; + } token_create; +}; + +struct bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + +struct sock_filter { + __u16 code; + __u8 jt; + __u8 jf; + __u32 k; +}; + +struct btf_type { + __u32 name_off; + __u32 info; + union { + __u32 size; + __u32 type; + }; +}; + +struct bpf_prog_stats; + +struct bpf_prog_aux; + +struct sock_fprog_kern; + +struct bpf_prog { + u16 pages; + u16 jited: 1; + u16 jit_requested: 1; + u16 gpl_compatible: 1; + u16 cb_access: 1; + u16 dst_needed: 1; + u16 blinding_requested: 1; + u16 blinded: 1; + u16 is_func: 1; + u16 kprobe_override: 1; + u16 has_callchain_buf: 1; + u16 enforce_expected_attach_type: 1; + u16 call_get_stack: 1; + u16 call_get_func_ip: 1; + u16 tstamp_type_access: 1; + u16 sleepable: 1; + enum bpf_prog_type type; + enum bpf_attach_type expected_attach_type; + u32 len; + u32 jited_len; + u8 tag[8]; + struct bpf_prog_stats *stats; + int *active; + unsigned int (*bpf_func)(const void *, const struct bpf_insn *); + struct bpf_prog_aux *aux; + struct sock_fprog_kern *orig_prog; + union { + struct { + struct {} __empty_insns; + struct sock_filter insns[0]; + }; + struct { + struct {} __empty_insnsi; + struct bpf_insn insnsi[0]; + }; + }; +}; + +enum btf_field_type { + BPF_SPIN_LOCK = 1, + BPF_TIMER = 2, + BPF_KPTR_UNREF = 4, + BPF_KPTR_REF = 8, + BPF_KPTR_PERCPU = 16, + BPF_KPTR = 28, + BPF_LIST_HEAD = 32, + BPF_LIST_NODE = 64, + BPF_RB_ROOT = 128, + BPF_RB_NODE = 256, + BPF_GRAPH_NODE = 320, + BPF_GRAPH_ROOT = 160, + BPF_REFCOUNT = 512, + BPF_WORKQUEUE = 1024, + BPF_UPTR = 2048, +}; + +typedef void (*btf_dtor_kfunc_t)(void *); + +struct btf; + +struct btf_field_kptr { + struct btf *btf; + struct module *module; + btf_dtor_kfunc_t dtor; + u32 btf_id; +}; + +struct btf_record; + +struct btf_field_graph_root { + struct btf *btf; + u32 value_btf_id; + u32 node_offset; + struct btf_record *value_rec; +}; + +struct 
btf_field { + u32 offset; + u32 size; + enum btf_field_type type; + union { + struct btf_field_kptr kptr; + struct btf_field_graph_root graph_root; + }; +}; + +struct btf_record { + u32 cnt; + u32 field_mask; + int spin_lock_off; + int timer_off; + int wq_off; + int refcount_off; + struct btf_field fields[0]; +}; + +typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); + +struct bpf_iter_aux_info; + +typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); + +enum bpf_iter_task_type { + BPF_TASK_ITER_ALL = 0, + BPF_TASK_ITER_TID = 1, + BPF_TASK_ITER_TGID = 2, +}; + +struct bpf_map; + +struct bpf_iter_aux_info { + struct bpf_map *map; + struct { + struct cgroup *start; + enum bpf_cgroup_iter_order order; + } cgroup; + struct { + enum bpf_iter_task_type type; + u32 pid; + } task; +}; + +typedef void (*bpf_iter_fini_seq_priv_t)(void *); + +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + +struct bpf_local_storage_map; + +struct bpf_verifier_env; + +struct bpf_func_state; + +struct bpf_map_ops { + int (*map_alloc_check)(union bpf_attr *); + struct bpf_map * (*map_alloc)(union bpf_attr *); + void (*map_release)(struct bpf_map *, struct file *); + void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *, void *, void *); + void (*map_release_uref)(struct bpf_map *); + void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *); + int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_lookup_and_delete_elem)(struct bpf_map *, void *, void *, u64); + int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_update_batch)(struct bpf_map *, struct file *, const union bpf_attr *, union bpf_attr *); + int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + void * (*map_lookup_elem)(struct bpf_map *, void *); + long int (*map_update_elem)(struct bpf_map *, void *, void *, u64); + long int (*map_delete_elem)(struct bpf_map *, void *); + long int (*map_push_elem)(struct bpf_map *, void *, u64); + long int (*map_pop_elem)(struct bpf_map *, void *); + long int (*map_peek_elem)(struct bpf_map *, void *); + void * (*map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); + void (*map_fd_put_ptr)(struct bpf_map *, void *, bool); + int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); + u32 (*map_fd_sys_lookup_elem)(void *); + void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); + int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *); + int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); + int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32); + int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); + int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); + __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); + long unsigned int (*map_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); + void 
(*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); + struct bpf_local_storage ** (*map_owner_storage_ptr)(void *); + long int (*map_redirect)(struct bpf_map *, u64, u64); + bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); + int (*map_set_for_each_callback_args)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *); + long int (*map_for_each_callback)(struct bpf_map *, bpf_callback_t, void *, u64); + u64 (*map_mem_usage)(const struct bpf_map *); + int *map_btf_id; + const struct bpf_iter_seq_info *iter_seq_info; +}; + +struct bpf_map { + const struct bpf_map_ops *ops; + struct bpf_map *inner_map_meta; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u64 map_extra; + u32 map_flags; + u32 id; + struct btf_record *record; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + u32 btf_vmlinux_value_type_id; + struct btf *btf; + struct obj_cgroup *objcg; + char name[16]; + struct mutex freeze_mutex; + atomic64_t refcnt; + atomic64_t usercnt; + union { + struct work_struct work; + struct callback_head rcu; + }; + atomic64_t writecnt; + struct { + const struct btf_type *attach_func_proto; + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + } owner; + bool bypass_spec_v1; + bool frozen; + bool free_after_mult_rcu_gp; + bool free_after_rcu_gp; + long: 32; + atomic64_t sleepable_refcnt; + s64 *elem_count; + long: 32; +}; + +struct btf_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + __u32 type_off; + __u32 type_len; + __u32 str_off; + __u32 str_len; +}; + +struct btf_kfunc_set_tab; + +struct btf_id_dtor_kfunc_tab; + +struct btf_struct_metas; + +struct btf_struct_ops_tab; + +struct btf { + void *data; + struct btf_type **types; + u32 *resolved_ids; + u32 *resolved_sizes; + const char *strings; + void *nohdr_data; + struct btf_header hdr; + u32 nr_types; + u32 types_size; + u32 data_size; + refcount_t refcnt; + u32 id; + struct callback_head rcu; + struct btf_kfunc_set_tab *kfunc_set_tab; + struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; + struct btf_struct_metas *struct_meta_tab; + struct btf_struct_ops_tab *struct_ops_tab; + struct btf *base_btf; + u32 start_id; + u32 start_str_off; + char name[60]; + bool kernel_btf; + __u32 *base_id_map; +}; + +struct bpf_arena; + +struct bpf_ksym { + long unsigned int start; + long unsigned int end; + char name[512]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + +struct bpf_ctx_arg_aux; + +struct bpf_trampoline; + +struct bpf_jit_poke_descriptor; + +struct bpf_kfunc_desc_tab; + +struct bpf_kfunc_btf_tab; + +struct bpf_prog_ops; + +struct btf_mod_pair; + +struct bpf_token; + +struct bpf_prog_offload; + +struct bpf_func_info_aux; + +struct bpf_prog_aux { + atomic64_t refcnt; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; + u32 real_func_cnt; + u32 func_idx; + u32 attach_btf_id; + u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; + struct btf *attach_btf; + const struct bpf_ctx_arg_aux *ctx_arg_info; + void *priv_stack_ptr; + struct mutex dst_mutex; + struct bpf_prog *dst_prog; + struct bpf_trampoline *dst_trampoline; + enum bpf_prog_type saved_dst_prog_type; + enum bpf_attach_type saved_dst_attach_type; + bool verifier_zext; + bool dev_bound; + bool offload_requested; + bool attach_btf_trace; + bool attach_tracing_prog; + bool 
func_proto_unreliable; + bool tail_call_reachable; + bool xdp_has_frags; + bool exception_cb; + bool exception_boundary; + bool is_extended; + bool jits_use_priv_stack; + bool priv_stack_requested; + u64 prog_array_member_cnt; + struct mutex ext_mutex; + struct bpf_arena *arena; + void (*recursion_detected)(struct bpf_prog *); + const struct btf_type *attach_func_proto; + const char *attach_func_name; + struct bpf_prog **func; + void *jit_data; + struct bpf_jit_poke_descriptor *poke_tab; + struct bpf_kfunc_desc_tab *kfunc_tab; + struct bpf_kfunc_btf_tab *kfunc_btf_tab; + u32 size_poke_tab; + struct bpf_ksym ksym; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct mutex used_maps_mutex; + struct btf_mod_pair *used_btfs; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; + u32 verified_insns; + int cgroup_atype; + struct bpf_map *cgroup_storage[2]; + char name[16]; + u64 (*bpf_exception_cb)(u64, u64, u64, u64, u64); + struct bpf_token *token; + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; + struct bpf_line_info *linfo; + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + u32 linfo_idx; + struct module *mod; + u32 num_exentries; + struct exception_table_entry *extable; + union { + struct work_struct work; + struct callback_head rcu; + }; +}; + +enum bpf_reg_type { + NOT_INIT = 0, + SCALAR_VALUE = 1, + PTR_TO_CTX = 2, + CONST_PTR_TO_MAP = 3, + PTR_TO_MAP_VALUE = 4, + PTR_TO_MAP_KEY = 5, + PTR_TO_STACK = 6, + PTR_TO_PACKET_META = 7, + PTR_TO_PACKET = 8, + PTR_TO_PACKET_END = 9, + PTR_TO_FLOW_KEYS = 10, + PTR_TO_SOCKET = 11, + PTR_TO_SOCK_COMMON = 12, + PTR_TO_TCP_SOCK = 13, + PTR_TO_TP_BUFFER = 14, + PTR_TO_XDP_SOCK = 15, + PTR_TO_BTF_ID = 16, + PTR_TO_MEM = 17, + PTR_TO_ARENA = 18, + PTR_TO_BUF = 19, + PTR_TO_FUNC = 20, + CONST_PTR_TO_DYNPTR = 21, + __BPF_REG_TYPE_MAX = 22, + PTR_TO_MAP_VALUE_OR_NULL = 260, + PTR_TO_SOCKET_OR_NULL = 267, + PTR_TO_SOCK_COMMON_OR_NULL = 268, + PTR_TO_TCP_SOCK_OR_NULL = 269, + PTR_TO_BTF_ID_OR_NULL = 272, + __BPF_REG_TYPE_LIMIT = 134217727, +}; + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *); +}; + +struct net_device; + +struct bpf_offload_dev; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +struct btf_func_model { + u8 ret_size; + u8 ret_flags; + u8 nr_args; + u8 arg_size[12]; + u8 arg_flags[12]; +}; + +struct bpf_tramp_image { + void *image; + int size; + struct bpf_ksym ksym; + struct percpu_ref pcref; + void *ip_after_call; + void *ip_epilogue; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct bpf_trampoline { + struct hlist_node hlist; + struct ftrace_ops *fops; + struct mutex mutex; + refcount_t refcnt; + u32 flags; + u64 key; + struct { + struct btf_func_model model; + void *addr; + bool ftrace_managed; + } func; + struct bpf_prog *extension_prog; + struct hlist_head progs_hlist[3]; + int progs_cnt[3]; + struct bpf_tramp_image *cur_image; + long: 32; +}; + +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; + bool called: 1; + bool verified: 1; +}; + +struct bpf_jit_poke_descriptor { + void *tailcall_target; + void *tailcall_bypass; + void *bypass_addr; + void *aux; + union { + struct { + struct bpf_map *map; + u32 key; + } 
tail_call; + }; + bool tailcall_target_stable; + u8 adj_off; + u16 reason; + u32 insn_idx; +}; + +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + struct btf *btf; + u32 btf_id; +}; + +struct btf_mod_pair { + struct btf *btf; + struct module *module; +}; + +struct bpf_token { + struct work_struct work; + atomic64_t refcnt; + struct user_namespace *userns; + long: 32; + u64 allowed_cmds; + u64 allowed_maps; + u64 allowed_progs; + u64 allowed_attachs; +}; + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[0]; +}; + +typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + long unsigned int pad; + }; + perf_copy_f copy; + void *data; + u32 size; +}; + +struct perf_raw_record { + struct perf_raw_frag frag; + u32 size; +}; + +struct perf_branch_stack { + __u64 nr; + __u64 hw_idx; + struct perf_branch_entry entries[0]; +}; + +struct perf_cpu_pmu_context; + +struct perf_output_handle; + +struct pmu { + struct list_head entry; + struct module *module; + struct device *dev; + struct device *parent; + const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; + const char *name; + int type; + int capabilities; + unsigned int scope; + int *pmu_disable_count; + struct perf_cpu_pmu_context *cpu_pmu_context; + atomic_t exclusive_cnt; + int task_ctx_nr; + int hrtimer_interval_ms; + unsigned int nr_addr_filters; + void (*pmu_enable)(struct pmu *); + void (*pmu_disable)(struct pmu *); + int (*event_init)(struct perf_event *); + void (*event_mapped)(struct perf_event *, struct mm_struct *); + void (*event_unmapped)(struct perf_event *, struct mm_struct *); + int (*add)(struct perf_event *, int); + void (*del)(struct perf_event *, int); + void (*start)(struct perf_event *, int); + void (*stop)(struct perf_event *, int); + void (*read)(struct perf_event *); + void (*start_txn)(struct pmu *, unsigned int); + int (*commit_txn)(struct pmu *); + void (*cancel_txn)(struct pmu *); + int (*event_idx)(struct perf_event *); + void (*sched_task)(struct perf_event_pmu_context *, bool); + struct kmem_cache *task_ctx_cache; + void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); + void * (*setup_aux)(struct perf_event *, void **, int, bool); + void (*free_aux)(void *); + long int (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, long unsigned int); + int (*addr_filters_validate)(struct list_head *); + void (*addr_filters_sync)(struct perf_event *); + int (*aux_output_match)(struct perf_event *); + bool (*filter)(struct pmu *, int); + int (*check_period)(struct perf_event *, u64); +}; + +struct perf_event_pmu_context { + struct pmu *pmu; + struct perf_event_context *ctx; + struct list_head pmu_ctx_entry; + struct list_head pinned_active; + struct list_head flexible_active; + unsigned int embedded: 1; + unsigned int nr_events; + unsigned int nr_cgroups; + unsigned int nr_freq; + atomic_t refcount; + struct callback_head callback_head; + void *task_ctx_data; + int rotate_necessary; +}; + +struct perf_cpu_pmu_context { + struct perf_event_pmu_context epc; + struct perf_event_pmu_context *task_epc; + struct list_head sched_cb_entry; + int sched_cb_usage; + int active_oncpu; + int exclusive; + raw_spinlock_t hrtimer_lock; + struct hrtimer hrtimer; + ktime_t hrtimer_interval; + unsigned int hrtimer_active; + long: 32; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_buffer *rb; + 
long unsigned int wakeup; + long unsigned int size; + u64 aux_flags; + union { + void *addr; + long unsigned int head; + }; + int page; +}; + +struct perf_addr_filter_range { + long unsigned int start; + long unsigned int size; +}; + +struct perf_sample_data { + u64 sample_flags; + u64 period; + u64 dyn_size; + u64 type; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + u64 ip; + struct perf_callchain_entry *callchain; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; + union perf_sample_weight weight; + union perf_mem_data_src data_src; + u64 txn; + struct perf_regs regs_user; + struct perf_regs regs_intr; + u64 stack_user_size; + u64 stream_id; + u64 cgroup; + u64 addr; + u64 phys_addr; + u64 data_page_size; + u64 code_page_size; + u64 aux_size; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct prog_entry; + +struct event_filter { + struct prog_entry *prog; + char *filter_string; +}; + +struct perf_cgroup_info; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; + long: 32; +}; + +struct perf_cgroup_info { + u64 time; + u64 timestamp; + u64 timeoffset; + int active; + long: 32; +}; + +struct trace_entry { + short unsigned int type; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; + +struct trace_array; + +struct tracer; + +struct array_buffer; + +struct ring_buffer_iter; + +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + struct array_buffer *array_buffer; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter **buffer_iter; + long unsigned int iter_flags; + void *temp; + unsigned int temp_size; + char *fmt; + unsigned int fmt_size; + atomic_t wait_index; + struct trace_seq tmp_seq; + cpumask_var_t started; + bool closed; + bool snapshot; + struct trace_seq seq; + struct trace_entry *ent; + long unsigned int lost_events; + int leftover; + int ent_size; + int cpu; + u64 ts; + loff_t pos; + long int idx; + long: 32; +}; + +struct trace_buffer; + +struct trace_array_cpu; + +struct array_buffer { + struct trace_array *tr; + struct trace_buffer *buffer; + struct trace_array_cpu *data; + long: 32; + u64 time_start; + int cpu; + long: 32; +}; + +struct trace_pid_list; + +struct trace_event_file; + +struct eventfs_inode; + +struct trace_options; + +struct trace_func_repeats; + +struct trace_array { + struct list_head list; + char *name; + long: 32; + struct array_buffer array_buffer; + unsigned int mapped; + long unsigned int range_addr_start; + long unsigned int range_addr_size; + long int text_delta; + long int data_delta; + struct trace_pid_list *filtered_pids; + struct trace_pid_list *filtered_no_pids; + arch_spinlock_t max_lock; + int buffer_disabled; + int sys_refcount_enter; + int sys_refcount_exit; + struct trace_event_file *enter_syscall_files[468]; + struct trace_event_file *exit_syscall_files[468]; + int stop_count; + int clock_id; + int nr_topts; + bool clear_trace; + int buffer_percent; + unsigned int n_err_log_entries; + struct tracer *current_trace; + unsigned int trace_flags; + unsigned char trace_flags_index[32]; + unsigned int flags; + raw_spinlock_t start_lock; + const char *system_names; + struct list_head err_log; + struct dentry *dir; + struct dentry *options; + struct dentry *percpu_dir; + struct eventfs_inode *event_dir; + struct 
trace_options *topts; + struct list_head systems; + struct list_head events; + struct trace_event_file *trace_marker_file; + cpumask_var_t tracing_cpumask; + cpumask_var_t pipe_cpumask; + int ref; + int trace_ref; + struct ftrace_ops *ops; + struct trace_pid_list *function_pids; + struct trace_pid_list *function_no_pids; + struct fgraph_ops *gops; + struct list_head func_probes; + struct list_head mod_trace; + struct list_head mod_notrace; + int function_enabled; + int no_filter_buffering_ref; + struct list_head hist_vars; + struct trace_func_repeats *last_func_repeats; + bool ring_buffer_expanded; +}; + +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, + TRACE_TYPE_NO_CONSUME = 3, +}; + +struct tracer_flags; + +struct tracer { + const char *name; + int (*init)(struct trace_array *); + void (*reset)(struct trace_array *); + void (*start)(struct trace_array *); + void (*stop)(struct trace_array *); + int (*update_thresh)(struct trace_array *); + void (*open)(struct trace_iterator *); + void (*pipe_open)(struct trace_iterator *); + void (*close)(struct trace_iterator *); + void (*pipe_close)(struct trace_iterator *); + ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *); + ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*print_header)(struct seq_file *); + enum print_line_t (*print_line)(struct trace_iterator *); + int (*set_flag)(struct trace_array *, u32, u32, int); + int (*flag_changed)(struct trace_array *, u32, int); + struct tracer *next; + struct tracer_flags *flags; + int enabled; + bool print_max; + bool allow_instances; + bool noboot; +}; + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +enum trace_reg { + TRACE_REG_REGISTER = 0, + TRACE_REG_UNREGISTER = 1, + TRACE_REG_PERF_REGISTER = 2, + TRACE_REG_PERF_UNREGISTER = 3, + TRACE_REG_PERF_OPEN = 4, + TRACE_REG_PERF_CLOSE = 5, + TRACE_REG_PERF_ADD = 6, + TRACE_REG_PERF_DEL = 7, +}; + +struct trace_event_fields { + const char *type; + union { + struct { + const char *name; + const int size; + const int align; + const int is_signed; + const int filter_type; + const int len; + }; + int (*define_fields)(struct trace_event_call *); + }; +}; + +struct trace_event_class { + const char *system; + void *probe; + void *perf_probe; + int (*reg)(struct trace_event_call *, enum trace_reg, void *); + struct trace_event_fields *fields_array; + struct list_head * (*get_fields)(struct trace_event_call *); + struct list_head fields; + int (*raw_init)(struct trace_event_call *); +}; + +struct trace_subsystem_dir; + +struct trace_event_file { + struct list_head list; + struct trace_event_call *event_call; + struct event_filter *filter; + struct eventfs_inode *ei; + struct trace_array *tr; + struct trace_subsystem_dir *system; + struct list_head triggers; + long unsigned int flags; + refcount_t ref; + atomic_t sm_ref; + atomic_t tm_ref; +}; + +enum { + TRACE_EVENT_FL_CAP_ANY_BIT = 0, + TRACE_EVENT_FL_NO_SET_FILTER_BIT = 1, + TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 2, + TRACE_EVENT_FL_TRACEPOINT_BIT = 3, + TRACE_EVENT_FL_DYNAMIC_BIT = 4, + TRACE_EVENT_FL_KPROBE_BIT = 5, + TRACE_EVENT_FL_UPROBE_BIT = 6, + TRACE_EVENT_FL_EPROBE_BIT = 7, + TRACE_EVENT_FL_FPROBE_BIT = 8, + TRACE_EVENT_FL_CUSTOM_BIT = 9, +}; + 
+enum { + TRACE_EVENT_FL_CAP_ANY = 1, + TRACE_EVENT_FL_NO_SET_FILTER = 2, + TRACE_EVENT_FL_IGNORE_ENABLE = 4, + TRACE_EVENT_FL_TRACEPOINT = 8, + TRACE_EVENT_FL_DYNAMIC = 16, + TRACE_EVENT_FL_KPROBE = 32, + TRACE_EVENT_FL_UPROBE = 64, + TRACE_EVENT_FL_EPROBE = 128, + TRACE_EVENT_FL_FPROBE = 256, + TRACE_EVENT_FL_CUSTOM = 512, +}; + +enum { + EVENT_FILE_FL_ENABLED_BIT = 0, + EVENT_FILE_FL_RECORDED_CMD_BIT = 1, + EVENT_FILE_FL_RECORDED_TGID_BIT = 2, + EVENT_FILE_FL_FILTERED_BIT = 3, + EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, + EVENT_FILE_FL_SOFT_MODE_BIT = 5, + EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, + EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, + EVENT_FILE_FL_TRIGGER_COND_BIT = 8, + EVENT_FILE_FL_PID_FILTER_BIT = 9, + EVENT_FILE_FL_WAS_ENABLED_BIT = 10, + EVENT_FILE_FL_FREED_BIT = 11, +}; + +struct event_subsystem; + +struct trace_subsystem_dir { + struct list_head list; + struct event_subsystem *subsystem; + struct trace_array *tr; + struct eventfs_inode *ei; + int ref_count; + int nr_events; +}; + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING = 1, + FILTER_DYN_STRING = 2, + FILTER_RDYN_STRING = 3, + FILTER_PTR_STRING = 4, + FILTER_TRACE_FN = 5, + FILTER_CPUMASK = 6, + FILTER_COMM = 7, + FILTER_CPU = 8, + FILTER_STACKTRACE = 9, +}; + +union lower_chunk { + union lower_chunk *next; + long unsigned int data[512]; +}; + +union upper_chunk { + union upper_chunk *next; + union lower_chunk *data[256]; +}; + +struct trace_pid_list { + raw_spinlock_t lock; + struct irq_work refill_irqwork; + union upper_chunk *upper[256]; + union upper_chunk *upper_list; + union lower_chunk *lower_list; + int free_upper_chunks; + int free_lower_chunks; +}; + +enum trace_type { + __TRACE_FIRST_TYPE = 0, + TRACE_FN = 1, + TRACE_CTX = 2, + TRACE_WAKE = 3, + TRACE_STACK = 4, + TRACE_PRINT = 5, + TRACE_BPRINT = 6, + TRACE_MMIO_RW = 7, + TRACE_MMIO_MAP = 8, + TRACE_BRANCH = 9, + TRACE_GRAPH_RET = 10, + TRACE_GRAPH_ENT = 11, + TRACE_GRAPH_RETADDR_ENT = 12, + TRACE_USER_STACK = 13, + TRACE_BLK = 14, + TRACE_BPUTS = 15, + TRACE_HWLAT = 16, + TRACE_OSNOISE = 17, + TRACE_TIMERLAT = 18, + TRACE_RAW_DATA = 19, + TRACE_FUNC_REPEATS = 20, + __TRACE_LAST_TYPE = 21, +}; + +struct trace_array_cpu { + atomic_t disabled; + void *buffer_page; + long unsigned int entries; + long unsigned int saved_latency; + long unsigned int critical_start; + long unsigned int critical_end; + long unsigned int critical_sequence; + long unsigned int nice; + long unsigned int policy; + long unsigned int rt_priority; + long unsigned int skipped_entries; + long: 32; + u64 preempt_timestamp; + pid_t pid; + kuid_t uid; + char comm[16]; + int ftrace_ignore_pid; + bool ignore_pid; +}; + +struct trace_option_dentry; + +struct trace_options { + struct tracer *tracer; + struct trace_option_dentry *topts; +}; + +struct tracer_opt; + +struct trace_option_dentry { + struct tracer_opt *opt; + struct tracer_flags *flags; + struct trace_array *tr; + struct dentry *entry; +}; + +struct trace_func_repeats { + long unsigned int ip; + long unsigned int parent_ip; + long unsigned int count; + long: 32; + u64 ts_last_call; +}; + +struct tracer_opt { + const char *name; + u32 bit; +}; + +struct tracer_flags { + u32 val; + struct tracer_opt *opts; + struct tracer *trace; +}; + +enum trace_iterator_bits { + TRACE_ITER_PRINT_PARENT_BIT = 0, + TRACE_ITER_SYM_OFFSET_BIT = 1, + TRACE_ITER_SYM_ADDR_BIT = 2, + TRACE_ITER_VERBOSE_BIT = 3, + TRACE_ITER_RAW_BIT = 4, + TRACE_ITER_HEX_BIT = 5, + TRACE_ITER_BIN_BIT = 6, + TRACE_ITER_BLOCK_BIT = 7, + TRACE_ITER_FIELDS_BIT = 8, + 
TRACE_ITER_PRINTK_BIT = 9, + TRACE_ITER_ANNOTATE_BIT = 10, + TRACE_ITER_USERSTACKTRACE_BIT = 11, + TRACE_ITER_SYM_USEROBJ_BIT = 12, + TRACE_ITER_PRINTK_MSGONLY_BIT = 13, + TRACE_ITER_CONTEXT_INFO_BIT = 14, + TRACE_ITER_LATENCY_FMT_BIT = 15, + TRACE_ITER_RECORD_CMD_BIT = 16, + TRACE_ITER_RECORD_TGID_BIT = 17, + TRACE_ITER_OVERWRITE_BIT = 18, + TRACE_ITER_STOP_ON_FREE_BIT = 19, + TRACE_ITER_IRQ_INFO_BIT = 20, + TRACE_ITER_MARKERS_BIT = 21, + TRACE_ITER_EVENT_FORK_BIT = 22, + TRACE_ITER_TRACE_PRINTK_BIT = 23, + TRACE_ITER_PAUSE_ON_TRACE_BIT = 24, + TRACE_ITER_HASH_PTR_BIT = 25, + TRACE_ITER_FUNCTION_BIT = 26, + TRACE_ITER_FUNC_FORK_BIT = 27, + TRACE_ITER_DISPLAY_GRAPH_BIT = 28, + TRACE_ITER_STACKTRACE_BIT = 29, + TRACE_ITER_LAST_BIT = 30, +}; + +struct event_subsystem { + struct list_head list; + const char *name; + struct event_filter *filter; + int ref_count; +}; + +struct pcpu_freelist_node; + +struct pcpu_freelist_head { + struct pcpu_freelist_node *first; + raw_spinlock_t lock; +}; + +struct pcpu_freelist_node { + struct pcpu_freelist_node *next; +}; + +struct pcpu_freelist { + struct pcpu_freelist_head *freelist; + struct pcpu_freelist_head extralist; +}; + +enum bpf_lru_list_type { + BPF_LRU_LIST_T_ACTIVE = 0, + BPF_LRU_LIST_T_INACTIVE = 1, + BPF_LRU_LIST_T_FREE = 2, + BPF_LRU_LOCAL_LIST_T_FREE = 3, + BPF_LRU_LOCAL_LIST_T_PENDING = 4, +}; + +struct bpf_lru_node { + struct list_head list; + u16 cpu; + u8 type; + u8 ref; +}; + +struct bpf_lru_list { + struct list_head lists[3]; + unsigned int counts[2]; + struct list_head *next_inactive_rotation; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + raw_spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_lru_locallist { + struct list_head lists[2]; + u16 next_steal; + raw_spinlock_t lock; +}; + +struct bpf_common_lru { + struct bpf_lru_list lru_list; + struct bpf_lru_locallist *local_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); + +struct bpf_lru { + union { + struct bpf_common_lru common_lru; + struct bpf_lru_list *percpu_lru; + }; + del_from_htab_func del_from_htab; + void *del_arg; + unsigned int hash_offset; + unsigned int nr_scans; + bool percpu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct fd { + long unsigned int word; +}; + +typedef struct fd class_fd_t; + +struct bpf_array_aux { + struct list_head poke_progs; + struct bpf_map *map; + struct mutex poke_mutex; + struct work_struct work; +}; + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + struct bpf_array_aux *aux; + long: 32; + union { + struct { + struct {} __empty_value; + char value[0]; + }; + struct { + struct {} __empty_ptrs; + void *ptrs[0]; + }; + struct { + struct {} __empty_pptrs; + void *pptrs[0]; + }; + }; +}; + +typedef __s16 s16; + +typedef __u16 __be16; + +typedef __u32 __be32; + +typedef __u32 __wsum; + +typedef void (*rcu_callback_t)(struct callback_head *); + +typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); + +enum { + BPF_REG_0 = 0, + BPF_REG_1 = 1, + BPF_REG_2 = 2, + BPF_REG_3 = 3, + BPF_REG_4 = 4, + 
BPF_REG_5 = 5, + BPF_REG_6 = 6, + BPF_REG_7 = 7, + BPF_REG_8 = 8, + BPF_REG_9 = 9, + BPF_REG_10 = 10, + __MAX_BPF_REG = 11, +}; + +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, + BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, + BPF_LINK_TYPE_STRUCT_OPS = 9, + BPF_LINK_TYPE_NETFILTER = 10, + BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SOCKMAP = 14, + __MAX_BPF_LINK_TYPE = 15, +}; + +enum { + BPF_F_NO_PREALLOC = 1, + BPF_F_NO_COMMON_LRU = 2, + BPF_F_NUMA_NODE = 4, + BPF_F_RDONLY = 8, + BPF_F_WRONLY = 16, + BPF_F_STACK_BUILD_ID = 32, + BPF_F_ZERO_SEED = 64, + BPF_F_RDONLY_PROG = 128, + BPF_F_WRONLY_PROG = 256, + BPF_F_CLONE = 512, + BPF_F_MMAPABLE = 1024, + BPF_F_PRESERVE_ELEMS = 2048, + BPF_F_INNER_MAP = 4096, + BPF_F_LINK = 8192, + BPF_F_PATH_FD = 16384, + BPF_F_VTYPE_BTF_OBJ_FD = 32768, + BPF_F_TOKEN_FD = 65536, + BPF_F_SEGV_ON_FAULT = 131072, + BPF_F_NO_USER_CONV = 262144, +}; + +enum bpf_func_id { + BPF_FUNC_unspec = 0, + BPF_FUNC_map_lookup_elem = 1, + BPF_FUNC_map_update_elem = 2, + BPF_FUNC_map_delete_elem = 3, + BPF_FUNC_probe_read = 4, + BPF_FUNC_ktime_get_ns = 5, + BPF_FUNC_trace_printk = 6, + BPF_FUNC_get_prandom_u32 = 7, + BPF_FUNC_get_smp_processor_id = 8, + BPF_FUNC_skb_store_bytes = 9, + BPF_FUNC_l3_csum_replace = 10, + BPF_FUNC_l4_csum_replace = 11, + BPF_FUNC_tail_call = 12, + BPF_FUNC_clone_redirect = 13, + BPF_FUNC_get_current_pid_tgid = 14, + BPF_FUNC_get_current_uid_gid = 15, + BPF_FUNC_get_current_comm = 16, + BPF_FUNC_get_cgroup_classid = 17, + BPF_FUNC_skb_vlan_push = 18, + BPF_FUNC_skb_vlan_pop = 19, + BPF_FUNC_skb_get_tunnel_key = 20, + BPF_FUNC_skb_set_tunnel_key = 21, + BPF_FUNC_perf_event_read = 22, + BPF_FUNC_redirect = 23, + BPF_FUNC_get_route_realm = 24, + BPF_FUNC_perf_event_output = 25, + BPF_FUNC_skb_load_bytes = 26, + BPF_FUNC_get_stackid = 27, + BPF_FUNC_csum_diff = 28, + BPF_FUNC_skb_get_tunnel_opt = 29, + BPF_FUNC_skb_set_tunnel_opt = 30, + BPF_FUNC_skb_change_proto = 31, + BPF_FUNC_skb_change_type = 32, + BPF_FUNC_skb_under_cgroup = 33, + BPF_FUNC_get_hash_recalc = 34, + BPF_FUNC_get_current_task = 35, + BPF_FUNC_probe_write_user = 36, + BPF_FUNC_current_task_under_cgroup = 37, + BPF_FUNC_skb_change_tail = 38, + BPF_FUNC_skb_pull_data = 39, + BPF_FUNC_csum_update = 40, + BPF_FUNC_set_hash_invalid = 41, + BPF_FUNC_get_numa_node_id = 42, + BPF_FUNC_skb_change_head = 43, + BPF_FUNC_xdp_adjust_head = 44, + BPF_FUNC_probe_read_str = 45, + BPF_FUNC_get_socket_cookie = 46, + BPF_FUNC_get_socket_uid = 47, + BPF_FUNC_set_hash = 48, + BPF_FUNC_setsockopt = 49, + BPF_FUNC_skb_adjust_room = 50, + BPF_FUNC_redirect_map = 51, + BPF_FUNC_sk_redirect_map = 52, + BPF_FUNC_sock_map_update = 53, + BPF_FUNC_xdp_adjust_meta = 54, + BPF_FUNC_perf_event_read_value = 55, + BPF_FUNC_perf_prog_read_value = 56, + BPF_FUNC_getsockopt = 57, + BPF_FUNC_override_return = 58, + BPF_FUNC_sock_ops_cb_flags_set = 59, + BPF_FUNC_msg_redirect_map = 60, + BPF_FUNC_msg_apply_bytes = 61, + BPF_FUNC_msg_cork_bytes = 62, + BPF_FUNC_msg_pull_data = 63, + BPF_FUNC_bind = 64, + BPF_FUNC_xdp_adjust_tail = 65, + BPF_FUNC_skb_get_xfrm_state = 66, + BPF_FUNC_get_stack = 67, + BPF_FUNC_skb_load_bytes_relative = 68, + BPF_FUNC_fib_lookup = 69, + BPF_FUNC_sock_hash_update = 70, + BPF_FUNC_msg_redirect_hash = 71, + BPF_FUNC_sk_redirect_hash = 72, + 
BPF_FUNC_lwt_push_encap = 73, + BPF_FUNC_lwt_seg6_store_bytes = 74, + BPF_FUNC_lwt_seg6_adjust_srh = 75, + BPF_FUNC_lwt_seg6_action = 76, + BPF_FUNC_rc_repeat = 77, + BPF_FUNC_rc_keydown = 78, + BPF_FUNC_skb_cgroup_id = 79, + BPF_FUNC_get_current_cgroup_id = 80, + BPF_FUNC_get_local_storage = 81, + BPF_FUNC_sk_select_reuseport = 82, + BPF_FUNC_skb_ancestor_cgroup_id = 83, + BPF_FUNC_sk_lookup_tcp = 84, + BPF_FUNC_sk_lookup_udp = 85, + BPF_FUNC_sk_release = 86, + BPF_FUNC_map_push_elem = 87, + BPF_FUNC_map_pop_elem = 88, + BPF_FUNC_map_peek_elem = 89, + BPF_FUNC_msg_push_data = 90, + BPF_FUNC_msg_pop_data = 91, + BPF_FUNC_rc_pointer_rel = 92, + BPF_FUNC_spin_lock = 93, + BPF_FUNC_spin_unlock = 94, + BPF_FUNC_sk_fullsock = 95, + BPF_FUNC_tcp_sock = 96, + BPF_FUNC_skb_ecn_set_ce = 97, + BPF_FUNC_get_listener_sock = 98, + BPF_FUNC_skc_lookup_tcp = 99, + BPF_FUNC_tcp_check_syncookie = 100, + BPF_FUNC_sysctl_get_name = 101, + BPF_FUNC_sysctl_get_current_value = 102, + BPF_FUNC_sysctl_get_new_value = 103, + BPF_FUNC_sysctl_set_new_value = 104, + BPF_FUNC_strtol = 105, + BPF_FUNC_strtoul = 106, + BPF_FUNC_sk_storage_get = 107, + BPF_FUNC_sk_storage_delete = 108, + BPF_FUNC_send_signal = 109, + BPF_FUNC_tcp_gen_syncookie = 110, + BPF_FUNC_skb_output = 111, + BPF_FUNC_probe_read_user = 112, + BPF_FUNC_probe_read_kernel = 113, + BPF_FUNC_probe_read_user_str = 114, + BPF_FUNC_probe_read_kernel_str = 115, + BPF_FUNC_tcp_send_ack = 116, + BPF_FUNC_send_signal_thread = 117, + BPF_FUNC_jiffies64 = 118, + BPF_FUNC_read_branch_records = 119, + BPF_FUNC_get_ns_current_pid_tgid = 120, + BPF_FUNC_xdp_output = 121, + BPF_FUNC_get_netns_cookie = 122, + BPF_FUNC_get_current_ancestor_cgroup_id = 123, + BPF_FUNC_sk_assign = 124, + BPF_FUNC_ktime_get_boot_ns = 125, + BPF_FUNC_seq_printf = 126, + BPF_FUNC_seq_write = 127, + BPF_FUNC_sk_cgroup_id = 128, + BPF_FUNC_sk_ancestor_cgroup_id = 129, + BPF_FUNC_ringbuf_output = 130, + BPF_FUNC_ringbuf_reserve = 131, + BPF_FUNC_ringbuf_submit = 132, + BPF_FUNC_ringbuf_discard = 133, + BPF_FUNC_ringbuf_query = 134, + BPF_FUNC_csum_level = 135, + BPF_FUNC_skc_to_tcp6_sock = 136, + BPF_FUNC_skc_to_tcp_sock = 137, + BPF_FUNC_skc_to_tcp_timewait_sock = 138, + BPF_FUNC_skc_to_tcp_request_sock = 139, + BPF_FUNC_skc_to_udp6_sock = 140, + BPF_FUNC_get_task_stack = 141, + BPF_FUNC_load_hdr_opt = 142, + BPF_FUNC_store_hdr_opt = 143, + BPF_FUNC_reserve_hdr_opt = 144, + BPF_FUNC_inode_storage_get = 145, + BPF_FUNC_inode_storage_delete = 146, + BPF_FUNC_d_path = 147, + BPF_FUNC_copy_from_user = 148, + BPF_FUNC_snprintf_btf = 149, + BPF_FUNC_seq_printf_btf = 150, + BPF_FUNC_skb_cgroup_classid = 151, + BPF_FUNC_redirect_neigh = 152, + BPF_FUNC_per_cpu_ptr = 153, + BPF_FUNC_this_cpu_ptr = 154, + BPF_FUNC_redirect_peer = 155, + BPF_FUNC_task_storage_get = 156, + BPF_FUNC_task_storage_delete = 157, + BPF_FUNC_get_current_task_btf = 158, + BPF_FUNC_bprm_opts_set = 159, + BPF_FUNC_ktime_get_coarse_ns = 160, + BPF_FUNC_ima_inode_hash = 161, + BPF_FUNC_sock_from_file = 162, + BPF_FUNC_check_mtu = 163, + BPF_FUNC_for_each_map_elem = 164, + BPF_FUNC_snprintf = 165, + BPF_FUNC_sys_bpf = 166, + BPF_FUNC_btf_find_by_name_kind = 167, + BPF_FUNC_sys_close = 168, + BPF_FUNC_timer_init = 169, + BPF_FUNC_timer_set_callback = 170, + BPF_FUNC_timer_start = 171, + BPF_FUNC_timer_cancel = 172, + BPF_FUNC_get_func_ip = 173, + BPF_FUNC_get_attach_cookie = 174, + BPF_FUNC_task_pt_regs = 175, + BPF_FUNC_get_branch_snapshot = 176, + BPF_FUNC_trace_vprintk = 177, + BPF_FUNC_skc_to_unix_sock = 178, + 
BPF_FUNC_kallsyms_lookup_name = 179, + BPF_FUNC_find_vma = 180, + BPF_FUNC_loop = 181, + BPF_FUNC_strncmp = 182, + BPF_FUNC_get_func_arg = 183, + BPF_FUNC_get_func_ret = 184, + BPF_FUNC_get_func_arg_cnt = 185, + BPF_FUNC_get_retval = 186, + BPF_FUNC_set_retval = 187, + BPF_FUNC_xdp_get_buff_len = 188, + BPF_FUNC_xdp_load_bytes = 189, + BPF_FUNC_xdp_store_bytes = 190, + BPF_FUNC_copy_from_user_task = 191, + BPF_FUNC_skb_set_tstamp = 192, + BPF_FUNC_ima_file_hash = 193, + BPF_FUNC_kptr_xchg = 194, + BPF_FUNC_map_lookup_percpu_elem = 195, + BPF_FUNC_skc_to_mptcp_sock = 196, + BPF_FUNC_dynptr_from_mem = 197, + BPF_FUNC_ringbuf_reserve_dynptr = 198, + BPF_FUNC_ringbuf_submit_dynptr = 199, + BPF_FUNC_ringbuf_discard_dynptr = 200, + BPF_FUNC_dynptr_read = 201, + BPF_FUNC_dynptr_write = 202, + BPF_FUNC_dynptr_data = 203, + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 = 204, + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 = 205, + BPF_FUNC_tcp_raw_check_syncookie_ipv4 = 206, + BPF_FUNC_tcp_raw_check_syncookie_ipv6 = 207, + BPF_FUNC_ktime_get_tai_ns = 208, + BPF_FUNC_user_ringbuf_drain = 209, + BPF_FUNC_cgrp_storage_get = 210, + BPF_FUNC_cgrp_storage_delete = 211, + __BPF_FUNC_MAX_ID = 212, +}; + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[16]; + __u32 ifindex; + __u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_id; + __u64 map_extra; +}; + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + long: 32; + union { + struct { + __u64 tp_name; + __u32 tp_name_len; + long: 32; + } raw_tracepoint; + struct { + __u32 attach_type; + __u32 target_obj_id; + __u32 target_btf_id; + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + long: 32; + } cgroup; + struct { + __u64 target_name; + __u32 target_name_len; + union { + struct { + __u32 map_id; + } map; + }; + union { + struct { + __u64 cgroup_id; + __u32 order; + long: 32; + } cgroup; + struct { + __u32 tid; + __u32 pid; + } task; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + struct { + __u32 map_id; + } struct_ops; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + __u64 addrs; + __u32 count; + __u32 flags; + __u64 missed; + __u64 cookies; + } kprobe_multi; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 path_size; + __u32 count; + __u32 flags; + __u32 pid; + } uprobe_multi; + struct { + __u32 type; + long: 32; + union { + struct { + __u64 file_name; + __u32 name_len; + __u32 offset; + __u64 cookie; + } uprobe; + struct { + __u64 func_name; + __u32 name_len; + __u32 offset; + __u64 addr; + __u64 missed; + __u64 cookie; + } kprobe; + struct { + __u64 tp_name; + __u32 name_len; + long: 32; + __u64 cookie; + } tracepoint; + struct { + __u64 config; + __u32 type; + long: 32; + __u64 cookie; + } event; + }; + } perf_event; + struct { + __u32 ifindex; + __u32 attach_type; + } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } sockmap; + }; +}; + +struct rhashtable; + +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *, u32, u32); + +typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); + +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, 
const void *); + +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +struct bucket_table; + +struct rhashtable { + struct bucket_table *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +struct in6_addr { + union { + __u8 u6_addr8[16]; + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + } in6_u; +}; + +struct bpf_nh_params { + u32 nh_family; + union { + u32 ipv4_nh; + struct in6_addr ipv6_nh; + }; +}; + +struct bpf_redirect_info { + u64 tgt_index; + void *tgt_value; + struct bpf_map *map; + u32 flags; + u32 map_id; + enum bpf_map_type map_type; + struct bpf_nh_params nh; + u32 kern_flags; + long: 32; +}; + +struct bpf_net_context { + struct bpf_redirect_info ri; + struct list_head cpu_map_flush_list; + struct list_head dev_map_flush_list; + struct list_head xskmap_map_flush_list; +}; + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *); + +struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block *next; + int priority; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block *head; +}; + +struct raw_notifier_head { + struct notifier_block *head; +}; + +typedef struct { + union { + void *kernel; + void *user; + }; + bool is_kernel: 1; +} sockptr_t; + +typedef sockptr_t bpfptr_t; + +enum { + BTF_KIND_UNKN = 0, + BTF_KIND_INT = 1, + BTF_KIND_PTR = 2, + BTF_KIND_ARRAY = 3, + BTF_KIND_STRUCT = 4, + BTF_KIND_UNION = 5, + BTF_KIND_ENUM = 6, + BTF_KIND_FWD = 7, + BTF_KIND_TYPEDEF = 8, + BTF_KIND_VOLATILE = 9, + BTF_KIND_CONST = 10, + BTF_KIND_RESTRICT = 11, + BTF_KIND_FUNC = 12, + BTF_KIND_FUNC_PROTO = 13, + BTF_KIND_VAR = 14, + BTF_KIND_DATASEC = 15, + BTF_KIND_FLOAT = 16, + BTF_KIND_DECL_TAG = 17, + BTF_KIND_TYPE_TAG = 18, + BTF_KIND_ENUM64 = 19, + NR_BTF_KINDS = 20, + BTF_KIND_MAX = 19, +}; + +struct btf_member { + __u32 name_off; + __u32 type; + __u32 offset; +}; + +struct btf_param { + __u32 name_off; + __u32 type; +}; + +struct btf_struct_meta { + u32 btf_id; + struct btf_record *record; +}; + +struct ref_tracker_dir {}; + +struct prot_inuse; + +struct netns_core { + struct ctl_table_header *sysctl_hdr; + int sysctl_somaxconn; + int sysctl_optmem_max; + u8 sysctl_txrehash; + u8 sysctl_tstamp_allow_data; + struct prot_inuse *prot_inuse; + struct cpumask *rps_default_mask; +}; + +struct ipstats_mib; + +struct tcp_mib; + +struct linux_mib; + +struct udp_mib; + +struct icmp_mib; + +struct icmpmsg_mib; + +struct icmpv6_mib; + +struct icmpv6msg_mib; + +struct proc_dir_entry; + +struct netns_mib { + struct ipstats_mib *ip_statistics; + struct ipstats_mib *ipv6_statistics; + struct tcp_mib *tcp_statistics; + struct linux_mib *net_statistics; + struct udp_mib *udp_statistics; + struct udp_mib *udp_stats_in6; + struct udp_mib *udplite_statistics; + struct udp_mib *udplite_stats_in6; + struct icmp_mib *icmp_statistics; + struct icmpmsg_mib *icmpmsg_statistics; + struct icmpv6_mib *icmpv6_statistics; + struct icmpv6msg_mib *icmpv6msg_statistics; + struct proc_dir_entry *proc_net_devsnmp6; +}; + +struct netns_packet { + struct mutex sklist_lock; + struct hlist_head sklist; +}; + +struct unix_table { + spinlock_t *locks; + struct hlist_head *buckets; 
+}; + +struct netns_unix { + struct unix_table table; + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; + +struct netns_nexthop { + struct rb_root rb_root; + struct hlist_head *devhash; + unsigned int seq; + u32 last_id_allocated; + struct blocking_notifier_head notifier_chain; +}; + +struct inet_hashinfo; + +struct inet_timewait_death_row { + refcount_t tw_refcount; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct inet_hashinfo *hashinfo; + int sysctl_max_tw_buckets; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct local_ports { + u32 range; + bool warned; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + +typedef struct { + u64 key[2]; +} siphash_key_t; + +struct udp_table; + +struct ipv4_devconf; + +struct ip_ra_chain; + +struct inet_peer_base; + +struct fqdir; + +struct tcp_congestion_ops; + +struct tcp_fastopen_context; + +struct fib_notifier_ops; + +struct netns_ipv4 { + __u8 __cacheline_group_begin__netns_ipv4_read_tx[0]; + u8 sysctl_tcp_early_retrans; + u8 sysctl_tcp_tso_win_divisor; + u8 sysctl_tcp_tso_rtt_log; + u8 sysctl_tcp_autocorking; + int sysctl_tcp_min_snd_mss; + unsigned int sysctl_tcp_notsent_lowat; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_min_rtt_wlen; + int sysctl_tcp_wmem[3]; + u8 sysctl_ip_fwd_use_pmtu; + __u8 __cacheline_group_end__netns_ipv4_read_tx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_txrx[0]; + u8 sysctl_tcp_moderate_rcvbuf; + __u8 __cacheline_group_end__netns_ipv4_read_txrx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_rx[0]; + u8 sysctl_ip_early_demux; + u8 sysctl_tcp_early_demux; + u8 sysctl_tcp_l3mdev_accept; + int sysctl_tcp_reordering; + int sysctl_tcp_rmem[3]; + __u8 __cacheline_group_end__netns_ipv4_read_rx[0]; + long: 32; + long: 32; + struct inet_timewait_death_row tcp_death_row; + struct udp_table *udp_table; + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; + struct ip_ra_chain *ra_chain; + struct mutex ra_mutex; + bool fib_has_custom_local_routes; + bool fib_offload_disabled; + u8 sysctl_tcp_shrink_window; + struct hlist_head *fib_table_hash; + struct sock *fibnl; + struct sock *mc_autojoin_sk; + struct inet_peer_base *peers; + struct fqdir *fqdir; + u8 sysctl_icmp_echo_ignore_all; + u8 sysctl_icmp_echo_enable_probe; + u8 sysctl_icmp_echo_ignore_broadcasts; + u8 sysctl_icmp_ignore_bogus_error_responses; + u8 sysctl_icmp_errors_use_inbound_ifaddr; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + int sysctl_icmp_msgs_per_sec; + int sysctl_icmp_msgs_burst; + atomic_t icmp_global_credit; + u32 icmp_global_stamp; + u32 ip_rt_min_pmtu; + int ip_rt_mtu_expires; + int ip_rt_min_advmss; + struct local_ports ip_local_ports; + u8 sysctl_tcp_ecn; + u8 sysctl_tcp_ecn_fallback; + u8 sysctl_ip_default_ttl; + u8 sysctl_ip_no_pmtu_disc; + u8 sysctl_ip_fwd_update_priority; + u8 sysctl_ip_nonlocal_bind; + u8 sysctl_ip_autobind_reuse; + u8 sysctl_ip_dynaddr; + u8 sysctl_udp_early_demux; + u8 sysctl_nexthop_compat_mode; + u8 sysctl_fwmark_reflect; + u8 sysctl_tcp_fwmark_accept; + u8 sysctl_tcp_mtu_probing; + int 
sysctl_tcp_mtu_probe_floor; + int sysctl_tcp_base_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; + int sysctl_tcp_keepalive_time; + int sysctl_tcp_keepalive_intvl; + u8 sysctl_tcp_keepalive_probes; + u8 sysctl_tcp_syn_retries; + u8 sysctl_tcp_synack_retries; + u8 sysctl_tcp_syncookies; + u8 sysctl_tcp_migrate_req; + u8 sysctl_tcp_comp_sack_nr; + u8 sysctl_tcp_backlog_ack_defer; + u8 sysctl_tcp_pingpong_thresh; + u8 sysctl_tcp_retries1; + u8 sysctl_tcp_retries2; + u8 sysctl_tcp_orphan_retries; + u8 sysctl_tcp_tw_reuse; + int sysctl_tcp_fin_timeout; + u8 sysctl_tcp_sack; + u8 sysctl_tcp_window_scaling; + u8 sysctl_tcp_timestamps; + int sysctl_tcp_rto_min_us; + u8 sysctl_tcp_recovery; + u8 sysctl_tcp_thin_linear_timeouts; + u8 sysctl_tcp_slow_start_after_idle; + u8 sysctl_tcp_retrans_collapse; + u8 sysctl_tcp_stdurg; + u8 sysctl_tcp_rfc1337; + u8 sysctl_tcp_abort_on_overflow; + u8 sysctl_tcp_fack; + int sysctl_tcp_max_reordering; + int sysctl_tcp_adv_win_scale; + u8 sysctl_tcp_dsack; + u8 sysctl_tcp_app_win; + u8 sysctl_tcp_frto; + u8 sysctl_tcp_nometrics_save; + u8 sysctl_tcp_no_ssthresh_metrics_save; + u8 sysctl_tcp_workaround_signed_windows; + int sysctl_tcp_challenge_ack_limit; + u8 sysctl_tcp_min_tso_segs; + u8 sysctl_tcp_reflect_tos; + int sysctl_tcp_invalid_ratelimit; + int sysctl_tcp_pacing_ss_ratio; + int sysctl_tcp_pacing_ca_ratio; + unsigned int sysctl_tcp_child_ehash_entries; + long unsigned int sysctl_tcp_comp_sack_delay_ns; + long unsigned int sysctl_tcp_comp_sack_slack_ns; + int sysctl_max_syn_backlog; + int sysctl_tcp_fastopen; + const struct tcp_congestion_ops *tcp_congestion_control; + struct tcp_fastopen_context *tcp_fastopen_ctx; + unsigned int sysctl_tcp_fastopen_blackhole_timeout; + atomic_t tfo_active_disable_times; + long unsigned int tfo_active_disable_stamp; + u32 tcp_challenge_timestamp; + u32 tcp_challenge_count; + u8 sysctl_tcp_plb_enabled; + u8 sysctl_tcp_plb_idle_rehash_rounds; + u8 sysctl_tcp_plb_rehash_rounds; + u8 sysctl_tcp_plb_suspend_rto_sec; + int sysctl_tcp_plb_cong_thresh; + int sysctl_udp_wmem_min; + int sysctl_udp_rmem_min; + u8 sysctl_fib_notify_on_flag_change; + u8 sysctl_tcp_syn_linear_timeouts; + u8 sysctl_igmp_llm_reports; + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; + atomic_t dev_addr_genid; + unsigned int sysctl_udp_child_hash_entries; + long unsigned int *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; + struct fib_notifier_ops *notifier_ops; + unsigned int fib_seq; + struct fib_notifier_ops *ipmr_notifier_ops; + unsigned int ipmr_seq; + atomic_t rt_genid; + long: 32; + siphash_key_t ip_id_key; + struct hlist_head *inet_addr_lst; + struct delayed_work addr_chk_work; + long: 32; + long: 32; +}; + +struct dst_entry; + +struct sk_buff; + +struct neighbour; + +struct dst_ops { + short unsigned int family; + unsigned int gc_thresh; + void (*gc)(struct dst_ops *); + struct dst_entry * (*check)(struct dst_entry *, __u32); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); + void (*destroy)(struct dst_entry *); + void (*ifdown)(struct dst_entry *, struct net_device *); + void (*negative_advice)(struct sock *, struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); + void (*redirect)(struct dst_entry *, struct sock *, struct 
sk_buff *); + int (*local_out)(struct net *, struct sock *, struct sk_buff *); + struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); + void (*confirm_neigh)(const struct dst_entry *, const void *); + struct kmem_cache *kmem_cachep; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct percpu_counter pcpuc_entries; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct netns_sysctl_ipv6 { + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; + int flush_delay; + int ip6_rt_max_size; + int ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int ip6_rt_min_advmss; + u32 multipath_hash_fields; + u8 multipath_hash_policy; + u8 bindv6only; + u8 flowlabel_consistency; + u8 auto_flowlabels; + int icmpv6_time; + u8 icmpv6_echo_ignore_all; + u8 icmpv6_echo_ignore_multicast; + u8 icmpv6_echo_ignore_anycast; + long unsigned int icmpv6_ratemask[8]; + long unsigned int *icmpv6_ratemask_ptr; + u8 anycast_src_echo_reply; + u8 ip_nonlocal_bind; + u8 fwmark_reflect; + u8 flowlabel_state_ranges; + int idgen_retries; + int idgen_delay; + int flowlabel_reflect; + int max_dst_opts_cnt; + int max_hbh_opts_cnt; + int max_dst_opts_len; + int max_hbh_opts_len; + int seg6_flowlabel; + u32 ioam6_id; + u64 ioam6_id_wide; + u8 skip_notify_on_dev_down; + u8 fib_notify_on_flag_change; + u8 icmpv6_error_anycast_as_unicast; + long: 32; +}; + +struct ipv6_devconf; + +struct fib6_info; + +struct rt6_info; + +struct rt6_statistics; + +struct fib6_table; + +struct seg6_pernet_data; + +struct ioam6_pernet_data; + +struct netns_ipv6 { + struct dst_ops ip6_dst_ops; + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; + struct fqdir *fqdir; + struct fib6_info *fib6_null_entry; + struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; + atomic_t ip6_rt_gc_expire; + long unsigned int ip6_rt_last_gc; + unsigned char flowlabel_has_excl; + struct sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; + struct sock *mc_autojoin_sk; + struct hlist_head *inet6_addr_lst; + spinlock_t addrconf_hash_lock; + struct delayed_work addr_chk_work; + atomic_t dev_addr_genid; + atomic_t fib6_sernum; + struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; + struct fib_notifier_ops *ip6mr_notifier_ops; + unsigned int ipmr_seq; + struct { + struct hlist_head head; + spinlock_t lock; + u32 seq; + } ip6addrlbl_table; + struct ioam6_pernet_data *ioam6_data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nf_logger; + +struct nf_hook_entries; + +struct netns_nf { + struct proc_dir_entry *proc_netfilter; + const struct nf_logger *nf_loggers[11]; + struct ctl_table_header *nf_log_dir_header; + struct nf_hook_entries *hooks_ipv4[5]; + struct nf_hook_entries *hooks_ipv6[5]; + struct nf_hook_entries *hooks_bridge[5]; + unsigned int 
defrag_ipv4_users; + unsigned int defrag_ipv6_users; +}; + +struct nf_ct_event_notifier; + +struct nf_generic_net { + unsigned int timeout; +}; + +struct nf_tcp_net { + unsigned int timeouts[14]; + u8 tcp_loose; + u8 tcp_be_liberal; + u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; +}; + +struct nf_udp_net { + unsigned int timeouts[2]; +}; + +struct nf_icmp_net { + unsigned int timeout; +}; + +struct nf_ip_net { + struct nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; +}; + +struct ip_conntrack_stat; + +struct netns_ct { + u8 sysctl_log_invalid; + u8 sysctl_events; + u8 sysctl_acct; + u8 sysctl_tstamp; + u8 sysctl_checksum; + struct ip_conntrack_stat *stat; + struct nf_ct_event_notifier *nf_conntrack_event_cb; + struct nf_ip_net nf_ct_proto; +}; + +struct netns_nftables { + u8 gencursor; +}; + +struct netns_bpf { + struct bpf_prog_array *run_array[2]; + struct bpf_prog *progs[2]; + struct list_head links[2]; +}; + +struct uevent_sock; + +struct net_generic; + +struct net { + refcount_t passive; + spinlock_t rules_mod_lock; + unsigned int dev_base_seq; + u32 ifindex; + spinlock_t nsid_lock; + atomic_t fnhe_genid; + struct list_head list; + struct list_head exit_list; + struct llist_node cleanup_list; + struct key_tag *key_domain; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct idr netns_ids; + struct ns_common ns; + struct ref_tracker_dir refcnt_tracker; + struct ref_tracker_dir notrefcnt_tracker; + struct list_head dev_base_head; + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct ctl_table_set sysctls; + struct sock *rtnl; + struct sock *genl_sock; + struct uevent_sock *uevent_sock; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; + struct xarray dev_by_index; + struct raw_notifier_head netdev_chain; + u32 hash_mix; + struct net_device *loopback_dev; + struct list_head rules_ops; + struct netns_core core; + struct netns_mib mib; + struct netns_packet packet; + struct netns_unix unx; + struct netns_nexthop nexthop; + long: 32; + struct netns_ipv4 ipv4; + struct netns_ipv6 ipv6; + struct netns_nf nf; + struct netns_ct ct; + struct netns_nftables nft; + struct net_generic *gen; + struct netns_bpf bpf; + u64 net_cookie; + struct sock *diag_nlsk; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef struct { + u64 v; +} u64_stats_t; + +struct bpf_verifier_log { + u64 start_pos; + u64 end_pos; + char *ubuf; + u32 level; + u32 len_total; + u32 len_max; + char kbuf[1024]; +}; + +enum priv_stack_mode { + PRIV_STACK_UNKNOWN = 0, + NO_PRIV_STACK = 1, + PRIV_STACK_ADAPTIVE = 2, +}; + +enum bpf_arg_type { + ARG_DONTCARE = 0, + ARG_CONST_MAP_PTR = 1, + ARG_PTR_TO_MAP_KEY = 2, + ARG_PTR_TO_MAP_VALUE = 3, + ARG_PTR_TO_MEM = 4, + ARG_PTR_TO_ARENA = 5, + ARG_CONST_SIZE = 6, + ARG_CONST_SIZE_OR_ZERO = 7, + ARG_PTR_TO_CTX = 8, + ARG_ANYTHING = 9, + ARG_PTR_TO_SPIN_LOCK = 10, + ARG_PTR_TO_SOCK_COMMON = 11, + ARG_PTR_TO_SOCKET = 12, + ARG_PTR_TO_BTF_ID = 13, + ARG_PTR_TO_RINGBUF_MEM = 14, + ARG_CONST_ALLOC_SIZE_OR_ZERO = 15, + ARG_PTR_TO_BTF_ID_SOCK_COMMON = 16, + ARG_PTR_TO_PERCPU_BTF_ID = 17, + ARG_PTR_TO_FUNC = 18, + ARG_PTR_TO_STACK = 19, + ARG_PTR_TO_CONST_STR = 20, + ARG_PTR_TO_TIMER = 21, + ARG_KPTR_XCHG_DEST = 22, + ARG_PTR_TO_DYNPTR = 23, + __BPF_ARG_TYPE_MAX = 24, + ARG_PTR_TO_MAP_VALUE_OR_NULL = 259, + ARG_PTR_TO_MEM_OR_NULL = 
260, + ARG_PTR_TO_CTX_OR_NULL = 264, + ARG_PTR_TO_SOCKET_OR_NULL = 268, + ARG_PTR_TO_STACK_OR_NULL = 275, + ARG_PTR_TO_BTF_ID_OR_NULL = 269, + ARG_PTR_TO_UNINIT_MEM = 67141636, + ARG_PTR_TO_FIXED_SIZE_MEM = 262148, + __BPF_ARG_TYPE_LIMIT = 134217727, +}; + +struct bpf_subprog_arg_info { + enum bpf_arg_type arg_type; + union { + u32 mem_size; + u32 btf_id; + }; +}; + +struct bpf_subprog_info { + u32 start; + u32 linfo_idx; + u16 stack_depth; + u16 stack_extra; + s16 fastcall_stack_off; + bool has_tail_call: 1; + bool tail_call_reachable: 1; + bool has_ld_abs: 1; + bool is_cb: 1; + bool is_async_cb: 1; + bool is_exception_cb: 1; + bool args_cached: 1; + bool keep_fastcall_stack: 1; + enum priv_stack_mode priv_stack_mode; + u8 arg_cnt; + struct bpf_subprog_arg_info args[5]; +}; + +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +struct bpf_idmap { + u32 tmp_id_gen; + struct bpf_id_pair map[600]; +}; + +struct bpf_idset { + u32 count; + u32 ids[600]; +}; + +struct backtrack_state { + struct bpf_verifier_env *env; + u32 frame; + u32 reg_masks[8]; + u64 stack_masks[8]; +}; + +enum bpf_dynptr_type { + BPF_DYNPTR_TYPE_INVALID = 0, + BPF_DYNPTR_TYPE_LOCAL = 1, + BPF_DYNPTR_TYPE_RINGBUF = 2, + BPF_DYNPTR_TYPE_SKB = 3, + BPF_DYNPTR_TYPE_XDP = 4, +}; + +enum bpf_iter_state { + BPF_ITER_STATE_INVALID = 0, + BPF_ITER_STATE_ACTIVE = 1, + BPF_ITER_STATE_DRAINED = 2, +}; + +struct tnum { + u64 value; + u64 mask; +}; + +enum bpf_reg_liveness { + REG_LIVE_NONE = 0, + REG_LIVE_READ32 = 1, + REG_LIVE_READ64 = 2, + REG_LIVE_READ = 3, + REG_LIVE_WRITTEN = 4, + REG_LIVE_DONE = 8, +}; + +struct bpf_reg_state { + enum bpf_reg_type type; + s32 off; + union { + int range; + struct { + struct bpf_map *map_ptr; + u32 map_uid; + }; + struct { + struct btf *btf; + u32 btf_id; + }; + struct { + u32 mem_size; + u32 dynptr_id; + }; + struct { + enum bpf_dynptr_type type; + bool first_slot; + } dynptr; + struct { + struct btf *btf; + u32 btf_id; + enum bpf_iter_state state: 2; + int depth: 30; + } iter; + struct { + long unsigned int raw1; + long unsigned int raw2; + } raw; + u32 subprogno; + }; + long: 32; + struct tnum var_off; + s64 smin_value; + s64 smax_value; + u64 umin_value; + u64 umax_value; + s32 s32_min_value; + s32 s32_max_value; + u32 u32_min_value; + u32 u32_max_value; + u32 id; + u32 ref_obj_id; + struct bpf_reg_state *parent; + u32 frameno; + s32 subreg_def; + enum bpf_reg_liveness live; + bool precise; + long: 32; +}; + +struct bpf_verifier_ops; + +struct bpf_verifier_stack_elem; + +struct bpf_verifier_state; + +struct bpf_verifier_state_list; + +struct bpf_insn_aux_data; + +struct bpf_insn_hist_entry; + +struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; + struct bpf_prog *prog; + const struct bpf_verifier_ops *ops; + struct module *attach_btf_mod; + struct bpf_verifier_stack_elem *head; + int stack_size; + bool strict_alignment; + bool test_state_freq; + bool test_reg_invariants; + struct bpf_verifier_state *cur_state; + struct bpf_verifier_state_list **explored_states; + struct bpf_verifier_state_list *free_list; + struct bpf_map *used_maps[64]; + struct btf_mod_pair used_btfs[64]; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 id_gen; + u32 hidden_subprog_cnt; + int exception_callback_subprog; + bool explore_alu_limits; + bool allow_ptr_leaks; + bool allow_uninit_stack; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; + bool seen_direct_write; + bool seen_exception; + struct bpf_insn_aux_data *insn_aux_data; + const struct bpf_line_info *prev_linfo; + struct bpf_verifier_log 
log; + struct bpf_subprog_info subprog_info[258]; + union { + struct bpf_idmap idmap_scratch; + struct bpf_idset idset_scratch; + }; + struct { + int *insn_state; + int *insn_stack; + int cur_stack; + } cfg; + struct backtrack_state bt; + struct bpf_insn_hist_entry *insn_hist; + struct bpf_insn_hist_entry *cur_hist_ent; + u32 insn_hist_cap; + u32 pass_cnt; + u32 subprog_cnt; + u32 prev_insn_processed; + u32 insn_processed; + u32 prev_jmps_processed; + u32 jmps_processed; + long: 32; + u64 verification_time; + u32 max_states_per_insn; + u32 total_states; + u32 peak_states; + u32 longest_mark_read_walk; + bpfptr_t fd_array; + u32 scratched_regs; + long: 32; + u64 scratched_stack_slots; + u64 prev_log_pos; + u64 prev_insn_print_pos; + struct bpf_reg_state fake_reg[2]; + char tmp_str_buf[320]; + struct bpf_insn insn_buf[32]; + struct bpf_insn epilogue_buf[32]; +}; + +struct bpf_retval_range { + s32 minval; + s32 maxval; +}; + +struct bpf_reference_state; + +struct bpf_stack_state; + +struct bpf_func_state { + struct bpf_reg_state regs[11]; + int callsite; + u32 frameno; + u32 subprogno; + u32 async_entry_cnt; + struct bpf_retval_range callback_ret_range; + bool in_callback_fn; + bool in_async_callback_fn; + bool in_exception_callback_fn; + u32 callback_depth; + int acquired_refs; + int active_locks; + struct bpf_reference_state *refs; + struct bpf_stack_state *stack; + int allocated_stack; + long: 32; +}; + +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); + int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); + int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); + int (*map_delete_elem)(struct bpf_offloaded_map *, void *); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; + long: 32; +}; + +typedef u64 netdev_features_t; + +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + +enum rx_handler_result { + RX_HANDLER_CONSUMED = 0, + RX_HANDLER_ANOTHER = 1, + RX_HANDLER_EXACT = 2, + RX_HANDLER_PASS = 3, +}; + +typedef enum rx_handler_result rx_handler_result_t; + +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); + +typedef struct { + struct net *net; +} possible_net_t; + +typedef u32 xdp_features_t; + +struct net_device_stats { + union { + long unsigned int rx_packets; + atomic_long_t __rx_packets; + }; + union { + long unsigned int tx_packets; + atomic_long_t __tx_packets; + }; + union { + long unsigned int rx_bytes; + atomic_long_t __rx_bytes; + }; + union { + long unsigned int tx_bytes; + atomic_long_t __tx_bytes; + }; + union { + long unsigned int rx_errors; + atomic_long_t __rx_errors; + }; + union { + long unsigned int tx_errors; + atomic_long_t __tx_errors; + }; + union { + long unsigned int rx_dropped; + atomic_long_t __rx_dropped; + }; + union { + long unsigned int tx_dropped; + atomic_long_t __tx_dropped; + }; + union { + long unsigned int multicast; + atomic_long_t __multicast; + }; + union { + long unsigned int collisions; + atomic_long_t __collisions; + }; + union { + long unsigned int rx_length_errors; + atomic_long_t __rx_length_errors; + }; + union { + long unsigned int rx_over_errors; + atomic_long_t __rx_over_errors; + }; + union { + long unsigned int rx_crc_errors; + atomic_long_t __rx_crc_errors; + }; + union { + long unsigned int rx_frame_errors; + atomic_long_t __rx_frame_errors; + }; + union { + long unsigned int 
rx_fifo_errors; + atomic_long_t __rx_fifo_errors; + }; + union { + long unsigned int rx_missed_errors; + atomic_long_t __rx_missed_errors; + }; + union { + long unsigned int tx_aborted_errors; + atomic_long_t __tx_aborted_errors; + }; + union { + long unsigned int tx_carrier_errors; + atomic_long_t __tx_carrier_errors; + }; + union { + long unsigned int tx_fifo_errors; + atomic_long_t __tx_fifo_errors; + }; + union { + long unsigned int tx_heartbeat_errors; + atomic_long_t __tx_heartbeat_errors; + }; + union { + long unsigned int tx_window_errors; + atomic_long_t __tx_window_errors; + }; + union { + long unsigned int rx_compressed; + atomic_long_t __rx_compressed; + }; + union { + long unsigned int tx_compressed; + atomic_long_t __tx_compressed; + }; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; + struct rb_root tree; +}; + +enum netdev_ml_priv_type { + ML_PRIV_NONE = 0, + ML_PRIV_CAN = 1, +}; + +enum netdev_stat_type { + NETDEV_PCPU_STAT_NONE = 0, + NETDEV_PCPU_STAT_LSTATS = 1, + NETDEV_PCPU_STAT_TSTATS = 2, + NETDEV_PCPU_STAT_DSTATS = 3, +}; + +struct sfp_bus; + +struct udp_tunnel_nic; + +struct bpf_xdp_link; + +struct bpf_xdp_entity { + struct bpf_prog *prog; + struct bpf_xdp_link *link; +}; + +typedef struct {} netdevice_tracker; + +struct net_device_ops; + +struct header_ops; + +struct netdev_queue; + +struct xps_dev_maps; + +struct bpf_mprog_entry; + +struct pcpu_lstats; + +struct pcpu_sw_netstats; + +struct pcpu_dstats; + +struct inet6_dev; + +struct netdev_rx_queue; + +struct netdev_name_node; + +struct dev_ifalias; + +struct xdp_metadata_ops; + +struct xsk_tx_metadata_ops; + +struct net_device_core_stats; + +struct ethtool_ops; + +struct ndisc_ops; + +struct in_device; + +struct wireless_dev; + +struct cpu_rmap; + +struct Qdisc; + +struct xdp_dev_bulk_queue; + +struct rtnl_link_ops; + +struct netdev_stat_ops; + +struct netdev_queue_mgmt_ops; + +struct phy_link_topology; + +struct phy_device; + +struct udp_tunnel_nic_info; + +struct ethtool_netdev_state; + +struct rtnl_hw_stats64; + +struct devlink_port; + +struct dim_irq_moder; + +struct napi_config; + +struct net_device { + __u8 __cacheline_group_begin__net_device_read_tx[0]; + union { + struct { + long unsigned int priv_flags: 32; + long unsigned int lltx: 1; + }; + struct { + long unsigned int priv_flags: 32; + long unsigned int lltx: 1; + } priv_flags_fast; + }; + const struct net_device_ops *netdev_ops; + const struct header_ops *header_ops; + struct netdev_queue *_tx; + long: 32; + netdev_features_t gso_partial_features; + unsigned int real_num_tx_queues; + unsigned int gso_max_size; + unsigned int gso_ipv4_max_size; + u16 gso_max_segs; + s16 num_tc; + unsigned int mtu; + short unsigned int needed_headroom; + struct netdev_tc_txq tc_to_txq[16]; + struct xps_dev_maps *xps_maps[2]; + struct nf_hook_entries *nf_hooks_egress; + struct bpf_mprog_entry *tcx_egress; + __u8 __cacheline_group_end__net_device_read_tx[0]; + __u8 __cacheline_group_begin__net_device_read_txrx[0]; + union { + struct pcpu_lstats *lstats; + struct pcpu_sw_netstats *tstats; + struct pcpu_dstats *dstats; + }; + long unsigned int state; + unsigned int flags; + short unsigned int hard_header_len; + netdev_features_t features; + struct inet6_dev *ip6_ptr; + __u8 __cacheline_group_end__net_device_read_txrx[0]; + __u8 __cacheline_group_begin__net_device_read_rx[0]; + struct bpf_prog *xdp_prog; + struct list_head ptype_specific; + int ifindex; + unsigned int real_num_rx_queues; + struct netdev_rx_queue *_rx; + unsigned int gro_max_size; 
+ unsigned int gro_ipv4_max_size; + rx_handler_func_t *rx_handler; + void *rx_handler_data; + possible_net_t nd_net; + struct bpf_mprog_entry *tcx_ingress; + __u8 __cacheline_group_end__net_device_read_rx[0]; + char name[16]; + struct netdev_name_node *name_node; + struct dev_ifalias *ifalias; + long unsigned int mem_end; + long unsigned int mem_start; + long unsigned int base_addr; + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + struct list_head close_list; + struct list_head ptype_all; + struct { + struct list_head upper; + struct list_head lower; + } adj_list; + xdp_features_t xdp_features; + const struct xdp_metadata_ops *xdp_metadata_ops; + const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; + short unsigned int gflags; + short unsigned int needed_tailroom; + netdev_features_t hw_features; + netdev_features_t wanted_features; + netdev_features_t vlan_features; + netdev_features_t hw_enc_features; + netdev_features_t mpls_features; + unsigned int min_mtu; + unsigned int max_mtu; + short unsigned int type; + unsigned char min_header_len; + unsigned char name_assign_type; + int group; + struct net_device_stats stats; + struct net_device_core_stats *core_stats; + atomic_t carrier_up_count; + atomic_t carrier_down_count; + const struct ethtool_ops *ethtool_ops; + const struct ndisc_ops *ndisc_ops; + unsigned int operstate; + unsigned char link_mode; + unsigned char if_port; + unsigned char dma; + unsigned char perm_addr[32]; + unsigned char addr_assign_type; + unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; + short unsigned int neigh_priv_len; + short unsigned int dev_id; + short unsigned int dev_port; + int irq; + u32 priv_len; + spinlock_t addr_list_lock; + struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + struct kset *queues_kset; + unsigned int promiscuity; + unsigned int allmulti; + bool uc_promisc; + struct in_device *ip_ptr; + struct hlist_head fib_nh_head; + struct wireless_dev *ieee80211_ptr; + const unsigned char *dev_addr; + unsigned int num_rx_queues; + unsigned int xdp_zc_max_segs; + struct netdev_queue *ingress_queue; + struct nf_hook_entries *nf_hooks_ingress; + unsigned char broadcast[32]; + struct cpu_rmap *rx_cpu_rmap; + struct hlist_node index_hlist; + unsigned int num_tx_queues; + struct Qdisc *qdisc; + unsigned int tx_queue_len; + spinlock_t tx_global_lock; + struct xdp_dev_bulk_queue *xdp_bulkq; + struct timer_list watchdog_timer; + int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; + int *pcpu_refcnt; + struct ref_tracker_dir refcnt_tracker; + struct list_head link_watch_list; + u8 reg_state; + bool dismantle; + enum { + RTNL_LINK_INITIALIZED = 0, + RTNL_LINK_INITIALIZING = 1, + } rtnl_link_state: 16; + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *); + void *ml_priv; + enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type: 8; + struct device dev; + const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_rx_queue_group; + const struct rtnl_link_ops *rtnl_link_ops; + const struct netdev_stat_ops *stat_ops; + const struct netdev_queue_mgmt_ops *queue_mgmt_ops; + unsigned int tso_max_size; + u16 tso_max_segs; + u8 prio_tc_map[16]; + struct phy_link_topology *link_topo; + struct phy_device *phydev; + struct sfp_bus *sfp_bus; + struct lock_class_key *qdisc_tx_busylock; + bool proto_down; + bool threaded; + long unsigned int 
see_all_hwtstamp_requests: 1; + long unsigned int change_proto_down: 1; + long unsigned int netns_local: 1; + long unsigned int fcoe_mtu: 1; + struct list_head net_notifier_list; + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; + struct ethtool_netdev_state *ethtool; + struct bpf_xdp_entity xdp_state[3]; + u8 dev_addr_shadow[32]; + netdevice_tracker linkwatch_dev_tracker; + netdevice_tracker watchdog_dev_tracker; + netdevice_tracker dev_registered_tracker; + struct rtnl_hw_stats64 *offload_xstats_l3; + struct devlink_port *devlink_port; + struct hlist_head page_pools; + struct dim_irq_moder *irq_moder; + u64 max_pacing_offload_horizon; + struct napi_config *napi_config; + long unsigned int gro_flush_timeout; + u32 napi_defer_hard_irqs; + struct mutex lock; + struct hlist_head neighbours[2]; + u8 priv[0]; +}; + +enum bpf_type_flag { + PTR_MAYBE_NULL = 256, + MEM_RDONLY = 512, + MEM_RINGBUF = 1024, + MEM_USER = 2048, + MEM_PERCPU = 4096, + OBJ_RELEASE = 8192, + PTR_UNTRUSTED = 16384, + MEM_UNINIT = 32768, + DYNPTR_TYPE_LOCAL = 65536, + DYNPTR_TYPE_RINGBUF = 131072, + MEM_FIXED_SIZE = 262144, + MEM_ALLOC = 524288, + PTR_TRUSTED = 1048576, + MEM_RCU = 2097152, + NON_OWN_REF = 4194304, + DYNPTR_TYPE_SKB = 8388608, + DYNPTR_TYPE_XDP = 16777216, + MEM_ALIGNED = 33554432, + MEM_WRITE = 67108864, + __BPF_TYPE_FLAG_MAX = 67108865, + __BPF_TYPE_LAST_FLAG = 67108864, +}; + +enum bpf_return_type { + RET_INTEGER = 0, + RET_VOID = 1, + RET_PTR_TO_MAP_VALUE = 2, + RET_PTR_TO_SOCKET = 3, + RET_PTR_TO_TCP_SOCK = 4, + RET_PTR_TO_SOCK_COMMON = 5, + RET_PTR_TO_MEM = 6, + RET_PTR_TO_MEM_OR_BTF_ID = 7, + RET_PTR_TO_BTF_ID = 8, + __BPF_RET_TYPE_MAX = 9, + RET_PTR_TO_MAP_VALUE_OR_NULL = 258, + RET_PTR_TO_SOCKET_OR_NULL = 259, + RET_PTR_TO_TCP_SOCK_OR_NULL = 260, + RET_PTR_TO_SOCK_COMMON_OR_NULL = 261, + RET_PTR_TO_RINGBUF_MEM_OR_NULL = 1286, + RET_PTR_TO_DYNPTR_MEM_OR_NULL = 262, + RET_PTR_TO_BTF_ID_OR_NULL = 264, + RET_PTR_TO_BTF_ID_TRUSTED = 1048584, + __BPF_RET_TYPE_LIMIT = 134217727, +}; + +struct bpf_func_proto { + u64 (*func)(u64, u64, u64, u64, u64); + bool gpl_only; + bool pkt_access; + bool might_sleep; + bool allow_fastcall; + enum bpf_return_type ret_type; + union { + struct { + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; + }; + enum bpf_arg_type arg_type[5]; + }; + union { + struct { + u32 *arg1_btf_id; + u32 *arg2_btf_id; + u32 *arg3_btf_id; + u32 *arg4_btf_id; + u32 *arg5_btf_id; + }; + u32 *arg_btf_id[5]; + struct { + size_t arg1_size; + size_t arg2_size; + size_t arg3_size; + size_t arg4_size; + size_t arg5_size; + }; + size_t arg_size[5]; + }; + int *ret_btf_id; + bool (*allowed)(const struct bpf_prog *); +}; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2, +}; + +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + bool is_ldsx; + union { + int ctx_field_size; + struct { + struct btf *btf; + u32 btf_id; + }; + }; + struct bpf_verifier_log *log; + bool is_retval; +}; + +struct bpf_verifier_ops { + const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *); + bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *); + int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); + int (*gen_epilogue)(struct bpf_insn *, const struct bpf_prog *, s16); + int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); + u32 
(*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + int (*btf_struct_access)(struct bpf_verifier_log *, const struct bpf_reg_state *, int, int); +}; + +struct bpf_tramp_link; + +struct bpf_tramp_links { + struct bpf_tramp_link *links[38]; + int nr_links; +}; + +struct bpf_link_ops; + +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + bool sleepable; + union { + struct callback_head rcu; + struct work_struct work; + }; + long: 32; +}; + +struct bpf_tramp_link { + struct bpf_link link; + struct hlist_node tramp_hlist; + u64 cookie; +}; + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY = 0, + BPF_TRAMP_FEXIT = 1, + BPF_TRAMP_MODIFY_RETURN = 2, + BPF_TRAMP_MAX = 3, + BPF_TRAMP_REPLACE = 4, +}; + +struct bpf_prog_stats { + u64_stats_t cnt; + u64_stats_t nsecs; + u64_stats_t misses; + struct u64_stats_sync syncp; + long: 32; +}; + +struct sock_fprog_kern { + u16 len; + struct sock_filter *filter; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *); + void (*dealloc)(struct bpf_link *); + void (*dealloc_deferred)(struct bpf_link *); + int (*detach)(struct bpf_link *); + int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); + void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); + int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); + int (*update_map)(struct bpf_link *, struct bpf_map *, struct bpf_map *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + +struct bpf_struct_ops { + const struct bpf_verifier_ops *verifier_ops; + int (*init)(struct btf *); + int (*check_member)(const struct btf_type *, const struct btf_member *, const struct bpf_prog *); + int (*init_member)(const struct btf_type *, const struct btf_member *, void *, const void *); + int (*reg)(void *, struct bpf_link *); + void (*unreg)(void *, struct bpf_link *); + int (*update)(void *, void *, struct bpf_link *); + int (*validate)(void *); + void *cfi_stubs; + struct module *owner; + const char *name; + struct btf_func_model func_models[64]; +}; + +struct bpf_struct_ops_arg_info { + struct bpf_ctx_arg_aux *info; + u32 cnt; +}; + +struct bpf_struct_ops_desc { + struct bpf_struct_ops *st_ops; + const struct btf_type *type; + const struct btf_type *value_type; + u32 type_id; + u32 value_id; + struct bpf_struct_ops_arg_info *arg_info; +}; + +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT = 0, + BPF_STRUCT_OPS_STATE_INUSE = 1, + BPF_STRUCT_OPS_STATE_TOBEFREE = 2, + BPF_STRUCT_OPS_STATE_READY = 3, +}; + +struct bpf_struct_ops_common_value { + refcount_t refcnt; + enum bpf_struct_ops_state state; +}; + +typedef unsigned char *sk_buff_data_t; + +struct skb_ext; + +struct sk_buff { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + union { + struct net_device *dev; + long unsigned int dev_scratch; + }; + }; + struct rb_node rbnode; + struct list_head list; + struct llist_node ll_node; + }; + struct sock *sk; + union { + ktime_t tstamp; + u64 skb_mstamp_ns; + }; + char cb[48]; + union { + struct { + long unsigned int _skb_refdst; + void (*destructor)(struct sk_buff *); + }; + struct list_head tcp_tsorted_anchor; + long unsigned int _sk_redir; + }; + long unsigned int _nfct; + unsigned int len; + unsigned int data_len; + __u16 mac_len; + __u16 hdr_len; + __u16 queue_mapping; + __u8 
__cloned_offset[0]; + __u8 cloned: 1; + __u8 nohdr: 1; + __u8 fclone: 2; + __u8 peeked: 1; + __u8 head_frag: 1; + __u8 pfmemalloc: 1; + __u8 pp_recycle: 1; + __u8 active_extensions; + union { + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + }; + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + } headers; + }; + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head; + unsigned char *data; + unsigned int truesize; + refcount_t users; + struct skb_ext *extensions; + long: 32; +}; + +typedef short unsigned int __kernel_sa_family_t; + +typedef __kernel_sa_family_t sa_family_t; + +struct sockaddr { + sa_family_t sa_family; + union { + char sa_data_min[14]; + struct { + struct {} __empty_sa_data; + char sa_data[0]; + }; + }; +}; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; +} sync_serial_settings; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; + unsigned int slot_map; +} te1_settings; + +typedef struct { + short unsigned int encoding; + short unsigned int parity; +} 
raw_hdlc_proto; + +typedef struct { + unsigned int t391; + unsigned int t392; + unsigned int n391; + unsigned int n392; + unsigned int n393; + short unsigned int lmi; + short unsigned int dce; +} fr_proto; + +typedef struct { + unsigned int dlci; +} fr_proto_pvc; + +typedef struct { + unsigned int dlci; + char master[16]; +} fr_proto_pvc_info; + +typedef struct { + unsigned int interval; + unsigned int timeout; +} cisco_proto; + +typedef struct { + short unsigned int dce; + unsigned int modulo; + unsigned int window; + unsigned int t1; + unsigned int t2; + unsigned int n2; +} x25_hdlc_proto; + +struct ifmap { + long unsigned int mem_start; + long unsigned int mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct if_settings { + unsigned int type; + unsigned int size; + union { + raw_hdlc_proto *raw_hdlc; + cisco_proto *cisco; + fr_proto *fr; + fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; + x25_hdlc_proto *x25; + sync_serial_settings *sync; + te1_settings *te1; + } ifs_ifsu; +}; + +struct ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + void *ifru_data; + struct if_settings ifru_settings; + } ifr_ifru; +}; + +struct tc_stats { + __u64 bytes; + __u32 packets; + __u32 drops; + __u32 overlimits; + __u32 bps; + __u32 pps; + __u32 qlen; + __u32 backlog; + long: 32; +}; + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short int cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int insert; + unsigned int insert_failed; + unsigned int clash_resolve; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; + unsigned int chaintoolong; +}; + +struct sk_buff_list { + struct sk_buff *next; + struct sk_buff *prev; +}; + +struct sk_buff_head { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + }; + struct sk_buff_list list; + }; + __u32 qlen; + spinlock_t lock; +}; + +struct skb_shared_hwtstamps { + union { + ktime_t hwtstamp; + void *netdev_data; + }; +}; + +struct skb_ext { + refcount_t refcnt; + u8 offset[1]; + u8 chunks; + long: 0; + char data[0]; +}; + +struct dql { + unsigned int num_queued; + unsigned int adj_limit; + unsigned int last_obj_cnt; + short unsigned int stall_thrs; + long unsigned int history_head; + long unsigned int history[4]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + unsigned int limit; + unsigned int num_completed; + unsigned int prev_ovlimit; + unsigned int prev_num_queued; + unsigned int prev_last_obj_cnt; + unsigned int lowest_slack; + long unsigned int slack_start_time; + unsigned int max_limit; + unsigned int min_limit; + unsigned int slack_hold_time; + short unsigned int stall_max; + long unsigned int last_reap; + long unsigned int stall_cnt; + long: 32; + long: 32; + long: 32; +}; + +struct ipstats_mib { + u64 mibs[38]; + struct u64_stats_sync syncp; + long: 32; +}; + +struct icmp_mib { + long unsigned int mibs[30]; +}; + 
+struct icmpmsg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6_mib { + long unsigned int mibs[7]; +}; + +struct icmpv6msg_mib { + atomic_long_t mibs[512]; +}; + +struct tcp_mib { + long unsigned int mibs[16]; +}; + +struct udp_mib { + long unsigned int mibs[10]; +}; + +struct linux_mib { + long unsigned int mibs[132]; +}; + +struct inet_frags; + +struct fqdir { + long int high_thresh; + long int low_thresh; + int timeout; + int max_dist; + struct inet_frags *f; + struct net *net; + bool dead; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct rhashtable rhashtable; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_long_t mem; + struct work_struct destroy_work; + struct llist_node free_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct inet_frag_queue; + +struct inet_frags { + unsigned int qsize; + void (*constructor)(struct inet_frag_queue *, const void *); + void (*destructor)(struct inet_frag_queue *); + void (*frag_expire)(struct timer_list *); + struct kmem_cache *frags_cachep; + const char *frags_cache_name; + struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; +}; + +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; + +struct inet_frag_queue { + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; + struct timer_list timer; + spinlock_t lock; + refcount_t refcnt; + struct rb_root rb_fragments; + struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; + ktime_t stamp; + int len; + int meat; + u8 tstamp_type; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; + struct callback_head rcu; +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, +}; + +struct ack_sample; + +struct rate_sample; + +union tcp_cc_info; + +struct tcp_congestion_ops { + u32 (*ssthresh)(struct sock *); + void (*cong_avoid)(struct sock *, u32, u32); + void (*set_state)(struct sock *, u8); + void (*cwnd_event)(struct sock *, enum tcp_ca_event); + void (*in_ack_event)(struct sock *, u32); + void (*pkts_acked)(struct sock *, const struct ack_sample *); + u32 (*min_tso_segs)(struct sock *); + void (*cong_control)(struct sock *, u32, int, const struct rate_sample *); + u32 (*undo_cwnd)(struct sock *); + u32 (*sndbuf_expand)(struct sock *); + size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); + char name[16]; + struct module *owner; + struct list_head list; + u32 key; + u32 flags; + void (*init)(struct sock *); + void (*release)(struct sock *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef struct { + atomic_t refcnt; +} rcuref_t; + +struct lwtunnel_state; + +struct uncached_list; + +struct dst_entry { + struct net_device *dev; + struct dst_ops *ops; + long unsigned int _metrics; + long unsigned int expires; + void *__pad1; + int (*input)(struct sk_buff *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + short unsigned int flags; + short int 
obsolete; + short unsigned int header_len; + short unsigned int trailer_len; + int __use; + long unsigned int lastuse; + struct callback_head callback_head; + short int error; + short int __pad; + __u32 tclassid; + struct lwtunnel_state *lwtstate; + rcuref_t __rcuref; + netdevice_tracker dev_tracker; + struct list_head rt_uncached; + struct uncached_list *rt_uncached_list; +}; + +struct hh_cache { + unsigned int hh_len; + seqlock_t hh_lock; + long unsigned int hh_data[24]; +}; + +struct neigh_table; + +struct neigh_parms; + +struct neigh_ops; + +struct neighbour { + struct hlist_node hash; + struct hlist_node dev_list; + struct neigh_table *tbl; + struct neigh_parms *parms; + long unsigned int confirmed; + long unsigned int updated; + rwlock_t lock; + refcount_t refcnt; + unsigned int arp_queue_len_bytes; + struct sk_buff_head arp_queue; + struct timer_list timer; + long unsigned int used; + atomic_t probes; + u8 nud_state; + u8 type; + u8 dead; + u8 protocol; + u32 flags; + seqlock_t ha_lock; + unsigned char ha[32]; + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); + const struct neigh_ops *ops; + struct list_head gc_list; + struct list_head managed_list; + struct callback_head rcu; + struct net_device *dev; + netdevice_tracker dev_tracker; + u8 primary_key[0]; +}; + +enum nf_log_type { + NF_LOG_TYPE_LOG = 0, + NF_LOG_TYPE_ULOG = 1, + NF_LOG_TYPE_MAX = 2, +}; + +typedef u8 u_int8_t; + +struct nf_loginfo; + +typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); + +struct nf_logger { + char *name; + enum nf_log_type type; + nf_logfn *logfn; + struct module *me; +}; + +struct nlmsghdr { + __u32 nlmsg_len; + __u16 nlmsg_type; + __u16 nlmsg_flags; + __u32 nlmsg_seq; + __u32 nlmsg_pid; +}; + +struct nlattr { + __u16 nla_len; + __u16 nla_type; +}; + +struct nla_policy; + +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + const struct nla_policy *policy; + const struct nlattr *miss_nest; + u16 miss_type; + u8 cookie[20]; + u8 cookie_len; + char _msg_buf[80]; +}; + +struct netlink_range_validation; + +struct netlink_range_validation_signed; + +struct nla_policy { + u8 type; + u8 validation_type; + u16 len; + union { + u16 strict_start_type; + const u32 bitfield32_valid; + const u32 mask; + const char *reject_message; + const struct nla_policy *nested_policy; + const struct netlink_range_validation *range; + const struct netlink_range_validation_signed *range_signed; + struct { + s16 min; + s16 max; + }; + int (*validate)(const struct nlattr *, struct netlink_ext_ack *); + }; +}; + +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + struct netlink_ext_ack *extack; + u16 family; + u16 answer_flags; + u32 min_dump_alloc; + unsigned int prev_seq; + unsigned int seq; + int flags; + bool strict_check; + union { + u8 ctx[48]; + long int args[6]; + }; +}; + +struct ndmsg { + __u8 ndm_family; + __u8 ndm_pad1; + __u16 ndm_pad2; + __s32 ndm_ifindex; + __u16 ndm_state; + __u8 ndm_flags; + __u8 ndm_type; +}; + +struct rtnl_link_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; + __u64 collisions; + __u64 rx_length_errors; + __u64 
rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; + __u64 tx_aborted_errors; + __u64 tx_carrier_errors; + __u64 tx_fifo_errors; + __u64 tx_heartbeat_errors; + __u64 tx_window_errors; + __u64 rx_compressed; + __u64 tx_compressed; + __u64 rx_nohandler; + __u64 rx_otherhost_dropped; +}; + +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; +}; + +struct ifla_vf_guid { + __u32 vf; + long: 32; + __u64 guid; +}; + +struct ifla_vf_stats { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 broadcast; + __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; +}; + +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 spoofchk; + __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; + __u32 rss_query_en; + __u32 trusted; + __be16 vlan_proto; +}; + +enum netdev_tx { + __NETDEV_TX_MIN = -2147483648, + NETDEV_TX_OK = 0, + NETDEV_TX_BUSY = 16, +}; + +typedef enum netdev_tx netdev_tx_t; + +struct net_device_core_stats { + long unsigned int rx_dropped; + long unsigned int tx_dropped; + long unsigned int rx_nohandler; + long unsigned int rx_otherhost_dropped; +}; + +struct header_ops { + int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int); + int (*parse)(const struct sk_buff *, unsigned char *); + int (*cache)(const struct neighbour *, struct hh_cache *, __be16); + void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); + bool (*validate)(const char *, unsigned int); + __be16 (*parse_protocol)(const struct sk_buff *); +}; + +struct gro_list { + struct list_head list; + int count; +}; + +struct napi_config { + u64 gro_flush_timeout; + u64 irq_suspend_timeout; + u32 defer_hard_irqs; + unsigned int napi_id; +}; + +struct napi_struct { + struct list_head poll_list; + long unsigned int state; + int weight; + u32 defer_hard_irqs_count; + long unsigned int gro_bitmask; + int (*poll)(struct napi_struct *, int); + int list_owner; + struct net_device *dev; + struct gro_list gro_hash[8]; + struct sk_buff *skb; + struct list_head rx_list; + int rx_count; + unsigned int napi_id; + struct hrtimer timer; + struct task_struct *thread; + long unsigned int gro_flush_timeout; + long unsigned int irq_suspend_timeout; + u32 defer_hard_irqs; + struct list_head dev_list; + struct hlist_node napi_hash_node; + int irq; + int index; + struct napi_config *config; + long: 32; +}; + +struct netdev_queue { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct Qdisc *qdisc; + struct Qdisc *qdisc_sleeping; + struct kobject kobj; + long unsigned int tx_maxrate; + atomic_long_t trans_timeout; + struct net_device *sb_dev; + long: 32; + struct dql dql; + spinlock_t _xmit_lock; + int xmit_lock_owner; + long unsigned int trans_start; + long unsigned int state; + struct napi_struct *napi; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct qdisc_skb_head { + struct sk_buff *head; + struct sk_buff *tail; + __u32 qlen; + spinlock_t lock; +}; + +struct gnet_stats_basic_sync { + u64_stats_t bytes; + u64_stats_t packets; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; +}; + +struct gnet_stats_queue { + __u32 qlen; + __u32 backlog; + __u32 drops; + 
__u32 requeues; + __u32 overlimits; +}; + +struct Qdisc_ops; + +struct qdisc_size_table; + +struct net_rate_estimator; + +struct Qdisc { + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + unsigned int flags; + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table *stab; + struct hlist_node hash; + u32 handle; + u32 parent; + struct netdev_queue *dev_queue; + struct net_rate_estimator *rate_est; + struct gnet_stats_basic_sync *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + int pad; + refcount_t refcnt; + struct sk_buff_head gso_skb; + struct qdisc_skb_head q; + struct gnet_stats_basic_sync bstats; + struct gnet_stats_queue qstats; + int owner; + long unsigned int state; + long unsigned int state2; + struct Qdisc *next_sched; + struct sk_buff_head skb_bad_txq; + long: 32; + long: 32; + long: 32; + spinlock_t busylock; + spinlock_t seqlock; + struct callback_head rcu; + netdevice_tracker dev_tracker; + struct lock_class_key root_lock_key; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long int privdata[0]; +}; + +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct callback_head rcu; + u16 queues[0]; +}; + +struct xps_dev_maps { + struct callback_head rcu; + unsigned int nr_ids; + s16 num_tc; + struct xps_map *attr_map[0]; +}; + +struct netdev_phys_item_id { + unsigned char id[32]; + unsigned char id_len; +}; + +enum net_device_path_type { + DEV_PATH_ETHERNET = 0, + DEV_PATH_VLAN = 1, + DEV_PATH_BRIDGE = 2, + DEV_PATH_PPPOE = 3, + DEV_PATH_DSA = 4, + DEV_PATH_MTK_WDMA = 5, +}; + +struct net_device_path { + enum net_device_path_type type; + const struct net_device *dev; + union { + struct { + u16 id; + __be16 proto; + u8 h_dest[6]; + } encap; + struct { + enum { + DEV_PATH_BR_VLAN_KEEP = 0, + DEV_PATH_BR_VLAN_TAG = 1, + DEV_PATH_BR_VLAN_UNTAG = 2, + DEV_PATH_BR_VLAN_UNTAG_HW = 3, + } vlan_mode; + u16 vlan_id; + __be16 vlan_proto; + } bridge; + struct { + int port; + u16 proto; + } dsa; + struct { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; + u8 amsdu; + } mtk_wdma; + }; +}; + +struct net_device_path_ctx { + const struct net_device *dev; + u8 daddr[6]; + int num_vlans; + struct { + u16 id; + __be16 proto; + } vlan[2]; +}; + +enum tc_setup_type { + TC_QUERY_CAPS = 0, + TC_SETUP_QDISC_MQPRIO = 1, + TC_SETUP_CLSU32 = 2, + TC_SETUP_CLSFLOWER = 3, + TC_SETUP_CLSMATCHALL = 4, + TC_SETUP_CLSBPF = 5, + TC_SETUP_BLOCK = 6, + TC_SETUP_QDISC_CBS = 7, + TC_SETUP_QDISC_RED = 8, + TC_SETUP_QDISC_PRIO = 9, + TC_SETUP_QDISC_MQ = 10, + TC_SETUP_QDISC_ETF = 11, + TC_SETUP_ROOT_QDISC = 12, + TC_SETUP_QDISC_GRED = 13, + TC_SETUP_QDISC_TAPRIO = 14, + TC_SETUP_FT = 15, + TC_SETUP_QDISC_ETS = 16, + TC_SETUP_QDISC_TBF = 17, + TC_SETUP_QDISC_FIFO = 18, + TC_SETUP_QDISC_HTB = 19, + TC_SETUP_ACT = 20, +}; + +enum bpf_netdev_command { + XDP_SETUP_PROG = 0, + XDP_SETUP_PROG_HW = 1, + BPF_OFFLOAD_MAP_ALLOC = 2, + BPF_OFFLOAD_MAP_FREE = 3, + XDP_SETUP_XSK_POOL = 4, +}; + +struct xsk_buff_pool; + +struct netdev_bpf { + enum bpf_netdev_command command; + union { + struct { + u32 flags; + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; + struct { + struct bpf_offloaded_map *offmap; + }; + struct { + struct xsk_buff_pool *pool; + u16 queue_id; + } xsk; + }; +}; + +struct dev_ifalias { + struct callback_head rcuhead; + char ifalias[0]; +}; + +struct xdp_frame; + +struct xdp_buff; + +struct 
ip_tunnel_parm_kern; + +struct kernel_hwtstamp_config; + +struct net_device_ops { + int (*ndo_init)(struct net_device *); + void (*ndo_uninit)(struct net_device *); + int (*ndo_open)(struct net_device *); + int (*ndo_stop)(struct net_device *); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); + netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t); + u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *); + void (*ndo_change_rx_flags)(struct net_device *, int); + void (*ndo_set_rx_mode)(struct net_device *); + int (*ndo_set_mac_address)(struct net_device *, void *); + int (*ndo_validate_addr)(struct net_device *); + int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_eth_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_siocbond)(struct net_device *, struct ifreq *, int); + int (*ndo_siocwandev)(struct net_device *, struct if_settings *); + int (*ndo_siocdevprivate)(struct net_device *, struct ifreq *, void *, int); + int (*ndo_set_config)(struct net_device *, struct ifmap *); + int (*ndo_change_mtu)(struct net_device *, int); + int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); + void (*ndo_tx_timeout)(struct net_device *, unsigned int); + void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); + bool (*ndo_has_offload_stats)(const struct net_device *, int); + int (*ndo_get_offload_stats)(int, const struct net_device *, void *); + struct net_device_stats * (*ndo_get_stats)(struct net_device *); + int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); + int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); + int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); + int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); + int (*ndo_set_vf_rate)(struct net_device *, int, int, int); + int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); + int (*ndo_set_vf_trust)(struct net_device *, int, bool); + int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); + int (*ndo_set_vf_link_state)(struct net_device *, int, int); + int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); + int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); + int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); + int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, struct ifla_vf_guid *); + int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); + int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); + int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); + int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); + int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *); + int (*ndo_del_slave)(struct net_device *, struct net_device *); + struct net_device * (*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool); + struct net_device * (*ndo_sk_get_lower_dev)(struct net_device *, struct sock *); + netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); + int (*ndo_set_features)(struct net_device *, netdev_features_t); + int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); + void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); + int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16, bool *, struct netlink_ext_ack *); + int (*ndo_fdb_del)(struct 
ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, bool *, struct netlink_ext_ack *); + int (*ndo_fdb_del_bulk)(struct nlmsghdr *, struct net_device *, struct netlink_ext_ack *); + int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); + int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, const unsigned char *, u16, u32, u32, struct netlink_ext_ack *); + int (*ndo_mdb_add)(struct net_device *, struct nlattr **, u16, struct netlink_ext_ack *); + int (*ndo_mdb_del)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); + int (*ndo_mdb_del_bulk)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); + int (*ndo_mdb_dump)(struct net_device *, struct sk_buff *, struct netlink_callback *); + int (*ndo_mdb_get)(struct net_device *, struct nlattr **, u32, u32, struct netlink_ext_ack *); + int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, struct netlink_ext_ack *); + int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); + int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); + int (*ndo_change_carrier)(struct net_device *, bool); + int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); + void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); + void (*ndo_dfwd_del_station)(struct net_device *, void *); + int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); + int (*ndo_get_iflink)(const struct net_device *); + int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); + void (*ndo_set_rx_headroom)(struct net_device *, int); + int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); + int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); + struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *, struct xdp_buff *); + int (*ndo_xsk_wakeup)(struct net_device *, u32, u32); + int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm_kern *, int); + struct net_device * (*ndo_get_peer_dev)(struct net_device *); + int (*ndo_fill_forward_path)(struct net_device_path_ctx *, struct net_device_path *); + ktime_t (*ndo_get_tstamp)(struct net_device *, const struct skb_shared_hwtstamps *, bool); + int (*ndo_hwtstamp_get)(struct net_device *, struct kernel_hwtstamp_config *); + int (*ndo_hwtstamp_set)(struct net_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); +}; + +struct pcpu_lstats { + u64_stats_t packets; + u64_stats_t bytes; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; +}; + +struct pcpu_sw_netstats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct pcpu_dstats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_drops; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_drops; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; +}; + +enum xdp_rss_hash_type { + XDP_RSS_L3_IPV4 = 1, + XDP_RSS_L3_IPV6 = 2, + XDP_RSS_L3_DYNHDR = 4, + XDP_RSS_L4 = 8, + XDP_RSS_L4_TCP = 16, + XDP_RSS_L4_UDP = 32, + XDP_RSS_L4_SCTP = 64, + XDP_RSS_L4_IPSEC = 128, + XDP_RSS_L4_ICMP = 256, + XDP_RSS_TYPE_NONE = 0, + 
XDP_RSS_TYPE_L2 = 0, + XDP_RSS_TYPE_L3_IPV4 = 1, + XDP_RSS_TYPE_L3_IPV6 = 2, + XDP_RSS_TYPE_L3_IPV4_OPT = 5, + XDP_RSS_TYPE_L3_IPV6_EX = 6, + XDP_RSS_TYPE_L4_ANY = 8, + XDP_RSS_TYPE_L4_IPV4_TCP = 25, + XDP_RSS_TYPE_L4_IPV4_UDP = 41, + XDP_RSS_TYPE_L4_IPV4_SCTP = 73, + XDP_RSS_TYPE_L4_IPV4_IPSEC = 137, + XDP_RSS_TYPE_L4_IPV4_ICMP = 265, + XDP_RSS_TYPE_L4_IPV6_TCP = 26, + XDP_RSS_TYPE_L4_IPV6_UDP = 42, + XDP_RSS_TYPE_L4_IPV6_SCTP = 74, + XDP_RSS_TYPE_L4_IPV6_IPSEC = 138, + XDP_RSS_TYPE_L4_IPV6_ICMP = 266, + XDP_RSS_TYPE_L4_IPV6_TCP_EX = 30, + XDP_RSS_TYPE_L4_IPV6_UDP_EX = 46, + XDP_RSS_TYPE_L4_IPV6_SCTP_EX = 78, +}; + +struct xdp_md; + +struct xdp_metadata_ops { + int (*xmo_rx_timestamp)(const struct xdp_md *, u64 *); + int (*xmo_rx_hash)(const struct xdp_md *, u32 *, enum xdp_rss_hash_type *); + int (*xmo_rx_vlan_tag)(const struct xdp_md *, __be16 *, u16 *); +}; + +struct xsk_tx_metadata_ops { + void (*tmo_request_timestamp)(void *); + u64 (*tmo_fill_timestamp)(void *); + void (*tmo_request_checksum)(u16, u16, void *); +}; + +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE = 0, + ETHTOOL_ID_ACTIVE = 1, + ETHTOOL_ID_ON = 2, + ETHTOOL_ID_OFF = 3, +}; + +struct ethtool_drvinfo; + +struct ethtool_regs; + +struct ethtool_wolinfo; + +struct ethtool_link_ext_state_info; + +struct ethtool_link_ext_stats; + +struct ethtool_eeprom; + +struct ethtool_coalesce; + +struct kernel_ethtool_coalesce; + +struct ethtool_ringparam; + +struct kernel_ethtool_ringparam; + +struct ethtool_pause_stats; + +struct ethtool_pauseparam; + +struct ethtool_test; + +struct ethtool_stats; + +struct ethtool_rxnfc; + +struct ethtool_flash; + +struct ethtool_rxfh_param; + +struct ethtool_rxfh_context; + +struct ethtool_channels; + +struct ethtool_dump; + +struct kernel_ethtool_ts_info; + +struct ethtool_ts_stats; + +struct ethtool_modinfo; + +struct ethtool_keee; + +struct ethtool_tunable; + +struct ethtool_link_ksettings; + +struct ethtool_fec_stats; + +struct ethtool_fecparam; + +struct ethtool_module_eeprom; + +struct ethtool_eth_phy_stats; + +struct ethtool_eth_mac_stats; + +struct ethtool_eth_ctrl_stats; + +struct ethtool_rmon_stats; + +struct ethtool_rmon_hist_range; + +struct ethtool_module_power_mode_params; + +struct ethtool_mm_state; + +struct ethtool_mm_cfg; + +struct ethtool_mm_stats; + +struct ethtool_ops { + u32 cap_link_lanes_supported: 1; + u32 cap_rss_ctx_supported: 1; + u32 cap_rss_sym_xor_supported: 1; + u32 rxfh_per_ctx_key: 1; + u32 cap_rss_rxnfc_adds: 1; + u32 rxfh_indir_space; + u16 rxfh_key_space; + u16 rxfh_priv_size; + u32 rxfh_max_num_contexts; + u32 supported_coalesce_params; + u32 supported_ring_params; + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, struct ethtool_link_ext_state_info *); + void (*get_link_ext_stats)(struct net_device *, struct ethtool_link_ext_stats *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct 
ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + void (*get_pause_stats)(struct net_device *, struct ethtool_pause_stats *); + void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct net_device *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *); + int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*create_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*modify_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*remove_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, u32, struct netlink_ext_ack *); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *); + void (*get_ts_stats)(struct net_device *, struct ethtool_ts_stats *); + int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_eee)(struct net_device *, struct ethtool_keee *); + int (*set_eee)(struct net_device *, struct ethtool_keee *); + int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); + void (*get_fec_stats)(struct net_device *, struct ethtool_fec_stats *); + int (*get_fecparam)(struct net_device *, struct 
ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); + void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*get_phy_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); + int (*set_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); + void (*get_eth_phy_stats)(struct net_device *, struct ethtool_eth_phy_stats *); + void (*get_eth_mac_stats)(struct net_device *, struct ethtool_eth_mac_stats *); + void (*get_eth_ctrl_stats)(struct net_device *, struct ethtool_eth_ctrl_stats *); + void (*get_rmon_stats)(struct net_device *, struct ethtool_rmon_stats *, const struct ethtool_rmon_hist_range **); + int (*get_module_power_mode)(struct net_device *, struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); + int (*set_module_power_mode)(struct net_device *, const struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); + int (*get_mm)(struct net_device *, struct ethtool_mm_state *); + int (*set_mm)(struct net_device *, struct ethtool_mm_cfg *, struct netlink_ext_ack *); + void (*get_mm_stats)(struct net_device *, struct ethtool_mm_stats *); +}; + +struct nd_opt_hdr; + +struct ndisc_options; + +struct prefix_info; + +struct ndisc_ops { + int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); + void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *); + int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); + void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); + void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool); +}; + +struct rtnl_link_ops { + struct list_head list; + struct srcu_struct srcu; + const char *kind; + size_t priv_size; + struct net_device * (*alloc)(struct nlattr **, const char *, unsigned char, unsigned int, unsigned int); + void (*setup)(struct net_device *); + bool netns_refund; + const u16 peer_type; + unsigned int maxtype; + const struct nla_policy *policy; + int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + void (*dellink)(struct net_device *, struct list_head *); + size_t (*get_size)(const struct net_device *); + int (*fill_info)(struct sk_buff *, const struct net_device *); + size_t (*get_xstats_size)(const struct net_device *); + int (*fill_xstats)(struct sk_buff *, const struct net_device *); + unsigned int (*get_num_tx_queues)(); + unsigned int (*get_num_rx_queues)(); + unsigned int slave_maxtype; + const struct nla_policy *slave_policy; + int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + size_t (*get_slave_size)(const struct net_device *, const struct net_device *); + int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); + struct net * (*get_link_net)(const 
struct net_device *); + size_t (*get_linkxstats_size)(const struct net_device *, int); + int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); +}; + +struct netdev_queue_stats_rx; + +struct netdev_queue_stats_tx; + +struct netdev_stat_ops { + void (*get_queue_stats_rx)(struct net_device *, int, struct netdev_queue_stats_rx *); + void (*get_queue_stats_tx)(struct net_device *, int, struct netdev_queue_stats_tx *); + void (*get_base_stats)(struct net_device *, struct netdev_queue_stats_rx *, struct netdev_queue_stats_tx *); +}; + +struct netdev_queue_mgmt_ops { + size_t ndo_queue_mem_size; + int (*ndo_queue_mem_alloc)(struct net_device *, void *, int); + void (*ndo_queue_mem_free)(struct net_device *, void *); + int (*ndo_queue_start)(struct net_device *, void *, int); + int (*ndo_queue_stop)(struct net_device *, void *, int); +}; + +struct udp_tunnel_nic_table_info { + unsigned int n_entries; + unsigned int tunnel_types; +}; + +struct udp_tunnel_info; + +struct udp_tunnel_nic_shared; + +struct udp_tunnel_nic_info { + int (*set_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*unset_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*sync_table)(struct net_device *, unsigned int); + struct udp_tunnel_nic_shared *shared; + unsigned int flags; + struct udp_tunnel_nic_table_info tables[4]; +}; + +struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + short unsigned int tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + __u32 tcm_info; +}; + +struct gnet_dump { + spinlock_t *lock; + struct sk_buff *skb; + struct nlattr *tail; + int compat_tc_stats; + int compat_xstats; + int padattr; + void *xstats; + int xstats_len; + struct tc_stats tc_stats; +}; + +struct netlink_range_validation { + u64 min; + u64 max; +}; + +struct netlink_range_validation_signed { + s64 min; + s64 max; +}; + +enum flow_action_hw_stats_bit { + FLOW_ACTION_HW_STATS_IMMEDIATE_BIT = 0, + FLOW_ACTION_HW_STATS_DELAYED_BIT = 1, + FLOW_ACTION_HW_STATS_DISABLED_BIT = 2, + FLOW_ACTION_HW_STATS_NUM_BITS = 3, +}; + +struct flow_block { + struct list_head cb_list; +}; + +typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *); + +struct qdisc_size_table { + struct callback_head rcu; + struct list_head list; + struct tc_sizespec szopts; + int refcnt; + u16 data[0]; +}; + +struct Qdisc_class_ops; + +struct Qdisc_ops { + struct Qdisc_ops *next; + const struct Qdisc_class_ops *cl_ops; + char id[16]; + int priv_size; + unsigned int static_flags; + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + struct sk_buff * (*peek)(struct Qdisc *); + int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*reset)(struct Qdisc *); + void (*destroy)(struct Qdisc *); + int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*attach)(struct Qdisc *); + int (*change_tx_queue_len)(struct Qdisc *, unsigned int); + void (*change_real_num_tx)(struct Qdisc *, unsigned int); + int (*dump)(struct Qdisc *, struct sk_buff *); + int (*dump_stats)(struct Qdisc *, struct gnet_dump *); + void (*ingress_block_set)(struct Qdisc *, u32); + void (*egress_block_set)(struct Qdisc *, u32); + u32 (*ingress_block_get)(struct Qdisc *); + u32 (*egress_block_get)(struct Qdisc *); + struct module *owner; +}; + +struct qdisc_walker; + +struct tcf_block; + +struct Qdisc_class_ops { + unsigned int flags; + struct 
netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); + int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *); + struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int); + void (*qlen_notify)(struct Qdisc *, long unsigned int); + long unsigned int (*find)(struct Qdisc *, u32); + int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *); + int (*delete)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + void (*walk)(struct Qdisc *, struct qdisc_walker *); + struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32); + void (*unbind_tcf)(struct Qdisc *, long unsigned int); + int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *); + int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *); +}; + +struct tcf_chain; + +struct tcf_block { + struct xarray ports; + struct mutex lock; + struct list_head chain_list; + u32 index; + u32 classid; + refcount_t refcnt; + struct net *net; + struct Qdisc *q; + struct rw_semaphore cb_lock; + struct flow_block flow_block; + struct list_head owner_list; + bool keep_dst; + bool bypass_wanted; + atomic_t filtercnt; + atomic_t skipswcnt; + atomic_t offloadcnt; + unsigned int nooffloaddevcnt; + unsigned int lockeddevcnt; + struct { + struct tcf_chain *chain; + struct list_head filter_chain_list; + } chain0; + struct callback_head rcu; + struct hlist_head proto_destroy_ht[128]; + struct mutex proto_destroy_lock; +}; + +struct tcf_result; + +struct tcf_proto_ops; + +struct tcf_proto { + struct tcf_proto *next; + void *root; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + __be16 protocol; + u32 prio; + void *data; + const struct tcf_proto_ops *ops; + struct tcf_chain *chain; + spinlock_t lock; + bool deleting; + bool counted; + refcount_t refcnt; + struct callback_head rcu; + struct hlist_node destroy_ht_node; +}; + +struct tcf_result { + union { + struct { + long unsigned int class; + u32 classid; + }; + const struct tcf_proto *goto_tp; + }; +}; + +struct tcf_walker; + +struct tcf_exts; + +struct tcf_proto_ops { + struct list_head head; + char kind[16]; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + int (*init)(struct tcf_proto *); + void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *); + void * (*get)(struct tcf_proto *, u32); + void (*put)(struct tcf_proto *, void *); + int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, u32, struct netlink_ext_ack *); + int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *); + void (*walk)(struct tcf_proto *, struct tcf_walker *, bool); + int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, struct netlink_ext_ack *); + void (*hw_add)(struct tcf_proto *, void *); + void (*hw_del)(struct tcf_proto *, void *); + void (*bind_class)(void *, u32, long unsigned int, void *, long unsigned int); + void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *); + void (*tmplt_destroy)(void *); + void (*tmplt_reoffload)(struct tcf_chain *, bool, flow_setup_cb_t *, void *); + struct tcf_exts * (*get_exts)(const struct tcf_proto *, u32); + int (*dump)(struct net *, struct tcf_proto *, void *, 
struct sk_buff *, struct tcmsg *, bool); + int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*tmplt_dump)(struct sk_buff *, struct net *, void *); + struct module *owner; + int flags; +}; + +struct tcf_chain { + struct mutex filter_chain_lock; + struct tcf_proto *filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; + unsigned int refcnt; + unsigned int action_refcnt; + bool explicitly_created; + bool flushing; + const struct tcf_proto_ops *tmplt_ops; + void *tmplt_priv; + struct callback_head rcu; +}; + +struct bpf_stack_state { + struct bpf_reg_state spilled_ptr; + u8 slot_type[8]; +}; + +enum ref_state_type { + REF_TYPE_PTR = 0, + REF_TYPE_LOCK = 1, +}; + +struct bpf_reference_state { + enum ref_state_type type; + int id; + int insn_idx; + void *ptr; +}; + +enum { + INSN_F_FRAMENO_MASK = 7, + INSN_F_SPI_MASK = 63, + INSN_F_SPI_SHIFT = 3, + INSN_F_STACK_ACCESS = 512, +}; + +struct bpf_insn_hist_entry { + u32 idx; + u32 prev_idx: 22; + u32 flags: 10; + u64 linked_regs; +}; + +struct bpf_verifier_state { + struct bpf_func_state *frame[8]; + struct bpf_verifier_state *parent; + u32 branches; + u32 insn_idx; + u32 curframe; + bool speculative; + bool active_rcu_lock; + u32 active_preempt_lock; + bool used_as_loop_entry; + bool in_sleepable; + u32 first_insn_idx; + u32 last_insn_idx; + struct bpf_verifier_state *loop_entry; + u32 insn_hist_start; + u32 insn_hist_end; + u32 dfs_depth; + u32 callback_unroll_depth; + u32 may_goto_depth; +}; + +struct bpf_verifier_state_list { + struct bpf_verifier_state state; + struct bpf_verifier_state_list *next; + int miss_cnt; + int hit_cnt; +}; + +struct bpf_loop_inline_state { + unsigned int initialized: 1; + unsigned int fit_for_inline: 1; + u32 callback_subprogno; +}; + +struct bpf_map_ptr_state { + struct bpf_map *map_ptr; + bool poison; + bool unpriv; +}; + +struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; + struct bpf_map_ptr_state map_ptr_state; + s32 call_imm; + u32 alu_limit; + struct { + u32 map_index; + u32 map_off; + }; + struct { + enum bpf_reg_type reg_type; + union { + struct { + struct btf *btf; + u32 btf_id; + }; + u32 mem_size; + }; + } btf_var; + struct bpf_loop_inline_state loop_inline_state; + }; + long: 32; + union { + u64 obj_new_size; + u64 insert_off; + }; + struct btf_struct_meta *kptr_struct_meta; + long: 32; + u64 map_key_state; + int ctx_field_size; + u32 seen; + bool sanitize_stack_spill; + bool zext_dst; + bool needs_zext; + bool storage_get_func_atomic; + bool is_iter_next; + bool call_with_percpu_alloc_ptr; + u8 alu_state; + u8 fastcall_pattern: 1; + u8 fastcall_spills_num: 3; + unsigned int orig_idx; + bool jmp_point; + bool prune_point; + bool force_checkpoint; + bool calls_callback; +}; + +struct rcu_synchronize { + struct callback_head head; + struct completion completion; +}; + +typedef struct poll_table_struct poll_table; + +struct bpf_struct_ops_value { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + char data[0]; +}; + +struct bpf_struct_ops_map { + struct bpf_map map; + const struct bpf_struct_ops_desc *st_ops_desc; + struct mutex lock; + struct bpf_link **links; + struct bpf_ksym **ksyms; + u32 funcs_cnt; + u32 image_pages_cnt; + void *image_pages[8]; + struct btf *btf; + struct bpf_struct_ops_value *uvalue; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct bpf_struct_ops_value kvalue; +}; + +struct bpf_struct_ops_link { + struct bpf_link link; + struct bpf_map *map; + wait_queue_head_t wait_hup; +}; + +enum { + IDX_MODULE_ID = 0, + IDX_ST_OPS_COMMON_VALUE_ID = 1, +}; + +enum lockdep_ok { + LOCKDEP_STILL_OK = 0, + LOCKDEP_NOW_UNRELIABLE = 1, +}; + +struct cpu_user_fns { + void (*cpu_clear_user_highpage)(struct page *, long unsigned int); + void (*cpu_copy_user_highpage)(struct page *, struct page *, long unsigned int, struct vm_area_struct *); +}; + +typedef struct { + u64 val; +} pfn_t; + +typedef struct { + void *lock; +} class_preempt_notrace_t; + +enum { + MM_FILEPAGES = 0, + MM_ANONPAGES = 1, + MM_SWAPENTS = 2, + MM_SHMEMPAGES = 3, + NR_MM_COUNTERS = 4, +}; + +struct compact_control; + +struct capture_control { + struct compact_control *cc; + struct page *page; +}; + +struct task_delay_info { + raw_spinlock_t lock; + long: 32; + u64 blkio_start; + u64 blkio_delay; + u64 swapin_start; + u64 swapin_delay; + u32 blkio_count; + u32 swapin_count; + u64 freepages_start; + u64 freepages_delay; + u64 thrashing_start; + u64 thrashing_delay; + u64 compact_start; + u64 compact_delay; + u64 wpcopy_start; + u64 wpcopy_delay; + u64 irq_delay; + u32 freepages_count; + u32 thrashing_count; + u32 compact_count; + u32 wpcopy_count; + u32 irq_count; + long: 32; +}; + +enum node_states { + N_POSSIBLE = 0, + N_ONLINE = 1, + N_NORMAL_MEMORY = 2, + N_HIGH_MEMORY = 2, + N_MEMORY = 3, + N_CPU = 4, + N_GENERIC_INITIATOR = 5, + NR_NODE_STATES = 6, +}; + +struct maple_alloc { + long unsigned int total; + unsigned char node_count; + unsigned int request_count; + struct maple_alloc *slot[61]; +}; + +struct maple_enode; + +enum store_type { + wr_invalid = 0, + wr_new_root = 1, + wr_store_root = 2, + wr_exact_fit = 3, + wr_spanning_store = 4, + wr_split_store = 5, + wr_rebalance = 6, + wr_append = 7, + wr_node_store = 8, + wr_slot_store = 9, +}; + +enum maple_status { + ma_active = 0, + ma_start = 1, + ma_root = 2, + ma_none = 3, + ma_pause = 4, + ma_overflow = 5, + ma_underflow = 6, + ma_error = 7, +}; + +struct ma_state { + struct maple_tree *tree; + long unsigned int index; + long unsigned int last; + struct maple_enode *node; + long unsigned int min; + long unsigned int max; + struct maple_alloc *alloc; + enum maple_status status; + unsigned char depth; + unsigned char offset; + unsigned char mas_flags; + unsigned char end; + enum store_type store_type; +}; + +struct ptdesc { + long unsigned int __page_flags; + union { + struct callback_head pt_rcu_head; + struct list_head pt_list; + struct { + long unsigned int _pt_pad_1; + pgtable_t pmd_huge_pte; + }; + }; + long unsigned int __page_mapping; + union { + long unsigned int pt_index; + struct mm_struct *pt_mm; + atomic_t pt_frag_refcount; + }; + union { + long unsigned int _pt_pad_2; + spinlock_t ptl; + }; + unsigned int __page_type; + atomic_t __page_refcount; + long unsigned int pt_memcg_data; +}; + +struct anon_vma { + struct anon_vma *root; + struct rw_semaphore rwsem; + atomic_t refcount; + long unsigned int num_children; + long unsigned int num_active_vmas; + struct anon_vma *parent; + struct rb_root_cached rb_root; +}; + +enum vm_fault_reason { + VM_FAULT_OOM = 1, + VM_FAULT_SIGBUS = 2, + VM_FAULT_MAJOR = 4, + VM_FAULT_HWPOISON = 16, + VM_FAULT_HWPOISON_LARGE = 32, + VM_FAULT_SIGSEGV = 64, + VM_FAULT_NOPAGE = 256, + VM_FAULT_LOCKED = 512, + VM_FAULT_RETRY = 1024, + 
VM_FAULT_FALLBACK = 2048, + VM_FAULT_DONE_COW = 4096, + VM_FAULT_NEEDDSYNC = 8192, + VM_FAULT_COMPLETED = 16384, + VM_FAULT_HINDEX_MASK = 983040, +}; + +typedef unsigned int zap_flags_t; + +enum { + FOLL_WRITE = 1, + FOLL_GET = 2, + FOLL_DUMP = 4, + FOLL_FORCE = 8, + FOLL_NOWAIT = 16, + FOLL_NOFAULT = 32, + FOLL_HWPOISON = 64, + FOLL_ANON = 128, + FOLL_LONGTERM = 256, + FOLL_SPLIT_PMD = 512, + FOLL_PCI_P2PDMA = 1024, + FOLL_INTERRUPTIBLE = 2048, + FOLL_HONOR_NUMA_FAULT = 4096, +}; + +enum pageflags { + PG_locked = 0, + PG_writeback = 1, + PG_referenced = 2, + PG_uptodate = 3, + PG_dirty = 4, + PG_lru = 5, + PG_head = 6, + PG_waiters = 7, + PG_active = 8, + PG_workingset = 9, + PG_owner_priv_1 = 10, + PG_owner_2 = 11, + PG_arch_1 = 12, + PG_reserved = 13, + PG_private = 14, + PG_private_2 = 15, + PG_reclaim = 16, + PG_swapbacked = 17, + PG_unevictable = 18, + PG_mlocked = 19, + __NR_PAGEFLAGS = 20, + PG_readahead = 16, + PG_swapcache = 10, + PG_checked = 10, + PG_anon_exclusive = 11, + PG_mappedtodisk = 11, + PG_fscache = 15, + PG_pinned = 10, + PG_savepinned = 4, + PG_foreign = 10, + PG_xen_remapped = 10, + PG_isolated = 16, + PG_reported = 3, + PG_has_hwpoisoned = 8, + PG_large_rmappable = 9, + PG_partially_mapped = 16, +}; + +enum pagetype { + PGTY_buddy = 240, + PGTY_offline = 241, + PGTY_table = 242, + PGTY_guard = 243, + PGTY_hugetlb = 244, + PGTY_slab = 245, + PGTY_zsmalloc = 246, + PGTY_unaccepted = 247, + PGTY_mapcount_underflow = 255, +}; + +enum { + __PERCPU_REF_ATOMIC = 1, + __PERCPU_REF_DEAD = 2, + __PERCPU_REF_ATOMIC_DEAD = 3, + __PERCPU_REF_FLAG_BITS = 2, +}; + +typedef unsigned int pgtbl_mod_mask; + +enum mthp_stat_item { + MTHP_STAT_ANON_FAULT_ALLOC = 0, + MTHP_STAT_ANON_FAULT_FALLBACK = 1, + MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE = 2, + MTHP_STAT_ZSWPOUT = 3, + MTHP_STAT_SWPIN = 4, + MTHP_STAT_SWPOUT = 5, + MTHP_STAT_SWPOUT_FALLBACK = 6, + MTHP_STAT_SHMEM_ALLOC = 7, + MTHP_STAT_SHMEM_FALLBACK = 8, + MTHP_STAT_SHMEM_FALLBACK_CHARGE = 9, + MTHP_STAT_SPLIT = 10, + MTHP_STAT_SPLIT_FAILED = 11, + MTHP_STAT_SPLIT_DEFERRED = 12, + MTHP_STAT_NR_ANON = 13, + MTHP_STAT_NR_ANON_PARTIALLY_MAPPED = 14, + __MTHP_STAT_COUNT = 15, +}; + +struct encoded_page; + +struct vm_event_state { + long unsigned int event[68]; +}; + +struct zap_details { + struct folio *single_folio; + bool even_cows; + zap_flags_t zap_flags; +}; + +struct follow_pfnmap_args { + struct vm_area_struct *vma; + long unsigned int address; + spinlock_t *lock; + pte_t *ptep; + long unsigned int pfn; + pgprot_t pgprot; + bool writable; + bool special; +}; + +typedef int (*pte_fn_t)(pte_t *, long unsigned int, void *); + +enum { + SWP_USED = 1, + SWP_WRITEOK = 2, + SWP_DISCARDABLE = 4, + SWP_DISCARDING = 8, + SWP_SOLIDSTATE = 16, + SWP_CONTINUED = 32, + SWP_BLKDEV = 64, + SWP_ACTIVATED = 128, + SWP_FS_OPS = 256, + SWP_AREA_DISCARD = 512, + SWP_PAGE_DISCARD = 1024, + SWP_STABLE_WRITES = 2048, + SWP_SYNCHRONOUS_IO = 4096, + SWP_SCANNING = 16384, +}; + +typedef long unsigned int pte_marker; + +typedef int rmap_t; + +enum rmap_level { + RMAP_LEVEL_PTE = 0, + RMAP_LEVEL_PMD = 1, +}; + +struct mmu_notifier_range { + long unsigned int start; + long unsigned int end; +}; + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct encoded_page *encoded_pages[0]; +}; + +struct mmu_gather { + struct mm_struct *mm; + long unsigned int start; + long unsigned int end; + unsigned int fullmm: 1; + unsigned int need_flush_all: 1; + unsigned int freed_tables: 1; + unsigned int delayed_rmap: 1; + 
unsigned int cleared_ptes: 1; + unsigned int cleared_pmds: 1; + unsigned int cleared_puds: 1; + unsigned int cleared_p4ds: 1; + unsigned int vma_exec: 1; + unsigned int vma_huge: 1; + unsigned int vma_pfn: 1; + unsigned int batch_count; + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[8]; +}; + +struct unlink_vma_file_batch { + int count; + struct vm_area_struct *vmas[8]; +}; + +typedef int fpb_t; + +struct compact_control { + struct list_head freepages[11]; + struct list_head migratepages; + unsigned int nr_freepages; + unsigned int nr_migratepages; + long unsigned int free_pfn; + long unsigned int migrate_pfn; + long unsigned int fast_start_pfn; + struct zone *zone; + long unsigned int total_migrate_scanned; + long unsigned int total_free_scanned; + short unsigned int fast_search_fail; + short int search_order; + const gfp_t gfp_mask; + int order; + int migratetype; + const unsigned int alloc_flags; + const int highest_zoneidx; + enum migrate_mode mode; + bool ignore_skip_hint; + bool no_set_skip_hint; + bool ignore_block_suitable; + bool direct_compaction; + bool proactive_compaction; + bool whole_zone; + bool contended; + bool finish_pageblock; + bool alloc_contig; +}; + +enum work_bits { + WORK_STRUCT_PENDING_BIT = 0, + WORK_STRUCT_INACTIVE_BIT = 1, + WORK_STRUCT_PWQ_BIT = 2, + WORK_STRUCT_LINKED_BIT = 3, + WORK_STRUCT_FLAG_BITS = 4, + WORK_STRUCT_COLOR_SHIFT = 4, + WORK_STRUCT_COLOR_BITS = 4, + WORK_STRUCT_PWQ_SHIFT = 8, + WORK_OFFQ_FLAG_SHIFT = 4, + WORK_OFFQ_BH_BIT = 4, + WORK_OFFQ_FLAG_END = 5, + WORK_OFFQ_FLAG_BITS = 1, + WORK_OFFQ_DISABLE_SHIFT = 5, + WORK_OFFQ_DISABLE_BITS = 16, + WORK_OFFQ_POOL_SHIFT = 21, + WORK_OFFQ_LEFT = 11, + WORK_OFFQ_POOL_BITS = 11, +}; + +enum wq_misc_consts { + WORK_NR_COLORS = 16, + WORK_CPU_UNBOUND = 32, + WORK_BUSY_PENDING = 1, + WORK_BUSY_RUNNING = 2, + WORKER_DESC_LEN = 32, +}; + +struct request; + +struct rq_list { + struct request *head; + struct request *tail; +}; + +struct blk_plug { + struct rq_list mq_list; + struct rq_list cached_rqs; + u64 cur_ktime; + short unsigned int nr_ios; + short unsigned int rq_count; + bool multiple_queues; + bool has_elevator; + struct list_head cb_list; +}; + +typedef unsigned int blk_mode_t; + +struct block_device_operations; + +struct timer_rand_state; + +struct disk_events; + +struct badblocks; + +struct blk_independent_access_ranges; + +struct gendisk { + int major; + int first_minor; + int minors; + char disk_name[32]; + short unsigned int events; + short unsigned int event_flags; + struct xarray part_tbl; + struct block_device *part0; + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + struct bio_set bio_split; + int flags; + long unsigned int state; + struct mutex open_mutex; + unsigned int open_partitions; + struct backing_dev_info *bdi; + struct kobject queue_kobj; + struct kobject *slave_dir; + struct list_head slave_bdevs; + struct timer_rand_state *random; + atomic_t sync_io; + struct disk_events *ev; + int node_id; + struct badblocks *bb; + struct lockdep_map lockdep_map; + long: 32; + u64 diskseq; + blk_mode_t open_mode; + struct blk_independent_access_ranges *ia_ranges; +}; + +typedef unsigned int blk_features_t; + +typedef unsigned int blk_flags_t; + +enum blk_integrity_checksum { + BLK_INTEGRITY_CSUM_NONE = 0, + BLK_INTEGRITY_CSUM_IP = 1, + BLK_INTEGRITY_CSUM_CRC = 2, + BLK_INTEGRITY_CSUM_CRC64 = 3, +} __attribute__((mode(byte))); + +struct blk_integrity { + unsigned char flags; + enum 
blk_integrity_checksum csum_type; + unsigned char tuple_size; + unsigned char pi_offset; + unsigned char interval_exp; + unsigned char tag_size; +}; + +struct queue_limits { + blk_features_t features; + blk_flags_t flags; + long unsigned int seg_boundary_mask; + long unsigned int virt_boundary_mask; + unsigned int max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_user_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_user_discard_sectors; + unsigned int max_secure_erase_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int max_hw_zone_append_sectors; + unsigned int max_zone_append_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + unsigned int zone_write_granularity; + unsigned int atomic_write_hw_max; + unsigned int atomic_write_max_sectors; + unsigned int atomic_write_hw_boundary; + unsigned int atomic_write_boundary_sectors; + unsigned int atomic_write_hw_unit_min; + unsigned int atomic_write_unit_min; + unsigned int atomic_write_hw_unit_max; + unsigned int atomic_write_unit_max; + short unsigned int max_segments; + short unsigned int max_integrity_segments; + short unsigned int max_discard_segments; + unsigned int max_open_zones; + unsigned int max_active_zones; + unsigned int dma_alignment; + unsigned int dma_pad_mask; + struct blk_integrity integrity; +}; + +struct elevator_queue; + +struct blk_mq_ops; + +struct blk_mq_ctx; + +struct blk_queue_stats; + +struct rq_qos; + +struct blk_mq_tags; + +struct blk_trace; + +struct blk_flush_queue; + +struct throtl_data; + +struct blk_mq_tag_set; + +struct request_queue { + void *queuedata; + struct elevator_queue *elevator; + const struct blk_mq_ops *mq_ops; + struct blk_mq_ctx *queue_ctx; + long unsigned int queue_flags; + unsigned int rq_timeout; + unsigned int queue_depth; + refcount_t refs; + unsigned int nr_hw_queues; + struct xarray hctx_table; + struct percpu_ref q_usage_counter; + struct lock_class_key io_lock_cls_key; + struct lockdep_map io_lockdep_map; + struct lock_class_key q_lock_cls_key; + struct lockdep_map q_lockdep_map; + struct request *last_merge; + spinlock_t queue_lock; + int quiesce_depth; + struct gendisk *disk; + struct kobject *mq_kobj; + struct queue_limits limits; + struct device *dev; + enum rpm_status rpm_status; + atomic_t pm_only; + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + struct mutex rq_qos_mutex; + int id; + long unsigned int nr_requests; + struct timer_list timeout; + struct work_struct timeout_work; + atomic_t nr_active_requests_shared_tags; + struct blk_mq_tags *sched_shared_tags; + struct list_head icq_list; + long unsigned int blkcg_pols[1]; + struct blkcg_gq *root_blkg; + struct list_head blkg_list; + struct mutex blkcg_mutex; + int node; + spinlock_t requeue_lock; + struct list_head requeue_list; + struct delayed_work requeue_work; + struct blk_trace *blk_trace; + struct blk_flush_queue *fq; + struct list_head flush_list; + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + struct mutex limits_lock; + struct list_head unused_hctx_list; + spinlock_t unused_hctx_lock; + int mq_freeze_depth; + struct throtl_data *td; + struct callback_head callback_head; + wait_queue_head_t mq_freeze_wq; + struct mutex mq_freeze_lock; + struct 
blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct dentry *rqos_debugfs_dir; + struct mutex debugfs_mutex; + bool mq_sysfs_init_done; +}; + +struct io_comp_batch { + struct rq_list req_list; + bool need_ts; + void (*complete)(struct io_comp_batch *); +}; + +enum { + EI_ETYPE_NULL = 0, + EI_ETYPE_ERRNO = 1, + EI_ETYPE_ERRNO_NULL = 2, + EI_ETYPE_TRUE = 3, +}; + +struct partition_meta_info { + char uuid[37]; + u8 volname[64]; +}; + +struct blk_zone { + __u64 start; + __u64 len; + __u64 wp; + __u8 type; + __u8 cond; + __u8 non_seq; + __u8 reset; + __u8 resv[4]; + __u64 capacity; + __u8 reserved[24]; +}; + +typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *); + +enum blk_unique_id { + BLK_UID_T10 = 1, + BLK_UID_EUI64 = 2, + BLK_UID_NAA = 3, +}; + +struct hd_geometry; + +struct pr_ops; + +struct block_device_operations { + void (*submit_bio)(struct bio *); + int (*poll_bio)(struct bio *, struct io_comp_batch *, unsigned int); + int (*open)(struct gendisk *, blk_mode_t); + void (*release)(struct gendisk *); + int (*ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); + int (*compat_ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); + unsigned int (*check_events)(struct gendisk *, unsigned int); + void (*unlock_native_capacity)(struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + int (*set_read_only)(struct block_device *, bool); + void (*free_disk)(struct gendisk *); + void (*swap_slot_free_notify)(struct block_device *, long unsigned int); + int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *); + char * (*devnode)(struct gendisk *, umode_t *); + int (*get_unique_id)(struct gendisk *, u8 *, enum blk_unique_id); + struct module *owner; + const struct pr_ops *pr_ops; + int (*alternative_gpt_sector)(struct gendisk *, sector_t *); +}; + +struct blk_independent_access_range { + struct kobject kobj; + long: 32; + sector_t sector; + sector_t nr_sectors; +}; + +struct blk_independent_access_ranges { + struct kobject kobj; + bool sysfs_registered; + unsigned int nr_ia_ranges; + long: 32; + struct blk_independent_access_range ia_range[0]; +}; + +enum blk_eh_timer_return { + BLK_EH_DONE = 0, + BLK_EH_RESET_TIMER = 1, +}; + +struct blk_mq_hw_ctx; + +struct blk_mq_queue_data; + +struct blk_mq_ops { + blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); + void (*commit_rqs)(struct blk_mq_hw_ctx *); + void (*queue_rqs)(struct rq_list *); + int (*get_budget)(struct request_queue *); + void (*put_budget)(struct request_queue *, int); + void (*set_rq_budget_token)(struct request *, int); + int (*get_rq_budget_token)(struct request *); + enum blk_eh_timer_return (*timeout)(struct request *); + int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); + void (*complete)(struct request *); + int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + int (*init_request)(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int); + void (*exit_request)(struct blk_mq_tag_set *, struct request *, unsigned int); + void (*cleanup_rq)(struct request *); + bool (*busy)(struct request_queue *); + void (*map_queues)(struct blk_mq_tag_set *); + void (*show_rq)(struct seq_file *, struct request *); +}; + +enum pr_type { + PR_WRITE_EXCLUSIVE = 1, + PR_EXCLUSIVE_ACCESS = 2, + PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + 
PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +struct pr_keys; + +struct pr_held_reservation; + +struct pr_ops { + int (*pr_register)(struct block_device *, u64, u64, u32); + int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); + int (*pr_release)(struct block_device *, u64, enum pr_type); + int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); + int (*pr_clear)(struct block_device *, u64); + int (*pr_read_keys)(struct block_device *, struct pr_keys *); + int (*pr_read_reservation)(struct block_device *, struct pr_held_reservation *); +}; + +struct syscall_metadata { + const char *name; + int syscall_nr; + int nb_args; + const char **types; + const char **args; + struct list_head enter_fields; + struct trace_event_call *enter_event; + struct trace_event_call *exit_event; +}; + +enum refcount_saturation_type { + REFCOUNT_ADD_NOT_ZERO_OVF = 0, + REFCOUNT_ADD_OVF = 1, + REFCOUNT_ADD_UAF = 2, + REFCOUNT_SUB_UAF = 3, + REFCOUNT_DEC_LEAK = 4, +}; + +typedef unsigned int xa_mark_t; + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; + +typedef int __kernel_rwf_t; + +struct ida { + struct xarray xa; +}; + +struct eventfd_ctx { + struct kref kref; + wait_queue_head_t wqh; + __u64 count; + unsigned int flags; + int id; +}; + +typedef u64 uint64_t; + +struct dax_device; + +struct iomap_folio_ops; + +struct iomap { + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + struct block_device *bdev; + struct dax_device *dax_dev; + void *inline_data; + void *private; + const struct iomap_folio_ops *folio_ops; + u64 validity_cookie; +}; + +struct iomap_iter; + +struct iomap_folio_ops { + struct folio * (*get_folio)(struct iomap_iter *, loff_t, unsigned int); + void (*put_folio)(struct inode *, loff_t, unsigned int, struct folio *); + bool (*iomap_valid)(struct inode *, const struct iomap *); +}; + +struct iomap_iter { + struct inode *inode; + long: 32; + loff_t pos; + u64 len; + s64 processed; + unsigned int flags; + long: 32; + struct iomap iomap; + struct iomap srcmap; + void *private; + long: 32; +}; + +struct iomap_ops { + int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap *, struct iomap *); + int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap *); +}; + +enum { + HI_SOFTIRQ = 0, + TIMER_SOFTIRQ = 1, + NET_TX_SOFTIRQ = 2, + NET_RX_SOFTIRQ = 3, + BLOCK_SOFTIRQ = 4, + IRQ_POLL_SOFTIRQ = 5, + TASKLET_SOFTIRQ = 6, + SCHED_SOFTIRQ = 7, + HRTIMER_SOFTIRQ = 8, + RCU_SOFTIRQ = 9, + NR_SOFTIRQS = 10, +}; + +enum cpu_usage_stat { + CPUTIME_USER = 0, + CPUTIME_NICE = 1, + CPUTIME_SYSTEM = 2, + CPUTIME_SOFTIRQ = 3, + CPUTIME_IRQ = 4, + CPUTIME_IDLE = 5, + CPUTIME_IOWAIT = 6, + CPUTIME_STEAL = 7, + CPUTIME_GUEST = 8, + CPUTIME_GUEST_NICE = 9, + CPUTIME_FORCEIDLE = 10, + NR_STATS = 11, +}; + +enum cgroup_bpf_attach_type { + CGROUP_BPF_ATTACH_TYPE_INVALID = -1, + CGROUP_INET_INGRESS = 0, + CGROUP_INET_EGRESS = 1, + CGROUP_INET_SOCK_CREATE = 2, + CGROUP_SOCK_OPS = 3, + CGROUP_DEVICE = 4, + CGROUP_INET4_BIND = 5, + CGROUP_INET6_BIND = 6, + CGROUP_INET4_CONNECT = 7, + CGROUP_INET6_CONNECT = 8, + CGROUP_UNIX_CONNECT = 9, + CGROUP_INET4_POST_BIND = 10, + CGROUP_INET6_POST_BIND = 11, + CGROUP_UDP4_SENDMSG = 12, + CGROUP_UDP6_SENDMSG = 13, + CGROUP_UNIX_SENDMSG = 14, + CGROUP_SYSCTL = 15, + CGROUP_UDP4_RECVMSG = 16, + CGROUP_UDP6_RECVMSG = 17, + CGROUP_UNIX_RECVMSG = 18, + CGROUP_GETSOCKOPT = 19, + CGROUP_SETSOCKOPT = 20, + 
CGROUP_INET4_GETPEERNAME = 21, + CGROUP_INET6_GETPEERNAME = 22, + CGROUP_UNIX_GETPEERNAME = 23, + CGROUP_INET4_GETSOCKNAME = 24, + CGROUP_INET6_GETSOCKNAME = 25, + CGROUP_UNIX_GETSOCKNAME = 26, + CGROUP_INET_SOCK_RELEASE = 27, + CGROUP_LSM_START = 28, + CGROUP_LSM_END = 27, + MAX_CGROUP_BPF_ATTACH_TYPE = 28, +}; + +enum psi_task_count { + NR_IOWAIT = 0, + NR_MEMSTALL = 1, + NR_RUNNING = 2, + NR_MEMSTALL_RUNNING = 3, + NR_PSI_TASK_COUNTS = 4, +}; + +enum psi_res { + PSI_IO = 0, + PSI_MEM = 1, + PSI_CPU = 2, + NR_PSI_RESOURCES = 3, +}; + +enum psi_states { + PSI_IO_SOME = 0, + PSI_IO_FULL = 1, + PSI_MEM_SOME = 2, + PSI_MEM_FULL = 3, + PSI_CPU_SOME = 4, + PSI_CPU_FULL = 5, + PSI_NONIDLE = 6, + NR_PSI_STATES = 7, +}; + +enum psi_aggregators { + PSI_AVGS = 0, + PSI_POLL = 1, + NR_PSI_AGGREGATORS = 2, +}; + +enum cgroup_subsys_id { + cpuset_cgrp_id = 0, + cpu_cgrp_id = 1, + cpuacct_cgrp_id = 2, + io_cgrp_id = 3, + memory_cgrp_id = 4, + devices_cgrp_id = 5, + freezer_cgrp_id = 6, + net_cls_cgrp_id = 7, + perf_event_cgrp_id = 8, + pids_cgrp_id = 9, + rdma_cgrp_id = 10, + misc_cgrp_id = 11, + debug_cgrp_id = 12, + CGROUP_SUBSYS_COUNT = 13, +}; + +enum wb_stat_item { + WB_RECLAIMABLE = 0, + WB_WRITEBACK = 1, + WB_DIRTIED = 2, + WB_WRITTEN = 3, + NR_WB_STAT_ITEMS = 4, +}; + +struct iomap_swapfile_info { + struct iomap iomap; + struct swap_info_struct *sis; + long: 32; + uint64_t lowest_ppage; + uint64_t highest_ppage; + long unsigned int nr_pages; + int nr_extents; + struct file *file; + long: 32; +}; + +struct fc_log; + +struct p_log { + const char *prefix; + struct fc_log *log; +}; + +enum fs_context_purpose { + FS_CONTEXT_FOR_MOUNT = 0, + FS_CONTEXT_FOR_SUBMOUNT = 1, + FS_CONTEXT_FOR_RECONFIGURE = 2, +}; + +enum fs_context_phase { + FS_CONTEXT_CREATE_PARAMS = 0, + FS_CONTEXT_CREATING = 1, + FS_CONTEXT_AWAITING_MOUNT = 2, + FS_CONTEXT_AWAITING_RECONF = 3, + FS_CONTEXT_RECONF_PARAMS = 4, + FS_CONTEXT_RECONFIGURING = 5, + FS_CONTEXT_FAILED = 6, +}; + +struct fs_context_operations; + +struct fs_context { + const struct fs_context_operations *ops; + struct mutex uapi_mutex; + struct file_system_type *fs_type; + void *fs_private; + void *sget_key; + struct dentry *root; + struct user_namespace *user_ns; + struct net *net_ns; + const struct cred *cred; + struct p_log log; + const char *source; + void *security; + void *s_fs_info; + unsigned int sb_flags; + unsigned int sb_flags_mask; + unsigned int s_iflags; + enum fs_context_purpose purpose: 8; + enum fs_context_phase phase: 8; + bool need_free: 1; + bool global: 1; + bool oldapi: 1; + bool exclusive: 1; +}; + +struct audit_names; + +struct filename { + const char *name; + const char *uptr; + atomic_t refcnt; + struct audit_names *aname; + const char iname[0]; +}; + +enum kernfs_root_flag { + KERNFS_ROOT_CREATE_DEACTIVATED = 1, + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, + KERNFS_ROOT_SUPPORT_EXPORTOP = 4, + KERNFS_ROOT_SUPPORT_USER_XATTR = 8, +}; + +struct kernfs_syscall_ops { + int (*show_options)(struct seq_file *, struct kernfs_root *); + int (*mkdir)(struct kernfs_node *, const char *, umode_t); + int (*rmdir)(struct kernfs_node *); + int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); + int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); +}; + +struct kernfs_fs_context { + struct kernfs_root *root; + void *ns_tag; + long unsigned int magic; + bool new_sb_created; +}; + +enum fs_value_type { + fs_value_is_undefined = 0, + fs_value_is_flag = 1, + fs_value_is_string = 2, + fs_value_is_blob = 3, + 
fs_value_is_filename = 4, + fs_value_is_file = 5, +}; + +struct fs_parameter { + const char *key; + enum fs_value_type type: 8; + union { + char *string; + void *blob; + struct filename *name; + struct file *file; + }; + size_t size; + int dirfd; +}; + +struct fc_log { + refcount_t usage; + u8 head; + u8 tail; + u8 need_free; + struct module *owner; + char *buffer[8]; +}; + +struct fs_context_operations { + void (*free)(struct fs_context *); + int (*dup)(struct fs_context *, struct fs_context *); + int (*parse_param)(struct fs_context *, struct fs_parameter *); + int (*parse_monolithic)(struct fs_context *, void *); + int (*get_tree)(struct fs_context *); + int (*reconfigure)(struct fs_context *); +}; + +enum { + IPSTATS_MIB_NUM = 0, + IPSTATS_MIB_INPKTS = 1, + IPSTATS_MIB_INOCTETS = 2, + IPSTATS_MIB_INDELIVERS = 3, + IPSTATS_MIB_OUTFORWDATAGRAMS = 4, + IPSTATS_MIB_OUTREQUESTS = 5, + IPSTATS_MIB_OUTOCTETS = 6, + IPSTATS_MIB_INHDRERRORS = 7, + IPSTATS_MIB_INTOOBIGERRORS = 8, + IPSTATS_MIB_INNOROUTES = 9, + IPSTATS_MIB_INADDRERRORS = 10, + IPSTATS_MIB_INUNKNOWNPROTOS = 11, + IPSTATS_MIB_INTRUNCATEDPKTS = 12, + IPSTATS_MIB_INDISCARDS = 13, + IPSTATS_MIB_OUTDISCARDS = 14, + IPSTATS_MIB_OUTNOROUTES = 15, + IPSTATS_MIB_REASMTIMEOUT = 16, + IPSTATS_MIB_REASMREQDS = 17, + IPSTATS_MIB_REASMOKS = 18, + IPSTATS_MIB_REASMFAILS = 19, + IPSTATS_MIB_FRAGOKS = 20, + IPSTATS_MIB_FRAGFAILS = 21, + IPSTATS_MIB_FRAGCREATES = 22, + IPSTATS_MIB_INMCASTPKTS = 23, + IPSTATS_MIB_OUTMCASTPKTS = 24, + IPSTATS_MIB_INBCASTPKTS = 25, + IPSTATS_MIB_OUTBCASTPKTS = 26, + IPSTATS_MIB_INMCASTOCTETS = 27, + IPSTATS_MIB_OUTMCASTOCTETS = 28, + IPSTATS_MIB_INBCASTOCTETS = 29, + IPSTATS_MIB_OUTBCASTOCTETS = 30, + IPSTATS_MIB_CSUMERRORS = 31, + IPSTATS_MIB_NOECTPKTS = 32, + IPSTATS_MIB_ECT1PKTS = 33, + IPSTATS_MIB_ECT0PKTS = 34, + IPSTATS_MIB_CEPKTS = 35, + IPSTATS_MIB_REASM_OVERLAPS = 36, + IPSTATS_MIB_OUTPKTS = 37, + __IPSTATS_MIB_MAX = 38, +}; + +enum { + ICMP_MIB_NUM = 0, + ICMP_MIB_INMSGS = 1, + ICMP_MIB_INERRORS = 2, + ICMP_MIB_INDESTUNREACHS = 3, + ICMP_MIB_INTIMEEXCDS = 4, + ICMP_MIB_INPARMPROBS = 5, + ICMP_MIB_INSRCQUENCHS = 6, + ICMP_MIB_INREDIRECTS = 7, + ICMP_MIB_INECHOS = 8, + ICMP_MIB_INECHOREPS = 9, + ICMP_MIB_INTIMESTAMPS = 10, + ICMP_MIB_INTIMESTAMPREPS = 11, + ICMP_MIB_INADDRMASKS = 12, + ICMP_MIB_INADDRMASKREPS = 13, + ICMP_MIB_OUTMSGS = 14, + ICMP_MIB_OUTERRORS = 15, + ICMP_MIB_OUTDESTUNREACHS = 16, + ICMP_MIB_OUTTIMEEXCDS = 17, + ICMP_MIB_OUTPARMPROBS = 18, + ICMP_MIB_OUTSRCQUENCHS = 19, + ICMP_MIB_OUTREDIRECTS = 20, + ICMP_MIB_OUTECHOS = 21, + ICMP_MIB_OUTECHOREPS = 22, + ICMP_MIB_OUTTIMESTAMPS = 23, + ICMP_MIB_OUTTIMESTAMPREPS = 24, + ICMP_MIB_OUTADDRMASKS = 25, + ICMP_MIB_OUTADDRMASKREPS = 26, + ICMP_MIB_CSUMERRORS = 27, + ICMP_MIB_RATELIMITGLOBAL = 28, + ICMP_MIB_RATELIMITHOST = 29, + __ICMP_MIB_MAX = 30, +}; + +enum { + ICMP6_MIB_NUM = 0, + ICMP6_MIB_INMSGS = 1, + ICMP6_MIB_INERRORS = 2, + ICMP6_MIB_OUTMSGS = 3, + ICMP6_MIB_OUTERRORS = 4, + ICMP6_MIB_CSUMERRORS = 5, + ICMP6_MIB_RATELIMITHOST = 6, + __ICMP6_MIB_MAX = 7, +}; + +enum { + TCP_MIB_NUM = 0, + TCP_MIB_RTOALGORITHM = 1, + TCP_MIB_RTOMIN = 2, + TCP_MIB_RTOMAX = 3, + TCP_MIB_MAXCONN = 4, + TCP_MIB_ACTIVEOPENS = 5, + TCP_MIB_PASSIVEOPENS = 6, + TCP_MIB_ATTEMPTFAILS = 7, + TCP_MIB_ESTABRESETS = 8, + TCP_MIB_CURRESTAB = 9, + TCP_MIB_INSEGS = 10, + TCP_MIB_OUTSEGS = 11, + TCP_MIB_RETRANSSEGS = 12, + TCP_MIB_INERRS = 13, + TCP_MIB_OUTRSTS = 14, + TCP_MIB_CSUMERRORS = 15, + __TCP_MIB_MAX = 16, +}; + +enum { + UDP_MIB_NUM = 0, + UDP_MIB_INDATAGRAMS = 1, 
+ UDP_MIB_NOPORTS = 2, + UDP_MIB_INERRORS = 3, + UDP_MIB_OUTDATAGRAMS = 4, + UDP_MIB_RCVBUFERRORS = 5, + UDP_MIB_SNDBUFERRORS = 6, + UDP_MIB_CSUMERRORS = 7, + UDP_MIB_IGNOREDMULTI = 8, + UDP_MIB_MEMERRORS = 9, + __UDP_MIB_MAX = 10, +}; + +enum { + LINUX_MIB_NUM = 0, + LINUX_MIB_SYNCOOKIESSENT = 1, + LINUX_MIB_SYNCOOKIESRECV = 2, + LINUX_MIB_SYNCOOKIESFAILED = 3, + LINUX_MIB_EMBRYONICRSTS = 4, + LINUX_MIB_PRUNECALLED = 5, + LINUX_MIB_RCVPRUNED = 6, + LINUX_MIB_OFOPRUNED = 7, + LINUX_MIB_OUTOFWINDOWICMPS = 8, + LINUX_MIB_LOCKDROPPEDICMPS = 9, + LINUX_MIB_ARPFILTER = 10, + LINUX_MIB_TIMEWAITED = 11, + LINUX_MIB_TIMEWAITRECYCLED = 12, + LINUX_MIB_TIMEWAITKILLED = 13, + LINUX_MIB_PAWSACTIVEREJECTED = 14, + LINUX_MIB_PAWSESTABREJECTED = 15, + LINUX_MIB_DELAYEDACKS = 16, + LINUX_MIB_DELAYEDACKLOCKED = 17, + LINUX_MIB_DELAYEDACKLOST = 18, + LINUX_MIB_LISTENOVERFLOWS = 19, + LINUX_MIB_LISTENDROPS = 20, + LINUX_MIB_TCPHPHITS = 21, + LINUX_MIB_TCPPUREACKS = 22, + LINUX_MIB_TCPHPACKS = 23, + LINUX_MIB_TCPRENORECOVERY = 24, + LINUX_MIB_TCPSACKRECOVERY = 25, + LINUX_MIB_TCPSACKRENEGING = 26, + LINUX_MIB_TCPSACKREORDER = 27, + LINUX_MIB_TCPRENOREORDER = 28, + LINUX_MIB_TCPTSREORDER = 29, + LINUX_MIB_TCPFULLUNDO = 30, + LINUX_MIB_TCPPARTIALUNDO = 31, + LINUX_MIB_TCPDSACKUNDO = 32, + LINUX_MIB_TCPLOSSUNDO = 33, + LINUX_MIB_TCPLOSTRETRANSMIT = 34, + LINUX_MIB_TCPRENOFAILURES = 35, + LINUX_MIB_TCPSACKFAILURES = 36, + LINUX_MIB_TCPLOSSFAILURES = 37, + LINUX_MIB_TCPFASTRETRANS = 38, + LINUX_MIB_TCPSLOWSTARTRETRANS = 39, + LINUX_MIB_TCPTIMEOUTS = 40, + LINUX_MIB_TCPLOSSPROBES = 41, + LINUX_MIB_TCPLOSSPROBERECOVERY = 42, + LINUX_MIB_TCPRENORECOVERYFAIL = 43, + LINUX_MIB_TCPSACKRECOVERYFAIL = 44, + LINUX_MIB_TCPRCVCOLLAPSED = 45, + LINUX_MIB_TCPDSACKOLDSENT = 46, + LINUX_MIB_TCPDSACKOFOSENT = 47, + LINUX_MIB_TCPDSACKRECV = 48, + LINUX_MIB_TCPDSACKOFORECV = 49, + LINUX_MIB_TCPABORTONDATA = 50, + LINUX_MIB_TCPABORTONCLOSE = 51, + LINUX_MIB_TCPABORTONMEMORY = 52, + LINUX_MIB_TCPABORTONTIMEOUT = 53, + LINUX_MIB_TCPABORTONLINGER = 54, + LINUX_MIB_TCPABORTFAILED = 55, + LINUX_MIB_TCPMEMORYPRESSURES = 56, + LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, + LINUX_MIB_TCPSACKDISCARD = 58, + LINUX_MIB_TCPDSACKIGNOREDOLD = 59, + LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, + LINUX_MIB_TCPSPURIOUSRTOS = 61, + LINUX_MIB_TCPMD5NOTFOUND = 62, + LINUX_MIB_TCPMD5UNEXPECTED = 63, + LINUX_MIB_TCPMD5FAILURE = 64, + LINUX_MIB_SACKSHIFTED = 65, + LINUX_MIB_SACKMERGED = 66, + LINUX_MIB_SACKSHIFTFALLBACK = 67, + LINUX_MIB_TCPBACKLOGDROP = 68, + LINUX_MIB_PFMEMALLOCDROP = 69, + LINUX_MIB_TCPMINTTLDROP = 70, + LINUX_MIB_TCPDEFERACCEPTDROP = 71, + LINUX_MIB_IPRPFILTER = 72, + LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, + LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, + LINUX_MIB_TCPREQQFULLDROP = 75, + LINUX_MIB_TCPRETRANSFAIL = 76, + LINUX_MIB_TCPRCVCOALESCE = 77, + LINUX_MIB_TCPBACKLOGCOALESCE = 78, + LINUX_MIB_TCPOFOQUEUE = 79, + LINUX_MIB_TCPOFODROP = 80, + LINUX_MIB_TCPOFOMERGE = 81, + LINUX_MIB_TCPCHALLENGEACK = 82, + LINUX_MIB_TCPSYNCHALLENGE = 83, + LINUX_MIB_TCPFASTOPENACTIVE = 84, + LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, + LINUX_MIB_TCPFASTOPENPASSIVE = 86, + LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, + LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, + LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, + LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, + LINUX_MIB_BUSYPOLLRXPACKETS = 92, + LINUX_MIB_TCPAUTOCORKING = 93, + LINUX_MIB_TCPFROMZEROWINDOWADV = 94, + LINUX_MIB_TCPTOZEROWINDOWADV = 95, + LINUX_MIB_TCPWANTZEROWINDOWADV = 96, + 
LINUX_MIB_TCPSYNRETRANS = 97, + LINUX_MIB_TCPORIGDATASENT = 98, + LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, + LINUX_MIB_TCPHYSTARTTRAINCWND = 100, + LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, + LINUX_MIB_TCPHYSTARTDELAYCWND = 102, + LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, + LINUX_MIB_TCPACKSKIPPEDPAWS = 104, + LINUX_MIB_TCPACKSKIPPEDSEQ = 105, + LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, + LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, + LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, + LINUX_MIB_TCPWINPROBE = 109, + LINUX_MIB_TCPKEEPALIVE = 110, + LINUX_MIB_TCPMTUPFAIL = 111, + LINUX_MIB_TCPMTUPSUCCESS = 112, + LINUX_MIB_TCPDELIVERED = 113, + LINUX_MIB_TCPDELIVEREDCE = 114, + LINUX_MIB_TCPACKCOMPRESSED = 115, + LINUX_MIB_TCPZEROWINDOWDROP = 116, + LINUX_MIB_TCPRCVQDROP = 117, + LINUX_MIB_TCPWQUEUETOOBIG = 118, + LINUX_MIB_TCPFASTOPENPASSIVEALTKEY = 119, + LINUX_MIB_TCPTIMEOUTREHASH = 120, + LINUX_MIB_TCPDUPLICATEDATAREHASH = 121, + LINUX_MIB_TCPDSACKRECVSEGS = 122, + LINUX_MIB_TCPDSACKIGNOREDDUBIOUS = 123, + LINUX_MIB_TCPMIGRATEREQSUCCESS = 124, + LINUX_MIB_TCPMIGRATEREQFAILURE = 125, + LINUX_MIB_TCPPLBREHASH = 126, + LINUX_MIB_TCPAOREQUIRED = 127, + LINUX_MIB_TCPAOBAD = 128, + LINUX_MIB_TCPAOKEYNOTFOUND = 129, + LINUX_MIB_TCPAOGOOD = 130, + LINUX_MIB_TCPAODROPPEDICMPS = 131, + __LINUX_MIB_MAX = 132, +}; + +enum { + LINUX_MIB_XFRMNUM = 0, + LINUX_MIB_XFRMINERROR = 1, + LINUX_MIB_XFRMINBUFFERERROR = 2, + LINUX_MIB_XFRMINHDRERROR = 3, + LINUX_MIB_XFRMINNOSTATES = 4, + LINUX_MIB_XFRMINSTATEPROTOERROR = 5, + LINUX_MIB_XFRMINSTATEMODEERROR = 6, + LINUX_MIB_XFRMINSTATESEQERROR = 7, + LINUX_MIB_XFRMINSTATEEXPIRED = 8, + LINUX_MIB_XFRMINSTATEMISMATCH = 9, + LINUX_MIB_XFRMINSTATEINVALID = 10, + LINUX_MIB_XFRMINTMPLMISMATCH = 11, + LINUX_MIB_XFRMINNOPOLS = 12, + LINUX_MIB_XFRMINPOLBLOCK = 13, + LINUX_MIB_XFRMINPOLERROR = 14, + LINUX_MIB_XFRMOUTERROR = 15, + LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16, + LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17, + LINUX_MIB_XFRMOUTNOSTATES = 18, + LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19, + LINUX_MIB_XFRMOUTSTATEMODEERROR = 20, + LINUX_MIB_XFRMOUTSTATESEQERROR = 21, + LINUX_MIB_XFRMOUTSTATEEXPIRED = 22, + LINUX_MIB_XFRMOUTPOLBLOCK = 23, + LINUX_MIB_XFRMOUTPOLDEAD = 24, + LINUX_MIB_XFRMOUTPOLERROR = 25, + LINUX_MIB_XFRMFWDHDRERROR = 26, + LINUX_MIB_XFRMOUTSTATEINVALID = 27, + LINUX_MIB_XFRMACQUIREERROR = 28, + LINUX_MIB_XFRMOUTSTATEDIRERROR = 29, + LINUX_MIB_XFRMINSTATEDIRERROR = 30, + __LINUX_MIB_XFRMMAX = 31, +}; + +enum { + LINUX_MIB_TLSNUM = 0, + LINUX_MIB_TLSCURRTXSW = 1, + LINUX_MIB_TLSCURRRXSW = 2, + LINUX_MIB_TLSCURRTXDEVICE = 3, + LINUX_MIB_TLSCURRRXDEVICE = 4, + LINUX_MIB_TLSTXSW = 5, + LINUX_MIB_TLSRXSW = 6, + LINUX_MIB_TLSTXDEVICE = 7, + LINUX_MIB_TLSRXDEVICE = 8, + LINUX_MIB_TLSDECRYPTERROR = 9, + LINUX_MIB_TLSRXDEVICERESYNC = 10, + LINUX_MIB_TLSDECRYPTRETRY = 11, + LINUX_MIB_TLSRXNOPADVIOL = 12, + __LINUX_MIB_TLSMAX = 13, +}; + +enum nf_inet_hooks { + NF_INET_PRE_ROUTING = 0, + NF_INET_LOCAL_IN = 1, + NF_INET_FORWARD = 2, + NF_INET_LOCAL_OUT = 3, + NF_INET_POST_ROUTING = 4, + NF_INET_NUMHOOKS = 5, + NF_INET_INGRESS = 5, +}; + +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_INET = 1, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_NETDEV = 5, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_NUMPROTO = 11, +}; + +enum tcp_conntrack { + TCP_CONNTRACK_NONE = 0, + TCP_CONNTRACK_SYN_SENT = 1, + TCP_CONNTRACK_SYN_RECV = 2, + TCP_CONNTRACK_ESTABLISHED = 3, + TCP_CONNTRACK_FIN_WAIT = 4, + TCP_CONNTRACK_CLOSE_WAIT = 5, + TCP_CONNTRACK_LAST_ACK = 6, + TCP_CONNTRACK_TIME_WAIT = 7, + TCP_CONNTRACK_CLOSE = 8, + 
TCP_CONNTRACK_LISTEN = 9, + TCP_CONNTRACK_MAX = 10, + TCP_CONNTRACK_IGNORE = 11, + TCP_CONNTRACK_RETRANS = 12, + TCP_CONNTRACK_UNACK = 13, + TCP_CONNTRACK_TIMEOUT_MAX = 14, +}; + +enum udp_conntrack { + UDP_CT_UNREPLIED = 0, + UDP_CT_REPLIED = 1, + UDP_CT_MAX = 2, +}; + +enum { + XFRM_POLICY_IN = 0, + XFRM_POLICY_OUT = 1, + XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, + XFRM_POLICY_MAX = 3, +}; + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = -1, + NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP = 1, + MAX_NETNS_BPF_ATTACH_TYPE = 2, +}; + +enum { + TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = 1, + TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = 2, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM = 4, + TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT = 8, + TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM = 16, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT = 32, + __TCA_FLOWER_KEY_FLAGS_MAX = 33, +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL = 0, + FLOW_DISSECTOR_KEY_BASIC = 1, + FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, + FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, + FLOW_DISSECTOR_KEY_PORTS = 4, + FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, + FLOW_DISSECTOR_KEY_ICMP = 6, + FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, + FLOW_DISSECTOR_KEY_TIPC = 8, + FLOW_DISSECTOR_KEY_ARP = 9, + FLOW_DISSECTOR_KEY_VLAN = 10, + FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, + FLOW_DISSECTOR_KEY_GRE_KEYID = 12, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, + FLOW_DISSECTOR_KEY_ENC_KEYID = 14, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, + FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, + FLOW_DISSECTOR_KEY_ENC_PORTS = 18, + FLOW_DISSECTOR_KEY_MPLS = 19, + FLOW_DISSECTOR_KEY_TCP = 20, + FLOW_DISSECTOR_KEY_IP = 21, + FLOW_DISSECTOR_KEY_CVLAN = 22, + FLOW_DISSECTOR_KEY_ENC_IP = 23, + FLOW_DISSECTOR_KEY_ENC_OPTS = 24, + FLOW_DISSECTOR_KEY_META = 25, + FLOW_DISSECTOR_KEY_CT = 26, + FLOW_DISSECTOR_KEY_HASH = 27, + FLOW_DISSECTOR_KEY_NUM_OF_VLANS = 28, + FLOW_DISSECTOR_KEY_PPPOE = 29, + FLOW_DISSECTOR_KEY_L2TPV3 = 30, + FLOW_DISSECTOR_KEY_CFM = 31, + FLOW_DISSECTOR_KEY_IPSEC = 32, + FLOW_DISSECTOR_KEY_MAX = 33, +}; + +enum skb_ext_id { + SKB_EXT_BRIDGE_NF = 0, + SKB_EXT_NUM = 1, +}; + +struct buffer_head; + +typedef void bh_end_io_t(struct buffer_head *, int); + +struct buffer_head { + long unsigned int b_state; + struct buffer_head *b_this_page; + union { + struct page *b_page; + struct folio *b_folio; + }; + long: 32; + sector_t b_blocknr; + size_t b_size; + char *b_data; + struct block_device *b_bdev; + bh_end_io_t *b_end_io; + void *b_private; + struct list_head b_assoc_buffers; + struct address_space *b_assoc_map; + atomic_t b_count; + spinlock_t b_uptodate_lock; +}; + +struct fiemap_extent; + +struct fiemap_extent_info { + unsigned int fi_flags; + unsigned int fi_extents_mapped; + unsigned int fi_extents_max; + struct fiemap_extent *fi_extents_start; +}; + +struct xattr; + +typedef int (*initxattrs)(struct inode *, const struct xattr *, void *); + +struct xattr { + const char *name; + void *value; + size_t value_len; +}; + +typedef unsigned int tid_t; + +struct transaction_chp_stats_s { + long unsigned int cs_chp_time; + __u32 cs_forced_to_close; + __u32 cs_written; + __u32 cs_dropped; +}; + +struct journal_s; + +typedef struct journal_s journal_t; + +struct journal_head; + +struct transaction_s; + +typedef struct transaction_s transaction_t; + +struct transaction_s { + journal_t *t_journal; + tid_t t_tid; + enum { + T_RUNNING = 0, + T_LOCKED = 1, + T_SWITCH = 2, + T_FLUSH = 3, + T_COMMIT = 4, + T_COMMIT_DFLUSH = 5, + T_COMMIT_JFLUSH = 6, + T_COMMIT_CALLBACK = 7, + 
T_FINISHED = 8, + } t_state; + long unsigned int t_log_start; + int t_nr_buffers; + struct journal_head *t_reserved_list; + struct journal_head *t_buffers; + struct journal_head *t_forget; + struct journal_head *t_checkpoint_list; + struct journal_head *t_shadow_list; + struct list_head t_inode_list; + long unsigned int t_max_wait; + long unsigned int t_start; + long unsigned int t_requested; + struct transaction_chp_stats_s t_chp_stats; + atomic_t t_updates; + atomic_t t_outstanding_credits; + atomic_t t_outstanding_revokes; + atomic_t t_handle_count; + transaction_t *t_cpnext; + transaction_t *t_cpprev; + long unsigned int t_expires; + ktime_t t_start_time; + unsigned int t_synchronous_commit: 1; + int t_need_data_flush; + struct list_head t_private_list; +}; + +struct jbd2_buffer_trigger_type; + +struct journal_head { + struct buffer_head *b_bh; + spinlock_t b_state_lock; + int b_jcount; + unsigned int b_jlist; + unsigned int b_modified; + char *b_frozen_data; + char *b_committed_data; + transaction_t *b_transaction; + transaction_t *b_next_transaction; + struct journal_head *b_tnext; + struct journal_head *b_tprev; + transaction_t *b_cp_transaction; + struct journal_head *b_cpnext; + struct journal_head *b_cpprev; + struct jbd2_buffer_trigger_type *b_triggers; + struct jbd2_buffer_trigger_type *b_frozen_triggers; +}; + +struct jbd2_buffer_trigger_type { + void (*t_frozen)(struct jbd2_buffer_trigger_type *, struct buffer_head *, void *, size_t); + void (*t_abort)(struct jbd2_buffer_trigger_type *, struct buffer_head *); +}; + +struct crypto_alg; + +struct crypto_tfm { + refcount_t refcnt; + u32 crt_flags; + int node; + void (*exit)(struct crypto_tfm *); + struct crypto_alg *__crt_alg; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__crt_ctx[0]; +}; + +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); + void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); + void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); +}; + +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); + int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); +}; + +struct crypto_type; + +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + int cra_priority; + refcount_t cra_refcnt; + char cra_name[128]; + char cra_driver_name[128]; + const struct crypto_type *cra_type; + union { + struct cipher_alg cipher; + struct compress_alg compress; + } cra_u; + int (*cra_init)(struct crypto_tfm *); + void (*cra_exit)(struct crypto_tfm *); + void (*cra_destroy)(struct crypto_alg *); + struct module *cra_module; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct crypto_instance; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); + unsigned int (*extsize)(struct crypto_alg *); + int (*init_tfm)(struct crypto_tfm *); + void (*show)(struct seq_file *, struct crypto_alg *); + int (*report)(struct sk_buff *, struct crypto_alg *); + void (*free)(struct crypto_instance *); + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + 
+struct crypto_shash { + unsigned int descsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct jbd2_journal_handle; + +typedef struct jbd2_journal_handle handle_t; + +struct jbd2_journal_handle { + union { + transaction_t *h_transaction; + journal_t *h_journal; + }; + handle_t *h_rsv_handle; + int h_total_credits; + int h_revoke_credits; + int h_revoke_credits_requested; + int h_ref; + int h_err; + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; + long unsigned int h_start_jiffies; + unsigned int h_requested_credits; + unsigned int saved_alloc_context; +}; + +struct transaction_run_stats_s { + long unsigned int rs_wait; + long unsigned int rs_request_delay; + long unsigned int rs_running; + long unsigned int rs_locked; + long unsigned int rs_flushing; + long unsigned int rs_logging; + __u32 rs_handle_count; + __u32 rs_blocks; + __u32 rs_blocks_logged; +}; + +struct transaction_stats_s { + long unsigned int ts_tid; + long unsigned int ts_requested; + struct transaction_run_stats_s run; +}; + +enum passtype { + PASS_SCAN = 0, + PASS_REVOKE = 1, + PASS_REPLAY = 2, +}; + +struct journal_superblock_s; + +typedef struct journal_superblock_s journal_superblock_t; + +struct jbd2_revoke_table_s; + +struct jbd2_inode; + +struct journal_s { + long unsigned int j_flags; + int j_errno; + struct mutex j_abort_mutex; + struct buffer_head *j_sb_buffer; + journal_superblock_t *j_superblock; + rwlock_t j_state_lock; + int j_barrier_count; + struct mutex j_barrier; + transaction_t *j_running_transaction; + transaction_t *j_committing_transaction; + transaction_t *j_checkpoint_transactions; + wait_queue_head_t j_wait_transaction_locked; + wait_queue_head_t j_wait_done_commit; + wait_queue_head_t j_wait_commit; + wait_queue_head_t j_wait_updates; + wait_queue_head_t j_wait_reserved; + wait_queue_head_t j_fc_wait; + struct mutex j_checkpoint_mutex; + struct buffer_head *j_chkpt_bhs[64]; + struct shrinker *j_shrinker; + long: 32; + struct percpu_counter j_checkpoint_jh_count; + transaction_t *j_shrink_transaction; + long unsigned int j_head; + long unsigned int j_tail; + long unsigned int j_free; + long unsigned int j_first; + long unsigned int j_last; + long unsigned int j_fc_first; + long unsigned int j_fc_off; + long unsigned int j_fc_last; + struct block_device *j_dev; + int j_blocksize; + long: 32; + long long unsigned int j_blk_offset; + char j_devname[56]; + struct block_device *j_fs_dev; + errseq_t j_fs_dev_wb_err; + unsigned int j_total_len; + atomic_t j_reserved_credits; + spinlock_t j_list_lock; + struct inode *j_inode; + tid_t j_tail_sequence; + tid_t j_transaction_sequence; + tid_t j_commit_sequence; + tid_t j_commit_request; + __u8 j_uuid[16]; + struct task_struct *j_task; + int j_max_transaction_buffers; + int j_revoke_records_per_block; + int j_transaction_overhead_buffers; + long unsigned int j_commit_interval; + struct timer_list j_commit_timer; + spinlock_t j_revoke_lock; + struct jbd2_revoke_table_s *j_revoke; + struct jbd2_revoke_table_s *j_revoke_table[2]; + struct buffer_head **j_wbuf; + struct buffer_head **j_fc_wbuf; + int j_wbufsize; + int j_fc_wbufsize; + pid_t j_last_sync_writer; + long: 32; + u64 j_average_commit_time; + u32 j_min_batch_time; + u32 j_max_batch_time; + void 
(*j_commit_callback)(journal_t *, transaction_t *); + int (*j_submit_inode_data_buffers)(struct jbd2_inode *); + int (*j_finish_inode_data_buffers)(struct jbd2_inode *); + spinlock_t j_history_lock; + struct proc_dir_entry *j_proc_entry; + struct transaction_stats_s j_stats; + unsigned int j_failed_commit; + void *j_private; + struct crypto_shash *j_chksum_driver; + __u32 j_csum_seed; + void (*j_fc_cleanup_callback)(struct journal_s *, int, tid_t); + int (*j_fc_replay_callback)(struct journal_s *, struct buffer_head *, enum passtype, int, tid_t); + int (*j_bmap)(struct journal_s *, sector_t *); + long: 32; +}; + +struct journal_header_s { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; +}; + +typedef struct journal_header_s journal_header_t; + +struct journal_superblock_s { + journal_header_t s_header; + __be32 s_blocksize; + __be32 s_maxlen; + __be32 s_first; + __be32 s_sequence; + __be32 s_start; + __be32 s_errno; + __be32 s_feature_compat; + __be32 s_feature_incompat; + __be32 s_feature_ro_compat; + __u8 s_uuid[16]; + __be32 s_nr_users; + __be32 s_dynsuper; + __be32 s_max_transaction; + __be32 s_max_trans_data; + __u8 s_checksum_type; + __u8 s_padding2[3]; + __be32 s_num_fc_blks; + __be32 s_head; + __u32 s_padding[40]; + __be32 s_checksum; + __u8 s_users[768]; +}; + +struct jbd2_inode { + transaction_t *i_transaction; + transaction_t *i_next_transaction; + struct list_head i_list; + struct inode *i_vfs_inode; + long unsigned int i_flags; + loff_t i_dirty_start; + loff_t i_dirty_end; +}; + +struct fiemap_extent { + __u64 fe_logical; + __u64 fe_physical; + __u64 fe_length; + __u64 fe_reserved64[2]; + __u32 fe_flags; + __u32 fe_reserved[3]; +}; + +typedef __u16 __le16; + +typedef u16 wchar_t; + +typedef u32 unicode_t; + +struct nls_table { + const char *charset; + const char *alias; + int (*uni2char)(wchar_t, unsigned char *, int); + int (*char2uni)(const unsigned char *, int, wchar_t *); + const unsigned char *charset2lower; + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +}; + +enum utf16_endian { + UTF16_HOST_ENDIAN = 0, + UTF16_LITTLE_ENDIAN = 1, + UTF16_BIG_ENDIAN = 2, +}; + +struct utf8_table { + int cmask; + int cval; + int shift; + long int lmask; + long int lval; +}; + +typedef __u32 __le32; + +typedef __u64 __le64; + +struct semaphore { + raw_spinlock_t lock; + unsigned int count; + struct list_head wait_list; +}; + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + +struct btrfs_qgroup_limit { + __u64 flags; + __u64 max_rfer; + __u64 max_excl; + __u64 rsv_rfer; + __u64 rsv_excl; +}; + +struct btrfs_qgroup_inherit { + __u64 flags; + __u64 num_qgroups; + __u64 num_ref_copies; + __u64 num_excl_copies; + struct btrfs_qgroup_limit lim; + __u64 qgroups[0]; +}; + +struct btrfs_scrub_progress { + __u64 data_extents_scrubbed; + __u64 tree_extents_scrubbed; + __u64 data_bytes_scrubbed; + __u64 tree_bytes_scrubbed; + __u64 read_errors; + __u64 csum_errors; + __u64 verify_errors; + __u64 no_csum; + __u64 csum_discards; + __u64 super_errors; + __u64 malloc_errors; + __u64 uncorrectable_errors; + __u64 corrected_errors; + __u64 last_physical; + __u64 unverified_errors; +}; + +struct btrfs_balance_args { + __u64 profiles; + union { + __u64 usage; + struct { + __u32 usage_min; + __u32 usage_max; + }; + }; + __u64 devid; + __u64 pstart; + __u64 pend; + __u64 vstart; + __u64 vend; + __u64 target; + __u64 flags; + union { + __u64 limit; + struct { + __u32 limit_min; + __u32 limit_max; + }; + }; + __u32 stripes_min; + __u32 
stripes_max; + __u64 unused[6]; +}; + +struct btrfs_balance_progress { + __u64 expected; + __u64 considered; + __u64 completed; +}; + +struct btrfs_disk_key { + __le64 objectid; + __u8 type; + __le64 offset; +} __attribute__((packed)); + +struct btrfs_key { + __u64 objectid; + __u8 type; + __u64 offset; +} __attribute__((packed)); + +struct btrfs_header { + __u8 csum[32]; + __u8 fsid[16]; + __le64 bytenr; + __le64 flags; + __u8 chunk_tree_uuid[16]; + __le64 generation; + __le64 owner; + __le32 nritems; + __u8 level; +} __attribute__((packed)); + +struct btrfs_root_backup { + __le64 tree_root; + __le64 tree_root_gen; + __le64 chunk_root; + __le64 chunk_root_gen; + __le64 extent_root; + __le64 extent_root_gen; + __le64 fs_root; + __le64 fs_root_gen; + __le64 dev_root; + __le64 dev_root_gen; + __le64 csum_root; + __le64 csum_root_gen; + __le64 total_bytes; + __le64 bytes_used; + __le64 num_devices; + __le64 unused_64[4]; + __u8 tree_root_level; + __u8 chunk_root_level; + __u8 extent_root_level; + __u8 fs_root_level; + __u8 dev_root_level; + __u8 csum_root_level; + __u8 unused_8[10]; +}; + +struct btrfs_item { + struct btrfs_disk_key key; + __le32 offset; + __le32 size; +} __attribute__((packed)); + +struct btrfs_key_ptr { + struct btrfs_disk_key key; + __le64 blockptr; + __le64 generation; +} __attribute__((packed)); + +struct btrfs_dev_item { + __le64 devid; + __le64 total_bytes; + __le64 bytes_used; + __le32 io_align; + __le32 io_width; + __le32 sector_size; + __le64 type; + __le64 generation; + __le64 start_offset; + __le32 dev_group; + __u8 seek_speed; + __u8 bandwidth; + __u8 uuid[16]; + __u8 fsid[16]; +} __attribute__((packed)); + +struct btrfs_super_block { + __u8 csum[32]; + __u8 fsid[16]; + __le64 bytenr; + __le64 flags; + __le64 magic; + __le64 generation; + __le64 root; + __le64 chunk_root; + __le64 log_root; + __le64 __unused_log_root_transid; + __le64 total_bytes; + __le64 bytes_used; + __le64 root_dir_objectid; + __le64 num_devices; + __le32 sectorsize; + __le32 nodesize; + __le32 __unused_leafsize; + __le32 stripesize; + __le32 sys_chunk_array_size; + __le64 chunk_root_generation; + __le64 compat_flags; + __le64 compat_ro_flags; + __le64 incompat_flags; + __le16 csum_type; + __u8 root_level; + __u8 chunk_root_level; + __u8 log_root_level; + struct btrfs_dev_item dev_item; + char label[256]; + __le64 cache_generation; + __le64 uuid_tree_generation; + __u8 metadata_uuid[16]; + __u64 nr_global_roots; + __le64 reserved[27]; + __u8 sys_chunk_array[2048]; + struct btrfs_root_backup super_roots[4]; + __u8 padding[565]; +} __attribute__((packed)); + +struct btrfs_extent_item { + __le64 refs; + __le64 generation; + __le64 flags; +}; + +struct btrfs_tree_block_info { + struct btrfs_disk_key key; + __u8 level; +}; + +struct btrfs_extent_data_ref { + __le64 root; + __le64 objectid; + __le64 offset; + __le32 count; +}; + +struct btrfs_shared_data_ref { + __le32 count; +}; + +struct btrfs_extent_owner_ref { + __le64 root_id; +}; + +struct btrfs_extent_inline_ref { + __u8 type; + __le64 offset; +} __attribute__((packed)); + +struct btrfs_timespec { + __le64 sec; + __le32 nsec; +}; + +struct btrfs_inode_item { + __le64 generation; + __le64 transid; + __le64 size; + __le64 nbytes; + __le64 block_group; + __le32 nlink; + __le32 uid; + __le32 gid; + __le32 mode; + __le64 rdev; + __le64 flags; + __le64 sequence; + __le64 reserved[4]; + struct btrfs_timespec atime; + struct btrfs_timespec ctime; + struct btrfs_timespec mtime; + struct btrfs_timespec otime; +}; + +struct btrfs_root_item { + struct 
btrfs_inode_item inode; + __le64 generation; + __le64 root_dirid; + __le64 bytenr; + __le64 byte_limit; + __le64 bytes_used; + __le64 last_snapshot; + __le64 flags; + __le32 refs; + struct btrfs_disk_key drop_progress; + __u8 drop_level; + __u8 level; + __le64 generation_v2; + __u8 uuid[16]; + __u8 parent_uuid[16]; + __u8 received_uuid[16]; + __le64 ctransid; + __le64 otransid; + __le64 stransid; + __le64 rtransid; + struct btrfs_timespec ctime; + struct btrfs_timespec otime; + struct btrfs_timespec stime; + struct btrfs_timespec rtime; + __le64 reserved[8]; +} __attribute__((packed)); + +enum { + BTRFS_FILE_EXTENT_INLINE = 0, + BTRFS_FILE_EXTENT_REG = 1, + BTRFS_FILE_EXTENT_PREALLOC = 2, + BTRFS_NR_FILE_EXTENT_TYPES = 3, +}; + +struct btrfs_file_extent_item { + __le64 generation; + __le64 ram_bytes; + __u8 compression; + __u8 encryption; + __le16 other_encoding; + __u8 type; + __le64 disk_bytenr; + __le64 disk_num_bytes; + __le64 offset; + __le64 num_bytes; +} __attribute__((packed)); + +struct btrfs_tree_parent_check { + u64 owner_root; + u64 transid; + struct btrfs_key first_key; + bool has_first_key; + u8 level; + long: 32; +}; + +struct extent_map_tree { + struct rb_root root; + struct list_head modified_extents; + rwlock_t lock; +}; + +struct btrfs_fs_info; + +struct btrfs_inode; + +struct extent_io_tree { + struct rb_root state; + union { + struct btrfs_fs_info *fs_info; + struct btrfs_inode *inode; + }; + u8 owner; + spinlock_t lock; +}; + +enum btrfs_rsv_type { + BTRFS_BLOCK_RSV_GLOBAL = 0, + BTRFS_BLOCK_RSV_DELALLOC = 1, + BTRFS_BLOCK_RSV_TRANS = 2, + BTRFS_BLOCK_RSV_CHUNK = 3, + BTRFS_BLOCK_RSV_DELOPS = 4, + BTRFS_BLOCK_RSV_DELREFS = 5, + BTRFS_BLOCK_RSV_EMPTY = 6, + BTRFS_BLOCK_RSV_TEMP = 7, +}; + +struct btrfs_space_info; + +struct btrfs_block_rsv { + u64 size; + u64 reserved; + struct btrfs_space_info *space_info; + spinlock_t lock; + bool full; + bool failfast; + enum btrfs_rsv_type type: 8; + long: 32; + u64 qgroup_rsv_size; + u64 qgroup_rsv_reserved; +}; + +struct btrfs_root; + +struct btrfs_delayed_node; + +struct btrfs_inode { + struct btrfs_root *root; + long: 32; + u64 objectid; + u8 prop_compress; + u8 defrag_compress; + spinlock_t lock; + struct extent_map_tree extent_tree; + struct extent_io_tree io_tree; + struct extent_io_tree *file_extent_tree; + struct mutex log_mutex; + unsigned int outstanding_extents; + spinlock_t ordered_tree_lock; + struct rb_root ordered_tree; + struct rb_node *ordered_tree_last; + struct list_head delalloc_inodes; + long unsigned int runtime_flags; + long: 32; + u64 generation; + u64 last_trans; + u64 logged_trans; + int last_sub_trans; + int last_log_commit; + union { + u64 delalloc_bytes; + u64 first_dir_index_to_log; + }; + union { + u64 new_delalloc_bytes; + u64 last_dir_index_offset; + }; + union { + u64 defrag_bytes; + u64 reloc_block_group_start; + }; + u64 disk_i_size; + union { + u64 index_cnt; + u64 csum_bytes; + }; + u64 dir_index; + u64 last_unlink_trans; + union { + u64 last_reflink_trans; + u64 ref_root_id; + }; + u32 flags; + u32 ro_flags; + struct btrfs_block_rsv block_rsv; + struct btrfs_delayed_node *delayed_node; + long: 32; + u64 i_otime_sec; + u32 i_otime_nsec; + struct list_head delayed_iput; + struct rw_semaphore i_mmap_lock; + long: 32; + struct inode vfs_inode; +}; + +struct btrfs_block_group; + +struct btrfs_free_cluster { + spinlock_t lock; + spinlock_t refill_lock; + struct rb_root root; + long: 32; + u64 max_size; + u64 window_start; + bool fragmented; + struct btrfs_block_group *block_group; + struct 
list_head block_group_list; +}; + +struct btrfs_discard_ctl { + struct workqueue_struct *discard_workers; + struct delayed_work work; + spinlock_t lock; + struct btrfs_block_group *block_group; + struct list_head discard_list[3]; + u64 prev_discard; + u64 prev_discard_time; + atomic_t discardable_extents; + long: 32; + atomic64_t discardable_bytes; + u64 max_discard_size; + u64 delay_ms; + u32 iops_limit; + u32 kbps_limit; + u64 discard_extent_bytes; + u64 discard_bitmap_bytes; + atomic64_t discard_bytes_saved; +}; + +struct btrfs_work; + +typedef void (*btrfs_func_t)(struct btrfs_work *); + +typedef void (*btrfs_ordered_func_t)(struct btrfs_work *, bool); + +struct btrfs_workqueue; + +struct btrfs_work { + btrfs_func_t func; + btrfs_ordered_func_t ordered_func; + struct work_struct normal_work; + struct list_head ordered_list; + struct btrfs_workqueue *wq; + long unsigned int flags; +}; + +struct btrfs_device; + +struct btrfs_dev_replace { + u64 replace_state; + time64_t time_started; + time64_t time_stopped; + atomic64_t num_write_errors; + atomic64_t num_uncorrectable_read_errors; + u64 cursor_left; + u64 committed_cursor_left; + u64 cursor_left_last_write_of_item; + u64 cursor_right; + u64 cont_reading_from_srcdev_mode; + int is_valid; + int item_needs_writeback; + struct btrfs_device *srcdev; + struct btrfs_device *tgtdev; + struct mutex lock_finishing_cancel_unmount; + struct rw_semaphore rwsem; + long: 32; + struct btrfs_scrub_progress scrub_progress; + struct percpu_counter bio_counter; + wait_queue_head_t replace_wait; + struct task_struct *replace_task; +}; + +enum btrfs_exclusive_operation { + BTRFS_EXCLOP_NONE = 0, + BTRFS_EXCLOP_BALANCE_PAUSED = 1, + BTRFS_EXCLOP_BALANCE = 2, + BTRFS_EXCLOP_DEV_ADD = 3, + BTRFS_EXCLOP_DEV_REMOVE = 4, + BTRFS_EXCLOP_DEV_REPLACE = 5, + BTRFS_EXCLOP_RESIZE = 6, + BTRFS_EXCLOP_SWAP_ACTIVATE = 7, +}; + +struct btrfs_commit_stats { + u64 commit_count; + u64 max_commit_dur; + u64 last_commit_dur; + u64 total_commit_dur; +}; + +struct btrfs_transaction; + +struct btrfs_stripe_hash_table; + +struct btrfs_fs_devices; + +struct reloc_control; + +struct btrfs_balance_control; + +struct ulist; + +struct btrfs_delayed_root; + +struct btrfs_fs_info { + u8 chunk_tree_uuid[16]; + long unsigned int flags; + struct btrfs_root *tree_root; + struct btrfs_root *chunk_root; + struct btrfs_root *dev_root; + struct btrfs_root *fs_root; + struct btrfs_root *quota_root; + struct btrfs_root *uuid_root; + struct btrfs_root *data_reloc_root; + struct btrfs_root *block_group_root; + struct btrfs_root *stripe_root; + struct btrfs_root *log_root_tree; + rwlock_t global_root_lock; + struct rb_root global_root_tree; + spinlock_t fs_roots_radix_lock; + struct xarray fs_roots_radix; + rwlock_t block_group_cache_lock; + struct rb_root_cached block_group_cache_tree; + atomic64_t free_chunk_space; + struct extent_io_tree excluded_extents; + struct rb_root_cached mapping_tree; + rwlock_t mapping_tree_lock; + long: 32; + struct btrfs_block_rsv global_block_rsv; + struct btrfs_block_rsv trans_block_rsv; + struct btrfs_block_rsv chunk_block_rsv; + struct btrfs_block_rsv delayed_block_rsv; + struct btrfs_block_rsv delayed_refs_rsv; + struct btrfs_block_rsv empty_block_rsv; + u64 generation; + u64 last_trans_committed; + u64 last_reloc_trans; + u64 last_trans_log_full_commit; + long long unsigned int mount_opt; + long unsigned int compress_type: 4; + unsigned int compress_level; + u32 commit_interval; + long: 32; + u64 max_inline; + struct btrfs_transaction *running_transaction; + 
wait_queue_head_t transaction_throttle; + wait_queue_head_t transaction_wait; + wait_queue_head_t transaction_blocked_wait; + wait_queue_head_t async_submit_wait; + spinlock_t super_lock; + struct btrfs_super_block *super_copy; + struct btrfs_super_block *super_for_commit; + struct super_block *sb; + struct inode *btree_inode; + struct mutex tree_log_mutex; + struct mutex transaction_kthread_mutex; + struct mutex cleaner_mutex; + struct mutex chunk_mutex; + struct mutex ro_block_group_mutex; + struct btrfs_stripe_hash_table *stripe_hash_table; + struct mutex ordered_operations_mutex; + struct rw_semaphore commit_root_sem; + struct rw_semaphore cleanup_work_sem; + struct rw_semaphore subvol_sem; + spinlock_t trans_lock; + struct mutex reloc_mutex; + struct list_head trans_list; + struct list_head dead_roots; + struct list_head caching_block_groups; + spinlock_t delayed_iput_lock; + struct list_head delayed_iputs; + atomic_t nr_delayed_iputs; + wait_queue_head_t delayed_iputs_wait; + atomic64_t tree_mod_seq; + rwlock_t tree_mod_log_lock; + struct rb_root tree_mod_log; + struct list_head tree_mod_seq_list; + atomic_t async_delalloc_pages; + spinlock_t ordered_root_lock; + struct list_head ordered_roots; + struct mutex delalloc_root_mutex; + spinlock_t delalloc_root_lock; + struct list_head delalloc_roots; + struct btrfs_workqueue *workers; + struct btrfs_workqueue *delalloc_workers; + struct btrfs_workqueue *flush_workers; + struct workqueue_struct *endio_workers; + struct workqueue_struct *endio_meta_workers; + struct workqueue_struct *rmw_workers; + struct workqueue_struct *compressed_write_workers; + struct btrfs_workqueue *endio_write_workers; + struct btrfs_workqueue *endio_freespace_worker; + struct btrfs_workqueue *caching_workers; + struct btrfs_workqueue *fixup_workers; + struct btrfs_workqueue *delayed_workers; + struct task_struct *transaction_kthread; + struct task_struct *cleaner_kthread; + u32 thread_pool_size; + struct kobject *space_info_kobj; + struct kobject *qgroups_kobj; + struct kobject *discard_kobj; + struct percpu_counter dirty_metadata_bytes; + struct percpu_counter delalloc_bytes; + struct percpu_counter ordered_bytes; + s32 dirty_metadata_batch; + s32 delalloc_batch; + struct percpu_counter evictable_extent_maps; + u64 em_shrinker_last_root; + u64 em_shrinker_last_ino; + atomic64_t em_shrinker_nr_to_scan; + struct work_struct em_shrinker_work; + struct list_head dirty_cowonly_roots; + struct btrfs_fs_devices *fs_devices; + struct list_head space_info; + struct btrfs_space_info *data_sinfo; + struct reloc_control *reloc_ctl; + long: 32; + struct btrfs_free_cluster data_alloc_cluster; + struct btrfs_free_cluster meta_alloc_cluster; + spinlock_t defrag_inodes_lock; + struct rb_root defrag_inodes; + atomic_t defrag_running; + seqlock_t profiles_lock; + long: 32; + u64 avail_data_alloc_bits; + u64 avail_metadata_alloc_bits; + u64 avail_system_alloc_bits; + spinlock_t balance_lock; + struct mutex balance_mutex; + atomic_t balance_pause_req; + atomic_t balance_cancel_req; + struct btrfs_balance_control *balance_ctl; + wait_queue_head_t balance_wait_q; + atomic_t reloc_cancel_req; + u32 data_chunk_allocations; + u32 metadata_ratio; + void *bdev_holder; + struct mutex scrub_lock; + atomic_t scrubs_running; + atomic_t scrub_pause_req; + atomic_t scrubs_paused; + atomic_t scrub_cancel_req; + wait_queue_head_t scrub_pause_wait; + refcount_t scrub_workers_refcnt; + u32 sectors_per_page; + struct workqueue_struct *scrub_workers; + long: 32; + struct btrfs_discard_ctl discard_ctl; 
+ u64 qgroup_flags; + struct rb_root qgroup_tree; + spinlock_t qgroup_lock; + struct ulist *qgroup_ulist; + struct mutex qgroup_ioctl_lock; + struct list_head dirty_qgroups; + u64 qgroup_seq; + struct mutex qgroup_rescan_lock; + struct btrfs_key qgroup_rescan_progress; + struct btrfs_workqueue *qgroup_rescan_workers; + struct completion qgroup_rescan_completion; + struct btrfs_work qgroup_rescan_work; + bool qgroup_rescan_running; + u8 qgroup_drop_subtree_thres; + u64 qgroup_enable_gen; + int fs_error; + long unsigned int fs_state; + struct btrfs_delayed_root *delayed_root; + spinlock_t buffer_lock; + struct xarray buffer_radix; + int backup_root_index; + struct btrfs_dev_replace dev_replace; + struct semaphore uuid_tree_rescan_sem; + struct work_struct async_reclaim_work; + struct work_struct async_data_reclaim_work; + struct work_struct preempt_reclaim_work; + struct work_struct reclaim_bgs_work; + struct list_head reclaim_bgs; + int bg_reclaim_threshold; + spinlock_t unused_bgs_lock; + struct list_head unused_bgs; + struct mutex unused_bg_unpin_mutex; + struct mutex reclaim_bgs_lock; + u32 nodesize; + u32 sectorsize; + u32 sectorsize_bits; + u32 csum_size; + u32 csums_per_leaf; + u32 stripesize; + u64 max_extent_size; + spinlock_t swapfile_pins_lock; + struct rb_root swapfile_pins; + struct crypto_shash *csum_shash; + enum btrfs_exclusive_operation exclusive_operation; + u64 zone_size; + struct queue_limits limits; + long: 32; + u64 max_zone_append_size; + struct mutex zoned_meta_io_lock; + spinlock_t treelog_bg_lock; + u64 treelog_bg; + spinlock_t relocation_bg_lock; + long: 32; + u64 data_reloc_bg; + struct mutex zoned_data_reloc_io_lock; + struct btrfs_block_group *active_meta_bg; + struct btrfs_block_group *active_system_bg; + long: 32; + u64 nr_global_roots; + spinlock_t zone_active_bgs_lock; + struct list_head zone_active_bgs; + long: 32; + struct btrfs_commit_stats commit_stats; + u64 last_root_drop_gen; + struct lockdep_map btrfs_trans_num_writers_map; + struct lockdep_map btrfs_trans_num_extwriters_map; + struct lockdep_map btrfs_state_change_map[4]; + struct lockdep_map btrfs_trans_pending_ordered_map; + struct lockdep_map btrfs_ordered_extent_map; +}; + +struct ulist_node { + u64 val; + u64 aux; + struct list_head list; + struct rb_node rb_node; + long: 32; +}; + +struct ulist { + long unsigned int nnodes; + struct list_head nodes; + struct rb_root root; + struct ulist_node *prealloc; +}; + +enum { + EXTENT_BUFFER_UPTODATE = 0, + EXTENT_BUFFER_DIRTY = 1, + EXTENT_BUFFER_CORRUPT = 2, + EXTENT_BUFFER_READAHEAD = 3, + EXTENT_BUFFER_TREE_REF = 4, + EXTENT_BUFFER_STALE = 5, + EXTENT_BUFFER_WRITEBACK = 6, + EXTENT_BUFFER_READ_ERR = 7, + EXTENT_BUFFER_UNMAPPED = 8, + EXTENT_BUFFER_IN_TREE = 9, + EXTENT_BUFFER_WRITE_ERR = 10, + EXTENT_BUFFER_ZONED_ZEROOUT = 11, + EXTENT_BUFFER_READING = 12, +}; + +struct extent_buffer { + u64 start; + u32 len; + u32 folio_size; + long unsigned int bflags; + struct btrfs_fs_info *fs_info; + void *addr; + spinlock_t refs_lock; + atomic_t refs; + int read_mirror; + s8 log_index; + u8 folio_shift; + struct callback_head callback_head; + struct rw_semaphore lock; + struct folio *folios[16]; + long: 32; +}; + +enum btrfs_discard_state { + BTRFS_DISCARD_EXTENTS = 0, + BTRFS_DISCARD_BITMAPS = 1, + BTRFS_DISCARD_RESET_CURSOR = 2, +}; + +struct btrfs_io_ctl { + void *cur; + void *orig; + struct page *page; + struct page **pages; + struct btrfs_fs_info *fs_info; + struct inode *inode; + long unsigned int size; + int index; + int num_pages; + int entries; + int 
bitmaps; +}; + +enum btrfs_block_group_size_class { + BTRFS_BG_SZ_NONE = 0, + BTRFS_BG_SZ_SMALL = 1, + BTRFS_BG_SZ_MEDIUM = 2, + BTRFS_BG_SZ_LARGE = 3, +}; + +struct btrfs_caching_control; + +struct btrfs_free_space_ctl; + +struct btrfs_chunk_map; + +struct btrfs_block_group { + struct btrfs_fs_info *fs_info; + struct btrfs_inode *inode; + spinlock_t lock; + long: 32; + u64 start; + u64 length; + u64 pinned; + u64 reserved; + u64 used; + u64 delalloc_bytes; + u64 bytes_super; + u64 flags; + u64 cache_generation; + u64 global_root_id; + u64 commit_used; + u32 bitmap_high_thresh; + u32 bitmap_low_thresh; + struct rw_semaphore data_rwsem; + long unsigned int full_stripe_len; + long unsigned int runtime_flags; + unsigned int ro; + int disk_cache_state; + int cached; + struct btrfs_caching_control *caching_ctl; + struct btrfs_space_info *space_info; + struct btrfs_free_space_ctl *free_space_ctl; + struct rb_node cache_node; + struct list_head list; + refcount_t refs; + struct list_head cluster_list; + struct list_head bg_list; + struct list_head ro_list; + atomic_t frozen; + struct list_head discard_list; + int discard_index; + u64 discard_eligible_time; + u64 discard_cursor; + enum btrfs_discard_state discard_state; + struct list_head dirty_list; + struct list_head io_list; + struct btrfs_io_ctl io_ctl; + atomic_t reservations; + atomic_t nocow_writers; + struct mutex free_space_lock; + int swap_extents; + u64 alloc_offset; + u64 zone_unusable; + u64 zone_capacity; + u64 meta_write_pointer; + struct btrfs_chunk_map *physical_map; + struct list_head active_bg_list; + struct work_struct zone_finish_work; + struct extent_buffer *last_eb; + enum btrfs_block_group_size_class size_class; + long: 32; + u64 reclaim_mark; +}; + +struct extent_changeset { + u64 bytes_changed; + struct ulist range_changed; + long: 32; +}; + +enum btrfs_lock_nesting { + BTRFS_NESTING_NORMAL = 0, + BTRFS_NESTING_COW = 1, + BTRFS_NESTING_LEFT = 2, + BTRFS_NESTING_RIGHT = 3, + BTRFS_NESTING_LEFT_COW = 4, + BTRFS_NESTING_RIGHT_COW = 5, + BTRFS_NESTING_SPLIT = 6, + BTRFS_NESTING_NEW_ROOT = 7, + BTRFS_NESTING_MAX = 8, +}; + +struct btrfs_drew_lock { + atomic_t readers; + atomic_t writers; + wait_queue_head_t pending_writers; + wait_queue_head_t pending_readers; +}; + +enum { + __EXTENT_DIRTY_BIT = 0, + EXTENT_DIRTY = 1, + __EXTENT_DIRTY_SEQ = 0, + __EXTENT_UPTODATE_BIT = 1, + EXTENT_UPTODATE = 2, + __EXTENT_UPTODATE_SEQ = 1, + __EXTENT_LOCKED_BIT = 2, + EXTENT_LOCKED = 4, + __EXTENT_LOCKED_SEQ = 2, + __EXTENT_DIO_LOCKED_BIT = 3, + EXTENT_DIO_LOCKED = 8, + __EXTENT_DIO_LOCKED_SEQ = 3, + __EXTENT_NEW_BIT = 4, + EXTENT_NEW = 16, + __EXTENT_NEW_SEQ = 4, + __EXTENT_DELALLOC_BIT = 5, + EXTENT_DELALLOC = 32, + __EXTENT_DELALLOC_SEQ = 5, + __EXTENT_DEFRAG_BIT = 6, + EXTENT_DEFRAG = 64, + __EXTENT_DEFRAG_SEQ = 6, + __EXTENT_BOUNDARY_BIT = 7, + EXTENT_BOUNDARY = 128, + __EXTENT_BOUNDARY_SEQ = 7, + __EXTENT_NODATASUM_BIT = 8, + EXTENT_NODATASUM = 256, + __EXTENT_NODATASUM_SEQ = 8, + __EXTENT_CLEAR_META_RESV_BIT = 9, + EXTENT_CLEAR_META_RESV = 512, + __EXTENT_CLEAR_META_RESV_SEQ = 9, + __EXTENT_NEED_WAIT_BIT = 10, + EXTENT_NEED_WAIT = 1024, + __EXTENT_NEED_WAIT_SEQ = 10, + __EXTENT_NORESERVE_BIT = 11, + EXTENT_NORESERVE = 2048, + __EXTENT_NORESERVE_SEQ = 11, + __EXTENT_QGROUP_RESERVED_BIT = 12, + EXTENT_QGROUP_RESERVED = 4096, + __EXTENT_QGROUP_RESERVED_SEQ = 12, + __EXTENT_CLEAR_DATA_RESV_BIT = 13, + EXTENT_CLEAR_DATA_RESV = 8192, + __EXTENT_CLEAR_DATA_RESV_SEQ = 13, + __EXTENT_DELALLOC_NEW_BIT = 14, + EXTENT_DELALLOC_NEW = 16384, + 
__EXTENT_DELALLOC_NEW_SEQ = 14, + __EXTENT_ADD_INODE_BYTES_BIT = 15, + EXTENT_ADD_INODE_BYTES = 32768, + __EXTENT_ADD_INODE_BYTES_SEQ = 15, + __EXTENT_CLEAR_ALL_BITS_BIT = 16, + EXTENT_CLEAR_ALL_BITS = 65536, + __EXTENT_CLEAR_ALL_BITS_SEQ = 16, + __EXTENT_NOWAIT_BIT = 17, + EXTENT_NOWAIT = 131072, + __EXTENT_NOWAIT_SEQ = 17, +}; + +struct extent_state { + u64 start; + u64 end; + struct rb_node rb_node; + wait_queue_head_t wq; + refcount_t refs; + u32 state; +}; + +struct btrfs_space_info { + struct btrfs_fs_info *fs_info; + spinlock_t lock; + u64 total_bytes; + u64 bytes_used; + u64 bytes_pinned; + u64 bytes_reserved; + u64 bytes_may_use; + u64 bytes_readonly; + u64 bytes_zone_unusable; + u64 max_extent_size; + u64 chunk_size; + int bg_reclaim_threshold; + int clamp; + unsigned int full: 1; + unsigned int chunk_alloc: 1; + unsigned int flush: 1; + unsigned int force_alloc; + u64 disk_used; + u64 disk_total; + u64 flags; + struct list_head list; + struct list_head ro_bgs; + struct list_head priority_tickets; + struct list_head tickets; + u64 reclaim_size; + u64 tickets_id; + struct rw_semaphore groups_sem; + struct list_head block_groups[9]; + struct kobject kobj; + struct kobject *block_group_kobjs[9]; + u64 reclaim_count; + u64 reclaim_bytes; + u64 reclaim_errors; + bool dynamic_reclaim; + bool periodic_reclaim; + bool periodic_reclaim_ready; + long: 32; + s64 reclaimable_bytes; +}; + +enum { + BTRFS_FS_STATE_REMOUNTING = 0, + BTRFS_FS_STATE_RO = 1, + BTRFS_FS_STATE_TRANS_ABORTED = 2, + BTRFS_FS_STATE_DEV_REPLACING = 3, + BTRFS_FS_STATE_DUMMY_FS_INFO = 4, + BTRFS_FS_STATE_NO_DATA_CSUMS = 5, + BTRFS_FS_STATE_SKIP_META_CSUMS = 6, + BTRFS_FS_STATE_LOG_CLEANUP_ERROR = 7, + BTRFS_FS_STATE_COUNT = 8, +}; + +enum { + BTRFS_FS_CLOSING_START = 0, + BTRFS_FS_CLOSING_DONE = 1, + BTRFS_FS_LOG_RECOVERING = 2, + BTRFS_FS_OPEN = 3, + BTRFS_FS_QUOTA_ENABLED = 4, + BTRFS_FS_UPDATE_UUID_TREE_GEN = 5, + BTRFS_FS_CREATING_FREE_SPACE_TREE = 6, + BTRFS_FS_BTREE_ERR = 7, + BTRFS_FS_LOG1_ERR = 8, + BTRFS_FS_LOG2_ERR = 9, + BTRFS_FS_QUOTA_OVERRIDE = 10, + BTRFS_FS_FROZEN = 11, + BTRFS_FS_BALANCE_RUNNING = 12, + BTRFS_FS_RELOC_RUNNING = 13, + BTRFS_FS_CLEANER_RUNNING = 14, + BTRFS_FS_CSUM_IMPL_FAST = 15, + BTRFS_FS_DISCARD_RUNNING = 16, + BTRFS_FS_CLEANUP_SPACE_CACHE_V1 = 17, + BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED = 18, + BTRFS_FS_TREE_MOD_LOG_USERS = 19, + BTRFS_FS_COMMIT_TRANS = 20, + BTRFS_FS_UNFINISHED_DROPS = 21, + BTRFS_FS_NEED_ZONE_FINISH = 22, + BTRFS_FS_NEED_TRANS_COMMIT = 23, + BTRFS_FS_ACTIVE_ZONE_TRACKING = 24, + BTRFS_FS_FEATURE_CHANGED = 25, + BTRFS_FS_UNALIGNED_TREE_BLOCK = 26, + BTRFS_FS_32BIT_ERROR = 27, + BTRFS_FS_32BIT_WARN = 28, +}; + +enum { + BTRFS_MOUNT_NODATASUM = 1ULL, + BTRFS_MOUNT_NODATACOW = 2ULL, + BTRFS_MOUNT_NOBARRIER = 4ULL, + BTRFS_MOUNT_SSD = 8ULL, + BTRFS_MOUNT_DEGRADED = 16ULL, + BTRFS_MOUNT_COMPRESS = 32ULL, + BTRFS_MOUNT_NOTREELOG = 64ULL, + BTRFS_MOUNT_FLUSHONCOMMIT = 128ULL, + BTRFS_MOUNT_SSD_SPREAD = 256ULL, + BTRFS_MOUNT_NOSSD = 512ULL, + BTRFS_MOUNT_DISCARD_SYNC = 1024ULL, + BTRFS_MOUNT_FORCE_COMPRESS = 2048ULL, + BTRFS_MOUNT_SPACE_CACHE = 4096ULL, + BTRFS_MOUNT_CLEAR_CACHE = 8192ULL, + BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = 16384ULL, + BTRFS_MOUNT_ENOSPC_DEBUG = 32768ULL, + BTRFS_MOUNT_AUTO_DEFRAG = 65536ULL, + BTRFS_MOUNT_USEBACKUPROOT = 131072ULL, + BTRFS_MOUNT_SKIP_BALANCE = 262144ULL, + BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = 524288ULL, + BTRFS_MOUNT_RESCAN_UUID_TREE = 1048576ULL, + BTRFS_MOUNT_FRAGMENT_DATA = 2097152ULL, + BTRFS_MOUNT_FRAGMENT_METADATA = 4194304ULL, + 
BTRFS_MOUNT_FREE_SPACE_TREE = 8388608ULL, + BTRFS_MOUNT_NOLOGREPLAY = 16777216ULL, + BTRFS_MOUNT_REF_VERIFY = 33554432ULL, + BTRFS_MOUNT_DISCARD_ASYNC = 67108864ULL, + BTRFS_MOUNT_IGNOREBADROOTS = 134217728ULL, + BTRFS_MOUNT_IGNOREDATACSUMS = 268435456ULL, + BTRFS_MOUNT_NODISCARD = 536870912ULL, + BTRFS_MOUNT_NOSPACECACHE = 1073741824ULL, + BTRFS_MOUNT_IGNOREMETACSUMS = 2147483648ULL, + BTRFS_MOUNT_IGNORESUPERFLAGS = 4294967296ULL, +}; + +struct rcu_string; + +struct btrfs_zoned_device_info; + +struct scrub_ctx; + +struct btrfs_device { + struct list_head dev_list; + struct list_head dev_alloc_list; + struct list_head post_commit_list; + struct btrfs_fs_devices *fs_devices; + struct btrfs_fs_info *fs_info; + struct rcu_string *name; + long: 32; + u64 generation; + struct file *bdev_file; + struct block_device *bdev; + struct btrfs_zoned_device_info *zone_info; + dev_t devt; + long unsigned int dev_state; + blk_status_t last_flush_error; + seqcount_t data_seqcount; + long: 32; + u64 devid; + u64 total_bytes; + u64 disk_total_bytes; + u64 bytes_used; + u32 io_align; + u32 io_width; + u64 type; + atomic_t sb_write_errors; + u32 sector_size; + u8 uuid[16]; + u64 commit_total_bytes; + u64 commit_bytes_used; + struct bio flush_bio; + struct completion flush_wait; + struct scrub_ctx *scrub_ctx; + int dev_stats_valid; + atomic_t dev_stats_ccnt; + atomic_t dev_stat_values[5]; + struct extent_io_tree alloc_state; + struct completion kobj_unregister; + struct kobject devid_kobj; + long: 32; + u64 scrub_speed_max; +}; + +struct btrfs_qgroup_swapped_blocks { + spinlock_t lock; + bool swapped; + struct rb_root blocks[8]; +}; + +struct btrfs_root { + struct rb_node rb_node; + struct extent_buffer *node; + struct extent_buffer *commit_root; + struct btrfs_root *log_root; + struct btrfs_root *reloc_root; + long unsigned int state; + struct btrfs_root_item root_item; + struct btrfs_key root_key; + struct btrfs_fs_info *fs_info; + struct extent_io_tree dirty_log_pages; + struct mutex objectid_mutex; + spinlock_t accounting_lock; + struct btrfs_block_rsv *block_rsv; + struct mutex log_mutex; + wait_queue_head_t log_writer_wait; + wait_queue_head_t log_commit_wait[2]; + struct list_head log_ctxs[2]; + atomic_t log_writers; + atomic_t log_commit[2]; + atomic_t log_batch; + int log_transid; + int log_transid_committed; + int last_log_commit; + pid_t log_start_pid; + u64 last_trans; + u64 free_objectid; + struct btrfs_key defrag_progress; + struct btrfs_key defrag_max; + struct list_head dirty_list; + struct list_head root_list; + struct xarray inodes; + struct xarray delayed_nodes; + dev_t anon_dev; + spinlock_t root_item_lock; + refcount_t refs; + struct mutex delalloc_mutex; + spinlock_t delalloc_lock; + struct list_head delalloc_inodes; + struct list_head delalloc_root; + u64 nr_delalloc_inodes; + struct mutex ordered_extent_mutex; + spinlock_t ordered_extent_lock; + struct list_head ordered_extents; + struct list_head ordered_root; + u64 nr_ordered_extents; + struct list_head reloc_dirty_list; + int send_in_progress; + int dedupe_in_progress; + struct btrfs_drew_lock snapshot_lock; + atomic_t snapshot_force_cow; + spinlock_t qgroup_meta_rsv_lock; + u64 qgroup_meta_rsv_pertrans; + u64 qgroup_meta_rsv_prealloc; + wait_queue_head_t qgroup_flush_wait; + atomic_t nr_swapfiles; + struct btrfs_qgroup_swapped_blocks swapped_blocks; + struct extent_io_tree log_csum_range; + u64 relocation_src_root; +}; + +enum btrfs_trans_state { + TRANS_STATE_RUNNING = 0, + TRANS_STATE_COMMIT_PREP = 1, + TRANS_STATE_COMMIT_START = 
2, + TRANS_STATE_COMMIT_DOING = 3, + TRANS_STATE_UNBLOCKED = 4, + TRANS_STATE_SUPER_COMMITTED = 5, + TRANS_STATE_COMPLETED = 6, + TRANS_STATE_MAX = 7, +}; + +struct btrfs_delayed_ref_root { + struct xarray head_refs; + struct xarray dirty_extents; + spinlock_t lock; + long unsigned int num_heads; + long unsigned int num_heads_ready; + long: 32; + u64 pending_csums; + long unsigned int flags; + long: 32; + u64 run_delayed_start; + u64 qgroup_to_skip; +}; + +struct btrfs_transaction { + u64 transid; + atomic_t num_extwriters; + atomic_t num_writers; + refcount_t use_count; + long unsigned int flags; + enum btrfs_trans_state state; + int aborted; + struct list_head list; + struct extent_io_tree dirty_pages; + time64_t start_time; + wait_queue_head_t writer_wait; + wait_queue_head_t commit_wait; + struct list_head pending_snapshots; + struct list_head dev_update_list; + struct list_head switch_commits; + struct list_head dirty_bgs; + struct list_head io_bgs; + struct list_head dropped_roots; + struct extent_io_tree pinned_extents; + struct mutex cache_write_mutex; + spinlock_t dirty_bgs_lock; + struct list_head deleted_bgs; + spinlock_t dropped_roots_lock; + long: 32; + struct btrfs_delayed_ref_root delayed_refs; + struct btrfs_fs_info *fs_info; + atomic_t pending_ordered; + wait_queue_head_t pending_wait; + long: 32; +}; + +enum btrfs_chunk_allocation_policy { + BTRFS_CHUNK_ALLOC_REGULAR = 0, + BTRFS_CHUNK_ALLOC_ZONED = 1, +}; + +enum btrfs_read_policy { + BTRFS_READ_POLICY_PID = 0, + BTRFS_NR_READ_POLICY = 1, +}; + +struct btrfs_fs_devices { + u8 fsid[16]; + u8 metadata_uuid[16]; + struct list_head fs_list; + u64 num_devices; + u64 open_devices; + u64 rw_devices; + u64 missing_devices; + u64 total_rw_bytes; + u64 total_devices; + u64 latest_generation; + struct btrfs_device *latest_dev; + struct mutex device_list_mutex; + struct list_head devices; + struct list_head alloc_list; + struct list_head seed_list; + int opened; + bool rotating; + bool discardable; + bool seeding; + bool temp_fsid; + struct btrfs_fs_info *fs_info; + struct kobject fsid_kobj; + struct kobject *devices_kobj; + struct kobject *devinfo_kobj; + struct completion kobj_unregister; + enum btrfs_chunk_allocation_policy chunk_alloc_policy; + enum btrfs_read_policy read_policy; +}; + +struct btrfs_balance_control { + struct btrfs_balance_args data; + struct btrfs_balance_args meta; + struct btrfs_balance_args sys; + u64 flags; + struct btrfs_balance_progress stat; +}; + +struct btrfs_delayed_root { + spinlock_t lock; + struct list_head node_list; + struct list_head prepare_list; + atomic_t items; + atomic_t items_seq; + int nodes; + wait_queue_head_t wait; +}; + +struct btrfs_path { + struct extent_buffer *nodes[8]; + int slots[8]; + u8 locks[8]; + u8 reada; + u8 lowest_level; + unsigned int search_for_split: 1; + unsigned int keep_locks: 1; + unsigned int skip_locking: 1; + unsigned int search_commit_root: 1; + unsigned int need_commit_sem: 1; + unsigned int skip_release_on_error: 1; + unsigned int search_for_extension: 1; + unsigned int nowait: 1; +}; + +enum { + BTRFS_ROOT_IN_TRANS_SETUP = 0, + BTRFS_ROOT_SHAREABLE = 1, + BTRFS_ROOT_TRACK_DIRTY = 2, + BTRFS_ROOT_IN_RADIX = 3, + BTRFS_ROOT_ORPHAN_ITEM_INSERTED = 4, + BTRFS_ROOT_DEFRAG_RUNNING = 5, + BTRFS_ROOT_FORCE_COW = 6, + BTRFS_ROOT_MULTI_LOG_TASKS = 7, + BTRFS_ROOT_DIRTY = 8, + BTRFS_ROOT_DELETING = 9, + BTRFS_ROOT_DEAD_RELOC_TREE = 10, + BTRFS_ROOT_DEAD_TREE = 11, + BTRFS_ROOT_HAS_LOG_TREE = 12, + BTRFS_ROOT_QGROUP_FLUSHING = 13, + BTRFS_ROOT_ORPHAN_CLEANUP = 14, + 
BTRFS_ROOT_UNFINISHED_DROP = 15, + BTRFS_ROOT_RESET_LOCKDEP_CLASS = 16, +}; + +struct btrfs_item_batch { + const struct btrfs_key *keys; + const u32 *data_sizes; + u32 total_data_size; + int nr; +}; + +enum btrfs_trim_state { + BTRFS_TRIM_STATE_UNTRIMMED = 0, + BTRFS_TRIM_STATE_TRIMMED = 1, + BTRFS_TRIM_STATE_TRIMMING = 2, +}; + +struct btrfs_free_space { + struct rb_node offset_index; + struct rb_node bytes_index; + u64 offset; + u64 bytes; + u64 max_extent_size; + long unsigned int *bitmap; + struct list_head list; + enum btrfs_trim_state trim_state; + s32 bitmap_extents; + long: 32; +}; + +struct btrfs_free_space_op; + +struct btrfs_free_space_ctl { + spinlock_t tree_lock; + struct rb_root free_space_offset; + struct rb_root_cached free_space_bytes; + u64 free_space; + int extents_thresh; + int free_extents; + int total_bitmaps; + int unit; + u64 start; + s32 discardable_extents[2]; + s64 discardable_bytes[2]; + const struct btrfs_free_space_op *op; + struct btrfs_block_group *block_group; + struct mutex cache_writeout_mutex; + struct list_head trimming_ranges; + long: 32; +}; + +struct btrfs_free_space_op { + bool (*use_bitmap)(struct btrfs_free_space_ctl *, struct btrfs_free_space *); +}; + +enum btrfs_chunk_alloc_enum { + CHUNK_ALLOC_NO_FORCE = 0, + CHUNK_ALLOC_LIMITED = 1, + CHUNK_ALLOC_FORCE = 2, + CHUNK_ALLOC_FORCE_FOR_EXTENT = 3, +}; + +enum btrfs_block_group_flags { + BLOCK_GROUP_FLAG_IREF = 0, + BLOCK_GROUP_FLAG_REMOVED = 1, + BLOCK_GROUP_FLAG_TO_COPY = 2, + BLOCK_GROUP_FLAG_RELOCATING_REPAIR = 3, + BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED = 4, + BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE = 5, + BLOCK_GROUP_FLAG_ZONED_DATA_RELOC = 6, + BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE = 7, + BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE = 8, + BLOCK_GROUP_FLAG_NEW = 9, +}; + +enum btrfs_caching_type { + BTRFS_CACHE_NO = 0, + BTRFS_CACHE_STARTED = 1, + BTRFS_CACHE_FINISHED = 2, + BTRFS_CACHE_ERROR = 3, +}; + +struct btrfs_caching_control { + struct list_head list; + struct mutex mutex; + wait_queue_head_t wait; + struct btrfs_work work; + struct btrfs_block_group *block_group; + atomic_t progress; + refcount_t count; +}; + +struct btrfs_io_context; + +struct btrfs_io_stripe { + struct btrfs_device *dev; + long: 32; + u64 physical; + u64 length; + bool rst_search_commit_root; + struct btrfs_io_context *bioc; +}; + +struct btrfs_chunk_map { + struct rb_node rb_node; + int verified_stripes; + refcount_t refs; + long: 32; + u64 start; + u64 chunk_len; + u64 stripe_size; + u64 type; + int io_align; + int io_width; + int num_stripes; + int sub_stripes; + struct btrfs_io_stripe stripes[0]; +}; + +enum btrfs_extent_allocation_policy { + BTRFS_EXTENT_ALLOC_CLUSTERED = 0, + BTRFS_EXTENT_ALLOC_ZONED = 1, +}; + +struct find_free_extent_ctl { + u64 ram_bytes; + u64 num_bytes; + u64 min_alloc_size; + u64 empty_size; + u64 flags; + int delalloc; + long: 32; + u64 search_start; + u64 empty_cluster; + struct btrfs_free_cluster *last_ptr; + bool use_cluster; + bool have_caching_bg; + bool orig_have_caching_bg; + bool for_treelog; + bool for_data_reloc; + int index; + int loop; + bool retry_uncached; + int cached; + long: 32; + u64 max_extent_size; + u64 total_free_space; + u64 found_offset; + u64 hint_byte; + enum btrfs_extent_allocation_policy policy; + bool hinted; + enum btrfs_block_group_size_class size_class; + long: 32; +}; + +enum btrfs_inline_ref_type { + BTRFS_REF_TYPE_INVALID = 0, + BTRFS_REF_TYPE_BLOCK = 1, + BTRFS_REF_TYPE_DATA = 2, + BTRFS_REF_TYPE_ANY = 3, +}; + +struct btrfs_delayed_node { + u64 inode_id; + u64 
bytes_reserved; + struct btrfs_root *root; + struct list_head n_list; + struct list_head p_list; + struct rb_root_cached ins_root; + struct rb_root_cached del_root; + struct mutex mutex; + struct btrfs_inode_item inode_item; + refcount_t refs; + int count; + u64 index_cnt; + long unsigned int flags; + u32 curr_index_batch_size; + u32 index_item_leaves; + long: 32; +}; + +enum btrfs_delayed_ref_action { + BTRFS_ADD_DELAYED_REF = 1, + BTRFS_DROP_DELAYED_REF = 2, + BTRFS_ADD_DELAYED_EXTENT = 3, + BTRFS_UPDATE_DELAYED_HEAD = 4, +} __attribute__((mode(byte))); + +struct btrfs_data_ref { + u64 objectid; + u64 offset; +}; + +struct btrfs_tree_ref { + int level; +}; + +struct btrfs_delayed_ref_node { + struct rb_node ref_node; + struct list_head add_list; + long: 32; + u64 bytenr; + u64 num_bytes; + u64 seq; + u64 ref_root; + u64 parent; + refcount_t refs; + int ref_mod; + unsigned int action: 8; + unsigned int type: 8; + long: 32; + union { + struct btrfs_tree_ref tree_ref; + struct btrfs_data_ref data_ref; + }; +}; + +struct btrfs_delayed_extent_op { + struct btrfs_disk_key key; + bool update_key; + bool update_flags; + long: 32; + u64 flags_to_set; +}; + +struct btrfs_delayed_ref_head { + u64 bytenr; + u64 num_bytes; + struct mutex mutex; + refcount_t refs; + spinlock_t lock; + struct rb_root_cached ref_tree; + struct list_head ref_add_list; + struct btrfs_delayed_extent_op *extent_op; + int total_ref_mod; + int ref_mod; + u64 owning_root; + u64 reserved_bytes; + u8 level; + bool must_insert_reserved; + bool is_data; + bool is_system; + bool processing; + bool tracked; +}; + +enum btrfs_ref_type { + BTRFS_REF_NOT_SET = 0, + BTRFS_REF_DATA = 1, + BTRFS_REF_METADATA = 2, + BTRFS_REF_LAST = 3, +} __attribute__((mode(byte))); + +struct btrfs_ref { + enum btrfs_ref_type type; + enum btrfs_delayed_ref_action action; + bool skip_qgroup; + long: 32; + u64 bytenr; + u64 num_bytes; + u64 owning_root; + u64 ref_root; + u64 parent; + union { + struct btrfs_data_ref data_ref; + struct btrfs_tree_ref tree_ref; + }; +}; + +struct btrfs_pending_snapshot; + +struct btrfs_trans_handle { + u64 transid; + u64 bytes_reserved; + u64 delayed_refs_bytes_reserved; + u64 chunk_bytes_reserved; + long unsigned int delayed_ref_updates; + long unsigned int delayed_ref_csum_deletions; + struct btrfs_transaction *transaction; + struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *orig_rsv; + struct btrfs_pending_snapshot *pending_snapshot; + refcount_t use_count; + unsigned int type; + short int aborted; + bool adding_csums; + bool allocating_chunk; + bool removing_chunk; + bool reloc_reserved; + bool in_fsync; + struct btrfs_fs_info *fs_info; + struct list_head new_bgs; + long: 32; + struct btrfs_block_rsv delayed_rsv; +}; + +struct btrfs_pending_snapshot { + struct dentry *dentry; + struct btrfs_inode *dir; + struct btrfs_root *root; + struct btrfs_root_item *root_item; + struct btrfs_root *snap; + struct btrfs_qgroup_inherit *inherit; + struct btrfs_path *path; + long: 32; + struct btrfs_block_rsv block_rsv; + int error; + dev_t anon_dev; + bool readonly; + struct list_head list; + long: 32; +}; + +struct rcu_string { + struct callback_head rcu; + char str[0]; +}; + +enum btrfs_raid_types { + BTRFS_RAID_SINGLE = 0, + BTRFS_RAID_RAID0 = 1, + BTRFS_RAID_RAID1 = 2, + BTRFS_RAID_DUP = 3, + BTRFS_RAID_RAID10 = 4, + BTRFS_RAID_RAID5 = 5, + BTRFS_RAID_RAID6 = 6, + BTRFS_RAID_RAID1C3 = 7, + BTRFS_RAID_RAID1C4 = 8, + BTRFS_NR_RAID_TYPES = 9, +}; + +struct btrfs_zoned_device_info { + u64 zone_size; + u8 zone_size_shift; + 
u32 nr_zones; + unsigned int max_active_zones; + int reserved_active_zones; + atomic_t active_zones_left; + long unsigned int *seq_zones; + long unsigned int *empty_zones; + long unsigned int *active_zones; + struct blk_zone *zone_cache; + long: 32; + struct blk_zone sb_zones[6]; +}; + +struct btrfs_io_context { + refcount_t refs; + struct btrfs_fs_info *fs_info; + u64 map_type; + struct bio *orig_bio; + atomic_t error; + u16 max_errors; + long: 32; + u64 logical; + u64 size; + struct list_head rst_ordered_entry; + u16 num_stripes; + u16 mirror_num; + u16 replace_nr_stripes; + s16 replace_stripe_src; + u64 full_stripe_logical; + struct btrfs_io_stripe stripes[0]; +}; + +struct btrfs_discard_stripe { + struct btrfs_device *dev; + long: 32; + u64 physical; + u64 length; +}; + +enum btrfs_qgroup_rsv_type { + BTRFS_QGROUP_RSV_DATA = 0, + BTRFS_QGROUP_RSV_META_PERTRANS = 1, + BTRFS_QGROUP_RSV_META_PREALLOC = 2, + BTRFS_QGROUP_RSV_LAST = 3, +}; + +struct btrfs_squota_delta { + u64 root; + u64 num_bytes; + u64 generation; + bool is_inc; + bool is_data; + long: 32; +}; + +enum btrfs_qgroup_mode { + BTRFS_QGROUP_MODE_DISABLED = 0, + BTRFS_QGROUP_MODE_FULL = 1, + BTRFS_QGROUP_MODE_SIMPLE = 2, +}; + +enum btrfs_loop_type { + LOOP_CACHING_NOWAIT = 0, + LOOP_CACHING_WAIT = 1, + LOOP_UNSET_SIZE_CLASS = 2, + LOOP_ALLOC_CHUNK = 3, + LOOP_WRONG_SIZE_CLASS = 4, + LOOP_NO_EMPTY_SIZE = 5, +}; + +struct walk_control { + u64 refs[8]; + u64 flags[8]; + struct btrfs_key update_progress; + struct btrfs_key drop_progress; + int drop_level; + int stage; + int level; + int shared_level; + int update_ref; + int keep_locks; + int reada_slot; + int reada_count; + int restarted; + int lookup_info; + long: 32; +}; + +enum { + BTRFS_INODE_FLUSH_ON_CLOSE = 0, + BTRFS_INODE_DUMMY = 1, + BTRFS_INODE_IN_DEFRAG = 2, + BTRFS_INODE_HAS_ASYNC_EXTENT = 3, + BTRFS_INODE_NEEDS_FULL_SYNC = 4, + BTRFS_INODE_COPY_EVERYTHING = 5, + BTRFS_INODE_HAS_PROPS = 6, + BTRFS_INODE_SNAPSHOT_FLUSH = 7, + BTRFS_INODE_NO_XATTRS = 8, + BTRFS_INODE_NO_DELALLOC_FLUSH = 9, + BTRFS_INODE_VERITY_IN_PROGRESS = 10, + BTRFS_INODE_FREE_SPACE_INODE = 11, + BTRFS_INODE_NO_CAP_XATTR = 12, + BTRFS_INODE_COW_WRITE_ERROR = 13, + BTRFS_INODE_ROOT_STUB = 14, +}; + +enum btrfs_reserve_flush_enum { + BTRFS_RESERVE_NO_FLUSH = 0, + BTRFS_RESERVE_FLUSH_LIMIT = 1, + BTRFS_RESERVE_FLUSH_EVICT = 2, + BTRFS_RESERVE_FLUSH_DATA = 3, + BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE = 4, + BTRFS_RESERVE_FLUSH_ALL = 5, + BTRFS_RESERVE_FLUSH_ALL_STEAL = 6, + BTRFS_RESERVE_FLUSH_EMERGENCY = 7, +}; + +typedef struct { + __u8 b[16]; +} guid_t; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +}; + +struct fileattr { + u32 flags; + u32 fsx_xflags; + u32 fsx_extsize; + u32 fsx_nextents; + u32 fsx_projid; + u32 fsx_cowextsize; + bool flags_valid: 1; + bool fsx_valid: 1; +}; + +typedef u16 efi_char16_t; + +typedef guid_t efi_guid_t; + +struct efivarfs_mount_opts { + kuid_t uid; + kgid_t gid; +}; + +struct efivarfs_fs_info { + struct efivarfs_mount_opts mount_opts; + struct list_head efivarfs_list; + struct super_block *sb; + struct notifier_block nb; +}; + +struct efi_variable { + efi_char16_t VariableName[512]; + efi_guid_t VendorGuid; + __u32 Attributes; +}; + +struct efivar_entry { + struct efi_variable var; + struct list_head list; + struct kobject kobj; +}; + +enum key_need_perm { + KEY_NEED_UNSPECIFIED = 0, + KEY_NEED_VIEW = 1, + KEY_NEED_READ = 2, + KEY_NEED_WRITE = 3, + 
KEY_NEED_SEARCH = 4, + KEY_NEED_LINK = 5, + KEY_NEED_SETATTR = 6, + KEY_NEED_UNLINK = 7, + KEY_SYSADMIN_OVERRIDE = 8, + KEY_AUTHTOKEN_OVERRIDE = 9, + KEY_DEFER_PERM_CHECK = 10, +}; + +struct __key_reference_with_attributes; + +typedef struct __key_reference_with_attributes *key_ref_t; + +enum key_state { + KEY_IS_UNINSTANTIATED = 0, + KEY_IS_POSITIVE = 1, +}; + +struct key_user { + struct rb_node node; + struct mutex cons_lock; + spinlock_t lock; + refcount_t usage; + atomic_t nkeys; + atomic_t nikeys; + kuid_t uid; + int qnkeys; + int qnbytes; +}; + +struct key_preparsed_payload { + const char *orig_description; + char *description; + union key_payload payload; + const void *data; + size_t datalen; + size_t quotalen; + long: 32; + time64_t expiry; +}; + +struct key_match_data { + bool (*cmp)(const struct key *, const struct key_match_data *); + const void *raw_data; + void *preparsed; + unsigned int lookup_type; +}; + +enum kernel_pkey_operation { + kernel_pkey_encrypt = 0, + kernel_pkey_decrypt = 1, + kernel_pkey_sign = 2, + kernel_pkey_verify = 3, +}; + +struct kernel_pkey_params { + struct key *key; + const char *encoding; + const char *hash_algo; + char *info; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + enum kernel_pkey_operation op: 8; +}; + +struct kernel_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; +}; + +struct keyring_search_context { + struct keyring_index_key index_key; + const struct cred *cred; + struct key_match_data match_data; + unsigned int flags; + int (*iterator)(const void *, void *); + int skipped_ret; + bool possessed; + key_ref_t result; + long: 32; + time64_t now; +}; + +typedef void (*crypto_completion_t)(void *, int); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + u32 flags; +}; + +struct aead_request { + struct crypto_async_request base; + unsigned int assoclen; + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct scatterlist { + long unsigned int page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; +}; + +struct crypto_aead; + +struct aead_alg { + int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); + int (*setauthsize)(struct crypto_aead *, unsigned int); + int (*encrypt)(struct aead_request *); + int (*decrypt)(struct aead_request *); + int (*init)(struct crypto_aead *); + void (*exit)(struct crypto_aead *); + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct crypto_template; + +struct crypto_spawn; + +struct crypto_instance { + struct crypto_alg alg; + struct crypto_template *tmpl; + union { + struct hlist_node list; + struct crypto_spawn *spawns; + }; + struct work_struct free_work; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg 
*alg; + union { + struct crypto_instance *inst; + struct crypto_spawn *next; + }; + const struct crypto_type *frontend; + u32 mask; + bool dead; + bool registered; +}; + +struct rtattr; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + int (*create)(struct crypto_template *, struct rtattr **); + char name[128]; +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_request { + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + struct crypto_async_request base; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_skcipher { + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +struct crypto_lskcipher { + struct crypto_tfm base; +}; + +struct skcipher_alg_common { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int statesize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); + int (*encrypt)(struct skcipher_request *); + int (*decrypt)(struct skcipher_request *); + int (*export)(struct skcipher_request *, void *); + int (*import)(struct skcipher_request *, const void *); + int (*init)(struct crypto_skcipher *); + void (*exit)(struct crypto_skcipher *); + unsigned int walksize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int statesize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; + }; + struct skcipher_alg_common co; + }; +}; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } src; + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } dst; + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + unsigned int ivsize; + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; +}; + +enum { + SKCIPHER_WALK_PHYS = 1, + SKCIPHER_WALK_SLOW = 2, + 
SKCIPHER_WALK_COPY = 4, + SKCIPHER_WALK_DIFF = 8, + SKCIPHER_WALK_SLEEP = 16, +}; + +struct skcipher_walk_buffer { + struct list_head entry; + struct scatter_walk dst; + unsigned int len; + u8 *data; + u8 buffer[0]; +}; + +typedef void (*exitcall_t)(); + +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct shash_desc { + struct crypto_shash *tfm; + long: 32; + void *__ctx[0]; +}; + +struct shash_alg { + int (*init)(struct shash_desc *); + int (*update)(struct shash_desc *, const u8 *, unsigned int); + int (*final)(struct shash_desc *, u8 *); + int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*export)(struct shash_desc *, void *); + int (*import)(struct shash_desc *, const void *); + int (*setkey)(struct crypto_shash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_shash *); + void (*exit_tfm)(struct crypto_shash *); + int (*clone_tfm)(struct crypto_shash *, struct crypto_shash *); + unsigned int descsize; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + unsigned int digestsize; + unsigned int statesize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; + }; + struct hash_alg_common halg; + }; +}; + +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; + long: 32; +}; + +struct xxhash64_tfm_ctx { + u64 seed; +}; + +struct xxhash64_desc_ctx { + struct xxh64_state xxhstate; +}; + +enum asn1_class { + ASN1_UNIV = 0, + ASN1_APPL = 1, + ASN1_CONT = 2, + ASN1_PRIV = 3, +}; + +enum asn1_method { + ASN1_PRIM = 0, + ASN1_CONS = 1, +}; + +enum asn1_tag { + ASN1_EOC = 0, + ASN1_BOOL = 1, + ASN1_INT = 2, + ASN1_BTS = 3, + ASN1_OTS = 4, + ASN1_NULL = 5, + ASN1_OID = 6, + ASN1_ODE = 7, + ASN1_EXT = 8, + ASN1_REAL = 9, + ASN1_ENUM = 10, + ASN1_EPDV = 11, + ASN1_UTF8STR = 12, + ASN1_RELOID = 13, + ASN1_SEQ = 16, + ASN1_SET = 17, + ASN1_NUMSTR = 18, + ASN1_PRNSTR = 19, + ASN1_TEXSTR = 20, + ASN1_VIDSTR = 21, + ASN1_IA5STR = 22, + ASN1_UNITIM = 23, + ASN1_GENTIM = 24, + ASN1_GRASTR = 25, + ASN1_VISSTR = 26, + ASN1_GENSTR = 27, + ASN1_UNISTR = 28, + ASN1_CHRSTR = 29, + ASN1_BMPSTR = 30, + ASN1_LONG_TAG = 31, +}; + +typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); + +struct asn1_decoder { + const unsigned char *machine; + size_t machlen; + const asn1_action_t *actions; +}; + +enum asn1_opcode { + ASN1_OP_MATCH = 0, + ASN1_OP_MATCH_OR_SKIP = 1, + ASN1_OP_MATCH_ACT = 2, + ASN1_OP_MATCH_ACT_OR_SKIP = 3, + ASN1_OP_MATCH_JUMP = 4, + ASN1_OP_MATCH_JUMP_OR_SKIP = 5, + ASN1_OP_MATCH_ANY = 8, + ASN1_OP_MATCH_ANY_OR_SKIP = 9, + ASN1_OP_MATCH_ANY_ACT = 10, + ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, + ASN1_OP_COND_MATCH_OR_SKIP = 17, + ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, + ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, + ASN1_OP_COND_MATCH_ANY = 24, + ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, + ASN1_OP_COND_MATCH_ANY_ACT = 26, + ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, + ASN1_OP_COND_FAIL = 28, + ASN1_OP_COMPLETE = 29, + ASN1_OP_ACT = 30, + ASN1_OP_MAYBE_ACT = 31, + ASN1_OP_END_SEQ = 32, + ASN1_OP_END_SET = 33, + 
ASN1_OP_END_SEQ_OF = 34, + ASN1_OP_END_SET_OF = 35, + ASN1_OP_END_SEQ_ACT = 36, + ASN1_OP_END_SET_ACT = 37, + ASN1_OP_END_SEQ_OF_ACT = 38, + ASN1_OP_END_SET_OF_ACT = 39, + ASN1_OP_RETURN = 40, + ASN1_OP__NR = 41, +}; + +enum pkcs7_actions { + ACT_pkcs7_check_content_type = 0, + ACT_pkcs7_extract_cert = 1, + ACT_pkcs7_note_OID = 2, + ACT_pkcs7_note_certificate_list = 3, + ACT_pkcs7_note_content = 4, + ACT_pkcs7_note_data = 5, + ACT_pkcs7_note_signed_info = 6, + ACT_pkcs7_note_signeddata_version = 7, + ACT_pkcs7_note_signerinfo_version = 8, + ACT_pkcs7_sig_note_authenticated_attr = 9, + ACT_pkcs7_sig_note_digest_algo = 10, + ACT_pkcs7_sig_note_issuer = 11, + ACT_pkcs7_sig_note_pkey_algo = 12, + ACT_pkcs7_sig_note_serial = 13, + ACT_pkcs7_sig_note_set_of_authattrs = 14, + ACT_pkcs7_sig_note_signature = 15, + ACT_pkcs7_sig_note_skid = 16, + NR__pkcs7_actions = 17, +}; + +typedef unsigned int slab_flags_t; + +struct kmem_cache_args { + unsigned int align; + unsigned int useroffset; + unsigned int usersize; + unsigned int freeptr_offset; + bool use_freeptr_offset; + void (*ctor)(void *); +}; + +enum kobject_action { + KOBJ_ADD = 0, + KOBJ_REMOVE = 1, + KOBJ_CHANGE = 2, + KOBJ_MOVE = 3, + KOBJ_ONLINE = 4, + KOBJ_OFFLINE = 5, + KOBJ_BIND = 6, + KOBJ_UNBIND = 7, +}; + +struct blkg_iostat { + u64 bytes[3]; + u64 ios[3]; +}; + +struct blkg_iostat_set { + struct u64_stats_sync sync; + struct blkcg_gq *blkg; + struct llist_node lnode; + int lqueued; + struct blkg_iostat cur; + struct blkg_iostat last; +}; + +struct blkcg; + +struct blkg_policy_data; + +struct blkcg_gq { + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + struct blkcg_gq *parent; + struct percpu_ref refcnt; + bool online; + struct blkg_iostat_set *iostat_cpu; + long: 32; + struct blkg_iostat_set iostat; + struct blkg_policy_data *pd[6]; + spinlock_t async_bio_lock; + struct bio_list async_bios; + union { + struct work_struct async_bio_work; + struct work_struct free_work; + }; + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; + struct callback_head callback_head; + long: 32; +}; + +typedef __u32 blk_mq_req_flags_t; + +enum req_op { + REQ_OP_READ = 0, + REQ_OP_WRITE = 1, + REQ_OP_FLUSH = 2, + REQ_OP_DISCARD = 3, + REQ_OP_SECURE_ERASE = 5, + REQ_OP_ZONE_APPEND = 7, + REQ_OP_WRITE_ZEROES = 9, + REQ_OP_ZONE_OPEN = 10, + REQ_OP_ZONE_CLOSE = 11, + REQ_OP_ZONE_FINISH = 12, + REQ_OP_ZONE_RESET = 13, + REQ_OP_ZONE_RESET_ALL = 15, + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + REQ_OP_LAST = 36, +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = 8, + __REQ_FAILFAST_TRANSPORT = 9, + __REQ_FAILFAST_DRIVER = 10, + __REQ_SYNC = 11, + __REQ_META = 12, + __REQ_PRIO = 13, + __REQ_NOMERGE = 14, + __REQ_IDLE = 15, + __REQ_INTEGRITY = 16, + __REQ_FUA = 17, + __REQ_PREFLUSH = 18, + __REQ_RAHEAD = 19, + __REQ_BACKGROUND = 20, + __REQ_NOWAIT = 21, + __REQ_POLLED = 22, + __REQ_ALLOC_CACHE = 23, + __REQ_SWAP = 24, + __REQ_DRV = 25, + __REQ_FS_PRIVATE = 26, + __REQ_ATOMIC = 27, + __REQ_NOUNMAP = 28, + __REQ_NR_BITS = 29, +}; + +struct sbitmap_word { + long unsigned int word; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int cleared; + raw_spinlock_t swap_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; 
+ long: 32; + long: 32; +}; + +struct sbitmap { + unsigned int depth; + unsigned int shift; + unsigned int map_nr; + bool round_robin; + struct sbitmap_word *map; + unsigned int *alloc_hint; +}; + +struct sbq_wait_state { + wait_queue_head_t wait; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sbitmap_queue { + struct sbitmap sb; + unsigned int wake_batch; + atomic_t wake_index; + struct sbq_wait_state *ws; + atomic_t ws_active; + unsigned int min_shallow_depth; + atomic_t completion_cnt; + atomic_t wakeup_cnt; +}; + +struct elevator_type; + +struct elevator_queue { + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + long unsigned int flags; + struct hlist_head hash[64]; +}; + +struct blk_mq_ctxs; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[3]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + unsigned int cpu; + short unsigned int index_hw[3]; + struct blk_mq_hw_ctx *hctxs[3]; + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef __u32 req_flags_t; + +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +enum rq_end_io_ret { + RQ_END_IO_NONE = 0, + RQ_END_IO_FREE = 1, +}; + +typedef enum rq_end_io_ret rq_end_io_fn(struct request *, blk_status_t); + +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + int tag; + int internal_tag; + unsigned int timeout; + unsigned int __data_len; + long: 32; + sector_t __sector; + struct bio *bio; + struct bio *biotail; + union { + struct list_head queuelist; + struct request *rq_next; + }; + struct block_device *part; + long: 32; + u64 alloc_time_ns; + u64 start_time_ns; + u64 io_start_time_ns; + short unsigned int wbt_flags; + short unsigned int stats_sectors; + short unsigned int nr_phys_segments; + short unsigned int nr_integrity_segments; + enum mq_rq_state state; + atomic_t ref; + long unsigned int deadline; + union { + struct hlist_node hash; + struct llist_node ipi_list; + }; + union { + struct rb_node rb_node; + struct bio_vec special_vec; + }; + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + struct { + unsigned int seq; + rq_end_io_fn *saved_end_io; + } flush; + long: 32; + u64 fifo_time; + rq_end_io_fn *end_io; + void *end_io_data; +}; + +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + unsigned int active_queues; + struct sbitmap_queue bitmap_tags; + struct sbitmap_queue breserved_tags; + struct request **rqs; + struct request **static_rqs; + struct list_head page_list; + spinlock_t lock; +}; + +struct rchan; + +struct blk_trace { + int trace_state; + struct rchan *rchan; + long unsigned int *sequence; + unsigned char *msg_data; + u16 act_mask; + long: 32; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct list_head running_list; + atomic_t dropped; +}; + +struct blk_flush_queue { + spinlock_t mq_flush_lock; + unsigned int flush_pending_idx: 1; + unsigned int flush_running_idx: 1; + blk_status_t rq_status; + long unsigned int 
flush_pending_since; + struct list_head flush_queue[2]; + long unsigned int flush_data_in_flight; + struct request *flush_rq; +}; + +struct blk_mq_queue_map { + unsigned int *mq_map; + unsigned int nr_queues; + unsigned int queue_offset; +}; + +struct blk_mq_tag_set { + const struct blk_mq_ops *ops; + struct blk_mq_queue_map map[3]; + unsigned int nr_maps; + unsigned int nr_hw_queues; + unsigned int queue_depth; + unsigned int reserved_tags; + unsigned int cmd_size; + int numa_node; + unsigned int timeout; + unsigned int flags; + void *driver_data; + struct blk_mq_tags **tags; + struct blk_mq_tags *shared_tags; + struct mutex tag_list_lock; + struct list_head tag_list; + struct srcu_struct *srcu; +}; + +enum { + QUEUE_FLAG_DYING = 0, + QUEUE_FLAG_NOMERGES = 1, + QUEUE_FLAG_SAME_COMP = 2, + QUEUE_FLAG_FAIL_IO = 3, + QUEUE_FLAG_NOXMERGES = 4, + QUEUE_FLAG_SAME_FORCE = 5, + QUEUE_FLAG_INIT_DONE = 6, + QUEUE_FLAG_STATS = 7, + QUEUE_FLAG_REGISTERED = 8, + QUEUE_FLAG_QUIESCED = 9, + QUEUE_FLAG_RQ_ALLOC_TIME = 10, + QUEUE_FLAG_HCTX_ACTIVE = 11, + QUEUE_FLAG_SQ_SCHED = 12, + QUEUE_FLAG_MAX = 13, +}; + +enum { + __RQF_STARTED = 0, + __RQF_FLUSH_SEQ = 1, + __RQF_MIXED_MERGE = 2, + __RQF_DONTPREP = 3, + __RQF_SCHED_TAGS = 4, + __RQF_USE_SCHED = 5, + __RQF_FAILED = 6, + __RQF_QUIET = 7, + __RQF_IO_STAT = 8, + __RQF_PM = 9, + __RQF_HASHED = 10, + __RQF_STATS = 11, + __RQF_SPECIAL_PAYLOAD = 12, + __RQF_ZONE_WRITE_PLUGGING = 13, + __RQF_TIMED_OUT = 14, + __RQF_RESV = 15, + __RQF_BITS = 16, +}; + +struct blk_mq_hw_ctx { + struct { + spinlock_t lock; + struct list_head dispatch; + long unsigned int state; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct delayed_work run_work; + cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; + long unsigned int flags; + void *sched_data; + struct request_queue *queue; + struct blk_flush_queue *fq; + void *driver_data; + struct sbitmap ctx_map; + struct blk_mq_ctx *dispatch_from; + unsigned int dispatch_busy; + short unsigned int type; + short unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + spinlock_t dispatch_wait_lock; + wait_queue_entry_t dispatch_wait; + atomic_t wait_index; + struct blk_mq_tags *tags; + struct blk_mq_tags *sched_tags; + unsigned int numa_node; + unsigned int queue_num; + atomic_t nr_active; + struct hlist_node cpuhp_online; + struct hlist_node cpuhp_dead; + struct kobject kobj; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct list_head hctx_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct blk_mq_queue_data { + struct request *rq; + bool last; +}; + +enum { + BLK_MQ_F_SHOULD_MERGE = 1, + BLK_MQ_F_TAG_QUEUE_SHARED = 2, + BLK_MQ_F_STACKING = 4, + BLK_MQ_F_TAG_HCTX_SHARED = 8, + BLK_MQ_F_BLOCKING = 16, + BLK_MQ_F_NO_SCHED = 32, + BLK_MQ_F_NO_SCHED_BY_DEFAULT = 64, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 7, + BLK_MQ_F_ALLOC_POLICY_BITS = 1, +}; + +struct rchan_buf { + void *start; + void *data; + size_t offset; + size_t subbufs_produced; + size_t subbufs_consumed; + struct rchan *chan; + wait_queue_head_t read_wait; + struct irq_work wakeup_work; + struct dentry *dentry; + struct kref kref; + struct page **page_array; + unsigned int page_count; + unsigned int finalized; + size_t *padding; + size_t prev_padding; + size_t bytes_consumed; + size_t early_bytes; + unsigned int cpu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; 
+ long: 32; +}; + +struct rchan_callbacks; + +struct rchan { + u32 version; + size_t subbuf_size; + size_t n_subbufs; + size_t alloc_size; + const struct rchan_callbacks *cb; + struct kref kref; + void *private_data; + size_t last_toobig; + struct rchan_buf **buf; + int is_global; + struct list_head list; + struct dentry *parent; + int has_base_filename; + char base_filename[255]; +}; + +struct rchan_callbacks { + int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t); + struct dentry * (*create_buf_file)(const char *, struct dentry *, umode_t, struct rchan_buf *, int *); + int (*remove_buf_file)(struct dentry *); +}; + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx *queue_ctx; +}; + +typedef unsigned int blk_insert_t; + +struct blk_mq_alloc_data { + struct request_queue *q; + blk_mq_req_flags_t flags; + unsigned int shallow_depth; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + unsigned int nr_tags; + struct rq_list *cached_rqs; + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; +}; + +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE = 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); + int (*request_merge)(struct request_queue *, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *); + void (*prepare_request)(struct request *); + void (*finish_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, blk_insert_t); + struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct request *, u64); + void (*requeue_request)(struct request *); + struct request * (*former_request)(struct request_queue *, struct request *); + struct request * (*next_request)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq *); + void (*exit_icq)(struct io_cq *); +}; + +struct elv_fs_entry; + +struct blk_mq_debugfs_attr; + +struct elevator_type { + struct kmem_cache *icq_cache; + struct elevator_mq_ops ops; + size_t icq_size; + size_t icq_align; + struct elv_fs_entry *elevator_attrs; + const char *elevator_name; + const char *elevator_alias; + struct module *elevator_owner; + const struct blk_mq_debugfs_attr *queue_debugfs_attrs; + const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; + char icq_cache_name[22]; + struct list_head list; +}; + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + +struct blk_mq_debugfs_attr { + const char *name; + umode_t mode; + int (*show)(void *, struct seq_file *); + ssize_t (*write)(void *, const char *, size_t, loff_t *); + const struct seq_operations *seq_ops; +}; + +enum blkg_iostat_type { + BLKG_IOSTAT_READ = 0, + BLKG_IOSTAT_WRITE = 1, + BLKG_IOSTAT_DISCARD = 2, + BLKG_IOSTAT_NR = 3, +}; + 
+struct blkcg_policy_data; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + refcount_t online_pin; + atomic_t congestion_count; + struct xarray blkg_tree; + struct blkcg_gq *blkg_hint; + struct hlist_head blkg_list; + struct blkcg_policy_data *cpd[6]; + struct list_head all_blkcgs_node; + struct llist_head *lhead; + struct list_head cgwb_list; + long: 32; +}; + +struct blkg_policy_data { + struct blkcg_gq *blkg; + int plid; + bool online; +}; + +struct blkcg_policy_data { + struct blkcg *blkcg; + int plid; +}; + +struct match_token { + int token; + const char *pattern; +}; + +enum { + MAX_OPT_ARGS = 3, +}; + +typedef struct { + char *from; + char *to; +} substring_t; + +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0, + HRTIMER_MODE_REL = 1, + HRTIMER_MODE_PINNED = 2, + HRTIMER_MODE_SOFT = 4, + HRTIMER_MODE_HARD = 8, + HRTIMER_MODE_ABS_PINNED = 2, + HRTIMER_MODE_REL_PINNED = 3, + HRTIMER_MODE_ABS_SOFT = 4, + HRTIMER_MODE_REL_SOFT = 5, + HRTIMER_MODE_ABS_PINNED_SOFT = 6, + HRTIMER_MODE_REL_PINNED_SOFT = 7, + HRTIMER_MODE_ABS_HARD = 8, + HRTIMER_MODE_REL_HARD = 9, + HRTIMER_MODE_ABS_PINNED_HARD = 10, + HRTIMER_MODE_REL_PINNED_HARD = 11, +}; + +enum rq_qos_id { + RQ_QOS_WBT = 0, + RQ_QOS_LATENCY = 1, + RQ_QOS_COST = 2, +}; + +struct rq_qos_ops; + +struct rq_qos { + const struct rq_qos_ops *ops; + struct gendisk *disk; + enum rq_qos_id id; + struct rq_qos *next; + struct dentry *debugfs_dir; +}; + +struct rq_qos_ops { + void (*throttle)(struct rq_qos *, struct bio *); + void (*track)(struct rq_qos *, struct request *, struct bio *); + void (*merge)(struct rq_qos *, struct request *, struct bio *); + void (*issue)(struct rq_qos *, struct request *); + void (*requeue)(struct rq_qos *, struct request *); + void (*done)(struct rq_qos *, struct request *); + void (*done_bio)(struct rq_qos *, struct bio *); + void (*cleanup)(struct rq_qos *, struct bio *); + void (*queue_depth_changed)(struct rq_qos *); + void (*exit)(struct rq_qos *); + const struct blk_mq_debugfs_attr *debugfs_attrs; +}; + +enum { + CFTYPE_ONLY_ON_ROOT = 1, + CFTYPE_NOT_ON_ROOT = 2, + CFTYPE_NS_DELEGATABLE = 4, + CFTYPE_NO_PREFIX = 8, + CFTYPE_WORLD_WRITABLE = 16, + CFTYPE_DEBUG = 32, + __CFTYPE_ONLY_ON_DFL = 65536, + __CFTYPE_NOT_ON_DFL = 131072, + __CFTYPE_ADDED = 262144, +}; + +typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); + +typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); + +typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(struct gendisk *, struct blkcg *, gfp_t); + +typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_stat_pd_fn(struct blkg_policy_data *, struct seq_file *); + +struct blkcg_policy { + int plid; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +struct blkg_conf_ctx { + char *input; + char *body; + struct block_device *bdev; + struct blkcg_gq *blkg; +}; + +enum { + MILLION = 
1000000, + MIN_PERIOD = 1000, + MAX_PERIOD = 1000000, + MARGIN_MIN_PCT = 10, + MARGIN_LOW_PCT = 20, + MARGIN_TARGET_PCT = 50, + INUSE_ADJ_STEP_PCT = 25, + TIMER_SLACK_PCT = 1, + WEIGHT_ONE = 65536, +}; + +enum { + VTIME_PER_SEC_SHIFT = 37ULL, + VTIME_PER_SEC = 137438953472ULL, + VTIME_PER_USEC = 137438ULL, + VTIME_PER_NSEC = 137ULL, + VRATE_MIN_PPM = 10000ULL, + VRATE_MAX_PPM = 100000000ULL, + VRATE_MIN = 1374ULL, + VRATE_CLAMP_ADJ_PCT = 4ULL, + AUTOP_CYCLE_NSEC = 10000000000ULL, +}; + +enum { + RQ_WAIT_BUSY_PCT = 5, + UNBUSY_THR_PCT = 75, + MIN_DELAY_THR_PCT = 500, + MAX_DELAY_THR_PCT = 25000, + MIN_DELAY = 250, + MAX_DELAY = 250000, + DFGV_USAGE_PCT = 50, + DFGV_PERIOD = 100000, + MAX_LAGGING_PERIODS = 10, + IOC_PAGE_SHIFT = 12, + IOC_PAGE_SIZE = 4096, + IOC_SECT_TO_PAGE_SHIFT = 3, + LCOEF_RANDIO_PAGES = 4096, +}; + +enum ioc_running { + IOC_IDLE = 0, + IOC_RUNNING = 1, + IOC_STOP = 2, +}; + +enum { + QOS_ENABLE = 0, + QOS_CTRL = 1, + NR_QOS_CTRL_PARAMS = 2, +}; + +enum { + QOS_RPPM = 0, + QOS_RLAT = 1, + QOS_WPPM = 2, + QOS_WLAT = 3, + QOS_MIN = 4, + QOS_MAX = 5, + NR_QOS_PARAMS = 6, +}; + +enum { + COST_CTRL = 0, + COST_MODEL = 1, + NR_COST_CTRL_PARAMS = 2, +}; + +enum { + I_LCOEF_RBPS = 0, + I_LCOEF_RSEQIOPS = 1, + I_LCOEF_RRANDIOPS = 2, + I_LCOEF_WBPS = 3, + I_LCOEF_WSEQIOPS = 4, + I_LCOEF_WRANDIOPS = 5, + NR_I_LCOEFS = 6, +}; + +enum { + LCOEF_RPAGE = 0, + LCOEF_RSEQIO = 1, + LCOEF_RRANDIO = 2, + LCOEF_WPAGE = 3, + LCOEF_WSEQIO = 4, + LCOEF_WRANDIO = 5, + NR_LCOEFS = 6, +}; + +enum { + AUTOP_INVALID = 0, + AUTOP_HDD = 1, + AUTOP_SSD_QD1 = 2, + AUTOP_SSD_DFL = 3, + AUTOP_SSD_FAST = 4, +}; + +struct ioc_params { + u32 qos[6]; + u64 i_lcoefs[6]; + u64 lcoefs[6]; + u32 too_fast_vrate_pct; + u32 too_slow_vrate_pct; +}; + +struct ioc_margins { + s64 min; + s64 low; + s64 target; +}; + +struct ioc_missed { + local_t nr_met; + local_t nr_missed; + u32 last_met; + u32 last_missed; +}; + +struct ioc_pcpu_stat { + struct ioc_missed missed[2]; + local64_t rq_wait_ns; + u64 last_rq_wait_ns; +}; + +struct ioc { + struct rq_qos rqos; + bool enabled; + struct ioc_params params; + struct ioc_margins margins; + u32 period_us; + u32 timer_slack_ns; + u64 vrate_min; + u64 vrate_max; + spinlock_t lock; + struct timer_list timer; + struct list_head active_iocgs; + struct ioc_pcpu_stat *pcpu_stat; + enum ioc_running running; + atomic64_t vtime_rate; + u64 vtime_base_rate; + s64 vtime_err; + seqcount_spinlock_t period_seqcount; + long: 32; + u64 period_at; + u64 period_at_vtime; + atomic64_t cur_period; + int busy_level; + bool weights_updated; + atomic_t hweight_gen; + long: 32; + u64 dfgv_period_at; + u64 dfgv_period_rem; + u64 dfgv_usage_us_sum; + u64 autop_too_fast_at; + u64 autop_too_slow_at; + int autop_idx; + bool user_qos_params: 1; + bool user_cost_model: 1; +}; + +struct iocg_pcpu_stat { + local64_t abs_vusage; +}; + +struct iocg_stat { + u64 usage_us; + u64 wait_us; + u64 indebt_us; + u64 indelay_us; +}; + +struct ioc_gq { + struct blkg_policy_data pd; + struct ioc *ioc; + u32 cfg_weight; + u32 weight; + u32 active; + u32 inuse; + u32 last_inuse; + long: 32; + s64 saved_margin; + sector_t cursor; + atomic64_t vtime; + atomic64_t done_vtime; + u64 abs_vdebt; + u64 delay; + u64 delay_at; + atomic64_t active_period; + struct list_head active_list; + u64 child_active_sum; + u64 child_inuse_sum; + u64 child_adjusted_sum; + int hweight_gen; + u32 hweight_active; + u32 hweight_inuse; + u32 hweight_donating; + u32 hweight_after_donation; + struct list_head walk_list; + struct list_head surplus_list; + 
struct wait_queue_head waitq; + struct hrtimer waitq_timer; + u64 activated_at; + struct iocg_pcpu_stat *pcpu_stat; + long: 32; + struct iocg_stat stat; + struct iocg_stat last_stat; + u64 last_stat_abs_vusage; + u64 usage_delta_us; + u64 wait_since; + u64 indebt_since; + u64 indelay_since; + int level; + struct ioc_gq *ancestors[0]; + long: 32; +}; + +struct ioc_cgrp { + struct blkcg_policy_data cpd; + unsigned int dfl_weight; +}; + +struct ioc_now { + u64 now_ns; + u64 now; + u64 vnow; +}; + +struct iocg_wait { + struct wait_queue_entry wait; + struct bio *bio; + u64 abs_cost; + bool committed; + long: 32; +}; + +struct iocg_wake_ctx { + struct ioc_gq *iocg; + u32 hw_inuse; + s64 vbudget; +}; + +struct ring_buffer_event { + u32 type_len: 5; + u32 time_delta: 27; + u32 array[0]; +}; + +struct bpf_raw_tp_link { + struct bpf_link link; + struct bpf_raw_event_map *btp; + long: 32; + u64 cookie; +}; + +struct trace_event_buffer { + struct trace_buffer *buffer; + struct ring_buffer_event *event; + struct trace_event_file *trace_file; + void *entry; + unsigned int trace_ctx; + struct pt_regs *regs; +}; + +enum { + EVENT_FILE_FL_ENABLED = 1, + EVENT_FILE_FL_RECORDED_CMD = 2, + EVENT_FILE_FL_RECORDED_TGID = 4, + EVENT_FILE_FL_FILTERED = 8, + EVENT_FILE_FL_NO_SET_FILTER = 16, + EVENT_FILE_FL_SOFT_MODE = 32, + EVENT_FILE_FL_SOFT_DISABLED = 64, + EVENT_FILE_FL_TRIGGER_MODE = 128, + EVENT_FILE_FL_TRIGGER_COND = 256, + EVENT_FILE_FL_PID_FILTER = 512, + EVENT_FILE_FL_WAS_ENABLED = 1024, + EVENT_FILE_FL_FREED = 2048, +}; + +struct trace_event_raw_iocost_iocg_state { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u64 vrate; + u64 last_period; + u64 cur_period; + u64 vtime; + u32 weight; + u32 inuse; + u64 hweight_active; + u64 hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocg_inuse_update { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u32 old_inuse; + u32 new_inuse; + u64 old_hweight_inuse; + u64 new_hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocost_ioc_vrate_adj { + struct trace_entry ent; + u32 __data_loc_devname; + long: 32; + u64 old_vrate; + u64 new_vrate; + int busy_level; + u32 read_missed_ppm; + u32 write_missed_ppm; + u32 rq_wait_pct; + int nr_lagging; + int nr_shortages; + char __data[0]; +}; + +struct trace_event_raw_iocost_iocg_forgive_debt { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u32 usage_pct; + long: 32; + u64 old_debt; + u64 new_debt; + u64 old_delay; + u64 new_delay; + char __data[0]; +}; + +struct trace_event_data_offsets_iocost_iocg_state { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +struct trace_event_data_offsets_iocg_inuse_update { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +struct trace_event_data_offsets_iocost_ioc_vrate_adj { + u32 devname; + const void *devname_ptr_; +}; + +struct trace_event_data_offsets_iocost_iocg_forgive_debt { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +typedef void (*btf_trace_iocost_iocg_activate)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_iocg_idle)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_inuse_shortage)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + 
+typedef void (*btf_trace_iocost_inuse_transfer)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_adjust)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_ioc_vrate_adj)(void *, struct ioc *, u64, u32 *, u32, int, int); + +typedef void (*btf_trace_iocost_iocg_forgive_debt)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u64, u64, u64, u64); + +enum pcpu_fc { + PCPU_FC_AUTO = 0, + PCPU_FC_EMBED = 1, + PCPU_FC_PAGE = 2, + PCPU_FC_NR = 3, +}; + +struct raid6_calls { + void (*gen_syndrome)(int, size_t, void **); + void (*xor_syndrome)(int, int, int, size_t, void **); + int (*valid)(); + const char *name; + int priority; +}; + +struct raid6_recov_calls { + void (*data2)(int, size_t, int, int, void **); + void (*datap)(int, size_t, int, void **); + int (*valid)(); + const char *name; + int priority; +}; + +enum OID { + OID_id_dsa_with_sha1 = 0, + OID_id_dsa = 1, + OID_id_ecPublicKey = 2, + OID_id_prime192v1 = 3, + OID_id_prime256v1 = 4, + OID_id_ecdsa_with_sha1 = 5, + OID_id_ecdsa_with_sha224 = 6, + OID_id_ecdsa_with_sha256 = 7, + OID_id_ecdsa_with_sha384 = 8, + OID_id_ecdsa_with_sha512 = 9, + OID_rsaEncryption = 10, + OID_sha1WithRSAEncryption = 11, + OID_sha256WithRSAEncryption = 12, + OID_sha384WithRSAEncryption = 13, + OID_sha512WithRSAEncryption = 14, + OID_sha224WithRSAEncryption = 15, + OID_data = 16, + OID_signed_data = 17, + OID_email_address = 18, + OID_contentType = 19, + OID_messageDigest = 20, + OID_signingTime = 21, + OID_smimeCapabilites = 22, + OID_smimeAuthenticatedAttrs = 23, + OID_mskrb5 = 24, + OID_krb5 = 25, + OID_krb5u2u = 26, + OID_msIndirectData = 27, + OID_msStatementType = 28, + OID_msSpOpusInfo = 29, + OID_msPeImageDataObjId = 30, + OID_msIndividualSPKeyPurpose = 31, + OID_msOutlookExpress = 32, + OID_ntlmssp = 33, + OID_negoex = 34, + OID_spnego = 35, + OID_IAKerb = 36, + OID_PKU2U = 37, + OID_Scram = 38, + OID_certAuthInfoAccess = 39, + OID_sha1 = 40, + OID_id_ansip384r1 = 41, + OID_id_ansip521r1 = 42, + OID_sha256 = 43, + OID_sha384 = 44, + OID_sha512 = 45, + OID_sha224 = 46, + OID_commonName = 47, + OID_surname = 48, + OID_countryName = 49, + OID_locality = 50, + OID_stateOrProvinceName = 51, + OID_organizationName = 52, + OID_organizationUnitName = 53, + OID_title = 54, + OID_description = 55, + OID_name = 56, + OID_givenName = 57, + OID_initials = 58, + OID_generationalQualifier = 59, + OID_subjectKeyIdentifier = 60, + OID_keyUsage = 61, + OID_subjectAltName = 62, + OID_issuerAltName = 63, + OID_basicConstraints = 64, + OID_crlDistributionPoints = 65, + OID_certPolicies = 66, + OID_authorityKeyIdentifier = 67, + OID_extKeyUsage = 68, + OID_NetlogonMechanism = 69, + OID_appleLocalKdcSupported = 70, + OID_gostCPSignA = 71, + OID_gostCPSignB = 72, + OID_gostCPSignC = 73, + OID_gost2012PKey256 = 74, + OID_gost2012PKey512 = 75, + OID_gost2012Digest256 = 76, + OID_gost2012Digest512 = 77, + OID_gost2012Signature256 = 78, + OID_gost2012Signature512 = 79, + OID_gostTC26Sign256A = 80, + OID_gostTC26Sign256B = 81, + OID_gostTC26Sign256C = 82, + OID_gostTC26Sign256D = 83, + OID_gostTC26Sign512A = 84, + OID_gostTC26Sign512B = 85, + OID_gostTC26Sign512C = 86, + OID_sm2 = 87, + OID_sm3 = 88, + OID_SM2_with_SM3 = 89, + OID_sm3WithRSAEncryption = 90, + OID_TPMLoadableKey = 91, + OID_TPMImportableKey = 92, + OID_TPMSealedData = 93, + OID_sha3_256 = 94, + OID_sha3_384 = 95, + OID_sha3_512 = 96, + OID_id_ecdsa_with_sha3_256 = 97, + 
OID_id_ecdsa_with_sha3_384 = 98, + OID_id_ecdsa_with_sha3_512 = 99, + OID_id_rsassa_pkcs1_v1_5_with_sha3_256 = 100, + OID_id_rsassa_pkcs1_v1_5_with_sha3_384 = 101, + OID_id_rsassa_pkcs1_v1_5_with_sha3_512 = 102, + OID__NR = 103, +}; + +struct pci_device_id { + __u32 vendor; + __u32 device; + __u32 subvendor; + __u32 subdevice; + __u32 class; + __u32 class_mask; + kernel_ulong_t driver_data; + __u32 override_only; +}; + +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + long unsigned int flags; + long unsigned int desc; + struct resource *parent; + struct resource *sibling; + struct resource *child; +}; + +typedef resource_size_t (*resource_alignf)(void *, const struct resource *, resource_size_t, resource_size_t); + +struct pci_bus; + +struct hotplug_slot; + +struct pci_slot { + struct pci_bus *bus; + struct list_head list; + struct hotplug_slot *hotplug; + unsigned char number; + struct kobject kobj; +}; + +typedef short unsigned int pci_bus_flags_t; + +struct pci_dev; + +struct pci_ops; + +struct pci_bus { + struct list_head node; + struct pci_bus *parent; + struct list_head children; + struct list_head devices; + struct pci_dev *self; + struct list_head slots; + struct resource *resource[4]; + struct list_head resources; + struct resource busn_res; + struct pci_ops *ops; + void *sysdata; + struct proc_dir_entry *procdir; + unsigned char number; + unsigned char primary; + unsigned char max_bus_speed; + unsigned char cur_bus_speed; + int domain_nr; + char name[48]; + short unsigned int bridge_ctl; + pci_bus_flags_t bus_flags; + struct device *bridge; + long: 32; + struct device dev; + struct bin_attribute *legacy_io; + struct bin_attribute *legacy_mem; + unsigned int is_added: 1; + unsigned int unsafe_warn: 1; + long: 32; +}; + +enum { + PCI_STD_RESOURCES = 0, + PCI_STD_RESOURCE_END = 5, + PCI_ROM_RESOURCE = 6, + PCI_BRIDGE_RESOURCES = 7, + PCI_BRIDGE_RESOURCE_END = 10, + PCI_NUM_RESOURCES = 11, + DEVICE_COUNT_RESOURCE = 11, +}; + +typedef int pci_power_t; + +typedef unsigned int pci_channel_state_t; + +typedef short unsigned int pci_dev_flags_t; + +struct pci_vpd { + struct mutex lock; + unsigned int len; + u8 cap; +}; + +struct pcie_bwctrl_data; + +struct pci_driver; + +struct pcie_link_state; + +struct pci_dev { + struct list_head bus_list; + struct pci_bus *bus; + struct pci_bus *subordinate; + void *sysdata; + struct proc_dir_entry *procent; + struct pci_slot *slot; + unsigned int devfn; + short unsigned int vendor; + short unsigned int device; + short unsigned int subsystem_vendor; + short unsigned int subsystem_device; + unsigned int class; + u8 revision; + u8 hdr_type; + u32 devcap; + u8 pcie_cap; + u8 msi_cap; + u8 msix_cap; + u8 pcie_mpss: 3; + u8 rom_base_reg; + u8 pin; + u16 pcie_flags_reg; + long unsigned int *dma_alias_mask; + struct pci_driver *driver; + long: 32; + u64 dma_mask; + struct device_dma_parameters dma_parms; + pci_power_t current_state; + u8 pm_cap; + unsigned int pme_support: 5; + unsigned int pme_poll: 1; + unsigned int pinned: 1; + unsigned int config_rrs_sv: 1; + unsigned int imm_ready: 1; + unsigned int d1_support: 1; + unsigned int d2_support: 1; + unsigned int no_d1d2: 1; + unsigned int no_d3cold: 1; + unsigned int bridge_d3: 1; + unsigned int d3cold_allowed: 1; + unsigned int mmio_always_on: 1; + unsigned int wakeup_prepared: 1; + unsigned int skip_bus_pm: 1; + unsigned int ignore_hotplug: 1; + unsigned int hotplug_user_indicators: 1; + unsigned int clear_retrain_link: 1; + unsigned int d3hot_delay; + unsigned int 
d3cold_delay; + u16 l1ss; + struct pcie_link_state *link_state; + unsigned int ltr_path: 1; + unsigned int pasid_no_tlp: 1; + unsigned int eetlp_prefix_path: 1; + pci_channel_state_t error_state; + long: 32; + struct device dev; + int cfg_size; + unsigned int irq; + struct resource resource[11]; + struct resource driver_exclusive_resource; + bool match_driver; + unsigned int transparent: 1; + unsigned int io_window: 1; + unsigned int pref_window: 1; + unsigned int pref_64_window: 1; + unsigned int multifunction: 1; + unsigned int is_busmaster: 1; + unsigned int no_msi: 1; + unsigned int no_64bit_msi: 1; + unsigned int block_cfg_access: 1; + unsigned int broken_parity_status: 1; + unsigned int irq_reroute_variant: 2; + unsigned int msi_enabled: 1; + unsigned int msix_enabled: 1; + unsigned int ari_enabled: 1; + unsigned int ats_enabled: 1; + unsigned int pasid_enabled: 1; + unsigned int pri_enabled: 1; + unsigned int tph_enabled: 1; + unsigned int is_managed: 1; + unsigned int is_msi_managed: 1; + unsigned int needs_freset: 1; + unsigned int state_saved: 1; + unsigned int is_physfn: 1; + unsigned int is_virtfn: 1; + unsigned int is_hotplug_bridge: 1; + unsigned int shpc_managed: 1; + unsigned int is_thunderbolt: 1; + unsigned int untrusted: 1; + unsigned int external_facing: 1; + unsigned int broken_intx_masking: 1; + unsigned int io_window_1k: 1; + unsigned int irq_managed: 1; + unsigned int non_compliant_bars: 1; + unsigned int is_probed: 1; + unsigned int link_active_reporting: 1; + unsigned int no_vf_scan: 1; + unsigned int no_command_memory: 1; + unsigned int rom_bar_overlap: 1; + unsigned int rom_attr_enabled: 1; + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; + spinlock_t pcie_cap_lock; + u32 saved_config_space[16]; + struct hlist_head saved_cap_space; + struct bin_attribute *res_attr[11]; + struct bin_attribute *res_attr_wc[11]; + void *msix_base; + raw_spinlock_t msi_lock; + struct pci_vpd vpd; + struct pcie_bwctrl_data *link_bwctrl; + u16 acs_cap; + u8 supported_speeds; + phys_addr_t rom; + size_t romlen; + const char *driver_override; + long unsigned int priv_flags; + u8 reset_methods[8]; +}; + +struct pci_dynids { + spinlock_t lock; + struct list_head list; +}; + +struct pci_error_handlers; + +struct pci_driver { + const char *name; + const struct pci_device_id *id_table; + int (*probe)(struct pci_dev *, const struct pci_device_id *); + void (*remove)(struct pci_dev *); + int (*suspend)(struct pci_dev *, pm_message_t); + int (*resume)(struct pci_dev *); + void (*shutdown)(struct pci_dev *); + int (*sriov_configure)(struct pci_dev *, int); + int (*sriov_set_msix_vec_count)(struct pci_dev *, int); + u32 (*sriov_get_vf_total_msix)(struct pci_dev *); + const struct pci_error_handlers *err_handler; + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + struct device_driver driver; + struct pci_dynids dynids; + bool driver_managed_dma; +}; + +struct pci_host_bridge { + struct device dev; + struct pci_bus *bus; + struct pci_ops *ops; + struct pci_ops *child_ops; + void *sysdata; + int busnr; + int domain_nr; + struct list_head windows; + struct list_head dma_ranges; + u8 (*swizzle_irq)(struct pci_dev *, u8 *); + int (*map_irq)(const struct pci_dev *, u8, u8); + void (*release_fn)(struct pci_host_bridge *); + void *release_data; + unsigned int ignore_reset_delay: 1; + unsigned int no_ext_tags: 1; + unsigned int no_inc_mrrs: 1; + unsigned int native_aer: 1; + unsigned int native_pcie_hotplug: 1; + unsigned int native_shpc_hotplug: 1; + unsigned int 
native_pme: 1; + unsigned int native_ltr: 1; + unsigned int native_dpc: 1; + unsigned int native_cxl_error: 1; + unsigned int preserve_config: 1; + unsigned int size_windows: 1; + unsigned int msi_domain: 1; + resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int private[0]; +}; + +struct pci_ops { + int (*add_bus)(struct pci_bus *); + void (*remove_bus)(struct pci_bus *); + void * (*map_bus)(struct pci_bus *, unsigned int, int); + int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); + int (*write)(struct pci_bus *, unsigned int, int, int, u32); +}; + +typedef u32 pci_bus_addr_t; + +struct pci_bus_region { + pci_bus_addr_t start; + pci_bus_addr_t end; +}; + +typedef unsigned int pci_ers_result_t; + +struct pci_error_handlers { + pci_ers_result_t (*error_detected)(struct pci_dev *, pci_channel_state_t); + pci_ers_result_t (*mmio_enabled)(struct pci_dev *); + pci_ers_result_t (*slot_reset)(struct pci_dev *); + void (*reset_prepare)(struct pci_dev *); + void (*reset_done)(struct pci_dev *); + void (*resume)(struct pci_dev *); + void (*cor_error_detected)(struct pci_dev *); +}; + +enum vesa_blank_mode { + VESA_NO_BLANKING = 0, + VESA_VSYNC_SUSPEND = 1, + VESA_HSYNC_SUSPEND = 2, + VESA_POWERDOWN = 3, + VESA_BLANK_MAX = 3, +}; + +struct fb_videomode { + const char *name; + u32 refresh; + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +struct fb_cvt_data { + u32 xres; + u32 yres; + u32 refresh; + u32 f_refresh; + u32 pixclock; + u32 hperiod; + u32 hblank; + u32 hfreq; + u32 htotal; + u32 vtotal; + u32 vsync; + u32 hsync; + u32 h_front_porch; + u32 h_back_porch; + u32 v_front_porch; + u32 v_back_porch; + u32 h_margin; + u32 v_margin; + u32 interlace; + u32 aspect_ratio; + u32 active_pixels; + u32 flags; + u32 status; +}; + +typedef unsigned char u_char; + +typedef short unsigned int u_short; + +typedef unsigned int u_int; + +typedef u16 uint16_t; + +typedef long unsigned int irq_hw_number_t; + +enum { + KERNEL_PARAM_FL_UNSAFE = 1, + KERNEL_PARAM_FL_HWPARAM = 2, +}; + +struct ld_semaphore { + atomic_long_t count; + raw_spinlock_t wait_lock; + unsigned int wait_readers; + struct list_head read_wait; + struct list_head write_wait; +}; + +typedef unsigned int tcflag_t; + +typedef unsigned char cc_t; + +typedef unsigned int speed_t; + +struct ktermios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct winsize { + short unsigned int ws_row; + short unsigned int ws_col; + short unsigned int ws_xpixel; + short unsigned int ws_ypixel; +}; + +struct tty_driver; + +struct tty_port; + +struct tty_operations; + +struct tty_ldisc; + +struct tty_struct { + struct kref kref; + int index; + struct device *dev; + struct tty_driver *driver; + struct tty_port *port; + const struct tty_operations *ops; + struct tty_ldisc *ldisc; + struct ld_semaphore ldisc_sem; + struct mutex atomic_write_lock; + struct mutex legacy_mutex; + struct mutex throttle_mutex; + struct rw_semaphore termios_rwsem; + struct mutex winsize_mutex; + struct ktermios termios; + struct ktermios termios_locked; + char name[64]; + long unsigned int flags; + int count; + 
unsigned int receive_room; + struct winsize winsize; + struct { + spinlock_t lock; + bool stopped; + bool tco_stopped; + } flow; + struct { + struct pid *pgrp; + struct pid *session; + spinlock_t lock; + unsigned char pktstatus; + bool packet; + } ctrl; + bool hw_stopped; + bool closing; + int flow_change; + struct tty_struct *link; + struct fasync_struct *fasync; + wait_queue_head_t write_wait; + wait_queue_head_t read_wait; + struct work_struct hangup_work; + void *disc_data; + void *driver_data; + spinlock_t files_lock; + int write_cnt; + u8 *write_buf; + struct list_head tty_files; + struct work_struct SAK_work; +}; + +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *, struct device_attribute *, char *); + ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); +}; + +enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_WIRED = 1, + DOMAIN_BUS_GENERIC_MSI = 2, + DOMAIN_BUS_PCI_MSI = 3, + DOMAIN_BUS_PLATFORM_MSI = 4, + DOMAIN_BUS_NEXUS = 5, + DOMAIN_BUS_IPI = 6, + DOMAIN_BUS_FSL_MC_MSI = 7, + DOMAIN_BUS_TI_SCI_INTA_MSI = 8, + DOMAIN_BUS_WAKEUP = 9, + DOMAIN_BUS_VMD_MSI = 10, + DOMAIN_BUS_PCI_DEVICE_MSI = 11, + DOMAIN_BUS_PCI_DEVICE_MSIX = 12, + DOMAIN_BUS_DMAR = 13, + DOMAIN_BUS_AMDVI = 14, + DOMAIN_BUS_DEVICE_MSI = 15, + DOMAIN_BUS_WIRED_TO_MSI = 16, +}; + +struct irq_domain_ops; + +struct irq_domain_chip_generic; + +struct msi_parent_ops; + +struct irq_data; + +struct irq_domain { + struct list_head link; + const char *name; + const struct irq_domain_ops *ops; + void *host_data; + unsigned int flags; + unsigned int mapcount; + struct mutex mutex; + struct irq_domain *root; + struct fwnode_handle *fwnode; + enum irq_domain_bus_token bus_token; + struct irq_domain_chip_generic *gc; + struct device *dev; + struct device *pm_dev; + struct irq_domain *parent; + const struct msi_parent_ops *msi_parent_ops; + void (*exit)(struct irq_domain *); + irq_hw_number_t hwirq_max; + unsigned int revmap_size; + struct xarray revmap_tree; + struct irq_data *revmap[0]; +}; + +enum con_scroll { + SM_UP = 0, + SM_DOWN = 1, +}; + +enum vc_intensity { + VCI_HALF_BRIGHT = 0, + VCI_NORMAL = 1, + VCI_BOLD = 2, + VCI_MASK = 3, +}; + +struct vc_data; + +struct console_font; + +struct consw { + struct module *owner; + const char * (*con_startup)(); + void (*con_init)(struct vc_data *, bool); + void (*con_deinit)(struct vc_data *); + void (*con_clear)(struct vc_data *, unsigned int, unsigned int, unsigned int); + void (*con_putc)(struct vc_data *, u16, unsigned int, unsigned int); + void (*con_putcs)(struct vc_data *, const u16 *, unsigned int, unsigned int, unsigned int); + void (*con_cursor)(struct vc_data *, bool); + bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int); + bool (*con_switch)(struct vc_data *); + bool (*con_blank)(struct vc_data *, enum vesa_blank_mode, bool); + int (*con_font_set)(struct vc_data *, const struct console_font *, unsigned int, unsigned int); + int (*con_font_get)(struct vc_data *, struct console_font *, unsigned int); + int (*con_font_default)(struct vc_data *, struct console_font *, const char *); + int (*con_resize)(struct vc_data *, unsigned int, unsigned int, bool); + void (*con_set_palette)(struct vc_data *, const unsigned char *); + void (*con_scrolldelta)(struct vc_data *, int); + bool (*con_set_origin)(struct vc_data *); + void (*con_save_screen)(struct vc_data *); + u8 (*con_build_attr)(struct vc_data *, u8, enum vc_intensity, bool, bool, bool, bool); + void 
(*con_invert_region)(struct vc_data *, u16 *, int); + void (*con_debug_enter)(struct vc_data *); + void (*con_debug_leave)(struct vc_data *); +}; + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + unsigned int used; + unsigned int size; + unsigned int commit; + unsigned int lookahead; + unsigned int read; + bool flags; + long: 0; + u8 data[0]; +}; + +struct tty_bufhead { + struct tty_buffer *head; + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; + atomic_t mem_used; + int mem_limit; + struct tty_buffer *tail; +}; + +struct __kfifo { + unsigned int in; + unsigned int out; + unsigned int mask; + unsigned int esize; + void *data; +}; + +struct tty_port_operations; + +struct tty_port_client_operations; + +struct tty_port { + struct tty_bufhead buf; + struct tty_struct *tty; + struct tty_struct *itty; + const struct tty_port_operations *ops; + const struct tty_port_client_operations *client_ops; + spinlock_t lock; + int blocked_open; + int count; + wait_queue_head_t open_wait; + wait_queue_head_t delta_msr_wait; + long unsigned int flags; + long unsigned int iflags; + unsigned char console: 1; + struct mutex mutex; + struct mutex buf_mutex; + u8 *xmit_buf; + struct { + union { + struct __kfifo kfifo; + u8 *type; + const u8 *const_type; + char (*rectype)[0]; + u8 *ptr; + const u8 *ptr_const; + }; + u8 buf[0]; + } xmit_fifo; + unsigned int close_delay; + unsigned int closing_wait; + int drain_delay; + struct kref kref; + void *client_data; +}; + +struct vc_state { + unsigned int x; + unsigned int y; + unsigned char color; + unsigned char Gx_charset[2]; + unsigned int charset: 1; + enum vc_intensity intensity; + bool italic; + bool underline; + bool blink; + bool reverse; +}; + +struct console_font { + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct vt_mode { + char mode; + char waitv; + short int relsig; + short int acqsig; + short int frsig; +}; + +struct uni_pagedict; + +struct vc_data { + struct tty_port port; + struct vc_state state; + struct vc_state saved_state; + short unsigned int vc_num; + unsigned int vc_cols; + unsigned int vc_rows; + unsigned int vc_size_row; + unsigned int vc_scan_lines; + unsigned int vc_cell_height; + long unsigned int vc_origin; + long unsigned int vc_scr_end; + long unsigned int vc_visible_origin; + unsigned int vc_top; + unsigned int vc_bottom; + const struct consw *vc_sw; + short unsigned int *vc_screenbuf; + unsigned int vc_screenbuf_size; + unsigned char vc_mode; + unsigned char vc_attr; + unsigned char vc_def_color; + unsigned char vc_ulcolor; + unsigned char vc_itcolor; + unsigned char vc_halfcolor; + unsigned int vc_cursor_type; + short unsigned int vc_complement_mask; + short unsigned int vc_s_complement_mask; + long unsigned int vc_pos; + short unsigned int vc_hi_font_mask; + struct console_font vc_font; + short unsigned int vc_video_erase_char; + unsigned int vc_state; + unsigned int vc_npar; + unsigned int vc_par[16]; + struct vt_mode vt_mode; + struct pid *vt_pid; + int vt_newvt; + wait_queue_head_t paste_wait; + unsigned int vc_disp_ctrl: 1; + unsigned int vc_toggle_meta: 1; + unsigned int vc_decscnm: 1; + unsigned int vc_decom: 1; + unsigned int vc_decawm: 1; + unsigned int vc_deccm: 1; + unsigned int vc_decim: 1; + unsigned int vc_priv: 3; + unsigned int vc_need_wrap: 1; + unsigned int vc_can_do_color: 1; + unsigned int vc_report_mouse: 2; + unsigned char vc_utf: 1; + 
unsigned char vc_utf_count; + int vc_utf_char; + long unsigned int vc_tab_stop[8]; + unsigned char vc_palette[48]; + short unsigned int *vc_translate; + unsigned int vc_bell_pitch; + unsigned int vc_bell_duration; + short unsigned int vc_cur_blink_ms; + struct vc_data **vc_display_fg; + struct uni_pagedict *uni_pagedict; + struct uni_pagedict **uni_pagedict_loc; + u32 **vc_uni_lines; +}; + +struct tty_driver { + struct kref kref; + struct cdev **cdevs; + struct module *owner; + const char *driver_name; + const char *name; + int name_base; + int major; + int minor_start; + unsigned int num; + short int type; + short int subtype; + struct ktermios init_termios; + long unsigned int flags; + struct proc_dir_entry *proc_entry; + struct tty_driver *other; + struct tty_struct **ttys; + struct tty_port **ports; + struct ktermios **termios; + void *driver_state; + const struct tty_operations *ops; + struct list_head tty_drivers; +}; + +enum irqreturn { + IRQ_NONE = 0, + IRQ_HANDLED = 1, + IRQ_WAKE_THREAD = 2, +}; + +typedef enum irqreturn irqreturn_t; + +struct irq_desc; + +typedef void (*irq_flow_handler_t)(struct irq_desc *); + +struct msi_desc; + +struct irq_common_data { + unsigned int state_use_accessors; + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; + unsigned int ipi_offset; +}; + +struct irq_chip; + +struct irq_data { + u32 mask; + unsigned int irq; + irq_hw_number_t hwirq; + struct irq_common_data *common; + struct irq_chip *chip; + struct irq_domain *domain; + struct irq_data *parent_data; + void *chip_data; +}; + +struct irqstat; + +struct irqaction; + +struct irq_affinity_notify; + +struct irq_desc { + struct irq_common_data irq_common_data; + struct irq_data irq_data; + struct irqstat *kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; + unsigned int wake_depth; + unsigned int tot_count; + unsigned int irq_count; + long unsigned int last_unhandled; + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; + const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; + long unsigned int threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; + unsigned int nr_actions; + unsigned int no_suspend_depth; + unsigned int cond_suspend_depth; + unsigned int force_resume_depth; + struct proc_dir_entry *dir; + struct callback_head rcu; + struct kobject kobj; + struct mutex request_mutex; + int parent_irq; + struct module *owner; + const char *name; + struct hlist_node resend_node; + long: 32; + long: 32; +}; + +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING = 0, + IRQCHIP_STATE_ACTIVE = 1, + IRQCHIP_STATE_MASKED = 2, + IRQCHIP_STATE_LINE_LEVEL = 3, +}; + +struct msi_msg; + +struct irq_chip { + const char *name; + unsigned int (*irq_startup)(struct irq_data *); + void (*irq_shutdown)(struct irq_data *); + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_ack)(struct irq_data *); + void (*irq_mask)(struct irq_data *); + void (*irq_mask_ack)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_eoi)(struct irq_data *); + int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); + int (*irq_retrigger)(struct irq_data *); + int (*irq_set_type)(struct irq_data *, unsigned int); + 
int (*irq_set_wake)(struct irq_data *, unsigned int); + void (*irq_bus_lock)(struct irq_data *); + void (*irq_bus_sync_unlock)(struct irq_data *); + void (*irq_suspend)(struct irq_data *); + void (*irq_resume)(struct irq_data *); + void (*irq_pm_shutdown)(struct irq_data *); + void (*irq_calc_mask)(struct irq_data *); + void (*irq_print_chip)(struct irq_data *, struct seq_file *); + int (*irq_request_resources)(struct irq_data *); + void (*irq_release_resources)(struct irq_data *); + void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); + void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); + int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); + int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); + int (*irq_set_vcpu_affinity)(struct irq_data *, void *); + void (*ipi_send_single)(struct irq_data *, unsigned int); + void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); + int (*irq_nmi_setup)(struct irq_data *); + void (*irq_nmi_teardown)(struct irq_data *); + long unsigned int flags; +}; + +struct irqstat { + unsigned int cnt; +}; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +struct irqaction { + irq_handler_t handler; + void *dev_id; + void *percpu_dev_id; + struct irqaction *next; + irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + long unsigned int thread_flags; + long unsigned int thread_mask; + const char *name; + struct proc_dir_entry *dir; + long: 32; + long: 32; + long: 32; +}; + +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *); + void (*release)(struct kref *); +}; + +struct irq_chip_regs { + long unsigned int enable; + long unsigned int disable; + long unsigned int mask; + long unsigned int ack; + long unsigned int eoi; + long unsigned int type; +}; + +struct irq_chip_type { + struct irq_chip chip; + struct irq_chip_regs regs; + irq_flow_handler_t handler; + u32 type; + u32 mask_cache_priv; + u32 *mask_cache; +}; + +struct irq_chip_generic { + raw_spinlock_t lock; + void *reg_base; + u32 (*reg_readl)(void *); + void (*reg_writel)(u32, void *); + void (*suspend)(struct irq_chip_generic *); + void (*resume)(struct irq_chip_generic *); + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; + u32 wake_enabled; + u32 wake_active; + unsigned int num_ct; + void *private; + long unsigned int installed; + long unsigned int unused; + struct irq_domain *domain; + struct list_head list; + struct irq_chip_type chip_types[0]; +}; + +enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1, + IRQ_GC_INIT_NESTED_LOCK = 2, + IRQ_GC_MASK_CACHE_PER_TYPE = 4, + IRQ_GC_NO_MASK = 8, + IRQ_GC_BE_IO = 16, +}; + +struct irq_domain_chip_generic { + unsigned int irqs_per_chip; + unsigned int num_chips; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + void (*exit)(struct irq_chip_generic *); + struct irq_chip_generic *gc[0]; +}; + +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[16]; +}; + +struct irq_domain_ops { + int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); + int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); + int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t); + void (*unmap)(struct irq_domain *, unsigned int); + int (*xlate)(struct 
irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *); + int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); + void (*free)(struct irq_domain *, unsigned int, unsigned int); + int (*activate)(struct irq_domain *, struct irq_data *, bool); + void (*deactivate)(struct irq_domain *, struct irq_data *); + int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *); +}; + +struct msi_domain_info; + +struct msi_parent_ops { + u32 supported_flags; + u32 required_flags; + u32 bus_select_token; + u32 bus_select_mask; + const char *prefix; + bool (*init_dev_msi_info)(struct device *, struct irq_domain *, struct irq_domain *, struct msi_domain_info *); +}; + +struct fb_fix_screeninfo { + char id[16]; + long unsigned int smem_start; + __u32 smem_len; + __u32 type; + __u32 type_aux; + __u32 visual; + __u16 xpanstep; + __u16 ypanstep; + __u16 ywrapstep; + __u32 line_length; + long unsigned int mmio_start; + __u32 mmio_len; + __u32 accel; + __u16 capabilities; + __u16 reserved[2]; +}; + +struct fb_bitfield { + __u32 offset; + __u32 length; + __u32 msb_right; +}; + +struct fb_var_screeninfo { + __u32 xres; + __u32 yres; + __u32 xres_virtual; + __u32 yres_virtual; + __u32 xoffset; + __u32 yoffset; + __u32 bits_per_pixel; + __u32 grayscale; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + __u32 nonstd; + __u32 activate; + __u32 height; + __u32 width; + __u32 accel_flags; + __u32 pixclock; + __u32 left_margin; + __u32 right_margin; + __u32 upper_margin; + __u32 lower_margin; + __u32 hsync_len; + __u32 vsync_len; + __u32 sync; + __u32 vmode; + __u32 rotate; + __u32 colorspace; + __u32 reserved[4]; +}; + +struct fb_cmap { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +enum { + FB_BLANK_UNBLANK = 0, + FB_BLANK_NORMAL = 1, + FB_BLANK_VSYNC_SUSPEND = 2, + FB_BLANK_HSYNC_SUSPEND = 3, + FB_BLANK_POWERDOWN = 4, +}; + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_fillrect { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_image { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 fg_color; + __u32 bg_color; + __u8 depth; + const char *data; + struct fb_cmap cmap; +}; + +struct fbcurpos { + __u16 x; + __u16 y; +}; + +struct fb_cursor { + __u16 set; + __u16 enable; + __u16 rop; + const char *mask; + struct fbcurpos hot; + struct fb_image image; +}; + +struct fb_chroma { + __u32 redx; + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_monspecs { + struct fb_chroma chroma; + struct fb_videomode *modedb; + __u8 manufacturer[4]; + __u8 monitor[14]; + __u8 serial_no[14]; + __u8 ascii[14]; + __u32 modedb_len; + __u32 model; + __u32 serial; + __u32 year; + __u32 week; + __u32 hfmin; + __u32 hfmax; + __u32 dclkmin; + __u32 dclkmax; + __u16 input; + __u16 dpms; + __u16 signal; + __u16 vfmin; + __u16 vfmax; + __u16 gamma; + __u16 gtf: 1; + __u16 misc; + __u8 version; + __u8 revision; + __u8 max_x; + __u8 max_y; +}; + +struct fb_info; + +struct fb_pixmap { + u8 *addr; + u32 size; + u32 offset; + u32 buf_align; + u32 scan_align; + u32 access_align; + u32 flags; + long unsigned int blit_x[2]; + long unsigned int blit_y[4]; + void (*writeio)(struct 
fb_info *, void *, void *, unsigned int); + void (*readio)(struct fb_info *, void *, void *, unsigned int); +}; + +struct lcd_device; + +struct fb_ops; + +struct fb_info { + refcount_t count; + int node; + int flags; + int fbcon_rotate_hint; + struct mutex lock; + struct mutex mm_lock; + struct fb_var_screeninfo var; + struct fb_fix_screeninfo fix; + struct fb_monspecs monspecs; + struct fb_pixmap pixmap; + struct fb_pixmap sprite; + struct fb_cmap cmap; + struct list_head modelist; + struct fb_videomode *mode; + struct lcd_device *lcd_dev; + const struct fb_ops *fbops; + struct device *device; + struct device *dev; + int class_flag; + union { + char *screen_base; + char *screen_buffer; + }; + long unsigned int screen_size; + void *pseudo_palette; + u32 state; + void *fbcon_par; + void *par; + bool skip_vt_switch; + bool skip_panic; +}; + +struct fb_blit_caps { + long unsigned int x[2]; + long unsigned int y[4]; + u32 len; + u32 flags; +}; + +struct fb_ops { + struct module *owner; + int (*fb_open)(struct fb_info *, int); + int (*fb_release)(struct fb_info *, int); + ssize_t (*fb_read)(struct fb_info *, char *, size_t, loff_t *); + ssize_t (*fb_write)(struct fb_info *, const char *, size_t, loff_t *); + int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); + int (*fb_set_par)(struct fb_info *); + int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *); + int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); + int (*fb_blank)(int, struct fb_info *); + int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); + void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); + void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); + void (*fb_imageblit)(struct fb_info *, const struct fb_image *); + int (*fb_cursor)(struct fb_info *, struct fb_cursor *); + int (*fb_sync)(struct fb_info *); + int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); + void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *); + void (*fb_destroy)(struct fb_info *); + int (*fb_debug_enter)(struct fb_info *); + int (*fb_debug_leave)(struct fb_info *); +}; + +struct serial_icounter_struct; + +struct serial_struct; + +struct tty_operations { + struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int); + int (*install)(struct tty_driver *, struct tty_struct *); + void (*remove)(struct tty_driver *, struct tty_struct *); + int (*open)(struct tty_struct *, struct file *); + void (*close)(struct tty_struct *, struct file *); + void (*shutdown)(struct tty_struct *); + void (*cleanup)(struct tty_struct *); + ssize_t (*write)(struct tty_struct *, const u8 *, size_t); + int (*put_char)(struct tty_struct *, u8); + void (*flush_chars)(struct tty_struct *); + unsigned int (*write_room)(struct tty_struct *); + unsigned int (*chars_in_buffer)(struct tty_struct *); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + void (*throttle)(struct tty_struct *); + void (*unthrottle)(struct tty_struct *); + void (*stop)(struct tty_struct *); + void (*start)(struct tty_struct *); + void (*hangup)(struct tty_struct *); + int (*break_ctl)(struct tty_struct *, int); + void (*flush_buffer)(struct 
tty_struct *); + int (*ldisc_ok)(struct tty_struct *, int); + void (*set_ldisc)(struct tty_struct *); + void (*wait_until_sent)(struct tty_struct *, int); + void (*send_xchar)(struct tty_struct *, u8); + int (*tiocmget)(struct tty_struct *); + int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); + int (*resize)(struct tty_struct *, struct winsize *); + int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); + int (*get_serial)(struct tty_struct *, struct serial_struct *); + int (*set_serial)(struct tty_struct *, struct serial_struct *); + void (*show_fdinfo)(struct tty_struct *, struct seq_file *); + int (*proc_show)(struct seq_file *, void *); +}; + +struct tty_ldisc_ops { + char *name; + int num; + int (*open)(struct tty_struct *); + void (*close)(struct tty_struct *); + void (*flush_buffer)(struct tty_struct *); + ssize_t (*read)(struct tty_struct *, struct file *, u8 *, size_t, void **, long unsigned int); + ssize_t (*write)(struct tty_struct *, struct file *, const u8 *, size_t); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); + void (*hangup)(struct tty_struct *); + void (*receive_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_struct *); + void (*dcd_change)(struct tty_struct *, bool); + size_t (*receive_buf2)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + struct module *owner; +}; + +struct tty_ldisc { + struct tty_ldisc_ops *ops; + struct tty_struct *tty; +}; + +struct tty_port_operations { + bool (*carrier_raised)(struct tty_port *); + void (*dtr_rts)(struct tty_port *, bool); + void (*shutdown)(struct tty_port *); + int (*activate)(struct tty_port *, struct tty_struct *); + void (*destruct)(struct tty_port *); +}; + +struct tty_port_client_operations { + size_t (*receive_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_port *); +}; + +struct vc { + struct vc_data *d; + struct work_struct SAK_work; +}; + +struct font_desc { + int idx; + const char *name; + unsigned int width; + unsigned int height; + unsigned int charcount; + const void *data; + int pref; +}; + +struct fbcon_display { + const u_char *fontdata; + int userfont; + u_short inverse; + short int yscroll; + int vrows; + int cursor_shape; + int con_rotate; + u32 xres_virtual; + u32 yres_virtual; + u32 height; + u32 width; + u32 bits_per_pixel; + u32 grayscale; + u32 nonstd; + u32 accel_flags; + u32 rotate; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + const struct fb_videomode *mode; +}; + +struct fbcon_ops { + void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int); + void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int); + void (*putcs)(struct vc_data *, struct fb_info *, const short unsigned int *, int, int, int, int, int); + void (*clear_margins)(struct vc_data *, struct fb_info *, int, int); + void (*cursor)(struct vc_data *, struct fb_info *, bool, int, int); + int (*update_start)(struct fb_info *); + int (*rotate_font)(struct fb_info *, struct vc_data *); + struct fb_var_screeninfo var; + 
struct delayed_work cursor_work; + struct fb_cursor cursor_state; + struct fbcon_display *p; + struct fb_info *info; + int currcon; + int cur_blink_jiffies; + int cursor_flash; + int cursor_reset; + int blank_state; + int graphics; + int save_graphics; + bool initialized; + int rotate; + int cur_rotate; + char *cursor_data; + u8 *fontbuffer; + u8 *fontdata; + u8 *cursor_src; + u32 cursor_size; + u32 fd_size; +}; + +enum { + FBCON_LOGO_CANSHOW = -1, + FBCON_LOGO_DRAW = -2, + FBCON_LOGO_DONTSHOW = -3, +}; + +typedef unsigned int uint; + +typedef struct mutex *class_mutex_t; + +typedef class_mutex_t class_mutex_intr_t; + +enum work_flags { + WORK_STRUCT_PENDING = 1, + WORK_STRUCT_INACTIVE = 2, + WORK_STRUCT_PWQ = 4, + WORK_STRUCT_LINKED = 8, + WORK_STRUCT_STATIC = 0, +}; + +struct serial_icounter_struct { + int cts; + int dsr; + int rng; + int dcd; + int rx; + int tx; + int frame; + int overrun; + int parity; + int brk; + int buf_overrun; + int reserved[9]; +}; + +struct serial_struct { + int type; + int line; + unsigned int port; + int irq; + int flags; + int xmit_fifo_size; + int custom_divisor; + int baud_base; + short unsigned int close_delay; + char io_type; + char reserved_char[1]; + int hub6; + short unsigned int closing_wait; + short unsigned int closing_wait2; + unsigned char *iomem_base; + short unsigned int iomem_reg_shift; + unsigned int port_high; + long unsigned int iomap_base; +}; + +enum cons_flags { + CON_PRINTBUFFER = 1, + CON_CONSDEV = 2, + CON_ENABLED = 4, + CON_BOOT = 8, + CON_ANYTIME = 16, + CON_BRL = 32, + CON_EXTENDED = 64, + CON_SUSPENDED = 128, + CON_NBCON = 256, +}; + +enum nbcon_prio { + NBCON_PRIO_NONE = 0, + NBCON_PRIO_NORMAL = 1, + NBCON_PRIO_EMERGENCY = 2, + NBCON_PRIO_PANIC = 3, + NBCON_PRIO_MAX = 4, +}; + +struct console; + +struct printk_buffers; + +struct nbcon_context { + struct console *console; + unsigned int spinwait_max_us; + enum nbcon_prio prio; + unsigned int allow_unsafe_takeover: 1; + unsigned int backlog: 1; + struct printk_buffers *pbufs; + long: 32; + u64 seq; +}; + +struct nbcon_write_context; + +struct console { + char name[16]; + void (*write)(struct console *, const char *, unsigned int); + int (*read)(struct console *, char *, unsigned int); + struct tty_driver * (*device)(struct console *, int *); + void (*unblank)(); + int (*setup)(struct console *, char *); + int (*exit)(struct console *); + int (*match)(struct console *, char *, int, char *); + short int flags; + short int index; + int cflag; + uint ispeed; + uint ospeed; + long: 32; + u64 seq; + long unsigned int dropped; + void *data; + struct hlist_node node; + void (*write_atomic)(struct console *, struct nbcon_write_context *); + void (*write_thread)(struct console *, struct nbcon_write_context *); + void (*device_lock)(struct console *, long unsigned int *); + void (*device_unlock)(struct console *, long unsigned int); + atomic_t nbcon_state; + atomic_long_t nbcon_seq; + struct nbcon_context nbcon_device_ctxt; + atomic_long_t nbcon_prev_seq; + struct printk_buffers *pbufs; + struct task_struct *kthread; + struct rcuwait rcuwait; + struct irq_work irq_work; +}; + +struct nbcon_write_context { + struct nbcon_context ctxt; + char *outbuf; + unsigned int len; + bool unsafe_takeover; + long: 32; +}; + +struct gpio_desc; + +enum gpiod_flags { + GPIOD_ASIS = 0, + GPIOD_IN = 1, + GPIOD_OUT_LOW = 3, + GPIOD_OUT_HIGH = 7, + GPIOD_OUT_LOW_OPEN_DRAIN = 11, + GPIOD_OUT_HIGH_OPEN_DRAIN = 15, +}; + +struct serial_rs485 { + __u32 flags; + __u32 delay_rts_before_send; + __u32 
delay_rts_after_send; + union { + __u32 padding[5]; + struct { + __u8 addr_recv; + __u8 addr_dest; + __u8 padding0[2]; + __u32 padding1[4]; + }; + }; +}; + +struct serial_iso7816 { + __u32 flags; + __u32 tg; + __u32 sc_fi; + __u32 sc_di; + __u32 clk; + __u32 reserved[5]; +}; + +enum { + IRQD_TRIGGER_MASK = 15, + IRQD_SETAFFINITY_PENDING = 256, + IRQD_ACTIVATED = 512, + IRQD_NO_BALANCING = 1024, + IRQD_PER_CPU = 2048, + IRQD_AFFINITY_SET = 4096, + IRQD_LEVEL = 8192, + IRQD_WAKEUP_STATE = 16384, + IRQD_MOVE_PCNTXT = 32768, + IRQD_IRQ_DISABLED = 65536, + IRQD_IRQ_MASKED = 131072, + IRQD_IRQ_INPROGRESS = 262144, + IRQD_WAKEUP_ARMED = 524288, + IRQD_FORWARDED_TO_VCPU = 1048576, + IRQD_AFFINITY_MANAGED = 2097152, + IRQD_IRQ_STARTED = 4194304, + IRQD_MANAGED_SHUTDOWN = 8388608, + IRQD_SINGLE_TARGET = 16777216, + IRQD_DEFAULT_TRIGGER_SET = 33554432, + IRQD_CAN_RESERVE = 67108864, + IRQD_HANDLE_ENFORCE_IRQCTX = 134217728, + IRQD_AFFINITY_ON_ACTIVATE = 268435456, + IRQD_IRQ_ENABLED_ON_SUSPEND = 536870912, + IRQD_RESEND_WHEN_IN_PROGRESS = 1073741824, +}; + +struct uart_port; + +struct uart_ops { + unsigned int (*tx_empty)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int (*get_mctrl)(struct uart_port *); + void (*stop_tx)(struct uart_port *); + void (*start_tx)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + void (*send_xchar)(struct uart_port *, char); + void (*stop_rx)(struct uart_port *); + void (*start_rx)(struct uart_port *); + void (*enable_ms)(struct uart_port *); + void (*break_ctl)(struct uart_port *, int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*flush_buffer)(struct uart_port *); + void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + const char * (*type)(struct uart_port *); + void (*release_port)(struct uart_port *); + int (*request_port)(struct uart_port *); + void (*config_port)(struct uart_port *, int); + int (*verify_port)(struct uart_port *, struct serial_struct *); + int (*ioctl)(struct uart_port *, unsigned int, long unsigned int); +}; + +struct uart_icount { + __u32 cts; + __u32 dsr; + __u32 rng; + __u32 dcd; + __u32 rx; + __u32 tx; + __u32 frame; + __u32 overrun; + __u32 parity; + __u32 brk; + __u32 buf_overrun; +}; + +typedef u64 upf_t; + +typedef unsigned int upstat_t; + +struct uart_state; + +struct serial_port_device; + +struct uart_port { + spinlock_t lock; + long unsigned int iobase; + unsigned char *membase; + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int (*get_divisor)(struct uart_port *, unsigned int, unsigned int *); + void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); + int (*rs485_config)(struct uart_port *, struct ktermios 
*, struct serial_rs485 *); + int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *); + unsigned int ctrl_id; + unsigned int port_id; + unsigned int irq; + long unsigned int irqflags; + unsigned int uartclk; + unsigned int fifosize; + unsigned char x_char; + unsigned char regshift; + unsigned char iotype; + unsigned char quirks; + unsigned int read_status_mask; + unsigned int ignore_status_mask; + struct uart_state *state; + struct uart_icount icount; + struct console *cons; + upf_t flags; + upstat_t status; + bool hw_stopped; + unsigned int mctrl; + unsigned int frame_time; + unsigned int type; + const struct uart_ops *ops; + unsigned int custom_divisor; + unsigned int line; + unsigned int minor; + resource_size_t mapbase; + resource_size_t mapsize; + struct device *dev; + struct serial_port_device *port_dev; + long unsigned int sysrq; + u8 sysrq_ch; + unsigned char has_sysrq; + unsigned char sysrq_seq; + unsigned char hub6; + unsigned char suspended; + unsigned char console_reinit; + const char *name; + struct attribute_group *attr_group; + const struct attribute_group **tty_groups; + struct serial_rs485 rs485; + struct serial_rs485 rs485_supported; + struct gpio_desc *rs485_term_gpio; + struct gpio_desc *rs485_rx_during_tx_gpio; + struct serial_iso7816 iso7816; + void *private_data; +}; + +enum uart_pm_state { + UART_PM_STATE_ON = 0, + UART_PM_STATE_OFF = 3, + UART_PM_STATE_UNDEFINED = 4, +}; + +struct uart_state { + struct tty_port port; + enum uart_pm_state pm_state; + atomic_t refcount; + wait_queue_head_t remove_wait; + struct uart_port *uart_port; +}; + +struct serial_port_device { + struct device dev; + struct uart_port *port; + unsigned int tx_enabled: 1; +}; + +struct uart_driver { + struct module *owner; + const char *driver_name; + const char *dev_name; + int major; + int minor; + int nr; + struct console *cons; + struct uart_state *state; + struct tty_driver *tty_driver; +}; + +enum lockdown_reason { + LOCKDOWN_NONE = 0, + LOCKDOWN_MODULE_SIGNATURE = 1, + LOCKDOWN_DEV_MEM = 2, + LOCKDOWN_EFI_TEST = 3, + LOCKDOWN_KEXEC = 4, + LOCKDOWN_HIBERNATION = 5, + LOCKDOWN_PCI_ACCESS = 6, + LOCKDOWN_IOPORT = 7, + LOCKDOWN_MSR = 8, + LOCKDOWN_ACPI_TABLES = 9, + LOCKDOWN_DEVICE_TREE = 10, + LOCKDOWN_PCMCIA_CIS = 11, + LOCKDOWN_TIOCSSERIAL = 12, + LOCKDOWN_MODULE_PARAMETERS = 13, + LOCKDOWN_MMIOTRACE = 14, + LOCKDOWN_DEBUGFS = 15, + LOCKDOWN_XMON_WR = 16, + LOCKDOWN_BPF_WRITE_USER = 17, + LOCKDOWN_DBG_WRITE_KERNEL = 18, + LOCKDOWN_RTAS_ERROR_INJECTION = 19, + LOCKDOWN_INTEGRITY_MAX = 20, + LOCKDOWN_KCORE = 21, + LOCKDOWN_KPROBES = 22, + LOCKDOWN_BPF_READ_KERNEL = 23, + LOCKDOWN_DBG_READ_KERNEL = 24, + LOCKDOWN_PERF = 25, + LOCKDOWN_TRACEFS = 26, + LOCKDOWN_XMON_RW = 27, + LOCKDOWN_XFRM_SECRET = 28, + LOCKDOWN_CONFIDENTIALITY_MAX = 29, +}; + +struct serial_ctrl_device { + struct device dev; + struct ida port_ida; + long: 32; +}; + +struct uart_match { + struct uart_port *port; + struct uart_driver *driver; +}; + +typedef long unsigned int ulong; + +enum wq_flags { + WQ_BH = 1, + WQ_UNBOUND = 2, + WQ_FREEZABLE = 4, + WQ_MEM_RECLAIM = 8, + WQ_HIGHPRI = 16, + WQ_CPU_INTENSIVE = 32, + WQ_SYSFS = 64, + WQ_POWER_EFFICIENT = 128, + __WQ_DESTROYING = 32768, + __WQ_DRAINING = 65536, + __WQ_ORDERED = 131072, + __WQ_LEGACY = 262144, + __WQ_BH_ALLOWS = 17, +}; + +struct klist_node; + +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +}; + +struct klist_node { + void *n_klist; + struct list_head n_node; + 
struct kref n_ref; +}; + +typedef int (*device_match_t)(struct device *, const void *); + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 4294967295, +}; + +struct attribute_container { + struct list_head node; + struct klist containers; + struct class *class; + const struct attribute_group *grp; + struct device_attribute **attrs; + int (*match)(struct attribute_container *, struct device *); + long unsigned int flags; +}; + +struct transport_container { + struct attribute_container ac; + const struct attribute_group *statistics; +}; + +struct sg_table { + struct scatterlist *sgl; + unsigned int nents; + unsigned int orig_nents; +}; + +enum hctx_type { + HCTX_TYPE_DEFAULT = 0, + HCTX_TYPE_READ = 1, + HCTX_TYPE_POLL = 2, + HCTX_MAX_TYPES = 3, +}; + +typedef bool busy_tag_iter_fn(struct request *, void *); + +enum scsi_host_status { + DID_OK = 0, + DID_NO_CONNECT = 1, + DID_BUS_BUSY = 2, + DID_TIME_OUT = 3, + DID_BAD_TARGET = 4, + DID_ABORT = 5, + DID_PARITY = 6, + DID_ERROR = 7, + DID_RESET = 8, + DID_BAD_INTR = 9, + DID_PASSTHROUGH = 10, + DID_SOFT_ERROR = 11, + DID_IMM_RETRY = 12, + DID_REQUEUE = 13, + DID_TRANSPORT_DISRUPTED = 14, + DID_TRANSPORT_FAILFAST = 15, + DID_TRANSPORT_MARGINAL = 20, +}; + +typedef __u64 blist_flags_t; + +enum scsi_device_state { + SDEV_CREATED = 1, + SDEV_RUNNING = 2, + SDEV_CANCEL = 3, + SDEV_DEL = 4, + SDEV_QUIESCE = 5, + SDEV_OFFLINE = 6, + SDEV_TRANSPORT_OFFLINE = 7, + SDEV_BLOCK = 8, + SDEV_CREATED_BLOCK = 9, +}; + +enum scsi_device_event { + SDEV_EVT_MEDIA_CHANGE = 1, + SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2, + SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3, + SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4, + SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5, + SDEV_EVT_LUN_CHANGE_REPORTED = 6, + SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7, + SDEV_EVT_POWER_ON_RESET_OCCURRED = 8, + SDEV_EVT_FIRST = 1, + SDEV_EVT_LAST = 8, + SDEV_EVT_MAXBITS = 9, +}; + +struct scsi_vpd { + struct callback_head rcu; + int len; + unsigned char data[0]; +}; + +struct Scsi_Host; + +struct scsi_target; + +struct scsi_device_handler; + +struct bsg_device; + +struct scsi_device { + struct Scsi_Host *host; + struct request_queue *request_queue; + struct list_head siblings; + struct list_head same_target_siblings; + struct sbitmap budget_map; + atomic_t device_blocked; + atomic_t restarts; + spinlock_t list_lock; + struct list_head starved_entry; + short unsigned int queue_depth; + short unsigned int max_queue_depth; + short unsigned int last_queue_full_depth; + short unsigned int last_queue_full_count; + long unsigned int last_queue_full_time; + long unsigned int queue_ramp_up_period; + long unsigned int last_queue_ramp_up; + unsigned int id; + unsigned int channel; + u64 lun; + unsigned int manufacturer; + unsigned int sector_size; + void *hostdata; + unsigned char type; + char scsi_level; + char inq_periph_qual; + struct mutex inquiry_mutex; + unsigned char inquiry_len; + unsigned char *inquiry; + const char *vendor; + const char *model; + const char *rev; + struct scsi_vpd *vpd_pg0; + struct scsi_vpd *vpd_pg83; + struct scsi_vpd *vpd_pg80; + struct scsi_vpd *vpd_pg89; + struct scsi_vpd *vpd_pgb0; + struct scsi_vpd *vpd_pgb1; + struct scsi_vpd *vpd_pgb2; + struct scsi_vpd *vpd_pgb7; + struct scsi_target *sdev_target; + long: 32; + blist_flags_t sdev_bflags; + unsigned int eh_timeout; + unsigned int manage_system_start_stop: 1; + unsigned int manage_runtime_start_stop: 1; + unsigned int manage_shutdown: 1; + 
unsigned int force_runtime_start_on_system_start: 1; + unsigned int removable: 1; + unsigned int changed: 1; + unsigned int busy: 1; + unsigned int lockable: 1; + unsigned int locked: 1; + unsigned int borken: 1; + unsigned int disconnect: 1; + unsigned int soft_reset: 1; + unsigned int sdtr: 1; + unsigned int wdtr: 1; + unsigned int ppr: 1; + unsigned int tagged_supported: 1; + unsigned int simple_tags: 1; + unsigned int was_reset: 1; + unsigned int expecting_cc_ua: 1; + unsigned int use_10_for_rw: 1; + unsigned int use_10_for_ms: 1; + unsigned int set_dbd_for_ms: 1; + unsigned int read_before_ms: 1; + unsigned int no_report_opcodes: 1; + unsigned int no_write_same: 1; + unsigned int use_16_for_rw: 1; + unsigned int use_16_for_sync: 1; + unsigned int skip_ms_page_8: 1; + unsigned int skip_ms_page_3f: 1; + unsigned int skip_vpd_pages: 1; + unsigned int try_vpd_pages: 1; + unsigned int use_192_bytes_for_3f: 1; + unsigned int no_start_on_add: 1; + unsigned int allow_restart: 1; + unsigned int start_stop_pwr_cond: 1; + unsigned int no_uld_attach: 1; + unsigned int select_no_atn: 1; + unsigned int fix_capacity: 1; + unsigned int guess_capacity: 1; + unsigned int retry_hwerror: 1; + unsigned int last_sector_bug: 1; + unsigned int no_read_disc_info: 1; + unsigned int no_read_capacity_16: 1; + unsigned int try_rc_10_first: 1; + unsigned int security_supported: 1; + unsigned int is_visible: 1; + unsigned int wce_default_on: 1; + unsigned int no_dif: 1; + unsigned int broken_fua: 1; + unsigned int lun_in_cdb: 1; + unsigned int unmap_limit_for_ws: 1; + unsigned int rpm_autosuspend: 1; + unsigned int ignore_media_change: 1; + unsigned int silence_suspend: 1; + unsigned int no_vpd_size: 1; + unsigned int cdl_supported: 1; + unsigned int cdl_enable: 1; + unsigned int queue_stopped; + bool offline_already; + atomic_t disk_events_disable_depth; + long unsigned int supported_events[1]; + long unsigned int pending_events[1]; + struct list_head event_list; + struct work_struct event_work; + unsigned int max_device_blocked; + atomic_t iorequest_cnt; + atomic_t iodone_cnt; + atomic_t ioerr_cnt; + atomic_t iotmo_cnt; + long: 32; + struct device sdev_gendev; + struct device sdev_dev; + struct work_struct requeue_work; + struct scsi_device_handler *handler; + void *handler_data; + size_t dma_drain_len; + void *dma_drain_buf; + unsigned int sg_timeout; + unsigned int sg_reserved_size; + struct bsg_device *bsg_dev; + unsigned char access_state; + struct mutex state_mutex; + enum scsi_device_state sdev_state; + struct task_struct *quiesced_by; + long unsigned int sdev_data[0]; + long: 32; +}; + +enum scsi_host_state { + SHOST_CREATED = 1, + SHOST_RUNNING = 2, + SHOST_CANCEL = 3, + SHOST_DEL = 4, + SHOST_RECOVERY = 5, + SHOST_CANCEL_RECOVERY = 6, + SHOST_DEL_RECOVERY = 7, +}; + +struct scsi_host_template; + +struct scsi_transport_template; + +struct Scsi_Host { + struct list_head __devices; + struct list_head __targets; + struct list_head starved_list; + spinlock_t default_lock; + spinlock_t *host_lock; + struct mutex scan_mutex; + struct list_head eh_abort_list; + struct list_head eh_cmd_q; + struct task_struct *ehandler; + struct completion *eh_action; + wait_queue_head_t host_wait; + const struct scsi_host_template *hostt; + struct scsi_transport_template *transportt; + struct kref tagset_refcnt; + struct completion tagset_freed; + struct blk_mq_tag_set tag_set; + atomic_t host_blocked; + unsigned int host_failed; + unsigned int host_eh_scheduled; + unsigned int host_no; + int eh_deadline; + long unsigned int 
last_reset; + unsigned int max_channel; + unsigned int max_id; + u64 max_lun; + unsigned int unique_id; + short unsigned int max_cmd_len; + int this_id; + int can_queue; + short int cmd_per_lun; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int opt_sectors; + unsigned int max_segment_size; + unsigned int dma_alignment; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + unsigned int nr_hw_queues; + unsigned int nr_maps; + unsigned int active_mode: 2; + unsigned int host_self_blocked: 1; + unsigned int reverse_ordering: 1; + unsigned int tmf_in_progress: 1; + unsigned int async_scan: 1; + unsigned int eh_noresume: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int queuecommand_may_block: 1; + unsigned int short_inquiry: 1; + unsigned int no_scsi2_lun_in_cdb: 1; + unsigned int no_highmem: 1; + struct workqueue_struct *work_q; + struct workqueue_struct *tmf_work_q; + unsigned int max_host_blocked; + unsigned int prot_capabilities; + unsigned char prot_guard_type; + long unsigned int base; + long unsigned int io_port; + unsigned char n_io_port; + unsigned char dma_channel; + unsigned int irq; + enum scsi_host_state shost_state; + long: 32; + struct device shost_gendev; + struct device shost_dev; + void *shost_data; + struct device *dma_dev; + int rpm_autosuspend_delay; + long unsigned int hostdata[0]; + long: 32; +}; + +enum scsi_target_state { + STARGET_CREATED = 1, + STARGET_RUNNING = 2, + STARGET_REMOVE = 3, + STARGET_CREATED_REMOVE = 4, + STARGET_DEL = 5, +}; + +struct scsi_target { + struct scsi_device *starget_sdev_user; + struct list_head siblings; + struct list_head devices; + long: 32; + struct device dev; + struct kref reap_ref; + unsigned int channel; + unsigned int id; + unsigned int create: 1; + unsigned int single_lun: 1; + unsigned int pdt_1f_for_no_lun: 1; + unsigned int no_report_luns: 1; + unsigned int expecting_lun_change: 1; + atomic_t target_busy; + atomic_t target_blocked; + unsigned int can_queue; + unsigned int max_target_blocked; + char scsi_level; + enum scsi_target_state state; + void *hostdata; + long unsigned int starget_data[0]; + long: 32; +}; + +enum scsi_timeout_action { + SCSI_EH_DONE = 0, + SCSI_EH_RESET_TIMER = 1, + SCSI_EH_NOT_HANDLED = 2, +}; + +struct scsi_cmnd; + +struct scsi_host_template { + unsigned int cmd_size; + int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); + void (*commit_rqs)(struct Scsi_Host *, u16); + struct module *module; + const char *name; + const char * (*info)(struct Scsi_Host *); + int (*ioctl)(struct scsi_device *, unsigned int, void *); + int (*init_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*exit_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*eh_abort_handler)(struct scsi_cmnd *); + int (*eh_device_reset_handler)(struct scsi_cmnd *); + int (*eh_target_reset_handler)(struct scsi_cmnd *); + int (*eh_bus_reset_handler)(struct scsi_cmnd *); + int (*eh_host_reset_handler)(struct scsi_cmnd *); + int (*slave_alloc)(struct scsi_device *); + int (*device_configure)(struct scsi_device *, struct queue_limits *); + int (*slave_configure)(struct scsi_device *); + void (*slave_destroy)(struct scsi_device *); + int (*target_alloc)(struct scsi_target *); + void (*target_destroy)(struct scsi_target *); + int (*scan_finished)(struct Scsi_Host *, long unsigned int); + void (*scan_start)(struct Scsi_Host *); + int (*change_queue_depth)(struct scsi_device *, int); + void (*map_queues)(struct 
Scsi_Host *); + int (*mq_poll)(struct Scsi_Host *, unsigned int); + bool (*dma_need_drain)(struct request *); + int (*bios_param)(struct scsi_device *, struct block_device *, sector_t, int *); + void (*unlock_native_capacity)(struct scsi_device *); + int (*show_info)(struct seq_file *, struct Scsi_Host *); + int (*write_info)(struct Scsi_Host *, char *, int); + enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *); + bool (*eh_should_retry_cmd)(struct scsi_cmnd *); + int (*host_reset)(struct Scsi_Host *, int); + const char *proc_name; + int can_queue; + int this_id; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int dma_alignment; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + short int cmd_per_lun; + int tag_alloc_policy; + unsigned int track_queue_depth: 1; + unsigned int supported_mode: 2; + unsigned int emulated: 1; + unsigned int skip_settle_delay: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int queuecommand_may_block: 1; + unsigned int max_host_blocked; + const struct attribute_group **shost_groups; + const struct attribute_group **sdev_groups; + u64 vendor_id; +}; + +enum scsi_cmnd_submitter { + SUBMITTED_BY_BLOCK_LAYER = 0, + SUBMITTED_BY_SCSI_ERROR_HANDLER = 1, + SUBMITTED_BY_SCSI_RESET_IOCTL = 2, +} __attribute__((mode(byte))); + +struct scsi_data_buffer { + struct sg_table table; + unsigned int length; +}; + +struct scsi_cmnd { + struct scsi_device *device; + struct list_head eh_entry; + struct delayed_work abort_work; + struct callback_head rcu; + int eh_eflags; + int budget_token; + long unsigned int jiffies_at_alloc; + int retries; + int allowed; + unsigned char prot_op; + unsigned char prot_type; + unsigned char prot_flags; + enum scsi_cmnd_submitter submitter; + short unsigned int cmd_len; + enum dma_data_direction sc_data_direction; + unsigned char cmnd[32]; + struct scsi_data_buffer sdb; + struct scsi_data_buffer *prot_sdb; + unsigned int underflow; + unsigned int transfersize; + unsigned int resid_len; + unsigned int sense_len; + unsigned char *sense_buffer; + int flags; + long unsigned int state; + unsigned int extra_len; + unsigned char *host_scribble; + int result; +}; + +struct scsi_transport_template { + struct transport_container host_attrs; + struct transport_container target_attrs; + struct transport_container device_attrs; + int (*user_scan)(struct Scsi_Host *, uint, uint, u64); + int device_size; + int device_private_offset; + int target_size; + int target_private_offset; + int host_size; + unsigned int create_work_queue: 1; + void (*eh_strategy_handler)(struct Scsi_Host *); +}; + +struct scsi_host_busy_iter_data { + bool (*fn)(struct scsi_cmnd *, void *); + void *priv; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next; + struct hlist_nulls_node **pprev; +}; + +typedef __u64 __addrpair; + +typedef __u32 __portpair; + +struct proto; + +struct sock_common { + union { + __addrpair skc_addrpair; + struct { + __be32 skc_daddr; + __be32 skc_rcv_saddr; + }; + }; + union { + unsigned int skc_hash; + __u16 skc_u16hashes[2]; + }; + union { + __portpair skc_portpair; + struct { + __be16 skc_dport; + __u16 skc_num; + }; + }; + short unsigned int skc_family; + volatile unsigned char skc_state; + unsigned char skc_reuse: 4; + unsigned char skc_reuseport: 1; + unsigned char skc_ipv6only: 1; + unsigned char skc_net_refcnt: 1; + int skc_bound_dev_if; + union { + struct hlist_node skc_bind_node; + 
struct hlist_node skc_portaddr_node; + }; + struct proto *skc_prot; + possible_net_t skc_net; + struct in6_addr skc_v6_daddr; + struct in6_addr skc_v6_rcv_saddr; + atomic64_t skc_cookie; + union { + long unsigned int skc_flags; + struct sock *skc_listener; + struct inet_timewait_death_row *skc_tw_dr; + }; + int skc_dontcopy_begin[0]; + union { + struct hlist_node skc_node; + struct hlist_nulls_node skc_nulls_node; + }; + short unsigned int skc_tx_queue_mapping; + short unsigned int skc_rx_queue_mapping; + union { + int skc_incoming_cpu; + u32 skc_rcv_wnd; + u32 skc_tw_rcv_nxt; + }; + refcount_t skc_refcnt; + int skc_dontcopy_end[0]; + union { + u32 skc_rxhash; + u32 skc_window_clamp; + u32 skc_tw_snd_nxt; + }; + long: 32; +}; + +typedef struct { + spinlock_t slock; + int owned; + wait_queue_head_t wq; +} socket_lock_t; + +struct sock_cgroup_data { + struct cgroup *cgroup; + u32 classid; +}; + +typedef struct {} netns_tracker; + +struct sk_filter; + +struct socket_wq; + +struct socket; + +struct sock_reuseport; + +struct sock { + struct sock_common __sk_common; + __u8 __cacheline_group_begin__sock_write_rx[0]; + atomic_t sk_drops; + __s32 sk_peek_off; + struct sk_buff_head sk_error_queue; + struct sk_buff_head sk_receive_queue; + struct { + atomic_t rmem_alloc; + int len; + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; + __u8 __cacheline_group_end__sock_write_rx[0]; + __u8 __cacheline_group_begin__sock_read_rx[0]; + struct dst_entry *sk_rx_dst; + int sk_rx_dst_ifindex; + u32 sk_rx_dst_cookie; + unsigned int sk_ll_usec; + unsigned int sk_napi_id; + u16 sk_busy_poll_budget; + u8 sk_prefer_busy_poll; + u8 sk_userlocks; + int sk_rcvbuf; + struct sk_filter *sk_filter; + union { + struct socket_wq *sk_wq; + struct socket_wq *sk_wq_raw; + }; + void (*sk_data_ready)(struct sock *); + long int sk_rcvtimeo; + int sk_rcvlowat; + __u8 __cacheline_group_end__sock_read_rx[0]; + __u8 __cacheline_group_begin__sock_read_rxtx[0]; + int sk_err; + struct socket *sk_socket; + struct mem_cgroup *sk_memcg; + __u8 __cacheline_group_end__sock_read_rxtx[0]; + __u8 __cacheline_group_begin__sock_write_rxtx[0]; + socket_lock_t sk_lock; + u32 sk_reserved_mem; + int sk_forward_alloc; + u32 sk_tsflags; + __u8 __cacheline_group_end__sock_write_rxtx[0]; + __u8 __cacheline_group_begin__sock_write_tx[0]; + int sk_write_pending; + atomic_t sk_omem_alloc; + int sk_sndbuf; + int sk_wmem_queued; + refcount_t sk_wmem_alloc; + long unsigned int sk_tsq_flags; + union { + struct sk_buff *sk_send_head; + struct rb_root tcp_rtx_queue; + }; + struct sk_buff_head sk_write_queue; + u32 sk_dst_pending_confirm; + u32 sk_pacing_status; + struct page_frag sk_frag; + struct timer_list sk_timer; + long unsigned int sk_pacing_rate; + atomic_t sk_zckey; + atomic_t sk_tskey; + __u8 __cacheline_group_end__sock_write_tx[0]; + __u8 __cacheline_group_begin__sock_read_tx[0]; + long unsigned int sk_max_pacing_rate; + long int sk_sndtimeo; + u32 sk_priority; + u32 sk_mark; + struct dst_entry *sk_dst_cache; + long: 32; + netdev_features_t sk_route_caps; + u16 sk_gso_type; + u16 sk_gso_max_segs; + unsigned int sk_gso_max_size; + gfp_t sk_allocation; + u32 sk_txhash; + u8 sk_pacing_shift; + bool sk_use_task_frag; + __u8 __cacheline_group_end__sock_read_tx[0]; + u8 sk_gso_disabled: 1; + u8 sk_kern_sock: 1; + u8 sk_no_check_tx: 1; + u8 sk_no_check_rx: 1; + u8 sk_shutdown; + u16 sk_type; + u16 sk_protocol; + long unsigned int sk_lingertime; + struct proto *sk_prot_creator; + rwlock_t sk_callback_lock; + int sk_err_soft; + u32 sk_ack_backlog; + 
u32 sk_max_ack_backlog; + kuid_t sk_uid; + spinlock_t sk_peer_lock; + int sk_bind_phc; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; + long: 32; + ktime_t sk_stamp; + seqlock_t sk_stamp_seq; + int sk_disconnects; + u8 sk_txrehash; + u8 sk_clockid; + u8 sk_txtime_deadline_mode: 1; + u8 sk_txtime_report_errors: 1; + u8 sk_txtime_unused: 6; + void *sk_user_data; + struct sock_cgroup_data sk_cgrp_data; + void (*sk_state_change)(struct sock *); + void (*sk_write_space)(struct sock *); + void (*sk_error_report)(struct sock *); + int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); + void (*sk_destruct)(struct sock *); + struct sock_reuseport *sk_reuseport_cb; + struct bpf_local_storage *sk_bpf_storage; + struct callback_head sk_rcu; + netns_tracker ns_tracker; + struct xarray sk_user_frags; + long: 32; +}; + +struct ubuf_info; + +struct msghdr { + void *msg_name; + int msg_namelen; + int msg_inq; + long: 32; + struct iov_iter msg_iter; + union { + void *msg_control; + void *msg_control_user; + }; + bool msg_control_is_user: 1; + bool msg_get_inq: 1; + unsigned int msg_flags; + __kernel_size_t msg_controllen; + struct kiocb *msg_iocb; + struct ubuf_info *msg_ubuf; + int (*sg_from_iter)(struct sk_buff *, struct iov_iter *, size_t); + long: 32; +}; + +struct ubuf_info_ops; + +struct ubuf_info { + const struct ubuf_info_ops *ops; + refcount_t refcnt; + u8 flags; +}; + +struct prot_inuse { + int all; + int val[64]; +}; + +enum skb_drop_reason { + SKB_NOT_DROPPED_YET = 0, + SKB_CONSUMED = 1, + SKB_DROP_REASON_NOT_SPECIFIED = 2, + SKB_DROP_REASON_NO_SOCKET = 3, + SKB_DROP_REASON_PKT_TOO_SMALL = 4, + SKB_DROP_REASON_TCP_CSUM = 5, + SKB_DROP_REASON_SOCKET_FILTER = 6, + SKB_DROP_REASON_UDP_CSUM = 7, + SKB_DROP_REASON_NETFILTER_DROP = 8, + SKB_DROP_REASON_OTHERHOST = 9, + SKB_DROP_REASON_IP_CSUM = 10, + SKB_DROP_REASON_IP_INHDR = 11, + SKB_DROP_REASON_IP_RPFILTER = 12, + SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST = 13, + SKB_DROP_REASON_XFRM_POLICY = 14, + SKB_DROP_REASON_IP_NOPROTO = 15, + SKB_DROP_REASON_SOCKET_RCVBUFF = 16, + SKB_DROP_REASON_PROTO_MEM = 17, + SKB_DROP_REASON_TCP_AUTH_HDR = 18, + SKB_DROP_REASON_TCP_MD5NOTFOUND = 19, + SKB_DROP_REASON_TCP_MD5UNEXPECTED = 20, + SKB_DROP_REASON_TCP_MD5FAILURE = 21, + SKB_DROP_REASON_TCP_AONOTFOUND = 22, + SKB_DROP_REASON_TCP_AOUNEXPECTED = 23, + SKB_DROP_REASON_TCP_AOKEYNOTFOUND = 24, + SKB_DROP_REASON_TCP_AOFAILURE = 25, + SKB_DROP_REASON_SOCKET_BACKLOG = 26, + SKB_DROP_REASON_TCP_FLAGS = 27, + SKB_DROP_REASON_TCP_ABORT_ON_DATA = 28, + SKB_DROP_REASON_TCP_ZEROWINDOW = 29, + SKB_DROP_REASON_TCP_OLD_DATA = 30, + SKB_DROP_REASON_TCP_OVERWINDOW = 31, + SKB_DROP_REASON_TCP_OFOMERGE = 32, + SKB_DROP_REASON_TCP_RFC7323_PAWS = 33, + SKB_DROP_REASON_TCP_OLD_SEQUENCE = 34, + SKB_DROP_REASON_TCP_INVALID_SEQUENCE = 35, + SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE = 36, + SKB_DROP_REASON_TCP_RESET = 37, + SKB_DROP_REASON_TCP_INVALID_SYN = 38, + SKB_DROP_REASON_TCP_CLOSE = 39, + SKB_DROP_REASON_TCP_FASTOPEN = 40, + SKB_DROP_REASON_TCP_OLD_ACK = 41, + SKB_DROP_REASON_TCP_TOO_OLD_ACK = 42, + SKB_DROP_REASON_TCP_ACK_UNSENT_DATA = 43, + SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE = 44, + SKB_DROP_REASON_TCP_OFO_DROP = 45, + SKB_DROP_REASON_IP_OUTNOROUTES = 46, + SKB_DROP_REASON_BPF_CGROUP_EGRESS = 47, + SKB_DROP_REASON_IPV6DISABLED = 48, + SKB_DROP_REASON_NEIGH_CREATEFAIL = 49, + SKB_DROP_REASON_NEIGH_FAILED = 50, + SKB_DROP_REASON_NEIGH_QUEUEFULL = 51, + SKB_DROP_REASON_NEIGH_DEAD = 52, + SKB_DROP_REASON_TC_EGRESS = 53, + SKB_DROP_REASON_SECURITY_HOOK = 54, + 
SKB_DROP_REASON_QDISC_DROP = 55, + SKB_DROP_REASON_CPU_BACKLOG = 56, + SKB_DROP_REASON_XDP = 57, + SKB_DROP_REASON_TC_INGRESS = 58, + SKB_DROP_REASON_UNHANDLED_PROTO = 59, + SKB_DROP_REASON_SKB_CSUM = 60, + SKB_DROP_REASON_SKB_GSO_SEG = 61, + SKB_DROP_REASON_SKB_UCOPY_FAULT = 62, + SKB_DROP_REASON_DEV_HDR = 63, + SKB_DROP_REASON_DEV_READY = 64, + SKB_DROP_REASON_FULL_RING = 65, + SKB_DROP_REASON_NOMEM = 66, + SKB_DROP_REASON_HDR_TRUNC = 67, + SKB_DROP_REASON_TAP_FILTER = 68, + SKB_DROP_REASON_TAP_TXFILTER = 69, + SKB_DROP_REASON_ICMP_CSUM = 70, + SKB_DROP_REASON_INVALID_PROTO = 71, + SKB_DROP_REASON_IP_INADDRERRORS = 72, + SKB_DROP_REASON_IP_INNOROUTES = 73, + SKB_DROP_REASON_IP_LOCAL_SOURCE = 74, + SKB_DROP_REASON_IP_INVALID_SOURCE = 75, + SKB_DROP_REASON_IP_LOCALNET = 76, + SKB_DROP_REASON_IP_INVALID_DEST = 77, + SKB_DROP_REASON_PKT_TOO_BIG = 78, + SKB_DROP_REASON_DUP_FRAG = 79, + SKB_DROP_REASON_FRAG_REASM_TIMEOUT = 80, + SKB_DROP_REASON_FRAG_TOO_FAR = 81, + SKB_DROP_REASON_TCP_MINTTL = 82, + SKB_DROP_REASON_IPV6_BAD_EXTHDR = 83, + SKB_DROP_REASON_IPV6_NDISC_FRAG = 84, + SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT = 85, + SKB_DROP_REASON_IPV6_NDISC_BAD_CODE = 86, + SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS = 87, + SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST = 88, + SKB_DROP_REASON_QUEUE_PURGE = 89, + SKB_DROP_REASON_TC_COOKIE_ERROR = 90, + SKB_DROP_REASON_PACKET_SOCK_ERROR = 91, + SKB_DROP_REASON_TC_CHAIN_NOTFOUND = 92, + SKB_DROP_REASON_TC_RECLASSIFY_LOOP = 93, + SKB_DROP_REASON_VXLAN_INVALID_HDR = 94, + SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND = 95, + SKB_DROP_REASON_MAC_INVALID_SOURCE = 96, + SKB_DROP_REASON_VXLAN_ENTRY_EXISTS = 97, + SKB_DROP_REASON_VXLAN_NO_REMOTE = 98, + SKB_DROP_REASON_IP_TUNNEL_ECN = 99, + SKB_DROP_REASON_TUNNEL_TXINFO = 100, + SKB_DROP_REASON_LOCAL_MAC = 101, + SKB_DROP_REASON_ARP_PVLAN_DISABLE = 102, + SKB_DROP_REASON_MAX = 103, + SKB_DROP_REASON_SUBSYS_MASK = 4294901760, +}; + +struct fib_notifier_ops { + int family; + struct list_head list; + unsigned int (*fib_seq_read)(const struct net *); + int (*fib_dump)(struct net *, struct notifier_block *, struct netlink_ext_ack *); + struct module *owner; + struct callback_head rcu; +}; + +enum { + NETIF_F_SG_BIT = 0, + NETIF_F_IP_CSUM_BIT = 1, + __UNUSED_NETIF_F_1 = 2, + NETIF_F_HW_CSUM_BIT = 3, + NETIF_F_IPV6_CSUM_BIT = 4, + NETIF_F_HIGHDMA_BIT = 5, + NETIF_F_FRAGLIST_BIT = 6, + NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, + NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, + NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, + NETIF_F_VLAN_CHALLENGED_BIT = 10, + NETIF_F_GSO_BIT = 11, + __UNUSED_NETIF_F_12 = 12, + __UNUSED_NETIF_F_13 = 13, + NETIF_F_GRO_BIT = 14, + NETIF_F_LRO_BIT = 15, + NETIF_F_GSO_SHIFT = 16, + NETIF_F_TSO_BIT = 16, + NETIF_F_GSO_ROBUST_BIT = 17, + NETIF_F_TSO_ECN_BIT = 18, + NETIF_F_TSO_MANGLEID_BIT = 19, + NETIF_F_TSO6_BIT = 20, + NETIF_F_FSO_BIT = 21, + NETIF_F_GSO_GRE_BIT = 22, + NETIF_F_GSO_GRE_CSUM_BIT = 23, + NETIF_F_GSO_IPXIP4_BIT = 24, + NETIF_F_GSO_IPXIP6_BIT = 25, + NETIF_F_GSO_UDP_TUNNEL_BIT = 26, + NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, + NETIF_F_GSO_PARTIAL_BIT = 28, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, + NETIF_F_GSO_SCTP_BIT = 30, + NETIF_F_GSO_ESP_BIT = 31, + NETIF_F_GSO_UDP_BIT = 32, + NETIF_F_GSO_UDP_L4_BIT = 33, + NETIF_F_GSO_FRAGLIST_BIT = 34, + NETIF_F_GSO_LAST = 34, + NETIF_F_FCOE_CRC_BIT = 35, + NETIF_F_SCTP_CRC_BIT = 36, + __UNUSED_NETIF_F_37 = 37, + NETIF_F_NTUPLE_BIT = 38, + NETIF_F_RXHASH_BIT = 39, + NETIF_F_RXCSUM_BIT = 40, + NETIF_F_NOCACHE_COPY_BIT = 41, + NETIF_F_LOOPBACK_BIT = 42, + NETIF_F_RXFCS_BIT = 43, + 
NETIF_F_RXALL_BIT = 44, + NETIF_F_HW_VLAN_STAG_TX_BIT = 45, + NETIF_F_HW_VLAN_STAG_RX_BIT = 46, + NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47, + NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48, + NETIF_F_HW_TC_BIT = 49, + NETIF_F_HW_ESP_BIT = 50, + NETIF_F_HW_ESP_TX_CSUM_BIT = 51, + NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52, + NETIF_F_HW_TLS_TX_BIT = 53, + NETIF_F_HW_TLS_RX_BIT = 54, + NETIF_F_GRO_HW_BIT = 55, + NETIF_F_HW_TLS_RECORD_BIT = 56, + NETIF_F_GRO_FRAGLIST_BIT = 57, + NETIF_F_HW_MACSEC_BIT = 58, + NETIF_F_GRO_UDP_FWD_BIT = 59, + NETIF_F_HW_HSR_TAG_INS_BIT = 60, + NETIF_F_HW_HSR_TAG_RM_BIT = 61, + NETIF_F_HW_HSR_FWD_BIT = 62, + NETIF_F_HW_HSR_DUP_BIT = 63, + NETDEV_FEATURE_COUNT = 64, +}; + +struct ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_proto; +}; + +typedef long unsigned int netmem_ref; + +struct skb_frag { + netmem_ref netmem; + unsigned int len; + unsigned int offset; +}; + +typedef struct skb_frag skb_frag_t; + +enum { + SKBTX_HW_TSTAMP = 1, + SKBTX_SW_TSTAMP = 2, + SKBTX_IN_PROGRESS = 4, + SKBTX_HW_TSTAMP_USE_CYCLES = 8, + SKBTX_WIFI_STATUS = 16, + SKBTX_HW_TSTAMP_NETDEV = 32, + SKBTX_SCHED_TSTAMP = 64, +}; + +struct ubuf_info_ops { + void (*complete)(struct sk_buff *, struct ubuf_info *, bool); + int (*link_skb)(struct sk_buff *, struct ubuf_info *); +}; + +struct xsk_tx_metadata_compl { + __u64 *tx_timestamp; +}; + +struct skb_shared_info { + __u8 flags; + __u8 meta_len; + __u8 nr_frags; + __u8 tx_flags; + short unsigned int gso_size; + short unsigned int gso_segs; + struct sk_buff *frag_list; + long: 32; + union { + struct skb_shared_hwtstamps hwtstamps; + struct xsk_tx_metadata_compl xsk_meta; + }; + unsigned int gso_type; + u32 tskey; + atomic_t dataref; + unsigned int xdp_frags_size; + void *destructor_arg; + skb_frag_t frags[17]; +}; + +struct pernet_operations { + struct list_head list; + int (*init)(struct net *); + void (*pre_exit)(struct net *); + void (*exit)(struct net *); + void (*exit_batch)(struct list_head *); + void (*exit_batch_rtnl)(struct list_head *, struct list_head *); + unsigned int * const id; + const size_t size; +}; + +typedef enum { + SS_FREE = 0, + SS_UNCONNECTED = 1, + SS_CONNECTING = 2, + SS_CONNECTED = 3, + SS_DISCONNECTING = 4, +} socket_state; + +struct socket_wq { + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + long unsigned int flags; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct proto_ops; + +struct socket { + socket_state state; + short int type; + long unsigned int flags; + struct file *file; + struct sock *sk; + const struct proto_ops *ops; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct socket_wq wq; +}; + +typedef struct { + size_t written; + size_t count; + union { + char *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); + +typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *); + +struct proto_accept_arg; + +struct proto_ops { + int family; + struct module *owner; + int (*release)(struct socket *); + int (*bind)(struct socket *, struct sockaddr *, int); + int (*connect)(struct socket *, struct sockaddr *, int, int); + int (*socketpair)(struct socket *, struct socket *); + int (*accept)(struct socket *, struct socket *, struct proto_accept_arg *); + int (*getname)(struct socket *, struct sockaddr *, int); + 
__poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); + int (*ioctl)(struct socket *, unsigned int, long unsigned int); + int (*gettstamp)(struct socket *, void *, bool, bool); + int (*listen)(struct socket *, int); + int (*shutdown)(struct socket *, int); + int (*setsockopt)(struct socket *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct socket *, int, int, char *, int *); + void (*show_fdinfo)(struct seq_file *, struct socket *); + int (*sendmsg)(struct socket *, struct msghdr *, size_t); + int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); + int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); + ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct socket *); + int (*set_peek_off)(struct sock *, int); + int (*peek_len)(struct socket *); + int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); + int (*read_skb)(struct sock *, skb_read_actor_t); + int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t); + int (*set_rcvlowat)(struct sock *, int); +}; + +struct proto_accept_arg { + int flags; + int err; + int is_empty; + bool kern; +}; + +enum net_device_flags { + IFF_UP = 1, + IFF_BROADCAST = 2, + IFF_DEBUG = 4, + IFF_LOOPBACK = 8, + IFF_POINTOPOINT = 16, + IFF_NOTRAILERS = 32, + IFF_RUNNING = 64, + IFF_NOARP = 128, + IFF_PROMISC = 256, + IFF_ALLMULTI = 512, + IFF_MASTER = 1024, + IFF_SLAVE = 2048, + IFF_MULTICAST = 4096, + IFF_PORTSEL = 8192, + IFF_AUTOMEDIA = 16384, + IFF_DYNAMIC = 32768, + IFF_LOWER_UP = 65536, + IFF_DORMANT = 131072, + IFF_ECHO = 262144, +}; + +struct neigh_parms { + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + int (*neigh_setup)(struct neighbour *); + struct neigh_table *tbl; + void *sysctl_table; + int dead; + refcount_t refcnt; + struct callback_head callback_head; + int reachable_time; + u32 qlen; + int data[14]; + long unsigned int data_state[1]; +}; + +enum hwtstamp_source { + HWTSTAMP_SOURCE_UNSPEC = 0, + HWTSTAMP_SOURCE_NETDEV = 1, + HWTSTAMP_SOURCE_PHYLIB = 2, +}; + +struct kernel_hwtstamp_config { + int flags; + int tx_type; + int rx_filter; + struct ifreq *ifr; + bool copied_to_user; + enum hwtstamp_source source; +}; + +enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1, + IFF_EBRIDGE = 2, + IFF_BONDING = 4, + IFF_ISATAP = 8, + IFF_WAN_HDLC = 16, + IFF_XMIT_DST_RELEASE = 32, + IFF_DONT_BRIDGE = 64, + IFF_DISABLE_NETPOLL = 128, + IFF_MACVLAN_PORT = 256, + IFF_BRIDGE_PORT = 512, + IFF_OVS_DATAPATH = 1024, + IFF_TX_SKB_SHARING = 2048, + IFF_UNICAST_FLT = 4096, + IFF_TEAM_PORT = 8192, + IFF_SUPP_NOFCS = 16384, + IFF_LIVE_ADDR_CHANGE = 32768, + IFF_MACVLAN = 65536, + IFF_XMIT_DST_RELEASE_PERM = 131072, + IFF_L3MDEV_MASTER = 262144, + IFF_NO_QUEUE = 524288, + IFF_OPENVSWITCH = 1048576, + IFF_L3MDEV_SLAVE = 2097152, + IFF_TEAM = 4194304, + IFF_RXFH_CONFIGURED = 8388608, + IFF_PHONY_HEADROOM = 16777216, + IFF_MACSEC = 33554432, + IFF_NO_RX_HANDLER = 67108864, + IFF_FAILOVER = 134217728, + IFF_FAILOVER_SLAVE = 268435456, + IFF_L3MDEV_RX_HANDLER = 536870912, + IFF_NO_ADDRCONF = 1073741824, + IFF_TX_SKB_NO_LINEAR = 2147483648, +}; + +struct ethtool_netdev_state { + struct xarray rss_ctx; + struct mutex rss_lock; + unsigned int wol_enabled: 1; + unsigned int module_fw_flash_in_progress: 1; +}; + +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char erom_version[32]; + char 
reserved2[12]; + __u32 n_priv_flags; + __u32 n_stats; + __u32 testinfo_len; + __u32 eedump_len; + __u32 regdump_len; +}; + +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[6]; +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; + +struct ethtool_regs { + __u32 cmd; + __u32 version; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; + +struct ethtool_ringparam { + __u32 cmd; + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +struct ethtool_pauseparam { + __u32 cmd; + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +enum ethtool_link_ext_state { + ETHTOOL_LINK_EXT_STATE_AUTONEG = 0, + ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE = 1, + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH = 2, + ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY = 3, + ETHTOOL_LINK_EXT_STATE_NO_CABLE = 4, + ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE = 5, + ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE = 6, + ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE = 7, + ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED = 8, + ETHTOOL_LINK_EXT_STATE_OVERHEAT = 9, + ETHTOOL_LINK_EXT_STATE_MODULE = 10, +}; + +enum ethtool_link_ext_substate_autoneg { + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED = 2, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED = 3, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE = 4, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE = 5, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD = 6, +}; + +enum ethtool_link_ext_substate_link_training { + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT = 4, +}; + +enum ethtool_link_ext_substate_link_logical_mismatch { + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED = 4, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED = 5, +}; + +enum 
ethtool_link_ext_substate_bad_signal_integrity { + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 2, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST = 3, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS = 4, +}; + +enum ethtool_link_ext_substate_cable_issue { + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 2, +}; + +enum ethtool_link_ext_substate_module { + ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1, +}; + +enum ethtool_mac_stats_src { + ETHTOOL_MAC_STATS_SRC_AGGREGATE = 0, + ETHTOOL_MAC_STATS_SRC_EMAC = 1, + ETHTOOL_MAC_STATS_SRC_PMAC = 2, +}; + +enum ethtool_module_power_mode_policy { + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1, + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO = 2, +}; + +enum ethtool_module_power_mode { + ETHTOOL_MODULE_POWER_MODE_LOW = 1, + ETHTOOL_MODULE_POWER_MODE_HIGH = 2, +}; + +enum ethtool_mm_verify_status { + ETHTOOL_MM_VERIFY_STATUS_UNKNOWN = 0, + ETHTOOL_MM_VERIFY_STATUS_INITIAL = 1, + ETHTOOL_MM_VERIFY_STATUS_VERIFYING = 2, + ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED = 3, + ETHTOOL_MM_VERIFY_STATUS_FAILED = 4, + ETHTOOL_MM_VERIFY_STATUS_DISABLED = 5, +}; + +struct ethtool_test { + __u32 cmd; + __u32 flags; + __u32 reserved; + __u32 len; + __u64 data[0]; +}; + +struct ethtool_stats { + __u32 cmd; + __u32 n_stats; + __u64 data[0]; +}; + +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; + +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; + +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; + +struct ethtool_flow_ext { + __u8 padding[2]; + unsigned char h_dest[6]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + long: 32; + __u64 ring_cookie; + __u32 location; + long: 32; +}; + +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; + long: 32; +}; + +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[128]; +}; + +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_fecparam { + __u32 cmd; + __u32 active_fec; + __u32 
fec; + __u32 reserved; +}; + +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, + ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, 
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, + ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, + ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, + ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93, + ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94, + ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95, + ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, + ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, + ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, + ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, + ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, + ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, + ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102, + __ETHTOOL_LINK_MODE_MASK_NBITS = 103, +}; + +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 link_mode_masks_nwords; + __u8 transceiver; + __u8 master_slave_cfg; + __u8 master_slave_state; + __u8 rate_matching; + __u32 reserved[7]; +}; + +enum hwtstamp_tx_types { + HWTSTAMP_TX_OFF = 0, + HWTSTAMP_TX_ON = 1, + HWTSTAMP_TX_ONESTEP_SYNC = 2, + HWTSTAMP_TX_ONESTEP_P2P = 3, + __HWTSTAMP_TX_CNT = 4, +}; + +enum hwtstamp_rx_filters { + HWTSTAMP_FILTER_NONE = 0, + HWTSTAMP_FILTER_ALL = 1, + HWTSTAMP_FILTER_SOME = 2, + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, + HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, + HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, + HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, + HWTSTAMP_FILTER_PTP_V2_EVENT = 12, + HWTSTAMP_FILTER_PTP_V2_SYNC = 13, + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, + HWTSTAMP_FILTER_NTP_ALL = 15, + __HWTSTAMP_FILTER_CNT = 16, +}; + +struct kernel_ethtool_ringparam { + u32 rx_buf_len; + u8 tcp_data_split; + u8 tx_push; + u8 rx_push; + u32 cqe_size; + u32 tx_push_buf_len; + u32 tx_push_buf_max_len; +}; + +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + enum ethtool_link_ext_substate_module module; + u32 __link_ext_substate; + }; +}; + +struct ethtool_link_ext_stats { + u64 link_down_events; +}; + +struct ethtool_rxfh_context { + u32 indir_size; + u32 key_size; + u16 priv_size; + u8 hfunc; + u8 input_xfrm; + u8 indir_configured: 1; + u8 key_configured: 1; + u32 key_off; + u8 data[0]; +}; + +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + 
long unsigned int supported[4]; + long unsigned int advertising[4]; + long unsigned int lp_advertising[4]; + } link_modes; + u32 lanes; +}; + +struct ethtool_keee { + long unsigned int supported[4]; + long unsigned int advertised[4]; + long unsigned int lp_advertised[4]; + u32 tx_lpi_timer; + bool tx_lpi_enabled; + bool eee_active; + bool eee_enabled; +}; + +struct kernel_ethtool_coalesce { + u8 use_cqe_mode_tx; + u8 use_cqe_mode_rx; + u32 tx_aggr_max_bytes; + u32 tx_aggr_max_frames; + u32 tx_aggr_time_usecs; +}; + +struct ethtool_eth_mac_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + }; + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + } stats; + }; +}; + +struct ethtool_eth_phy_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 SymbolErrorDuringCarrier; + }; + struct { + u64 SymbolErrorDuringCarrier; + } stats; + }; +}; + +struct ethtool_eth_ctrl_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + }; + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + } stats; + }; +}; + +struct ethtool_pause_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 tx_pause_frames; + u64 rx_pause_frames; + }; + struct { + u64 tx_pause_frames; + u64 rx_pause_frames; + } stats; + }; +}; + +struct ethtool_fec_stat { + u64 total; + u64 lanes[8]; +}; + +struct ethtool_fec_stats { + struct ethtool_fec_stat corrected_blocks; + struct ethtool_fec_stat uncorrectable_blocks; + struct ethtool_fec_stat corrected_bits; +}; + +struct ethtool_rmon_hist_range { + u16 low; + u16 high; +}; + +struct ethtool_rmon_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + }; + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + } stats; + }; +}; + +struct ethtool_ts_stats { + union { + struct { + u64 pkts; + u64 lost; + u64 err; + }; + struct { + u64 pkts; + u64 lost; + u64 err; + } tx_stats; + }; +}; + +struct 
ethtool_module_eeprom { + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; + u8 *data; +}; + +struct ethtool_module_power_mode_params { + enum ethtool_module_power_mode_policy policy; + enum ethtool_module_power_mode mode; +}; + +struct ethtool_mm_state { + u32 verify_time; + u32 max_verify_time; + enum ethtool_mm_verify_status verify_status; + bool tx_enabled; + bool tx_active; + bool pmac_enabled; + bool verify_enabled; + u32 tx_min_frag_size; + u32 rx_min_frag_size; +}; + +struct ethtool_mm_cfg { + u32 verify_time; + bool verify_enabled; + bool tx_enabled; + bool pmac_enabled; + u32 tx_min_frag_size; +}; + +struct ethtool_mm_stats { + u64 MACMergeFrameAssErrorCount; + u64 MACMergeFrameSmdErrorCount; + u64 MACMergeFrameAssOkCount; + u64 MACMergeFragCountRx; + u64 MACMergeFragCountTx; + u64 MACMergeHoldCount; +}; + +struct ethtool_rxfh_param { + u8 hfunc; + u32 indir_size; + u32 *indir; + u32 key_size; + u8 *key; + u32 rss_context; + u8 rss_delete; + u8 input_xfrm; +}; + +struct kernel_ethtool_ts_info { + u32 cmd; + u32 so_timestamping; + int phc_index; + enum hwtstamp_tx_types tx_types; + enum hwtstamp_rx_filters rx_filters; +}; + +enum { + RTAX_UNSPEC = 0, + RTAX_LOCK = 1, + RTAX_MTU = 2, + RTAX_WINDOW = 3, + RTAX_RTT = 4, + RTAX_RTTVAR = 5, + RTAX_SSTHRESH = 6, + RTAX_CWND = 7, + RTAX_ADVMSS = 8, + RTAX_REORDERING = 9, + RTAX_HOPLIMIT = 10, + RTAX_INITCWND = 11, + RTAX_FEATURES = 12, + RTAX_RTO_MIN = 13, + RTAX_INITRWND = 14, + RTAX_QUICKACK = 15, + RTAX_CC_ALGO = 16, + RTAX_FASTOPEN_NO_COOKIE = 17, + __RTAX_MAX = 18, +}; + +enum { + NEIGH_VAR_MCAST_PROBES = 0, + NEIGH_VAR_UCAST_PROBES = 1, + NEIGH_VAR_APP_PROBES = 2, + NEIGH_VAR_MCAST_REPROBES = 3, + NEIGH_VAR_RETRANS_TIME = 4, + NEIGH_VAR_BASE_REACHABLE_TIME = 5, + NEIGH_VAR_DELAY_PROBE_TIME = 6, + NEIGH_VAR_INTERVAL_PROBE_TIME_MS = 7, + NEIGH_VAR_GC_STALETIME = 8, + NEIGH_VAR_QUEUE_LEN_BYTES = 9, + NEIGH_VAR_PROXY_QLEN = 10, + NEIGH_VAR_ANYCAST_DELAY = 11, + NEIGH_VAR_PROXY_DELAY = 12, + NEIGH_VAR_LOCKTIME = 13, + NEIGH_VAR_QUEUE_LEN = 14, + NEIGH_VAR_RETRANS_TIME_MS = 15, + NEIGH_VAR_BASE_REACHABLE_TIME_MS = 16, + NEIGH_VAR_GC_INTERVAL = 17, + NEIGH_VAR_GC_THRESH1 = 18, + NEIGH_VAR_GC_THRESH2 = 19, + NEIGH_VAR_GC_THRESH3 = 20, + NEIGH_VAR_MAX = 21, +}; + +struct pneigh_entry; + +struct neigh_statistics; + +struct neigh_hash_table; + +struct neigh_table { + int family; + unsigned int entry_size; + unsigned int key_len; + __be16 protocol; + __u32 (*hash)(const void *, const struct net_device *, __u32 *); + bool (*key_eq)(const struct neighbour *, const void *); + int (*constructor)(struct neighbour *); + int (*pconstructor)(struct pneigh_entry *); + void (*pdestructor)(struct pneigh_entry *); + void (*proxy_redo)(struct sk_buff *); + int (*is_multicast)(const void *); + bool (*allow_add)(const struct net_device *, struct netlink_ext_ack *); + char *id; + struct neigh_parms parms; + struct list_head parms_list; + int gc_interval; + int gc_thresh1; + int gc_thresh2; + int gc_thresh3; + long unsigned int last_flush; + struct delayed_work gc_work; + struct delayed_work managed_work; + struct timer_list proxy_timer; + struct sk_buff_head proxy_queue; + atomic_t entries; + atomic_t gc_entries; + struct list_head gc_list; + struct list_head managed_list; + rwlock_t lock; + long unsigned int last_rand; + struct neigh_statistics *stats; + struct neigh_hash_table *nht; + struct pneigh_entry **phash_buckets; +}; + +struct neigh_statistics { + long unsigned int allocs; + long unsigned int destroys; + long unsigned int 
hash_grows; + long unsigned int res_failed; + long unsigned int lookups; + long unsigned int hits; + long unsigned int rcv_probes_mcast; + long unsigned int rcv_probes_ucast; + long unsigned int periodic_gc_runs; + long unsigned int forced_gc_runs; + long unsigned int unres_discards; + long unsigned int table_fulls; +}; + +struct neigh_ops { + int family; + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +}; + +struct pneigh_entry { + struct pneigh_entry *next; + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; + u8 protocol; + u32 key[0]; +}; + +struct neigh_hash_table { + struct hlist_head *hash_heads; + unsigned int hash_shift; + __u32 hash_rnd[4]; + struct callback_head rcu; +}; + +enum { + TCP_ESTABLISHED = 1, + TCP_SYN_SENT = 2, + TCP_SYN_RECV = 3, + TCP_FIN_WAIT1 = 4, + TCP_FIN_WAIT2 = 5, + TCP_TIME_WAIT = 6, + TCP_CLOSE = 7, + TCP_CLOSE_WAIT = 8, + TCP_LAST_ACK = 9, + TCP_LISTEN = 10, + TCP_CLOSING = 11, + TCP_NEW_SYN_RECV = 12, + TCP_BOUND_INACTIVE = 13, + TCP_MAX_STATES = 14, +}; + +struct smc_hashinfo; + +struct sk_psock; + +struct request_sock_ops; + +struct timewait_sock_ops; + +struct raw_hashinfo; + +struct proto { + void (*close)(struct sock *, long int); + int (*pre_connect)(struct sock *, struct sockaddr *, int); + int (*connect)(struct sock *, struct sockaddr *, int); + int (*disconnect)(struct sock *, int); + struct sock * (*accept)(struct sock *, struct proto_accept_arg *); + int (*ioctl)(struct sock *, int, int *); + int (*init)(struct sock *); + void (*destroy)(struct sock *); + void (*shutdown)(struct sock *, int); + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*keepalive)(struct sock *, int); + int (*sendmsg)(struct sock *, struct msghdr *, size_t); + int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int *); + void (*splice_eof)(struct socket *); + int (*bind)(struct sock *, struct sockaddr *, int); + int (*bind_add)(struct sock *, struct sockaddr *, int); + int (*backlog_rcv)(struct sock *, struct sk_buff *); + bool (*bpf_bypass_getsockopt)(int, int); + void (*release_cb)(struct sock *); + int (*hash)(struct sock *); + void (*unhash)(struct sock *); + void (*rehash)(struct sock *); + int (*get_port)(struct sock *, short unsigned int); + void (*put_port)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + unsigned int inuse_idx; + bool (*stream_memory_free)(const struct sock *, int); + bool (*sock_is_readable)(struct sock *); + void (*enter_memory_pressure)(struct sock *); + void (*leave_memory_pressure)(struct sock *); + atomic_long_t *memory_allocated; + int *per_cpu_fw_alloc; + struct percpu_counter *sockets_allocated; + long unsigned int *memory_pressure; + long int *sysctl_mem; + int *sysctl_wmem; + int *sysctl_rmem; + u32 sysctl_wmem_offset; + u32 sysctl_rmem_offset; + int max_header; + bool no_autobind; + struct kmem_cache *slab; + unsigned int obj_size; + unsigned int ipv6_pinfo_offset; + slab_flags_t slab_flags; + unsigned int useroffset; + unsigned int usersize; + unsigned int *orphan_count; + struct request_sock_ops *rsk_prot; + struct timewait_sock_ops *twsk_prot; + union { + struct inet_hashinfo *hashinfo; + struct udp_table *udp_table; + struct raw_hashinfo *raw_hash; + struct 
smc_hashinfo *smc_hash; + } h; + struct module *owner; + char name[32]; + struct list_head node; + int (*diag_destroy)(struct sock *, int); +}; + +enum sk_rst_reason { + SK_RST_REASON_NOT_SPECIFIED = 0, + SK_RST_REASON_NO_SOCKET = 1, + SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE = 2, + SK_RST_REASON_TCP_RFC7323_PAWS = 3, + SK_RST_REASON_TCP_TOO_OLD_ACK = 4, + SK_RST_REASON_TCP_ACK_UNSENT_DATA = 5, + SK_RST_REASON_TCP_FLAGS = 6, + SK_RST_REASON_TCP_OLD_ACK = 7, + SK_RST_REASON_TCP_ABORT_ON_DATA = 8, + SK_RST_REASON_TCP_TIMEWAIT_SOCKET = 9, + SK_RST_REASON_INVALID_SYN = 10, + SK_RST_REASON_TCP_ABORT_ON_CLOSE = 11, + SK_RST_REASON_TCP_ABORT_ON_LINGER = 12, + SK_RST_REASON_TCP_ABORT_ON_MEMORY = 13, + SK_RST_REASON_TCP_STATE = 14, + SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT = 15, + SK_RST_REASON_TCP_DISCONNECT_WITH_DATA = 16, + SK_RST_REASON_MPTCP_RST_EUNSPEC = 17, + SK_RST_REASON_MPTCP_RST_EMPTCP = 18, + SK_RST_REASON_MPTCP_RST_ERESOURCE = 19, + SK_RST_REASON_MPTCP_RST_EPROHIBIT = 20, + SK_RST_REASON_MPTCP_RST_EWQ2BIG = 21, + SK_RST_REASON_MPTCP_RST_EBADPERF = 22, + SK_RST_REASON_MPTCP_RST_EMIDDLEBOX = 23, + SK_RST_REASON_ERROR = 24, + SK_RST_REASON_MAX = 25, +}; + +struct request_sock; + +struct request_sock_ops { + int family; + unsigned int obj_size; + struct kmem_cache *slab; + char *slab_name; + int (*rtx_syn_ack)(const struct sock *, struct request_sock *); + void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); + void (*send_reset)(const struct sock *, struct sk_buff *, enum sk_rst_reason); + void (*destructor)(struct request_sock *); + void (*syn_ack_timeout)(const struct request_sock *); +}; + +struct timewait_sock_ops { + struct kmem_cache *twsk_slab; + char *twsk_slab_name; + unsigned int twsk_obj_size; + void (*twsk_destructor)(struct sock *); +}; + +struct saved_syn; + +struct request_sock { + struct sock_common __req_common; + struct request_sock *dl_next; + u16 mss; + u8 num_retrans; + u8 syncookie: 1; + u8 num_timeout: 7; + u32 ts_recent; + struct timer_list rsk_timer; + const struct request_sock_ops *rsk_ops; + struct sock *sk; + struct saved_syn *saved_syn; + u32 secid; + u32 peer_secid; + u32 timeout; +}; + +struct saved_syn { + u32 mac_hdrlen; + u32 network_hdrlen; + u32 tcp_hdrlen; + u8 data[0]; +}; + +enum tsq_enum { + TSQ_THROTTLED = 0, + TSQ_QUEUED = 1, + TCP_TSQ_DEFERRED = 2, + TCP_WRITE_TIMER_DEFERRED = 3, + TCP_DELACK_TIMER_DEFERRED = 4, + TCP_MTU_REDUCED_DEFERRED = 5, + TCP_ACK_DEFERRED = 6, +}; + +enum tk_offsets { + TK_OFFS_REAL = 0, + TK_OFFS_BOOT = 1, + TK_OFFS_TAI = 2, + TK_OFFS_MAX = 3, +}; + +struct system_device_crosststamp { + ktime_t device; + ktime_t sys_realtime; + ktime_t sys_monoraw; +}; + +struct msix_entry { + u32 vector; + u16 entry; +}; + +enum { + IPPROTO_IP = 0, + IPPROTO_ICMP = 1, + IPPROTO_IGMP = 2, + IPPROTO_IPIP = 4, + IPPROTO_TCP = 6, + IPPROTO_EGP = 8, + IPPROTO_PUP = 12, + IPPROTO_UDP = 17, + IPPROTO_IDP = 22, + IPPROTO_TP = 29, + IPPROTO_DCCP = 33, + IPPROTO_IPV6 = 41, + IPPROTO_RSVP = 46, + IPPROTO_GRE = 47, + IPPROTO_ESP = 50, + IPPROTO_AH = 51, + IPPROTO_MTP = 92, + IPPROTO_BEETPH = 94, + IPPROTO_ENCAP = 98, + IPPROTO_PIM = 103, + IPPROTO_COMP = 108, + IPPROTO_L2TP = 115, + IPPROTO_SCTP = 132, + IPPROTO_UDPLITE = 136, + IPPROTO_MPLS = 137, + IPPROTO_ETHERNET = 143, + IPPROTO_RAW = 255, + IPPROTO_SMC = 256, + IPPROTO_MPTCP = 262, + IPPROTO_MAX = 263, +}; + +struct xdp_md { + __u32 data; + __u32 data_end; + __u32 data_meta; + __u32 ingress_ifindex; + __u32 rx_queue_index; + __u32 egress_ifindex; +}; + +struct xdp_mem_info 
{ + u32 type; + u32 id; +}; + +struct xdp_frame { + void *data; + u16 len; + u16 headroom; + u32 metasize; + struct xdp_mem_info mem; + struct net_device *dev_rx; + u32 frame_sz; + u32 flags; +}; + +struct xdp_rxq_info; + +struct xdp_txq_info; + +struct xdp_buff { + void *data; + void *data_end; + void *data_meta; + void *data_hard_start; + struct xdp_rxq_info *rxq; + struct xdp_txq_info *txq; + u32 frame_sz; + u32 flags; +}; + +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + +struct sk_filter { + refcount_t refcnt; + struct callback_head rcu; + struct bpf_prog *prog; +}; + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575 = 1, + e1000_82576 = 2, + e1000_82580 = 3, + e1000_i350 = 4, + e1000_i354 = 5, + e1000_i210 = 6, + e1000_i211 = 7, + e1000_num_macs = 8, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types = 4, +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none = 1, + e1000_nvm_eeprom_spi = 2, + e1000_nvm_flash_hw = 3, + e1000_nvm_invm = 4, + e1000_nvm_flash_sw = 5, +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small = 1, + e1000_nvm_override_spi_large = 2, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none = 1, + e1000_phy_m88 = 2, + e1000_phy_igp = 3, + e1000_phy_igp_2 = 4, + e1000_phy_gg82563 = 5, + e1000_phy_igp_3 = 6, + e1000_phy_ife = 7, + e1000_phy_82580 = 8, + e1000_phy_i210 = 9, + e1000_phy_bcm54616 = 10, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci = 1, + e1000_bus_type_pcix = 2, + e1000_bus_type_pci_express = 3, + e1000_bus_type_reserved = 4, +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33 = 1, + e1000_bus_speed_66 = 2, + e1000_bus_speed_100 = 3, + e1000_bus_speed_120 = 4, + e1000_bus_speed_133 = 5, + e1000_bus_speed_2500 = 6, + e1000_bus_speed_5000 = 7, + e1000_bus_speed_reserved = 8, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1 = 1, + e1000_bus_width_pcie_x2 = 2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32 = 9, + e1000_bus_width_64 = 10, + e1000_bus_width_reserved = 11, +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok = 1, + e1000_1000t_rx_status_undefined = 255, +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed = 1, + e1000_rev_polarity_undefined = 255, +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause = 1, + e1000_fc_tx_pause = 2, + e1000_fc_full = 3, + e1000_fc_default = 255, +}; + +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + 
u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master = 1, + e1000_ms_force_slave = 2, + e1000_ms_auto = 3, +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on = 1, + e1000_smart_speed_off = 2, +}; + +struct e1000_sfp_flags { + u8 e1000_base_sx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_t: 1; + u8 e100_base_lx: 1; + u8 e100_base_fx: 1; + u8 e10_base_bx10: 1; + u8 e10_base_px: 1; +}; + +struct e1000_hw; + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); +}; + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[3]; +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + enum e1000_mac_type type; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + u16 mta_reg_count; + u16 uta_reg_count; + u32 mta_shadow[128]; + u16 rar_entry_count; + u8 forced_speed_duplex; + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_fc_info { + u32 high_water; + u32 low_water; + u16 pause_time; + bool send_xon; + bool strict_ieee; + enum e1000_fc_mode current_mode; + enum e1000_fc_mode requested_mode; +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + enum e1000_phy_type type; + enum e1000_1000t_rx_status 
local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + u32 addr; + u32 id; + u32 reset_delay_us; + u32 revision; + enum e1000_media_type media_type; + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + u8 mdix; + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + u32 flash_bank_size; + u32 flash_base_addr; + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + u32 snoop; + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16, bool); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); + s32 (*unlock)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw { + void *back; + u8 *hw_addr; + u8 *flash_address; + long unsigned int io_base; + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + u8 revision_id; +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +struct cyclecounter { + u64 (*read)(const struct cyclecounter *); + long: 32; + u64 mask; + u32 mult; + u32 shift; +}; + +struct timecounter { + const struct cyclecounter *cc; + long: 32; + u64 cycle_last; + u64 nsec; + u64 mask; + u64 frac; +}; + +struct ptp_clock_time { + __s64 sec; + __u32 nsec; + 
__u32 reserved; +}; + +struct ptp_extts_request { + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct ptp_perout_request { + union { + struct ptp_clock_time start; + struct ptp_clock_time phase; + }; + struct ptp_clock_time period; + unsigned int index; + unsigned int flags; + union { + struct ptp_clock_time on; + unsigned int rsv[4]; + }; +}; + +enum ptp_pin_function { + PTP_PF_NONE = 0, + PTP_PF_EXTTS = 1, + PTP_PF_PEROUT = 2, + PTP_PF_PHYSYNC = 3, +}; + +struct ptp_pin_desc { + char name[64]; + unsigned int index; + unsigned int func; + unsigned int chan; + unsigned int rsv[5]; +}; + +struct ptp_clock_request { + enum { + PTP_CLK_REQ_EXTTS = 0, + PTP_CLK_REQ_PEROUT = 1, + PTP_CLK_REQ_PPS = 2, + } type; + long: 32; + union { + struct ptp_extts_request extts; + struct ptp_perout_request perout; + }; +}; + +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; + clockid_t clockid; + long: 32; +}; + +struct ptp_clock_info { + struct module *owner; + char name[32]; + s32 max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int n_pins; + int pps; + struct ptp_pin_desc *pin_config; + int (*adjfine)(struct ptp_clock_info *, long int); + int (*adjphase)(struct ptp_clock_info *, s32); + s32 (*getmaxphase)(struct ptp_clock_info *); + int (*adjtime)(struct ptp_clock_info *, s64); + int (*gettime64)(struct ptp_clock_info *, struct timespec64 *); + int (*gettimex64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*settime64)(struct ptp_clock_info *, const struct timespec64 *); + int (*getcycles64)(struct ptp_clock_info *, struct timespec64 *); + int (*getcyclesx64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosscycles)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*enable)(struct ptp_clock_info *, struct ptp_clock_request *, int); + int (*verify)(struct ptp_clock_info *, unsigned int, enum ptp_pin_function, unsigned int); + long int (*do_aux_work)(struct ptp_clock_info *); +}; + +struct regulator; + +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +struct rt_mutex { + struct rt_mutex_base rtmutex; +}; + +struct i2c_msg { + __u16 addr; + __u16 flags; + __u16 len; + __u8 *buf; +}; + +union i2c_smbus_data { + __u8 byte; + __u16 word; + __u8 block[34]; +}; + +struct i2c_adapter; + +struct i2c_client { + short unsigned int flags; + short unsigned int addr; + char name[20]; + struct i2c_adapter *adapter; + long: 32; + struct device dev; + int init_irq; + int irq; + struct list_head detected; + void *devres_group_id; + long: 32; +}; + +struct i2c_algorithm; + +struct i2c_lock_operations; + +struct i2c_bus_recovery_info; + +struct i2c_adapter_quirks; + +struct i2c_adapter { + struct module *owner; + unsigned int class; + const struct i2c_algorithm *algo; + void *algo_data; + const struct i2c_lock_operations *lock_ops; + struct rt_mutex bus_lock; + struct rt_mutex mux_lock; + int timeout; + int retries; + long: 32; + struct device dev; + long unsigned int locked_flags; + int nr; + char name[48]; + struct completion dev_released; + struct mutex userspace_clients_lock; + struct list_head userspace_clients; + struct i2c_bus_recovery_info *bus_recovery_info; + const struct i2c_adapter_quirks *quirks; + struct irq_domain *host_notify_domain; + struct regulator *bus_regulator; + struct 
dentry *debugfs; + long unsigned int addrs_in_instantiation[4]; +}; + +struct i2c_algorithm { + union { + int (*xfer)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); + }; + union { + int (*xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + }; + int (*smbus_xfer)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + int (*smbus_xfer_atomic)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + u32 (*functionality)(struct i2c_adapter *); +}; + +struct i2c_lock_operations { + void (*lock_bus)(struct i2c_adapter *, unsigned int); + int (*trylock_bus)(struct i2c_adapter *, unsigned int); + void (*unlock_bus)(struct i2c_adapter *, unsigned int); +}; + +struct pinctrl; + +struct pinctrl_state; + +struct i2c_bus_recovery_info { + int (*recover_bus)(struct i2c_adapter *); + int (*get_scl)(struct i2c_adapter *); + void (*set_scl)(struct i2c_adapter *, int); + int (*get_sda)(struct i2c_adapter *); + void (*set_sda)(struct i2c_adapter *, int); + int (*get_bus_free)(struct i2c_adapter *); + void (*prepare_recovery)(struct i2c_adapter *); + void (*unprepare_recovery)(struct i2c_adapter *); + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; +}; + +struct i2c_adapter_quirks { + u64 flags; + int max_num_msgs; + u16 max_write_len; + u16 max_read_len; + u16 max_comb_1st_msg_len; + u16 max_comb_2nd_msg_len; + long: 32; +}; + +struct i2c_algo_bit_data { + void *data; + void (*setsda)(void *, int); + void (*setscl)(void *, int); + int (*getsda)(void *); + int (*getscl)(void *); + int (*pre_xfer)(struct i2c_adapter *); + void (*post_xfer)(struct i2c_adapter *); + int udelay; + int timeout; + bool can_do_atomic; +}; + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; + struct xdp_mem_info mem; + unsigned int napi_id; + u32 frag_size; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct xdp_txq_info { + struct net_device *dev; +}; + +struct vf_data_storage { + unsigned char vf_mac_addresses[6]; + u16 vf_mc_hashes[30]; + u16 num_vf_mc_hashes; + u32 flags; + long unsigned int last_nack; + u16 pf_vlan; + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; + bool trusted; +}; + +struct vf_mac_filter { + struct list_head l; + int vf; + bool free; + u8 vf_mac[6]; +}; + +enum igb_tx_buf_type { + IGB_TYPE_SKB = 0, + IGB_TYPE_XDP = 1, +}; + +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + long unsigned int time_stamp; + enum igb_tx_buf_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + dma_addr_t dma; + __u32 len; + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct page *page; + __u16 page_offset; + __u16 pagecnt_bias; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring; + +struct igb_ring_container { + struct igb_ring *ring; + unsigned int total_bytes; + unsigned int total_packets; + u16 work_limit; + u8 count; + u8 itr; +}; + +struct igb_q_vector; + +struct igb_ring { + struct 
igb_q_vector *q_vector; + struct net_device *netdev; + struct bpf_prog *xdp_prog; + struct device *dev; + union { + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; + long unsigned int flags; + void *tail; + dma_addr_t dma; + unsigned int size; + u16 count; + u8 queue_index; + u8 reg_idx; + bool launchtime_enable; + bool cbs_enable; + s32 idleslope; + s32 sendslope; + s32 hicredit; + s32 locredit; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + union { + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + struct { + struct sk_buff *skb; + long: 32; + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + long: 32; + }; + }; + struct xdp_rxq_info xdp_rxq; +}; + +struct igb_adapter; + +struct igb_q_vector { + struct igb_adapter *adapter; + int cpu; + u32 eims_value; + u16 itr_val; + u8 set_itr; + void *itr_register; + struct igb_ring_container rx; + struct igb_ring_container tx; + long: 32; + struct napi_struct napi; + struct callback_head rcu; + char name[25]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct igb_ring ring[0]; +}; + +struct ptp_clock; + +struct hwmon_buff; + +struct igb_mac_addr; + +struct igb_adapter { + long unsigned int active_vlans[128]; + struct net_device *netdev; + struct bpf_prog *xdp_prog; + long unsigned int state; + unsigned int flags; + unsigned int num_q_vectors; + struct msix_entry msix_entries[10]; + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + int num_rx_queues; + struct igb_ring *rx_ring[16]; + u32 max_frame_size; + u32 min_frame_size; + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + u8 *io_addr; + struct work_struct reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + long unsigned int led_status; + struct pci_dev *pdev; + spinlock_t stats64_lock; + long: 32; + struct rtnl_link_stats64 stats64; + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + u32 test_icr; + long: 32; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + int msg_enable; + struct igb_q_vector *q_vector[8]; + u32 eims_enable_mask; + u32 eims_other; + u16 tx_ring_count; + u16 rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + long unsigned int ptp_tx_start; + long unsigned int last_rx_ptp_check; + long unsigned int last_rx_timestamp; + unsigned int ptp_flags; + spinlock_t tmreg_lock; + long: 32; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + bool pps_sys_wrap_on; + struct ptp_pin_desc sdp_config[4]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[2]; + char fw_version[48]; + struct hwmon_buff *igb_hwmon_buff; + bool ets; + struct i2c_algo_bit_data 
i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[128]; + long unsigned int link_check_timeout; + int copper_tries; + struct e1000_info ei; + u16 eee_advert; + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; + spinlock_t nfc_lock; + bool etype_bitmap[3]; + struct igb_mac_addr *mac_table; + struct vf_mac_filter vf_macs; + struct vf_mac_filter *vf_mac_list; + spinlock_t vfs_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[13]; + struct hwmon_attr hwmon_list[12]; + unsigned int n_hwmon; +}; + +struct igb_mac_addr { + u8 addr[6]; + u8 queue; + u8 state; +}; + +enum e1000_state_t { + __IGB_TESTING = 0, + __IGB_RESETTING = 1, + __IGB_DOWN = 2, + __IGB_PTP_TX_IN_PROGRESS = 3, +}; + +enum nl80211_iftype { + NL80211_IFTYPE_UNSPECIFIED = 0, + NL80211_IFTYPE_ADHOC = 1, + NL80211_IFTYPE_STATION = 2, + NL80211_IFTYPE_AP = 3, + NL80211_IFTYPE_AP_VLAN = 4, + NL80211_IFTYPE_WDS = 5, + NL80211_IFTYPE_MONITOR = 6, + NL80211_IFTYPE_MESH_POINT = 7, + NL80211_IFTYPE_P2P_CLIENT = 8, + NL80211_IFTYPE_P2P_GO = 9, + NL80211_IFTYPE_P2P_DEVICE = 10, + NL80211_IFTYPE_OCB = 11, + NL80211_IFTYPE_NAN = 12, + NUM_NL80211_IFTYPES = 13, + NL80211_IFTYPE_MAX = 12, +}; + +enum ieee80211_bss_type { + IEEE80211_BSS_TYPE_ESS = 0, + IEEE80211_BSS_TYPE_PBSS = 1, + IEEE80211_BSS_TYPE_IBSS = 2, + IEEE80211_BSS_TYPE_MBSS = 3, + IEEE80211_BSS_TYPE_ANY = 4, +}; + +struct wiphy; + +struct wiphy_work; + +typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *); + +struct wiphy_work { + struct list_head entry; + wiphy_work_func_t func; +}; + +enum nl80211_chan_width { + NL80211_CHAN_WIDTH_20_NOHT = 0, + NL80211_CHAN_WIDTH_20 = 1, + NL80211_CHAN_WIDTH_40 = 2, + NL80211_CHAN_WIDTH_80 = 3, + NL80211_CHAN_WIDTH_80P80 = 4, + NL80211_CHAN_WIDTH_160 = 5, + NL80211_CHAN_WIDTH_5 = 6, + NL80211_CHAN_WIDTH_10 = 7, + NL80211_CHAN_WIDTH_1 = 8, + NL80211_CHAN_WIDTH_2 = 9, + NL80211_CHAN_WIDTH_4 = 10, + NL80211_CHAN_WIDTH_8 = 11, + NL80211_CHAN_WIDTH_16 = 12, + NL80211_CHAN_WIDTH_320 = 13, +}; + +enum ieee80211_edmg_bw_config { + IEEE80211_EDMG_BW_CONFIG_4 = 4, + IEEE80211_EDMG_BW_CONFIG_5 = 5, + IEEE80211_EDMG_BW_CONFIG_6 = 6, + IEEE80211_EDMG_BW_CONFIG_7 = 7, + IEEE80211_EDMG_BW_CONFIG_8 = 8, + IEEE80211_EDMG_BW_CONFIG_9 = 9, + IEEE80211_EDMG_BW_CONFIG_10 = 10, + IEEE80211_EDMG_BW_CONFIG_11 = 11, + IEEE80211_EDMG_BW_CONFIG_12 = 12, + IEEE80211_EDMG_BW_CONFIG_13 = 13, + IEEE80211_EDMG_BW_CONFIG_14 = 14, + IEEE80211_EDMG_BW_CONFIG_15 = 15, +}; + +struct ieee80211_edmg { + u8 channels; + enum ieee80211_edmg_bw_config bw_config; +}; + +struct ieee80211_channel; + +struct cfg80211_chan_def { + struct ieee80211_channel *chan; + enum nl80211_chan_width width; + u32 center_freq1; + u32 center_freq2; + struct ieee80211_edmg edmg; + u16 freq1_offset; + u16 punctured; +}; + +struct cfg80211_conn; + +struct cfg80211_cached_keys; + +struct cfg80211_cqm_config; + +struct cfg80211_internal_bss; + +struct wireless_dev { + struct wiphy *wiphy; + enum nl80211_iftype iftype; + struct list_head list; + struct net_device *netdev; + u32 identifier; + struct list_head 
mgmt_registrations; + u8 mgmt_registrations_need_update: 1; + bool use_4addr; + bool is_running; + bool registered; + bool registering; + short: 0; + u8 address[6]; + struct cfg80211_conn *conn; + struct cfg80211_cached_keys *connect_keys; + enum ieee80211_bss_type conn_bss_type; + u32 conn_owner_nlportid; + struct work_struct disconnect_wk; + u8 disconnect_bssid[6]; + struct list_head event_list; + spinlock_t event_lock; + u8 connected: 1; + bool ps; + int ps_timeout; + u32 ap_unexpected_nlportid; + u32 owner_nlportid; + bool nl_owner_dead; + struct wiphy_work cqm_rssi_work; + struct cfg80211_cqm_config *cqm_config; + struct list_head pmsr_list; + spinlock_t pmsr_lock; + struct work_struct pmsr_free_wk; + long unsigned int unprot_beacon_reported; + union { + struct { + u8 connected_addr[6]; + u8 ssid[32]; + u8 ssid_len; + long: 0; + } client; + struct { + int beacon_interval; + struct cfg80211_chan_def preset_chandef; + struct cfg80211_chan_def chandef; + u8 id[32]; + u8 id_len; + u8 id_up_len; + } mesh; + struct { + struct cfg80211_chan_def preset_chandef; + u8 ssid[32]; + u8 ssid_len; + } ap; + struct { + struct cfg80211_internal_bss *current_bss; + struct cfg80211_chan_def chandef; + int beacon_interval; + u8 ssid[32]; + u8 ssid_len; + } ibss; + struct { + struct cfg80211_chan_def chandef; + } ocb; + } u; + struct { + u8 addr[6]; + union { + struct { + unsigned int beacon_interval; + struct cfg80211_chan_def chandef; + } ap; + struct { + struct cfg80211_internal_bss *current_bss; + } client; + }; + bool cac_started; + long unsigned int cac_start_time; + unsigned int cac_time_ms; + } links[15]; + u16 valid_links; + u32 radio_mask; +}; + +struct ieee80211_hdr { + __le16 frame_control; + __le16 duration_id; + union { + struct { + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + }; + struct { + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + } addrs; + }; + __le16 seq_ctrl; + u8 addr4[6]; +}; + +struct ieee80211_msrment_ie { + u8 token; + u8 mode; + u8 type; + u8 request[0]; +}; + +struct ieee80211_ext_chansw_ie { + u8 mode; + u8 new_operating_class; + u8 new_ch_num; + u8 count; +}; + +struct ieee80211_tpc_report_ie { + u8 tx_power; + u8 link_margin; +}; + +struct ieee80211_mgmt { + __le16 frame_control; + __le16 duration; + u8 da[6]; + u8 sa[6]; + u8 bssid[6]; + __le16 seq_ctrl; + union { + struct { + __le16 auth_alg; + __le16 auth_transaction; + __le16 status_code; + u8 variable[0]; + } auth; + struct { + __le16 reason_code; + } deauth; + struct { + __le16 capab_info; + __le16 listen_interval; + u8 variable[0]; + } assoc_req; + struct { + __le16 capab_info; + __le16 status_code; + __le16 aid; + u8 variable[0]; + } assoc_resp; + struct { + __le16 capab_info; + __le16 status_code; + __le16 aid; + u8 variable[0]; + } reassoc_resp; + struct { + __le16 capab_info; + __le16 status_code; + u8 variable[0]; + } s1g_assoc_resp; + struct { + __le16 capab_info; + __le16 status_code; + u8 variable[0]; + } s1g_reassoc_resp; + struct { + __le16 capab_info; + __le16 listen_interval; + u8 current_ap[6]; + u8 variable[0]; + } reassoc_req; + struct { + __le16 reason_code; + } disassoc; + struct { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + u8 variable[0]; + } beacon; + struct { + struct { + struct {} __empty_variable; + u8 variable[0]; + }; + } probe_req; + struct { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + u8 variable[0]; + } probe_resp; + struct { + u8 category; + union { + struct { + u8 action_code; + u8 dialog_token; + u8 status_code; + u8 variable[0]; + } wme_action; 
+ struct { + u8 action_code; + u8 variable[0]; + } chan_switch; + struct { + u8 action_code; + struct ieee80211_ext_chansw_ie data; + u8 variable[0]; + } ext_chan_switch; + struct { + u8 action_code; + u8 dialog_token; + u8 element_id; + u8 length; + struct ieee80211_msrment_ie msr_elem; + } measurement; + struct { + u8 action_code; + u8 dialog_token; + __le16 capab; + __le16 timeout; + __le16 start_seq_num; + u8 variable[0]; + } addba_req; + struct { + u8 action_code; + u8 dialog_token; + __le16 status; + __le16 capab; + __le16 timeout; + u8 variable[0]; + } addba_resp; + struct { + u8 action_code; + __le16 params; + __le16 reason_code; + } __attribute__((packed)) delba; + struct { + u8 action_code; + u8 variable[0]; + } self_prot; + struct { + u8 action_code; + u8 variable[0]; + } mesh_action; + struct { + u8 action; + u8 trans_id[2]; + } sa_query; + struct { + u8 action; + u8 smps_control; + } ht_smps; + struct { + u8 action_code; + u8 chanwidth; + } ht_notify_cw; + struct { + u8 action_code; + u8 dialog_token; + __le16 capability; + u8 variable[0]; + } tdls_discover_resp; + struct { + u8 action_code; + u8 operating_mode; + } vht_opmode_notif; + struct { + u8 action_code; + u8 membership[8]; + u8 position[16]; + } vht_group_notif; + struct { + u8 action_code; + u8 dialog_token; + u8 tpc_elem_id; + u8 tpc_elem_length; + struct ieee80211_tpc_report_ie tpc; + } tpc_report; + struct { + u8 action_code; + u8 dialog_token; + u8 follow_up; + u8 tod[6]; + u8 toa[6]; + __le16 tod_error; + __le16 toa_error; + u8 variable[0]; + } __attribute__((packed)) ftm; + struct { + u8 action_code; + u8 variable[0]; + } s1g; + struct { + u8 action_code; + u8 dialog_token; + u8 follow_up; + u32 tod; + u32 toa; + u8 max_tod_error; + u8 max_toa_error; + } __attribute__((packed)) wnm_timing_msr; + struct { + u8 action_code; + u8 dialog_token; + u8 variable[0]; + } ttlm_req; + struct { + u8 action_code; + u8 dialog_token; + u8 status_code; + u8 variable[0]; + } ttlm_res; + struct { + u8 action_code; + } ttlm_tear_down; + } u; + } action; + struct { + struct {} __empty_body; + u8 body[0]; + }; + } u; +}; + +struct ieee80211_p2p_noa_desc { + u8 count; + __le32 duration; + __le32 interval; + __le32 start_time; +} __attribute__((packed)); + +struct ieee80211_p2p_noa_attr { + u8 index; + u8 oppps_ctwindow; + struct ieee80211_p2p_noa_desc desc[4]; +}; + +struct ieee80211_mcs_info { + u8 rx_mask[10]; + __le16 rx_highest; + u8 tx_params; + u8 reserved[3]; +}; + +struct ieee80211_ht_cap { + __le16 cap_info; + u8 ampdu_params_info; + struct ieee80211_mcs_info mcs; + __le16 extended_ht_cap_info; + __le32 tx_BF_cap_info; + u8 antenna_selection_info; +} __attribute__((packed)); + +struct ieee80211_vht_mcs_info { + __le16 rx_mcs_map; + __le16 rx_highest; + __le16 tx_mcs_map; + __le16 tx_highest; +}; + +struct ieee80211_vht_cap { + __le32 vht_cap_info; + struct ieee80211_vht_mcs_info supp_mcs; +}; + +struct ieee80211_he_cap_elem { + u8 mac_cap_info[6]; + u8 phy_cap_info[11]; +}; + +struct ieee80211_he_mcs_nss_supp { + __le16 rx_mcs_80; + __le16 tx_mcs_80; + __le16 rx_mcs_160; + __le16 tx_mcs_160; + __le16 rx_mcs_80p80; + __le16 tx_mcs_80p80; +}; + +struct ieee80211_eht_mcs_nss_supp_20mhz_only { + union { + struct { + u8 rx_tx_mcs7_max_nss; + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; + }; + u8 rx_tx_max_nss[4]; + }; +}; + +struct ieee80211_eht_mcs_nss_supp_bw { + union { + struct { + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; + }; + u8 rx_tx_max_nss[3]; + }; +}; + 
+struct ieee80211_eht_cap_elem_fixed { + u8 mac_cap_info[2]; + u8 phy_cap_info[9]; +}; + +enum ieee80211_eid { + WLAN_EID_SSID = 0, + WLAN_EID_SUPP_RATES = 1, + WLAN_EID_FH_PARAMS = 2, + WLAN_EID_DS_PARAMS = 3, + WLAN_EID_CF_PARAMS = 4, + WLAN_EID_TIM = 5, + WLAN_EID_IBSS_PARAMS = 6, + WLAN_EID_COUNTRY = 7, + WLAN_EID_REQUEST = 10, + WLAN_EID_QBSS_LOAD = 11, + WLAN_EID_EDCA_PARAM_SET = 12, + WLAN_EID_TSPEC = 13, + WLAN_EID_TCLAS = 14, + WLAN_EID_SCHEDULE = 15, + WLAN_EID_CHALLENGE = 16, + WLAN_EID_PWR_CONSTRAINT = 32, + WLAN_EID_PWR_CAPABILITY = 33, + WLAN_EID_TPC_REQUEST = 34, + WLAN_EID_TPC_REPORT = 35, + WLAN_EID_SUPPORTED_CHANNELS = 36, + WLAN_EID_CHANNEL_SWITCH = 37, + WLAN_EID_MEASURE_REQUEST = 38, + WLAN_EID_MEASURE_REPORT = 39, + WLAN_EID_QUIET = 40, + WLAN_EID_IBSS_DFS = 41, + WLAN_EID_ERP_INFO = 42, + WLAN_EID_TS_DELAY = 43, + WLAN_EID_TCLAS_PROCESSING = 44, + WLAN_EID_HT_CAPABILITY = 45, + WLAN_EID_QOS_CAPA = 46, + WLAN_EID_RSN = 48, + WLAN_EID_802_15_COEX = 49, + WLAN_EID_EXT_SUPP_RATES = 50, + WLAN_EID_AP_CHAN_REPORT = 51, + WLAN_EID_NEIGHBOR_REPORT = 52, + WLAN_EID_RCPI = 53, + WLAN_EID_MOBILITY_DOMAIN = 54, + WLAN_EID_FAST_BSS_TRANSITION = 55, + WLAN_EID_TIMEOUT_INTERVAL = 56, + WLAN_EID_RIC_DATA = 57, + WLAN_EID_DSE_REGISTERED_LOCATION = 58, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59, + WLAN_EID_EXT_CHANSWITCH_ANN = 60, + WLAN_EID_HT_OPERATION = 61, + WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62, + WLAN_EID_BSS_AVG_ACCESS_DELAY = 63, + WLAN_EID_ANTENNA_INFO = 64, + WLAN_EID_RSNI = 65, + WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66, + WLAN_EID_BSS_AVAILABLE_CAPACITY = 67, + WLAN_EID_BSS_AC_ACCESS_DELAY = 68, + WLAN_EID_TIME_ADVERTISEMENT = 69, + WLAN_EID_RRM_ENABLED_CAPABILITIES = 70, + WLAN_EID_MULTIPLE_BSSID = 71, + WLAN_EID_BSS_COEX_2040 = 72, + WLAN_EID_BSS_INTOLERANT_CHL_REPORT = 73, + WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74, + WLAN_EID_RIC_DESCRIPTOR = 75, + WLAN_EID_MMIE = 76, + WLAN_EID_ASSOC_COMEBACK_TIME = 77, + WLAN_EID_EVENT_REQUEST = 78, + WLAN_EID_EVENT_REPORT = 79, + WLAN_EID_DIAGNOSTIC_REQUEST = 80, + WLAN_EID_DIAGNOSTIC_REPORT = 81, + WLAN_EID_LOCATION_PARAMS = 82, + WLAN_EID_NON_TX_BSSID_CAP = 83, + WLAN_EID_SSID_LIST = 84, + WLAN_EID_MULTI_BSSID_IDX = 85, + WLAN_EID_FMS_DESCRIPTOR = 86, + WLAN_EID_FMS_REQUEST = 87, + WLAN_EID_FMS_RESPONSE = 88, + WLAN_EID_QOS_TRAFFIC_CAPA = 89, + WLAN_EID_BSS_MAX_IDLE_PERIOD = 90, + WLAN_EID_TSF_REQUEST = 91, + WLAN_EID_TSF_RESPOSNE = 92, + WLAN_EID_WNM_SLEEP_MODE = 93, + WLAN_EID_TIM_BCAST_REQ = 94, + WLAN_EID_TIM_BCAST_RESP = 95, + WLAN_EID_COLL_IF_REPORT = 96, + WLAN_EID_CHANNEL_USAGE = 97, + WLAN_EID_TIME_ZONE = 98, + WLAN_EID_DMS_REQUEST = 99, + WLAN_EID_DMS_RESPONSE = 100, + WLAN_EID_LINK_ID = 101, + WLAN_EID_WAKEUP_SCHEDUL = 102, + WLAN_EID_CHAN_SWITCH_TIMING = 104, + WLAN_EID_PTI_CONTROL = 105, + WLAN_EID_PU_BUFFER_STATUS = 106, + WLAN_EID_INTERWORKING = 107, + WLAN_EID_ADVERTISEMENT_PROTOCOL = 108, + WLAN_EID_EXPEDITED_BW_REQ = 109, + WLAN_EID_QOS_MAP_SET = 110, + WLAN_EID_ROAMING_CONSORTIUM = 111, + WLAN_EID_EMERGENCY_ALERT = 112, + WLAN_EID_MESH_CONFIG = 113, + WLAN_EID_MESH_ID = 114, + WLAN_EID_LINK_METRIC_REPORT = 115, + WLAN_EID_CONGESTION_NOTIFICATION = 116, + WLAN_EID_PEER_MGMT = 117, + WLAN_EID_CHAN_SWITCH_PARAM = 118, + WLAN_EID_MESH_AWAKE_WINDOW = 119, + WLAN_EID_BEACON_TIMING = 120, + WLAN_EID_MCCAOP_SETUP_REQ = 121, + WLAN_EID_MCCAOP_SETUP_RESP = 122, + WLAN_EID_MCCAOP_ADVERT = 123, + WLAN_EID_MCCAOP_TEARDOWN = 124, + WLAN_EID_GANN = 125, + WLAN_EID_RANN = 126, + WLAN_EID_EXT_CAPABILITY = 127, + WLAN_EID_PREQ = 
130, + WLAN_EID_PREP = 131, + WLAN_EID_PERR = 132, + WLAN_EID_PXU = 137, + WLAN_EID_PXUC = 138, + WLAN_EID_AUTH_MESH_PEER_EXCH = 139, + WLAN_EID_MIC = 140, + WLAN_EID_DESTINATION_URI = 141, + WLAN_EID_UAPSD_COEX = 142, + WLAN_EID_WAKEUP_SCHEDULE = 143, + WLAN_EID_EXT_SCHEDULE = 144, + WLAN_EID_STA_AVAILABILITY = 145, + WLAN_EID_DMG_TSPEC = 146, + WLAN_EID_DMG_AT = 147, + WLAN_EID_DMG_CAP = 148, + WLAN_EID_CISCO_VENDOR_SPECIFIC = 150, + WLAN_EID_DMG_OPERATION = 151, + WLAN_EID_DMG_BSS_PARAM_CHANGE = 152, + WLAN_EID_DMG_BEAM_REFINEMENT = 153, + WLAN_EID_CHANNEL_MEASURE_FEEDBACK = 154, + WLAN_EID_AWAKE_WINDOW = 157, + WLAN_EID_MULTI_BAND = 158, + WLAN_EID_ADDBA_EXT = 159, + WLAN_EID_NEXT_PCP_LIST = 160, + WLAN_EID_PCP_HANDOVER = 161, + WLAN_EID_DMG_LINK_MARGIN = 162, + WLAN_EID_SWITCHING_STREAM = 163, + WLAN_EID_SESSION_TRANSITION = 164, + WLAN_EID_DYN_TONE_PAIRING_REPORT = 165, + WLAN_EID_CLUSTER_REPORT = 166, + WLAN_EID_RELAY_CAP = 167, + WLAN_EID_RELAY_XFER_PARAM_SET = 168, + WLAN_EID_BEAM_LINK_MAINT = 169, + WLAN_EID_MULTIPLE_MAC_ADDR = 170, + WLAN_EID_U_PID = 171, + WLAN_EID_DMG_LINK_ADAPT_ACK = 172, + WLAN_EID_MCCAOP_ADV_OVERVIEW = 174, + WLAN_EID_QUIET_PERIOD_REQ = 175, + WLAN_EID_QUIET_PERIOD_RESP = 177, + WLAN_EID_EPAC_POLICY = 182, + WLAN_EID_CLISTER_TIME_OFF = 183, + WLAN_EID_INTER_AC_PRIO = 184, + WLAN_EID_SCS_DESCRIPTOR = 185, + WLAN_EID_QLOAD_REPORT = 186, + WLAN_EID_HCCA_TXOP_UPDATE_COUNT = 187, + WLAN_EID_HL_STREAM_ID = 188, + WLAN_EID_GCR_GROUP_ADDR = 189, + WLAN_EID_ANTENNA_SECTOR_ID_PATTERN = 190, + WLAN_EID_VHT_CAPABILITY = 191, + WLAN_EID_VHT_OPERATION = 192, + WLAN_EID_EXTENDED_BSS_LOAD = 193, + WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, + WLAN_EID_TX_POWER_ENVELOPE = 195, + WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, + WLAN_EID_AID = 197, + WLAN_EID_QUIET_CHANNEL = 198, + WLAN_EID_OPMODE_NOTIF = 199, + WLAN_EID_REDUCED_NEIGHBOR_REPORT = 201, + WLAN_EID_AID_REQUEST = 210, + WLAN_EID_AID_RESPONSE = 211, + WLAN_EID_S1G_BCN_COMPAT = 213, + WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214, + WLAN_EID_S1G_TWT = 216, + WLAN_EID_S1G_CAPABILITIES = 217, + WLAN_EID_VENDOR_SPECIFIC = 221, + WLAN_EID_QOS_PARAMETER = 222, + WLAN_EID_S1G_OPERATION = 232, + WLAN_EID_CAG_NUMBER = 237, + WLAN_EID_AP_CSN = 239, + WLAN_EID_FILS_INDICATION = 240, + WLAN_EID_DILS = 241, + WLAN_EID_FRAGMENT = 242, + WLAN_EID_RSNX = 244, + WLAN_EID_EXTENSION = 255, +}; + +struct ieee80211_he_6ghz_capa { + __le16 capa; +}; + +enum rfkill_hard_block_reasons { + RFKILL_HARD_BLOCK_SIGNAL = 1, + RFKILL_HARD_BLOCK_NOT_OWNER = 2, +}; + +enum nl80211_he_gi { + NL80211_RATE_INFO_HE_GI_0_8 = 0, + NL80211_RATE_INFO_HE_GI_1_6 = 1, + NL80211_RATE_INFO_HE_GI_3_2 = 2, +}; + +enum nl80211_he_ltf { + NL80211_RATE_INFO_HE_1XLTF = 0, + NL80211_RATE_INFO_HE_2XLTF = 1, + NL80211_RATE_INFO_HE_4XLTF = 2, +}; + +enum nl80211_reg_initiator { + NL80211_REGDOM_SET_BY_CORE = 0, + NL80211_REGDOM_SET_BY_USER = 1, + NL80211_REGDOM_SET_BY_DRIVER = 2, + NL80211_REGDOM_SET_BY_COUNTRY_IE = 3, +}; + +enum nl80211_dfs_regions { + NL80211_DFS_UNSET = 0, + NL80211_DFS_FCC = 1, + NL80211_DFS_ETSI = 2, + NL80211_DFS_JP = 3, +}; + +enum nl80211_user_reg_hint_type { + NL80211_USER_REG_HINT_USER = 0, + NL80211_USER_REG_HINT_CELL_BASE = 1, + NL80211_USER_REG_HINT_INDOOR = 2, +}; + +enum nl80211_txrate_gi { + NL80211_TXRATE_DEFAULT_GI = 0, + NL80211_TXRATE_FORCE_SGI = 1, + NL80211_TXRATE_FORCE_LGI = 2, +}; + +enum nl80211_band { + NL80211_BAND_2GHZ = 0, + NL80211_BAND_5GHZ = 1, + NL80211_BAND_60GHZ = 2, + NL80211_BAND_6GHZ = 3, + NL80211_BAND_S1GHZ = 4, + NL80211_BAND_LC = 
5, + NUM_NL80211_BANDS = 6, +}; + +enum nl80211_tx_power_setting { + NL80211_TX_POWER_AUTOMATIC = 0, + NL80211_TX_POWER_LIMITED = 1, + NL80211_TX_POWER_FIXED = 2, +}; + +struct nl80211_wowlan_tcp_data_seq { + __u32 start; + __u32 offset; + __u32 len; +}; + +struct nl80211_wowlan_tcp_data_token { + __u32 offset; + __u32 len; + __u8 token_stream[0]; +}; + +struct nl80211_wowlan_tcp_data_token_feature { + __u32 min_len; + __u32 max_len; + __u32 bufsize; +}; + +enum nl80211_dfs_state { + NL80211_DFS_USABLE = 0, + NL80211_DFS_UNAVAILABLE = 1, + NL80211_DFS_AVAILABLE = 2, +}; + +struct nl80211_vendor_cmd_info { + __u32 vendor_id; + __u32 subcmd; +}; + +enum nl80211_sar_type { + NL80211_SAR_TYPE_POWER = 0, + NUM_NL80211_SAR_TYPE = 1, +}; + +struct rfkill; + +enum environment_cap { + ENVIRON_ANY = 0, + ENVIRON_INDOOR = 1, + ENVIRON_OUTDOOR = 2, +}; + +struct regulatory_request { + struct callback_head callback_head; + int wiphy_idx; + enum nl80211_reg_initiator initiator; + enum nl80211_user_reg_hint_type user_reg_hint_type; + char alpha2[3]; + enum nl80211_dfs_regions dfs_region; + bool intersect; + bool processed; + enum environment_cap country_ie_env; + struct list_head list; +}; + +struct ieee80211_freq_range { + u32 start_freq_khz; + u32 end_freq_khz; + u32 max_bandwidth_khz; +}; + +struct ieee80211_power_rule { + u32 max_antenna_gain; + u32 max_eirp; +}; + +struct ieee80211_wmm_ac { + u16 cw_min; + u16 cw_max; + u16 cot; + u8 aifsn; +}; + +struct ieee80211_wmm_rule { + struct ieee80211_wmm_ac client[4]; + struct ieee80211_wmm_ac ap[4]; +}; + +struct ieee80211_reg_rule { + struct ieee80211_freq_range freq_range; + struct ieee80211_power_rule power_rule; + struct ieee80211_wmm_rule wmm_rule; + u32 flags; + u32 dfs_cac_ms; + bool has_wmm; + s8 psd; +}; + +struct ieee80211_regdomain { + struct callback_head callback_head; + u32 n_reg_rules; + char alpha2[3]; + enum nl80211_dfs_regions dfs_region; + struct ieee80211_reg_rule reg_rules[0]; +}; + +struct ieee80211_channel { + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + u16 hw_value; + u32 flags; + int max_antenna_gain; + int max_power; + int max_reg_power; + bool beacon_found; + u32 orig_flags; + int orig_mag; + int orig_mpwr; + enum nl80211_dfs_state dfs_state; + long unsigned int dfs_state_entered; + unsigned int dfs_cac_ms; + s8 psd; +}; + +struct ieee80211_rate { + u32 flags; + u16 bitrate; + u16 hw_value; + u16 hw_value_short; +}; + +struct ieee80211_he_obss_pd { + bool enable; + u8 sr_ctrl; + u8 non_srg_max_offset; + u8 min_offset; + u8 max_offset; + u8 bss_color_bitmap[8]; + u8 partial_bssid_bitmap[8]; +}; + +struct cfg80211_he_bss_color { + u8 color; + bool enabled; + bool partial; +}; + +struct ieee80211_sta_ht_cap { + u16 cap; + bool ht_supported; + u8 ampdu_factor; + u8 ampdu_density; + struct ieee80211_mcs_info mcs; + short: 0; +} __attribute__((packed)); + +struct ieee80211_sta_vht_cap { + bool vht_supported; + u32 cap; + struct ieee80211_vht_mcs_info vht_mcs; +}; + +struct ieee80211_sta_he_cap { + bool has_he; + struct ieee80211_he_cap_elem he_cap_elem; + struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; + u8 ppe_thres[25]; +} __attribute__((packed)); + +struct ieee80211_eht_mcs_nss_supp { + union { + struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz; + struct { + struct ieee80211_eht_mcs_nss_supp_bw _80; + struct ieee80211_eht_mcs_nss_supp_bw _160; + struct ieee80211_eht_mcs_nss_supp_bw _320; + } bw; + }; +}; + +struct ieee80211_sta_eht_cap { + bool has_eht; + struct ieee80211_eht_cap_elem_fixed 
eht_cap_elem; + struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp; + u8 eht_ppe_thres[32]; +}; + +struct ieee80211_sband_iftype_data { + u16 types_mask; + struct ieee80211_sta_he_cap he_cap; + struct ieee80211_he_6ghz_capa he_6ghz_capa; + struct ieee80211_sta_eht_cap eht_cap; + struct { + const u8 *data; + unsigned int len; + } vendor_elems; +} __attribute__((packed)); + +struct ieee80211_sta_s1g_cap { + bool s1g; + u8 cap[10]; + u8 nss_mcs[5]; +}; + +struct ieee80211_supported_band { + struct ieee80211_channel *channels; + struct ieee80211_rate *bitrates; + enum nl80211_band band; + int n_channels; + int n_bitrates; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_s1g_cap s1g_cap; + struct ieee80211_edmg edmg_cap; + u16 n_iftype_data; + const struct ieee80211_sband_iftype_data *iftype_data; +}; + +struct cfg80211_bitrate_mask { + struct { + u32 legacy; + u8 ht_mcs[10]; + u16 vht_mcs[8]; + u16 he_mcs[8]; + enum nl80211_txrate_gi gi; + enum nl80211_he_gi he_gi; + enum nl80211_he_ltf he_ltf; + } control[6]; +}; + +struct mac_address { + u8 addr[6]; +}; + +struct cfg80211_sar_freq_ranges { + u32 start_freq; + u32 end_freq; +}; + +struct cfg80211_sar_capa { + enum nl80211_sar_type type; + u32 num_freq_ranges; + const struct cfg80211_sar_freq_ranges *freq_ranges; +}; + +struct cfg80211_ssid { + u8 ssid[32]; + u8 ssid_len; +}; + +struct cfg80211_scan_info { + u64 scan_start_tsf; + u8 tsf_bssid[6]; + bool aborted; +}; + +struct cfg80211_scan_6ghz_params { + u32 short_ssid; + u32 channel_idx; + u8 bssid[6]; + bool unsolicited_probe; + bool short_ssid_valid; + bool psc_no_listen; + s8 psd_20; +}; + +struct cfg80211_scan_request { + struct cfg80211_ssid *ssids; + int n_ssids; + u32 n_channels; + const u8 *ie; + size_t ie_len; + u16 duration; + bool duration_mandatory; + u32 flags; + u32 rates[6]; + struct wireless_dev *wdev; + u8 mac_addr[6]; + u8 mac_addr_mask[6]; + u8 bssid[6]; + struct wiphy *wiphy; + long unsigned int scan_start; + long: 32; + struct cfg80211_scan_info info; + bool notified; + bool no_cck; + bool scan_6ghz; + u32 n_6ghz_params; + struct cfg80211_scan_6ghz_params *scan_6ghz_params; + s8 tsf_report_link_id; + struct ieee80211_channel *channels[0]; +}; + +enum cfg80211_signal_type { + CFG80211_SIGNAL_TYPE_NONE = 0, + CFG80211_SIGNAL_TYPE_MBM = 1, + CFG80211_SIGNAL_TYPE_UNSPEC = 2, +}; + +struct ieee80211_txrx_stypes; + +struct ieee80211_iface_combination; + +struct wiphy_iftype_akm_suites; + +struct wiphy_wowlan_support; + +struct cfg80211_wowlan; + +struct wiphy_iftype_ext_capab; + +struct wiphy_coalesce_support; + +struct wiphy_vendor_command; + +struct cfg80211_pmsr_capabilities; + +struct wiphy_radio; + +struct wiphy { + struct mutex mtx; + u8 perm_addr[6]; + u8 addr_mask[6]; + struct mac_address *addresses; + const struct ieee80211_txrx_stypes *mgmt_stypes; + const struct ieee80211_iface_combination *iface_combinations; + int n_iface_combinations; + u16 software_iftypes; + u16 n_addresses; + u16 interface_modes; + u16 max_acl_mac_addrs; + u32 flags; + u32 regulatory_flags; + u32 features; + u8 ext_features[9]; + u32 ap_sme_capa; + enum cfg80211_signal_type signal_type; + int bss_priv_size; + u8 max_scan_ssids; + u8 max_sched_scan_reqs; + u8 max_sched_scan_ssids; + u8 max_match_sets; + u16 max_scan_ie_len; + u16 max_sched_scan_ie_len; + u32 max_sched_scan_plans; + u32 max_sched_scan_plan_interval; + u32 max_sched_scan_plan_iterations; + int n_cipher_suites; + const u32 *cipher_suites; + int n_akm_suites; + const u32 
*akm_suites; + const struct wiphy_iftype_akm_suites *iftype_akm_suites; + unsigned int num_iftype_akm_suites; + u8 retry_short; + u8 retry_long; + u32 frag_threshold; + u32 rts_threshold; + u8 coverage_class; + char fw_version[32]; + u32 hw_version; + const struct wiphy_wowlan_support *wowlan; + struct cfg80211_wowlan *wowlan_config; + u16 max_remain_on_channel_duration; + u8 max_num_pmkids; + u32 available_antennas_tx; + u32 available_antennas_rx; + u32 probe_resp_offload; + const u8 *extended_capabilities; + const u8 *extended_capabilities_mask; + u8 extended_capabilities_len; + const struct wiphy_iftype_ext_capab *iftype_ext_capab; + unsigned int num_iftype_ext_capab; + const void *privid; + struct ieee80211_supported_band *bands[6]; + void (*reg_notifier)(struct wiphy *, struct regulatory_request *); + const struct ieee80211_regdomain *regd; + long: 32; + struct device dev; + bool registered; + struct dentry *debugfsdir; + const struct ieee80211_ht_cap *ht_capa_mod_mask; + const struct ieee80211_vht_cap *vht_capa_mod_mask; + struct list_head wdev_list; + possible_net_t _net; + const struct wiphy_coalesce_support *coalesce; + const struct wiphy_vendor_command *vendor_commands; + const struct nl80211_vendor_cmd_info *vendor_events; + int n_vendor_commands; + int n_vendor_events; + u16 max_ap_assoc_sta; + u8 max_num_csa_counters; + u32 bss_select_support; + u8 nan_supported_bands; + u32 txq_limit; + u32 txq_memory_limit; + u32 txq_quantum; + long unsigned int tx_queue_len; + u8 support_mbssid: 1; + u8 support_only_he_mbssid: 1; + const struct cfg80211_pmsr_capabilities *pmsr_capa; + long: 32; + struct { + u64 peer; + u64 vif; + u8 max_retry; + long: 32; + } tid_config_support; + u8 max_data_retry_count; + const struct cfg80211_sar_capa *sar_capa; + struct rfkill *rfkill; + u8 mbssid_max_interfaces; + u8 ema_max_profile_periodicity; + u16 max_num_akm_suites; + u16 hw_timestamp_max_peers; + int n_radio; + const struct wiphy_radio *radio; + long: 32; + long: 32; + long: 32; + char priv[0]; +}; + +struct cfg80211_match_set { + struct cfg80211_ssid ssid; + u8 bssid[6]; + s32 rssi_thold; +}; + +struct cfg80211_sched_scan_plan { + u32 interval; + u32 iterations; +}; + +struct cfg80211_bss_select_adjust { + enum nl80211_band band; + s8 delta; +}; + +struct cfg80211_sched_scan_request { + u64 reqid; + struct cfg80211_ssid *ssids; + int n_ssids; + u32 n_channels; + const u8 *ie; + size_t ie_len; + u32 flags; + struct cfg80211_match_set *match_sets; + int n_match_sets; + s32 min_rssi_thold; + u32 delay; + struct cfg80211_sched_scan_plan *scan_plans; + int n_scan_plans; + u8 mac_addr[6]; + u8 mac_addr_mask[6]; + bool relative_rssi_set; + s8 relative_rssi; + struct cfg80211_bss_select_adjust rssi_adjust; + struct wiphy *wiphy; + struct net_device *dev; + long unsigned int scan_start; + bool report_results; + struct callback_head callback_head; + u32 owner_nlportid; + bool nl_owner_dead; + struct list_head list; + struct ieee80211_channel *channels[0]; +}; + +struct cfg80211_bss_ies { + u64 tsf; + struct callback_head callback_head; + int len; + bool from_beacon; + u8 data[0]; +}; + +struct cfg80211_bss { + struct ieee80211_channel *channel; + const struct cfg80211_bss_ies *ies; + const struct cfg80211_bss_ies *beacon_ies; + const struct cfg80211_bss_ies *proberesp_ies; + struct cfg80211_bss *hidden_beacon_bss; + struct cfg80211_bss *transmitted_bss; + struct list_head nontrans_list; + s32 signal; + u16 beacon_interval; + u16 capability; + u8 bssid[6]; + u8 chains; + s8 chain_signal[4]; + u8 
proberesp_ecsa_stuck: 1; + u8 bssid_index; + u8 max_bssid_indicator; + u8 use_for; + u8 cannot_use_reasons; + u8 priv[0]; +}; + +struct cfg80211_pkt_pattern { + const u8 *mask; + const u8 *pattern; + int pattern_len; + int pkt_offset; +}; + +struct cfg80211_wowlan_tcp { + struct socket *sock; + __be32 src; + __be32 dst; + u16 src_port; + u16 dst_port; + u8 dst_mac[6]; + int payload_len; + const u8 *payload; + struct nl80211_wowlan_tcp_data_seq payload_seq; + u32 data_interval; + u32 wake_len; + const u8 *wake_data; + const u8 *wake_mask; + u32 tokens_size; + struct nl80211_wowlan_tcp_data_token payload_tok; +}; + +struct cfg80211_wowlan { + bool any; + bool disconnect; + bool magic_pkt; + bool gtk_rekey_failure; + bool eap_identity_req; + bool four_way_handshake; + bool rfkill_release; + struct cfg80211_pkt_pattern *patterns; + struct cfg80211_wowlan_tcp *tcp; + int n_patterns; + struct cfg80211_sched_scan_request *nd_config; +}; + +struct ieee80211_iface_limit { + u16 max; + u16 types; +}; + +struct ieee80211_iface_combination { + const struct ieee80211_iface_limit *limits; + u32 num_different_channels; + u16 max_interfaces; + u8 n_limits; + bool beacon_int_infra_match; + u8 radar_detect_widths; + u8 radar_detect_regions; + u32 beacon_int_min_gcd; +}; + +struct ieee80211_txrx_stypes { + u16 tx; + u16 rx; +}; + +struct wiphy_wowlan_tcp_support { + const struct nl80211_wowlan_tcp_data_token_feature *tok; + u32 data_payload_max; + u32 data_interval_max; + u32 wake_payload_max; + bool seq; +}; + +struct wiphy_wowlan_support { + u32 flags; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; + int max_nd_match_sets; + const struct wiphy_wowlan_tcp_support *tcp; +}; + +struct wiphy_coalesce_support { + int n_rules; + int max_delay; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; +}; + +struct wiphy_vendor_command { + struct nl80211_vendor_cmd_info info; + u32 flags; + int (*doit)(struct wiphy *, struct wireless_dev *, const void *, int); + int (*dumpit)(struct wiphy *, struct wireless_dev *, struct sk_buff *, const void *, int, long unsigned int *); + const struct nla_policy *policy; + unsigned int maxattr; +}; + +struct wiphy_iftype_ext_capab { + enum nl80211_iftype iftype; + const u8 *extended_capabilities; + const u8 *extended_capabilities_mask; + u8 extended_capabilities_len; + u16 eml_capabilities; + u16 mld_capa_and_ops; +}; + +struct cfg80211_pmsr_capabilities { + unsigned int max_peers; + u8 report_ap_tsf: 1; + u8 randomize_mac_addr: 1; + struct { + u32 preambles; + u32 bandwidths; + s8 max_bursts_exponent; + u8 max_ftms_per_burst; + u8 supported: 1; + u8 asap: 1; + u8 non_asap: 1; + u8 request_lci: 1; + u8 request_civicloc: 1; + u8 trigger_based: 1; + u8 non_trigger_based: 1; + } ftm; +}; + +struct wiphy_iftype_akm_suites { + u16 iftypes_mask; + const u32 *akm_suites; + int n_akm_suites; +}; + +struct wiphy_radio_freq_range { + u32 start_freq; + u32 end_freq; +}; + +struct wiphy_radio { + const struct wiphy_radio_freq_range *freq_range; + int n_freq_range; + const struct ieee80211_iface_combination *iface_combinations; + int n_iface_combinations; + u32 antenna_mask; +}; + +enum ieee80211_ap_reg_power { + IEEE80211_REG_UNSET_AP = 0, + IEEE80211_REG_LPI_AP = 1, + IEEE80211_REG_SP_AP = 2, + IEEE80211_REG_VLP_AP = 3, +}; + +typedef u32 codel_time_t; + +struct ieee80211_chan_req { + struct cfg80211_chan_def oper; + struct cfg80211_chan_def ap; +}; + +struct ieee80211_chanctx_conf { + struct cfg80211_chan_def def; + 
struct cfg80211_chan_def min_def; + struct cfg80211_chan_def ap; + int radio_idx; + u8 rx_chains_static; + u8 rx_chains_dynamic; + bool radar_enabled; + long: 0; + u8 drv_priv[0]; +}; + +struct ieee80211_vif_cfg { + bool assoc; + bool ibss_joined; + bool ibss_creator; + bool ps; + u16 aid; + u16 eml_cap; + u16 eml_med_sync_delay; + u16 mld_capa_op; + __be32 arp_addr_list[4]; + int arp_addr_cnt; + u8 ssid[32]; + size_t ssid_len; + bool s1g; + bool idle; + u8 ap_addr[6]; +}; + +struct ieee80211_mu_group_data { + u8 membership[8]; + u8 position[16]; +}; + +struct ieee80211_fils_discovery { + u32 min_interval; + u32 max_interval; +}; + +struct ieee80211_parsed_tpe_eirp { + bool valid; + s8 power[5]; + u8 count; +}; + +struct ieee80211_parsed_tpe_psd { + bool valid; + s8 power[16]; + u8 count; + u8 n; +}; + +struct ieee80211_parsed_tpe { + struct ieee80211_parsed_tpe_eirp max_local[2]; + struct ieee80211_parsed_tpe_eirp max_reg_client[2]; + struct ieee80211_parsed_tpe_psd psd_local[2]; + struct ieee80211_parsed_tpe_psd psd_reg_client[2]; +}; + +struct ieee80211_vif; + +struct ieee80211_ftm_responder_params; + +struct ieee80211_bss_conf { + struct ieee80211_vif *vif; + struct cfg80211_bss *bss; + const u8 *bssid; + unsigned int link_id; + u8 addr[6]; + u8 htc_trig_based_pkt_ext; + bool uora_exists; + u8 uora_ocw_range; + u16 frame_time_rts_th; + bool he_support; + bool twt_requester; + bool twt_responder; + bool twt_protected; + bool twt_broadcast; + bool use_cts_prot; + bool use_short_preamble; + bool use_short_slot; + bool enable_beacon; + u8 dtim_period; + u16 beacon_int; + u16 assoc_capability; + long: 32; + u64 sync_tsf; + u32 sync_device_ts; + u8 sync_dtim_count; + u32 basic_rates; + struct ieee80211_rate *beacon_rate; + int mcast_rate[6]; + u16 ht_operation_mode; + s32 cqm_rssi_thold; + u32 cqm_rssi_hyst; + s32 cqm_rssi_low; + s32 cqm_rssi_high; + struct ieee80211_chan_req chanreq; + struct ieee80211_mu_group_data mu_group; + bool qos; + bool hidden_ssid; + int txpower; + enum nl80211_tx_power_setting txpower_type; + struct ieee80211_p2p_noa_attr p2p_noa_attr; + bool allow_p2p_go_ps; + u16 max_idle_period; + bool protected_keep_alive; + bool ftm_responder; + struct ieee80211_ftm_responder_params *ftmr_params; + bool nontransmitted; + u8 transmitter_bssid[6]; + u8 bssid_index; + u8 bssid_indicator; + bool ema_ap; + u8 profile_periodicity; + struct { + u32 params; + u16 nss_set; + } he_oper; + struct ieee80211_he_obss_pd he_obss_pd; + struct cfg80211_he_bss_color he_bss_color; + struct ieee80211_fils_discovery fils_discovery; + u32 unsol_bcast_probe_resp_interval; + struct cfg80211_bitrate_mask beacon_tx_rate; + enum ieee80211_ap_reg_power power_type; + struct ieee80211_parsed_tpe tpe; + u8 pwr_reduction; + bool eht_support; + bool csa_active; + bool mu_mimo_owner; + struct ieee80211_chanctx_conf *chanctx_conf; + bool color_change_active; + u8 color_change_color; + bool ht_ldpc; + bool vht_ldpc; + bool he_ldpc; + bool vht_su_beamformer; + bool vht_su_beamformee; + bool vht_mu_beamformer; + bool vht_mu_beamformee; + bool he_su_beamformer; + bool he_su_beamformee; + bool he_mu_beamformer; + bool he_full_ul_mumimo; + bool eht_su_beamformer; + bool eht_su_beamformee; + bool eht_mu_beamformer; + bool eht_80mhz_full_bw_ul_mumimo; + u8 bss_param_ch_cnt; + u8 bss_param_ch_cnt_link_id; +}; + +struct ieee80211_neg_ttlm { + u16 downlink[8]; + u16 uplink[8]; + bool valid; +}; + +struct ieee80211_txq; + +struct ieee80211_vif { + enum nl80211_iftype type; + struct ieee80211_vif_cfg cfg; + struct 
ieee80211_bss_conf bss_conf; + struct ieee80211_bss_conf *link_conf[15]; + u16 valid_links; + u16 active_links; + u16 dormant_links; + u16 suspended_links; + struct ieee80211_neg_ttlm neg_ttlm; + u8 addr[6]; + bool p2p; + u8 cab_queue; + u8 hw_queue[4]; + struct ieee80211_txq *txq; + netdev_features_t netdev_features; + u32 driver_flags; + u32 offload_flags; + bool probe_req_reg; + bool rx_mcast_action_reg; + struct ieee80211_vif *mbssid_tx_vif; + u8 drv_priv[0]; +}; + +enum ieee80211_smps_mode { + IEEE80211_SMPS_AUTOMATIC = 0, + IEEE80211_SMPS_OFF = 1, + IEEE80211_SMPS_STATIC = 2, + IEEE80211_SMPS_DYNAMIC = 3, + IEEE80211_SMPS_NUM_MODES = 4, +}; + +struct ieee80211_sta_aggregates { + u16 max_amsdu_len; + u16 max_rc_amsdu_len; + u16 max_tid_amsdu_len[16]; +}; + +enum ieee80211_sta_rx_bandwidth { + IEEE80211_STA_RX_BW_20 = 0, + IEEE80211_STA_RX_BW_40 = 1, + IEEE80211_STA_RX_BW_80 = 2, + IEEE80211_STA_RX_BW_160 = 3, + IEEE80211_STA_RX_BW_320 = 4, +}; + +struct ieee80211_sta_txpwr { + s16 power; + enum nl80211_tx_power_setting type; +}; + +struct ieee80211_sta; + +struct ieee80211_link_sta { + struct ieee80211_sta *sta; + u8 addr[6]; + u8 link_id; + long: 0; + enum ieee80211_smps_mode smps_mode; + u32 supp_rates[6]; + struct ieee80211_sta_ht_cap ht_cap; + long: 0; + struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_he_cap he_cap; + struct ieee80211_he_6ghz_capa he_6ghz_capa; + struct ieee80211_sta_eht_cap eht_cap; + struct ieee80211_sta_aggregates agg; + u8 rx_nss; + long: 0; + enum ieee80211_sta_rx_bandwidth bandwidth; + struct ieee80211_sta_txpwr txpwr; +} __attribute__((packed)); + +struct ieee80211_sta_rates; + +struct ieee80211_sta { + u8 addr[6]; + u16 aid; + u16 max_rx_aggregation_subframes; + bool wme; + u8 uapsd_queues; + u8 max_sp; + struct ieee80211_sta_rates *rates; + bool tdls; + bool tdls_initiator; + bool mfp; + bool mlo; + bool spp_amsdu; + u8 max_amsdu_subframes; + struct ieee80211_sta_aggregates *cur; + bool support_p2p_ps; + struct ieee80211_txq *txq[17]; + u16 valid_links; + long: 0; + struct ieee80211_link_sta deflink; + struct ieee80211_link_sta *link[15]; + u8 drv_priv[0]; +}; + +struct ieee80211_ftm_responder_params { + const u8 *lci; + const u8 *civicloc; + size_t lci_len; + size_t civicloc_len; +}; + +enum mac80211_rate_control_flags { + IEEE80211_TX_RC_USE_RTS_CTS = 1, + IEEE80211_TX_RC_USE_CTS_PROTECT = 2, + IEEE80211_TX_RC_USE_SHORT_PREAMBLE = 4, + IEEE80211_TX_RC_MCS = 8, + IEEE80211_TX_RC_GREEN_FIELD = 16, + IEEE80211_TX_RC_40_MHZ_WIDTH = 32, + IEEE80211_TX_RC_DUP_DATA = 64, + IEEE80211_TX_RC_SHORT_GI = 128, + IEEE80211_TX_RC_VHT_MCS = 256, + IEEE80211_TX_RC_80_MHZ_WIDTH = 512, + IEEE80211_TX_RC_160_MHZ_WIDTH = 1024, +}; + +struct ieee80211_tx_rate { + s8 idx; + u16 count: 5; + u16 flags: 11; +} __attribute__((packed)); + +struct ieee80211_key_conf { + atomic64_t tx_pn; + u32 cipher; + u8 icv_len; + u8 iv_len; + u8 hw_key_idx; + s8 keyidx; + u16 flags; + s8 link_id; + u8 keylen; + u8 key[0]; + long: 32; +}; + +struct ieee80211_tx_info { + u32 flags; + u32 band: 3; + u32 status_data_idr: 1; + u32 status_data: 13; + u32 hw_queue: 4; + u32 tx_time_est: 10; + union { + struct { + union { + struct { + struct ieee80211_tx_rate rates[4]; + s8 rts_cts_rate_idx; + u8 use_rts: 1; + u8 use_cts_prot: 1; + u8 short_preamble: 1; + u8 skip_table: 1; + u8 antennas: 2; + }; + long unsigned int jiffies; + }; + struct ieee80211_vif *vif; + struct ieee80211_key_conf *hw_key; + u32 flags; + codel_time_t enqueue_time; + } control; + struct { + u64 cookie; + } ack; + struct 
{ + struct ieee80211_tx_rate rates[4]; + s32 ack_signal; + u8 ampdu_ack_len; + u8 ampdu_len; + u8 antenna; + u8 pad; + u16 tx_time; + u8 flags; + u8 pad2; + void *status_driver_data[4]; + } status; + struct { + struct ieee80211_tx_rate driver_rates[4]; + u8 pad[4]; + void *rate_driver_data[6]; + }; + void *driver_data[10]; + }; +}; + +struct ieee80211_conf { + u32 flags; + int power_level; + int dynamic_ps_timeout; + u16 listen_interval; + u8 ps_dtim_period; + u8 long_frame_max_tx_count; + u8 short_frame_max_tx_count; + struct cfg80211_chan_def chandef; + bool radar_enabled; + enum ieee80211_smps_mode smps_mode; +}; + +struct ieee80211_channel_switch { + u64 timestamp; + u32 device_timestamp; + bool block_tx; + struct cfg80211_chan_def chandef; + u8 count; + u8 link_id; + u32 delay; + long: 32; +}; + +struct ieee80211_txq { + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + u8 tid; + u8 ac; + long: 0; + u8 drv_priv[0]; +}; + +struct ieee80211_sta_rates { + struct callback_head callback_head; + struct { + s8 idx; + u8 count; + u8 count_cts; + u8 count_rts; + u16 flags; + } rate[4]; +}; + +struct ieee80211_hw { + struct ieee80211_conf conf; + struct wiphy *wiphy; + const char *rate_control_algorithm; + void *priv; + long unsigned int flags[2]; + unsigned int extra_tx_headroom; + unsigned int extra_beacon_tailroom; + int vif_data_size; + int sta_data_size; + int chanctx_data_size; + int txq_data_size; + u16 queues; + u16 max_listen_interval; + s8 max_signal; + u8 max_rates; + u8 max_report_rates; + u8 max_rate_tries; + u16 max_rx_aggregation_subframes; + u16 max_tx_aggregation_subframes; + u8 max_tx_fragments; + u8 offchannel_tx_hw_queue; + u8 radiotap_mcs_details; + u16 radiotap_vht_details; + struct { + int units_pos; + s16 accuracy; + } radiotap_timestamp; + netdev_features_t netdev_features; + u8 uapsd_queues; + u8 uapsd_max_sp_len; + u8 max_nan_de_entries; + u8 tx_sk_pacing_shift; + u8 weight_multiplier; + u32 max_mtu; + const s8 *tx_power_levels; + u8 max_txpwr_levels_idx; + long: 32; +}; + +struct iwl_mod_params { + int swcrypto; + unsigned int disable_11n; + int amsdu_size; + bool fw_restart; + bool bt_coex_active; + int led_mode; + bool power_save; + int power_level; + char *nvm_file; + u32 uapsd_disable; + bool disable_11ac; + bool disable_11ax; + bool remove_when_gone; + u32 enable_ini; + bool disable_11be; +}; + +enum iwl_amsdu_size { + IWL_AMSDU_DEF = 0, + IWL_AMSDU_4K = 1, + IWL_AMSDU_8K = 2, + IWL_AMSDU_12K = 3, + IWL_AMSDU_2K = 4, +}; + +enum iwl_err_mode { + IWL_ERR_MODE_REGULAR = 0, + IWL_ERR_MODE_RFKILL = 1, + IWL_ERR_MODE_TRACE_ONLY = 2, + IWL_ERR_MODE_RATELIMIT = 3, +}; + +enum iwl_device_family { + IWL_DEVICE_FAMILY_UNDEFINED = 0, + IWL_DEVICE_FAMILY_1000 = 1, + IWL_DEVICE_FAMILY_100 = 2, + IWL_DEVICE_FAMILY_2000 = 3, + IWL_DEVICE_FAMILY_2030 = 4, + IWL_DEVICE_FAMILY_105 = 5, + IWL_DEVICE_FAMILY_135 = 6, + IWL_DEVICE_FAMILY_5000 = 7, + IWL_DEVICE_FAMILY_5150 = 8, + IWL_DEVICE_FAMILY_6000 = 9, + IWL_DEVICE_FAMILY_6000i = 10, + IWL_DEVICE_FAMILY_6005 = 11, + IWL_DEVICE_FAMILY_6030 = 12, + IWL_DEVICE_FAMILY_6050 = 13, + IWL_DEVICE_FAMILY_6150 = 14, + IWL_DEVICE_FAMILY_7000 = 15, + IWL_DEVICE_FAMILY_8000 = 16, + IWL_DEVICE_FAMILY_9000 = 17, + IWL_DEVICE_FAMILY_22000 = 18, + IWL_DEVICE_FAMILY_AX210 = 19, + IWL_DEVICE_FAMILY_BZ = 20, + IWL_DEVICE_FAMILY_SC = 21, +}; + +enum iwl_led_mode { + IWL_LED_DEFAULT = 0, + IWL_LED_RF_STATE = 1, + IWL_LED_BLINK = 2, + IWL_LED_DISABLE = 3, +}; + +enum iwl_nvm_type { + IWL_NVM = 0, + IWL_NVM_EXT = 1, + IWL_NVM_SDP = 2, +}; + 
+struct iwl_base_params { + unsigned int wd_timeout; + u16 eeprom_size; + u16 max_event_log_size; + u8 pll_cfg: 1; + u8 shadow_ram_support: 1; + u8 shadow_reg_enable: 1; + u8 pcie_l1_allowed: 1; + u8 apmg_wake_up_wa: 1; + u8 scd_chain_ext_wa: 1; + u16 num_of_queues; + u32 max_tfd_queue_size; + u8 max_ll_items; + u8 led_compensation; +}; + +struct iwl_ht_params { + u8 ht_greenfield_support: 1; + u8 stbc: 1; + u8 ldpc: 1; + u8 use_rts_for_aggregation: 1; + u8 ht40_bands; +}; + +struct iwl_tt_tx_backoff { + s32 temperature; + u32 backoff; +}; + +struct iwl_tt_params { + u32 ct_kill_entry; + u32 ct_kill_exit; + u32 ct_kill_duration; + u32 dynamic_smps_entry; + u32 dynamic_smps_exit; + u32 tx_protection_entry; + u32 tx_protection_exit; + struct iwl_tt_tx_backoff tx_backoff[6]; + u8 support_ct_kill: 1; + u8 support_dynamic_smps: 1; + u8 support_tx_protection: 1; + u8 support_tx_backoff: 1; +}; + +struct iwl_eeprom_params { + const u8 regulatory_bands[7]; + bool enhanced_txpower; +}; + +struct iwl_pwr_tx_backoff { + u32 pwr; + u32 backoff; +}; + +struct iwl_cfg_trans_params { + const struct iwl_base_params *base_params; + enum iwl_device_family device_family; + u32 umac_prph_offset; + u32 xtal_latency; + u32 extra_phy_cfg_flags; + u32 rf_id: 1; + u32 gen2: 1; + u32 mq_rx_supported: 1; + u32 integrated: 1; + u32 low_latency_xtal: 1; + u32 bisr_workaround: 1; + u32 ltr_delay: 2; + u32 imr_enabled: 1; +}; + +struct iwl_fw_mon_reg { + u32 addr; + u32 mask; +}; + +struct iwl_fw_mon_regs { + struct iwl_fw_mon_reg write_ptr; + struct iwl_fw_mon_reg cycle_cnt; + struct iwl_fw_mon_reg cur_frag; +}; + +struct iwl_cfg { + struct iwl_cfg_trans_params trans; + const char *name; + const char *fw_name_pre; + const char *fw_name_mac; + const struct iwl_ht_params *ht_params; + const struct iwl_eeprom_params *eeprom_params; + const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; + const char *default_nvm_file_C_step; + const struct iwl_tt_params *thermal_params; + enum iwl_led_mode led_mode; + enum iwl_nvm_type nvm_type; + u32 max_data_size; + u32 max_inst_size; + netdev_features_t features; + u32 dccm_offset; + u32 dccm_len; + u32 dccm2_offset; + u32 dccm2_len; + u32 smem_offset; + u32 smem_len; + u16 nvm_ver; + u16 nvm_calib_ver; + u32 rx_with_siso_diversity: 1; + u32 tx_with_siso_diversity: 1; + u32 internal_wimax_coex: 1; + u32 host_interrupt_operation_mode: 1; + u32 high_temp: 1; + u32 mac_addr_from_csr: 10; + u32 lp_xtal_workaround: 1; + u32 apmg_not_supported: 1; + u32 vht_mu_mimo_supported: 1; + u32 cdb: 1; + u32 dbgc_supported: 1; + u32 uhb_supported: 1; + u8 valid_tx_ant; + u8 valid_rx_ant; + u8 non_shared_ant; + u8 nvm_hw_section_num; + u8 max_tx_agg_size; + u8 ucode_api_max; + u8 ucode_api_min; + u16 num_rbds; + u32 min_umac_error_event_table; + u32 d3_debug_data_base_addr; + u32 d3_debug_data_length; + u32 min_txq_size; + u32 gp2_reg_addr; + u32 min_ba_txq_size; + const struct iwl_fw_mon_regs mon_dram_regs; + const struct iwl_fw_mon_regs mon_smem_regs; + const struct iwl_fw_mon_regs mon_dbgi_regs; +}; + +struct iwl_fw_ini_header { + __le32 version; + __le32 domain; +}; + +struct iwl_fw_ini_allocation_tlv { + struct iwl_fw_ini_header hdr; + __le32 alloc_id; + __le32 buf_location; + __le32 req_size; + __le32 max_frags_num; + __le32 min_size; +}; + +enum iwl_fw_ini_buffer_location { + IWL_FW_INI_LOCATION_INVALID = 0, + IWL_FW_INI_LOCATION_SRAM_PATH = 1, + IWL_FW_INI_LOCATION_DRAM_PATH = 2, + IWL_FW_INI_LOCATION_NPK_PATH = 3, + IWL_FW_INI_LOCATION_NUM = 4, +}; + +enum iwl_fw_ini_time_point { + 
IWL_FW_INI_TIME_POINT_INVALID = 0, + IWL_FW_INI_TIME_POINT_EARLY = 1, + IWL_FW_INI_TIME_POINT_AFTER_ALIVE = 2, + IWL_FW_INI_TIME_POINT_POST_INIT = 3, + IWL_FW_INI_TIME_POINT_FW_ASSERT = 4, + IWL_FW_INI_TIME_POINT_FW_HW_ERROR = 5, + IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG = 6, + IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION = 7, + IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF = 8, + IWL_FW_INI_TIME_POINT_USER_TRIGGER = 9, + IWL_FW_INI_TIME_POINT_PERIODIC = 10, + IWL_FW_INI_TIME_POINT_RESERVED = 11, + IWL_FW_INI_TIME_POINT_HOST_ASSERT = 12, + IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT = 13, + IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE = 14, + IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE = 15, + IWL_FW_INI_TIME_POINT_HOST_D3_START = 16, + IWL_FW_INI_TIME_POINT_HOST_D3_END = 17, + IWL_FW_INI_TIME_POINT_MISSED_BEACONS = 18, + IWL_FW_INI_TIME_POINT_ASSOC_FAILED = 19, + IWL_FW_INI_TIME_POINT_TX_FAILED = 20, + IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED = 21, + IWL_FW_INI_TIME_POINT_TX_LATENCY_THRESHOLD = 22, + IWL_FW_INI_TIME_POINT_HANG_OCCURRED = 23, + IWL_FW_INI_TIME_POINT_EAPOL_FAILED = 24, + IWL_FW_INI_TIME_POINT_FAKE_TX = 25, + IWL_FW_INI_TIME_POINT_DEASSOC = 26, + IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ = 27, + IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START = 28, + IWL_FW_INI_TIME_SCAN_FAILURE = 29, + IWL_FW_INI_TIME_ESR_LINK_UP = 30, + IWL_FW_INI_TIME_ESR_LINK_DOWN = 31, + IWL_FW_INI_TIME_POINT_NUM = 32, +}; + +struct iwl_ucode_tlv { + __le32 type; + __le32 length; + u8 data[0]; +}; + +enum iwl_ucode_tlv_flag { + IWL_UCODE_TLV_FLAGS_PAN = 1, + IWL_UCODE_TLV_FLAGS_NEWSCAN = 2, + IWL_UCODE_TLV_FLAGS_MFP = 4, + IWL_UCODE_TLV_FLAGS_SHORT_BL = 128, + IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = 1024, + IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = 4096, + IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = 32768, + IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = 65536, + IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = 16777216, + IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = 33554432, + IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = 67108864, +}; + +struct iwl_tlv_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +}; + +struct iwl_fw_dbg_reg_op { + u8 op; + u8 reserved[3]; + __le32 addr; + __le32 val; +}; + +struct iwl_fw_dbg_mem_seg_tlv { + __le32 data_type; + __le32 ofs; + __le32 len; +}; + +struct iwl_fw_dbg_dest_tlv_v1 { + u8 version; + u8 monitor_mode; + u8 size_power; + u8 reserved; + __le32 base_reg; + __le32 end_reg; + __le32 write_ptr_reg; + __le32 wrap_count; + u8 base_shift; + u8 end_shift; + struct iwl_fw_dbg_reg_op reg_ops[0]; +} __attribute__((packed)); + +struct iwl_fw_dbg_conf_hcmd { + u8 id; + u8 reserved; + __le16 len; + u8 data[0]; +}; + +struct iwl_fw_dbg_trigger_tlv { + __le32 id; + __le32 vif_type; + __le32 stop_conf_ids; + __le32 stop_delay; + u8 mode; + u8 start_conf_id; + __le16 occurrences; + __le16 trig_dis_ms; + u8 flags; + u8 reserved[5]; + u8 data[0]; +}; + +struct iwl_fw_dbg_conf_tlv { + u8 id; + u8 usniffer; + u8 reserved; + u8 num_of_hcmds; + struct iwl_fw_dbg_conf_hcmd hcmd; +}; + +struct iwl_fw_cmd_version { + u8 cmd; + u8 group; + u8 cmd_ver; + u8 notif_ver; +}; + +struct iwl_cmd_header { + u8 cmd; + u8 group_id; + __le16 sequence; +}; + +enum iwl_ucode_type { + IWL_UCODE_REGULAR = 0, + IWL_UCODE_INIT = 1, + IWL_UCODE_WOWLAN = 2, + IWL_UCODE_REGULAR_USNIFFER = 3, + IWL_UCODE_TYPE_MAX = 4, +}; + +struct iwl_ucode_capabilities { + u32 max_probe_length; + u32 n_scan_channels; + u32 standard_phy_calibration_size; + u32 flags; + u32 error_log_addr; + u32 error_log_size; + u32 num_stations; + u32 num_beacons; + long unsigned int _api[3]; + 
long unsigned int _capa[4]; + const struct iwl_fw_cmd_version *cmd_versions; + u32 n_cmd_versions; +}; + +struct fw_desc { + const void *data; + u32 len; + u32 offset; +}; + +struct fw_img { + struct fw_desc *sec; + int num_sec; + bool is_dual_cpus; + u32 paging_mem_size; +}; + +enum iwl_fw_type { + IWL_FW_DVM = 0, + IWL_FW_MVM = 1, +}; + +struct iwl_fw_dbg { + struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv; + u8 n_dest_reg; + struct iwl_fw_dbg_conf_tlv *conf_tlv[32]; + struct iwl_fw_dbg_trigger_tlv *trigger_tlv[17]; + size_t trigger_tlv_len[17]; + struct iwl_fw_dbg_mem_seg_tlv *mem_tlv; + size_t n_mem_tlv; + u32 dump_mask; +}; + +struct iwl_dump_exclude { + u32 addr; + u32 size; +}; + +struct iwl_fw { + u32 ucode_ver; + char fw_version[128]; + struct fw_img img[4]; + size_t iml_len; + u8 *iml; + struct iwl_ucode_capabilities ucode_capa; + bool enhance_sensitivity_table; + u32 init_evtlog_ptr; + u32 init_evtlog_size; + u32 init_errlog_ptr; + u32 inst_evtlog_ptr; + u32 inst_evtlog_size; + u32 inst_errlog_ptr; + struct iwl_tlv_calib_ctrl default_calib[4]; + u32 phy_config; + u8 valid_tx_ant; + u8 valid_rx_ant; + enum iwl_fw_type type; + u8 human_readable[64]; + struct iwl_fw_dbg dbg; + u8 *phy_integration_ver; + u32 phy_integration_ver_len; + struct iwl_dump_exclude dump_excl[2]; + struct iwl_dump_exclude dump_excl_wowlan[2]; +}; + +struct iwl_rx_packet; + +union iwl_dbg_tlv_tp_data { + struct iwl_rx_packet *fw_pkt; +}; + +struct iwl_rx_packet { + __le32 len_n_flags; + struct iwl_cmd_header hdr; + u8 data[0]; +}; + +struct iwl_dbg_tlv_time_point_data { + struct list_head trig_list; + struct list_head active_trig_list; + struct list_head hcmd_list; + struct list_head config_list; +}; + +struct iwl_op_mode; + +struct iwl_trans; + +struct iwl_rx_cmd_buffer; + +struct iwl_op_mode_ops { + struct iwl_op_mode * (*start)(struct iwl_trans *, const struct iwl_cfg *, const struct iwl_fw *, struct dentry *); + void (*stop)(struct iwl_op_mode *); + void (*rx)(struct iwl_op_mode *, struct napi_struct *, struct iwl_rx_cmd_buffer *); + void (*rx_rss)(struct iwl_op_mode *, struct napi_struct *, struct iwl_rx_cmd_buffer *, unsigned int); + void (*queue_full)(struct iwl_op_mode *, int); + void (*queue_not_full)(struct iwl_op_mode *, int); + bool (*hw_rf_kill)(struct iwl_op_mode *, bool); + void (*free_skb)(struct iwl_op_mode *, struct sk_buff *); + void (*nic_error)(struct iwl_op_mode *, bool); + void (*cmd_queue_full)(struct iwl_op_mode *); + void (*nic_config)(struct iwl_op_mode *); + void (*wimax_active)(struct iwl_op_mode *); + void (*time_point)(struct iwl_op_mode *, enum iwl_fw_ini_time_point, union iwl_dbg_tlv_tp_data *); + void (*device_powered_off)(struct iwl_op_mode *); +}; + +struct iwl_op_mode { + const struct iwl_op_mode_ops *ops; + char op_mode_specific[0]; +}; + +enum iwl_trans_state { + IWL_TRANS_NO_FW = 0, + IWL_TRANS_FW_STARTED = 1, + IWL_TRANS_FW_ALIVE = 2, +}; + +enum iwl_ini_cfg_state { + IWL_INI_CFG_STATE_NOT_LOADED = 0, + IWL_INI_CFG_STATE_LOADED = 1, + IWL_INI_CFG_STATE_CORRUPTED = 2, +}; + +struct iwl_dram_data; + +struct iwl_fw_mon { + u32 num_frags; + struct iwl_dram_data *frags; +}; + +struct iwl_dram_data { + dma_addr_t physical; + void *block; + int size; +}; + +struct iwl_imr_data { + u32 imr_enable; + u32 imr_size; + u32 sram_addr; + u32 sram_size; + u32 imr2sram_remainbyte; + long: 32; + u64 imr_curr_addr; + __le64 imr_base_addr; +}; + +struct iwl_pc_data; + +struct iwl_trans_debug { + u8 n_dest_reg; + bool rec_on; + const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv; + const struct 
iwl_fw_dbg_conf_tlv *conf_tlv[32]; + struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv; + u32 lmac_error_event_table[2]; + u32 umac_error_event_table; + u32 tcm_error_event_table[2]; + u32 rcm_error_event_table[2]; + unsigned int error_event_table_tlv_status; + enum iwl_ini_cfg_state internal_ini_cfg; + enum iwl_ini_cfg_state external_ini_cfg; + struct iwl_fw_ini_allocation_tlv fw_mon_cfg[5]; + struct iwl_fw_mon fw_mon_ini[5]; + struct iwl_dram_data fw_mon; + bool hw_error; + enum iwl_fw_ini_buffer_location ini_dest; + long: 32; + u64 unsupported_region_msk; + struct iwl_ucode_tlv *active_regions[64]; + struct list_head debug_info_tlv_list; + struct iwl_dbg_tlv_time_point_data time_point[32]; + struct list_head periodic_trig_list; + u32 domains_bitmap; + u32 ucode_preset; + bool restart_required; + u32 last_tp_resetfw; + struct iwl_imr_data imr_data; + u8 dump_file_name_ext[32]; + bool dump_file_name_ext_valid; + u32 num_pc; + struct iwl_pc_data *pc_data; + bool yoyo_bin_loaded; +}; + +struct iwl_self_init_dram { + struct iwl_dram_data *fw; + int fw_cnt; + struct iwl_dram_data *paging; + int paging_cnt; +}; + +enum iwl_plat_pm_mode { + IWL_PLAT_PM_MODE_DISABLED = 0, + IWL_PLAT_PM_MODE_D3 = 1, +}; + +struct iwl_dma_ptr { + dma_addr_t dma; + void *addr; + size_t size; +}; + +struct iwl_drv; + +struct iwl_hcmd_arr; + +struct iwl_trans { + bool csme_own; + struct iwl_op_mode *op_mode; + const struct iwl_cfg_trans_params *trans_cfg; + const struct iwl_cfg *cfg; + struct iwl_drv *drv; + enum iwl_trans_state state; + long unsigned int status; + struct device *dev; + u32 max_skb_frags; + u32 hw_rev; + u32 hw_rev_step; + u32 hw_rf_id; + u32 hw_crf_id; + u32 hw_cnv_id; + u32 hw_wfpm_id; + u32 hw_id; + char hw_id_str[52]; + u32 sku_id[3]; + bool reduced_cap_sku; + u8 no_160: 1; + u8 step_urm: 1; + u8 rx_mpdu_cmd; + u8 rx_mpdu_cmd_hdr_size; + bool pm_support; + bool ltr_enabled; + u8 pnvm_loaded: 1; + u8 fail_to_parse_pnvm_image: 1; + u8 reduce_power_loaded: 1; + u8 failed_to_load_reduce_power_image: 1; + const struct iwl_hcmd_arr *command_groups; + int command_groups_size; + bool wide_cmd_header; + wait_queue_head_t wait_command_queue; + u8 num_rx_queues; + size_t iml_len; + u8 *iml; + struct kmem_cache *dev_cmd_pool; + char dev_cmd_pool_name[50]; + struct dentry *dbgfs_dir; + struct iwl_trans_debug dbg; + struct iwl_self_init_dram init_dram; + enum iwl_plat_pm_mode system_pm_mode; + const char *name; + u32 mbx_addr_0_step; + u32 mbx_addr_1_step; + u8 pcie_link_speed; + struct iwl_dma_ptr invalid_tx_cmd; + char trans_specific[0]; +}; + +struct iwl_rx_cmd_buffer { + struct page *_page; + int _offset; + bool _page_stolen; + u32 _rx_page_order; + unsigned int truesize; +}; + +enum CMD_MODE { + CMD_ASYNC = 1, + CMD_WANT_SKB = 2, + CMD_SEND_IN_RFKILL = 4, + CMD_BLOCK_TXQS = 8, + CMD_SEND_IN_D3 = 16, +}; + +struct iwl_device_tx_cmd { + struct iwl_cmd_header hdr; + u8 payload[0]; +}; + +enum iwl_hcmd_dataflag { + IWL_HCMD_DFL_NOCOPY = 1, + IWL_HCMD_DFL_DUP = 2, +}; + +struct iwl_host_cmd { + const void *data[2]; + struct iwl_rx_packet *resp_pkt; + long unsigned int _rx_page_addr; + u32 _rx_page_order; + u32 flags; + u32 id; + u16 len[2]; + u8 dataflags[2]; +}; + +struct iwl_hcmd_names { + u8 cmd_id; + const char * const cmd_name; +}; + +struct iwl_hcmd_arr { + const struct iwl_hcmd_names *arr; + int size; +}; + +struct iwl_trans_config { + struct iwl_op_mode *op_mode; + u8 cmd_queue; + u8 cmd_fifo; + const u8 *no_reclaim_cmds; + unsigned int n_no_reclaim_cmds; + enum iwl_amsdu_size rx_buf_size; + bool 
bc_table_dword; + bool scd_set_active; + const struct iwl_hcmd_arr *command_groups; + int command_groups_size; + u8 cb_data_offs; + bool fw_reset_handshake; + u8 queue_alloc_cmd_ver; +}; + +struct iwl_pc_data { + u8 pc_name[32]; + u32 pc_address; +}; + +struct iwl_nvm_data { + int n_hw_addrs; + u8 hw_addr[6]; + u8 calib_version; + __le16 calib_voltage; + __le16 raw_temperature; + __le16 kelvin_temperature; + __le16 kelvin_voltage; + __le16 xtal_calib[2]; + bool sku_cap_band_24ghz_enable; + bool sku_cap_band_52ghz_enable; + bool sku_cap_11n_enable; + bool sku_cap_11ac_enable; + bool sku_cap_11ax_enable; + bool sku_cap_amt_enable; + bool sku_cap_ipan_enable; + bool sku_cap_mimo_disabled; + bool sku_cap_11be_enable; + u16 radio_cfg_type; + u8 radio_cfg_step; + u8 radio_cfg_dash; + u8 radio_cfg_pnum; + u8 valid_tx_ant; + u8 valid_rx_ant; + u32 nvm_version; + s8 max_tx_pwr_half_dbm; + bool lar_enabled; + bool vht160_supported; + struct ieee80211_supported_band bands[6]; + struct { + struct ieee80211_sband_iftype_data low[2]; + struct ieee80211_sband_iftype_data high[2]; + struct ieee80211_sband_iftype_data uhb[2]; + } iftd; + struct ieee80211_channel channels[0]; +}; + +struct iwl_notif_wait_data { + struct list_head notif_waits; + spinlock_t notif_wait_lock; + wait_queue_head_t notif_waitq; +}; + +enum { + REPLY_ALIVE = 1, + REPLY_ERROR = 2, + REPLY_ECHO = 3, + REPLY_RXON = 16, + REPLY_RXON_ASSOC = 17, + REPLY_QOS_PARAM = 19, + REPLY_RXON_TIMING = 20, + REPLY_ADD_STA = 24, + REPLY_REMOVE_STA = 25, + REPLY_REMOVE_ALL_STA = 26, + REPLY_TXFIFO_FLUSH = 30, + REPLY_WEPKEY = 32, + REPLY_TX = 28, + REPLY_LEDS_CMD = 72, + REPLY_TX_LINK_QUALITY_CMD = 78, + COEX_PRIORITY_TABLE_CMD = 90, + COEX_MEDIUM_NOTIFICATION = 91, + COEX_EVENT_CMD = 92, + TEMPERATURE_NOTIFICATION = 98, + CALIBRATION_CFG_CMD = 101, + CALIBRATION_RES_NOTIFICATION = 102, + CALIBRATION_COMPLETE_NOTIFICATION = 103, + REPLY_QUIET_CMD = 113, + REPLY_CHANNEL_SWITCH = 114, + CHANNEL_SWITCH_NOTIFICATION = 115, + REPLY_SPECTRUM_MEASUREMENT_CMD = 116, + SPECTRUM_MEASURE_NOTIFICATION = 117, + POWER_TABLE_CMD = 119, + PM_SLEEP_NOTIFICATION = 122, + PM_DEBUG_STATISTIC_NOTIFIC = 123, + REPLY_SCAN_CMD = 128, + REPLY_SCAN_ABORT_CMD = 129, + SCAN_START_NOTIFICATION = 130, + SCAN_RESULTS_NOTIFICATION = 131, + SCAN_COMPLETE_NOTIFICATION = 132, + BEACON_NOTIFICATION = 144, + REPLY_TX_BEACON = 145, + WHO_IS_AWAKE_NOTIFICATION = 148, + REPLY_TX_POWER_DBM_CMD = 149, + QUIET_NOTIFICATION = 150, + REPLY_TX_PWR_TABLE_CMD = 151, + REPLY_TX_POWER_DBM_CMD_V1 = 152, + TX_ANT_CONFIGURATION_CMD = 152, + MEASURE_ABORT_NOTIFICATION = 153, + REPLY_BT_CONFIG = 155, + REPLY_STATISTICS_CMD = 156, + STATISTICS_NOTIFICATION = 157, + REPLY_CARD_STATE_CMD = 160, + CARD_STATE_NOTIFICATION = 161, + MISSED_BEACONS_NOTIFICATION = 162, + REPLY_CT_KILL_CONFIG_CMD = 164, + SENSITIVITY_CMD = 168, + REPLY_PHY_CALIBRATION_CMD = 176, + REPLY_RX_PHY_CMD = 192, + REPLY_RX_MPDU_CMD = 193, + REPLY_RX = 195, + REPLY_COMPRESSED_BA = 197, + REPLY_BT_COEX_PRIO_TABLE = 204, + REPLY_BT_COEX_PROT_ENV = 205, + REPLY_BT_COEX_PROFILE_NOTIF = 206, + REPLY_WIPAN_PARAMS = 178, + REPLY_WIPAN_RXON = 179, + REPLY_WIPAN_RXON_TIMING = 180, + REPLY_WIPAN_RXON_ASSOC = 182, + REPLY_WIPAN_QOS_PARAM = 183, + REPLY_WIPAN_WEPKEY = 184, + REPLY_WIPAN_P2P_CHANNEL_SWITCH = 185, + REPLY_WIPAN_NOA_NOTIFICATION = 188, + REPLY_WIPAN_DEACTIVATION_COMPLETE = 189, + REPLY_WOWLAN_PATTERNS = 224, + REPLY_WOWLAN_WAKEUP_FILTER = 225, + REPLY_WOWLAN_TSC_RSC_PARAMS = 226, + REPLY_WOWLAN_TKIP_PARAMS = 227, + 
REPLY_WOWLAN_KEK_KCK_MATERIAL = 228, + REPLY_WOWLAN_GET_STATUS = 229, + REPLY_D3_CONFIG = 211, + REPLY_MAX = 255, +}; + +struct iwl_tx_ant_config_cmd { + __le32 valid; +}; + +struct iwl_error_event_table { + u32 valid; + u32 error_id; + u32 pc; + u32 blink1; + u32 blink2; + u32 ilink1; + u32 ilink2; + u32 data1; + u32 data2; + u32 line; + u32 bcon_time; + u32 tsf_low; + u32 tsf_hi; + u32 gp1; + u32 gp2; + u32 gp3; + u32 ucode_ver; + u32 hw_ver; + u32 brd_ver; + u32 log_pc; + u32 frame_ptr; + u32 stack_ptr; + u32 hcmd; + u32 isr0; + u32 isr1; + u32 isr2; + u32 isr3; + u32 isr4; + u32 isr_pref; + u32 wait_event; + u32 l2p_control; + u32 l2p_duration; + u32 l2p_mhvalid; + u32 l2p_addr_match; + u32 lmpm_pmg_sel; + u32 u_timestamp; + u32 flow_handler; +}; + +enum { + RXON_DEV_TYPE_AP = 1, + RXON_DEV_TYPE_ESS = 3, + RXON_DEV_TYPE_IBSS = 4, + RXON_DEV_TYPE_SNIFFER = 6, + RXON_DEV_TYPE_CP = 7, + RXON_DEV_TYPE_2STA = 8, + RXON_DEV_TYPE_P2P = 9, +}; + +struct iwl_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 rx_chain; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + u8 ofdm_ht_triple_stream_basic_rates; + u8 reserved5; + __le16 acquisition_data; + __le16 reserved6; +} __attribute__((packed)); + +struct iwl_rxon_time_cmd { + __le64 timestamp; + __le16 beacon_interval; + __le16 atim_window; + __le32 beacon_init_val; + __le16 listen_interval; + u8 dtim_period; + u8 delta_cp_bss_tbtts; +}; + +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 reserved1; + __le16 edca_txop; +}; + +struct iwl_qosparam_cmd { + __le32 qos_flags; + struct iwl_ac_qos ac[4]; +}; + +struct iwl_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; + u8 reserved1; + __le16 tkip_rx_ttak[5]; + u8 key_offset; + u8 reserved2; + u8 key[16]; + __le64 tx_secur_seq_cnt; + __le64 hw_tkip_mic_rx_key; + __le64 hw_tkip_mic_tx_key; +}; + +struct sta_id_modify { + u8 addr[6]; + __le16 reserved1; + u8 sta_id; + u8 modify_mask; + __le16 reserved2; +}; + +struct iwl_addsta_cmd { + u8 mode; + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl_keyinfo key; + __le32 station_flags; + __le32 station_flags_msk; + __le16 tid_disable_tx; + __le16 legacy_reserved; + u8 add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; + __le16 sleep_tx_count; + __le16 reserved2; +}; + +struct iwl_wep_key { + u8 key_index; + u8 key_offset; + u8 reserved1[2]; + u8 key_size; + u8 reserved2[3]; + u8 key[16]; +}; + +struct iwl_rx_phy_res { + u8 non_cfg_phy_cnt; + u8 cfg_phy_cnt; + u8 stat_id; + u8 reserved1; + __le64 timestamp; + __le32 beacon_time_stamp; + __le16 phy_flags; + __le16 channel; + u8 non_cfg_phy_buf[32]; + __le32 rate_n_flags; + __le16 byte_count; + __le16 frame_time; +}; + +struct iwl_dram_scratch { + u8 try_cnt; + u8 bt_kill_cnt; + __le16 reserved; +}; + +struct iwl_tx_cmd { + __le16 len; + __le16 next_frame_len; + __le32 tx_flags; + struct iwl_dram_scratch scratch; + __le32 rate_n_flags; + u8 sta_id; + u8 sec_ctl; + u8 initial_rate_index; + u8 reserved; + u8 key[16]; + __le16 next_frame_flags; + __le16 reserved2; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 tid_tspec; + union { + __le16 pm_frame_timeout; + __le16 
attempt_duration; + } timeout; + __le16 driver_txop; + union { + struct { + struct {} __empty_payload; + u8 payload[0]; + }; + struct { + struct {} __empty_hdr; + struct ieee80211_hdr hdr[0]; + }; + }; +}; + +struct iwl_link_qual_general_params { + u8 flags; + u8 mimo_delimiter; + u8 single_stream_ant_msk; + u8 dual_stream_ant_msk; + u8 start_rate_index[4]; +}; + +struct iwl_link_qual_agg_params { + __le16 agg_time_limit; + u8 agg_dis_start_th; + u8 agg_frame_cnt_limit; + __le32 reserved; +}; + +struct iwl_link_quality_cmd { + u8 sta_id; + u8 reserved1; + __le16 control; + struct iwl_link_qual_general_params general_params; + struct iwl_link_qual_agg_params agg_params; + struct { + __le32 rate_n_flags; + } rs_table[16]; + __le32 reserved2; +}; + +struct iwl_bt_cmd { + u8 flags; + u8 lead_time; + u8 max_kill; + u8 reserved; + __le32 kill_ack_mask; + __le32 kill_cts_mask; +}; + +struct iwl_measurement_histogram { + __le32 ofdm[8]; + __le32 cck[8]; +}; + +struct iwl_spectrum_notification { + u8 id; + u8 token; + u8 channel_index; + u8 state; + __le32 start_time; + u8 band; + u8 channel; + u8 type; + u8 reserved1; + __le32 cca_ofdm; + __le32 cca_cck; + __le32 cca_time; + u8 basic_type; + u8 reserved2[3]; + struct iwl_measurement_histogram histogram; + __le32 stop_time; + __le32 status; +}; + +struct iwl_powertable_cmd { + __le16 flags; + u8 keep_alive_seconds; + u8 debug_flags; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[5]; + __le32 keep_alive_beacons; +}; + +struct iwl_ct_kill_config { + __le32 reserved; + __le32 critical_temperature_M; + __le32 critical_temperature_R; +}; + +struct iwl_ct_kill_throttling_config { + __le32 critical_temperature_exit; + __le32 reserved; + __le32 critical_temperature_enter; +}; + +struct iwl_tx_beacon_cmd { + struct iwl_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; +}; + +struct statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 wait_for_silence_timeout_cnt; + __le32 reserved[3]; +}; + +struct statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved3; +}; + +struct statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +}; + +struct statistics_rx_non_phy { + __le32 bogus_cts; + __le32 bogus_ack; + __le32 non_bssid_frames; + __le32 filtered_frames; + __le32 non_channel_beacons; + __le32 channel_beacons; + __le32 num_missed_bcon; + __le32 adc_rx_saturation_time; + __le32 ina_detection_search_time; + __le32 beacon_silence_rssi_a; + __le32 beacon_silence_rssi_b; + __le32 beacon_silence_rssi_c; + __le32 interference_data_flag; + __le32 channel_load; + __le32 dsp_false_alarms; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; +}; + +struct statistics_tx_power { + u8 ant_a; + u8 ant_b; + u8 ant_c; + u8 reserved; +}; + +struct statistics_tx_non_phy_agg { + 
__le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; +}; + +struct statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; + struct statistics_tx_non_phy_agg agg; + struct statistics_tx_power tx_power; + __le32 reserved1; +}; + +struct statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 reserved1; + __le32 reserved2; +}; + +struct statistics_general_common { + __le32 temperature; + __le32 temperature_m; + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct statistics_div div; + __le32 rx_enable_counter; + __le32 num_of_sos_states; +}; + +struct iwl_statistics_cmd { + __le32 configuration_flags; +}; + +enum iwl_ucode_calib_cfg { + IWL_CALIB_CFG_RX_BB_IDX = 1, + IWL_CALIB_CFG_DC_IDX = 2, + IWL_CALIB_CFG_LO_IDX = 4, + IWL_CALIB_CFG_TX_IQ_IDX = 8, + IWL_CALIB_CFG_RX_IQ_IDX = 16, + IWL_CALIB_CFG_NOISE_IDX = 32, + IWL_CALIB_CFG_CRYSTAL_IDX = 64, + IWL_CALIB_CFG_TEMPERATURE_IDX = 128, + IWL_CALIB_CFG_PAPD_IDX = 256, + IWL_CALIB_CFG_SENSITIVITY_IDX = 512, + IWL_CALIB_CFG_TX_PWR_IDX = 1024, +}; + +struct iwl_calib_cfg_elmnt_s { + __le32 is_enable; + __le32 start; + __le32 send_res; + __le32 apply_res; + __le32 reserved; +}; + +struct iwl_calib_cfg_status_s { + struct iwl_calib_cfg_elmnt_s once; + struct iwl_calib_cfg_elmnt_s perd; + __le32 flags; +}; + +struct iwl_calib_cfg_cmd { + struct iwl_calib_cfg_status_s ucd_calib_cfg; + struct iwl_calib_cfg_status_s drv_calib_cfg; + __le32 reserved1; +}; + +enum { + COEX_UNASSOC_IDLE = 0, + COEX_UNASSOC_MANUAL_SCAN = 1, + COEX_UNASSOC_AUTO_SCAN = 2, + COEX_CALIBRATION = 3, + COEX_PERIODIC_CALIBRATION = 4, + COEX_CONNECTION_ESTAB = 5, + COEX_ASSOCIATED_IDLE = 6, + COEX_ASSOC_MANUAL_SCAN = 7, + COEX_ASSOC_AUTO_SCAN = 8, + COEX_ASSOC_ACTIVE_LEVEL = 9, + COEX_RF_ON = 10, + COEX_RF_OFF = 11, + COEX_STAND_ALONE_DEBUG = 12, + COEX_IPAN_ASSOC_LEVEL = 13, + COEX_RSRVD1 = 14, + COEX_RSRVD2 = 15, + COEX_NUM_OF_EVENTS = 16, +}; + +enum bt_coex_prio_table_events { + BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0, + BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1, + BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2, + BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, + BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4, + BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5, + BT_COEX_PRIO_TBL_EVT_DTIM = 6, + BT_COEX_PRIO_TBL_EVT_SCAN52 = 7, + BT_COEX_PRIO_TBL_EVT_SCAN24 = 8, + BT_COEX_PRIO_TBL_EVT_RESERVED0 = 9, + BT_COEX_PRIO_TBL_EVT_RESERVED1 = 10, + BT_COEX_PRIO_TBL_EVT_RESERVED2 = 11, + BT_COEX_PRIO_TBL_EVT_RESERVED3 = 12, + BT_COEX_PRIO_TBL_EVT_RESERVED4 = 13, + BT_COEX_PRIO_TBL_EVT_RESERVED5 = 14, + BT_COEX_PRIO_TBL_EVT_RESERVED6 = 15, + BT_COEX_PRIO_TBL_EVT_MAX = 16, +}; + +struct iwl_power_mgr { + struct iwl_powertable_cmd sleep_cmd; + struct iwl_powertable_cmd sleep_cmd_next; + int debug_sleep_level_override; + bool bus_pm; +}; + +enum { + IWL_RATE_1M_INDEX = 0, + IWL_RATE_2M_INDEX = 1, + IWL_RATE_5M_INDEX = 2, + 
IWL_RATE_11M_INDEX = 3, + IWL_RATE_6M_INDEX = 4, + IWL_RATE_9M_INDEX = 5, + IWL_RATE_12M_INDEX = 6, + IWL_RATE_18M_INDEX = 7, + IWL_RATE_24M_INDEX = 8, + IWL_RATE_36M_INDEX = 9, + IWL_RATE_48M_INDEX = 10, + IWL_RATE_54M_INDEX = 11, + IWL_RATE_60M_INDEX = 12, + IWL_RATE_COUNT = 13, + IWL_RATE_COUNT_LEGACY = 12, + IWL_RATE_INVM_INDEX = 13, + IWL_RATE_INVALID = 13, +}; + +enum { + IWL_FIRST_OFDM_RATE = 4, + IWL_LAST_OFDM_RATE = 12, + IWL_FIRST_CCK_RATE = 0, + IWL_LAST_CCK_RATE = 3, +}; + +struct iwl_sensitivity_ranges; + +struct iwl_hw_params { + u8 tx_chains_num; + u8 rx_chains_num; + bool use_rts_for_aggregation; + u32 ct_kill_threshold; + u32 ct_kill_exit_threshold; + const struct iwl_sensitivity_ranges *sens; +}; + +struct iwl_rf_reset { + int reset_request_count; + int reset_success_count; + int reset_reject_count; + long unsigned int last_reset_jiffies; +}; + +enum iwl_scan_type { + IWL_SCAN_NORMAL = 0, + IWL_SCAN_RADIO_RESET = 1, +}; + +enum iwl_rxon_context_id { + IWL_RXON_CTX_BSS = 0, + IWL_RXON_CTX_PAN = 1, + NUM_IWL_RXON_CTX = 2, +}; + +struct iwl_qos_info { + int qos_active; + struct iwl_qosparam_cmd def_qos_parm; +}; + +struct iwl_rxon_context { + struct ieee80211_vif *vif; + u8 mcast_queue; + u8 ac_to_queue[4]; + u8 ac_to_fifo[4]; + bool always_active; + bool is_active; + bool ht_need_multiple_chains; + enum iwl_rxon_context_id ctxid; + u32 interface_modes; + u32 exclusive_interface_modes; + u8 unused_devtype; + u8 ap_devtype; + u8 ibss_devtype; + u8 station_devtype; + const struct iwl_rxon_cmd active; + struct iwl_rxon_cmd staging; + struct iwl_rxon_time_cmd timing; + struct iwl_qos_info qos_data; + u8 bcast_sta_id; + u8 ap_sta_id; + u8 rxon_cmd; + u8 rxon_assoc_cmd; + u8 rxon_timing_cmd; + u8 qos_cmd; + u8 wep_key_cmd; + struct iwl_wep_key wep_keys[4]; + u8 key_mapping_keys; + __le32 station_flags; + int beacon_int; + struct { + bool non_gf_sta_present; + u8 protection; + bool enabled; + bool is_40mhz; + u8 extension_chan_offset; + } ht; +}; + +struct iwl_sensitivity_data { + u32 auto_corr_ofdm; + u32 auto_corr_ofdm_mrc; + u32 auto_corr_ofdm_x1; + u32 auto_corr_ofdm_mrc_x1; + u32 auto_corr_cck; + u32 auto_corr_cck_mrc; + u32 last_bad_plcp_cnt_ofdm; + u32 last_fa_cnt_ofdm; + u32 last_bad_plcp_cnt_cck; + u32 last_fa_cnt_cck; + u32 nrg_curr_state; + u32 nrg_prev_state; + u32 nrg_value[10]; + u8 nrg_silence_rssi[20]; + u32 nrg_silence_ref; + u32 nrg_energy_idx; + u32 nrg_silence_idx; + u32 nrg_th_cck; + s32 nrg_auto_corr_silence_diff; + u32 num_in_cck_no_fa; + u32 nrg_th_ofdm; + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + +struct iwl_chain_noise_data { + u32 active_chains; + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_signal_a; + u32 chain_signal_b; + u32 chain_signal_c; + u16 beacon_count; + u8 disconn_array[3]; + u8 delta_gain_code[3]; + u8 radio_write; + u8 state; +}; + +struct iwl_ht_config { + bool single_chain_sufficient; + enum ieee80211_smps_mode smps; +}; + +enum iwl_tt_state { + IWL_TI_0 = 0, + IWL_TI_1 = 1, + IWL_TI_2 = 2, + IWL_TI_CT_KILL = 3, + IWL_TI_STATE_MAX = 4, +}; + +struct iwl_tt_restriction; + +struct iwl_tt_trans; + +struct iwl_tt_mgmt { + enum iwl_tt_state state; + bool advanced_tt; + u8 tt_power_mode; + bool ct_kill_toggle; + struct iwl_tt_restriction *restriction; + struct iwl_tt_trans *transaction; + struct timer_list ct_kill_exit_tm; + struct timer_list ct_kill_waiting_tm; +}; + +struct iwl_station_entry { + struct iwl_addsta_cmd sta; + u8 used; + u8 ctxid; + struct iwl_link_quality_cmd *lq; 
+}; + +enum iwl_agg_state { + IWL_AGG_OFF = 0, + IWL_AGG_STARTING = 1, + IWL_AGG_ON = 2, + IWL_EMPTYING_HW_QUEUE_ADDBA = 3, + IWL_EMPTYING_HW_QUEUE_DELBA = 4, +}; + +struct iwl_ht_agg { + u32 rate_n_flags; + enum iwl_agg_state state; + u16 txq_id; + u16 ssn; + bool wait_for_ba; +}; + +struct iwl_tid_data { + u16 seq_number; + u16 next_reclaimed; + struct iwl_ht_agg agg; +}; + +struct reply_tx_error_statistics { + u32 pp_delay; + u32 pp_few_bytes; + u32 pp_bt_prio; + u32 pp_quiet_period; + u32 pp_calc_ttak; + u32 int_crossed_retry; + u32 short_limit; + u32 long_limit; + u32 fifo_underrun; + u32 drain_flow; + u32 rfkill_flush; + u32 life_expire; + u32 dest_ps; + u32 host_abort; + u32 bt_retry; + u32 sta_invalid; + u32 frag_drop; + u32 tid_disable; + u32 fifo_flush; + u32 insuff_cf_poll; + u32 fail_hw_drop; + u32 sta_color_mismatch; + u32 unknown; +}; + +struct reply_agg_tx_error_statistics { + u32 underrun; + u32 bt_prio; + u32 few_bytes; + u32 abort; + u32 last_sent_ttl; + u32 last_sent_try; + u32 last_sent_bt_kill; + u32 scd_query; + u32 bad_crc32; + u32 response; + u32 dump_tx; + u32 delay_tx; + u32 unknown; +}; + +struct iwl_event_log { + bool ucode_trace; + u32 num_wraps; + u32 next_entry; + int non_wraps_count; + int wraps_once_count; + int wraps_more_count; +}; + +struct iwl_dvm_cfg; + +struct iwl_wipan_noa_data; + +struct iwl_priv { + struct iwl_trans *trans; + struct device *dev; + const struct iwl_cfg *cfg; + const struct iwl_fw *fw; + const struct iwl_dvm_cfg *lib; + long unsigned int status; + spinlock_t sta_lock; + struct mutex mutex; + long unsigned int transport_queue_stop; + bool passive_no_rx; + u8 queue_to_mac80211[32]; + long: 0; + atomic_t queue_stop_count[32]; + long unsigned int agg_q_alloc[1]; + struct ieee80211_hw *hw; + struct napi_struct *napi; + struct list_head calib_results; + struct workqueue_struct *workqueue; + struct iwl_hw_params hw_params; + enum nl80211_band band; + u8 valid_contexts; + long: 0; + void (*rx_handlers[255])(struct iwl_priv *, struct iwl_rx_cmd_buffer *); + struct iwl_notif_wait_data notif_wait; + struct iwl_spectrum_notification measure_report; + u8 measurement_status; + long: 0; + u32 ucode_beacon_time; + int missed_beacon_threshold; + u32 ibss_manager; + long unsigned int rx_statistics_jiffies; + u32 rx_handlers_stats[255]; + struct iwl_rf_reset rf_reset; + long unsigned int reload_jiffies; + int reload_count; + bool ucode_loaded; + u8 plcp_delta_threshold; + long: 0; + s32 temperature; + s32 last_temperature; + struct iwl_wipan_noa_data *noa_data; + long unsigned int scan_start; + long unsigned int scan_start_tsf; + size_t scan_cmd_size; + void *scan_cmd; + enum nl80211_band scan_band; + struct cfg80211_scan_request *scan_request; + struct ieee80211_vif *scan_vif; + enum iwl_scan_type scan_type; + u8 scan_tx_ant[6]; + u8 mgmt_tx_ant; + u8 sta_key_max_num; + bool new_scan_threshold_behaviour; + bool wowlan; + struct mac_address addresses[2]; + long: 0; + struct iwl_rxon_context contexts[2]; + __le16 switch_channel; + u8 start_calib; + long: 0; + struct iwl_sensitivity_data sensitivity_data; + struct iwl_chain_noise_data chain_noise_data; + __le16 sensitivity_tbl[11]; + __le16 enhance_sensitivity_tbl[12]; + long: 0; + struct iwl_ht_config current_ht_config; + u8 retry_rate; + long: 0; + int activity_timer_active; + struct iwl_power_mgr power_data; + struct iwl_tt_mgmt thermal_throttle; + int num_stations; + struct iwl_station_entry stations[16]; + long unsigned int ucode_key_table; + struct iwl_tid_data tid_data[128]; + atomic_t 
num_aux_in_flight; + u8 mac80211_registered; + u8 is_open; + long: 0; + enum nl80211_iftype iw_mode; + u64 timestamp; + struct { + __le32 flag; + struct statistics_general_common common; + struct statistics_rx_non_phy rx_non_phy; + struct statistics_rx_phy rx_ofdm; + struct statistics_rx_ht_phy rx_ofdm_ht; + struct statistics_rx_phy rx_cck; + struct statistics_tx tx; + spinlock_t lock; + } statistics; + u8 agg_tids_count; + struct iwl_rx_phy_res last_phy_res; + long: 0; + u32 ampdu_ref; + bool last_phy_res_valid; + u8 phy_calib_chain_noise_reset_cmd; + u8 phy_calib_chain_noise_gain_cmd; + long: 0; + struct reply_tx_error_statistics reply_tx_stats; + struct reply_agg_tx_error_statistics reply_agg_tx_stats; + u8 bt_enable_flag; + u8 bt_status; + u8 bt_traffic_load; + u8 last_bt_traffic_load; + bool bt_ch_announce; + bool bt_full_concurrent; + long: 0; + __le32 kill_ack_mask; + __le32 kill_cts_mask; + __le16 bt_valid; + bool reduced_txpower; + long: 0; + u16 bt_on_thresh; + u16 bt_duration; + u16 dynamic_frag_thresh; + u8 bt_ci_compliance; + long: 0; + struct work_struct bt_traffic_change_work; + bool bt_enable_pspoll; + long: 0; + struct iwl_rxon_context *cur_rssi_ctx; + bool bt_is_sco; + long: 0; + struct work_struct restart; + struct work_struct scan_completed; + struct work_struct abort_scan; + struct work_struct beacon_update; + struct iwl_rxon_context *beacon_ctx; + struct sk_buff *beacon_skb; + void *beacon_cmd; + struct work_struct tt_work; + struct work_struct ct_enter; + struct work_struct ct_exit; + struct work_struct start_internal_scan; + struct work_struct tx_flush; + struct work_struct bt_full_concurrency; + struct work_struct bt_runtime_config; + struct delayed_work scan_check; + s8 tx_power_user_lmt; + s8 tx_power_next; + long: 0; + struct iwl_nvm_data *nvm_data; + u8 *eeprom_blob; + size_t eeprom_blob_size; + struct work_struct txpower_work; + u32 calib_disabled; + struct work_struct run_time_calib_work; + struct timer_list statistics_periodic; + struct timer_list ucode_trace; + struct iwl_event_log event_log; + u8 kck[16]; + u8 kek[16]; + __le64 replay_ctr; + __le16 last_seq_ctl; + bool have_rekey_data; + long: 0; + struct wiphy_wowlan_support wowlan_support; + struct { + u32 error_event_table; + u32 log_event_table; + } device_pointers; + enum iwl_ucode_type cur_ucode; + long: 32; +} __attribute__((packed)); + +enum iwl_antenna_ok { + IWL_ANT_OK_NONE = 0, + IWL_ANT_OK_SINGLE = 1, + IWL_ANT_OK_MULTI = 2, +}; + +struct iwl_tt_restriction { + enum iwl_antenna_ok tx_stream; + enum iwl_antenna_ok rx_stream; + bool is_ht; +}; + +struct iwl_tt_trans { + enum iwl_tt_state next_state; + u32 tt_low; + u32 tt_high; +}; + +struct iwl_sensitivity_ranges { + u16 min_nrg_cck; + u16 nrg_th_cck; + u16 nrg_th_ofdm; + u16 auto_corr_min_ofdm; + u16 auto_corr_min_ofdm_mrc; + u16 auto_corr_min_ofdm_x1; + u16 auto_corr_min_ofdm_mrc_x1; + u16 auto_corr_max_ofdm; + u16 auto_corr_max_ofdm_mrc; + u16 auto_corr_max_ofdm_x1; + u16 auto_corr_max_ofdm_mrc_x1; + u16 auto_corr_max_cck; + u16 auto_corr_max_cck_mrc; + u16 auto_corr_min_cck; + u16 auto_corr_min_cck_mrc; + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + +struct iwl_dvm_bt_params { + bool advanced_bt_coexist; + u8 bt_init_traffic_load; + u32 bt_prio_boost; + u16 agg_time_limit; + bool bt_sco_disable; + bool bt_session_2; +}; + +struct iwl_dvm_cfg { + void (*set_hw_params)(struct iwl_priv *); + int (*set_channel_switch)(struct iwl_priv *, struct ieee80211_channel_switch *); + void (*nic_config)(struct iwl_priv *); 
+ void (*temperature)(struct iwl_priv *); + const struct iwl_dvm_bt_params *bt_params; + s32 chain_noise_scale; + u8 plcp_delta_threshold; + bool adv_thermal_throttle; + bool support_ct_kill_exit; + bool hd_v2; + bool no_idle_support; + bool need_temp_offset_calib; + bool no_xtal_calib; + bool temp_offset_v2; + bool adv_pm; +}; + +struct iwl_wipan_noa_data { + struct callback_head callback_head; + u32 length; + u8 data[0]; +}; + +struct ieee80211_he_mu_edca_param_ac_rec { + u8 aifsn; + u8 ecw_min_max; + u8 mu_edca_timer; +}; + +enum nl80211_preamble { + NL80211_PREAMBLE_LEGACY = 0, + NL80211_PREAMBLE_HT = 1, + NL80211_PREAMBLE_VHT = 2, + NL80211_PREAMBLE_DMG = 3, + NL80211_PREAMBLE_HE = 4, +}; + +struct cfg80211_ftm_responder_stats { + u32 filled; + u32 success_num; + u32 partial_num; + u32 failed_num; + u32 asap_num; + u32 non_asap_num; + u64 total_duration_ms; + u32 unknown_triggers_num; + u32 reschedule_requests_num; + u32 out_of_window_triggers_num; + long: 32; +}; + +struct cfg80211_pmsr_ftm_request_peer { + enum nl80211_preamble preamble; + u16 burst_period; + u8 requested: 1; + u8 asap: 1; + u8 request_lci: 1; + u8 request_civicloc: 1; + u8 trigger_based: 1; + u8 non_trigger_based: 1; + u8 lmr_feedback: 1; + u8 num_bursts_exp; + u8 burst_duration; + u8 ftms_per_burst; + u8 ftmr_retries; + u8 bss_color; +}; + +struct cfg80211_pmsr_request_peer { + u8 addr[6]; + struct cfg80211_chan_def chandef; + u8 report_ap_tsf: 1; + struct cfg80211_pmsr_ftm_request_peer ftm; +}; + +struct cfg80211_pmsr_request { + u64 cookie; + void *drv_data; + u32 n_peers; + u32 nl_portid; + u32 timeout; + u8 mac_addr[6]; + u8 mac_addr_mask[6]; + struct list_head list; + struct cfg80211_pmsr_request_peer peers[0]; + long: 32; +}; + +struct wiphy_delayed_work { + struct wiphy_work work; + struct wiphy *wiphy; + struct timer_list timer; +}; + +enum ieee80211_ac_numbers { + IEEE80211_AC_VO = 0, + IEEE80211_AC_VI = 1, + IEEE80211_AC_BE = 2, + IEEE80211_AC_BK = 3, +}; + +struct ieee80211_tx_queue_params { + u16 txop; + u16 cw_min; + u16 cw_max; + u8 aifs; + bool acm; + bool uapsd; + bool mu_edca; + struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec; +}; + +enum ieee80211_rssi_event_data { + RSSI_EVENT_HIGH = 0, + RSSI_EVENT_LOW = 1, +}; + +struct ieee80211_scan_ies { + const u8 *ies[6]; + size_t len[6]; + const u8 *common_ies; + size_t common_ie_len; +}; + +enum ieee80211_sta_state { + IEEE80211_STA_NOTEXIST = 0, + IEEE80211_STA_NONE = 1, + IEEE80211_STA_AUTH = 2, + IEEE80211_STA_ASSOC = 3, + IEEE80211_STA_AUTHORIZED = 4, +}; + +enum ieee80211_interface_iteration_flags { + IEEE80211_IFACE_ITER_NORMAL = 0, + IEEE80211_IFACE_ITER_RESUME_ALL = 1, + IEEE80211_IFACE_ITER_ACTIVE = 2, + IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER = 4, +}; + +enum iwl_bt_coex_lut_type { + BT_COEX_TIGHT_LUT = 0, + BT_COEX_LOOSE_LUT = 1, + BT_COEX_TX_DIS_LUT = 2, + BT_COEX_MAX_LUT = 3, + BT_COEX_INVALID_LUT = 255, +}; + +enum iwl_bt_coex_mode { + BT_COEX_DISABLE = 0, + BT_COEX_NW = 1, + BT_COEX_BT = 2, + BT_COEX_WIFI = 3, +}; + +enum iwl_bt_coex_enabled_modules { + BT_COEX_MPLUT_ENABLED = 1, + BT_COEX_MPLUT_BOOST_ENABLED = 2, + BT_COEX_SYNC2SCO_ENABLED = 4, + BT_COEX_CORUN_ENABLED = 8, + BT_COEX_HIGH_BAND_RET = 16, +}; + +struct iwl_bt_coex_cmd { + __le32 mode; + __le32 enabled_modules; +}; + +struct iwl_bt_coex_reduced_txp_update_cmd { + __le32 reduced_txp; +}; + +struct iwl_bt_coex_ci_cmd { + __le64 bt_primary_ci; + __le32 primary_ch_phy_id; + __le64 bt_secondary_ci; + __le32 secondary_ch_phy_id; +}; + +enum iwl_bt_activity_grading { + 
BT_OFF = 0, + BT_ON_NO_CONNECTION = 1, + BT_LOW_TRAFFIC = 2, + BT_HIGH_TRAFFIC = 3, + BT_VERY_HIGH_TRAFFIC = 4, + BT_MAX_AG = 5, +}; + +struct iwl_bt_coex_prof_old_notif { + __le32 mbox_msg[4]; + __le32 msg_idx; + __le32 bt_ci_compliance; + __le32 primary_ch_lut; + __le32 secondary_ch_lut; + __le32 bt_activity_grading; + u8 ttc_status; + u8 rrc_status; + u8 wifi_loss_low_rssi; + u8 wifi_loss_mid_high_rssi; +}; + +enum iwl_bt_coex_subcmd_ids { + PROFILE_NOTIF = 255, +}; + +struct iwl_bt_coex_profile_notif { + u8 wifi_loss_low_rssi[6]; + u8 wifi_loss_mid_high_rssi[6]; +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE = 1, + THERMAL_TRIP_HOT = 2, + THERMAL_TRIP_CRITICAL = 3, +}; + +struct thermal_trip { + int temperature; + int hysteresis; + enum thermal_trip_type type; + u8 flags; + void *priv; +}; + +struct thermal_cooling_device_ops; + +struct thermal_cooling_device { + int id; + const char *type; + long unsigned int max_state; + long: 32; + struct device device; + struct device_node *np; + void *devdata; + void *stats; + const struct thermal_cooling_device_ops *ops; + bool updated; + struct mutex lock; + struct list_head thermal_instances; + struct list_head node; +}; + +struct thermal_cooling_device_ops { + int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *); + int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *); + int (*set_cur_state)(struct thermal_cooling_device *, long unsigned int); + int (*get_requested_power)(struct thermal_cooling_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, long unsigned int, u32 *); + int (*power2state)(struct thermal_cooling_device *, u32, long unsigned int *); +}; + +typedef unsigned int iwl_ucode_tlv_capa_t; + +enum iwl_ucode_tlv_capa { + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = 0, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT = 1, + IWL_UCODE_TLV_CAPA_UMAC_SCAN = 2, + IWL_UCODE_TLV_CAPA_BEAMFORMER = 3, + IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = 6, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = 8, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = 9, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = 10, + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = 11, + IWL_UCODE_TLV_CAPA_DQA_SUPPORT = 12, + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = 13, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = 17, + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = 18, + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = 21, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = 22, + IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = 26, + IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = 28, + IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = 29, + IWL_UCODE_TLV_CAPA_BT_COEX_RRC = 30, + IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = 31, + IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG = 32, + IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = 37, + IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = 38, + IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = 39, + IWL_UCODE_TLV_CAPA_CDB_SUPPORT = 40, + IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = 41, + IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = 43, + IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = 44, + IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = 45, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = 46, + IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = 47, + IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = 48, + IWL_UCODE_TLV_CAPA_CS_MODIFY = 49, + IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = 50, + IWL_UCODE_TLV_CAPA_SET_PPAG = 52, + IWL_UCODE_TLV_CAPA_TAS_CFG = 53, + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = 54, + IWL_UCODE_TLV_CAPA_PROTECTED_TWT = 56, + IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = 57, + IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = 58, + IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN = 59, + 
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = 60, + IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = 61, + IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = 62, + IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = 63, + IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = 64, + IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = 65, + IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = 67, + IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = 68, + IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = 70, + IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = 71, + IWL_UCODE_TLV_CAPA_BEACON_STORING = 72, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = 73, + IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = 74, + IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = 75, + IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = 76, + IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = 77, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = 80, + IWL_UCODE_TLV_CAPA_LQM_SUPPORT = 81, + IWL_UCODE_TLV_CAPA_TX_POWER_ACK = 84, + IWL_UCODE_TLV_CAPA_D3_DEBUG = 87, + IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = 88, + IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = 89, + IWL_UCODE_TLV_CAPA_CSI_REPORTING = 90, + IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP = 92, + IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP = 93, + IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = 96, + IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = 98, + IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = 100, + IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT = 103, + IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = 104, + IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = 105, + IWL_UCODE_TLV_CAPA_SYNCED_TIME = 106, + IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM = 108, + IWL_UCODE_TLV_CAPA_BIGTK_TX_SUPPORT = 109, + IWL_UCODE_TLV_CAPA_MLD_API_SUPPORT = 110, + IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT = 111, + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT = 112, + IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT = 113, + IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = 114, + IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = 116, + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = 117, + IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = 121, + IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = 122, + NUM_IWL_UCODE_TLV_CAPA = 123, +}; + +struct iwl_fw_ini_trigger_tlv { + struct iwl_fw_ini_header hdr; + __le32 time_point; + __le32 trigger_reason; + __le32 apply_policy; + __le32 dump_delay; + __le32 occurrences; + __le32 reserved; + __le32 ignore_consec; + __le32 reset_fw; + __le32 multi_dut; + __le64 regions_mask; + __le32 data[0]; +}; + +struct iwl_fw_error_dump_trigger_desc { + __le32 type; + u8 data[0]; +}; + +struct iwl_fw_paging { + dma_addr_t fw_paging_phys; + struct page *fw_paging_block; + u32 fw_paging_size; + u32 fw_offs; +}; + +struct iwl_dump_sanitize_ops { + void (*frob_txf)(void *, void *, size_t); + void (*frob_hcmd)(void *, void *, size_t); + void (*frob_mem)(void *, u32, void *, size_t); +}; + +enum iwl_tsf_id { + TSF_ID_A = 0, + TSF_ID_B = 1, + TSF_ID_C = 2, + TSF_ID_D = 3, + NUM_TSF_IDS = 4, +}; + +struct iwl_p2p_noa_attr { + u8 id; + u8 len_low; + u8 len_high; + u8 idx; + u8 ctwin; + struct ieee80211_p2p_noa_desc desc[2]; + u8 reserved; +}; + +struct iwl_probe_resp_data_notif { + __le32 mac_id; + __le32 noa_active; + struct iwl_p2p_noa_attr noa_attr; + u8 csa_counter; + u8 reserved[3]; +}; + +enum iwl_roc_activity { + ROC_ACTIVITY_HOTSPOT = 0, + ROC_ACTIVITY_P2P_DISC = 1, + ROC_ACTIVITY_P2P_TXRX = 2, + ROC_ACTIVITY_P2P_NEG = 3, + ROC_NUM_ACTIVITIES = 4, +}; + +struct iwl_time_quota_data { + __le32 id_and_color; + __le32 quota; + __le32 max_duration; + __le32 low_latency; +}; + +struct iwl_time_quota_cmd { + struct iwl_time_quota_data quotas[4]; +}; + +enum iwl_mvm_command_groups { + LEGACY_GROUP = 0, + LONG_GROUP = 1, + SYSTEM_GROUP = 2, + 
MAC_CONF_GROUP = 3, + PHY_OPS_GROUP = 4, + DATA_PATH_GROUP = 5, + SCAN_GROUP = 6, + NAN_GROUP = 7, + LOCATION_GROUP = 8, + BT_COEX_GROUP = 9, + PROT_OFFLOAD_GROUP = 11, + REGULATORY_AND_NVM_GROUP = 12, + DEBUG_GROUP = 15, + STATISTICS_GROUP = 16, +}; + +enum iwl_legacy_cmds { + UCODE_ALIVE_NTFY = 1, + REPLY_ERROR___2 = 2, + ECHO_CMD = 3, + INIT_COMPLETE_NOTIF = 4, + PHY_CONTEXT_CMD = 8, + DBG_CFG = 9, + SCAN_ITERATION_COMPLETE_UMAC = 181, + SCAN_CFG_CMD = 12, + SCAN_REQ_UMAC = 13, + SCAN_ABORT_UMAC = 14, + SCAN_COMPLETE_UMAC = 15, + BA_WINDOW_STATUS_NOTIFICATION_ID = 19, + ADD_STA_KEY = 23, + ADD_STA = 24, + REMOVE_STA = 25, + TX_CMD = 28, + TXPATH_FLUSH = 30, + MGMT_MCAST_KEY = 31, + SCD_QUEUE_CFG = 29, + WEP_KEY = 32, + SHARED_MEM_CFG = 37, + TDLS_CHANNEL_SWITCH_CMD = 39, + TDLS_CHANNEL_SWITCH_NOTIFICATION = 170, + TDLS_CONFIG_CMD = 167, + MAC_CONTEXT_CMD = 40, + TIME_EVENT_CMD = 41, + TIME_EVENT_NOTIFICATION = 42, + BINDING_CONTEXT_CMD = 43, + TIME_QUOTA_CMD = 44, + NON_QOS_TX_COUNTER_CMD = 45, + LEDS_CMD = 72, + LQ_CMD = 78, + FW_PAGING_BLOCK_CMD = 79, + SCAN_OFFLOAD_REQUEST_CMD = 81, + SCAN_OFFLOAD_ABORT_CMD = 82, + HOT_SPOT_CMD = 83, + WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION = 103, + WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION = 104, + SCAN_OFFLOAD_COMPLETE = 109, + SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 110, + MATCH_FOUND_NOTIFICATION = 217, + SCAN_ITERATION_COMPLETE = 231, + PHY_CONFIGURATION_CMD = 106, + CALIB_RES_NOTIF_PHY_DB = 107, + PHY_DB_CMD = 108, + POWER_TABLE_CMD___2 = 119, + PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 120, + LTR_CONFIG = 238, + REPLY_THERMAL_MNG_BACKOFF = 126, + NVM_ACCESS_CMD = 136, + BEACON_NOTIFICATION___2 = 144, + BEACON_TEMPLATE_CMD = 145, + TX_ANT_CONFIGURATION_CMD___2 = 152, + STATISTICS_CMD = 156, + STATISTICS_NOTIFICATION___2 = 157, + EOSP_NOTIFICATION = 158, + REDUCE_TX_POWER_CMD = 159, + MISSED_BEACONS_NOTIFICATION___2 = 162, + MAC_PM_POWER_TABLE = 169, + MFUART_LOAD_NOTIFICATION = 177, + RSS_CONFIG_CMD = 179, + REPLY_RX_PHY_CMD___2 = 192, + REPLY_RX_MPDU_CMD___2 = 193, + BAR_FRAME_RELEASE = 194, + FRAME_RELEASE = 195, + BA_NOTIF = 197, + MCC_UPDATE_CMD = 200, + MCC_CHUB_UPDATE_CMD = 201, + MARKER_CMD = 203, + BT_PROFILE_NOTIFICATION = 206, + BT_CONFIG = 155, + BT_COEX_UPDATE_REDUCED_TXP = 92, + BT_COEX_CI = 93, + REPLY_SF_CFG_CMD = 209, + REPLY_BEACON_FILTERING_CMD = 210, + DTS_MEASUREMENT_NOTIFICATION = 221, + LDBG_CONFIG_CMD = 246, + DEBUG_LOG_MSG = 247, + MCAST_FILTER_CMD = 208, + D3_CONFIG_CMD = 211, + PROT_OFFLOAD_CONFIG_CMD = 212, + D0I3_END_CMD = 237, + WOWLAN_PATTERNS = 224, + WOWLAN_CONFIGURATION = 225, + WOWLAN_TSC_RSC_PARAM = 226, + WOWLAN_TKIP_PARAM = 227, + WOWLAN_KEK_KCK_MATERIAL = 228, + WOWLAN_GET_STATUSES = 229, + SCAN_OFFLOAD_PROFILES_QUERY_CMD = 86, +}; + +struct iwl_mcast_filter_cmd { + u8 filter_own; + u8 port_id; + u8 count; + u8 pass_all; + u8 bssid[6]; + u8 reserved[2]; + u8 addr_list[0]; +}; + +enum iwl_mcc_source { + MCC_SOURCE_OLD_FW = 0, + MCC_SOURCE_ME = 1, + MCC_SOURCE_BIOS = 2, + MCC_SOURCE_3G_LTE_HOST = 3, + MCC_SOURCE_3G_LTE_DEVICE = 4, + MCC_SOURCE_WIFI = 5, + MCC_SOURCE_RESERVED = 6, + MCC_SOURCE_DEFAULT = 7, + MCC_SOURCE_UNINITIALIZED = 8, + MCC_SOURCE_MCC_API = 9, + MCC_SOURCE_GET_CURRENT = 16, + MCC_SOURCE_GETTING_MCC_TEST_MODE = 17, +}; + +struct iwl_mcc_allowed_ap_type_cmd { + u8 offset_map[338]; + __le16 reserved; +}; + +struct iwl_sar_offset_mapping_cmd { + u8 offset_map[338]; + __le16 reserved; +}; + +struct iwl_lq_cmd { + u8 sta_id; + u8 reduced_tpc; + __le16 control; + u8 flags; + u8 mimo_delim; + 
u8 single_stream_ant_msk; + u8 dual_stream_ant_msk; + u8 initial_rate_index[4]; + __le16 agg_time_limit; + u8 agg_disable_start_th; + u8 agg_frame_cnt_limit; + __le32 reserved2; + __le32 rs_table[16]; + __le32 ss_params; +}; + +struct iwl_rx_phy_info { + u8 non_cfg_phy_cnt; + u8 cfg_phy_cnt; + u8 stat_id; + u8 reserved1; + __le32 system_timestamp; + __le64 timestamp; + __le32 beacon_time_stamp; + __le16 phy_flags; + __le16 channel; + __le32 non_cfg_phy[8]; + __le32 rate_n_flags; + __le32 byte_count; + u8 mac_active_msk; + u8 mac_context_info; + __le16 frame_time; +}; + +enum iwl_sf_state { + SF_LONG_DELAY_ON = 0, + SF_FULL_ON = 1, + SF_UNINIT = 2, + SF_INIT_OFF = 3, + SF_HW_NUM_STATES = 4, +}; + +struct mvm_statistics_rx_non_phy { + __le32 bogus_cts; + __le32 bogus_ack; + __le32 non_channel_beacons; + __le32 channel_beacons; + __le32 num_missed_bcon; + __le32 adc_rx_saturation_time; + __le32 ina_detection_search_time; + __le32 beacon_silence_rssi_a; + __le32 beacon_silence_rssi_b; + __le32 beacon_silence_rssi_c; + __le32 interference_data_flag; + __le32 channel_load; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; + __le32 num_bt_kills; + __le32 mac_id; +}; + +struct mvm_statistics_rx_non_phy_v3 { + __le32 bogus_cts; + __le32 bogus_ack; + __le32 non_bssid_frames; + __le32 filtered_frames; + __le32 non_channel_beacons; + __le32 channel_beacons; + __le32 num_missed_bcon; + __le32 adc_rx_saturation_time; + __le32 ina_detection_search_time; + __le32 beacon_silence_rssi_a; + __le32 beacon_silence_rssi_b; + __le32 beacon_silence_rssi_c; + __le32 interference_data_flag; + __le32 channel_load; + __le32 dsp_false_alarms; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; + __le32 num_bt_kills; + __le32 mac_id; + __le32 directed_data_mpdu; +}; + +struct mvm_statistics_rx_phy { + __le32 unresponded_rts; + __le32 rxe_frame_lmt_overrun; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 reserved; +}; + +struct mvm_statistics_rx_phy_v2 { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_lmt_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved; +}; + +struct mvm_statistics_rx_ht_phy_v1 { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +}; + +struct mvm_statistics_rx_ht_phy { + __le32 mh_format_err; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +}; + +struct mvm_statistics_rx { + struct mvm_statistics_rx_phy ofdm; + struct mvm_statistics_rx_phy cck; + struct mvm_statistics_rx_non_phy general; + struct mvm_statistics_rx_ht_phy ofdm_ht; +}; + +struct mvm_statistics_rx_v3 { + struct mvm_statistics_rx_phy_v2 ofdm; + struct mvm_statistics_rx_phy_v2 cck; + struct mvm_statistics_rx_non_phy_v3 general; + struct mvm_statistics_rx_ht_phy_v1 ofdm_ht; +}; + +enum iwl_table_type { + LQ_NONE = 0, + LQ_LEGACY_G = 1, + LQ_LEGACY_A = 2, + LQ_HT_SISO = 3, + 
LQ_HT_MIMO2 = 4, + LQ_VHT_SISO = 5, + LQ_VHT_MIMO2 = 6, + LQ_HE_SISO = 7, + LQ_HE_MIMO2 = 8, + LQ_MAX = 9, +}; + +struct rs_rate { + int index; + enum iwl_table_type type; + u8 ant; + u32 bw; + bool sgi; + bool ldpc; + bool stbc; + bool bfer; +}; + +struct iwl_mvm; + +struct lq_sta_pers_rs_fw { + u32 sta_id; + u8 chains; + s8 chain_signal[4]; + s8 last_rssi; + struct iwl_mvm *drv; +}; + +enum iwl_mvm_queue_status { + IWL_MVM_QUEUE_FREE = 0, + IWL_MVM_QUEUE_RESERVED = 1, + IWL_MVM_QUEUE_READY = 2, + IWL_MVM_QUEUE_SHARED = 3, +}; + +struct iwl_mvm_dqa_txq_info { + u8 ra_sta_id; + bool reserved; + u8 mac80211_ac; + u8 txq_tid; + u16 tid_bitmap; + long unsigned int last_frame_time[9]; + enum iwl_mvm_queue_status status; +}; + +struct iwl_mvm_tvqm_txq_info { + u8 sta_id; + u8 txq_tid; +}; + +struct iwl_nvm_section { + u16 length; + const u8 *data; +}; + +struct iwl_fwrt_shared_mem_cfg { + int num_lmacs; + int num_txfifo_entries; + struct { + u32 txfifo_size[15]; + u32 rxfifo1_size; + } lmac[2]; + u32 rxfifo2_size; + u32 rxfifo2_control_size; + u32 internal_txfifo_addr; + u32 internal_txfifo_size[6]; +}; + +struct iwl_fw_dump_desc; + +struct iwl_fwrt_dump_data { + union { + struct { + struct iwl_fw_ini_trigger_tlv *trig; + struct iwl_rx_packet *fw_pkt; + }; + struct { + const struct iwl_fw_dump_desc *desc; + bool monitor_only; + }; + }; +}; + +struct iwl_fwrt_wk_data { + u8 idx; + struct delayed_work wk; + struct iwl_fwrt_dump_data dump_data; +}; + +struct iwl_txf_iter_data { + int fifo; + int lmac; + u32 fifo_size; + u8 internal_txf; +}; + +struct iwl_sar_profile_chain { + u8 subbands[11]; +}; + +struct iwl_sar_profile { + bool enabled; + struct iwl_sar_profile_chain chains[4]; +}; + +struct iwl_geo_profile_band { + u8 max; + u8 chains[2]; +}; + +struct iwl_geo_profile { + struct iwl_geo_profile_band bands[3]; +}; + +struct iwl_ppag_chain { + s8 subbands[11]; +}; + +struct iwl_fw_runtime_ops; + +struct iwl_fw_runtime { + struct iwl_trans *trans; + const struct iwl_fw *fw; + struct device *dev; + const struct iwl_fw_runtime_ops *ops; + void *ops_ctx; + const struct iwl_dump_sanitize_ops *sanitize_ops; + void *sanitize_ctx; + struct iwl_fw_paging fw_paging_db[33]; + u16 num_of_paging_blk; + u16 num_of_pages_in_last_blk; + enum iwl_ucode_type cur_fw_img; + struct iwl_fwrt_shared_mem_cfg smem_cfg; + struct { + struct iwl_fwrt_wk_data wks[5]; + long unsigned int active_wks; + u8 conf; + long unsigned int non_collect_ts_start[32]; + u32 *d3_debug_data; + u32 lmac_err_id[2]; + u32 tcm_err_id[2]; + u32 rcm_err_id[2]; + u32 umac_err_id; + struct iwl_txf_iter_data txf_iter_data; + struct { + u8 type; + u8 subtype; + u32 lmac_major; + u32 lmac_minor; + u32 umac_major; + u32 umac_minor; + } fw_ver; + } dump; + long: 32; + struct { + u64 seq; + } timestamp; + struct iwl_sar_profile sar_profiles[4]; + u8 sar_chain_a_profile; + u8 sar_chain_b_profile; + u8 reduced_power_flags; + struct iwl_geo_profile geo_profiles[8]; + long: 0; + u32 geo_rev; + u32 geo_num_profiles; + bool geo_enabled; + struct iwl_ppag_chain ppag_chains[2]; + long: 0; + u32 ppag_flags; + u8 ppag_ver; + struct iwl_sar_offset_mapping_cmd sgom_table; + bool sgom_enabled; + struct iwl_mcc_allowed_ap_type_cmd uats_table; + u8 uefi_tables_lock_status; + long: 0; +} __attribute__((packed)); + +enum iwl_mvm_scan_type { + IWL_SCAN_TYPE_NOT_SET = 0, + IWL_SCAN_TYPE_UNASSOC = 1, + IWL_SCAN_TYPE_WILD = 2, + IWL_SCAN_TYPE_MILD = 3, + IWL_SCAN_TYPE_FRAGMENTED = 4, + IWL_SCAN_TYPE_FAST_BALANCE = 5, +}; + +enum iwl_mvm_sched_scan_pass_all_states { + 
SCHED_SCAN_PASS_ALL_DISABLED = 0, + SCHED_SCAN_PASS_ALL_ENABLED = 1, + SCHED_SCAN_PASS_ALL_FOUND = 2, +}; + +struct iwl_mvm_int_sta { + u32 sta_id; + u8 type; + u32 tfd_queue_msk; +}; + +struct iwl_mvm_phy_ctxt { + u16 id; + u16 color; + u32 ref; + enum nl80211_chan_width width; + struct ieee80211_channel *channel; + u32 center_freq1; + bool rlc_disabled; + u32 channel_load_by_us; +}; + +enum iwl_bt_force_ant_mode { + BT_FORCE_ANT_DIS = 0, + BT_FORCE_ANT_AUTO = 1, + BT_FORCE_ANT_BT = 2, + BT_FORCE_ANT_WIFI = 3, + BT_FORCE_ANT_MAX = 4, +}; + +struct iwl_mvm_tt_mgmt { + struct delayed_work ct_kill_exit; + bool dynamic_smps; + u32 tx_backoff; + u32 min_backoff; + struct iwl_tt_params params; + bool throttle; +}; + +struct thermal_zone_device; + +struct iwl_mvm_thermal_device { + struct thermal_trip trips[8]; + struct thermal_zone_device *tzone; +}; + +struct iwl_mvm_cooling_device { + u32 cur_state; + struct thermal_cooling_device *cdev; +}; + +struct ewma_rate { + long unsigned int internal; +}; + +struct iwl_mvm_tcm_mac { + struct { + u32 pkts[4]; + u32 airtime; + } tx; + struct { + u32 pkts[4]; + u32 airtime; + u32 last_ampdu_ref; + } rx; + long: 32; + struct { + u64 rx_bytes; + struct ewma_rate rate; + bool detected; + } uapsd_nonagg_detect; + bool opened_rx_ba_sessions; + long: 32; +}; + +enum iwl_mvm_traffic_load { + IWL_MVM_TRAFFIC_LOW = 0, + IWL_MVM_TRAFFIC_MEDIUM = 1, + IWL_MVM_TRAFFIC_HIGH = 2, +}; + +struct iwl_mvm_tcm { + struct delayed_work work; + spinlock_t lock; + long unsigned int ts; + long unsigned int ll_ts; + long unsigned int uapsd_nonagg_ts; + bool paused; + struct iwl_mvm_tcm_mac data[4]; + struct { + u32 elapsed; + u32 airtime[4]; + enum iwl_mvm_traffic_load load[4]; + enum iwl_mvm_traffic_load band_load[6]; + enum iwl_mvm_traffic_load global_load; + bool low_latency[4]; + bool change[4]; + } result; +}; + +enum iwl_mvm_tdls_cs_state { + IWL_MVM_TDLS_SW_IDLE = 0, + IWL_MVM_TDLS_SW_REQ_SENT = 1, + IWL_MVM_TDLS_SW_RESP_RCVD = 2, + IWL_MVM_TDLS_SW_REQ_RCVD = 3, + IWL_MVM_TDLS_SW_ACTIVE = 4, +}; + +struct ptp_data { + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct delayed_work dwork; + u32 last_gp2; + u32 wrap_counter; + u32 scale_update_gp2; + long: 32; + u64 scale_update_adj_time_ns; + u64 scaled_freq; + s64 delta; +}; + +struct iwl_time_sync_data { + struct sk_buff_head frame_list; + u8 peer_addr[6]; + bool active; +}; + +struct iwl_mei_scan_filter { + bool is_mei_limited_scan; + struct sk_buff_head scan_res; + struct work_struct scan_work; +}; + +struct iwl_phy_db; + +struct iwl_mvm_vif; + +struct iwl_mei_nvm; + +struct iwl_mvm_csme_conn_info; + +struct iwl_mvm_baid_data; + +struct iwl_mvm_acs_survey; + +struct iwl_mvm { + struct device *dev; + struct iwl_trans *trans; + const struct iwl_fw *fw; + const struct iwl_cfg *cfg; + struct iwl_phy_db *phy_db; + struct ieee80211_hw *hw; + struct mutex mutex; + struct list_head async_handlers_list; + spinlock_t async_handlers_lock; + struct work_struct async_handlers_wk; + struct wiphy_work async_handlers_wiphy_wk; + struct wiphy_work trig_link_selection_wk; + struct work_struct roc_done_wk; + long unsigned int init_status; + long unsigned int status; + u32 queue_sync_cookie; + long unsigned int queue_sync_state; + struct iwl_mvm_vif *bf_allowed_vif; + bool hw_registered; + bool rfkill_safe_init_done; + u8 cca_40mhz_workaround; + long: 0; + u32 ampdu_ref; + bool ampdu_toggle; + long: 0; + struct iwl_notif_wait_data notif_wait; + union { + struct mvm_statistics_rx_v3 rx_stats_v3; + struct 
mvm_statistics_rx rx_stats; + }; + struct { + u64 rx_time; + u64 tx_time; + u64 on_time_rf; + u64 on_time_scan; + } radio_stats; + struct { + u64 rx_time; + u64 tx_time; + u64 on_time_rf; + u64 on_time_scan; + } accu_radio_stats; + struct list_head add_stream_txqs; + union { + struct iwl_mvm_dqa_txq_info queue_info[32]; + struct iwl_mvm_tvqm_txq_info tvqm_info[512]; + }; + struct work_struct add_stream_wk; + spinlock_t add_stream_lock; + const char *nvm_file_name; + struct iwl_nvm_data *nvm_data; + struct iwl_mei_nvm *mei_nvm_data; + struct iwl_mvm_csme_conn_info *csme_conn_info; + bool mei_rfkill_blocked; + bool mei_registered; + long: 0; + struct work_struct sap_connected_wk; + struct iwl_nvm_data *temp_nvm_data; + struct iwl_nvm_section nvm_sections[13]; + long: 32; + struct iwl_fw_runtime fwrt; + struct mac_address addresses[5]; + struct iwl_rx_phy_info last_phy_info; + long: 0; + struct ieee80211_sta *fw_id_to_mac_id[16]; + struct ieee80211_link_sta *fw_id_to_link_sta[16]; + u8 rx_ba_sessions; + long: 0; + u32 rts_threshold; + unsigned int scan_status; + size_t scan_cmd_size; + void *scan_cmd; + struct iwl_mcast_filter_cmd *mcast_filter_cmd; + enum iwl_mvm_scan_type scan_type; + enum iwl_mvm_scan_type hb_scan_type; + enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; + struct delayed_work scan_timeout_dwork; + unsigned int max_scans; + u32 scan_uid_status[4]; + u64 scan_start; + struct iwl_mvm_vif *scan_vif; + u8 scan_link_id; + u8 scan_rx_ant; + long: 0; + struct iwl_mvm_int_sta aux_sta; + struct iwl_mvm_int_sta snif_sta; + bool last_ebs_successful; + u8 scan_last_antenna_idx; + u8 mgmt_last_antenna_idx; + u8 set_tx_ant; + u8 set_rx_ant; + long: 0; + enum iwl_sf_state sf_state; + struct dentry *debugfs_dir; + struct iwl_mvm_phy_ctxt phy_ctxts[3]; + struct list_head time_event_list; + spinlock_t time_event_lock; + long unsigned int fw_key_table[1]; + u8 fw_key_deleted[16]; + struct ieee80211_vif *vif_id_to_mac[4]; + struct ieee80211_bss_conf *link_id_to_link_conf[4]; + s8 fw_restart; + long: 0; + u8 *error_recovery_buf; + struct ieee80211_vif *p2p_device_vif; + struct wiphy_wowlan_support wowlan; + int gtk_ivlen; + int gtk_icvlen; + int ptk_ivlen; + int ptk_icvlen; + struct ieee80211_scan_ies nd_ies; + struct cfg80211_match_set *nd_match_sets; + int n_nd_match_sets; + struct ieee80211_channel **nd_channels; + int n_nd_channels; + bool net_detect; + bool fast_resume; + u8 offload_tid; + long: 0; + wait_queue_head_t rx_sync_waitq; + union { + struct iwl_bt_coex_prof_old_notif last_bt_notif; + struct iwl_bt_coex_profile_notif last_bt_wifi_loss; + }; + struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; + u8 bt_tx_prio; + long: 0; + enum iwl_bt_force_ant_mode bt_force_ant_mode; + struct list_head aux_roc_te_list; + struct iwl_mvm_tt_mgmt thermal_throttle; + struct iwl_mvm_thermal_device tz_device; + struct iwl_mvm_cooling_device cooling_dev; + s32 temperature; + bool temperature_test; + bool fw_static_smps_request; + long: 0; + long unsigned int bt_coex_last_tcm_ts; + struct iwl_mvm_tcm tcm; + u8 uapsd_noagg_bssid_write_idx; + short: 0; + struct mac_address uapsd_noagg_bssids[20]; + struct iwl_time_quota_cmd last_quota_cmd; + u16 aux_queue; + u16 snif_queue; + u16 probe_queue; + u16 p2p_dev_queue; + u8 ps_disabled; + long: 0; + u32 ext_clock_valid; + struct ieee80211_vif *csme_vif; + struct ieee80211_vif *csa_vif; + struct ieee80211_vif *csa_tx_blocked_vif; + u8 csa_tx_block_bcn_timeout; + long: 0; + u32 ap_last_beacon_gp2; + bool ibss_manager; + bool lar_regdom_set; + long: 0; + enum 
iwl_mcc_source mcc_src; + struct { + struct delayed_work dwork; + enum iwl_mvm_tdls_cs_state state; + u8 cur_sta_id; + struct { + u8 sta_id; + u8 op_class; + bool initiator; + struct cfg80211_chan_def chandef; + struct sk_buff *skb; + u32 ch_sw_tm_ie; + u32 sent_timestamp; + } peer; + } tdls_cs; + u32 ciphers[10]; + long: 32; + struct cfg80211_ftm_responder_stats ftm_resp_stats; + struct { + struct cfg80211_pmsr_request *req; + struct wireless_dev *req_wdev; + struct list_head loc_list; + int responses[5]; + struct { + struct list_head resp; + } smooth; + struct list_head pasn_list; + } ftm_initiator; + struct list_head resp_pasn_list; + long: 32; + struct ptp_data ptp_data; + struct { + u8 range_resp; + } cmd_ver; + long: 0; + struct ieee80211_vif *nan_vif; + struct iwl_mvm_baid_data *baid_map[32]; + bool drop_bcn_ap_mode; + long: 0; + struct delayed_work cs_tx_unblock_dwork; + bool monitor_on; + u8 monitor_p80; + __le16 cur_aid; + u8 cur_bssid[6]; + bool rx_ts_ptp; + long: 0; + long unsigned int last_6ghz_passive_scan_jiffies; + long unsigned int last_reset_or_resume_time_jiffies; + bool sta_remove_requires_queue_remove; + bool mld_api_is_used; + bool fw_product_reset; + long: 0; + struct iwl_time_sync_data time_sync; + struct iwl_mei_scan_filter mei_scan_filter; + struct iwl_mvm_acs_survey *acs_survey; + bool statistics_clear; + long: 0; + u32 bios_enable_puncturing; +} __attribute__((packed)); + +struct iwl_lq_sta_rs_fw { + u32 last_rate_n_flags; + struct lq_sta_pers_rs_fw pers; +}; + +struct iwl_rate_scale_data { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; +}; + +enum rs_column { + RS_COLUMN_LEGACY_ANT_A = 0, + RS_COLUMN_LEGACY_ANT_B = 1, + RS_COLUMN_SISO_ANT_A = 2, + RS_COLUMN_SISO_ANT_B = 3, + RS_COLUMN_SISO_ANT_A_SGI = 4, + RS_COLUMN_SISO_ANT_B_SGI = 5, + RS_COLUMN_MIMO2 = 6, + RS_COLUMN_MIMO2_SGI = 7, + RS_COLUMN_LAST = 7, + RS_COLUMN_COUNT = 8, + RS_COLUMN_INVALID = 9, +}; + +struct rs_rate_stats { + u64 success; + u64 total; +}; + +struct iwl_scale_tbl_info { + struct rs_rate rate; + enum rs_column column; + const u16 *expected_tpt; + long: 32; + struct iwl_rate_scale_data win[17]; + struct iwl_rate_scale_data tpc_win[16]; +}; + +struct lq_sta_pers { + u8 chains; + s8 chain_signal[4]; + s8 last_rssi; + u16 max_agg_bufsize; + struct rs_rate_stats tx_stats[136]; + struct iwl_mvm *drv; + spinlock_t lock; +}; + +struct rs_init_rate_info; + +struct iwl_lq_sta { + u8 active_tbl; + u8 rs_state; + u8 search_better_tbl; + s32 last_tpt; + u32 table_count_limit; + u32 max_failure_limit; + u32 max_success_limit; + u32 table_count; + u32 total_failed; + u32 total_success; + u64 flush_timer; + u32 visited_columns; + long: 32; + u64 last_tx; + bool is_vht; + bool ldpc; + bool stbc_capable; + bool bfer_capable; + enum nl80211_band band; + long unsigned int active_legacy_rate; + long unsigned int active_siso_rate; + long unsigned int active_mimo2_rate; + u8 max_legacy_rate_idx; + u8 max_siso_rate_idx; + u8 max_mimo2_rate_idx; + struct rs_rate optimal_rate; + long unsigned int optimal_rate_mask; + const struct rs_init_rate_info *optimal_rates; + int optimal_nentries; + u8 missed_rate_counter; + struct iwl_lq_cmd lq; + long: 32; + struct iwl_scale_tbl_info lq_info[2]; + u8 tx_agg_tid_en; + u32 last_rate_n_flags; + u8 is_agg; + int tpc_reduce; + struct lq_sta_pers pers; +}; + +struct rs_init_rate_info { + s8 rssi; + u8 rate_idx; +}; + +enum iwl_mvm_agg_state { + IWL_AGG_OFF___2 = 0, + IWL_AGG_QUEUED = 1, + IWL_AGG_STARTING___2 = 2, + IWL_AGG_ON___2 = 3, 
+ IWL_EMPTYING_HW_QUEUE_ADDBA___2 = 4, + IWL_EMPTYING_HW_QUEUE_DELBA___2 = 5, +}; + +struct iwl_mvm_tid_data { + u16 seq_number; + u16 next_reclaimed; + u32 rate_n_flags; + u8 lq_color; + bool amsdu_in_ampdu_allowed; + enum iwl_mvm_agg_state state; + u16 txq_id; + u16 ssn; + u16 tx_time; + long unsigned int tpt_meas_start; + u32 tx_count_last; + u32 tx_count; +}; + +struct iwl_mvm_key_pn { + struct callback_head callback_head; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct { + u8 pn[48]; + long: 32; + long: 32; + long: 32; + long: 32; + } q[0]; +}; + +struct iwl_mvm_rxq_dup_data { + __le16 last_seq[9]; + u8 last_sub_frame[9]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct iwl_mvm_link_sta { + struct callback_head callback_head; + u32 sta_id; + long: 32; + union { + struct iwl_lq_sta_rs_fw rs_fw; + struct iwl_lq_sta rs_drv; + } lq_sta; + u16 orig_amsdu_len; + u8 avg_energy; + long: 32; +}; + +struct iwl_mvm_mpdu_counter { + u32 tx; + u32 rx; +}; + +struct iwl_mvm_tpt_counter { + spinlock_t lock; + struct iwl_mvm_mpdu_counter per_link[3]; + long unsigned int window_start; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct iwl_mvm_sta { + u32 tfd_queue_msk; + u32 mac_id_n_color; + u16 tid_disable_agg; + u8 sta_type; + enum ieee80211_sta_state sta_state; + bool bt_reduced_txpower; + bool next_status_eosp; + bool authorized; + spinlock_t lock; + struct iwl_mvm_tid_data tid_data[9]; + u8 tid_to_baid[8]; + struct ieee80211_vif *vif; + struct iwl_mvm_key_pn *ptk_pn[4]; + struct iwl_mvm_rxq_dup_data *dup_data; + u8 reserved_queue; + s8 tx_protection; + bool tt_tx_protection; + bool disable_tx; + u16 amsdu_enabled; + u16 max_amsdu_len; + bool sleeping; + u8 agg_tids; + u8 sleep_tx_count; + u8 tx_ant; + u32 pairwise_cipher; + long: 32; + struct iwl_mvm_link_sta deflink; + struct iwl_mvm_link_sta *link[15]; + struct iwl_mvm_tpt_counter *mpdu_counters; +}; + +struct iwl_fw_runtime_ops { + void (*dump_start)(void *); + void (*dump_end)(void *); + int (*send_hcmd)(void *, struct iwl_host_cmd *); + bool (*d3_debug_enable)(void *); +}; + +struct iwl_fw_dump_desc { + size_t len; + struct iwl_fw_error_dump_trigger_desc trig_desc; +}; + +struct iwl_mei_nvm { + u8 hw_addr[6]; + u8 n_hw_addrs; + u8 reserved; + u32 radio_cfg; + u32 caps; + u32 nvm_version; + u32 channels[110]; +}; + +struct iwl_mei_conn_info { + u8 lp_state; + u8 auth_mode; + u8 ssid_len; + u8 channel; + u8 band; + u8 pairwise_cipher; + u8 bssid[6]; + u8 ssid[32]; +}; + +struct iwl_mvm_time_event_data { + struct ieee80211_vif *vif; + struct list_head list; + long unsigned int end_jiffies; + u32 duration; + bool running; + u32 uid; + u32 id; + s8 link_id; +}; + +enum iwl_mvm_smps_type_request { + IWL_MVM_SMPS_REQ_BT_COEX = 0, + IWL_MVM_SMPS_REQ_TT = 1, + IWL_MVM_SMPS_REQ_PROT = 2, + IWL_MVM_SMPS_REQ_FW = 3, + NUM_IWL_MVM_SMPS_REQ = 4, +}; + +struct iwl_mvm_link_bf_data { + int ave_beacon_signal; + int last_cqm_event; + int bt_coex_min_thold; + int bt_coex_max_thold; + int last_bt_coex_event; +}; + +struct iwl_probe_resp_data { + struct callback_head callback_head; + struct iwl_probe_resp_data_notif notif; + int noa_len; +}; + +struct iwl_mvm_vif_link_info { + u8 bssid[6]; + u8 ap_sta_id; + u8 fw_link_id; + struct iwl_mvm_int_sta bcast_sta; + struct iwl_mvm_int_sta mcast_sta; + struct { + 
u32 num_beacons; + u32 accu_num_beacons; + u8 avg_signal; + } beacon_stats; + enum ieee80211_smps_mode smps_requests[4]; + struct iwl_probe_resp_data *probe_resp_data; + struct ieee80211_key_conf *igtk; + bool he_ru_2mhz_block; + bool active; + bool listen_lmac; + bool csa_block_tx; + u16 cab_queue; + struct iwl_mvm_phy_ctxt *phy_ctxt; + struct ieee80211_tx_queue_params queue_params[4]; + u16 mgmt_queue; + struct iwl_mvm_link_bf_data bf_data; +}; + +enum iwl_mvm_esr_state { + IWL_MVM_ESR_BLOCKED_PREVENTION = 1, + IWL_MVM_ESR_BLOCKED_WOWLAN = 2, + IWL_MVM_ESR_BLOCKED_TPT = 4, + IWL_MVM_ESR_BLOCKED_FW = 8, + IWL_MVM_ESR_BLOCKED_NON_BSS = 16, + IWL_MVM_ESR_BLOCKED_ROC = 32, + IWL_MVM_ESR_BLOCKED_TMP_NON_BSS = 64, + IWL_MVM_ESR_EXIT_MISSED_BEACON = 65536, + IWL_MVM_ESR_EXIT_LOW_RSSI = 131072, + IWL_MVM_ESR_EXIT_COEX = 262144, + IWL_MVM_ESR_EXIT_BANDWIDTH = 524288, + IWL_MVM_ESR_EXIT_CSA = 1048576, + IWL_MVM_ESR_EXIT_LINK_USAGE = 2097152, + IWL_MVM_ESR_EXIT_FAIL_ENTRY = 4194304, +}; + +struct iwl_mvm_esr_exit { + long unsigned int ts; + enum iwl_mvm_esr_state reason; +}; + +struct iwl_mvm_vif { + struct iwl_mvm *mvm; + u16 id; + u16 color; + bool associated; + u8 ap_assoc_sta_count; + bool uploaded; + bool ap_ibss_active; + bool pm_enabled; + bool monitor_active; + bool esr_active; + bool session_prot_connection_loss; + u8 low_latency: 6; + u8 low_latency_actual: 1; + u8 authorized: 1; + bool ps_disabled; + u32 esr_disable_reason; + u32 ap_beacon_time; + bool bf_enabled; + bool ba_enabled; + struct { + u8 kck[24]; + u8 kek[32]; + size_t kek_len; + size_t kck_len; + u32 akm; + long: 32; + __le64 replay_ctr; + bool valid; + long: 32; + } rekey_data; + int tx_key_idx; + bool seqno_valid; + u16 seqno; + struct in6_addr target_ipv6_addrs[12]; + long unsigned int tentative_addrs[1]; + int num_target_ipv6_addrs; + u8 uapsd_misbehaving_ap_addr[6]; + struct delayed_work uapsd_nonagg_detected_wk; + bool csa_countdown; + bool csa_failed; + bool csa_bcn_pending; + bool csa_blocks_tx; + u16 csa_target_freq; + u16 csa_count; + u16 csa_misbehave; + struct delayed_work csa_work; + enum iwl_tsf_id tsf_id; + struct iwl_mvm_time_event_data time_event_data; + struct iwl_mvm_time_event_data hs_time_event_data; + enum iwl_roc_activity roc_activity; + long: 32; + netdev_features_t features; + struct ieee80211_sta *ap_sta; + struct ieee80211_key_conf *ap_early_keys[4]; + struct { + struct ieee80211_key_conf *keys[2]; + } bcn_prot; + u16 max_tx_op; + u16 link_selection_res; + u8 link_selection_primary; + u8 primary_link; + struct iwl_mvm_esr_exit last_esr_exit; + u8 exit_same_reason_count; + struct wiphy_delayed_work prevent_esr_done_wk; + struct wiphy_delayed_work mlo_int_scan_wk; + struct wiphy_work unblock_esr_tpt_wk; + struct wiphy_delayed_work unblock_esr_tmp_non_bss_wk; + struct iwl_mvm_vif_link_info deflink; + struct iwl_mvm_vif_link_info *link[15]; + long: 32; +}; + +struct iwl_mvm_reorder_buffer { + u16 head_sn; + u16 num_stored; + int queue; + bool valid; + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct iwl_mvm_reorder_buf_entry { + struct sk_buff_head frames; +}; + +struct iwl_mvm_baid_data { + struct callback_head callback_head; + u32 sta_mask; + u8 tid; + u8 baid; + u16 timeout; + u16 buf_size; + u16 entries_per_queue; + long unsigned int last_rx; + struct timer_list session_timer; + struct iwl_mvm_baid_data **rcu_ptr; + struct iwl_mvm *mvm; + long: 32; + long: 32; + long: 32; + 
struct iwl_mvm_reorder_buffer reorder_buf[16]; + struct iwl_mvm_reorder_buf_entry entries[0]; +}; + +struct iwl_mvm_acs_survey_channel { + u32 time; + u32 time_busy; + u32 time_tx; + u32 time_rx; + s8 noise; +}; + +struct iwl_mvm_acs_survey { + struct iwl_mvm_acs_survey_channel *bands[6]; + int n_channels; + struct iwl_mvm_acs_survey_channel channels[0]; +}; + +struct iwl_mvm_csme_conn_info { + struct callback_head callback_head; + struct iwl_mei_conn_info conn_info; +}; + +struct iwl_bt_iterator_data { + struct iwl_bt_coex_prof_old_notif *notif; + struct iwl_mvm *mvm; + struct ieee80211_chanctx_conf *primary; + struct ieee80211_chanctx_conf *secondary; + bool primary_ll; + u8 primary_load; + u8 secondary_load; +}; + +struct arm_delay_ops { + void (*delay)(long unsigned int); + void (*const_udelay)(long unsigned int); + void (*udelay)(long unsigned int); + long unsigned int ticks_per_jiffy; +}; + +enum nl80211_mntr_flags { + __NL80211_MNTR_FLAG_INVALID = 0, + NL80211_MNTR_FLAG_FCSFAIL = 1, + NL80211_MNTR_FLAG_PLCPFAIL = 2, + NL80211_MNTR_FLAG_CONTROL = 3, + NL80211_MNTR_FLAG_OTHER_BSS = 4, + NL80211_MNTR_FLAG_COOK_FRAMES = 5, + NL80211_MNTR_FLAG_ACTIVE = 6, + NL80211_MNTR_FLAG_SKIP_TX = 7, + __NL80211_MNTR_FLAG_AFTER_LAST = 8, + NL80211_MNTR_FLAG_MAX = 7, +}; + +enum nl80211_ext_feature_index { + NL80211_EXT_FEATURE_VHT_IBSS = 0, + NL80211_EXT_FEATURE_RRM = 1, + NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER = 2, + NL80211_EXT_FEATURE_SCAN_START_TIME = 3, + NL80211_EXT_FEATURE_BSS_PARENT_TSF = 4, + NL80211_EXT_FEATURE_SET_SCAN_DWELL = 5, + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 6, + NL80211_EXT_FEATURE_BEACON_RATE_HT = 7, + NL80211_EXT_FEATURE_BEACON_RATE_VHT = 8, + NL80211_EXT_FEATURE_FILS_STA = 9, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA = 10, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED = 11, + NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 12, + NL80211_EXT_FEATURE_CQM_RSSI_LIST = 13, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 14, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK = 15, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X = 16, + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 17, + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP = 18, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 19, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 20, + NL80211_EXT_FEATURE_MFP_OPTIONAL = 21, + NL80211_EXT_FEATURE_LOW_SPAN_SCAN = 22, + NL80211_EXT_FEATURE_LOW_POWER_SCAN = 23, + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN = 24, + NL80211_EXT_FEATURE_DFS_OFFLOAD = 25, + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211 = 26, + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT = 27, + NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 27, + NL80211_EXT_FEATURE_TXQS = 28, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN = 29, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT = 30, + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 31, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 32, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 33, + NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 34, + NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 35, + NL80211_EXT_FEATURE_EXT_KEY_ID = 36, + NL80211_EXT_FEATURE_STA_TX_PWR = 37, + NL80211_EXT_FEATURE_SAE_OFFLOAD = 38, + NL80211_EXT_FEATURE_VLAN_OFFLOAD = 39, + NL80211_EXT_FEATURE_AQL = 40, + NL80211_EXT_FEATURE_BEACON_PROTECTION = 41, + NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 42, + NL80211_EXT_FEATURE_PROTECTED_TWT = 43, + NL80211_EXT_FEATURE_DEL_IBSS_STA = 44, + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS = 45, + NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 46, + NL80211_EXT_FEATURE_SCAN_FREQ_KHZ = 47, + 
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS = 48, + NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 49, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 50, + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 51, + NL80211_EXT_FEATURE_FILS_DISCOVERY = 52, + NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 53, + NL80211_EXT_FEATURE_BEACON_RATE_HE = 54, + NL80211_EXT_FEATURE_SECURE_LTF = 55, + NL80211_EXT_FEATURE_SECURE_RTT = 56, + NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 57, + NL80211_EXT_FEATURE_BSS_COLOR = 58, + NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD = 59, + NL80211_EXT_FEATURE_RADAR_BACKGROUND = 60, + NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 61, + NL80211_EXT_FEATURE_PUNCT = 62, + NL80211_EXT_FEATURE_SECURE_NAN = 63, + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 64, + NL80211_EXT_FEATURE_OWE_OFFLOAD = 65, + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 66, + NL80211_EXT_FEATURE_DFS_CONCURRENT = 67, + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 68, + NUM_NL80211_EXT_FEATURES = 69, + MAX_NL80211_EXT_FEATURES = 68, +}; + +struct rate_info { + u16 flags; + u16 legacy; + u8 mcs; + u8 nss; + u8 bw; + u8 he_gi; + u8 he_dcm; + u8 he_ru_alloc; + u8 n_bonded_ch; + u8 eht_gi; + u8 eht_ru_alloc; +}; + +enum ieee80211_hw_flags { + IEEE80211_HW_HAS_RATE_CONTROL = 0, + IEEE80211_HW_RX_INCLUDES_FCS = 1, + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 2, + IEEE80211_HW_SIGNAL_UNSPEC = 3, + IEEE80211_HW_SIGNAL_DBM = 4, + IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC = 5, + IEEE80211_HW_SPECTRUM_MGMT = 6, + IEEE80211_HW_AMPDU_AGGREGATION = 7, + IEEE80211_HW_SUPPORTS_PS = 8, + IEEE80211_HW_PS_NULLFUNC_STACK = 9, + IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 10, + IEEE80211_HW_MFP_CAPABLE = 11, + IEEE80211_HW_WANT_MONITOR_VIF = 12, + IEEE80211_HW_NO_VIRTUAL_MONITOR = 13, + IEEE80211_HW_NO_AUTO_VIF = 14, + IEEE80211_HW_SW_CRYPTO_CONTROL = 15, + IEEE80211_HW_SUPPORT_FAST_XMIT = 16, + IEEE80211_HW_REPORTS_TX_ACK_STATUS = 17, + IEEE80211_HW_CONNECTION_MONITOR = 18, + IEEE80211_HW_QUEUE_CONTROL = 19, + IEEE80211_HW_SUPPORTS_PER_STA_GTK = 20, + IEEE80211_HW_AP_LINK_PS = 21, + IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 22, + IEEE80211_HW_SUPPORTS_RC_TABLE = 23, + IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 24, + IEEE80211_HW_TIMING_BEACON_ONLY = 25, + IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 26, + IEEE80211_HW_CHANCTX_STA_CSA = 27, + IEEE80211_HW_SUPPORTS_CLONED_SKBS = 28, + IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS = 29, + IEEE80211_HW_TDLS_WIDER_BW = 30, + IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU = 31, + IEEE80211_HW_BEACON_TX_STATUS = 32, + IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR = 33, + IEEE80211_HW_SUPPORTS_REORDERING_BUFFER = 34, + IEEE80211_HW_USES_RSS = 35, + IEEE80211_HW_TX_AMSDU = 36, + IEEE80211_HW_TX_FRAG_LIST = 37, + IEEE80211_HW_REPORTS_LOW_ACK = 38, + IEEE80211_HW_SUPPORTS_TX_FRAG = 39, + IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA = 40, + IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP = 41, + IEEE80211_HW_BUFF_MMPDU_TXQ = 42, + IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW = 43, + IEEE80211_HW_STA_MMPDU_TXQ = 44, + IEEE80211_HW_TX_STATUS_NO_AMPDU_LEN = 45, + IEEE80211_HW_SUPPORTS_MULTI_BSSID = 46, + IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID = 47, + IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT = 48, + IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD = 49, + IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD = 50, + IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP = 51, + IEEE80211_HW_DETECTS_COLOR_COLLISION = 52, + IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX = 53, + IEEE80211_HW_DISALLOW_PUNCTURING = 54, + IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ = 55, + IEEE80211_HW_HANDLES_QUIET_CSA = 56, + NUM_IEEE80211_HW_FLAGS = 57, +}; + 
+struct firmware { + size_t size; + const u8 *data; + void *priv; +}; + +enum rtw_hci_type { + RTW_HCI_TYPE_PCIE = 0, + RTW_HCI_TYPE_USB = 1, + RTW_HCI_TYPE_SDIO = 2, + RTW_HCI_TYPE_UNDEFINE = 3, +}; + +struct rtw_hci_ops; + +struct rtw_hci { + struct rtw_hci_ops *ops; + enum rtw_hci_type type; + u32 rpwm_addr; + u32 cpwm_addr; + u8 bulkout_num; +}; + +struct rtw_dev; + +struct rtw_tx_pkt_info; + +struct rtw_hci_ops { + int (*tx_write)(struct rtw_dev *, struct rtw_tx_pkt_info *, struct sk_buff *); + void (*tx_kick_off)(struct rtw_dev *); + void (*flush_queues)(struct rtw_dev *, u32, bool); + int (*setup)(struct rtw_dev *); + int (*start)(struct rtw_dev *); + void (*stop)(struct rtw_dev *); + void (*deep_ps)(struct rtw_dev *, bool); + void (*link_ps)(struct rtw_dev *, bool); + void (*interface_cfg)(struct rtw_dev *); + void (*dynamic_rx_agg)(struct rtw_dev *, bool); + int (*write_data_rsvd_page)(struct rtw_dev *, u8 *, u32); + int (*write_data_h2c)(struct rtw_dev *, u8 *, u32); + u8 (*read8)(struct rtw_dev *, u32); + u16 (*read16)(struct rtw_dev *, u32); + u32 (*read32)(struct rtw_dev *, u32); + void (*write8)(struct rtw_dev *, u32, u8); + void (*write16)(struct rtw_dev *, u32, u16); + void (*write32)(struct rtw_dev *, u32, u32); +}; + +enum rtw_supported_band { + RTW_BAND_2G = 1, + RTW_BAND_5G = 2, + RTW_BAND_60G = 4, +}; + +enum rtw_bandwidth { + RTW_CHANNEL_WIDTH_20 = 0, + RTW_CHANNEL_WIDTH_40 = 1, + RTW_CHANNEL_WIDTH_80 = 2, + RTW_CHANNEL_WIDTH_160 = 3, + RTW_CHANNEL_WIDTH_80_80 = 4, + RTW_CHANNEL_WIDTH_5 = 5, + RTW_CHANNEL_WIDTH_10 = 6, +}; + +enum rtw_net_type { + RTW_NET_NO_LINK = 0, + RTW_NET_AD_HOC = 1, + RTW_NET_MGD_LINKED = 2, + RTW_NET_AP_MODE = 3, +}; + +enum rtw_rf_type { + RF_1T1R = 0, + RF_1T2R = 1, + RF_2T2R = 2, + RF_2T3R = 3, + RF_2T4R = 4, + RF_3T3R = 5, + RF_3T4R = 6, + RF_4T4R = 7, + RF_TYPE_MAX = 8, +}; + +enum rtw_rf_path { + RF_PATH_A = 0, + RF_PATH_B = 1, + RF_PATH_C = 2, + RF_PATH_D = 3, +}; + +enum rtw_bb_path { + BB_PATH_A = 1, + BB_PATH_B = 2, + BB_PATH_C = 4, + BB_PATH_D = 8, + BB_PATH_AB = 3, + BB_PATH_AC = 5, + BB_PATH_AD = 9, + BB_PATH_BC = 6, + BB_PATH_BD = 10, + BB_PATH_CD = 12, + BB_PATH_ABC = 7, + BB_PATH_ABD = 11, + BB_PATH_ACD = 13, + BB_PATH_BCD = 14, + BB_PATH_ABCD = 15, +}; + +enum rtw_rate_section { + RTW_RATE_SECTION_CCK = 0, + RTW_RATE_SECTION_OFDM = 1, + RTW_RATE_SECTION_HT_1S = 2, + RTW_RATE_SECTION_HT_2S = 3, + RTW_RATE_SECTION_VHT_1S = 4, + RTW_RATE_SECTION_VHT_2S = 5, + RTW_RATE_SECTION_MAX = 6, +}; + +enum rtw_chip_type { + RTW_CHIP_TYPE_8822B = 0, + RTW_CHIP_TYPE_8822C = 1, + RTW_CHIP_TYPE_8723D = 2, + RTW_CHIP_TYPE_8821C = 3, + RTW_CHIP_TYPE_8703B = 4, + RTW_CHIP_TYPE_8821A = 5, + RTW_CHIP_TYPE_8812A = 6, +}; + +enum rtw_fw_type { + RTW_NORMAL_FW = 0, + RTW_WOWLAN_FW = 1, +}; + +enum rtw_trx_desc_rate { + DESC_RATE1M = 0, + DESC_RATE2M = 1, + DESC_RATE5_5M = 2, + DESC_RATE11M = 3, + DESC_RATE6M = 4, + DESC_RATE9M = 5, + DESC_RATE12M = 6, + DESC_RATE18M = 7, + DESC_RATE24M = 8, + DESC_RATE36M = 9, + DESC_RATE48M = 10, + DESC_RATE54M = 11, + DESC_RATEMCS0 = 12, + DESC_RATEMCS1 = 13, + DESC_RATEMCS2 = 14, + DESC_RATEMCS3 = 15, + DESC_RATEMCS4 = 16, + DESC_RATEMCS5 = 17, + DESC_RATEMCS6 = 18, + DESC_RATEMCS7 = 19, + DESC_RATEMCS8 = 20, + DESC_RATEMCS9 = 21, + DESC_RATEMCS10 = 22, + DESC_RATEMCS11 = 23, + DESC_RATEMCS12 = 24, + DESC_RATEMCS13 = 25, + DESC_RATEMCS14 = 26, + DESC_RATEMCS15 = 27, + DESC_RATEMCS16 = 28, + DESC_RATEMCS17 = 29, + DESC_RATEMCS18 = 30, + DESC_RATEMCS19 = 31, + DESC_RATEMCS20 = 32, + DESC_RATEMCS21 = 33, + 
DESC_RATEMCS22 = 34, + DESC_RATEMCS23 = 35, + DESC_RATEMCS24 = 36, + DESC_RATEMCS25 = 37, + DESC_RATEMCS26 = 38, + DESC_RATEMCS27 = 39, + DESC_RATEMCS28 = 40, + DESC_RATEMCS29 = 41, + DESC_RATEMCS30 = 42, + DESC_RATEMCS31 = 43, + DESC_RATEVHT1SS_MCS0 = 44, + DESC_RATEVHT1SS_MCS1 = 45, + DESC_RATEVHT1SS_MCS2 = 46, + DESC_RATEVHT1SS_MCS3 = 47, + DESC_RATEVHT1SS_MCS4 = 48, + DESC_RATEVHT1SS_MCS5 = 49, + DESC_RATEVHT1SS_MCS6 = 50, + DESC_RATEVHT1SS_MCS7 = 51, + DESC_RATEVHT1SS_MCS8 = 52, + DESC_RATEVHT1SS_MCS9 = 53, + DESC_RATEVHT2SS_MCS0 = 54, + DESC_RATEVHT2SS_MCS1 = 55, + DESC_RATEVHT2SS_MCS2 = 56, + DESC_RATEVHT2SS_MCS3 = 57, + DESC_RATEVHT2SS_MCS4 = 58, + DESC_RATEVHT2SS_MCS5 = 59, + DESC_RATEVHT2SS_MCS6 = 60, + DESC_RATEVHT2SS_MCS7 = 61, + DESC_RATEVHT2SS_MCS8 = 62, + DESC_RATEVHT2SS_MCS9 = 63, + DESC_RATEVHT3SS_MCS0 = 64, + DESC_RATEVHT3SS_MCS1 = 65, + DESC_RATEVHT3SS_MCS2 = 66, + DESC_RATEVHT3SS_MCS3 = 67, + DESC_RATEVHT3SS_MCS4 = 68, + DESC_RATEVHT3SS_MCS5 = 69, + DESC_RATEVHT3SS_MCS6 = 70, + DESC_RATEVHT3SS_MCS7 = 71, + DESC_RATEVHT3SS_MCS8 = 72, + DESC_RATEVHT3SS_MCS9 = 73, + DESC_RATEVHT4SS_MCS0 = 74, + DESC_RATEVHT4SS_MCS1 = 75, + DESC_RATEVHT4SS_MCS2 = 76, + DESC_RATEVHT4SS_MCS3 = 77, + DESC_RATEVHT4SS_MCS4 = 78, + DESC_RATEVHT4SS_MCS5 = 79, + DESC_RATEVHT4SS_MCS6 = 80, + DESC_RATEVHT4SS_MCS7 = 81, + DESC_RATEVHT4SS_MCS8 = 82, + DESC_RATEVHT4SS_MCS9 = 83, + DESC_RATE_MAX = 84, +}; + +enum rtw_regulatory_domains { + RTW_REGD_FCC = 0, + RTW_REGD_MKK = 1, + RTW_REGD_ETSI = 2, + RTW_REGD_IC = 3, + RTW_REGD_KCC = 4, + RTW_REGD_ACMA = 5, + RTW_REGD_CHILE = 6, + RTW_REGD_UKRAINE = 7, + RTW_REGD_MEXICO = 8, + RTW_REGD_CN = 9, + RTW_REGD_QATAR = 10, + RTW_REGD_UK = 11, + RTW_REGD_WW = 12, + RTW_REGD_MAX = 13, +}; + +enum rtw_flags { + RTW_FLAG_RUNNING = 0, + RTW_FLAG_FW_RUNNING = 1, + RTW_FLAG_SCANNING = 2, + RTW_FLAG_POWERON = 3, + RTW_FLAG_LEISURE_PS = 4, + RTW_FLAG_LEISURE_PS_DEEP = 5, + RTW_FLAG_DIG_DISABLE = 6, + RTW_FLAG_BUSY_TRAFFIC = 7, + RTW_FLAG_WOWLAN = 8, + RTW_FLAG_RESTARTING = 9, + RTW_FLAG_RESTART_TRIGGERING = 10, + RTW_FLAG_FORCE_LOWEST_RATE = 11, + NUM_OF_RTW_FLAGS = 12, +}; + +enum rtw_evm { + RTW_EVM_OFDM = 0, + RTW_EVM_1SS = 1, + RTW_EVM_2SS_A = 2, + RTW_EVM_2SS_B = 3, + RTW_EVM_NUM = 4, +}; + +enum rtw_snr { + RTW_SNR_OFDM_A = 0, + RTW_SNR_OFDM_B = 1, + RTW_SNR_OFDM_C = 2, + RTW_SNR_OFDM_D = 3, + RTW_SNR_1SS_A = 4, + RTW_SNR_1SS_B = 5, + RTW_SNR_1SS_C = 6, + RTW_SNR_1SS_D = 7, + RTW_SNR_2SS_A = 8, + RTW_SNR_2SS_B = 9, + RTW_SNR_2SS_C = 10, + RTW_SNR_2SS_D = 11, + RTW_SNR_NUM = 12, +}; + +enum rtw_port { + RTW_PORT_0 = 0, + RTW_PORT_1 = 1, + RTW_PORT_2 = 2, + RTW_PORT_3 = 3, + RTW_PORT_4 = 4, + RTW_PORT_NUM = 5, +}; + +enum rtw_wow_flags { + RTW_WOW_FLAG_EN_MAGIC_PKT = 0, + RTW_WOW_FLAG_EN_REKEY_PKT = 1, + RTW_WOW_FLAG_EN_DISCONNECT = 2, + RTW_WOW_FLAG_MAX = 3, +}; + +struct rtw_2g_1s_pwr_idx_diff { + s8 ofdm: 4; + s8 bw20: 4; +}; + +struct rtw_2g_ns_pwr_idx_diff { + s8 bw20: 4; + s8 bw40: 4; + s8 cck: 4; + s8 ofdm: 4; +}; + +struct rtw_2g_txpwr_idx { + u8 cck_base[6]; + u8 bw40_base[5]; + struct rtw_2g_1s_pwr_idx_diff ht_1s_diff; + struct rtw_2g_ns_pwr_idx_diff ht_2s_diff; + struct rtw_2g_ns_pwr_idx_diff ht_3s_diff; + struct rtw_2g_ns_pwr_idx_diff ht_4s_diff; +}; + +struct rtw_5g_ht_1s_pwr_idx_diff { + s8 ofdm: 4; + s8 bw20: 4; +}; + +struct rtw_5g_ht_ns_pwr_idx_diff { + s8 bw20: 4; + s8 bw40: 4; +}; + +struct rtw_5g_ofdm_ns_pwr_idx_diff { + s8 ofdm_3s: 4; + s8 ofdm_2s: 4; + s8 ofdm_4s: 4; + s8 res: 4; +}; + +struct rtw_5g_vht_ns_pwr_idx_diff { + s8 bw160: 4; + s8 
bw80: 4; +}; + +struct rtw_5g_txpwr_idx { + u8 bw40_base[14]; + struct rtw_5g_ht_1s_pwr_idx_diff ht_1s_diff; + struct rtw_5g_ht_ns_pwr_idx_diff ht_2s_diff; + struct rtw_5g_ht_ns_pwr_idx_diff ht_3s_diff; + struct rtw_5g_ht_ns_pwr_idx_diff ht_4s_diff; + struct rtw_5g_ofdm_ns_pwr_idx_diff ofdm_diff; + struct rtw_5g_vht_ns_pwr_idx_diff vht_1s_diff; + struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff; + struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff; + struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff; +}; + +struct rtw_txpwr_idx { + struct rtw_2g_txpwr_idx pwr_idx_2g; + struct rtw_5g_txpwr_idx pwr_idx_5g; +}; + +struct rtw_hw_reg { + u32 addr; + u32 mask; +}; + +struct rtw_ltecoex_addr { + u32 ctrl; + u32 wdata; + u32 rdata; +}; + +struct rtw_reg_domain { + u32 addr; + u32 mask; + u8 domain; +}; + +struct rtw_rf_sipi_addr { + u32 hssi_1; + u32 hssi_2; + u32 lssi_read; + u32 lssi_read_pi; +}; + +struct rtw_hw_reg_offset { + struct rtw_hw_reg hw_reg; + u8 offset; +}; + +struct rtw_vif_port { + struct rtw_hw_reg mac_addr; + struct rtw_hw_reg bssid; + struct rtw_hw_reg net_type; + struct rtw_hw_reg aid; + struct rtw_hw_reg bcn_ctrl; +}; + +struct rtw_tx_pkt_info { + u32 tx_pkt_size; + u8 offset; + u8 pkt_offset; + u8 tim_offset; + u8 mac_id; + u8 rate_id; + u8 rate; + u8 qsel; + u8 bw; + u8 sec_type; + u8 sn; + bool ampdu_en; + u8 ampdu_factor; + u8 ampdu_density; + u16 seq; + bool stbc; + bool ldpc; + bool dis_rate_fallback; + bool bmc; + bool use_rate; + bool ls; + bool fs; + bool short_gi; + bool report; + bool rts; + bool dis_qselseq; + bool en_hwseq; + u8 hw_ssn_sel; + bool nav_use_hdr; + bool bt_null; +}; + +struct rtw_sta_info; + +struct rtw_rx_pkt_stat { + bool phy_status; + bool icv_err; + bool crc_err; + bool decrypted; + bool is_c2h; + bool channel_invalid; + s32 signal_power; + u16 pkt_len; + u8 bw; + u8 drv_info_sz; + u8 shift; + u8 rate; + u8 mac_id; + u8 cam_id; + u8 ppdu_cnt; + u32 tsf_low; + s8 rx_power[4]; + u8 rssi; + u8 rxsc; + s8 rx_snr[4]; + u8 rx_evm[4]; + s8 cfo_tail[4]; + u16 freq; + u8 band; + struct rtw_sta_info *si; + struct ieee80211_vif *vif; + struct ieee80211_hdr *hdr; +}; + +struct ewma_rssi { + long unsigned int internal; +}; + +struct rtw_ra_report { + struct rate_info txrate; + u32 bit_rate; + u8 desc_rate; +}; + +struct rtw_sta_info { + struct rtw_dev *rtwdev; + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct ewma_rssi avg_rssi; + u8 rssi_level; + u8 mac_id; + u8 rate_id; + enum rtw_bandwidth bw_mode; + enum rtw_rf_type rf_type; + u8 stbc_en: 2; + u8 ldpc_en: 2; + bool sgi_enable; + bool vht_enable; + u8 init_ra_lv; + u64 ra_mask; + long unsigned int tid_ba[1]; + struct rtw_ra_report ra_report; + bool use_cfg_mask; + struct cfg80211_bitrate_mask *mask; + struct work_struct rc_work; + long: 32; +}; + +struct ewma_tp { + long unsigned int internal; +}; + +struct rtw_traffic_stats { + u64 tx_unicast; + u64 rx_unicast; + u64 tx_cnt; + u64 rx_cnt; + u32 tx_throughput; + u32 rx_throughput; + struct ewma_tp tx_ewma_tp; + struct ewma_tp rx_ewma_tp; +}; + +enum rtw_lps_mode { + RTW_MODE_ACTIVE = 0, + RTW_MODE_LPS = 1, + RTW_MODE_WMM_PS = 2, +}; + +enum rtw_lps_deep_mode { + LPS_DEEP_MODE_NONE = 0, + LPS_DEEP_MODE_LCLK = 1, + LPS_DEEP_MODE_PG = 2, +}; + +enum rtw_pwr_state { + RTW_RF_OFF = 0, + RTW_RF_ON = 4, + RTW_ALL_ON = 12, +}; + +struct rtw_lps_conf { + enum rtw_lps_mode mode; + enum rtw_lps_deep_mode deep_mode; + enum rtw_lps_deep_mode wow_deep_mode; + enum rtw_pwr_state state; + u8 awake_interval; + u8 rlbm; + u8 smart_ps; + u8 port_id; + bool 
sec_cam_backup; + bool pattern_cam_backup; +}; + +struct rtw_cam_entry { + bool valid; + bool group; + u8 addr[6]; + u8 hw_key_type; + struct ieee80211_key_conf *key; +}; + +struct rtw_sec_desc { + bool default_key_search; + u32 total_cam_num; + struct rtw_cam_entry cam_table[32]; + long unsigned int cam_map[1]; +}; + +struct rtw_tx_report { + spinlock_t q_lock; + struct sk_buff_head queue; + atomic_t sn; + struct timer_list purge_timer; +}; + +struct rtw_hw_scan_info { + struct ieee80211_vif *scanning_vif; + u8 probe_pg_size; + u8 op_pri_ch_idx; + u8 op_pri_ch; + u8 op_chan; + u8 op_bw; +}; + +struct rtw_phy_cond { + u32 rfe: 8; + u32 intf: 4; + u32 pkg: 4; + u32 plat: 4; + u32 intf_rsvd: 4; + u32 cut: 4; + u32 branch: 2; + u32 neg: 1; + u32 pos: 1; +}; + +struct rtw_phy_cond2 { + u8 type_glna; + u8 type_gpa; + u8 type_alna; + u8 type_apa; +}; + +enum rtw_sar_bands { + RTW_SAR_BAND_0 = 0, + RTW_SAR_BAND_1 = 1, + RTW_SAR_BAND_3 = 2, + RTW_SAR_BAND_4 = 3, + RTW_SAR_BAND_NR = 4, +}; + +enum rtw_sar_sources { + RTW_SAR_SOURCE_NONE = 0, + RTW_SAR_SOURCE_COMMON = 1, +}; + +union rtw_sar_cfg { + s8 common[4]; +}; + +struct rtw_sar { + enum rtw_sar_sources src; + union rtw_sar_cfg cfg[24]; +}; + +struct rtw_hal { + u32 rcr; + u32 chip_version; + u8 cut_version; + u8 mp_chip; + u8 oem_id; + u8 pkg_type; + struct rtw_phy_cond phy_cond; + struct rtw_phy_cond2 phy_cond2; + bool rfe_btg; + u8 ps_mode; + u8 current_channel; + u8 current_primary_channel_index; + u8 current_band_width; + u8 current_band_type; + u8 primary_channel; + u8 cch_by_bw[3]; + u8 sec_ch_offset; + u8 rf_type; + u8 rf_path_num; + u8 rf_phy_num; + u32 antenna_tx; + u32 antenna_rx; + u8 bfee_sts_cap; + bool txrx_1ss; + bool cck_high_power; + struct mutex tx_power_mutex; + s8 tx_pwr_by_rate_offset_2g[336]; + s8 tx_pwr_by_rate_offset_5g[336]; + s8 tx_pwr_by_rate_base_2g[24]; + s8 tx_pwr_by_rate_base_5g[24]; + s8 tx_pwr_limit_2g[3276]; + s8 tx_pwr_limit_5g[11466]; + s8 tx_pwr_tbl[336]; + enum rtw_sar_bands sar_band; + struct rtw_sar sar; + u32 ch_param[3]; +}; + +struct rtw_rqpn; + +struct rtw_fifo_conf { + u16 rsvd_boundary; + u16 rsvd_pg_num; + u16 rsvd_drv_pg_num; + u16 txff_pg_num; + u16 acq_pg_num; + u16 rsvd_drv_addr; + u16 rsvd_h2c_info_addr; + u16 rsvd_h2c_sta_info_addr; + u16 rsvd_h2cq_addr; + u16 rsvd_cpu_instr_addr; + u16 rsvd_fw_txbuf_addr; + u16 rsvd_csibuf_addr; + const struct rtw_rqpn *rqpn; +}; + +struct rtw_fwcd_desc { + u32 size; + u8 *next; + u8 *data; +}; + +struct rtw_fw_state { + const struct firmware *firmware; + struct rtw_dev *rtwdev; + struct completion completion; + struct rtw_fwcd_desc fwcd_desc; + u16 version; + u8 sub_version; + u8 sub_index; + u16 h2c_version; + u32 feature; + u32 feature_ext; + enum rtw_fw_type type; +}; + +struct rtw_efuse { + u32 size; + u32 physical_size; + u32 logical_size; + u32 protect_size; + u8 addr[6]; + u8 channel_plan; + u8 country_code[2]; + u8 rf_board_option; + u8 rfe_option; + u8 power_track_type; + u8 thermal_meter[4]; + u8 thermal_meter_k; + u8 crystal_cap; + u8 ant_div_cfg; + u8 ant_div_type; + u8 regd; + u8 afe; + u8 lna_type_2g; + u8 lna_type_5g; + u8 glna_type; + u8 alna_type; + bool ext_lna_2g; + bool ext_lna_5g; + u8 pa_type_2g; + u8 pa_type_5g; + u8 gpa_type; + u8 apa_type; + bool ext_pa_2g; + bool ext_pa_5g; + u8 tx_bb_swing_setting_2g; + u8 tx_bb_swing_setting_5g; + bool btcoex; + bool share_ant; + u8 bt_setting; + u8 usb_mode_switch; + struct { + u8 hci; + u8 bw; + u8 ptcl; + u8 nss; + u8 ant_num; + } hw_cap; + struct rtw_txpwr_idx txpwr_idx_table[4]; +}; + 
+enum rtw_regd_state { + RTW_REGD_STATE_WORLDWIDE = 0, + RTW_REGD_STATE_PROGRAMMED = 1, + RTW_REGD_STATE_SETTING = 2, + RTW_REGD_STATE_NR = 3, +}; + +struct rtw_regulatory; + +struct rtw_regd { + enum rtw_regd_state state; + const struct rtw_regulatory *regulatory; + enum nl80211_dfs_regions dfs_region; +}; + +struct rtw_bf_info { + u8 bfer_mu_cnt; + u8 bfer_su_cnt; + long unsigned int bfer_su_reg_maping[1]; + u8 cur_csi_rpt_rate; +}; + +struct ewma_thermal { + long unsigned int internal; +}; + +struct rtw_dpk_info { + bool is_dpk_pwr_on; + bool is_reload; + long unsigned int dpk_path_ok[1]; + u8 thermal_dpk[2]; + struct ewma_thermal avg_thermal[2]; + u32 gnt_control; + u32 gnt_value; + u8 result[4]; + u8 dpk_txagc[4]; + u32 coef[80]; + u16 dpk_gs[4]; + u8 thermal_dpk_delta[4]; + u8 pre_pwsf[4]; + u8 dpk_band; + u8 dpk_ch; + u8 dpk_bw; +}; + +struct rtw_cfo_track { + bool is_adjust; + u8 crystal_cap; + s32 cfo_tail[4]; + s32 cfo_cnt[4]; + u32 packet_count; + u32 packet_count_pre; +}; + +struct rtw_pkt_count { + u16 num_bcn_pkt; + u16 num_qry_pkt[84]; +}; + +struct ewma_evm { + long unsigned int internal; +}; + +struct ewma_snr { + long unsigned int internal; +}; + +struct rtw_iqk_info { + bool done; + struct { + u32 s1_x; + u32 s1_y; + u32 s0_x; + u32 s0_y; + } result; +}; + +struct rtw_gapk_info { + u32 rf3f_bp[220]; + u32 rf3f_fs[44]; + bool txgapk_bp_done; + s8 offset[44]; + s8 fianl_offset[44]; + u8 read_txgain; + u8 channel; +}; + +enum rtw_edcca_mode { + RTW_EDCCA_NORMAL = 0, + RTW_EDCCA_ADAPTIVITY = 1, +}; + +struct rtw_dm_info { + u32 cck_fa_cnt; + u32 ofdm_fa_cnt; + u32 total_fa_cnt; + u32 cck_cca_cnt; + u32 ofdm_cca_cnt; + u32 total_cca_cnt; + u32 cck_ok_cnt; + u32 cck_err_cnt; + u32 ofdm_ok_cnt; + u32 ofdm_err_cnt; + u32 ht_ok_cnt; + u32 ht_err_cnt; + u32 vht_ok_cnt; + u32 vht_err_cnt; + u8 min_rssi; + u8 pre_min_rssi; + u16 fa_history[4]; + u8 igi_history[4]; + u8 igi_bitmap; + bool damping; + u8 damping_cnt; + u8 damping_rssi; + u8 cck_gi_u_bnd; + u8 cck_gi_l_bnd; + u8 fix_rate; + u8 tx_rate; + u32 rrsr_val_init; + u32 rrsr_mask_min; + u8 thermal_avg[4]; + u8 thermal_meter_k; + u8 thermal_meter_lck; + s8 delta_power_index[4]; + s8 delta_power_index_last[4]; + u8 default_ofdm_index; + u8 default_cck_index; + bool pwr_trk_triggered; + bool pwr_trk_init_trigger; + struct ewma_thermal avg_thermal[4]; + s8 txagc_remnant_cck; + s8 txagc_remnant_ofdm[4]; + u8 rx_cck_agc_report_type; + u32 dack_adck[4]; + u16 dack_msbk[120]; + u8 dack_dck[16]; + struct rtw_dpk_info dpk_info; + struct rtw_cfo_track cfo_track; + u8 cck_pd_lv[8]; + u32 cck_fa_avg; + u8 cck_pd_default; + s8 rx_snr[4]; + u8 rx_evm_dbm[4]; + s16 cfo_tail[4]; + u8 rssi[4]; + u8 curr_rx_rate; + struct rtw_pkt_count cur_pkt_count; + struct rtw_pkt_count last_pkt_count; + struct ewma_evm ewma_evm[4]; + struct ewma_snr ewma_snr[12]; + u32 dm_flags; + struct rtw_iqk_info iqk; + struct rtw_gapk_info gapk; + bool is_bt_iqk_timeout; + s8 l2h_th_ini; + enum rtw_edcca_mode edcca_mode; + u8 scan_density; +}; + +struct rtw_coex_hid { + u8 hid_handle; + u8 hid_vendor; + u8 hid_name[3]; + bool hid_info_completed; + bool is_game_hid; +}; + +struct rtw_coex_hid_handle_list { + u8 cmd_id; + u8 len; + u8 subid; + u8 handle_cnt; + u8 handle[4]; +}; + +struct rtw_coex_stat { + bool bt_disabled; + bool bt_disabled_pre; + bool bt_link_exist; + bool bt_whck_test; + bool bt_inq_page; + bool bt_inq_remain; + bool bt_inq; + bool bt_page; + bool bt_ble_voice; + bool bt_ble_exist; + bool bt_hfp_exist; + bool bt_a2dp_exist; + bool bt_hid_exist; + bool 
bt_pan_exist; + bool bt_opp_exist; + bool bt_acl_busy; + bool bt_fix_2M; + bool bt_setup_link; + bool bt_multi_link; + bool bt_multi_link_pre; + bool bt_multi_link_remain; + bool bt_a2dp_sink; + bool bt_a2dp_active; + bool bt_reenable; + bool bt_ble_scan_en; + bool bt_init_scan; + bool bt_slave; + bool bt_418_hid_exist; + bool bt_ble_hid_exist; + bool bt_game_hid_exist; + bool bt_hid_handle_cnt; + bool bt_mailbox_reply; + bool wl_under_lps; + bool wl_under_ips; + bool wl_hi_pri_task1; + bool wl_hi_pri_task2; + bool wl_force_lps_ctrl; + bool wl_gl_busy; + bool wl_linkscan_proc; + bool wl_ps_state_fail; + bool wl_tx_limit_en; + bool wl_ampdu_limit_en; + bool wl_connected; + bool wl_slot_extend; + bool wl_cck_lock; + bool wl_cck_lock_pre; + bool wl_cck_lock_ever; + bool wl_connecting; + bool wl_slot_toggle; + bool wl_slot_toggle_change; + bool wl_mimo_ps; + u32 bt_supported_version; + u32 bt_supported_feature; + u32 hi_pri_tx; + u32 hi_pri_rx; + u32 lo_pri_tx; + u32 lo_pri_rx; + u32 patch_ver; + u16 bt_reg_vendor_ae; + u16 bt_reg_vendor_ac; + s8 bt_rssi; + u8 kt_ver; + u8 gnt_workaround_state; + u8 tdma_timer_base; + u8 bt_profile_num; + u8 bt_info_c2h[60]; + u8 bt_info_lb2; + u8 bt_info_lb3; + u8 bt_info_hb0; + u8 bt_info_hb1; + u8 bt_info_hb2; + u8 bt_info_hb3; + u8 bt_ble_scan_type; + u8 bt_hid_pair_num; + u8 bt_hid_slot; + u8 bt_a2dp_bitpool; + u8 bt_iqk_state; + u8 bt_disable_cnt; + u16 wl_beacon_interval; + u8 wl_noisy_level; + u8 wl_fw_dbg_info[10]; + u8 wl_fw_dbg_info_pre[10]; + u8 wl_rx_rate; + u8 wl_tx_rate; + u8 wl_rts_rx_rate; + u8 wl_coex_mode; + u8 wl_iot_peer; + u8 ampdu_max_time; + u8 wl_tput_dir; + u8 wl_toggle_para[6]; + u8 wl_toggle_interval; + u16 score_board; + u16 retry_limit; + u32 cnt_bt[13]; + u32 cnt_wl[8]; + u32 cnt_bt_info_c2h[6]; + u32 darfrc; + u32 darfrch; + struct rtw_coex_hid hid_info[4]; + struct rtw_coex_hid_handle_list hid_handle_list; +}; + +struct rtw_coex_dm { + bool cur_ps_tdma_on; + bool cur_wl_rx_low_gain_en; + bool ignore_wl_act; + u8 reason; + u8 bt_rssi_state[4]; + u8 wl_rssi_state[4]; + u8 wl_ch_info[3]; + u8 cur_ps_tdma; + u8 cur_table; + u8 ps_tdma_para[5]; + u8 cur_bt_pwr_lvl; + u8 cur_bt_lna_lvl; + u8 cur_wl_pwr_lvl; + u8 bt_status; + u32 cur_ant_pos_type; + u32 cur_switch_status; + u32 setting_tdma; + u8 fw_tdma_para[5]; +}; + +struct rtw_coex_rfe { + bool ant_switch_exist; + bool ant_switch_diversity; + bool ant_switch_with_bt; + u8 rfe_module_type; + u8 ant_switch_polarity; + bool wlg_at_btg; +}; + +struct rtw_coex { + struct sk_buff_head queue; + wait_queue_head_t wait; + bool under_5g; + bool stop_dm; + bool freeze; + bool freerun; + bool wl_rf_off; + bool manual_control; + struct rtw_coex_stat stat; + struct rtw_coex_dm dm; + struct rtw_coex_rfe rfe; + struct delayed_work bt_relink_work; + struct delayed_work bt_reenable_work; + struct delayed_work defreeze_work; + struct delayed_work wl_remain_work; + struct delayed_work bt_remain_work; + struct delayed_work wl_connecting_work; + struct delayed_work bt_multi_link_remain_work; + struct delayed_work wl_ccklock_work; +}; + +struct rtw_debugfs; + +struct rtw_path_div { + enum rtw_bb_path current_tx_path; + u32 path_a_sum; + u32 path_b_sum; + u16 path_a_cnt; + u16 path_b_cnt; +}; + +struct rtw_wow_pattern { + u16 crc; + u8 type; + u8 valid; + u8 mask[16]; +}; + +struct rtw_pno_request { + bool inited; + u32 match_set_cnt; + struct cfg80211_match_set *match_sets; + u8 channel_cnt; + struct ieee80211_channel *channels; + struct cfg80211_sched_scan_plan scan_plan; +}; + +struct rtw_wow_param { 
+ struct ieee80211_vif *wow_vif; + long unsigned int flags[1]; + u8 txpause; + u8 pattern_cnt; + struct rtw_wow_pattern patterns[12]; + bool ips_enabled; + struct rtw_pno_request pno_req; +}; + +struct rtw_chip_info; + +struct rtw_dev { + struct ieee80211_hw *hw; + struct device *dev; + struct rtw_hci hci; + struct rtw_hw_scan_info scan_info; + const struct rtw_chip_info *chip; + struct rtw_hal hal; + struct rtw_fifo_conf fifo; + struct rtw_fw_state fw; + struct rtw_efuse efuse; + struct rtw_sec_desc sec; + long: 32; + struct rtw_traffic_stats stats; + struct rtw_regd regd; + struct rtw_bf_info bf_info; + struct rtw_dm_info dm_info; + struct rtw_coex coex; + struct mutex mutex; + struct delayed_work watch_dog_work; + u32 watch_dog_cnt; + struct list_head rsvd_page_list; + struct sk_buff_head c2h_queue; + struct work_struct c2h_work; + struct work_struct ips_work; + struct work_struct fw_recovery_work; + struct work_struct update_beacon_work; + spinlock_t txq_lock; + struct list_head txqs; + struct workqueue_struct *tx_wq; + struct work_struct tx_work; + struct work_struct ba_work; + struct rtw_tx_report tx_report; + struct { + u8 last_box_num; + u32 seq; + } h2c; + struct rtw_lps_conf lps_conf; + bool ps_enabled; + bool beacon_loss; + struct completion lps_leave_check; + struct rtw_debugfs *debugfs; + u8 sta_cnt; + u32 rts_threshold; + long unsigned int hw_port[1]; + long unsigned int mac_id_map[1]; + long unsigned int flags[1]; + u8 mp_mode; + struct rtw_path_div dm_path_div; + struct rtw_fw_state wow_fw; + struct rtw_wow_param wow; + bool need_rfk; + struct completion fw_scan_density; + bool ap_active; + long: 0; + u8 priv[0]; +}; + +enum rtw_bfee_role { + RTW_BFEE_NONE = 0, + RTW_BFEE_SU = 1, + RTW_BFEE_MU = 2, +}; + +struct rtw_bfee { + enum rtw_bfee_role role; + u16 p_aid; + u8 g_id; + u8 mac_addr[6]; + u8 sound_dim; + u8 su_reg_index; + u16 aid; +}; + +struct rtw_vif { + enum rtw_net_type net_type; + u16 aid; + u8 mac_id; + u8 mac_addr[6]; + u8 bssid[6]; + u8 port; + u8 bcn_ctrl; + struct list_head rsvd_page_list; + struct ieee80211_tx_queue_params tx_params[4]; + const struct rtw_vif_port *conf; + struct cfg80211_scan_request *scan_req; + struct ieee80211_scan_ies *scan_ies; + long: 32; + struct rtw_traffic_stats stats; + struct rtw_bfee bfee; + long: 32; +}; + +struct rtw_regulatory { + char alpha2[2]; + u8 txpwr_regd_2g; + u8 txpwr_regd_5g; +}; + +struct rtw_chip_ops { + int (*power_on)(struct rtw_dev *); + void (*power_off)(struct rtw_dev *); + int (*mac_init)(struct rtw_dev *); + int (*dump_fw_crash)(struct rtw_dev *); + void (*shutdown)(struct rtw_dev *); + int (*read_efuse)(struct rtw_dev *, u8 *); + void (*phy_set_param)(struct rtw_dev *); + void (*set_channel)(struct rtw_dev *, u8, u8, u8); + void (*query_phy_status)(struct rtw_dev *, u8 *, struct rtw_rx_pkt_stat *); + u32 (*read_rf)(struct rtw_dev *, enum rtw_rf_path, u32, u32); + bool (*write_rf)(struct rtw_dev *, enum rtw_rf_path, u32, u32, u32); + void (*set_tx_power_index)(struct rtw_dev *); + int (*rsvd_page_dump)(struct rtw_dev *, u8 *, u32, u32); + int (*set_antenna)(struct rtw_dev *, u32, u32); + void (*cfg_ldo25)(struct rtw_dev *, bool); + void (*efuse_grant)(struct rtw_dev *, bool); + void (*false_alarm_statistics)(struct rtw_dev *); + void (*phy_calibration)(struct rtw_dev *); + void (*dpk_track)(struct rtw_dev *); + void (*cck_pd_set)(struct rtw_dev *, u8); + void (*pwr_track)(struct rtw_dev *); + void (*config_bfee)(struct rtw_dev *, struct rtw_vif *, struct rtw_bfee *, bool); + void (*set_gid_table)(struct 
rtw_dev *, struct ieee80211_vif *, struct ieee80211_bss_conf *); + void (*cfg_csi_rate)(struct rtw_dev *, u8, u8, u8, u8 *); + void (*adaptivity_init)(struct rtw_dev *); + void (*adaptivity)(struct rtw_dev *); + void (*cfo_init)(struct rtw_dev *); + void (*cfo_track)(struct rtw_dev *); + void (*config_tx_path)(struct rtw_dev *, u8, enum rtw_bb_path, enum rtw_bb_path, bool); + void (*config_txrx_mode)(struct rtw_dev *, u8, u8, bool); + void (*fill_txdesc_checksum)(struct rtw_dev *, struct rtw_tx_pkt_info *, u8 *); + void (*coex_set_init)(struct rtw_dev *); + void (*coex_set_ant_switch)(struct rtw_dev *, u8, u8); + void (*coex_set_gnt_fix)(struct rtw_dev *); + void (*coex_set_gnt_debug)(struct rtw_dev *); + void (*coex_set_rfe_type)(struct rtw_dev *); + void (*coex_set_wl_tx_power)(struct rtw_dev *, u8); + void (*coex_set_wl_rx_gain)(struct rtw_dev *, bool); +}; + +struct rtw_pwr_seq_cmd { + u16 offset; + u8 cut_mask; + u8 intf_mask; + u8 base: 4; + u8 cmd: 4; + u8 mask; + u8 value; +}; + +enum rtw_dma_mapping { + RTW_DMA_MAPPING_EXTRA = 0, + RTW_DMA_MAPPING_LOW = 1, + RTW_DMA_MAPPING_NORMAL = 2, + RTW_DMA_MAPPING_HIGH = 3, + RTW_DMA_MAPPING_MAX = 4, + RTW_DMA_MAPPING_UNDEF = 5, +}; + +struct rtw_rqpn { + enum rtw_dma_mapping dma_map_vo; + enum rtw_dma_mapping dma_map_vi; + enum rtw_dma_mapping dma_map_be; + enum rtw_dma_mapping dma_map_bk; + enum rtw_dma_mapping dma_map_mg; + enum rtw_dma_mapping dma_map_hi; +}; + +struct rtw_prioq_addr { + u32 rsvd; + u32 avail; +}; + +struct rtw_prioq_addrs { + struct rtw_prioq_addr prio[4]; + bool wsize; +}; + +struct rtw_page_table { + u16 hq_num; + u16 nq_num; + u16 lq_num; + u16 exq_num; + u16 gapq_num; +}; + +struct rtw_intf_phy_para { + u16 offset; + u16 value; + u16 ip_sel; + u16 cut_mask; + u16 platform; +}; + +struct rtw_intf_phy_para_table { + const struct rtw_intf_phy_para *usb2_para; + const struct rtw_intf_phy_para *usb3_para; + const struct rtw_intf_phy_para *gen1_para; + const struct rtw_intf_phy_para *gen2_para; + u8 n_usb2_para; + u8 n_usb3_para; + u8 n_gen1_para; + u8 n_gen2_para; +}; + +struct rtw_table { + const void *data; + const u32 size; + void (*parse)(struct rtw_dev *, const struct rtw_table *); + void (*do_cfg)(struct rtw_dev *, const struct rtw_table *, u32, u32); + enum rtw_rf_path rf_path; +}; + +struct rtw_pwr_track_tbl; + +struct rtw_rfe_def { + const struct rtw_table *phy_pg_tbl; + const struct rtw_table *txpwr_lmt_tbl; + const struct rtw_pwr_track_tbl *pwr_track_tbl; + const struct rtw_table *agc_btg_tbl; +}; + +struct rtw_pwr_track_tbl { + const u8 *pwrtrk_5gb_n[3]; + const u8 *pwrtrk_5gb_p[3]; + const u8 *pwrtrk_5ga_n[3]; + const u8 *pwrtrk_5ga_p[3]; + const u8 *pwrtrk_2gb_n; + const u8 *pwrtrk_2gb_p; + const u8 *pwrtrk_2ga_n; + const u8 *pwrtrk_2ga_p; + const u8 *pwrtrk_2g_cckb_n; + const u8 *pwrtrk_2g_cckb_p; + const u8 *pwrtrk_2g_ccka_n; + const u8 *pwrtrk_2g_ccka_p; + const s8 *pwrtrk_xtal_n; + const s8 *pwrtrk_xtal_p; +}; + +enum rtw_wlan_cpu { + RTW_WCPU_11AC = 0, + RTW_WCPU_11N = 1, +}; + +enum rtw_fw_fifo_sel { + RTW_FW_FIFO_SEL_TX = 0, + RTW_FW_FIFO_SEL_RX = 1, + RTW_FW_FIFO_SEL_RSVD_PAGE = 2, + RTW_FW_FIFO_SEL_REPORT = 3, + RTW_FW_FIFO_SEL_LLT = 4, + RTW_FW_FIFO_SEL_RXBUF_FW = 5, + RTW_FW_FIFO_MAX = 6, +}; + +struct rtw_fwcd_segs; + +struct coex_table_para; + +struct coex_tdma_para; + +struct coex_rf_para; + +struct coex_5g_afh_map; + +struct rtw_chip_info { + const struct rtw_chip_ops *ops; + u8 id; + const char *fw_name; + enum rtw_wlan_cpu wlan_cpu; + u8 tx_pkt_desc_sz; + u8 tx_buf_desc_sz; + u8 
rx_pkt_desc_sz; + u8 rx_buf_desc_sz; + u32 phy_efuse_size; + u32 log_efuse_size; + u32 ptct_efuse_size; + u32 txff_size; + u32 rxff_size; + u32 fw_rxff_size; + u16 rsvd_drv_pg_num; + u8 band; + u16 page_size; + u8 csi_buf_pg_num; + u8 dig_max; + u8 dig_min; + u8 txgi_factor; + bool is_pwr_by_rate_dec; + bool rx_ldpc; + bool tx_stbc; + u8 max_power_index; + u8 ampdu_density; + u16 fw_fifo_addr[6]; + const struct rtw_fwcd_segs *fwcd_segs; + u8 usb_tx_agg_desc_num; + bool hw_feature_report; + u8 c2h_ra_report_size; + bool old_datarate_fb_limit; + u8 default_1ss_tx_path; + bool path_div_supported; + bool ht_supported; + bool vht_supported; + u8 lps_deep_mode_supported; + u8 sys_func_en; + const struct rtw_pwr_seq_cmd * const *pwr_on_seq; + const struct rtw_pwr_seq_cmd * const *pwr_off_seq; + const struct rtw_rqpn *rqpn_table; + const struct rtw_prioq_addrs *prioq_addrs; + const struct rtw_page_table *page_table; + const struct rtw_intf_phy_para_table *intf_table; + const struct rtw_hw_reg *dig; + const struct rtw_hw_reg *dig_cck; + u32 rf_base_addr[2]; + u32 rf_sipi_addr[2]; + const struct rtw_rf_sipi_addr *rf_sipi_read_addr; + u8 fix_rf_phy_num; + const struct rtw_ltecoex_addr *ltecoex_addr; + const struct rtw_table *mac_tbl; + const struct rtw_table *agc_tbl; + const struct rtw_table *bb_tbl; + const struct rtw_table *rf_tbl[4]; + const struct rtw_table *rfk_init_tbl; + const struct rtw_rfe_def *rfe_defs; + u32 rfe_defs_size; + bool en_dis_dpd; + u16 dpd_ratemask; + u8 iqk_threshold; + u8 lck_threshold; + u8 bfer_su_max_num; + u8 bfer_mu_max_num; + const struct rtw_hw_reg_offset *edcca_th; + s8 l2h_th_ini_cs; + s8 l2h_th_ini_ad; + const char *wow_fw_name; + const struct wiphy_wowlan_support *wowlan_stub; + const u8 max_sched_scan_ssids; + const u16 max_scan_ie_len; + u32 coex_para_ver; + u8 bt_desired_ver; + bool scbd_support; + bool new_scbd10_def; + bool ble_hid_profile_support; + bool wl_mimo_ps_support; + u8 pstdma_type; + u8 bt_rssi_type; + u8 ant_isolation; + u8 rssi_tolerance; + u8 table_sant_num; + u8 table_nsant_num; + u8 tdma_sant_num; + u8 tdma_nsant_num; + u8 bt_afh_span_bw20; + u8 bt_afh_span_bw40; + u8 afh_5g_num; + u8 wl_rf_para_num; + u8 coex_info_hw_regs_num; + const u8 *bt_rssi_step; + const u8 *wl_rssi_step; + const struct coex_table_para *table_nsant; + const struct coex_table_para *table_sant; + const struct coex_tdma_para *tdma_sant; + const struct coex_tdma_para *tdma_nsant; + const struct coex_rf_para *wl_rf_para_tx; + const struct coex_rf_para *wl_rf_para_rx; + const struct coex_5g_afh_map *afh_5g; + const struct rtw_hw_reg *btg_reg; + const struct rtw_reg_domain *coex_info_hw_regs; + u32 wl_fw_desired_ver; +}; + +struct rtw_fwcd_segs { + const u32 *segs; + u8 num; +}; + +struct coex_table_para { + u32 bt; + u32 wl; +}; + +struct coex_tdma_para { + u8 para[5]; +}; + +struct coex_rf_para { + u8 wl_pwr_dec_lvl; + u8 bt_pwr_dec_lvl; + bool wl_low_gain_en; + u8 bt_lna_lvl; +}; + +struct coex_5g_afh_map { + u32 wl_5g_ch; + u8 bt_skip_ch; + u8 bt_skip_span; +}; + +enum rtw_coex_bt_state_cnt { + COEX_CNT_BT_RETRY = 0, + COEX_CNT_BT_REINIT = 1, + COEX_CNT_BT_REENABLE = 2, + COEX_CNT_BT_POPEVENT = 3, + COEX_CNT_BT_SETUPLINK = 4, + COEX_CNT_BT_IGNWLANACT = 5, + COEX_CNT_BT_INQ = 6, + COEX_CNT_BT_PAGE = 7, + COEX_CNT_BT_ROLESWITCH = 8, + COEX_CNT_BT_AFHUPDATE = 9, + COEX_CNT_BT_INFOUPDATE = 10, + COEX_CNT_BT_IQK = 11, + COEX_CNT_BT_IQKFAIL = 12, + COEX_CNT_BT_MAX = 13, +}; + +enum rtw_coex_wl_state_cnt { + COEX_CNT_WL_SCANAP = 0, + COEX_CNT_WL_CONNPKT = 1, + COEX_CNT_WL_COEXRUN 
= 2, + COEX_CNT_WL_NOISY0 = 3, + COEX_CNT_WL_NOISY1 = 4, + COEX_CNT_WL_NOISY2 = 5, + COEX_CNT_WL_5MS_NOEXTEND = 6, + COEX_CNT_WL_FW_NOTIFY = 7, + COEX_CNT_WL_MAX = 8, +}; + +struct rtw_swing_table { + const u8 *p[4]; + const u8 *n[4]; +}; + +enum rtw_rf_band { + RF_BAND_2G_CCK = 0, + RF_BAND_2G_OFDM = 1, + RF_BAND_5G_L = 2, + RF_BAND_5G_M = 3, + RF_BAND_5G_H = 4, + RF_BAND_MAX = 5, +}; + +enum rtw_fw_feature { + FW_FEATURE_SIG = 1, + FW_FEATURE_LPS_C2H = 2, + FW_FEATURE_LCLK = 4, + FW_FEATURE_PG = 8, + FW_FEATURE_TX_WAKE = 16, + FW_FEATURE_BCN_FILTER = 32, + FW_FEATURE_NOTIFY_SCAN = 64, + FW_FEATURE_ADAPTIVITY = 128, + FW_FEATURE_SCAN_OFFLOAD = 256, + FW_FEATURE_MAX = 2147483648, +}; + +enum rtw_debug_mask { + RTW_DBG_PCI = 1, + RTW_DBG_TX = 2, + RTW_DBG_RX = 4, + RTW_DBG_PHY = 8, + RTW_DBG_FW = 16, + RTW_DBG_EFUSE = 32, + RTW_DBG_COEX = 64, + RTW_DBG_RFK = 128, + RTW_DBG_REGD = 256, + RTW_DBG_DEBUGFS = 512, + RTW_DBG_PS = 1024, + RTW_DBG_BF = 2048, + RTW_DBG_WOW = 4096, + RTW_DBG_CFO = 8192, + RTW_DBG_PATH_DIV = 16384, + RTW_DBG_ADAPTIVITY = 32768, + RTW_DBG_HW_SCAN = 65536, + RTW_DBG_STATE = 131072, + RTW_DBG_SDIO = 262144, + RTW_DBG_USB = 524288, + RTW_DBG_UNEXP = 2147483648, + RTW_DBG_ALL = 4294967295, +}; + +struct rtw_txpwr_lmt_cfg_pair { + u8 regd; + u8 band; + u8 bw; + u8 rs; + u8 ch; + s8 txpwr_lmt; +}; + +struct rtw_phy_pg_cfg_pair { + u32 band; + u32 rf_path; + u32 tx_num; + u32 addr; + u32 bitmask; + u32 data; +}; + +struct rtw_power_params { + u8 pwr_base; + s8 pwr_offset; + s8 pwr_limit; + s8 pwr_remnant; + s8 pwr_sar; +}; + +enum rtw_phy_cck_pd_lv { + CCK_PD_LV0 = 0, + CCK_PD_LV1 = 1, + CCK_PD_LV2 = 2, + CCK_PD_LV3 = 3, + CCK_PD_LV4 = 4, + CCK_PD_LV_MAX = 5, +}; + +struct rtw_sar_arg { + u8 sar_band; + u8 path; + u8 rs; +}; + +struct phy_cfg_pair { + u32 addr; + u32 data; +}; + +union phy_table_tile { + struct { + struct rtw_phy_cond cond; + struct rtw_phy_cond2 cond2; + }; + struct phy_cfg_pair cfg; +}; + +enum rtw_phy_band_type { + PHY_BAND_2G = 0, + PHY_BAND_5G = 1, +}; + +struct rtw_phy_stat_iter_data { + struct rtw_dev *rtwdev; + u8 min_rssi; +}; + +struct rtw_rx_addr_match_data { + struct rtw_dev *rtwdev; + struct ieee80211_hdr *hdr; + struct rtw_rx_pkt_stat *pkt_stat; + u8 *bssid; +}; + +struct usb_device_id { + __u16 match_flags; + __u16 idVendor; + __u16 idProduct; + __u16 bcdDevice_lo; + __u16 bcdDevice_hi; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 bInterfaceNumber; + kernel_ulong_t driver_info; +}; + +struct usb_device_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __le16 idVendor; + __le16 idProduct; + __le16 bcdDevice; + __u8 iManufacturer; + __u8 iProduct; + __u8 iSerialNumber; + __u8 bNumConfigurations; +}; + +struct usb_config_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumInterfaces; + __u8 bConfigurationValue; + __u8 iConfiguration; + __u8 bmAttributes; + __u8 bMaxPower; +} __attribute__((packed)); + +struct usb_interface_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bInterfaceNumber; + __u8 bAlternateSetting; + __u8 bNumEndpoints; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 iInterface; +}; + +struct usb_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bEndpointAddress; + __u8 bmAttributes; + __le16 wMaxPacketSize; + 
__u8 bInterval; + __u8 bRefresh; + __u8 bSynchAddress; +} __attribute__((packed)); + +struct usb_ssp_isoc_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wReseved; + __le32 dwBytesPerInterval; +}; + +struct usb_ss_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bMaxBurst; + __u8 bmAttributes; + __le16 wBytesPerInterval; +}; + +struct usb_interface_assoc_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bFirstInterface; + __u8 bInterfaceCount; + __u8 bFunctionClass; + __u8 bFunctionSubClass; + __u8 bFunctionProtocol; + __u8 iFunction; +}; + +struct usb_bos_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumDeviceCaps; +} __attribute__((packed)); + +struct usb_ext_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __le32 bmAttributes; +} __attribute__((packed)); + +struct usb_ss_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bmAttributes; + __le16 wSpeedSupported; + __u8 bFunctionalitySupport; + __u8 bU1devExitLat; + __le16 bU2DevExitLat; +}; + +struct usb_ss_container_id_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __u8 ContainerID[16]; +}; + +struct usb_ssp_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __le32 bmAttributes; + __le16 wFunctionalitySupport; + __le16 wReserved; + union { + __le32 legacy_padding; + struct { + struct {} __empty_bmSublinkSpeedAttr; + __le32 bmSublinkSpeedAttr[0]; + }; + }; +}; + +struct usb_ptm_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +enum usb_device_speed { + USB_SPEED_UNKNOWN = 0, + USB_SPEED_LOW = 1, + USB_SPEED_FULL = 2, + USB_SPEED_HIGH = 3, + USB_SPEED_WIRELESS = 4, + USB_SPEED_SUPER = 5, + USB_SPEED_SUPER_PLUS = 6, +}; + +enum usb_device_state { + USB_STATE_NOTATTACHED = 0, + USB_STATE_ATTACHED = 1, + USB_STATE_POWERED = 2, + USB_STATE_RECONNECTING = 3, + USB_STATE_UNAUTHENTICATED = 4, + USB_STATE_DEFAULT = 5, + USB_STATE_ADDRESS = 6, + USB_STATE_CONFIGURED = 7, + USB_STATE_SUSPENDED = 8, +}; + +enum usb_ssp_rate { + USB_SSP_GEN_UNKNOWN = 0, + USB_SSP_GEN_2x1 = 1, + USB_SSP_GEN_1x2 = 2, + USB_SSP_GEN_2x2 = 3, +}; + +struct ep_device; + +struct usb_host_endpoint { + struct usb_endpoint_descriptor desc; + struct usb_ss_ep_comp_descriptor ss_ep_comp; + struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; + long: 0; + struct list_head urb_list; + void *hcpriv; + struct ep_device *ep_dev; + unsigned char *extra; + int extralen; + int enabled; + int streams; +} __attribute__((packed)); + +struct usb_host_interface { + struct usb_interface_descriptor desc; + int extralen; + unsigned char *extra; + struct usb_host_endpoint *endpoint; + char *string; +}; + +enum usb_interface_condition { + USB_INTERFACE_UNBOUND = 0, + USB_INTERFACE_BINDING = 1, + USB_INTERFACE_BOUND = 2, + USB_INTERFACE_UNBINDING = 3, +}; + +enum usb_wireless_status { + USB_WIRELESS_STATUS_NA = 0, + USB_WIRELESS_STATUS_DISCONNECTED = 1, + USB_WIRELESS_STATUS_CONNECTED = 2, +}; + +struct usb_interface { + struct usb_host_interface *altsetting; + struct usb_host_interface *cur_altsetting; + unsigned int num_altsetting; + struct usb_interface_assoc_descriptor *intf_assoc; + int minor; + enum usb_interface_condition condition; + unsigned int sysfs_files_created: 1; + unsigned int ep_devs_created: 1; + unsigned int unregistering: 1; + unsigned int needs_remote_wakeup: 1; + 
unsigned int needs_altsetting0: 1; + unsigned int needs_binding: 1; + unsigned int resetting_device: 1; + unsigned int authorized: 1; + enum usb_wireless_status wireless_status; + struct work_struct wireless_status_work; + struct device dev; + struct device *usb_dev; + struct work_struct reset_ws; + long: 32; +}; + +struct usb_interface_cache { + unsigned int num_altsetting; + struct kref ref; + struct usb_host_interface altsetting[0]; +}; + +struct usb_host_config { + struct usb_config_descriptor desc; + char *string; + struct usb_interface_assoc_descriptor *intf_assoc[16]; + struct usb_interface *interface[32]; + struct usb_interface_cache *intf_cache[32]; + unsigned char *extra; + int extralen; +}; + +struct usb_host_bos { + struct usb_bos_descriptor *desc; + struct usb_ext_cap_descriptor *ext_cap; + struct usb_ss_cap_descriptor *ss_cap; + struct usb_ssp_cap_descriptor *ssp_cap; + struct usb_ss_container_id_descriptor *ss_id; + struct usb_ptm_cap_descriptor *ptm_cap; +}; + +struct usb_device; + +struct usb_bus { + struct device *controller; + struct device *sysdev; + int busnum; + const char *bus_name; + u8 uses_pio_for_control; + u8 otg_port; + unsigned int is_b_host: 1; + unsigned int b_hnp_enable: 1; + unsigned int no_stop_on_short: 1; + unsigned int no_sg_constraint: 1; + unsigned int sg_tablesize; + int devnum_next; + struct mutex devnum_next_mutex; + long unsigned int devmap[4]; + struct usb_device *root_hub; + struct usb_bus *hs_companion; + int bandwidth_allocated; + int bandwidth_int_reqs; + int bandwidth_isoc_reqs; + unsigned int resuming_ports; +}; + +enum usb_link_tunnel_mode { + USB_LINK_UNKNOWN = 0, + USB_LINK_NATIVE = 1, + USB_LINK_TUNNELED = 2, +}; + +struct usb2_lpm_parameters { + unsigned int besl; + int timeout; +}; + +struct usb3_lpm_parameters { + unsigned int mel; + unsigned int pel; + unsigned int sel; + int timeout; +}; + +struct usb_tt; + +struct usb_device { + int devnum; + char devpath[16]; + u32 route; + enum usb_device_state state; + enum usb_device_speed speed; + unsigned int rx_lanes; + unsigned int tx_lanes; + enum usb_ssp_rate ssp_rate; + struct usb_tt *tt; + int ttport; + unsigned int toggle[2]; + struct usb_device *parent; + struct usb_bus *bus; + struct usb_host_endpoint ep0; + long: 32; + struct device dev; + struct usb_device_descriptor descriptor; + struct usb_host_bos *bos; + struct usb_host_config *config; + struct usb_host_config *actconfig; + struct usb_host_endpoint *ep_in[16]; + struct usb_host_endpoint *ep_out[16]; + char **rawdescriptors; + short unsigned int bus_mA; + u8 portnum; + u8 level; + u8 devaddr; + unsigned int can_submit: 1; + unsigned int persist_enabled: 1; + unsigned int reset_in_progress: 1; + unsigned int have_langid: 1; + unsigned int authorized: 1; + unsigned int authenticated: 1; + unsigned int lpm_capable: 1; + unsigned int lpm_devinit_allow: 1; + unsigned int usb2_hw_lpm_capable: 1; + unsigned int usb2_hw_lpm_besl_capable: 1; + unsigned int usb2_hw_lpm_enabled: 1; + unsigned int usb2_hw_lpm_allowed: 1; + unsigned int usb3_lpm_u1_enabled: 1; + unsigned int usb3_lpm_u2_enabled: 1; + int string_langid; + char *product; + char *manufacturer; + char *serial; + struct list_head filelist; + int maxchild; + u32 quirks; + atomic_t urbnum; + long unsigned int active_duration; + long unsigned int connect_time; + unsigned int do_remote_wakeup: 1; + unsigned int reset_resume: 1; + unsigned int port_is_suspended: 1; + enum usb_link_tunnel_mode tunnel_mode; + int slot_id; + struct usb2_lpm_parameters l1_params; + struct 
usb3_lpm_parameters u1_params; + struct usb3_lpm_parameters u2_params; + unsigned int lpm_disable_count; + u16 hub_delay; + unsigned int use_generic_driver: 1; + long: 32; +}; + +struct usb_tt { + struct usb_device *hub; + int multi; + unsigned int think_time; + void *hcpriv; + spinlock_t lock; + struct list_head clear_list; + struct work_struct clear_work; +}; + +struct usb_device_driver { + const char *name; + bool (*match)(struct usb_device *); + int (*probe)(struct usb_device *); + void (*disconnect)(struct usb_device *); + int (*suspend)(struct usb_device *, pm_message_t); + int (*resume)(struct usb_device *, pm_message_t); + int (*choose_configuration)(struct usb_device *); + const struct attribute_group **dev_groups; + struct device_driver driver; + const struct usb_device_id *id_table; + unsigned int supports_autosuspend: 1; + unsigned int generic_subclass: 1; +}; + +enum { + TASKSTATS_CMD_UNSPEC = 0, + TASKSTATS_CMD_GET = 1, + TASKSTATS_CMD_NEW = 2, + __TASKSTATS_CMD_MAX = 3, +}; + +enum ucount_type { + UCOUNT_USER_NAMESPACES = 0, + UCOUNT_PID_NAMESPACES = 1, + UCOUNT_UTS_NAMESPACES = 2, + UCOUNT_IPC_NAMESPACES = 3, + UCOUNT_NET_NAMESPACES = 4, + UCOUNT_MNT_NAMESPACES = 5, + UCOUNT_CGROUP_NAMESPACES = 6, + UCOUNT_TIME_NAMESPACES = 7, + UCOUNT_INOTIFY_INSTANCES = 8, + UCOUNT_INOTIFY_WATCHES = 9, + UCOUNT_COUNTS = 10, +}; + +enum rlimit_type { + UCOUNT_RLIMIT_NPROC = 0, + UCOUNT_RLIMIT_MSGQUEUE = 1, + UCOUNT_RLIMIT_SIGPENDING = 2, + UCOUNT_RLIMIT_MEMLOCK = 3, + UCOUNT_RLIMIT_COUNTS = 4, +}; + +enum usb_phy_interface { + USBPHY_INTERFACE_MODE_UNKNOWN = 0, + USBPHY_INTERFACE_MODE_UTMI = 1, + USBPHY_INTERFACE_MODE_UTMIW = 2, + USBPHY_INTERFACE_MODE_ULPI = 3, + USBPHY_INTERFACE_MODE_SERIAL = 4, + USBPHY_INTERFACE_MODE_HSIC = 5, +}; + +struct input_id { + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; +}; + +struct input_absinfo { + __s32 value; + __s32 minimum; + __s32 maximum; + __s32 fuzz; + __s32 flat; + __s32 resolution; +}; + +struct input_keymap_entry { + __u8 flags; + __u8 len; + __u16 index; + __u32 keycode; + __u8 scancode[32]; +}; + +struct ff_replay { + __u16 length; + __u16 delay; +}; + +struct ff_trigger { + __u16 button; + __u16 interval; +}; + +struct ff_envelope { + __u16 attack_length; + __u16 attack_level; + __u16 fade_length; + __u16 fade_level; +}; + +struct ff_constant_effect { + __s16 level; + struct ff_envelope envelope; +}; + +struct ff_ramp_effect { + __s16 start_level; + __s16 end_level; + struct ff_envelope envelope; +}; + +struct ff_condition_effect { + __u16 right_saturation; + __u16 left_saturation; + __s16 right_coeff; + __s16 left_coeff; + __u16 deadband; + __s16 center; +}; + +struct ff_periodic_effect { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 custom_len; + __s16 *custom_data; +}; + +struct ff_rumble_effect { + __u16 strong_magnitude; + __u16 weak_magnitude; +}; + +struct ff_effect { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +struct input_device_id { + kernel_ulong_t flags; + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; + kernel_ulong_t evbit[1]; + kernel_ulong_t keybit[24]; + kernel_ulong_t relbit[1]; + kernel_ulong_t absbit[2]; + kernel_ulong_t mscbit[1]; + kernel_ulong_t 
ledbit[1]; + kernel_ulong_t sndbit[1]; + kernel_ulong_t ffbit[4]; + kernel_ulong_t swbit[1]; + kernel_ulong_t propbit[1]; + kernel_ulong_t driver_info; +}; + +struct input_value { + __u16 type; + __u16 code; + __s32 value; +}; + +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO = 1, + INPUT_CLK_BOOT = 2, + INPUT_CLK_MAX = 3, +}; + +struct ff_device; + +struct input_dev_poller; + +struct input_mt; + +struct input_handle; + +struct input_dev { + const char *name; + const char *phys; + const char *uniq; + struct input_id id; + long unsigned int propbit[1]; + long unsigned int evbit[1]; + long unsigned int keybit[24]; + long unsigned int relbit[1]; + long unsigned int absbit[2]; + long unsigned int mscbit[1]; + long unsigned int ledbit[1]; + long unsigned int sndbit[1]; + long unsigned int ffbit[4]; + long unsigned int swbit[1]; + unsigned int hint_events_per_packet; + unsigned int keycodemax; + unsigned int keycodesize; + void *keycode; + int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *); + int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); + struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int repeat_key; + struct timer_list timer; + int rep[2]; + struct input_mt *mt; + struct input_absinfo *absinfo; + long unsigned int key[24]; + long unsigned int led[1]; + long unsigned int snd[1]; + long unsigned int sw[1]; + int (*open)(struct input_dev *); + void (*close)(struct input_dev *); + int (*flush)(struct input_dev *, struct file *); + int (*event)(struct input_dev *, unsigned int, unsigned int, int); + struct input_handle *grab; + spinlock_t event_lock; + struct mutex mutex; + unsigned int users; + bool going_away; + struct device dev; + struct list_head h_list; + struct list_head node; + unsigned int num_vals; + unsigned int max_vals; + struct input_value *vals; + bool devres_managed; + ktime_t timestamp[3]; + bool inhibited; + long: 32; +}; + +struct ff_device { + int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); + int (*erase)(struct input_dev *, int); + int (*playback)(struct input_dev *, int, int); + void (*set_gain)(struct input_dev *, u16); + void (*set_autocenter)(struct input_dev *, u16); + void (*destroy)(struct ff_device *); + void *private; + long unsigned int ffbit[4]; + struct mutex mutex; + int max_effects; + struct ff_effect *effects; + struct file *effect_owners[0]; +}; + +struct input_dev_poller { + void (*poll)(struct input_dev *); + unsigned int poll_interval; + unsigned int poll_interval_max; + unsigned int poll_interval_min; + struct input_dev *input; + struct delayed_work work; +}; + +struct input_handler; + +struct input_handle { + void *private; + int open; + const char *name; + struct input_dev *dev; + struct input_handler *handler; + unsigned int (*handle_events)(struct input_handle *, struct input_value *, unsigned int); + struct list_head d_node; + struct list_head h_node; +}; + +struct input_handler { + void *private; + void (*event)(struct input_handle *, unsigned int, unsigned int, int); + unsigned int (*events)(struct input_handle *, struct input_value *, unsigned int); + bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); + bool (*match)(struct input_handler *, struct input_dev *); + int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *); + void (*disconnect)(struct input_handle *); + void (*start)(struct input_handle *); + bool passive_observer; + bool legacy_minors; + int minor; + const char 
*name; + const struct input_device_id *id_table; + struct list_head h_list; + struct list_head node; +}; + +enum rc_proto { + RC_PROTO_UNKNOWN = 0, + RC_PROTO_OTHER = 1, + RC_PROTO_RC5 = 2, + RC_PROTO_RC5X_20 = 3, + RC_PROTO_RC5_SZ = 4, + RC_PROTO_JVC = 5, + RC_PROTO_SONY12 = 6, + RC_PROTO_SONY15 = 7, + RC_PROTO_SONY20 = 8, + RC_PROTO_NEC = 9, + RC_PROTO_NECX = 10, + RC_PROTO_NEC32 = 11, + RC_PROTO_SANYO = 12, + RC_PROTO_MCIR2_KBD = 13, + RC_PROTO_MCIR2_MSE = 14, + RC_PROTO_RC6_0 = 15, + RC_PROTO_RC6_6A_20 = 16, + RC_PROTO_RC6_6A_24 = 17, + RC_PROTO_RC6_6A_32 = 18, + RC_PROTO_RC6_MCE = 19, + RC_PROTO_SHARP = 20, + RC_PROTO_XMP = 21, + RC_PROTO_CEC = 22, + RC_PROTO_IMON = 23, + RC_PROTO_RCMM12 = 24, + RC_PROTO_RCMM24 = 25, + RC_PROTO_RCMM32 = 26, + RC_PROTO_XBOX_DVD = 27, + RC_PROTO_MAX = 27, +}; + +struct rc_map_table { + u64 scancode; + u32 keycode; + long: 32; +}; + +struct rc_map { + struct rc_map_table *scan; + unsigned int size; + unsigned int len; + unsigned int alloc; + enum rc_proto rc_proto; + const char *name; + spinlock_t lock; +}; + +struct rc_map_list { + struct list_head list; + struct rc_map map; +}; + +enum rc_driver_type { + RC_DRIVER_SCANCODE = 0, + RC_DRIVER_IR_RAW = 1, + RC_DRIVER_IR_RAW_TX = 2, +}; + +struct rc_scancode_filter { + u32 data; + u32 mask; +}; + +struct ir_raw_event_ctrl; + +struct rc_dev { + struct device dev; + bool managed_alloc; + const struct attribute_group *sysfs_groups[5]; + const char *device_name; + const char *input_phys; + struct input_id input_id; + const char *driver_name; + const char *map_name; + struct rc_map rc_map; + struct mutex lock; + unsigned int minor; + struct ir_raw_event_ctrl *raw; + struct input_dev *input_dev; + enum rc_driver_type driver_type; + bool idle; + bool encode_wakeup; + long: 32; + u64 allowed_protocols; + u64 enabled_protocols; + u64 allowed_wakeup_protocols; + enum rc_proto wakeup_protocol; + struct rc_scancode_filter scancode_filter; + struct rc_scancode_filter scancode_wakeup_filter; + u32 scancode_mask; + u32 users; + void *priv; + spinlock_t keylock; + bool keypressed; + long unsigned int keyup_jiffies; + struct timer_list timer_keyup; + struct timer_list timer_repeat; + u32 last_keycode; + enum rc_proto last_protocol; + long: 32; + u64 last_scancode; + u8 last_toggle; + u32 timeout; + u32 min_timeout; + u32 max_timeout; + u32 rx_resolution; + bool registered; + int (*change_protocol)(struct rc_dev *, u64 *); + int (*open)(struct rc_dev *); + void (*close)(struct rc_dev *); + int (*s_tx_mask)(struct rc_dev *, u32); + int (*s_tx_carrier)(struct rc_dev *, u32); + int (*s_tx_duty_cycle)(struct rc_dev *, u32); + int (*s_rx_carrier_range)(struct rc_dev *, u32, u32); + int (*tx_ir)(struct rc_dev *, unsigned int *, unsigned int); + void (*s_idle)(struct rc_dev *, bool); + int (*s_wideband_receiver)(struct rc_dev *, int); + int (*s_carrier_report)(struct rc_dev *, int); + int (*s_filter)(struct rc_dev *, struct rc_scancode_filter *); + int (*s_wakeup_filter)(struct rc_dev *, struct rc_scancode_filter *); + int (*s_timeout)(struct rc_dev *, unsigned int); +}; + +struct ir_raw_event { + union { + u32 duration; + u32 carrier; + }; + u8 duty_cycle; + unsigned int pulse: 1; + unsigned int overflow: 1; + unsigned int timeout: 1; + unsigned int carrier_report: 1; +}; + +struct nec_dec { + int state; + unsigned int count; + u32 bits; + bool is_nec_x; + bool necx_repeat; +}; + +struct rc5_dec { + int state; + u32 bits; + unsigned int count; + bool is_rc5x; +}; + +struct rc6_dec { + int state; + u8 header; + u32 body; + bool 
toggle; + unsigned int count; + unsigned int wanted_bits; +}; + +struct sony_dec { + int state; + u32 bits; + unsigned int count; +}; + +struct jvc_dec { + int state; + u16 bits; + u16 old_bits; + unsigned int count; + bool first; + bool toggle; +}; + +struct sanyo_dec { + int state; + unsigned int count; + u64 bits; +}; + +struct sharp_dec { + int state; + unsigned int count; + u32 bits; + unsigned int pulse_len; +}; + +struct mce_kbd_dec { + spinlock_t keylock; + struct timer_list rx_timeout; + int state; + u8 header; + u32 body; + unsigned int count; + unsigned int wanted_bits; +}; + +struct xmp_dec { + int state; + unsigned int count; + u32 durations[16]; +}; + +struct ir_raw_event_ctrl { + struct list_head list; + struct task_struct *thread; + struct { + union { + struct __kfifo kfifo; + struct ir_raw_event *type; + const struct ir_raw_event *const_type; + char (*rectype)[0]; + struct ir_raw_event *ptr; + const struct ir_raw_event *ptr_const; + }; + struct ir_raw_event buf[512]; + } kfifo; + ktime_t last_event; + struct rc_dev *dev; + spinlock_t edge_spinlock; + struct timer_list edge_handle; + struct ir_raw_event prev_ev; + struct ir_raw_event this_ev; + struct nec_dec nec; + struct rc5_dec rc5; + struct rc6_dec rc6; + struct sony_dec sony; + struct jvc_dec jvc; + struct sanyo_dec sanyo; + struct sharp_dec sharp; + struct mce_kbd_dec mce_kbd; + struct xmp_dec xmp; + long: 32; +}; + +struct ir_raw_handler { + struct list_head list; + u64 protocols; + int (*decode)(struct rc_dev *, struct ir_raw_event); + int (*encode)(enum rc_proto, u32, struct ir_raw_event *, unsigned int); + u32 carrier; + u32 min_timeout; + int (*raw_register)(struct rc_dev *); + int (*raw_unregister)(struct rc_dev *); +}; + +struct ir_raw_timings_manchester { + unsigned int leader_pulse; + unsigned int leader_space; + unsigned int clock; + unsigned int invert: 1; + unsigned int trailer_space; +}; + +enum rc5_state { + STATE_INACTIVE = 0, + STATE_BIT_START = 1, + STATE_BIT_END = 2, + STATE_CHECK_RC5X = 3, + STATE_FINISHED = 4, +}; + +struct wait_bit_key { + long unsigned int *flags; + int bit_nr; + long unsigned int timeout; +}; + +typedef int wait_bit_action_f(struct wait_bit_key *, int); + +typedef struct {} local_lock_t; + +enum { + BIO_PAGE_PINNED = 0, + BIO_CLONED = 1, + BIO_BOUNCED = 2, + BIO_QUIET = 3, + BIO_CHAIN = 4, + BIO_REFFED = 5, + BIO_BPS_THROTTLED = 6, + BIO_TRACE_COMPLETION = 7, + BIO_CGROUP_ACCT = 8, + BIO_QOS_THROTTLED = 9, + BIO_QOS_MERGED = 10, + BIO_REMAPPED = 11, + BIO_ZONE_WRITE_PLUGGING = 12, + BIO_EMULATES_ZONE_APPEND = 13, + BIO_FLAG_LAST = 14, +}; + +struct badblocks { + struct device *dev; + int count; + int unacked_exist; + int shift; + u64 *page; + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; +}; + +struct blk_plug_cb; + +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); + +struct blk_plug_cb { + struct list_head list; + blk_plug_cb_fn callback; + void *data; +}; + +typedef s32 dma_cookie_t; + +enum dma_status { + DMA_COMPLETE = 0, + DMA_IN_PROGRESS = 1, + DMA_PAUSED = 2, + DMA_ERROR = 3, + DMA_OUT_OF_ORDER = 4, +}; + +enum dma_transfer_direction { + DMA_MEM_TO_MEM = 0, + DMA_MEM_TO_DEV = 1, + DMA_DEV_TO_MEM = 2, + DMA_DEV_TO_DEV = 3, + DMA_TRANS_NONE = 4, +}; + +struct data_chunk { + size_t size; + size_t icg; + size_t dst_icg; + size_t src_icg; +}; + +struct dma_interleaved_template { + dma_addr_t src_start; + dma_addr_t dst_start; + enum dma_transfer_direction dir; + bool src_inc; + bool dst_inc; + bool src_sgl; + bool dst_sgl; + size_t numf; + 
size_t frame_size; + struct data_chunk sgl[0]; +}; + +struct dma_vec { + dma_addr_t addr; + size_t len; +}; + +enum dma_ctrl_flags { + DMA_PREP_INTERRUPT = 1, + DMA_CTRL_ACK = 2, + DMA_PREP_PQ_DISABLE_P = 4, + DMA_PREP_PQ_DISABLE_Q = 8, + DMA_PREP_CONTINUE = 16, + DMA_PREP_FENCE = 32, + DMA_CTRL_REUSE = 64, + DMA_PREP_CMD = 128, + DMA_PREP_REPEAT = 256, + DMA_PREP_LOAD_EOT = 512, +}; + +enum sum_check_flags { + SUM_CHECK_P_RESULT = 1, + SUM_CHECK_Q_RESULT = 2, +}; + +typedef struct { + long unsigned int bits[1]; +} dma_cap_mask_t; + +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = 1, + DESC_METADATA_ENGINE = 2, +}; + +struct dma_chan_percpu { + long unsigned int memcpy_count; + long unsigned int bytes_transferred; +}; + +struct dma_router { + struct device *dev; + void (*route_free)(struct device *, void *); +}; + +struct dma_device; + +struct dma_chan_dev; + +struct dma_chan { + struct dma_device *device; + struct device *slave; + dma_cookie_t cookie; + dma_cookie_t completed_cookie; + int chan_id; + struct dma_chan_dev *dev; + const char *name; + char *dbg_client_name; + struct list_head device_node; + struct dma_chan_percpu *local; + int client_count; + int table_count; + struct dma_router *router; + void *route_data; + void *private; +}; + +typedef bool (*dma_filter_fn)(struct dma_chan *, void *); + +struct dma_slave_map; + +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +enum dmaengine_alignment { + DMAENGINE_ALIGN_1_BYTE = 0, + DMAENGINE_ALIGN_2_BYTES = 1, + DMAENGINE_ALIGN_4_BYTES = 2, + DMAENGINE_ALIGN_8_BYTES = 3, + DMAENGINE_ALIGN_16_BYTES = 4, + DMAENGINE_ALIGN_32_BYTES = 5, + DMAENGINE_ALIGN_64_BYTES = 6, + DMAENGINE_ALIGN_128_BYTES = 7, + DMAENGINE_ALIGN_256_BYTES = 8, +}; + +enum dma_residue_granularity { + DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, + DMA_RESIDUE_GRANULARITY_SEGMENT = 1, + DMA_RESIDUE_GRANULARITY_BURST = 2, +}; + +struct dma_async_tx_descriptor; + +struct dma_slave_caps; + +struct dma_slave_config; + +struct dma_tx_state; + +struct dma_device { + struct kref ref; + unsigned int chancnt; + unsigned int privatecnt; + struct list_head channels; + struct list_head global_node; + struct dma_filter filter; + dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; + short unsigned int max_xor; + short unsigned int max_pq; + enum dmaengine_alignment copy_align; + enum dmaengine_alignment xor_align; + enum dmaengine_alignment pq_align; + enum dmaengine_alignment fill_align; + int dev_id; + struct device *dev; + struct module *owner; + struct ida chan_ida; + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool descriptor_reuse; + enum dma_residue_granularity residue_granularity; + int (*device_alloc_chan_resources)(struct dma_chan *); + int (*device_router_config)(struct dma_chan *); + void (*device_free_chan_resources)(struct dma_chan *); + struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan *, dma_addr_t, dma_addr_t, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char 
*, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan *, dma_addr_t, int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan *, struct scatterlist *, unsigned int, int, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_peripheral_dma_vec)(struct dma_chan *, const struct dma_vec *, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan *, struct scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *); + struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan *, struct dma_interleaved_template *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan *, dma_addr_t, u64, long unsigned int); + void (*device_caps)(struct dma_chan *, struct dma_slave_caps *); + int (*device_config)(struct dma_chan *, struct dma_slave_config *); + int (*device_pause)(struct dma_chan *); + int (*device_resume)(struct dma_chan *); + int (*device_terminate_all)(struct dma_chan *); + void (*device_synchronize)(struct dma_chan *); + enum dma_status (*device_tx_status)(struct dma_chan *, dma_cookie_t, struct dma_tx_state *); + void (*device_issue_pending)(struct dma_chan *); + void (*device_release)(struct dma_device *); + void (*dbg_summary_show)(struct seq_file *, struct dma_device *); + struct dentry *dbg_dev_root; +}; + +struct dma_chan_dev { + struct dma_chan *chan; + long: 32; + struct device device; + int dev_id; + bool chan_dma_dev; +}; + +enum dma_slave_buswidth { + DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, + DMA_SLAVE_BUSWIDTH_1_BYTE = 1, + DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_3_BYTES = 3, + DMA_SLAVE_BUSWIDTH_4_BYTES = 4, + DMA_SLAVE_BUSWIDTH_8_BYTES = 8, + DMA_SLAVE_BUSWIDTH_16_BYTES = 16, + DMA_SLAVE_BUSWIDTH_32_BYTES = 32, + DMA_SLAVE_BUSWIDTH_64_BYTES = 64, + DMA_SLAVE_BUSWIDTH_128_BYTES = 128, +}; + +struct dma_slave_config { + enum dma_transfer_direction direction; + phys_addr_t src_addr; + phys_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + u32 src_maxburst; + u32 dst_maxburst; + u32 src_port_window_size; + u32 dst_port_window_size; + bool device_fc; + void *peripheral_config; + size_t peripheral_size; +}; + +struct dma_slave_caps { + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool cmd_pause; + bool cmd_resume; + bool cmd_terminate; + enum dma_residue_granularity residue_granularity; + bool descriptor_reuse; +}; + +typedef void (*dma_async_tx_callback)(void *); + +enum dmaengine_tx_result { + DMA_TRANS_NOERROR = 0, + DMA_TRANS_READ_FAILED = 1, + DMA_TRANS_WRITE_FAILED = 2, + DMA_TRANS_ABORTED = 3, +}; + +struct dmaengine_result { + enum dmaengine_tx_result result; + u32 residue; +}; + +typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); + +struct dmaengine_unmap_data { + u8 map_cnt; + 
u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *, void *, size_t); + void * (*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *); + int (*set_len)(struct dma_async_tx_descriptor *, size_t); +}; + +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + enum dma_ctrl_flags flags; + dma_addr_t phys; + struct dma_chan *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); + int (*desc_free)(struct dma_async_tx_descriptor *); + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; + struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; +}; + +struct dma_tx_state { + dma_cookie_t last; + dma_cookie_t used; + u32 residue; + u32 in_flight_bytes; +}; + +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +enum async_tx_flags { + ASYNC_TX_XOR_ZERO_DST = 1, + ASYNC_TX_XOR_DROP_DST = 2, + ASYNC_TX_ACK = 4, + ASYNC_TX_FENCE = 8, + ASYNC_TX_PQ_XOR_DST = 16, +}; + +struct async_submit_ctl { + enum async_tx_flags flags; + struct dma_async_tx_descriptor *depend_tx; + dma_async_tx_callback cb_fn; + void *cb_param; + void *scribble; +}; + +typedef union { + long unsigned int addr; + struct page *page; + dma_addr_t dma; +} addr_conv_t; + +enum cpuhp_state { + CPUHP_INVALID = -1, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS = 1, + CPUHP_PERF_PREPARE = 2, + CPUHP_PERF_X86_PREPARE = 3, + CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, + CPUHP_PERF_POWER = 5, + CPUHP_PERF_SUPERH = 6, + CPUHP_X86_HPET_DEAD = 7, + CPUHP_X86_MCE_DEAD = 8, + CPUHP_VIRT_NET_DEAD = 9, + CPUHP_IBMVNIC_DEAD = 10, + CPUHP_SLUB_DEAD = 11, + CPUHP_DEBUG_OBJ_DEAD = 12, + CPUHP_MM_WRITEBACK_DEAD = 13, + CPUHP_MM_VMSTAT_DEAD = 14, + CPUHP_SOFTIRQ_DEAD = 15, + CPUHP_NET_MVNETA_DEAD = 16, + CPUHP_CPUIDLE_DEAD = 17, + CPUHP_ARM64_FPSIMD_DEAD = 18, + CPUHP_ARM_OMAP_WAKE_DEAD = 19, + CPUHP_IRQ_POLL_DEAD = 20, + CPUHP_BLOCK_SOFTIRQ_DEAD = 21, + CPUHP_BIO_DEAD = 22, + CPUHP_ACPI_CPUDRV_DEAD = 23, + CPUHP_S390_PFAULT_DEAD = 24, + CPUHP_BLK_MQ_DEAD = 25, + CPUHP_FS_BUFF_DEAD = 26, + CPUHP_PRINTK_DEAD = 27, + CPUHP_MM_MEMCQ_DEAD = 28, + CPUHP_PERCPU_CNT_DEAD = 29, + CPUHP_RADIX_DEAD = 30, + CPUHP_PAGE_ALLOC = 31, + CPUHP_NET_DEV_DEAD = 32, + CPUHP_PCI_XGENE_DEAD = 33, + CPUHP_IOMMU_IOVA_DEAD = 34, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 35, + CPUHP_PADATA_DEAD = 36, + CPUHP_AP_DTPM_CPU_DEAD = 37, + CPUHP_RANDOM_PREPARE = 38, + CPUHP_WORKQUEUE_PREP = 39, + CPUHP_POWER_NUMA_PREPARE = 40, + CPUHP_HRTIMERS_PREPARE = 41, + CPUHP_X2APIC_PREPARE = 42, + CPUHP_SMPCFD_PREPARE = 43, + CPUHP_RELAY_PREPARE = 44, + CPUHP_MD_RAID5_PREPARE = 45, + CPUHP_RCUTREE_PREP = 46, + CPUHP_CPUIDLE_COUPLED_PREPARE = 47, + CPUHP_POWERPC_PMAC_PREPARE = 48, + CPUHP_POWERPC_MMU_CTX_PREPARE = 49, + CPUHP_XEN_PREPARE = 50, + CPUHP_XEN_EVTCHN_PREPARE = 51, + CPUHP_ARM_SHMOBILE_SCU_PREPARE = 52, + CPUHP_SH_SH3X_PREPARE = 53, + CPUHP_TOPOLOGY_PREPARE = 54, + CPUHP_NET_IUCV_PREPARE = 55, + CPUHP_ARM_BL_PREPARE = 56, + CPUHP_TRACE_RB_PREPARE = 57, + CPUHP_MM_ZS_PREPARE = 58, + CPUHP_MM_ZSWP_POOL_PREPARE = 59, + CPUHP_KVM_PPC_BOOK3S_PREPARE = 60, + CPUHP_ZCOMP_PREPARE = 61, + CPUHP_TIMERS_PREPARE = 62, + CPUHP_TMIGR_PREPARE = 63, + CPUHP_MIPS_SOC_PREPARE = 64, + CPUHP_BP_PREPARE_DYN = 65, + CPUHP_BP_PREPARE_DYN_END = 85, + CPUHP_BP_KICK_AP = 86, + 
CPUHP_BRINGUP_CPU = 87, + CPUHP_AP_IDLE_DEAD = 88, + CPUHP_AP_OFFLINE = 89, + CPUHP_AP_CACHECTRL_STARTING = 90, + CPUHP_AP_SCHED_STARTING = 91, + CPUHP_AP_RCUTREE_DYING = 92, + CPUHP_AP_CPU_PM_STARTING = 93, + CPUHP_AP_IRQ_GIC_STARTING = 94, + CPUHP_AP_IRQ_HIP04_STARTING = 95, + CPUHP_AP_IRQ_APPLE_AIC_STARTING = 96, + CPUHP_AP_IRQ_ARMADA_XP_STARTING = 97, + CPUHP_AP_IRQ_BCM2836_STARTING = 98, + CPUHP_AP_IRQ_MIPS_GIC_STARTING = 99, + CPUHP_AP_IRQ_EIOINTC_STARTING = 100, + CPUHP_AP_IRQ_AVECINTC_STARTING = 101, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 102, + CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING = 103, + CPUHP_AP_IRQ_RISCV_IMSIC_STARTING = 104, + CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING = 105, + CPUHP_AP_ARM_MVEBU_COHERENCY = 106, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 107, + CPUHP_AP_PERF_X86_STARTING = 108, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 109, + CPUHP_AP_PERF_XTENSA_STARTING = 110, + CPUHP_AP_ARM_VFP_STARTING = 111, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 112, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 113, + CPUHP_AP_PERF_ARM_ACPI_STARTING = 114, + CPUHP_AP_PERF_ARM_STARTING = 115, + CPUHP_AP_PERF_RISCV_STARTING = 116, + CPUHP_AP_ARM_L2X0_STARTING = 117, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 118, + CPUHP_AP_ARM_ARCH_TIMER_STARTING = 119, + CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING = 120, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 121, + CPUHP_AP_JCORE_TIMER_STARTING = 122, + CPUHP_AP_ARM_TWD_STARTING = 123, + CPUHP_AP_QCOM_TIMER_STARTING = 124, + CPUHP_AP_TEGRA_TIMER_STARTING = 125, + CPUHP_AP_ARMADA_TIMER_STARTING = 126, + CPUHP_AP_MIPS_GIC_TIMER_STARTING = 127, + CPUHP_AP_ARC_TIMER_STARTING = 128, + CPUHP_AP_REALTEK_TIMER_STARTING = 129, + CPUHP_AP_RISCV_TIMER_STARTING = 130, + CPUHP_AP_CLINT_TIMER_STARTING = 131, + CPUHP_AP_CSKY_TIMER_STARTING = 132, + CPUHP_AP_TI_GP_TIMER_STARTING = 133, + CPUHP_AP_HYPERV_TIMER_STARTING = 134, + CPUHP_AP_DUMMY_TIMER_STARTING = 135, + CPUHP_AP_ARM_XEN_STARTING = 136, + CPUHP_AP_ARM_XEN_RUNSTATE_STARTING = 137, + CPUHP_AP_ARM_CORESIGHT_STARTING = 138, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 139, + CPUHP_AP_ARM64_ISNDEP_STARTING = 140, + CPUHP_AP_SMPCFD_DYING = 141, + CPUHP_AP_HRTIMERS_DYING = 142, + CPUHP_AP_TICK_DYING = 143, + CPUHP_AP_X86_TBOOT_DYING = 144, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 145, + CPUHP_AP_ONLINE = 146, + CPUHP_TEARDOWN_CPU = 147, + CPUHP_AP_ONLINE_IDLE = 148, + CPUHP_AP_HYPERV_ONLINE = 149, + CPUHP_AP_KVM_ONLINE = 150, + CPUHP_AP_SCHED_WAIT_EMPTY = 151, + CPUHP_AP_SMPBOOT_THREADS = 152, + CPUHP_AP_IRQ_AFFINITY_ONLINE = 153, + CPUHP_AP_BLK_MQ_ONLINE = 154, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 155, + CPUHP_AP_X86_INTEL_EPB_ONLINE = 156, + CPUHP_AP_PERF_ONLINE = 157, + CPUHP_AP_PERF_X86_ONLINE = 158, + CPUHP_AP_PERF_X86_UNCORE_ONLINE = 159, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 160, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 161, + CPUHP_AP_PERF_S390_CF_ONLINE = 162, + CPUHP_AP_PERF_S390_SF_ONLINE = 163, + CPUHP_AP_PERF_ARM_CCI_ONLINE = 164, + CPUHP_AP_PERF_ARM_CCN_ONLINE = 165, + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE = 166, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 167, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 168, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 169, + CPUHP_AP_PERF_ARM_HISI_PA_ONLINE = 170, + CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE = 171, + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE = 172, + CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE = 173, + CPUHP_AP_PERF_ARM_L2X0_ONLINE = 174, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 175, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 176, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 177, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 
178, + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE = 179, + CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE = 180, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 181, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 182, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 183, + CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 184, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 185, + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 186, + CPUHP_AP_PERF_CSKY_ONLINE = 187, + CPUHP_AP_TMIGR_ONLINE = 188, + CPUHP_AP_WATCHDOG_ONLINE = 189, + CPUHP_AP_WORKQUEUE_ONLINE = 190, + CPUHP_AP_RANDOM_ONLINE = 191, + CPUHP_AP_RCUTREE_ONLINE = 192, + CPUHP_AP_BASE_CACHEINFO_ONLINE = 193, + CPUHP_AP_ONLINE_DYN = 194, + CPUHP_AP_ONLINE_DYN_END = 234, + CPUHP_AP_X86_HPET_ONLINE = 235, + CPUHP_AP_X86_KVM_CLK_ONLINE = 236, + CPUHP_AP_ACTIVE = 237, + CPUHP_ONLINE = 238, +}; + +typedef int (*list_cmp_func_t)(void *, const struct list_head *, const struct list_head *); + +enum sync_action { + ACTION_RESYNC = 0, + ACTION_RECOVER = 1, + ACTION_CHECK = 2, + ACTION_REPAIR = 3, + ACTION_RESHAPE = 4, + ACTION_FROZEN = 5, + ACTION_IDLE = 6, + NR_SYNC_ACTIONS = 7, +}; + +struct md_cluster_info; + +struct md_personality; + +struct md_thread; + +struct bitmap_operations; + +struct md_rdev; + +struct mddev { + void *private; + struct md_personality *pers; + dev_t unit; + int md_minor; + struct list_head disks; + long unsigned int flags; + long unsigned int sb_flags; + int suspended; + struct mutex suspend_mutex; + struct percpu_ref active_io; + int ro; + int sysfs_active; + struct gendisk *gendisk; + struct kobject kobj; + int hold_active; + int major_version; + int minor_version; + int patch_version; + int persistent; + int external; + char metadata_type[17]; + int chunk_sectors; + time64_t ctime; + time64_t utime; + int level; + int layout; + char clevel[16]; + int raid_disks; + int max_disks; + sector_t dev_sectors; + sector_t array_sectors; + int external_size; + long: 32; + __u64 events; + int can_decrease_events; + char uuid[16]; + long: 32; + sector_t reshape_position; + int delta_disks; + int new_level; + int new_layout; + int new_chunk_sectors; + int reshape_backwards; + struct md_thread *thread; + struct md_thread *sync_thread; + enum sync_action last_sync_action; + sector_t curr_resync; + sector_t curr_resync_completed; + long unsigned int resync_mark; + long: 32; + sector_t resync_mark_cnt; + sector_t curr_mark_cnt; + sector_t resync_max_sectors; + atomic64_t resync_mismatches; + sector_t suspend_lo; + sector_t suspend_hi; + int sync_speed_min; + int sync_speed_max; + int parallel_resync; + int ok_start_degraded; + long unsigned int recovery; + int recovery_disabled; + int in_sync; + struct mutex open_mutex; + struct mutex reconfig_mutex; + atomic_t active; + atomic_t openers; + int changed; + int degraded; + atomic_t recovery_active; + wait_queue_head_t recovery_wait; + long: 32; + sector_t recovery_cp; + sector_t resync_min; + sector_t resync_max; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_action; + struct kernfs_node *sysfs_completed; + struct kernfs_node *sysfs_degraded; + struct kernfs_node *sysfs_level; + struct work_struct del_work; + struct work_struct sync_work; + spinlock_t lock; + wait_queue_head_t sb_wait; + atomic_t pending_writes; + unsigned int safemode; + unsigned int safemode_delay; + struct timer_list safemode_timer; + struct percpu_ref writes_pending; + int sync_checkers; + void *bitmap; + struct bitmap_operations *bitmap_ops; + struct { + struct file *file; + long: 32; + loff_t offset; + long unsigned int space; + long: 32; + 
loff_t default_offset; + long unsigned int default_space; + struct mutex mutex; + long unsigned int chunksize; + long unsigned int daemon_sleep; + long unsigned int max_write_behind; + int external; + int nodes; + char cluster_name[64]; + long: 32; + } bitmap_info; + atomic_t max_corr_read_errors; + struct list_head all_mddevs; + const struct attribute_group *to_remove; + struct bio_set bio_set; + struct bio_set sync_set; + struct bio_set io_clone_set; + struct work_struct event_work; + mempool_t *serial_info_pool; + void (*sync_super)(struct mddev *, struct md_rdev *); + struct md_cluster_info *cluster_info; + unsigned int good_device_nr; + unsigned int noio_flag; + struct list_head deleting; + atomic_t sync_seq; + bool has_superblocks: 1; + bool fail_last_dev: 1; + bool serialize_policy: 1; + long: 32; +}; + +struct serial_in_rdev; + +struct md_rdev { + struct list_head same_set; + sector_t sectors; + struct mddev *mddev; + int last_events; + struct block_device *meta_bdev; + struct block_device *bdev; + struct file *bdev_file; + struct page *sb_page; + struct page *bb_page; + int sb_loaded; + __u64 sb_events; + sector_t data_offset; + sector_t new_data_offset; + sector_t sb_start; + int sb_size; + int preferred_minor; + struct kobject kobj; + long unsigned int flags; + wait_queue_head_t blocked_wait; + int desc_nr; + int raid_disk; + int new_raid_disk; + int saved_raid_disk; + long: 32; + union { + sector_t recovery_offset; + sector_t journal_tail; + }; + atomic_t nr_pending; + atomic_t read_errors; + time64_t last_read_error; + atomic_t corrected_errors; + struct serial_in_rdev *serial; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_unack_badblocks; + struct kernfs_node *sysfs_badblocks; + long: 32; + struct badblocks badblocks; + struct { + short int offset; + unsigned int size; + sector_t sector; + } ppl; +}; + +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + +enum flag_bits { + Faulty = 0, + In_sync = 1, + Bitmap_sync = 2, + WriteMostly = 3, + AutoDetected = 4, + Blocked = 5, + WriteErrorSeen = 6, + FaultRecorded = 7, + BlockedBadBlocks = 8, + WantReplacement = 9, + Replacement = 10, + Candidate = 11, + Journal = 12, + ClusterRemove = 13, + ExternalBbl = 14, + FailFast = 15, + LastDev = 16, + CollisionCheck = 17, + Nonrot = 18, +}; + +enum mddev_flags { + MD_ARRAY_FIRST_USE = 0, + MD_CLOSING = 1, + MD_JOURNAL_CLEAN = 2, + MD_HAS_JOURNAL = 3, + MD_CLUSTER_RESYNC_LOCKED = 4, + MD_FAILFAST_SUPPORTED = 5, + MD_HAS_PPL = 6, + MD_HAS_MULTIPLE_PPLS = 7, + MD_NOT_READY = 8, + MD_BROKEN = 9, + MD_DELETED = 10, +}; + +enum mddev_sb_flags { + MD_SB_CHANGE_DEVS = 0, + MD_SB_CHANGE_CLEAN = 1, + MD_SB_CHANGE_PENDING = 2, + MD_SB_NEED_REWRITE = 3, +}; + +struct md_personality { + char *name; + int level; + struct list_head list; + struct module *owner; + bool (*make_request)(struct mddev *, struct bio *); + int (*run)(struct mddev *); + int (*start)(struct mddev *); + void (*free)(struct mddev *, void *); + void (*status)(struct seq_file *, struct mddev *); + void (*error_handler)(struct mddev *, struct md_rdev *); + int (*hot_add_disk)(struct mddev *, struct md_rdev *); + int (*hot_remove_disk)(struct mddev *, struct md_rdev *); + int (*spare_active)(struct mddev *); + sector_t (*sync_request)(struct mddev *, sector_t, sector_t, int *); + int (*resize)(struct mddev *, sector_t); + sector_t (*size)(struct mddev *, sector_t, int); + int (*check_reshape)(struct mddev *); + int (*start_reshape)(struct mddev 
*); + void (*finish_reshape)(struct mddev *); + void (*update_reshape_pos)(struct mddev *); + void (*prepare_suspend)(struct mddev *); + void (*quiesce)(struct mddev *, int); + void * (*takeover)(struct mddev *); + int (*change_consistency_policy)(struct mddev *, const char *); +}; + +struct md_thread { + void (*run)(struct md_thread *); + struct mddev *mddev; + wait_queue_head_t wqueue; + long unsigned int flags; + struct task_struct *tsk; + long unsigned int timeout; + void *private; +}; + +struct md_bitmap_stats; + +struct bitmap_operations { + bool (*enabled)(struct mddev *); + int (*create)(struct mddev *, int); + int (*resize)(struct mddev *, sector_t, int, bool); + int (*load)(struct mddev *); + void (*destroy)(struct mddev *); + void (*flush)(struct mddev *); + void (*write_all)(struct mddev *); + void (*dirty_bits)(struct mddev *, long unsigned int, long unsigned int); + void (*unplug)(struct mddev *, bool); + void (*daemon_work)(struct mddev *); + void (*wait_behind_writes)(struct mddev *); + int (*startwrite)(struct mddev *, sector_t, long unsigned int, bool); + void (*endwrite)(struct mddev *, sector_t, long unsigned int, bool, bool); + bool (*start_sync)(struct mddev *, sector_t, sector_t *, bool); + void (*end_sync)(struct mddev *, sector_t, sector_t *); + void (*cond_end_sync)(struct mddev *, sector_t, bool); + void (*close_sync)(struct mddev *); + void (*update_sb)(void *); + int (*get_stats)(void *, struct md_bitmap_stats *); + void (*sync_with_cluster)(struct mddev *, sector_t, sector_t, sector_t, sector_t); + void * (*get_from_slot)(struct mddev *, int); + int (*copy_from_slot)(struct mddev *, int, sector_t *, sector_t *, bool); + void (*set_pages)(void *, long unsigned int); + void (*free)(void *); +}; + +enum recovery_flags { + MD_RECOVERY_NEEDED = 0, + MD_RECOVERY_RUNNING = 1, + MD_RECOVERY_INTR = 2, + MD_RECOVERY_DONE = 3, + MD_RECOVERY_FROZEN = 4, + MD_RECOVERY_WAIT = 5, + MD_RECOVERY_ERROR = 6, + MD_RECOVERY_SYNC = 7, + MD_RECOVERY_REQUESTED = 8, + MD_RECOVERY_CHECK = 9, + MD_RECOVERY_RECOVER = 10, + MD_RECOVERY_RESHAPE = 11, + MD_RESYNCING_REMOTE = 12, +}; + +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct mddev *, char *); + ssize_t (*store)(struct mddev *, const char *, size_t); +}; + +enum check_states { + check_state_idle = 0, + check_state_run = 1, + check_state_run_q = 2, + check_state_run_pq = 3, + check_state_check_result = 4, + check_state_compute_run = 5, + check_state_compute_result = 6, +}; + +enum reconstruct_states { + reconstruct_state_idle = 0, + reconstruct_state_prexor_drain_run = 1, + reconstruct_state_drain_run = 2, + reconstruct_state_run = 3, + reconstruct_state_prexor_drain_result = 4, + reconstruct_state_drain_result = 5, + reconstruct_state_result = 6, +}; + +struct stripe_operations { + int target; + int target2; + enum sum_check_flags zero_sum_result; +}; + +struct r5dev { + struct bio req; + struct bio rreq; + struct bio_vec vec; + struct bio_vec rvec; + struct page *page; + struct page *orig_page; + unsigned int offset; + struct bio *toread; + struct bio *read; + struct bio *towrite; + struct bio *written; + long: 32; + sector_t sector; + long unsigned int flags; + u32 log_checksum; + short unsigned int write_hint; + long: 32; +}; + +struct r5conf; + +struct r5worker_group; + +struct r5l_io_unit; + +struct ppl_io_unit; + +struct stripe_head { + struct hlist_node hash; + struct list_head lru; + struct llist_node release_list; + struct r5conf *raid_conf; + short int generation; + long: 32; + sector_t sector; + 
short int pd_idx; + short int qd_idx; + short int ddf_layout; + short int hash_lock_index; + long unsigned int state; + atomic_t count; + int bm_seq; + int disks; + int overwrite_disks; + enum check_states check_state; + enum reconstruct_states reconstruct_state; + spinlock_t stripe_lock; + int cpu; + struct r5worker_group *group; + struct stripe_head *batch_head; + spinlock_t batch_lock; + struct list_head batch_list; + union { + struct r5l_io_unit *log_io; + struct ppl_io_unit *ppl_io; + }; + struct list_head log_list; + long: 32; + sector_t log_start; + struct list_head r5c; + struct page *ppl_page; + struct stripe_operations ops; + struct r5dev dev[0]; +}; + +struct raid5_percpu; + +struct disk_info; + +struct r5l_log; + +struct r5pending_data; + +struct r5conf { + struct hlist_head *stripe_hashtbl; + spinlock_t hash_locks[8]; + struct mddev *mddev; + int chunk_sectors; + int level; + int algorithm; + int rmw_level; + int max_degraded; + int raid_disks; + int max_nr_stripes; + int min_nr_stripes; + sector_t reshape_progress; + sector_t reshape_safe; + int previous_raid_disks; + int prev_chunk_sectors; + int prev_algo; + short int generation; + seqcount_spinlock_t gen_lock; + long unsigned int reshape_checkpoint; + long long int min_offset_diff; + struct list_head handle_list; + struct list_head loprio_list; + struct list_head hold_list; + struct list_head delayed_list; + struct list_head bitmap_list; + struct bio *retry_read_aligned; + unsigned int retry_read_offset; + struct bio *retry_read_aligned_list; + atomic_t preread_active_stripes; + atomic_t active_aligned_reads; + atomic_t pending_full_writes; + int bypass_count; + int bypass_threshold; + int skip_copy; + struct list_head *last_hold; + atomic_t reshape_stripes; + int active_name; + char cache_name[96]; + struct kmem_cache *slab_cache; + struct mutex cache_size_mutex; + int seq_flush; + int seq_write; + int quiesce; + int fullsync; + int recovery_disabled; + struct raid5_percpu *percpu; + int scribble_disks; + int scribble_sectors; + struct hlist_node node; + atomic_t active_stripes; + struct list_head inactive_list[8]; + atomic_t r5c_cached_full_stripes; + struct list_head r5c_full_stripe_list; + atomic_t r5c_cached_partial_stripes; + struct list_head r5c_partial_stripe_list; + atomic_t r5c_flushing_full_stripes; + atomic_t r5c_flushing_partial_stripes; + atomic_t empty_inactive_list_nr; + struct llist_head released_stripes; + wait_queue_head_t wait_for_quiescent; + wait_queue_head_t wait_for_stripe; + wait_queue_head_t wait_for_reshape; + long unsigned int cache_state; + struct shrinker *shrinker; + int pool_size; + spinlock_t device_lock; + struct disk_info *disks; + struct bio_set bio_split; + struct md_thread *thread; + struct list_head temp_inactive_list[8]; + struct r5worker_group *worker_groups; + int group_cnt; + int worker_cnt_per_group; + struct r5l_log *log; + void *log_private; + spinlock_t pending_bios_lock; + bool batch_bio_dispatch; + struct r5pending_data *pending_data; + struct list_head free_list; + struct list_head pending_list; + int pending_data_cnt; + struct r5pending_data *next_pending_data; +}; + +struct r5worker; + +struct r5worker_group { + struct list_head handle_list; + struct list_head loprio_list; + struct r5conf *conf; + struct r5worker *workers; + int stripes_cnt; +}; + +struct stripe_head_state { + int syncing; + int expanding; + int expanded; + int replacing; + int locked; + int uptodate; + int to_read; + int to_write; + int failed; + int written; + int to_fill; + int compute; + int 
req_compute; + int non_overwrite; + int injournal; + int just_cached; + int failed_num[2]; + int p_failed; + int q_failed; + int dec_preread_active; + long unsigned int ops_request; + struct md_rdev *blocked_rdev; + int handle_bad_blocks; + int log_failed; + int waiting_extra_page; +}; + +enum r5dev_flags { + R5_UPTODATE = 0, + R5_LOCKED = 1, + R5_DOUBLE_LOCKED = 2, + R5_OVERWRITE = 3, + R5_Insync = 4, + R5_Wantread = 5, + R5_Wantwrite = 6, + R5_Overlap = 7, + R5_ReadNoMerge = 8, + R5_ReadError = 9, + R5_ReWrite = 10, + R5_Expanded = 11, + R5_Wantcompute = 12, + R5_Wantfill = 13, + R5_Wantdrain = 14, + R5_WantFUA = 15, + R5_SyncIO = 16, + R5_WriteError = 17, + R5_MadeGood = 18, + R5_ReadRepl = 19, + R5_MadeGoodRepl = 20, + R5_NeedReplace = 21, + R5_WantReplace = 22, + R5_Discard = 23, + R5_SkipCopy = 24, + R5_InJournal = 25, + R5_OrigPageUPTDODATE = 26, +}; + +enum { + STRIPE_ACTIVE = 0, + STRIPE_HANDLE = 1, + STRIPE_SYNC_REQUESTED = 2, + STRIPE_SYNCING = 3, + STRIPE_INSYNC = 4, + STRIPE_REPLACED = 5, + STRIPE_PREREAD_ACTIVE = 6, + STRIPE_DELAYED = 7, + STRIPE_DEGRADED = 8, + STRIPE_BIT_DELAY = 9, + STRIPE_EXPANDING = 10, + STRIPE_EXPAND_SOURCE = 11, + STRIPE_EXPAND_READY = 12, + STRIPE_IO_STARTED = 13, + STRIPE_FULL_WRITE = 14, + STRIPE_BIOFILL_RUN = 15, + STRIPE_COMPUTE_RUN = 16, + STRIPE_ON_UNPLUG_LIST = 17, + STRIPE_DISCARD = 18, + STRIPE_ON_RELEASE_LIST = 19, + STRIPE_BATCH_READY = 20, + STRIPE_BATCH_ERR = 21, + STRIPE_BITMAP_PENDING = 22, + STRIPE_LOG_TRAPPED = 23, + STRIPE_R5C_CACHING = 24, + STRIPE_R5C_PARTIAL_STRIPE = 25, + STRIPE_R5C_FULL_STRIPE = 26, + STRIPE_R5C_PREFLUSH = 27, +}; + +enum { + STRIPE_OP_BIOFILL = 0, + STRIPE_OP_COMPUTE_BLK = 1, + STRIPE_OP_PREXOR = 2, + STRIPE_OP_BIODRAIN = 3, + STRIPE_OP_RECONSTRUCT = 4, + STRIPE_OP_CHECK = 5, + STRIPE_OP_PARTIAL_PARITY = 6, +}; + +enum { + PARITY_DISABLE_RMW = 0, + PARITY_ENABLE_RMW = 1, + PARITY_PREFER_RMW = 2, +}; + +enum { + SYNDROME_SRC_ALL = 0, + SYNDROME_SRC_WANT_DRAIN = 1, + SYNDROME_SRC_WRITTEN = 2, +}; + +struct disk_info { + struct md_rdev *rdev; + struct md_rdev *replacement; + struct page *extra_page; +}; + +struct r5worker { + struct work_struct work; + struct r5worker_group *group; + struct list_head temp_inactive_list[8]; + bool working; +}; + +enum r5_cache_state { + R5_INACTIVE_BLOCKED = 0, + R5_ALLOC_MORE = 1, + R5_DID_ALLOC = 2, + R5C_LOG_TIGHT = 3, + R5C_LOG_CRITICAL = 4, + R5C_EXTRA_PAGE_IN_USE = 5, +}; + +struct r5pending_data { + struct list_head sibling; + sector_t sector; + struct bio_list bios; +}; + +struct raid5_percpu { + struct page *spare_page; + void *scribble; + int scribble_obj_size; + local_lock_t lock; +}; + +struct strip_zone { + sector_t zone_end; + sector_t dev_start; + int nb_dev; + int disk_shift; +}; + +enum r0layout { + RAID0_ORIG_LAYOUT = 1, + RAID0_ALT_MULTIZONE_LAYOUT = 2, +}; + +struct r0conf { + struct strip_zone *strip_zone; + struct md_rdev **devlist; + int nr_strip_zones; + enum r0layout layout; +}; + +struct md_bitmap_stats { + u64 events_cleared; + int behind_writes; + bool behind_wait; + long unsigned int missing_pages; + long unsigned int file_pages; + long unsigned int sync_size; + long unsigned int pages; + struct file *file; + long: 32; +}; + +enum stripe_result { + STRIPE_SUCCESS = 0, + STRIPE_RETRY = 1, + STRIPE_SCHEDULE_AND_RETRY = 2, + STRIPE_FAIL = 3, + STRIPE_WAIT_RESHAPE = 4, +}; + +struct stripe_request_ctx { + struct stripe_head *batch_last; + long: 32; + sector_t first_sector; + sector_t last_sector; + long unsigned int sectors_to_do[9]; + bool do_flush; +}; + 
+struct raid5_plug_cb { + struct blk_plug_cb cb; + struct list_head list; + struct list_head temp_inactive_list[8]; +}; + +enum reshape_loc { + LOC_NO_RESHAPE = 0, + LOC_AHEAD_OF_RESHAPE = 1, + LOC_INSIDE_RESHAPE = 2, + LOC_BEHIND_RESHAPE = 3, +}; + +typedef __kernel_long_t __kernel_old_time_t; + +struct __kernel_old_timespec { + __kernel_old_time_t tv_sec; + long int tv_nsec; +}; + +struct cmsghdr { + __kernel_size_t cmsg_len; + int cmsg_level; + int cmsg_type; +}; + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; + +struct scm_timestamping_internal { + struct timespec64 ts[3]; +}; + +struct icmpv6_mib_device { + atomic_long_t mibs[7]; +}; + +struct icmpv6msg_mib_device { + atomic_long_t mibs[512]; +}; + +struct ip_ra_chain { + struct ip_ra_chain *next; + struct sock *sk; + union { + void (*destructor)(struct sock *); + struct sock *saved_sk; + }; + struct callback_head rcu; +}; + +struct inet_peer_base { + struct rb_root rb_root; + seqlock_t lock; + int total; +}; + +struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; +}; + +struct ipv6_devconf { + __u8 __cacheline_group_begin__ipv6_devconf_read_txrx[0]; + __s32 disable_ipv6; + __s32 hop_limit; + __s32 mtu6; + __s32 forwarding; + __s32 disable_policy; + __s32 proxy_ndp; + __u8 __cacheline_group_end__ipv6_devconf_read_txrx[0]; + __s32 accept_ra; + __s32 accept_redirects; + __s32 autoconf; + __s32 dad_transmits; + __s32 rtr_solicits; + __s32 rtr_solicit_interval; + __s32 rtr_solicit_max_interval; + __s32 rtr_solicit_delay; + __s32 force_mld_version; + __s32 mldv1_unsolicited_report_interval; + __s32 mldv2_unsolicited_report_interval; + __s32 use_tempaddr; + __s32 temp_valid_lft; + __s32 temp_prefered_lft; + __s32 regen_min_advance; + __s32 regen_max_retry; + __s32 max_desync_factor; + __s32 max_addresses; + __s32 accept_ra_defrtr; + __u32 ra_defrtr_metric; + __s32 accept_ra_min_hop_limit; + __s32 accept_ra_min_lft; + __s32 accept_ra_pinfo; + __s32 ignore_routes_with_linkdown; + __s32 accept_source_route; + __s32 accept_ra_from_local; + __s32 drop_unicast_in_l2_multicast; + __s32 accept_dad; + __s32 force_tllao; + __s32 ndisc_notify; + __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; + __s32 drop_unsolicited_na; + __s32 accept_untracked_na; + struct ipv6_stable_secret stable_secret; + __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; + __s32 seg6_enabled; + __u32 enhanced_dad; + __u32 addr_gen_mode; + __s32 ndisc_tclass; + __s32 rpl_seg_enabled; + __u32 ioam6_id; + __u32 ioam6_id_wide; + __u8 ioam6_enabled; + __u8 ndisc_evict_nocarrier; + __u8 ra_honor_pio_life; + __u8 ra_honor_pio_pflag; + struct ctl_table_header *sysctl_header; +}; + +struct scm_creds { + u32 pid; + kuid_t uid; + kgid_t gid; +}; + +struct unix_edge; + +struct scm_fp_list { + short int count; + short int count_unix; + short int max; + bool inflight; + bool dead; + struct list_head vertices; + struct unix_edge *edges; + struct user_struct *user; + struct file *fp[253]; +}; + +struct unix_sock; + +struct unix_edge { + struct unix_sock *predecessor; + struct unix_sock *successor; + struct list_head vertex_entry; + struct list_head stack_entry; +}; + +struct scm_cookie { + struct pid *pid; + struct scm_fp_list *fp; + struct scm_creds creds; +}; + +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_NR_TABLES = 2, + NEIGH_LINK_TABLE = 2, +}; + +enum { + NAPI_STATE_SCHED = 0, + NAPI_STATE_MISSED = 1, + NAPI_STATE_DISABLE = 2, + NAPI_STATE_NPSVC = 3, + NAPI_STATE_LISTED = 4, + NAPI_STATE_NO_BUSY_POLL = 5, + NAPI_STATE_IN_BUSY_POLL = 6, 
+ NAPI_STATE_PREFER_BUSY_POLL = 7, + NAPI_STATE_THREADED = 8, + NAPI_STATE_SCHED_THREADED = 9, +}; + +enum xps_map_type { + XPS_CPUS = 0, + XPS_RXQS = 1, + XPS_MAPS_MAX = 2, +}; + +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE = 3, +}; + +struct ipv6_devstat { + struct proc_dir_entry *proc_dir_entry; + struct ipstats_mib *ipv6; + struct icmpv6_mib_device *icmpv6dev; + struct icmpv6msg_mib_device *icmpv6msgdev; +}; + +struct ifmcaddr6; + +struct ifacaddr6; + +struct inet6_dev { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head addr_list; + struct ifmcaddr6 *mc_list; + struct ifmcaddr6 *mc_tomb; + unsigned char mc_qrv; + unsigned char mc_gq_running; + unsigned char mc_ifc_count; + unsigned char mc_dad_count; + long unsigned int mc_v1_seen; + long unsigned int mc_qi; + long unsigned int mc_qri; + long unsigned int mc_maxdelay; + struct delayed_work mc_gq_work; + struct delayed_work mc_ifc_work; + struct delayed_work mc_dad_work; + struct delayed_work mc_query_work; + struct delayed_work mc_report_work; + struct sk_buff_head mc_query_queue; + struct sk_buff_head mc_report_queue; + spinlock_t mc_query_lock; + spinlock_t mc_report_lock; + struct mutex mc_lock; + struct ifacaddr6 *ac_list; + rwlock_t lock; + refcount_t refcnt; + __u32 if_flags; + int dead; + u32 desync_factor; + struct list_head tempaddr_list; + struct in6_addr token; + struct neigh_parms *nd_parms; + struct ipv6_devconf cnf; + struct ipv6_devstat stats; + struct timer_list rs_timer; + __s32 rs_interval; + __u8 rs_probes; + long unsigned int tstamp; + struct callback_head rcu; + unsigned int ra_mtu; +}; + +enum { + NETIF_MSG_DRV_BIT = 0, + NETIF_MSG_PROBE_BIT = 1, + NETIF_MSG_LINK_BIT = 2, + NETIF_MSG_TIMER_BIT = 3, + NETIF_MSG_IFDOWN_BIT = 4, + NETIF_MSG_IFUP_BIT = 5, + NETIF_MSG_RX_ERR_BIT = 6, + NETIF_MSG_TX_ERR_BIT = 7, + NETIF_MSG_TX_QUEUED_BIT = 8, + NETIF_MSG_INTR_BIT = 9, + NETIF_MSG_TX_DONE_BIT = 10, + NETIF_MSG_RX_STATUS_BIT = 11, + NETIF_MSG_PKTDATA_BIT = 12, + NETIF_MSG_HW_BIT = 13, + NETIF_MSG_WOL_BIT = 14, + NETIF_MSG_CLASS_COUNT = 15, +}; + +struct lwtunnel_state { + __u16 type; + __u16 flags; + __u16 headroom; + atomic_t refcnt; + int (*orig_output)(struct net *, struct sock *, struct sk_buff *); + int (*orig_input)(struct sk_buff *); + struct callback_head rcu; + __u8 data[0]; +}; + +struct ip6_sf_list { + struct ip6_sf_list *sf_next; + struct in6_addr sf_addr; + long unsigned int sf_count[2]; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; + struct callback_head rcu; +}; + +struct ifmcaddr6 { + struct in6_addr mca_addr; + struct inet6_dev *idev; + struct ifmcaddr6 *next; + struct ip6_sf_list *mca_sources; + struct ip6_sf_list *mca_tomb; + unsigned int mca_sfmode; + unsigned char mca_crcount; + long unsigned int mca_sfcount[2]; + struct delayed_work mca_work; + unsigned int mca_flags; + int mca_users; + refcount_t mca_refcnt; + long unsigned int mca_cstamp; + long unsigned int mca_tstamp; + struct callback_head rcu; +}; + +struct ifacaddr6 { + struct in6_addr aca_addr; + struct fib6_info *aca_rt; + struct ifacaddr6 *aca_next; + struct hlist_node aca_addr_lst; + int aca_users; + refcount_t aca_refcnt; + long unsigned int aca_cstamp; + long unsigned int aca_tstamp; + struct callback_head rcu; +}; + +enum { + __ND_OPT_PREFIX_INFO_END = 0, + ND_OPT_SOURCE_LL_ADDR = 1, + ND_OPT_TARGET_LL_ADDR = 2, + ND_OPT_PREFIX_INFO = 3, + ND_OPT_REDIRECT_HDR = 4, + ND_OPT_MTU = 5, + ND_OPT_NONCE = 14, + __ND_OPT_ARRAY_MAX 
= 15, + ND_OPT_ROUTE_INFO = 24, + ND_OPT_RDNSS = 25, + ND_OPT_DNSSL = 31, + ND_OPT_6CO = 34, + ND_OPT_CAPTIVE_PORTAL = 37, + ND_OPT_PREF64 = 38, + __ND_OPT_MAX = 39, +}; + +struct nd_opt_hdr { + __u8 nd_opt_type; + __u8 nd_opt_len; +}; + +struct ndisc_options { + struct nd_opt_hdr *nd_opt_array[15]; + struct nd_opt_hdr *nd_useropts; + struct nd_opt_hdr *nd_useropts_end; +}; + +struct prefix_info { + __u8 type; + __u8 length; + __u8 prefix_len; + union { + __u8 flags; + struct { + __u8 reserved: 4; + __u8 preferpd: 1; + __u8 routeraddr: 1; + __u8 autoconf: 1; + __u8 onlink: 1; + }; + }; + __be32 valid; + __be32 prefered; + __be32 reserved2; + struct in6_addr prefix; +}; + +struct scm_timestamping { + struct __kernel_old_timespec ts[3]; +}; + +struct scm_timestamping64 { + struct __kernel_timespec ts[3]; +}; + +struct sockaddr_un { + __kernel_sa_family_t sun_family; + char sun_path[108]; +}; + +struct unix_vertex { + struct list_head edges; + struct list_head entry; + struct list_head scc_entry; + long unsigned int out_degree; + long unsigned int index; + long unsigned int scc_index; +}; + +struct scm_stat { + atomic_t nr_fds; + long unsigned int nr_unix_fds; +}; + +struct unix_address; + +struct unix_sock { + struct sock sk; + struct unix_address *addr; + struct path path; + struct mutex iolock; + struct mutex bindlock; + struct sock *peer; + struct sock *listener; + struct unix_vertex *vertex; + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct socket_wq peer_wq; + wait_queue_entry_t peer_wake; + struct scm_stat scm_stat; + struct sk_buff *oob_skb; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct unix_address { + refcount_t refcnt; + int len; + struct sockaddr_un name[0]; +}; + +typedef __u64 __be64; + +typedef __u16 __sum16; + +struct rhash_lock_head; + +struct bucket_table { + unsigned int size; + unsigned int nest; + u32 hash_rnd; + struct list_head walkers; + struct callback_head rcu; + struct bucket_table *future_tbl; + struct lockdep_map dep_map; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct rhash_lock_head *buckets[0]; +}; + +struct sockaddr_in6 { + short unsigned int sin6_family; + __be16 sin6_port; + __be32 sin6_flowinfo; + struct in6_addr sin6_addr; + __u32 sin6_scope_id; +}; + +struct flowi_tunnel { + __be64 tun_id; +}; + +struct flowi_common { + int flowic_oif; + int flowic_iif; + int flowic_l3mdev; + __u32 flowic_mark; + __u8 flowic_tos; + __u8 flowic_scope; + __u8 flowic_proto; + __u8 flowic_flags; + __u32 flowic_secid; + kuid_t flowic_uid; + __u32 flowic_multipath_hash; + struct flowi_tunnel flowic_tun_key; +}; + +union flowi_uli { + struct { + __be16 dport; + __be16 sport; + } ports; + struct { + __u8 type; + __u8 code; + } icmpt; + __be32 gre_key; + struct { + __u8 type; + } mht; +}; + +struct flowi4 { + struct flowi_common __fl_common; + __be32 saddr; + __be32 daddr; + union flowi_uli uli; + long: 32; +}; + +struct flowi6 { + struct flowi_common __fl_common; + struct in6_addr daddr; + struct in6_addr saddr; + __be32 flowlabel; + union flowi_uli uli; + __u32 mp_hash; + long: 32; +}; + +struct flowi { + union { + struct flowi_common __fl_common; + struct flowi4 ip4; + struct flowi6 ip6; + } u; +}; + +struct inet_ehash_bucket; + +struct inet_bind_hashbucket; + +struct inet_listen_hashbucket; + +struct inet_hashinfo { + struct inet_ehash_bucket *ehash; + spinlock_t *ehash_locks; + unsigned int 
ehash_mask; + unsigned int ehash_locks_mask; + struct kmem_cache *bind_bucket_cachep; + struct inet_bind_hashbucket *bhash; + struct kmem_cache *bind2_bucket_cachep; + struct inet_bind_hashbucket *bhash2; + unsigned int bhash_size; + unsigned int lhash2_mask; + struct inet_listen_hashbucket *lhash2; + bool pernet; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ipv4_devconf { + void *sysctl; + int data[33]; + long unsigned int state[2]; +}; + +struct tcp_fastopen_context { + siphash_key_t key[2]; + int num; + struct callback_head rcu; + long: 32; +}; + +struct rt6key { + struct in6_addr addr; + int plen; +}; + +struct rtable; + +struct fnhe_hash_bucket; + +struct fib_nh_common { + struct net_device *nhc_dev; + netdevice_tracker nhc_dev_tracker; + int nhc_oif; + unsigned char nhc_scope; + u8 nhc_family; + u8 nhc_gw_family; + unsigned char nhc_flags; + struct lwtunnel_state *nhc_lwtstate; + union { + __be32 ipv4; + struct in6_addr ipv6; + } nhc_gw; + int nhc_weight; + atomic_t nhc_upper_bound; + struct rtable **nhc_pcpu_rth_output; + struct rtable *nhc_rth_input; + struct fnhe_hash_bucket *nhc_exceptions; +}; + +struct rt6_exception_bucket; + +struct fib6_nh { + struct fib_nh_common nh_common; + struct rt6_info **rt6i_pcpu; + struct rt6_exception_bucket *rt6i_exception_bucket; +}; + +struct fib6_node; + +struct dst_metrics; + +struct nexthop; + +struct fib6_info { + struct fib6_table *fib6_table; + struct fib6_info *fib6_next; + struct fib6_node *fib6_node; + union { + struct list_head fib6_siblings; + struct list_head nh_list; + }; + unsigned int fib6_nsiblings; + refcount_t fib6_ref; + long unsigned int expires; + struct hlist_node gc_link; + struct dst_metrics *fib6_metrics; + struct rt6key fib6_dst; + u32 fib6_flags; + struct rt6key fib6_src; + struct rt6key fib6_prefsrc; + u32 fib6_metric; + u8 fib6_protocol; + u8 fib6_type; + u8 offload; + u8 trap; + u8 offload_failed; + u8 should_flush: 1; + u8 dst_nocount: 1; + u8 dst_nopolicy: 1; + u8 fib6_destroying: 1; + u8 unused: 4; + struct callback_head rcu; + struct nexthop *nh; + struct fib6_nh fib6_nh[0]; +}; + +struct rt6_info { + struct dst_entry dst; + struct fib6_info *from; + int sernum; + struct rt6key rt6i_dst; + struct rt6key rt6i_src; + struct in6_addr rt6i_gateway; + struct inet6_dev *rt6i_idev; + u32 rt6i_flags; + short unsigned int rt6i_nfheader_len; +}; + +struct rt6_statistics { + __u32 fib_nodes; + __u32 fib_route_nodes; + __u32 fib_rt_entries; + __u32 fib_rt_cache; + __u32 fib_discarded_routes; + atomic_t fib_rt_alloc; +}; + +struct fib6_node { + struct fib6_node *parent; + struct fib6_node *left; + struct fib6_node *right; + struct fib6_info *leaf; + __u16 fn_bit; + __u16 fn_flags; + int fn_sernum; + struct fib6_info *rr_ptr; + struct callback_head rcu; +}; + +struct fib6_table { + struct hlist_node tb6_hlist; + u32 tb6_id; + spinlock_t tb6_lock; + struct fib6_node tb6_root; + struct inet_peer_base tb6_peers; + unsigned int flags; + unsigned int fib_seq; + struct hlist_head tb6_gc_hlist; +}; + +struct page_pool_params_fast { + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; +}; + +struct page_pool_alloc_stats { + u64 fast; + u64 slow; + u64 slow_high_order; + u64 empty; + u64 refill; + u64 waive; +}; + +struct pp_alloc_cache { + u32 count; + netmem_ref cache[128]; +}; + +struct ptr_ring { + int producer; + spinlock_t producer_lock; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + int consumer_head; + int consumer_tail; + spinlock_t consumer_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + int size; + int batch; + void **queue; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct page_pool_params_slow { + struct net_device *netdev; + unsigned int queue_idx; + unsigned int flags; + void (*init_callback)(netmem_ref, void *); + void *init_arg; +}; + +struct page_pool_recycle_stats; + +struct page_pool { + struct page_pool_params_fast p; + int cpuid; + u32 pages_state_hold_cnt; + bool has_init_callback: 1; + bool dma_map: 1; + bool dma_sync: 1; + bool system: 1; + long: 32; + __u8 __cacheline_group_begin__frag[0]; + long int frag_users; + netmem_ref frag_page; + unsigned int frag_offset; + __u8 __cacheline_group_end__frag[0]; + long: 32; + struct {} __cacheline_group_pad__frag; + struct delayed_work release_dw; + void (*disconnect)(void *); + long unsigned int defer_start; + long unsigned int defer_warn; + struct page_pool_alloc_stats alloc_stats; + u32 xdp_mem_id; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct pp_alloc_cache alloc; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ptr_ring ring; + void *mp_priv; + struct page_pool_recycle_stats *recycle_stats; + atomic_t pages_state_release_cnt; + refcount_t user_cnt; + u64 destroy_cnt; + struct page_pool_params_slow slow; + long: 32; + struct { + struct hlist_node list; + u64 detach_time; + u32 napi_id; + u32 id; + } user; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct in_addr { + __be32 s_addr; +}; + +struct sockaddr_in { + __kernel_sa_family_t sin_family; + __be16 sin_port; + struct in_addr sin_addr; + unsigned char __pad[8]; +}; + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct trace_print_flags { + long unsigned int mask; + const char *name; +}; + +struct net_generic { + union { + struct { + unsigned int len; + struct callback_head rcu; + } s; + struct { + struct {} __empty_ptr; + void *ptr[0]; + }; + }; +}; + +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; + __u32 attach_type; + long: 32; +}; + +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED = 0, + BPF_CGROUP_STORAGE_PERCPU = 1, + __BPF_CGROUP_STORAGE_MAX = 2, +}; + +struct bpf_storage_buffer; + +struct bpf_cgroup_storage_map; + +struct bpf_cgroup_storage { + union { + struct bpf_storage_buffer *buf; + void *percpu_buf; + }; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list_map; + struct list_head list_cg; + struct rb_node node; + struct callback_head rcu; + long: 32; +}; + +struct in_ifaddr; + +struct ip_mc_list; + +struct in_device { + struct net_device *dev; + netdevice_tracker dev_tracker; + refcount_t refcnt; + int dead; + struct in_ifaddr *ifa_list; + struct ip_mc_list *mc_list; + struct ip_mc_list **mc_hash; + int mc_count; + spinlock_t mc_tomb_lock; + struct ip_mc_list *mc_tomb; + long unsigned int mr_v1_seen; + 
long unsigned int mr_v2_seen; + long unsigned int mr_maxdelay; + long unsigned int mr_qi; + long unsigned int mr_qri; + unsigned char mr_qrv; + unsigned char mr_gq_running; + u32 mr_ifc_count; + struct timer_list mr_gq_timer; + struct timer_list mr_ifc_timer; + struct neigh_parms *arp_parms; + struct ipv4_devconf cnf; + struct callback_head callback_head; +}; + +struct iphdr { + __u8 ihl: 4; + __u8 version: 4; + __u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + __u8 ttl; + __u8 protocol; + __sum16 check; + union { + struct { + __be32 saddr; + __be32 daddr; + }; + struct { + __be32 saddr; + __be32 daddr; + } addrs; + }; +}; + +enum { + IPV4_DEVCONF_FORWARDING = 1, + IPV4_DEVCONF_MC_FORWARDING = 2, + IPV4_DEVCONF_PROXY_ARP = 3, + IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, + IPV4_DEVCONF_SECURE_REDIRECTS = 5, + IPV4_DEVCONF_SEND_REDIRECTS = 6, + IPV4_DEVCONF_SHARED_MEDIA = 7, + IPV4_DEVCONF_RP_FILTER = 8, + IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, + IPV4_DEVCONF_BOOTP_RELAY = 10, + IPV4_DEVCONF_LOG_MARTIANS = 11, + IPV4_DEVCONF_TAG = 12, + IPV4_DEVCONF_ARPFILTER = 13, + IPV4_DEVCONF_MEDIUM_ID = 14, + IPV4_DEVCONF_NOXFRM = 15, + IPV4_DEVCONF_NOPOLICY = 16, + IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, + IPV4_DEVCONF_ARP_ANNOUNCE = 18, + IPV4_DEVCONF_ARP_IGNORE = 19, + IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, + IPV4_DEVCONF_ARP_ACCEPT = 21, + IPV4_DEVCONF_ARP_NOTIFY = 22, + IPV4_DEVCONF_ACCEPT_LOCAL = 23, + IPV4_DEVCONF_SRC_VMARK = 24, + IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, + IPV4_DEVCONF_ROUTE_LOCALNET = 26, + IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, + IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, + IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, + IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, + IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, + IPV4_DEVCONF_BC_FORWARDING = 32, + IPV4_DEVCONF_ARP_EVICT_NOCARRIER = 33, + __IPV4_DEVCONF_MAX = 34, +}; + +enum { + TCA_UNSPEC = 0, + TCA_KIND = 1, + TCA_OPTIONS = 2, + TCA_STATS = 3, + TCA_XSTATS = 4, + TCA_RATE = 5, + TCA_FCNT = 6, + TCA_STATS2 = 7, + TCA_STAB = 8, + TCA_PAD = 9, + TCA_DUMP_INVISIBLE = 10, + TCA_CHAIN = 11, + TCA_HW_OFFLOAD = 12, + TCA_INGRESS_BLOCK = 13, + TCA_EGRESS_BLOCK = 14, + TCA_DUMP_FLAGS = 15, + TCA_EXT_WARN_MSG = 16, + __TCA_MAX = 17, +}; + +struct in_ifaddr { + struct hlist_node addr_lst; + struct in_ifaddr *ifa_next; + struct in_device *ifa_dev; + struct callback_head callback_head; + __be32 ifa_local; + __be32 ifa_address; + __be32 ifa_mask; + __u32 ifa_rt_priority; + __be32 ifa_broadcast; + unsigned char ifa_scope; + unsigned char ifa_prefixlen; + unsigned char ifa_proto; + __u32 ifa_flags; + char ifa_label[16]; + __u32 ifa_valid_lft; + __u32 ifa_preferred_lft; + long unsigned int ifa_cstamp; + long unsigned int ifa_tstamp; +}; + +enum perf_branch_sample_type_shift { + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, + PERF_SAMPLE_BRANCH_COND_SHIFT = 10, + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, 
+ PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, + PERF_SAMPLE_BRANCH_MAX_SHIFT = 20, +}; + +struct trace_event_raw_kfree_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + void *rx_sk; + short unsigned int protocol; + enum skb_drop_reason reason; + char __data[0]; +}; + +struct trace_event_raw_consume_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + char __data[0]; +}; + +struct trace_event_raw_skb_copy_datagram_iovec { + struct trace_entry ent; + const void *skbaddr; + int len; + char __data[0]; +}; + +struct trace_event_data_offsets_kfree_skb {}; + +struct trace_event_data_offsets_consume_skb {}; + +struct trace_event_data_offsets_skb_copy_datagram_iovec {}; + +typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *, enum skb_drop_reason, struct sock *); + +typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *, void *); + +typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int); + +struct trace_event_raw_net_dev_start_xmit { + struct trace_entry ent; + u32 __data_loc_name; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + unsigned int len; + unsigned int data_len; + int network_offset; + bool transport_offset_valid; + int transport_offset; + u8 tx_flags; + u16 gso_size; + u16 gso_segs; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + int rc; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit_timeout { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_driver; + int queue_index; + char __data[0]; +}; + +struct trace_event_raw_net_dev_template { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_verbose_template { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int napi_id; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + u32 hash; + bool l4_hash; + unsigned int len; + unsigned int data_len; + unsigned int truesize; + bool mac_header_valid; + int mac_header; + unsigned char nr_frags; + u16 gso_size; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_exit_template { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_net_dev_start_xmit { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_xmit { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_xmit_timeout { + u32 name; + const void *name_ptr_; + u32 driver; + const void *driver_ptr_; +}; + +struct trace_event_data_offsets_net_dev_template { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_rx_verbose_template { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_rx_exit_template {}; + +typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, const struct net_device *); + +typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, unsigned int); + +typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct net_device *, int); + +typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *); + +typedef void 
(*btf_trace_netif_rx)(void *, struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_exit)(void *, int); + +typedef void (*btf_trace_napi_gro_receive_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_exit)(void *, int); + +typedef void (*btf_trace_netif_rx_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int); + +struct trace_event_raw_napi_poll { + struct trace_entry ent; + struct napi_struct *napi; + u32 __data_loc_dev_name; + int work; + int budget; + char __data[0]; +}; + +struct trace_event_raw_dql_stall_detected { + struct trace_entry ent; + short unsigned int thrs; + unsigned int len; + long unsigned int last_reap; + long unsigned int hist_head; + long unsigned int now; + long unsigned int hist[4]; + char __data[0]; +}; + +struct trace_event_data_offsets_napi_poll { + u32 dev_name; + const void *dev_name_ptr_; +}; + +struct trace_event_data_offsets_dql_stall_detected {}; + +typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int); + +typedef void (*btf_trace_dql_stall_detected)(void *, short unsigned int, unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int *); + +struct dst_metrics { + u32 metrics[17]; + refcount_t refcnt; +}; + +enum { + TCPF_ESTABLISHED = 2, + TCPF_SYN_SENT = 4, + TCPF_SYN_RECV = 8, + TCPF_FIN_WAIT1 = 16, + TCPF_FIN_WAIT2 = 32, + TCPF_TIME_WAIT = 64, + TCPF_CLOSE = 128, + TCPF_CLOSE_WAIT = 256, + TCPF_LAST_ACK = 512, + TCPF_LISTEN = 1024, + TCPF_CLOSING = 2048, + TCPF_NEW_SYN_RECV = 4096, + TCPF_BOUND_INACTIVE = 8192, +}; + +struct sock_reuseport { + struct callback_head rcu; + u16 max_socks; + u16 num_socks; + u16 num_closed_socks; + u16 incoming_cpu; + unsigned int synq_overflow_ts; + unsigned int reuseport_id; + unsigned int bind_inany: 1; + unsigned int has_conns: 1; + struct bpf_prog *prog; + struct sock *socks[0]; +}; + +struct in6_pktinfo { + struct in6_addr ipi6_addr; + int ipi6_ifindex; +}; + +struct ipv6_rt_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; +}; + +struct ipv6_opt_hdr { + __u8 nexthdr; + __u8 hdrlen; +}; + +struct ipv6hdr { + __u8 priority: 4; + __u8 version: 4; + __u8 flow_lbl[3]; + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; + union { + struct { + struct in6_addr saddr; + struct in6_addr daddr; + }; + struct { + struct in6_addr saddr; + struct in6_addr daddr; + } addrs; + }; +}; + +struct minmax_sample { + u32 t; + u32 v; +}; + +struct minmax { + struct minmax_sample s[3]; +}; + +struct fastopen_queue { + struct request_sock *rskq_rst_head; + struct request_sock *rskq_rst_tail; + spinlock_t lock; + int qlen; + int max_qlen; + struct tcp_fastopen_context *ctx; +}; + +struct request_sock_queue { + spinlock_t rskq_lock; + u8 rskq_defer_accept; + u32 synflood_warned; + atomic_t qlen; + atomic_t young; + struct request_sock *rskq_accept_head; + struct request_sock *rskq_accept_tail; + struct fastopen_queue fastopenq; +}; + +struct ip_options { + __be32 faddr; + __be32 nexthop; + unsigned char optlen; + unsigned char srr; + unsigned char rr; + unsigned char ts; + unsigned char 
is_strictroute: 1; + unsigned char srr_is_hit: 1; + unsigned char is_changed: 1; + unsigned char rr_needaddr: 1; + unsigned char ts_needtime: 1; + unsigned char ts_needaddr: 1; + unsigned char router_alert; + unsigned char cipso; + unsigned char __pad2; + unsigned char __data[0]; +}; + +struct ip_options_rcu { + struct callback_head rcu; + struct ip_options opt; +}; + +struct ipv6_txoptions { + refcount_t refcnt; + int tot_len; + __u16 opt_flen; + __u16 opt_nflen; + struct ipv6_opt_hdr *hopopt; + struct ipv6_opt_hdr *dst0opt; + struct ipv6_rt_hdr *srcrt; + struct ipv6_opt_hdr *dst1opt; + struct callback_head rcu; +}; + +struct inet_request_sock { + struct request_sock req; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u16 tstamp_ok: 1; + u16 sack_ok: 1; + u16 wscale_ok: 1; + u16 ecn_ok: 1; + u16 acked: 1; + u16 no_srccheck: 1; + u16 smc_ok: 1; + u32 ir_mark; + union { + struct ip_options_rcu *ireq_opt; + struct { + struct ipv6_txoptions *ipv6_opt; + struct sk_buff *pktopts; + }; + }; +}; + +struct inet_cork { + unsigned int flags; + __be32 addr; + struct ip_options *opt; + unsigned int fragsize; + int length; + struct dst_entry *dst; + u8 tx_flags; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + u32 ts_opt_id; + long: 32; + u64 transmit_time; + u32 mark; + long: 32; +}; + +struct inet_cork_full { + struct inet_cork base; + struct flowi fl; +}; + +struct ipv6_pinfo; + +struct ip_mc_socklist; + +struct inet_sock { + struct sock sk; + struct ipv6_pinfo *pinet6; + long unsigned int inet_flags; + __be32 inet_saddr; + __s16 uc_ttl; + __be16 inet_sport; + struct ip_options_rcu *inet_opt; + atomic_t inet_id; + __u8 tos; + __u8 min_ttl; + __u8 mc_ttl; + __u8 pmtudisc; + __u8 rcv_tos; + __u8 convert_csum; + int uc_index; + int mc_index; + __be32 mc_addr; + u32 local_port_range; + struct ip_mc_socklist *mc_list; + long: 32; + struct inet_cork_full cork; +}; + +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + +struct ipv6_mc_socklist; + +struct ipv6_ac_socklist; + +struct ipv6_fl_socklist; + +struct ipv6_pinfo { + struct in6_addr saddr; + struct in6_pktinfo sticky_pktinfo; + const struct in6_addr *daddr_cache; + __be32 flow_label; + __u32 frag_size; + s16 hop_limit; + u8 mcast_hops; + int ucast_oif; + int mcast_oif; + union { + struct { + __u16 srcrt: 1; + __u16 osrcrt: 1; + __u16 rxinfo: 1; + __u16 rxoinfo: 1; + __u16 rxhlim: 1; + __u16 rxohlim: 1; + __u16 hopopts: 1; + __u16 ohopopts: 1; + __u16 dstopts: 1; + __u16 odstopts: 1; + __u16 rxflow: 1; + __u16 rxtclass: 1; + __u16 rxpmtu: 1; + __u16 rxorigdstaddr: 1; + __u16 recvfragsize: 1; + } bits; + __u16 all; + } rxopt; + __u8 srcprefs; + __u8 pmtudisc; + __u8 min_hopcount; + __u8 tclass; + __be32 rcv_flowinfo; + __u32 dst_cookie; + struct ipv6_mc_socklist *ipv6_mc_list; + struct ipv6_ac_socklist *ipv6_ac_list; + struct ipv6_fl_socklist *ipv6_fl_list; + struct ipv6_txoptions *opt; + struct sk_buff *pktoptions; + struct sk_buff *rxpmtu; + struct inet6_cork cork; +}; + +struct inet_connection_sock_af_ops { + int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); + void (*send_check)(struct sock *, struct sk_buff *); + int (*rebuild_header)(struct sock *); + void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); + int (*conn_request)(struct sock *, struct sk_buff *); + struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *); + u16 net_header_len; + u16 sockaddr_len; + int (*setsockopt)(struct sock *, 
int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*addr2sockaddr)(struct sock *, struct sockaddr *); + void (*mtu_reduced)(struct sock *); +}; + +struct inet_bind_bucket; + +struct inet_bind2_bucket; + +struct tcp_ulp_ops; + +struct inet_connection_sock { + struct inet_sock icsk_inet; + struct request_sock_queue icsk_accept_queue; + struct inet_bind_bucket *icsk_bind_hash; + struct inet_bind2_bucket *icsk_bind2_hash; + long unsigned int icsk_timeout; + struct timer_list icsk_retransmit_timer; + struct timer_list icsk_delack_timer; + __u32 icsk_rto; + __u32 icsk_rto_min; + __u32 icsk_delack_max; + __u32 icsk_pmtu_cookie; + const struct tcp_congestion_ops *icsk_ca_ops; + const struct inet_connection_sock_af_ops *icsk_af_ops; + const struct tcp_ulp_ops *icsk_ulp_ops; + void *icsk_ulp_data; + void (*icsk_clean_acked)(struct sock *, u32); + unsigned int (*icsk_sync_mss)(struct sock *, u32); + __u8 icsk_ca_state: 5; + __u8 icsk_ca_initialized: 1; + __u8 icsk_ca_setsockopt: 1; + __u8 icsk_ca_dst_locked: 1; + __u8 icsk_retransmits; + __u8 icsk_pending; + __u8 icsk_backoff; + __u8 icsk_syn_retries; + __u8 icsk_probes_out; + __u16 icsk_ext_hdr_len; + struct { + __u8 pending; + __u8 quick; + __u8 pingpong; + __u8 retry; + __u32 ato: 8; + __u32 lrcv_flowlabel: 20; + __u32 unused: 4; + long unsigned int timeout; + __u32 lrcvtime; + __u16 last_seg_size; + __u16 rcv_mss; + } icsk_ack; + struct { + int search_high; + int search_low; + u32 probe_size: 31; + u32 enabled: 1; + u32 probe_timestamp; + } icsk_mtup; + u32 icsk_probes_tstamp; + u32 icsk_user_timeout; + long: 32; + u64 icsk_ca_priv[13]; +}; + +struct inet_bind_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + struct in6_addr fast_v6_rcv_saddr; + __be32 fast_rcv_saddr; + short unsigned int fast_sk_family; + bool fast_ipv6_only; + struct hlist_node node; + struct hlist_head bhash2; +}; + +struct inet_bind2_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + short unsigned int addr_type; + struct in6_addr v6_rcv_saddr; + struct hlist_node node; + struct hlist_node bhash_node; + struct hlist_head owners; +}; + +struct tcp_ulp_ops { + struct list_head list; + int (*init)(struct sock *); + void (*update)(struct sock *, struct proto *, void (*)(struct sock *)); + void (*release)(struct sock *); + int (*get_info)(struct sock *, struct sk_buff *); + size_t (*get_info_size)(const struct sock *); + void (*clone)(const struct request_sock *, struct sock *, const gfp_t); + char name[16]; + struct module *owner; +}; + +struct tcphdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __u16 res1: 4; + __u16 doff: 4; + __u16 fin: 1; + __u16 syn: 1; + __u16 rst: 1; + __u16 psh: 1; + __u16 ack: 1; + __u16 urg: 1; + __u16 ece: 1; + __u16 cwr: 1; + __be16 window; + __sum16 check; + __be16 urg_ptr; +}; + +enum tcp_ca_state { + TCP_CA_Open = 0, + TCP_CA_Disorder = 1, + TCP_CA_CWR = 2, + TCP_CA_Recovery = 3, + TCP_CA_Loss = 4, +}; + +struct tcp_fastopen_cookie { + __le64 val[2]; + s8 len; + bool exp; + long: 32; +}; + +struct tcp_sack_block { + u32 start_seq; + u32 end_seq; +}; + +struct tcp_options_received { + int ts_recent_stamp; + u32 ts_recent; + u32 rcv_tsval; + u32 rcv_tsecr; + u16 saw_tstamp: 1; + u16 tstamp_ok: 1; + u16 dsack: 1; + u16 wscale_ok: 1; + u16 sack_ok: 3; + u16 smc_ok: 1; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u8 saw_unknown: 1; + u8 unused: 7; + u8 
num_sacks; + u16 user_mss; + u16 mss_clamp; +}; + +struct tcp_rack { + u64 mstamp; + u32 rtt_us; + u32 end_seq; + u32 last_delivered; + u8 reo_wnd_steps; + u8 reo_wnd_persist: 5; + u8 dsack_seen: 1; + u8 advanced: 1; +}; + +struct tcp_fastopen_request; + +struct tcp_sock { + struct inet_connection_sock inet_conn; + __u8 __cacheline_group_begin__tcp_sock_read_tx[0]; + u32 max_window; + u32 rcv_ssthresh; + u32 reordering; + u32 notsent_lowat; + u16 gso_segs; + struct sk_buff *lost_skb_hint; + struct sk_buff *retransmit_skb_hint; + __u8 __cacheline_group_end__tcp_sock_read_tx[0]; + __u8 __cacheline_group_begin__tcp_sock_read_txrx[0]; + u32 tsoffset; + u32 snd_wnd; + u32 mss_cache; + u32 snd_cwnd; + u32 prr_out; + u32 lost_out; + u32 sacked_out; + u16 tcp_header_len; + u8 scaling_ratio; + u8 chrono_type: 2; + u8 repair: 1; + u8 tcp_usec_ts: 1; + u8 is_sack_reneg: 1; + u8 is_cwnd_limited: 1; + __u8 __cacheline_group_end__tcp_sock_read_txrx[0]; + __u8 __cacheline_group_begin__tcp_sock_read_rx[0]; + u32 copied_seq; + u32 rcv_tstamp; + u32 snd_wl1; + u32 tlp_high_seq; + u32 rttvar_us; + u32 retrans_out; + u16 advmss; + u16 urg_data; + u32 lost; + struct minmax rtt_min; + struct rb_root out_of_order_queue; + u32 snd_ssthresh; + u8 recvmsg_inq: 1; + __u8 __cacheline_group_end__tcp_sock_read_rx[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + __u8 __cacheline_group_begin__tcp_sock_write_tx[0]; + u32 segs_out; + u32 data_segs_out; + u64 bytes_sent; + u32 snd_sml; + u32 chrono_start; + u32 chrono_stat[3]; + u32 write_seq; + u32 pushed_seq; + u32 lsndtime; + u32 mdev_us; + u32 rtt_seq; + u64 tcp_wstamp_ns; + struct list_head tsorted_sent_queue; + struct sk_buff *highest_sack; + u8 ecn_flags; + __u8 __cacheline_group_end__tcp_sock_write_tx[0]; + __u8 __cacheline_group_begin__tcp_sock_write_txrx[0]; + __be32 pred_flags; + long: 32; + u64 tcp_clock_cache; + u64 tcp_mstamp; + u32 rcv_nxt; + u32 snd_nxt; + u32 snd_una; + u32 window_clamp; + u32 srtt_us; + u32 packets_out; + u32 snd_up; + u32 delivered; + u32 delivered_ce; + u32 app_limited; + u32 rcv_wnd; + struct tcp_options_received rx_opt; + u8 nonagle: 4; + u8 rate_app_limited: 1; + __u8 __cacheline_group_end__tcp_sock_write_txrx[0]; + long: 0; + __u8 __cacheline_group_begin__tcp_sock_write_rx[0]; + u64 bytes_received; + u32 segs_in; + u32 data_segs_in; + u32 rcv_wup; + u32 max_packets_out; + u32 cwnd_usage_seq; + u32 rate_delivered; + u32 rate_interval_us; + u32 rcv_rtt_last_tsecr; + u64 first_tx_mstamp; + u64 delivered_mstamp; + u64 bytes_acked; + struct { + u32 rtt_us; + u32 seq; + u64 time; + } rcv_rtt_est; + struct { + u32 space; + u32 seq; + u64 time; + } rcvq_space; + __u8 __cacheline_group_end__tcp_sock_write_rx[0]; + u32 dsack_dups; + u32 compressed_ack_rcv_nxt; + struct list_head tsq_node; + struct tcp_rack rack; + u8 compressed_ack; + u8 dup_ack_counter: 2; + u8 tlp_retrans: 1; + u8 unused: 5; + u8 thin_lto: 1; + u8 fastopen_connect: 1; + u8 fastopen_no_cookie: 1; + u8 fastopen_client_fail: 2; + u8 frto: 1; + u8 repair_queue; + u8 save_syn: 2; + u8 syn_data: 1; + u8 syn_fastopen: 1; + u8 syn_fastopen_exp: 1; + u8 syn_fastopen_ch: 1; + u8 syn_data_acked: 1; + u8 keepalive_probes; + u32 tcp_tx_delay; + u32 mdev_max_us; + u32 reord_seen; + u32 snd_cwnd_cnt; + u32 snd_cwnd_clamp; + u32 snd_cwnd_used; + u32 snd_cwnd_stamp; + u32 prior_cwnd; + u32 prr_delivered; + u32 last_oow_ack_time; + struct hrtimer pacing_timer; + struct hrtimer compressed_ack_timer; + struct sk_buff 
*ooo_last_skb; + struct tcp_sack_block duplicate_sack[1]; + struct tcp_sack_block selective_acks[4]; + struct tcp_sack_block recv_sack_cache[4]; + int lost_cnt_hint; + u32 prior_ssthresh; + u32 high_seq; + u32 retrans_stamp; + u32 undo_marker; + int undo_retrans; + long: 32; + u64 bytes_retrans; + u32 total_retrans; + u32 rto_stamp; + u16 total_rto; + u16 total_rto_recoveries; + u32 total_rto_time; + u32 urg_seq; + unsigned int keepalive_time; + unsigned int keepalive_intvl; + int linger2; + u8 bpf_sock_ops_cb_flags; + u8 bpf_chg_cc_inprogress: 1; + u16 timeout_rehash; + u32 rcv_ooopack; + struct { + u32 probe_seq_start; + u32 probe_seq_end; + } mtu_probe; + u32 plb_rehash; + u32 mtu_info; + struct tcp_fastopen_request *fastopen_req; + struct request_sock *fastopen_rsk; + struct saved_syn *saved_syn; + long: 32; +}; + +struct tcp_fastopen_request { + struct tcp_fastopen_cookie cookie; + struct msghdr *data; + size_t size; + int copied; + struct ubuf_info *uarg; +}; + +struct udphdr { + __be16 source; + __be16 dest; + __be16 len; + __sum16 check; +}; + +struct ip6_sf_socklist; + +struct ipv6_mc_socklist { + struct in6_addr addr; + int ifindex; + unsigned int sfmode; + struct ipv6_mc_socklist *next; + struct ip6_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ipv6_ac_socklist { + struct in6_addr acl_addr; + int acl_ifindex; + struct ipv6_ac_socklist *acl_next; +}; + +struct ip6_flowlabel; + +struct ipv6_fl_socklist { + struct ipv6_fl_socklist *next; + struct ip6_flowlabel *fl; + struct callback_head rcu; +}; + +struct ip6_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + struct in6_addr sl_addr[0]; +}; + +struct ip6_flowlabel { + struct ip6_flowlabel *next; + __be32 label; + atomic_t users; + struct in6_addr dst; + struct ipv6_txoptions *opt; + long unsigned int linger; + struct callback_head rcu; + u8 share; + union { + struct pid *pid; + kuid_t uid; + } owner; + long unsigned int lastuse; + long unsigned int expires; + struct net *fl_net; +}; + +struct trace_event_raw_sock_rcvqueue_full { + struct trace_entry ent; + int rmem_alloc; + unsigned int truesize; + int sk_rcvbuf; + char __data[0]; +}; + +struct trace_event_raw_sock_exceed_buf_limit { + struct trace_entry ent; + char name[32]; + long int sysctl_mem[3]; + long int allocated; + int sysctl_rmem; + int rmem_alloc; + int sysctl_wmem; + int wmem_alloc; + int wmem_queued; + int kind; + char __data[0]; +}; + +struct trace_event_raw_inet_sock_set_state { + struct trace_entry ent; + const void *skaddr; + int oldstate; + int newstate; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_inet_sk_error_report { + struct trace_entry ent; + int error; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_sk_data_ready { + struct trace_entry ent; + const void *skaddr; + __u16 family; + __u16 protocol; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_sock_msg_length { + struct trace_entry ent; + void *sk; + __u16 family; + __u16 protocol; + int ret; + int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_sock_rcvqueue_full {}; + +struct trace_event_data_offsets_sock_exceed_buf_limit {}; + +struct trace_event_data_offsets_inet_sock_set_state {}; + +struct 
trace_event_data_offsets_inet_sk_error_report {}; + +struct trace_event_data_offsets_sk_data_ready {}; + +struct trace_event_data_offsets_sock_msg_length {}; + +typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_sock_exceed_buf_limit)(void *, struct sock *, struct proto *, long int, int); + +typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, const int); + +typedef void (*btf_trace_inet_sk_error_report)(void *, const struct sock *); + +typedef void (*btf_trace_sk_data_ready)(void *, const struct sock *); + +typedef void (*btf_trace_sock_send_length)(void *, struct sock *, int, int); + +typedef void (*btf_trace_sock_recv_length)(void *, struct sock *, int, int); + +struct trace_event_raw_udp_fail_queue_rcv_skb { + struct trace_entry ent; + int rc; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; + +typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *, struct sk_buff *); + +struct fib_nh_exception { + struct fib_nh_exception *fnhe_next; + int fnhe_genid; + __be32 fnhe_daddr; + u32 fnhe_pmtu; + bool fnhe_mtu_locked; + __be32 fnhe_gw; + long unsigned int fnhe_expires; + struct rtable *fnhe_rth_input; + struct rtable *fnhe_rth_output; + long unsigned int fnhe_stamp; + struct callback_head rcu; +}; + +struct rtable { + struct dst_entry dst; + int rt_genid; + unsigned int rt_flags; + __u16 rt_type; + __u8 rt_is_input; + __u8 rt_uses_gateway; + int rt_iif; + u8 rt_gw_family; + union { + __be32 rt_gw4; + struct in6_addr rt_gw6; + }; + u32 rt_mtu_locked: 1; + u32 rt_pmtu: 31; +}; + +struct fnhe_hash_bucket { + struct fib_nh_exception *chain; +}; + +struct inet_ehash_bucket { + struct hlist_nulls_head chain; +}; + +struct inet_bind_hashbucket { + spinlock_t lock; + struct hlist_head chain; +}; + +struct inet_listen_hashbucket { + spinlock_t lock; + struct hlist_nulls_head nulls_head; +}; + +struct qdisc_walker { + int stop; + int skip; + int count; + int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *); +}; + +struct tcf_walker { + int stop; + int skip; + int count; + bool nonempty; + long unsigned int cookie; + int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); +}; + +struct tcf_exts { + int action; + int police; +}; + +struct bpf_storage_buffer { + struct callback_head rcu; + char data[0]; +}; + +struct ack_sample { + u32 pkts_acked; + s32 rtt_us; + u32 in_flight; +}; + +struct rate_sample { + u64 prior_mstamp; + u32 prior_delivered; + u32 prior_delivered_ce; + s32 delivered; + s32 delivered_ce; + long int interval_us; + u32 snd_interval_us; + u32 rcv_interval_us; + long int rtt_us; + int losses; + u32 acked_sacked; + u32 prior_in_flight; + u32 last_end_seq; + bool is_app_limited; + bool is_retrans; + bool is_ack_delayed; + long: 32; +}; + +struct trace_event_raw_tcp_event_sk_skb { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_send_reset { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + enum sk_rst_reason reason; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_sk { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 
family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + long: 32; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_raw_tcp_retransmit_synack { + struct trace_entry ent; + const void *skaddr; + const void *req; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_probe { + struct trace_entry ent; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u32 mark; + __u16 data_len; + __u32 snd_nxt; + __u32 snd_una; + __u32 snd_cwnd; + __u32 ssthresh; + __u32 snd_wnd; + __u32 srtt; + __u32 rcv_wnd; + long: 32; + __u64 sock_cookie; + const void *skbaddr; + const void *skaddr; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_skb { + struct trace_entry ent; + const void *skbaddr; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_raw_tcp_cong_state_set { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + __u8 cong_state; + char __data[0]; +}; + +struct trace_event_raw_tcp_hash_event { + struct trace_entry ent; + __u64 net_cookie; + const void *skbaddr; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + int l3index; + __u16 sport; + __u16 dport; + __u16 family; + bool fin; + bool syn; + bool rst; + bool psh; + bool ack; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_tcp_ao_event { + struct trace_entry ent; + __u64 net_cookie; + const void *skbaddr; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + int l3index; + __u16 sport; + __u16 dport; + __u16 family; + bool fin; + bool syn; + bool rst; + bool psh; + bool ack; + __u8 keyid; + __u8 rnext; + __u8 maclen; + char __data[0]; +}; + +struct trace_event_raw_tcp_ao_event_sk { + struct trace_entry ent; + __u64 net_cookie; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u8 keyid; + __u8 rnext; + char __data[0]; +}; + +struct trace_event_raw_tcp_ao_event_sne { + struct trace_entry ent; + __u64 net_cookie; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u32 new_sne; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_tcp_event_sk_skb {}; + +struct trace_event_data_offsets_tcp_send_reset {}; + +struct trace_event_data_offsets_tcp_event_sk {}; + +struct trace_event_data_offsets_tcp_retransmit_synack {}; + +struct trace_event_data_offsets_tcp_probe {}; + +struct trace_event_data_offsets_tcp_event_skb {}; + +struct trace_event_data_offsets_tcp_cong_state_set {}; + +struct trace_event_data_offsets_tcp_hash_event {}; + +struct trace_event_data_offsets_tcp_ao_event {}; + +struct trace_event_data_offsets_tcp_ao_event_sk {}; + +struct trace_event_data_offsets_tcp_ao_event_sne {}; + +typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *, const enum sk_rst_reason); + +typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *); + +typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *); + +typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *); + +typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, const 
struct request_sock *); + +typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_tcp_bad_csum)(void *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_cong_state_set)(void *, struct sock *, const u8); + +typedef void (*btf_trace_tcp_hash_bad_header)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_required)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_unexpected)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_mismatch)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_ao_required)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_ao_handshake_failure)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_wrong_maclen)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_mismatch)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_key_not_found)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_rnext_request)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_synack_no_key)(void *, const struct sock *, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_snd_sne_update)(void *, const struct sock *, __u32); + +typedef void (*btf_trace_tcp_ao_rcv_sne_update)(void *, const struct sock *, __u32); + +struct trace_event_raw_fib_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + u8 proto; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[4]; + __u8 dst[4]; + __u8 gw4[4]; + __u8 gw6[16]; + u16 sport; + u16 dport; + char name[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib_table_lookup {}; + +typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, const struct fib_nh_common *, int); + +struct trace_event_raw_qdisc_dequeue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + int packets; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + long unsigned int txq_state; + char __data[0]; +}; + +struct trace_event_raw_qdisc_enqueue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + char __data[0]; +}; + +struct trace_event_raw_qdisc_reset { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_destroy { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_create { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + char __data[0]; +}; + +struct trace_event_data_offsets_qdisc_dequeue {}; + +struct trace_event_data_offsets_qdisc_enqueue {}; + +struct trace_event_data_offsets_qdisc_reset { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +struct trace_event_data_offsets_qdisc_destroy { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +struct 
trace_event_data_offsets_qdisc_create { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, const struct netdev_queue *, int, struct sk_buff *); + +typedef void (*btf_trace_qdisc_enqueue)(void *, struct Qdisc *, const struct netdev_queue *, struct sk_buff *); + +typedef void (*btf_trace_qdisc_reset)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, struct net_device *, u32); + +struct bridge_stp_xstats { + __u64 transition_blk; + __u64 transition_fwd; + __u64 rx_bpdu; + __u64 tx_bpdu; + __u64 rx_tcn; + __u64 tx_tcn; +}; + +enum { + BR_MCAST_DIR_RX = 0, + BR_MCAST_DIR_TX = 1, + BR_MCAST_DIR_SIZE = 2, +}; + +struct br_mcast_stats { + __u64 igmp_v1queries[2]; + __u64 igmp_v2queries[2]; + __u64 igmp_v3queries[2]; + __u64 igmp_leaves[2]; + __u64 igmp_v1reports[2]; + __u64 igmp_v2reports[2]; + __u64 igmp_v3reports[2]; + __u64 igmp_parse_errors; + __u64 mld_v1queries[2]; + __u64 mld_v2queries[2]; + __u64 mld_leaves[2]; + __u64 mld_v1reports[2]; + __u64 mld_v2reports[2]; + __u64 mld_parse_errors; + __u64 mcast_bytes[2]; + __u64 mcast_packets[2]; +}; + +struct br_ip { + union { + __be32 ip4; + struct in6_addr ip6; + } src; + union { + __be32 ip4; + struct in6_addr ip6; + unsigned char mac_addr[6]; + } dst; + __be16 proto; + __u16 vid; +}; + +struct rt6_exception_bucket { + struct hlist_head chain; + int depth; +}; + +struct rhash_lock_head {}; + +struct bridge_id { + unsigned char prio[2]; + unsigned char addr[6]; +}; + +typedef struct bridge_id bridge_id; + +struct mac_addr { + unsigned char addr[6]; +}; + +typedef struct mac_addr mac_addr; + +typedef __u16 port_id; + +struct bridge_mcast_own_query { + struct timer_list timer; + u32 startup_sent; +}; + +struct bridge_mcast_other_query { + struct timer_list timer; + struct timer_list delay_timer; +}; + +struct bridge_mcast_querier { + struct br_ip addr; + int port_ifidx; + seqcount_spinlock_t seq; +}; + +struct bridge_mcast_stats { + struct br_mcast_stats mstats; + struct u64_stats_sync syncp; + long: 32; +}; + +struct net_bridge; + +struct net_bridge_vlan; + +struct net_bridge_mcast { + struct net_bridge *br; + struct net_bridge_vlan *vlan; + u32 multicast_last_member_count; + u32 multicast_startup_query_count; + u8 multicast_querier; + u8 multicast_igmp_version; + u8 multicast_router; + u8 multicast_mld_version; + long unsigned int multicast_last_member_interval; + long unsigned int multicast_membership_interval; + long unsigned int multicast_querier_interval; + long unsigned int multicast_query_interval; + long unsigned int multicast_query_response_interval; + long unsigned int multicast_startup_query_interval; + struct hlist_head ip4_mc_router_list; + struct timer_list ip4_mc_router_timer; + struct bridge_mcast_other_query ip4_other_query; + struct bridge_mcast_own_query ip4_own_query; + struct bridge_mcast_querier ip4_querier; + struct hlist_head ip6_mc_router_list; + struct timer_list ip6_mc_router_timer; + struct bridge_mcast_other_query ip6_other_query; + struct bridge_mcast_own_query ip6_own_query; + struct bridge_mcast_querier ip6_querier; +}; + +struct net_bridge { + spinlock_t lock; + spinlock_t hash_lock; + struct hlist_head frame_type_list; + struct net_device *dev; + long unsigned int options; + struct rhashtable fdb_hash_tbl; + struct list_head port_list; + union { + struct rtable fake_rtable; + struct rt6_info fake_rt6_info; 
+ }; + u16 group_fwd_mask; + u16 group_fwd_mask_required; + bridge_id designated_root; + bridge_id bridge_id; + unsigned char topology_change; + unsigned char topology_change_detected; + u16 root_port; + long unsigned int max_age; + long unsigned int hello_time; + long unsigned int forward_delay; + long unsigned int ageing_time; + long unsigned int bridge_max_age; + long unsigned int bridge_hello_time; + long unsigned int bridge_forward_delay; + long unsigned int bridge_ageing_time; + u32 root_path_cost; + u8 group_addr[6]; + enum { + BR_NO_STP = 0, + BR_KERNEL_STP = 1, + BR_USER_STP = 2, + } stp_enabled; + struct net_bridge_mcast multicast_ctx; + struct bridge_mcast_stats *mcast_stats; + u32 hash_max; + spinlock_t multicast_lock; + struct rhashtable mdb_hash_tbl; + struct rhashtable sg_port_tbl; + struct hlist_head mcast_gc_list; + struct hlist_head mdb_list; + struct work_struct mcast_gc_work; + struct timer_list hello_timer; + struct timer_list tcn_timer; + struct timer_list topology_change_timer; + struct delayed_work gc_work; + struct kobject *ifobj; + u32 auto_cnt; + atomic_t fdb_n_learned; + u32 fdb_max_learned; + struct hlist_head fdb_list; +}; + +struct net_bridge_port; + +struct net_bridge_mcast_port { + struct net_bridge_port *port; + struct net_bridge_vlan *vlan; + struct bridge_mcast_own_query ip4_own_query; + struct timer_list ip4_mc_router_timer; + struct hlist_node ip4_rlist; + struct bridge_mcast_own_query ip6_own_query; + struct timer_list ip6_mc_router_timer; + struct hlist_node ip6_rlist; + unsigned char multicast_router; + u32 mdb_n_entries; + u32 mdb_max_entries; +}; + +struct net_bridge_port { + struct net_bridge *br; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + long unsigned int flags; + struct net_bridge_port *backup_port; + u32 backup_nhid; + u8 priority; + u8 state; + u16 port_no; + unsigned char topology_change_ack; + unsigned char config_pending; + port_id port_id; + port_id designated_port; + bridge_id designated_root; + bridge_id designated_bridge; + u32 path_cost; + u32 designated_cost; + long unsigned int designated_age; + struct timer_list forward_delay_timer; + struct timer_list hold_timer; + struct timer_list message_age_timer; + struct kobject kobj; + struct callback_head rcu; + struct net_bridge_mcast_port multicast_ctx; + struct bridge_mcast_stats *mcast_stats; + u32 multicast_eht_hosts_limit; + u32 multicast_eht_hosts_cnt; + struct hlist_head mglist; + char sysfs_name[16]; + u16 group_fwd_mask; + u16 backup_redirected_cnt; + long: 32; + struct bridge_stp_xstats stp_xstats; +}; + +struct metadata_dst; + +struct br_tunnel_info { + __be64 tunnel_id; + struct metadata_dst *tunnel_dst; + long: 32; +}; + +struct net_bridge_vlan { + struct rhash_head vnode; + struct rhash_head tnode; + u16 vid; + u16 flags; + u16 priv_flags; + u8 state; + struct pcpu_sw_netstats *stats; + union { + struct net_bridge *br; + struct net_bridge_port *port; + }; + union { + refcount_t refcnt; + struct net_bridge_vlan *brvlan; + }; + long: 32; + struct br_tunnel_info tinfo; + union { + struct net_bridge_mcast br_mcast_ctx; + struct net_bridge_mcast_port port_mcast_ctx; + }; + u16 msti; + struct list_head vlist; + struct callback_head rcu; +}; + +struct net_bridge_fdb_key { + mac_addr addr; + u16 vlan_id; +}; + +struct net_bridge_fdb_entry { + struct rhash_head rhnode; + struct net_bridge_port *dst; + struct net_bridge_fdb_key key; + struct hlist_node fdb_node; + long unsigned int flags; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int updated; + long unsigned int used; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct trace_event_raw_br_fdb_add { + struct trace_entry ent; + u8 ndm_flags; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + u16 nlh_flags; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_external_learn_add { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_fdb_delete { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_update { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_br_mdb_full { + struct trace_entry ent; + u32 __data_loc_dev; + int af; + u16 vid; + __u8 src[16]; + __u8 grp[16]; + __u8 grpmac[6]; + char __data[0]; +}; + +struct trace_event_data_offsets_br_fdb_add { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_fdb_external_learn_add { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_fdb_delete { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_fdb_update { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_mdb_full { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_br_fdb_add)(void *, struct ndmsg *, struct net_device *, const unsigned char *, u16, u16); + +typedef void (*btf_trace_br_fdb_external_learn_add)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16); + +typedef void (*btf_trace_fdb_delete)(void *, struct net_bridge *, struct net_bridge_fdb_entry *); + +typedef void (*btf_trace_br_fdb_update)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16, long unsigned int); + +typedef void (*btf_trace_br_mdb_full)(void *, const struct net_device *, const struct br_ip *); + +enum compact_priority { + COMPACT_PRIO_SYNC_FULL = 0, + MIN_COMPACT_PRIORITY = 0, + COMPACT_PRIO_SYNC_LIGHT = 1, + MIN_COMPACT_COSTLY_PRIORITY = 1, + DEF_COMPACT_PRIORITY = 1, + COMPACT_PRIO_ASYNC = 2, + INIT_COMPACT_PRIORITY = 2, +}; + +enum compact_result { + COMPACT_NOT_SUITABLE_ZONE = 0, + COMPACT_SKIPPED = 1, + COMPACT_DEFERRED = 2, + COMPACT_NO_SUITABLE_PAGE = 3, + COMPACT_CONTINUE = 4, + COMPACT_COMPLETE = 5, + COMPACT_PARTIAL_SKIPPED = 6, + COMPACT_CONTENDED = 7, + COMPACT_SUCCESS = 8, +}; + +struct page_pool_recycle_stats { + u64 cached; + u64 cache_full; + u64 ring; + u64 ring_full; + u64 released_refcnt; +}; + +struct trace_event_raw_page_pool_release { + struct trace_entry ent; + const struct page_pool *pool; + s32 inflight; + u32 hold; + u32 release; + u64 cnt; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_release { + struct trace_entry ent; + const struct page_pool *pool; + long unsigned int netmem; + u32 release; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_hold { + struct trace_entry ent; + const struct page_pool *pool; + long unsigned int netmem; + u32 hold; + long 
unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_update_nid { + struct trace_entry ent; + const struct page_pool *pool; + int pool_nid; + int new_nid; + char __data[0]; +}; + +struct trace_event_data_offsets_page_pool_release {}; + +struct trace_event_data_offsets_page_pool_state_release {}; + +struct trace_event_data_offsets_page_pool_state_hold {}; + +struct trace_event_data_offsets_page_pool_update_nid {}; + +typedef void (*btf_trace_page_pool_release)(void *, const struct page_pool *, s32, u32, u32); + +typedef void (*btf_trace_page_pool_state_release)(void *, const struct page_pool *, netmem_ref, u32); + +typedef void (*btf_trace_page_pool_state_hold)(void *, const struct page_pool *, netmem_ref, u32); + +typedef void (*btf_trace_page_pool_update_nid)(void *, const struct page_pool *, int); + +struct trace_event_raw_neigh_create { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + int entries; + u8 created; + u8 gc_exempt; + u8 primary_key4[4]; + u8 primary_key6[16]; + char __data[0]; +}; + +struct trace_event_raw_neigh_update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u8 new_lladdr[32]; + u8 new_state; + u32 update_flags; + u32 pid; + char __data[0]; +}; + +struct trace_event_raw_neigh__update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u32 err; + char __data[0]; +}; + +struct trace_event_data_offsets_neigh_create { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_neigh_update { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_neigh__update { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, const void *, const struct neighbour *, bool); + +typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32); + +typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int); + +struct nf_hook_state; + +typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); + +struct nf_hook_entry { + nf_hookfn *hook; + void *priv; +}; + +struct nf_hook_entries { + u16 num_hook_entries; + struct nf_hook_entry hooks[0]; +}; + +struct netlink_kernel_cfg { + unsigned int groups; + unsigned int flags; + void (*input)(struct sk_buff *); + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, long unsigned int *); +}; + +enum { + NLA_UNSPEC = 0, + NLA_U8 = 1, + NLA_U16 = 2, + NLA_U32 = 3, + NLA_U64 = 4, + NLA_STRING = 5, + NLA_FLAG = 6, + NLA_MSECS = 7, + NLA_NESTED = 8, + NLA_NESTED_ARRAY = 9, + NLA_NUL_STRING = 10, + NLA_BINARY = 11, + NLA_S8 = 12, + NLA_S16 = 13, + NLA_S32 = 14, + 
NLA_S64 = 15, + NLA_BITFIELD32 = 16, + NLA_REJECT = 17, + NLA_BE16 = 18, + NLA_BE32 = 19, + NLA_SINT = 20, + NLA_UINT = 21, + __NLA_TYPE_MAX = 22, +}; + +enum netlink_validation { + NL_VALIDATE_LIBERAL = 0, + NL_VALIDATE_TRAILING = 1, + NL_VALIDATE_MAXTYPE = 2, + NL_VALIDATE_UNSPEC = 4, + NL_VALIDATE_STRICT_ATTRS = 8, + NL_VALIDATE_NESTED = 16, +}; + +struct nf_hook_state { + u8 hook; + u8 pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + struct net *net; + int (*okfn)(struct net *, struct sock *, struct sk_buff *); +}; + +enum nfnetlink_groups { + NFNLGRP_NONE = 0, + NFNLGRP_CONNTRACK_NEW = 1, + NFNLGRP_CONNTRACK_UPDATE = 2, + NFNLGRP_CONNTRACK_DESTROY = 3, + NFNLGRP_CONNTRACK_EXP_NEW = 4, + NFNLGRP_CONNTRACK_EXP_UPDATE = 5, + NFNLGRP_CONNTRACK_EXP_DESTROY = 6, + NFNLGRP_NFTABLES = 7, + NFNLGRP_ACCT_QUOTA = 8, + NFNLGRP_NFTRACE = 9, + __NFNLGRP_MAX = 10, +}; + +struct nfgenmsg { + __u8 nfgen_family; + __u8 version; + __be16 res_id; +}; + +enum nfnl_batch_attributes { + NFNL_BATCH_UNSPEC = 0, + NFNL_BATCH_GENID = 1, + __NFNL_BATCH_MAX = 2, +}; + +struct nfnl_info { + struct net *net; + struct sock *sk; + const struct nlmsghdr *nlh; + const struct nfgenmsg *nfmsg; + struct netlink_ext_ack *extack; +}; + +enum nfnl_callback_type { + NFNL_CB_UNSPEC = 0, + NFNL_CB_MUTEX = 1, + NFNL_CB_RCU = 2, + NFNL_CB_BATCH = 3, +}; + +struct nfnl_callback { + int (*call)(struct sk_buff *, const struct nfnl_info *, const struct nlattr * const *); + const struct nla_policy *policy; + enum nfnl_callback_type type; + __u16 attr_count; +}; + +enum nfnl_abort_action { + NFNL_ABORT_NONE = 0, + NFNL_ABORT_AUTOLOAD = 1, + NFNL_ABORT_VALIDATE = 2, +}; + +struct nfnetlink_subsystem { + const char *name; + __u8 subsys_id; + __u8 cb_count; + const struct nfnl_callback *cb; + struct module *owner; + int (*commit)(struct net *, struct sk_buff *); + int (*abort)(struct net *, struct sk_buff *, enum nfnl_abort_action); + bool (*valid_genid)(struct net *, u32); +}; + +struct nfnl_net { + struct sock *nfnl; +}; + +struct nfnl_err { + struct list_head head; + struct nlmsghdr *nlh; + int err; + struct netlink_ext_ack extack; +}; + +enum { + NFNL_BATCH_FAILURE = 1, + NFNL_BATCH_DONE = 2, + NFNL_BATCH_REPLAY = 4, +}; + +typedef u16 u_int16_t; + +typedef u32 u_int32_t; + +typedef u64 u_int64_t; + +enum ip_conntrack_info { + IP_CT_ESTABLISHED = 0, + IP_CT_RELATED = 1, + IP_CT_NEW = 2, + IP_CT_IS_REPLY = 3, + IP_CT_ESTABLISHED_REPLY = 3, + IP_CT_RELATED_REPLY = 4, + IP_CT_NUMBER = 5, + IP_CT_UNTRACKED = 7, +}; + +struct nf_conntrack { + refcount_t use; +}; + +union nf_inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +struct ip_ct_tcp_state { + u_int32_t td_end; + u_int32_t td_maxend; + u_int32_t td_maxwin; + u_int32_t td_maxack; + u_int8_t td_scale; + u_int8_t flags; +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; + u_int8_t state; + u_int8_t last_dir; + u_int8_t retrans; + u_int8_t last_index; + u_int32_t last_seq; + u_int32_t last_ack; + u_int32_t last_end; + u_int16_t last_win; + u_int8_t last_wscale; + u_int8_t last_flags; +}; + +struct ip_tunnel_parm_kern { + char name[16]; + long unsigned int i_flags[1]; + long unsigned int o_flags[1]; + __be32 i_key; + __be32 o_key; + int link; + struct iphdr iph; +}; + +enum netdev_cmd { + NETDEV_UP = 1, + NETDEV_DOWN = 2, + NETDEV_REBOOT = 3, + NETDEV_CHANGE = 4, + NETDEV_REGISTER = 5, + NETDEV_UNREGISTER = 6, + NETDEV_CHANGEMTU = 7, + NETDEV_CHANGEADDR = 8, + NETDEV_PRE_CHANGEADDR = 9, + 
NETDEV_GOING_DOWN = 10, + NETDEV_CHANGENAME = 11, + NETDEV_FEAT_CHANGE = 12, + NETDEV_BONDING_FAILOVER = 13, + NETDEV_PRE_UP = 14, + NETDEV_PRE_TYPE_CHANGE = 15, + NETDEV_POST_TYPE_CHANGE = 16, + NETDEV_POST_INIT = 17, + NETDEV_PRE_UNINIT = 18, + NETDEV_RELEASE = 19, + NETDEV_NOTIFY_PEERS = 20, + NETDEV_JOIN = 21, + NETDEV_CHANGEUPPER = 22, + NETDEV_RESEND_IGMP = 23, + NETDEV_PRECHANGEMTU = 24, + NETDEV_CHANGEINFODATA = 25, + NETDEV_BONDING_INFO = 26, + NETDEV_PRECHANGEUPPER = 27, + NETDEV_CHANGELOWERSTATE = 28, + NETDEV_UDP_TUNNEL_PUSH_INFO = 29, + NETDEV_UDP_TUNNEL_DROP_INFO = 30, + NETDEV_CHANGE_TX_QUEUE_LEN = 31, + NETDEV_CVLAN_FILTER_PUSH_INFO = 32, + NETDEV_CVLAN_FILTER_DROP_INFO = 33, + NETDEV_SVLAN_FILTER_PUSH_INFO = 34, + NETDEV_SVLAN_FILTER_DROP_INFO = 35, + NETDEV_OFFLOAD_XSTATS_ENABLE = 36, + NETDEV_OFFLOAD_XSTATS_DISABLE = 37, + NETDEV_OFFLOAD_XSTATS_REPORT_USED = 38, + NETDEV_OFFLOAD_XSTATS_REPORT_DELTA = 39, + NETDEV_XDP_FEAT_CHANGE = 40, +}; + +struct netdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +enum rt_scope_t { + RT_SCOPE_UNIVERSE = 0, + RT_SCOPE_SITE = 200, + RT_SCOPE_LINK = 253, + RT_SCOPE_HOST = 254, + RT_SCOPE_NOWHERE = 255, +}; + +union nf_conntrack_man_proto { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; +}; + +struct nf_conntrack_man { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + u_int16_t l3num; +}; + +struct nf_conntrack_tuple { + struct nf_conntrack_man src; + struct { + union nf_inet_addr u3; + union { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + u_int8_t type; + u_int8_t code; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; + } u; + u_int8_t protonum; + struct {} __nfct_hash_offsetend; + u_int8_t dir; + } dst; +}; + +struct nf_conntrack_tuple_hash { + struct hlist_nulls_node hnnode; + struct nf_conntrack_tuple tuple; +}; + +struct nf_ct_dccp { + u_int8_t role[2]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; + +enum sctp_conntrack { + SCTP_CONNTRACK_NONE = 0, + SCTP_CONNTRACK_CLOSED = 1, + SCTP_CONNTRACK_COOKIE_WAIT = 2, + SCTP_CONNTRACK_COOKIE_ECHOED = 3, + SCTP_CONNTRACK_ESTABLISHED = 4, + SCTP_CONNTRACK_SHUTDOWN_SENT = 5, + SCTP_CONNTRACK_SHUTDOWN_RECD = 6, + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, + SCTP_CONNTRACK_HEARTBEAT_SENT = 8, + SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, + SCTP_CONNTRACK_MAX = 10, +}; + +struct ip_ct_sctp { + enum sctp_conntrack state; + __be32 vtag[2]; + u8 init[2]; + u8 last_dir; + u8 flags; +}; + +struct nf_ct_udp { + long unsigned int stream_ts; +}; + +struct nf_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +union nf_conntrack_proto { + struct nf_ct_dccp dccp; + struct ip_ct_sctp sctp; + struct ip_ct_tcp tcp; + struct nf_ct_udp udp; + struct nf_ct_gre gre; + unsigned int tmpl_padto; +}; + +struct nf_ct_ext; + +struct nf_conn { + struct nf_conntrack ct_general; + spinlock_t lock; + u32 timeout; + struct nf_conntrack_tuple_hash tuplehash[2]; + long unsigned int status; + possible_net_t ct_net; + struct hlist_node nat_bysource; + struct {} __nfct_init_offset; + struct nf_conn *master; + u_int32_t mark; + struct nf_ct_ext *ext; + union nf_conntrack_proto proto; +}; + +enum nf_nat_manip_type { + NF_NAT_MANIP_SRC = 0, + 
NF_NAT_MANIP_DST = 1, +}; + +enum ip_conntrack_dir { + IP_CT_DIR_ORIGINAL = 0, + IP_CT_DIR_REPLY = 1, + IP_CT_DIR_MAX = 2, +}; + +struct inet6_ifaddr { + struct in6_addr addr; + __u32 prefix_len; + __u32 rt_priority; + __u32 valid_lft; + __u32 prefered_lft; + refcount_t refcnt; + spinlock_t lock; + int state; + __u32 flags; + __u8 dad_probes; + __u8 stable_privacy_retry; + __u16 scope; + long: 32; + __u64 dad_nonce; + long unsigned int cstamp; + long unsigned int tstamp; + struct delayed_work dad_work; + struct inet6_dev *idev; + struct fib6_info *rt; + struct hlist_node addr_lst; + struct list_head if_list; + struct list_head if_list_aux; + struct list_head tmp_list; + struct inet6_ifaddr *ifpub; + int regen_count; + bool tokenized; + u8 ifa_proto; + struct callback_head rcu; + struct in6_addr peer_addr; +}; + +struct fib_info; + +struct fib_nh { + struct fib_nh_common nh_common; + struct hlist_node nh_hash; + struct fib_info *nh_parent; + __be32 nh_saddr; + int nh_saddr_genid; +}; + +struct fib_info { + struct hlist_node fib_hash; + struct hlist_node fib_lhash; + struct list_head nh_list; + struct net *fib_net; + refcount_t fib_treeref; + refcount_t fib_clntref; + unsigned int fib_flags; + unsigned char fib_dead; + unsigned char fib_protocol; + unsigned char fib_scope; + unsigned char fib_type; + __be32 fib_prefsrc; + u32 fib_tb_id; + u32 fib_priority; + struct dst_metrics *fib_metrics; + int fib_nhs; + bool fib_nh_is_v6; + bool nh_updated; + bool pfsrc_removed; + struct nexthop *nh; + struct callback_head rcu; + struct fib_nh fib_nh[0]; +}; + +struct nh_info; + +struct nh_group; + +struct nexthop { + struct rb_node rb_node; + struct list_head fi_list; + struct list_head f6i_list; + struct list_head fdb_list; + struct list_head grp_list; + struct net *net; + u32 id; + u8 protocol; + u8 nh_flags; + bool is_group; + refcount_t refcnt; + struct callback_head rcu; + union { + struct nh_info *nh_info; + struct nh_group *nh_grp; + }; +}; + +enum { + IP_TUNNEL_CSUM_BIT = 0, + IP_TUNNEL_ROUTING_BIT = 1, + IP_TUNNEL_KEY_BIT = 2, + IP_TUNNEL_SEQ_BIT = 3, + IP_TUNNEL_STRICT_BIT = 4, + IP_TUNNEL_REC_BIT = 5, + IP_TUNNEL_VERSION_BIT = 6, + IP_TUNNEL_NO_KEY_BIT = 7, + IP_TUNNEL_DONT_FRAGMENT_BIT = 8, + IP_TUNNEL_OAM_BIT = 9, + IP_TUNNEL_CRIT_OPT_BIT = 10, + IP_TUNNEL_GENEVE_OPT_BIT = 11, + IP_TUNNEL_VXLAN_OPT_BIT = 12, + IP_TUNNEL_NOCACHE_BIT = 13, + IP_TUNNEL_ERSPAN_OPT_BIT = 14, + IP_TUNNEL_GTP_OPT_BIT = 15, + IP_TUNNEL_VTI_BIT = 16, + IP_TUNNEL_SIT_ISATAP_BIT = 16, + IP_TUNNEL_PFCP_OPT_BIT = 17, + __IP_TUNNEL_FLAG_NUM = 18, +}; + +struct nh_info { + struct hlist_node dev_hash; + struct nexthop *nh_parent; + u8 family; + bool reject_nh; + bool fdb_nh; + union { + struct fib_nh_common fib_nhc; + struct fib_nh fib_nh; + struct fib6_nh fib6_nh; + }; +}; + +struct nh_grp_entry; + +struct nh_res_bucket { + struct nh_grp_entry *nh_entry; + atomic_long_t used_time; + long unsigned int migrated_time; + bool occupied; + u8 nh_flags; +}; + +struct nh_grp_entry_stats; + +struct nh_grp_entry { + struct nexthop *nh; + struct nh_grp_entry_stats *stats; + u16 weight; + union { + struct { + atomic_t upper_bound; + } hthr; + struct { + struct list_head uw_nh_entry; + u16 count_buckets; + u16 wants_buckets; + } res; + }; + struct list_head nh_list; + struct nexthop *nh_parent; + long: 32; + u64 packets_hw; +}; + +struct nh_res_table { + struct net *net; + u32 nhg_id; + struct delayed_work upkeep_dw; + struct list_head uw_nh_entries; + long unsigned int unbalanced_since; + u32 idle_timer; + u32 unbalanced_timer; + u16 
num_nh_buckets; + struct nh_res_bucket nh_buckets[0]; +}; + +struct nh_grp_entry_stats { + u64_stats_t packets; + struct u64_stats_sync syncp; + long: 32; +}; + +struct nh_group { + struct nh_group *spare; + u16 num_nh; + bool is_multipath; + bool hash_threshold; + bool resilient; + bool fdb_nh; + bool has_v4; + bool hw_stats; + struct nh_res_table *res_table; + struct nh_grp_entry nh_entries[0]; +}; + +struct nf_ct_ext { + u8 offset[4]; + u8 len; + unsigned int gen_id; + long: 32; + char data[0]; +}; + +struct nf_ct_iter_data { + struct net *net; + void *data; + u32 portid; + int report; +}; + +enum nf_ct_ext_id { + NF_CT_EXT_HELPER = 0, + NF_CT_EXT_NAT = 1, + NF_CT_EXT_SEQADJ = 2, + NF_CT_EXT_ACCT = 3, + NF_CT_EXT_NUM = 4, +}; + +struct nf_nat_range2 { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; + union nf_conntrack_man_proto base_proto; +}; + +union nf_conntrack_nat_help {}; + +struct nf_conn_nat { + union nf_conntrack_nat_help help; + int masq_index; +}; + +struct masq_dev_work { + struct work_struct work; + struct net *net; + netns_tracker ns_tracker; + union nf_inet_addr addr; + int ifindex; + int (*iter)(struct nf_conn *, void *); +}; + +enum nf_hook_ops_type { + NF_HOOK_OP_UNDEFINED = 0, + NF_HOOK_OP_NF_TABLES = 1, + NF_HOOK_OP_BPF = 2, +}; + +struct nf_hook_ops { + nf_hookfn *hook; + struct net_device *dev; + void *priv; + u8 pf; + enum nf_hook_ops_type hook_ops_type: 8; + unsigned int hooknum; + int priority; +}; + +enum { + RTN_UNSPEC = 0, + RTN_UNICAST = 1, + RTN_LOCAL = 2, + RTN_BROADCAST = 3, + RTN_ANYCAST = 4, + RTN_MULTICAST = 5, + RTN_BLACKHOLE = 6, + RTN_UNREACHABLE = 7, + RTN_PROHIBIT = 8, + RTN_THROW = 9, + RTN_NAT = 10, + RTN_XRESOLVE = 11, + __RTN_MAX = 12, +}; + +enum nft_registers { + NFT_REG_VERDICT = 0, + NFT_REG_1 = 1, + NFT_REG_2 = 2, + NFT_REG_3 = 3, + NFT_REG_4 = 4, + __NFT_REG_MAX = 5, + NFT_REG32_00 = 8, + NFT_REG32_01 = 9, + NFT_REG32_02 = 10, + NFT_REG32_03 = 11, + NFT_REG32_04 = 12, + NFT_REG32_05 = 13, + NFT_REG32_06 = 14, + NFT_REG32_07 = 15, + NFT_REG32_08 = 16, + NFT_REG32_09 = 17, + NFT_REG32_10 = 18, + NFT_REG32_11 = 19, + NFT_REG32_12 = 20, + NFT_REG32_13 = 21, + NFT_REG32_14 = 22, + NFT_REG32_15 = 23, +}; + +enum l2tp_debug_flags { + L2TP_MSG_DEBUG = 1, + L2TP_MSG_CONTROL = 2, + L2TP_MSG_SEQ = 4, + L2TP_MSG_DATA = 8, +}; + +enum { + NFT_PKTINFO_L4PROTO = 1, + NFT_PKTINFO_INNER = 2, + NFT_PKTINFO_INNER_FULL = 4, +}; + +struct nft_pktinfo { + struct sk_buff *skb; + const struct nf_hook_state *state; + u8 flags; + u8 tprot; + u16 fragoff; + u16 thoff; + u16 inneroff; +}; + +enum nft_set_extensions { + NFT_SET_EXT_KEY = 0, + NFT_SET_EXT_KEY_END = 1, + NFT_SET_EXT_DATA = 2, + NFT_SET_EXT_FLAGS = 3, + NFT_SET_EXT_TIMEOUT = 4, + NFT_SET_EXT_USERDATA = 5, + NFT_SET_EXT_EXPRESSIONS = 6, + NFT_SET_EXT_OBJREF = 7, + NFT_SET_EXT_NUM = 8, +}; + +enum nft_chain_types { + NFT_CHAIN_T_DEFAULT = 0, + NFT_CHAIN_T_ROUTE = 1, + NFT_CHAIN_T_NAT = 2, + NFT_CHAIN_T_MAX = 3, +}; + +struct nft_chain_type { + const char *name; + enum nft_chain_types type; + int family; + struct module *owner; + unsigned int hook_mask; + nf_hookfn *hooks[6]; + int (*ops_register)(struct net *, const struct nf_hook_ops *); + void (*ops_unregister)(struct net *, const struct nf_hook_ops *); +}; + +enum sock_flags { + SOCK_DEAD = 0, + SOCK_DONE = 1, + SOCK_URGINLINE = 2, + SOCK_KEEPOPEN = 3, + SOCK_LINGER = 4, + SOCK_DESTROY = 5, + SOCK_BROADCAST = 6, + SOCK_TIMESTAMP = 
7, + SOCK_ZAPPED = 8, + SOCK_USE_WRITE_QUEUE = 9, + SOCK_DBG = 10, + SOCK_RCVTSTAMP = 11, + SOCK_RCVTSTAMPNS = 12, + SOCK_LOCALROUTE = 13, + SOCK_MEMALLOC = 14, + SOCK_TIMESTAMPING_RX_SOFTWARE = 15, + SOCK_FASYNC = 16, + SOCK_RXQ_OVFL = 17, + SOCK_ZEROCOPY = 18, + SOCK_WIFI_STATUS = 19, + SOCK_NOFCS = 20, + SOCK_FILTER_LOCKED = 21, + SOCK_SELECT_ERR_QUEUE = 22, + SOCK_RCU_FREE = 23, + SOCK_TXTIME = 24, + SOCK_XDP = 25, + SOCK_TSTAMP_NEW = 26, + SOCK_RCVMARK = 27, +}; + +enum { + INET_FLAGS_PKTINFO = 0, + INET_FLAGS_TTL = 1, + INET_FLAGS_TOS = 2, + INET_FLAGS_RECVOPTS = 3, + INET_FLAGS_RETOPTS = 4, + INET_FLAGS_PASSSEC = 5, + INET_FLAGS_ORIGDSTADDR = 6, + INET_FLAGS_CHECKSUM = 7, + INET_FLAGS_RECVFRAGSIZE = 8, + INET_FLAGS_RECVERR = 9, + INET_FLAGS_RECVERR_RFC4884 = 10, + INET_FLAGS_FREEBIND = 11, + INET_FLAGS_HDRINCL = 12, + INET_FLAGS_MC_LOOP = 13, + INET_FLAGS_MC_ALL = 14, + INET_FLAGS_TRANSPARENT = 15, + INET_FLAGS_IS_ICSK = 16, + INET_FLAGS_NODEFRAG = 17, + INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, + INET_FLAGS_DEFER_CONNECT = 19, + INET_FLAGS_MC6_LOOP = 20, + INET_FLAGS_RECVERR6_RFC4884 = 21, + INET_FLAGS_MC6_ALL = 22, + INET_FLAGS_AUTOFLOWLABEL_SET = 23, + INET_FLAGS_AUTOFLOWLABEL = 24, + INET_FLAGS_DONTFRAG = 25, + INET_FLAGS_RECVERR6 = 26, + INET_FLAGS_REPFLOW = 27, + INET_FLAGS_RTALERT_ISOLATE = 28, + INET_FLAGS_SNDFLOW = 29, + INET_FLAGS_RTALERT = 30, +}; + +struct tcp_request_sock_ops; + +struct tcp_request_sock { + struct inet_request_sock req; + const struct tcp_request_sock_ops *af_specific; + long: 32; + u64 snt_synack; + bool tfo_listener; + bool is_mptcp; + bool req_usec_ts; + u32 txhash; + u32 rcv_isn; + u32 snt_isn; + u32 ts_off; + u32 last_oow_ack_time; + u32 rcv_nxt; + u8 syn_tos; +}; + +enum tcp_synack_type { + TCP_SYNACK_NORMAL = 0, + TCP_SYNACK_FASTOPEN = 1, + TCP_SYNACK_COOKIE = 2, +}; + +struct tcp_request_sock_ops { + u16 mss_clamp; + __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *); + struct dst_entry * (*route_req)(const struct sock *, struct sk_buff *, struct flowi *, struct request_sock *, u32); + u32 (*init_seq)(const struct sk_buff *); + u32 (*init_ts_off)(const struct net *, const struct sk_buff *); + int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type, struct sk_buff *); +}; + +struct udp_hslot; + +struct udp_hslot_main; + +struct udp_table { + struct udp_hslot *hash; + struct udp_hslot_main *hash2; + struct udp_hslot *hash4; + unsigned int mask; + unsigned int log; +}; + +struct udp_hslot { + union { + struct hlist_head head; + struct hlist_nulls_head nulls_head; + }; + int count; + spinlock_t lock; + long: 32; +}; + +struct udp_hslot_main { + struct udp_hslot hslot; + u32 hash4_cnt; + long: 32; +}; + +enum netevent_notif_type { + NETEVENT_NEIGH_UPDATE = 1, + NETEVENT_REDIRECT = 2, + NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, + NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, + NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, + NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, +}; + +struct seg6_pernet_data { + struct mutex lock; + struct in6_addr *tun_src; +}; + +typedef union { + __be32 a4; + __be32 a6[4]; + struct in6_addr in6; +} xfrm_address_t; + +struct xfrm_id { + xfrm_address_t daddr; + __be32 spi; + __u8 proto; +}; + +struct xfrm_sec_ctx { + __u8 ctx_doi; + __u8 ctx_alg; + __u16 ctx_len; + __u32 ctx_sid; + char ctx_str[0]; +}; + +struct xfrm_selector { + xfrm_address_t daddr; + xfrm_address_t saddr; + __be16 dport; + __be16 dport_mask; + __be16 sport; + __be16 sport_mask; + 
__u16 family; + __u8 prefixlen_d; + __u8 prefixlen_s; + __u8 proto; + int ifindex; + __kernel_uid32_t user; +}; + +struct xfrm_lifetime_cfg { + __u64 soft_byte_limit; + __u64 hard_byte_limit; + __u64 soft_packet_limit; + __u64 hard_packet_limit; + __u64 soft_add_expires_seconds; + __u64 hard_add_expires_seconds; + __u64 soft_use_expires_seconds; + __u64 hard_use_expires_seconds; +}; + +struct xfrm_lifetime_cur { + __u64 bytes; + __u64 packets; + __u64 add_time; + __u64 use_time; +}; + +struct xfrm_replay_state { + __u32 oseq; + __u32 seq; + __u32 bitmap; +}; + +struct xfrm_replay_state_esn { + unsigned int bmp_len; + __u32 oseq; + __u32 seq; + __u32 oseq_hi; + __u32 seq_hi; + __u32 replay_window; + __u32 bmp[0]; +}; + +struct xfrm_algo { + char alg_name[64]; + unsigned int alg_key_len; + char alg_key[0]; +}; + +struct xfrm_algo_auth { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_trunc_len; + char alg_key[0]; +}; + +struct xfrm_algo_aead { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_icv_len; + char alg_key[0]; +}; + +struct xfrm_stats { + __u32 replay_window; + __u32 replay; + __u32 integrity_failed; +}; + +enum { + XFRM_POLICY_TYPE_MAIN = 0, + XFRM_POLICY_TYPE_SUB = 1, + XFRM_POLICY_TYPE_MAX = 2, + XFRM_POLICY_TYPE_ANY = 255, +}; + +enum { + XFRM_MSG_BASE = 16, + XFRM_MSG_NEWSA = 16, + XFRM_MSG_DELSA = 17, + XFRM_MSG_GETSA = 18, + XFRM_MSG_NEWPOLICY = 19, + XFRM_MSG_DELPOLICY = 20, + XFRM_MSG_GETPOLICY = 21, + XFRM_MSG_ALLOCSPI = 22, + XFRM_MSG_ACQUIRE = 23, + XFRM_MSG_EXPIRE = 24, + XFRM_MSG_UPDPOLICY = 25, + XFRM_MSG_UPDSA = 26, + XFRM_MSG_POLEXPIRE = 27, + XFRM_MSG_FLUSHSA = 28, + XFRM_MSG_FLUSHPOLICY = 29, + XFRM_MSG_NEWAE = 30, + XFRM_MSG_GETAE = 31, + XFRM_MSG_REPORT = 32, + XFRM_MSG_MIGRATE = 33, + XFRM_MSG_NEWSADINFO = 34, + XFRM_MSG_GETSADINFO = 35, + XFRM_MSG_NEWSPDINFO = 36, + XFRM_MSG_GETSPDINFO = 37, + XFRM_MSG_MAPPING = 38, + XFRM_MSG_SETDEFAULT = 39, + XFRM_MSG_GETDEFAULT = 40, + __XFRM_MSG_MAX = 41, +}; + +struct xfrm_encap_tmpl { + __u16 encap_type; + __be16 encap_sport; + __be16 encap_dport; + xfrm_address_t encap_oa; +}; + +enum xfrm_attr_type_t { + XFRMA_UNSPEC = 0, + XFRMA_ALG_AUTH = 1, + XFRMA_ALG_CRYPT = 2, + XFRMA_ALG_COMP = 3, + XFRMA_ENCAP = 4, + XFRMA_TMPL = 5, + XFRMA_SA = 6, + XFRMA_POLICY = 7, + XFRMA_SEC_CTX = 8, + XFRMA_LTIME_VAL = 9, + XFRMA_REPLAY_VAL = 10, + XFRMA_REPLAY_THRESH = 11, + XFRMA_ETIMER_THRESH = 12, + XFRMA_SRCADDR = 13, + XFRMA_COADDR = 14, + XFRMA_LASTUSED = 15, + XFRMA_POLICY_TYPE = 16, + XFRMA_MIGRATE = 17, + XFRMA_ALG_AEAD = 18, + XFRMA_KMADDRESS = 19, + XFRMA_ALG_AUTH_TRUNC = 20, + XFRMA_MARK = 21, + XFRMA_TFCPAD = 22, + XFRMA_REPLAY_ESN_VAL = 23, + XFRMA_SA_EXTRA_FLAGS = 24, + XFRMA_PROTO = 25, + XFRMA_ADDRESS_FILTER = 26, + XFRMA_PAD = 27, + XFRMA_OFFLOAD_DEV = 28, + XFRMA_SET_MARK = 29, + XFRMA_SET_MARK_MASK = 30, + XFRMA_IF_ID = 31, + XFRMA_MTIMER_THRESH = 32, + XFRMA_SA_DIR = 33, + XFRMA_NAT_KEEPALIVE_INTERVAL = 34, + XFRMA_SA_PCPU = 35, + __XFRMA_MAX = 36, +}; + +struct xfrm_mark { + __u32 v; + __u32 m; +}; + +struct xfrm_address_filter { + xfrm_address_t saddr; + xfrm_address_t daddr; + __u16 family; + __u8 splen; + __u8 dplen; +}; + +enum { + SKB_GSO_TCPV4 = 1, + SKB_GSO_DODGY = 2, + SKB_GSO_TCP_ECN = 4, + SKB_GSO_TCP_FIXEDID = 8, + SKB_GSO_TCPV6 = 16, + SKB_GSO_FCOE = 32, + SKB_GSO_GRE = 64, + SKB_GSO_GRE_CSUM = 128, + SKB_GSO_IPXIP4 = 256, + SKB_GSO_IPXIP6 = 512, + SKB_GSO_UDP_TUNNEL = 1024, + SKB_GSO_UDP_TUNNEL_CSUM = 2048, + SKB_GSO_PARTIAL = 4096, + SKB_GSO_TUNNEL_REMCSUM = 
8192, + SKB_GSO_SCTP = 16384, + SKB_GSO_ESP = 32768, + SKB_GSO_UDP = 65536, + SKB_GSO_UDP_L4 = 131072, + SKB_GSO_FRAGLIST = 262144, +}; + +struct seq_net_private { + struct net *net; + netns_tracker ns_tracker; +}; + +enum sock_type { + SOCK_STREAM = 1, + SOCK_DGRAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +struct offload_callbacks { + struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t); + struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sk_buff *, int); +}; + +struct packet_offload { + __be16 type; + u16 priority; + struct offload_callbacks callbacks; + struct list_head list; +}; + +enum { + SOF_TIMESTAMPING_TX_HARDWARE = 1, + SOF_TIMESTAMPING_TX_SOFTWARE = 2, + SOF_TIMESTAMPING_RX_HARDWARE = 4, + SOF_TIMESTAMPING_RX_SOFTWARE = 8, + SOF_TIMESTAMPING_SOFTWARE = 16, + SOF_TIMESTAMPING_SYS_HARDWARE = 32, + SOF_TIMESTAMPING_RAW_HARDWARE = 64, + SOF_TIMESTAMPING_OPT_ID = 128, + SOF_TIMESTAMPING_TX_SCHED = 256, + SOF_TIMESTAMPING_TX_ACK = 512, + SOF_TIMESTAMPING_OPT_CMSG = 1024, + SOF_TIMESTAMPING_OPT_TSONLY = 2048, + SOF_TIMESTAMPING_OPT_STATS = 4096, + SOF_TIMESTAMPING_OPT_PKTINFO = 8192, + SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, + SOF_TIMESTAMPING_BIND_PHC = 32768, + SOF_TIMESTAMPING_OPT_ID_TCP = 65536, + SOF_TIMESTAMPING_OPT_RX_FILTER = 131072, + SOF_TIMESTAMPING_LAST = 131072, + SOF_TIMESTAMPING_MASK = 262143, +}; + +struct raw_hashinfo { + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct hlist_head ht[256]; +}; + +struct sockcm_cookie { + u64 transmit_time; + u32 mark; + u32 tsflags; + u32 ts_opt_id; + long: 32; +}; + +enum { + UDP_FLAGS_CORK = 0, + UDP_FLAGS_NO_CHECK6_TX = 1, + UDP_FLAGS_NO_CHECK6_RX = 2, + UDP_FLAGS_GRO_ENABLED = 3, + UDP_FLAGS_ACCEPT_FRAGLIST = 4, + UDP_FLAGS_ACCEPT_L4 = 5, + UDP_FLAGS_ENCAP_ENABLED = 6, + UDP_FLAGS_UDPLITE_SEND_CC = 7, + UDP_FLAGS_UDPLITE_RECV_CC = 8, +}; + +struct udp_sock { + struct inet_sock inet; + long unsigned int udp_flags; + int pending; + __u8 encap_type; + __u16 udp_lrpa_hash; + struct hlist_nulls_node udp_lrpa_node; + __u16 len; + __u16 gso_size; + __u16 pcslen; + __u16 pcrlen; + int (*encap_rcv)(struct sock *, struct sk_buff *); + void (*encap_err_rcv)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*encap_err_lookup)(struct sock *, struct sk_buff *); + void (*encap_destroy)(struct sock *); + struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sock *, struct sk_buff *, int); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sk_buff_head reader_queue; + int forward_deficit; + int forward_threshold; + bool peeking_with_offset; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct inet6_skb_parm { + int iif; + __be16 ra; + __u16 dst0; + __u16 srcrt; + __u16 dst1; + __u16 lastopt; + __u16 nhoff; + __u16 flags; + __u16 frag_max_size; + __u16 srhoff; +}; + +struct static_key_false_deferred { + struct static_key_false key; + long unsigned int timeout; + struct delayed_work work; +}; + +struct ipcm6_cookie { + struct sockcm_cookie sockc; + __s16 hlimit; + __s16 tclass; + __u16 gso_size; + __s8 dontfrag; + struct ipv6_txoptions *opt; + long: 32; +}; + +struct 
net_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, u32); + unsigned int no_policy: 1; + unsigned int icmp_strict_tag_validation: 1; + u32 secret; +}; + +struct inet6_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); + unsigned int flags; + u32 secret; +}; + +struct net_offload { + struct offload_callbacks callbacks; + unsigned int flags; + u32 secret; +}; + +struct inet_protosw { + struct list_head list; + short unsigned int type; + short unsigned int protocol; + struct proto *prot; + const struct proto_ops *ops; + unsigned char flags; +}; + +struct ipv6_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u8 first_segment; + __u8 flags; + __u16 tag; + struct in6_addr segments[0]; +}; + +struct inet_skb_parm { + int iif; + struct ip_options opt; + u16 flags; + u16 frag_max_size; +}; + +struct ip_tunnel_encap { + u16 type; + u16 flags; + __be16 sport; + __be16 dport; +}; + +struct ip6_tnl_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); +}; + +struct xfrm_state_walk { + struct list_head all; + u8 state; + u8 dying; + u8 proto; + u32 seq; + struct xfrm_address_filter *filter; +}; + +struct xfrm_dev_offload { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net_device *real_dev; + long unsigned int offload_handle; + u8 dir: 2; + u8 type: 2; + u8 flags: 2; +}; + +struct xfrm_mode { + u8 encap; + u8 family; + u8 flags; +}; + +enum xfrm_replay_mode { + XFRM_REPLAY_MODE_LEGACY = 0, + XFRM_REPLAY_MODE_BMP = 1, + XFRM_REPLAY_MODE_ESN = 2, +}; + +struct xfrm_type; + +struct xfrm_type_offload; + +struct xfrm_state { + possible_net_t xs_net; + union { + struct hlist_node gclist; + struct hlist_node bydst; + }; + union { + struct hlist_node dev_gclist; + struct hlist_node bysrc; + }; + struct hlist_node byspi; + struct hlist_node byseq; + struct hlist_node state_cache; + struct hlist_node state_cache_input; + refcount_t refcnt; + spinlock_t lock; + u32 pcpu_num; + struct xfrm_id id; + struct xfrm_selector sel; + struct xfrm_mark mark; + u32 if_id; + u32 tfcpad; + u32 genid; + struct xfrm_state_walk km; + struct { + u32 reqid; + u8 mode; + u8 replay_window; + u8 aalgo; + u8 ealgo; + u8 calgo; + u8 flags; + u16 family; + xfrm_address_t saddr; + int header_len; + int trailer_len; + u32 extra_flags; + struct xfrm_mark smark; + } props; + struct xfrm_lifetime_cfg lft; + struct xfrm_algo_auth *aalg; + struct xfrm_algo *ealg; + struct xfrm_algo *calg; + struct xfrm_algo_aead *aead; + const char *geniv; + __be16 new_mapping_sport; + u32 new_mapping; + u32 mapping_maxage; + struct xfrm_encap_tmpl *encap; + struct sock *encap_sk; + u32 nat_keepalive_interval; + long: 32; + time64_t nat_keepalive_expiration; + xfrm_address_t *coaddr; + struct xfrm_state *tunnel; + atomic_t tunnel_users; + struct xfrm_replay_state replay; + struct xfrm_replay_state_esn *replay_esn; + struct xfrm_replay_state preplay; + struct xfrm_replay_state_esn *preplay_esn; + enum xfrm_replay_mode repl_mode; + u32 xflags; + u32 replay_maxage; + u32 replay_maxdiff; + struct timer_list rtimer; + struct xfrm_stats stats; + long: 32; + struct xfrm_lifetime_cur curlft; + struct hrtimer mtimer; + struct xfrm_dev_offload xso; + long int saved_tmo; + long: 32; + time64_t lastused; + struct page_frag 
xfrag; + const struct xfrm_type *type; + struct xfrm_mode inner_mode; + struct xfrm_mode inner_mode_iaf; + struct xfrm_mode outer_mode; + const struct xfrm_type_offload *type_offload; + struct xfrm_sec_ctx *security; + void *data; + u8 dir; +}; + +struct xfrm_type { + struct module *owner; + u8 proto; + u8 flags; + int (*init_state)(struct xfrm_state *, struct netlink_ext_ack *); + void (*destructor)(struct xfrm_state *); + int (*input)(struct xfrm_state *, struct sk_buff *); + int (*output)(struct xfrm_state *, struct sk_buff *); + int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); +}; + +struct xfrm_type_offload { + struct module *owner; + u8 proto; + void (*encap)(struct xfrm_state *, struct sk_buff *); + int (*input_tail)(struct xfrm_state *, struct sk_buff *); + int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t); +}; + +typedef u32 inet6_ehashfn_t(const struct net *, const struct in6_addr *, const u16, const struct in6_addr *, const __be16); + +struct udp_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + __u16 cscov; + __u8 partial_cov; +}; + +struct udp_seq_afinfo { + sa_family_t family; + struct udp_table *udp_table; +}; + +struct udp_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct rps_sock_flow_table; + +struct net_hotdata { + struct packet_offload ip_packet_offload; + struct net_offload tcpv4_offload; + struct net_protocol tcp_protocol; + struct net_offload udpv4_offload; + struct net_protocol udp_protocol; + struct packet_offload ipv6_packet_offload; + struct net_offload tcpv6_offload; + struct inet6_protocol tcpv6_protocol; + struct inet6_protocol udpv6_protocol; + struct net_offload udpv6_offload; + struct list_head offload_base; + struct list_head ptype_all; + struct kmem_cache *skbuff_cache; + struct kmem_cache *skbuff_fclone_cache; + struct kmem_cache *skb_small_head_cache; + struct rps_sock_flow_table *rps_sock_flow_table; + u32 rps_cpu_mask; + int gro_normal_batch; + int netdev_budget; + int netdev_budget_usecs; + int tstamp_prequeue; + int max_backlog; + int dev_tx_weight; + int dev_rx_weight; + int sysctl_max_skb_frags; + int sysctl_skb_defer_max; + int sysctl_mem_pcpu_rsv; +}; + +struct napi_gro_cb { + union { + struct { + void *frag0; + unsigned int frag0_len; + }; + struct { + struct sk_buff *last; + long unsigned int age; + }; + }; + int data_offset; + u16 flush; + u16 count; + u16 proto; + u16 pad; + union { + struct { + u16 gro_remcsum_start; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 ip_fixedid: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + }; + struct { + u16 gro_remcsum_start; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 ip_fixedid: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + } zeroed; + }; + __wsum csum; + union { + struct { + u16 network_offset; + u16 inner_network_offset; + }; + u16 network_offsets[2]; + }; +}; + +struct netlink_skb_parms { + struct scm_creds creds; + __u32 portid; + __u32 dst_group; + __u32 flags; + struct sock *sk; + bool nsid_is_set; + int nsid; +}; + +typedef int (*netlink_filter_fn)(struct sock *, struct sk_buff *, void *); + +struct netlink_notify { + struct net *net; + u32 portid; + int protocol; +}; + +enum netdev_state_t { + __LINK_STATE_START = 0, + __LINK_STATE_PRESENT = 1, + __LINK_STATE_NOCARRIER = 2, + __LINK_STATE_LINKWATCH_PENDING = 3, + 
__LINK_STATE_DORMANT = 4, + __LINK_STATE_TESTING = 5, +}; + +struct ieee80211_s1g_cap { + u8 capab_info[10]; + u8 supp_mcs_nss[5]; +}; + +enum ieee80211_vht_mcs_support { + IEEE80211_VHT_MCS_SUPPORT_0_7 = 0, + IEEE80211_VHT_MCS_SUPPORT_0_8 = 1, + IEEE80211_VHT_MCS_SUPPORT_0_9 = 2, + IEEE80211_VHT_MCS_NOT_SUPPORTED = 3, +}; + +enum ieee80211_he_mcs_support { + IEEE80211_HE_MCS_SUPPORT_0_7 = 0, + IEEE80211_HE_MCS_SUPPORT_0_9 = 1, + IEEE80211_HE_MCS_SUPPORT_0_11 = 2, + IEEE80211_HE_MCS_NOT_SUPPORTED = 3, +}; + +struct ieee80211_he_operation { + __le32 he_oper_params; + __le16 he_mcs_nss_set; + u8 optional[0]; +} __attribute__((packed)); + +struct ieee80211_eht_cap_elem { + struct ieee80211_eht_cap_elem_fixed fixed; + u8 optional[0]; +}; + +struct ieee80211_eht_operation { + u8 params; + struct ieee80211_eht_mcs_nss_supp_20mhz_only basic_mcs_nss; + u8 optional[0]; +}; + +enum ieee80211_statuscode { + WLAN_STATUS_SUCCESS = 0, + WLAN_STATUS_UNSPECIFIED_FAILURE = 1, + WLAN_STATUS_CAPS_UNSUPPORTED = 10, + WLAN_STATUS_REASSOC_NO_ASSOC = 11, + WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12, + WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13, + WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14, + WLAN_STATUS_CHALLENGE_FAIL = 15, + WLAN_STATUS_AUTH_TIMEOUT = 16, + WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17, + WLAN_STATUS_ASSOC_DENIED_RATES = 18, + WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19, + WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20, + WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21, + WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22, + WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23, + WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24, + WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25, + WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26, + WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30, + WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31, + WLAN_STATUS_INVALID_IE = 40, + WLAN_STATUS_INVALID_GROUP_CIPHER = 41, + WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42, + WLAN_STATUS_INVALID_AKMP = 43, + WLAN_STATUS_UNSUPP_RSN_VERSION = 44, + WLAN_STATUS_INVALID_RSN_IE_CAP = 45, + WLAN_STATUS_CIPHER_SUITE_REJECTED = 46, + WLAN_STATUS_UNSPECIFIED_QOS = 32, + WLAN_STATUS_ASSOC_DENIED_NOBANDWIDTH = 33, + WLAN_STATUS_ASSOC_DENIED_LOWACK = 34, + WLAN_STATUS_ASSOC_DENIED_UNSUPP_QOS = 35, + WLAN_STATUS_REQUEST_DECLINED = 37, + WLAN_STATUS_INVALID_QOS_PARAM = 38, + WLAN_STATUS_CHANGE_TSPEC = 39, + WLAN_STATUS_WAIT_TS_DELAY = 47, + WLAN_STATUS_NO_DIRECT_LINK = 48, + WLAN_STATUS_STA_NOT_PRESENT = 49, + WLAN_STATUS_STA_NOT_QSTA = 50, + WLAN_STATUS_ANTI_CLOG_REQUIRED = 76, + WLAN_STATUS_FCG_NOT_SUPP = 78, + WLAN_STATUS_STA_NO_TBTT = 78, + WLAN_STATUS_REJECTED_WITH_SUGGESTED_CHANGES = 39, + WLAN_STATUS_REJECTED_FOR_DELAY_PERIOD = 47, + WLAN_STATUS_REJECT_WITH_SCHEDULE = 83, + WLAN_STATUS_PENDING_ADMITTING_FST_SESSION = 86, + WLAN_STATUS_PERFORMING_FST_NOW = 87, + WLAN_STATUS_PENDING_GAP_IN_BA_WINDOW = 88, + WLAN_STATUS_REJECT_U_PID_SETTING = 89, + WLAN_STATUS_REJECT_DSE_BAND = 96, + WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99, + WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103, + WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108, + WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109, + WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126, + WLAN_STATUS_SAE_PK = 127, + WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING = 133, + WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED = 134, +}; + +enum ieee80211_reasoncode { + WLAN_REASON_UNSPECIFIED = 1, + WLAN_REASON_PREV_AUTH_NOT_VALID = 2, + WLAN_REASON_DEAUTH_LEAVING = 3, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4, + WLAN_REASON_DISASSOC_AP_BUSY = 5, + 
WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6, + WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7, + WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8, + WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9, + WLAN_REASON_DISASSOC_BAD_POWER = 10, + WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11, + WLAN_REASON_INVALID_IE = 13, + WLAN_REASON_MIC_FAILURE = 14, + WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15, + WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16, + WLAN_REASON_IE_DIFFERENT = 17, + WLAN_REASON_INVALID_GROUP_CIPHER = 18, + WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19, + WLAN_REASON_INVALID_AKMP = 20, + WLAN_REASON_UNSUPP_RSN_VERSION = 21, + WLAN_REASON_INVALID_RSN_IE_CAP = 22, + WLAN_REASON_IEEE8021X_FAILED = 23, + WLAN_REASON_CIPHER_SUITE_REJECTED = 24, + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE = 25, + WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED = 26, + WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32, + WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33, + WLAN_REASON_DISASSOC_LOW_ACK = 34, + WLAN_REASON_DISASSOC_QAP_EXCEED_TXOP = 35, + WLAN_REASON_QSTA_LEAVE_QBSS = 36, + WLAN_REASON_QSTA_NOT_USE = 37, + WLAN_REASON_QSTA_REQUIRE_SETUP = 38, + WLAN_REASON_QSTA_TIMEOUT = 39, + WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, + WLAN_REASON_MESH_PEER_CANCELED = 52, + WLAN_REASON_MESH_MAX_PEERS = 53, + WLAN_REASON_MESH_CONFIG = 54, + WLAN_REASON_MESH_CLOSE = 55, + WLAN_REASON_MESH_MAX_RETRIES = 56, + WLAN_REASON_MESH_CONFIRM_TIMEOUT = 57, + WLAN_REASON_MESH_INVALID_GTK = 58, + WLAN_REASON_MESH_INCONSISTENT_PARAM = 59, + WLAN_REASON_MESH_INVALID_SECURITY = 60, + WLAN_REASON_MESH_PATH_ERROR = 61, + WLAN_REASON_MESH_PATH_NOFORWARD = 62, + WLAN_REASON_MESH_PATH_DEST_UNREACHABLE = 63, + WLAN_REASON_MAC_EXISTS_IN_MBSS = 64, + WLAN_REASON_MESH_CHAN_REGULATORY = 65, + WLAN_REASON_MESH_CHAN = 66, +}; + +enum ieee80211_eid_ext { + WLAN_EID_EXT_ASSOC_DELAY_INFO = 1, + WLAN_EID_EXT_FILS_REQ_PARAMS = 2, + WLAN_EID_EXT_FILS_KEY_CONFIRM = 3, + WLAN_EID_EXT_FILS_SESSION = 4, + WLAN_EID_EXT_FILS_HLP_CONTAINER = 5, + WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6, + WLAN_EID_EXT_KEY_DELIVERY = 7, + WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, + WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, + WLAN_EID_EXT_FILS_NONCE = 13, + WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14, + WLAN_EID_EXT_HE_CAPABILITY = 35, + WLAN_EID_EXT_HE_OPERATION = 36, + WLAN_EID_EXT_UORA = 37, + WLAN_EID_EXT_HE_MU_EDCA = 38, + WLAN_EID_EXT_HE_SPR = 39, + WLAN_EID_EXT_NDP_FEEDBACK_REPORT_PARAMSET = 41, + WLAN_EID_EXT_BSS_COLOR_CHG_ANN = 42, + WLAN_EID_EXT_QUIET_TIME_PERIOD_SETUP = 43, + WLAN_EID_EXT_ESS_REPORT = 45, + WLAN_EID_EXT_OPS = 46, + WLAN_EID_EXT_HE_BSS_LOAD = 47, + WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52, + WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55, + WLAN_EID_EXT_NON_INHERITANCE = 56, + WLAN_EID_EXT_KNOWN_BSSID = 57, + WLAN_EID_EXT_SHORT_SSID_LIST = 58, + WLAN_EID_EXT_HE_6GHZ_CAPA = 59, + WLAN_EID_EXT_UL_MU_POWER_CAPA = 60, + WLAN_EID_EXT_EHT_OPERATION = 106, + WLAN_EID_EXT_EHT_MULTI_LINK = 107, + WLAN_EID_EXT_EHT_CAPABILITY = 108, + WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109, + WLAN_EID_EXT_BANDWIDTH_INDICATION = 135, +}; + +enum ieee80211_key_len { + WLAN_KEY_LEN_WEP40 = 5, + WLAN_KEY_LEN_WEP104 = 13, + WLAN_KEY_LEN_CCMP = 16, + WLAN_KEY_LEN_CCMP_256 = 32, + WLAN_KEY_LEN_TKIP = 32, + WLAN_KEY_LEN_AES_CMAC = 16, + WLAN_KEY_LEN_SMS4 = 32, + WLAN_KEY_LEN_GCMP = 16, + WLAN_KEY_LEN_GCMP_256 = 32, + WLAN_KEY_LEN_BIP_CMAC_256 = 32, + WLAN_KEY_LEN_BIP_GMAC_128 = 16, + WLAN_KEY_LEN_BIP_GMAC_256 = 32, +}; + +enum ieee80211_mesh_sync_method { + IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1, + IEEE80211_SYNC_METHOD_VENDOR = 255, +}; + +enum 
ieee80211_mesh_path_protocol { + IEEE80211_PATH_PROTOCOL_HWMP = 1, + IEEE80211_PATH_PROTOCOL_VENDOR = 255, +}; + +enum ieee80211_mesh_path_metric { + IEEE80211_PATH_METRIC_AIRTIME = 1, + IEEE80211_PATH_METRIC_VENDOR = 255, +}; + +struct element { + u8 id; + u8 datalen; + u8 data[0]; +}; + +enum nl80211_commands { + NL80211_CMD_UNSPEC = 0, + NL80211_CMD_GET_WIPHY = 1, + NL80211_CMD_SET_WIPHY = 2, + NL80211_CMD_NEW_WIPHY = 3, + NL80211_CMD_DEL_WIPHY = 4, + NL80211_CMD_GET_INTERFACE = 5, + NL80211_CMD_SET_INTERFACE = 6, + NL80211_CMD_NEW_INTERFACE = 7, + NL80211_CMD_DEL_INTERFACE = 8, + NL80211_CMD_GET_KEY = 9, + NL80211_CMD_SET_KEY = 10, + NL80211_CMD_NEW_KEY = 11, + NL80211_CMD_DEL_KEY = 12, + NL80211_CMD_GET_BEACON = 13, + NL80211_CMD_SET_BEACON = 14, + NL80211_CMD_START_AP = 15, + NL80211_CMD_NEW_BEACON = 15, + NL80211_CMD_STOP_AP = 16, + NL80211_CMD_DEL_BEACON = 16, + NL80211_CMD_GET_STATION = 17, + NL80211_CMD_SET_STATION = 18, + NL80211_CMD_NEW_STATION = 19, + NL80211_CMD_DEL_STATION = 20, + NL80211_CMD_GET_MPATH = 21, + NL80211_CMD_SET_MPATH = 22, + NL80211_CMD_NEW_MPATH = 23, + NL80211_CMD_DEL_MPATH = 24, + NL80211_CMD_SET_BSS = 25, + NL80211_CMD_SET_REG = 26, + NL80211_CMD_REQ_SET_REG = 27, + NL80211_CMD_GET_MESH_CONFIG = 28, + NL80211_CMD_SET_MESH_CONFIG = 29, + NL80211_CMD_SET_MGMT_EXTRA_IE = 30, + NL80211_CMD_GET_REG = 31, + NL80211_CMD_GET_SCAN = 32, + NL80211_CMD_TRIGGER_SCAN = 33, + NL80211_CMD_NEW_SCAN_RESULTS = 34, + NL80211_CMD_SCAN_ABORTED = 35, + NL80211_CMD_REG_CHANGE = 36, + NL80211_CMD_AUTHENTICATE = 37, + NL80211_CMD_ASSOCIATE = 38, + NL80211_CMD_DEAUTHENTICATE = 39, + NL80211_CMD_DISASSOCIATE = 40, + NL80211_CMD_MICHAEL_MIC_FAILURE = 41, + NL80211_CMD_REG_BEACON_HINT = 42, + NL80211_CMD_JOIN_IBSS = 43, + NL80211_CMD_LEAVE_IBSS = 44, + NL80211_CMD_TESTMODE = 45, + NL80211_CMD_CONNECT = 46, + NL80211_CMD_ROAM = 47, + NL80211_CMD_DISCONNECT = 48, + NL80211_CMD_SET_WIPHY_NETNS = 49, + NL80211_CMD_GET_SURVEY = 50, + NL80211_CMD_NEW_SURVEY_RESULTS = 51, + NL80211_CMD_SET_PMKSA = 52, + NL80211_CMD_DEL_PMKSA = 53, + NL80211_CMD_FLUSH_PMKSA = 54, + NL80211_CMD_REMAIN_ON_CHANNEL = 55, + NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 56, + NL80211_CMD_SET_TX_BITRATE_MASK = 57, + NL80211_CMD_REGISTER_FRAME = 58, + NL80211_CMD_REGISTER_ACTION = 58, + NL80211_CMD_FRAME = 59, + NL80211_CMD_ACTION = 59, + NL80211_CMD_FRAME_TX_STATUS = 60, + NL80211_CMD_ACTION_TX_STATUS = 60, + NL80211_CMD_SET_POWER_SAVE = 61, + NL80211_CMD_GET_POWER_SAVE = 62, + NL80211_CMD_SET_CQM = 63, + NL80211_CMD_NOTIFY_CQM = 64, + NL80211_CMD_SET_CHANNEL = 65, + NL80211_CMD_SET_WDS_PEER = 66, + NL80211_CMD_FRAME_WAIT_CANCEL = 67, + NL80211_CMD_JOIN_MESH = 68, + NL80211_CMD_LEAVE_MESH = 69, + NL80211_CMD_UNPROT_DEAUTHENTICATE = 70, + NL80211_CMD_UNPROT_DISASSOCIATE = 71, + NL80211_CMD_NEW_PEER_CANDIDATE = 72, + NL80211_CMD_GET_WOWLAN = 73, + NL80211_CMD_SET_WOWLAN = 74, + NL80211_CMD_START_SCHED_SCAN = 75, + NL80211_CMD_STOP_SCHED_SCAN = 76, + NL80211_CMD_SCHED_SCAN_RESULTS = 77, + NL80211_CMD_SCHED_SCAN_STOPPED = 78, + NL80211_CMD_SET_REKEY_OFFLOAD = 79, + NL80211_CMD_PMKSA_CANDIDATE = 80, + NL80211_CMD_TDLS_OPER = 81, + NL80211_CMD_TDLS_MGMT = 82, + NL80211_CMD_UNEXPECTED_FRAME = 83, + NL80211_CMD_PROBE_CLIENT = 84, + NL80211_CMD_REGISTER_BEACONS = 85, + NL80211_CMD_UNEXPECTED_4ADDR_FRAME = 86, + NL80211_CMD_SET_NOACK_MAP = 87, + NL80211_CMD_CH_SWITCH_NOTIFY = 88, + NL80211_CMD_START_P2P_DEVICE = 89, + NL80211_CMD_STOP_P2P_DEVICE = 90, + NL80211_CMD_CONN_FAILED = 91, + NL80211_CMD_SET_MCAST_RATE = 92, + 
NL80211_CMD_SET_MAC_ACL = 93, + NL80211_CMD_RADAR_DETECT = 94, + NL80211_CMD_GET_PROTOCOL_FEATURES = 95, + NL80211_CMD_UPDATE_FT_IES = 96, + NL80211_CMD_FT_EVENT = 97, + NL80211_CMD_CRIT_PROTOCOL_START = 98, + NL80211_CMD_CRIT_PROTOCOL_STOP = 99, + NL80211_CMD_GET_COALESCE = 100, + NL80211_CMD_SET_COALESCE = 101, + NL80211_CMD_CHANNEL_SWITCH = 102, + NL80211_CMD_VENDOR = 103, + NL80211_CMD_SET_QOS_MAP = 104, + NL80211_CMD_ADD_TX_TS = 105, + NL80211_CMD_DEL_TX_TS = 106, + NL80211_CMD_GET_MPP = 107, + NL80211_CMD_JOIN_OCB = 108, + NL80211_CMD_LEAVE_OCB = 109, + NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 110, + NL80211_CMD_TDLS_CHANNEL_SWITCH = 111, + NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH = 112, + NL80211_CMD_WIPHY_REG_CHANGE = 113, + NL80211_CMD_ABORT_SCAN = 114, + NL80211_CMD_START_NAN = 115, + NL80211_CMD_STOP_NAN = 116, + NL80211_CMD_ADD_NAN_FUNCTION = 117, + NL80211_CMD_DEL_NAN_FUNCTION = 118, + NL80211_CMD_CHANGE_NAN_CONFIG = 119, + NL80211_CMD_NAN_MATCH = 120, + NL80211_CMD_SET_MULTICAST_TO_UNICAST = 121, + NL80211_CMD_UPDATE_CONNECT_PARAMS = 122, + NL80211_CMD_SET_PMK = 123, + NL80211_CMD_DEL_PMK = 124, + NL80211_CMD_PORT_AUTHORIZED = 125, + NL80211_CMD_RELOAD_REGDB = 126, + NL80211_CMD_EXTERNAL_AUTH = 127, + NL80211_CMD_STA_OPMODE_CHANGED = 128, + NL80211_CMD_CONTROL_PORT_FRAME = 129, + NL80211_CMD_GET_FTM_RESPONDER_STATS = 130, + NL80211_CMD_PEER_MEASUREMENT_START = 131, + NL80211_CMD_PEER_MEASUREMENT_RESULT = 132, + NL80211_CMD_PEER_MEASUREMENT_COMPLETE = 133, + NL80211_CMD_NOTIFY_RADAR = 134, + NL80211_CMD_UPDATE_OWE_INFO = 135, + NL80211_CMD_PROBE_MESH_LINK = 136, + NL80211_CMD_SET_TID_CONFIG = 137, + NL80211_CMD_UNPROT_BEACON = 138, + NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS = 139, + NL80211_CMD_SET_SAR_SPECS = 140, + NL80211_CMD_OBSS_COLOR_COLLISION = 141, + NL80211_CMD_COLOR_CHANGE_REQUEST = 142, + NL80211_CMD_COLOR_CHANGE_STARTED = 143, + NL80211_CMD_COLOR_CHANGE_ABORTED = 144, + NL80211_CMD_COLOR_CHANGE_COMPLETED = 145, + NL80211_CMD_SET_FILS_AAD = 146, + NL80211_CMD_ASSOC_COMEBACK = 147, + NL80211_CMD_ADD_LINK = 148, + NL80211_CMD_REMOVE_LINK = 149, + NL80211_CMD_ADD_LINK_STA = 150, + NL80211_CMD_MODIFY_LINK_STA = 151, + NL80211_CMD_REMOVE_LINK_STA = 152, + NL80211_CMD_SET_HW_TIMESTAMP = 153, + NL80211_CMD_LINKS_REMOVED = 154, + NL80211_CMD_SET_TID_TO_LINK_MAPPING = 155, + __NL80211_CMD_AFTER_LAST = 156, + NL80211_CMD_MAX = 155, +}; + +enum nl80211_attrs { + NL80211_ATTR_UNSPEC = 0, + NL80211_ATTR_WIPHY = 1, + NL80211_ATTR_WIPHY_NAME = 2, + NL80211_ATTR_IFINDEX = 3, + NL80211_ATTR_IFNAME = 4, + NL80211_ATTR_IFTYPE = 5, + NL80211_ATTR_MAC = 6, + NL80211_ATTR_KEY_DATA = 7, + NL80211_ATTR_KEY_IDX = 8, + NL80211_ATTR_KEY_CIPHER = 9, + NL80211_ATTR_KEY_SEQ = 10, + NL80211_ATTR_KEY_DEFAULT = 11, + NL80211_ATTR_BEACON_INTERVAL = 12, + NL80211_ATTR_DTIM_PERIOD = 13, + NL80211_ATTR_BEACON_HEAD = 14, + NL80211_ATTR_BEACON_TAIL = 15, + NL80211_ATTR_STA_AID = 16, + NL80211_ATTR_STA_FLAGS = 17, + NL80211_ATTR_STA_LISTEN_INTERVAL = 18, + NL80211_ATTR_STA_SUPPORTED_RATES = 19, + NL80211_ATTR_STA_VLAN = 20, + NL80211_ATTR_STA_INFO = 21, + NL80211_ATTR_WIPHY_BANDS = 22, + NL80211_ATTR_MNTR_FLAGS = 23, + NL80211_ATTR_MESH_ID = 24, + NL80211_ATTR_STA_PLINK_ACTION = 25, + NL80211_ATTR_MPATH_NEXT_HOP = 26, + NL80211_ATTR_MPATH_INFO = 27, + NL80211_ATTR_BSS_CTS_PROT = 28, + NL80211_ATTR_BSS_SHORT_PREAMBLE = 29, + NL80211_ATTR_BSS_SHORT_SLOT_TIME = 30, + NL80211_ATTR_HT_CAPABILITY = 31, + NL80211_ATTR_SUPPORTED_IFTYPES = 32, + NL80211_ATTR_REG_ALPHA2 = 33, + NL80211_ATTR_REG_RULES = 34, + 
NL80211_ATTR_MESH_CONFIG = 35, + NL80211_ATTR_BSS_BASIC_RATES = 36, + NL80211_ATTR_WIPHY_TXQ_PARAMS = 37, + NL80211_ATTR_WIPHY_FREQ = 38, + NL80211_ATTR_WIPHY_CHANNEL_TYPE = 39, + NL80211_ATTR_KEY_DEFAULT_MGMT = 40, + NL80211_ATTR_MGMT_SUBTYPE = 41, + NL80211_ATTR_IE = 42, + NL80211_ATTR_MAX_NUM_SCAN_SSIDS = 43, + NL80211_ATTR_SCAN_FREQUENCIES = 44, + NL80211_ATTR_SCAN_SSIDS = 45, + NL80211_ATTR_GENERATION = 46, + NL80211_ATTR_BSS = 47, + NL80211_ATTR_REG_INITIATOR = 48, + NL80211_ATTR_REG_TYPE = 49, + NL80211_ATTR_SUPPORTED_COMMANDS = 50, + NL80211_ATTR_FRAME = 51, + NL80211_ATTR_SSID = 52, + NL80211_ATTR_AUTH_TYPE = 53, + NL80211_ATTR_REASON_CODE = 54, + NL80211_ATTR_KEY_TYPE = 55, + NL80211_ATTR_MAX_SCAN_IE_LEN = 56, + NL80211_ATTR_CIPHER_SUITES = 57, + NL80211_ATTR_FREQ_BEFORE = 58, + NL80211_ATTR_FREQ_AFTER = 59, + NL80211_ATTR_FREQ_FIXED = 60, + NL80211_ATTR_WIPHY_RETRY_SHORT = 61, + NL80211_ATTR_WIPHY_RETRY_LONG = 62, + NL80211_ATTR_WIPHY_FRAG_THRESHOLD = 63, + NL80211_ATTR_WIPHY_RTS_THRESHOLD = 64, + NL80211_ATTR_TIMED_OUT = 65, + NL80211_ATTR_USE_MFP = 66, + NL80211_ATTR_STA_FLAGS2 = 67, + NL80211_ATTR_CONTROL_PORT = 68, + NL80211_ATTR_TESTDATA = 69, + NL80211_ATTR_PRIVACY = 70, + NL80211_ATTR_DISCONNECTED_BY_AP = 71, + NL80211_ATTR_STATUS_CODE = 72, + NL80211_ATTR_CIPHER_SUITES_PAIRWISE = 73, + NL80211_ATTR_CIPHER_SUITE_GROUP = 74, + NL80211_ATTR_WPA_VERSIONS = 75, + NL80211_ATTR_AKM_SUITES = 76, + NL80211_ATTR_REQ_IE = 77, + NL80211_ATTR_RESP_IE = 78, + NL80211_ATTR_PREV_BSSID = 79, + NL80211_ATTR_KEY = 80, + NL80211_ATTR_KEYS = 81, + NL80211_ATTR_PID = 82, + NL80211_ATTR_4ADDR = 83, + NL80211_ATTR_SURVEY_INFO = 84, + NL80211_ATTR_PMKID = 85, + NL80211_ATTR_MAX_NUM_PMKIDS = 86, + NL80211_ATTR_DURATION = 87, + NL80211_ATTR_COOKIE = 88, + NL80211_ATTR_WIPHY_COVERAGE_CLASS = 89, + NL80211_ATTR_TX_RATES = 90, + NL80211_ATTR_FRAME_MATCH = 91, + NL80211_ATTR_ACK = 92, + NL80211_ATTR_PS_STATE = 93, + NL80211_ATTR_CQM = 94, + NL80211_ATTR_LOCAL_STATE_CHANGE = 95, + NL80211_ATTR_AP_ISOLATE = 96, + NL80211_ATTR_WIPHY_TX_POWER_SETTING = 97, + NL80211_ATTR_WIPHY_TX_POWER_LEVEL = 98, + NL80211_ATTR_TX_FRAME_TYPES = 99, + NL80211_ATTR_RX_FRAME_TYPES = 100, + NL80211_ATTR_FRAME_TYPE = 101, + NL80211_ATTR_CONTROL_PORT_ETHERTYPE = 102, + NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT = 103, + NL80211_ATTR_SUPPORT_IBSS_RSN = 104, + NL80211_ATTR_WIPHY_ANTENNA_TX = 105, + NL80211_ATTR_WIPHY_ANTENNA_RX = 106, + NL80211_ATTR_MCAST_RATE = 107, + NL80211_ATTR_OFFCHANNEL_TX_OK = 108, + NL80211_ATTR_BSS_HT_OPMODE = 109, + NL80211_ATTR_KEY_DEFAULT_TYPES = 110, + NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION = 111, + NL80211_ATTR_MESH_SETUP = 112, + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX = 113, + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX = 114, + NL80211_ATTR_SUPPORT_MESH_AUTH = 115, + NL80211_ATTR_STA_PLINK_STATE = 116, + NL80211_ATTR_WOWLAN_TRIGGERS = 117, + NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED = 118, + NL80211_ATTR_SCHED_SCAN_INTERVAL = 119, + NL80211_ATTR_INTERFACE_COMBINATIONS = 120, + NL80211_ATTR_SOFTWARE_IFTYPES = 121, + NL80211_ATTR_REKEY_DATA = 122, + NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS = 123, + NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN = 124, + NL80211_ATTR_SCAN_SUPP_RATES = 125, + NL80211_ATTR_HIDDEN_SSID = 126, + NL80211_ATTR_IE_PROBE_RESP = 127, + NL80211_ATTR_IE_ASSOC_RESP = 128, + NL80211_ATTR_STA_WME = 129, + NL80211_ATTR_SUPPORT_AP_UAPSD = 130, + NL80211_ATTR_ROAM_SUPPORT = 131, + NL80211_ATTR_SCHED_SCAN_MATCH = 132, + NL80211_ATTR_MAX_MATCH_SETS = 133, + NL80211_ATTR_PMKSA_CANDIDATE = 134, + 
NL80211_ATTR_TX_NO_CCK_RATE = 135, + NL80211_ATTR_TDLS_ACTION = 136, + NL80211_ATTR_TDLS_DIALOG_TOKEN = 137, + NL80211_ATTR_TDLS_OPERATION = 138, + NL80211_ATTR_TDLS_SUPPORT = 139, + NL80211_ATTR_TDLS_EXTERNAL_SETUP = 140, + NL80211_ATTR_DEVICE_AP_SME = 141, + NL80211_ATTR_DONT_WAIT_FOR_ACK = 142, + NL80211_ATTR_FEATURE_FLAGS = 143, + NL80211_ATTR_PROBE_RESP_OFFLOAD = 144, + NL80211_ATTR_PROBE_RESP = 145, + NL80211_ATTR_DFS_REGION = 146, + NL80211_ATTR_DISABLE_HT = 147, + NL80211_ATTR_HT_CAPABILITY_MASK = 148, + NL80211_ATTR_NOACK_MAP = 149, + NL80211_ATTR_INACTIVITY_TIMEOUT = 150, + NL80211_ATTR_RX_SIGNAL_DBM = 151, + NL80211_ATTR_BG_SCAN_PERIOD = 152, + NL80211_ATTR_WDEV = 153, + NL80211_ATTR_USER_REG_HINT_TYPE = 154, + NL80211_ATTR_CONN_FAILED_REASON = 155, + NL80211_ATTR_AUTH_DATA = 156, + NL80211_ATTR_VHT_CAPABILITY = 157, + NL80211_ATTR_SCAN_FLAGS = 158, + NL80211_ATTR_CHANNEL_WIDTH = 159, + NL80211_ATTR_CENTER_FREQ1 = 160, + NL80211_ATTR_CENTER_FREQ2 = 161, + NL80211_ATTR_P2P_CTWINDOW = 162, + NL80211_ATTR_P2P_OPPPS = 163, + NL80211_ATTR_LOCAL_MESH_POWER_MODE = 164, + NL80211_ATTR_ACL_POLICY = 165, + NL80211_ATTR_MAC_ADDRS = 166, + NL80211_ATTR_MAC_ACL_MAX = 167, + NL80211_ATTR_RADAR_EVENT = 168, + NL80211_ATTR_EXT_CAPA = 169, + NL80211_ATTR_EXT_CAPA_MASK = 170, + NL80211_ATTR_STA_CAPABILITY = 171, + NL80211_ATTR_STA_EXT_CAPABILITY = 172, + NL80211_ATTR_PROTOCOL_FEATURES = 173, + NL80211_ATTR_SPLIT_WIPHY_DUMP = 174, + NL80211_ATTR_DISABLE_VHT = 175, + NL80211_ATTR_VHT_CAPABILITY_MASK = 176, + NL80211_ATTR_MDID = 177, + NL80211_ATTR_IE_RIC = 178, + NL80211_ATTR_CRIT_PROT_ID = 179, + NL80211_ATTR_MAX_CRIT_PROT_DURATION = 180, + NL80211_ATTR_PEER_AID = 181, + NL80211_ATTR_COALESCE_RULE = 182, + NL80211_ATTR_CH_SWITCH_COUNT = 183, + NL80211_ATTR_CH_SWITCH_BLOCK_TX = 184, + NL80211_ATTR_CSA_IES = 185, + NL80211_ATTR_CNTDWN_OFFS_BEACON = 186, + NL80211_ATTR_CNTDWN_OFFS_PRESP = 187, + NL80211_ATTR_RXMGMT_FLAGS = 188, + NL80211_ATTR_STA_SUPPORTED_CHANNELS = 189, + NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES = 190, + NL80211_ATTR_HANDLE_DFS = 191, + NL80211_ATTR_SUPPORT_5_MHZ = 192, + NL80211_ATTR_SUPPORT_10_MHZ = 193, + NL80211_ATTR_OPMODE_NOTIF = 194, + NL80211_ATTR_VENDOR_ID = 195, + NL80211_ATTR_VENDOR_SUBCMD = 196, + NL80211_ATTR_VENDOR_DATA = 197, + NL80211_ATTR_VENDOR_EVENTS = 198, + NL80211_ATTR_QOS_MAP = 199, + NL80211_ATTR_MAC_HINT = 200, + NL80211_ATTR_WIPHY_FREQ_HINT = 201, + NL80211_ATTR_MAX_AP_ASSOC_STA = 202, + NL80211_ATTR_TDLS_PEER_CAPABILITY = 203, + NL80211_ATTR_SOCKET_OWNER = 204, + NL80211_ATTR_CSA_C_OFFSETS_TX = 205, + NL80211_ATTR_MAX_CSA_COUNTERS = 206, + NL80211_ATTR_TDLS_INITIATOR = 207, + NL80211_ATTR_USE_RRM = 208, + NL80211_ATTR_WIPHY_DYN_ACK = 209, + NL80211_ATTR_TSID = 210, + NL80211_ATTR_USER_PRIO = 211, + NL80211_ATTR_ADMITTED_TIME = 212, + NL80211_ATTR_SMPS_MODE = 213, + NL80211_ATTR_OPER_CLASS = 214, + NL80211_ATTR_MAC_MASK = 215, + NL80211_ATTR_WIPHY_SELF_MANAGED_REG = 216, + NL80211_ATTR_EXT_FEATURES = 217, + NL80211_ATTR_SURVEY_RADIO_STATS = 218, + NL80211_ATTR_NETNS_FD = 219, + NL80211_ATTR_SCHED_SCAN_DELAY = 220, + NL80211_ATTR_REG_INDOOR = 221, + NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS = 222, + NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL = 223, + NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS = 224, + NL80211_ATTR_SCHED_SCAN_PLANS = 225, + NL80211_ATTR_PBSS = 226, + NL80211_ATTR_BSS_SELECT = 227, + NL80211_ATTR_STA_SUPPORT_P2P_PS = 228, + NL80211_ATTR_PAD = 229, + NL80211_ATTR_IFTYPE_EXT_CAPA = 230, + NL80211_ATTR_MU_MIMO_GROUP_DATA = 231, + 
NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR = 232, + NL80211_ATTR_SCAN_START_TIME_TSF = 233, + NL80211_ATTR_SCAN_START_TIME_TSF_BSSID = 234, + NL80211_ATTR_MEASUREMENT_DURATION = 235, + NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY = 236, + NL80211_ATTR_MESH_PEER_AID = 237, + NL80211_ATTR_NAN_MASTER_PREF = 238, + NL80211_ATTR_BANDS = 239, + NL80211_ATTR_NAN_FUNC = 240, + NL80211_ATTR_NAN_MATCH = 241, + NL80211_ATTR_FILS_KEK = 242, + NL80211_ATTR_FILS_NONCES = 243, + NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED = 244, + NL80211_ATTR_BSSID = 245, + NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI = 246, + NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST = 247, + NL80211_ATTR_TIMEOUT_REASON = 248, + NL80211_ATTR_FILS_ERP_USERNAME = 249, + NL80211_ATTR_FILS_ERP_REALM = 250, + NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM = 251, + NL80211_ATTR_FILS_ERP_RRK = 252, + NL80211_ATTR_FILS_CACHE_ID = 253, + NL80211_ATTR_PMK = 254, + NL80211_ATTR_SCHED_SCAN_MULTI = 255, + NL80211_ATTR_SCHED_SCAN_MAX_REQS = 256, + NL80211_ATTR_WANT_1X_4WAY_HS = 257, + NL80211_ATTR_PMKR0_NAME = 258, + NL80211_ATTR_PORT_AUTHORIZED = 259, + NL80211_ATTR_EXTERNAL_AUTH_ACTION = 260, + NL80211_ATTR_EXTERNAL_AUTH_SUPPORT = 261, + NL80211_ATTR_NSS = 262, + NL80211_ATTR_ACK_SIGNAL = 263, + NL80211_ATTR_CONTROL_PORT_OVER_NL80211 = 264, + NL80211_ATTR_TXQ_STATS = 265, + NL80211_ATTR_TXQ_LIMIT = 266, + NL80211_ATTR_TXQ_MEMORY_LIMIT = 267, + NL80211_ATTR_TXQ_QUANTUM = 268, + NL80211_ATTR_HE_CAPABILITY = 269, + NL80211_ATTR_FTM_RESPONDER = 270, + NL80211_ATTR_FTM_RESPONDER_STATS = 271, + NL80211_ATTR_TIMEOUT = 272, + NL80211_ATTR_PEER_MEASUREMENTS = 273, + NL80211_ATTR_AIRTIME_WEIGHT = 274, + NL80211_ATTR_STA_TX_POWER_SETTING = 275, + NL80211_ATTR_STA_TX_POWER = 276, + NL80211_ATTR_SAE_PASSWORD = 277, + NL80211_ATTR_TWT_RESPONDER = 278, + NL80211_ATTR_HE_OBSS_PD = 279, + NL80211_ATTR_WIPHY_EDMG_CHANNELS = 280, + NL80211_ATTR_WIPHY_EDMG_BW_CONFIG = 281, + NL80211_ATTR_VLAN_ID = 282, + NL80211_ATTR_HE_BSS_COLOR = 283, + NL80211_ATTR_IFTYPE_AKM_SUITES = 284, + NL80211_ATTR_TID_CONFIG = 285, + NL80211_ATTR_CONTROL_PORT_NO_PREAUTH = 286, + NL80211_ATTR_PMK_LIFETIME = 287, + NL80211_ATTR_PMK_REAUTH_THRESHOLD = 288, + NL80211_ATTR_RECEIVE_MULTICAST = 289, + NL80211_ATTR_WIPHY_FREQ_OFFSET = 290, + NL80211_ATTR_CENTER_FREQ1_OFFSET = 291, + NL80211_ATTR_SCAN_FREQ_KHZ = 292, + NL80211_ATTR_HE_6GHZ_CAPABILITY = 293, + NL80211_ATTR_FILS_DISCOVERY = 294, + NL80211_ATTR_UNSOL_BCAST_PROBE_RESP = 295, + NL80211_ATTR_S1G_CAPABILITY = 296, + NL80211_ATTR_S1G_CAPABILITY_MASK = 297, + NL80211_ATTR_SAE_PWE = 298, + NL80211_ATTR_RECONNECT_REQUESTED = 299, + NL80211_ATTR_SAR_SPEC = 300, + NL80211_ATTR_DISABLE_HE = 301, + NL80211_ATTR_OBSS_COLOR_BITMAP = 302, + NL80211_ATTR_COLOR_CHANGE_COUNT = 303, + NL80211_ATTR_COLOR_CHANGE_COLOR = 304, + NL80211_ATTR_COLOR_CHANGE_ELEMS = 305, + NL80211_ATTR_MBSSID_CONFIG = 306, + NL80211_ATTR_MBSSID_ELEMS = 307, + NL80211_ATTR_RADAR_BACKGROUND = 308, + NL80211_ATTR_AP_SETTINGS_FLAGS = 309, + NL80211_ATTR_EHT_CAPABILITY = 310, + NL80211_ATTR_DISABLE_EHT = 311, + NL80211_ATTR_MLO_LINKS = 312, + NL80211_ATTR_MLO_LINK_ID = 313, + NL80211_ATTR_MLD_ADDR = 314, + NL80211_ATTR_MLO_SUPPORT = 315, + NL80211_ATTR_MAX_NUM_AKM_SUITES = 316, + NL80211_ATTR_EML_CAPABILITY = 317, + NL80211_ATTR_MLD_CAPA_AND_OPS = 318, + NL80211_ATTR_TX_HW_TIMESTAMP = 319, + NL80211_ATTR_RX_HW_TIMESTAMP = 320, + NL80211_ATTR_TD_BITMAP = 321, + NL80211_ATTR_PUNCT_BITMAP = 322, + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 323, + NL80211_ATTR_HW_TIMESTAMP_ENABLED = 324, + NL80211_ATTR_EMA_RNR_ELEMS = 325, + 
NL80211_ATTR_MLO_LINK_DISABLED = 326, + NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 327, + NL80211_ATTR_MLO_TTLM_DLINK = 328, + NL80211_ATTR_MLO_TTLM_ULINK = 329, + NL80211_ATTR_ASSOC_SPP_AMSDU = 330, + NL80211_ATTR_WIPHY_RADIOS = 331, + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 332, + NL80211_ATTR_VIF_RADIO_MASK = 333, + __NL80211_ATTR_AFTER_LAST = 334, + NUM_NL80211_ATTR = 334, + NL80211_ATTR_MAX = 333, +}; + +enum nl80211_sta_flags { + __NL80211_STA_FLAG_INVALID = 0, + NL80211_STA_FLAG_AUTHORIZED = 1, + NL80211_STA_FLAG_SHORT_PREAMBLE = 2, + NL80211_STA_FLAG_WME = 3, + NL80211_STA_FLAG_MFP = 4, + NL80211_STA_FLAG_AUTHENTICATED = 5, + NL80211_STA_FLAG_TDLS_PEER = 6, + NL80211_STA_FLAG_ASSOCIATED = 7, + NL80211_STA_FLAG_SPP_AMSDU = 8, + __NL80211_STA_FLAG_AFTER_LAST = 9, + NL80211_STA_FLAG_MAX = 8, +}; + +enum nl80211_sta_p2p_ps_status { + NL80211_P2P_PS_UNSUPPORTED = 0, + NL80211_P2P_PS_SUPPORTED = 1, + NUM_NL80211_P2P_PS_STATUS = 2, +}; + +struct nl80211_sta_flag_update { + __u32 mask; + __u32 set; +}; + +enum nl80211_rate_info { + __NL80211_RATE_INFO_INVALID = 0, + NL80211_RATE_INFO_BITRATE = 1, + NL80211_RATE_INFO_MCS = 2, + NL80211_RATE_INFO_40_MHZ_WIDTH = 3, + NL80211_RATE_INFO_SHORT_GI = 4, + NL80211_RATE_INFO_BITRATE32 = 5, + NL80211_RATE_INFO_VHT_MCS = 6, + NL80211_RATE_INFO_VHT_NSS = 7, + NL80211_RATE_INFO_80_MHZ_WIDTH = 8, + NL80211_RATE_INFO_80P80_MHZ_WIDTH = 9, + NL80211_RATE_INFO_160_MHZ_WIDTH = 10, + NL80211_RATE_INFO_10_MHZ_WIDTH = 11, + NL80211_RATE_INFO_5_MHZ_WIDTH = 12, + NL80211_RATE_INFO_HE_MCS = 13, + NL80211_RATE_INFO_HE_NSS = 14, + NL80211_RATE_INFO_HE_GI = 15, + NL80211_RATE_INFO_HE_DCM = 16, + NL80211_RATE_INFO_HE_RU_ALLOC = 17, + NL80211_RATE_INFO_320_MHZ_WIDTH = 18, + NL80211_RATE_INFO_EHT_MCS = 19, + NL80211_RATE_INFO_EHT_NSS = 20, + NL80211_RATE_INFO_EHT_GI = 21, + NL80211_RATE_INFO_EHT_RU_ALLOC = 22, + NL80211_RATE_INFO_S1G_MCS = 23, + NL80211_RATE_INFO_S1G_NSS = 24, + NL80211_RATE_INFO_1_MHZ_WIDTH = 25, + NL80211_RATE_INFO_2_MHZ_WIDTH = 26, + NL80211_RATE_INFO_4_MHZ_WIDTH = 27, + NL80211_RATE_INFO_8_MHZ_WIDTH = 28, + NL80211_RATE_INFO_16_MHZ_WIDTH = 29, + __NL80211_RATE_INFO_AFTER_LAST = 30, + NL80211_RATE_INFO_MAX = 29, +}; + +enum nl80211_sta_bss_param { + __NL80211_STA_BSS_PARAM_INVALID = 0, + NL80211_STA_BSS_PARAM_CTS_PROT = 1, + NL80211_STA_BSS_PARAM_SHORT_PREAMBLE = 2, + NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME = 3, + NL80211_STA_BSS_PARAM_DTIM_PERIOD = 4, + NL80211_STA_BSS_PARAM_BEACON_INTERVAL = 5, + __NL80211_STA_BSS_PARAM_AFTER_LAST = 6, + NL80211_STA_BSS_PARAM_MAX = 5, +}; + +enum nl80211_sta_info { + __NL80211_STA_INFO_INVALID = 0, + NL80211_STA_INFO_INACTIVE_TIME = 1, + NL80211_STA_INFO_RX_BYTES = 2, + NL80211_STA_INFO_TX_BYTES = 3, + NL80211_STA_INFO_LLID = 4, + NL80211_STA_INFO_PLID = 5, + NL80211_STA_INFO_PLINK_STATE = 6, + NL80211_STA_INFO_SIGNAL = 7, + NL80211_STA_INFO_TX_BITRATE = 8, + NL80211_STA_INFO_RX_PACKETS = 9, + NL80211_STA_INFO_TX_PACKETS = 10, + NL80211_STA_INFO_TX_RETRIES = 11, + NL80211_STA_INFO_TX_FAILED = 12, + NL80211_STA_INFO_SIGNAL_AVG = 13, + NL80211_STA_INFO_RX_BITRATE = 14, + NL80211_STA_INFO_BSS_PARAM = 15, + NL80211_STA_INFO_CONNECTED_TIME = 16, + NL80211_STA_INFO_STA_FLAGS = 17, + NL80211_STA_INFO_BEACON_LOSS = 18, + NL80211_STA_INFO_T_OFFSET = 19, + NL80211_STA_INFO_LOCAL_PM = 20, + NL80211_STA_INFO_PEER_PM = 21, + NL80211_STA_INFO_NONPEER_PM = 22, + NL80211_STA_INFO_RX_BYTES64 = 23, + NL80211_STA_INFO_TX_BYTES64 = 24, + NL80211_STA_INFO_CHAIN_SIGNAL = 25, + NL80211_STA_INFO_CHAIN_SIGNAL_AVG = 26, + 
NL80211_STA_INFO_EXPECTED_THROUGHPUT = 27, + NL80211_STA_INFO_RX_DROP_MISC = 28, + NL80211_STA_INFO_BEACON_RX = 29, + NL80211_STA_INFO_BEACON_SIGNAL_AVG = 30, + NL80211_STA_INFO_TID_STATS = 31, + NL80211_STA_INFO_RX_DURATION = 32, + NL80211_STA_INFO_PAD = 33, + NL80211_STA_INFO_ACK_SIGNAL = 34, + NL80211_STA_INFO_ACK_SIGNAL_AVG = 35, + NL80211_STA_INFO_RX_MPDUS = 36, + NL80211_STA_INFO_FCS_ERROR_COUNT = 37, + NL80211_STA_INFO_CONNECTED_TO_GATE = 38, + NL80211_STA_INFO_TX_DURATION = 39, + NL80211_STA_INFO_AIRTIME_WEIGHT = 40, + NL80211_STA_INFO_AIRTIME_LINK_METRIC = 41, + NL80211_STA_INFO_ASSOC_AT_BOOTTIME = 42, + NL80211_STA_INFO_CONNECTED_TO_AS = 43, + __NL80211_STA_INFO_AFTER_LAST = 44, + NL80211_STA_INFO_MAX = 43, +}; + +enum nl80211_tid_stats { + __NL80211_TID_STATS_INVALID = 0, + NL80211_TID_STATS_RX_MSDU = 1, + NL80211_TID_STATS_TX_MSDU = 2, + NL80211_TID_STATS_TX_MSDU_RETRIES = 3, + NL80211_TID_STATS_TX_MSDU_FAILED = 4, + NL80211_TID_STATS_PAD = 5, + NL80211_TID_STATS_TXQ_STATS = 6, + NUM_NL80211_TID_STATS = 7, + NL80211_TID_STATS_MAX = 6, +}; + +enum nl80211_txq_stats { + __NL80211_TXQ_STATS_INVALID = 0, + NL80211_TXQ_STATS_BACKLOG_BYTES = 1, + NL80211_TXQ_STATS_BACKLOG_PACKETS = 2, + NL80211_TXQ_STATS_FLOWS = 3, + NL80211_TXQ_STATS_DROPS = 4, + NL80211_TXQ_STATS_ECN_MARKS = 5, + NL80211_TXQ_STATS_OVERLIMIT = 6, + NL80211_TXQ_STATS_OVERMEMORY = 7, + NL80211_TXQ_STATS_COLLISIONS = 8, + NL80211_TXQ_STATS_TX_BYTES = 9, + NL80211_TXQ_STATS_TX_PACKETS = 10, + NL80211_TXQ_STATS_MAX_FLOWS = 11, + NUM_NL80211_TXQ_STATS = 12, + NL80211_TXQ_STATS_MAX = 11, +}; + +enum nl80211_mpath_info { + __NL80211_MPATH_INFO_INVALID = 0, + NL80211_MPATH_INFO_FRAME_QLEN = 1, + NL80211_MPATH_INFO_SN = 2, + NL80211_MPATH_INFO_METRIC = 3, + NL80211_MPATH_INFO_EXPTIME = 4, + NL80211_MPATH_INFO_FLAGS = 5, + NL80211_MPATH_INFO_DISCOVERY_TIMEOUT = 6, + NL80211_MPATH_INFO_DISCOVERY_RETRIES = 7, + NL80211_MPATH_INFO_HOP_COUNT = 8, + NL80211_MPATH_INFO_PATH_CHANGE = 9, + __NL80211_MPATH_INFO_AFTER_LAST = 10, + NL80211_MPATH_INFO_MAX = 9, +}; + +enum nl80211_band_iftype_attr { + __NL80211_BAND_IFTYPE_ATTR_INVALID = 0, + NL80211_BAND_IFTYPE_ATTR_IFTYPES = 1, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC = 2, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY = 3, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET = 4, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 5, + NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA = 6, + NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS = 7, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 8, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY = 9, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET = 10, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE = 11, + __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST = 12, + NL80211_BAND_IFTYPE_ATTR_MAX = 11, +}; + +enum nl80211_band_attr { + __NL80211_BAND_ATTR_INVALID = 0, + NL80211_BAND_ATTR_FREQS = 1, + NL80211_BAND_ATTR_RATES = 2, + NL80211_BAND_ATTR_HT_MCS_SET = 3, + NL80211_BAND_ATTR_HT_CAPA = 4, + NL80211_BAND_ATTR_HT_AMPDU_FACTOR = 5, + NL80211_BAND_ATTR_HT_AMPDU_DENSITY = 6, + NL80211_BAND_ATTR_VHT_MCS_SET = 7, + NL80211_BAND_ATTR_VHT_CAPA = 8, + NL80211_BAND_ATTR_IFTYPE_DATA = 9, + NL80211_BAND_ATTR_EDMG_CHANNELS = 10, + NL80211_BAND_ATTR_EDMG_BW_CONFIG = 11, + NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 12, + NL80211_BAND_ATTR_S1G_CAPA = 13, + __NL80211_BAND_ATTR_AFTER_LAST = 14, + NL80211_BAND_ATTR_MAX = 13, +}; + +enum nl80211_wmm_rule { + __NL80211_WMMR_INVALID = 0, + NL80211_WMMR_CW_MIN = 1, + NL80211_WMMR_CW_MAX = 2, + NL80211_WMMR_AIFSN = 3, + NL80211_WMMR_TXOP = 4, + __NL80211_WMMR_LAST = 5, + NL80211_WMMR_MAX = 4, +}; + +enum 
nl80211_frequency_attr { + __NL80211_FREQUENCY_ATTR_INVALID = 0, + NL80211_FREQUENCY_ATTR_FREQ = 1, + NL80211_FREQUENCY_ATTR_DISABLED = 2, + NL80211_FREQUENCY_ATTR_NO_IR = 3, + __NL80211_FREQUENCY_ATTR_NO_IBSS = 4, + NL80211_FREQUENCY_ATTR_RADAR = 5, + NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 6, + NL80211_FREQUENCY_ATTR_DFS_STATE = 7, + NL80211_FREQUENCY_ATTR_DFS_TIME = 8, + NL80211_FREQUENCY_ATTR_NO_HT40_MINUS = 9, + NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 10, + NL80211_FREQUENCY_ATTR_NO_80MHZ = 11, + NL80211_FREQUENCY_ATTR_NO_160MHZ = 12, + NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 13, + NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 14, + NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 15, + NL80211_FREQUENCY_ATTR_NO_20MHZ = 16, + NL80211_FREQUENCY_ATTR_NO_10MHZ = 17, + NL80211_FREQUENCY_ATTR_WMM = 18, + NL80211_FREQUENCY_ATTR_NO_HE = 19, + NL80211_FREQUENCY_ATTR_OFFSET = 20, + NL80211_FREQUENCY_ATTR_1MHZ = 21, + NL80211_FREQUENCY_ATTR_2MHZ = 22, + NL80211_FREQUENCY_ATTR_4MHZ = 23, + NL80211_FREQUENCY_ATTR_8MHZ = 24, + NL80211_FREQUENCY_ATTR_16MHZ = 25, + NL80211_FREQUENCY_ATTR_NO_320MHZ = 26, + NL80211_FREQUENCY_ATTR_NO_EHT = 27, + NL80211_FREQUENCY_ATTR_PSD = 28, + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 29, + NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 30, + NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 31, + NL80211_FREQUENCY_ATTR_CAN_MONITOR = 32, + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 33, + __NL80211_FREQUENCY_ATTR_AFTER_LAST = 34, + NL80211_FREQUENCY_ATTR_MAX = 33, +}; + +enum nl80211_bitrate_attr { + __NL80211_BITRATE_ATTR_INVALID = 0, + NL80211_BITRATE_ATTR_RATE = 1, + NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 2, + __NL80211_BITRATE_ATTR_AFTER_LAST = 3, + NL80211_BITRATE_ATTR_MAX = 2, +}; + +enum nl80211_reg_type { + NL80211_REGDOM_TYPE_COUNTRY = 0, + NL80211_REGDOM_TYPE_WORLD = 1, + NL80211_REGDOM_TYPE_CUSTOM_WORLD = 2, + NL80211_REGDOM_TYPE_INTERSECTION = 3, +}; + +enum nl80211_reg_rule_attr { + __NL80211_REG_RULE_ATTR_INVALID = 0, + NL80211_ATTR_REG_RULE_FLAGS = 1, + NL80211_ATTR_FREQ_RANGE_START = 2, + NL80211_ATTR_FREQ_RANGE_END = 3, + NL80211_ATTR_FREQ_RANGE_MAX_BW = 4, + NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 5, + NL80211_ATTR_POWER_RULE_MAX_EIRP = 6, + NL80211_ATTR_DFS_CAC_TIME = 7, + NL80211_ATTR_POWER_RULE_PSD = 8, + __NL80211_REG_RULE_ATTR_AFTER_LAST = 9, + NL80211_REG_RULE_ATTR_MAX = 8, +}; + +enum nl80211_sched_scan_match_attr { + __NL80211_SCHED_SCAN_MATCH_ATTR_INVALID = 0, + NL80211_SCHED_SCAN_MATCH_ATTR_SSID = 1, + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI = 2, + NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI = 3, + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST = 4, + NL80211_SCHED_SCAN_MATCH_ATTR_BSSID = 5, + NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI = 6, + __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST = 7, + NL80211_SCHED_SCAN_MATCH_ATTR_MAX = 6, +}; + +enum nl80211_reg_rule_flags { + NL80211_RRF_NO_OFDM = 1, + NL80211_RRF_NO_CCK = 2, + NL80211_RRF_NO_INDOOR = 4, + NL80211_RRF_NO_OUTDOOR = 8, + NL80211_RRF_DFS = 16, + NL80211_RRF_PTP_ONLY = 32, + NL80211_RRF_PTMP_ONLY = 64, + NL80211_RRF_NO_IR = 128, + __NL80211_RRF_NO_IBSS = 256, + NL80211_RRF_AUTO_BW = 2048, + NL80211_RRF_IR_CONCURRENT = 4096, + NL80211_RRF_NO_HT40MINUS = 8192, + NL80211_RRF_NO_HT40PLUS = 16384, + NL80211_RRF_NO_80MHZ = 32768, + NL80211_RRF_NO_160MHZ = 65536, + NL80211_RRF_NO_HE = 131072, + NL80211_RRF_NO_320MHZ = 262144, + NL80211_RRF_NO_EHT = 524288, + NL80211_RRF_PSD = 1048576, + NL80211_RRF_DFS_CONCURRENT = 2097152, + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 4194304, + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 8388608, + 
NL80211_RRF_ALLOW_6GHZ_VLP_AP = 16777216, +}; + +enum nl80211_survey_info { + __NL80211_SURVEY_INFO_INVALID = 0, + NL80211_SURVEY_INFO_FREQUENCY = 1, + NL80211_SURVEY_INFO_NOISE = 2, + NL80211_SURVEY_INFO_IN_USE = 3, + NL80211_SURVEY_INFO_TIME = 4, + NL80211_SURVEY_INFO_TIME_BUSY = 5, + NL80211_SURVEY_INFO_TIME_EXT_BUSY = 6, + NL80211_SURVEY_INFO_TIME_RX = 7, + NL80211_SURVEY_INFO_TIME_TX = 8, + NL80211_SURVEY_INFO_TIME_SCAN = 9, + NL80211_SURVEY_INFO_PAD = 10, + NL80211_SURVEY_INFO_TIME_BSS_RX = 11, + NL80211_SURVEY_INFO_FREQUENCY_OFFSET = 12, + __NL80211_SURVEY_INFO_AFTER_LAST = 13, + NL80211_SURVEY_INFO_MAX = 12, +}; + +enum nl80211_mesh_power_mode { + NL80211_MESH_POWER_UNKNOWN = 0, + NL80211_MESH_POWER_ACTIVE = 1, + NL80211_MESH_POWER_LIGHT_SLEEP = 2, + NL80211_MESH_POWER_DEEP_SLEEP = 3, + __NL80211_MESH_POWER_AFTER_LAST = 4, + NL80211_MESH_POWER_MAX = 3, +}; + +enum nl80211_meshconf_params { + __NL80211_MESHCONF_INVALID = 0, + NL80211_MESHCONF_RETRY_TIMEOUT = 1, + NL80211_MESHCONF_CONFIRM_TIMEOUT = 2, + NL80211_MESHCONF_HOLDING_TIMEOUT = 3, + NL80211_MESHCONF_MAX_PEER_LINKS = 4, + NL80211_MESHCONF_MAX_RETRIES = 5, + NL80211_MESHCONF_TTL = 6, + NL80211_MESHCONF_AUTO_OPEN_PLINKS = 7, + NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES = 8, + NL80211_MESHCONF_PATH_REFRESH_TIME = 9, + NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT = 10, + NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT = 11, + NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL = 12, + NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME = 13, + NL80211_MESHCONF_HWMP_ROOTMODE = 14, + NL80211_MESHCONF_ELEMENT_TTL = 15, + NL80211_MESHCONF_HWMP_RANN_INTERVAL = 16, + NL80211_MESHCONF_GATE_ANNOUNCEMENTS = 17, + NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL = 18, + NL80211_MESHCONF_FORWARDING = 19, + NL80211_MESHCONF_RSSI_THRESHOLD = 20, + NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR = 21, + NL80211_MESHCONF_HT_OPMODE = 22, + NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT = 23, + NL80211_MESHCONF_HWMP_ROOT_INTERVAL = 24, + NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL = 25, + NL80211_MESHCONF_POWER_MODE = 26, + NL80211_MESHCONF_AWAKE_WINDOW = 27, + NL80211_MESHCONF_PLINK_TIMEOUT = 28, + NL80211_MESHCONF_CONNECTED_TO_GATE = 29, + NL80211_MESHCONF_NOLEARN = 30, + NL80211_MESHCONF_CONNECTED_TO_AS = 31, + __NL80211_MESHCONF_ATTR_AFTER_LAST = 32, + NL80211_MESHCONF_ATTR_MAX = 31, +}; + +enum nl80211_mesh_setup_params { + __NL80211_MESH_SETUP_INVALID = 0, + NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL = 1, + NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC = 2, + NL80211_MESH_SETUP_IE = 3, + NL80211_MESH_SETUP_USERSPACE_AUTH = 4, + NL80211_MESH_SETUP_USERSPACE_AMPE = 5, + NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC = 6, + NL80211_MESH_SETUP_USERSPACE_MPM = 7, + NL80211_MESH_SETUP_AUTH_PROTOCOL = 8, + __NL80211_MESH_SETUP_ATTR_AFTER_LAST = 9, + NL80211_MESH_SETUP_ATTR_MAX = 8, +}; + +enum nl80211_txq_attr { + __NL80211_TXQ_ATTR_INVALID = 0, + NL80211_TXQ_ATTR_AC = 1, + NL80211_TXQ_ATTR_TXOP = 2, + NL80211_TXQ_ATTR_CWMIN = 3, + NL80211_TXQ_ATTR_CWMAX = 4, + NL80211_TXQ_ATTR_AIFS = 5, + __NL80211_TXQ_ATTR_AFTER_LAST = 6, + NL80211_TXQ_ATTR_MAX = 5, +}; + +enum nl80211_ac { + NL80211_AC_VO = 0, + NL80211_AC_VI = 1, + NL80211_AC_BE = 2, + NL80211_AC_BK = 3, + NL80211_NUM_ACS = 4, +}; + +enum nl80211_channel_type { + NL80211_CHAN_NO_HT = 0, + NL80211_CHAN_HT20 = 1, + NL80211_CHAN_HT40MINUS = 2, + NL80211_CHAN_HT40PLUS = 3, +}; + +enum nl80211_key_mode { + NL80211_KEY_RX_TX = 0, + NL80211_KEY_NO_TX = 1, + NL80211_KEY_SET_TX = 2, +}; + +enum nl80211_bss_use_for { + NL80211_BSS_USE_FOR_NORMAL = 1, + 
NL80211_BSS_USE_FOR_MLD_LINK = 2, +}; + +enum nl80211_bss { + __NL80211_BSS_INVALID = 0, + NL80211_BSS_BSSID = 1, + NL80211_BSS_FREQUENCY = 2, + NL80211_BSS_TSF = 3, + NL80211_BSS_BEACON_INTERVAL = 4, + NL80211_BSS_CAPABILITY = 5, + NL80211_BSS_INFORMATION_ELEMENTS = 6, + NL80211_BSS_SIGNAL_MBM = 7, + NL80211_BSS_SIGNAL_UNSPEC = 8, + NL80211_BSS_STATUS = 9, + NL80211_BSS_SEEN_MS_AGO = 10, + NL80211_BSS_BEACON_IES = 11, + NL80211_BSS_CHAN_WIDTH = 12, + NL80211_BSS_BEACON_TSF = 13, + NL80211_BSS_PRESP_DATA = 14, + NL80211_BSS_LAST_SEEN_BOOTTIME = 15, + NL80211_BSS_PAD = 16, + NL80211_BSS_PARENT_TSF = 17, + NL80211_BSS_PARENT_BSSID = 18, + NL80211_BSS_CHAIN_SIGNAL = 19, + NL80211_BSS_FREQUENCY_OFFSET = 20, + NL80211_BSS_MLO_LINK_ID = 21, + NL80211_BSS_MLD_ADDR = 22, + NL80211_BSS_USE_FOR = 23, + NL80211_BSS_CANNOT_USE_REASONS = 24, + __NL80211_BSS_AFTER_LAST = 25, + NL80211_BSS_MAX = 24, +}; + +enum nl80211_bss_status { + NL80211_BSS_STATUS_AUTHENTICATED = 0, + NL80211_BSS_STATUS_ASSOCIATED = 1, + NL80211_BSS_STATUS_IBSS_JOINED = 2, +}; + +enum nl80211_auth_type { + NL80211_AUTHTYPE_OPEN_SYSTEM = 0, + NL80211_AUTHTYPE_SHARED_KEY = 1, + NL80211_AUTHTYPE_FT = 2, + NL80211_AUTHTYPE_NETWORK_EAP = 3, + NL80211_AUTHTYPE_SAE = 4, + NL80211_AUTHTYPE_FILS_SK = 5, + NL80211_AUTHTYPE_FILS_SK_PFS = 6, + NL80211_AUTHTYPE_FILS_PK = 7, + __NL80211_AUTHTYPE_NUM = 8, + NL80211_AUTHTYPE_MAX = 7, + NL80211_AUTHTYPE_AUTOMATIC = 8, +}; + +enum nl80211_key_type { + NL80211_KEYTYPE_GROUP = 0, + NL80211_KEYTYPE_PAIRWISE = 1, + NL80211_KEYTYPE_PEERKEY = 2, + NUM_NL80211_KEYTYPES = 3, +}; + +enum nl80211_mfp { + NL80211_MFP_NO = 0, + NL80211_MFP_REQUIRED = 1, + NL80211_MFP_OPTIONAL = 2, +}; + +enum nl80211_wpa_versions { + NL80211_WPA_VERSION_1 = 1, + NL80211_WPA_VERSION_2 = 2, + NL80211_WPA_VERSION_3 = 4, +}; + +enum nl80211_key_default_types { + __NL80211_KEY_DEFAULT_TYPE_INVALID = 0, + NL80211_KEY_DEFAULT_TYPE_UNICAST = 1, + NL80211_KEY_DEFAULT_TYPE_MULTICAST = 2, + NUM_NL80211_KEY_DEFAULT_TYPES = 3, +}; + +enum nl80211_key_attributes { + __NL80211_KEY_INVALID = 0, + NL80211_KEY_DATA = 1, + NL80211_KEY_IDX = 2, + NL80211_KEY_CIPHER = 3, + NL80211_KEY_SEQ = 4, + NL80211_KEY_DEFAULT = 5, + NL80211_KEY_DEFAULT_MGMT = 6, + NL80211_KEY_TYPE = 7, + NL80211_KEY_DEFAULT_TYPES = 8, + NL80211_KEY_MODE = 9, + NL80211_KEY_DEFAULT_BEACON = 10, + __NL80211_KEY_AFTER_LAST = 11, + NL80211_KEY_MAX = 10, +}; + +enum nl80211_tx_rate_attributes { + __NL80211_TXRATE_INVALID = 0, + NL80211_TXRATE_LEGACY = 1, + NL80211_TXRATE_HT = 2, + NL80211_TXRATE_VHT = 3, + NL80211_TXRATE_GI = 4, + NL80211_TXRATE_HE = 5, + NL80211_TXRATE_HE_GI = 6, + NL80211_TXRATE_HE_LTF = 7, + __NL80211_TXRATE_AFTER_LAST = 8, + NL80211_TXRATE_MAX = 7, +}; + +struct nl80211_txrate_vht { + __u16 mcs[8]; +}; + +struct nl80211_txrate_he { + __u16 mcs[8]; +}; + +enum nl80211_ps_state { + NL80211_PS_DISABLED = 0, + NL80211_PS_ENABLED = 1, +}; + +enum nl80211_attr_cqm { + __NL80211_ATTR_CQM_INVALID = 0, + NL80211_ATTR_CQM_RSSI_THOLD = 1, + NL80211_ATTR_CQM_RSSI_HYST = 2, + NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT = 3, + NL80211_ATTR_CQM_PKT_LOSS_EVENT = 4, + NL80211_ATTR_CQM_TXE_RATE = 5, + NL80211_ATTR_CQM_TXE_PKTS = 6, + NL80211_ATTR_CQM_TXE_INTVL = 7, + NL80211_ATTR_CQM_BEACON_LOSS_EVENT = 8, + NL80211_ATTR_CQM_RSSI_LEVEL = 9, + __NL80211_ATTR_CQM_AFTER_LAST = 10, + NL80211_ATTR_CQM_MAX = 9, +}; + +enum nl80211_cqm_rssi_threshold_event { + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW = 0, + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH = 1, + NL80211_CQM_RSSI_BEACON_LOSS_EVENT = 2, +}; + 
+enum nl80211_tid_config { + NL80211_TID_CONFIG_ENABLE = 0, + NL80211_TID_CONFIG_DISABLE = 1, +}; + +enum nl80211_tx_rate_setting { + NL80211_TX_RATE_AUTOMATIC = 0, + NL80211_TX_RATE_LIMITED = 1, + NL80211_TX_RATE_FIXED = 2, +}; + +enum nl80211_tid_config_attr { + __NL80211_TID_CONFIG_ATTR_INVALID = 0, + NL80211_TID_CONFIG_ATTR_PAD = 1, + NL80211_TID_CONFIG_ATTR_VIF_SUPP = 2, + NL80211_TID_CONFIG_ATTR_PEER_SUPP = 3, + NL80211_TID_CONFIG_ATTR_OVERRIDE = 4, + NL80211_TID_CONFIG_ATTR_TIDS = 5, + NL80211_TID_CONFIG_ATTR_NOACK = 6, + NL80211_TID_CONFIG_ATTR_RETRY_SHORT = 7, + NL80211_TID_CONFIG_ATTR_RETRY_LONG = 8, + NL80211_TID_CONFIG_ATTR_AMPDU_CTRL = 9, + NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL = 10, + NL80211_TID_CONFIG_ATTR_AMSDU_CTRL = 11, + NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE = 12, + NL80211_TID_CONFIG_ATTR_TX_RATE = 13, + __NL80211_TID_CONFIG_ATTR_AFTER_LAST = 14, + NL80211_TID_CONFIG_ATTR_MAX = 13, +}; + +enum nl80211_packet_pattern_attr { + __NL80211_PKTPAT_INVALID = 0, + NL80211_PKTPAT_MASK = 1, + NL80211_PKTPAT_PATTERN = 2, + NL80211_PKTPAT_OFFSET = 3, + NUM_NL80211_PKTPAT = 4, + MAX_NL80211_PKTPAT = 3, +}; + +struct nl80211_pattern_support { + __u32 max_patterns; + __u32 min_pattern_len; + __u32 max_pattern_len; + __u32 max_pkt_offset; +}; + +enum nl80211_wowlan_triggers { + __NL80211_WOWLAN_TRIG_INVALID = 0, + NL80211_WOWLAN_TRIG_ANY = 1, + NL80211_WOWLAN_TRIG_DISCONNECT = 2, + NL80211_WOWLAN_TRIG_MAGIC_PKT = 3, + NL80211_WOWLAN_TRIG_PKT_PATTERN = 4, + NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED = 5, + NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE = 6, + NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST = 7, + NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE = 8, + NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 9, + NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 10, + NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 11, + NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 12, + NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN = 13, + NL80211_WOWLAN_TRIG_TCP_CONNECTION = 14, + NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH = 15, + NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST = 16, + NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS = 17, + NL80211_WOWLAN_TRIG_NET_DETECT = 18, + NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS = 19, + NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 20, + NUM_NL80211_WOWLAN_TRIG = 21, + MAX_NL80211_WOWLAN_TRIG = 20, +}; + +enum nl80211_wowlan_tcp_attrs { + __NL80211_WOWLAN_TCP_INVALID = 0, + NL80211_WOWLAN_TCP_SRC_IPV4 = 1, + NL80211_WOWLAN_TCP_DST_IPV4 = 2, + NL80211_WOWLAN_TCP_DST_MAC = 3, + NL80211_WOWLAN_TCP_SRC_PORT = 4, + NL80211_WOWLAN_TCP_DST_PORT = 5, + NL80211_WOWLAN_TCP_DATA_PAYLOAD = 6, + NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ = 7, + NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN = 8, + NL80211_WOWLAN_TCP_DATA_INTERVAL = 9, + NL80211_WOWLAN_TCP_WAKE_PAYLOAD = 10, + NL80211_WOWLAN_TCP_WAKE_MASK = 11, + NUM_NL80211_WOWLAN_TCP = 12, + MAX_NL80211_WOWLAN_TCP = 11, +}; + +struct nl80211_coalesce_rule_support { + __u32 max_rules; + struct nl80211_pattern_support pat; + __u32 max_delay; +}; + +enum nl80211_attr_coalesce_rule { + __NL80211_COALESCE_RULE_INVALID = 0, + NL80211_ATTR_COALESCE_RULE_DELAY = 1, + NL80211_ATTR_COALESCE_RULE_CONDITION = 2, + NL80211_ATTR_COALESCE_RULE_PKT_PATTERN = 3, + NUM_NL80211_ATTR_COALESCE_RULE = 4, + NL80211_ATTR_COALESCE_RULE_MAX = 3, +}; + +enum nl80211_coalesce_condition { + NL80211_COALESCE_CONDITION_MATCH = 0, + NL80211_COALESCE_CONDITION_NO_MATCH = 1, +}; + +enum nl80211_iface_limit_attrs { + NL80211_IFACE_LIMIT_UNSPEC = 0, + NL80211_IFACE_LIMIT_MAX = 1, + NL80211_IFACE_LIMIT_TYPES = 2, + NUM_NL80211_IFACE_LIMIT = 3, + 
MAX_NL80211_IFACE_LIMIT = 2, +}; + +enum nl80211_if_combination_attrs { + NL80211_IFACE_COMB_UNSPEC = 0, + NL80211_IFACE_COMB_LIMITS = 1, + NL80211_IFACE_COMB_MAXNUM = 2, + NL80211_IFACE_COMB_STA_AP_BI_MATCH = 3, + NL80211_IFACE_COMB_NUM_CHANNELS = 4, + NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS = 5, + NL80211_IFACE_COMB_RADAR_DETECT_REGIONS = 6, + NL80211_IFACE_COMB_BI_MIN_GCD = 7, + NUM_NL80211_IFACE_COMB = 8, + MAX_NL80211_IFACE_COMB = 7, +}; + +enum nl80211_plink_state { + NL80211_PLINK_LISTEN = 0, + NL80211_PLINK_OPN_SNT = 1, + NL80211_PLINK_OPN_RCVD = 2, + NL80211_PLINK_CNF_RCVD = 3, + NL80211_PLINK_ESTAB = 4, + NL80211_PLINK_HOLDING = 5, + NL80211_PLINK_BLOCKED = 6, + NUM_NL80211_PLINK_STATES = 7, + MAX_NL80211_PLINK_STATES = 6, +}; + +enum nl80211_plink_action { + NL80211_PLINK_ACTION_NO_ACTION = 0, + NL80211_PLINK_ACTION_OPEN = 1, + NL80211_PLINK_ACTION_BLOCK = 2, + NUM_NL80211_PLINK_ACTIONS = 3, +}; + +enum nl80211_rekey_data { + __NL80211_REKEY_DATA_INVALID = 0, + NL80211_REKEY_DATA_KEK = 1, + NL80211_REKEY_DATA_KCK = 2, + NL80211_REKEY_DATA_REPLAY_CTR = 3, + NL80211_REKEY_DATA_AKM = 4, + NUM_NL80211_REKEY_DATA = 5, + MAX_NL80211_REKEY_DATA = 4, +}; + +enum nl80211_hidden_ssid { + NL80211_HIDDEN_SSID_NOT_IN_USE = 0, + NL80211_HIDDEN_SSID_ZERO_LEN = 1, + NL80211_HIDDEN_SSID_ZERO_CONTENTS = 2, +}; + +enum nl80211_sta_wme_attr { + __NL80211_STA_WME_INVALID = 0, + NL80211_STA_WME_UAPSD_QUEUES = 1, + NL80211_STA_WME_MAX_SP = 2, + __NL80211_STA_WME_AFTER_LAST = 3, + NL80211_STA_WME_MAX = 2, +}; + +enum nl80211_pmksa_candidate_attr { + __NL80211_PMKSA_CANDIDATE_INVALID = 0, + NL80211_PMKSA_CANDIDATE_INDEX = 1, + NL80211_PMKSA_CANDIDATE_BSSID = 2, + NL80211_PMKSA_CANDIDATE_PREAUTH = 3, + NUM_NL80211_PMKSA_CANDIDATE = 4, + MAX_NL80211_PMKSA_CANDIDATE = 3, +}; + +enum nl80211_tdls_operation { + NL80211_TDLS_DISCOVERY_REQ = 0, + NL80211_TDLS_SETUP = 1, + NL80211_TDLS_TEARDOWN = 2, + NL80211_TDLS_ENABLE_LINK = 3, + NL80211_TDLS_DISABLE_LINK = 4, +}; + +enum nl80211_feature_flags { + NL80211_FEATURE_SK_TX_STATUS = 1, + NL80211_FEATURE_HT_IBSS = 2, + NL80211_FEATURE_INACTIVITY_TIMER = 4, + NL80211_FEATURE_CELL_BASE_REG_HINTS = 8, + NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 16, + NL80211_FEATURE_SAE = 32, + NL80211_FEATURE_LOW_PRIORITY_SCAN = 64, + NL80211_FEATURE_SCAN_FLUSH = 128, + NL80211_FEATURE_AP_SCAN = 256, + NL80211_FEATURE_VIF_TXPOWER = 512, + NL80211_FEATURE_NEED_OBSS_SCAN = 1024, + NL80211_FEATURE_P2P_GO_CTWIN = 2048, + NL80211_FEATURE_P2P_GO_OPPPS = 4096, + NL80211_FEATURE_ADVERTISE_CHAN_LIMITS = 16384, + NL80211_FEATURE_FULL_AP_CLIENT_STATE = 32768, + NL80211_FEATURE_USERSPACE_MPM = 65536, + NL80211_FEATURE_ACTIVE_MONITOR = 131072, + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 262144, + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 524288, + NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 1048576, + NL80211_FEATURE_QUIET = 2097152, + NL80211_FEATURE_TX_POWER_INSERTION = 4194304, + NL80211_FEATURE_ACKTO_ESTIMATION = 8388608, + NL80211_FEATURE_STATIC_SMPS = 16777216, + NL80211_FEATURE_DYNAMIC_SMPS = 33554432, + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 67108864, + NL80211_FEATURE_MAC_ON_CREATE = 134217728, + NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 268435456, + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 536870912, + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1073741824, + NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 2147483648, +}; + +enum nl80211_connect_failed_reason { + NL80211_CONN_FAIL_MAX_CLIENTS = 0, + NL80211_CONN_FAIL_BLOCKED_CLIENT = 1, +}; + +enum nl80211_timeout_reason { + 
NL80211_TIMEOUT_UNSPECIFIED = 0, + NL80211_TIMEOUT_SCAN = 1, + NL80211_TIMEOUT_AUTH = 2, + NL80211_TIMEOUT_ASSOC = 3, +}; + +enum nl80211_scan_flags { + NL80211_SCAN_FLAG_LOW_PRIORITY = 1, + NL80211_SCAN_FLAG_FLUSH = 2, + NL80211_SCAN_FLAG_AP = 4, + NL80211_SCAN_FLAG_RANDOM_ADDR = 8, + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME = 16, + NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP = 32, + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE = 64, + NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 128, + NL80211_SCAN_FLAG_LOW_SPAN = 256, + NL80211_SCAN_FLAG_LOW_POWER = 512, + NL80211_SCAN_FLAG_HIGH_ACCURACY = 1024, + NL80211_SCAN_FLAG_RANDOM_SN = 2048, + NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 4096, + NL80211_SCAN_FLAG_FREQ_KHZ = 8192, + NL80211_SCAN_FLAG_COLOCATED_6GHZ = 16384, +}; + +enum nl80211_acl_policy { + NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED = 0, + NL80211_ACL_POLICY_DENY_UNLESS_LISTED = 1, +}; + +enum nl80211_smps_mode { + NL80211_SMPS_OFF = 0, + NL80211_SMPS_STATIC = 1, + NL80211_SMPS_DYNAMIC = 2, + __NL80211_SMPS_AFTER_LAST = 3, + NL80211_SMPS_MAX = 2, +}; + +enum nl80211_radar_event { + NL80211_RADAR_DETECTED = 0, + NL80211_RADAR_CAC_FINISHED = 1, + NL80211_RADAR_CAC_ABORTED = 2, + NL80211_RADAR_NOP_FINISHED = 3, + NL80211_RADAR_PRE_CAC_EXPIRED = 4, + NL80211_RADAR_CAC_STARTED = 5, +}; + +enum nl80211_protocol_features { + NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP = 1, +}; + +enum nl80211_crit_proto_id { + NL80211_CRIT_PROTO_UNSPEC = 0, + NL80211_CRIT_PROTO_DHCP = 1, + NL80211_CRIT_PROTO_EAPOL = 2, + NL80211_CRIT_PROTO_APIPA = 3, + NUM_NL80211_CRIT_PROTO = 4, +}; + +enum nl80211_sched_scan_plan { + __NL80211_SCHED_SCAN_PLAN_INVALID = 0, + NL80211_SCHED_SCAN_PLAN_INTERVAL = 1, + NL80211_SCHED_SCAN_PLAN_ITERATIONS = 2, + __NL80211_SCHED_SCAN_PLAN_AFTER_LAST = 3, + NL80211_SCHED_SCAN_PLAN_MAX = 2, +}; + +struct nl80211_bss_select_rssi_adjust { + __u8 band; + __s8 delta; +}; + +enum nl80211_bss_select_attr { + __NL80211_BSS_SELECT_ATTR_INVALID = 0, + NL80211_BSS_SELECT_ATTR_RSSI = 1, + NL80211_BSS_SELECT_ATTR_BAND_PREF = 2, + NL80211_BSS_SELECT_ATTR_RSSI_ADJUST = 3, + __NL80211_BSS_SELECT_ATTR_AFTER_LAST = 4, + NL80211_BSS_SELECT_ATTR_MAX = 3, +}; + +enum nl80211_nan_function_type { + NL80211_NAN_FUNC_PUBLISH = 0, + NL80211_NAN_FUNC_SUBSCRIBE = 1, + NL80211_NAN_FUNC_FOLLOW_UP = 2, + __NL80211_NAN_FUNC_TYPE_AFTER_LAST = 3, + NL80211_NAN_FUNC_MAX_TYPE = 2, +}; + +enum nl80211_nan_publish_type { + NL80211_NAN_SOLICITED_PUBLISH = 1, + NL80211_NAN_UNSOLICITED_PUBLISH = 2, +}; + +enum nl80211_nan_func_term_reason { + NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST = 0, + NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED = 1, + NL80211_NAN_FUNC_TERM_REASON_ERROR = 2, +}; + +enum nl80211_nan_func_attributes { + __NL80211_NAN_FUNC_INVALID = 0, + NL80211_NAN_FUNC_TYPE = 1, + NL80211_NAN_FUNC_SERVICE_ID = 2, + NL80211_NAN_FUNC_PUBLISH_TYPE = 3, + NL80211_NAN_FUNC_PUBLISH_BCAST = 4, + NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE = 5, + NL80211_NAN_FUNC_FOLLOW_UP_ID = 6, + NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID = 7, + NL80211_NAN_FUNC_FOLLOW_UP_DEST = 8, + NL80211_NAN_FUNC_CLOSE_RANGE = 9, + NL80211_NAN_FUNC_TTL = 10, + NL80211_NAN_FUNC_SERVICE_INFO = 11, + NL80211_NAN_FUNC_SRF = 12, + NL80211_NAN_FUNC_RX_MATCH_FILTER = 13, + NL80211_NAN_FUNC_TX_MATCH_FILTER = 14, + NL80211_NAN_FUNC_INSTANCE_ID = 15, + NL80211_NAN_FUNC_TERM_REASON = 16, + NUM_NL80211_NAN_FUNC_ATTR = 17, + NL80211_NAN_FUNC_ATTR_MAX = 16, +}; + +enum nl80211_nan_srf_attributes { + __NL80211_NAN_SRF_INVALID = 0, + NL80211_NAN_SRF_INCLUDE = 1, + NL80211_NAN_SRF_BF = 2, + 
NL80211_NAN_SRF_BF_IDX = 3, + NL80211_NAN_SRF_MAC_ADDRS = 4, + NUM_NL80211_NAN_SRF_ATTR = 5, + NL80211_NAN_SRF_ATTR_MAX = 4, +}; + +enum nl80211_nan_match_attributes { + __NL80211_NAN_MATCH_INVALID = 0, + NL80211_NAN_MATCH_FUNC_LOCAL = 1, + NL80211_NAN_MATCH_FUNC_PEER = 2, + NUM_NL80211_NAN_MATCH_ATTR = 3, + NL80211_NAN_MATCH_ATTR_MAX = 2, +}; + +enum nl80211_external_auth_action { + NL80211_EXTERNAL_AUTH_START = 0, + NL80211_EXTERNAL_AUTH_ABORT = 1, +}; + +enum nl80211_ftm_responder_attributes { + __NL80211_FTM_RESP_ATTR_INVALID = 0, + NL80211_FTM_RESP_ATTR_ENABLED = 1, + NL80211_FTM_RESP_ATTR_LCI = 2, + NL80211_FTM_RESP_ATTR_CIVICLOC = 3, + __NL80211_FTM_RESP_ATTR_LAST = 4, + NL80211_FTM_RESP_ATTR_MAX = 3, +}; + +enum nl80211_ftm_responder_stats { + __NL80211_FTM_STATS_INVALID = 0, + NL80211_FTM_STATS_SUCCESS_NUM = 1, + NL80211_FTM_STATS_PARTIAL_NUM = 2, + NL80211_FTM_STATS_FAILED_NUM = 3, + NL80211_FTM_STATS_ASAP_NUM = 4, + NL80211_FTM_STATS_NON_ASAP_NUM = 5, + NL80211_FTM_STATS_TOTAL_DURATION_MSEC = 6, + NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM = 7, + NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM = 8, + NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM = 9, + NL80211_FTM_STATS_PAD = 10, + __NL80211_FTM_STATS_AFTER_LAST = 11, + NL80211_FTM_STATS_MAX = 10, +}; + +enum nl80211_peer_measurement_type { + NL80211_PMSR_TYPE_INVALID = 0, + NL80211_PMSR_TYPE_FTM = 1, + NUM_NL80211_PMSR_TYPES = 2, + NL80211_PMSR_TYPE_MAX = 1, +}; + +enum nl80211_peer_measurement_req { + __NL80211_PMSR_REQ_ATTR_INVALID = 0, + NL80211_PMSR_REQ_ATTR_DATA = 1, + NL80211_PMSR_REQ_ATTR_GET_AP_TSF = 2, + NUM_NL80211_PMSR_REQ_ATTRS = 3, + NL80211_PMSR_REQ_ATTR_MAX = 2, +}; + +enum nl80211_peer_measurement_peer_attrs { + __NL80211_PMSR_PEER_ATTR_INVALID = 0, + NL80211_PMSR_PEER_ATTR_ADDR = 1, + NL80211_PMSR_PEER_ATTR_CHAN = 2, + NL80211_PMSR_PEER_ATTR_REQ = 3, + NL80211_PMSR_PEER_ATTR_RESP = 4, + NUM_NL80211_PMSR_PEER_ATTRS = 5, + NL80211_PMSR_PEER_ATTR_MAX = 4, +}; + +enum nl80211_peer_measurement_attrs { + __NL80211_PMSR_ATTR_INVALID = 0, + NL80211_PMSR_ATTR_MAX_PEERS = 1, + NL80211_PMSR_ATTR_REPORT_AP_TSF = 2, + NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR = 3, + NL80211_PMSR_ATTR_TYPE_CAPA = 4, + NL80211_PMSR_ATTR_PEERS = 5, + NUM_NL80211_PMSR_ATTR = 6, + NL80211_PMSR_ATTR_MAX = 5, +}; + +enum nl80211_peer_measurement_ftm_capa { + __NL80211_PMSR_FTM_CAPA_ATTR_INVALID = 0, + NL80211_PMSR_FTM_CAPA_ATTR_ASAP = 1, + NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP = 2, + NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI = 3, + NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC = 4, + NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES = 5, + NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS = 6, + NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT = 7, + NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST = 8, + NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED = 9, + NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED = 10, + NUM_NL80211_PMSR_FTM_CAPA_ATTR = 11, + NL80211_PMSR_FTM_CAPA_ATTR_MAX = 10, +}; + +enum nl80211_peer_measurement_ftm_req { + __NL80211_PMSR_FTM_REQ_ATTR_INVALID = 0, + NL80211_PMSR_FTM_REQ_ATTR_ASAP = 1, + NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE = 2, + NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP = 3, + NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD = 4, + NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION = 5, + NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST = 6, + NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES = 7, + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI = 8, + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC = 9, + NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED = 10, + NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED = 11, + 
NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK = 12, + NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR = 13, + NUM_NL80211_PMSR_FTM_REQ_ATTR = 14, + NL80211_PMSR_FTM_REQ_ATTR_MAX = 13, +}; + +enum nl80211_obss_pd_attributes { + __NL80211_HE_OBSS_PD_ATTR_INVALID = 0, + NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET = 1, + NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET = 2, + NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET = 3, + NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP = 4, + NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP = 5, + NL80211_HE_OBSS_PD_ATTR_SR_CTRL = 6, + __NL80211_HE_OBSS_PD_ATTR_LAST = 7, + NL80211_HE_OBSS_PD_ATTR_MAX = 6, +}; + +enum nl80211_bss_color_attributes { + __NL80211_HE_BSS_COLOR_ATTR_INVALID = 0, + NL80211_HE_BSS_COLOR_ATTR_COLOR = 1, + NL80211_HE_BSS_COLOR_ATTR_DISABLED = 2, + NL80211_HE_BSS_COLOR_ATTR_PARTIAL = 3, + __NL80211_HE_BSS_COLOR_ATTR_LAST = 4, + NL80211_HE_BSS_COLOR_ATTR_MAX = 3, +}; + +enum nl80211_iftype_akm_attributes { + __NL80211_IFTYPE_AKM_ATTR_INVALID = 0, + NL80211_IFTYPE_AKM_ATTR_IFTYPES = 1, + NL80211_IFTYPE_AKM_ATTR_SUITES = 2, + __NL80211_IFTYPE_AKM_ATTR_LAST = 3, + NL80211_IFTYPE_AKM_ATTR_MAX = 2, +}; + +enum nl80211_fils_discovery_attributes { + __NL80211_FILS_DISCOVERY_ATTR_INVALID = 0, + NL80211_FILS_DISCOVERY_ATTR_INT_MIN = 1, + NL80211_FILS_DISCOVERY_ATTR_INT_MAX = 2, + NL80211_FILS_DISCOVERY_ATTR_TMPL = 3, + __NL80211_FILS_DISCOVERY_ATTR_LAST = 4, + NL80211_FILS_DISCOVERY_ATTR_MAX = 3, +}; + +enum nl80211_unsol_bcast_probe_resp_attributes { + __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INVALID = 0, + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT = 1, + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL = 2, + __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST = 3, + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX = 2, +}; + +enum nl80211_sae_pwe_mechanism { + NL80211_SAE_PWE_UNSPECIFIED = 0, + NL80211_SAE_PWE_HUNT_AND_PECK = 1, + NL80211_SAE_PWE_HASH_TO_ELEMENT = 2, + NL80211_SAE_PWE_BOTH = 3, +}; + +enum nl80211_sar_attrs { + __NL80211_SAR_ATTR_INVALID = 0, + NL80211_SAR_ATTR_TYPE = 1, + NL80211_SAR_ATTR_SPECS = 2, + __NL80211_SAR_ATTR_LAST = 3, + NL80211_SAR_ATTR_MAX = 2, +}; + +enum nl80211_sar_specs_attrs { + __NL80211_SAR_ATTR_SPECS_INVALID = 0, + NL80211_SAR_ATTR_SPECS_POWER = 1, + NL80211_SAR_ATTR_SPECS_RANGE_INDEX = 2, + NL80211_SAR_ATTR_SPECS_START_FREQ = 3, + NL80211_SAR_ATTR_SPECS_END_FREQ = 4, + __NL80211_SAR_ATTR_SPECS_LAST = 5, + NL80211_SAR_ATTR_SPECS_MAX = 4, +}; + +enum nl80211_mbssid_config_attributes { + __NL80211_MBSSID_CONFIG_ATTR_INVALID = 0, + NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 1, + NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 2, + NL80211_MBSSID_CONFIG_ATTR_INDEX = 3, + NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 4, + NL80211_MBSSID_CONFIG_ATTR_EMA = 5, + __NL80211_MBSSID_CONFIG_ATTR_LAST = 6, + NL80211_MBSSID_CONFIG_ATTR_MAX = 5, +}; + +enum nl80211_ap_settings_flags { + NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = 1, + NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 2, +}; + +enum nl80211_wiphy_radio_attrs { + __NL80211_WIPHY_RADIO_ATTR_INVALID = 0, + NL80211_WIPHY_RADIO_ATTR_INDEX = 1, + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 2, + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 3, + NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK = 4, + __NL80211_WIPHY_RADIO_ATTR_LAST = 5, + NL80211_WIPHY_RADIO_ATTR_MAX = 4, +}; + +enum nl80211_wiphy_radio_freq_range { + __NL80211_WIPHY_RADIO_FREQ_ATTR_INVALID = 0, + NL80211_WIPHY_RADIO_FREQ_ATTR_START = 1, + NL80211_WIPHY_RADIO_FREQ_ATTR_END = 2, + __NL80211_WIPHY_RADIO_FREQ_ATTR_LAST = 3, + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 2, +}; + +enum 
nla_policy_validation { + NLA_VALIDATE_NONE = 0, + NLA_VALIDATE_RANGE = 1, + NLA_VALIDATE_RANGE_WARN_TOO_LONG = 2, + NLA_VALIDATE_MIN = 3, + NLA_VALIDATE_MAX = 4, + NLA_VALIDATE_MASK = 5, + NLA_VALIDATE_RANGE_PTR = 6, + NLA_VALIDATE_FUNCTION = 7, +}; + +struct genlmsghdr { + __u8 cmd; + __u8 version; + __u16 reserved; +}; + +struct genl_multicast_group { + char name[16]; + u8 flags; +}; + +struct genl_split_ops; + +struct genl_info; + +struct genl_ops; + +struct genl_small_ops; + +struct genl_family { + unsigned int hdrsize; + char name[16]; + unsigned int version; + unsigned int maxattr; + u8 netnsok: 1; + u8 parallel_ops: 1; + u8 n_ops; + u8 n_small_ops; + u8 n_split_ops; + u8 n_mcgrps; + u8 resv_start_op; + const struct nla_policy *policy; + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + int (*bind)(int); + void (*unbind)(int); + const struct genl_ops *ops; + const struct genl_small_ops *small_ops; + const struct genl_split_ops *split_ops; + const struct genl_multicast_group *mcgrps; + struct module *module; + size_t sock_priv_size; + void (*sock_priv_init)(void *); + void (*sock_priv_destroy)(void *); + int id; + unsigned int mcgrp_offset; + struct xarray *sock_privs; +}; + +struct genl_split_ops { + union { + struct { + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + int (*doit)(struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + }; + struct { + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + }; + }; + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_info { + u32 snd_seq; + u32 snd_portid; + const struct genl_family *family; + const struct nlmsghdr *nlhdr; + struct genlmsghdr *genlhdr; + struct nlattr **attrs; + possible_net_t _net; + union { + u8 ctx[48]; + void *user_ptr[2]; + }; + struct netlink_ext_ack *extack; +}; + +struct genl_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_small_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +enum genl_validate_flags { + GENL_DONT_VALIDATE_STRICT = 1, + GENL_DONT_VALIDATE_DUMP = 2, + GENL_DONT_VALIDATE_DUMP_STRICT = 4, +}; + +struct rfkill_ops { + void (*poll)(struct rfkill *, void *); + void (*query)(struct rfkill *, void *); + int (*set_block)(void *, bool); +}; + +enum ieee80211_regulatory_flags { + REGULATORY_CUSTOM_REG = 1, + REGULATORY_STRICT_REG = 2, + REGULATORY_DISABLE_BEACON_HINTS = 4, + REGULATORY_COUNTRY_IE_FOLLOW_POWER = 8, + REGULATORY_COUNTRY_IE_IGNORE = 16, + REGULATORY_ENABLE_RELAX_NO_IR = 32, + REGULATORY_WIPHY_SELF_MANAGED = 128, +}; + +enum ieee80211_channel_flags { + IEEE80211_CHAN_DISABLED = 1, + IEEE80211_CHAN_NO_IR = 2, + IEEE80211_CHAN_PSD = 4, + IEEE80211_CHAN_RADAR = 8, + IEEE80211_CHAN_NO_HT40PLUS = 16, + IEEE80211_CHAN_NO_HT40MINUS = 32, + IEEE80211_CHAN_NO_OFDM = 
64, + IEEE80211_CHAN_NO_80MHZ = 128, + IEEE80211_CHAN_NO_160MHZ = 256, + IEEE80211_CHAN_INDOOR_ONLY = 512, + IEEE80211_CHAN_IR_CONCURRENT = 1024, + IEEE80211_CHAN_NO_20MHZ = 2048, + IEEE80211_CHAN_NO_10MHZ = 4096, + IEEE80211_CHAN_NO_HE = 8192, + IEEE80211_CHAN_1MHZ = 16384, + IEEE80211_CHAN_2MHZ = 32768, + IEEE80211_CHAN_4MHZ = 65536, + IEEE80211_CHAN_8MHZ = 131072, + IEEE80211_CHAN_16MHZ = 262144, + IEEE80211_CHAN_NO_320MHZ = 524288, + IEEE80211_CHAN_NO_EHT = 1048576, + IEEE80211_CHAN_DFS_CONCURRENT = 2097152, + IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 4194304, + IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 8388608, + IEEE80211_CHAN_CAN_MONITOR = 16777216, + IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = 33554432, +}; + +enum ieee80211_rate_flags { + IEEE80211_RATE_SHORT_PREAMBLE = 1, + IEEE80211_RATE_MANDATORY_A = 2, + IEEE80211_RATE_MANDATORY_B = 4, + IEEE80211_RATE_MANDATORY_G = 8, + IEEE80211_RATE_ERP_G = 16, + IEEE80211_RATE_SUPPORTS_5MHZ = 32, + IEEE80211_RATE_SUPPORTS_10MHZ = 64, +}; + +enum ieee80211_privacy { + IEEE80211_PRIVACY_ON = 0, + IEEE80211_PRIVACY_OFF = 1, + IEEE80211_PRIVACY_ANY = 2, +}; + +struct vif_params { + u32 flags; + int use_4addr; + u8 macaddr[6]; + const u8 *vht_mumimo_groups; + const u8 *vht_mumimo_follow_addr; +}; + +struct key_params { + const u8 *key; + const u8 *seq; + int key_len; + int seq_len; + u16 vlan_id; + u32 cipher; + enum nl80211_key_mode mode; +}; + +struct cfg80211_tid_cfg { + bool config_override; + u8 tids; + long: 32; + u64 mask; + enum nl80211_tid_config noack; + u8 retry_long; + u8 retry_short; + enum nl80211_tid_config ampdu; + enum nl80211_tid_config rtscts; + enum nl80211_tid_config amsdu; + enum nl80211_tx_rate_setting txrate_type; + struct cfg80211_bitrate_mask txrate_mask; +}; + +struct cfg80211_tid_config { + const u8 *peer; + u32 n_tid_conf; + struct cfg80211_tid_cfg tid_conf[0]; +}; + +struct cfg80211_fils_aad { + const u8 *macaddr; + const u8 *kek; + u8 kek_len; + const u8 *snonce; + const u8 *anonce; +}; + +struct cfg80211_set_hw_timestamp { + const u8 *macaddr; + bool enable; +}; + +enum survey_info_flags { + SURVEY_INFO_NOISE_DBM = 1, + SURVEY_INFO_IN_USE = 2, + SURVEY_INFO_TIME = 4, + SURVEY_INFO_TIME_BUSY = 8, + SURVEY_INFO_TIME_EXT_BUSY = 16, + SURVEY_INFO_TIME_RX = 32, + SURVEY_INFO_TIME_TX = 64, + SURVEY_INFO_TIME_SCAN = 128, + SURVEY_INFO_TIME_BSS_RX = 256, +}; + +struct survey_info { + struct ieee80211_channel *channel; + long: 32; + u64 time; + u64 time_busy; + u64 time_ext_busy; + u64 time_rx; + u64 time_tx; + u64 time_scan; + u64 time_bss_rx; + u32 filled; + s8 noise; +}; + +struct cfg80211_crypto_settings { + u32 wpa_versions; + u32 cipher_group; + int n_ciphers_pairwise; + u32 ciphers_pairwise[5]; + int n_akm_suites; + u32 akm_suites[10]; + bool control_port; + __be16 control_port_ethertype; + bool control_port_no_encrypt; + bool control_port_over_nl80211; + bool control_port_no_preauth; + const u8 *psk; + const u8 *sae_pwd; + u8 sae_pwd_len; + enum nl80211_sae_pwe_mechanism sae_pwe; +}; + +struct cfg80211_mbssid_config { + struct wireless_dev *tx_wdev; + u8 index; + bool ema; +}; + +struct cfg80211_mbssid_elems { + u8 cnt; + struct { + const u8 *data; + size_t len; + } elem[0]; +}; + +struct cfg80211_rnr_elems { + u8 cnt; + struct { + const u8 *data; + size_t len; + } elem[0]; +}; + +struct cfg80211_beacon_data { + unsigned int link_id; + const u8 *head; + const u8 *tail; + const u8 *beacon_ies; + const u8 *proberesp_ies; + const u8 *assocresp_ies; + const u8 *probe_resp; + const u8 *lci; + const u8 *civicloc; + struct 
cfg80211_mbssid_elems *mbssid_ies; + struct cfg80211_rnr_elems *rnr_ies; + s8 ftm_responder; + size_t head_len; + size_t tail_len; + size_t beacon_ies_len; + size_t proberesp_ies_len; + size_t assocresp_ies_len; + size_t probe_resp_len; + size_t lci_len; + size_t civicloc_len; + struct cfg80211_he_bss_color he_bss_color; + bool he_bss_color_valid; +}; + +struct cfg80211_acl_data { + enum nl80211_acl_policy acl_policy; + int n_acl_entries; + struct mac_address mac_addrs[0]; +}; + +struct cfg80211_fils_discovery { + bool update; + u32 min_interval; + u32 max_interval; + size_t tmpl_len; + const u8 *tmpl; +}; + +struct cfg80211_unsol_bcast_probe_resp { + bool update; + u32 interval; + size_t tmpl_len; + const u8 *tmpl; +}; + +struct cfg80211_ap_settings { + struct cfg80211_chan_def chandef; + struct cfg80211_beacon_data beacon; + int beacon_interval; + int dtim_period; + const u8 *ssid; + size_t ssid_len; + enum nl80211_hidden_ssid hidden_ssid; + struct cfg80211_crypto_settings crypto; + bool privacy; + enum nl80211_auth_type auth_type; + int inactivity_timeout; + u8 p2p_ctwindow; + bool p2p_opp_ps; + const struct cfg80211_acl_data *acl; + bool pbss; + struct cfg80211_bitrate_mask beacon_rate; + const struct ieee80211_ht_cap *ht_cap; + const struct ieee80211_vht_cap *vht_cap; + const struct ieee80211_he_cap_elem *he_cap; + const struct ieee80211_he_operation *he_oper; + const struct ieee80211_eht_cap_elem *eht_cap; + const struct ieee80211_eht_operation *eht_oper; + bool ht_required; + bool vht_required; + bool he_required; + bool sae_h2e_required; + bool twt_responder; + u32 flags; + struct ieee80211_he_obss_pd he_obss_pd; + struct cfg80211_fils_discovery fils_discovery; + struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; + struct cfg80211_mbssid_config mbssid_config; +}; + +struct cfg80211_ap_update { + struct cfg80211_beacon_data beacon; + struct cfg80211_fils_discovery fils_discovery; + struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; +}; + +struct cfg80211_csa_settings { + struct cfg80211_chan_def chandef; + struct cfg80211_beacon_data beacon_csa; + const u16 *counter_offsets_beacon; + const u16 *counter_offsets_presp; + unsigned int n_counter_offsets_beacon; + unsigned int n_counter_offsets_presp; + struct cfg80211_beacon_data beacon_after; + bool radar_required; + bool block_tx; + u8 count; + u8 link_id; +}; + +struct cfg80211_color_change_settings { + struct cfg80211_beacon_data beacon_color_change; + u16 counter_offset_beacon; + u16 counter_offset_presp; + struct cfg80211_beacon_data beacon_next; + u8 count; + u8 color; + u8 link_id; +}; + +enum station_parameters_apply_mask { + STATION_PARAM_APPLY_UAPSD = 1, + STATION_PARAM_APPLY_CAPABILITY = 2, + STATION_PARAM_APPLY_PLINK_STATE = 4, +}; + +struct sta_txpwr { + s16 power; + enum nl80211_tx_power_setting type; +}; + +struct link_station_parameters { + const u8 *mld_mac; + int link_id; + const u8 *link_mac; + const u8 *supported_rates; + u8 supported_rates_len; + const struct ieee80211_ht_cap *ht_capa; + const struct ieee80211_vht_cap *vht_capa; + u8 opmode_notif; + bool opmode_notif_used; + const struct ieee80211_he_cap_elem *he_capa; + u8 he_capa_len; + struct sta_txpwr txpwr; + bool txpwr_set; + const struct ieee80211_he_6ghz_capa *he_6ghz_capa; + const struct ieee80211_eht_cap_elem *eht_capa; + u8 eht_capa_len; +}; + +struct link_station_del_parameters { + const u8 *mld_mac; + u32 link_id; +}; + +struct cfg80211_ttlm_params { + u16 dlink[8]; + u16 ulink[8]; +}; + +struct station_parameters { + struct 
net_device *vlan; + u32 sta_flags_mask; + u32 sta_flags_set; + u32 sta_modify_mask; + int listen_interval; + u16 aid; + u16 vlan_id; + u16 peer_aid; + u8 plink_action; + u8 plink_state; + u8 uapsd_queues; + u8 max_sp; + enum nl80211_mesh_power_mode local_pm; + u16 capability; + const u8 *ext_capab; + u8 ext_capab_len; + const u8 *supported_channels; + u8 supported_channels_len; + const u8 *supported_oper_classes; + u8 supported_oper_classes_len; + int support_p2p_ps; + u16 airtime_weight; + struct link_station_parameters link_sta_params; +}; + +struct station_del_parameters { + const u8 *mac; + u8 subtype; + u16 reason_code; + int link_id; +}; + +enum cfg80211_station_type { + CFG80211_STA_AP_CLIENT = 0, + CFG80211_STA_AP_CLIENT_UNASSOC = 1, + CFG80211_STA_AP_MLME_CLIENT = 2, + CFG80211_STA_AP_STA = 3, + CFG80211_STA_IBSS = 4, + CFG80211_STA_TDLS_PEER_SETUP = 5, + CFG80211_STA_TDLS_PEER_ACTIVE = 6, + CFG80211_STA_MESH_PEER_KERNEL = 7, + CFG80211_STA_MESH_PEER_USER = 8, +}; + +enum rate_info_flags { + RATE_INFO_FLAGS_MCS = 1, + RATE_INFO_FLAGS_VHT_MCS = 2, + RATE_INFO_FLAGS_SHORT_GI = 4, + RATE_INFO_FLAGS_DMG = 8, + RATE_INFO_FLAGS_HE_MCS = 16, + RATE_INFO_FLAGS_EDMG = 32, + RATE_INFO_FLAGS_EXTENDED_SC_DMG = 64, + RATE_INFO_FLAGS_EHT_MCS = 128, + RATE_INFO_FLAGS_S1G_MCS = 256, +}; + +enum rate_info_bw { + RATE_INFO_BW_20 = 0, + RATE_INFO_BW_5 = 1, + RATE_INFO_BW_10 = 2, + RATE_INFO_BW_40 = 3, + RATE_INFO_BW_80 = 4, + RATE_INFO_BW_160 = 5, + RATE_INFO_BW_HE_RU = 6, + RATE_INFO_BW_320 = 7, + RATE_INFO_BW_EHT_RU = 8, + RATE_INFO_BW_1 = 9, + RATE_INFO_BW_2 = 10, + RATE_INFO_BW_4 = 11, + RATE_INFO_BW_8 = 12, + RATE_INFO_BW_16 = 13, +}; + +enum bss_param_flags { + BSS_PARAM_FLAGS_CTS_PROT = 1, + BSS_PARAM_FLAGS_SHORT_PREAMBLE = 2, + BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 4, +}; + +struct sta_bss_parameters { + u8 flags; + u8 dtim_period; + u16 beacon_interval; +}; + +struct cfg80211_txq_stats { + u32 filled; + u32 backlog_bytes; + u32 backlog_packets; + u32 flows; + u32 drops; + u32 ecn_marks; + u32 overlimit; + u32 overmemory; + u32 collisions; + u32 tx_bytes; + u32 tx_packets; + u32 max_flows; +}; + +struct cfg80211_tid_stats { + u32 filled; + long: 32; + u64 rx_msdu; + u64 tx_msdu; + u64 tx_msdu_retries; + u64 tx_msdu_failed; + struct cfg80211_txq_stats txq_stats; +}; + +struct station_info { + u64 filled; + u32 connected_time; + u32 inactive_time; + u64 assoc_at; + u64 rx_bytes; + u64 tx_bytes; + u16 llid; + u16 plid; + u8 plink_state; + s8 signal; + s8 signal_avg; + u8 chains; + s8 chain_signal[4]; + s8 chain_signal_avg[4]; + struct rate_info txrate; + struct rate_info rxrate; + u32 rx_packets; + u32 tx_packets; + u32 tx_retries; + u32 tx_failed; + u32 rx_dropped_misc; + struct sta_bss_parameters bss_param; + struct nl80211_sta_flag_update sta_flags; + int generation; + const u8 *assoc_req_ies; + size_t assoc_req_ies_len; + u32 beacon_loss_count; + long: 32; + s64 t_offset; + enum nl80211_mesh_power_mode local_pm; + enum nl80211_mesh_power_mode peer_pm; + enum nl80211_mesh_power_mode nonpeer_pm; + u32 expected_throughput; + u64 tx_duration; + u64 rx_duration; + u64 rx_beacon; + u8 rx_beacon_signal_avg; + u8 connected_to_gate; + struct cfg80211_tid_stats *pertid; + s8 ack_signal; + s8 avg_ack_signal; + u16 airtime_weight; + u32 rx_mpdu_count; + u32 fcs_err_count; + u32 airtime_link_metric; + u8 connected_to_as; + bool mlo_params_valid; + u8 assoc_link_id; + long: 0; + u8 mld_addr[6]; + const u8 *assoc_resp_ies; + size_t assoc_resp_ies_len; + long: 32; +}; + +struct cfg80211_sar_sub_specs { + s32 
power; + u32 freq_range_index; +}; + +struct cfg80211_sar_specs { + enum nl80211_sar_type type; + u32 num_sub_specs; + struct cfg80211_sar_sub_specs sub_specs[0]; +}; + +enum monitor_flags { + MONITOR_FLAG_CHANGED = 1, + MONITOR_FLAG_FCSFAIL = 2, + MONITOR_FLAG_PLCPFAIL = 4, + MONITOR_FLAG_CONTROL = 8, + MONITOR_FLAG_OTHER_BSS = 16, + MONITOR_FLAG_COOK_FRAMES = 32, + MONITOR_FLAG_ACTIVE = 64, + MONITOR_FLAG_SKIP_TX = 128, +}; + +enum mpath_info_flags { + MPATH_INFO_FRAME_QLEN = 1, + MPATH_INFO_SN = 2, + MPATH_INFO_METRIC = 4, + MPATH_INFO_EXPTIME = 8, + MPATH_INFO_DISCOVERY_TIMEOUT = 16, + MPATH_INFO_DISCOVERY_RETRIES = 32, + MPATH_INFO_FLAGS = 64, + MPATH_INFO_HOP_COUNT = 128, + MPATH_INFO_PATH_CHANGE = 256, +}; + +struct mpath_info { + u32 filled; + u32 frame_qlen; + u32 sn; + u32 metric; + u32 exptime; + u32 discovery_timeout; + u8 discovery_retries; + u8 flags; + u8 hop_count; + u32 path_change_count; + int generation; +}; + +struct bss_parameters { + int link_id; + int use_cts_prot; + int use_short_preamble; + int use_short_slot_time; + const u8 *basic_rates; + u8 basic_rates_len; + int ap_isolate; + int ht_opmode; + s8 p2p_ctwindow; + s8 p2p_opp_ps; +}; + +struct mesh_config { + u16 dot11MeshRetryTimeout; + u16 dot11MeshConfirmTimeout; + u16 dot11MeshHoldingTimeout; + u16 dot11MeshMaxPeerLinks; + u8 dot11MeshMaxRetries; + u8 dot11MeshTTL; + u8 element_ttl; + bool auto_open_plinks; + u32 dot11MeshNbrOffsetMaxNeighbor; + u8 dot11MeshHWMPmaxPREQretries; + u32 path_refresh_time; + u16 min_discovery_timeout; + u32 dot11MeshHWMPactivePathTimeout; + u16 dot11MeshHWMPpreqMinInterval; + u16 dot11MeshHWMPperrMinInterval; + u16 dot11MeshHWMPnetDiameterTraversalTime; + u8 dot11MeshHWMPRootMode; + bool dot11MeshConnectedToMeshGate; + bool dot11MeshConnectedToAuthServer; + u16 dot11MeshHWMPRannInterval; + bool dot11MeshGateAnnouncementProtocol; + bool dot11MeshForwarding; + s32 rssi_threshold; + u16 ht_opmode; + u32 dot11MeshHWMPactivePathToRootTimeout; + u16 dot11MeshHWMProotInterval; + u16 dot11MeshHWMPconfirmationInterval; + enum nl80211_mesh_power_mode power_mode; + u16 dot11MeshAwakeWindowDuration; + u32 plink_timeout; + bool dot11MeshNolearn; +}; + +struct mesh_setup { + struct cfg80211_chan_def chandef; + const u8 *mesh_id; + u8 mesh_id_len; + u8 sync_method; + u8 path_sel_proto; + u8 path_metric; + u8 auth_id; + const u8 *ie; + u8 ie_len; + bool is_authenticated; + bool is_secure; + bool user_mpm; + u8 dtim_period; + u16 beacon_interval; + int mcast_rate[6]; + u32 basic_rates; + struct cfg80211_bitrate_mask beacon_rate; + bool userspace_handles_dfs; + bool control_port_over_nl80211; +}; + +struct ocb_setup { + struct cfg80211_chan_def chandef; +}; + +struct ieee80211_txq_params { + enum nl80211_ac ac; + u16 txop; + u16 cwmin; + u16 cwmax; + u8 aifs; + int link_id; +}; + +struct cfg80211_auth_request { + struct cfg80211_bss *bss; + const u8 *ie; + size_t ie_len; + enum nl80211_auth_type auth_type; + const u8 *key; + u8 key_len; + s8 key_idx; + const u8 *auth_data; + size_t auth_data_len; + s8 link_id; + const u8 *ap_mld_addr; +}; + +struct cfg80211_assoc_link { + struct cfg80211_bss *bss; + const u8 *elems; + size_t elems_len; + bool disabled; + int error; +}; + +enum cfg80211_assoc_req_flags { + ASSOC_REQ_DISABLE_HT = 1, + ASSOC_REQ_DISABLE_VHT = 2, + ASSOC_REQ_USE_RRM = 4, + CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = 8, + ASSOC_REQ_DISABLE_HE = 16, + ASSOC_REQ_DISABLE_EHT = 32, + CONNECT_REQ_MLO_SUPPORT = 64, + ASSOC_REQ_SPP_AMSDU = 128, +}; + +struct cfg80211_assoc_request { + struct 
cfg80211_bss *bss; + const u8 *ie; + const u8 *prev_bssid; + size_t ie_len; + struct cfg80211_crypto_settings crypto; + bool use_mfp; + u32 flags; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct ieee80211_vht_cap vht_capa; + struct ieee80211_vht_cap vht_capa_mask; + const u8 *fils_kek; + size_t fils_kek_len; + const u8 *fils_nonces; + struct ieee80211_s1g_cap s1g_capa; + struct ieee80211_s1g_cap s1g_capa_mask; + struct cfg80211_assoc_link links[15]; + const u8 *ap_mld_addr; + s8 link_id; +}; + +struct cfg80211_deauth_request { + const u8 *bssid; + const u8 *ie; + size_t ie_len; + u16 reason_code; + bool local_state_change; +}; + +struct cfg80211_disassoc_request { + const u8 *ap_addr; + const u8 *ie; + size_t ie_len; + u16 reason_code; + bool local_state_change; +}; + +struct cfg80211_ibss_params { + const u8 *ssid; + const u8 *bssid; + struct cfg80211_chan_def chandef; + const u8 *ie; + u8 ssid_len; + u8 ie_len; + u16 beacon_interval; + u32 basic_rates; + bool channel_fixed; + bool privacy; + bool control_port; + bool control_port_over_nl80211; + bool userspace_handles_dfs; + int mcast_rate[6]; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct key_params *wep_keys; + int wep_tx_key; +}; + +struct cfg80211_bss_selection { + enum nl80211_bss_select_attr behaviour; + union { + enum nl80211_band band_pref; + struct cfg80211_bss_select_adjust adjust; + } param; +}; + +struct cfg80211_connect_params { + struct ieee80211_channel *channel; + struct ieee80211_channel *channel_hint; + const u8 *bssid; + const u8 *bssid_hint; + const u8 *ssid; + size_t ssid_len; + enum nl80211_auth_type auth_type; + const u8 *ie; + size_t ie_len; + bool privacy; + enum nl80211_mfp mfp; + struct cfg80211_crypto_settings crypto; + const u8 *key; + u8 key_len; + u8 key_idx; + u32 flags; + int bg_scan_period; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct ieee80211_vht_cap vht_capa; + struct ieee80211_vht_cap vht_capa_mask; + bool pbss; + struct cfg80211_bss_selection bss_select; + const u8 *prev_bssid; + const u8 *fils_erp_username; + size_t fils_erp_username_len; + const u8 *fils_erp_realm; + size_t fils_erp_realm_len; + u16 fils_erp_next_seq_num; + const u8 *fils_erp_rrk; + size_t fils_erp_rrk_len; + bool want_1x; + struct ieee80211_edmg edmg; +}; + +enum cfg80211_connect_params_changed { + UPDATE_ASSOC_IES = 1, + UPDATE_FILS_ERP_INFO = 2, + UPDATE_AUTH_TYPE = 4, +}; + +enum wiphy_params_flags { + WIPHY_PARAM_RETRY_SHORT = 1, + WIPHY_PARAM_RETRY_LONG = 2, + WIPHY_PARAM_FRAG_THRESHOLD = 4, + WIPHY_PARAM_RTS_THRESHOLD = 8, + WIPHY_PARAM_COVERAGE_CLASS = 16, + WIPHY_PARAM_DYN_ACK = 32, + WIPHY_PARAM_TXQ_LIMIT = 64, + WIPHY_PARAM_TXQ_MEMORY_LIMIT = 128, + WIPHY_PARAM_TXQ_QUANTUM = 256, +}; + +struct cfg80211_pmksa { + const u8 *bssid; + const u8 *pmkid; + const u8 *pmk; + size_t pmk_len; + const u8 *ssid; + size_t ssid_len; + const u8 *cache_id; + u32 pmk_lifetime; + u8 pmk_reauth_threshold; +}; + +struct cfg80211_coalesce_rules { + int delay; + enum nl80211_coalesce_condition condition; + struct cfg80211_pkt_pattern *patterns; + int n_patterns; +}; + +struct cfg80211_coalesce { + int n_rules; + struct cfg80211_coalesce_rules rules[0]; +}; + +struct cfg80211_wowlan_nd_match { + struct cfg80211_ssid ssid; + int n_channels; + u32 channels[0]; +}; + +struct cfg80211_wowlan_nd_info { + int n_matches; + struct cfg80211_wowlan_nd_match *matches[0]; +}; + +struct cfg80211_wowlan_wakeup { + bool disconnect; + bool 
magic_pkt; + bool gtk_rekey_failure; + bool eap_identity_req; + bool four_way_handshake; + bool rfkill_release; + bool packet_80211; + bool tcp_match; + bool tcp_connlost; + bool tcp_nomoretokens; + bool unprot_deauth_disassoc; + s32 pattern_idx; + u32 packet_present_len; + u32 packet_len; + const void *packet; + struct cfg80211_wowlan_nd_info *net_detect; +}; + +struct cfg80211_gtk_rekey_data { + const u8 *kek; + const u8 *kck; + const u8 *replay_ctr; + u32 akm; + u8 kek_len; + u8 kck_len; +}; + +struct cfg80211_update_ft_ies_params { + u16 md; + const u8 *ie; + size_t ie_len; +}; + +struct cfg80211_mgmt_tx_params { + struct ieee80211_channel *chan; + bool offchan; + unsigned int wait; + const u8 *buf; + size_t len; + bool no_cck; + bool dont_wait_for_ack; + int n_csa_offsets; + const u16 *csa_offsets; + int link_id; +}; + +struct cfg80211_dscp_exception { + u8 dscp; + u8 up; +}; + +struct cfg80211_dscp_range { + u8 low; + u8 high; +}; + +struct cfg80211_qos_map { + u8 num_des; + struct cfg80211_dscp_exception dscp_exception[21]; + struct cfg80211_dscp_range up[8]; +}; + +struct cfg80211_nan_conf { + u8 master_pref; + u8 bands; +}; + +enum cfg80211_nan_conf_changes { + CFG80211_NAN_CONF_CHANGED_PREF = 1, + CFG80211_NAN_CONF_CHANGED_BANDS = 2, +}; + +struct cfg80211_nan_func_filter { + const u8 *filter; + u8 len; +}; + +struct cfg80211_nan_func { + enum nl80211_nan_function_type type; + u8 service_id[6]; + u8 publish_type; + bool close_range; + bool publish_bcast; + bool subscribe_active; + u8 followup_id; + u8 followup_reqid; + struct mac_address followup_dest; + u32 ttl; + const u8 *serv_spec_info; + u8 serv_spec_info_len; + bool srf_include; + const u8 *srf_bf; + u8 srf_bf_len; + u8 srf_bf_idx; + struct mac_address *srf_macs; + int srf_num_macs; + struct cfg80211_nan_func_filter *rx_filters; + struct cfg80211_nan_func_filter *tx_filters; + u8 num_tx_filters; + u8 num_rx_filters; + u8 instance_id; + u64 cookie; +}; + +struct cfg80211_pmk_conf { + const u8 *aa; + u8 pmk_len; + const u8 *pmk; + const u8 *pmk_r0_name; +}; + +struct cfg80211_external_auth_params { + enum nl80211_external_auth_action action; + u8 bssid[6]; + struct cfg80211_ssid ssid; + unsigned int key_mgmt_suite; + u16 status; + const u8 *pmkid; + u8 mld_addr[6]; +}; + +struct cfg80211_update_owe_info { + u8 peer[6]; + u16 status; + const u8 *ie; + size_t ie_len; + int assoc_link_id; + u8 peer_mld_addr[6]; +}; + +struct mgmt_frame_regs { + u32 global_stypes; + u32 interface_stypes; + u32 global_mcast_stypes; + u32 interface_mcast_stypes; +}; + +struct cfg80211_ops { + int (*suspend)(struct wiphy *, struct cfg80211_wowlan *); + int (*resume)(struct wiphy *); + void (*set_wakeup)(struct wiphy *, bool); + struct wireless_dev * (*add_virtual_intf)(struct wiphy *, const char *, unsigned char, enum nl80211_iftype, struct vif_params *); + int (*del_virtual_intf)(struct wiphy *, struct wireless_dev *); + int (*change_virtual_intf)(struct wiphy *, struct net_device *, enum nl80211_iftype, struct vif_params *); + int (*add_intf_link)(struct wiphy *, struct wireless_dev *, unsigned int); + void (*del_intf_link)(struct wiphy *, struct wireless_dev *, unsigned int); + int (*add_key)(struct wiphy *, struct net_device *, int, u8, bool, const u8 *, struct key_params *); + int (*get_key)(struct wiphy *, struct net_device *, int, u8, bool, const u8 *, void *, void (*)(void *, struct key_params *)); + int (*del_key)(struct wiphy *, struct net_device *, int, u8, bool, const u8 *); + int (*set_default_key)(struct wiphy *, struct net_device *, 
int, u8, bool, bool); + int (*set_default_mgmt_key)(struct wiphy *, struct net_device *, int, u8); + int (*set_default_beacon_key)(struct wiphy *, struct net_device *, int, u8); + int (*start_ap)(struct wiphy *, struct net_device *, struct cfg80211_ap_settings *); + int (*change_beacon)(struct wiphy *, struct net_device *, struct cfg80211_ap_update *); + int (*stop_ap)(struct wiphy *, struct net_device *, unsigned int); + int (*add_station)(struct wiphy *, struct net_device *, const u8 *, struct station_parameters *); + int (*del_station)(struct wiphy *, struct net_device *, struct station_del_parameters *); + int (*change_station)(struct wiphy *, struct net_device *, const u8 *, struct station_parameters *); + int (*get_station)(struct wiphy *, struct net_device *, const u8 *, struct station_info *); + int (*dump_station)(struct wiphy *, struct net_device *, int, u8 *, struct station_info *); + int (*add_mpath)(struct wiphy *, struct net_device *, const u8 *, const u8 *); + int (*del_mpath)(struct wiphy *, struct net_device *, const u8 *); + int (*change_mpath)(struct wiphy *, struct net_device *, const u8 *, const u8 *); + int (*get_mpath)(struct wiphy *, struct net_device *, u8 *, u8 *, struct mpath_info *); + int (*dump_mpath)(struct wiphy *, struct net_device *, int, u8 *, u8 *, struct mpath_info *); + int (*get_mpp)(struct wiphy *, struct net_device *, u8 *, u8 *, struct mpath_info *); + int (*dump_mpp)(struct wiphy *, struct net_device *, int, u8 *, u8 *, struct mpath_info *); + int (*get_mesh_config)(struct wiphy *, struct net_device *, struct mesh_config *); + int (*update_mesh_config)(struct wiphy *, struct net_device *, u32, const struct mesh_config *); + int (*join_mesh)(struct wiphy *, struct net_device *, const struct mesh_config *, const struct mesh_setup *); + int (*leave_mesh)(struct wiphy *, struct net_device *); + int (*join_ocb)(struct wiphy *, struct net_device *, struct ocb_setup *); + int (*leave_ocb)(struct wiphy *, struct net_device *); + int (*change_bss)(struct wiphy *, struct net_device *, struct bss_parameters *); + void (*inform_bss)(struct wiphy *, struct cfg80211_bss *, const struct cfg80211_bss_ies *, void *); + int (*set_txq_params)(struct wiphy *, struct net_device *, struct ieee80211_txq_params *); + int (*libertas_set_mesh_channel)(struct wiphy *, struct net_device *, struct ieee80211_channel *); + int (*set_monitor_channel)(struct wiphy *, struct net_device *, struct cfg80211_chan_def *); + int (*scan)(struct wiphy *, struct cfg80211_scan_request *); + void (*abort_scan)(struct wiphy *, struct wireless_dev *); + int (*auth)(struct wiphy *, struct net_device *, struct cfg80211_auth_request *); + int (*assoc)(struct wiphy *, struct net_device *, struct cfg80211_assoc_request *); + int (*deauth)(struct wiphy *, struct net_device *, struct cfg80211_deauth_request *); + int (*disassoc)(struct wiphy *, struct net_device *, struct cfg80211_disassoc_request *); + int (*connect)(struct wiphy *, struct net_device *, struct cfg80211_connect_params *); + int (*update_connect_params)(struct wiphy *, struct net_device *, struct cfg80211_connect_params *, u32); + int (*disconnect)(struct wiphy *, struct net_device *, u16); + int (*join_ibss)(struct wiphy *, struct net_device *, struct cfg80211_ibss_params *); + int (*leave_ibss)(struct wiphy *, struct net_device *); + int (*set_mcast_rate)(struct wiphy *, struct net_device *, int *); + int (*set_wiphy_params)(struct wiphy *, u32); + int (*set_tx_power)(struct wiphy *, struct wireless_dev *, enum 
nl80211_tx_power_setting, int); + int (*get_tx_power)(struct wiphy *, struct wireless_dev *, int *); + void (*rfkill_poll)(struct wiphy *); + int (*set_bitrate_mask)(struct wiphy *, struct net_device *, unsigned int, const u8 *, const struct cfg80211_bitrate_mask *); + int (*dump_survey)(struct wiphy *, struct net_device *, int, struct survey_info *); + int (*set_pmksa)(struct wiphy *, struct net_device *, struct cfg80211_pmksa *); + int (*del_pmksa)(struct wiphy *, struct net_device *, struct cfg80211_pmksa *); + int (*flush_pmksa)(struct wiphy *, struct net_device *); + int (*remain_on_channel)(struct wiphy *, struct wireless_dev *, struct ieee80211_channel *, unsigned int, u64 *); + int (*cancel_remain_on_channel)(struct wiphy *, struct wireless_dev *, u64); + int (*mgmt_tx)(struct wiphy *, struct wireless_dev *, struct cfg80211_mgmt_tx_params *, u64 *); + int (*mgmt_tx_cancel_wait)(struct wiphy *, struct wireless_dev *, u64); + int (*set_power_mgmt)(struct wiphy *, struct net_device *, bool, int); + int (*set_cqm_rssi_config)(struct wiphy *, struct net_device *, s32, u32); + int (*set_cqm_rssi_range_config)(struct wiphy *, struct net_device *, s32, s32); + int (*set_cqm_txe_config)(struct wiphy *, struct net_device *, u32, u32, u32); + void (*update_mgmt_frame_registrations)(struct wiphy *, struct wireless_dev *, struct mgmt_frame_regs *); + int (*set_antenna)(struct wiphy *, u32, u32); + int (*get_antenna)(struct wiphy *, u32 *, u32 *); + int (*sched_scan_start)(struct wiphy *, struct net_device *, struct cfg80211_sched_scan_request *); + int (*sched_scan_stop)(struct wiphy *, struct net_device *, u64); + int (*set_rekey_data)(struct wiphy *, struct net_device *, struct cfg80211_gtk_rekey_data *); + int (*tdls_mgmt)(struct wiphy *, struct net_device *, const u8 *, int, u8, u8, u16, u32, bool, const u8 *, size_t); + int (*tdls_oper)(struct wiphy *, struct net_device *, const u8 *, enum nl80211_tdls_operation); + int (*probe_client)(struct wiphy *, struct net_device *, const u8 *, u64 *); + int (*set_noack_map)(struct wiphy *, struct net_device *, u16); + int (*get_channel)(struct wiphy *, struct wireless_dev *, unsigned int, struct cfg80211_chan_def *); + int (*start_p2p_device)(struct wiphy *, struct wireless_dev *); + void (*stop_p2p_device)(struct wiphy *, struct wireless_dev *); + int (*set_mac_acl)(struct wiphy *, struct net_device *, const struct cfg80211_acl_data *); + int (*start_radar_detection)(struct wiphy *, struct net_device *, struct cfg80211_chan_def *, u32, int); + void (*end_cac)(struct wiphy *, struct net_device *, unsigned int); + int (*update_ft_ies)(struct wiphy *, struct net_device *, struct cfg80211_update_ft_ies_params *); + int (*crit_proto_start)(struct wiphy *, struct wireless_dev *, enum nl80211_crit_proto_id, u16); + void (*crit_proto_stop)(struct wiphy *, struct wireless_dev *); + int (*set_coalesce)(struct wiphy *, struct cfg80211_coalesce *); + int (*channel_switch)(struct wiphy *, struct net_device *, struct cfg80211_csa_settings *); + int (*set_qos_map)(struct wiphy *, struct net_device *, struct cfg80211_qos_map *); + int (*set_ap_chanwidth)(struct wiphy *, struct net_device *, unsigned int, struct cfg80211_chan_def *); + int (*add_tx_ts)(struct wiphy *, struct net_device *, u8, const u8 *, u8, u16); + int (*del_tx_ts)(struct wiphy *, struct net_device *, u8, const u8 *); + int (*tdls_channel_switch)(struct wiphy *, struct net_device *, const u8 *, u8, struct cfg80211_chan_def *); + void (*tdls_cancel_channel_switch)(struct wiphy *, struct net_device 
*, const u8 *); + int (*start_nan)(struct wiphy *, struct wireless_dev *, struct cfg80211_nan_conf *); + void (*stop_nan)(struct wiphy *, struct wireless_dev *); + int (*add_nan_func)(struct wiphy *, struct wireless_dev *, struct cfg80211_nan_func *); + void (*del_nan_func)(struct wiphy *, struct wireless_dev *, u64); + int (*nan_change_conf)(struct wiphy *, struct wireless_dev *, struct cfg80211_nan_conf *, u32); + int (*set_multicast_to_unicast)(struct wiphy *, struct net_device *, const bool); + int (*get_txq_stats)(struct wiphy *, struct wireless_dev *, struct cfg80211_txq_stats *); + int (*set_pmk)(struct wiphy *, struct net_device *, const struct cfg80211_pmk_conf *); + int (*del_pmk)(struct wiphy *, struct net_device *, const u8 *); + int (*external_auth)(struct wiphy *, struct net_device *, struct cfg80211_external_auth_params *); + int (*tx_control_port)(struct wiphy *, struct net_device *, const u8 *, size_t, const u8 *, const __be16, const bool, int, u64 *); + int (*get_ftm_responder_stats)(struct wiphy *, struct net_device *, struct cfg80211_ftm_responder_stats *); + int (*start_pmsr)(struct wiphy *, struct wireless_dev *, struct cfg80211_pmsr_request *); + void (*abort_pmsr)(struct wiphy *, struct wireless_dev *, struct cfg80211_pmsr_request *); + int (*update_owe_info)(struct wiphy *, struct net_device *, struct cfg80211_update_owe_info *); + int (*probe_mesh_link)(struct wiphy *, struct net_device *, const u8 *, size_t); + int (*set_tid_config)(struct wiphy *, struct net_device *, struct cfg80211_tid_config *); + int (*reset_tid_config)(struct wiphy *, struct net_device *, const u8 *, u8); + int (*set_sar_specs)(struct wiphy *, struct cfg80211_sar_specs *); + int (*color_change)(struct wiphy *, struct net_device *, struct cfg80211_color_change_settings *); + int (*set_fils_aad)(struct wiphy *, struct net_device *, struct cfg80211_fils_aad *); + int (*set_radar_background)(struct wiphy *, struct cfg80211_chan_def *); + int (*add_link_station)(struct wiphy *, struct net_device *, struct link_station_parameters *); + int (*mod_link_station)(struct wiphy *, struct net_device *, struct link_station_parameters *); + int (*del_link_station)(struct wiphy *, struct net_device *, struct link_station_del_parameters *); + int (*set_hw_timestamp)(struct wiphy *, struct net_device *, struct cfg80211_set_hw_timestamp *); + int (*set_ttlm)(struct wiphy *, struct net_device *, struct cfg80211_ttlm_params *); + u32 (*get_radio_mask)(struct wiphy *, struct net_device *); +}; + +enum wiphy_flags { + WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = 1, + WIPHY_FLAG_SUPPORTS_MLO = 2, + WIPHY_FLAG_SPLIT_SCAN_6GHZ = 4, + WIPHY_FLAG_NETNS_OK = 8, + WIPHY_FLAG_PS_ON_BY_DEFAULT = 16, + WIPHY_FLAG_4ADDR_AP = 32, + WIPHY_FLAG_4ADDR_STATION = 64, + WIPHY_FLAG_CONTROL_PORT_PROTOCOL = 128, + WIPHY_FLAG_IBSS_RSN = 256, + WIPHY_FLAG_DISABLE_WEXT = 512, + WIPHY_FLAG_MESH_AUTH = 1024, + WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = 2048, + WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = 4096, + WIPHY_FLAG_SUPPORTS_FW_ROAM = 8192, + WIPHY_FLAG_AP_UAPSD = 16384, + WIPHY_FLAG_SUPPORTS_TDLS = 32768, + WIPHY_FLAG_TDLS_EXTERNAL_SETUP = 65536, + WIPHY_FLAG_HAVE_AP_SME = 131072, + WIPHY_FLAG_REPORTS_OBSS = 262144, + WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = 524288, + WIPHY_FLAG_OFFCHAN_TX = 1048576, + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = 2097152, + WIPHY_FLAG_SUPPORTS_5_10_MHZ = 4194304, + WIPHY_FLAG_HAS_CHANNEL_SWITCH = 8388608, + WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = 16777216, + WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON = 33554432, +}; + +enum wiphy_wowlan_support_flags 
{ + WIPHY_WOWLAN_ANY = 1, + WIPHY_WOWLAN_MAGIC_PKT = 2, + WIPHY_WOWLAN_DISCONNECT = 4, + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY = 8, + WIPHY_WOWLAN_GTK_REKEY_FAILURE = 16, + WIPHY_WOWLAN_EAP_IDENTITY_REQ = 32, + WIPHY_WOWLAN_4WAY_HANDSHAKE = 64, + WIPHY_WOWLAN_RFKILL_RELEASE = 128, + WIPHY_WOWLAN_NET_DETECT = 256, +}; + +enum wiphy_vendor_command_flags { + WIPHY_VENDOR_CMD_NEED_WDEV = 1, + WIPHY_VENDOR_CMD_NEED_NETDEV = 2, + WIPHY_VENDOR_CMD_NEED_RUNNING = 4, +}; + +enum wiphy_opmode_flag { + STA_OPMODE_MAX_BW_CHANGED = 1, + STA_OPMODE_SMPS_MODE_CHANGED = 2, + STA_OPMODE_N_SS_CHANGED = 4, +}; + +struct sta_opmode_info { + u32 changed; + enum nl80211_smps_mode smps_mode; + enum nl80211_chan_width bw; + u8 rx_nss; +}; + +enum bss_source_type { + BSS_SOURCE_DIRECT = 0, + BSS_SOURCE_MBSSID = 1, + BSS_SOURCE_STA_PROFILE = 2, +}; + +struct cfg80211_internal_bss { + struct list_head list; + struct list_head hidden_list; + struct rb_node rbn; + long: 32; + u64 ts_boottime; + long unsigned int ts; + long unsigned int refcount; + atomic_t hold; + long: 32; + u64 parent_tsf; + u8 parent_bssid[6]; + enum bss_source_type bss_source; + struct cfg80211_bss pub; + long: 32; +}; + +struct cfg80211_cached_keys { + struct key_params params[4]; + u8 data[52]; + int def; +}; + +struct cfg80211_cqm_config { + struct callback_head callback_head; + u32 rssi_hyst; + s32 last_rssi_event_value; + enum nl80211_cqm_rssi_threshold_event last_rssi_event_type; + bool use_range_api; + int n_rssi_thresholds; + s32 rssi_thresholds[0]; +}; + +struct cfg80211_rx_assoc_resp_data { + const u8 *buf; + size_t len; + const u8 *req_ies; + size_t req_ies_len; + int uapsd_queues; + const u8 *ap_mld_addr; + struct { + u8 addr[6]; + struct cfg80211_bss *bss; + u16 status; + } links[15]; +}; + +struct cfg80211_fils_resp_params { + const u8 *kek; + size_t kek_len; + bool update_erp_next_seq_num; + u16 erp_next_seq_num; + const u8 *pmk; + size_t pmk_len; + const u8 *pmkid; +}; + +struct cfg80211_connect_resp_params { + int status; + const u8 *req_ie; + size_t req_ie_len; + const u8 *resp_ie; + size_t resp_ie_len; + struct cfg80211_fils_resp_params fils; + enum nl80211_timeout_reason timeout_reason; + const u8 *ap_mld_addr; + u16 valid_links; + struct { + const u8 *addr; + const u8 *bssid; + struct cfg80211_bss *bss; + u16 status; + } links[15]; +}; + +struct cfg80211_roam_info { + const u8 *req_ie; + size_t req_ie_len; + const u8 *resp_ie; + size_t resp_ie_len; + struct cfg80211_fils_resp_params fils; + const u8 *ap_mld_addr; + u16 valid_links; + struct { + const u8 *addr; + const u8 *bssid; + struct ieee80211_channel *channel; + struct cfg80211_bss *bss; + } links[15]; +}; + +struct cfg80211_rx_info { + int freq; + int sig_dbm; + bool have_link_id; + u8 link_id; + const u8 *buf; + size_t len; + u32 flags; + u64 rx_tstamp; + u64 ack_tstamp; +}; + +struct cfg80211_tx_status { + u64 cookie; + u64 tx_tstamp; + u64 ack_tstamp; + const u8 *buf; + size_t len; + bool ack; + long: 32; +}; + +struct cfg80211_beaconing_check_config { + enum nl80211_iftype iftype; + enum ieee80211_ap_reg_power reg_power; + bool relax; +}; + +struct cfg80211_ft_event_params { + const u8 *ies; + size_t ies_len; + const u8 *target_ap; + const u8 *ric_ies; + size_t ric_ies_len; +}; + +struct cfg80211_nan_match_params { + enum nl80211_nan_function_type type; + u8 inst_id; + u8 peer_inst_id; + const u8 *addr; + u8 info_len; + const u8 *info; + long: 32; + u64 cookie; +}; + +enum ieee80211_regd_source { + REGD_SOURCE_INTERNAL_DB = 0, + REGD_SOURCE_CRDA = 1, + REGD_SOURCE_CACHED = 
2, +}; + +struct cfg80211_registered_device { + const struct cfg80211_ops *ops; + struct list_head list; + struct rfkill_ops rfkill_ops; + struct work_struct rfkill_block; + char country_ie_alpha2[2]; + const struct ieee80211_regdomain *requested_regd; + enum environment_cap env; + int wiphy_idx; + int devlist_generation; + int wdev_id; + int opencount; + wait_queue_head_t dev_wait; + struct list_head beacon_registrations; + spinlock_t beacon_registrations_lock; + int num_running_ifaces; + int num_running_monitor_ifaces; + long: 32; + u64 cookie_counter; + spinlock_t bss_lock; + struct list_head bss_list; + struct rb_root bss_tree; + u32 bss_generation; + u32 bss_entries; + struct cfg80211_scan_request *scan_req; + struct cfg80211_scan_request *int_scan_req; + struct sk_buff *scan_msg; + struct list_head sched_scan_req_list; + long: 32; + time64_t suspend_at; + struct wiphy_work scan_done_wk; + struct genl_info *cur_cmd_info; + struct work_struct conn_work; + struct work_struct event_work; + struct delayed_work dfs_update_channels_wk; + struct wireless_dev *background_radar_wdev; + struct cfg80211_chan_def background_radar_chandef; + struct delayed_work background_cac_done_wk; + struct work_struct background_cac_abort_wk; + u32 crit_proto_nlportid; + struct cfg80211_coalesce *coalesce; + struct work_struct destroy_work; + struct wiphy_work sched_scan_stop_wk; + struct work_struct sched_scan_res_wk; + struct cfg80211_chan_def radar_chandef; + struct work_struct propagate_radar_detect_wk; + struct cfg80211_chan_def cac_done_chandef; + struct work_struct propagate_cac_done_wk; + struct work_struct mgmt_registrations_update_wk; + spinlock_t mgmt_registrations_lock; + struct work_struct wiphy_work; + struct list_head wiphy_work_list; + spinlock_t wiphy_work_lock; + bool suspended; + struct wiphy wiphy; +}; + +struct cfg80211_beacon_registration { + struct list_head list; + u32 nlportid; +}; + +enum nl80211_multicast_groups { + NL80211_MCGRP_CONFIG = 0, + NL80211_MCGRP_SCAN = 1, + NL80211_MCGRP_REGULATORY = 2, + NL80211_MCGRP_MLME = 3, + NL80211_MCGRP_VENDOR = 4, + NL80211_MCGRP_NAN = 5, + NL80211_MCGRP_TESTMODE = 6, +}; + +struct key_parse { + struct key_params p; + int idx; + int type; + bool def; + bool defmgmt; + bool defbeacon; + bool def_uni; + bool def_multi; +}; + +struct nl80211_dump_wiphy_state { + s64 filter_wiphy; + long int start; + long int split_start; + long int band_start; + long int chan_start; + long int capa_start; + bool split; +}; + +struct get_key_cookie { + struct sk_buff *msg; + int error; + int idx; +}; + +enum nl80211_internal_flags_selector { + NL80211_IFL_SEL_NONE = 0, + NL80211_IFL_SEL_WIPHY = 1, + NL80211_IFL_SEL_WDEV = 2, + NL80211_IFL_SEL_NETDEV = 3, + NL80211_IFL_SEL_NETDEV_LINK = 4, + NL80211_IFL_SEL_NETDEV_NO_MLO = 5, + NL80211_IFL_SEL_WIPHY_RTNL = 6, + NL80211_IFL_SEL_WIPHY_RTNL_NOMTX = 7, + NL80211_IFL_SEL_WDEV_RTNL = 8, + NL80211_IFL_SEL_NETDEV_RTNL = 9, + NL80211_IFL_SEL_NETDEV_UP = 10, + NL80211_IFL_SEL_NETDEV_UP_LINK = 11, + NL80211_IFL_SEL_NETDEV_UP_NO_MLO = 12, + NL80211_IFL_SEL_NETDEV_UP_NO_MLO_CLEAR = 13, + NL80211_IFL_SEL_NETDEV_UP_NOTMX = 14, + NL80211_IFL_SEL_NETDEV_UP_NOTMX_MLO = 15, + NL80211_IFL_SEL_NETDEV_UP_CLEAR = 16, + NL80211_IFL_SEL_WDEV_UP = 17, + NL80211_IFL_SEL_WDEV_UP_LINK = 18, + NL80211_IFL_SEL_WDEV_UP_RTNL = 19, + NL80211_IFL_SEL_WIPHY_CLEAR = 20, +}; + +struct nl80211_mlme_event { + enum nl80211_commands cmd; + const u8 *buf; + size_t buf_len; + int uapsd_queues; + const u8 *req_ies; + size_t req_ies_len; + bool reconnect; +}; + 
+struct smp_operations { + void (*smp_init_cpus)(); + void (*smp_prepare_cpus)(unsigned int); + void (*smp_secondary_init)(unsigned int); + int (*smp_boot_secondary)(unsigned int, struct task_struct *); + int (*cpu_kill)(unsigned int); + void (*cpu_die)(unsigned int); + bool (*cpu_can_disable)(unsigned int); + int (*cpu_disable)(unsigned int); +}; + +struct tag_header { + __u32 size; + __u32 tag; +}; + +struct tag_core { + __u32 flags; + __u32 pagesize; + __u32 rootdev; +}; + +struct tag_mem32 { + __u32 size; + __u32 start; +}; + +struct tag_videotext { + __u8 x; + __u8 y; + __u16 video_page; + __u8 video_mode; + __u8 video_cols; + __u16 video_ega_bx; + __u8 video_lines; + __u8 video_isvga; + __u16 video_points; +}; + +struct tag_ramdisk { + __u32 flags; + __u32 size; + __u32 start; +}; + +struct tag_initrd { + __u32 start; + __u32 size; +}; + +struct tag_serialnr { + __u32 low; + __u32 high; +}; + +struct tag_revision { + __u32 rev; +}; + +struct tag_videolfb { + __u16 lfb_width; + __u16 lfb_height; + __u16 lfb_depth; + __u16 lfb_linelength; + __u32 lfb_base; + __u32 lfb_size; + __u8 red_size; + __u8 red_pos; + __u8 green_size; + __u8 green_pos; + __u8 blue_size; + __u8 blue_pos; + __u8 rsvd_size; + __u8 rsvd_pos; +}; + +struct tag_cmdline { + char cmdline[1]; +}; + +struct tag_acorn { + __u32 memc_control_reg; + __u32 vram_pages; + __u8 sounddefault; + __u8 adfsdrives; +}; + +struct tag_memclk { + __u32 fmemclk; +}; + +struct tag { + struct tag_header hdr; + union { + struct tag_core core; + struct tag_mem32 mem; + struct tag_videotext videotext; + struct tag_ramdisk ramdisk; + struct tag_initrd initrd; + struct tag_serialnr serialnr; + struct tag_revision revision; + struct tag_videolfb videolfb; + struct tag_cmdline cmdline; + struct tag_acorn acorn; + struct tag_memclk memclk; + } u; +}; + +struct tagtable { + __u32 tag; + int (*parse)(const struct tag *); +}; + +enum reboot_mode { + REBOOT_UNDEFINED = -1, + REBOOT_COLD = 0, + REBOOT_WARM = 1, + REBOOT_HARD = 2, + REBOOT_SOFT = 3, + REBOOT_GPIO = 4, +}; + +struct machine_desc { + unsigned int nr; + const char *name; + long unsigned int atag_offset; + const char * const *dt_compat; + unsigned int nr_irqs; + unsigned int video_start; + unsigned int video_end; + unsigned char reserve_lp0: 1; + unsigned char reserve_lp1: 1; + unsigned char reserve_lp2: 1; + enum reboot_mode reboot_mode; + unsigned int l2c_aux_val; + unsigned int l2c_aux_mask; + void (*l2c_write_sec)(long unsigned int, unsigned int); + const struct smp_operations *smp; + bool (*smp_init)(); + void (*fixup)(struct tag *, char **); + void (*dt_fixup)(); + long long int (*pv_fixup)(); + void (*reserve)(); + void (*map_io)(); + void (*init_early)(); + void (*init_irq)(); + void (*init_time)(); + void (*init_machine)(); + void (*init_late)(); + void (*restart)(enum reboot_mode, const char *); +}; + +struct smp_hotplug_thread { + struct task_struct **store; + struct list_head list; + int (*thread_should_run)(unsigned int); + void (*thread_fn)(unsigned int); + void (*create)(unsigned int); + void (*setup)(unsigned int); + void (*cleanup)(unsigned int, bool); + void (*park)(unsigned int); + void (*unpark)(unsigned int); + bool selfparking; + const char *thread_comm; +}; + +struct smpboot_thread_data { + unsigned int cpu; + unsigned int status; + struct smp_hotplug_thread *ht; +}; + +enum { + HP_THREAD_NONE = 0, + HP_THREAD_ACTIVE = 1, + HP_THREAD_PARKED = 2, +}; + +typedef void (*swap_func_t)(void *, void *, int); + +enum { + UNAME26 = 131072, + ADDR_NO_RANDOMIZE = 262144, + 
FDPIC_FUNCPTRS = 524288, + MMAP_PAGE_ZERO = 1048576, + ADDR_COMPAT_LAYOUT = 2097152, + READ_IMPLIES_EXEC = 4194304, + ADDR_LIMIT_32BIT = 8388608, + SHORT_INODE = 16777216, + WHOLE_SECONDS = 33554432, + STICKY_TIMEOUTS = 67108864, + ADDR_LIMIT_3GB = 134217728, +}; + +enum system_states { + SYSTEM_BOOTING = 0, + SYSTEM_SCHEDULING = 1, + SYSTEM_FREEING_INITMEM = 2, + SYSTEM_RUNNING = 3, + SYSTEM_HALT = 4, + SYSTEM_POWER_OFF = 5, + SYSTEM_RESTART = 6, + SYSTEM_SUSPEND = 7, +}; + +struct plist_head { + struct list_head node_list; +}; + +struct kobj_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); + ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t); +}; + +enum pm_qos_type { + PM_QOS_UNITIALIZED = 0, + PM_QOS_MAX = 1, + PM_QOS_MIN = 2, +}; + +struct pm_qos_constraints { + struct plist_head list; + s32 target_value; + s32 default_value; + s32 no_constraint_value; + enum pm_qos_type type; + struct blocking_notifier_head *notifiers; +}; + +struct freq_constraints { + struct pm_qos_constraints min_freq; + struct blocking_notifier_head min_freq_notifiers; + struct pm_qos_constraints max_freq; + struct blocking_notifier_head max_freq_notifiers; +}; + +struct pm_qos_flags { + struct list_head list; + s32 effective_flags; +}; + +struct dev_pm_qos_request; + +struct dev_pm_qos { + struct pm_qos_constraints resume_latency; + struct pm_qos_constraints latency_tolerance; + struct freq_constraints freq; + struct pm_qos_flags flags; + struct dev_pm_qos_request *resume_latency_req; + struct dev_pm_qos_request *latency_tolerance_req; + struct dev_pm_qos_request *flags_req; +}; + +struct platform_hibernation_ops { + int (*begin)(pm_message_t); + void (*end)(); + int (*pre_snapshot)(); + void (*finish)(); + int (*prepare)(); + int (*enter)(); + void (*leave)(); + int (*pre_restore)(); + void (*restore_cleanup)(); + void (*recover)(); +}; + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; +}; + +enum freq_qos_req_type { + FREQ_QOS_MIN = 1, + FREQ_QOS_MAX = 2, +}; + +struct freq_qos_request { + enum freq_qos_req_type type; + struct plist_node pnode; + struct freq_constraints *qos; +}; + +enum dev_pm_qos_req_type { + DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE = 2, + DEV_PM_QOS_MIN_FREQUENCY = 3, + DEV_PM_QOS_MAX_FREQUENCY = 4, + DEV_PM_QOS_FLAGS = 5, +}; + +struct dev_pm_qos_request { + enum dev_pm_qos_req_type type; + union { + struct plist_node pnode; + struct pm_qos_flags_request flr; + struct freq_qos_request freq; + } data; + struct device *dev; +}; + +enum { + TEST_NONE = 0, + TEST_CORE = 1, + TEST_CPUS = 2, + TEST_PLATFORM = 3, + TEST_DEVICES = 4, + TEST_FREEZER = 5, + __TEST_AFTER_LAST = 6, +}; + +enum { + HIBERNATION_INVALID = 0, + HIBERNATION_PLATFORM = 1, + HIBERNATION_SHUTDOWN = 2, + HIBERNATION_REBOOT = 3, + HIBERNATION_SUSPEND = 4, + HIBERNATION_TEST_RESUME = 5, + __HIBERNATION_AFTER_LAST = 6, +}; + +enum { + CSS_NO_REF = 1, + CSS_ONLINE = 2, + CSS_RELEASED = 4, + CSS_VISIBLE = 8, + CSS_DYING = 16, +}; + +struct css_task_iter { + struct cgroup_subsys *ss; + unsigned int flags; + struct list_head *cset_pos; + struct list_head *cset_head; + struct list_head *tcset_pos; + struct list_head *tcset_head; + struct list_head *task_pos; + struct list_head *cur_tasks_head; + struct css_set *cur_cset; + struct css_set *cur_dcset; + struct task_struct *cur_task; + struct list_head iters_node; +}; + +enum freezer_state_flags { + CGROUP_FREEZER_ONLINE = 1, + CGROUP_FREEZING_SELF = 2, + 
CGROUP_FREEZING_PARENT = 4, + CGROUP_FROZEN = 8, + CGROUP_FREEZING = 6, +}; + +struct freezer { + struct cgroup_subsys_state css; + unsigned int state; + long: 32; +}; + +enum ftrace_dump_mode { + DUMP_NONE = 0, + DUMP_ALL = 1, + DUMP_ORIG = 2, + DUMP_PARAM = 3, +}; + +typedef void (*smp_call_func_t)(void *); + +typedef bool (*smp_cond_func_t)(int, void *); + +struct pipe_buffer; + +struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t rd_wait; + wait_queue_head_t wr_wait; + unsigned int head; + unsigned int tail; + unsigned int max_usage; + unsigned int ring_size; + unsigned int nr_accounted; + unsigned int readers; + unsigned int writers; + unsigned int files; + unsigned int r_counter; + unsigned int w_counter; + bool poll_usage; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct pipe_buffer *bufs; + struct user_struct *user; +}; + +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block *head; +}; + +struct fsnotify_mark_connector { + spinlock_t lock; + unsigned char type; + unsigned char prio; + short unsigned int flags; + union { + void *obj; + struct fsnotify_mark_connector *destroy_next; + }; + struct hlist_head list; +}; + +struct fsnotify_sb_info { + struct fsnotify_mark_connector *sb_marks; + atomic_long_t watched_objects[3]; +}; + +typedef bool (*ring_buffer_cond_fn)(void *); + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1, +}; + +typedef struct vfsmount * (*debugfs_automount_t)(struct dentry *, void *); + +struct pipe_buf_operations; + +struct pipe_buffer { + struct page *page; + unsigned int offset; + unsigned int len; + const struct pipe_buf_operations *ops; + unsigned int flags; + long unsigned int private; +}; + +struct pipe_buf_operations { + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + long unsigned int private; +}; + +struct splice_pipe_desc { + struct page **pages; + struct partial_page *partial; + int nr_pages; + unsigned int nr_pages_max; + const struct pipe_buf_operations *ops; + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +enum die_val { + DIE_UNUSED = 0, + DIE_OOPS = 1, +}; + +struct trace_export { + struct trace_export *next; + void (*write)(struct trace_export *, const void *, unsigned int); + int flags; +}; + +enum fsnotify_group_prio { + FSNOTIFY_PRIO_NORMAL = 0, + FSNOTIFY_PRIO_CONTENT = 1, + FSNOTIFY_PRIO_PRE_CONTENT = 2, + __FSNOTIFY_PRIO_NUM = 3, +}; + +enum fsnotify_iter_type { + FSNOTIFY_ITER_TYPE_INODE = 0, + FSNOTIFY_ITER_TYPE_VFSMOUNT = 1, + FSNOTIFY_ITER_TYPE_SB = 2, + FSNOTIFY_ITER_TYPE_PARENT = 3, + FSNOTIFY_ITER_TYPE_INODE2 = 4, + FSNOTIFY_ITER_TYPE_COUNT = 5, +}; + +enum trace_iter_flags { + TRACE_FILE_LAT_FMT = 1, + TRACE_FILE_ANNOTATE = 2, + TRACE_FILE_TIME_IN_NS = 4, +}; + +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 1, + TRACE_FLAG_NEED_RESCHED_LAZY = 2, + TRACE_FLAG_NEED_RESCHED = 4, + TRACE_FLAG_HARDIRQ = 8, + TRACE_FLAG_SOFTIRQ = 16, + TRACE_FLAG_PREEMPT_RESCHED = 32, + TRACE_FLAG_NMI = 64, + TRACE_FLAG_BH_OFF = 128, +}; + +enum event_trigger_type { + ETT_NONE = 0, + ETT_TRACE_ONOFF = 1, + ETT_SNAPSHOT = 2, + ETT_STACKTRACE = 4, + ETT_EVENT_ENABLE = 8, + ETT_EVENT_HIST = 16, + ETT_HIST_ENABLE = 32, + ETT_EVENT_EPROBE = 64, +}; + 
+struct ftrace_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; +}; + +struct stack_entry { + struct trace_entry ent; + int size; + long unsigned int caller[0]; +}; + +struct bprint_entry { + struct trace_entry ent; + long unsigned int ip; + const char *fmt; + u32 buf[0]; +}; + +struct print_entry { + struct trace_entry ent; + long unsigned int ip; + char buf[0]; +}; + +struct raw_data_entry { + struct trace_entry ent; + unsigned int id; + char buf[0]; +}; + +struct bputs_entry { + struct trace_entry ent; + long unsigned int ip; + const char *str; +}; + +struct func_repeats_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; + u16 count; + u16 top_delta_ts; + u32 bottom_delta_ts; +}; + +typedef bool (*cond_update_fn_t)(struct trace_array *, void *); + +enum { + TRACE_ARRAY_FL_GLOBAL = 1, + TRACE_ARRAY_FL_BOOT = 2, +}; + +struct trace_parser { + bool cont; + char *buffer; + unsigned int idx; + unsigned int size; +}; + +enum trace_iterator_flags { + TRACE_ITER_PRINT_PARENT = 1, + TRACE_ITER_SYM_OFFSET = 2, + TRACE_ITER_SYM_ADDR = 4, + TRACE_ITER_VERBOSE = 8, + TRACE_ITER_RAW = 16, + TRACE_ITER_HEX = 32, + TRACE_ITER_BIN = 64, + TRACE_ITER_BLOCK = 128, + TRACE_ITER_FIELDS = 256, + TRACE_ITER_PRINTK = 512, + TRACE_ITER_ANNOTATE = 1024, + TRACE_ITER_USERSTACKTRACE = 2048, + TRACE_ITER_SYM_USEROBJ = 4096, + TRACE_ITER_PRINTK_MSGONLY = 8192, + TRACE_ITER_CONTEXT_INFO = 16384, + TRACE_ITER_LATENCY_FMT = 32768, + TRACE_ITER_RECORD_CMD = 65536, + TRACE_ITER_RECORD_TGID = 131072, + TRACE_ITER_OVERWRITE = 262144, + TRACE_ITER_STOP_ON_FREE = 524288, + TRACE_ITER_IRQ_INFO = 1048576, + TRACE_ITER_MARKERS = 2097152, + TRACE_ITER_EVENT_FORK = 4194304, + TRACE_ITER_TRACE_PRINTK = 8388608, + TRACE_ITER_PAUSE_ON_TRACE = 16777216, + TRACE_ITER_HASH_PTR = 33554432, + TRACE_ITER_FUNCTION = 67108864, + TRACE_ITER_FUNC_FORK = 134217728, + TRACE_ITER_DISPLAY_GRAPH = 268435456, + TRACE_ITER_STACKTRACE = 536870912, +}; + +struct trace_min_max_param { + struct mutex *lock; + u64 *val; + u64 *min; + u64 *max; +}; + +struct pipe_wait { + struct trace_iterator *iter; + int wait_index; +}; + +struct ftrace_stack { + long unsigned int calls[1024]; +}; + +struct ftrace_stacks { + struct ftrace_stack stacks[4]; +}; + +struct trace_buffer_struct { + int nesting; + char buffer[4096]; +}; + +struct ftrace_buffer_info { + struct trace_iterator iter; + void *spare; + unsigned int spare_cpu; + unsigned int spare_size; + unsigned int read; +}; + +struct err_info { + const char **errs; + u8 type; + u16 pos; + u64 ts; +}; + +struct tracing_log_err { + struct list_head list; + struct err_info info; + char loc[128]; + char *cmd; + long: 32; +}; + +struct buffer_ref { + struct trace_buffer *buffer; + void *page; + int cpu; + refcount_t refcount; +}; + +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + PERF_TYPE_MAX = 6, +}; + +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + 
PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_WEIGHT_STRUCT = 16777216, + PERF_SAMPLE_MAX = 33554432, +}; + +struct rhltable { + struct rhashtable ht; +}; + +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_sw_context = 1, + perf_nr_task_contexts = 2, +}; + +struct __una_u32 { + u32 x; +}; + +enum { + HW_BREAKPOINT_EMPTY = 0, + HW_BREAKPOINT_R = 1, + HW_BREAKPOINT_W = 2, + HW_BREAKPOINT_RW = 3, + HW_BREAKPOINT_X = 4, + HW_BREAKPOINT_INVALID = 7, +}; + +enum bp_type_idx { + TYPE_INST = 0, + TYPE_DATA = 1, + TYPE_MAX = 2, +}; + +struct bp_slots_histogram { + atomic_t *count; +}; + +struct bp_cpuinfo { + unsigned int cpu_pinned; + struct bp_slots_histogram tsk_pinned; +}; + +typedef struct pglist_data pg_data_t; + +struct xa_node { + unsigned char shift; + unsigned char offset; + unsigned char count; + unsigned char nr_values; + struct xa_node *parent; + struct xarray *array; + union { + struct list_head private_list; + struct callback_head callback_head; + }; + void *slots[64]; + union { + long unsigned int tags[6]; + long unsigned int marks[6]; + }; +}; + +typedef void (*xa_update_node_t)(struct xa_node *); + +enum lru_status { + LRU_REMOVED = 0, + LRU_REMOVED_RETRY = 1, + LRU_ROTATE = 2, + LRU_SKIP = 3, + LRU_RETRY = 4, + LRU_STOP = 5, +}; + +typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, void *); + +enum objext_flags { + OBJEXTS_ALLOC_FAIL = 4, + __NR_OBJEXTS_FLAGS = 8, +}; + +struct fs_parse_result { + bool negated; + long: 32; + union { + bool boolean; + int int_32; + unsigned int uint_32; + u64 uint_64; + kuid_t uid; + kgid_t gid; + }; +}; + +enum _slab_flag_bits { + _SLAB_CONSISTENCY_CHECKS = 0, + _SLAB_RED_ZONE = 1, + _SLAB_POISON = 2, + _SLAB_KMALLOC = 3, + _SLAB_HWCACHE_ALIGN = 4, + _SLAB_CACHE_DMA = 5, + _SLAB_CACHE_DMA32 = 6, + _SLAB_STORE_USER = 7, + _SLAB_PANIC = 8, + _SLAB_TYPESAFE_BY_RCU = 9, + _SLAB_TRACE = 10, + _SLAB_NOLEAKTRACE = 11, + _SLAB_NO_MERGE = 12, + _SLAB_ACCOUNT = 13, + _SLAB_KASAN = 14, + _SLAB_NO_USER_FLAGS = 15, + _SLAB_RECLAIM_ACCOUNT = 16, + _SLAB_OBJECT_POISON = 17, + _SLAB_CMPXCHG_DOUBLE = 18, + _SLAB_NO_OBJ_EXT = 19, + _SLAB_FLAGS_LAST_BIT = 20, +}; + +struct kasan_cache { + int alloc_meta_offset; + int free_meta_offset; +}; + +struct reciprocal_value { + u32 m; + u8 sh1; + u8 sh2; +}; + +struct kmem_cache_order_objects { + unsigned int x; +}; + +struct kmem_cache_cpu; + +struct kmem_cache_node; + +struct kmem_cache { + struct kmem_cache_cpu *cpu_slab; + slab_flags_t flags; + long unsigned int min_partial; + unsigned int size; + unsigned int object_size; + struct reciprocal_value reciprocal_size; + unsigned int offset; + unsigned int cpu_partial; + unsigned int cpu_partial_slabs; + struct kmem_cache_order_objects oo; + struct kmem_cache_order_objects min; + gfp_t allocflags; + int refcount; + void (*ctor)(void *); + unsigned int inuse; + unsigned int align; + unsigned int red_left_pad; + const char *name; + struct list_head list; + struct kobject kobj; + struct kasan_cache kasan_info; + struct kmem_cache_node *node[1]; +}; + +typedef u32 depot_stack_handle_t; + +typedef u32 depot_flags_t; + +struct kasan_track { + u32 pid; + depot_stack_handle_t stack; +}; + +struct kasan_source_location { + const char *filename; + int line_no; + int column_no; +}; + +struct 
kasan_global { + const void *beg; + size_t size; + size_t size_with_redzone; + const void *name; + const void *module_name; + long unsigned int has_dynamic_init; + struct kasan_source_location *location; + char *odr_indicator; +}; + +struct kasan_alloc_meta { + struct kasan_track alloc_track; + depot_stack_handle_t aux_stack[2]; +}; + +struct qlist_node { + struct qlist_node *next; +}; + +struct kasan_free_meta { + struct qlist_node quarantine_link; + struct kasan_track free_track; +}; + +struct slab { + long unsigned int __page_flags; + struct kmem_cache *slab_cache; + union { + struct { + union { + struct list_head slab_list; + struct { + struct slab *next; + int slabs; + }; + }; + union { + struct { + void *freelist; + union { + long unsigned int counters; + struct { + unsigned int inuse: 16; + unsigned int objects: 15; + unsigned int frozen: 1; + }; + }; + }; + }; + }; + struct callback_head callback_head; + }; + unsigned int __page_type; + atomic_t __page_refcount; + long unsigned int obj_exts; +}; + +struct fs_struct { + int users; + spinlock_t lock; + seqcount_spinlock_t seq; + int umask; + int in_exec; + struct path root; + struct path pwd; +}; + +enum kernel_read_file_id { + READING_UNKNOWN = 0, + READING_FIRMWARE = 1, + READING_MODULE = 2, + READING_KEXEC_IMAGE = 3, + READING_KEXEC_INITRAMFS = 4, + READING_POLICY = 5, + READING_X509_CERTIFICATE = 6, + READING_MAX_ID = 7, +}; + +typedef __kernel_long_t __kernel_off_t; + +typedef __kernel_off_t off_t; + +struct fdtable { + unsigned int max_fds; + struct file **fd; + long unsigned int *close_on_exec; + long unsigned int *open_fds; + long unsigned int *full_fds_bits; + struct callback_head rcu; +}; + +struct files_struct { + atomic_t count; + bool resize_in_progress; + wait_queue_head_t resize_wait; + struct fdtable *fdt; + struct fdtable fdtab; + long: 32; + long: 32; + long: 32; + spinlock_t file_lock; + unsigned int next_fd; + long unsigned int close_on_exec_init[1]; + long unsigned int open_fds_init[1]; + long unsigned int full_fds_bits_init[1]; + struct file *fd_array[32]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct srcu_notifier_head { + struct mutex mutex; + struct srcu_usage srcuu; + struct srcu_struct srcu; + struct notifier_block *head; +}; + +struct flock { + short int l_type; + short int l_whence; + __kernel_off_t l_start; + __kernel_off_t l_len; + __kernel_pid_t l_pid; +}; + +struct flock64 { + short int l_type; + short int l_whence; + long: 32; + __kernel_loff_t l_start; + __kernel_loff_t l_len; + __kernel_pid_t l_pid; + long: 32; +}; + +struct file_lock_context { + spinlock_t flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; + +struct file_lock_core { + struct file_lock_core *flc_blocker; + struct list_head flc_list; + struct hlist_node flc_link; + struct list_head flc_blocked_requests; + struct list_head flc_blocked_member; + fl_owner_t flc_owner; + unsigned int flc_flags; + unsigned char flc_type; + pid_t flc_pid; + int flc_link_cpu; + wait_queue_head_t flc_wait; + struct file *flc_file; +}; + +struct nlm_lockowner; + +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; + +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + +struct file_lock_operations; + +struct lock_manager_operations; + +struct file_lock { + struct file_lock_core c; + loff_t fl_start; + loff_t fl_end; + const 
struct file_lock_operations *fl_ops; + const struct lock_manager_operations *fl_lmops; + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; + int state; + unsigned int debug_id; + } afs; + struct { + struct inode *inode; + } ceph; + } fl_u; +}; + +struct lease_manager_operations; + +struct file_lease { + struct file_lock_core c; + struct fasync_struct *fl_fasync; + long unsigned int fl_break_time; + long unsigned int fl_downgrade_time; + const struct lease_manager_operations *fl_lmops; +}; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct lock_manager_operations { + void *lm_mod_owner; + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); + void (*lm_notify)(struct file_lock *); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_lock_expirable)(struct file_lock *); + void (*lm_expire_lock)(); +}; + +struct lease_manager_operations { + bool (*lm_break)(struct file_lease *); + int (*lm_change)(struct file_lease *, int, struct list_head *); + void (*lm_setup)(struct file_lease *, void **); + bool (*lm_breaker_owns_lease)(struct file_lease *); +}; + +struct trace_event_raw_locks_get_lock_context { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + unsigned char type; + struct file_lock_context *ctx; + char __data[0]; +}; + +struct trace_event_raw_filelock_lock { + struct trace_entry ent; + struct file_lock *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock_core *blocker; + fl_owner_t owner; + unsigned int pid; + unsigned int flags; + unsigned char type; + loff_t fl_start; + loff_t fl_end; + int ret; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_filelock_lease { + struct trace_entry ent; + struct file_lease *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock_core *blocker; + fl_owner_t owner; + unsigned int flags; + unsigned char type; + long unsigned int break_time; + long unsigned int downgrade_time; + char __data[0]; +}; + +struct trace_event_raw_generic_add_lease { + struct trace_entry ent; + long unsigned int i_ino; + int wcount; + int rcount; + int icount; + dev_t s_dev; + fl_owner_t owner; + unsigned int flags; + unsigned char type; + char __data[0]; +}; + +struct trace_event_raw_leases_conflict { + struct trace_entry ent; + void *lease; + void *breaker; + unsigned int l_fl_flags; + unsigned int b_fl_flags; + unsigned char l_fl_type; + unsigned char b_fl_type; + bool conflict; + char __data[0]; +}; + +struct trace_event_data_offsets_locks_get_lock_context {}; + +struct trace_event_data_offsets_filelock_lock {}; + +struct trace_event_data_offsets_filelock_lease {}; + +struct trace_event_data_offsets_generic_add_lease {}; + +struct trace_event_data_offsets_leases_conflict {}; + +typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, struct file_lock_context *); + +typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_locks_remove_posix)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct 
file_lease *); + +typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lease *, struct file_lease *); + +struct file_lock_list_struct { + spinlock_t lock; + struct hlist_head hlist; +}; + +enum proc_hidepid { + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, + HIDEPID_NOT_PTRACEABLE = 4, +}; + +enum proc_pidonly { + PROC_PIDONLY_OFF = 0, + PROC_PIDONLY_ON = 1, +}; + +struct proc_fs_info { + struct pid_namespace *pid_ns; + struct dentry *proc_self; + struct dentry *proc_thread_self; + kgid_t pid_gid; + enum proc_hidepid hide_pid; + enum proc_pidonly pidonly; + struct callback_head rcu; +}; + +struct locks_iterator { + int li_cpu; + long: 32; + loff_t li_pos; +}; + +typedef unsigned int fgf_t; + +enum bh_state_bits { + BH_Uptodate = 0, + BH_Dirty = 1, + BH_Lock = 2, + BH_Req = 3, + BH_Mapped = 4, + BH_New = 5, + BH_Async_Read = 6, + BH_Async_Write = 7, + BH_Delay = 8, + BH_Boundary = 9, + BH_Write_EIO = 10, + BH_Unwritten = 11, + BH_Quiet = 12, + BH_Meta = 13, + BH_Prio = 14, + BH_Defer_Completion = 15, + BH_PrivateStart = 16, +}; + +struct bgl_lock { + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct blockgroup_lock { + struct bgl_lock locks[128]; +}; + +struct fscrypt_dummy_policy {}; + +typedef int ext4_grpblk_t; + +typedef long long unsigned int ext4_fsblk_t; + +typedef __u32 ext4_lblk_t; + +typedef unsigned int ext4_group_t; + +struct ext4_system_blocks { + struct rb_root root; + struct callback_head rcu; +}; + +struct flex_groups { + atomic64_t free_clusters; + atomic_t free_inodes; + atomic_t used_dirs; +}; + +enum { + EXT4_INODE_SECRM = 0, + EXT4_INODE_UNRM = 1, + EXT4_INODE_COMPR = 2, + EXT4_INODE_SYNC = 3, + EXT4_INODE_IMMUTABLE = 4, + EXT4_INODE_APPEND = 5, + EXT4_INODE_NODUMP = 6, + EXT4_INODE_NOATIME = 7, + EXT4_INODE_DIRTY = 8, + EXT4_INODE_COMPRBLK = 9, + EXT4_INODE_NOCOMPR = 10, + EXT4_INODE_ENCRYPT = 11, + EXT4_INODE_INDEX = 12, + EXT4_INODE_IMAGIC = 13, + EXT4_INODE_JOURNAL_DATA = 14, + EXT4_INODE_NOTAIL = 15, + EXT4_INODE_DIRSYNC = 16, + EXT4_INODE_TOPDIR = 17, + EXT4_INODE_HUGE_FILE = 18, + EXT4_INODE_EXTENTS = 19, + EXT4_INODE_VERITY = 20, + EXT4_INODE_EA_INODE = 21, + EXT4_INODE_DAX = 25, + EXT4_INODE_INLINE_DATA = 28, + EXT4_INODE_PROJINHERIT = 29, + EXT4_INODE_CASEFOLD = 30, + EXT4_INODE_RESERVED = 31, +}; + +struct extent_status { + struct rb_node rb_node; + ext4_lblk_t es_lblk; + ext4_lblk_t es_len; + long: 32; + ext4_fsblk_t es_pblk; +}; + +struct ext4_es_tree { + struct rb_root root; + struct extent_status *cache_es; +}; + +struct ext4_es_stats { + long unsigned int es_stats_shrunk; + long: 32; + struct percpu_counter es_stats_cache_hits; + struct percpu_counter es_stats_cache_misses; + u64 es_stats_scan_time; + u64 es_stats_max_scan_time; + struct percpu_counter es_stats_all_cnt; + struct percpu_counter es_stats_shk_cnt; +}; + +struct ext4_pending_tree { + struct rb_root root; +}; + +struct ext4_fc_stats { + unsigned int fc_ineligible_reason_count[10]; + long unsigned int fc_num_commits; + long unsigned 
int fc_ineligible_commits; + long unsigned int fc_failed_commits; + long unsigned int fc_skipped_commits; + long unsigned int fc_numblks; + long: 32; + u64 s_fc_avg_commit_time; +}; + +struct ext4_fc_alloc_region { + ext4_lblk_t lblk; + long: 32; + ext4_fsblk_t pblk; + int ino; + int len; +}; + +struct ext4_fc_replay_state { + int fc_replay_num_tags; + int fc_replay_expected_off; + int fc_current_pass; + int fc_cur_tag; + int fc_crc; + struct ext4_fc_alloc_region *fc_regions; + int fc_regions_size; + int fc_regions_used; + int fc_regions_valid; + int *fc_modified_inodes; + int fc_modified_inodes_used; + int fc_modified_inodes_size; +}; + +struct ext4_inode_info { + __le32 i_data[15]; + __u32 i_dtime; + ext4_fsblk_t i_file_acl; + ext4_group_t i_block_group; + ext4_lblk_t i_dir_start_lookup; + long unsigned int i_state_flags; + long unsigned int i_flags; + struct rw_semaphore xattr_sem; + union { + struct list_head i_orphan; + unsigned int i_orphan_idx; + }; + struct list_head i_fc_dilist; + struct list_head i_fc_list; + ext4_lblk_t i_fc_lblk_start; + ext4_lblk_t i_fc_lblk_len; + atomic_t i_fc_updates; + atomic_t i_unwritten; + wait_queue_head_t i_fc_wait; + struct mutex i_fc_lock; + loff_t i_disksize; + struct rw_semaphore i_data_sem; + struct inode vfs_inode; + struct jbd2_inode *jinode; + spinlock_t i_raw_lock; + struct timespec64 i_crtime; + atomic_t i_prealloc_active; + unsigned int i_reserved_data_blocks; + struct rb_root i_prealloc_node; + rwlock_t i_prealloc_lock; + struct ext4_es_tree i_es_tree; + rwlock_t i_es_lock; + struct list_head i_es_list; + unsigned int i_es_all_nr; + unsigned int i_es_shk_nr; + ext4_lblk_t i_es_shrink_lblk; + ext4_group_t i_last_alloc_group; + struct ext4_pending_tree i_pending_tree; + __u16 i_extra_isize; + u16 i_inline_off; + u16 i_inline_size; + spinlock_t i_completed_io_lock; + struct list_head i_rsv_conversion_list; + struct work_struct i_rsv_conversion_work; + spinlock_t i_block_reservation_lock; + tid_t i_sync_tid; + tid_t i_datasync_tid; + __u32 i_csum_seed; + kprojid_t i_projid; +}; + +struct ext4_super_block { + __le32 s_inodes_count; + __le32 s_blocks_count_lo; + __le32 s_r_blocks_count_lo; + __le32 s_free_blocks_count_lo; + __le32 s_free_inodes_count; + __le32 s_first_data_block; + __le32 s_log_block_size; + __le32 s_log_cluster_size; + __le32 s_blocks_per_group; + __le32 s_clusters_per_group; + __le32 s_inodes_per_group; + __le32 s_mtime; + __le32 s_wtime; + __le16 s_mnt_count; + __le16 s_max_mnt_count; + __le16 s_magic; + __le16 s_state; + __le16 s_errors; + __le16 s_minor_rev_level; + __le32 s_lastcheck; + __le32 s_checkinterval; + __le32 s_creator_os; + __le32 s_rev_level; + __le16 s_def_resuid; + __le16 s_def_resgid; + __le32 s_first_ino; + __le16 s_inode_size; + __le16 s_block_group_nr; + __le32 s_feature_compat; + __le32 s_feature_incompat; + __le32 s_feature_ro_compat; + __u8 s_uuid[16]; + char s_volume_name[16]; + char s_last_mounted[64]; + __le32 s_algorithm_usage_bitmap; + __u8 s_prealloc_blocks; + __u8 s_prealloc_dir_blocks; + __le16 s_reserved_gdt_blocks; + __u8 s_journal_uuid[16]; + __le32 s_journal_inum; + __le32 s_journal_dev; + __le32 s_last_orphan; + __le32 s_hash_seed[4]; + __u8 s_def_hash_version; + __u8 s_jnl_backup_type; + __le16 s_desc_size; + __le32 s_default_mount_opts; + __le32 s_first_meta_bg; + __le32 s_mkfs_time; + __le32 s_jnl_blocks[17]; + __le32 s_blocks_count_hi; + __le32 s_r_blocks_count_hi; + __le32 s_free_blocks_count_hi; + __le16 s_min_extra_isize; + __le16 s_want_extra_isize; + __le32 s_flags; + __le16 
s_raid_stride; + __le16 s_mmp_update_interval; + __le64 s_mmp_block; + __le32 s_raid_stripe_width; + __u8 s_log_groups_per_flex; + __u8 s_checksum_type; + __u8 s_encryption_level; + __u8 s_reserved_pad; + __le64 s_kbytes_written; + __le32 s_snapshot_inum; + __le32 s_snapshot_id; + __le64 s_snapshot_r_blocks_count; + __le32 s_snapshot_list; + __le32 s_error_count; + __le32 s_first_error_time; + __le32 s_first_error_ino; + __le64 s_first_error_block; + __u8 s_first_error_func[32]; + __le32 s_first_error_line; + __le32 s_last_error_time; + __le32 s_last_error_ino; + __le32 s_last_error_line; + __le64 s_last_error_block; + __u8 s_last_error_func[32]; + __u8 s_mount_opts[64]; + __le32 s_usr_quota_inum; + __le32 s_grp_quota_inum; + __le32 s_overhead_clusters; + __le32 s_backup_bgs[2]; + __u8 s_encrypt_algos[4]; + __u8 s_encrypt_pw_salt[16]; + __le32 s_lpf_ino; + __le32 s_prj_quota_inum; + __le32 s_checksum_seed; + __u8 s_wtime_hi; + __u8 s_mtime_hi; + __u8 s_mkfs_time_hi; + __u8 s_lastcheck_hi; + __u8 s_first_error_time_hi; + __u8 s_last_error_time_hi; + __u8 s_first_error_errcode; + __u8 s_last_error_errcode; + __le16 s_encoding; + __le16 s_encoding_flags; + __le32 s_orphan_file_inum; + __le32 s_reserved[94]; + __le32 s_checksum; +}; + +struct ext4_journal_trigger { + struct jbd2_buffer_trigger_type tr_triggers; + struct super_block *sb; +}; + +struct ext4_orphan_block { + atomic_t ob_free_entries; + struct buffer_head *ob_bh; +}; + +struct ext4_orphan_info { + int of_blocks; + __u32 of_csum_seed; + struct ext4_orphan_block *of_binfo; +}; + +struct ext4_group_info; + +struct ext4_locality_group; + +struct ext4_li_request; + +struct mb_cache; + +struct ext4_sb_info { + long unsigned int s_desc_size; + long unsigned int s_inodes_per_block; + long unsigned int s_blocks_per_group; + long unsigned int s_clusters_per_group; + long unsigned int s_inodes_per_group; + long unsigned int s_itb_per_group; + long unsigned int s_gdb_count; + long unsigned int s_desc_per_block; + ext4_group_t s_groups_count; + ext4_group_t s_blockfile_groups; + long unsigned int s_overhead; + unsigned int s_cluster_ratio; + unsigned int s_cluster_bits; + long: 32; + loff_t s_bitmap_maxbytes; + struct buffer_head *s_sbh; + struct ext4_super_block *s_es; + struct buffer_head **s_group_desc; + unsigned int s_mount_opt; + unsigned int s_mount_opt2; + long unsigned int s_mount_flags; + unsigned int s_def_mount_opt; + unsigned int s_def_mount_opt2; + ext4_fsblk_t s_sb_block; + atomic64_t s_resv_clusters; + kuid_t s_resuid; + kgid_t s_resgid; + short unsigned int s_mount_state; + short unsigned int s_pad; + int s_addr_per_block_bits; + int s_desc_per_block_bits; + int s_inode_size; + int s_first_ino; + unsigned int s_inode_readahead_blks; + unsigned int s_inode_goal; + u32 s_hash_seed[4]; + int s_def_hash_version; + int s_hash_unsigned; + long: 32; + struct percpu_counter s_freeclusters_counter; + struct percpu_counter s_freeinodes_counter; + struct percpu_counter s_dirs_counter; + struct percpu_counter s_dirtyclusters_counter; + struct percpu_counter s_sra_exceeded_retry_limit; + struct blockgroup_lock *s_blockgroup_lock; + struct proc_dir_entry *s_proc; + struct kobject s_kobj; + struct completion s_kobj_unregister; + struct super_block *s_sb; + struct buffer_head *s_mmp_bh; + struct journal_s *s_journal; + long unsigned int s_ext4_flags; + struct mutex s_orphan_lock; + struct list_head s_orphan; + struct ext4_orphan_info s_orphan_info; + long unsigned int s_commit_interval; + u32 s_max_batch_time; + u32 s_min_batch_time; + struct 
file *s_journal_bdev_file; + unsigned int s_want_extra_isize; + struct ext4_system_blocks *s_system_blks; + struct ext4_group_info ***s_group_info; + struct inode *s_buddy_cache; + spinlock_t s_md_lock; + short unsigned int *s_mb_offsets; + unsigned int *s_mb_maxs; + unsigned int s_group_info_size; + unsigned int s_mb_free_pending; + struct list_head s_freed_data_list[2]; + struct list_head s_discard_list; + struct work_struct s_discard_work; + atomic_t s_retry_alloc_pending; + struct list_head *s_mb_avg_fragment_size; + rwlock_t *s_mb_avg_fragment_size_locks; + struct list_head *s_mb_largest_free_orders; + rwlock_t *s_mb_largest_free_orders_locks; + long unsigned int s_stripe; + unsigned int s_mb_max_linear_groups; + unsigned int s_mb_stream_request; + unsigned int s_mb_max_to_scan; + unsigned int s_mb_min_to_scan; + unsigned int s_mb_stats; + unsigned int s_mb_order2_reqs; + unsigned int s_mb_group_prealloc; + unsigned int s_max_dir_size_kb; + long unsigned int s_mb_last_group; + long unsigned int s_mb_last_start; + unsigned int s_mb_prefetch; + unsigned int s_mb_prefetch_limit; + unsigned int s_mb_best_avail_max_trim_order; + atomic_t s_bal_reqs; + atomic_t s_bal_success; + atomic_t s_bal_allocated; + atomic_t s_bal_ex_scanned; + atomic_t s_bal_cX_ex_scanned[5]; + atomic_t s_bal_groups_scanned; + atomic_t s_bal_goals; + atomic_t s_bal_len_goals; + atomic_t s_bal_breaks; + atomic_t s_bal_2orders; + atomic_t s_bal_p2_aligned_bad_suggestions; + atomic_t s_bal_goal_fast_bad_suggestions; + atomic_t s_bal_best_avail_bad_suggestions; + atomic64_t s_bal_cX_groups_considered[5]; + atomic64_t s_bal_cX_hits[5]; + atomic64_t s_bal_cX_failed[5]; + atomic_t s_mb_buddies_generated; + long: 32; + atomic64_t s_mb_generation_time; + atomic_t s_mb_lost_chunks; + atomic_t s_mb_preallocated; + atomic_t s_mb_discarded; + atomic_t s_lock_busy; + struct ext4_locality_group *s_locality_groups; + long unsigned int s_sectors_written_start; + u64 s_kbytes_written; + unsigned int s_extent_max_zeroout_kb; + unsigned int s_log_groups_per_flex; + struct flex_groups **s_flex_groups; + ext4_group_t s_flex_groups_allocated; + struct workqueue_struct *rsv_conversion_wq; + struct timer_list s_err_report; + struct ext4_li_request *s_li_request; + unsigned int s_li_wait_mult; + struct task_struct *s_mmp_tsk; + long unsigned int s_last_trim_minblks; + struct crypto_shash *s_chksum_driver; + __u32 s_csum_seed; + struct shrinker *s_es_shrinker; + struct list_head s_es_list; + long int s_es_nr_inode; + struct ext4_es_stats s_es_stats; + struct mb_cache *s_ea_block_cache; + struct mb_cache *s_ea_inode_cache; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t s_es_lock; + struct ext4_journal_trigger s_journal_triggers[1]; + struct ratelimit_state s_err_ratelimit_state; + struct ratelimit_state s_warning_ratelimit_state; + struct ratelimit_state s_msg_ratelimit_state; + atomic_t s_warning_count; + atomic_t s_msg_count; + struct fscrypt_dummy_policy s_dummy_enc_policy; + struct percpu_rw_semaphore s_writepages_rwsem; + struct dax_device *s_daxdev; + long: 32; + u64 s_dax_part_off; + errseq_t s_bdev_wb_err; + spinlock_t s_bdev_wb_lock; + spinlock_t s_error_lock; + int s_add_error_count; + int s_first_error_code; + __u32 s_first_error_line; + __u32 s_first_error_ino; + long: 32; + __u64 s_first_error_block; + const char *s_first_error_func; + long: 32; + time64_t s_first_error_time; + int s_last_error_code; 
+ __u32 s_last_error_line; + __u32 s_last_error_ino; + long: 32; + __u64 s_last_error_block; + const char *s_last_error_func; + long: 32; + time64_t s_last_error_time; + struct work_struct s_sb_upd_work; + unsigned int s_awu_min; + unsigned int s_awu_max; + atomic_t s_fc_subtid; + struct list_head s_fc_q[2]; + struct list_head s_fc_dentry_q[2]; + unsigned int s_fc_bytes; + spinlock_t s_fc_lock; + struct buffer_head *s_fc_bh; + struct ext4_fc_stats s_fc_stats; + tid_t s_fc_ineligible_tid; + struct ext4_fc_replay_state s_fc_replay_state; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ext4_group_info { + long unsigned int bb_state; + struct rb_root bb_free_root; + ext4_grpblk_t bb_first_free; + ext4_grpblk_t bb_free; + ext4_grpblk_t bb_fragments; + int bb_avg_fragment_size_order; + ext4_grpblk_t bb_largest_free_order; + ext4_group_t bb_group; + struct list_head bb_prealloc_list; + struct rw_semaphore alloc_sem; + struct list_head bb_avg_fragment_size_node; + struct list_head bb_largest_free_order_node; + ext4_grpblk_t bb_counters[0]; +}; + +enum ext4_li_mode { + EXT4_LI_MODE_PREFETCH_BBITMAP = 0, + EXT4_LI_MODE_ITABLE = 1, +}; + +struct ext4_li_request { + struct super_block *lr_super; + enum ext4_li_mode lr_mode; + ext4_group_t lr_first_not_zeroed; + ext4_group_t lr_next_group; + struct list_head lr_request; + long unsigned int lr_next_sched; + long unsigned int lr_timeout; +}; + +struct ext4_extent { + __le32 ee_block; + __le16 ee_len; + __le16 ee_start_hi; + __le32 ee_start_lo; +}; + +struct ext4_extent_idx { + __le32 ei_block; + __le32 ei_leaf_lo; + __le16 ei_leaf_hi; + __u16 ei_unused; +}; + +struct ext4_extent_header { + __le16 eh_magic; + __le16 eh_entries; + __le16 eh_max; + __le16 eh_depth; + __le32 eh_generation; +}; + +struct ext4_ext_path { + ext4_fsblk_t p_block; + __u16 p_depth; + __u16 p_maxdepth; + struct ext4_extent *p_ext; + struct ext4_extent_idx *p_idx; + struct ext4_extent_header *p_hdr; + struct buffer_head *p_bh; + long: 32; +}; + +struct commit_header { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; + unsigned char h_chksum_type; + unsigned char h_chksum_size; + unsigned char h_padding[2]; + __be32 h_chksum[8]; + __be64 h_commit_sec; + __be32 h_commit_nsec; + long: 32; +}; + +struct journal_block_tag3_s { + __be32 t_blocknr; + __be32 t_flags; + __be32 t_blocknr_high; + __be32 t_checksum; +}; + +typedef struct journal_block_tag3_s journal_block_tag3_t; + +struct journal_block_tag_s { + __be32 t_blocknr; + __be16 t_checksum; + __be16 t_flags; + __be32 t_blocknr_high; +}; + +typedef struct journal_block_tag_s journal_block_tag_t; + +enum jbd_state_bits { + BH_JBD = 16, + BH_JWrite = 17, + BH_Freed = 18, + BH_Revoked = 19, + BH_RevokeValid = 20, + BH_JBDDirty = 21, + BH_JournalHead = 22, + BH_Shadow = 23, + BH_Verified = 24, + BH_JBDPrivateStart = 25, +}; + +enum jbd2_shrink_type { + JBD2_SHRINK_DESTROY = 0, + JBD2_SHRINK_BUSY_STOP = 1, + JBD2_SHRINK_BUSY_SKIP = 2, +}; + +typedef u8 uint8_t; + +struct fuse_forget_one { + uint64_t nodeid; + uint64_t nlookup; +}; + +struct fuse_in_header { + uint32_t len; + uint32_t opcode; + uint64_t unique; + uint64_t nodeid; + uint32_t uid; + uint32_t gid; + uint32_t pid; + uint16_t total_extlen; + uint16_t padding; +}; + +struct fuse_out_header { + uint32_t len; + int32_t error; + uint64_t unique; +}; + +struct tree_descr { + const char *name; + const struct file_operations *ops; + int mode; +}; + +struct fuse_forget_link { + 
struct fuse_forget_one forget_one; + struct fuse_forget_link *next; + long: 32; +}; + +struct fuse_conn; + +struct fuse_mount { + struct fuse_conn *fc; + struct super_block *sb; + struct list_head fc_entry; + struct callback_head rcu; +}; + +struct fuse_in_arg { + unsigned int size; + const void *value; +}; + +struct fuse_arg { + unsigned int size; + void *value; +}; + +struct fuse_args { + uint64_t nodeid; + uint32_t opcode; + uint8_t in_numargs; + uint8_t out_numargs; + uint8_t ext_idx; + bool force: 1; + bool noreply: 1; + bool nocreds: 1; + bool in_pages: 1; + bool out_pages: 1; + bool user_pages: 1; + bool out_argvar: 1; + bool page_zeroing: 1; + bool page_replace: 1; + bool may_block: 1; + bool is_ext: 1; + bool is_pinned: 1; + bool invalidate_vmap: 1; + struct fuse_in_arg in_args[3]; + struct fuse_arg out_args[2]; + void (*end)(struct fuse_mount *, struct fuse_args *, int); + void *vmap_base; + long: 32; +}; + +struct fuse_req { + struct list_head list; + struct list_head intr_entry; + struct fuse_args *args; + refcount_t count; + long unsigned int flags; + long: 32; + struct { + struct fuse_in_header h; + } in; + struct { + struct fuse_out_header h; + } out; + wait_queue_head_t waitq; + struct fuse_mount *fm; +}; + +struct fuse_iqueue; + +struct fuse_iqueue_ops { + void (*send_forget)(struct fuse_iqueue *, struct fuse_forget_link *); + void (*send_interrupt)(struct fuse_iqueue *, struct fuse_req *); + void (*send_req)(struct fuse_iqueue *, struct fuse_req *); + void (*release)(struct fuse_iqueue *); +}; + +struct fuse_iqueue { + unsigned int connected; + spinlock_t lock; + wait_queue_head_t waitq; + long: 32; + u64 reqctr; + struct list_head pending; + struct list_head interrupts; + struct fuse_forget_link forget_list_head; + struct fuse_forget_link *forget_list_tail; + int forget_batch; + struct fasync_struct *fasync; + const struct fuse_iqueue_ops *ops; + void *priv; + long: 32; +}; + +struct fuse_sync_bucket; + +struct fuse_conn { + spinlock_t lock; + refcount_t count; + atomic_t dev_count; + struct callback_head rcu; + kuid_t user_id; + kgid_t group_id; + struct pid_namespace *pid_ns; + struct user_namespace *user_ns; + unsigned int max_read; + unsigned int max_write; + unsigned int max_pages; + unsigned int max_pages_limit; + long: 32; + struct fuse_iqueue iq; + atomic64_t khctr; + struct rb_root polled_files; + unsigned int max_background; + unsigned int congestion_threshold; + unsigned int num_background; + unsigned int active_background; + struct list_head bg_queue; + spinlock_t bg_lock; + int initialized; + int blocked; + wait_queue_head_t blocked_waitq; + unsigned int connected; + bool aborted; + unsigned int conn_error: 1; + unsigned int conn_init: 1; + unsigned int async_read: 1; + unsigned int abort_err: 1; + unsigned int atomic_o_trunc: 1; + unsigned int export_support: 1; + unsigned int writeback_cache: 1; + unsigned int parallel_dirops: 1; + unsigned int handle_killpriv: 1; + unsigned int cache_symlinks: 1; + unsigned int legacy_opts_show: 1; + unsigned int handle_killpriv_v2: 1; + unsigned int no_open: 1; + unsigned int no_opendir: 1; + unsigned int no_fsync: 1; + unsigned int no_fsyncdir: 1; + unsigned int no_flush: 1; + unsigned int no_setxattr: 1; + unsigned int setxattr_ext: 1; + unsigned int no_getxattr: 1; + unsigned int no_listxattr: 1; + unsigned int no_removexattr: 1; + unsigned int no_lock: 1; + unsigned int no_access: 1; + unsigned int no_create: 1; + unsigned int no_interrupt: 1; + unsigned int no_bmap: 1; + unsigned int no_poll: 1; + unsigned int 
big_writes: 1; + unsigned int dont_mask: 1; + unsigned int no_flock: 1; + unsigned int no_fallocate: 1; + unsigned int no_rename2: 1; + unsigned int auto_inval_data: 1; + unsigned int explicit_inval_data: 1; + unsigned int do_readdirplus: 1; + unsigned int readdirplus_auto: 1; + unsigned int async_dio: 1; + unsigned int no_lseek: 1; + unsigned int posix_acl: 1; + unsigned int default_permissions: 1; + unsigned int allow_other: 1; + unsigned int no_copy_file_range: 1; + unsigned int destroy: 1; + unsigned int delete_stale: 1; + unsigned int no_control: 1; + unsigned int no_force_umount: 1; + unsigned int auto_submounts: 1; + unsigned int sync_fs: 1; + unsigned int init_security: 1; + unsigned int create_supp_group: 1; + unsigned int inode_dax: 1; + unsigned int no_tmpfile: 1; + unsigned int direct_io_allow_mmap: 1; + unsigned int no_statx: 1; + unsigned int passthrough: 1; + unsigned int use_pages_for_kvec_io: 1; + int max_stack_depth; + atomic_t num_waiting; + unsigned int minor; + struct list_head entry; + dev_t dev; + struct dentry *ctl_dentry[5]; + int ctl_ndents; + u32 scramble_key[4]; + long: 32; + atomic64_t attr_version; + atomic64_t evict_ctr; + void (*release)(struct fuse_conn *); + struct rw_semaphore killsb; + struct list_head devices; + struct list_head mounts; + struct fuse_sync_bucket *curr_bucket; + struct idr backing_files_map; + long: 32; +}; + +struct fuse_sync_bucket { + atomic_t count; + wait_queue_head_t waitq; + struct callback_head rcu; +}; + +struct btrfs_ioctl_defrag_range_args { + __u64 start; + __u64 len; + __u64 flags; + __u32 extent_thresh; + __u32 compress_type; + __u32 unused[4]; +}; + +struct btrfs_ordered_extent { + u64 file_offset; + u64 num_bytes; + u64 ram_bytes; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 offset; + u64 bytes_left; + u64 truncated_len; + long unsigned int flags; + int compress_type; + int qgroup_rsv; + refcount_t refs; + struct btrfs_inode *inode; + struct list_head list; + struct list_head log_list; + wait_queue_head_t wait; + struct rb_node rb_node; + struct list_head root_extent_list; + struct btrfs_work work; + struct completion completion; + struct btrfs_work flush_work; + struct list_head work_list; + struct list_head bioc_list; + long: 32; +}; + +enum btrfs_compression_type { + BTRFS_COMPRESS_NONE = 0, + BTRFS_COMPRESS_ZLIB = 1, + BTRFS_COMPRESS_LZO = 2, + BTRFS_COMPRESS_ZSTD = 3, + BTRFS_NR_COMPRESS_TYPES = 4, +}; + +enum { + __EXTENT_FLAG_PINNED_BIT = 0, + EXTENT_FLAG_PINNED = 1, + __EXTENT_FLAG_PINNED_SEQ = 0, + __EXTENT_FLAG_COMPRESS_ZLIB_BIT = 1, + EXTENT_FLAG_COMPRESS_ZLIB = 2, + __EXTENT_FLAG_COMPRESS_ZLIB_SEQ = 1, + __EXTENT_FLAG_COMPRESS_LZO_BIT = 2, + EXTENT_FLAG_COMPRESS_LZO = 4, + __EXTENT_FLAG_COMPRESS_LZO_SEQ = 2, + __EXTENT_FLAG_COMPRESS_ZSTD_BIT = 3, + EXTENT_FLAG_COMPRESS_ZSTD = 8, + __EXTENT_FLAG_COMPRESS_ZSTD_SEQ = 3, + __EXTENT_FLAG_PREALLOC_BIT = 4, + EXTENT_FLAG_PREALLOC = 16, + __EXTENT_FLAG_PREALLOC_SEQ = 4, + __EXTENT_FLAG_LOGGING_BIT = 5, + EXTENT_FLAG_LOGGING = 32, + __EXTENT_FLAG_LOGGING_SEQ = 5, + __EXTENT_FLAG_MERGED_BIT = 6, + EXTENT_FLAG_MERGED = 64, + __EXTENT_FLAG_MERGED_SEQ = 6, +}; + +struct extent_map { + struct rb_node rb_node; + long: 32; + u64 start; + u64 len; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 offset; + u64 ram_bytes; + u64 generation; + u32 flags; + refcount_t refs; + struct list_head list; +}; + +struct inode_defrag { + struct rb_node rb_node; + long: 32; + u64 ino; + u64 transid; + u64 root; + u32 extent_thresh; + long: 32; +}; + +struct defrag_target_range { + struct 
list_head list; + u64 start; + u64 len; +}; + +struct btrfs_ioctl_quota_ctl_args { + __u64 cmd; + __u64 status; +}; + +struct btrfs_qgroup_status_item { + __le64 version; + __le64 generation; + __le64 flags; + __le64 rescan; + __le64 enable_gen; +}; + +struct btrfs_qgroup_info_item { + __le64 generation; + __le64 rfer; + __le64 rfer_cmpr; + __le64 excl; + __le64 excl_cmpr; +}; + +struct btrfs_qgroup_limit_item { + __le64 flags; + __le64 max_rfer; + __le64 max_excl; + __le64 rsv_rfer; + __le64 rsv_excl; +}; + +struct ulist_iterator { + struct list_head *cur_list; +}; + +typedef int iterate_extent_inodes_t(u64, u64, u64, u64, void *); + +struct btrfs_backref_walk_ctx { + u64 bytenr; + u64 extent_item_pos; + bool ignore_extent_item_pos; + bool skip_inode_ref_list; + struct btrfs_trans_handle *trans; + struct btrfs_fs_info *fs_info; + long: 32; + u64 time_seq; + struct ulist *refs; + struct ulist *roots; + bool (*cache_lookup)(u64, void *, const u64 **, int *); + void (*cache_store)(u64, const struct ulist *, void *); + iterate_extent_inodes_t *indirect_ref_iterator; + int (*check_extent_item)(u64, const struct btrfs_extent_item *, const struct extent_buffer *, void *); + bool (*skip_data_ref)(u64, u64, u64, void *); + void *user_ctx; +}; + +struct btrfs_qgroup_extent_record { + u64 num_bytes; + u32 data_rsv; + long: 32; + u64 data_rsv_refroot; + struct ulist *old_roots; + long: 32; +}; + +struct btrfs_qgroup_swapped_block { + struct rb_node node; + int level; + bool trace_leaf; + long: 32; + u64 subvol_bytenr; + u64 subvol_generation; + u64 reloc_bytenr; + u64 reloc_generation; + u64 last_snapshot; + struct btrfs_key first_key; + long: 32; +}; + +struct btrfs_qgroup_rsv { + u64 values[3]; +}; + +struct btrfs_qgroup { + u64 qgroupid; + u64 rfer; + u64 rfer_cmpr; + u64 excl; + u64 excl_cmpr; + u64 lim_flags; + u64 max_rfer; + u64 max_excl; + u64 rsv_rfer; + u64 rsv_excl; + struct btrfs_qgroup_rsv rsv; + struct list_head groups; + struct list_head members; + struct list_head dirty; + struct list_head iterator; + struct list_head nested_iterator; + struct rb_node node; + long: 32; + u64 old_refcnt; + u64 new_refcnt; + struct kobject kobj; + long: 32; +}; + +struct btrfs_qgroup_list { + struct list_head next_group; + struct list_head next_member; + struct btrfs_qgroup *group; + struct btrfs_qgroup *member; +}; + +enum { + __QGROUP_RESERVE_BIT = 0, + QGROUP_RESERVE = 1, + __QGROUP_RESERVE_SEQ = 0, + __QGROUP_RELEASE_BIT = 1, + QGROUP_RELEASE = 2, + __QGROUP_RELEASE_SEQ = 1, + __QGROUP_FREE_BIT = 2, + QGROUP_FREE = 4, + __QGROUP_FREE_SEQ = 2, +}; + +typedef long unsigned int mpi_limb_t; + +struct gcry_mpi { + int alloced; + int nlimbs; + int nbits; + int sign; + unsigned int flags; + mpi_limb_t *d; +}; + +typedef struct gcry_mpi *MPI; + +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_akcipher { + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct akcipher_alg { + int (*encrypt)(struct akcipher_request *); + int (*decrypt)(struct akcipher_request *); + int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_akcipher *, const void *, 
unsigned int); + unsigned int (*max_size)(struct crypto_akcipher *); + int (*init)(struct crypto_akcipher *); + void (*exit)(struct crypto_akcipher *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +struct rsa_mpi_key { + MPI n; + MPI e; + MPI d; + MPI p; + MPI q; + MPI dp; + MPI dq; + MPI qinv; +}; + +struct crypto_aes_ctx { + u32 key_enc[60]; + u32 key_dec[60]; + u32 key_length; +}; + +enum x509_akid_actions { + ACT_x509_akid_note_kid = 0, + ACT_x509_akid_note_name = 1, + ACT_x509_akid_note_serial = 2, + ACT_x509_extract_name_segment = 3, + ACT_x509_note_OID = 4, + NR__x509_akid_actions = 5, +}; + +struct asymmetric_key_parser { + struct list_head link; + struct module *owner; + const char *name; + int (*parse)(struct key_preparsed_payload *); +}; + +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE = 0, + VERIFYING_FIRMWARE_SIGNATURE = 1, + VERIFYING_KEXEC_PE_SIGNATURE = 2, + VERIFYING_KEY_SIGNATURE = 3, + VERIFYING_KEY_SELF_SIGNATURE = 4, + VERIFYING_UNSPECIFIED_SIGNATURE = 5, + NR__KEY_BEING_USED_FOR = 6, +}; + +enum asymmetric_payload_bits { + asym_crypto = 0, + asym_subtype = 1, + asym_key_ids = 2, + asym_auth = 3, +}; + +struct asymmetric_key_id { + short unsigned int len; + unsigned char data[0]; +}; + +struct asymmetric_key_ids { + void *id[3]; +}; + +struct public_key_signature; + +struct asymmetric_key_subtype { + struct module *owner; + const char *name; + short unsigned int name_len; + void (*describe)(const struct key *, struct seq_file *); + void (*destroy)(void *, void *); + int (*query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*verify_signature)(const struct key *, const struct public_key_signature *); +}; + +struct public_key_signature { + struct asymmetric_key_id *auth_ids[3]; + u8 *s; + u8 *digest; + u32 s_size; + u32 digest_size; + const char *pkey_algo; + const char *hash_algo; + const char *encoding; +}; + +enum blacklist_hash_type { + BLACKLIST_HASH_X509_TBS = 1, + BLACKLIST_HASH_BINARY = 2, +}; + +struct public_key { + void *key; + u32 keylen; + enum OID algo; + void *params; + u32 paramlen; + bool key_is_private; + const char *id_type; + const char *pkey_algo; + long unsigned int key_eflags; +}; + +struct x509_certificate { + struct x509_certificate *next; + struct x509_certificate *signer; + struct public_key *pub; + struct public_key_signature *sig; + char *issuer; + char *subject; + struct asymmetric_key_id *id; + struct asymmetric_key_id *skid; + time64_t valid_from; + time64_t valid_to; + const void *tbs; + unsigned int tbs_size; + unsigned int raw_sig_size; + const void *raw_sig; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_subject; + unsigned int raw_subject_size; + unsigned int raw_skid_size; + const void *raw_skid; + unsigned int index; + bool seen; + bool verified; + bool self_signed; + bool unsupported_sig; + bool blacklisted; + long: 32; +}; + +struct rq_wait { + wait_queue_head_t wait; + atomic_t inflight; +}; + +struct rq_depth { + unsigned int max_depth; + int scale_step; + bool scaled_max; + 
unsigned int queue_depth; + unsigned int default_depth; +}; + +typedef bool acquire_inflight_cb_t(struct rq_wait *, void *); + +typedef void cleanup_cb_t(struct rq_wait *, void *); + +struct rq_qos_wait_data { + struct wait_queue_entry wq; + struct task_struct *task; + struct rq_wait *rqw; + acquire_inflight_cb_t *cb; + void *private_data; + bool got_token; +}; + +struct io_ring_ctx; + +struct io_wq; + +struct io_uring_task { + int cached_refs; + const struct io_ring_ctx *last; + struct task_struct *task; + struct io_wq *io_wq; + struct file *registered_rings[16]; + struct xarray xa; + struct wait_queue_head wait; + atomic_t in_cancel; + atomic_t inflight_tracked; + struct percpu_counter inflight; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct { + struct llist_head task_list; + struct callback_head task_work; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; +}; + +struct io_uring_sqe { + __u8 opcode; + __u8 flags; + __u16 ioprio; + __s32 fd; + union { + __u64 off; + __u64 addr2; + struct { + __u32 cmd_op; + __u32 __pad1; + }; + }; + union { + __u64 addr; + __u64 splice_off_in; + struct { + __u32 level; + __u32 optname; + }; + }; + __u32 len; + union { + __kernel_rwf_t rw_flags; + __u32 fsync_flags; + __u16 poll_events; + __u32 poll32_events; + __u32 sync_range_flags; + __u32 msg_flags; + __u32 timeout_flags; + __u32 accept_flags; + __u32 cancel_flags; + __u32 open_flags; + __u32 statx_flags; + __u32 fadvise_advice; + __u32 splice_flags; + __u32 rename_flags; + __u32 unlink_flags; + __u32 hardlink_flags; + __u32 xattr_flags; + __u32 msg_ring_flags; + __u32 uring_cmd_flags; + __u32 waitid_flags; + __u32 futex_flags; + __u32 install_fd_flags; + __u32 nop_flags; + }; + __u64 user_data; + union { + __u16 buf_index; + __u16 buf_group; + }; + __u16 personality; + union { + __s32 splice_fd_in; + __u32 file_index; + __u32 optlen; + struct { + __u16 addr_len; + __u16 __pad3[1]; + }; + }; + union { + struct { + __u64 addr3; + __u64 __pad2[1]; + }; + __u64 optval; + __u8 cmd[0]; + }; +}; + +struct io_uring_cqe { + __u64 user_data; + __s32 res; + __u32 flags; + __u64 big_cqe[0]; +}; + +struct io_uring_buf { + __u64 addr; + __u32 len; + __u16 bid; + __u16 resv; +}; + +struct io_uring_buf_ring { + union { + struct { + __u64 resv1; + __u32 resv2; + __u16 resv3; + __u16 tail; + }; + struct { + struct {} __empty_bufs; + struct io_uring_buf bufs[0]; + }; + }; +}; + +enum io_uring_register_pbuf_ring_flags { + IOU_PBUF_RING_MMAP = 1, + IOU_PBUF_RING_INC = 2, +}; + +struct io_uring_buf_reg { + __u64 ring_addr; + __u32 ring_entries; + __u16 bgid; + __u16 flags; + __u64 resv[3]; +}; + +struct io_uring_buf_status { + __u32 buf_group; + __u32 head; + __u32 resv[8]; +}; + +enum task_work_notify_mode { + TWA_NONE = 0, + TWA_RESUME = 1, + TWA_SIGNAL = 2, + TWA_SIGNAL_NO_IPI = 3, + TWA_NMI_CURRENT = 4, + TWA_FLAGS = 65280, + TWAF_NO_ALLOC = 256, +}; + +enum io_uring_cmd_flags { + IO_URING_F_COMPLETE_DEFER = 1, + IO_URING_F_UNLOCKED = 2, + IO_URING_F_MULTISHOT = 4, + IO_URING_F_IOWQ = 8, + IO_URING_F_NONBLOCK = -2147483648, + IO_URING_F_SQE128 = 256, + IO_URING_F_CQE32 = 512, + IO_URING_F_IOPOLL = 1024, + IO_URING_F_CANCEL = 2048, + IO_URING_F_COMPAT = 4096, + IO_URING_F_TASK_DEAD = 8192, +}; + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct 
io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +struct io_wq_work { + struct io_wq_work_node list; + atomic_t flags; + int cancel_seq; +}; + +struct io_rsrc_node; + +struct io_rsrc_data { + unsigned int nr; + struct io_rsrc_node **nodes; +}; + +struct io_mapped_ubuf; + +struct io_rsrc_node { + unsigned char type; + int refs; + u64 tag; + union { + long unsigned int file_ptr; + struct io_mapped_ubuf *buf; + }; + long: 32; +}; + +struct io_file_table { + struct io_rsrc_data data; + long unsigned int *bitmap; + unsigned int alloc_hint; +}; + +struct io_hash_bucket { + struct hlist_head list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct io_hash_table { + struct io_hash_bucket *hbs; + unsigned int hash_bits; +}; + +struct io_mapped_region { + struct page **pages; + void *vmap_ptr; + size_t nr_pages; +}; + +struct io_kiocb; + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + struct io_wq_work_node free_list; + struct io_wq_work_list compl_reqs; + struct io_submit_link link; + bool plug_started; + bool need_plug; + bool cq_flush; + short unsigned int submit_nr; + long: 32; + struct blk_plug plug; +}; + +struct io_alloc_cache { + void **entries; + unsigned int nr_cached; + unsigned int max_cached; + size_t elem_size; +}; + +struct io_restriction { + long unsigned int register_op[2]; + long unsigned int sqe_op[2]; + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_rings; + +struct io_ev_fd; + +struct io_sq_data; + +struct io_wq_hash; + +struct io_ring_ctx { + struct { + unsigned int flags; + unsigned int drain_next: 1; + unsigned int restricted: 1; + unsigned int off_timeout_used: 1; + unsigned int drain_active: 1; + unsigned int has_evfd: 1; + unsigned int task_complete: 1; + unsigned int lockless_cq: 1; + unsigned int syscall_iopoll: 1; + unsigned int poll_activated: 1; + unsigned int drain_disabled: 1; + unsigned int compat: 1; + unsigned int iowq_limits_set: 1; + struct task_struct *submitter_task; + struct io_rings *rings; + struct percpu_ref refs; + clockid_t clockid; + enum tk_offsets clock_offset; + enum task_work_notify_mode notify_method; + unsigned int sq_thread_idle; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + struct mutex uring_lock; + u32 *sq_array; + struct io_uring_sqe *sq_sqes; + unsigned int cached_sq_head; + unsigned int sq_entries; + atomic_t cancel_seq; + bool poll_multi_queue; + struct io_wq_work_list iopoll_list; + struct io_file_table file_table; + struct io_rsrc_data buf_table; + long: 32; + struct io_submit_state submit_state; + struct xarray io_bl_xa; + struct io_hash_table cancel_table; + struct io_alloc_cache apoll_cache; + struct io_alloc_cache netmsg_cache; + struct io_alloc_cache rw_cache; + struct io_alloc_cache uring_cache; + struct hlist_head cancelable_uring_cmd; + u64 hybrid_poll_time; + long: 32; + long: 32; + }; + struct { + struct io_uring_cqe *cqe_cached; + struct io_uring_cqe *cqe_sentinel; + unsigned int cached_cq_tail; + unsigned int cq_entries; + struct io_ev_fd *io_ev_fd; + unsigned int cq_extra; + void *cq_wait_arg; + size_t cq_wait_size; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + struct llist_head work_llist; + struct llist_head retry_llist; + long unsigned int check_cq; + atomic_t 
cq_wait_nr; + atomic_t cq_timeouts; + struct wait_queue_head cq_wait; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + spinlock_t timeout_lock; + struct list_head timeout_list; + struct list_head ltimeout_list; + unsigned int cq_last_tm_flush; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + spinlock_t completion_lock; + struct list_head io_buffers_comp; + struct list_head cq_overflow_list; + struct hlist_head waitid_list; + struct hlist_head futex_list; + struct io_alloc_cache futex_cache; + const struct cred *sq_creds; + struct io_sq_data *sq_data; + struct wait_queue_head sqo_sq_wait; + struct list_head sqd_list; + unsigned int file_alloc_start; + unsigned int file_alloc_end; + struct list_head io_buffers_cache; + struct wait_queue_head poll_wq; + struct io_restriction restrictions; + u32 pers_next; + struct xarray personalities; + struct io_wq_hash *hash_map; + struct user_struct *user; + struct mm_struct *mm_account; + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + u32 iowq_limits[2]; + struct callback_head poll_wq_task_work; + struct list_head defer_list; + struct io_alloc_cache msg_cache; + spinlock_t msg_lock; + struct list_head napi_list; + spinlock_t napi_lock; + long: 32; + ktime_t napi_busy_poll_dt; + bool napi_prefer_busy_poll; + u8 napi_track_mode; + struct hlist_head napi_ht[16]; + unsigned int evfd_last_cq_tail; + struct mutex resize_lock; + short unsigned int n_ring_pages; + short unsigned int n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; + struct io_mapped_region param_region; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct io_uring { + u32 head; + u32 tail; +}; + +struct io_rings { + struct io_uring sq; + struct io_uring cq; + u32 sq_ring_mask; + u32 cq_ring_mask; + u32 sq_ring_entries; + u32 cq_ring_entries; + u32 sq_dropped; + atomic_t sq_flags; + u32 cq_flags; + u32 cq_overflow; + long: 32; + long: 32; + long: 32; + long: 32; + struct io_uring_cqe cqes[0]; +}; + +struct io_cmd_data { + struct file *file; + __u8 data[56]; +}; + +typedef u64 io_req_flags_t; + +struct io_cqe { + __u64 user_data; + __s32 res; + union { + __u32 flags; + int fd; + }; +}; + +struct io_tw_state; + +typedef void (*io_req_tw_func_t)(struct io_kiocb *, struct io_tw_state *); + +struct io_task_work { + struct llist_node node; + io_req_tw_func_t func; +}; + +struct io_buffer; + +struct io_buffer_list; + +struct async_poll; + +struct io_kiocb { + union { + struct file *file; + struct io_cmd_data cmd; + }; + u8 opcode; + u8 iopoll_completed; + u16 buf_index; + unsigned int nr_tw; + long: 32; + io_req_flags_t flags; + struct io_cqe cqe; + struct io_ring_ctx *ctx; + struct io_uring_task *tctx; + union { + struct io_buffer *kbuf; + struct io_buffer_list *buf_list; + struct io_rsrc_node *buf_node; + }; + union { + struct io_wq_work_node comp_list; + __poll_t apoll_events; + }; + struct io_rsrc_node *file_node; + atomic_t refs; + bool cancel_seq_set; + struct io_task_work io_task_work; + long: 32; + union { + struct hlist_node hash_node; + u64 iopoll_start; + }; + struct async_poll *apoll; + void *async_data; + atomic_t poll_refs; + struct io_kiocb *link; + const struct cred *creds; + struct io_wq_work work; + struct { + u64 extra1; + u64 extra2; + } big_cqe; +}; + +struct 
io_wq_hash { + refcount_t refs; + long unsigned int map; + struct wait_queue_head wait; +}; + +struct io_tw_state {}; + +enum { + REQ_F_FIXED_FILE = 1ULL, + REQ_F_IO_DRAIN = 2ULL, + REQ_F_LINK = 4ULL, + REQ_F_HARDLINK = 8ULL, + REQ_F_FORCE_ASYNC = 16ULL, + REQ_F_BUFFER_SELECT = 32ULL, + REQ_F_CQE_SKIP = 64ULL, + REQ_F_FAIL = 256ULL, + REQ_F_INFLIGHT = 512ULL, + REQ_F_CUR_POS = 1024ULL, + REQ_F_NOWAIT = 2048ULL, + REQ_F_LINK_TIMEOUT = 4096ULL, + REQ_F_NEED_CLEANUP = 8192ULL, + REQ_F_POLLED = 16384ULL, + REQ_F_IOPOLL_STATE = 32768ULL, + REQ_F_BUFFER_SELECTED = 65536ULL, + REQ_F_BUFFER_RING = 131072ULL, + REQ_F_REISSUE = 262144ULL, + REQ_F_SUPPORT_NOWAIT = 268435456ULL, + REQ_F_ISREG = 536870912ULL, + REQ_F_CREDS = 524288ULL, + REQ_F_REFCOUNT = 1048576ULL, + REQ_F_ARM_LTIMEOUT = 2097152ULL, + REQ_F_ASYNC_DATA = 4194304ULL, + REQ_F_SKIP_LINK_CQES = 8388608ULL, + REQ_F_SINGLE_POLL = 16777216ULL, + REQ_F_DOUBLE_POLL = 33554432ULL, + REQ_F_APOLL_MULTISHOT = 67108864ULL, + REQ_F_CLEAR_POLLIN = 134217728ULL, + REQ_F_POLL_NO_LAZY = 1073741824ULL, + REQ_F_CAN_POLL = 2147483648ULL, + REQ_F_BL_EMPTY = 4294967296ULL, + REQ_F_BL_NO_RECYCLE = 8589934592ULL, + REQ_F_BUFFERS_COMMIT = 17179869184ULL, + REQ_F_BUF_NODE = 34359738368ULL, +}; + +struct io_buffer { + struct list_head list; + __u64 addr; + __u32 len; + __u16 bid; + __u16 bgid; +}; + +struct io_buffer_list { + union { + struct list_head buf_list; + struct { + struct page **buf_pages; + struct io_uring_buf_ring *buf_ring; + }; + struct callback_head rcu; + }; + __u16 bgid; + __u16 buf_nr_pages; + __u16 nr_entries; + __u16 head; + __u16 mask; + __u16 flags; + atomic_t refs; +}; + +struct io_mapped_ubuf { + u64 ubuf; + unsigned int len; + unsigned int nr_bvecs; + unsigned int folio_shift; + refcount_t refs; + long unsigned int acct_pages; + struct bio_vec bvec[0]; + long: 32; +}; + +enum { + IOU_OK = 0, + IOU_ISSUE_SKIP_COMPLETE = -529, + IOU_REQUEUE = -3072, + IOU_STOP_MULTISHOT = -125, +}; + +enum { + IOBL_BUF_RING = 1, + IOBL_MMAP = 2, + IOBL_INC = 4, +}; + +enum { + KBUF_MODE_EXPAND = 1, + KBUF_MODE_FREE = 2, +}; + +struct buf_sel_arg { + struct iovec *iovs; + size_t out_len; + size_t max_len; + short unsigned int nr_iovs; + short unsigned int mode; +}; + +struct io_provide_buf { + struct file *file; + long: 32; + __u64 addr; + __u32 len; + __u32 bgid; + __u32 nbufs; + __u16 bid; +}; + +struct ksignal { + struct k_sigaction ka; + kernel_siginfo_t info; + int sig; +}; + +typedef void (*task_work_func_t)(struct callback_head *); + +enum io_wq_type { + IO_WQ_BOUND = 0, + IO_WQ_UNBOUND = 1, +}; + +typedef struct io_wq_work *free_work_fn(struct io_wq_work *); + +typedef void io_wq_work_fn(struct io_wq_work *); + +struct io_wq_acct { + unsigned int nr_workers; + unsigned int max_workers; + int index; + atomic_t nr_running; + raw_spinlock_t lock; + struct io_wq_work_list work_list; + long unsigned int flags; +}; + +struct io_wq { + long unsigned int state; + free_work_fn *free_work; + io_wq_work_fn *do_work; + struct io_wq_hash *hash; + atomic_t worker_refs; + struct completion worker_done; + struct hlist_node cpuhp_node; + struct task_struct *task; + struct io_wq_acct acct[2]; + raw_spinlock_t lock; + struct hlist_nulls_head free_list; + struct list_head all_list; + struct wait_queue_entry wait; + struct io_wq_work *hash_tail[32]; + cpumask_var_t cpu_mask; +}; + +enum { + IO_WQ_WORK_CANCEL = 1, + IO_WQ_WORK_HASHED = 2, + IO_WQ_WORK_UNBOUND = 4, + IO_WQ_WORK_CONCURRENT = 16, + IO_WQ_HASH_SHIFT = 24, +}; + +enum io_wq_cancel { + IO_WQ_CANCEL_OK = 0, + 
IO_WQ_CANCEL_RUNNING = 1, + IO_WQ_CANCEL_NOTFOUND = 2, +}; + +struct io_wq_data { + struct io_wq_hash *hash; + struct task_struct *task; + io_wq_work_fn *do_work; + free_work_fn *free_work; +}; + +typedef bool work_cancel_fn(struct io_wq_work *, void *); + +enum { + IO_WORKER_F_UP = 0, + IO_WORKER_F_RUNNING = 1, + IO_WORKER_F_FREE = 2, + IO_WORKER_F_BOUND = 3, +}; + +enum { + IO_WQ_BIT_EXIT = 0, +}; + +enum { + IO_ACCT_STALLED_BIT = 0, +}; + +struct io_worker { + refcount_t ref; + int create_index; + long unsigned int flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + struct io_wq *wq; + struct io_wq_work *cur_work; + raw_spinlock_t lock; + struct completion ref_done; + long unsigned int create_state; + struct callback_head create_work; + int init_retries; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +enum { + IO_WQ_ACCT_BOUND = 0, + IO_WQ_ACCT_UNBOUND = 1, + IO_WQ_ACCT_NR = 2, +}; + +struct io_cb_cancel_data { + work_cancel_fn *fn; + void *data; + int nr_running; + int nr_pending; + bool cancel_all; +}; + +struct online_data { + unsigned int cpu; + bool online; +}; + +typedef u32 unative_t; + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *, char *); + ssize_t (*store)(struct device_driver *, const char *, size_t); +}; + +struct iommu_group {}; + +enum { + pci_channel_io_normal = 1, + pci_channel_io_frozen = 2, + pci_channel_io_perm_failure = 3, +}; + +enum pci_fixup_pass { + pci_fixup_early = 0, + pci_fixup_header = 1, + pci_fixup_final = 2, + pci_fixup_enable = 3, + pci_fixup_resume = 4, + pci_fixup_suspend = 5, + pci_fixup_resume_early = 6, + pci_fixup_suspend_late = 7, +}; + +enum hk_type { + HK_TYPE_TIMER = 0, + HK_TYPE_RCU = 1, + HK_TYPE_MISC = 2, + HK_TYPE_SCHED = 3, + HK_TYPE_TICK = 4, + HK_TYPE_DOMAIN = 5, + HK_TYPE_WQ = 6, + HK_TYPE_MANAGED_IRQ = 7, + HK_TYPE_KTHREAD = 8, + HK_TYPE_MAX = 9, +}; + +struct pci_dynid { + struct list_head node; + struct pci_device_id id; +}; + +struct drv_dev_and_id { + struct pci_driver *drv; + struct pci_dev *dev; + const struct pci_device_id *id; +}; + +struct acpi_device; + +typedef long unsigned int cycles_t; + +struct timer_rand_state { + long unsigned int last_time; + long int last_delta; + long int last_delta2; +}; + +enum chacha_constants { + CHACHA_CONSTANT_EXPA = 1634760805, + CHACHA_CONSTANT_ND_3 = 857760878, + CHACHA_CONSTANT_2_BY = 2036477234, + CHACHA_CONSTANT_TE_K = 1797285236, +}; + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32, + BLAKE2S_128_HASH_SIZE = 16, + BLAKE2S_160_HASH_SIZE = 20, + BLAKE2S_224_HASH_SIZE = 28, + BLAKE2S_256_HASH_SIZE = 32, +}; + +struct blake2s_state { + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[64]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2s_iv { + BLAKE2S_IV0 = 1779033703, + BLAKE2S_IV1 = 3144134277, + BLAKE2S_IV2 = 1013904242, + BLAKE2S_IV3 = 2773480762, + BLAKE2S_IV4 = 1359893119, + BLAKE2S_IV5 = 2600822924, + BLAKE2S_IV6 = 528734635, + BLAKE2S_IV7 = 1541459225, +}; + +enum { + CRNG_EMPTY = 0, + CRNG_EARLY = 1, + CRNG_READY = 2, +}; + +enum { + CRNG_RESEED_START_INTERVAL = 1000, + CRNG_RESEED_INTERVAL = 60000, +}; + +struct crng { + u8 key[32]; + long unsigned int generation; + local_lock_t lock; +}; + +struct batch_u8 { + u8 entropy[96]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u16 { + u16 entropy[48]; + local_lock_t lock; + long unsigned int 
generation; + unsigned int position; +}; + +struct batch_u32 { + u32 entropy[24]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u64 { + u64 entropy[12]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +enum { + POOL_BITS = 256, + POOL_READY_BITS = 256, + POOL_EARLY_BITS = 128, +}; + +struct fast_pool { + long unsigned int pool[4]; + long unsigned int last; + unsigned int count; + struct timer_list mix; +}; + +struct entropy_timer_state { + long unsigned int entropy; + struct timer_list timer; + atomic_t samples; + unsigned int samples_per_bit; +}; + +enum { + NUM_TRIAL_SAMPLES = 8192, + MAX_SAMPLES_PER_BIT = 66, +}; + +enum { + MIX_INFLIGHT = 2147483648, +}; + +struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +}; + +enum dmi_field { + DMI_NONE = 0, + DMI_BIOS_VENDOR = 1, + DMI_BIOS_VERSION = 2, + DMI_BIOS_DATE = 3, + DMI_BIOS_RELEASE = 4, + DMI_EC_FIRMWARE_RELEASE = 5, + DMI_SYS_VENDOR = 6, + DMI_PRODUCT_NAME = 7, + DMI_PRODUCT_VERSION = 8, + DMI_PRODUCT_SERIAL = 9, + DMI_PRODUCT_UUID = 10, + DMI_PRODUCT_SKU = 11, + DMI_PRODUCT_FAMILY = 12, + DMI_BOARD_VENDOR = 13, + DMI_BOARD_NAME = 14, + DMI_BOARD_VERSION = 15, + DMI_BOARD_SERIAL = 16, + DMI_BOARD_ASSET_TAG = 17, + DMI_CHASSIS_VENDOR = 18, + DMI_CHASSIS_TYPE = 19, + DMI_CHASSIS_VERSION = 20, + DMI_CHASSIS_SERIAL = 21, + DMI_CHASSIS_ASSET_TAG = 22, + DMI_STRING_MAX = 23, + DMI_OEM_STRING = 24, +}; + +struct dmi_strmatch { + unsigned char slot: 7; + unsigned char exact_match: 1; + char substr[79]; +}; + +struct dmi_system_id { + int (*callback)(const struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +}; + +enum { + BLK_TAG_ALLOC_FIFO = 0, + BLK_TAG_ALLOC_RR = 1, + BLK_TAG_ALLOC_MAX = 2, +}; + +enum { + ATA_MAX_DEVICES = 2, + ATA_MAX_PRD = 256, + ATA_SECT_SIZE = 512, + ATA_MAX_SECTORS_128 = 128, + ATA_MAX_SECTORS = 256, + ATA_MAX_SECTORS_1024 = 1024, + ATA_MAX_SECTORS_LBA48 = 65535, + ATA_MAX_SECTORS_TAPE = 65535, + ATA_MAX_TRIM_RNUM = 64, + ATA_ID_WORDS = 256, + ATA_ID_CONFIG = 0, + ATA_ID_CYLS = 1, + ATA_ID_HEADS = 3, + ATA_ID_SECTORS = 6, + ATA_ID_SERNO = 10, + ATA_ID_BUF_SIZE = 21, + ATA_ID_FW_REV = 23, + ATA_ID_PROD = 27, + ATA_ID_MAX_MULTSECT = 47, + ATA_ID_DWORD_IO = 48, + ATA_ID_TRUSTED = 48, + ATA_ID_CAPABILITY = 49, + ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_OLD_DMA_MODES = 52, + ATA_ID_FIELD_VALID = 53, + ATA_ID_CUR_CYLS = 54, + ATA_ID_CUR_HEADS = 55, + ATA_ID_CUR_SECTORS = 56, + ATA_ID_MULTSECT = 59, + ATA_ID_LBA_CAPACITY = 60, + ATA_ID_SWDMA_MODES = 62, + ATA_ID_MWDMA_MODES = 63, + ATA_ID_PIO_MODES = 64, + ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_DMA_TIME = 66, + ATA_ID_EIDE_PIO = 67, + ATA_ID_EIDE_PIO_IORDY = 68, + ATA_ID_ADDITIONAL_SUPP = 69, + ATA_ID_QUEUE_DEPTH = 75, + ATA_ID_SATA_CAPABILITY = 76, + ATA_ID_SATA_CAPABILITY_2 = 77, + ATA_ID_FEATURE_SUPP = 78, + ATA_ID_MAJOR_VER = 80, + ATA_ID_COMMAND_SET_1 = 82, + ATA_ID_COMMAND_SET_2 = 83, + ATA_ID_CFSSE = 84, + ATA_ID_CFS_ENABLE_1 = 85, + ATA_ID_CFS_ENABLE_2 = 86, + ATA_ID_CSF_DEFAULT = 87, + ATA_ID_UDMA_MODES = 88, + ATA_ID_HW_CONFIG = 93, + ATA_ID_SPG = 98, + ATA_ID_LBA_CAPACITY_2 = 100, + ATA_ID_SECTOR_SIZE = 106, + ATA_ID_WWN = 108, + ATA_ID_LOGICAL_SECTOR_SIZE = 117, + ATA_ID_COMMAND_SET_3 = 119, + ATA_ID_COMMAND_SET_4 = 120, + ATA_ID_LAST_LUN = 126, + ATA_ID_DLF = 128, + ATA_ID_CSFO = 129, + ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, + 
ATA_ID_DATA_SET_MGMT = 169, + ATA_ID_SCT_CMD_XPORT = 206, + ATA_ID_ROT_SPEED = 217, + ATA_ID_PIO4 = 2, + ATA_ID_SERNO_LEN = 20, + ATA_ID_FW_REV_LEN = 8, + ATA_ID_PROD_LEN = 40, + ATA_ID_WWN_LEN = 8, + ATA_PCI_CTL_OFS = 2, + ATA_PIO0 = 1, + ATA_PIO1 = 3, + ATA_PIO2 = 7, + ATA_PIO3 = 15, + ATA_PIO4 = 31, + ATA_PIO5 = 63, + ATA_PIO6 = 127, + ATA_PIO4_ONLY = 16, + ATA_SWDMA0 = 1, + ATA_SWDMA1 = 3, + ATA_SWDMA2 = 7, + ATA_SWDMA2_ONLY = 4, + ATA_MWDMA0 = 1, + ATA_MWDMA1 = 3, + ATA_MWDMA2 = 7, + ATA_MWDMA3 = 15, + ATA_MWDMA4 = 31, + ATA_MWDMA12_ONLY = 6, + ATA_MWDMA2_ONLY = 4, + ATA_UDMA0 = 1, + ATA_UDMA1 = 3, + ATA_UDMA2 = 7, + ATA_UDMA3 = 15, + ATA_UDMA4 = 31, + ATA_UDMA5 = 63, + ATA_UDMA6 = 127, + ATA_UDMA7 = 255, + ATA_UDMA24_ONLY = 20, + ATA_UDMA_MASK_40C = 7, + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = 2048, + ATA_PRD_EOT = -2147483648, + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = 8, + ATA_DMA_START = 1, + ATA_DMA_INTR = 4, + ATA_DMA_ERR = 2, + ATA_DMA_ACTIVE = 1, + ATA_HOB = 128, + ATA_NIEN = 2, + ATA_LBA = 64, + ATA_DEV1 = 16, + ATA_DEVICE_OBS = 160, + ATA_DEVCTL_OBS = 8, + ATA_BUSY = 128, + ATA_DRDY = 64, + ATA_DF = 32, + ATA_DSC = 16, + ATA_DRQ = 8, + ATA_CORR = 4, + ATA_SENSE = 2, + ATA_ERR = 1, + ATA_SRST = 4, + ATA_ICRC = 128, + ATA_BBK = 128, + ATA_UNC = 64, + ATA_MC = 32, + ATA_IDNF = 16, + ATA_MCR = 8, + ATA_ABORTED = 4, + ATA_TRK0NF = 2, + ATA_AMNF = 1, + ATAPI_LFS = 240, + ATAPI_EOM = 2, + ATAPI_ILI = 1, + ATAPI_IO = 2, + ATAPI_COD = 1, + ATA_REG_DATA = 0, + ATA_REG_ERR = 1, + ATA_REG_NSECT = 2, + ATA_REG_LBAL = 3, + ATA_REG_LBAM = 4, + ATA_REG_LBAH = 5, + ATA_REG_DEVICE = 6, + ATA_REG_STATUS = 7, + ATA_REG_FEATURE = 1, + ATA_REG_CMD = 7, + ATA_REG_BYTEL = 4, + ATA_REG_BYTEH = 5, + ATA_REG_DEVSEL = 6, + ATA_REG_IRQ = 2, + ATA_CMD_DEV_RESET = 8, + ATA_CMD_CHK_POWER = 229, + ATA_CMD_STANDBY = 226, + ATA_CMD_IDLE = 227, + ATA_CMD_EDD = 144, + ATA_CMD_DOWNLOAD_MICRO = 146, + ATA_CMD_DOWNLOAD_MICRO_DMA = 147, + ATA_CMD_NOP = 0, + ATA_CMD_FLUSH = 231, + ATA_CMD_FLUSH_EXT = 234, + ATA_CMD_ID_ATA = 236, + ATA_CMD_ID_ATAPI = 161, + ATA_CMD_SERVICE = 162, + ATA_CMD_READ = 200, + ATA_CMD_READ_EXT = 37, + ATA_CMD_READ_QUEUED = 38, + ATA_CMD_READ_STREAM_EXT = 43, + ATA_CMD_READ_STREAM_DMA_EXT = 42, + ATA_CMD_WRITE = 202, + ATA_CMD_WRITE_EXT = 53, + ATA_CMD_WRITE_QUEUED = 54, + ATA_CMD_WRITE_STREAM_EXT = 59, + ATA_CMD_WRITE_STREAM_DMA_EXT = 58, + ATA_CMD_WRITE_FUA_EXT = 61, + ATA_CMD_WRITE_QUEUED_FUA_EXT = 62, + ATA_CMD_FPDMA_READ = 96, + ATA_CMD_FPDMA_WRITE = 97, + ATA_CMD_NCQ_NON_DATA = 99, + ATA_CMD_FPDMA_SEND = 100, + ATA_CMD_FPDMA_RECV = 101, + ATA_CMD_PIO_READ = 32, + ATA_CMD_PIO_READ_EXT = 36, + ATA_CMD_PIO_WRITE = 48, + ATA_CMD_PIO_WRITE_EXT = 52, + ATA_CMD_READ_MULTI = 196, + ATA_CMD_READ_MULTI_EXT = 41, + ATA_CMD_WRITE_MULTI = 197, + ATA_CMD_WRITE_MULTI_EXT = 57, + ATA_CMD_WRITE_MULTI_FUA_EXT = 206, + ATA_CMD_SET_FEATURES = 239, + ATA_CMD_SET_MULTI = 198, + ATA_CMD_PACKET = 160, + ATA_CMD_VERIFY = 64, + ATA_CMD_VERIFY_EXT = 66, + ATA_CMD_WRITE_UNCORR_EXT = 69, + ATA_CMD_STANDBYNOW1 = 224, + ATA_CMD_IDLEIMMEDIATE = 225, + ATA_CMD_SLEEP = 230, + ATA_CMD_INIT_DEV_PARAMS = 145, + ATA_CMD_READ_NATIVE_MAX = 248, + ATA_CMD_READ_NATIVE_MAX_EXT = 39, + ATA_CMD_SET_MAX = 249, + ATA_CMD_SET_MAX_EXT = 55, + ATA_CMD_READ_LOG_EXT = 47, + ATA_CMD_WRITE_LOG_EXT = 63, + ATA_CMD_READ_LOG_DMA_EXT = 71, + ATA_CMD_WRITE_LOG_DMA_EXT = 87, + ATA_CMD_TRUSTED_NONDATA = 91, + ATA_CMD_TRUSTED_RCV = 92, + ATA_CMD_TRUSTED_RCV_DMA = 93, + ATA_CMD_TRUSTED_SND = 94, + 
ATA_CMD_TRUSTED_SND_DMA = 95, + ATA_CMD_PMP_READ = 228, + ATA_CMD_PMP_READ_DMA = 233, + ATA_CMD_PMP_WRITE = 232, + ATA_CMD_PMP_WRITE_DMA = 235, + ATA_CMD_CONF_OVERLAY = 177, + ATA_CMD_SEC_SET_PASS = 241, + ATA_CMD_SEC_UNLOCK = 242, + ATA_CMD_SEC_ERASE_PREP = 243, + ATA_CMD_SEC_ERASE_UNIT = 244, + ATA_CMD_SEC_FREEZE_LOCK = 245, + ATA_CMD_SEC_DISABLE_PASS = 246, + ATA_CMD_CONFIG_STREAM = 81, + ATA_CMD_SMART = 176, + ATA_CMD_MEDIA_LOCK = 222, + ATA_CMD_MEDIA_UNLOCK = 223, + ATA_CMD_DSM = 6, + ATA_CMD_CHK_MED_CRD_TYP = 209, + ATA_CMD_CFA_REQ_EXT_ERR = 3, + ATA_CMD_CFA_WRITE_NE = 56, + ATA_CMD_CFA_TRANS_SECT = 135, + ATA_CMD_CFA_ERASE = 192, + ATA_CMD_CFA_WRITE_MULT_NE = 205, + ATA_CMD_REQ_SENSE_DATA = 11, + ATA_CMD_SANITIZE_DEVICE = 180, + ATA_CMD_ZAC_MGMT_IN = 74, + ATA_CMD_ZAC_MGMT_OUT = 159, + ATA_CMD_RESTORE = 16, + ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 1, + ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 2, + ATA_SUBCMD_FPDMA_SEND_DSM = 0, + ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 2, + ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0, + ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 5, + ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 6, + ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 7, + ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0, + ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 1, + ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 2, + ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 3, + ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 4, + ATA_LOG_DIRECTORY = 0, + ATA_LOG_SATA_NCQ = 16, + ATA_LOG_NCQ_NON_DATA = 18, + ATA_LOG_NCQ_SEND_RECV = 19, + ATA_LOG_CDL = 24, + ATA_LOG_CDL_SIZE = 512, + ATA_LOG_IDENTIFY_DEVICE = 48, + ATA_LOG_SENSE_NCQ = 15, + ATA_LOG_SENSE_NCQ_SIZE = 1024, + ATA_LOG_CONCURRENT_POSITIONING_RANGES = 71, + ATA_LOG_SUPPORTED_CAPABILITIES = 3, + ATA_LOG_CURRENT_SETTINGS = 4, + ATA_LOG_SECURITY = 6, + ATA_LOG_SATA_SETTINGS = 8, + ATA_LOG_ZONED_INFORMATION = 9, + ATA_LOG_DEVSLP_OFFSET = 48, + ATA_LOG_DEVSLP_SIZE = 8, + ATA_LOG_DEVSLP_MDAT = 0, + ATA_LOG_DEVSLP_MDAT_MASK = 31, + ATA_LOG_DEVSLP_DETO = 1, + ATA_LOG_DEVSLP_VALID = 7, + ATA_LOG_DEVSLP_VALID_MASK = 128, + ATA_LOG_NCQ_PRIO_OFFSET = 9, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = 1, + ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 4, + ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = 1, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 8, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 12, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 16, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = 2, + ATA_LOG_NCQ_SEND_RECV_SIZE = 20, + ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = 1, + ATA_LOG_NCQ_NON_DATA_ABORT_ALL = 2, + ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = 4, + ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = 8, + ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = 16, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 28, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = 1, + ATA_LOG_NCQ_NON_DATA_SIZE = 64, + ATA_CMD_READ_LONG = 34, + ATA_CMD_READ_LONG_ONCE = 35, + ATA_CMD_WRITE_LONG = 50, + ATA_CMD_WRITE_LONG_ONCE = 51, + SETFEATURES_XFER = 3, + XFER_UDMA_7 = 71, + XFER_UDMA_6 = 70, + XFER_UDMA_5 = 69, + XFER_UDMA_4 = 68, + XFER_UDMA_3 = 67, + XFER_UDMA_2 = 66, + XFER_UDMA_1 = 65, + XFER_UDMA_0 = 64, + XFER_MW_DMA_4 = 36, + XFER_MW_DMA_3 = 35, + XFER_MW_DMA_2 = 34, + XFER_MW_DMA_1 = 33, + XFER_MW_DMA_0 = 32, + XFER_SW_DMA_2 = 18, + XFER_SW_DMA_1 = 17, + XFER_SW_DMA_0 = 16, + XFER_PIO_6 = 14, + XFER_PIO_5 = 13, + XFER_PIO_4 = 12, + XFER_PIO_3 = 
11, + XFER_PIO_2 = 10, + XFER_PIO_1 = 9, + XFER_PIO_0 = 8, + XFER_PIO_SLOW = 0, + SETFEATURES_WC_ON = 2, + SETFEATURES_WC_OFF = 130, + SETFEATURES_RA_ON = 170, + SETFEATURES_RA_OFF = 85, + SETFEATURES_AAM_ON = 66, + SETFEATURES_AAM_OFF = 194, + SETFEATURES_SPINUP = 7, + SETFEATURES_SPINUP_TIMEOUT = 30000, + SETFEATURES_SATA_ENABLE = 16, + SETFEATURES_SATA_DISABLE = 144, + SETFEATURES_CDL = 13, + SATA_FPDMA_OFFSET = 1, + SATA_FPDMA_AA = 2, + SATA_DIPM = 3, + SATA_FPDMA_IN_ORDER = 4, + SATA_AN = 5, + SATA_SSP = 6, + SATA_DEVSLP = 9, + SETFEATURE_SENSE_DATA = 195, + SETFEATURE_SENSE_DATA_SUCC_NCQ = 196, + ATA_SET_MAX_ADDR = 0, + ATA_SET_MAX_PASSWD = 1, + ATA_SET_MAX_LOCK = 2, + ATA_SET_MAX_UNLOCK = 3, + ATA_SET_MAX_FREEZE_LOCK = 4, + ATA_SET_MAX_PASSWD_DMA = 5, + ATA_SET_MAX_UNLOCK_DMA = 6, + ATA_DCO_RESTORE = 192, + ATA_DCO_FREEZE_LOCK = 193, + ATA_DCO_IDENTIFY = 194, + ATA_DCO_SET = 195, + ATA_SMART_ENABLE = 216, + ATA_SMART_READ_VALUES = 208, + ATA_SMART_READ_THRESHOLDS = 209, + ATA_DSM_TRIM = 1, + ATA_SMART_LBAM_PASS = 79, + ATA_SMART_LBAH_PASS = 194, + ATAPI_PKT_DMA = 1, + ATAPI_DMADIR = 4, + ATAPI_CDB_LEN = 16, + SATA_PMP_MAX_PORTS = 15, + SATA_PMP_CTRL_PORT = 15, + SATA_PMP_GSCR_DWORDS = 128, + SATA_PMP_GSCR_PROD_ID = 0, + SATA_PMP_GSCR_REV = 1, + SATA_PMP_GSCR_PORT_INFO = 2, + SATA_PMP_GSCR_ERROR = 32, + SATA_PMP_GSCR_ERROR_EN = 33, + SATA_PMP_GSCR_FEAT = 64, + SATA_PMP_GSCR_FEAT_EN = 96, + SATA_PMP_PSCR_STATUS = 0, + SATA_PMP_PSCR_ERROR = 1, + SATA_PMP_PSCR_CONTROL = 2, + SATA_PMP_FEAT_BIST = 1, + SATA_PMP_FEAT_PMREQ = 2, + SATA_PMP_FEAT_DYNSSC = 4, + SATA_PMP_FEAT_NOTIFY = 8, + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA40_SHORT = 3, + ATA_CBL_PATA_UNK = 4, + ATA_CBL_PATA_IGN = 5, + ATA_CBL_SATA = 6, + SCR_STATUS = 0, + SCR_ERROR = 1, + SCR_CONTROL = 2, + SCR_ACTIVE = 3, + SCR_NOTIFICATION = 4, + SERR_DATA_RECOVERED = 1, + SERR_COMM_RECOVERED = 2, + SERR_DATA = 256, + SERR_PERSISTENT = 512, + SERR_PROTOCOL = 1024, + SERR_INTERNAL = 2048, + SERR_PHYRDY_CHG = 65536, + SERR_PHY_INT_ERR = 131072, + SERR_COMM_WAKE = 262144, + SERR_10B_8B_ERR = 524288, + SERR_DISPARITY = 1048576, + SERR_CRC = 2097152, + SERR_HANDSHAKE = 4194304, + SERR_LINK_SEQ_ERR = 8388608, + SERR_TRANS_ST_ERROR = 16777216, + SERR_UNRECOG_FIS = 33554432, + SERR_DEV_XCHG = 67108864, +}; + +struct ata_bmdma_prd { + __le32 addr; + __le32 flags_len; +}; + +typedef u64 async_cookie_t; + +enum ata_quirks { + __ATA_QUIRK_DIAGNOSTIC = 0, + __ATA_QUIRK_NODMA = 1, + __ATA_QUIRK_NONCQ = 2, + __ATA_QUIRK_MAX_SEC_128 = 3, + __ATA_QUIRK_BROKEN_HPA = 4, + __ATA_QUIRK_DISABLE = 5, + __ATA_QUIRK_HPA_SIZE = 6, + __ATA_QUIRK_IVB = 7, + __ATA_QUIRK_STUCK_ERR = 8, + __ATA_QUIRK_BRIDGE_OK = 9, + __ATA_QUIRK_ATAPI_MOD16_DMA = 10, + __ATA_QUIRK_FIRMWARE_WARN = 11, + __ATA_QUIRK_1_5_GBPS = 12, + __ATA_QUIRK_NOSETXFER = 13, + __ATA_QUIRK_BROKEN_FPDMA_AA = 14, + __ATA_QUIRK_DUMP_ID = 15, + __ATA_QUIRK_MAX_SEC_LBA48 = 16, + __ATA_QUIRK_ATAPI_DMADIR = 17, + __ATA_QUIRK_NO_NCQ_TRIM = 18, + __ATA_QUIRK_NOLPM = 19, + __ATA_QUIRK_WD_BROKEN_LPM = 20, + __ATA_QUIRK_ZERO_AFTER_TRIM = 21, + __ATA_QUIRK_NO_DMA_LOG = 22, + __ATA_QUIRK_NOTRIM = 23, + __ATA_QUIRK_MAX_SEC_1024 = 24, + __ATA_QUIRK_MAX_TRIM_128M = 25, + __ATA_QUIRK_NO_NCQ_ON_ATI = 26, + __ATA_QUIRK_NO_ID_DEV_LOG = 27, + __ATA_QUIRK_NO_LOG_DIR = 28, + __ATA_QUIRK_NO_FUA = 29, + __ATA_QUIRK_MAX = 30, +}; + +enum { + LIBATA_MAX_PRD = 128, + LIBATA_DUMB_MAX_PRD = 64, + ATA_DEF_QUEUE = 1, + ATA_MAX_QUEUE = 32, + ATA_TAG_INTERNAL = 32, + ATA_SHORT_PAUSE = 16, + 
ATAPI_MAX_DRAIN = 16384, + ATA_ALL_DEVICES = 3, + ATA_SHT_EMULATED = 1, + ATA_SHT_THIS_ID = -1, + ATA_TFLAG_LBA48 = 1, + ATA_TFLAG_ISADDR = 2, + ATA_TFLAG_DEVICE = 4, + ATA_TFLAG_WRITE = 8, + ATA_TFLAG_LBA = 16, + ATA_TFLAG_FUA = 32, + ATA_TFLAG_POLLING = 64, + ATA_DFLAG_LBA = 1, + ATA_DFLAG_LBA48 = 2, + ATA_DFLAG_CDB_INTR = 4, + ATA_DFLAG_NCQ = 8, + ATA_DFLAG_FLUSH_EXT = 16, + ATA_DFLAG_ACPI_PENDING = 32, + ATA_DFLAG_ACPI_FAILED = 64, + ATA_DFLAG_AN = 128, + ATA_DFLAG_TRUSTED = 256, + ATA_DFLAG_FUA = 512, + ATA_DFLAG_DMADIR = 1024, + ATA_DFLAG_NCQ_SEND_RECV = 2048, + ATA_DFLAG_NCQ_PRIO = 4096, + ATA_DFLAG_CDL = 8192, + ATA_DFLAG_CFG_MASK = 16383, + ATA_DFLAG_PIO = 16384, + ATA_DFLAG_NCQ_OFF = 32768, + ATA_DFLAG_SLEEPING = 65536, + ATA_DFLAG_DUBIOUS_XFER = 131072, + ATA_DFLAG_NO_UNLOAD = 262144, + ATA_DFLAG_UNLOCK_HPA = 524288, + ATA_DFLAG_INIT_MASK = 1048575, + ATA_DFLAG_NCQ_PRIO_ENABLED = 1048576, + ATA_DFLAG_CDL_ENABLED = 2097152, + ATA_DFLAG_RESUMING = 4194304, + ATA_DFLAG_DETACH = 16777216, + ATA_DFLAG_DETACHED = 33554432, + ATA_DFLAG_DA = 67108864, + ATA_DFLAG_DEVSLP = 134217728, + ATA_DFLAG_ACPI_DISABLED = 268435456, + ATA_DFLAG_D_SENSE = 536870912, + ATA_DFLAG_ZAC = 1073741824, + ATA_DFLAG_FEATURES_MASK = 201341696, + ATA_DEV_UNKNOWN = 0, + ATA_DEV_ATA = 1, + ATA_DEV_ATA_UNSUP = 2, + ATA_DEV_ATAPI = 3, + ATA_DEV_ATAPI_UNSUP = 4, + ATA_DEV_PMP = 5, + ATA_DEV_PMP_UNSUP = 6, + ATA_DEV_SEMB = 7, + ATA_DEV_SEMB_UNSUP = 8, + ATA_DEV_ZAC = 9, + ATA_DEV_ZAC_UNSUP = 10, + ATA_DEV_NONE = 11, + ATA_LFLAG_NO_HRST = 2, + ATA_LFLAG_NO_SRST = 4, + ATA_LFLAG_ASSUME_ATA = 8, + ATA_LFLAG_ASSUME_SEMB = 16, + ATA_LFLAG_ASSUME_CLASS = 24, + ATA_LFLAG_NO_RETRY = 32, + ATA_LFLAG_DISABLED = 64, + ATA_LFLAG_SW_ACTIVITY = 128, + ATA_LFLAG_NO_LPM = 256, + ATA_LFLAG_RST_ONCE = 512, + ATA_LFLAG_CHANGED = 1024, + ATA_LFLAG_NO_DEBOUNCE_DELAY = 2048, + ATA_FLAG_SLAVE_POSS = 1, + ATA_FLAG_SATA = 2, + ATA_FLAG_NO_LPM = 4, + ATA_FLAG_NO_LOG_PAGE = 32, + ATA_FLAG_NO_ATAPI = 64, + ATA_FLAG_PIO_DMA = 128, + ATA_FLAG_PIO_LBA48 = 256, + ATA_FLAG_PIO_POLLING = 512, + ATA_FLAG_NCQ = 1024, + ATA_FLAG_NO_POWEROFF_SPINDOWN = 2048, + ATA_FLAG_NO_HIBERNATE_SPINDOWN = 4096, + ATA_FLAG_DEBUGMSG = 8192, + ATA_FLAG_FPDMA_AA = 16384, + ATA_FLAG_IGN_SIMPLEX = 32768, + ATA_FLAG_NO_IORDY = 65536, + ATA_FLAG_ACPI_SATA = 131072, + ATA_FLAG_AN = 262144, + ATA_FLAG_PMP = 524288, + ATA_FLAG_FPDMA_AUX = 1048576, + ATA_FLAG_EM = 2097152, + ATA_FLAG_SW_ACTIVITY = 4194304, + ATA_FLAG_NO_DIPM = 8388608, + ATA_FLAG_SAS_HOST = 16777216, + ATA_PFLAG_EH_PENDING = 1, + ATA_PFLAG_EH_IN_PROGRESS = 2, + ATA_PFLAG_FROZEN = 4, + ATA_PFLAG_RECOVERED = 8, + ATA_PFLAG_LOADING = 16, + ATA_PFLAG_SCSI_HOTPLUG = 64, + ATA_PFLAG_INITIALIZING = 128, + ATA_PFLAG_RESETTING = 256, + ATA_PFLAG_UNLOADING = 512, + ATA_PFLAG_UNLOADED = 1024, + ATA_PFLAG_RESUMING = 65536, + ATA_PFLAG_SUSPENDED = 131072, + ATA_PFLAG_PM_PENDING = 262144, + ATA_PFLAG_INIT_GTM_VALID = 524288, + ATA_PFLAG_PIO32 = 1048576, + ATA_PFLAG_PIO32CHANGE = 2097152, + ATA_PFLAG_EXTERNAL = 4194304, + ATA_QCFLAG_ACTIVE = 1, + ATA_QCFLAG_DMAMAP = 2, + ATA_QCFLAG_RTF_FILLED = 4, + ATA_QCFLAG_IO = 8, + ATA_QCFLAG_RESULT_TF = 16, + ATA_QCFLAG_CLEAR_EXCL = 32, + ATA_QCFLAG_QUIET = 64, + ATA_QCFLAG_RETRY = 128, + ATA_QCFLAG_HAS_CDL = 256, + ATA_QCFLAG_EH = 65536, + ATA_QCFLAG_SENSE_VALID = 131072, + ATA_QCFLAG_EH_SCHEDULED = 262144, + ATA_QCFLAG_EH_SUCCESS_CMD = 524288, + ATA_HOST_SIMPLEX = 1, + ATA_HOST_STARTED = 2, + ATA_HOST_PARALLEL_SCAN = 4, + ATA_HOST_IGNORE_ATA = 8, + ATA_HOST_NO_PART = 16, + 
ATA_HOST_NO_SSC = 32, + ATA_HOST_NO_DEVSLP = 64, + ATA_TMOUT_INTERNAL_QUICK = 5000, + ATA_TMOUT_MAX_PARK = 30000, + ATA_TMOUT_FF_WAIT_LONG = 2000, + ATA_TMOUT_FF_WAIT = 800, + ATA_WAIT_AFTER_RESET = 150, + ATA_TMOUT_PMP_SRST_WAIT = 10000, + ATA_TMOUT_SPURIOUS_PHY = 10000, + BUS_UNKNOWN = 0, + BUS_DMA = 1, + BUS_IDLE = 2, + BUS_NOINTR = 3, + BUS_NODATA = 4, + BUS_TIMER = 5, + BUS_PIO = 6, + BUS_EDD = 7, + BUS_IDENTIFY = 8, + BUS_PACKET = 9, + PORT_UNKNOWN = 0, + PORT_ENABLED = 1, + PORT_DISABLED = 2, + ATA_NR_PIO_MODES = 7, + ATA_NR_MWDMA_MODES = 5, + ATA_NR_UDMA_MODES = 8, + ATA_SHIFT_PIO = 0, + ATA_SHIFT_MWDMA = 7, + ATA_SHIFT_UDMA = 12, + ATA_SHIFT_PRIO = 6, + ATA_PRIO_HIGH = 2, + ATA_DMA_PAD_SZ = 4, + ATA_ERING_SIZE = 32, + ATA_DEFER_LINK = 1, + ATA_DEFER_PORT = 2, + ATA_EH_DESC_LEN = 80, + ATA_EH_REVALIDATE = 1, + ATA_EH_SOFTRESET = 2, + ATA_EH_HARDRESET = 4, + ATA_EH_RESET = 6, + ATA_EH_ENABLE_LINK = 8, + ATA_EH_PARK = 32, + ATA_EH_GET_SUCCESS_SENSE = 64, + ATA_EH_SET_ACTIVE = 128, + ATA_EH_PERDEV_MASK = 225, + ATA_EH_ALL_ACTIONS = 15, + ATA_EHI_HOTPLUGGED = 1, + ATA_EHI_NO_AUTOPSY = 4, + ATA_EHI_QUIET = 8, + ATA_EHI_NO_RECOVERY = 16, + ATA_EHI_DID_SOFTRESET = 65536, + ATA_EHI_DID_HARDRESET = 131072, + ATA_EHI_PRINTINFO = 262144, + ATA_EHI_SETMODE = 524288, + ATA_EHI_POST_SETMODE = 1048576, + ATA_EHI_DID_PRINT_QUIRKS = 2097152, + ATA_EHI_DID_RESET = 196608, + ATA_EHI_TO_SLAVE_MASK = 12, + ATA_EH_MAX_TRIES = 5, + ATA_LINK_RESUME_TRIES = 5, + ATA_EH_DEV_TRIES = 3, + ATA_EH_PMP_TRIES = 5, + ATA_EH_PMP_LINK_TRIES = 3, + SATA_PMP_RW_TIMEOUT = 3000, + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, + ATA_QUIRK_DIAGNOSTIC = 1, + ATA_QUIRK_NODMA = 2, + ATA_QUIRK_NONCQ = 4, + ATA_QUIRK_MAX_SEC_128 = 8, + ATA_QUIRK_BROKEN_HPA = 16, + ATA_QUIRK_DISABLE = 32, + ATA_QUIRK_HPA_SIZE = 64, + ATA_QUIRK_IVB = 128, + ATA_QUIRK_STUCK_ERR = 256, + ATA_QUIRK_BRIDGE_OK = 512, + ATA_QUIRK_ATAPI_MOD16_DMA = 1024, + ATA_QUIRK_FIRMWARE_WARN = 2048, + ATA_QUIRK_1_5_GBPS = 4096, + ATA_QUIRK_NOSETXFER = 8192, + ATA_QUIRK_BROKEN_FPDMA_AA = 16384, + ATA_QUIRK_DUMP_ID = 32768, + ATA_QUIRK_MAX_SEC_LBA48 = 65536, + ATA_QUIRK_ATAPI_DMADIR = 131072, + ATA_QUIRK_NO_NCQ_TRIM = 262144, + ATA_QUIRK_NOLPM = 524288, + ATA_QUIRK_WD_BROKEN_LPM = 1048576, + ATA_QUIRK_ZERO_AFTER_TRIM = 2097152, + ATA_QUIRK_NO_DMA_LOG = 4194304, + ATA_QUIRK_NOTRIM = 8388608, + ATA_QUIRK_MAX_SEC_1024 = 16777216, + ATA_QUIRK_MAX_TRIM_128M = 33554432, + ATA_QUIRK_NO_NCQ_ON_ATI = 67108864, + ATA_QUIRK_NO_ID_DEV_LOG = 134217728, + ATA_QUIRK_NO_LOG_DIR = 268435456, + ATA_QUIRK_NO_FUA = 536870912, + ATA_DMA_MASK_ATA = 1, + ATA_DMA_MASK_ATAPI = 2, + ATA_DMA_MASK_CFA = 4, + ATAPI_READ = 0, + ATAPI_WRITE = 1, + ATAPI_READ_CD = 2, + ATAPI_PASS_THRU = 3, + ATAPI_MISC = 4, + ATA_TIMING_SETUP = 1, + ATA_TIMING_ACT8B = 2, + ATA_TIMING_REC8B = 4, + ATA_TIMING_CYC8B = 8, + ATA_TIMING_8BIT = 14, + ATA_TIMING_ACTIVE = 16, + ATA_TIMING_RECOVER = 32, + ATA_TIMING_DMACK_HOLD = 64, + ATA_TIMING_CYCLE = 128, + ATA_TIMING_UDMA = 256, + ATA_TIMING_ALL = 511, + ATA_ACPI_FILTER_SETXFER = 1, + ATA_ACPI_FILTER_LOCK = 2, + ATA_ACPI_FILTER_DIPM = 4, + ATA_ACPI_FILTER_FPDMA_OFFSET = 8, + ATA_ACPI_FILTER_FPDMA_AA = 16, + ATA_ACPI_FILTER_DEFAULT = 7, +}; + +enum ata_completion_errors { + AC_ERR_OK = 0, + AC_ERR_DEV = 1, + AC_ERR_HSM = 2, + AC_ERR_TIMEOUT = 4, + AC_ERR_MEDIA = 8, + AC_ERR_ATA_BUS = 16, + AC_ERR_HOST_BUS = 32, + AC_ERR_SYSTEM = 64, + AC_ERR_INVALID = 128, + AC_ERR_OTHER = 256, + AC_ERR_NODEV_HINT = 512, + AC_ERR_NCQ = 1024, +}; + +enum ata_lpm_policy { + ATA_LPM_UNKNOWN = 0, + 
ATA_LPM_MAX_POWER = 1, + ATA_LPM_MED_POWER = 2, + ATA_LPM_MED_POWER_WITH_DIPM = 3, + ATA_LPM_MIN_POWER_WITH_PARTIAL = 4, + ATA_LPM_MIN_POWER = 5, +}; + +struct ata_queued_cmd; + +typedef void (*ata_qc_cb_t)(struct ata_queued_cmd *); + +struct ata_taskfile { + long unsigned int flags; + u8 protocol; + u8 ctl; + u8 hob_feature; + u8 hob_nsect; + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + union { + u8 error; + u8 feature; + }; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + union { + u8 status; + u8 command; + }; + u32 auxiliary; +}; + +struct ata_port; + +struct ata_device; + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + struct scsi_cmnd *scsicmd; + void (*scsidone)(struct scsi_cmnd *); + struct ata_taskfile tf; + u8 cdb[16]; + long unsigned int flags; + unsigned int tag; + unsigned int hw_tag; + unsigned int n_elem; + unsigned int orig_n_elem; + int dma_dir; + unsigned int sect_size; + unsigned int nbytes; + unsigned int extrabytes; + unsigned int curbytes; + struct scatterlist sgent; + struct scatterlist *sg; + struct scatterlist *cursg; + unsigned int cursg_ofs; + unsigned int err_mask; + struct ata_taskfile result_tf; + ata_qc_cb_t complete_fn; + void *private_data; + void *lldd_task; +}; + +struct ata_link; + +typedef int (*ata_prereset_fn_t)(struct ata_link *, long unsigned int); + +struct ata_eh_info { + struct ata_device *dev; + u32 serror; + unsigned int err_mask; + unsigned int action; + unsigned int dev_action[2]; + unsigned int flags; + unsigned int probe_mask; + char desc[80]; + int desc_len; +}; + +struct ata_eh_context { + struct ata_eh_info i; + int tries[2]; + int cmd_timeout_idx[16]; + unsigned int classes[2]; + unsigned int did_probe_mask; + unsigned int unloaded_mask; + unsigned int saved_ncq_enabled; + u8 saved_xfer_mode[2]; + long unsigned int last_reset; +}; + +struct ata_ering_entry { + unsigned int eflags; + unsigned int err_mask; + u64 timestamp; +}; + +struct ata_ering { + int cursor; + long: 32; + struct ata_ering_entry ring[32]; +}; + +struct ata_cpr_log; + +struct ata_cdl; + +struct ata_device { + struct ata_link *link; + unsigned int devno; + unsigned int quirks; + long unsigned int flags; + struct scsi_device *sdev; + void *private_data; + struct device tdev; + u64 n_sectors; + u64 n_native_sectors; + unsigned int class; + long unsigned int unpark_deadline; + u8 pio_mode; + u8 dma_mode; + u8 xfer_mode; + unsigned int xfer_shift; + unsigned int multi_count; + unsigned int max_sectors; + unsigned int cdb_len; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + u16 cylinders; + u16 heads; + u16 sectors; + long: 32; + long: 32; + long: 32; + long: 32; + union { + u16 id[256]; + u32 gscr[128]; + }; + u8 devslp_timing[8]; + u8 ncq_send_recv_cmds[20]; + u8 ncq_non_data_cmds[64]; + u32 zac_zoned_cap; + u32 zac_zones_optimal_open; + u32 zac_zones_optimal_nonseq; + u32 zac_zones_max_open; + struct ata_cpr_log *cpr_log; + struct ata_cdl *cdl; + int spdn_cnt; + struct ata_ering ering; + u8 sector_buf[512]; +}; + +struct ata_link { + struct ata_port *ap; + int pmp; + struct device tdev; + unsigned int active_tag; + u32 sactive; + unsigned int flags; + u32 saved_scontrol; + unsigned int hw_sata_spd_limit; + unsigned int sata_spd_limit; + unsigned int sata_spd; + enum ata_lpm_policy lpm_policy; + struct ata_eh_info eh_info; + struct ata_eh_context eh_context; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + struct ata_device device[2]; + long unsigned int last_lpm_change; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef int (*ata_reset_fn_t)(struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*ata_postreset_fn_t)(struct ata_link *, unsigned int *); + +enum sw_activity { + OFF = 0, + BLINK_ON = 1, + BLINK_OFF = 2, +}; + +struct ata_ioports { + void *cmd_addr; + void *data_addr; + void *error_addr; + void *feature_addr; + void *nsect_addr; + void *lbal_addr; + void *lbam_addr; + void *lbah_addr; + void *device_addr; + void *status_addr; + void *command_addr; + void *altstatus_addr; + void *ctl_addr; + void *bmdma_addr; + void *scr_addr; +}; + +struct ata_port_operations; + +struct ata_host { + spinlock_t lock; + struct device *dev; + void * const *iomap; + unsigned int n_ports; + unsigned int n_tags; + void *private_data; + struct ata_port_operations *ops; + long unsigned int flags; + struct kref kref; + struct mutex eh_mutex; + struct task_struct *eh_owner; + struct ata_port *simplex_claimed; + struct ata_port *ports[0]; +}; + +struct ata_port_operations { + int (*qc_defer)(struct ata_queued_cmd *); + int (*check_atapi_dma)(struct ata_queued_cmd *); + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *); + unsigned int (*qc_issue)(struct ata_queued_cmd *); + void (*qc_fill_rtf)(struct ata_queued_cmd *); + void (*qc_ncq_fill_rtf)(struct ata_port *, u64); + int (*cable_detect)(struct ata_port *); + unsigned int (*mode_filter)(struct ata_device *, unsigned int); + void (*set_piomode)(struct ata_port *, struct ata_device *); + void (*set_dmamode)(struct ata_port *, struct ata_device *); + int (*set_mode)(struct ata_link *, struct ata_device **); + unsigned int (*read_id)(struct ata_device *, struct ata_taskfile *, __le16 *); + void (*dev_config)(struct ata_device *); + void (*freeze)(struct ata_port *); + void (*thaw)(struct ata_port *); + ata_prereset_fn_t prereset; + ata_reset_fn_t softreset; + ata_reset_fn_t hardreset; + ata_postreset_fn_t postreset; + ata_prereset_fn_t pmp_prereset; + ata_reset_fn_t pmp_softreset; + ata_reset_fn_t pmp_hardreset; + ata_postreset_fn_t pmp_postreset; + void (*error_handler)(struct ata_port *); + void (*lost_interrupt)(struct ata_port *); + void (*post_internal_cmd)(struct ata_queued_cmd *); + void (*sched_eh)(struct ata_port *); + void (*end_eh)(struct ata_port *); + int (*scr_read)(struct ata_link *, unsigned int, u32 *); + int (*scr_write)(struct ata_link *, unsigned int, u32); + void (*pmp_attach)(struct ata_port *); + void (*pmp_detach)(struct ata_port *); + int (*set_lpm)(struct ata_link *, enum ata_lpm_policy, unsigned int); + int (*port_suspend)(struct ata_port *, pm_message_t); + int (*port_resume)(struct ata_port *); + int (*port_start)(struct ata_port *); + void (*port_stop)(struct ata_port *); + void (*host_stop)(struct ata_host *); + void (*sff_dev_select)(struct ata_port *, unsigned int); + void (*sff_set_devctl)(struct ata_port *, u8); + u8 (*sff_check_status)(struct ata_port *); + u8 (*sff_check_altstatus)(struct ata_port *); + void (*sff_tf_load)(struct ata_port *, const struct ata_taskfile *); + void (*sff_tf_read)(struct ata_port *, struct ata_taskfile *); + void (*sff_exec_command)(struct ata_port *, const struct ata_taskfile *); + unsigned int (*sff_data_xfer)(struct ata_queued_cmd *, unsigned char *, unsigned int, int); + void (*sff_irq_on)(struct ata_port 
*); + bool (*sff_irq_check)(struct ata_port *); + void (*sff_irq_clear)(struct ata_port *); + void (*sff_drain_fifo)(struct ata_queued_cmd *); + void (*bmdma_setup)(struct ata_queued_cmd *); + void (*bmdma_start)(struct ata_queued_cmd *); + void (*bmdma_stop)(struct ata_queued_cmd *); + u8 (*bmdma_status)(struct ata_port *); + ssize_t (*em_show)(struct ata_port *, char *); + ssize_t (*em_store)(struct ata_port *, const char *, size_t); + ssize_t (*sw_activity_show)(struct ata_device *, char *); + ssize_t (*sw_activity_store)(struct ata_device *, enum sw_activity); + ssize_t (*transmit_led_message)(struct ata_port *, u32, ssize_t); + const struct ata_port_operations *inherits; +}; + +struct ata_port_stats { + long unsigned int unhandled_irq; + long unsigned int idle_irq; + long unsigned int rw_reqbuf; +}; + +struct ata_port { + struct Scsi_Host *scsi_host; + struct ata_port_operations *ops; + spinlock_t *lock; + long unsigned int flags; + unsigned int pflags; + unsigned int print_id; + unsigned int port_no; + struct ata_ioports ioaddr; + u8 ctl; + u8 last_ctl; + struct ata_link *sff_pio_task_link; + struct delayed_work sff_pio_task; + struct ata_bmdma_prd *bmdma_prd; + dma_addr_t bmdma_prd_dma; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + unsigned int cbl; + struct ata_queued_cmd qcmd[33]; + u64 qc_active; + int nr_active_links; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ata_link link; + struct ata_link *slave_link; + int nr_pmp_links; + struct ata_link *pmp_link; + struct ata_link *excl_link; + struct ata_port_stats stats; + struct ata_host *host; + struct device *dev; + long: 32; + struct device tdev; + struct mutex scsi_scan_mutex; + struct delayed_work hotplug_task; + struct delayed_work scsi_rescan_task; + unsigned int hsm_task_state; + struct list_head eh_done_q; + wait_queue_head_t eh_wait_q; + int eh_tries; + struct completion park_req_pending; + pm_message_t pm_mesg; + enum ata_lpm_policy target_lpm_policy; + struct timer_list fastdrain_timer; + unsigned int fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; + void *private_data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ata_cpr { + u8 num; + u8 num_storage_elements; + long: 32; + u64 start_lba; + u64 num_lbas; +}; + +struct ata_cpr_log { + u8 nr_cpr; + long: 32; + struct ata_cpr cpr[0]; +}; + +struct ata_cdl { + u8 desc_log_buf[512]; + u8 ncq_sense_log_buf[1024]; +}; + +struct ata_port_info { + long unsigned int flags; + long unsigned int link_flags; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + struct ata_port_operations *port_ops; + void *private_data; +}; + +struct clk; + +struct clk_bulk_data { + const char *id; + struct clk *clk; +}; + +struct phy_configure_opts_dp { + unsigned int link_rate; + unsigned int lanes; + unsigned int voltage[4]; + unsigned int pre[4]; + u8 ssc: 1; + u8 set_rate: 1; + u8 set_lanes: 1; + u8 set_voltages: 1; +}; + +struct phy_configure_opts_lvds { + unsigned int bits_per_lane_and_dclk_cycle; + long unsigned int differential_clk_rate; + unsigned int lanes; + bool is_slave; +}; + +struct phy_configure_opts_mipi_dphy { + unsigned int clk_miss; + unsigned int clk_post; + unsigned int clk_pre; + unsigned int clk_prepare; + unsigned int clk_settle; + unsigned int clk_term_en; + unsigned int 
clk_trail; + unsigned int clk_zero; + unsigned int d_term_en; + unsigned int eot; + unsigned int hs_exit; + unsigned int hs_prepare; + unsigned int hs_settle; + unsigned int hs_skip; + unsigned int hs_trail; + unsigned int hs_zero; + unsigned int init; + unsigned int lpx; + unsigned int ta_get; + unsigned int ta_go; + unsigned int ta_sure; + unsigned int wakeup; + long unsigned int hs_clk_rate; + long unsigned int lp_clk_rate; + unsigned char lanes; +}; + +enum phy_mode { + PHY_MODE_INVALID = 0, + PHY_MODE_USB_HOST = 1, + PHY_MODE_USB_HOST_LS = 2, + PHY_MODE_USB_HOST_FS = 3, + PHY_MODE_USB_HOST_HS = 4, + PHY_MODE_USB_HOST_SS = 5, + PHY_MODE_USB_DEVICE = 6, + PHY_MODE_USB_DEVICE_LS = 7, + PHY_MODE_USB_DEVICE_FS = 8, + PHY_MODE_USB_DEVICE_HS = 9, + PHY_MODE_USB_DEVICE_SS = 10, + PHY_MODE_USB_OTG = 11, + PHY_MODE_UFS_HS_A = 12, + PHY_MODE_UFS_HS_B = 13, + PHY_MODE_PCIE = 14, + PHY_MODE_ETHERNET = 15, + PHY_MODE_MIPI_DPHY = 16, + PHY_MODE_SATA = 17, + PHY_MODE_LVDS = 18, + PHY_MODE_DP = 19, +}; + +enum phy_media { + PHY_MEDIA_DEFAULT = 0, + PHY_MEDIA_SR = 1, + PHY_MEDIA_DAC = 2, +}; + +union phy_configure_opts { + struct phy_configure_opts_mipi_dphy mipi_dphy; + struct phy_configure_opts_dp dp; + struct phy_configure_opts_lvds lvds; +}; + +struct phy; + +struct phy_ops { + int (*init)(struct phy *); + int (*exit)(struct phy *); + int (*power_on)(struct phy *); + int (*power_off)(struct phy *); + int (*set_mode)(struct phy *, enum phy_mode, int); + int (*set_media)(struct phy *, enum phy_media); + int (*set_speed)(struct phy *, int); + int (*configure)(struct phy *, union phy_configure_opts *); + int (*validate)(struct phy *, enum phy_mode, int, union phy_configure_opts *); + int (*reset)(struct phy *); + int (*calibrate)(struct phy *); + int (*connect)(struct phy *, int); + int (*disconnect)(struct phy *, int); + void (*release)(struct phy *); + struct module *owner; +}; + +struct phy_attrs { + u32 bus_width; + u32 max_link_rate; + enum phy_mode mode; +}; + +struct phy { + struct device dev; + int id; + const struct phy_ops *ops; + struct mutex mutex; + int init_count; + int power_count; + struct phy_attrs attrs; + struct regulator *pwr; + struct dentry *debugfs; +}; + +enum { + AHCI_MAX_PORTS = 32, + AHCI_MAX_SG = 168, + AHCI_DMA_BOUNDARY = 4294967295, + AHCI_MAX_CMDS = 32, + AHCI_CMD_SZ = 32, + AHCI_CMD_SLOT_SZ = 1024, + AHCI_RX_FIS_SZ = 256, + AHCI_CMD_TBL_CDB = 64, + AHCI_CMD_TBL_HDR_SZ = 128, + AHCI_CMD_TBL_SZ = 2816, + AHCI_CMD_TBL_AR_SZ = 90112, + AHCI_PORT_PRIV_DMA_SZ = 91392, + AHCI_PORT_PRIV_FBS_DMA_SZ = 95232, + AHCI_IRQ_ON_SG = 2147483648, + AHCI_CMD_ATAPI = 32, + AHCI_CMD_WRITE = 64, + AHCI_CMD_PREFETCH = 128, + AHCI_CMD_RESET = 256, + AHCI_CMD_CLR_BUSY = 1024, + RX_FIS_PIO_SETUP = 32, + RX_FIS_D2H_REG = 64, + RX_FIS_SDB = 88, + RX_FIS_UNK = 96, + HOST_CAP = 0, + HOST_CTL = 4, + HOST_IRQ_STAT = 8, + HOST_PORTS_IMPL = 12, + HOST_VERSION = 16, + HOST_EM_LOC = 28, + HOST_EM_CTL = 32, + HOST_CAP2 = 36, + HOST_RESET = 1, + HOST_IRQ_EN = 2, + HOST_MRSM = 4, + HOST_AHCI_EN = 2147483648, + HOST_CAP_SXS = 32, + HOST_CAP_EMS = 64, + HOST_CAP_CCC = 128, + HOST_CAP_PART = 8192, + HOST_CAP_SSC = 16384, + HOST_CAP_PIO_MULTI = 32768, + HOST_CAP_FBS = 65536, + HOST_CAP_PMP = 131072, + HOST_CAP_ONLY = 262144, + HOST_CAP_CLO = 16777216, + HOST_CAP_LED = 33554432, + HOST_CAP_ALPM = 67108864, + HOST_CAP_SSS = 134217728, + HOST_CAP_MPS = 268435456, + HOST_CAP_SNTF = 536870912, + HOST_CAP_NCQ = 1073741824, + HOST_CAP_64 = 2147483648, + HOST_CAP2_BOH = 1, + HOST_CAP2_NVMHCI = 2, + HOST_CAP2_APST = 4, + 
HOST_CAP2_SDS = 8, + HOST_CAP2_SADM = 16, + HOST_CAP2_DESO = 32, + PORT_LST_ADDR = 0, + PORT_LST_ADDR_HI = 4, + PORT_FIS_ADDR = 8, + PORT_FIS_ADDR_HI = 12, + PORT_IRQ_STAT = 16, + PORT_IRQ_MASK = 20, + PORT_CMD = 24, + PORT_TFDATA = 32, + PORT_SIG = 36, + PORT_CMD_ISSUE = 56, + PORT_SCR_STAT = 40, + PORT_SCR_CTL = 44, + PORT_SCR_ERR = 48, + PORT_SCR_ACT = 52, + PORT_SCR_NTF = 60, + PORT_FBS = 64, + PORT_DEVSLP = 68, + PORT_IRQ_COLD_PRES = 2147483648, + PORT_IRQ_TF_ERR = 1073741824, + PORT_IRQ_HBUS_ERR = 536870912, + PORT_IRQ_HBUS_DATA_ERR = 268435456, + PORT_IRQ_IF_ERR = 134217728, + PORT_IRQ_IF_NONFATAL = 67108864, + PORT_IRQ_OVERFLOW = 16777216, + PORT_IRQ_BAD_PMP = 8388608, + PORT_IRQ_PHYRDY = 4194304, + PORT_IRQ_DMPS = 128, + PORT_IRQ_CONNECT = 64, + PORT_IRQ_SG_DONE = 32, + PORT_IRQ_UNK_FIS = 16, + PORT_IRQ_SDB_FIS = 8, + PORT_IRQ_DMAS_FIS = 4, + PORT_IRQ_PIOS_FIS = 2, + PORT_IRQ_D2H_REG_FIS = 1, + PORT_IRQ_FREEZE = 683671632, + PORT_IRQ_ERROR = 2025848912, + DEF_PORT_IRQ = 2025848959, + PORT_CMD_ASP = 134217728, + PORT_CMD_ALPE = 67108864, + PORT_CMD_ATAPI = 16777216, + PORT_CMD_FBSCP = 4194304, + PORT_CMD_ESP = 2097152, + PORT_CMD_CPD = 1048576, + PORT_CMD_MPSP = 524288, + PORT_CMD_HPCP = 262144, + PORT_CMD_PMP = 131072, + PORT_CMD_LIST_ON = 32768, + PORT_CMD_FIS_ON = 16384, + PORT_CMD_FIS_RX = 16, + PORT_CMD_CLO = 8, + PORT_CMD_POWER_ON = 4, + PORT_CMD_SPIN_UP = 2, + PORT_CMD_START = 1, + PORT_CMD_ICC_MASK = 4026531840, + PORT_CMD_ICC_ACTIVE = 268435456, + PORT_CMD_ICC_PARTIAL = 536870912, + PORT_CMD_ICC_SLUMBER = 1610612736, + PORT_CMD_CAP = 8126464, + PORT_FBS_DWE_OFFSET = 16, + PORT_FBS_ADO_OFFSET = 12, + PORT_FBS_DEV_OFFSET = 8, + PORT_FBS_DEV_MASK = 3840, + PORT_FBS_SDE = 4, + PORT_FBS_DEC = 2, + PORT_FBS_EN = 1, + PORT_DEVSLP_DM_OFFSET = 25, + PORT_DEVSLP_DM_MASK = 503316480, + PORT_DEVSLP_DITO_OFFSET = 15, + PORT_DEVSLP_MDAT_OFFSET = 10, + PORT_DEVSLP_DETO_OFFSET = 2, + PORT_DEVSLP_DSP = 2, + PORT_DEVSLP_ADSE = 1, + AHCI_HFLAG_NO_NCQ = 1, + AHCI_HFLAG_IGN_IRQ_IF_ERR = 2, + AHCI_HFLAG_IGN_SERR_INTERNAL = 4, + AHCI_HFLAG_32BIT_ONLY = 8, + AHCI_HFLAG_MV_PATA = 16, + AHCI_HFLAG_NO_MSI = 32, + AHCI_HFLAG_NO_PMP = 64, + AHCI_HFLAG_SECT255 = 256, + AHCI_HFLAG_YES_NCQ = 512, + AHCI_HFLAG_NO_SUSPEND = 1024, + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = 2048, + AHCI_HFLAG_NO_SNTF = 4096, + AHCI_HFLAG_NO_FPDMA_AA = 8192, + AHCI_HFLAG_YES_FBS = 16384, + AHCI_HFLAG_DELAY_ENGINE = 32768, + AHCI_HFLAG_NO_DEVSLP = 131072, + AHCI_HFLAG_NO_FBS = 262144, + AHCI_HFLAG_MULTI_MSI = 1048576, + AHCI_HFLAG_WAKE_BEFORE_STOP = 4194304, + AHCI_HFLAG_YES_ALPM = 8388608, + AHCI_HFLAG_NO_WRITE_TO_RO = 16777216, + AHCI_HFLAG_SUSPEND_PHYS = 33554432, + AHCI_HFLAG_NO_SXS = 67108864, + AHCI_HFLAG_43BIT_ONLY = 134217728, + AHCI_HFLAG_INTEL_PCS_QUIRK = 268435456, + AHCI_FLAG_COMMON = 393346, + ICH_MAP = 144, + PCS_6 = 146, + PCS_7 = 148, + EM_MAX_SLOTS = 15, + EM_MAX_RETRY = 5, + EM_CTL_RST = 512, + EM_CTL_TM = 256, + EM_CTL_MR = 1, + EM_CTL_ALHD = 67108864, + EM_CTL_XMT = 33554432, + EM_CTL_SMB = 16777216, + EM_CTL_SGPIO = 524288, + EM_CTL_SES = 262144, + EM_CTL_SAFTE = 131072, + EM_CTL_LED = 65536, + EM_MSG_TYPE_LED = 1, + EM_MSG_TYPE_SAFTE = 2, + EM_MSG_TYPE_SES2 = 4, + EM_MSG_TYPE_SGPIO = 8, +}; + +struct ahci_cmd_hdr { + __le32 opts; + __le32 status; + __le32 tbl_addr; + __le32 tbl_addr_hi; + __le32 reserved[4]; +}; + +struct ahci_em_priv { + enum sw_activity blink_policy; + struct timer_list timer; + long unsigned int saved_activity; + long unsigned int activity; + long unsigned int led_state; + struct ata_link 
*link; +}; + +struct ahci_port_priv { + struct ata_link *active_link; + struct ahci_cmd_hdr *cmd_slot; + dma_addr_t cmd_slot_dma; + void *cmd_tbl; + dma_addr_t cmd_tbl_dma; + void *rx_fis; + dma_addr_t rx_fis_dma; + unsigned int ncq_saw_d2h: 1; + unsigned int ncq_saw_dmas: 1; + unsigned int ncq_saw_sdb: 1; + spinlock_t lock; + u32 intr_mask; + bool fbs_supported; + bool fbs_enabled; + int fbs_last_dev; + struct ahci_em_priv em_priv[15]; + char *irq_desc; +}; + +struct reset_control; + +struct ahci_host_priv { + unsigned int flags; + u32 mask_port_map; + void *mmio; + u32 cap; + u32 cap2; + u32 version; + u32 port_map; + u32 saved_cap; + u32 saved_cap2; + u32 saved_port_map; + u32 saved_port_cap[32]; + u32 em_loc; + u32 em_buf_sz; + u32 em_msg_type; + u32 remapped_nvme; + bool got_runtime_pm; + unsigned int n_clks; + struct clk_bulk_data *clks; + unsigned int f_rsts; + struct reset_control *rsts; + struct regulator **target_pwrs; + struct regulator *ahci_regulator; + struct regulator *phy_regulator; + struct phy **phys; + unsigned int nports; + void *plat_data; + unsigned int irq; + void (*start_engine)(struct ata_port *); + int (*stop_engine)(struct ata_port *); + irqreturn_t (*irq_handler)(int, void *); + int (*get_irq_vector)(struct ata_host *, int); +}; + +enum { + AHCI_PCI_BAR_STA2X11 = 0, + AHCI_PCI_BAR_CAVIUM = 0, + AHCI_PCI_BAR_LOONGSON = 0, + AHCI_PCI_BAR_ENMOTUS = 2, + AHCI_PCI_BAR_CAVIUM_GEN5 = 4, + AHCI_PCI_BAR_STANDARD = 5, +}; + +enum board_ids { + board_ahci = 0, + board_ahci_43bit_dma = 1, + board_ahci_ign_iferr = 2, + board_ahci_no_debounce_delay = 3, + board_ahci_no_msi = 4, + board_ahci_pcs_quirk = 5, + board_ahci_pcs_quirk_no_devslp = 6, + board_ahci_pcs_quirk_no_sntf = 7, + board_ahci_yes_fbs = 8, + board_ahci_al = 9, + board_ahci_avn = 10, + board_ahci_mcp65 = 11, + board_ahci_mcp77 = 12, + board_ahci_mcp89 = 13, + board_ahci_mv = 14, + board_ahci_sb600 = 15, + board_ahci_sb700 = 16, + board_ahci_vt8251 = 17, + board_ahci_mcp_linux = 11, + board_ahci_mcp67 = 11, + board_ahci_mcp73 = 11, + board_ahci_mcp79 = 12, +}; + +struct pm_qos_request { + struct plist_node node; + struct pm_qos_constraints *qos; +}; + +enum e1000_mac_type___2 { + e1000_82571 = 0, + e1000_82572 = 1, + e1000_82573 = 2, + e1000_82574 = 3, + e1000_82583 = 4, + e1000_80003es2lan = 5, + e1000_ich8lan = 6, + e1000_ich9lan = 7, + e1000_ich10lan = 8, + e1000_pchlan = 9, + e1000_pch2lan = 10, + e1000_pch_lpt = 11, + e1000_pch_spt = 12, + e1000_pch_cnp = 13, + e1000_pch_tgp = 14, + e1000_pch_adp = 15, + e1000_pch_mtp = 16, + e1000_pch_lnp = 17, + e1000_pch_ptp = 18, + e1000_pch_nvp = 19, +}; + +enum e1000_nvm_type___2 { + e1000_nvm_unknown___2 = 0, + e1000_nvm_none___2 = 1, + e1000_nvm_eeprom_spi___2 = 2, + e1000_nvm_flash_hw___2 = 3, + e1000_nvm_flash_sw___2 = 4, +}; + +enum e1000_phy_type___2 { + e1000_phy_unknown___2 = 0, + e1000_phy_none___2 = 1, + e1000_phy_m88___2 = 2, + e1000_phy_igp___2 = 3, + e1000_phy_igp_2___2 = 4, + e1000_phy_gg82563___2 = 5, + e1000_phy_igp_3___2 = 6, + e1000_phy_ife___2 = 7, + e1000_phy_bm = 8, + e1000_phy_82578 = 9, + e1000_phy_82577 = 10, + e1000_phy_82579 = 11, + e1000_phy_i217 = 12, +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress = 1, + e1000_serdes_link_autoneg_complete = 2, + e1000_serdes_link_forced_up = 3, +}; + +struct e1000_hw_stats___2 { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 
cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_hw___2; + +struct e1000_mac_operations___2 { + s32 (*id_led_init)(struct e1000_hw___2 *); + s32 (*blink_led)(struct e1000_hw___2 *); + bool (*check_mng_mode)(struct e1000_hw___2 *); + s32 (*check_for_link)(struct e1000_hw___2 *); + s32 (*cleanup_led)(struct e1000_hw___2 *); + void (*clear_hw_cntrs)(struct e1000_hw___2 *); + void (*clear_vfta)(struct e1000_hw___2 *); + s32 (*get_bus_info)(struct e1000_hw___2 *); + void (*set_lan_id)(struct e1000_hw___2 *); + s32 (*get_link_up_info)(struct e1000_hw___2 *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw___2 *); + s32 (*led_off)(struct e1000_hw___2 *); + void (*update_mc_addr_list)(struct e1000_hw___2 *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw___2 *); + s32 (*init_hw)(struct e1000_hw___2 *); + s32 (*setup_link)(struct e1000_hw___2 *); + s32 (*setup_physical_interface)(struct e1000_hw___2 *); + s32 (*setup_led)(struct e1000_hw___2 *); + void (*write_vfta)(struct e1000_hw___2 *, u32, u32); + void (*config_collision_dist)(struct e1000_hw___2 *); + int (*rar_set)(struct e1000_hw___2 *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw___2 *); + u32 (*rar_get_count)(struct e1000_hw___2 *); +}; + +struct e1000_mac_info___2 { + struct e1000_mac_operations___2 ops; + u8 addr[6]; + u8 perm_addr[6]; + enum e1000_mac_type___2 type; + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u32 mta_shadow[128]; + u16 rar_entry_count; + u8 forced_speed_duplex; + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; + enum e1000_serdes_link_state serdes_link_state; +}; + +struct e1000_fc_info___2 { + u32 high_water; + u32 low_water; + u16 pause_time; + u16 refresh_time; + bool send_xon; + bool strict_ieee; + enum e1000_fc_mode current_mode; + enum e1000_fc_mode requested_mode; +}; + +struct e1000_phy_operations___2 { + s32 (*acquire)(struct e1000_hw___2 *); + s32 (*cfg_on_link_up)(struct e1000_hw___2 *); + s32 (*check_polarity)(struct e1000_hw___2 *); + s32 (*check_reset_block)(struct e1000_hw___2 *); + s32 (*commit)(struct e1000_hw___2 *); + s32 (*force_speed_duplex)(struct e1000_hw___2 *); + s32 (*get_cfg_done)(struct e1000_hw___2 *); + s32 (*get_cable_length)(struct e1000_hw___2 *); + s32 (*get_info)(struct e1000_hw___2 *); + s32 (*set_page)(struct e1000_hw___2 *, u16); + s32 (*read_reg)(struct e1000_hw___2 *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw___2 *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw___2 *, u32, u16 *); + void (*release)(struct 
e1000_hw___2 *); + s32 (*reset)(struct e1000_hw___2 *); + s32 (*set_d0_lplu_state)(struct e1000_hw___2 *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw___2 *, bool); + s32 (*write_reg)(struct e1000_hw___2 *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw___2 *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw___2 *, u32, u16); + void (*power_up)(struct e1000_hw___2 *); + void (*power_down)(struct e1000_hw___2 *); +}; + +struct e1000_phy_info___2 { + struct e1000_phy_operations___2 ops; + enum e1000_phy_type___2 type; + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + u32 addr; + u32 id; + u32 reset_delay_us; + u32 revision; + u32 retry_count; + enum e1000_media_type media_type; + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u8 mdix; + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; + bool retry_enabled; +}; + +struct e1000_nvm_operations___2 { + s32 (*acquire)(struct e1000_hw___2 *); + s32 (*read)(struct e1000_hw___2 *, u16, u16, u16 *); + void (*release)(struct e1000_hw___2 *); + void (*reload)(struct e1000_hw___2 *); + s32 (*update)(struct e1000_hw___2 *); + s32 (*valid_led_default)(struct e1000_hw___2 *, u16 *); + s32 (*validate)(struct e1000_hw___2 *); + s32 (*write)(struct e1000_hw___2 *, u16, u16, u16 *); +}; + +struct e1000_nvm_info___2 { + struct e1000_nvm_operations___2 ops; + enum e1000_nvm_type___2 type; + enum e1000_nvm_override override; + u32 flash_bank_size; + u32 flash_base_addr; + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info___2 { + enum e1000_bus_width width; + u16 func; +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +enum e1000_ulp_state { + e1000_ulp_state_unknown = 0, + e1000_ulp_state_off = 1, + e1000_ulp_state_on = 2, +}; + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[2048]; + bool nvm_k1_enabled; + bool eee_disable; + u16 eee_lp_ability; + enum e1000_ulp_state ulp_state; +}; + +struct e1000_adapter; + +struct e1000_hw___2 { + struct e1000_adapter *adapter; + void *hw_addr; + void *flash_address; + struct e1000_mac_info___2 mac; + struct e1000_fc_info___2 fc; + struct e1000_phy_info___2 phy; + struct e1000_nvm_info___2 nvm; + struct e1000_bus_info___2 bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_80003es2lan e80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; +}; + +struct e1000_phy_regs { + u16 bmcr; + u16 bmsr; + u16 advertise; + u16 lpa; + u16 expansion; + u16 ctrl1000; + u16 stat1000; + u16 estatus; +}; + +struct e1000_buffer; + +struct e1000_ring { + struct e1000_adapter *adapter; + void *desc; + dma_addr_t dma; + unsigned int size; + unsigned int count; + u16 next_to_use; + u16 next_to_clean; + void *head; + void *tail; + struct e1000_buffer *buffer_info; + char name[21]; + u32 ims_val; + u32 itr_val; + void *itr_register; + int set_itr; + struct sk_buff *rx_skb_top; +}; + +struct e1000_info___2; + +struct e1000_adapter { + 
struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + struct work_struct reset_task; + struct work_struct watchdog_task; + const struct e1000_info___2 *ei; + long unsigned int active_vlans[128]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + long unsigned int state; + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + struct e1000_ring *tx_ring; + u32 tx_fifo_limit; + struct napi_struct napi; + unsigned int uncorr_errors; + unsigned int corr_errors; + unsigned int restart_queue; + u32 txd_cmd; + bool detect_tx_hung; + bool tx_hang_recheck; + u8 tx_timeout_factor; + u32 tx_int_delay; + u32 tx_abs_int_delay; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + long: 32; + u64 tpt_old; + u64 colc_old; + u32 gotc; + long: 32; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + bool (*clean_rx)(struct e1000_ring *, int *, int); + void (*alloc_rx_buf)(struct e1000_ring *, int, gfp_t); + struct e1000_ring *rx_ring; + u32 rx_int_delay; + u32 rx_abs_int_delay; + long: 32; + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + long: 32; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 rx_hwtstamp_cleared; + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + struct net_device *netdev; + struct pci_dev *pdev; + struct e1000_hw___2 hw; + spinlock_t stats64_lock; + struct e1000_hw_stats___2 stats; + struct e1000_phy_info___2 phy_info; + struct e1000_phy_stats phy_stats; + struct e1000_phy_regs phy_regs; + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + bool fc_autoneg; + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + int phy_hang_count; + u16 tx_ring_count; + u16 rx_ring_count; + struct hwtstamp_config hwtstamp_config; + struct delayed_work systim_overflow_work; + struct sk_buff *tx_hwtstamp_skb; + long unsigned int tx_hwtstamp_start; + struct work_struct tx_hwtstamp_work; + spinlock_t systim_lock; + struct cyclecounter cc; + struct timecounter tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct pm_qos_request pm_qos_req; + long int ptp_delta; + u16 eee_advert; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct e1000_ps_page { + struct page *page; + long: 32; + u64 dma; +}; + +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + struct { + long unsigned int time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + struct { + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_info___2 { + enum e1000_mac_type___2 mac; + unsigned int flags; + unsigned int flags2; + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); + const struct e1000_mac_operations___2 *mac_ops; + const struct 
e1000_phy_operations___2 *phy_ops; + const struct e1000_nvm_operations___2 *nvm_ops; +}; + +struct e1000_opt_list { + int i; + char *str; +}; + +struct e1000_option { + enum { + enable_option = 0, + range_option = 1, + list_option = 2, + } type; + const char *name; + const char *err; + int def; + union { + struct { + int min; + int max; + } r; + struct { + int nr; + struct e1000_opt_list *p; + } l; + } arg; +}; + +struct tso_t { + int next_frag_idx; + int size; + void *data; + u16 ip_id; + u8 tlen; + bool ipv6; + u32 tcp_seq; +}; + +enum { + SILICON_A_STEP = 0, + SILICON_B_STEP = 1, + SILICON_C_STEP = 2, + SILICON_D_STEP = 3, + SILICON_E_STEP = 4, + SILICON_TC_STEP = 14, + SILICON_Z_STEP = 15, +}; + +struct ieee80211_qos_hdr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; + __le16 qos_ctrl; +}; + +struct ieee80211_qos_hdr_4addr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; + u8 addr4[6]; + __le16 qos_ctrl; +}; + +struct iwl_cmd_header_wide { + u8 cmd; + u8 group_id; + __le16 sequence; + __le16 length; + u8 reserved; + u8 version; +}; + +enum iwl_tx_queue_cfg_actions { + TX_QUEUE_CFG_ENABLE_QUEUE = 1, + TX_QUEUE_CFG_TFD_SHORT_FORMAT = 2, +}; + +struct iwl_tx_queue_cfg_cmd { + u8 sta_id; + u8 tid; + __le16 flags; + __le32 cb_size; + __le64 byte_cnt_addr; + __le64 tfdq_addr; +}; + +struct iwl_tx_queue_cfg_rsp { + __le16 queue_number; + __le16 flags; + __le16 write_pointer; + __le16 reserved; +}; + +struct iwl_device_cmd { + union { + struct { + struct iwl_cmd_header hdr; + u8 payload[320]; + }; + struct { + struct iwl_cmd_header_wide hdr_wide; + u8 payload_wide[316]; + }; + }; +}; + +enum iwl_trans_status { + STATUS_SYNC_HCMD_ACTIVE = 0, + STATUS_DEVICE_ENABLED = 1, + STATUS_TPOWER_PMI = 2, + STATUS_INT_ENABLED = 3, + STATUS_RFKILL_HW = 4, + STATUS_RFKILL_OPMODE = 5, + STATUS_FW_ERROR = 6, + STATUS_TRANS_DEAD = 7, + STATUS_SUPPRESS_CMD_ERROR_ONCE = 8, +}; + +struct iwl_dram_regions { + struct iwl_dram_data drams[64]; + struct iwl_dram_data prph_scratch_mem_desc; + u8 n_regions; +}; + +struct iwl_cmd_meta { + struct iwl_host_cmd *source; + u32 flags: 5; + u32 sg_offset: 12; + u32 tbs; +}; + +struct iwl_pcie_txq_entry { + void *cmd; + struct sk_buff *skb; + const void *free_buf; + struct iwl_cmd_meta meta; +}; + +struct iwl_pcie_first_tb_buf { + u8 buf[64]; +}; + +struct iwl_txq { + void *tfds; + struct iwl_pcie_first_tb_buf *first_tb_bufs; + dma_addr_t first_tb_dma; + struct iwl_pcie_txq_entry *entries; + spinlock_t lock; + spinlock_t reclaim_lock; + long unsigned int frozen_expiry_remainder; + struct timer_list stuck_timer; + struct iwl_trans *trans; + bool need_update; + bool frozen; + bool ampdu; + int block; + long unsigned int wd_timeout; + struct sk_buff_head overflow_q; + struct iwl_dma_ptr bc_tbl; + int write_ptr; + int read_ptr; + dma_addr_t dma_addr; + int n_window; + u32 id; + int low_mark; + int high_mark; + bool overflow_tx; +}; + +struct iwl_tfh_tb { + __le16 tb_len; + __le64 addr; +} __attribute__((packed)); + +struct iwl_tfh_tfd { + __le16 num_tbs; + struct iwl_tfh_tb tbs[25]; + __le32 __pad; +}; + +struct iwlagn_scd_bc_tbl { + __le16 tfd_offset[320]; +}; + +struct iwl_gen3_bc_tbl_entry { + __le16 tfd_offset; +}; + +struct iwl_context_info_version { + __le16 mac_id; + __le16 version; + __le16 size; + __le16 reserved; +}; + +struct iwl_context_info_control { + __le32 control_flags; + __le32 reserved; +}; + +struct iwl_context_info_dram { + __le64 
umac_img[64]; + __le64 lmac_img[64]; + __le64 virtual_img[64]; +}; + +struct iwl_context_info_rbd_cfg { + __le64 free_rbd_addr; + __le64 used_rbd_addr; + __le64 status_wr_ptr; +}; + +struct iwl_context_info_hcmd_cfg { + __le64 cmd_queue_addr; + u8 cmd_queue_size; + u8 reserved[7]; +}; + +struct iwl_context_info_dump_cfg { + __le64 core_dump_addr; + __le32 core_dump_size; + __le32 reserved; +}; + +struct iwl_context_info_pnvm_cfg { + __le64 platform_nvm_addr; + __le32 platform_nvm_size; + __le32 reserved; +}; + +struct iwl_context_info_early_dbg_cfg { + __le64 early_debug_addr; + __le32 early_debug_size; + __le32 reserved; +}; + +struct iwl_context_info { + struct iwl_context_info_version version; + struct iwl_context_info_control control; + __le64 reserved0; + struct iwl_context_info_rbd_cfg rbd_cfg; + struct iwl_context_info_hcmd_cfg hcmd_cfg; + __le32 reserved1[4]; + struct iwl_context_info_dump_cfg dump_cfg; + struct iwl_context_info_early_dbg_cfg edbg_cfg; + struct iwl_context_info_pnvm_cfg pnvm_cfg; + __le32 reserved2[16]; + struct iwl_context_info_dram dram; + __le32 reserved3[16]; +}; + +struct iwl_rx_mem_buffer { + dma_addr_t page_dma; + struct page *page; + struct list_head list; + u32 offset; + u16 vid; + bool invalid; +}; + +struct isr_statistics { + u32 hw; + u32 sw; + u32 err_code; + u32 sch; + u32 alive; + u32 rfkill; + u32 ctkill; + u32 wakeup; + u32 rx; + u32 tx; + u32 unhandled; +}; + +struct iwl_rxq { + int id; + void *bd; + dma_addr_t bd_dma; + void *used_bd; + dma_addr_t used_bd_dma; + u32 read; + u32 write; + u32 free_count; + u32 used_count; + u32 write_actual; + u32 queue_size; + struct list_head rx_free; + struct list_head rx_used; + bool need_update; + bool next_rb_is_fragment; + void *rb_stts; + dma_addr_t rb_stts_dma; + spinlock_t lock; + long: 32; + struct napi_struct napi; + struct iwl_rx_mem_buffer *queue[256]; +}; + +struct iwl_rb_allocator { + atomic_t req_pending; + atomic_t req_ready; + struct list_head rbd_allocated; + struct list_head rbd_empty; + spinlock_t lock; + struct workqueue_struct *alloc_wq; + struct work_struct rx_alloc; +}; + +enum iwl_pcie_fw_reset_state { + FW_RESET_IDLE = 0, + FW_RESET_REQUESTED = 1, + FW_RESET_OK = 2, + FW_RESET_ERROR = 3, +}; + +enum iwl_pcie_imr_status { + IMR_D2S_IDLE = 0, + IMR_D2S_REQUESTED = 1, + IMR_D2S_COMPLETED = 2, + IMR_D2S_ERROR = 3, +}; + +struct dma_pool; + +struct iwl_tso_hdr_page; + +struct iwl_pcie_txqs { + long unsigned int queue_used[16]; + long unsigned int queue_stopped[16]; + struct iwl_txq *txq[512]; + struct dma_pool *bc_pool; + size_t bc_tbl_size; + bool bc_table_dword; + u8 page_offs; + u8 dev_cmd_offs; + struct iwl_tso_hdr_page *tso_hdr_page; + struct { + u8 fifo; + u8 q_id; + unsigned int wdg_timeout; + } cmd; + struct { + u8 max_tbs; + u16 size; + u8 addr_size; + } tfd; + struct iwl_dma_ptr scd_bc_tbls; + u8 queue_alloc_cmd_ver; +}; + +struct iwl_tso_hdr_page { + struct page *page; + u8 *pos; +}; + +struct iwl_context_info_gen3; + +struct iwl_prph_info; + +struct iwl_prph_scratch; + +struct iwl_trans_pcie { + struct iwl_rxq *rxq; + struct iwl_rx_mem_buffer *rx_pool; + struct iwl_rx_mem_buffer **global_table; + struct iwl_rb_allocator rba; + union { + struct iwl_context_info *ctxt_info; + struct iwl_context_info_gen3 *ctxt_info_gen3; + }; + struct iwl_prph_info *prph_info; + struct iwl_prph_scratch *prph_scratch; + void *iml; + dma_addr_t ctxt_info_dma_addr; + dma_addr_t prph_info_dma_addr; + dma_addr_t prph_scratch_dma_addr; + dma_addr_t iml_dma_addr; + struct iwl_trans *trans; + struct 
net_device *napi_dev; + __le32 *ict_tbl; + dma_addr_t ict_tbl_dma; + int ict_index; + bool use_ict; + bool is_down; + bool opmode_down; + s8 debug_rfkill; + struct isr_statistics isr_stats; + spinlock_t irq_lock; + struct mutex mutex; + u32 inta_mask; + u32 scd_base_addr; + struct iwl_dma_ptr kw; + struct iwl_dram_regions pnvm_data; + struct iwl_dram_regions reduced_tables_data; + struct iwl_txq *txq_memory; + struct pci_dev *pci_dev; + u8 *hw_base; + bool ucode_write_complete; + bool sx_complete; + wait_queue_head_t ucode_write_waitq; + wait_queue_head_t sx_waitq; + u8 n_no_reclaim_cmds; + u8 no_reclaim_cmds[6]; + u16 num_rx_bufs; + enum iwl_amsdu_size rx_buf_size; + bool scd_set_active; + bool pcie_dbg_dumped_once; + u32 rx_page_order; + u32 rx_buf_bytes; + u32 supported_dma_mask; + spinlock_t alloc_page_lock; + struct page *alloc_page; + u32 alloc_page_used; + spinlock_t reg_lock; + bool cmd_hold_nic_awake; + struct msix_entry msix_entries[16]; + bool msix_enabled; + u8 shared_vec_mask; + u32 alloc_vecs; + u32 def_irq; + u32 fh_init_mask; + u32 hw_init_mask; + u32 fh_mask; + u32 hw_mask; + cpumask_t affinity_mask[16]; + u16 tx_cmd_queue_size; + bool in_rescan; + void *base_rb_stts; + dma_addr_t base_rb_stts_dma; + bool fw_reset_handshake; + enum iwl_pcie_fw_reset_state fw_reset_state; + wait_queue_head_t fw_reset_waitq; + enum iwl_pcie_imr_status imr_status; + wait_queue_head_t imr_waitq; + char rf_name[32]; + struct iwl_pcie_txqs txqs; +}; + +struct iwl_tso_page_info { + dma_addr_t dma_addr; + struct page *next; + refcount_t use_count; +}; + +struct iwl_dram_sec_info { + __le32 pn_low; + __le16 pn_high; + __le16 aux_info; +}; + +struct iwl_tx_cmd_gen2 { + __le16 len; + __le16 offload_assist; + __le32 flags; + struct iwl_dram_sec_info dram_info; + __le32 rate_n_flags; + struct ieee80211_hdr hdr[0]; +}; + +struct iwl_tx_cmd_gen3 { + __le16 len; + __le16 flags; + __le32 offload_assist; + struct iwl_dram_sec_info dram_info; + __le32 rate_n_flags; + u8 reserved[8]; + struct ieee80211_hdr hdr[0]; +}; + +enum iwl_data_path_subcmd_ids { + DQA_ENABLE_CMD = 0, + UPDATE_MU_GROUPS_CMD = 1, + TRIGGER_RX_QUEUES_NOTIF_CMD = 2, + WNM_PLATFORM_PTM_REQUEST_CMD = 3, + WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD = 4, + STA_HE_CTXT_CMD = 7, + RLC_CONFIG_CMD = 8, + RFH_QUEUE_CONFIG_CMD = 13, + TLC_MNG_CONFIG_CMD = 15, + HE_AIR_SNIFFER_CONFIG_CMD = 19, + CHEST_COLLECTOR_FILTER_CONFIG_CMD = 20, + RX_BAID_ALLOCATION_CONFIG_CMD = 22, + SCD_QUEUE_CONFIG_CMD = 23, + SEC_KEY_CMD = 24, + ESR_MODE_NOTIF = 243, + MONITOR_NOTIF = 244, + RX_NO_DATA_NOTIF = 245, + THERMAL_DUAL_CHAIN_REQUEST = 246, + TLC_MNG_UPDATE_NOTIF = 247, + STA_PM_NOTIF = 253, + MU_GROUP_MGMT_NOTIF = 254, + RX_QUEUES_NOTIFICATION = 255, +}; + +enum iwl_scd_queue_cfg_operation { + IWL_SCD_QUEUE_ADD = 0, + IWL_SCD_QUEUE_REMOVE = 1, + IWL_SCD_QUEUE_MODIFY = 2, +}; + +struct iwl_scd_queue_cfg_cmd { + __le32 operation; + union { + struct { + __le32 sta_mask; + u8 tid; + u8 reserved[3]; + __le32 flags; + __le32 cb_size; + __le64 bc_dram_addr; + __le64 tfdq_dram_addr; + } add; + struct { + __le32 sta_mask; + __le32 tid; + } remove; + struct { + __le32 old_sta_mask; + __le32 tid; + __le32 new_sta_mask; + } modify; + } u; +}; + +enum iwl_fw_dbg_trigger_mode { + IWL_FW_DBG_TRIGGER_START = 1, + IWL_FW_DBG_TRIGGER_STOP = 2, + IWL_FW_DBG_TRIGGER_MONITOR_ONLY = 4, +}; + +enum iwl_fw_dbg_trigger_vif_type { + IWL_FW_DBG_CONF_VIF_ANY = 0, + IWL_FW_DBG_CONF_VIF_IBSS = 1, + IWL_FW_DBG_CONF_VIF_STATION = 2, + IWL_FW_DBG_CONF_VIF_AP = 3, + IWL_FW_DBG_CONF_VIF_P2P_CLIENT = 
8, + IWL_FW_DBG_CONF_VIF_P2P_GO = 9, + IWL_FW_DBG_CONF_VIF_P2P_DEVICE = 10, +}; + +struct iwl_fw_dbg_trigger_mlme { + u8 stop_auth_denied; + u8 stop_auth_timeout; + u8 stop_rx_deauth; + u8 stop_tx_deauth; + u8 stop_assoc_denied; + u8 stop_assoc_timeout; + u8 stop_connection_loss; + u8 reserved; + u8 start_auth_denied; + u8 start_auth_timeout; + u8 start_rx_deauth; + u8 start_tx_deauth; + u8 start_assoc_denied; + u8 start_assoc_timeout; + u8 start_connection_loss; + u8 reserved2; +}; + +struct iwl_fw_dbg_trigger_ba { + __le16 rx_ba_start; + __le16 rx_ba_stop; + __le16 tx_ba_start; + __le16 tx_ba_stop; + __le16 rx_bar; + __le16 tx_bar; + __le16 frame_timeout; +}; + +struct iwl_cmd_response { + __le32 status; +}; + +enum iwl_fw_dbg_trigger { + FW_DBG_TRIGGER_INVALID = 0, + FW_DBG_TRIGGER_USER = 1, + FW_DBG_TRIGGER_FW_ASSERT = 2, + FW_DBG_TRIGGER_MISSED_BEACONS = 3, + FW_DBG_TRIGGER_CHANNEL_SWITCH = 4, + FW_DBG_TRIGGER_FW_NOTIF = 5, + FW_DBG_TRIGGER_MLME = 6, + FW_DBG_TRIGGER_STATS = 7, + FW_DBG_TRIGGER_RSSI = 8, + FW_DBG_TRIGGER_TXQ_TIMERS = 9, + FW_DBG_TRIGGER_TIME_EVENT = 10, + FW_DBG_TRIGGER_BA = 11, + FW_DBG_TRIGGER_TX_LATENCY = 12, + FW_DBG_TRIGGER_TDLS = 13, + FW_DBG_TRIGGER_TX_STATUS = 14, + FW_DBG_TRIGGER_ALIVE_TIMEOUT = 15, + FW_DBG_TRIGGER_DRIVER = 16, + FW_DBG_TRIGGER_MAX = 17, +}; + +struct iwl_notification_wait { + struct list_head list; + bool (*fn)(struct iwl_notif_wait_data *, struct iwl_rx_packet *, void *); + void *fn_data; + u16 cmds[5]; + u8 n_cmds; + bool triggered; + bool aborted; +}; + +enum iwl_ac { + AC_BK = 0, + AC_BE = 1, + AC_VI = 2, + AC_VO = 3, + AC_NUM = 4, +}; + +enum iwl_mac_conf_subcmd_ids { + LOW_LATENCY_CMD = 3, + CHANNEL_SWITCH_TIME_EVENT_CMD = 4, + MISSED_VAP_NOTIF = 250, + SESSION_PROTECTION_CMD = 5, + CANCEL_CHANNEL_SWITCH_CMD = 6, + MAC_CONFIG_CMD = 8, + LINK_CONFIG_CMD = 9, + STA_CONFIG_CMD = 10, + AUX_STA_CMD = 11, + STA_REMOVE_CMD = 12, + STA_DISABLE_TX_CMD = 13, + ROC_CMD = 14, + MISSED_BEACONS_NOTIF = 246, + EMLSR_TRANS_FAIL_NOTIF = 247, + ROC_NOTIF = 248, + SESSION_PROTECTION_NOTIF = 251, + PROBE_RESPONSE_DATA_NOTIF = 252, + CHANNEL_SWITCH_START_NOTIF = 255, + CHANNEL_SWITCH_ERROR_NOTIF = 249, +}; + +struct iwl_mac_low_latency_cmd { + __le32 mac_id; + u8 low_latency_rx; + u8 low_latency_tx; + __le16 reserved; +}; + +enum iwl_system_subcmd_ids { + SHARED_MEM_CFG_CMD = 0, + SOC_CONFIGURATION_CMD = 1, + INIT_EXTENDED_CFG_CMD = 3, + FW_ERROR_RECOVERY_CMD = 7, + RFI_CONFIG_CMD = 11, + RFI_GET_FREQ_TABLE_CMD = 12, + SYSTEM_FEATURES_CONTROL_CMD = 13, + SYSTEM_STATISTICS_CMD = 15, + SYSTEM_STATISTICS_END_NOTIF = 253, + RFI_DEACTIVATE_NOTIF = 255, +}; + +enum { + IWL_RATE_1M_INDEX___2 = 0, + IWL_FIRST_CCK_RATE___2 = 0, + IWL_RATE_2M_INDEX___2 = 1, + IWL_RATE_5M_INDEX___2 = 2, + IWL_RATE_11M_INDEX___2 = 3, + IWL_LAST_CCK_RATE___2 = 3, + IWL_RATE_6M_INDEX___2 = 4, + IWL_FIRST_OFDM_RATE___2 = 4, + IWL_RATE_MCS_0_INDEX = 4, + IWL_FIRST_HT_RATE = 4, + IWL_FIRST_VHT_RATE = 4, + IWL_RATE_9M_INDEX___2 = 5, + IWL_RATE_12M_INDEX___2 = 6, + IWL_RATE_MCS_1_INDEX = 6, + IWL_RATE_18M_INDEX___2 = 7, + IWL_RATE_MCS_2_INDEX = 7, + IWL_RATE_24M_INDEX___2 = 8, + IWL_RATE_MCS_3_INDEX = 8, + IWL_RATE_36M_INDEX___2 = 9, + IWL_RATE_MCS_4_INDEX = 9, + IWL_RATE_48M_INDEX___2 = 10, + IWL_RATE_MCS_5_INDEX = 10, + IWL_RATE_54M_INDEX___2 = 11, + IWL_RATE_MCS_6_INDEX = 11, + IWL_LAST_NON_HT_RATE = 11, + IWL_RATE_60M_INDEX___2 = 12, + IWL_RATE_MCS_7_INDEX = 12, + IWL_LAST_HT_RATE = 12, + IWL_RATE_MCS_8_INDEX = 13, + IWL_RATE_MCS_9_INDEX = 14, + IWL_LAST_VHT_RATE = 14, + 
IWL_RATE_MCS_10_INDEX = 15, + IWL_RATE_MCS_11_INDEX = 16, + IWL_LAST_HE_RATE = 16, + IWL_RATE_COUNT_LEGACY___2 = 12, + IWL_RATE_COUNT___2 = 17, + IWL_RATE_INVM_INDEX___2 = 17, + IWL_RATE_INVALID___2 = 17, +}; + +enum iwl_statistics_cmd_flags { + IWL_STATISTICS_FLG_CLEAR = 1, + IWL_STATISTICS_FLG_DISABLE_NOTIF = 2, +}; + +struct iwl_statistics_cmd___2 { + __le32 flags; +}; + +enum iwl_statistics_notify_type_id { + IWL_STATS_NTFY_TYPE_ID_OPER = 1, + IWL_STATS_NTFY_TYPE_ID_OPER_PART1 = 2, + IWL_STATS_NTFY_TYPE_ID_OPER_PART2 = 4, + IWL_STATS_NTFY_TYPE_ID_OPER_PART3 = 8, + IWL_STATS_NTFY_TYPE_ID_OPER_PART4 = 16, +}; + +enum iwl_statistics_cfg_flags { + IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK = 1, + IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK = 2, + IWL_STATS_CFG_FLG_RESET_MSK = 4, +}; + +struct iwl_system_statistics_cmd { + __le32 cfg_mask; + __le32 config_time_sec; + __le32 type_id_mask; +}; + +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_service; + __le64 timestamp; +}; + +struct iwl_mvm_mod_params { + int power_scheme; +}; + +enum iwl_power_scheme { + IWL_POWER_SCHEME_CAM = 1, + IWL_POWER_SCHEME_BPS = 2, + IWL_POWER_SCHEME_LP = 3, +}; + +enum iwl_mvm_low_latency_cause { + LOW_LATENCY_TRAFFIC = 1, + LOW_LATENCY_DEBUGFS = 2, + LOW_LATENCY_VCMD = 4, + LOW_LATENCY_VIF_TYPE = 8, + LOW_LATENCY_DEBUGFS_FORCE_ENABLE = 16, + LOW_LATENCY_DEBUGFS_FORCE = 32, +}; + +typedef struct iwl_mvm *class_mvm_t; + +enum iwl_mvm_status { + IWL_MVM_STATUS_HW_RFKILL = 0, + IWL_MVM_STATUS_HW_CTKILL = 1, + IWL_MVM_STATUS_ROC_P2P_RUNNING = 2, + IWL_MVM_STATUS_HW_RESTART_REQUESTED = 3, + IWL_MVM_STATUS_IN_HW_RESTART = 4, + IWL_MVM_STATUS_ROC_AUX_RUNNING = 5, + IWL_MVM_STATUS_FIRMWARE_RUNNING = 6, + IWL_MVM_STATUS_IN_D3 = 7, + IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE = 8, + IWL_MVM_STATUS_STARTING = 9, +}; + +struct iwl_mvm_diversity_iter_data { + struct iwl_mvm_phy_ctxt *ctxt; + bool result; +}; + +struct iwl_mvm_low_latency_iter { + bool result; + bool result_per_band[6]; +}; + +struct iwl_bss_iter_data { + struct ieee80211_vif *vif; + bool error; +}; + +struct iwl_bss_find_iter_data { + struct ieee80211_vif *vif; + u32 macid; +}; + +struct iwl_sta_iter_data { + bool assoc; +}; + +struct iwl_nonqos_seq_query_cmd { + __le32 get_set_flag; + __le32 mac_id_n_color; + __le16 value; + __le16 reserved; +}; + +enum iwl_prot_offload_subcmd_ids { + WOWLAN_WAKE_PKT_NOTIFICATION = 252, + WOWLAN_INFO_NOTIFICATION = 253, + D3_END_NOTIFICATION = 254, + STORED_BEACON_NTF = 255, +}; + +enum iwl_ctxt_id_and_color { + FW_CTXT_ID_POS = 0, + FW_CTXT_ID_MSK = 255, + FW_CTXT_COLOR_POS = 8, + FW_CTXT_COLOR_MSK = 65280, + FW_CTXT_INVALID = 4294967295, +}; + +enum iwl_ctxt_action { + FW_CTXT_ACTION_INVALID = 0, + FW_CTXT_ACTION_ADD = 1, + FW_CTXT_ACTION_MODIFY = 2, + FW_CTXT_ACTION_REMOVE = 3, +}; + +typedef unsigned int iwl_ucode_tlv_api_t; + +enum iwl_ucode_tlv_api { + IWL_UCODE_TLV_API_FRAGMENTED_SCAN = 8, + IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = 9, + IWL_UCODE_TLV_API_LQ_SS_PARAMS = 18, + IWL_UCODE_TLV_API_NEW_VERSION = 20, + IWL_UCODE_TLV_API_SCAN_TSF_REPORT = 28, + IWL_UCODE_TLV_API_TKIP_MIC_KEYS = 29, + IWL_UCODE_TLV_API_STA_TYPE = 30, + IWL_UCODE_TLV_API_NAN2_VER2 = 31, + IWL_UCODE_TLV_API_ADAPTIVE_DWELL = 32, + IWL_UCODE_TLV_API_OCE = 33, + IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = 34, + IWL_UCODE_TLV_API_NEW_RX_STATS = 35, + IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL = 36, + IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = 38, + IWL_UCODE_TLV_API_DEPRECATE_TTAK = 41, + 
IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = 42, + IWL_UCODE_TLV_API_FRAG_EBS = 44, + IWL_UCODE_TLV_API_REDUCE_TX_POWER = 45, + IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = 46, + IWL_UCODE_TLV_API_BEACON_FILTER_V4 = 47, + IWL_UCODE_TLV_API_REGULATORY_NVM_INFO = 48, + IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = 49, + IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = 50, + IWL_UCODE_TLV_API_MBSSID_HE = 52, + IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = 53, + IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = 54, + IWL_UCODE_TLV_API_SAR_TABLE_VER = 55, + IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = 56, + IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = 57, + IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = 58, + IWL_UCODE_TLV_API_BAND_IN_RX_DATA = 59, + IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX = 66, + IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR = 67, + IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD = 68, + NUM_IWL_UCODE_TLV_API = 69, +}; + +struct iwl_binding_cmd_v1 { + __le32 id_and_color; + __le32 action; + __le32 macs[3]; + __le32 phy; +}; + +struct iwl_time_quota_data_v1 { + __le32 id_and_color; + __le32 quota; + __le32 max_duration; +}; + +enum iwl_d3_status { + IWL_D3_STATUS_ALIVE = 0, + IWL_D3_STATUS_RESET = 1, +}; + +enum iwl_d0i3_flags { + IWL_D0I3_RESET_REQUIRE = 1, +}; + +struct iwl_d3_manager_config { + __le32 min_sleep_time; + __le32 wakeup_flags; + __le32 wakeup_host_timer; +}; + +struct iwl_wowlan_pattern_v1 { + u8 mask[16]; + u8 pattern[128]; + u8 mask_size; + u8 pattern_size; + __le16 reserved; +}; + +struct iwl_wowlan_patterns_cmd_v1 { + __le32 n_patterns; + struct iwl_wowlan_pattern_v1 patterns[0]; +}; + +enum iwl_wowlan_pattern_type { + WOWLAN_PATTERN_TYPE_BITMASK = 0, + WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN = 1, + WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN = 2, + WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN_WILDCARD = 3, + WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN_WILDCARD = 4, +}; + +struct iwl_wowlan_ipv4_tcp_syn { + u8 src_addr[4]; + u8 dst_addr[4]; + __le16 src_port; + __le16 dst_port; +}; + +struct iwl_wowlan_ipv6_tcp_syn { + u8 src_addr[16]; + u8 dst_addr[16]; + __le16 src_port; + __le16 dst_port; +}; + +union iwl_wowlan_pattern_data { + struct iwl_wowlan_pattern_v1 bitmask; + struct iwl_wowlan_ipv4_tcp_syn ipv4_tcp_syn; + struct iwl_wowlan_ipv6_tcp_syn ipv6_tcp_syn; +}; + +struct iwl_wowlan_pattern_v2 { + u8 pattern_type; + u8 reserved[3]; + union iwl_wowlan_pattern_data u; +}; + +struct iwl_wowlan_patterns_cmd { + u8 n_patterns; + u8 sta_id; + __le16 reserved; + struct iwl_wowlan_pattern_v2 patterns[0]; +}; + +enum iwl_wowlan_wakeup_filters { + IWL_WOWLAN_WAKEUP_MAGIC_PACKET = 1, + IWL_WOWLAN_WAKEUP_PATTERN_MATCH = 2, + IWL_WOWLAN_WAKEUP_BEACON_MISS = 4, + IWL_WOWLAN_WAKEUP_LINK_CHANGE = 8, + IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = 16, + IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = 32, + IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = 64, + IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = 128, + IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = 256, + IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = 512, + IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = 1024, + IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = 2048, + IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = 4096, + IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = 8192, + IWL_WOWLAN_WAKEUP_HOST_TIMER = 16384, + IWL_WOWLAN_WAKEUP_RX_FRAME = 32768, + IWL_WOWLAN_WAKEUP_BCN_FILTERING = 65536, +}; + +enum iwl_wowlan_flags { + IS_11W_ASSOC = 1, + ENABLE_L3_FILTERING = 2, + ENABLE_NBNS_FILTERING = 4, + ENABLE_DHCP_FILTERING = 8, + ENABLE_STORE_BEACON = 16, +}; + +struct iwl_wowlan_config_cmd_v6 { + __le32 wakeup_filter; + __le16 non_qos_seq; + __le16 qos_seq[8]; + u8 wowlan_ba_teardown_tids; + u8 is_11n_connection; + u8 offloading_tid; + u8 
flags; + u8 sta_id; + u8 reserved; +}; + +struct iwl_wowlan_config_cmd { + __le32 wakeup_filter; + u8 wowlan_ba_teardown_tids; + u8 is_11n_connection; + u8 offloading_tid; + u8 flags; + u8 sta_id; + u8 reserved[3]; +}; + +struct tkip_sc { + __le16 iv16; + __le16 pad; + __le32 iv32; +}; + +struct iwl_tkip_rsc_tsc { + struct tkip_sc unicast_rsc[16]; + struct tkip_sc multicast_rsc[16]; + struct tkip_sc tsc; +}; + +struct aes_sc { + __le64 pn; +}; + +struct iwl_aes_rsc_tsc { + struct aes_sc unicast_rsc[16]; + struct aes_sc multicast_rsc[16]; + struct aes_sc tsc; +}; + +union iwl_all_tsc_rsc { + struct iwl_tkip_rsc_tsc tkip; + struct iwl_aes_rsc_tsc aes; +}; + +struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 { + union iwl_all_tsc_rsc all_tsc_rsc; +}; + +struct iwl_wowlan_rsc_tsc_params_cmd_v4 { + struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 params; + __le32 sta_id; +}; + +struct iwl_wowlan_rsc_tsc_params_cmd { + __le64 ucast_rsc[8]; + __le64 mcast_rsc[16]; + __le32 sta_id; + u8 mcast_key_id_map[4]; +}; + +struct iwl_mic_keys { + u8 tx[8]; + u8 rx_unicast[8]; + u8 rx_mcast[8]; +}; + +struct iwl_p1k_cache { + __le16 p1k[5]; +}; + +struct iwl_wowlan_tkip_params_cmd { + struct iwl_mic_keys mic_keys; + struct iwl_p1k_cache tx; + struct iwl_p1k_cache rx_uni[2]; + struct iwl_p1k_cache rx_multi[2]; + u8 reversed[2]; + __le32 sta_id; +}; + +struct iwl_wowlan_kek_kck_material_cmd_v4 { + __le32 sta_id; + u8 kck[32]; + u8 kek[32]; + __le16 kck_len; + __le16 kek_len; + __le64 replay_ctr; + __le32 akm; + __le32 gtk_cipher; + __le32 igtk_cipher; + __le32 bigtk_cipher; +}; + +struct iwl_wowlan_get_status_cmd { + __le32 sta_id; +}; + +enum iwl_wowlan_wakeup_reason { + IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0, + IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = 1, + IWL_WOWLAN_WAKEUP_BY_PATTERN = 2, + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = 4, + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = 8, + IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = 16, + IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = 32, + IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = 64, + IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = 128, + IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = 256, + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = 512, + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = 1024, + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = 2048, + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = 4096, + IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = 8192, + IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = 16384, + IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = 32768, + IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = 65536, + IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC = 131072, + IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN = 262144, + IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD = 524288, + IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN = 1048576, + IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD = 2097152, +}; + +struct iwl_wowlan_gtk_status_v1 { + u8 key_index; + u8 reserved[3]; + u8 decrypt_key[16]; + u8 tkip_mic_key[8]; + struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc; +}; + +struct iwl_wowlan_gtk_status_v2 { + u8 key[32]; + u8 key_len; + u8 key_flags; + u8 reserved[2]; + u8 tkip_mic_key[8]; + struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc; +}; + +struct iwl_wowlan_all_rsc_tsc_v5 { + __le64 ucast_rsc[8]; + __le64 mcast_rsc[16]; + __le32 sta_id; + u8 mcast_key_id_map[4]; +}; + +struct iwl_wowlan_gtk_status_v3 { + u8 key[32]; + u8 key_len; + u8 key_flags; + u8 reserved[2]; + u8 tkip_mic_key[8]; + struct iwl_wowlan_all_rsc_tsc_v5 sc; +}; + +struct iwl_wowlan_igtk_status { + u8 key[32]; + u8 ipn[6]; + u8 key_len; + u8 key_flags; +}; + 
+struct iwl_wowlan_status_v6 { + struct iwl_wowlan_gtk_status_v1 gtk; + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 wake_packet[0]; +}; + +struct iwl_wowlan_status_v7 { + struct iwl_wowlan_gtk_status_v2 gtk[2]; + struct iwl_wowlan_igtk_status igtk[2]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 wake_packet[0]; +}; + +struct iwl_wowlan_status_v9 { + struct iwl_wowlan_gtk_status_v2 gtk[2]; + struct iwl_wowlan_igtk_status igtk[2]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 tid_tear_down; + u8 reserved[3]; + u8 wake_packet[0]; +}; + +struct iwl_wowlan_status_v12 { + struct iwl_wowlan_gtk_status_v3 gtk[2]; + struct iwl_wowlan_igtk_status igtk[2]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 tid_tear_down; + u8 reserved[3]; + u8 wake_packet[0]; +}; + +struct iwl_wowlan_info_notif_v1 { + struct iwl_wowlan_gtk_status_v3 gtk[2]; + struct iwl_wowlan_igtk_status igtk[2]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 reserved1; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 tid_tear_down; + u8 station_id; + u8 reserved2[2]; +}; + +struct iwl_wowlan_info_notif_v2 { + struct iwl_wowlan_gtk_status_v3 gtk[2]; + struct iwl_wowlan_igtk_status igtk[2]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 reserved1; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + u8 tid_tear_down; + u8 station_id; + u8 reserved2[2]; +}; + +enum iwl_wowlan_mlo_gtk_type { + WOWLAN_MLO_GTK_KEY_TYPE_GTK = 0, + WOWLAN_MLO_GTK_KEY_TYPE_IGTK = 1, + WOWLAN_MLO_GTK_KEY_TYPE_BIGTK = 2, + WOWLAN_MLO_GTK_KEY_NUM_TYPES = 3, +}; + +enum iwl_wowlan_mlo_gtk_flag { + WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK = 1, + WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK = 14, + WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK = 240, + WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK = 768, + WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK = 1024, +}; + +struct iwl_wowlan_mlo_gtk { + u8 key[32]; + __le16 flags; + u8 pn[6]; +}; + +struct iwl_wowlan_info_notif_v4 { + struct iwl_wowlan_gtk_status_v3 gtk[2]; + struct iwl_wowlan_igtk_status igtk[2]; + struct iwl_wowlan_igtk_status bigtk[2]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 reserved1; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + u8 tid_tear_down; + u8 station_id; + u8 num_mlo_link_keys; + u8 reserved2; + struct iwl_wowlan_mlo_gtk mlo_gtks[0]; +}; + +struct iwl_wowlan_info_notif { + struct iwl_wowlan_gtk_status_v3 gtk[2]; + struct iwl_wowlan_igtk_status 
igtk[2]; + struct iwl_wowlan_igtk_status bigtk[2]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 qos_seq_ctr; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + u8 tid_tear_down; + u8 station_id; + u8 num_mlo_link_keys; + u8 tid_offloaded_tx; + struct iwl_wowlan_mlo_gtk mlo_gtks[0]; +}; + +struct iwl_wowlan_wake_pkt_notif { + __le32 wake_packet_length; + u8 station_id; + u8 reserved[3]; + u8 wake_packet[1]; +} __attribute__((packed)); + +struct iwl_mvm_d3_end_notif { + __le32 flags; +}; + +enum iwl_scan_subcmd_ids { + CHANNEL_SURVEY_NOTIF = 251, + OFFLOAD_MATCH_INFO_NOTIF = 252, +}; + +struct iwl_scan_offload_profile_match_v1 { + u8 bssid[6]; + __le16 reserved; + u8 channel; + u8 energy; + u8 matching_feature; + u8 matching_channels[5]; +}; + +struct iwl_scan_offload_profile_match { + u8 bssid[6]; + __le16 reserved; + u8 channel; + u8 energy; + u8 matching_feature; + u8 matching_channels[7]; +}; + +struct iwl_scan_offload_match_info { + __le32 matched_profiles; + __le32 last_scan_age; + __le32 n_scans_done; + __le32 gp2_d0u; + __le32 gp2_invoked; + u8 resume_while_scanning; + u8 self_recovery; + __le16 reserved; + struct iwl_scan_offload_profile_match matches[0]; +}; + +enum iwl_sta_key_flag { + STA_KEY_FLG_NO_ENC = 0, + STA_KEY_FLG_WEP = 1, + STA_KEY_FLG_CCM = 2, + STA_KEY_FLG_TKIP = 3, + STA_KEY_FLG_EXT = 4, + STA_KEY_FLG_GCMP = 5, + STA_KEY_FLG_CMAC = 6, + STA_KEY_FLG_ENC_UNKNOWN = 7, + STA_KEY_FLG_EN_MSK = 7, + STA_KEY_FLG_WEP_KEY_MAP = 8, + STA_KEY_FLG_AMSDU_SPP = 128, + STA_KEY_FLG_KEYID_POS = 8, + STA_KEY_FLG_KEYID_MSK = 768, + STA_KEY_NOT_VALID = 2048, + STA_KEY_FLG_WEP_13BYTES = 4096, + STA_KEY_FLG_KEY_32BYTES = 4096, + STA_KEY_MULTICAST = 16384, + STA_KEY_MFP = 32768, +}; + +struct iwl_mvm_wep_key { + u8 key_index; + u8 key_offset; + __le16 reserved1; + u8 key_size; + u8 reserved2[3]; + u8 key[16]; +}; + +struct iwl_mvm_wep_key_cmd { + __le32 mac_id_n_color; + u8 num_keys; + u8 decryption_type; + u8 flags; + u8 reserved; + struct iwl_mvm_wep_key wep_key[0]; +}; + +enum ieee80211_key_flags { + IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = 1, + IEEE80211_KEY_FLAG_GENERATE_IV = 2, + IEEE80211_KEY_FLAG_GENERATE_MMIC = 4, + IEEE80211_KEY_FLAG_PAIRWISE = 8, + IEEE80211_KEY_FLAG_SW_MGMT_TX = 16, + IEEE80211_KEY_FLAG_PUT_IV_SPACE = 32, + IEEE80211_KEY_FLAG_RX_MGMT = 64, + IEEE80211_KEY_FLAG_RESERVE_TAILROOM = 128, + IEEE80211_KEY_FLAG_PUT_MIC_SPACE = 256, + IEEE80211_KEY_FLAG_NO_AUTO_TX = 512, + IEEE80211_KEY_FLAG_GENERATE_MMIE = 1024, + IEEE80211_KEY_FLAG_SPP_AMSDU = 2048, +}; + +struct ieee80211_key_seq { + union { + struct { + u32 iv32; + u16 iv16; + } tkip; + struct { + u8 pn[6]; + } ccmp; + struct { + u8 pn[6]; + } aes_cmac; + struct { + u8 pn[6]; + } aes_gmac; + struct { + u8 pn[6]; + } gcmp; + struct { + u8 seq[16]; + u8 seq_len; + } hw; + }; +}; + +struct iwl_fw_dbg_params { + u32 in_sample; + u32 out_ctrl; +}; + +enum iwl_scan_status { + IWL_MVM_SCAN_REGULAR = 1, + IWL_MVM_SCAN_SCHED = 2, + IWL_MVM_SCAN_NETDETECT = 4, + IWL_MVM_SCAN_INT_MLO = 8, + IWL_MVM_SCAN_STOPPING_REGULAR = 256, + IWL_MVM_SCAN_STOPPING_SCHED = 512, + IWL_MVM_SCAN_STOPPING_NETDETECT = 1024, + IWL_MVM_SCAN_STOPPING_INT_MLO = 2048, + IWL_MVM_SCAN_REGULAR_MASK = 257, + IWL_MVM_SCAN_SCHED_MASK = 514, + IWL_MVM_SCAN_NETDETECT_MASK = 1028, + IWL_MVM_SCAN_INT_MLO_MASK = 2056, + IWL_MVM_SCAN_STOPPING_MASK = 65280, + IWL_MVM_SCAN_MASK = 255, +}; + +struct wowlan_key_reprogram_data { + bool error; + int wep_key_idx; +}; + +struct wowlan_key_rsc_tsc_data { + 
struct iwl_wowlan_rsc_tsc_params_cmd_v4 *rsc_tsc; + bool have_rsc_tsc; +}; + +struct wowlan_key_rsc_v5_data { + struct iwl_wowlan_rsc_tsc_params_cmd *rsc; + bool have_rsc; + int gtks; + int gtk_ids[4]; +}; + +struct wowlan_key_tkip_data { + struct iwl_wowlan_tkip_params_cmd tkip; + bool have_tkip_keys; +} __attribute__((packed)); + +struct wowlan_key_gtk_type_iter { + struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd; +}; + +struct iwl_multicast_key_data { + u8 key[32]; + u8 len; + u8 flags; + u8 id; + u8 ipn[6]; +}; + +struct iwl_wowlan_status_data { + u64 replay_ctr; + u32 num_of_gtk_rekeys; + u32 received_beacons; + u32 wakeup_reasons; + u32 wake_packet_length; + u32 wake_packet_bufsize; + u16 pattern_number; + u16 non_qos_seq_ctr; + u16 qos_seq_ctr[8]; + u8 tid_tear_down; + u8 tid_offloaded_tx; + struct { + u8 key[32]; + u8 len; + u8 flags; + u8 id; + } gtk[2]; + struct { + struct { + struct ieee80211_key_seq seq[8]; + } tkip; + struct { + struct ieee80211_key_seq seq[8]; + } aes; + s8 key_id; + bool valid; + } gtk_seq[2]; + struct { + struct { + struct ieee80211_key_seq seq[8]; + u64 tx_pn; + } tkip; + struct { + struct ieee80211_key_seq seq[8]; + u64 tx_pn; + } aes; + } ptk; + struct iwl_multicast_key_data igtk; + struct iwl_multicast_key_data bigtk[2]; + int num_mlo_keys; + struct iwl_wowlan_mlo_gtk mlo_keys[18]; + u8 *wake_packet; + long: 32; +}; + +struct iwl_mvm_d3_gtk_iter_data { + struct iwl_mvm *mvm; + struct iwl_wowlan_status_data *status; + u32 gtk_cipher; + u32 igtk_cipher; + u32 bigtk_cipher; + bool unhandled_cipher; + bool igtk_support; + bool bigtk_support; + int num_keys; +}; + +struct iwl_mvm_d3_mlo_old_keys { + u32 cipher[45]; + struct ieee80211_key_conf *key[120]; +}; + +struct iwl_mvm_nd_results { + u32 matched_profiles; + u8 matches[198]; +}; + +enum iwl_d3_notif { + IWL_D3_NOTIF_WOWLAN_INFO = 1, + IWL_D3_NOTIF_WOWLAN_WAKE_PKT = 2, + IWL_D3_NOTIF_PROT_OFFLOAD = 4, + IWL_D3_ND_MATCH_INFO = 8, + IWL_D3_NOTIF_D3_END_NOTIF = 16, +}; + +struct iwl_d3_data { + struct iwl_wowlan_status_data *status; + bool test; + u32 d3_end_flags; + u32 notif_expected; + u32 notif_received; + struct iwl_mvm_nd_results *nd_results; + bool nd_results_valid; +}; + +struct pdev_archdata {}; + +struct mfd_cell; + +struct platform_device_id; + +struct platform_device { + const char *name; + int id; + bool id_auto; + long: 32; + struct device dev; + u64 platform_dma_mask; + struct device_dma_parameters dma_parms; + u32 num_resources; + struct resource *resource; + const struct platform_device_id *id_entry; + const char *driver_override; + struct mfd_cell *mfd_cell; + struct pdev_archdata archdata; +}; + +struct platform_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct gen_pool; + +typedef long unsigned int (*genpool_algo_t)(long unsigned int *, long unsigned int, long unsigned int, unsigned int, void *, struct gen_pool *, long unsigned int); + +struct gen_pool { + spinlock_t lock; + struct list_head chunks; + int min_alloc_order; + genpool_algo_t algo; + void *data; + const char *name; +}; + +enum { + MEMREMAP_WB = 1, + MEMREMAP_WT = 2, + MEMREMAP_WC = 4, + MEMREMAP_ENC = 8, + MEMREMAP_DEC = 16, +}; + +struct usb_ctrlrequest { + __u8 bRequestType; + __u8 bRequest; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +}; + +enum usb3_link_state { + USB3_LPM_U0 = 0, + USB3_LPM_U1 = 1, + USB3_LPM_U2 = 2, + USB3_LPM_U3 = 3, +}; + +struct usb_iso_packet_descriptor { + unsigned int offset; + unsigned int length; + unsigned int actual_length; + int status; +}; + +struct 
usb_anchor { + struct list_head urb_list; + wait_queue_head_t wait; + spinlock_t lock; + atomic_t suspend_wakeups; + unsigned int poisoned: 1; +}; + +struct urb; + +typedef void (*usb_complete_t)(struct urb *); + +struct urb { + struct kref kref; + int unlinked; + void *hcpriv; + atomic_t use_count; + atomic_t reject; + struct list_head urb_list; + struct list_head anchor_list; + struct usb_anchor *anchor; + struct usb_device *dev; + struct usb_host_endpoint *ep; + unsigned int pipe; + unsigned int stream_id; + int status; + unsigned int transfer_flags; + void *transfer_buffer; + dma_addr_t transfer_dma; + struct scatterlist *sg; + int num_mapped_sgs; + int num_sgs; + u32 transfer_buffer_length; + u32 actual_length; + unsigned char *setup_packet; + dma_addr_t setup_dma; + int start_frame; + int number_of_packets; + int interval; + int error_count; + void *context; + usb_complete_t complete; + struct usb_iso_packet_descriptor iso_frame_desc[0]; +}; + +enum usb_led_event { + USB_LED_EVENT_HOST = 0, + USB_LED_EVENT_GADGET = 1, +}; + +struct giveback_urb_bh { + bool running; + bool high_prio; + spinlock_t lock; + struct list_head head; + struct work_struct bh; + struct usb_host_endpoint *completing_ep; +}; + +enum usb_dev_authorize_policy { + USB_DEVICE_AUTHORIZE_NONE = 0, + USB_DEVICE_AUTHORIZE_ALL = 1, + USB_DEVICE_AUTHORIZE_INTERNAL = 2, +}; + +struct hc_driver; + +struct usb_phy; + +struct usb_phy_roothub; + +struct usb_hcd { + struct usb_bus self; + struct kref kref; + const char *product_desc; + int speed; + char irq_descr[24]; + struct timer_list rh_timer; + struct urb *status_urb; + struct work_struct wakeup_work; + struct work_struct died_work; + const struct hc_driver *driver; + struct usb_phy *usb_phy; + struct usb_phy_roothub *phy_roothub; + long unsigned int flags; + enum usb_dev_authorize_policy dev_policy; + unsigned int rh_registered: 1; + unsigned int rh_pollable: 1; + unsigned int msix_enabled: 1; + unsigned int msi_enabled: 1; + unsigned int skip_phy_initialization: 1; + unsigned int uses_new_polling: 1; + unsigned int has_tt: 1; + unsigned int amd_resume_bug: 1; + unsigned int can_do_streams: 1; + unsigned int tpl_support: 1; + unsigned int cant_recv_wakeups: 1; + unsigned int irq; + void *regs; + resource_size_t rsrc_start; + resource_size_t rsrc_len; + unsigned int power_budget; + struct giveback_urb_bh high_prio_bh; + struct giveback_urb_bh low_prio_bh; + struct mutex *address0_mutex; + struct mutex *bandwidth_mutex; + struct usb_hcd *shared_hcd; + struct usb_hcd *primary_hcd; + struct dma_pool *pool[4]; + int state; + struct gen_pool *localmem_pool; + long unsigned int hcd_priv[0]; +}; + +struct hc_driver { + const char *description; + const char *product_desc; + size_t hcd_priv_size; + irqreturn_t (*irq)(struct usb_hcd *); + int flags; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*pci_suspend)(struct usb_hcd *, bool); + int (*pci_resume)(struct usb_hcd *, pm_message_t); + int (*pci_poweroff_late)(struct usb_hcd *, bool); + void (*stop)(struct usb_hcd *); + void (*shutdown)(struct usb_hcd *); + int (*get_frame_number)(struct usb_hcd *); + int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t); + int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); + int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t); + void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); + void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); + void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); + int 
(*hub_status_data)(struct usb_hcd *, char *); + int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); + int (*bus_suspend)(struct usb_hcd *); + int (*bus_resume)(struct usb_hcd *); + int (*start_port_reset)(struct usb_hcd *, unsigned int); + long unsigned int (*get_resuming_ports)(struct usb_hcd *); + void (*relinquish_port)(struct usb_hcd *, int); + int (*port_handed_over)(struct usb_hcd *, int); + void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); + int (*alloc_dev)(struct usb_hcd *, struct usb_device *); + void (*free_dev)(struct usb_hcd *, struct usb_device *); + int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t); + int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*address_device)(struct usb_hcd *, struct usb_device *, unsigned int); + int (*enable_device)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int (*reset_device)(struct usb_hcd *, struct usb_device *); + int (*update_device)(struct usb_hcd *, struct usb_device *); + int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); + int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*find_raw_port_number)(struct usb_hcd *, int); + int (*port_power)(struct usb_hcd *, int, bool); + int (*submit_single_step_set_feature)(struct usb_hcd *, struct urb *, int); +}; + +enum usb_phy_type { + USB_PHY_TYPE_UNDEFINED = 0, + USB_PHY_TYPE_USB2 = 1, + USB_PHY_TYPE_USB3 = 2, +}; + +enum usb_phy_events { + USB_EVENT_NONE = 0, + USB_EVENT_VBUS = 1, + USB_EVENT_ID = 2, + USB_EVENT_CHARGER = 3, + USB_EVENT_ENUMERATED = 4, +}; + +struct extcon_dev; + +enum usb_charger_type { + UNKNOWN_TYPE = 0, + SDP_TYPE = 1, + DCP_TYPE = 2, + CDP_TYPE = 3, + ACA_TYPE = 4, +}; + +enum usb_charger_state { + USB_CHARGER_DEFAULT = 0, + USB_CHARGER_PRESENT = 1, + USB_CHARGER_ABSENT = 2, +}; + +struct usb_charger_current { + unsigned int sdp_min; + unsigned int sdp_max; + unsigned int dcp_min; + unsigned int dcp_max; + unsigned int cdp_min; + unsigned int cdp_max; + unsigned int aca_min; + unsigned int aca_max; +}; + +struct usb_otg; + +struct usb_phy_io_ops; + +struct usb_phy { + struct device *dev; + const char *label; + unsigned int flags; + enum usb_phy_type type; + enum usb_phy_events last_event; + struct usb_otg *otg; + struct device *io_dev; + struct usb_phy_io_ops *io_ops; + void *io_priv; + struct extcon_dev *edev; + struct extcon_dev *id_edev; + struct notifier_block vbus_nb; + struct notifier_block id_nb; + struct notifier_block type_nb; + enum usb_charger_type chg_type; + enum usb_charger_state chg_state; + struct usb_charger_current chg_cur; + struct work_struct chg_work; + struct atomic_notifier_head notifier; + u16 port_status; + u16 port_change; + struct list_head head; + int (*init)(struct usb_phy *); + void (*shutdown)(struct usb_phy *); + int (*set_vbus)(struct usb_phy *, int); + int (*set_power)(struct usb_phy *, unsigned int); + int 
(*set_suspend)(struct usb_phy *, int); + int (*set_wakeup)(struct usb_phy *, bool); + int (*notify_connect)(struct usb_phy *, enum usb_device_speed); + int (*notify_disconnect)(struct usb_phy *, enum usb_device_speed); + enum usb_charger_type (*charger_detect)(struct usb_phy *); +}; + +struct usb_hub_descriptor { + __u8 bDescLength; + __u8 bDescriptorType; + __u8 bNbrPorts; + __le16 wHubCharacteristics; + __u8 bPwrOn2PwrGood; + __u8 bHubContrCurrent; + union { + struct { + __u8 DeviceRemovable[4]; + __u8 PortPwrCtrlMask[4]; + } hs; + struct { + __u8 bHubHdrDecLat; + __le16 wHubDelay; + __le16 DeviceRemovable; + } __attribute__((packed)) ss; + } u; +} __attribute__((packed)); + +enum usb_otg_state { + OTG_STATE_UNDEFINED = 0, + OTG_STATE_B_IDLE = 1, + OTG_STATE_B_SRP_INIT = 2, + OTG_STATE_B_PERIPHERAL = 3, + OTG_STATE_B_WAIT_ACON = 4, + OTG_STATE_B_HOST = 5, + OTG_STATE_A_IDLE = 6, + OTG_STATE_A_WAIT_VRISE = 7, + OTG_STATE_A_WAIT_BCON = 8, + OTG_STATE_A_HOST = 9, + OTG_STATE_A_SUSPEND = 10, + OTG_STATE_A_PERIPHERAL = 11, + OTG_STATE_A_WAIT_VFALL = 12, + OTG_STATE_A_VBUS_ERR = 13, +}; + +struct usb_phy_io_ops { + int (*read)(struct usb_phy *, u32); + int (*write)(struct usb_phy *, u32, u32); +}; + +struct usb_gadget; + +struct usb_otg { + u8 default_a; + struct phy *phy; + struct usb_phy *usb_phy; + struct usb_bus *host; + struct usb_gadget *gadget; + enum usb_otg_state state; + int (*set_host)(struct usb_otg *, struct usb_bus *); + int (*set_peripheral)(struct usb_otg *, struct usb_gadget *); + int (*set_vbus)(struct usb_otg *, bool); + int (*start_srp)(struct usb_otg *); + int (*start_hnp)(struct usb_otg *); +}; + +struct pci_fixup { + u16 vendor; + u16 device; + u32 class; + unsigned int class_shift; + void (*hook)(struct pci_dev *); +}; + +enum amd_chipset_gen { + NOT_AMD_CHIPSET = 0, + AMD_CHIPSET_SB600 = 1, + AMD_CHIPSET_SB700 = 2, + AMD_CHIPSET_SB800 = 3, + AMD_CHIPSET_HUDSON2 = 4, + AMD_CHIPSET_BOLTON = 5, + AMD_CHIPSET_YANGTZE = 6, + AMD_CHIPSET_TAISHAN = 7, + AMD_CHIPSET_UNKNOWN = 8, +}; + +struct amd_chipset_type { + enum amd_chipset_gen gen; + u8 rev; +}; + +struct amd_chipset_info { + struct pci_dev *nb_dev; + struct pci_dev *smbus_dev; + int nb_type; + struct amd_chipset_type sb_type; + int isoc_reqs; + int probe_count; + bool need_pll_quirk; +}; + +typedef struct { + spinlock_t *lock; + long unsigned int flags; +} class_spinlock_irqsave_t; + +struct serio_device_id { + __u8 type; + __u8 extra; + __u8 id; + __u8 proto; +}; + +struct serio_driver; + +struct serio { + void *port_data; + char name[32]; + char phys[32]; + char firmware_id[128]; + bool manual_bind; + struct serio_device_id id; + spinlock_t lock; + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); + void (*close)(struct serio *); + int (*start)(struct serio *); + void (*stop)(struct serio *); + struct serio *parent; + struct list_head child_node; + struct list_head children; + unsigned int depth; + struct serio_driver *drv; + struct mutex drv_mutex; + long: 32; + struct device dev; + struct list_head node; + struct mutex *ps2_cmd_mutex; + long: 32; +}; + +struct serio_driver { + const char *description; + const struct serio_device_id *id_table; + bool manual_bind; + void (*write_wakeup)(struct serio *); + irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); + int (*connect)(struct serio *, struct serio_driver *); + int (*reconnect)(struct serio *); + int (*fast_reconnect)(struct serio *); + void (*disconnect)(struct serio *); + void (*cleanup)(struct serio *); + struct 
device_driver driver; +}; + +struct serport { + struct tty_struct *tty; + wait_queue_head_t wait; + struct serio *serio; + struct serio_device_id id; + spinlock_t lock; + long unsigned int flags; +}; + +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED = 1, +}; + +enum thermal_trend { + THERMAL_TREND_STABLE = 0, + THERMAL_TREND_RAISING = 1, + THERMAL_TREND_DROPPING = 2, +}; + +enum thermal_notify_event { + THERMAL_EVENT_UNSPECIFIED = 0, + THERMAL_EVENT_TEMP_SAMPLE = 1, + THERMAL_TRIP_VIOLATED = 2, + THERMAL_TRIP_CHANGED = 3, + THERMAL_DEVICE_DOWN = 4, + THERMAL_DEVICE_UP = 5, + THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, + THERMAL_TABLE_CHANGED = 7, + THERMAL_EVENT_KEEP_ALIVE = 8, + THERMAL_TZ_BIND_CDEV = 9, + THERMAL_TZ_UNBIND_CDEV = 10, + THERMAL_INSTANCE_WEIGHT_CHANGED = 11, + THERMAL_TZ_RESUME = 12, + THERMAL_TZ_ADD_THRESHOLD = 13, + THERMAL_TZ_DEL_THRESHOLD = 14, + THERMAL_TZ_FLUSH_THRESHOLDS = 15, +}; + +struct cooling_spec { + long unsigned int upper; + long unsigned int lower; + unsigned int weight; +}; + +struct thermal_zone_device_ops { + bool (*should_bind)(struct thermal_zone_device *, const struct thermal_trip *, struct thermal_cooling_device *, struct cooling_spec *); + int (*get_temp)(struct thermal_zone_device *, int *); + int (*set_trips)(struct thermal_zone_device *, int, int); + int (*change_mode)(struct thermal_zone_device *, enum thermal_device_mode); + int (*set_trip_temp)(struct thermal_zone_device *, const struct thermal_trip *, int); + int (*get_crit_temp)(struct thermal_zone_device *, int *); + int (*set_emul_temp)(struct thermal_zone_device *, int); + int (*get_trend)(struct thermal_zone_device *, const struct thermal_trip *, enum thermal_trend *); + void (*hot)(struct thermal_zone_device *); + void (*critical)(struct thermal_zone_device *); +}; + +struct thermal_attr { + struct device_attribute attr; + char name[20]; +}; + +struct thermal_trip_attrs { + struct thermal_attr type; + struct thermal_attr temp; + struct thermal_attr hyst; +}; + +struct thermal_trip_desc { + struct thermal_trip trip; + struct thermal_trip_attrs trip_attrs; + struct list_head list_node; + struct list_head thermal_instances; + int threshold; +}; + +struct thermal_zone_params; + +struct thermal_governor; + +struct thermal_zone_device { + int id; + char type[20]; + struct device device; + struct completion removal; + struct completion resume; + struct attribute_group trips_attribute_group; + struct list_head trips_high; + struct list_head trips_reached; + struct list_head trips_invalid; + enum thermal_device_mode mode; + void *devdata; + int num_trips; + long unsigned int passive_delay_jiffies; + long unsigned int polling_delay_jiffies; + long unsigned int recheck_delay_jiffies; + int temperature; + int last_temperature; + int emul_temperature; + int passive; + int prev_low_trip; + int prev_high_trip; + struct thermal_zone_device_ops ops; + struct thermal_zone_params *tzp; + struct thermal_governor *governor; + void *governor_data; + struct ida ida; + struct mutex lock; + struct list_head node; + struct delayed_work poll_queue; + enum thermal_notify_event notify_event; + u8 state; + struct list_head user_thresholds; + struct thermal_trip_desc trips[0]; +}; + +struct thermal_zone_params { + const char *governor_name; + bool no_hwmon; + u32 sustainable_power; + s32 k_po; + s32 k_pu; + s32 k_i; + s32 k_d; + s32 integral_cutoff; + int slope; + int offset; +}; + +struct user_threshold { + struct list_head list_node; + int temperature; + int direction; +}; + 
+struct thermal_governor { + const char *name; + int (*bind_to_tz)(struct thermal_zone_device *); + void (*unbind_from_tz)(struct thermal_zone_device *); + void (*trip_crossed)(struct thermal_zone_device *, const struct thermal_trip *, bool); + void (*manage)(struct thermal_zone_device *); + void (*update_tz)(struct thermal_zone_device *, enum thermal_notify_event); + struct list_head governor_list; +}; + +typedef struct thermal_zone_device *class_thermal_zone_t; + +typedef u16 blk_short_t; + +struct hd_geometry { + unsigned char heads; + unsigned char sectors; + short unsigned int cylinders; + long unsigned int start; +}; + +struct bio_integrity_payload { + struct bio *bip_bio; + struct bvec_iter bip_iter; + short unsigned int bip_vcnt; + short unsigned int bip_max_vcnt; + short unsigned int bip_flags; + struct bvec_iter bio_iter; + struct work_struct bip_work; + struct bio_vec *bip_vec; + struct bio_vec bip_inline_vecs[0]; +}; + +enum dm_queue_mode { + DM_TYPE_NONE = 0, + DM_TYPE_BIO_BASED = 1, + DM_TYPE_REQUEST_BASED = 2, + DM_TYPE_DAX_BIO_BASED = 3, +}; + +typedef enum { + STATUSTYPE_INFO = 0, + STATUSTYPE_TABLE = 1, + STATUSTYPE_IMA = 2, +} status_type_t; + +union map_info { + void *ptr; +}; + +struct dm_target; + +typedef int (*dm_ctr_fn)(struct dm_target *, unsigned int, char **); + +struct dm_table; + +struct target_type; + +struct dm_target { + struct dm_table *table; + struct target_type *type; + sector_t begin; + sector_t len; + uint32_t max_io_len; + unsigned int num_flush_bios; + unsigned int num_discard_bios; + unsigned int num_secure_erase_bios; + unsigned int num_write_zeroes_bios; + unsigned int per_io_data_size; + void *private; + char *error; + bool flush_supported: 1; + bool discards_supported: 1; + bool zone_reset_all_supported: 1; + bool max_discard_granularity: 1; + bool limit_swap_bios: 1; + bool emulate_zone_append: 1; + bool accounts_remapped_io: 1; + bool needs_bio_set_dev: 1; + bool flush_bypasses_map: 1; + bool mempool_needs_integrity: 1; + long: 32; +}; + +typedef void (*dm_dtr_fn)(struct dm_target *); + +typedef int (*dm_map_fn)(struct dm_target *, struct bio *); + +typedef int (*dm_clone_and_map_request_fn)(struct dm_target *, struct request *, union map_info *, struct request **); + +typedef void (*dm_release_clone_request_fn)(struct request *, union map_info *); + +typedef int (*dm_endio_fn)(struct dm_target *, struct bio *, blk_status_t *); + +typedef int (*dm_request_endio_fn)(struct dm_target *, struct request *, blk_status_t, union map_info *); + +typedef void (*dm_presuspend_fn)(struct dm_target *); + +typedef void (*dm_presuspend_undo_fn)(struct dm_target *); + +typedef void (*dm_postsuspend_fn)(struct dm_target *); + +typedef int (*dm_preresume_fn)(struct dm_target *); + +typedef void (*dm_resume_fn)(struct dm_target *); + +typedef void (*dm_status_fn)(struct dm_target *, status_type_t, unsigned int, char *, unsigned int); + +typedef int (*dm_message_fn)(struct dm_target *, unsigned int, char **, char *, unsigned int); + +typedef int (*dm_prepare_ioctl_fn)(struct dm_target *, struct block_device **); + +typedef int (*dm_report_zones_fn)(struct dm_target *); + +struct dm_dev; + +typedef int (*iterate_devices_callout_fn)(struct dm_target *, struct dm_dev *, sector_t, sector_t, void *); + +struct dm_dev { + struct block_device *bdev; + struct file *bdev_file; + struct dax_device *dax_dev; + blk_mode_t mode; + char name[16]; +}; + +typedef int (*dm_iterate_devices_fn)(struct dm_target *, iterate_devices_callout_fn, void *); + +typedef void 
(*dm_io_hints_fn)(struct dm_target *, struct queue_limits *); + +typedef int (*dm_busy_fn)(struct dm_target *); + +enum dax_access_mode { + DAX_ACCESS = 0, + DAX_RECOVERY_WRITE = 1, +}; + +typedef long int (*dm_dax_direct_access_fn)(struct dm_target *, long unsigned int, long int, enum dax_access_mode, void **, pfn_t *); + +typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *, long unsigned int, size_t); + +typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *, long unsigned int, void *, size_t, struct iov_iter *); + +struct target_type { + uint64_t features; + const char *name; + struct module *module; + unsigned int version[3]; + dm_ctr_fn ctr; + dm_dtr_fn dtr; + dm_map_fn map; + dm_clone_and_map_request_fn clone_and_map_rq; + dm_release_clone_request_fn release_clone_rq; + dm_endio_fn end_io; + dm_request_endio_fn rq_end_io; + dm_presuspend_fn presuspend; + dm_presuspend_undo_fn presuspend_undo; + dm_postsuspend_fn postsuspend; + dm_preresume_fn preresume; + dm_resume_fn resume; + dm_status_fn status; + dm_message_fn message; + dm_prepare_ioctl_fn prepare_ioctl; + dm_report_zones_fn report_zones; + dm_busy_fn busy; + dm_iterate_devices_fn iterate_devices; + dm_io_hints_fn io_hints; + dm_dax_direct_access_fn direct_access; + dm_dax_zero_page_range_fn dax_zero_page_range; + dm_dax_recovery_write_fn dax_recovery_write; + struct list_head list; + long: 32; +}; + +struct mapped_device; + +struct dm_md_mempools; + +struct dm_table { + struct mapped_device *md; + enum dm_queue_mode type; + unsigned int depth; + unsigned int counts[16]; + sector_t *index[16]; + unsigned int num_targets; + unsigned int num_allocated; + sector_t *highs; + struct dm_target *targets; + struct target_type *immutable_target_type; + bool integrity_supported: 1; + bool singleton: 1; + bool flush_bypasses_map: 1; + blk_mode_t mode; + struct list_head devices; + struct rw_semaphore devices_lock; + void (*event_fn)(void *); + void *event_context; + struct dm_md_mempools *mempools; +}; + +struct dm_stats_last_position; + +struct dm_stats { + struct mutex mutex; + struct list_head list; + struct dm_stats_last_position *last; + bool precise_timestamps; +}; + +struct dm_stats_aux { + bool merged; + long: 32; + long long unsigned int duration_ns; +}; + +struct dm_kobject_holder { + struct kobject kobj; + struct completion completion; +}; + +struct dm_md_mempools { + struct bio_set bs; + struct bio_set io_bs; +}; + +struct dm_io; + +struct mapped_device { + struct mutex suspend_lock; + struct mutex table_devices_lock; + struct list_head table_devices; + void *map; + long unsigned int flags; + struct mutex type_lock; + enum dm_queue_mode type; + int numa_node_id; + struct request_queue *queue; + atomic_t holders; + atomic_t open_count; + struct dm_target *immutable_target; + struct target_type *immutable_target_type; + char name[16]; + struct gendisk *disk; + struct dax_device *dax_dev; + wait_queue_head_t wait; + long unsigned int *pending_io; + struct hd_geometry geometry; + struct workqueue_struct *wq; + struct work_struct work; + spinlock_t deferred_lock; + struct bio_list deferred; + struct work_struct requeue_work; + struct dm_io *requeue_list; + void *interface_ptr; + wait_queue_head_t eventq; + atomic_t event_nr; + atomic_t uevent_seq; + struct list_head uevent_list; + spinlock_t uevent_lock; + bool init_tio_pdu: 1; + struct blk_mq_tag_set *tag_set; + struct dm_stats stats; + unsigned int internal_suspend_count; + int swap_bios; + struct semaphore swap_bios_semaphore; + struct mutex swap_bios_lock; + 
struct dm_md_mempools *mempools; + struct dm_kobject_holder kobj_holder; + struct srcu_struct io_barrier; +}; + +struct dm_target_io { + short unsigned int magic; + blk_short_t flags; + unsigned int target_bio_nr; + struct dm_io *io; + struct dm_target *ti; + unsigned int *len_ptr; + long: 32; + sector_t old_sector; + struct bio clone; +}; + +struct dm_io { + short unsigned int magic; + blk_short_t flags; + spinlock_t lock; + long unsigned int start_time; + void *data; + struct dm_io *next; + long: 32; + struct dm_stats_aux stats_aux; + blk_status_t status; + atomic_t io_count; + struct mapped_device *md; + struct bio *orig_bio; + unsigned int sector_offset; + unsigned int sectors; + struct dm_target_io tio; +}; + +typedef long unsigned int efi_status_t; + +typedef u8 efi_bool_t; + +typedef struct { + u64 signature; + u32 revision; + u32 headersize; + u32 crc32; + u32 reserved; +} efi_table_hdr_t; + +typedef struct { + u32 type; + u32 pad; + u64 phys_addr; + u64 virt_addr; + u64 num_pages; + u64 attribute; +} efi_memory_desc_t; + +typedef struct { + efi_guid_t guid; + u32 headersize; + u32 flags; + u32 imagesize; +} efi_capsule_header_t; + +typedef struct { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 pad1; + u32 nanosecond; + s16 timezone; + u8 daylight; + u8 pad2; +} efi_time_t; + +typedef struct { + u32 resolution; + u32 accuracy; + u8 sets_to_zero; +} efi_time_cap_t; + +union efi_boot_services; + +typedef union efi_boot_services efi_boot_services_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 get_time; + u32 set_time; + u32 get_wakeup_time; + u32 set_wakeup_time; + u32 set_virtual_address_map; + u32 convert_pointer; + u32 get_variable; + u32 get_next_variable; + u32 set_variable; + u32 get_next_high_mono_count; + u32 reset_system; + u32 update_capsule; + u32 query_capsule_caps; + u32 query_variable_info; +} efi_runtime_services_32_t; + +typedef efi_status_t efi_get_time_t(efi_time_t *, efi_time_cap_t *); + +typedef efi_status_t efi_set_time_t(efi_time_t *); + +typedef efi_status_t efi_get_wakeup_time_t(efi_bool_t *, efi_bool_t *, efi_time_t *); + +typedef efi_status_t efi_set_wakeup_time_t(efi_bool_t, efi_time_t *); + +typedef efi_status_t efi_get_variable_t(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *); + +typedef efi_status_t efi_get_next_variable_t(long unsigned int *, efi_char16_t *, efi_guid_t *); + +typedef efi_status_t efi_set_variable_t(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *); + +typedef efi_status_t efi_get_next_high_mono_count_t(u32 *); + +typedef void efi_reset_system_t(int, efi_status_t, long unsigned int, efi_char16_t *); + +typedef efi_status_t efi_set_virtual_address_map_t(long unsigned int, long unsigned int, u32, efi_memory_desc_t *); + +typedef efi_status_t efi_query_variable_info_t(u32, u64 *, u64 *, u64 *); + +typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **, long unsigned int, long unsigned int); + +typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **, long unsigned int, u64 *, int *); + +typedef union { + struct { + efi_table_hdr_t hdr; + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_set_virtual_address_map_t *set_virtual_address_map; + void *convert_pointer; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t 
*reset_system; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_query_variable_info_t *query_variable_info; + }; + efi_runtime_services_32_t mixed_mode; +} efi_runtime_services_t; + +typedef struct { + efi_guid_t guid; + u32 table; +} efi_config_table_32_t; + +typedef union { + struct { + efi_guid_t guid; + void *table; + }; + efi_config_table_32_t mixed_mode; +} efi_config_table_t; + +typedef struct { + efi_guid_t guid; + long unsigned int *ptr; + const char name[16]; +} efi_config_table_type_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 fw_vendor; + u32 fw_revision; + u32 con_in_handle; + u32 con_in; + u32 con_out_handle; + u32 con_out; + u32 stderr_handle; + u32 stderr; + u32 runtime; + u32 boottime; + u32 nr_tables; + u32 tables; +} efi_system_table_32_t; + +union efi_simple_text_input_protocol; + +typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t; + +union efi_simple_text_output_protocol; + +typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; + +typedef union { + struct { + efi_table_hdr_t hdr; + long unsigned int fw_vendor; + u32 fw_revision; + long unsigned int con_in_handle; + efi_simple_text_input_protocol_t *con_in; + long unsigned int con_out_handle; + efi_simple_text_output_protocol_t *con_out; + long unsigned int stderr_handle; + long unsigned int stderr; + efi_runtime_services_t *runtime; + efi_boot_services_t *boottime; + long unsigned int nr_tables; + long unsigned int tables; + }; + efi_system_table_32_t mixed_mode; +} efi_system_table_t; + +struct efi_memory_map_data { + phys_addr_t phys_map; + long unsigned int size; + long unsigned int desc_version; + long unsigned int desc_size; + long unsigned int flags; +}; + +struct efi_memory_map { + phys_addr_t phys_map; + void *map; + void *map_end; + int nr_map; + long unsigned int desc_version; + long unsigned int desc_size; + long unsigned int flags; +}; + +struct efi { + const efi_runtime_services_t *runtime; + unsigned int runtime_version; + unsigned int runtime_supported_mask; + long unsigned int acpi; + long unsigned int acpi20; + long unsigned int smbios; + long unsigned int smbios3; + long unsigned int esrt; + long unsigned int tpm_log; + long unsigned int tpm_final_log; + long unsigned int mokvar_table; + long unsigned int coco_secret; + long unsigned int unaccepted; + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_info_t *query_variable_info; + efi_query_variable_info_t *query_variable_info_nonblocking; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + struct efi_memory_map memmap; + long unsigned int flags; +}; + +struct screen_info { + __u8 orig_x; + __u8 orig_y; + __u16 ext_mem_k; + __u16 orig_video_page; + __u8 orig_video_mode; + __u8 orig_video_cols; + __u8 flags; + __u8 unused2; + __u16 orig_video_ega_bx; + __u16 unused3; + __u8 orig_video_lines; + __u8 orig_video_isVGA; + __u16 orig_video_points; + __u16 lfb_width; + __u16 lfb_height; + __u16 lfb_depth; + __u32 lfb_base; + __u32 lfb_size; + __u16 cl_magic; + __u16 cl_offset; + __u16 lfb_linelength; + __u8 red_size; + __u8 red_pos; + __u8 green_size; 
+ __u8 green_pos; + __u8 blue_size; + __u8 blue_pos; + __u8 rsvd_size; + __u8 rsvd_pos; + __u16 vesapm_seg; + __u16 vesapm_off; + __u16 pages; + __u16 vesa_attributes; + __u32 capabilities; + __u32 ext_lfb_base; + __u8 _reserved[2]; +} __attribute__((packed)); + +enum fixed_addresses { + FIX_EARLYCON_MEM_BASE = 0, + __end_of_permanent_fixed_addresses = 1, + FIX_KMAP_BEGIN = 1, + FIX_KMAP_END = 512, + FIX_TEXT_POKE0 = 513, + FIX_TEXT_POKE1 = 514, + __end_of_fixmap_region = 515, + FIX_BTMAP_END = 1, + FIX_BTMAP_BEGIN = 224, + __end_of_early_ioremap_region = 225, +}; + +struct hid_device_id { + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + kernel_ulong_t driver_data; +}; + +enum hid_report_type { + HID_INPUT_REPORT = 0, + HID_OUTPUT_REPORT = 1, + HID_FEATURE_REPORT = 2, + HID_REPORT_TYPES = 3, +}; + +enum { + BPF_MAX_TRAMP_LINKS = 38, +}; + +enum hid_type { + HID_TYPE_OTHER = 0, + HID_TYPE_USBMOUSE = 1, + HID_TYPE_USBNONE = 2, +}; + +struct hid_report; + +struct hid_report_enum { + unsigned int numbered; + struct list_head report_list; + struct hid_report *report_id_hash[256]; +}; + +struct hid_collection; + +struct hid_driver; + +struct hid_ll_driver; + +struct hid_field; + +struct hid_usage; + +struct hid_device { + const __u8 *dev_rdesc; + const __u8 *bpf_rdesc; + const __u8 *rdesc; + unsigned int dev_rsize; + unsigned int bpf_rsize; + unsigned int rsize; + unsigned int collection_size; + struct hid_collection *collection; + unsigned int maxcollection; + unsigned int maxapplication; + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + __u32 version; + enum hid_type type; + unsigned int country; + struct hid_report_enum report_enum[3]; + struct work_struct led_work; + struct semaphore driver_input_lock; + long: 32; + struct device dev; + struct hid_driver *driver; + void *devres_group_id; + const struct hid_ll_driver *ll_driver; + struct mutex ll_open_lock; + unsigned int ll_open_count; + long unsigned int status; + unsigned int claimed; + unsigned int quirks; + unsigned int initial_quirks; + bool io_started; + struct list_head inputs; + void *hiddev; + void *hidraw; + char name[128]; + char phys[64]; + char uniq[64]; + void *driver_data; + int (*ff_init)(struct hid_device *); + int (*hiddev_connect)(struct hid_device *, unsigned int); + void (*hiddev_disconnect)(struct hid_device *); + void (*hiddev_hid_event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); + void (*hiddev_report_event)(struct hid_device *, struct hid_report *); + short unsigned int debug; + struct dentry *debug_dir; + struct dentry *debug_rdesc; + struct dentry *debug_events; + struct list_head debug_list; + spinlock_t debug_list_lock; + wait_queue_head_t debug_wait; + struct kref ref; + unsigned int id; +}; + +struct hid_field_entry; + +struct hid_report { + struct list_head list; + struct list_head hidinput_list; + struct list_head field_entry_list; + unsigned int id; + enum hid_report_type type; + unsigned int application; + struct hid_field *field[256]; + struct hid_field_entry *field_entries; + unsigned int maxfield; + unsigned int size; + struct hid_device *device; + bool tool_active; + unsigned int tool; +}; + +struct hid_collection { + int parent_idx; + unsigned int type; + unsigned int usage; + unsigned int level; +}; + +struct hid_usage { + unsigned int hid; + unsigned int collection_index; + unsigned int usage_index; + __s8 resolution_multiplier; + __s8 wheel_factor; + __u16 code; + __u8 type; + __s16 hat_min; + __s16 hat_max; + __s16 hat_dir; + __s16 
wheel_accumulated; +}; + +struct hid_input; + +struct hid_field { + unsigned int physical; + unsigned int logical; + unsigned int application; + struct hid_usage *usage; + unsigned int maxusage; + unsigned int flags; + unsigned int report_offset; + unsigned int report_size; + unsigned int report_count; + unsigned int report_type; + __s32 *value; + __s32 *new_value; + __s32 *usages_priorities; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned int unit; + bool ignored; + struct hid_report *report; + unsigned int index; + struct hid_input *hidinput; + __u16 dpad; + unsigned int slot_idx; +}; + +struct hid_input { + struct list_head list; + struct hid_report *report; + struct input_dev *input; + const char *name; + struct list_head reports; + unsigned int application; + bool registered; +}; + +struct hid_field_entry { + struct list_head list; + struct hid_field *field; + unsigned int index; + __s32 priority; +}; + +struct hid_report_id; + +struct hid_usage_id; + +struct hid_driver { + char *name; + const struct hid_device_id *id_table; + struct list_head dyn_list; + spinlock_t dyn_lock; + bool (*match)(struct hid_device *, bool); + int (*probe)(struct hid_device *, const struct hid_device_id *); + void (*remove)(struct hid_device *); + const struct hid_report_id *report_table; + int (*raw_event)(struct hid_device *, struct hid_report *, u8 *, int); + const struct hid_usage_id *usage_table; + int (*event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); + void (*report)(struct hid_device *, struct hid_report *); + const __u8 * (*report_fixup)(struct hid_device *, __u8 *, unsigned int *); + int (*input_mapping)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); + int (*input_mapped)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); + int (*input_configured)(struct hid_device *, struct hid_input *); + void (*feature_mapping)(struct hid_device *, struct hid_field *, struct hid_usage *); + int (*suspend)(struct hid_device *, pm_message_t); + int (*resume)(struct hid_device *); + int (*reset_resume)(struct hid_device *); + struct device_driver driver; +}; + +struct hid_ll_driver { + int (*start)(struct hid_device *); + void (*stop)(struct hid_device *); + int (*open)(struct hid_device *); + void (*close)(struct hid_device *); + int (*power)(struct hid_device *, int); + int (*parse)(struct hid_device *); + void (*request)(struct hid_device *, struct hid_report *, int); + int (*wait)(struct hid_device *); + int (*raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, unsigned char, int); + int (*output_report)(struct hid_device *, __u8 *, size_t); + int (*idle)(struct hid_device *, int, int, int); + bool (*may_wakeup)(struct hid_device *); + unsigned int max_buffer_size; +}; + +struct hid_report_id { + __u32 report_type; +}; + +struct hid_usage_id { + __u32 usage_hid; + __u32 usage_type; + __u32 usage_code; +}; + +struct hid_debug_list { + struct { + union { + struct __kfifo kfifo; + char *type; + const char *const_type; + char (*rectype)[0]; + char *ptr; + const char *ptr_const; + }; + char buf[0]; + } hid_debug_fifo; + struct fasync_struct *fasync; + struct hid_device *hdev; + struct list_head node; + struct mutex read_mutex; +}; + +struct hid_usage_entry { + unsigned int page; + unsigned int usage; + const char *description; +}; + +struct pcpu_gen_cookie { + local_t 
nesting; + long: 32; + u64 last; +}; + +struct gen_cookie { + struct pcpu_gen_cookie *local; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic64_t forward_last; + atomic64_t reverse_last; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum { + INET_DIAG_REQ_NONE = 0, + INET_DIAG_REQ_BYTECODE = 1, + INET_DIAG_REQ_SK_BPF_STORAGES = 2, + INET_DIAG_REQ_PROTOCOL = 3, + __INET_DIAG_REQ_MAX = 4, +}; + +struct sock_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; +}; + +enum { + SK_MEMINFO_RMEM_ALLOC = 0, + SK_MEMINFO_RCVBUF = 1, + SK_MEMINFO_WMEM_ALLOC = 2, + SK_MEMINFO_SNDBUF = 3, + SK_MEMINFO_FWD_ALLOC = 4, + SK_MEMINFO_WMEM_QUEUED = 5, + SK_MEMINFO_OPTMEM = 6, + SK_MEMINFO_BACKLOG = 7, + SK_MEMINFO_DROPS = 8, + SK_MEMINFO_VARS = 9, +}; + +enum sknetlink_groups { + SKNLGRP_NONE = 0, + SKNLGRP_INET_TCP_DESTROY = 1, + SKNLGRP_INET_UDP_DESTROY = 2, + SKNLGRP_INET6_TCP_DESTROY = 3, + SKNLGRP_INET6_UDP_DESTROY = 4, + __SKNLGRP_MAX = 5, +}; + +struct sock_diag_handler { + struct module *owner; + __u8 family; + int (*dump)(struct sk_buff *, struct nlmsghdr *); + int (*get_info)(struct sk_buff *, struct sock *); + int (*destroy)(struct sk_buff *, struct nlmsghdr *); +}; + +struct sock_diag_inet_compat { + struct module *owner; + int (*fn)(struct sk_buff *, struct nlmsghdr *); +}; + +struct broadcast_sk { + struct sock *sk; + struct work_struct work; +}; + +enum { + BPF_F_INGRESS = 1, + BPF_F_BROADCAST = 8, + BPF_F_EXCLUDE_INGRESS = 16, +}; + +enum sk_action { + SK_DROP = 0, + SK_PASS = 1, +}; + +typedef unsigned int (*bpf_func_t)(const void *, const struct bpf_insn *); + +typedef unsigned int (*bpf_dispatcher_fn)(const void *, const struct bpf_insn *, unsigned int (*)(const void *, const struct bpf_insn *)); + +struct sk_psock_progs { + struct bpf_prog *msg_parser; + struct bpf_prog *stream_parser; + struct bpf_prog *stream_verdict; + struct bpf_prog *skb_verdict; + struct bpf_link *msg_parser_link; + struct bpf_link *stream_parser_link; + struct bpf_link *stream_verdict_link; + struct bpf_link *skb_verdict_link; +}; + +struct sk_psock_work_state { + u32 len; + u32 off; +}; + +struct sk_msg; + +struct sk_psock { + struct sock *sk; + struct sock *sk_redir; + u32 apply_bytes; + u32 cork_bytes; + u32 eval; + bool redir_ingress; + struct sk_msg *cork; + struct sk_psock_progs progs; + struct sk_buff_head ingress_skb; + struct list_head ingress_msg; + spinlock_t ingress_lock; + long unsigned int state; + struct list_head link; + spinlock_t link_lock; + refcount_t refcnt; + void (*saved_unhash)(struct sock *); + void (*saved_destroy)(struct sock *); + void (*saved_close)(struct sock *, long int); + void (*saved_write_space)(struct sock *); + void (*saved_data_ready)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + struct proto *sk_proto; + struct mutex work_mutex; + struct sk_psock_work_state work_state; + struct delayed_work work; + struct sock *sk_pair; + struct rcu_work rwork; +}; + +struct strp_msg { + int full_len; + int offset; +}; + +enum __sk_action { + __SK_DROP = 0, + __SK_PASS = 1, + __SK_REDIRECT = 2, + __SK_NONE = 3, +}; + +struct sk_msg_sg { + u32 start; + u32 curr; + u32 end; + u32 size; + u32 copybreak; + long unsigned int copy[1]; + struct scatterlist data[19]; +}; + +struct sk_msg { + struct sk_msg_sg sg; + 
void *data; + void *data_end; + u32 apply_bytes; + u32 cork_bytes; + u32 flags; + struct sk_buff *skb; + struct sock *sk_redir; + struct sock *sk; + struct list_head list; +}; + +enum sk_psock_state_bits { + SK_PSOCK_TX_ENABLED = 0, + SK_PSOCK_RX_STRP_ENABLED = 1, +}; + +struct sk_psock_link { + struct list_head list; + struct bpf_map *map; + void *link_raw; +}; + +enum tunable_id { + ETHTOOL_ID_UNSPEC = 0, + ETHTOOL_RX_COPYBREAK = 1, + ETHTOOL_TX_COPYBREAK = 2, + ETHTOOL_PFC_PREVENTION_TOUT = 3, + ETHTOOL_TX_COPYBREAK_BUF_SIZE = 4, + __ETHTOOL_TUNABLE_COUNT = 5, +}; + +enum phy_tunable_id { + ETHTOOL_PHY_ID_UNSPEC = 0, + ETHTOOL_PHY_DOWNSHIFT = 1, + ETHTOOL_PHY_FAST_LINK_DOWN = 2, + ETHTOOL_PHY_EDPD = 3, + __ETHTOOL_PHY_TUNABLE_COUNT = 4, +}; + +enum { + ETH_RSS_HASH_TOP_BIT = 0, + ETH_RSS_HASH_XOR_BIT = 1, + ETH_RSS_HASH_CRC32_BIT = 2, + ETH_RSS_HASH_FUNCS_COUNT = 3, +}; + +enum { + ETHTOOL_MSG_USER_NONE = 0, + ETHTOOL_MSG_STRSET_GET = 1, + ETHTOOL_MSG_LINKINFO_GET = 2, + ETHTOOL_MSG_LINKINFO_SET = 3, + ETHTOOL_MSG_LINKMODES_GET = 4, + ETHTOOL_MSG_LINKMODES_SET = 5, + ETHTOOL_MSG_LINKSTATE_GET = 6, + ETHTOOL_MSG_DEBUG_GET = 7, + ETHTOOL_MSG_DEBUG_SET = 8, + ETHTOOL_MSG_WOL_GET = 9, + ETHTOOL_MSG_WOL_SET = 10, + ETHTOOL_MSG_FEATURES_GET = 11, + ETHTOOL_MSG_FEATURES_SET = 12, + ETHTOOL_MSG_PRIVFLAGS_GET = 13, + ETHTOOL_MSG_PRIVFLAGS_SET = 14, + ETHTOOL_MSG_RINGS_GET = 15, + ETHTOOL_MSG_RINGS_SET = 16, + ETHTOOL_MSG_CHANNELS_GET = 17, + ETHTOOL_MSG_CHANNELS_SET = 18, + ETHTOOL_MSG_COALESCE_GET = 19, + ETHTOOL_MSG_COALESCE_SET = 20, + ETHTOOL_MSG_PAUSE_GET = 21, + ETHTOOL_MSG_PAUSE_SET = 22, + ETHTOOL_MSG_EEE_GET = 23, + ETHTOOL_MSG_EEE_SET = 24, + ETHTOOL_MSG_TSINFO_GET = 25, + ETHTOOL_MSG_CABLE_TEST_ACT = 26, + ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 27, + ETHTOOL_MSG_TUNNEL_INFO_GET = 28, + ETHTOOL_MSG_FEC_GET = 29, + ETHTOOL_MSG_FEC_SET = 30, + ETHTOOL_MSG_MODULE_EEPROM_GET = 31, + ETHTOOL_MSG_STATS_GET = 32, + ETHTOOL_MSG_PHC_VCLOCKS_GET = 33, + ETHTOOL_MSG_MODULE_GET = 34, + ETHTOOL_MSG_MODULE_SET = 35, + ETHTOOL_MSG_PSE_GET = 36, + ETHTOOL_MSG_PSE_SET = 37, + ETHTOOL_MSG_RSS_GET = 38, + ETHTOOL_MSG_PLCA_GET_CFG = 39, + ETHTOOL_MSG_PLCA_SET_CFG = 40, + ETHTOOL_MSG_PLCA_GET_STATUS = 41, + ETHTOOL_MSG_MM_GET = 42, + ETHTOOL_MSG_MM_SET = 43, + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 44, + ETHTOOL_MSG_PHY_GET = 45, + __ETHTOOL_MSG_USER_CNT = 46, + ETHTOOL_MSG_USER_MAX = 45, +}; + +enum { + ETHTOOL_MSG_KERNEL_NONE = 0, + ETHTOOL_MSG_STRSET_GET_REPLY = 1, + ETHTOOL_MSG_LINKINFO_GET_REPLY = 2, + ETHTOOL_MSG_LINKINFO_NTF = 3, + ETHTOOL_MSG_LINKMODES_GET_REPLY = 4, + ETHTOOL_MSG_LINKMODES_NTF = 5, + ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6, + ETHTOOL_MSG_DEBUG_GET_REPLY = 7, + ETHTOOL_MSG_DEBUG_NTF = 8, + ETHTOOL_MSG_WOL_GET_REPLY = 9, + ETHTOOL_MSG_WOL_NTF = 10, + ETHTOOL_MSG_FEATURES_GET_REPLY = 11, + ETHTOOL_MSG_FEATURES_SET_REPLY = 12, + ETHTOOL_MSG_FEATURES_NTF = 13, + ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14, + ETHTOOL_MSG_PRIVFLAGS_NTF = 15, + ETHTOOL_MSG_RINGS_GET_REPLY = 16, + ETHTOOL_MSG_RINGS_NTF = 17, + ETHTOOL_MSG_CHANNELS_GET_REPLY = 18, + ETHTOOL_MSG_CHANNELS_NTF = 19, + ETHTOOL_MSG_COALESCE_GET_REPLY = 20, + ETHTOOL_MSG_COALESCE_NTF = 21, + ETHTOOL_MSG_PAUSE_GET_REPLY = 22, + ETHTOOL_MSG_PAUSE_NTF = 23, + ETHTOOL_MSG_EEE_GET_REPLY = 24, + ETHTOOL_MSG_EEE_NTF = 25, + ETHTOOL_MSG_TSINFO_GET_REPLY = 26, + ETHTOOL_MSG_CABLE_TEST_NTF = 27, + ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 29, + ETHTOOL_MSG_FEC_GET_REPLY = 30, + ETHTOOL_MSG_FEC_NTF = 31, + 
ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 32, + ETHTOOL_MSG_STATS_GET_REPLY = 33, + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 34, + ETHTOOL_MSG_MODULE_GET_REPLY = 35, + ETHTOOL_MSG_MODULE_NTF = 36, + ETHTOOL_MSG_PSE_GET_REPLY = 37, + ETHTOOL_MSG_RSS_GET_REPLY = 38, + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 39, + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 40, + ETHTOOL_MSG_PLCA_NTF = 41, + ETHTOOL_MSG_MM_GET_REPLY = 42, + ETHTOOL_MSG_MM_NTF = 43, + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 44, + ETHTOOL_MSG_PHY_GET_REPLY = 45, + ETHTOOL_MSG_PHY_NTF = 46, + __ETHTOOL_MSG_KERNEL_CNT = 47, + ETHTOOL_MSG_KERNEL_MAX = 46, +}; + +enum ethtool_header_flags { + ETHTOOL_FLAG_COMPACT_BITSETS = 1, + ETHTOOL_FLAG_OMIT_REPLY = 2, + ETHTOOL_FLAG_STATS = 4, +}; + +enum { + ETHTOOL_A_HEADER_UNSPEC = 0, + ETHTOOL_A_HEADER_DEV_INDEX = 1, + ETHTOOL_A_HEADER_DEV_NAME = 2, + ETHTOOL_A_HEADER_FLAGS = 3, + ETHTOOL_A_HEADER_PHY_INDEX = 4, + __ETHTOOL_A_HEADER_CNT = 5, + ETHTOOL_A_HEADER_MAX = 4, +}; + +enum { + ETHTOOL_A_STRSET_UNSPEC = 0, + ETHTOOL_A_STRSET_HEADER = 1, + ETHTOOL_A_STRSET_STRINGSETS = 2, + ETHTOOL_A_STRSET_COUNTS_ONLY = 3, + __ETHTOOL_A_STRSET_CNT = 4, + ETHTOOL_A_STRSET_MAX = 3, +}; + +enum { + ETHTOOL_A_LINKINFO_UNSPEC = 0, + ETHTOOL_A_LINKINFO_HEADER = 1, + ETHTOOL_A_LINKINFO_PORT = 2, + ETHTOOL_A_LINKINFO_PHYADDR = 3, + ETHTOOL_A_LINKINFO_TP_MDIX = 4, + ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5, + ETHTOOL_A_LINKINFO_TRANSCEIVER = 6, + __ETHTOOL_A_LINKINFO_CNT = 7, + ETHTOOL_A_LINKINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_LINKMODES_UNSPEC = 0, + ETHTOOL_A_LINKMODES_HEADER = 1, + ETHTOOL_A_LINKMODES_AUTONEG = 2, + ETHTOOL_A_LINKMODES_OURS = 3, + ETHTOOL_A_LINKMODES_PEER = 4, + ETHTOOL_A_LINKMODES_SPEED = 5, + ETHTOOL_A_LINKMODES_DUPLEX = 6, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8, + ETHTOOL_A_LINKMODES_LANES = 9, + ETHTOOL_A_LINKMODES_RATE_MATCHING = 10, + __ETHTOOL_A_LINKMODES_CNT = 11, + ETHTOOL_A_LINKMODES_MAX = 10, +}; + +enum { + ETHTOOL_A_LINKSTATE_UNSPEC = 0, + ETHTOOL_A_LINKSTATE_HEADER = 1, + ETHTOOL_A_LINKSTATE_LINK = 2, + ETHTOOL_A_LINKSTATE_SQI = 3, + ETHTOOL_A_LINKSTATE_SQI_MAX = 4, + ETHTOOL_A_LINKSTATE_EXT_STATE = 5, + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 6, + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 7, + __ETHTOOL_A_LINKSTATE_CNT = 8, + ETHTOOL_A_LINKSTATE_MAX = 7, +}; + +enum { + ETHTOOL_A_DEBUG_UNSPEC = 0, + ETHTOOL_A_DEBUG_HEADER = 1, + ETHTOOL_A_DEBUG_MSGMASK = 2, + __ETHTOOL_A_DEBUG_CNT = 3, + ETHTOOL_A_DEBUG_MAX = 2, +}; + +enum { + ETHTOOL_A_WOL_UNSPEC = 0, + ETHTOOL_A_WOL_HEADER = 1, + ETHTOOL_A_WOL_MODES = 2, + ETHTOOL_A_WOL_SOPASS = 3, + __ETHTOOL_A_WOL_CNT = 4, + ETHTOOL_A_WOL_MAX = 3, +}; + +enum { + ETHTOOL_A_FEATURES_UNSPEC = 0, + ETHTOOL_A_FEATURES_HEADER = 1, + ETHTOOL_A_FEATURES_HW = 2, + ETHTOOL_A_FEATURES_WANTED = 3, + ETHTOOL_A_FEATURES_ACTIVE = 4, + ETHTOOL_A_FEATURES_NOCHANGE = 5, + __ETHTOOL_A_FEATURES_CNT = 6, + ETHTOOL_A_FEATURES_MAX = 5, +}; + +enum { + ETHTOOL_A_PRIVFLAGS_UNSPEC = 0, + ETHTOOL_A_PRIVFLAGS_HEADER = 1, + ETHTOOL_A_PRIVFLAGS_FLAGS = 2, + __ETHTOOL_A_PRIVFLAGS_CNT = 3, + ETHTOOL_A_PRIVFLAGS_MAX = 2, +}; + +enum { + ETHTOOL_A_RINGS_UNSPEC = 0, + ETHTOOL_A_RINGS_HEADER = 1, + ETHTOOL_A_RINGS_RX_MAX = 2, + ETHTOOL_A_RINGS_RX_MINI_MAX = 3, + ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4, + ETHTOOL_A_RINGS_TX_MAX = 5, + ETHTOOL_A_RINGS_RX = 6, + ETHTOOL_A_RINGS_RX_MINI = 7, + ETHTOOL_A_RINGS_RX_JUMBO = 8, + ETHTOOL_A_RINGS_TX = 9, + ETHTOOL_A_RINGS_RX_BUF_LEN = 10, + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 11, + ETHTOOL_A_RINGS_CQE_SIZE = 12, + 
ETHTOOL_A_RINGS_TX_PUSH = 13, + ETHTOOL_A_RINGS_RX_PUSH = 14, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 15, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 16, + __ETHTOOL_A_RINGS_CNT = 17, + ETHTOOL_A_RINGS_MAX = 16, +}; + +enum { + ETHTOOL_A_CHANNELS_UNSPEC = 0, + ETHTOOL_A_CHANNELS_HEADER = 1, + ETHTOOL_A_CHANNELS_RX_MAX = 2, + ETHTOOL_A_CHANNELS_TX_MAX = 3, + ETHTOOL_A_CHANNELS_OTHER_MAX = 4, + ETHTOOL_A_CHANNELS_COMBINED_MAX = 5, + ETHTOOL_A_CHANNELS_RX_COUNT = 6, + ETHTOOL_A_CHANNELS_TX_COUNT = 7, + ETHTOOL_A_CHANNELS_OTHER_COUNT = 8, + ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9, + __ETHTOOL_A_CHANNELS_CNT = 10, + ETHTOOL_A_CHANNELS_MAX = 9, +}; + +enum { + ETHTOOL_A_COALESCE_UNSPEC = 0, + ETHTOOL_A_COALESCE_HEADER = 1, + ETHTOOL_A_COALESCE_RX_USECS = 2, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3, + ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5, + ETHTOOL_A_COALESCE_TX_USECS = 6, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7, + ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9, + ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12, + ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13, + ETHTOOL_A_COALESCE_RX_USECS_LOW = 14, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15, + ETHTOOL_A_COALESCE_TX_USECS_LOW = 16, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17, + ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18, + ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20, + ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22, + ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23, + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 24, + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 25, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES = 26, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES = 27, + ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS = 28, + ETHTOOL_A_COALESCE_RX_PROFILE = 29, + ETHTOOL_A_COALESCE_TX_PROFILE = 30, + __ETHTOOL_A_COALESCE_CNT = 31, + ETHTOOL_A_COALESCE_MAX = 30, +}; + +enum { + ETHTOOL_A_PAUSE_UNSPEC = 0, + ETHTOOL_A_PAUSE_HEADER = 1, + ETHTOOL_A_PAUSE_AUTONEG = 2, + ETHTOOL_A_PAUSE_RX = 3, + ETHTOOL_A_PAUSE_TX = 4, + ETHTOOL_A_PAUSE_STATS = 5, + ETHTOOL_A_PAUSE_STATS_SRC = 6, + __ETHTOOL_A_PAUSE_CNT = 7, + ETHTOOL_A_PAUSE_MAX = 6, +}; + +enum { + ETHTOOL_A_PAUSE_STAT_UNSPEC = 0, + ETHTOOL_A_PAUSE_STAT_PAD = 1, + ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 2, + ETHTOOL_A_PAUSE_STAT_RX_FRAMES = 3, + __ETHTOOL_A_PAUSE_STAT_CNT = 4, + ETHTOOL_A_PAUSE_STAT_MAX = 3, +}; + +enum { + ETHTOOL_A_EEE_UNSPEC = 0, + ETHTOOL_A_EEE_HEADER = 1, + ETHTOOL_A_EEE_MODES_OURS = 2, + ETHTOOL_A_EEE_MODES_PEER = 3, + ETHTOOL_A_EEE_ACTIVE = 4, + ETHTOOL_A_EEE_ENABLED = 5, + ETHTOOL_A_EEE_TX_LPI_ENABLED = 6, + ETHTOOL_A_EEE_TX_LPI_TIMER = 7, + __ETHTOOL_A_EEE_CNT = 8, + ETHTOOL_A_EEE_MAX = 7, +}; + +enum { + ETHTOOL_A_TSINFO_UNSPEC = 0, + ETHTOOL_A_TSINFO_HEADER = 1, + ETHTOOL_A_TSINFO_TIMESTAMPING = 2, + ETHTOOL_A_TSINFO_TX_TYPES = 3, + ETHTOOL_A_TSINFO_RX_FILTERS = 4, + ETHTOOL_A_TSINFO_PHC_INDEX = 5, + ETHTOOL_A_TSINFO_STATS = 6, + __ETHTOOL_A_TSINFO_CNT = 7, + ETHTOOL_A_TSINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC = 0, + ETHTOOL_A_PHC_VCLOCKS_HEADER = 1, + ETHTOOL_A_PHC_VCLOCKS_NUM = 2, + ETHTOOL_A_PHC_VCLOCKS_INDEX = 3, + __ETHTOOL_A_PHC_VCLOCKS_CNT = 4, + ETHTOOL_A_PHC_VCLOCKS_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_HEADER = 1, + __ETHTOOL_A_CABLE_TEST_CNT = 2, + ETHTOOL_A_CABLE_TEST_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0, + 
ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG = 2, + __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3, + ETHTOOL_A_CABLE_TEST_TDR_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC = 0, + ETHTOOL_A_TUNNEL_INFO_HEADER = 1, + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 2, + __ETHTOOL_A_TUNNEL_INFO_CNT = 3, + ETHTOOL_A_TUNNEL_INFO_MAX = 2, +}; + +enum { + ETHTOOL_A_FEC_UNSPEC = 0, + ETHTOOL_A_FEC_HEADER = 1, + ETHTOOL_A_FEC_MODES = 2, + ETHTOOL_A_FEC_AUTO = 3, + ETHTOOL_A_FEC_ACTIVE = 4, + ETHTOOL_A_FEC_STATS = 5, + __ETHTOOL_A_FEC_CNT = 6, + ETHTOOL_A_FEC_MAX = 5, +}; + +enum { + ETHTOOL_A_MODULE_EEPROM_UNSPEC = 0, + ETHTOOL_A_MODULE_EEPROM_HEADER = 1, + ETHTOOL_A_MODULE_EEPROM_OFFSET = 2, + ETHTOOL_A_MODULE_EEPROM_LENGTH = 3, + ETHTOOL_A_MODULE_EEPROM_PAGE = 4, + ETHTOOL_A_MODULE_EEPROM_BANK = 5, + ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS = 6, + ETHTOOL_A_MODULE_EEPROM_DATA = 7, + __ETHTOOL_A_MODULE_EEPROM_CNT = 8, + ETHTOOL_A_MODULE_EEPROM_MAX = 7, +}; + +enum { + ETHTOOL_A_STATS_UNSPEC = 0, + ETHTOOL_A_STATS_PAD = 1, + ETHTOOL_A_STATS_HEADER = 2, + ETHTOOL_A_STATS_GROUPS = 3, + ETHTOOL_A_STATS_GRP = 4, + ETHTOOL_A_STATS_SRC = 5, + __ETHTOOL_A_STATS_CNT = 6, + ETHTOOL_A_STATS_MAX = 5, +}; + +enum { + ETHTOOL_STATS_ETH_PHY = 0, + ETHTOOL_STATS_ETH_MAC = 1, + ETHTOOL_STATS_ETH_CTRL = 2, + ETHTOOL_STATS_RMON = 3, + __ETHTOOL_STATS_CNT = 4, +}; + +enum { + ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR = 0, + __ETHTOOL_A_STATS_ETH_PHY_CNT = 1, + ETHTOOL_A_STATS_ETH_PHY_MAX = 0, +}; + +enum { + ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT = 0, + ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL = 1, + ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL = 2, + ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT = 3, + ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR = 4, + ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR = 5, + ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES = 6, + ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER = 7, + ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL = 8, + ETHTOOL_A_STATS_ETH_MAC_11_XS_COL = 9, + ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR = 10, + ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR = 11, + ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES = 12, + ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR = 13, + ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST = 14, + ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST = 15, + ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER = 16, + ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST = 17, + ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST = 18, + ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR = 19, + ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN = 20, + ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR = 21, + __ETHTOOL_A_STATS_ETH_MAC_CNT = 22, + ETHTOOL_A_STATS_ETH_MAC_MAX = 21, +}; + +enum { + ETHTOOL_A_STATS_ETH_CTRL_3_TX = 0, + ETHTOOL_A_STATS_ETH_CTRL_4_RX = 1, + ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP = 2, + __ETHTOOL_A_STATS_ETH_CTRL_CNT = 3, + ETHTOOL_A_STATS_ETH_CTRL_MAX = 2, +}; + +enum { + ETHTOOL_A_STATS_RMON_UNDERSIZE = 0, + ETHTOOL_A_STATS_RMON_OVERSIZE = 1, + ETHTOOL_A_STATS_RMON_FRAG = 2, + ETHTOOL_A_STATS_RMON_JABBER = 3, + __ETHTOOL_A_STATS_RMON_CNT = 4, + ETHTOOL_A_STATS_RMON_MAX = 3, +}; + +enum { + ETHTOOL_A_MODULE_UNSPEC = 0, + ETHTOOL_A_MODULE_HEADER = 1, + ETHTOOL_A_MODULE_POWER_MODE_POLICY = 2, + ETHTOOL_A_MODULE_POWER_MODE = 3, + __ETHTOOL_A_MODULE_CNT = 4, + ETHTOOL_A_MODULE_MAX = 3, +}; + +enum { + ETHTOOL_A_PSE_UNSPEC = 0, + ETHTOOL_A_PSE_HEADER = 1, + ETHTOOL_A_PODL_PSE_ADMIN_STATE = 2, + ETHTOOL_A_PODL_PSE_ADMIN_CONTROL = 3, + ETHTOOL_A_PODL_PSE_PW_D_STATUS = 4, + ETHTOOL_A_C33_PSE_ADMIN_STATE = 5, + ETHTOOL_A_C33_PSE_ADMIN_CONTROL = 6, + ETHTOOL_A_C33_PSE_PW_D_STATUS = 7, + ETHTOOL_A_C33_PSE_PW_CLASS = 8, + ETHTOOL_A_C33_PSE_ACTUAL_PW = 9, + 
ETHTOOL_A_C33_PSE_EXT_STATE = 10, + ETHTOOL_A_C33_PSE_EXT_SUBSTATE = 11, + ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT = 12, + ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES = 13, + __ETHTOOL_A_PSE_CNT = 14, + ETHTOOL_A_PSE_MAX = 13, +}; + +enum { + ETHTOOL_A_RSS_UNSPEC = 0, + ETHTOOL_A_RSS_HEADER = 1, + ETHTOOL_A_RSS_CONTEXT = 2, + ETHTOOL_A_RSS_HFUNC = 3, + ETHTOOL_A_RSS_INDIR = 4, + ETHTOOL_A_RSS_HKEY = 5, + ETHTOOL_A_RSS_INPUT_XFRM = 6, + ETHTOOL_A_RSS_START_CONTEXT = 7, + __ETHTOOL_A_RSS_CNT = 8, + ETHTOOL_A_RSS_MAX = 7, +}; + +enum { + ETHTOOL_A_PLCA_UNSPEC = 0, + ETHTOOL_A_PLCA_HEADER = 1, + ETHTOOL_A_PLCA_VERSION = 2, + ETHTOOL_A_PLCA_ENABLED = 3, + ETHTOOL_A_PLCA_STATUS = 4, + ETHTOOL_A_PLCA_NODE_CNT = 5, + ETHTOOL_A_PLCA_NODE_ID = 6, + ETHTOOL_A_PLCA_TO_TMR = 7, + ETHTOOL_A_PLCA_BURST_CNT = 8, + ETHTOOL_A_PLCA_BURST_TMR = 9, + __ETHTOOL_A_PLCA_CNT = 10, + ETHTOOL_A_PLCA_MAX = 9, +}; + +enum { + ETHTOOL_A_MM_UNSPEC = 0, + ETHTOOL_A_MM_HEADER = 1, + ETHTOOL_A_MM_PMAC_ENABLED = 2, + ETHTOOL_A_MM_TX_ENABLED = 3, + ETHTOOL_A_MM_TX_ACTIVE = 4, + ETHTOOL_A_MM_TX_MIN_FRAG_SIZE = 5, + ETHTOOL_A_MM_RX_MIN_FRAG_SIZE = 6, + ETHTOOL_A_MM_VERIFY_ENABLED = 7, + ETHTOOL_A_MM_VERIFY_STATUS = 8, + ETHTOOL_A_MM_VERIFY_TIME = 9, + ETHTOOL_A_MM_MAX_VERIFY_TIME = 10, + ETHTOOL_A_MM_STATS = 11, + __ETHTOOL_A_MM_CNT = 12, + ETHTOOL_A_MM_MAX = 11, +}; + +enum { + ETHTOOL_A_MODULE_FW_FLASH_UNSPEC = 0, + ETHTOOL_A_MODULE_FW_FLASH_HEADER = 1, + ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME = 2, + ETHTOOL_A_MODULE_FW_FLASH_PASSWORD = 3, + ETHTOOL_A_MODULE_FW_FLASH_STATUS = 4, + ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG = 5, + ETHTOOL_A_MODULE_FW_FLASH_DONE = 6, + ETHTOOL_A_MODULE_FW_FLASH_TOTAL = 7, + __ETHTOOL_A_MODULE_FW_FLASH_CNT = 8, + ETHTOOL_A_MODULE_FW_FLASH_MAX = 7, +}; + +enum { + ETHTOOL_A_PHY_UNSPEC = 0, + ETHTOOL_A_PHY_HEADER = 1, + ETHTOOL_A_PHY_INDEX = 2, + ETHTOOL_A_PHY_DRVNAME = 3, + ETHTOOL_A_PHY_NAME = 4, + ETHTOOL_A_PHY_UPSTREAM_TYPE = 5, + ETHTOOL_A_PHY_UPSTREAM_INDEX = 6, + ETHTOOL_A_PHY_UPSTREAM_SFP_NAME = 7, + ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME = 8, + __ETHTOOL_A_PHY_CNT = 9, + ETHTOOL_A_PHY_MAX = 8, +}; + +struct ethnl_req_info { + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; + u32 phy_index; +}; + +struct ethnl_reply_data { + struct net_device *dev; +}; + +struct ethnl_request_ops { + u8 request_cmd; + u8 reply_cmd; + u16 hdr_attr; + unsigned int req_info_size; + unsigned int reply_data_size; + bool allow_nodev_do; + u8 set_ntf_cmd; + int (*parse_request)(struct ethnl_req_info *, struct nlattr **, struct netlink_ext_ack *); + int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, const struct genl_info *); + int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *); + int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, const struct ethnl_reply_data *); + void (*cleanup_data)(struct ethnl_reply_data *); + int (*set_validate)(struct ethnl_req_info *, struct genl_info *); + int (*set)(struct ethnl_req_info *, struct genl_info *); +}; + +struct pause_req_info { + struct ethnl_req_info base; + enum ethtool_mac_stats_src src; +}; + +struct pause_reply_data { + struct ethnl_reply_data base; + struct ethtool_pauseparam pauseparam; + long: 32; + struct ethtool_pause_stats pausestat; +}; + +enum nft_data_types { + NFT_DATA_VALUE = 0, + NFT_DATA_VERDICT = 4294967040, +}; + +enum nft_byteorder_ops { + NFT_BYTEORDER_NTOH = 0, + NFT_BYTEORDER_HTON = 1, +}; + +enum nft_byteorder_attributes { + NFTA_BYTEORDER_UNSPEC = 0, + NFTA_BYTEORDER_SREG = 1, + 
NFTA_BYTEORDER_DREG = 2, + NFTA_BYTEORDER_OP = 3, + NFTA_BYTEORDER_LEN = 4, + NFTA_BYTEORDER_SIZE = 5, + __NFTA_BYTEORDER_MAX = 6, +}; + +enum flow_action_hw_stats { + FLOW_ACTION_HW_STATS_IMMEDIATE = 1, + FLOW_ACTION_HW_STATS_DELAYED = 2, + FLOW_ACTION_HW_STATS_ANY = 3, + FLOW_ACTION_HW_STATS_DISABLED = 4, + FLOW_ACTION_HW_STATS_DONT_CARE = 7, +}; + +struct flow_stats { + u64 pkts; + u64 bytes; + u64 drops; + u64 lastused; + enum flow_action_hw_stats used_hw_stats; + bool used_hw_stats_valid; +}; + +struct nft_chain; + +struct nft_verdict { + u32 code; + struct nft_chain *chain; +}; + +struct nft_rule_blob; + +struct nft_table; + +struct nft_chain { + struct nft_rule_blob *blob_gen_0; + struct nft_rule_blob *blob_gen_1; + struct list_head rules; + struct list_head list; + struct rhlist_head rhlhead; + struct nft_table *table; + long: 32; + u64 handle; + u32 use; + u8 flags: 5; + u8 bound: 1; + u8 genmask: 2; + char *name; + u16 udlen; + u8 *udata; + struct callback_head callback_head; + struct nft_rule_blob *blob_next; +}; + +struct nft_data { + union { + u32 data[4]; + struct nft_verdict verdict; + }; +}; + +struct nft_regs { + union { + u32 data[20]; + struct nft_verdict verdict; + }; +}; + +struct nft_expr_ops; + +struct nft_expr { + const struct nft_expr_ops *ops; + long: 32; + unsigned char data[0]; +}; + +struct nft_regs_track { + struct { + const struct nft_expr *selector; + const struct nft_expr *bitwise; + u8 num_reg; + } regs[20]; + const struct nft_expr *cur; + const struct nft_expr *last; +}; + +struct nft_ctx { + struct net *net; + struct nft_table *table; + struct nft_chain *chain; + const struct nlattr * const *nla; + u32 portid; + u32 seq; + u16 flags; + u8 family; + u8 level; + bool report; + long unsigned int reg_inited[1]; +}; + +struct nft_table { + struct list_head list; + struct rhltable chains_ht; + struct list_head chains; + struct list_head sets; + struct list_head objects; + struct list_head flowtables; + possible_net_t net; + long: 32; + u64 hgenerator; + u64 handle; + u32 use; + u16 family: 6; + u16 flags: 8; + u16 genmask: 2; + u32 nlpid; + char *name; + u16 udlen; + u8 *udata; + u8 validate_state; + long: 32; +}; + +enum nft_trans_phase { + NFT_TRANS_PREPARE = 0, + NFT_TRANS_PREPARE_ERROR = 1, + NFT_TRANS_ABORT = 2, + NFT_TRANS_COMMIT = 3, + NFT_TRANS_RELEASE = 4, +}; + +struct nft_offload_ctx; + +struct nft_flow_rule; + +struct nft_expr_type; + +struct nft_expr_ops { + void (*eval)(const struct nft_expr *, struct nft_regs *, const struct nft_pktinfo *); + int (*clone)(struct nft_expr *, const struct nft_expr *, gfp_t); + unsigned int size; + int (*init)(const struct nft_ctx *, const struct nft_expr *, const struct nlattr * const *); + void (*activate)(const struct nft_ctx *, const struct nft_expr *); + void (*deactivate)(const struct nft_ctx *, const struct nft_expr *, enum nft_trans_phase); + void (*destroy)(const struct nft_ctx *, const struct nft_expr *); + void (*destroy_clone)(const struct nft_ctx *, const struct nft_expr *); + int (*dump)(struct sk_buff *, const struct nft_expr *, bool); + int (*validate)(const struct nft_ctx *, const struct nft_expr *); + bool (*reduce)(struct nft_regs_track *, const struct nft_expr *); + bool (*gc)(struct net *, const struct nft_expr *); + int (*offload)(struct nft_offload_ctx *, struct nft_flow_rule *, const struct nft_expr *); + bool (*offload_action)(const struct nft_expr *); + void (*offload_stats)(struct nft_expr *, const struct flow_stats *); + const struct nft_expr_type *type; + void *data; +}; + +struct 
nft_expr_type { + const struct nft_expr_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); + void (*release_ops)(const struct nft_expr_ops *); + const struct nft_expr_ops *ops; + const struct nft_expr_ops *inner_ops; + struct list_head list; + const char *name; + struct module *owner; + const struct nla_policy *policy; + unsigned int maxattr; + u8 family; + u8 flags; +}; + +struct nft_rule_blob { + long unsigned int size; + long: 32; + unsigned char data[0]; +}; + +struct nft_byteorder { + u8 sreg; + u8 dreg; + enum nft_byteorder_ops op: 8; + u8 len; + u8 size; +}; + +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER = 0, + AUDIT_XT_OP_REPLACE = 1, + AUDIT_XT_OP_UNREGISTER = 2, + AUDIT_NFT_OP_TABLE_REGISTER = 3, + AUDIT_NFT_OP_TABLE_UNREGISTER = 4, + AUDIT_NFT_OP_CHAIN_REGISTER = 5, + AUDIT_NFT_OP_CHAIN_UNREGISTER = 6, + AUDIT_NFT_OP_RULE_REGISTER = 7, + AUDIT_NFT_OP_RULE_UNREGISTER = 8, + AUDIT_NFT_OP_SET_REGISTER = 9, + AUDIT_NFT_OP_SET_UNREGISTER = 10, + AUDIT_NFT_OP_SETELEM_REGISTER = 11, + AUDIT_NFT_OP_SETELEM_UNREGISTER = 12, + AUDIT_NFT_OP_GEN_REGISTER = 13, + AUDIT_NFT_OP_OBJ_REGISTER = 14, + AUDIT_NFT_OP_OBJ_UNREGISTER = 15, + AUDIT_NFT_OP_OBJ_RESET = 16, + AUDIT_NFT_OP_FLOWTABLE_REGISTER = 17, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER = 18, + AUDIT_NFT_OP_SETELEM_RESET = 19, + AUDIT_NFT_OP_RULE_RESET = 20, + AUDIT_NFT_OP_INVALID = 21, +}; + +struct xt_action_param; + +struct xt_mtchk_param; + +struct xt_mtdtor_param; + +struct xt_match { + struct list_head list; + const char name[29]; + u_int8_t revision; + bool (*match)(const struct sk_buff *, struct xt_action_param *); + int (*checkentry)(const struct xt_mtchk_param *); + void (*destroy)(const struct xt_mtdtor_param *); + struct module *me; + const char *table; + unsigned int matchsize; + unsigned int usersize; + unsigned int hooks; + short unsigned int proto; + short unsigned int family; +}; + +struct xt_entry_match { + union { + struct { + __u16 match_size; + char name[29]; + __u8 revision; + } user; + struct { + __u16 match_size; + struct xt_match *match; + } kernel; + __u16 match_size; + } u; + unsigned char data[0]; +}; + +struct xt_tgchk_param; + +struct xt_tgdtor_param; + +struct xt_target { + struct list_head list; + const char name[29]; + u_int8_t revision; + unsigned int (*target)(struct sk_buff *, const struct xt_action_param *); + int (*checkentry)(const struct xt_tgchk_param *); + void (*destroy)(const struct xt_tgdtor_param *); + struct module *me; + const char *table; + unsigned int targetsize; + unsigned int usersize; + unsigned int hooks; + short unsigned int proto; + short unsigned int family; +}; + +struct xt_entry_target { + union { + struct { + __u16 target_size; + char name[29]; + __u8 revision; + } user; + struct { + __u16 target_size; + struct xt_target *target; + } kernel; + __u16 target_size; + } u; + unsigned char data[0]; +}; + +struct xt_standard_target { + struct xt_entry_target target; + int verdict; +}; + +struct xt_error_target { + struct xt_entry_target target; + char errorname[30]; +}; + +struct xt_counters { + __u64 pcnt; + __u64 bcnt; +}; + +struct xt_counters_info { + char name[32]; + unsigned int num_counters; + long: 32; + struct xt_counters counters[0]; +}; + +struct xt_action_param { + union { + const struct xt_match *match; + const struct xt_target *target; + }; + union { + const void *matchinfo; + const void *targinfo; + }; + const struct nf_hook_state *state; + unsigned int thoff; + u16 fragoff; + bool hotdrop; +}; + +struct xt_mtchk_param { + struct net *net; + const 
char *table; + const void *entryinfo; + const struct xt_match *match; + void *matchinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +struct xt_mtdtor_param { + struct net *net; + const struct xt_match *match; + void *matchinfo; + u_int8_t family; +}; + +struct xt_tgchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_target *target; + void *targinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +struct xt_tgdtor_param { + struct net *net; + const struct xt_target *target; + void *targinfo; + u_int8_t family; +}; + +struct xt_table_info; + +struct xt_table { + struct list_head list; + unsigned int valid_hooks; + struct xt_table_info *private; + struct nf_hook_ops *ops; + struct module *me; + u_int8_t af; + int priority; + const char name[32]; +}; + +struct xt_table_info { + unsigned int size; + unsigned int number; + unsigned int initial_entries; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int stacksize; + void ***jumpstack; + long: 32; + unsigned char entries[0]; +}; + +struct xt_percpu_counter_alloc_state { + unsigned int off; + const char *mem; +}; + +struct xt_template { + struct list_head list; + int (*table_init)(struct net *); + struct module *me; + char name[32]; +}; + +struct xt_pernet { + struct list_head tables[11]; +}; + +struct xt_af { + struct mutex mutex; + struct list_head match; + struct list_head target; +}; + +struct nf_mttg_trav { + struct list_head *head; + struct list_head *curr; + uint8_t class; +}; + +enum { + MTTG_TRAV_INIT = 0, + MTTG_TRAV_NFP_UNSPEC = 1, + MTTG_TRAV_NFP_SPEC = 2, + MTTG_TRAV_DONE = 3, +}; + +enum nf_ip_hook_priorities { + NF_IP_PRI_FIRST = -2147483648, + NF_IP_PRI_RAW_BEFORE_DEFRAG = -450, + NF_IP_PRI_CONNTRACK_DEFRAG = -400, + NF_IP_PRI_RAW = -300, + NF_IP_PRI_SELINUX_FIRST = -225, + NF_IP_PRI_CONNTRACK = -200, + NF_IP_PRI_MANGLE = -150, + NF_IP_PRI_NAT_DST = -100, + NF_IP_PRI_FILTER = 0, + NF_IP_PRI_SECURITY = 50, + NF_IP_PRI_NAT_SRC = 100, + NF_IP_PRI_SELINUX_LAST = 225, + NF_IP_PRI_CONNTRACK_HELPER = 300, + NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, + NF_IP_PRI_LAST = 2147483647, +}; + +struct ipt_ip { + struct in_addr src; + struct in_addr dst; + struct in_addr smsk; + struct in_addr dmsk; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u16 proto; + __u8 flags; + __u8 invflags; +}; + +struct ipt_entry { + struct ipt_ip ip; + unsigned int nfcache; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct ipt_replace { + char name[32]; + unsigned int valid_hooks; + unsigned int num_entries; + unsigned int size; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int num_counters; + struct xt_counters *counters; + long: 32; + struct ipt_entry entries[0]; +}; + +struct ipt_standard { + struct ipt_entry entry; + struct xt_standard_target target; + long: 32; +}; + +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; + u32 flags; +}; + +struct flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct flow_dissector_key_tags { + u32 flow_label; +}; + +struct flow_dissector_key_vlan { + union { + struct { + u16 vlan_id: 12; + u16 vlan_dei: 1; + u16 vlan_priority: 3; + }; + __be16 vlan_tci; + }; + __be16 vlan_tpid; + __be16 vlan_eth_type; + u16 padding; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; +}; + +struct 
flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +struct flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +struct flow_dissector_key_tipc { + __be32 key; +}; + +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc tipckey; + }; +}; + +struct flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +struct flow_dissector_key_icmp { + struct { + u8 type; + u8 code; + }; + u16 id; +}; + +struct flow_keys { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_icmp icmp; + struct flow_dissector_key_addrs addrs; + long: 32; +}; + +struct icmpv6_echo { + __be16 identifier; + __be16 sequence; +}; + +struct icmpv6_nd_advt { + __u32 reserved: 5; + __u32 override: 1; + __u32 solicited: 1; + __u32 router: 1; + __u32 reserved2: 24; +}; + +struct icmpv6_nd_ra { + __u8 hop_limit; + __u8 reserved: 3; + __u8 router_pref: 2; + __u8 home_agent: 1; + __u8 other: 1; + __u8 managed: 1; + __be16 rt_lifetime; +}; + +struct icmp6hdr { + __u8 icmp6_type; + __u8 icmp6_code; + __sum16 icmp6_cksum; + union { + __be32 un_data32[1]; + __be16 un_data16[2]; + __u8 un_data8[4]; + struct icmpv6_echo u_echo; + struct icmpv6_nd_advt u_nd_advt; + struct icmpv6_nd_ra u_nd_ra; + } icmp6_dataun; +}; + +enum { + XFRM_LOOKUP_ICMP = 1, + XFRM_LOOKUP_QUEUE = 2, + XFRM_LOOKUP_KEEP_DST_REF = 4, +}; + +typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *, const struct inet6_skb_parm *); + +enum flowlabel_reflect { + FLOWLABEL_REFLECT_ESTABLISHED = 1, + FLOWLABEL_REFLECT_TCP_RESET = 2, + FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, +}; + +struct ipv4_addr_key { + __be32 addr; + int vif; +}; + +struct inetpeer_addr { + union { + struct ipv4_addr_key a4; + struct in6_addr a6; + u32 key[4]; + }; + __u16 family; +}; + +struct inet_peer { + struct rb_node rb_node; + struct inetpeer_addr daddr; + u32 metrics[17]; + u32 rate_tokens; + u32 n_redirects; + long unsigned int rate_last; + union { + struct { + atomic_t rid; + }; + struct callback_head rcu; + }; + __u32 dtime; + refcount_t refcnt; +}; + +struct icmphdr { + __u8 type; + __u8 code; + __sum16 checksum; + union { + struct { + __be16 id; + __be16 sequence; + } echo; + __be32 gateway; + struct { + __be16 __unused; + __be16 mtu; + } frag; + __u8 reserved[4]; + } un; +}; + +struct xfrm_offload { + struct { + __u32 low; + __u32 hi; + } seq; + __u32 flags; + __u32 status; + __u32 orig_mac_len; + __u8 proto; + __u8 inner_ipproto; +}; + +struct sec_path { + int len; + int olen; + int verified_cnt; + struct xfrm_state *xvec[6]; + struct xfrm_offload ovec[1]; +}; + +struct icmpv6_msg { + struct sk_buff *skb; + int offset; + uint8_t type; +}; + +struct icmp6_err { + int err; + int fatal; +}; + +struct inet_timewait_sock { + struct sock_common __tw_common; + __u32 tw_mark; + unsigned char tw_substate; + unsigned char tw_rcv_wscale; + __be16 tw_sport; + unsigned int tw_transparent: 1; + unsigned int tw_flowlabel: 20; + unsigned int tw_usec_ts: 1; + unsigned int tw_pad: 2; + unsigned int tw_tos: 8; + u32 tw_txhash; + u32 tw_priority; + struct timer_list tw_timer; + struct inet_bind_bucket *tw_tb; 
+ struct inet_bind2_bucket *tw_tb2; +}; + +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + __be16 sport; + u16 dport; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + struct sock *selected_sk; + u32 ingress_ifindex; + bool no_reuseport; +}; + +struct michael_mic_ctx { + u32 l; + u32 r; +}; + +struct tasklet_struct { + struct tasklet_struct *next; + long unsigned int state; + atomic_t count; + bool use_callback; + union { + void (*func)(long unsigned int); + void (*callback)(struct tasklet_struct *); + }; + long unsigned int data; +}; + +struct ieee80211_meshconf_ie { + u8 meshconf_psel; + u8 meshconf_pmetric; + u8 meshconf_congest; + u8 meshconf_synch; + u8 meshconf_auth; + u8 meshconf_form; + u8 meshconf_cap; +}; + +struct ieee80211_twt_setup { + u8 dialog_token; + u8 element_id; + u8 length; + u8 control; + u8 params[0]; +}; + +struct codel_params { + codel_time_t target; + codel_time_t ce_threshold; + codel_time_t interval; + u32 mtu; + bool ecn; + u8 ce_threshold_selector; + u8 ce_threshold_mask; +}; + +struct codel_vars { + u32 count; + u32 lastcount; + bool dropping; + u16 rec_inv_sqrt; + codel_time_t first_above_time; + codel_time_t drop_next; + codel_time_t ldelay; +}; + +enum ieee80211_max_queues { + IEEE80211_MAX_QUEUES = 16, + IEEE80211_MAX_QUEUE_MAP = 65535, +}; + +struct ieee80211_low_level_stats { + unsigned int dot11ACKFailureCount; + unsigned int dot11RTSFailureCount; + unsigned int dot11FCSErrorCount; + unsigned int dot11RTSSuccessCount; +}; + +enum ieee80211_chanctx_switch_mode { + CHANCTX_SWMODE_REASSIGN_VIF = 0, + CHANCTX_SWMODE_SWAP_CONTEXTS = 1, +}; + +struct ieee80211_vif_chanctx_switch { + struct ieee80211_vif *vif; + struct ieee80211_bss_conf *link_conf; + struct ieee80211_chanctx_conf *old_ctx; + struct ieee80211_chanctx_conf *new_ctx; +}; + +enum ieee80211_event_type { + RSSI_EVENT = 0, + MLME_EVENT = 1, + BAR_RX_EVENT = 2, + BA_FRAME_TIMEOUT = 3, +}; + +struct ieee80211_rssi_event { + enum ieee80211_rssi_event_data data; +}; + +enum ieee80211_mlme_event_data { + AUTH_EVENT = 0, + ASSOC_EVENT = 1, + DEAUTH_RX_EVENT = 2, + DEAUTH_TX_EVENT = 3, +}; + +enum ieee80211_mlme_event_status { + MLME_SUCCESS = 0, + MLME_DENIED = 1, + MLME_TIMEOUT = 2, +}; + +struct ieee80211_mlme_event { + enum ieee80211_mlme_event_data data; + enum ieee80211_mlme_event_status status; + u16 reason; +}; + +struct ieee80211_ba_event { + struct ieee80211_sta *sta; + u16 tid; + u16 ssn; +}; + +struct ieee80211_event { + enum ieee80211_event_type type; + union { + struct ieee80211_rssi_event rssi; + struct ieee80211_mlme_event mlme; + struct ieee80211_ba_event ba; + } u; +}; + +struct ieee80211_rx_status { + u64 mactime; + union { + u64 boottime_ns; + ktime_t ack_tx_hwtstamp; + }; + u32 device_timestamp; + u32 ampdu_reference; + u32 flag; + u16 freq: 13; + u16 freq_offset: 1; + u8 enc_flags; + u8 encoding: 3; + u8 bw: 4; + union { + struct { + u8 he_ru: 3; + u8 he_gi: 2; + u8 he_dcm: 1; + }; + struct { + u8 ru: 4; + u8 gi: 2; + } eht; + }; + u8 rate_idx; + u8 nss; + u8 rx_flags; + u8 band; + u8 antenna; + s8 signal; + u8 chains; + s8 chain_signal[4]; + u8 zero_length_psdu_type; + u8 link_valid: 1; + u8 link_id: 4; +}; + +enum ieee80211_neg_ttlm_res { + NEG_TTLM_RES_ACCEPT = 0, + NEG_TTLM_RES_REJECT = 1, + NEG_TTLM_RES_SUGGEST_PREFERRED = 2, +}; + +enum set_key_cmd { + SET_KEY = 0, + DISABLE_KEY = 1, +}; + +enum sta_notify_cmd { + STA_NOTIFY_SLEEP = 0, + STA_NOTIFY_AWAKE = 1, +}; + +struct 
ieee80211_tx_control { + struct ieee80211_sta *sta; +}; + +struct ieee80211_scan_request { + struct ieee80211_scan_ies ies; + struct cfg80211_scan_request req; +}; + +struct ieee80211_tdls_ch_sw_params { + struct ieee80211_sta *sta; + struct cfg80211_chan_def *chandef; + u8 action_code; + u32 status; + u32 timestamp; + u16 switch_time; + u16 switch_timeout; + struct sk_buff *tmpl_skb; + u32 ch_sw_tm_ie; +}; + +enum ieee80211_ampdu_mlme_action { + IEEE80211_AMPDU_RX_START = 0, + IEEE80211_AMPDU_RX_STOP = 1, + IEEE80211_AMPDU_TX_START = 2, + IEEE80211_AMPDU_TX_STOP_CONT = 3, + IEEE80211_AMPDU_TX_STOP_FLUSH = 4, + IEEE80211_AMPDU_TX_STOP_FLUSH_CONT = 5, + IEEE80211_AMPDU_TX_OPERATIONAL = 6, +}; + +struct ieee80211_ampdu_params { + enum ieee80211_ampdu_mlme_action action; + struct ieee80211_sta *sta; + u16 tid; + u16 ssn; + u16 buf_size; + bool amsdu; + u16 timeout; +}; + +enum ieee80211_frame_release_type { + IEEE80211_FRAME_RELEASE_PSPOLL = 0, + IEEE80211_FRAME_RELEASE_UAPSD = 1, +}; + +enum ieee80211_roc_type { + IEEE80211_ROC_TYPE_NORMAL = 0, + IEEE80211_ROC_TYPE_MGMT_TX = 1, +}; + +enum ieee80211_reconfig_type { + IEEE80211_RECONFIG_TYPE_RESTART = 0, + IEEE80211_RECONFIG_TYPE_SUSPEND = 1, +}; + +struct ieee80211_prep_tx_info { + u16 duration; + u16 subtype; + u8 success: 1; + u8 was_assoc: 1; + int link_id; +}; + +struct ieee80211_ops { + void (*tx)(struct ieee80211_hw *, struct ieee80211_tx_control *, struct sk_buff *); + int (*start)(struct ieee80211_hw *); + void (*stop)(struct ieee80211_hw *, bool); + int (*suspend)(struct ieee80211_hw *, struct cfg80211_wowlan *); + int (*resume)(struct ieee80211_hw *); + void (*set_wakeup)(struct ieee80211_hw *, bool); + int (*add_interface)(struct ieee80211_hw *, struct ieee80211_vif *); + int (*change_interface)(struct ieee80211_hw *, struct ieee80211_vif *, enum nl80211_iftype, bool); + void (*remove_interface)(struct ieee80211_hw *, struct ieee80211_vif *); + int (*config)(struct ieee80211_hw *, u32); + void (*bss_info_changed)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, u64); + void (*vif_cfg_changed)(struct ieee80211_hw *, struct ieee80211_vif *, u64); + void (*link_info_changed)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, u64); + int (*start_ap)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *); + void (*stop_ap)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *); + u64 (*prepare_multicast)(struct ieee80211_hw *, struct netdev_hw_addr_list *); + void (*configure_filter)(struct ieee80211_hw *, unsigned int, unsigned int *, u64); + void (*config_iface_filter)(struct ieee80211_hw *, struct ieee80211_vif *, unsigned int, unsigned int); + int (*set_tim)(struct ieee80211_hw *, struct ieee80211_sta *, bool); + int (*set_key)(struct ieee80211_hw *, enum set_key_cmd, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *); + void (*update_tkip_key)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_key_conf *, struct ieee80211_sta *, u32, u16 *); + void (*set_rekey_data)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_gtk_rekey_data *); + void (*set_default_unicast_key)(struct ieee80211_hw *, struct ieee80211_vif *, int); + int (*hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_scan_request *); + void (*cancel_hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *); + int (*sched_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, struct 
cfg80211_sched_scan_request *, struct ieee80211_scan_ies *); + int (*sched_scan_stop)(struct ieee80211_hw *, struct ieee80211_vif *); + void (*sw_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, const u8 *); + void (*sw_scan_complete)(struct ieee80211_hw *, struct ieee80211_vif *); + int (*get_stats)(struct ieee80211_hw *, struct ieee80211_low_level_stats *); + void (*get_key_seq)(struct ieee80211_hw *, struct ieee80211_key_conf *, struct ieee80211_key_seq *); + int (*set_frag_threshold)(struct ieee80211_hw *, u32); + int (*set_rts_threshold)(struct ieee80211_hw *, u32); + int (*sta_add)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); + int (*sta_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); + void (*sta_notify)(struct ieee80211_hw *, struct ieee80211_vif *, enum sta_notify_cmd, struct ieee80211_sta *); + int (*sta_set_txpwr)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); + int (*sta_state)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, enum ieee80211_sta_state, enum ieee80211_sta_state); + void (*sta_pre_rcu_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); + void (*link_sta_rc_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_link_sta *, u32); + void (*sta_rate_tbl_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); + void (*sta_statistics)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct station_info *); + int (*conf_tx)(struct ieee80211_hw *, struct ieee80211_vif *, unsigned int, u16, const struct ieee80211_tx_queue_params *); + u64 (*get_tsf)(struct ieee80211_hw *, struct ieee80211_vif *); + void (*set_tsf)(struct ieee80211_hw *, struct ieee80211_vif *, u64); + void (*offset_tsf)(struct ieee80211_hw *, struct ieee80211_vif *, s64); + void (*reset_tsf)(struct ieee80211_hw *, struct ieee80211_vif *); + int (*tx_last_beacon)(struct ieee80211_hw *); + int (*ampdu_action)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_ampdu_params *); + int (*get_survey)(struct ieee80211_hw *, int, struct survey_info *); + void (*rfkill_poll)(struct ieee80211_hw *); + void (*set_coverage_class)(struct ieee80211_hw *, s16); + void (*flush)(struct ieee80211_hw *, struct ieee80211_vif *, u32, bool); + void (*flush_sta)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); + void (*channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); + int (*set_antenna)(struct ieee80211_hw *, u32, u32); + int (*get_antenna)(struct ieee80211_hw *, u32 *, u32 *); + int (*remain_on_channel)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel *, int, enum ieee80211_roc_type); + int (*cancel_remain_on_channel)(struct ieee80211_hw *, struct ieee80211_vif *); + int (*set_ringparam)(struct ieee80211_hw *, u32, u32); + void (*get_ringparam)(struct ieee80211_hw *, u32 *, u32 *, u32 *, u32 *); + bool (*tx_frames_pending)(struct ieee80211_hw *); + int (*set_bitrate_mask)(struct ieee80211_hw *, struct ieee80211_vif *, const struct cfg80211_bitrate_mask *); + void (*event_callback)(struct ieee80211_hw *, struct ieee80211_vif *, const struct ieee80211_event *); + void (*allow_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool); + void (*release_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool); 
+ int (*get_et_sset_count)(struct ieee80211_hw *, struct ieee80211_vif *, int); + void (*get_et_stats)(struct ieee80211_hw *, struct ieee80211_vif *, struct ethtool_stats *, u64 *); + void (*get_et_strings)(struct ieee80211_hw *, struct ieee80211_vif *, u32, u8 *); + void (*mgd_prepare_tx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *); + void (*mgd_complete_tx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *); + void (*mgd_protect_tdls_discover)(struct ieee80211_hw *, struct ieee80211_vif *, unsigned int); + int (*add_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); + void (*remove_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); + void (*change_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, u32); + int (*assign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf *); + void (*unassign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf *); + int (*switch_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif_chanctx_switch *, int, enum ieee80211_chanctx_switch_mode); + void (*reconfig_complete)(struct ieee80211_hw *, enum ieee80211_reconfig_type); + void (*ipv6_addr_change)(struct ieee80211_hw *, struct ieee80211_vif *, struct inet6_dev *); + void (*channel_switch_beacon)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_chan_def *); + int (*pre_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); + int (*post_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *); + void (*abort_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *); + void (*channel_switch_rx_beacon)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); + int (*join_ibss)(struct ieee80211_hw *, struct ieee80211_vif *); + void (*leave_ibss)(struct ieee80211_hw *, struct ieee80211_vif *); + u32 (*get_expected_throughput)(struct ieee80211_hw *, struct ieee80211_sta *); + int (*get_txpower)(struct ieee80211_hw *, struct ieee80211_vif *, int *); + int (*tdls_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u8, struct cfg80211_chan_def *, struct sk_buff *, u32); + void (*tdls_cancel_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); + void (*tdls_recv_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_tdls_ch_sw_params *); + void (*wake_tx_queue)(struct ieee80211_hw *, struct ieee80211_txq *); + void (*sync_rx_queues)(struct ieee80211_hw *); + int (*start_nan)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_nan_conf *); + int (*stop_nan)(struct ieee80211_hw *, struct ieee80211_vif *); + int (*nan_change_conf)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_nan_conf *, u32); + int (*add_nan_func)(struct ieee80211_hw *, struct ieee80211_vif *, const struct cfg80211_nan_func *); + void (*del_nan_func)(struct ieee80211_hw *, struct ieee80211_vif *, u8); + bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *, struct sk_buff *, struct sk_buff *); + int (*get_ftm_responder_stats)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_ftm_responder_stats *); + int (*start_pmsr)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_pmsr_request *); + void (*abort_pmsr)(struct 
ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_pmsr_request *); + int (*set_tid_config)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct cfg80211_tid_config *); + int (*reset_tid_config)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u8); + void (*update_vif_offload)(struct ieee80211_hw *, struct ieee80211_vif *); + void (*sta_set_4addr)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, bool); + int (*set_sar_specs)(struct ieee80211_hw *, const struct cfg80211_sar_specs *); + void (*sta_set_decap_offload)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, bool); + void (*add_twt_setup)(struct ieee80211_hw *, struct ieee80211_sta *, struct ieee80211_twt_setup *); + void (*twt_teardown_request)(struct ieee80211_hw *, struct ieee80211_sta *, u8); + int (*set_radar_background)(struct ieee80211_hw *, struct cfg80211_chan_def *); + int (*net_fill_forward_path)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct net_device_path_ctx *, struct net_device_path *); + bool (*can_activate_links)(struct ieee80211_hw *, struct ieee80211_vif *, u16); + int (*change_vif_links)(struct ieee80211_hw *, struct ieee80211_vif *, u16, u16, struct ieee80211_bss_conf **); + int (*change_sta_links)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u16, u16); + int (*set_hw_timestamp)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_set_hw_timestamp *); + int (*net_setup_tc)(struct ieee80211_hw *, struct ieee80211_vif *, struct net_device *, enum tc_setup_type, void *); + enum ieee80211_neg_ttlm_res (*can_neg_ttlm)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_neg_ttlm *); + void (*prep_add_interface)(struct ieee80211_hw *, enum nl80211_iftype); +}; + +struct fq_tin; + +struct fq_flow { + struct fq_tin *tin; + struct list_head flowchain; + struct sk_buff_head queue; + u32 backlog; + int deficit; +}; + +struct fq_tin { + struct list_head new_flows; + struct list_head old_flows; + struct list_head tin_list; + struct fq_flow default_flow; + u32 backlog_bytes; + u32 backlog_packets; + u32 overlimit; + u32 collisions; + u32 flows; + u32 tx_bytes; + u32 tx_packets; +}; + +struct fq { + struct fq_flow *flows; + long unsigned int *flows_bitmap; + struct list_head tin_backlog; + spinlock_t lock; + u32 flows_cnt; + u32 limit; + u32 memory_limit; + u32 memory_usage; + u32 quantum; + u32 backlog; + u32 overlimit; + u32 overmemory; + u32 collisions; +}; + +struct arc4_ctx { + u32 S[256]; + u32 x; + u32 y; +}; + +enum ieee80211_internal_key_flags { + KEY_FLAG_UPLOADED_TO_HARDWARE = 1, + KEY_FLAG_TAINTED = 2, +}; + +enum ieee80211_internal_tkip_state { + TKIP_STATE_NOT_INIT = 0, + TKIP_STATE_PHASE1_DONE = 1, + TKIP_STATE_PHASE1_HW_UPLOADED = 2, +}; + +struct tkip_ctx { + u16 p1k[5]; + u32 p1k_iv32; + enum ieee80211_internal_tkip_state state; +}; + +struct tkip_ctx_rx { + struct tkip_ctx ctx; + u32 iv32; + u16 iv16; +}; + +struct ieee80211_local; + +struct ieee80211_sub_if_data; + +struct sta_info; + +struct ieee80211_key { + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + struct list_head list; + unsigned int flags; + union { + struct { + spinlock_t txlock; + struct tkip_ctx tx; + struct tkip_ctx_rx rx[16]; + u32 mic_failures; + } tkip; + struct { + u8 rx_pn[102]; + struct crypto_aead *tfm; + u32 replays; + } ccmp; + struct { + u8 rx_pn[6]; + struct crypto_shash *tfm; + u32 replays; + u32 icverrors; + } 
aes_cmac; + struct { + u8 rx_pn[6]; + struct crypto_aead *tfm; + u32 replays; + u32 icverrors; + } aes_gmac; + struct { + u8 rx_pn[102]; + struct crypto_aead *tfm; + u32 replays; + } gcmp; + struct { + u8 rx_pn[272]; + } gen; + } u; + unsigned int color; + struct ieee80211_key_conf conf; +}; + +enum mac80211_scan_state { + SCAN_DECISION = 0, + SCAN_SET_CHANNEL = 1, + SCAN_SEND_PROBE = 2, + SCAN_SUSPEND = 3, + SCAN_RESUME = 4, + SCAN_ABORT = 5, +}; + +struct rate_control_ref; + +struct ieee80211_local { + struct ieee80211_hw hw; + struct fq fq; + struct codel_vars *cvars; + struct codel_params cparams; + spinlock_t active_txq_lock[4]; + struct list_head active_txqs[4]; + u16 schedule_round[4]; + spinlock_t handle_wake_tx_queue_lock; + u16 airtime_flags; + u32 aql_txq_limit_low[4]; + u32 aql_txq_limit_high[4]; + u32 aql_threshold; + atomic_t aql_total_pending_airtime; + atomic_t aql_ac_pending_airtime[4]; + const struct ieee80211_ops *ops; + struct workqueue_struct *workqueue; + long unsigned int queue_stop_reasons[16]; + int q_stop_reasons[176]; + spinlock_t queue_stop_reason_lock; + int open_count; + int monitors; + int cooked_mntrs; + int tx_mntrs; + int fif_fcsfail; + int fif_plcpfail; + int fif_control; + int fif_other_bss; + int fif_pspoll; + int fif_probe_req; + bool probe_req_reg; + bool rx_mcast_action_reg; + unsigned int filter_flags; + bool wiphy_ciphers_allocated; + struct cfg80211_chan_def dflt_chandef; + bool emulate_chanctx; + spinlock_t filter_lock; + struct wiphy_work reconfig_filter; + struct netdev_hw_addr_list mc_list; + bool tim_in_locked_section; + bool suspended; + bool suspending; + bool resuming; + bool quiescing; + bool started; + bool in_reconfig; + bool reconfig_failure; + bool wowlan; + struct wiphy_work radar_detected_work; + u8 rx_chains; + u8 sband_allocated; + int tx_headroom; + struct tasklet_struct tasklet; + struct sk_buff_head skb_queue; + struct sk_buff_head skb_queue_unreliable; + spinlock_t rx_path_lock; + spinlock_t tim_lock; + long unsigned int num_sta; + struct list_head sta_list; + struct rhltable sta_hash; + struct rhltable link_sta_hash; + struct timer_list sta_cleanup; + int sta_generation; + struct sk_buff_head pending[16]; + struct tasklet_struct tx_pending_tasklet; + struct tasklet_struct wake_txqs_tasklet; + atomic_t agg_queue_stop[16]; + atomic_t iff_allmultis; + struct rate_control_ref *rate_ctrl; + struct arc4_ctx wep_tx_ctx; + struct arc4_ctx wep_rx_ctx; + u32 wep_iv; + struct list_head interfaces; + struct list_head mon_list; + struct mutex iflist_mtx; + long unsigned int scanning; + struct cfg80211_ssid scan_ssid; + struct cfg80211_scan_request *int_scan_req; + struct cfg80211_scan_request *scan_req; + struct ieee80211_scan_request *hw_scan_req; + struct cfg80211_chan_def scan_chandef; + enum nl80211_band hw_scan_band; + int scan_channel_idx; + int scan_ies_len; + int hw_scan_ies_bufsize; + long: 32; + struct cfg80211_scan_info scan_info; + struct wiphy_work sched_scan_stopped_work; + struct ieee80211_sub_if_data *sched_scan_sdata; + struct cfg80211_sched_scan_request *sched_scan_req; + u8 scan_addr[6]; + long unsigned int leave_oper_channel_time; + enum mac80211_scan_state next_scan_state; + struct wiphy_delayed_work scan_work; + struct ieee80211_sub_if_data *scan_sdata; + struct ieee80211_channel *tmp_channel; + struct list_head chanctx_list; + int total_ps_buffered; + bool pspolling; + struct ieee80211_sub_if_data *ps_sdata; + struct wiphy_work dynamic_ps_enable_work; + struct wiphy_work dynamic_ps_disable_work; + struct timer_list 
dynamic_ps_timer; + struct notifier_block ifa_notifier; + struct notifier_block ifa6_notifier; + int dynamic_ps_forced_timeout; + int user_power_level; + struct work_struct restart_work; + struct wiphy_delayed_work roc_work; + struct list_head roc_list; + struct wiphy_work hw_roc_start; + struct wiphy_work hw_roc_done; + long unsigned int hw_roc_start_time; + u64 roc_cookie_counter; + struct idr ack_status_frames; + spinlock_t ack_status_lock; + struct ieee80211_sub_if_data *p2p_sdata; + struct ieee80211_sub_if_data *monitor_sdata; + struct ieee80211_chan_req monitor_chanreq; + u8 ext_capa[8]; + bool wbrf_supported; + long: 32; +}; + +struct ieee80211_fragment_entry { + struct sk_buff_head skb_list; + long unsigned int first_frag_time; + u16 seq; + u16 extra_len; + u16 last_frag; + u8 rx_queue; + u8 check_sequential_pn: 1; + u8 is_protected: 1; + u8 last_pn[6]; + unsigned int key_color; +}; + +struct ieee80211_fragment_cache { + struct ieee80211_fragment_entry entries[4]; + unsigned int next; +}; + +struct ps_data { + u8 tim[252]; + struct sk_buff_head bc_buf; + atomic_t num_sta_ps; + int dtim_count; + bool dtim_bc_mc; +}; + +struct ieee80211_if_ap { + struct list_head vlans; + struct ps_data ps; + atomic_t num_mcast_sta; + bool multicast_to_unicast; + bool active; +}; + +struct ieee80211_if_vlan { + struct list_head list; + struct sta_info *sta; + atomic_t num_mcast_sta; +}; + +struct ieee80211_sta_tx_tspec { + long unsigned int time_slice_start; + u32 admitted_time; + u8 tsid; + s8 up; + u32 consumed_tx_time; + enum { + TX_TSPEC_ACTION_NONE = 0, + TX_TSPEC_ACTION_DOWNGRADE = 1, + TX_TSPEC_ACTION_STOP_DOWNGRADE = 2, + } action; + bool downgraded; +}; + +struct ieee80211_adv_ttlm_info { + u16 switch_time; + u32 duration; + u16 map; + bool active; +}; + +struct ieee80211_mgd_auth_data; + +struct ieee80211_mgd_assoc_data; + +struct ieee80211_if_managed { + struct timer_list timer; + struct timer_list conn_mon_timer; + struct timer_list bcn_mon_timer; + struct wiphy_work monitor_work; + struct wiphy_work beacon_connection_loss_work; + struct wiphy_work csa_connection_drop_work; + long unsigned int beacon_timeout; + long unsigned int probe_timeout; + int probe_send_count; + bool nullfunc_failed; + u8 connection_loss: 1; + u8 driver_disconnect: 1; + u8 reconnect: 1; + u8 associated: 1; + struct ieee80211_mgd_auth_data *auth_data; + struct ieee80211_mgd_assoc_data *assoc_data; + bool powersave; + bool broken_ap; + unsigned int flags; + u16 mcast_seq_last; + bool status_acked; + bool status_received; + __le16 status_fc; + enum { + IEEE80211_MFP_DISABLED = 0, + IEEE80211_MFP_OPTIONAL = 1, + IEEE80211_MFP_REQUIRED = 2, + } mfp; + unsigned int uapsd_queues; + unsigned int uapsd_max_sp_len; + u8 use_4addr; + int rssi_min_thold; + int rssi_max_thold; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct ieee80211_vht_cap vht_capa; + struct ieee80211_vht_cap vht_capa_mask; + struct ieee80211_s1g_cap s1g_capa; + struct ieee80211_s1g_cap s1g_capa_mask; + u8 tdls_peer[6]; + struct wiphy_delayed_work tdls_peer_del_work; + struct sk_buff *orig_teardown_skb; + struct sk_buff *teardown_skb; + spinlock_t teardown_lock; + bool tdls_wider_bw_prohibited; + struct ieee80211_sta_tx_tspec tx_tspec[4]; + struct wiphy_delayed_work tx_tspec_wk; + u8 *assoc_req_ies; + size_t assoc_req_ies_len; + struct wiphy_delayed_work ml_reconf_work; + u16 removed_links; + struct wiphy_delayed_work ttlm_work; + struct ieee80211_adv_ttlm_info ttlm_info; + struct wiphy_work teardown_ttlm_work; + u8 
dialog_token_alloc; + struct wiphy_delayed_work neg_ttlm_timeout_work; +}; + +struct beacon_data; + +struct ieee80211_if_ibss { + struct timer_list timer; + struct wiphy_work csa_connection_drop_work; + long unsigned int last_scan_completed; + u32 basic_rates; + bool fixed_bssid; + bool fixed_channel; + bool privacy; + bool control_port; + bool userspace_handles_dfs; + short: 0; + u8 bssid[6]; + u8 ssid[32]; + u8 ssid_len; + u8 ie_len; + u8 *ie; + struct cfg80211_chan_def chandef; + long unsigned int ibss_join_req; + struct beacon_data *presp; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + spinlock_t incomplete_lock; + struct list_head incomplete_stations; + enum { + IEEE80211_IBSS_MLME_SEARCH = 0, + IEEE80211_IBSS_MLME_JOINED = 1, + } state; +}; + +struct mesh_preq_queue { + struct list_head list; + u8 dst[6]; + u8 flags; +}; + +struct mesh_stats { + __u32 fwded_mcast; + __u32 fwded_unicast; + __u32 fwded_frames; + __u32 dropped_frames_ttl; + __u32 dropped_frames_no_route; +}; + +struct mesh_table { + struct hlist_head known_gates; + spinlock_t gates_lock; + struct rhashtable rhead; + struct hlist_head walk_head; + spinlock_t walk_lock; + atomic_t entries; +}; + +struct mesh_tx_cache { + struct rhashtable rht; + struct hlist_head walk_head; + spinlock_t walk_lock; +}; + +struct mesh_rmc; + +struct ieee80211_mesh_sync_ops; + +struct mesh_csa_settings; + +struct ieee80211_if_mesh { + struct timer_list housekeeping_timer; + struct timer_list mesh_path_timer; + struct timer_list mesh_path_root_timer; + long unsigned int wrkq_flags; + long unsigned int mbss_changed[2]; + bool userspace_handles_dfs; + u8 mesh_id[32]; + size_t mesh_id_len; + u8 mesh_pp_id; + u8 mesh_pm_id; + u8 mesh_cc_id; + u8 mesh_sp_id; + u8 mesh_auth_id; + u32 sn; + u32 preq_id; + atomic_t mpaths; + long unsigned int last_sn_update; + long unsigned int next_perr; + long unsigned int last_preq; + struct mesh_rmc *rmc; + spinlock_t mesh_preq_queue_lock; + struct mesh_preq_queue preq_queue; + int preq_queue_len; + struct mesh_stats mshstats; + struct mesh_config mshcfg; + atomic_t estab_plinks; + atomic_t mesh_seqnum; + bool accepting_plinks; + int num_gates; + struct beacon_data *beacon; + const u8 *ie; + u8 ie_len; + enum { + IEEE80211_MESH_SEC_NONE = 0, + IEEE80211_MESH_SEC_AUTHED = 1, + IEEE80211_MESH_SEC_SECURED = 2, + } security; + bool user_mpm; + const struct ieee80211_mesh_sync_ops *sync_ops; + s64 sync_offset_clockdrift_max; + spinlock_t sync_offset_lock; + enum nl80211_mesh_power_mode nonpeer_pm; + int ps_peers_light_sleep; + int ps_peers_deep_sleep; + struct ps_data ps; + struct mesh_csa_settings *csa; + enum { + IEEE80211_MESH_CSA_ROLE_NONE = 0, + IEEE80211_MESH_CSA_ROLE_INIT = 1, + IEEE80211_MESH_CSA_ROLE_REPEATER = 2, + } csa_role; + u8 chsw_ttl; + u16 pre_value; + int meshconf_offset; + struct mesh_table mesh_paths; + struct mesh_table mpp_paths; + int mesh_paths_generation; + int mpp_paths_generation; + struct mesh_tx_cache tx_cache; +}; + +struct ieee80211_if_ocb { + struct timer_list housekeeping_timer; + long unsigned int wrkq_flags; + spinlock_t incomplete_lock; + struct list_head incomplete_stations; + bool joined; +}; + +struct ieee80211_if_mntr { + u32 flags; + u8 mu_follow_addr[6]; + struct list_head list; +}; + +struct ieee80211_if_nan { + struct cfg80211_nan_conf conf; + spinlock_t func_lock; + struct idr function_inst_ids; +}; + +enum ieee80211_conn_mode { + IEEE80211_CONN_MODE_S1G = 0, + IEEE80211_CONN_MODE_LEGACY = 1, + IEEE80211_CONN_MODE_HT = 2, + 
IEEE80211_CONN_MODE_VHT = 3, + IEEE80211_CONN_MODE_HE = 4, + IEEE80211_CONN_MODE_EHT = 5, +}; + +enum ieee80211_conn_bw_limit { + IEEE80211_CONN_BW_LIMIT_20 = 0, + IEEE80211_CONN_BW_LIMIT_40 = 1, + IEEE80211_CONN_BW_LIMIT_80 = 2, + IEEE80211_CONN_BW_LIMIT_160 = 3, + IEEE80211_CONN_BW_LIMIT_320 = 4, +}; + +struct ieee80211_conn_settings { + enum ieee80211_conn_mode mode; + enum ieee80211_conn_bw_limit bw_limit; +}; + +struct ewma_beacon_signal { + long unsigned int internal; +}; + +struct ieee80211_link_data_managed { + u8 bssid[6]; + u8 dtim_period; + enum ieee80211_smps_mode req_smps; + enum ieee80211_smps_mode driver_smps_mode; + struct ieee80211_conn_settings conn; + s16 p2p_noa_index; + bool tdls_chan_switch_prohibited; + bool have_beacon; + bool tracking_signal_avg; + bool disable_wmm_tracking; + bool operating_11g_mode; + struct { + struct wiphy_delayed_work switch_work; + struct cfg80211_chan_def ap_chandef; + struct ieee80211_parsed_tpe tpe; + long unsigned int time; + bool waiting_bcn; + bool ignored_same_chan; + bool blocked_tx; + } csa; + struct wiphy_work request_smps_work; + struct wiphy_work recalc_smps; + bool beacon_crc_valid; + u32 beacon_crc; + struct ewma_beacon_signal ave_beacon_signal; + int last_ave_beacon_signal; + unsigned int count_beacon_signal; + unsigned int beacon_loss_count; + int last_cqm_event_signal; + int wmm_last_param_set; + int mu_edca_last_param_set; +}; + +struct probe_resp; + +struct fils_discovery_data; + +struct unsol_bcast_probe_resp_data; + +struct ieee80211_link_data_ap { + struct beacon_data *beacon; + struct probe_resp *probe_resp; + struct fils_discovery_data *fils_discovery; + struct unsol_bcast_probe_resp_data *unsol_bcast_probe_resp; + struct cfg80211_beacon_data *next_beacon; +}; + +struct ieee80211_chanctx; + +struct ieee80211_link_data { + struct ieee80211_sub_if_data *sdata; + unsigned int link_id; + struct list_head assigned_chanctx_list; + struct list_head reserved_chanctx_list; + struct ieee80211_key *gtk[8]; + struct ieee80211_key *default_multicast_key; + struct ieee80211_key *default_mgmt_key; + struct ieee80211_key *default_beacon_key; + bool operating_11g_mode; + struct { + struct wiphy_work finalize_work; + struct ieee80211_chan_req chanreq; + } csa; + struct wiphy_work color_change_finalize_work; + struct wiphy_delayed_work color_collision_detect_work; + long: 32; + u64 color_bitmap; + struct ieee80211_chanctx *reserved_chanctx; + struct ieee80211_chan_req reserved; + bool reserved_radar_required; + bool reserved_ready; + u8 needed_rx_chains; + enum ieee80211_smps_mode smps_mode; + int user_power_level; + int ap_power_level; + bool radar_required; + struct wiphy_delayed_work dfs_cac_timer_work; + union { + struct ieee80211_link_data_managed mgd; + struct ieee80211_link_data_ap ap; + } u; + struct ieee80211_tx_queue_params tx_conf[4]; + struct ieee80211_bss_conf *conf; + long: 32; +}; + +struct mac80211_qos_map; + +struct ieee80211_sub_if_data { + struct list_head list; + struct wireless_dev wdev; + struct list_head key_list; + int crypto_tx_tailroom_needed_cnt; + int crypto_tx_tailroom_pending_dec; + struct wiphy_delayed_work dec_tailroom_needed_wk; + struct net_device *dev; + struct ieee80211_local *local; + unsigned int flags; + long unsigned int state; + bool csa_blocked_queues; + char name[16]; + struct ieee80211_fragment_cache frags; + u16 noack_map; + u8 wmm_acm; + struct ieee80211_key *keys[4]; + struct ieee80211_key *default_unicast_key; + u16 sequence_number; + u16 mld_mcast_seq; + __be16 control_port_protocol; + 
bool control_port_no_encrypt; + bool control_port_no_preauth; + bool control_port_over_nl80211; + atomic_t num_tx_queued; + struct mac80211_qos_map *qos_map; + struct wiphy_work work; + struct sk_buff_head skb_queue; + struct sk_buff_head status_queue; + struct ieee80211_if_ap *bss; + u32 rc_rateidx_mask[6]; + bool rc_has_mcs_mask[6]; + u8 rc_rateidx_mcs_mask[60]; + bool rc_has_vht_mcs_mask[6]; + u16 rc_rateidx_vht_mcs_mask[48]; + u32 beacon_rateidx_mask[6]; + bool beacon_rate_set; + long: 32; + union { + struct ieee80211_if_ap ap; + struct ieee80211_if_vlan vlan; + struct ieee80211_if_managed mgd; + struct ieee80211_if_ibss ibss; + struct ieee80211_if_mesh mesh; + struct ieee80211_if_ocb ocb; + struct ieee80211_if_mntr mntr; + struct ieee80211_if_nan nan; + } u; + struct ieee80211_link_data deflink; + struct ieee80211_link_data *link[15]; + struct wiphy_work activate_links_work; + u16 desired_active_links; + u16 restart_active_links; + long: 32; + struct ieee80211_vif vif; +}; + +struct airtime_info { + u64 rx_airtime; + u64 tx_airtime; + long unsigned int last_active; + s32 deficit; + atomic_t aql_tx_pending; + u32 aql_limit_low; + u32 aql_limit_high; + long: 32; +}; + +struct tid_ampdu_rx; + +struct tid_ampdu_tx; + +struct sta_ampdu_mlme { + struct tid_ampdu_rx *tid_rx[16]; + u8 tid_rx_token[16]; + long unsigned int tid_rx_timer_expired[1]; + long unsigned int tid_rx_stop_requested[1]; + long unsigned int tid_rx_manage_offl[1]; + long unsigned int agg_session_valid[1]; + long unsigned int unexpected_agg[1]; + struct wiphy_work work; + struct tid_ampdu_tx *tid_tx[16]; + struct tid_ampdu_tx *tid_start_tx[16]; + long unsigned int last_addba_req_time[16]; + u8 addba_req_num[16]; + u8 dialog_token_allocator; +}; + +struct ieee80211_sta_rx_stats { + long unsigned int packets; + long unsigned int last_rx; + long unsigned int num_duplicates; + long unsigned int fragments; + long unsigned int dropped; + int last_signal; + u8 chains; + s8 chain_signal_last[4]; + u32 last_rate; + struct u64_stats_sync syncp; + u64 bytes; + u64 msdu[17]; +}; + +struct ewma_signal { + long unsigned int internal; +}; + +struct ewma_avg_signal { + long unsigned int internal; +}; + +struct link_sta_info { + u8 addr[6]; + u8 link_id; + u8 op_mode_nss; + u8 capa_nss; + struct rhlist_head link_hash_node; + struct sta_info *sta; + struct ieee80211_key *gtk[8]; + struct ieee80211_sta_rx_stats *pcpu_rx_stats; + long: 32; + struct ieee80211_sta_rx_stats rx_stats; + struct { + struct ewma_signal signal; + struct ewma_signal chain_signal[4]; + } rx_stats_avg; + long: 32; + struct { + long unsigned int filtered; + long unsigned int retry_failed; + long unsigned int retry_count; + unsigned int lost_packets; + long unsigned int last_pkt_time; + long: 32; + u64 msdu_retries[17]; + u64 msdu_failed[17]; + long unsigned int last_ack; + s8 last_ack_signal; + bool ack_signal_filled; + struct ewma_avg_signal avg_ack_signal; + long: 32; + } status_stats; + struct { + u64 packets[4]; + u64 bytes[4]; + struct ieee80211_tx_rate last_rate; + struct rate_info last_rate_info; + long: 32; + u64 msdu[17]; + } tx_stats; + enum ieee80211_sta_rx_bandwidth cur_max_bandwidth; + struct ieee80211_link_sta *pub; +}; + +struct ieee80211_fast_tx; + +struct ieee80211_fast_rx; + +struct sta_info { + struct list_head list; + struct list_head free_list; + struct callback_head callback_head; + struct rhlist_head hash_node; + u8 addr[6]; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_key *ptk[4]; + u8 ptk_idx; + struct 
rate_control_ref *rate_ctrl; + void *rate_ctrl_priv; + spinlock_t rate_ctrl_lock; + spinlock_t lock; + struct ieee80211_fast_tx *fast_tx; + struct ieee80211_fast_rx *fast_rx; + struct work_struct drv_deliver_wk; + u16 listen_interval; + bool dead; + bool removed; + bool uploaded; + enum ieee80211_sta_state sta_state; + long unsigned int _flags; + spinlock_t ps_lock; + struct sk_buff_head ps_tx_buf[4]; + struct sk_buff_head tx_filtered[4]; + long unsigned int driver_buffered_tids; + long unsigned int txq_buffered_tids; + u64 assoc_at; + long int last_connected; + __le16 last_seq_ctrl[17]; + u16 tid_seq[16]; + struct airtime_info airtime[4]; + u16 airtime_weight; + struct sta_ampdu_mlme ampdu_mlme; + struct codel_params cparams; + u8 reserved_tid; + s8 amsdu_mesh_control; + struct cfg80211_chan_def tdls_chandef; + struct ieee80211_fragment_cache frags; + struct ieee80211_sta_aggregates cur; + long: 32; + struct link_sta_info deflink; + struct link_sta_info *link[15]; + struct ieee80211_sta sta; + long: 32; +}; + +struct tid_ampdu_tx { + struct callback_head callback_head; + struct timer_list session_timer; + struct timer_list addba_resp_timer; + struct sk_buff_head pending; + struct sta_info *sta; + long unsigned int state; + long unsigned int last_tx; + u16 timeout; + u8 dialog_token; + u8 stop_initiator; + bool tx_stop; + u16 buf_size; + u16 ssn; + u16 failed_bar_ssn; + bool bar_pending; + bool amsdu; + u8 tid; +}; + +struct tid_ampdu_rx { + struct callback_head callback_head; + spinlock_t reorder_lock; + long: 32; + u64 reorder_buf_filtered; + struct sk_buff_head *reorder_buf; + long unsigned int *reorder_time; + struct sta_info *sta; + struct timer_list session_timer; + struct timer_list reorder_timer; + long unsigned int last_rx; + u16 head_seq_num; + u16 stored_mpdu_num; + u16 ssn; + u16 buf_size; + u16 timeout; + u8 tid; + u8 auto_seq: 1; + u8 removed: 1; + u8 started: 1; + long: 32; +}; + +struct ieee80211_fast_tx { + struct ieee80211_key *key; + u8 hdr_len; + u8 sa_offs; + u8 da_offs; + u8 pn_offs; + u8 band; + short: 0; + u8 hdr[56]; + struct callback_head callback_head; +}; + +struct ieee80211_fast_rx { + struct net_device *dev; + enum nl80211_iftype vif_type; + u8 vif_addr[6]; + u8 rfc1042_hdr[6]; + __be16 control_port_protocol; + __le16 expected_ds_bits; + u8 icv_len; + u8 key: 1; + u8 internal_forward: 1; + u8 uses_rss: 1; + u8 da_offs; + u8 sa_offs; + struct callback_head callback_head; +}; + +enum skb_drop_reason_subsys { + SKB_DROP_REASON_SUBSYS_CORE = 0, + SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE = 1, + SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR = 2, + SKB_DROP_REASON_SUBSYS_OPENVSWITCH = 3, + SKB_DROP_REASON_SUBSYS_NUM = 4, +}; + +typedef unsigned int ieee80211_rx_result; + +enum ___mac80211_drop_reason { + ___RX_CONTINUE = 1, + ___RX_QUEUED = 0, + ___RX_DROP_MONITOR = 131072, + ___RX_DROP_M_UNEXPECTED_4ADDR_FRAME = 131073, + ___RX_DROP_M_BAD_BCN_KEYIDX = 131074, + ___RX_DROP_M_BAD_MGMT_KEYIDX = 131075, + ___RX_DROP_UNUSABLE = 65536, + ___RX_DROP_U_MIC_FAIL = 65537, + ___RX_DROP_U_REPLAY = 65538, + ___RX_DROP_U_BAD_MMIE = 65539, + ___RX_DROP_U_DUP = 65540, + ___RX_DROP_U_SPURIOUS = 65541, + ___RX_DROP_U_DECRYPT_FAIL = 65542, + ___RX_DROP_U_NO_KEY_ID = 65543, + ___RX_DROP_U_BAD_CIPHER = 65544, + ___RX_DROP_U_OOM = 65545, + ___RX_DROP_U_NONSEQ_PN = 65546, + ___RX_DROP_U_BAD_KEY_COLOR = 65547, + ___RX_DROP_U_BAD_4ADDR = 65548, + ___RX_DROP_U_BAD_AMSDU = 65549, + ___RX_DROP_U_BAD_AMSDU_CIPHER = 65550, + ___RX_DROP_U_INVALID_8023 = 65551, + ___RX_DROP_U_RUNT_ACTION = 65552, + 
___RX_DROP_U_UNPROT_ACTION = 65553, + ___RX_DROP_U_UNPROT_DUAL = 65554, + ___RX_DROP_U_UNPROT_UCAST_MGMT = 65555, + ___RX_DROP_U_UNPROT_MCAST_MGMT = 65556, + ___RX_DROP_U_UNPROT_BEACON = 65557, + ___RX_DROP_U_UNPROT_UNICAST_PUB_ACTION = 65558, + ___RX_DROP_U_UNPROT_ROBUST_ACTION = 65559, + ___RX_DROP_U_ACTION_UNKNOWN_SRC = 65560, + ___RX_DROP_U_REJECTED_ACTION_RESPONSE = 65561, + ___RX_DROP_U_EXPECT_DEFRAG_PROT = 65562, + ___RX_DROP_U_WEP_DEC_FAIL = 65563, + ___RX_DROP_U_NO_IV = 65564, + ___RX_DROP_U_NO_ICV = 65565, + ___RX_DROP_U_AP_RX_GROUPCAST = 65566, + ___RX_DROP_U_SHORT_MMIC = 65567, + ___RX_DROP_U_MMIC_FAIL = 65568, + ___RX_DROP_U_SHORT_TKIP = 65569, + ___RX_DROP_U_TKIP_FAIL = 65570, + ___RX_DROP_U_SHORT_CCMP = 65571, + ___RX_DROP_U_SHORT_CCMP_MIC = 65572, + ___RX_DROP_U_SHORT_GCMP = 65573, + ___RX_DROP_U_SHORT_GCMP_MIC = 65574, + ___RX_DROP_U_SHORT_CMAC = 65575, + ___RX_DROP_U_SHORT_CMAC256 = 65576, + ___RX_DROP_U_SHORT_GMAC = 65577, + ___RX_DROP_U_UNEXPECTED_VLAN_4ADDR = 65578, + ___RX_DROP_U_UNEXPECTED_STA_4ADDR = 65579, + ___RX_DROP_U_UNEXPECTED_VLAN_MCAST = 65580, + ___RX_DROP_U_NOT_PORT_CONTROL = 65581, + ___RX_DROP_U_UNKNOWN_ACTION_REJECTED = 65582, +}; + +struct beacon_data { + u8 *head; + u8 *tail; + int head_len; + int tail_len; + struct ieee80211_meshconf_ie *meshconf; + u16 cntdwn_counter_offsets[2]; + u8 cntdwn_current_counter; + struct cfg80211_mbssid_elems *mbssid_ies; + struct cfg80211_rnr_elems *rnr_ies; + struct callback_head callback_head; +}; + +struct probe_resp { + struct callback_head callback_head; + int len; + u16 cntdwn_counter_offsets[2]; + u8 data[0]; +}; + +struct fils_discovery_data { + struct callback_head callback_head; + int len; + u8 data[0]; +}; + +struct unsol_bcast_probe_resp_data { + struct callback_head callback_head; + int len; + u8 data[0]; +}; + +struct ieee80211_mgd_auth_data { + struct cfg80211_bss *bss; + long unsigned int timeout; + int tries; + u16 algorithm; + u16 expected_transaction; + u8 key[13]; + u8 key_len; + u8 key_idx; + bool done; + bool waiting; + bool peer_confirmed; + bool timeout_started; + int link_id; + u8 ap_addr[6]; + u16 sae_trans; + u16 sae_status; + size_t data_len; + u8 data[0]; +}; + +struct ieee80211_mgd_assoc_data { + struct { + struct cfg80211_bss *bss; + u8 addr[6]; + u8 ap_ht_param; + struct ieee80211_vht_cap ap_vht_cap; + long: 0; + size_t elems_len; + u8 *elems; + struct ieee80211_conn_settings conn; + u16 status; + bool disabled; + long: 0; + } __attribute__((packed)) link[15]; + u8 ap_addr[6]; + const u8 *supp_rates; + u8 supp_rates_len; + long unsigned int timeout; + int tries; + u8 prev_ap_addr[6]; + u8 ssid[32]; + u8 ssid_len; + bool wmm; + bool uapsd; + bool need_beacon; + bool synced; + bool timeout_started; + bool comeback; + bool s1g; + bool spp_amsdu; + unsigned int assoc_link_id; + u8 fils_nonces[32]; + u8 fils_kek[64]; + size_t fils_kek_len; + size_t ie_len; + u8 *ie_pos; + u8 ie[0]; +}; + +struct ieee80211_mesh_sync_ops { + void (*rx_bcn_presp)(struct ieee80211_sub_if_data *, u16, struct ieee80211_mgmt *, unsigned int, const struct ieee80211_meshconf_ie *, struct ieee80211_rx_status *); + void (*adjust_tsf)(struct ieee80211_sub_if_data *, struct beacon_data *); +}; + +struct mesh_csa_settings { + struct callback_head callback_head; + struct cfg80211_csa_settings settings; +}; + +enum ieee80211_sub_if_data_flags { + IEEE80211_SDATA_ALLMULTI = 1, + IEEE80211_SDATA_DONT_BRIDGE_PACKETS = 8, + IEEE80211_SDATA_DISCONNECT_RESUME = 16, + IEEE80211_SDATA_IN_DRIVER = 32, + 
IEEE80211_SDATA_DISCONNECT_HW_RESTART = 64, +}; + +enum ieee80211_chanctx_mode { + IEEE80211_CHANCTX_SHARED = 0, + IEEE80211_CHANCTX_EXCLUSIVE = 1, +}; + +enum ieee80211_chanctx_replace_state { + IEEE80211_CHANCTX_REPLACE_NONE = 0, + IEEE80211_CHANCTX_WILL_BE_REPLACED = 1, + IEEE80211_CHANCTX_REPLACES_OTHER = 2, +}; + +struct ieee80211_chanctx { + struct list_head list; + struct callback_head callback_head; + struct list_head assigned_links; + struct list_head reserved_links; + enum ieee80211_chanctx_replace_state replace_state; + struct ieee80211_chanctx *replace_ctx; + enum ieee80211_chanctx_mode mode; + bool driver_present; + struct ieee80211_chan_req req; + bool radar_detected; + struct ieee80211_chanctx_conf conf; +}; + +struct mac80211_qos_map { + struct cfg80211_qos_map qos_map; + struct callback_head callback_head; +}; + +enum queue_stop_reason { + IEEE80211_QUEUE_STOP_REASON_DRIVER = 0, + IEEE80211_QUEUE_STOP_REASON_PS = 1, + IEEE80211_QUEUE_STOP_REASON_CSA = 2, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION = 3, + IEEE80211_QUEUE_STOP_REASON_SUSPEND = 4, + IEEE80211_QUEUE_STOP_REASON_SKB_ADD = 5, + IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL = 6, + IEEE80211_QUEUE_STOP_REASON_FLUSH = 7, + IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN = 8, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID = 9, + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE = 10, + IEEE80211_QUEUE_STOP_REASONS = 11, +}; + +enum { + TKIP_DECRYPT_OK = 0, + TKIP_DECRYPT_NO_EXT_IV = -1, + TKIP_DECRYPT_INVALID_KEYIDX = -2, + TKIP_DECRYPT_REPLAY = -3, +}; + +typedef __be32 fdt32_t; + +typedef __be64 fdt64_t; + +struct fdt_header { + fdt32_t magic; + fdt32_t totalsize; + fdt32_t off_dt_struct; + fdt32_t off_dt_strings; + fdt32_t off_mem_rsvmap; + fdt32_t version; + fdt32_t last_comp_version; + fdt32_t boot_cpuid_phys; + fdt32_t size_dt_strings; + fdt32_t size_dt_struct; +}; + +struct fdt_reserve_entry { + fdt64_t address; + fdt64_t size; +}; + +struct fdt_node_header { + fdt32_t tag; + char name[0]; +}; + +struct fdt_property { + fdt32_t tag; + fdt32_t len; + fdt32_t nameoff; + char data[0]; +}; + +enum { + ASSUME_PERFECT = 255, + ASSUME_VALID_DTB = 1, + ASSUME_VALID_INPUT = 2, + ASSUME_LATEST = 4, + ASSUME_NO_ROLLBACK = 8, + ASSUME_LIBFDT_ORDER = 16, + ASSUME_LIBFDT_FLAWLESS = 32, +}; + +struct xa_state { + struct xarray *xa; + long unsigned int xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; + struct list_lru *xa_lru; +}; + +struct radix_tree_iter { + long unsigned int index; + long unsigned int next_index; + long unsigned int tags; + struct xa_node *node; +}; + +enum { + RADIX_TREE_ITER_TAG_MASK = 15, + RADIX_TREE_ITER_TAGGED = 16, + RADIX_TREE_ITER_CONTIG = 32, +}; + +struct ida_bitmap { + long unsigned int bitmap[32]; +}; + +struct maple_metadata { + unsigned char end; + unsigned char gap; +}; + +struct maple_pnode; + +struct maple_range_64 { + struct maple_pnode *parent; + long unsigned int pivot[31]; + union { + void *slot[32]; + struct { + void *pad[31]; + struct maple_metadata meta; + }; + }; +}; + +struct maple_arange_64 { + struct maple_pnode *parent; + long unsigned int pivot[20]; + void *slot[21]; + long unsigned int gap[21]; + struct maple_metadata meta; +}; + +struct maple_topiary { + struct maple_pnode *parent; + struct maple_enode *next; +}; + +enum maple_type { + maple_dense = 0, + maple_leaf_64 = 1, + maple_range_64 = 2, + maple_arange_64 = 3, +}; + +struct maple_node { + union { + 
struct { + struct maple_pnode *parent; + void *slot[63]; + }; + struct { + void *pad; + struct callback_head rcu; + struct maple_enode *piv_parent; + unsigned char parent_slot; + enum maple_type type; + unsigned char slot_len; + unsigned int ma_flags; + }; + struct maple_range_64 mr64; + struct maple_arange_64 ma64; + struct maple_alloc alloc; + }; +}; + +struct ma_topiary { + struct maple_enode *head; + struct maple_enode *tail; + struct maple_tree *mtree; +}; + +struct ma_wr_state { + struct ma_state *mas; + struct maple_node *node; + long unsigned int r_min; + long unsigned int r_max; + enum maple_type type; + unsigned char offset_end; + long unsigned int *pivots; + long unsigned int end_piv; + void **slots; + void *entry; + void *content; +}; + +struct trace_event_raw_ma_op { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_read { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_write { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + long unsigned int piv; + void *val; + void *node; + char __data[0]; +}; + +struct trace_event_data_offsets_ma_op {}; + +struct trace_event_data_offsets_ma_read {}; + +struct trace_event_data_offsets_ma_write {}; + +typedef void (*btf_trace_ma_op)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_read)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_write)(void *, const char *, struct ma_state *, long unsigned int, void *); + +struct maple_big_node { + long unsigned int pivot[65]; + union { + struct maple_enode *slot[66]; + struct { + long unsigned int padding[43]; + long unsigned int gap[43]; + }; + }; + unsigned char b_end; + enum maple_type type; +}; + +struct maple_subtree_state { + struct ma_state *orig_l; + struct ma_state *orig_r; + struct ma_state *l; + struct ma_state *m; + struct ma_state *r; + struct ma_topiary *free; + struct ma_topiary *destroy; + struct maple_big_node *bn; +}; + +struct elf32_note { + Elf32_Word n_namesz; + Elf32_Word n_descsz; + Elf32_Word n_type; +}; + +struct nsset { + unsigned int flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +enum { + PROC_ROOT_INO = 1, + PROC_IPC_INIT_INO = 4026531839, + PROC_UTS_INIT_INO = 4026531838, + PROC_USER_INIT_INO = 4026531837, + PROC_PID_INIT_INO = 4026531836, + PROC_CGROUP_INIT_INO = 4026531835, + PROC_TIME_INIT_INO = 4026531834, +}; + +struct cpu_topology { + int thread_id; + int core_id; + int cluster_id; + int package_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; + cpumask_t cluster_sibling; + cpumask_t llc_sibling; +}; + +struct cpu_efficiency { + const char *compatible; + long unsigned int efficiency; +}; + +enum { + SPECTRE_UNAFFECTED = 0, + SPECTRE_MITIGATED = 1, + SPECTRE_VULNERABLE = 2, +}; + +enum { + __SPECTRE_V2_METHOD_BPIALL = 0, + __SPECTRE_V2_METHOD_ICIALLU = 1, + __SPECTRE_V2_METHOD_SMC = 2, + __SPECTRE_V2_METHOD_HVC = 3, + __SPECTRE_V2_METHOD_LOOP8 = 4, +}; + +enum { + SPECTRE_V2_METHOD_BPIALL = 1, + SPECTRE_V2_METHOD_ICIALLU = 2, + SPECTRE_V2_METHOD_SMC = 4, + SPECTRE_V2_METHOD_HVC = 8, + SPECTRE_V2_METHOD_LOOP8 = 16, +}; + +enum memblock_flags { + 
MEMBLOCK_NONE = 0, + MEMBLOCK_HOTPLUG = 1, + MEMBLOCK_MIRROR = 2, + MEMBLOCK_NOMAP = 4, + MEMBLOCK_DRIVER_MANAGED = 8, + MEMBLOCK_RSRV_NOINIT = 16, +}; + +struct memblock_region { + phys_addr_t base; + phys_addr_t size; + enum memblock_flags flags; +}; + +struct memblock_type { + long unsigned int cnt; + long unsigned int max; + phys_addr_t total_size; + struct memblock_region *regions; + char *name; +}; + +struct memblock { + bool bottom_up; + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +}; + +struct map_desc { + long unsigned int virtual; + long unsigned int pfn; + long unsigned int length; + unsigned int type; +}; + +enum { + MT_UNCACHED = 4, + MT_CACHECLEAN = 5, + MT_MINICLEAN = 6, + MT_LOW_VECTORS = 7, + MT_HIGH_VECTORS = 8, + MT_MEMORY_RWX = 9, + MT_MEMORY_RW = 10, + MT_MEMORY_RO = 11, + MT_ROM = 12, + MT_MEMORY_RWX_NONCACHED = 13, + MT_MEMORY_RW_DTCM = 14, + MT_MEMORY_RWX_ITCM = 15, + MT_MEMORY_RW_SO = 16, + MT_MEMORY_DMA_READY = 17, +}; + +struct mem_type { + pteval_t prot_pte; + pteval_t prot_pte_s2; + pmdval_t prot_l1; + pmdval_t prot_sect; + unsigned int domain; +}; + +struct static_vm { + struct vm_struct vm; + struct list_head list; +}; + +struct cachepolicy { + const char policy[16]; + unsigned int cr_mask; + pmdval_t pmd; + pteval_t pte; +}; + +enum rseq_cs_flags_bit { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +enum uclamp_id { + UCLAMP_MIN = 0, + UCLAMP_MAX = 1, + UCLAMP_CNT = 2, +}; + +struct l2c_init_data { + const char *type; + unsigned int way_size_0; + unsigned int num_lock; + void (*of_parse)(const struct device_node *, u32 *, u32 *); + void (*enable)(void *, unsigned int); + void (*fixup)(void *, u32, struct outer_cache_fns *); + void (*save)(void *); + void (*configure)(void *); + void (*unlock)(void *, unsigned int); + struct outer_cache_fns outer_cache; +}; + +typedef struct { + unsigned int __softirq_pending; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +} irq_cpustat_t; + +struct softirq_action { + void (*action)(); +}; + +enum { + TASKLET_STATE_SCHED = 0, + TASKLET_STATE_RUN = 1, +}; + +struct kernel_stat { + long unsigned int irqs_sum; + unsigned int softirqs[10]; +}; + +struct trace_event_raw_irq_handler_entry { + struct trace_entry ent; + int irq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_irq_handler_exit { + struct trace_entry ent; + int irq; + int ret; + char __data[0]; +}; + +struct trace_event_raw_softirq { + struct trace_entry ent; + unsigned int vec; + char __data[0]; +}; + +struct trace_event_raw_tasklet { + struct trace_entry ent; + void *tasklet; + void *func; + char __data[0]; +}; + +struct trace_event_data_offsets_irq_handler_entry { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_irq_handler_exit {}; + +struct trace_event_data_offsets_softirq {}; + +struct trace_event_data_offsets_tasklet {}; + +typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *); + +typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int); + +typedef void (*btf_trace_softirq_entry)(void *, unsigned int); + +typedef void (*btf_trace_softirq_exit)(void *, unsigned int); + +typedef void (*btf_trace_softirq_raise)(void *, unsigned int); + +typedef void (*btf_trace_tasklet_entry)(void *, struct 
tasklet_struct *, void *); + +typedef void (*btf_trace_tasklet_exit)(void *, struct tasklet_struct *, void *); + +struct tasklet_head { + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; + +struct radix_tree_preload { + local_lock_t lock; + unsigned int nr; + struct xa_node *nodes; +}; + +enum { + IRQ_TYPE_NONE = 0, + IRQ_TYPE_EDGE_RISING = 1, + IRQ_TYPE_EDGE_FALLING = 2, + IRQ_TYPE_EDGE_BOTH = 3, + IRQ_TYPE_LEVEL_HIGH = 4, + IRQ_TYPE_LEVEL_LOW = 8, + IRQ_TYPE_LEVEL_MASK = 12, + IRQ_TYPE_SENSE_MASK = 15, + IRQ_TYPE_DEFAULT = 15, + IRQ_TYPE_PROBE = 16, + IRQ_LEVEL = 256, + IRQ_PER_CPU = 512, + IRQ_NOPROBE = 1024, + IRQ_NOREQUEST = 2048, + IRQ_NOAUTOEN = 4096, + IRQ_NO_BALANCING = 8192, + IRQ_MOVE_PCNTXT = 16384, + IRQ_NESTED_THREAD = 32768, + IRQ_NOTHREAD = 65536, + IRQ_PER_CPU_DEVID = 131072, + IRQ_IS_POLLED = 262144, + IRQ_DISABLE_UNLAZY = 524288, + IRQ_HIDDEN = 1048576, + IRQ_NO_DEBUG = 2097152, +}; + +enum { + IRQTF_RUNTHREAD = 0, + IRQTF_WARNED = 1, + IRQTF_AFFINITY = 2, + IRQTF_FORCED_THREAD = 3, + IRQTF_READY = 4, +}; + +enum { + IRQS_AUTODETECT = 1, + IRQS_SPURIOUS_DISABLED = 2, + IRQS_POLL_INPROGRESS = 8, + IRQS_ONESHOT = 32, + IRQS_REPLAY = 64, + IRQS_WAITING = 128, + IRQS_PENDING = 512, + IRQS_SUSPENDED = 2048, + IRQS_TIMINGS = 4096, + IRQS_NMI = 8192, + IRQS_SYSFS = 16384, +}; + +enum { + _IRQ_DEFAULT_INIT_FLAGS = 3072, + _IRQ_PER_CPU = 512, + _IRQ_LEVEL = 256, + _IRQ_NOPROBE = 1024, + _IRQ_NOREQUEST = 2048, + _IRQ_NOTHREAD = 65536, + _IRQ_NOAUTOEN = 4096, + _IRQ_MOVE_PCNTXT = 16384, + _IRQ_NO_BALANCING = 8192, + _IRQ_NESTED_THREAD = 32768, + _IRQ_PER_CPU_DEVID = 131072, + _IRQ_IS_POLLED = 262144, + _IRQ_DISABLE_UNLAZY = 524288, + _IRQ_HIDDEN = 1048576, + _IRQ_NO_DEBUG = 2097152, + _IRQF_MODIFY_MASK = 2096911, +}; + +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; + long: 32; +}; + +struct process_timer { + struct timer_list timer; + struct task_struct *task; +}; + +struct tm { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long int tm_year; + int tm_wday; + int tm_yday; +}; + +typedef __kernel_timer_t timer_t; + +typedef __u64 timeu64_t; + +struct itimerspec64 { + struct timespec64 it_interval; + struct timespec64 it_value; +}; + +struct __kernel_timex_timeval { + __kernel_time64_t tv_sec; + long long int tv_usec; +}; + +struct __kernel_timex { + unsigned int modes; + long: 32; + long long int offset; + long long int freq; + long long int maxerror; + long long int esterror; + int status; + long: 32; + long long int constant; + long long int precision; + long long int tolerance; + struct __kernel_timex_timeval time; + long long int tick; + long long int ppsfreq; + long long int jitter; + int shift; + long: 32; + long long int stabil; + long long int jitcnt; + long long int calcnt; + long long int errcnt; + long long int stbcnt; + int tai; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sigqueue { + struct list_head list; + int flags; + kernel_siginfo_t info; + struct ucounts *ucounts; +}; + +struct class_interface { + struct list_head node; + const struct class *class; + int (*add_dev)(struct device *); + void (*remove_dev)(struct device *); +}; + +struct rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +struct rtc_wkalrm { + unsigned char enabled; + unsigned char pending; + struct rtc_time time; +}; + +struct 
rtc_param { + __u64 param; + union { + __u64 uvalue; + __s64 svalue; + __u64 ptr; + }; + __u32 index; + __u32 __pad; +}; + +enum alarmtimer_type { + ALARM_REALTIME = 0, + ALARM_BOOTTIME = 1, + ALARM_NUMTYPE = 2, + ALARM_REALTIME_FREEZER = 3, + ALARM_BOOTTIME_FREEZER = 4, +}; + +struct alarm { + struct timerqueue_node node; + struct hrtimer timer; + void (*function)(struct alarm *, ktime_t); + enum alarmtimer_type type; + int state; + void *data; +}; + +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct pid *pid; + struct list_head elist; + bool firing; + bool nanosleep; + struct task_struct *handling; +}; + +struct k_clock; + +struct k_itimer { + struct hlist_node list; + struct hlist_node ignored_list; + struct hlist_node t_hash; + spinlock_t it_lock; + const struct k_clock *kclock; + clockid_t it_clock; + timer_t it_id; + int it_status; + bool it_sig_periodic; + s64 it_overrun; + s64 it_overrun_last; + unsigned int it_signal_seq; + unsigned int it_sigqueue_seq; + int it_sigev_notify; + enum pid_type it_pid_type; + ktime_t it_interval; + struct signal_struct *it_signal; + union { + struct pid *it_pid; + struct task_struct *it_process; + }; + struct sigqueue sigq; + rcuref_t rcuref; + long: 32; + union { + struct { + struct hrtimer timer; + } real; + struct cpu_timer cpu; + struct { + struct alarm alarmtimer; + } alarm; + } it; + struct callback_head rcu; +}; + +struct k_clock { + int (*clock_getres)(const clockid_t, struct timespec64 *); + int (*clock_set)(const clockid_t, const struct timespec64 *); + int (*clock_get_timespec)(const clockid_t, struct timespec64 *); + ktime_t (*clock_get_ktime)(const clockid_t); + int (*clock_adj)(const clockid_t, struct __kernel_timex *); + int (*timer_create)(struct k_itimer *); + int (*nsleep)(const clockid_t, int, const struct timespec64 *); + int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); + int (*timer_del)(struct k_itimer *); + void (*timer_get)(struct k_itimer *, struct itimerspec64 *); + void (*timer_rearm)(struct k_itimer *); + s64 (*timer_forward)(struct k_itimer *, ktime_t); + ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); + int (*timer_try_to_cancel)(struct k_itimer *); + void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); + void (*timer_wait_running)(struct k_itimer *); +}; + +struct rtc_class_ops { + int (*ioctl)(struct device *, unsigned int, long unsigned int); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*alarm_irq_enable)(struct device *, unsigned int); + int (*read_offset)(struct device *, long int *); + int (*set_offset)(struct device *, long int); + int (*param_get)(struct device *, struct rtc_param *); + int (*param_set)(struct device *, struct rtc_param *); +}; + +struct rtc_device; + +struct rtc_timer { + struct timerqueue_node node; + ktime_t period; + void (*func)(struct rtc_device *); + struct rtc_device *rtc; + int enabled; + long: 32; +}; + +struct rtc_device { + struct device dev; + struct module *owner; + int id; + const struct rtc_class_ops *ops; + struct mutex ops_lock; + struct cdev char_dev; + long unsigned int flags; + long unsigned int irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + int irq_freq; + int max_user_freq; + struct 
timerqueue_head timerqueue; + struct rtc_timer aie_timer; + struct rtc_timer uie_rtctimer; + struct hrtimer pie_timer; + int pie_enabled; + struct work_struct irqwork; + long unsigned int set_offset_nsec; + long unsigned int features[1]; + long: 32; + time64_t range_min; + timeu64_t range_max; + timeu64_t alarm_offset_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; + long: 32; +}; + +struct property_entry; + +struct platform_device_info { + struct device *parent; + struct fwnode_handle *fwnode; + bool of_node_reused; + const char *name; + int id; + const struct resource *res; + unsigned int num_res; + const void *data; + size_t size_data; + long: 32; + u64 dma_mask; + const struct property_entry *properties; + long: 32; +}; + +enum dev_prop_type { + DEV_PROP_U8 = 0, + DEV_PROP_U16 = 1, + DEV_PROP_U32 = 2, + DEV_PROP_U64 = 3, + DEV_PROP_STRING = 4, + DEV_PROP_REF = 5, +}; + +struct property_entry { + const char *name; + size_t length; + bool is_inline; + enum dev_prop_type type; + union { + const void *pointer; + union { + u8 u8_data[8]; + u16 u16_data[4]; + u32 u32_data[2]; + u64 u64_data[1]; + const char *str[2]; + } value; + }; +}; + +struct platform_driver { + int (*probe)(struct platform_device *); + union { + void (*remove)(struct platform_device *); + void (*remove_new)(struct platform_device *); + }; + void (*shutdown)(struct platform_device *); + int (*suspend)(struct platform_device *, pm_message_t); + int (*resume)(struct platform_device *); + struct device_driver driver; + const struct platform_device_id *id_table; + bool prevent_deferred_probe; + bool driver_managed_dma; +}; + +struct timens_offsets { + struct timespec64 monotonic; + struct timespec64 boottime; +}; + +struct time_namespace { + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; + struct timens_offsets offsets; + struct page *vvar_page; + bool frozen_offsets; +}; + +struct trace_event_raw_alarmtimer_suspend { + struct trace_entry ent; + s64 expires; + unsigned char alarm_type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_alarm_class { + struct trace_entry ent; + void *alarm; + unsigned char alarm_type; + s64 expires; + s64 now; + char __data[0]; +}; + +struct trace_event_data_offsets_alarmtimer_suspend {}; + +struct trace_event_data_offsets_alarm_class {}; + +typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int); + +typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t); + +struct alarm_base { + spinlock_t lock; + struct timerqueue_head timerqueue; + ktime_t (*get_ktime)(); + void (*get_timespec)(struct timespec64 *); + clockid_t base_clockid; +}; + +typedef __kernel_ulong_t ino_t; + +struct kernel_clone_args { + u64 flags; + int *pidfd; + int *child_tid; + int *parent_tid; + const char *name; + int exit_signal; + u32 kthread: 1; + u32 io_thread: 1; + u32 user_worker: 1; + u32 no_files: 1; + long unsigned int stack; + long unsigned int stack_size; + long unsigned int tls; + pid_t *set_tid; + size_t set_tid_size; + int cgroup; + int idle; + int (*fn)(void *); + void *fn_arg; + struct cgroup *cgrp; + struct css_set *cset; + long: 32; +}; + +enum kernfs_node_type { + KERNFS_DIR = 1, + KERNFS_FILE = 2, + KERNFS_LINK = 4, +}; + +struct psi_window { + u64 size; + u64 start_time; + u64 start_value; + u64 prev_growth; +}; + +struct psi_trigger { + enum 
psi_states state; + long: 32; + u64 threshold; + struct list_head node; + struct psi_group *group; + wait_queue_head_t event_wait; + struct kernfs_open_file *of; + int event; + struct psi_window win; + u64 last_event_time; + bool pending_event; + enum psi_aggregators aggregator; +}; + +enum { + CGRP_NOTIFY_ON_RELEASE = 0, + CGRP_CPUSET_CLONE_CHILDREN = 1, + CGRP_FREEZE = 2, + CGRP_FROZEN = 3, + CGRP_KILL = 4, +}; + +enum { + CGRP_ROOT_NOPREFIX = 2, + CGRP_ROOT_XATTR = 4, + CGRP_ROOT_NS_DELEGATE = 8, + CGRP_ROOT_FAVOR_DYNMODS = 16, + CGRP_ROOT_CPUSET_V2_MODE = 65536, + CGRP_ROOT_MEMORY_LOCAL_EVENTS = 131072, + CGRP_ROOT_MEMORY_RECURSIVE_PROT = 262144, + CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = 524288, + CGRP_ROOT_PIDS_LOCAL_EVENTS = 1048576, +}; + +struct cgroup_taskset { + struct list_head src_csets; + struct list_head dst_csets; + int nr_tasks; + int ssid; + struct list_head *csets; + struct css_set *cur_cset; + struct task_struct *cur_task; +}; + +struct cgroup_of_peak { + long unsigned int value; + struct list_head list; +}; + +enum { + CSS_TASK_ITER_PROCS = 1, + CSS_TASK_ITER_THREADED = 2, + CSS_TASK_ITER_SKIPPED = 65536, +}; + +typedef struct fd class_fd_raw_t; + +struct cgroup_fs_context { + struct kernfs_fs_context kfc; + struct cgroup_root *root; + struct cgroup_namespace *ns; + unsigned int flags; + bool cpuset_clone_children; + bool none; + bool all_ss; + u16 subsys_mask; + char *name; + char *release_agent; +}; + +struct cgroup_pidlist; + +struct cgroup_file_ctx { + struct cgroup_namespace *ns; + struct { + void *trigger; + } psi; + struct { + bool started; + struct css_task_iter iter; + } procs; + struct { + struct cgroup_pidlist *pidlist; + } procs1; + struct cgroup_of_peak peak; +}; + +struct cgrp_cset_link { + struct cgroup *cgrp; + struct css_set *cset; + struct list_head cset_link; + struct list_head cgrp_link; +}; + +struct cgroup_mgctx { + struct list_head preloaded_src_csets; + struct list_head preloaded_dst_csets; + struct cgroup_taskset tset; + u16 ss_mask; +}; + +struct trace_event_raw_cgroup_root { + struct trace_entry ent; + int root; + u16 ss_mask; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_cgroup { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cgroup_migrate { + struct trace_entry ent; + int dst_root; + int dst_level; + u64 dst_id; + int pid; + u32 __data_loc_dst_path; + u32 __data_loc_comm; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cgroup_event { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + int val; + char __data[0]; +}; + +struct trace_event_raw_cgroup_rstat { + struct trace_entry ent; + int root; + int level; + u64 id; + int cpu; + bool contended; + char __data[0]; +}; + +struct trace_event_data_offsets_cgroup_root { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cgroup { + u32 path; + const void *path_ptr_; +}; + +struct trace_event_data_offsets_cgroup_migrate { + u32 dst_path; + const void *dst_path_ptr_; + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_cgroup_event { + u32 path; + const void *path_ptr_; +}; + +struct trace_event_data_offsets_cgroup_rstat {}; + +typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *); + +typedef void 
(*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_rstat_lock_contended)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_locked)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_unlock)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_lock_contended)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_lock_contended_fastpath)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_locked)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_locked_fastpath)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_unlock)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_unlock_fastpath)(void *, struct cgroup *, int, bool); + +enum cgroup_opt_features { + OPT_FEATURE_PRESSURE = 0, + OPT_FEATURE_COUNT = 1, +}; + +enum cgroup2_param { + Opt_nsdelegate = 0, + Opt_favordynmods = 1, + Opt_memory_localevents = 2, + Opt_memory_recursiveprot = 3, + Opt_memory_hugetlb_accounting = 4, + Opt_pids_localevents = 5, + nr__cgroup2_params = 6, +}; + +struct bpf_netns_link { + struct bpf_link link; + enum bpf_attach_type type; + enum netns_bpf_attach_type netns_type; + struct net *net; + struct list_head node; + long: 32; +}; + +enum { + IORES_DESC_NONE = 0, + IORES_DESC_CRASH_KERNEL = 1, + IORES_DESC_ACPI_TABLES = 2, + IORES_DESC_ACPI_NV_STORAGE = 3, + IORES_DESC_PERSISTENT_MEMORY = 4, + IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, + IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, + IORES_DESC_RESERVED = 7, + IORES_DESC_SOFT_RESERVED = 8, + IORES_DESC_CXL = 9, +}; + +typedef void (*dr_release_t)(struct device *, void *); + +typedef int (*dr_match_t)(struct device *, void *, void *); + +enum { + REGION_INTERSECTS = 0, + REGION_DISJOINT = 1, + REGION_MIXED = 2, +}; + +struct va_format { + const char *fmt; + va_list *va; +}; + +enum pageblock_bits { + PB_migrate = 0, + PB_migrate_end = 2, + PB_migrate_skip = 3, + NR_PAGEBLOCK_BITS = 4, +}; + +enum migratetype { + MIGRATE_UNMOVABLE = 0, + MIGRATE_MOVABLE = 1, + MIGRATE_RECLAIMABLE = 2, + MIGRATE_PCPTYPES = 3, + MIGRATE_HIGHATOMIC = 3, + MIGRATE_TYPES = 4, +}; + +enum zone_watermarks { + WMARK_MIN = 0, + WMARK_LOW = 1, + WMARK_HIGH = 2, + WMARK_PROMO = 3, + NR_WMARK = 4, +}; + +enum zone_flags { + ZONE_BOOSTED_WATERMARK = 0, + ZONE_RECLAIM_ACTIVE = 1, + ZONE_BELOW_HIGH = 2, +}; + +enum { + ZONELIST_FALLBACK = 0, + MAX_ZONELISTS = 1, +}; + +enum meminit_context { + MEMINIT_EARLY = 0, + MEMINIT_HOTPLUG = 1, +}; + +enum migrate_reason { + MR_COMPACTION = 0, + 
MR_MEMORY_FAILURE = 1, + MR_MEMORY_HOTPLUG = 2, + MR_SYSCALL = 3, + MR_MEMPOLICY_MBIND = 4, + MR_NUMA_MISPLACED = 5, + MR_CONTIG_RANGE = 6, + MR_LONGTERM_PIN = 7, + MR_DEMOTION = 8, + MR_DAMON = 9, + MR_TYPES = 10, +}; + +enum oom_constraint { + CONSTRAINT_NONE = 0, + CONSTRAINT_CPUSET = 1, + CONSTRAINT_MEMORY_POLICY = 2, + CONSTRAINT_MEMCG = 3, +}; + +struct oom_control { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct mem_cgroup *memcg; + const gfp_t gfp_mask; + const int order; + long unsigned int totalpages; + struct task_struct *chosen; + long int chosen_points; + enum oom_constraint constraint; +}; + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = 1, + CACHE_TYPE_DATA = 2, + CACHE_TYPE_SEPARATE = 3, + CACHE_TYPE_UNIFIED = 4, +}; + +struct cacheinfo { + unsigned int id; + enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; + void *fw_token; + bool disable_sysfs; + void *priv; +}; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int per_cpu_data_slice_size; + unsigned int num_levels; + unsigned int num_leaves; + bool cpu_map_populated; + bool early_ci_levels; +}; + +struct alloc_context { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct zoneref *preferred_zoneref; + int migratetype; + enum zone_type highest_zoneidx; + bool spread_dirty_pages; +}; + +typedef int fpi_t; + +struct linux_binprm; + +struct coredump_params; + +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *); + long unsigned int min_coredump; +}; + +struct linux_binprm { + struct vm_area_struct *vma; + long unsigned int vma_pages; + long unsigned int argmin; + struct mm_struct *mm; + long unsigned int p; + unsigned int have_execfd: 1; + unsigned int execfd_creds: 1; + unsigned int secureexec: 1; + unsigned int point_of_no_return: 1; + struct file *executable; + struct file *interpreter; + struct file *file; + struct cred *cred; + int unsafe; + unsigned int per_clear; + int argc; + int envc; + const char *filename; + const char *interp; + const char *fdpath; + unsigned int interp_flags; + int execfd; + long unsigned int loader; + long unsigned int exec; + struct rlimit rlim_stack; + char buf[256]; +}; + +struct fiemap { + __u64 fm_start; + __u64 fm_length; + __u32 fm_flags; + __u32 fm_mapped_extents; + __u32 fm_extent_count; + __u32 fm_reserved; + struct fiemap_extent fm_extents[0]; +}; + +struct fsuuid { + __u32 fsu_len; + __u32 fsu_flags; + __u8 fsu_uuid[0]; +}; + +struct move_extent { + __u32 reserved; + __u32 donor_fd; + __u64 orig_start; + __u64 donor_start; + __u64 len; + __u64 moved_len; +}; + +struct ext4_new_group_input { + __u32 group; + long: 32; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 unused; +}; + +struct ext4_new_group_data { + __u32 group; + long: 32; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 mdata_blocks; + __u32 free_clusters_count; + long: 32; +}; + +enum { + EXT4_FC_REASON_XATTR = 0, + EXT4_FC_REASON_CROSS_RENAME = 1, + EXT4_FC_REASON_JOURNAL_FLAG_CHANGE = 2, + EXT4_FC_REASON_NOMEM = 3, + EXT4_FC_REASON_SWAP_BOOT = 4, + 
EXT4_FC_REASON_RESIZE = 5, + EXT4_FC_REASON_RENAME_DIR = 6, + EXT4_FC_REASON_FALLOC_RANGE = 7, + EXT4_FC_REASON_INODE_JOURNAL_DATA = 8, + EXT4_FC_REASON_ENCRYPTED_FILENAME = 9, + EXT4_FC_REASON_MAX = 10, +}; + +enum ext4_journal_trigger_type { + EXT4_JTR_ORPHAN_FILE = 0, + EXT4_JTR_NONE = 1, +}; + +enum { + EXT4_STATE_NEW = 0, + EXT4_STATE_XATTR = 1, + EXT4_STATE_NO_EXPAND = 2, + EXT4_STATE_DA_ALLOC_CLOSE = 3, + EXT4_STATE_EXT_MIGRATE = 4, + EXT4_STATE_NEWENTRY = 5, + EXT4_STATE_MAY_INLINE_DATA = 6, + EXT4_STATE_EXT_PRECACHED = 7, + EXT4_STATE_LUSTRE_EA_INODE = 8, + EXT4_STATE_VERITY_IN_PROGRESS = 9, + EXT4_STATE_FC_COMMITTING = 10, + EXT4_STATE_ORPHAN_FILE = 11, +}; + +struct ext4_iloc { + struct buffer_head *bh; + long unsigned int offset; + ext4_group_t block_group; +}; + +typedef enum { + EXT4_IGET_NORMAL = 0, + EXT4_IGET_SPECIAL = 1, + EXT4_IGET_HANDLE = 2, + EXT4_IGET_BAD = 4, + EXT4_IGET_EA_INODE = 8, +} ext4_iget_flags; + +struct fsmap { + __u32 fmr_device; + __u32 fmr_flags; + __u64 fmr_physical; + __u64 fmr_owner; + __u64 fmr_offset; + __u64 fmr_length; + __u64 fmr_reserved[3]; +}; + +struct fsmap_head { + __u32 fmh_iflags; + __u32 fmh_oflags; + __u32 fmh_count; + __u32 fmh_entries; + __u64 fmh_reserved[6]; + struct fsmap fmh_keys[2]; + struct fsmap fmh_recs[0]; +}; + +struct ext4_fsmap { + struct list_head fmr_list; + dev_t fmr_device; + uint32_t fmr_flags; + uint64_t fmr_physical; + uint64_t fmr_owner; + uint64_t fmr_length; +}; + +struct ext4_fsmap_head { + uint32_t fmh_iflags; + uint32_t fmh_oflags; + unsigned int fmh_count; + unsigned int fmh_entries; + struct ext4_fsmap fmh_keys[2]; +}; + +typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *); + +typedef void ext4_update_sb_callback(struct ext4_super_block *, const void *); + +struct getfsmap_info { + struct super_block *gi_sb; + struct fsmap_head *gi_data; + unsigned int gi_idx; + __u32 gi_last_flags; +}; + +enum fid_type { + FILEID_ROOT = 0, + FILEID_INO32_GEN = 1, + FILEID_INO32_GEN_PARENT = 2, + FILEID_BTRFS_WITHOUT_PARENT = 77, + FILEID_BTRFS_WITH_PARENT = 78, + FILEID_BTRFS_WITH_PARENT_ROOT = 79, + FILEID_UDF_WITHOUT_PARENT = 81, + FILEID_UDF_WITH_PARENT = 82, + FILEID_NILFS_WITHOUT_PARENT = 97, + FILEID_NILFS_WITH_PARENT = 98, + FILEID_FAT_WITHOUT_PARENT = 113, + FILEID_FAT_WITH_PARENT = 114, + FILEID_INO64_GEN = 129, + FILEID_INO64_GEN_PARENT = 130, + FILEID_LUSTRE = 151, + FILEID_BCACHEFS_WITHOUT_PARENT = 177, + FILEID_BCACHEFS_WITH_PARENT = 178, + FILEID_KERNFS = 254, + FILEID_INVALID = 255, +}; + +struct fid { + union { + struct { + u32 ino; + u32 gen; + u32 parent_ino; + u32 parent_gen; + } i32; + struct { + u64 ino; + u32 gen; + } i64; + struct { + u32 block; + u16 partref; + u16 parent_partref; + u32 generation; + u32 parent_block; + u32 parent_generation; + } udf; + struct { + struct {} __empty_raw; + __u32 raw[0]; + }; + }; +}; + +struct msdos_dir_entry { + __u8 name[11]; + __u8 attr; + __u8 lcase; + __u8 ctime_cs; + __le16 ctime; + __le16 cdate; + __le16 adate; + __le16 starthi; + __le16 time; + __le16 date; + __le16 start; + __le32 size; +}; + +struct fat_mount_options { + kuid_t fs_uid; + kgid_t fs_gid; + short unsigned int fs_fmask; + short unsigned int fs_dmask; + short unsigned int codepage; + int time_offset; + char *iocharset; + short unsigned int shortname; + unsigned char name_check; + unsigned char errors; + unsigned char nfs; + short unsigned int allow_utime; + unsigned int quiet: 1; + unsigned int showexec: 1; + unsigned int sys_immutable: 1; + unsigned int dotsOK: 1; + unsigned int 
isvfat: 1; + unsigned int utf8: 1; + unsigned int unicode_xlate: 1; + unsigned int numtail: 1; + unsigned int flush: 1; + unsigned int nocase: 1; + unsigned int usefree: 1; + unsigned int tz_set: 1; + unsigned int rodir: 1; + unsigned int discard: 1; + unsigned int dos1xfloppy: 1; + unsigned int debug: 1; +}; + +struct fatent_operations; + +struct msdos_sb_info { + short unsigned int sec_per_clus; + short unsigned int cluster_bits; + unsigned int cluster_size; + unsigned char fats; + unsigned char fat_bits; + short unsigned int fat_start; + long unsigned int fat_length; + long unsigned int dir_start; + short unsigned int dir_entries; + long unsigned int data_start; + long unsigned int max_cluster; + long unsigned int root_cluster; + long unsigned int fsinfo_sector; + struct mutex fat_lock; + struct mutex nfs_build_inode_lock; + struct mutex s_lock; + unsigned int prev_free; + unsigned int free_clusters; + unsigned int free_clus_valid; + struct fat_mount_options options; + struct nls_table *nls_disk; + struct nls_table *nls_io; + const void *dir_ops; + int dir_per_block; + int dir_per_block_bits; + unsigned int vol_id; + int fatent_shift; + const struct fatent_operations *fatent_ops; + struct inode *fat_inode; + struct inode *fsinfo_inode; + struct ratelimit_state ratelimit; + spinlock_t inode_hash_lock; + struct hlist_head inode_hashtable[256]; + spinlock_t dir_hash_lock; + struct hlist_head dir_hashtable[256]; + unsigned int dirty; + struct callback_head rcu; +}; + +struct fat_entry; + +struct fatent_operations { + void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); + void (*ent_set_ptr)(struct fat_entry *, int); + int (*ent_bread)(struct super_block *, struct fat_entry *, int, sector_t); + int (*ent_get)(struct fat_entry *); + void (*ent_put)(struct fat_entry *, int); + int (*ent_next)(struct fat_entry *); +}; + +struct msdos_inode_info { + spinlock_t cache_lru_lock; + struct list_head cache_lru; + int nr_caches; + unsigned int cache_valid_id; + long: 32; + loff_t mmu_private; + int i_start; + int i_logstart; + int i_attrs; + long: 32; + loff_t i_pos; + struct hlist_node i_fat_hash; + struct hlist_node i_dir_hash; + struct rw_semaphore truncate_lock; + struct timespec64 i_crtime; + struct inode vfs_inode; +}; + +struct fat_slot_info { + loff_t i_pos; + loff_t slot_off; + int nr_slots; + struct msdos_dir_entry *de; + struct buffer_head *bh; + long: 32; +}; + +typedef long long unsigned int llu; + +struct fat_fid { + u32 i_gen; + u32 i_pos_low; + u16 i_pos_hi; + u16 parent_i_pos_hi; + u32 parent_i_pos_low; + u32 parent_i_gen; +}; + +struct fuse_open_out { + uint64_t fh; + uint32_t open_flags; + int32_t backing_id; +}; + +struct fuse_release_in { + uint64_t fh; + uint32_t flags; + uint32_t release_flags; + uint64_t lock_owner; +}; + +struct fuse_backing_map { + int32_t fd; + uint32_t flags; + uint64_t padding; +}; + +enum { + IOPRIO_CLASS_NONE = 0, + IOPRIO_CLASS_RT = 1, + IOPRIO_CLASS_BE = 2, + IOPRIO_CLASS_IDLE = 3, + IOPRIO_CLASS_INVALID = 7, +}; + +enum { + IOPRIO_HINT_NONE = 0, + IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1, + IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2, + IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3, + IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4, + IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5, + IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6, + IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7, +}; + +struct fuse_backing { + struct file *file; + struct cred *cred; + refcount_t count; + struct callback_head rcu; +}; + +union fuse_file_args; + +struct fuse_file { + struct fuse_mount *fm; + union fuse_file_args 
*args; + u64 kh; + u64 fh; + u64 nodeid; + refcount_t count; + u32 open_flags; + struct list_head write_entry; + struct { + loff_t pos; + loff_t cache_off; + u64 version; + } readdir; + struct rb_node polled_node; + wait_queue_head_t poll_wait; + enum { + IOM_NONE = 0, + IOM_CACHED = 1, + IOM_UNCACHED = 2, + } iomode; + struct file *passthrough; + const struct cred *cred; + bool flock: 1; +}; + +struct fuse_release_args { + struct fuse_args args; + struct fuse_release_in inarg; + struct inode *inode; + long: 32; +}; + +union fuse_file_args { + struct fuse_open_out open_outarg; + struct fuse_release_args release_args; +}; + +struct backing_file_ctx { + const struct cred *cred; + void (*accessed)(struct file *); + void (*end_write)(struct kiocb *, ssize_t); +}; + +struct btrfs_inode_ref { + __le64 index; + __le16 name_len; +} __attribute__((packed)); + +struct btrfs_root_ref { + __le64 dirid; + __le64 sequence; + __le16 name_len; +} __attribute__((packed)); + +struct btrfs_fid { + u64 objectid; + u64 root_objectid; + u32 gen; + u64 parent_objectid; + u32 parent_gen; + u64 parent_root_objectid; +}; + +struct btrfs_free_space_info { + __le32 extent_count; + __le32 flags; +}; + +enum { + READA_NONE = 0, + READA_BACK = 1, + READA_FORWARD = 2, + READA_FORWARD_ALWAYS = 3, +}; + +struct btrfs_lru_cache_entry { + struct list_head lru_list; + u64 key; + u64 gen; + struct list_head list; +}; + +struct btrfs_lru_cache { + struct list_head lru_list; + struct maple_tree entries; + unsigned int size; + unsigned int max_size; +}; + +struct btrfs_backref_shared_cache_entry { + u64 bytenr; + u64 gen; + bool is_shared; + long: 32; +}; + +struct btrfs_backref_share_check_ctx { + struct ulist refs; + long: 32; + u64 curr_leaf_bytenr; + u64 prev_leaf_bytenr; + struct btrfs_backref_shared_cache_entry path_cache_entries[8]; + bool use_path_cache; + long: 32; + struct { + u64 bytenr; + bool is_shared; + long: 32; + } prev_extents_cache[8]; + int prev_extents_cache_slot; + long: 32; +}; + +enum btrfs_ilock_type { + __BTRFS_ILOCK_SHARED_BIT = 0, + BTRFS_ILOCK_SHARED = 1, + __BTRFS_ILOCK_SHARED_SEQ = 0, + __BTRFS_ILOCK_TRY_BIT = 1, + BTRFS_ILOCK_TRY = 2, + __BTRFS_ILOCK_TRY_SEQ = 1, + __BTRFS_ILOCK_MMAP_BIT = 2, + BTRFS_ILOCK_MMAP = 4, + __BTRFS_ILOCK_MMAP_SEQ = 2, +}; + +struct btrfs_fiemap_entry { + u64 offset; + u64 phys; + u64 len; + u32 flags; + long: 32; +}; + +struct fiemap_cache { + struct btrfs_fiemap_entry *entries; + int entries_size; + int entries_pos; + long: 32; + u64 next_search_offset; + unsigned int extents_mapped; + long: 32; + u64 offset; + u64 phys; + u64 len; + u32 flags; + bool cached; +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +struct crypto_sig { + struct crypto_tfm base; +}; + +struct sig_alg { + int (*sign)(struct crypto_sig *, const void *, unsigned int, void *, unsigned int); + int (*verify)(struct crypto_sig *, const void *, unsigned int, const void *, unsigned int); + int (*set_pub_key)(struct crypto_sig *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_sig *, const void *, unsigned int); + unsigned int (*key_size)(struct crypto_sig *); + unsigned int (*digest_size)(struct crypto_sig *); + unsigned int (*max_size)(struct crypto_sig *); + int (*init)(struct crypto_sig *); + void (*exit)(struct crypto_sig *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct sig_instance { + void (*free)(struct sig_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[64]; + struct crypto_instance base; + }; + struct sig_alg alg; + }; +}; + +struct crypto_sig_spawn { + struct crypto_spawn base; +}; + +struct crypto_rng; + +struct rng_alg { + int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int); + int (*seed)(struct crypto_rng *, const u8 *, unsigned int); + void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); + unsigned int seedsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +struct parsed_partitions { + struct gendisk *disk; + char name[32]; + struct { + sector_t from; + sector_t size; + int flags; + bool has_info; + struct partition_meta_info info; + long: 32; + } *parts; + int next; + int limit; + bool access_beyond_eod; + char *pp_buf; +}; + +typedef struct { + struct folio *v; +} Sector; + +struct _gpt_header { + __le64 signature; + __le32 revision; + __le32 header_size; + __le32 header_crc32; + __le32 reserved1; + __le64 my_lba; + __le64 alternate_lba; + __le64 first_usable_lba; + __le64 last_usable_lba; + efi_guid_t disk_guid; + __le64 partition_entry_lba; + __le32 num_partition_entries; + __le32 sizeof_partition_entry; + __le32 partition_entry_array_crc32; +}; + +typedef struct _gpt_header gpt_header; + +struct _gpt_entry_attributes { + u64 required_to_function: 1; + u64 reserved: 47; + u64 type_guid_specific: 16; +}; + +typedef struct _gpt_entry_attributes gpt_entry_attributes; + +struct _gpt_entry { + efi_guid_t partition_type_guid; + efi_guid_t unique_partition_guid; + __le64 starting_lba; + __le64 ending_lba; + gpt_entry_attributes attributes; + __le16 partition_name[36]; +}; + +typedef struct _gpt_entry gpt_entry; + +struct _gpt_mbr_record { + u8 boot_indicator; + u8 start_head; + u8 start_sector; + u8 start_track; + u8 os_type; + u8 end_head; + u8 end_sector; + u8 end_track; + __le32 starting_lba; + __le32 size_in_lba; +}; + +typedef struct _gpt_mbr_record gpt_mbr_record; + +struct _legacy_mbr { + u8 boot_code[440]; + __le32 unique_mbr_signature; + __le16 unknown; + gpt_mbr_record partition_record[4]; + __le16 signature; +} __attribute__((packed)); + +typedef struct _legacy_mbr legacy_mbr; + +struct bd_holder_disk { + struct list_head list; + struct kobject *holder_dir; + int refcnt; +}; + +enum io_uring_op { + IORING_OP_NOP = 0, + IORING_OP_READV = 1, + IORING_OP_WRITEV = 2, + IORING_OP_FSYNC = 3, + IORING_OP_READ_FIXED = 4, + IORING_OP_WRITE_FIXED = 5, + IORING_OP_POLL_ADD = 6, + IORING_OP_POLL_REMOVE = 7, + IORING_OP_SYNC_FILE_RANGE = 8, + IORING_OP_SENDMSG = 9, + IORING_OP_RECVMSG = 10, + IORING_OP_TIMEOUT = 11, + IORING_OP_TIMEOUT_REMOVE = 12, + IORING_OP_ACCEPT = 13, + IORING_OP_ASYNC_CANCEL = 14, + IORING_OP_LINK_TIMEOUT = 15, + IORING_OP_CONNECT = 16, + IORING_OP_FALLOCATE = 17, + IORING_OP_OPENAT = 18, + IORING_OP_CLOSE = 19, + IORING_OP_FILES_UPDATE = 20, + IORING_OP_STATX = 21, + IORING_OP_READ = 22, + IORING_OP_WRITE = 23, + IORING_OP_FADVISE = 24, + IORING_OP_MADVISE = 25, + IORING_OP_SEND = 26, + IORING_OP_RECV = 27, + IORING_OP_OPENAT2 = 28, + IORING_OP_EPOLL_CTL = 29, + IORING_OP_SPLICE = 30, + IORING_OP_PROVIDE_BUFFERS = 31, + IORING_OP_REMOVE_BUFFERS = 32, + IORING_OP_TEE = 33, + IORING_OP_SHUTDOWN = 34, + IORING_OP_RENAMEAT = 35, + IORING_OP_UNLINKAT = 36, + 
IORING_OP_MKDIRAT = 37, + IORING_OP_SYMLINKAT = 38, + IORING_OP_LINKAT = 39, + IORING_OP_MSG_RING = 40, + IORING_OP_FSETXATTR = 41, + IORING_OP_SETXATTR = 42, + IORING_OP_FGETXATTR = 43, + IORING_OP_GETXATTR = 44, + IORING_OP_SOCKET = 45, + IORING_OP_URING_CMD = 46, + IORING_OP_SEND_ZC = 47, + IORING_OP_SENDMSG_ZC = 48, + IORING_OP_READ_MULTISHOT = 49, + IORING_OP_WAITID = 50, + IORING_OP_FUTEX_WAIT = 51, + IORING_OP_FUTEX_WAKE = 52, + IORING_OP_FUTEX_WAITV = 53, + IORING_OP_FIXED_FD_INSTALL = 54, + IORING_OP_FTRUNCATE = 55, + IORING_OP_BIND = 56, + IORING_OP_LISTEN = 57, + IORING_OP_LAST = 58, +}; + +struct io_cancel_data { + struct io_ring_ctx *ctx; + long: 32; + union { + u64 data; + struct file *file; + }; + u8 opcode; + u32 flags; + int seq; + long: 32; +}; + +struct io_timeout_data { + struct io_kiocb *req; + long: 32; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; + u32 flags; +}; + +struct io_timeout { + struct file *file; + u32 off; + u32 target_seq; + u32 repeats; + struct list_head list; + struct io_kiocb *head; + struct io_kiocb *prev; +}; + +struct io_timeout_rem { + struct file *file; + long: 32; + u64 addr; + struct timespec64 ts; + u32 flags; + bool ltimeout; +}; + +typedef mpi_limb_t *mpi_ptr_t; + +typedef int mpi_size_t; + +typedef unsigned char Byte; + +typedef long unsigned int uLong; + +struct internal_state; + +struct z_stream_s { + const Byte *next_in; + uLong avail_in; + uLong total_in; + Byte *next_out; + uLong avail_out; + uLong total_out; + char *msg; + struct internal_state *state; + void *workspace; + int data_type; + uLong adler; + uLong reserved; +}; + +struct internal_state { + int dummy; +}; + +typedef struct z_stream_s z_stream; + +typedef z_stream *z_streamp; + +typedef int __kernel_ptrdiff_t; + +typedef __kernel_ptrdiff_t ptrdiff_t; + +typedef s16 int16_t; + +typedef uint8_t BYTE; + +typedef uint8_t U8; + +typedef uint16_t U16; + +typedef int16_t S16; + +typedef uint32_t U32; + +typedef uint64_t U64; + +typedef enum { + ZSTD_fast = 1, + ZSTD_dfast = 2, + ZSTD_greedy = 3, + ZSTD_lazy = 4, + ZSTD_lazy2 = 5, + ZSTD_btlazy2 = 6, + ZSTD_btopt = 7, + ZSTD_btultra = 8, + ZSTD_btultra2 = 9, +} ZSTD_strategy; + +typedef struct { + unsigned int windowLog; + unsigned int chainLog; + unsigned int hashLog; + unsigned int searchLog; + unsigned int minMatch; + unsigned int targetLength; + ZSTD_strategy strategy; +} ZSTD_compressionParameters; + +typedef enum { + ZSTD_ps_auto = 0, + ZSTD_ps_enable = 1, + ZSTD_ps_disable = 2, +} ZSTD_paramSwitch_e; + +typedef void * (*ZSTD_allocFunction)(void *, size_t); + +typedef void (*ZSTD_freeFunction)(void *, void *); + +typedef struct { + ZSTD_allocFunction customAlloc; + ZSTD_freeFunction customFree; + void *opaque; +} ZSTD_customMem; + +typedef unsigned int FSE_CTable; + +typedef enum { + FSE_repeat_none = 0, + FSE_repeat_check = 1, + FSE_repeat_valid = 2, +} FSE_repeat; + +typedef size_t HUF_CElt; + +typedef enum { + HUF_repeat_none = 0, + HUF_repeat_check = 1, + HUF_repeat_valid = 2, +} HUF_repeat; + +typedef enum { + ZSTD_no_overlap = 0, + ZSTD_overlap_src_before_dst = 1, +} ZSTD_overlap_e; + +struct seqDef_s { + U32 offBase; + U16 litLength; + U16 mlBase; +}; + +typedef struct seqDef_s seqDef; + +typedef enum { + ZSTD_llt_none = 0, + ZSTD_llt_literalLength = 1, + ZSTD_llt_matchLength = 2, +} ZSTD_longLengthType_e; + +typedef struct { + seqDef *sequencesStart; + seqDef *sequences; + BYTE *litStart; + BYTE *lit; + BYTE *llCode; + BYTE *mlCode; + BYTE *ofCode; + size_t maxNbSeq; + size_t maxNbLit; + 
ZSTD_longLengthType_e longLengthType; + U32 longLengthPos; +} seqStore_t; + +typedef struct { + HUF_CElt CTable[257]; + HUF_repeat repeatMode; +} ZSTD_hufCTables_t; + +typedef struct { + FSE_CTable offcodeCTable[193]; + FSE_CTable matchlengthCTable[363]; + FSE_CTable litlengthCTable[329]; + FSE_repeat offcode_repeatMode; + FSE_repeat matchlength_repeatMode; + FSE_repeat litlength_repeatMode; +} ZSTD_fseCTables_t; + +typedef struct { + ZSTD_hufCTables_t huf; + ZSTD_fseCTables_t fse; +} ZSTD_entropyCTables_t; + +typedef struct { + U32 off; + U32 len; +} ZSTD_match_t; + +typedef struct { + U32 offset; + U32 litLength; + U32 matchLength; +} rawSeq; + +typedef struct { + rawSeq *seq; + size_t pos; + size_t posInSequence; + size_t size; + size_t capacity; +} rawSeqStore_t; + +typedef struct { + int price; + U32 off; + U32 mlen; + U32 litlen; + U32 rep[3]; +} ZSTD_optimal_t; + +typedef enum { + zop_dynamic = 0, + zop_predef = 1, +} ZSTD_OptPrice_e; + +typedef struct { + unsigned int *litFreq; + unsigned int *litLengthFreq; + unsigned int *matchLengthFreq; + unsigned int *offCodeFreq; + ZSTD_match_t *matchTable; + ZSTD_optimal_t *priceTable; + U32 litSum; + U32 litLengthSum; + U32 matchLengthSum; + U32 offCodeSum; + U32 litSumBasePrice; + U32 litLengthSumBasePrice; + U32 matchLengthSumBasePrice; + U32 offCodeSumBasePrice; + ZSTD_OptPrice_e priceType; + const ZSTD_entropyCTables_t *symbolCosts; + ZSTD_paramSwitch_e literalCompressionMode; +} optState_t; + +typedef struct { + const BYTE *nextSrc; + const BYTE *base; + const BYTE *dictBase; + U32 dictLimit; + U32 lowLimit; + U32 nbOverflowCorrections; +} ZSTD_window_t; + +struct ZSTD_matchState_t; + +typedef struct ZSTD_matchState_t ZSTD_matchState_t; + +struct ZSTD_matchState_t { + ZSTD_window_t window; + U32 loadedDictEnd; + U32 nextToUpdate; + U32 hashLog3; + U32 rowHashLog; + U16 *tagTable; + U32 hashCache[8]; + U32 *hashTable; + U32 *hashTable3; + U32 *chainTable; + U32 forceNonContiguous; + int dedicatedDictSearch; + optState_t opt; + const ZSTD_matchState_t *dictMatchState; + ZSTD_compressionParameters cParams; + const rawSeqStore_t *ldmSeqStore; +}; + +typedef enum { + ZSTD_dtlm_fast = 0, + ZSTD_dtlm_full = 1, +} ZSTD_dictTableLoadMethod_e; + +enum pci_dev_flags { + PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, + PCI_DEV_FLAGS_NO_D3 = 2, + PCI_DEV_FLAGS_ASSIGNED = 4, + PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, + PCI_DEV_FLAGS_NO_BUS_RESET = 64, + PCI_DEV_FLAGS_NO_PM_RESET = 128, + PCI_DEV_FLAGS_VPD_REF_F0 = 256, + PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, + PCI_DEV_FLAGS_NO_FLR_RESET = 1024, + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, + PCI_DEV_FLAGS_HAS_MSI_MASKING = 4096, +}; + +struct fb_event { + struct fb_info *info; + void *data; +}; + +struct fb_modelist { + struct list_head list; + struct fb_videomode mode; +}; + +struct virtio_device_id { + __u32 device; + __u32 vendor; +}; + +struct irq_affinity { + unsigned int pre_vectors; + unsigned int post_vectors; + unsigned int nr_sets; + unsigned int set_size[4]; + void (*calc_sets)(struct irq_affinity *, unsigned int); + void *priv; +}; + +struct virtio_pci_common_cfg { + __le32 device_feature_select; + __le32 device_feature; + __le32 guest_feature_select; + __le32 guest_feature; + __le16 msix_config; + __le16 num_queues; + __u8 device_status; + __u8 config_generation; + __le16 queue_select; + __le16 queue_size; + __le16 queue_msix_vector; + __le16 queue_enable; + __le16 queue_notify_off; + __le32 queue_desc_lo; + __le32 queue_desc_hi; + __le32 queue_avail_lo; + 
__le32 queue_avail_hi; + __le32 queue_used_lo; + __le32 queue_used_hi; +}; + +struct virtio_pci_legacy_device { + struct pci_dev *pci_dev; + u8 *isr; + void *ioaddr; + struct virtio_device_id id; +}; + +struct virtio_device; + +struct virtqueue { + struct list_head list; + void (*callback)(struct virtqueue *); + const char *name; + struct virtio_device *vdev; + unsigned int index; + unsigned int num_free; + unsigned int num_max; + bool reset; + void *priv; +}; + +struct vringh_config_ops; + +struct virtio_config_ops; + +struct virtio_device { + int index; + bool failed; + bool config_core_enabled; + bool config_driver_disabled; + bool config_change_pending; + spinlock_t config_lock; + spinlock_t vqs_list_lock; + struct device dev; + struct virtio_device_id id; + const struct virtio_config_ops *config; + const struct vringh_config_ops *vringh_config; + struct list_head vqs; + u64 features; + void *priv; + long: 32; +}; + +struct virtqueue_info; + +struct virtio_shm_region; + +struct virtio_config_ops { + void (*get)(struct virtio_device *, unsigned int, void *, unsigned int); + void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int); + u32 (*generation)(struct virtio_device *); + u8 (*get_status)(struct virtio_device *); + void (*set_status)(struct virtio_device *, u8); + void (*reset)(struct virtio_device *); + int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, struct virtqueue_info *, struct irq_affinity *); + void (*del_vqs)(struct virtio_device *); + void (*synchronize_cbs)(struct virtio_device *); + u64 (*get_features)(struct virtio_device *); + int (*finalize_features)(struct virtio_device *); + const char * (*bus_name)(struct virtio_device *); + int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *); + const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int); + bool (*get_shm_region)(struct virtio_device *, struct virtio_shm_region *, u8); + int (*disable_vq_and_reset)(struct virtqueue *); + int (*enable_vq_after_reset)(struct virtqueue *); +}; + +struct virtio_shm_region { + u64 addr; + u64 len; +}; + +typedef void vq_callback_t(struct virtqueue *); + +struct virtqueue_info { + const char *name; + vq_callback_t *callback; + bool ctx; +}; + +struct virtio_pci_modern_device { + struct pci_dev *pci_dev; + struct virtio_pci_common_cfg *common; + void *device; + void *notify_base; + resource_size_t notify_pa; + u8 *isr; + size_t notify_len; + size_t device_len; + size_t common_len; + int notify_map_cap; + u32 notify_offset_multiplier; + int modern_bars; + struct virtio_device_id id; + int (*device_id_check)(struct pci_dev *); + long: 32; + u64 dma_mask; +}; + +struct virtio_pci_vq_info { + struct virtqueue *vq; + struct list_head node; + unsigned int msix_vector; +}; + +struct virtio_pci_admin_vq { + struct virtio_pci_vq_info *info; + spinlock_t lock; + u64 supported_cmds; + u64 supported_caps; + u8 max_dev_parts_objects; + struct ida dev_parts_ida; + char name[10]; + u16 vq_index; + long: 32; +}; + +struct virtio_pci_device { + struct virtio_device vdev; + struct pci_dev *pci_dev; + long: 32; + union { + struct virtio_pci_legacy_device ldev; + struct virtio_pci_modern_device mdev; + }; + bool is_legacy; + u8 *isr; + spinlock_t lock; + struct list_head virtqueues; + struct list_head slow_virtqueues; + struct virtio_pci_vq_info **vqs; + struct virtio_pci_admin_vq admin_vq; + int msix_enabled; + int intx_enabled; + cpumask_var_t *msix_affinity_masks; + char (*msix_names)[256]; + unsigned int msix_vectors; + unsigned 
int msix_used_vectors; + bool per_vq_vectors; + struct virtqueue * (*setup_vq)(struct virtio_pci_device *, struct virtio_pci_vq_info *, unsigned int, void (*)(struct virtqueue *), const char *, bool, u16); + void (*del_vq)(struct virtio_pci_vq_info *); + u16 (*config_vector)(struct virtio_pci_device *, u16); + int (*avq_index)(struct virtio_device *, u16 *, u16 *); + long: 32; +}; + +struct mctrl_gpios; + +struct uart_8250_dma; + +struct uart_8250_ops; + +struct uart_8250_em485; + +struct uart_8250_port { + struct uart_port port; + struct timer_list timer; + struct list_head list; + u32 capabilities; + u16 bugs; + unsigned int tx_loadsz; + unsigned char acr; + unsigned char fcr; + unsigned char ier; + unsigned char lcr; + unsigned char mcr; + unsigned char cur_iotype; + unsigned int rpm_tx_active; + unsigned char canary; + unsigned char probe; + struct mctrl_gpios *gpios; + u16 lsr_saved_flags; + u16 lsr_save_mask; + unsigned char msr_saved_flags; + struct uart_8250_dma *dma; + const struct uart_8250_ops *ops; + u32 (*dl_read)(struct uart_8250_port *); + void (*dl_write)(struct uart_8250_port *, u32); + struct uart_8250_em485 *em485; + void (*rs485_start_tx)(struct uart_8250_port *); + void (*rs485_stop_tx)(struct uart_8250_port *); + struct delayed_work overrun_backoff; + u32 overrun_backoff_time_ms; +}; + +struct uart_8250_ops { + int (*setup_irq)(struct uart_8250_port *); + void (*release_irq)(struct uart_8250_port *); + void (*setup_timer)(struct uart_8250_port *); +}; + +struct uart_8250_em485 { + struct hrtimer start_tx_timer; + struct hrtimer stop_tx_timer; + struct hrtimer *active_timer; + struct uart_8250_port *port; + unsigned int tx_stopped: 1; + long: 32; +}; + +struct uart_8250_dma { + int (*tx_dma)(struct uart_8250_port *); + int (*rx_dma)(struct uart_8250_port *); + void (*prepare_tx_dma)(struct uart_8250_port *); + void (*prepare_rx_dma)(struct uart_8250_port *); + dma_filter_fn fn; + void *rx_param; + void *tx_param; + struct dma_slave_config rxconf; + struct dma_slave_config txconf; + struct dma_chan *rxchan; + struct dma_chan *txchan; + phys_addr_t rx_dma_addr; + phys_addr_t tx_dma_addr; + dma_addr_t rx_addr; + dma_addr_t tx_addr; + dma_cookie_t rx_cookie; + dma_cookie_t tx_cookie; + void *rx_buf; + size_t rx_size; + size_t tx_size; + unsigned char tx_running; + unsigned char tx_err; + unsigned char rx_running; +}; + +enum dma_transaction_type { + DMA_MEMCPY = 0, + DMA_XOR = 1, + DMA_PQ = 2, + DMA_XOR_VAL = 3, + DMA_PQ_VAL = 4, + DMA_MEMSET = 5, + DMA_MEMSET_SG = 6, + DMA_INTERRUPT = 7, + DMA_PRIVATE = 8, + DMA_ASYNC_TX = 9, + DMA_SLAVE = 10, + DMA_CYCLIC = 11, + DMA_INTERLEAVE = 12, + DMA_COMPLETION_NO_ORDER = 13, + DMA_REPEAT = 14, + DMA_LOAD_EOT = 15, + DMA_TX_TYPE_END = 16, +}; + +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +struct serial8250_config { + const char *name; + short unsigned int fifo_size; + short unsigned int tx_loadsz; + unsigned char fcr; + unsigned char rxtrig_bytes[4]; + unsigned int flags; +}; + +struct driver_private { + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + struct module_kobject *mkobj; + struct device_driver *driver; +}; + +struct device_private { + struct klist klist_children; + struct klist_node knode_parent; + struct klist_node knode_driver; + struct klist_node knode_bus; + struct klist_node knode_class; + struct list_head deferred_probe; + const struct device_driver *async_driver; + char *deferred_probe_reason; + struct device *device; + u8 dead: 1; +}; + +enum 
scsi_devinfo_key { + SCSI_DEVINFO_GLOBAL = 0, + SCSI_DEVINFO_SPI = 1, +}; + +struct scsi_dev_info_list { + struct list_head dev_info_list; + char vendor[8]; + char model[16]; + blist_flags_t flags; + unsigned int compatible; + long: 32; +}; + +struct scsi_dev_info_list_table { + struct list_head node; + struct list_head scsi_dev_info_list; + const char *name; + int key; +}; + +enum device_link_state { + DL_STATE_NONE = -1, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE = 1, + DL_STATE_CONSUMER_PROBE = 2, + DL_STATE_ACTIVE = 3, + DL_STATE_SUPPLIER_UNBIND = 4, +}; + +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + struct device link_dev; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; + struct work_struct rm_work; + bool supplier_preactivated; + long: 32; +}; + +struct mii_bus; + +struct mdio_device { + struct device dev; + struct mii_bus *bus; + char modalias[32]; + int (*bus_match)(struct device *, const struct device_driver *); + void (*device_free)(struct mdio_device *); + void (*device_remove)(struct mdio_device *); + int addr; + int flags; + int reset_state; + struct gpio_desc *reset_gpio; + struct reset_control *reset_ctrl; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; + long: 32; +}; + +struct phy_c45_device_ids { + u32 devices_in_package; + u32 mmds_present; + u32 device_ids[32]; +}; + +enum phy_state { + PHY_DOWN = 0, + PHY_READY = 1, + PHY_HALTED = 2, + PHY_ERROR = 3, + PHY_UP = 4, + PHY_RUNNING = 5, + PHY_NOLINK = 6, + PHY_CABLETEST = 7, +}; + +typedef enum { + PHY_INTERFACE_MODE_NA = 0, + PHY_INTERFACE_MODE_INTERNAL = 1, + PHY_INTERFACE_MODE_MII = 2, + PHY_INTERFACE_MODE_GMII = 3, + PHY_INTERFACE_MODE_SGMII = 4, + PHY_INTERFACE_MODE_TBI = 5, + PHY_INTERFACE_MODE_REVMII = 6, + PHY_INTERFACE_MODE_RMII = 7, + PHY_INTERFACE_MODE_REVRMII = 8, + PHY_INTERFACE_MODE_RGMII = 9, + PHY_INTERFACE_MODE_RGMII_ID = 10, + PHY_INTERFACE_MODE_RGMII_RXID = 11, + PHY_INTERFACE_MODE_RGMII_TXID = 12, + PHY_INTERFACE_MODE_RTBI = 13, + PHY_INTERFACE_MODE_SMII = 14, + PHY_INTERFACE_MODE_XGMII = 15, + PHY_INTERFACE_MODE_XLGMII = 16, + PHY_INTERFACE_MODE_MOCA = 17, + PHY_INTERFACE_MODE_PSGMII = 18, + PHY_INTERFACE_MODE_QSGMII = 19, + PHY_INTERFACE_MODE_TRGMII = 20, + PHY_INTERFACE_MODE_100BASEX = 21, + PHY_INTERFACE_MODE_1000BASEX = 22, + PHY_INTERFACE_MODE_2500BASEX = 23, + PHY_INTERFACE_MODE_5GBASER = 24, + PHY_INTERFACE_MODE_RXAUI = 25, + PHY_INTERFACE_MODE_XAUI = 26, + PHY_INTERFACE_MODE_10GBASER = 27, + PHY_INTERFACE_MODE_25GBASER = 28, + PHY_INTERFACE_MODE_USXGMII = 29, + PHY_INTERFACE_MODE_10GKR = 30, + PHY_INTERFACE_MODE_QUSGMII = 31, + PHY_INTERFACE_MODE_1000BASEKX = 32, + PHY_INTERFACE_MODE_10G_QXGMII = 33, + PHY_INTERFACE_MODE_MAX = 34, +} phy_interface_t; + +struct eee_config { + u32 tx_lpi_timer; + bool tx_lpi_enabled; + bool eee_enabled; +}; + +struct phylink; + +struct pse_control; + +struct phy_driver; + +struct phy_package_shared; + +struct mii_timestamper; + +struct phy_device { + struct mdio_device mdio; + const struct phy_driver *drv; + struct device_link *devlink; + u32 phyindex; + u32 phy_id; + struct phy_c45_device_ids c45_ids; + unsigned int is_c45: 1; + unsigned int is_internal: 1; + unsigned int is_pseudo_fixed_link: 1; + unsigned int is_gigabit_capable: 1; + unsigned int has_fixups: 1; + unsigned int suspended: 1; + unsigned int suspended_by_mdio_bus: 1; + unsigned int sysfs_links: 1; + unsigned int loopback_enabled: 1; + unsigned 
int downshifted_rate: 1; + unsigned int is_on_sfp_module: 1; + unsigned int mac_managed_pm: 1; + unsigned int wol_enabled: 1; + unsigned int autoneg: 1; + unsigned int link: 1; + unsigned int autoneg_complete: 1; + unsigned int interrupts: 1; + unsigned int irq_suspended: 1; + unsigned int irq_rerun: 1; + unsigned int default_timestamp: 1; + int rate_matching; + enum phy_state state; + u32 dev_flags; + phy_interface_t interface; + long unsigned int possible_interfaces[2]; + int speed; + int duplex; + int port; + int pause; + int asym_pause; + u8 master_slave_get; + u8 master_slave_set; + u8 master_slave_state; + long unsigned int supported[4]; + long unsigned int advertising[4]; + long unsigned int lp_advertising[4]; + long unsigned int adv_old[4]; + long unsigned int supported_eee[4]; + long unsigned int advertising_eee[4]; + long unsigned int eee_broken_modes[4]; + bool enable_tx_lpi; + bool eee_active; + struct eee_config eee_cfg; + long unsigned int host_interfaces[2]; + struct list_head leds; + int irq; + void *priv; + struct phy_package_shared *shared; + struct sk_buff *skb; + void *ehdr; + struct nlattr *nest; + struct delayed_work state_queue; + struct mutex lock; + bool sfp_bus_attached; + struct sfp_bus *sfp_bus; + struct phylink *phylink; + struct net_device *attached_dev; + struct mii_timestamper *mii_ts; + struct pse_control *psec; + u8 mdix; + u8 mdix_ctrl; + int pma_extable; + unsigned int link_down_events; + void (*phy_link_change)(struct phy_device *, bool); + void (*adjust_link)(struct net_device *); + long: 32; +}; + +struct phy_plca_cfg { + int version; + int enabled; + int node_id; + int node_cnt; + int to_tmr; + int burst_cnt; + int burst_tmr; +}; + +struct phy_plca_status { + bool pst; +}; + +struct phy_tdr_config { + u32 first; + u32 last; + u32 step; + s8 pair; +}; + +enum led_brightness { + LED_OFF = 0, + LED_ON = 1, + LED_HALF = 127, + LED_FULL = 255, +}; + +struct mdio_bus_stats { + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t writes; + u64_stats_t reads; + struct u64_stats_sync syncp; + long: 32; +}; + +struct mii_bus { + struct module *owner; + const char *name; + char id[61]; + void *priv; + int (*read)(struct mii_bus *, int, int); + int (*write)(struct mii_bus *, int, int, u16); + int (*read_c45)(struct mii_bus *, int, int, int); + int (*write_c45)(struct mii_bus *, int, int, int, u16); + int (*reset)(struct mii_bus *); + struct mdio_bus_stats stats[32]; + struct mutex mdio_lock; + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED = 2, + MDIOBUS_UNREGISTERED = 3, + MDIOBUS_RELEASED = 4, + } state; + long: 32; + struct device dev; + struct mdio_device *mdio_map[32]; + u32 phy_mask; + u32 phy_ignore_ta_mask; + int irq[32]; + int reset_delay_us; + int reset_post_delay_us; + struct gpio_desc *reset_gpiod; + struct mutex shared_lock; + struct phy_package_shared *shared[32]; +}; + +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; + +struct mii_timestamper { + bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int); + void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int); + int (*hwtstamp)(struct mii_timestamper *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); + void (*link_state)(struct mii_timestamper *, struct phy_device *); + int (*ts_info)(struct mii_timestamper *, struct kernel_ethtool_ts_info *); + struct device *device; +}; + +struct phy_package_shared { + u8 base_addr; + struct device_node *np; + refcount_t refcnt; + long unsigned int flags; + size_t 
priv_size; + void *priv; +}; + +struct phy_driver { + struct mdio_driver_common mdiodrv; + u32 phy_id; + char *name; + u32 phy_id_mask; + const long unsigned int * const features; + u32 flags; + const void *driver_data; + int (*soft_reset)(struct phy_device *); + int (*config_init)(struct phy_device *); + int (*probe)(struct phy_device *); + int (*get_features)(struct phy_device *); + int (*get_rate_matching)(struct phy_device *, phy_interface_t); + int (*suspend)(struct phy_device *); + int (*resume)(struct phy_device *); + int (*config_aneg)(struct phy_device *); + int (*aneg_done)(struct phy_device *); + int (*read_status)(struct phy_device *); + int (*config_intr)(struct phy_device *); + irqreturn_t (*handle_interrupt)(struct phy_device *); + void (*remove)(struct phy_device *); + int (*match_phy_device)(struct phy_device *); + int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*link_change_notify)(struct phy_device *); + int (*read_mmd)(struct phy_device *, int, u16); + int (*write_mmd)(struct phy_device *, int, u16, u16); + int (*read_page)(struct phy_device *); + int (*write_page)(struct phy_device *, int); + int (*module_info)(struct phy_device *, struct ethtool_modinfo *); + int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); + int (*cable_test_start)(struct phy_device *); + int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *); + int (*cable_test_get_status)(struct phy_device *, bool *); + int (*get_sset_count)(struct phy_device *); + void (*get_strings)(struct phy_device *, u8 *); + void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); + int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); + int (*set_loopback)(struct phy_device *, bool); + int (*get_sqi)(struct phy_device *); + int (*get_sqi_max)(struct phy_device *); + int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *); + int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); + int (*led_brightness_set)(struct phy_device *, u8, enum led_brightness); + int (*led_blink_set)(struct phy_device *, u8, long unsigned int *, long unsigned int *); + int (*led_hw_is_supported)(struct phy_device *, u8, long unsigned int); + int (*led_hw_control_set)(struct phy_device *, u8, long unsigned int); + int (*led_hw_control_get)(struct phy_device *, u8, long unsigned int *); + int (*led_polarity_set)(struct phy_device *, int, long unsigned int); +}; + +struct phy_setting { + u32 speed; + u8 duplex; + u8 bit; +}; + +enum { + DUMP_PREFIX_NONE = 0, + DUMP_PREFIX_ADDRESS = 1, + DUMP_PREFIX_OFFSET = 2, +}; + +enum msix_fh_int_causes { + MSIX_FH_INT_CAUSES_Q0 = 1, + MSIX_FH_INT_CAUSES_Q1 = 2, + MSIX_FH_INT_CAUSES_D2S_CH0_NUM = 65536, + MSIX_FH_INT_CAUSES_D2S_CH1_NUM = 131072, + MSIX_FH_INT_CAUSES_S2D = 524288, + MSIX_FH_INT_CAUSES_FH_ERR = 2097152, +}; + +enum msix_hw_int_causes { + MSIX_HW_INT_CAUSES_REG_ALIVE = 1, + MSIX_HW_INT_CAUSES_REG_WAKEUP = 2, + MSIX_HW_INT_CAUSES_REG_IML = 2, + MSIX_HW_INT_CAUSES_REG_RESET_DONE = 4, + MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR = 8, + MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ = 32, + MSIX_HW_INT_CAUSES_REG_CT_KILL = 64, + MSIX_HW_INT_CAUSES_REG_RF_KILL = 128, + MSIX_HW_INT_CAUSES_REG_PERIODIC = 256, + MSIX_HW_INT_CAUSES_REG_SW_ERR = 33554432, + MSIX_HW_INT_CAUSES_REG_SCD = 
67108864, + MSIX_HW_INT_CAUSES_REG_FH_TX = 134217728, + MSIX_HW_INT_CAUSES_REG_HW_ERR = 536870912, + MSIX_HW_INT_CAUSES_REG_HAP = 1073741824, +}; + +enum iwl_fw_ini_allocation_id { + IWL_FW_INI_ALLOCATION_INVALID = 0, + IWL_FW_INI_ALLOCATION_ID_DBGC1 = 1, + IWL_FW_INI_ALLOCATION_ID_DBGC2 = 2, + IWL_FW_INI_ALLOCATION_ID_DBGC3 = 3, + IWL_FW_INI_ALLOCATION_ID_DBGC4 = 4, + IWL_FW_INI_ALLOCATION_NUM = 5, +}; + +enum iwl_fw_dbg_reg_operator { + CSR_ASSIGN = 0, + CSR_SETBIT = 1, + CSR_CLEARBIT = 2, + PRPH_ASSIGN = 3, + PRPH_SETBIT = 4, + PRPH_CLEARBIT = 5, + INDIRECT_ASSIGN = 6, + INDIRECT_SETBIT = 7, + INDIRECT_CLEARBIT = 8, + PRPH_BLOCKBIT = 9, +}; + +enum iwl_fw_dbg_monitor_mode { + SMEM_MODE = 0, + EXTERNAL_MODE = 1, + MARBH_MODE = 2, + MIPI_MODE = 3, +}; + +enum iwl_fw_error_dump_type { + IWL_FW_ERROR_DUMP_CSR = 1, + IWL_FW_ERROR_DUMP_RXF = 2, + IWL_FW_ERROR_DUMP_TXCMD = 3, + IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4, + IWL_FW_ERROR_DUMP_FW_MONITOR = 5, + IWL_FW_ERROR_DUMP_PRPH = 6, + IWL_FW_ERROR_DUMP_TXF = 7, + IWL_FW_ERROR_DUMP_FH_REGS = 8, + IWL_FW_ERROR_DUMP_MEM = 9, + IWL_FW_ERROR_DUMP_ERROR_INFO = 10, + IWL_FW_ERROR_DUMP_RB = 11, + IWL_FW_ERROR_DUMP_PAGING = 12, + IWL_FW_ERROR_DUMP_RADIO_REG = 13, + IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14, + IWL_FW_ERROR_DUMP_EXTERNAL = 15, + IWL_FW_ERROR_DUMP_MEM_CFG = 16, + IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17, +}; + +struct iwl_fw_error_dump_data { + __le32 type; + __le32 len; + __u8 data[0]; +}; + +struct iwl_fw_error_dump_txcmd { + __le32 cmdlen; + __le32 caplen; + u8 data[0]; +}; + +struct iwl_fw_error_dump_fw_mon { + __le32 fw_mon_wr_ptr; + __le32 fw_mon_base_ptr; + __le32 fw_mon_cycle_cnt; + __le32 fw_mon_base_high_ptr; + __le32 reserved[2]; + u8 data[0]; +}; + +struct iwl_fw_error_dump_rb { + __le32 index; + __le32 rxq; + __le32 reserved; + u8 data[0]; +}; + +struct iwl_fw_error_dump_paging { + __le32 index; + __le32 reserved; + u8 data[0]; +}; + +struct iwl_trans_dump_data { + u32 len; + u8 data[0]; +}; + +struct iwl_trans_rxq_dma_data { + u64 fr_bd_cb; + u32 fr_bd_wid; + long: 32; + u64 urbd_stts_wrptr; + u64 ur_bd_cb; +}; + +enum { + LMPM_CHICK_EXTENDED_ADDR_SPACE = 1, +}; + +enum iwl_debug_cmds { + LMAC_RD_WR = 0, + UMAC_RD_WR = 1, + HOST_EVENT_CFG = 3, + INVALID_WR_PTR_CMD = 6, + DBGC_SUSPEND_RESUME = 7, + BUFFER_ALLOCATION = 8, + GET_TAS_STATUS = 10, + FW_DUMP_COMPLETE_CMD = 11, + FW_CLEAR_BUFFER = 13, + MFU_ASSERT_DUMP_NTF = 254, +}; + +struct iwl_rb_status { + __le16 closed_rb_num; + __le16 closed_fr_num; + __le16 finished_rb_num; + __le16 finished_fr_num; + __le32 __spare; +}; + +struct iwl_tfd_tb { + __le32 lo; + __le16 hi_n_len; +} __attribute__((packed)); + +struct iwl_tfd { + u8 __reserved1[3]; + u8 num_tbs; + struct iwl_tfd_tb tbs[20]; + __le32 __pad; +}; + +enum iwl_shared_irq_flags { + IWL_SHARED_IRQ_NON_RX = 1, + IWL_SHARED_IRQ_FIRST_RSS = 2, +}; + +struct iwl_context_info_gen3 { + __le16 version; + __le16 size; + __le32 config; + __le64 prph_info_base_addr; + __le64 cr_head_idx_arr_base_addr; + __le64 tr_tail_idx_arr_base_addr; + __le64 cr_tail_idx_arr_base_addr; + __le64 tr_head_idx_arr_base_addr; + __le16 cr_idx_arr_size; + __le16 tr_idx_arr_size; + __le64 mtr_base_addr; + __le64 mcr_base_addr; + __le16 mtr_size; + __le16 mcr_size; + __le16 mtr_doorbell_vec; + __le16 mcr_doorbell_vec; + __le16 mtr_msi_vec; + __le16 mcr_msi_vec; + u8 mtr_opt_header_size; + u8 mtr_opt_footer_size; + u8 mcr_opt_header_size; + u8 mcr_opt_footer_size; + __le16 msg_rings_ctrl_flags; + __le16 prph_info_msi_vec; + __le64 prph_scratch_base_addr; + __le32 
prph_scratch_size; + __le32 reserved; +}; + +struct iwl_prph_info { + __le32 boot_stage_mirror; + __le32 ipc_status_mirror; + __le32 sleep_notif; + __le32 reserved; +}; + +struct iwl_prph_scratch_version { + __le16 mac_id; + __le16 version; + __le16 size; + __le16 reserved; +}; + +struct iwl_prph_scratch_control { + __le32 control_flags; + __le32 reserved; +}; + +struct iwl_prph_scratch_pnvm_cfg { + __le64 pnvm_base_addr; + __le32 pnvm_size; + __le32 reserved; +}; + +struct iwl_prph_scratch_hwm_cfg { + __le64 hwm_base_addr; + __le32 hwm_size; + __le32 debug_token_config; +}; + +struct iwl_prph_scratch_rbd_cfg { + __le64 free_rbd_addr; + __le32 reserved; +}; + +struct iwl_prph_scratch_uefi_cfg { + __le64 base_addr; + __le32 size; + __le32 reserved; +}; + +struct iwl_prph_scratch_step_cfg { + __le32 mbx_addr_0; + __le32 mbx_addr_1; +}; + +struct iwl_prph_scratch_ctrl_cfg { + struct iwl_prph_scratch_version version; + struct iwl_prph_scratch_control control; + struct iwl_prph_scratch_pnvm_cfg pnvm_cfg; + struct iwl_prph_scratch_hwm_cfg hwm_cfg; + struct iwl_prph_scratch_rbd_cfg rbd_cfg; + struct iwl_prph_scratch_uefi_cfg reduce_power_cfg; + struct iwl_prph_scratch_step_cfg step_cfg; +}; + +struct iwl_prph_scratch { + struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; + __le32 fseq_override; + __le32 step_analog_params; + __le32 reserved[8]; + struct iwl_context_info_dram dram; +}; + +struct iwl_causes_list { + u16 mask_reg; + u8 bit; + u8 addr; +}; + +struct iwl_trans_pcie_removal { + struct pci_dev *pdev; + struct work_struct work; + bool rescan; +}; + +struct iwl_fw_channel_info_v1 { + u8 band; + u8 channel; + u8 width; + u8 ctrl_pos; +}; + +struct iwl_fw_channel_info { + __le32 channel; + u8 band; + u8 width; + u8 ctrl_pos; + u8 reserved; +}; + +struct iwl_phy_context_cmd_tail { + __le32 txchain_info; + __le32 rxchain_info; + __le32 acquisition_data; + __le32 dsp_cfg_flags; +}; + +struct iwl_phy_context_cmd_v1 { + __le32 id_and_color; + __le32 action; + __le32 apply_time; + __le32 tx_param_color; + struct iwl_fw_channel_info ci; + struct iwl_phy_context_cmd_tail tail; +}; + +struct iwl_phy_context_cmd { + __le32 id_and_color; + __le32 action; + struct iwl_fw_channel_info ci; + __le32 lmac_id; + union { + __le32 rxchain_info; + struct { + u8 sbb_bandwidth; + u8 sbb_ctrl_channel_loc; + __le16 puncture_mask; + }; + }; + __le32 dsp_cfg_flags; + __le32 reserved; +}; + +enum iwl_rlc_chain_info { + IWL_RLC_CHAIN_INFO_DRIVER_FORCE = 1, + IWL_RLC_CHAIN_INFO_VALID = 14, + IWL_RLC_CHAIN_INFO_FORCE = 112, + IWL_RLC_CHAIN_INFO_FORCE_MIMO = 896, + IWL_RLC_CHAIN_INFO_COUNT = 3072, + IWL_RLC_CHAIN_INFO_MIMO_COUNT = 12288, +}; + +struct iwl_rlc_properties { + __le32 rx_chain_info; + __le32 reserved; +}; + +struct iwl_sad_properties { + __le32 chain_a_sad_mode; + __le32 chain_b_sad_mode; + __le32 mac_id; + __le32 reserved; +}; + +struct iwl_rlc_config_cmd { + __le32 phy_id; + struct iwl_rlc_properties rlc; + struct iwl_sad_properties sad; + u8 flags; + u8 reserved[3]; +}; + +struct ieee80211_rts { + __le16 frame_control; + __le16 duration; + u8 ra[6]; + u8 ta[6]; +}; + +struct ieee80211_cts { + __le16 frame_control; + __le16 duration; + u8 ra[6]; +}; + +enum ieee80211_bss_change { + BSS_CHANGED_ASSOC = 1ULL, + BSS_CHANGED_ERP_CTS_PROT = 2ULL, + BSS_CHANGED_ERP_PREAMBLE = 4ULL, + BSS_CHANGED_ERP_SLOT = 8ULL, + BSS_CHANGED_HT = 16ULL, + BSS_CHANGED_BASIC_RATES = 32ULL, + BSS_CHANGED_BEACON_INT = 64ULL, + BSS_CHANGED_BSSID = 128ULL, + BSS_CHANGED_BEACON = 256ULL, + BSS_CHANGED_BEACON_ENABLED = 512ULL, + 
BSS_CHANGED_CQM = 1024ULL, + BSS_CHANGED_IBSS = 2048ULL, + BSS_CHANGED_ARP_FILTER = 4096ULL, + BSS_CHANGED_QOS = 8192ULL, + BSS_CHANGED_IDLE = 16384ULL, + BSS_CHANGED_SSID = 32768ULL, + BSS_CHANGED_AP_PROBE_RESP = 65536ULL, + BSS_CHANGED_PS = 131072ULL, + BSS_CHANGED_TXPOWER = 262144ULL, + BSS_CHANGED_P2P_PS = 524288ULL, + BSS_CHANGED_BEACON_INFO = 1048576ULL, + BSS_CHANGED_BANDWIDTH = 2097152ULL, + BSS_CHANGED_OCB = 4194304ULL, + BSS_CHANGED_MU_GROUPS = 8388608ULL, + BSS_CHANGED_KEEP_ALIVE = 16777216ULL, + BSS_CHANGED_MCAST_RATE = 33554432ULL, + BSS_CHANGED_FTM_RESPONDER = 67108864ULL, + BSS_CHANGED_TWT = 134217728ULL, + BSS_CHANGED_HE_OBSS_PD = 268435456ULL, + BSS_CHANGED_HE_BSS_COLOR = 536870912ULL, + BSS_CHANGED_FILS_DISCOVERY = 1073741824ULL, + BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 2147483648ULL, + BSS_CHANGED_MLD_VALID_LINKS = 8589934592ULL, + BSS_CHANGED_MLD_TTLM = 17179869184ULL, + BSS_CHANGED_TPE = 34359738368ULL, +}; + +enum mac80211_tx_info_flags { + IEEE80211_TX_CTL_REQ_TX_STATUS = 1, + IEEE80211_TX_CTL_ASSIGN_SEQ = 2, + IEEE80211_TX_CTL_NO_ACK = 4, + IEEE80211_TX_CTL_CLEAR_PS_FILT = 8, + IEEE80211_TX_CTL_FIRST_FRAGMENT = 16, + IEEE80211_TX_CTL_SEND_AFTER_DTIM = 32, + IEEE80211_TX_CTL_AMPDU = 64, + IEEE80211_TX_CTL_INJECTED = 128, + IEEE80211_TX_STAT_TX_FILTERED = 256, + IEEE80211_TX_STAT_ACK = 512, + IEEE80211_TX_STAT_AMPDU = 1024, + IEEE80211_TX_STAT_AMPDU_NO_BACK = 2048, + IEEE80211_TX_CTL_RATE_CTRL_PROBE = 4096, + IEEE80211_TX_INTFL_OFFCHAN_TX_OK = 8192, + IEEE80211_TX_CTL_HW_80211_ENCAP = 16384, + IEEE80211_TX_INTFL_RETRIED = 32768, + IEEE80211_TX_INTFL_DONT_ENCRYPT = 65536, + IEEE80211_TX_CTL_NO_PS_BUFFER = 131072, + IEEE80211_TX_CTL_MORE_FRAMES = 262144, + IEEE80211_TX_INTFL_RETRANSMISSION = 524288, + IEEE80211_TX_INTFL_MLME_CONN_TX = 1048576, + IEEE80211_TX_INTFL_NL80211_FRAME_TX = 2097152, + IEEE80211_TX_CTL_LDPC = 4194304, + IEEE80211_TX_CTL_STBC = 25165824, + IEEE80211_TX_CTL_TX_OFFCHAN = 33554432, + IEEE80211_TX_INTFL_TKIP_MIC_FAILURE = 67108864, + IEEE80211_TX_CTL_NO_CCK_RATE = 134217728, + IEEE80211_TX_STATUS_EOSP = 268435456, + IEEE80211_TX_CTL_USE_MINRATE = 536870912, + IEEE80211_TX_CTL_DONTFRAG = 1073741824, + IEEE80211_TX_STAT_NOACK_TRANSMITTED = 2147483648, +}; + +enum mac80211_rx_encoding { + RX_ENC_LEGACY = 0, + RX_ENC_HT = 1, + RX_ENC_VHT = 2, + RX_ENC_HE = 3, + RX_ENC_EHT = 4, +}; + +enum ieee80211_filter_flags { + FIF_ALLMULTI = 2, + FIF_FCSFAIL = 4, + FIF_PLCPFAIL = 8, + FIF_BCN_PRBRESP_PROMISC = 16, + FIF_CONTROL = 32, + FIF_OTHER_BSS = 64, + FIF_PSPOLL = 128, + FIF_PROBE_REQ = 256, + FIF_MCAST_ACTION = 512, +}; + +enum rt2x00_chip_intf { + RT2X00_CHIP_INTF_PCI = 0, + RT2X00_CHIP_INTF_PCIE = 1, + RT2X00_CHIP_INTF_USB = 2, + RT2X00_CHIP_INTF_SOC = 3, +}; + +struct rt2x00_chip { + u16 rt; + u16 rf; + u16 rev; + enum rt2x00_chip_intf intf; +}; + +struct rf_channel; + +struct channel_info; + +struct hw_mode_spec { + unsigned int supported_bands; + unsigned int supported_rates; + unsigned int num_channels; + const struct rf_channel *channels; + const struct channel_info *channels_info; + struct ieee80211_sta_ht_cap ht; +}; + +enum antenna { + ANTENNA_SW_DIVERSITY = 0, + ANTENNA_A = 1, + ANTENNA_B = 2, + ANTENNA_HW_DIVERSITY = 3, +}; + +struct antenna_setup { + enum antenna rx; + enum antenna tx; + u8 rx_chain_num; + u8 tx_chain_num; +}; + +union csr { + void *base; + void *cache; +}; + +struct link_qual { + int rssi; + int false_cca; + u8 vgc_level; + u8 vgc_level_reg; + int rx_success; + int rx_failed; + int tx_success; + int tx_failed; +}; + +struct 
link_ant { + unsigned int flags; + struct antenna_setup active; + int rssi_history; + struct ewma_rssi rssi_ant; +}; + +struct link { + u32 count; + struct link_qual qual; + struct link_ant ant; + struct ewma_rssi avg_rssi; + struct delayed_work work; + struct delayed_work watchdog_work; + unsigned int watchdog_interval; + unsigned int watchdog; +}; + +struct rt2x00_ops; + +struct rt2x00_chan_survey; + +struct data_queue; + +struct rt2x00_dev { + struct device *dev; + const struct rt2x00_ops *ops; + void *drv_data; + struct ieee80211_hw *hw; + struct ieee80211_supported_band bands[6]; + struct rt2x00_chan_survey *chan_survey; + enum nl80211_band curr_band; + int curr_freq; + long unsigned int flags; + long unsigned int cap_flags; + int irq; + const char *name; + struct rt2x00_chip chip; + struct hw_mode_spec spec; + struct antenna_setup default_ant; + union csr csr; + struct mutex csr_mutex; + struct mutex conf_mutex; + unsigned int packet_filter; + unsigned int intf_ap_count; + unsigned int intf_sta_count; + unsigned int intf_associated; + unsigned int intf_beaconing; + struct ieee80211_iface_limit if_limits_ap; + struct ieee80211_iface_combination if_combinations[1]; + struct link link; + __le16 *eeprom; + u32 *rf; + short int lna_gain; + u16 tx_power; + u8 short_retry; + u8 long_retry; + u8 rssi_offset; + u8 freq_offset; + u16 aid; + u16 beacon_int; + u16 rxdma_busy; + u16 txdma_busy; + long unsigned int last_beacon; + struct ieee80211_low_level_stats low_level_stats; + struct workqueue_struct *workqueue; + struct work_struct intf_work; + struct work_struct rxdone_work; + struct work_struct txdone_work; + struct delayed_work autowakeup_work; + struct work_struct sleep_work; + unsigned int data_queues; + struct data_queue *rx; + struct data_queue *tx; + struct data_queue *bcn; + struct data_queue *atim; + const struct firmware *fw; + struct { + union { + struct __kfifo kfifo; + u32 *type; + const u32 *const_type; + char (*rectype)[0]; + u32 *ptr; + const u32 *ptr_const; + }; + u32 buf[0]; + } txstatus_fifo; + long: 32; + struct hrtimer txstatus_timer; + struct tasklet_struct txstatus_tasklet; + struct tasklet_struct pretbtt_tasklet; + struct tasklet_struct tbtt_tasklet; + struct tasklet_struct rxdone_tasklet; + struct tasklet_struct autowake_tasklet; + int rf_channel; + spinlock_t irqmask_lock; + struct list_head bar_list; + spinlock_t bar_list_lock; + unsigned int extra_tx_headroom; + struct usb_anchor *anchor; + unsigned int num_proto_errs; + struct clk *clk; + long: 32; +}; + +enum tsf_sync { + TSF_SYNC_NONE = 0, + TSF_SYNC_INFRA = 1, + TSF_SYNC_ADHOC = 2, + TSF_SYNC_AP_NONE = 3, +}; + +enum dev_state { + STATE_DEEP_SLEEP = 0, + STATE_SLEEP = 1, + STATE_STANDBY = 2, + STATE_AWAKE = 3, + STATE_RADIO_ON = 4, + STATE_RADIO_OFF = 5, + STATE_RADIO_IRQ_ON = 6, + STATE_RADIO_IRQ_OFF = 7, +}; + +enum ifs { + IFS_BACKOFF = 0, + IFS_SIFS = 1, + IFS_NEW_BACKOFF = 2, + IFS_NONE = 3, +}; + +enum txop { + TXOP_HTTXOP = 0, + TXOP_PIFS = 1, + TXOP_SIFS = 2, + TXOP_BACKOFF = 3, +}; + +enum cipher { + CIPHER_NONE = 0, + CIPHER_WEP64 = 1, + CIPHER_WEP128 = 2, + CIPHER_TKIP = 3, + CIPHER_AES = 4, + CIPHER_CKIP64 = 5, + CIPHER_CKIP128 = 6, + CIPHER_TKIP_NO_MIC = 7, + CIPHER_MAX = 4, +}; + +enum rate_modulation { + RATE_MODE_CCK = 0, + RATE_MODE_OFDM = 1, + RATE_MODE_HT_MIX = 2, + RATE_MODE_HT_GREENFIELD = 3, +}; + +enum data_queue_qid { + QID_AC_VO = 0, + QID_AC_VI = 1, + QID_AC_BE = 2, + QID_AC_BK = 3, + QID_HCCA = 4, + QID_MGMT = 13, + QID_RX = 14, + QID_OTHER = 15, + QID_BEACON = 16, + QID_ATIM = 17, 
+}; + +struct rxdone_entry_desc { + u64 timestamp; + int signal; + int rssi; + int size; + int flags; + int dev_flags; + u16 rate_mode; + u16 enc_flags; + enum mac80211_rx_encoding encoding; + enum rate_info_bw bw; + u8 cipher; + u8 cipher_status; + __le32 iv[2]; + __le32 icv; +}; + +struct txentry_desc { + long unsigned int flags; + u16 length; + u16 header_length; + union { + struct { + u16 length_high; + u16 length_low; + u16 signal; + u16 service; + enum ifs ifs; + } plcp; + struct { + u16 mcs; + u8 stbc; + u8 ba_size; + u8 mpdu_density; + enum txop txop; + int wcid; + } ht; + } u; + enum rate_modulation rate_mode; + short int retry_limit; + enum cipher cipher; + u16 key_idx; + u16 iv_offset; + u16 iv_len; +}; + +enum queue_entry_flags { + ENTRY_BCN_ASSIGNED = 0, + ENTRY_BCN_ENABLED = 1, + ENTRY_OWNER_DEVICE_DATA = 2, + ENTRY_DATA_PENDING = 3, + ENTRY_DATA_IO_FAILED = 4, + ENTRY_DATA_STATUS_PENDING = 5, +}; + +struct queue_entry { + long unsigned int flags; + long unsigned int last_action; + struct data_queue *queue; + struct sk_buff *skb; + unsigned int entry_idx; + void *priv_data; +}; + +struct data_queue { + struct rt2x00_dev *rt2x00dev; + struct queue_entry *entries; + enum data_queue_qid qid; + long unsigned int flags; + struct mutex status_lock; + spinlock_t tx_lock; + spinlock_t index_lock; + unsigned int count; + short unsigned int limit; + short unsigned int threshold; + short unsigned int length; + short unsigned int index[3]; + short unsigned int wd_count; + unsigned int wd_idx; + short unsigned int txop; + short unsigned int aifs; + short unsigned int cw_min; + short unsigned int cw_max; + short unsigned int data_size; + unsigned char desc_size; + unsigned char winfo_size; + short unsigned int priv_size; + short unsigned int usb_endpoint; + short unsigned int usb_maxpacket; +}; + +enum queue_index { + Q_INDEX = 0, + Q_INDEX_DMA_DONE = 1, + Q_INDEX_DONE = 2, + Q_INDEX_MAX = 3, +}; + +struct rf_channel { + int channel; + u32 rf1; + u32 rf2; + u32 rf3; + u32 rf4; +}; + +struct rt2x00_chan_survey { + u64 time_idle; + u64 time_busy; + u64 time_ext_busy; +}; + +struct channel_info { + unsigned int flags; + short int max_power; + short int default_power1; + short int default_power2; + short int default_power3; +}; + +enum rt2x00_delayed_flags { + DELAYED_UPDATE_BEACON = 0, +}; + +struct rt2x00_intf { + struct mutex beacon_skb_mutex; + struct queue_entry *beacon; + bool enable_beacon; + long unsigned int delayed_flags; + atomic_t seqno; +}; + +struct rt2x00lib_conf { + struct ieee80211_conf *conf; + struct rf_channel rf; + struct channel_info channel; +}; + +struct rt2x00lib_erp { + int short_preamble; + int cts_protection; + u32 basic_rates; + int slot_time; + short int sifs; + short int pifs; + short int difs; + short int eifs; + u16 beacon_int; + u16 ht_opmode; +}; + +struct rt2x00lib_crypto { + enum cipher cipher; + enum set_key_cmd cmd; + const u8 *address; + u32 bssidx; + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; + int wcid; +}; + +struct rt2x00intf_conf { + enum nl80211_iftype type; + enum tsf_sync sync; + __le32 mac[2]; + __le32 bssid[2]; +}; + +struct rt2x00_sta { + int wcid; +}; + +struct rt2x00lib_ops { + irq_handler_t irq_handler; + void (*txstatus_tasklet)(struct tasklet_struct *); + void (*pretbtt_tasklet)(struct tasklet_struct *); + void (*tbtt_tasklet)(struct tasklet_struct *); + void (*rxdone_tasklet)(struct tasklet_struct *); + void (*autowake_tasklet)(struct tasklet_struct *); + int (*probe_hw)(struct rt2x00_dev *); + char * (*get_firmware_name)(struct 
rt2x00_dev *); + int (*check_firmware)(struct rt2x00_dev *, const u8 *, const size_t); + int (*load_firmware)(struct rt2x00_dev *, const u8 *, const size_t); + int (*initialize)(struct rt2x00_dev *); + void (*uninitialize)(struct rt2x00_dev *); + bool (*get_entry_state)(struct queue_entry *); + void (*clear_entry)(struct queue_entry *); + int (*set_device_state)(struct rt2x00_dev *, enum dev_state); + int (*rfkill_poll)(struct rt2x00_dev *); + void (*link_stats)(struct rt2x00_dev *, struct link_qual *); + void (*reset_tuner)(struct rt2x00_dev *, struct link_qual *); + void (*link_tuner)(struct rt2x00_dev *, struct link_qual *, const u32); + void (*gain_calibration)(struct rt2x00_dev *); + void (*vco_calibration)(struct rt2x00_dev *); + void (*watchdog)(struct rt2x00_dev *); + void (*start_queue)(struct data_queue *); + void (*kick_queue)(struct data_queue *); + void (*stop_queue)(struct data_queue *); + void (*flush_queue)(struct data_queue *, bool); + void (*tx_dma_done)(struct queue_entry *); + void (*write_tx_desc)(struct queue_entry *, struct txentry_desc *); + void (*write_tx_data)(struct queue_entry *, struct txentry_desc *); + void (*write_beacon)(struct queue_entry *, struct txentry_desc *); + void (*clear_beacon)(struct queue_entry *); + int (*get_tx_data_len)(struct queue_entry *); + void (*fill_rxdone)(struct queue_entry *, struct rxdone_entry_desc *); + int (*config_shared_key)(struct rt2x00_dev *, struct rt2x00lib_crypto *, struct ieee80211_key_conf *); + int (*config_pairwise_key)(struct rt2x00_dev *, struct rt2x00lib_crypto *, struct ieee80211_key_conf *); + void (*config_filter)(struct rt2x00_dev *, const unsigned int); + void (*config_intf)(struct rt2x00_dev *, struct rt2x00_intf *, struct rt2x00intf_conf *, const unsigned int); + void (*config_erp)(struct rt2x00_dev *, struct rt2x00lib_erp *, u32); + void (*config_ant)(struct rt2x00_dev *, struct antenna_setup *); + void (*config)(struct rt2x00_dev *, struct rt2x00lib_conf *, const unsigned int); + void (*pre_reset_hw)(struct rt2x00_dev *); + int (*sta_add)(struct rt2x00_dev *, struct ieee80211_vif *, struct ieee80211_sta *); + int (*sta_remove)(struct rt2x00_dev *, struct ieee80211_sta *); +}; + +struct rt2x00_ops { + const char *name; + const unsigned int drv_data_size; + const unsigned int max_ap_intf; + const unsigned int eeprom_size; + const unsigned int rf_size; + const unsigned int tx_queues; + void (*queue_init)(struct data_queue *); + const struct rt2x00lib_ops *lib; + const void *drv; + const struct ieee80211_ops *hw; +}; + +enum rt2x00_state_flags { + DEVICE_STATE_PRESENT = 0, + DEVICE_STATE_REGISTERED_HW = 1, + DEVICE_STATE_INITIALIZED = 2, + DEVICE_STATE_STARTED = 3, + DEVICE_STATE_ENABLED_RADIO = 4, + DEVICE_STATE_SCANNING = 5, + DEVICE_STATE_FLUSHING = 6, + DEVICE_STATE_RESET = 7, + CONFIG_CHANNEL_HT40 = 8, + CONFIG_POWERSAVING = 9, + CONFIG_HT_DISABLED = 10, + CONFIG_MONITORING = 11, + TX_STATUS_READING = 12, +}; + +enum rt2x00_capability_flags { + REQUIRE_FIRMWARE = 0, + REQUIRE_BEACON_GUARD = 1, + REQUIRE_ATIM_QUEUE = 2, + REQUIRE_DMA = 3, + REQUIRE_COPY_IV = 4, + REQUIRE_L2PAD = 5, + REQUIRE_TXSTATUS_FIFO = 6, + REQUIRE_TASKLET_CONTEXT = 7, + REQUIRE_SW_SEQNO = 8, + REQUIRE_HT_TX_DESC = 9, + REQUIRE_PS_AUTOWAKE = 10, + REQUIRE_DELAYED_RFKILL = 11, + CAPABILITY_HW_BUTTON = 12, + CAPABILITY_HW_CRYPTO = 13, + CAPABILITY_POWER_LIMIT = 14, + CAPABILITY_CONTROL_FILTERS = 15, + CAPABILITY_CONTROL_FILTER_PSPOLL = 16, + CAPABILITY_PRE_TBTT_INTERRUPT = 17, + CAPABILITY_LINK_TUNING = 18, + CAPABILITY_FRAME_TYPE = 
19, + CAPABILITY_RF_SEQUENCE = 20, + CAPABILITY_EXTERNAL_LNA_A = 21, + CAPABILITY_EXTERNAL_LNA_BG = 22, + CAPABILITY_DOUBLE_ANTENNA = 23, + CAPABILITY_BT_COEXIST = 24, + CAPABILITY_VCO_RECALIBRATION = 25, + CAPABILITY_EXTERNAL_PA_TX0 = 26, + CAPABILITY_EXTERNAL_PA_TX1 = 27, + CAPABILITY_RESTART_HW = 28, +}; + +enum { + IF_COMB_AP = 0, + NUM_IF_COMB = 1, +}; + +struct usb_sg_request { + int status; + size_t bytes; + spinlock_t lock; + struct usb_device *dev; + int pipe; + int entries; + struct urb **urbs; + int count; + struct completion complete; +}; + +struct us_data; + +struct us_unusual_dev { + const char *vendorName; + const char *productName; + __u8 useProtocol; + __u8 useTransport; + int (*initFunction)(struct us_data *); +}; + +typedef int (*trans_cmnd)(struct scsi_cmnd *, struct us_data *); + +typedef int (*trans_reset)(struct us_data *); + +typedef void (*proto_cmnd)(struct scsi_cmnd *, struct us_data *); + +typedef void (*extra_data_destructor)(void *); + +typedef void (*pm_hook)(struct us_data *, int); + +struct us_data { + struct mutex dev_mutex; + struct usb_device *pusb_dev; + struct usb_interface *pusb_intf; + const struct us_unusual_dev *unusual_dev; + u64 fflags; + long unsigned int dflags; + unsigned int send_bulk_pipe; + unsigned int recv_bulk_pipe; + unsigned int send_ctrl_pipe; + unsigned int recv_ctrl_pipe; + unsigned int recv_intr_pipe; + char *transport_name; + char *protocol_name; + __le32 bcs_signature; + u8 subclass; + u8 protocol; + u8 max_lun; + u8 ifnum; + u8 ep_bInterval; + trans_cmnd transport; + trans_reset transport_reset; + proto_cmnd proto_handler; + struct scsi_cmnd *srb; + unsigned int tag; + char scsi_name[32]; + struct urb *current_urb; + struct usb_ctrlrequest *cr; + struct usb_sg_request current_sg; + unsigned char *iobuf; + dma_addr_t iobuf_dma; + struct task_struct *ctl_thread; + struct completion cmnd_ready; + struct completion notify; + wait_queue_head_t delay_wait; + struct delayed_work scan_dwork; + void *extra; + extra_data_destructor extra_destructor; + pm_hook suspend_resume_hook; + int use_last_sector_hacks; + int last_sector_retries; +}; + +enum hwmon_sensor_types { + hwmon_chip = 0, + hwmon_temp = 1, + hwmon_in = 2, + hwmon_curr = 3, + hwmon_power = 4, + hwmon_energy = 5, + hwmon_humidity = 6, + hwmon_fan = 7, + hwmon_pwm = 8, + hwmon_intrusion = 9, + hwmon_max = 10, +}; + +enum hwmon_temp_attributes { + hwmon_temp_enable = 0, + hwmon_temp_input = 1, + hwmon_temp_type = 2, + hwmon_temp_lcrit = 3, + hwmon_temp_lcrit_hyst = 4, + hwmon_temp_min = 5, + hwmon_temp_min_hyst = 6, + hwmon_temp_max = 7, + hwmon_temp_max_hyst = 8, + hwmon_temp_crit = 9, + hwmon_temp_crit_hyst = 10, + hwmon_temp_emergency = 11, + hwmon_temp_emergency_hyst = 12, + hwmon_temp_alarm = 13, + hwmon_temp_lcrit_alarm = 14, + hwmon_temp_min_alarm = 15, + hwmon_temp_max_alarm = 16, + hwmon_temp_crit_alarm = 17, + hwmon_temp_emergency_alarm = 18, + hwmon_temp_fault = 19, + hwmon_temp_offset = 20, + hwmon_temp_label = 21, + hwmon_temp_lowest = 22, + hwmon_temp_highest = 23, + hwmon_temp_reset_history = 24, + hwmon_temp_rated_min = 25, + hwmon_temp_rated_max = 26, + hwmon_temp_beep = 27, +}; + +enum hwmon_in_attributes { + hwmon_in_enable = 0, + hwmon_in_input = 1, + hwmon_in_min = 2, + hwmon_in_max = 3, + hwmon_in_lcrit = 4, + hwmon_in_crit = 5, + hwmon_in_average = 6, + hwmon_in_lowest = 7, + hwmon_in_highest = 8, + hwmon_in_reset_history = 9, + hwmon_in_label = 10, + hwmon_in_alarm = 11, + hwmon_in_min_alarm = 12, + hwmon_in_max_alarm = 13, + hwmon_in_lcrit_alarm = 14, + 
hwmon_in_crit_alarm = 15, + hwmon_in_rated_min = 16, + hwmon_in_rated_max = 17, + hwmon_in_beep = 18, + hwmon_in_fault = 19, +}; + +enum hwmon_curr_attributes { + hwmon_curr_enable = 0, + hwmon_curr_input = 1, + hwmon_curr_min = 2, + hwmon_curr_max = 3, + hwmon_curr_lcrit = 4, + hwmon_curr_crit = 5, + hwmon_curr_average = 6, + hwmon_curr_lowest = 7, + hwmon_curr_highest = 8, + hwmon_curr_reset_history = 9, + hwmon_curr_label = 10, + hwmon_curr_alarm = 11, + hwmon_curr_min_alarm = 12, + hwmon_curr_max_alarm = 13, + hwmon_curr_lcrit_alarm = 14, + hwmon_curr_crit_alarm = 15, + hwmon_curr_rated_min = 16, + hwmon_curr_rated_max = 17, + hwmon_curr_beep = 18, +}; + +enum hwmon_power_attributes { + hwmon_power_enable = 0, + hwmon_power_average = 1, + hwmon_power_average_interval = 2, + hwmon_power_average_interval_max = 3, + hwmon_power_average_interval_min = 4, + hwmon_power_average_highest = 5, + hwmon_power_average_lowest = 6, + hwmon_power_average_max = 7, + hwmon_power_average_min = 8, + hwmon_power_input = 9, + hwmon_power_input_highest = 10, + hwmon_power_input_lowest = 11, + hwmon_power_reset_history = 12, + hwmon_power_accuracy = 13, + hwmon_power_cap = 14, + hwmon_power_cap_hyst = 15, + hwmon_power_cap_max = 16, + hwmon_power_cap_min = 17, + hwmon_power_min = 18, + hwmon_power_max = 19, + hwmon_power_crit = 20, + hwmon_power_lcrit = 21, + hwmon_power_label = 22, + hwmon_power_alarm = 23, + hwmon_power_cap_alarm = 24, + hwmon_power_min_alarm = 25, + hwmon_power_max_alarm = 26, + hwmon_power_lcrit_alarm = 27, + hwmon_power_crit_alarm = 28, + hwmon_power_rated_min = 29, + hwmon_power_rated_max = 30, +}; + +struct hwmon_ops { + umode_t visible; + umode_t (*is_visible)(const void *, enum hwmon_sensor_types, u32, int); + int (*read)(struct device *, enum hwmon_sensor_types, u32, int, long int *); + int (*read_string)(struct device *, enum hwmon_sensor_types, u32, int, const char **); + int (*write)(struct device *, enum hwmon_sensor_types, u32, int, long int); +}; + +struct hwmon_channel_info { + enum hwmon_sensor_types type; + const u32 *config; +}; + +struct hwmon_chip_info { + const struct hwmon_ops *ops; + const struct hwmon_channel_info * const *info; +}; + +enum power_supply_property { + POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE = 1, + POWER_SUPPLY_PROP_HEALTH = 2, + POWER_SUPPLY_PROP_PRESENT = 3, + POWER_SUPPLY_PROP_ONLINE = 4, + POWER_SUPPLY_PROP_AUTHENTIC = 5, + POWER_SUPPLY_PROP_TECHNOLOGY = 6, + POWER_SUPPLY_PROP_CYCLE_COUNT = 7, + POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, + POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, + POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, + POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, + POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, + POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, + POWER_SUPPLY_PROP_CURRENT_MAX = 16, + POWER_SUPPLY_PROP_CURRENT_NOW = 17, + POWER_SUPPLY_PROP_CURRENT_AVG = 18, + POWER_SUPPLY_PROP_CURRENT_BOOT = 19, + POWER_SUPPLY_PROP_POWER_NOW = 20, + POWER_SUPPLY_PROP_POWER_AVG = 21, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, + POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, + POWER_SUPPLY_PROP_CHARGE_FULL = 24, + POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, + POWER_SUPPLY_PROP_CHARGE_NOW = 26, + POWER_SUPPLY_PROP_CHARGE_AVG = 27, + POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, + 
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34, + POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD = 35, + POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD = 36, + POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR = 37, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 38, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT = 39, + POWER_SUPPLY_PROP_INPUT_POWER_LIMIT = 40, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 41, + POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 42, + POWER_SUPPLY_PROP_ENERGY_FULL = 43, + POWER_SUPPLY_PROP_ENERGY_EMPTY = 44, + POWER_SUPPLY_PROP_ENERGY_NOW = 45, + POWER_SUPPLY_PROP_ENERGY_AVG = 46, + POWER_SUPPLY_PROP_CAPACITY = 47, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 48, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 49, + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN = 50, + POWER_SUPPLY_PROP_CAPACITY_LEVEL = 51, + POWER_SUPPLY_PROP_TEMP = 52, + POWER_SUPPLY_PROP_TEMP_MAX = 53, + POWER_SUPPLY_PROP_TEMP_MIN = 54, + POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 55, + POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 56, + POWER_SUPPLY_PROP_TEMP_AMBIENT = 57, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 58, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 59, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 60, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 61, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 62, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 63, + POWER_SUPPLY_PROP_TYPE = 64, + POWER_SUPPLY_PROP_USB_TYPE = 65, + POWER_SUPPLY_PROP_SCOPE = 66, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 67, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 68, + POWER_SUPPLY_PROP_CALIBRATE = 69, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR = 70, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH = 71, + POWER_SUPPLY_PROP_MANUFACTURE_DAY = 72, + POWER_SUPPLY_PROP_MODEL_NAME = 73, + POWER_SUPPLY_PROP_MANUFACTURER = 74, + POWER_SUPPLY_PROP_SERIAL_NUMBER = 75, +}; + +enum power_supply_type { + POWER_SUPPLY_TYPE_UNKNOWN = 0, + POWER_SUPPLY_TYPE_BATTERY = 1, + POWER_SUPPLY_TYPE_UPS = 2, + POWER_SUPPLY_TYPE_MAINS = 3, + POWER_SUPPLY_TYPE_USB = 4, + POWER_SUPPLY_TYPE_USB_DCP = 5, + POWER_SUPPLY_TYPE_USB_CDP = 6, + POWER_SUPPLY_TYPE_USB_ACA = 7, + POWER_SUPPLY_TYPE_USB_TYPE_C = 8, + POWER_SUPPLY_TYPE_USB_PD = 9, + POWER_SUPPLY_TYPE_USB_PD_DRP = 10, + POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, + POWER_SUPPLY_TYPE_WIRELESS = 12, +}; + +union power_supply_propval { + int intval; + const char *strval; +}; + +struct power_supply; + +struct power_supply_desc { + const char *name; + enum power_supply_type type; + u8 charge_behaviours; + u32 usb_types; + const enum power_supply_property *properties; + size_t num_properties; + int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *); + int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *); + int (*property_is_writeable)(struct power_supply *, enum power_supply_property); + void (*external_power_changed)(struct power_supply *); + void (*set_charged)(struct power_supply *); + bool no_thermal; + int use_for_apm; +}; + +struct power_supply_battery_info; + +struct power_supply { + const struct power_supply_desc *desc; + char **supplied_to; + size_t num_supplicants; + char **supplied_from; + size_t num_supplies; + struct device_node *of_node; + void *drv_data; + long: 32; + struct device dev; + struct work_struct changed_work; + struct delayed_work deferred_register_work; + spinlock_t changed_lock; + bool changed; + bool initialized; + bool removing; + atomic_t use_cnt; + struct power_supply_battery_info *battery_info; + struct thermal_zone_device *tzd; 
+ struct thermal_cooling_device *tcd; + long: 32; +}; + +struct power_supply_maintenance_charge_table; + +struct power_supply_battery_ocv_table; + +struct power_supply_resistance_temp_table; + +struct power_supply_vbat_ri_table; + +struct power_supply_battery_info { + unsigned int technology; + int energy_full_design_uwh; + int charge_full_design_uah; + int voltage_min_design_uv; + int voltage_max_design_uv; + int tricklecharge_current_ua; + int precharge_current_ua; + int precharge_voltage_max_uv; + int charge_term_current_ua; + int charge_restart_voltage_uv; + int overvoltage_limit_uv; + int constant_charge_current_max_ua; + int constant_charge_voltage_max_uv; + const struct power_supply_maintenance_charge_table *maintenance_charge; + int maintenance_charge_size; + int alert_low_temp_charge_current_ua; + int alert_low_temp_charge_voltage_uv; + int alert_high_temp_charge_current_ua; + int alert_high_temp_charge_voltage_uv; + int factory_internal_resistance_uohm; + int factory_internal_resistance_charging_uohm; + int ocv_temp[20]; + int temp_ambient_alert_min; + int temp_ambient_alert_max; + int temp_alert_min; + int temp_alert_max; + int temp_min; + int temp_max; + const struct power_supply_battery_ocv_table *ocv_table[20]; + int ocv_table_size[20]; + const struct power_supply_resistance_temp_table *resist_table; + int resist_table_size; + const struct power_supply_vbat_ri_table *vbat2ri_discharging; + int vbat2ri_discharging_size; + const struct power_supply_vbat_ri_table *vbat2ri_charging; + int vbat2ri_charging_size; + int bti_resistance_ohm; + int bti_resistance_tolerance; +}; + +struct power_supply_battery_ocv_table { + int ocv; + int capacity; +}; + +struct power_supply_resistance_temp_table { + int temp; + int resistance; +}; + +struct power_supply_vbat_ri_table { + int vbat_uv; + int ri_uohm; +}; + +struct power_supply_maintenance_charge_table { + int charge_current_max_ua; + int charge_voltage_max_uv; + int charge_safety_timer_minutes; +}; + +struct power_supply_hwmon { + struct power_supply *psy; + long unsigned int *props; +}; + +struct hwmon_type_attr_list { + const u32 *attrs; + size_t n_attrs; +}; + +enum dm_uevent_type { + DM_UEVENT_PATH_FAILED = 0, + DM_UEVENT_PATH_REINSTATED = 1, +}; + +struct dm_uevent { + struct mapped_device *md; + enum kobject_action action; + struct kobj_uevent_env ku_env; + struct list_head elist; + char name[128]; + char uuid[129]; +}; + +struct cpuidle_state_usage { + long long unsigned int disable; + long long unsigned int usage; + u64 time_ns; + long long unsigned int above; + long long unsigned int below; + long long unsigned int rejected; + long long unsigned int s2idle_usage; + long long unsigned int s2idle_time; +}; + +struct cpuidle_device; + +struct cpuidle_driver; + +struct cpuidle_state { + char name[16]; + char desc[32]; + s64 exit_latency_ns; + s64 target_residency_ns; + unsigned int flags; + unsigned int exit_latency; + int power_usage; + unsigned int target_residency; + int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); + void (*enter_dead)(struct cpuidle_device *, int); + int (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); + long: 32; +}; + +struct cpuidle_driver_kobj; + +struct cpuidle_state_kobj; + +struct cpuidle_device_kobj; + +struct cpuidle_device { + unsigned int registered: 1; + unsigned int enabled: 1; + unsigned int poll_time_limit: 1; + unsigned int cpu; + ktime_t next_hrtimer; + int last_state_idx; + long: 32; + u64 last_residency_ns; + u64 poll_limit_ns; + u64 
forced_idle_latency_limit_ns; + struct cpuidle_state_usage states_usage[10]; + struct cpuidle_state_kobj *kobjs[10]; + struct cpuidle_driver_kobj *kobj_driver; + struct cpuidle_device_kobj *kobj_dev; + struct list_head device_list; +}; + +struct cpuidle_driver { + const char *name; + struct module *owner; + unsigned int bctimer: 1; + long: 32; + struct cpuidle_state states[10]; + int state_count; + int safe_state_index; + struct cpumask *cpumask; + const char *governor; +}; + +struct cpuidle_governor { + char name[16]; + struct list_head governor_list; + unsigned int rating; + int (*enable)(struct cpuidle_driver *, struct cpuidle_device *); + void (*disable)(struct cpuidle_driver *, struct cpuidle_device *); + int (*select)(struct cpuidle_driver *, struct cpuidle_device *, bool *); + void (*reflect)(struct cpuidle_device *, int); +}; + +struct of_timer_irq { + int irq; + int index; + const char *name; + long unsigned int flags; + irq_handler_t handler; +}; + +struct of_timer_base { + void *base; + const char *name; + int index; +}; + +struct of_timer_clk { + struct clk *clk; + const char *name; + int index; + long unsigned int rate; + long unsigned int period; +}; + +struct timer_of { + unsigned int flags; + struct device_node *np; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct clock_event_device clkevt; + struct of_timer_base of_base; + struct of_timer_irq of_irq; + struct of_timer_clk of_clk; + void *private_data; + long: 32; + long: 32; +}; + +struct amba_cs_uci_id { + unsigned int devarch; + unsigned int devarch_mask; + unsigned int devtype; + void *data; +}; + +struct amba_device { + struct device dev; + struct resource res; + struct clk *pclk; + struct device_dma_parameters dma_parms; + unsigned int periphid; + struct mutex periphid_lock; + unsigned int cid; + struct amba_cs_uci_id uci; + unsigned int irq[9]; + const char *driver_override; + long: 32; +}; + +struct of_dev_auxdata { + char *compatible; + resource_size_t phys_addr; + char *name; + void *platform_data; +}; + +enum { + BPF_F_RECOMPUTE_CSUM = 1, + BPF_F_INVALIDATE_HASH = 2, +}; + +enum { + BPF_F_HDR_FIELD_MASK = 15, +}; + +enum { + BPF_F_PSEUDO_HDR = 16, + BPF_F_MARK_MANGLED_0 = 32, + BPF_F_MARK_ENFORCE = 64, +}; + +enum { + BPF_F_TUNINFO_IPV6 = 1, +}; + +enum { + BPF_F_ZERO_CSUM_TX = 2, + BPF_F_DONT_FRAGMENT = 4, + BPF_F_SEQ_NUMBER = 8, + BPF_F_NO_TUNNEL_KEY = 16, +}; + +enum { + BPF_F_TUNINFO_FLAGS = 16, +}; + +enum { + BPF_F_INDEX_MASK = 4294967295ULL, + BPF_F_CURRENT_CPU = 4294967295ULL, + BPF_F_CTXLEN_MASK = 4503595332403200ULL, +}; + +enum { + BPF_CSUM_LEVEL_QUERY = 0, + BPF_CSUM_LEVEL_INC = 1, + BPF_CSUM_LEVEL_DEC = 2, + BPF_CSUM_LEVEL_RESET = 3, +}; + +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = 1, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4, + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8, + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16, + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32, + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64, + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128, + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256, +}; + +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; + +enum { + BPF_SK_LOOKUP_F_REPLACE = 1, + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2, +}; + +enum bpf_adj_room_mode { + BPF_ADJ_ROOM_NET = 0, + BPF_ADJ_ROOM_MAC = 1, +}; + +enum bpf_hdr_start_off { + BPF_HDR_START_MAC = 0, + BPF_HDR_START_NET = 1, +}; + +enum { + BPF_SKB_TSTAMP_UNSPEC = 0, + BPF_SKB_TSTAMP_DELIVERY_MONO = 1, + 
BPF_SKB_CLOCK_REALTIME = 0, + BPF_SKB_CLOCK_MONOTONIC = 1, + BPF_SKB_CLOCK_TAI = 2, +}; + +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; + __u32 ipv6_dst[4]; + }; + }; + __u32 flags; + __be32 flow_label; +}; + +struct bpf_sock { + __u32 bound_dev_if; + __u32 family; + __u32 type; + __u32 protocol; + __u32 mark; + __u32 priority; + __u32 src_ip4; + __u32 src_ip6[4]; + __u32 src_port; + __be16 dst_port; + __u32 dst_ip4; + __u32 dst_ip6[4]; + __u32 state; + __s32 rx_queue_mapping; +}; + +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + __u32 vlan_tci; + __u32 vlan_proto; + __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; + __u32 hash; + __u32 tc_classid; + __u32 data; + __u32 data_end; + __u32 napi_id; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 data_meta; + union { + struct bpf_flow_keys *flow_keys; + }; + __u64 tstamp; + __u32 wire_len; + __u32 gso_segs; + union { + struct bpf_sock *sk; + }; + __u32 gso_size; + __u8 tstamp_type; + __u64 hwtstamp; +}; + +struct bpf_tunnel_key { + __u32 tunnel_id; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; + __u8 tunnel_tos; + __u8 tunnel_ttl; + union { + __u16 tunnel_ext; + __be16 tunnel_flags; + }; + __u32 tunnel_label; + union { + __u32 local_ipv4; + __u32 local_ipv6[4]; + }; +}; + +struct bpf_tcp_sock { + __u32 snd_cwnd; + __u32 srtt_us; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u64 bytes_received; + __u64 bytes_acked; + __u32 dsack_dups; + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + +struct bpf_xdp_sock { + __u32 queue_id; +}; + +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP = 1, + XDP_PASS = 2, + XDP_TX = 3, + XDP_REDIRECT = 4, +}; + +struct sk_msg_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 size; + union { + struct bpf_sock *sk; + }; +}; + +struct sk_reuseport_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 len; + __u32 eth_protocol; + __u32 ip_protocol; + __u32 bind_inany; + __u32 hash; + long: 32; + union { + struct bpf_sock *sk; + }; + union { + struct bpf_sock *migrating_sk; + }; +}; + +struct bpf_sock_addr { + __u32 user_family; + __u32 user_ip4; + __u32 user_ip6[4]; + __u32 user_port; + __u32 family; + __u32 type; + __u32 protocol; + __u32 msg_src_ip4; + __u32 msg_src_ip6[4]; + long: 32; + union { + struct bpf_sock *sk; + }; +}; + +struct bpf_sock_ops { + __u32 op; + union { + __u32 args[4]; + __u32 reply; + 
__u32 replylong[4]; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 is_fullsock; + __u32 snd_cwnd; + __u32 srtt_us; + __u32 bpf_sock_ops_cb_flags; + __u32 state; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u32 sk_txhash; + __u64 bytes_received; + __u64 bytes_acked; + union { + struct bpf_sock *sk; + }; + union { + void *skb_data; + }; + union { + void *skb_data_end; + }; + __u32 skb_len; + __u32 skb_tcp_flags; + __u64 skb_hwtstamp; +}; + +enum { + BPF_SOCK_OPS_RTO_CB_FLAG = 1, + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2, + BPF_SOCK_OPS_STATE_CB_FLAG = 4, + BPF_SOCK_OPS_RTT_CB_FLAG = 8, + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16, + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64, + BPF_SOCK_OPS_ALL_CB_FLAGS = 127, +}; + +enum { + BPF_SOCK_OPS_VOID = 0, + BPF_SOCK_OPS_TIMEOUT_INIT = 1, + BPF_SOCK_OPS_RWND_INIT = 2, + BPF_SOCK_OPS_TCP_CONNECT_CB = 3, + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, + BPF_SOCK_OPS_NEEDS_ECN = 6, + BPF_SOCK_OPS_BASE_RTT = 7, + BPF_SOCK_OPS_RTO_CB = 8, + BPF_SOCK_OPS_RETRANS_CB = 9, + BPF_SOCK_OPS_STATE_CB = 10, + BPF_SOCK_OPS_TCP_LISTEN_CB = 11, + BPF_SOCK_OPS_RTT_CB = 12, + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13, + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15, +}; + +enum { + TCP_BPF_IW = 1001, + TCP_BPF_SNDCWND_CLAMP = 1002, + TCP_BPF_DELACK_MAX = 1003, + TCP_BPF_RTO_MIN = 1004, + TCP_BPF_SYN = 1005, + TCP_BPF_SYN_IP = 1006, + TCP_BPF_SYN_MAC = 1007, + TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, +}; + +enum { + BPF_LOAD_HDR_OPT_TCP_SYN = 1, +}; + +enum { + BPF_FIB_LOOKUP_DIRECT = 1, + BPF_FIB_LOOKUP_OUTPUT = 2, + BPF_FIB_LOOKUP_SKIP_NEIGH = 4, + BPF_FIB_LOOKUP_TBID = 8, + BPF_FIB_LOOKUP_SRC = 16, + BPF_FIB_LOOKUP_MARK = 32, +}; + +enum { + BPF_FIB_LKUP_RET_SUCCESS = 0, + BPF_FIB_LKUP_RET_BLACKHOLE = 1, + BPF_FIB_LKUP_RET_UNREACHABLE = 2, + BPF_FIB_LKUP_RET_PROHIBIT = 3, + BPF_FIB_LKUP_RET_NOT_FWDED = 4, + BPF_FIB_LKUP_RET_FWD_DISABLED = 5, + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, + BPF_FIB_LKUP_RET_NO_NEIGH = 7, + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, + BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9, +}; + +struct bpf_fib_lookup { + __u8 family; + __u8 l4_protocol; + __be16 sport; + __be16 dport; + union { + __u16 tot_len; + __u16 mtu_result; + }; + __u32 ifindex; + union { + __u8 tos; + __be32 flowinfo; + __u32 rt_metric; + }; + union { + __be32 ipv4_src; + __u32 ipv6_src[4]; + }; + union { + __be32 ipv4_dst; + __u32 ipv6_dst[4]; + }; + union { + struct { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + }; + __u32 tbid; + }; + union { + struct { + __u32 mark; + }; + struct { + __u8 smac[6]; + __u8 dmac[6]; + }; + }; +}; + +struct bpf_redir_neigh { + __u32 nh_family; + union { + __be32 ipv4_nh; + __u32 ipv6_nh[4]; + }; +}; + +enum bpf_check_mtu_flags { + BPF_MTU_CHK_SEGS = 1, +}; + +enum bpf_check_mtu_ret { + BPF_MTU_CHK_RET_SUCCESS = 0, + BPF_MTU_CHK_RET_FRAG_NEEDED = 1, + BPF_MTU_CHK_RET_SEGS_TOOBIG = 2, +}; + +struct bpf_dynptr { + __u64 __opaque[2]; +}; + +struct bpf_sk_lookup { + union { + union { + struct bpf_sock *sk; + }; + __u64 cookie; + }; + __u32 family; + __u32 protocol; 
+ __u32 remote_ip4; + __u32 remote_ip6[4]; + __be16 remote_port; + __u32 local_ip4; + __u32 local_ip6[4]; + __u32 local_port; + __u32 ingress_ifindex; + long: 32; +}; + +struct sock_fprog { + short unsigned int len; + struct sock_filter *filter; +}; + +struct __call_single_data { + struct __call_single_node node; + smp_call_func_t func; + void *info; +}; + +typedef struct __call_single_data call_single_data_t; + +struct netdev_xmit { + u16 recursion; + u8 more; + u8 skip_txqueue; +}; + +struct bpf_local_storage_data; + +struct bpf_local_storage { + struct bpf_local_storage_data *cache[16]; + struct bpf_local_storage_map *smap; + struct hlist_head list; + void *owner; + struct callback_head rcu; + raw_spinlock_t lock; +}; + +struct btf_id_set8 { + u32 cnt; + u32 flags; + struct { + u32 id; + u32 flags; + } pairs[0]; +}; + +enum { + BTF_SOCK_TYPE_INET = 0, + BTF_SOCK_TYPE_INET_CONN = 1, + BTF_SOCK_TYPE_INET_REQ = 2, + BTF_SOCK_TYPE_INET_TW = 3, + BTF_SOCK_TYPE_REQ = 4, + BTF_SOCK_TYPE_SOCK = 5, + BTF_SOCK_TYPE_SOCK_COMMON = 6, + BTF_SOCK_TYPE_TCP = 7, + BTF_SOCK_TYPE_TCP_REQ = 8, + BTF_SOCK_TYPE_TCP_TW = 9, + BTF_SOCK_TYPE_TCP6 = 10, + BTF_SOCK_TYPE_UDP = 11, + BTF_SOCK_TYPE_UDP6 = 12, + BTF_SOCK_TYPE_UNIX = 13, + BTF_SOCK_TYPE_MPTCP = 14, + BTF_SOCK_TYPE_SOCKET = 15, + MAX_BTF_SOCK_TYPE = 16, +}; + +typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *, u32); + +struct btf_kfunc_id_set { + struct module *owner; + struct btf_id_set8 *set; + btf_kfunc_filter_t filter; +}; + +struct bpf_mem_caches; + +struct bpf_mem_cache; + +struct bpf_mem_alloc { + struct bpf_mem_caches *caches; + struct bpf_mem_cache *cache; + struct obj_cgroup *objcg; + bool percpu; + struct work_struct work; +}; + +struct bpf_local_storage_map_bucket; + +struct bpf_local_storage_map { + struct bpf_map map; + struct bpf_local_storage_map_bucket *buckets; + u32 bucket_log; + u16 elem_size; + u16 cache_idx; + struct bpf_mem_alloc selem_ma; + struct bpf_mem_alloc storage_ma; + bool bpf_ma; +}; + +struct bpf_dispatcher_prog { + struct bpf_prog *prog; + refcount_t users; +}; + +struct bpf_dispatcher { + struct mutex mutex; + void *func; + struct bpf_dispatcher_prog progs[48]; + int num_progs; + void *image; + void *rw_image; + u32 image_off; + struct bpf_ksym ksym; +}; + +struct bpf_dynptr_kern { + void *data; + u32 size; + u32 offset; + long: 32; +}; + +typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int); + +struct bpf_empty_prog_array { + struct bpf_prog_array hdr; + struct bpf_prog *null_prog; + long: 32; +}; + +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + struct sock *migrating_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; + +typedef u32 compat_uptr_t; + +struct bpf_flow_dissector { + struct bpf_flow_keys *flow_keys; + const struct sk_buff *skb; + const void *data; + const void *data_end; +}; + +enum { + SKBFL_ZEROCOPY_ENABLE = 1, + SKBFL_SHARED_FRAG = 2, + SKBFL_PURE_ZEROCOPY = 4, + SKBFL_DONT_ORPHAN = 8, + SKBFL_MANAGED_FRAG_REFS = 16, +}; + +enum skb_tstamp_type { + SKB_CLOCK_REALTIME = 0, + SKB_CLOCK_MONOTONIC = 1, + SKB_CLOCK_TAI = 2, + __SKB_CLOCK_MAX = 2, +}; + +struct xsk_queue; + +struct xdp_umem; + +struct xdp_buff_xsk; + +struct xdp_desc; + +struct xsk_buff_pool { + struct device *dev; + struct net_device *netdev; + struct list_head xsk_tx_list; + spinlock_t xsk_tx_list_lock; + refcount_t users; + struct xdp_umem *umem; + struct work_struct work; + struct list_head 
free_list; + struct list_head xskb_list; + u32 heads_cnt; + u16 queue_id; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xsk_queue *fq; + struct xsk_queue *cq; + dma_addr_t *dma_pages; + struct xdp_buff_xsk *heads; + struct xdp_desc *tx_descs; + long: 32; + u64 chunk_mask; + u64 addrs_cnt; + u32 free_list_cnt; + u32 dma_pages_cnt; + u32 free_heads_cnt; + u32 headroom; + u32 chunk_size; + u32 chunk_shift; + u32 frame_len; + u32 xdp_zc_max_segs; + u8 tx_metadata_len; + u8 cached_need_wakeup; + bool uses_need_wakeup; + bool unaligned; + bool tx_sw_csum; + void *addrs; + spinlock_t cq_lock; + struct xdp_buff_xsk *free_heads[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct netdev_name_node { + struct hlist_node hlist; + struct list_head list; + struct net_device *dev; + const char *name; + struct callback_head rcu; +}; + +struct sd_flow_limit; + +struct softnet_data { + struct list_head poll_list; + struct sk_buff_head process_queue; + local_lock_t process_queue_bh_lock; + unsigned int processed; + unsigned int time_squeeze; + struct softnet_data *rps_ipi_list; + unsigned int received_rps; + bool in_net_rx_action; + bool in_napi_threaded_poll; + struct sd_flow_limit *flow_limit; + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; + struct netdev_xmit xmit; + unsigned int input_queue_head; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + call_single_data_t csd; + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_tail; + struct sk_buff_head input_pkt_queue; + long: 32; + struct napi_struct backlog; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_t dropped; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t defer_lock; + int defer_count; + int defer_ipi_scheduled; + struct sk_buff *defer_list; + call_single_data_t defer_csd; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[128]; + u8 buckets[0]; +}; + +enum rt_class_t { + RT_TABLE_UNSPEC = 0, + RT_TABLE_COMPAT = 252, + RT_TABLE_DEFAULT = 253, + RT_TABLE_MAIN = 254, + RT_TABLE_LOCAL = 255, + RT_TABLE_MAX = 4294967295, +}; + +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct nl_info { + struct nlmsghdr *nlh; + struct net *nl_net; + u32 portid; + u8 skip_notify: 1; + u8 skip_notify_kernel: 1; +}; + +struct ip_tunnel_key { + __be64 tun_id; + union { + struct { + __be32 src; + __be32 dst; + } ipv4; + struct { + struct in6_addr src; + struct in6_addr dst; + } ipv6; + } u; + long unsigned int tun_flags[1]; + __be32 label; + u32 nhid; + u8 tos; + u8 ttl; + __be16 tp_src; + __be16 tp_dst; + __u8 flow_flags; + long: 32; +}; + +struct dst_cache_pcpu; + +struct dst_cache { + struct dst_cache_pcpu *cache; + long unsigned int reset_ts; +}; + +struct ip_tunnel_info { + struct ip_tunnel_key key; + struct ip_tunnel_encap encap; + struct dst_cache dst_cache; 
+ u8 options_len; + u8 mode; + long: 32; +}; + +struct qdisc_skb_cb { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + unsigned char data[20]; +}; + +struct compat_sock_fprog { + u16 len; + compat_uptr_t filter; +}; + +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_meta; + void *data_end; +}; + +typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); + +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + u64 tmp_reg; + void *t_ctx; + u32 uaddrlen; +}; + +struct bpf_sock_ops_kern { + struct sock *sk; + union { + u32 args[4]; + u32 reply; + u32 replylong[4]; + }; + struct sk_buff *syn_skb; + struct sk_buff *skb; + void *skb_data_end; + u8 op; + u8 is_fullsock; + u8 remaining_opt_len; + long: 32; + u64 temp; +}; + +struct tcp_timewait_sock { + struct inet_timewait_sock tw_sk; + u32 tw_rcv_wnd; + u32 tw_ts_offset; + u32 tw_ts_recent; + u32 tw_last_oow_ack_time; + int tw_ts_recent_stamp; + u32 tw_tx_delay; +}; + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef u8 dscp_t; + +struct fib_table; + +struct fib_result { + __be32 prefix; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + u32 tclassid; + dscp_t dscp; + struct fib_nh_common *nhc; + struct fib_info *fi; + struct fib_table *table; + struct hlist_head *fa_head; +}; + +struct fib_table { + struct hlist_node tb_hlist; + u32 tb_id; + int tb_num_default; + struct callback_head rcu; + long unsigned int *tb_data; + long unsigned int __data[0]; +}; + +struct fib6_result; + +struct fib6_config; + +struct ipv6_stub { + int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); + int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); + struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *); + int (*ipv6_route_input)(struct sk_buff *); + struct fib6_table * (*fib6_get_table)(struct net *, u32); + int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int); + int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, struct fib6_result *, int); + void (*fib6_select_path)(const struct net *, struct fib6_result *, struct flowi6 *, int, bool, const struct sk_buff *, int); + u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, const struct in6_addr *); + int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, struct netlink_ext_ack *); + void (*fib6_nh_release)(struct fib6_nh *); + void (*fib6_nh_release_dsts)(struct fib6_nh *); + void (*fib6_update_sernum)(struct net *, struct fib6_info *); + int (*ip6_del_rt)(struct net *, struct fib6_info *, bool); + void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *); + void (*udpv6_encap_enable)(); + void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool); + struct neigh_table *nd_tbl; + int (*ipv6_fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + struct net_device * (*ipv6_dev_find)(struct net *, const struct in6_addr *, struct net_device *); + int (*ip6_xmit)(const struct sock *, struct sk_buff *, 
struct flowi6 *, __u32, struct ipv6_txoptions *, int, u32); +}; + +struct fib6_result { + struct fib6_nh *nh; + struct fib6_info *f6i; + u32 fib6_flags; + u8 fib6_type; + struct rt6_info *rt6; +}; + +struct fib6_config { + u32 fc_table; + u32 fc_metric; + int fc_dst_len; + int fc_src_len; + int fc_ifindex; + u32 fc_flags; + u32 fc_protocol; + u16 fc_type; + u16 fc_delete_all_nh: 1; + u16 fc_ignore_dev_down: 1; + u16 __unused: 14; + u32 fc_nh_id; + struct in6_addr fc_dst; + struct in6_addr fc_src; + struct in6_addr fc_prefsrc; + struct in6_addr fc_gateway; + long unsigned int fc_expires; + struct nlattr *fc_mx; + int fc_mx_len; + int fc_mp_len; + struct nlattr *fc_mp; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; + bool fc_is_fdb; +}; + +struct ipv6_bpf_stub { + int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32); + struct sock * (*udp6_lib_lookup)(const struct net *, const struct in6_addr *, __be16, const struct in6_addr *, __be16, int, int, struct udp_table *, struct sk_buff *); + int (*ipv6_setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*ipv6_getsockopt)(struct sock *, int, int, sockptr_t, sockptr_t); + int (*ipv6_dev_get_saddr)(struct net *, const struct net_device *, const struct in6_addr *, unsigned int, struct in6_addr *); +}; + +enum lwtunnel_encap_types { + LWTUNNEL_ENCAP_NONE = 0, + LWTUNNEL_ENCAP_MPLS = 1, + LWTUNNEL_ENCAP_IP = 2, + LWTUNNEL_ENCAP_ILA = 3, + LWTUNNEL_ENCAP_IP6 = 4, + LWTUNNEL_ENCAP_SEG6 = 5, + LWTUNNEL_ENCAP_BPF = 6, + LWTUNNEL_ENCAP_SEG6_LOCAL = 7, + LWTUNNEL_ENCAP_RPL = 8, + LWTUNNEL_ENCAP_IOAM6 = 9, + LWTUNNEL_ENCAP_XFRM = 10, + __LWTUNNEL_ENCAP_MAX = 11, +}; + +enum { + INET_ECN_NOT_ECT = 0, + INET_ECN_ECT_1 = 1, + INET_ECN_ECT_0 = 2, + INET_ECN_CE = 3, + INET_ECN_MASK = 3, +}; + +struct mptcp_sock {}; + +struct bpf_tcp_req_attrs { + u32 rcv_tsval; + u32 rcv_tsecr; + u16 mss; + u8 rcv_wscale; + u8 snd_wscale; + u8 ecn_ok; + u8 wscale_ok; + u8 sack_ok; + u8 tstamp_ok; + u8 usec_ts_ok; + u8 reserved[3]; +}; + +struct tcp_skb_cb { + __u32 seq; + __u32 end_seq; + union { + struct { + u16 tcp_gso_segs; + u16 tcp_gso_size; + }; + }; + __u8 tcp_flags; + __u8 sacked; + __u8 ip_dsfield; + __u8 txstamp_ack: 1; + __u8 eor: 1; + __u8 has_rxtstamp: 1; + __u8 unused: 5; + __u32 ack_seq; + long: 32; + union { + struct { + __u32 is_app_limited: 1; + __u32 delivered_ce: 20; + __u32 unused: 11; + __u32 delivered; + u64 first_tx_mstamp; + u64 delivered_mstamp; + } tx; + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + }; +}; + +struct _strp_msg { + struct strp_msg strp; + int accum_len; +}; + +struct tls_msg { + u8 control; +}; + +struct sk_skb_cb { + unsigned char data[20]; + unsigned char pad[4]; + struct _strp_msg strp; + struct tls_msg tls; + u64 temp_reg; +}; + +struct cgroup_cls_state { + struct cgroup_subsys_state css; + u32 classid; + long: 32; +}; + +typedef u64 sci_t; + +enum metadata_type { + METADATA_IP_TUNNEL = 0, + METADATA_HW_PORT_MUX = 1, + METADATA_MACSEC = 2, + METADATA_XFRM = 3, +}; + +struct hw_port_info { + struct net_device *lower_dev; + u32 port_id; +}; + +struct macsec_info { + sci_t sci; +}; + +struct xfrm_md_info { + u32 if_id; + int link; + struct dst_entry *dst_orig; +}; + +struct metadata_dst { + struct dst_entry dst; + enum metadata_type type; + long: 32; + union { + struct ip_tunnel_info tun_info; + struct hw_port_info port_info; + struct macsec_info macsec_info; + struct xfrm_md_info xfrm_info; + } u; +}; + +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, + 
MEM_TYPE_PAGE_ORDER0 = 1, + MEM_TYPE_PAGE_POOL = 2, + MEM_TYPE_XSK_BUFF_POOL = 3, + MEM_TYPE_MAX = 4, +}; + +enum xdp_buff_flags { + XDP_FLAGS_HAS_FRAGS = 1, + XDP_FLAGS_FRAGS_PF_MEMALLOC = 2, +}; + +struct xdp_desc { + __u64 addr; + __u32 len; + __u32 options; +}; + +struct xdp_umem { + void *addrs; + long: 32; + u64 size; + u32 headroom; + u32 chunk_size; + u32 chunks; + u32 npgs; + struct user_struct *user; + refcount_t users; + u8 flags; + u8 tx_metadata_len; + bool zc; + struct page **pgs; + int id; + struct list_head xsk_dma_list; + struct work_struct work; + long: 32; +}; + +struct xdp_sock { + struct sock sk; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xsk_queue *rx; + struct net_device *dev; + struct xdp_umem *umem; + struct list_head flush_node; + struct xsk_buff_pool *pool; + u16 queue_id; + bool zc; + bool sg; + enum { + XSK_READY = 0, + XSK_BOUND = 1, + XSK_UNBOUND = 2, + } state; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xsk_queue *tx; + struct list_head tx_list; + u32 tx_budget_spent; + spinlock_t rx_lock; + long: 32; + u64 rx_dropped; + u64 rx_queue_full; + struct sk_buff *skb; + struct list_head map_list; + spinlock_t map_list_lock; + struct mutex mutex; + struct xsk_queue *fq_tmp; + struct xsk_queue *cq_tmp; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_local_storage_map_bucket { + struct hlist_head list; + raw_spinlock_t lock; +}; + +struct bpf_local_storage_data { + struct bpf_local_storage_map *smap; + long: 32; + u8 data[0]; +}; + +struct crypto_wait { + struct completion completion; + int err; +}; + +struct tls_crypto_info { + __u16 version; + __u16 cipher_type; +}; + +struct tls12_crypto_info_aes_gcm_128 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_aes_gcm_256 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[32]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_chacha20_poly1305 { + struct tls_crypto_info info; + unsigned char iv[12]; + unsigned char key[32]; + unsigned char salt[0]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_gcm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_ccm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls_strparser { + struct sock *sk; + u32 mark: 8; + u32 stopped: 1; + u32 copy_mode: 1; + u32 mixed_decrypted: 1; + bool msg_ready; + struct strp_msg stm; + struct sk_buff *anchor; + struct work_struct work; +}; + +struct tls_sw_context_rx { + struct crypto_aead *aead_recv; + struct crypto_wait async_wait; + struct sk_buff_head rx_list; + void (*saved_data_ready)(struct sock *); + u8 reader_present; + u8 async_capable: 1; + u8 zc_capable: 1; + u8 reader_contended: 1; + struct tls_strparser strp; + atomic_t decrypt_pending; + struct sk_buff_head async_hold; + struct wait_queue_head wq; +}; + +struct tls_prot_info { + u16 version; + u16 cipher_type; + u16 prepend_size; + u16 tag_size; + u16 overhead_size; + u16 iv_size; + u16 salt_size; + u16 rec_seq_size; + u16 aad_size; + u16 tail_size; +}; + +struct 
cipher_context { + char iv[20]; + char rec_seq[8]; +}; + +union tls_crypto_context { + struct tls_crypto_info info; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; + struct tls12_crypto_info_sm4_gcm sm4_gcm; + struct tls12_crypto_info_sm4_ccm sm4_ccm; + }; +}; + +struct tls_context { + struct tls_prot_info prot_info; + u8 tx_conf: 3; + u8 rx_conf: 3; + u8 zerocopy_sendfile: 1; + u8 rx_no_pad: 1; + int (*push_pending_record)(struct sock *, int); + void (*sk_write_space)(struct sock *); + void *priv_ctx_tx; + void *priv_ctx_rx; + struct net_device *netdev; + struct cipher_context tx; + struct cipher_context rx; + struct scatterlist *partially_sent_record; + u16 partially_sent_offset; + bool splicing_pages; + bool pending_open_record_frags; + struct mutex tx_lock; + long unsigned int flags; + struct proto *sk_proto; + struct sock *sk; + void (*sk_destruct)(struct sock *); + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; + struct list_head list; + refcount_t refcount; + struct callback_head rcu; +}; + +struct xdp_buff_xsk { + struct xdp_buff xdp; + u8 cb[24]; + dma_addr_t dma; + dma_addr_t frame_dma; + struct xsk_buff_pool *pool; + struct list_head list_node; +}; + +typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64); + +typedef u64 (*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, void *, u32); + +typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_sk_fullsock)(struct sock *); + +typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum); + +typedef u64 (*btf_bpf_csum_update)(struct sk_buff *, __wsum); + +typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64); + +enum { + BPF_F_NEIGH = 65536, + BPF_F_PEER = 131072, + BPF_F_NEXTHOP = 262144, +}; + +typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_redirect)(u32, u64); + +typedef u64 (*btf_bpf_redirect_peer)(u32, u64); + +typedef u64 (*btf_bpf_redirect_neigh)(u32, struct bpf_redir_neigh *, int, u64); + +typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 
(*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_get_cgroup_classid_curr)(); + +typedef u64 (*btf_bpf_skb_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_hash_recalc)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16); + +typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64); + +typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32); + +typedef u64 (*btf_sk_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_xdp_get_buff_len)(struct xdp_buff *); + +typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_load_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_store_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_redirect)(u32, u64); + +typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct sk_buff *, u8 *, u32); + +typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct sk_buff *, const struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32); + +typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32); + +typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *); + +typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *); + +typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int); + +typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_ptr_cookie)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sk_msg)(struct sk_msg *); + +typedef u64 (*btf_bpf_get_socket_uid)(struct sk_buff *); + +typedef u64 (*btf_bpf_sk_setsockopt)(struct 
sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_unlocked_sk_setsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_unlocked_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int); + +typedef u64 (*btf_bpf_bind)(struct bpf_sock_addr_kern *, struct sockaddr *, int); + +typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_check_mtu)(struct sk_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_xdp_check_mtu)(struct xdp_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_release)(struct sock *); + +typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_listener_sock)(struct sock *); + +typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *); + +typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64); + +typedef u64 (*btf_bpf_sock_ops_load_hdr_opt)(struct bpf_sock_ops_kern *, void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_store_hdr_opt)(struct bpf_sock_ops_kern *, const void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_reserve_hdr_opt)(struct bpf_sock_ops_kern *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tstamp)(struct sk_buff *, u64, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv4)(struct 
iphdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *, struct tcphdr *); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *); + +typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, u32, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_sk_lookup_assign)(struct bpf_sk_lookup_kern *, struct sock *, u64); + +typedef u64 (*btf_bpf_skc_to_tcp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_timewait_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_request_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_udp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_unix_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_mptcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_sock_from_file)(struct file *); + +enum ethtool_fec_config_bits { + ETHTOOL_FEC_NONE_BIT = 0, + ETHTOOL_FEC_AUTO_BIT = 1, + ETHTOOL_FEC_OFF_BIT = 2, + ETHTOOL_FEC_RS_BIT = 3, + ETHTOOL_FEC_BASER_BIT = 4, + ETHTOOL_FEC_LLRS_BIT = 5, +}; + +enum { + ETHTOOL_A_FEC_STAT_UNSPEC = 0, + ETHTOOL_A_FEC_STAT_PAD = 1, + ETHTOOL_A_FEC_STAT_CORRECTED = 2, + ETHTOOL_A_FEC_STAT_UNCORR = 3, + ETHTOOL_A_FEC_STAT_CORR_BITS = 4, + __ETHTOOL_A_FEC_STAT_CNT = 5, + ETHTOOL_A_FEC_STAT_MAX = 4, +}; + +typedef const char (* const ethnl_string_array_t)[32]; + +struct fec_stat_grp { + u64 stats[9]; + u8 cnt; + long: 32; +}; + +struct fec_reply_data { + struct ethnl_reply_data base; + long unsigned int fec_link_modes[4]; + u32 active_fec; + u8 fec_auto; + long: 32; + struct fec_stat_grp corr; + struct fec_stat_grp uncorr; + struct fec_stat_grp corr_bits; +}; + +enum ip_conntrack_status { + IPS_EXPECTED_BIT = 0, + IPS_EXPECTED = 1, + IPS_SEEN_REPLY_BIT = 1, + IPS_SEEN_REPLY = 2, + IPS_ASSURED_BIT = 2, + IPS_ASSURED = 4, + IPS_CONFIRMED_BIT = 3, + IPS_CONFIRMED = 8, + IPS_SRC_NAT_BIT = 4, + IPS_SRC_NAT = 16, + IPS_DST_NAT_BIT = 5, + IPS_DST_NAT = 32, + IPS_NAT_MASK = 48, + IPS_SEQ_ADJUST_BIT = 6, + IPS_SEQ_ADJUST = 64, + IPS_SRC_NAT_DONE_BIT = 7, + IPS_SRC_NAT_DONE = 128, + IPS_DST_NAT_DONE_BIT = 8, + IPS_DST_NAT_DONE = 256, + IPS_NAT_DONE_MASK = 384, + IPS_DYING_BIT = 9, + IPS_DYING = 512, + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = 1024, + IPS_TEMPLATE_BIT = 11, + IPS_TEMPLATE = 2048, + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = 4096, + IPS_NAT_CLASH_BIT = 12, + IPS_NAT_CLASH = 4096, + IPS_HELPER_BIT = 13, + IPS_HELPER = 8192, + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = 16384, + IPS_HW_OFFLOAD_BIT = 15, + IPS_HW_OFFLOAD = 32768, + IPS_UNCHANGEABLE_MASK = 56313, + __IPS_MAX_BIT = 16, +}; + +struct rd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + struct in6_addr dest; + __u8 opt[0]; +}; + +struct nf_conntrack_l4proto { + u_int8_t l4proto; + bool allow_clash; + u16 nlattr_size; + bool (*can_early_drop)(const struct nf_conn *); + int (*to_nlattr)(struct sk_buff *, struct nlattr *, struct nf_conn *, bool); + int (*from_nlattr)(struct nlattr **, struct nf_conn *); + int (*tuple_to_nlattr)(struct sk_buff *, const struct nf_conntrack_tuple *); + unsigned int (*nlattr_tuple_size)(); + int (*nlattr_to_tuple)(struct nlattr **, struct nf_conntrack_tuple *, u_int32_t); + 
const struct nla_policy *nla_policy; + struct { + int (*nlattr_to_obj)(struct nlattr **, struct net *, void *); + int (*obj_to_nlattr)(struct sk_buff *, const void *); + u16 obj_size; + u16 nlattr_max; + const struct nla_policy *nla_policy; + } ctnl_timeout; + void (*print_conntrack)(struct seq_file *, struct nf_conn *); +}; + +struct nf_loginfo { + u_int8_t type; + union { + struct { + u_int32_t copy_len; + u_int16_t group; + u_int16_t qthreshold; + u_int16_t flags; + } ulog; + struct { + u_int8_t level; + u_int8_t logflags; + } log; + } u; +}; + +enum ctattr_l4proto { + CTA_PROTO_UNSPEC = 0, + CTA_PROTO_NUM = 1, + CTA_PROTO_SRC_PORT = 2, + CTA_PROTO_DST_PORT = 3, + CTA_PROTO_ICMP_ID = 4, + CTA_PROTO_ICMP_TYPE = 5, + CTA_PROTO_ICMP_CODE = 6, + CTA_PROTO_ICMPV6_ID = 7, + CTA_PROTO_ICMPV6_TYPE = 8, + CTA_PROTO_ICMPV6_CODE = 9, + __CTA_PROTO_MAX = 10, +}; + +enum nft_set_attributes { + NFTA_SET_UNSPEC = 0, + NFTA_SET_TABLE = 1, + NFTA_SET_NAME = 2, + NFTA_SET_FLAGS = 3, + NFTA_SET_KEY_TYPE = 4, + NFTA_SET_KEY_LEN = 5, + NFTA_SET_DATA_TYPE = 6, + NFTA_SET_DATA_LEN = 7, + NFTA_SET_POLICY = 8, + NFTA_SET_DESC = 9, + NFTA_SET_ID = 10, + NFTA_SET_TIMEOUT = 11, + NFTA_SET_GC_INTERVAL = 12, + NFTA_SET_USERDATA = 13, + NFTA_SET_PAD = 14, + NFTA_SET_OBJ_TYPE = 15, + NFTA_SET_HANDLE = 16, + NFTA_SET_EXPR = 17, + NFTA_SET_EXPRESSIONS = 18, + __NFTA_SET_MAX = 19, +}; + +struct nft_elem_priv {}; + +struct nft_set_elem { + union { + u32 buf[16]; + struct nft_data val; + } key; + union { + u32 buf[16]; + struct nft_data val; + } key_end; + union { + u32 buf[16]; + struct nft_data val; + } data; + struct nft_elem_priv *priv; + long: 32; +}; + +enum nft_iter_type { + NFT_ITER_UNSPEC = 0, + NFT_ITER_READ = 1, + NFT_ITER_UPDATE = 2, +}; + +struct nft_set; + +struct nft_set_iter { + u8 genmask; + enum nft_iter_type type: 8; + unsigned int count; + unsigned int skip; + int err; + int (*fn)(const struct nft_ctx *, struct nft_set *, const struct nft_set_iter *, struct nft_elem_priv *); +}; + +struct nft_set_ops; + +struct nft_set { + struct list_head list; + struct list_head bindings; + refcount_t refs; + struct nft_table *table; + possible_net_t net; + char *name; + u64 handle; + u32 ktype; + u32 dtype; + u32 objtype; + u32 size; + u8 field_len[16]; + u8 field_count; + u32 use; + atomic_t nelems; + u32 ndeact; + u64 timeout; + u32 gc_int; + u16 policy; + u16 udlen; + unsigned char *udata; + struct list_head pending_update; + long: 32; + long: 32; + long: 32; + const struct nft_set_ops *ops; + u16 flags: 13; + u16 dead: 1; + u16 genmask: 2; + u8 klen; + u8 dlen; + u8 num_exprs; + struct nft_expr *exprs[2]; + struct list_head catchall_list; + long: 32; + unsigned char data[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nft_set_desc { + u32 ktype; + unsigned int klen; + u32 dtype; + unsigned int dlen; + u32 objtype; + unsigned int size; + u32 policy; + u32 gc_int; + u64 timeout; + u8 field_len[16]; + u8 field_count; + bool expr; + long: 32; +}; + +enum nft_set_class { + NFT_SET_CLASS_O_1 = 0, + NFT_SET_CLASS_O_LOG_N = 1, + NFT_SET_CLASS_O_N = 2, +}; + +struct nft_set_estimate { + u64 size; + enum nft_set_class lookup; + enum nft_set_class space; +}; + +struct nft_set_ext; + +struct nft_set_ops { + bool (*lookup)(const struct net *, const struct nft_set *, const u32 *, const struct nft_set_ext **); + bool (*update)(struct nft_set *, const u32 *, struct nft_elem_priv * (*)(struct nft_set *, const struct nft_expr *, struct nft_regs *), const struct 
nft_expr *, struct nft_regs *, const struct nft_set_ext **); + bool (*delete)(const struct nft_set *, const u32 *); + int (*insert)(const struct net *, const struct nft_set *, const struct nft_set_elem *, struct nft_elem_priv **); + void (*activate)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + struct nft_elem_priv * (*deactivate)(const struct net *, const struct nft_set *, const struct nft_set_elem *); + void (*flush)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + void (*remove)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + void (*walk)(const struct nft_ctx *, struct nft_set *, struct nft_set_iter *); + struct nft_elem_priv * (*get)(const struct net *, const struct nft_set *, const struct nft_set_elem *, unsigned int); + void (*commit)(struct nft_set *); + void (*abort)(const struct nft_set *); + u64 (*privsize)(const struct nlattr * const *, const struct nft_set_desc *); + bool (*estimate)(const struct nft_set_desc *, u32, struct nft_set_estimate *); + int (*init)(const struct nft_set *, const struct nft_set_desc *, const struct nlattr * const *); + void (*destroy)(const struct nft_ctx *, const struct nft_set *); + void (*gc_init)(const struct nft_set *); + unsigned int elemsize; +}; + +struct nft_set_ext { + u8 genmask; + u8 offset[8]; + char data[0]; +}; + +struct nft_set_type { + const struct nft_set_ops ops; + u32 features; +}; + +struct nft_bitmap_elem { + struct nft_elem_priv priv; + struct list_head head; + struct nft_set_ext ext; +}; + +struct nft_bitmap { + struct list_head list; + u16 bitmap_size; + u8 bitmap[0]; +}; + +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_long_t tv_usec; +}; + +struct __kernel_sock_timeval { + __s64 tv_sec; + __s64 tv_usec; +}; + +struct xa_limit { + u32 max; + u32 min; +}; + +struct dmabuf_cmsg { + __u64 frag_offset; + __u32 frag_size; + __u32 frag_token; + __u32 dmabuf_id; + __u32 flags; +}; + +struct dmabuf_genpool_chunk_owner; + +struct net_iov { + long unsigned int __unused_padding; + long unsigned int pp_magic; + struct page_pool *pp; + struct dmabuf_genpool_chunk_owner *owner; + long unsigned int dma_addr; + atomic_long_t pp_ref_count; +}; + +struct net_devmem_dmabuf_binding; + +struct dmabuf_genpool_chunk_owner { + long unsigned int base_virtual; + dma_addr_t base_dma_addr; + struct net_iov *niovs; + size_t num_niovs; + struct net_devmem_dmabuf_binding *binding; +}; + +struct mmpin { + struct user_struct *user; + unsigned int num_pg; +}; + +struct ubuf_info_msgzc { + struct ubuf_info ubuf; + union { + struct { + long unsigned int desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy: 1; + u32 bytelen; + }; + }; + struct mmpin mmp; +}; + +enum { + BPF_TCP_ESTABLISHED = 1, + BPF_TCP_SYN_SENT = 2, + BPF_TCP_SYN_RECV = 3, + BPF_TCP_FIN_WAIT1 = 4, + BPF_TCP_FIN_WAIT2 = 5, + BPF_TCP_TIME_WAIT = 6, + BPF_TCP_CLOSE = 7, + BPF_TCP_CLOSE_WAIT = 8, + BPF_TCP_LAST_ACK = 9, + BPF_TCP_LISTEN = 10, + BPF_TCP_CLOSING = 11, + BPF_TCP_NEW_SYN_RECV = 12, + BPF_TCP_BOUND_INACTIVE = 13, + BPF_TCP_MAX_STATES = 14, +}; + +struct tcpvegas_info { + __u32 tcpv_enabled; + __u32 tcpv_rttcnt; + __u32 tcpv_rtt; + __u32 tcpv_minrtt; +}; + +struct tcp_dctcp_info { + __u16 dctcp_enabled; + __u16 dctcp_ce_state; + __u32 dctcp_alpha; + __u32 dctcp_ab_ecn; + __u32 dctcp_ab_tot; +}; + +struct tcp_bbr_info { + __u32 bbr_bw_lo; + __u32 bbr_bw_hi; + __u32 bbr_min_rtt; + __u32 bbr_pacing_gain; + __u32 bbr_cwnd_gain; +}; + +union tcp_cc_info { + struct tcpvegas_info vegas; + 
struct tcp_dctcp_info dctcp; + struct tcp_bbr_info bbr; +}; + +enum inet_csk_ack_state_t { + ICSK_ACK_SCHED = 1, + ICSK_ACK_TIMER = 2, + ICSK_ACK_PUSHED = 4, + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW = 16, + ICSK_ACK_NOMEM = 32, +}; + +enum { + TCP_FLAG_CWR = 32768, + TCP_FLAG_ECE = 16384, + TCP_FLAG_URG = 8192, + TCP_FLAG_ACK = 4096, + TCP_FLAG_PSH = 2048, + TCP_FLAG_RST = 1024, + TCP_FLAG_SYN = 512, + TCP_FLAG_FIN = 256, + TCP_RESERVED_BITS = 15, + TCP_DATA_OFFSET = 240, +}; + +struct tcp_repair_opt { + __u32 opt_code; + __u32 opt_val; +}; + +struct tcp_repair_window { + __u32 snd_wl1; + __u32 snd_wnd; + __u32 max_window; + __u32 rcv_wnd; + __u32 rcv_wup; +}; + +enum { + TCP_NO_QUEUE = 0, + TCP_RECV_QUEUE = 1, + TCP_SEND_QUEUE = 2, + TCP_QUEUES_NR = 3, +}; + +struct tcp_info { + __u8 tcpi_state; + __u8 tcpi_ca_state; + __u8 tcpi_retransmits; + __u8 tcpi_probes; + __u8 tcpi_backoff; + __u8 tcpi_options; + __u8 tcpi_snd_wscale: 4; + __u8 tcpi_rcv_wscale: 4; + __u8 tcpi_delivery_rate_app_limited: 1; + __u8 tcpi_fastopen_client_fail: 2; + __u32 tcpi_rto; + __u32 tcpi_ato; + __u32 tcpi_snd_mss; + __u32 tcpi_rcv_mss; + __u32 tcpi_unacked; + __u32 tcpi_sacked; + __u32 tcpi_lost; + __u32 tcpi_retrans; + __u32 tcpi_fackets; + __u32 tcpi_last_data_sent; + __u32 tcpi_last_ack_sent; + __u32 tcpi_last_data_recv; + __u32 tcpi_last_ack_recv; + __u32 tcpi_pmtu; + __u32 tcpi_rcv_ssthresh; + __u32 tcpi_rtt; + __u32 tcpi_rttvar; + __u32 tcpi_snd_ssthresh; + __u32 tcpi_snd_cwnd; + __u32 tcpi_advmss; + __u32 tcpi_reordering; + __u32 tcpi_rcv_rtt; + __u32 tcpi_rcv_space; + __u32 tcpi_total_retrans; + __u64 tcpi_pacing_rate; + __u64 tcpi_max_pacing_rate; + __u64 tcpi_bytes_acked; + __u64 tcpi_bytes_received; + __u32 tcpi_segs_out; + __u32 tcpi_segs_in; + __u32 tcpi_notsent_bytes; + __u32 tcpi_min_rtt; + __u32 tcpi_data_segs_in; + __u32 tcpi_data_segs_out; + __u64 tcpi_delivery_rate; + __u64 tcpi_busy_time; + __u64 tcpi_rwnd_limited; + __u64 tcpi_sndbuf_limited; + __u32 tcpi_delivered; + __u32 tcpi_delivered_ce; + __u64 tcpi_bytes_sent; + __u64 tcpi_bytes_retrans; + __u32 tcpi_dsack_dups; + __u32 tcpi_reord_seen; + __u32 tcpi_rcv_ooopack; + __u32 tcpi_snd_wnd; + __u32 tcpi_rcv_wnd; + __u32 tcpi_rehash; + __u16 tcpi_total_rto; + __u16 tcpi_total_rto_recoveries; + __u32 tcpi_total_rto_time; +}; + +enum { + TCP_NLA_PAD = 0, + TCP_NLA_BUSY = 1, + TCP_NLA_RWND_LIMITED = 2, + TCP_NLA_SNDBUF_LIMITED = 3, + TCP_NLA_DATA_SEGS_OUT = 4, + TCP_NLA_TOTAL_RETRANS = 5, + TCP_NLA_PACING_RATE = 6, + TCP_NLA_DELIVERY_RATE = 7, + TCP_NLA_SND_CWND = 8, + TCP_NLA_REORDERING = 9, + TCP_NLA_MIN_RTT = 10, + TCP_NLA_RECUR_RETRANS = 11, + TCP_NLA_DELIVERY_RATE_APP_LMT = 12, + TCP_NLA_SNDQ_SIZE = 13, + TCP_NLA_CA_STATE = 14, + TCP_NLA_SND_SSTHRESH = 15, + TCP_NLA_DELIVERED = 16, + TCP_NLA_DELIVERED_CE = 17, + TCP_NLA_BYTES_SENT = 18, + TCP_NLA_BYTES_RETRANS = 19, + TCP_NLA_DSACK_DUPS = 20, + TCP_NLA_REORD_SEEN = 21, + TCP_NLA_SRTT = 22, + TCP_NLA_TIMEOUT_REHASH = 23, + TCP_NLA_BYTES_NOTSENT = 24, + TCP_NLA_EDT = 25, + TCP_NLA_TTL = 26, + TCP_NLA_REHASH = 27, +}; + +struct tcp_zerocopy_receive { + __u64 address; + __u32 length; + __u32 recv_skip_hint; + __u32 inq; + __s32 err; + __u64 copybuf_address; + __s32 copybuf_len; + __u32 flags; + __u64 msg_control; + __u64 msg_controllen; + __u32 msg_flags; + __u32 reserved; +}; + +union tcp_ao_addr { + struct in_addr a4; + struct in6_addr a6; +}; + +struct tcp_ao_hdr { + u8 kind; + u8 length; + u8 keyid; + u8 rnext_keyid; +}; + +enum tcp_skb_cb_sacked_flags { + TCPCB_SACKED_ACKED = 1, + 
TCPCB_SACKED_RETRANS = 2, + TCPCB_LOST = 4, + TCPCB_TAGBITS = 7, + TCPCB_REPAIRED = 16, + TCPCB_EVER_RETRANS = 128, + TCPCB_RETRANS = 146, +}; + +struct tcp_md5sig_key { + struct hlist_node node; + u8 keylen; + u8 family; + u8 prefixlen; + u8 flags; + union tcp_ao_addr addr; + int l3index; + u8 key[80]; + struct callback_head rcu; +}; + +enum tcp_chrono { + TCP_CHRONO_UNSPEC = 0, + TCP_CHRONO_BUSY = 1, + TCP_CHRONO_RWND_LIMITED = 2, + TCP_CHRONO_SNDBUF_LIMITED = 3, + __TCP_CHRONO_MAX = 4, +}; + +struct rps_sock_flow_table { + u32 mask; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u32 ents[0]; +}; + +struct dma_buf; + +struct dma_buf_attachment; + +struct net_devmem_dmabuf_binding { + struct dma_buf *dmabuf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + struct net_device *dev; + struct gen_pool *chunk_pool; + refcount_t ref; + struct list_head list; + struct xarray bound_rxqs; + u32 id; +}; + +enum { + TCP_CMSG_INQ = 1, + TCP_CMSG_TS = 2, +}; + +struct tcp_splice_state { + struct pipe_inode_info *pipe; + size_t len; + unsigned int flags; +}; + +struct tcp_xa_pool { + u8 max; + u8 idx; + __u32 tokens[17]; + netmem_ref netmems[17]; +}; + +typedef __kernel_clock_t clock_t; + +enum { + IF_OPER_UNKNOWN = 0, + IF_OPER_NOTPRESENT = 1, + IF_OPER_DOWN = 2, + IF_OPER_LOWERLAYERDOWN = 3, + IF_OPER_TESTING = 4, + IF_OPER_DORMANT = 5, + IF_OPER_UP = 6, +}; + +enum { + IFLA_UNSPEC = 0, + IFLA_ADDRESS = 1, + IFLA_BROADCAST = 2, + IFLA_IFNAME = 3, + IFLA_MTU = 4, + IFLA_LINK = 5, + IFLA_QDISC = 6, + IFLA_STATS = 7, + IFLA_COST = 8, + IFLA_PRIORITY = 9, + IFLA_MASTER = 10, + IFLA_WIRELESS = 11, + IFLA_PROTINFO = 12, + IFLA_TXQLEN = 13, + IFLA_MAP = 14, + IFLA_WEIGHT = 15, + IFLA_OPERSTATE = 16, + IFLA_LINKMODE = 17, + IFLA_LINKINFO = 18, + IFLA_NET_NS_PID = 19, + IFLA_IFALIAS = 20, + IFLA_NUM_VF = 21, + IFLA_VFINFO_LIST = 22, + IFLA_STATS64 = 23, + IFLA_VF_PORTS = 24, + IFLA_PORT_SELF = 25, + IFLA_AF_SPEC = 26, + IFLA_GROUP = 27, + IFLA_NET_NS_FD = 28, + IFLA_EXT_MASK = 29, + IFLA_PROMISCUITY = 30, + IFLA_NUM_TX_QUEUES = 31, + IFLA_NUM_RX_QUEUES = 32, + IFLA_CARRIER = 33, + IFLA_PHYS_PORT_ID = 34, + IFLA_CARRIER_CHANGES = 35, + IFLA_PHYS_SWITCH_ID = 36, + IFLA_LINK_NETNSID = 37, + IFLA_PHYS_PORT_NAME = 38, + IFLA_PROTO_DOWN = 39, + IFLA_GSO_MAX_SEGS = 40, + IFLA_GSO_MAX_SIZE = 41, + IFLA_PAD = 42, + IFLA_XDP = 43, + IFLA_EVENT = 44, + IFLA_NEW_NETNSID = 45, + IFLA_IF_NETNSID = 46, + IFLA_TARGET_NETNSID = 46, + IFLA_CARRIER_UP_COUNT = 47, + IFLA_CARRIER_DOWN_COUNT = 48, + IFLA_NEW_IFINDEX = 49, + IFLA_MIN_MTU = 50, + IFLA_MAX_MTU = 51, + IFLA_PROP_LIST = 52, + IFLA_ALT_IFNAME = 53, + IFLA_PERM_ADDRESS = 54, + IFLA_PROTO_DOWN_REASON = 55, + IFLA_PARENT_DEV_NAME = 56, + IFLA_PARENT_DEV_BUS_NAME = 57, + IFLA_GRO_MAX_SIZE = 58, + IFLA_TSO_MAX_SIZE = 59, + IFLA_TSO_MAX_SEGS = 60, + IFLA_ALLMULTI = 61, + IFLA_DEVLINK_PORT = 62, + IFLA_GSO_IPV4_MAX_SIZE = 63, + IFLA_GRO_IPV4_MAX_SIZE = 64, + IFLA_DPLL_PIN = 65, + IFLA_MAX_PACING_OFFLOAD_HORIZON = 66, + __IFLA_MAX = 67, +}; + +enum { + IFLA_INET6_UNSPEC = 0, + IFLA_INET6_FLAGS = 1, + IFLA_INET6_CONF = 2, + IFLA_INET6_STATS = 3, + IFLA_INET6_MCAST = 4, + IFLA_INET6_CACHEINFO = 5, + IFLA_INET6_ICMP6STATS = 6, + IFLA_INET6_TOKEN = 7, + IFLA_INET6_ADDR_GEN_MODE = 8, + IFLA_INET6_RA_MTU = 9, + __IFLA_INET6_MAX = 10, +}; + +enum in6_addr_gen_mode { + IN6_ADDR_GEN_MODE_EUI64 = 0, + IN6_ADDR_GEN_MODE_NONE = 1, + 
IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, + IN6_ADDR_GEN_MODE_RANDOM = 3, +}; + +struct ifla_cacheinfo { + __u32 max_reasm_len; + __u32 tstamp; + __u32 reachable_time; + __u32 retrans_time; +}; + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; + unsigned int flags_changed; +}; + +struct netdev_notifier_changeupper_info { + struct netdev_notifier_info info; + struct net_device *upper_dev; + bool master; + bool linking; + void *upper_info; +}; + +struct ifaddrmsg { + __u8 ifa_family; + __u8 ifa_prefixlen; + __u8 ifa_flags; + __u8 ifa_scope; + __u32 ifa_index; +}; + +enum { + IFA_UNSPEC = 0, + IFA_ADDRESS = 1, + IFA_LOCAL = 2, + IFA_LABEL = 3, + IFA_BROADCAST = 4, + IFA_ANYCAST = 5, + IFA_CACHEINFO = 6, + IFA_MULTICAST = 7, + IFA_FLAGS = 8, + IFA_RT_PRIORITY = 9, + IFA_TARGET_NETNSID = 10, + IFA_PROTO = 11, + __IFA_MAX = 12, +}; + +struct ifa_cacheinfo { + __u32 ifa_prefered; + __u32 ifa_valid; + __u32 cstamp; + __u32 tstamp; +}; + +enum { + RTM_BASE = 16, + RTM_NEWLINK = 16, + RTM_DELLINK = 17, + RTM_GETLINK = 18, + RTM_SETLINK = 19, + RTM_NEWADDR = 20, + RTM_DELADDR = 21, + RTM_GETADDR = 22, + RTM_NEWROUTE = 24, + RTM_DELROUTE = 25, + RTM_GETROUTE = 26, + RTM_NEWNEIGH = 28, + RTM_DELNEIGH = 29, + RTM_GETNEIGH = 30, + RTM_NEWRULE = 32, + RTM_DELRULE = 33, + RTM_GETRULE = 34, + RTM_NEWQDISC = 36, + RTM_DELQDISC = 37, + RTM_GETQDISC = 38, + RTM_NEWTCLASS = 40, + RTM_DELTCLASS = 41, + RTM_GETTCLASS = 42, + RTM_NEWTFILTER = 44, + RTM_DELTFILTER = 45, + RTM_GETTFILTER = 46, + RTM_NEWACTION = 48, + RTM_DELACTION = 49, + RTM_GETACTION = 50, + RTM_NEWPREFIX = 52, + RTM_GETMULTICAST = 58, + RTM_GETANYCAST = 62, + RTM_NEWNEIGHTBL = 64, + RTM_GETNEIGHTBL = 66, + RTM_SETNEIGHTBL = 67, + RTM_NEWNDUSEROPT = 68, + RTM_NEWADDRLABEL = 72, + RTM_DELADDRLABEL = 73, + RTM_GETADDRLABEL = 74, + RTM_GETDCB = 78, + RTM_SETDCB = 79, + RTM_NEWNETCONF = 80, + RTM_DELNETCONF = 81, + RTM_GETNETCONF = 82, + RTM_NEWMDB = 84, + RTM_DELMDB = 85, + RTM_GETMDB = 86, + RTM_NEWNSID = 88, + RTM_DELNSID = 89, + RTM_GETNSID = 90, + RTM_NEWSTATS = 92, + RTM_GETSTATS = 94, + RTM_SETSTATS = 95, + RTM_NEWCACHEREPORT = 96, + RTM_NEWCHAIN = 100, + RTM_DELCHAIN = 101, + RTM_GETCHAIN = 102, + RTM_NEWNEXTHOP = 104, + RTM_DELNEXTHOP = 105, + RTM_GETNEXTHOP = 106, + RTM_NEWLINKPROP = 108, + RTM_DELLINKPROP = 109, + RTM_GETLINKPROP = 110, + RTM_NEWVLAN = 112, + RTM_DELVLAN = 113, + RTM_GETVLAN = 114, + RTM_NEWNEXTHOPBUCKET = 116, + RTM_DELNEXTHOPBUCKET = 117, + RTM_GETNEXTHOPBUCKET = 118, + RTM_NEWTUNNEL = 120, + RTM_DELTUNNEL = 121, + RTM_GETTUNNEL = 122, + __RTM_MAX = 123, +}; + +struct ifinfomsg { + unsigned char ifi_family; + unsigned char __ifi_pad; + short unsigned int ifi_type; + int ifi_index; + unsigned int ifi_flags; + unsigned int ifi_change; +}; + +struct prefixmsg { + unsigned char prefix_family; + unsigned char prefix_pad1; + short unsigned int prefix_pad2; + int prefix_ifindex; + unsigned char prefix_type; + unsigned char prefix_len; + unsigned char prefix_flags; + unsigned char prefix_pad3; +}; + +enum { + PREFIX_UNSPEC = 0, + PREFIX_ADDRESS = 1, + PREFIX_CACHEINFO = 2, + __PREFIX_MAX = 3, +}; + +struct prefix_cacheinfo { + __u32 preferred_time; + __u32 valid_time; +}; + +enum rtnetlink_groups { + RTNLGRP_NONE = 0, + RTNLGRP_LINK = 1, + RTNLGRP_NOTIFY = 2, + RTNLGRP_NEIGH = 3, + RTNLGRP_TC = 4, + RTNLGRP_IPV4_IFADDR = 5, + RTNLGRP_IPV4_MROUTE = 6, + RTNLGRP_IPV4_ROUTE = 7, + RTNLGRP_IPV4_RULE = 8, + RTNLGRP_IPV6_IFADDR = 9, + RTNLGRP_IPV6_MROUTE = 10, + RTNLGRP_IPV6_ROUTE = 11, + RTNLGRP_IPV6_IFINFO = 12, 
+ RTNLGRP_DECnet_IFADDR = 13, + RTNLGRP_NOP2 = 14, + RTNLGRP_DECnet_ROUTE = 15, + RTNLGRP_DECnet_RULE = 16, + RTNLGRP_NOP4 = 17, + RTNLGRP_IPV6_PREFIX = 18, + RTNLGRP_IPV6_RULE = 19, + RTNLGRP_ND_USEROPT = 20, + RTNLGRP_PHONET_IFADDR = 21, + RTNLGRP_PHONET_ROUTE = 22, + RTNLGRP_DCB = 23, + RTNLGRP_IPV4_NETCONF = 24, + RTNLGRP_IPV6_NETCONF = 25, + RTNLGRP_MDB = 26, + RTNLGRP_MPLS_ROUTE = 27, + RTNLGRP_NSID = 28, + RTNLGRP_MPLS_NETCONF = 29, + RTNLGRP_IPV4_MROUTE_R = 30, + RTNLGRP_IPV6_MROUTE_R = 31, + RTNLGRP_NEXTHOP = 32, + RTNLGRP_BRVLAN = 33, + RTNLGRP_MCTP_IFADDR = 34, + RTNLGRP_TUNNEL = 35, + RTNLGRP_STATS = 36, + __RTNLGRP_MAX = 37, +}; + +struct in6_ifreq { + struct in6_addr ifr6_addr; + __u32 ifr6_prefixlen; + int ifr6_ifindex; +}; + +enum { + DEVCONF_FORWARDING = 0, + DEVCONF_HOPLIMIT = 1, + DEVCONF_MTU6 = 2, + DEVCONF_ACCEPT_RA = 3, + DEVCONF_ACCEPT_REDIRECTS = 4, + DEVCONF_AUTOCONF = 5, + DEVCONF_DAD_TRANSMITS = 6, + DEVCONF_RTR_SOLICITS = 7, + DEVCONF_RTR_SOLICIT_INTERVAL = 8, + DEVCONF_RTR_SOLICIT_DELAY = 9, + DEVCONF_USE_TEMPADDR = 10, + DEVCONF_TEMP_VALID_LFT = 11, + DEVCONF_TEMP_PREFERED_LFT = 12, + DEVCONF_REGEN_MAX_RETRY = 13, + DEVCONF_MAX_DESYNC_FACTOR = 14, + DEVCONF_MAX_ADDRESSES = 15, + DEVCONF_FORCE_MLD_VERSION = 16, + DEVCONF_ACCEPT_RA_DEFRTR = 17, + DEVCONF_ACCEPT_RA_PINFO = 18, + DEVCONF_ACCEPT_RA_RTR_PREF = 19, + DEVCONF_RTR_PROBE_INTERVAL = 20, + DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, + DEVCONF_PROXY_NDP = 22, + DEVCONF_OPTIMISTIC_DAD = 23, + DEVCONF_ACCEPT_SOURCE_ROUTE = 24, + DEVCONF_MC_FORWARDING = 25, + DEVCONF_DISABLE_IPV6 = 26, + DEVCONF_ACCEPT_DAD = 27, + DEVCONF_FORCE_TLLAO = 28, + DEVCONF_NDISC_NOTIFY = 29, + DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30, + DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, + DEVCONF_SUPPRESS_FRAG_NDISC = 32, + DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, + DEVCONF_USE_OPTIMISTIC = 34, + DEVCONF_ACCEPT_RA_MTU = 35, + DEVCONF_STABLE_SECRET = 36, + DEVCONF_USE_OIF_ADDRS_ONLY = 37, + DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, + DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, + DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, + DEVCONF_DROP_UNSOLICITED_NA = 41, + DEVCONF_KEEP_ADDR_ON_DOWN = 42, + DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, + DEVCONF_SEG6_ENABLED = 44, + DEVCONF_SEG6_REQUIRE_HMAC = 45, + DEVCONF_ENHANCED_DAD = 46, + DEVCONF_ADDR_GEN_MODE = 47, + DEVCONF_DISABLE_POLICY = 48, + DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, + DEVCONF_NDISC_TCLASS = 50, + DEVCONF_RPL_SEG_ENABLED = 51, + DEVCONF_RA_DEFRTR_METRIC = 52, + DEVCONF_IOAM6_ENABLED = 53, + DEVCONF_IOAM6_ID = 54, + DEVCONF_IOAM6_ID_WIDE = 55, + DEVCONF_NDISC_EVICT_NOCARRIER = 56, + DEVCONF_ACCEPT_UNTRACKED_NA = 57, + DEVCONF_ACCEPT_RA_MIN_LFT = 58, + DEVCONF_MAX = 59, +}; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; + +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); + +typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); + +enum rtnl_link_flags { + RTNL_FLAG_DOIT_UNLOCKED = 1, + RTNL_FLAG_BULK_DEL_SUPPORTED = 2, + RTNL_FLAG_DUMP_UNLOCKED = 4, + RTNL_FLAG_DUMP_SPLIT_NLM_DONE = 8, +}; + +struct rtnl_msg_handler { + struct module *owner; + int protocol; + int msgtype; + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + int flags; +}; + +struct rtnl_af_ops { + struct list_head list; + struct srcu_struct srcu; + int family; + int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); + size_t (*get_link_af_size)(const struct net_device *, u32); + int (*validate_link_af)(const struct 
net_device *, const struct nlattr *, struct netlink_ext_ack *); + int (*set_link_af)(struct net_device *, const struct nlattr *, struct netlink_ext_ack *); + int (*fill_stats_af)(struct sk_buff *, const struct net_device *); + size_t (*get_stats_af_size)(const struct net_device *); +}; + +enum { + INET6_IFADDR_STATE_PREDAD = 0, + INET6_IFADDR_STATE_DAD = 1, + INET6_IFADDR_STATE_POSTDAD = 2, + INET6_IFADDR_STATE_ERRDAD = 3, + INET6_IFADDR_STATE_DEAD = 4, +}; + +struct in6_validator_info { + struct in6_addr i6vi_addr; + struct inet6_dev *i6vi_dev; + struct netlink_ext_ack *extack; +}; + +struct ifa6_config { + const struct in6_addr *pfx; + unsigned int plen; + u8 ifa_proto; + const struct in6_addr *peer_pfx; + u32 rt_priority; + u32 ifa_flags; + u32 preferred_lft; + u32 valid_lft; + u16 scope; +}; + +union fwnet_hwaddr { + u8 u[16]; + struct { + __be64 uniq_id; + u8 max_rec; + u8 sspd; + u8 fifo[6]; + } uc; +}; + +struct netconfmsg { + __u8 ncm_family; +}; + +enum { + NETCONFA_UNSPEC = 0, + NETCONFA_IFINDEX = 1, + NETCONFA_FORWARDING = 2, + NETCONFA_RP_FILTER = 3, + NETCONFA_MC_FORWARDING = 4, + NETCONFA_PROXY_NEIGH = 5, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, + NETCONFA_INPUT = 7, + NETCONFA_BC_FORWARDING = 8, + __NETCONFA_MAX = 9, +}; + +enum cleanup_prefix_rt_t { + CLEANUP_PREFIX_RT_NOP = 0, + CLEANUP_PREFIX_RT_DEL = 1, + CLEANUP_PREFIX_RT_EXPIRE = 2, +}; + +enum { + IPV6_SADDR_RULE_INIT = 0, + IPV6_SADDR_RULE_LOCAL = 1, + IPV6_SADDR_RULE_SCOPE = 2, + IPV6_SADDR_RULE_PREFERRED = 3, + IPV6_SADDR_RULE_OIF = 4, + IPV6_SADDR_RULE_LABEL = 5, + IPV6_SADDR_RULE_PRIVACY = 6, + IPV6_SADDR_RULE_ORCHID = 7, + IPV6_SADDR_RULE_PREFIX = 8, + IPV6_SADDR_RULE_MAX = 9, +}; + +struct ipv6_saddr_score { + int rule; + int addr_type; + struct inet6_ifaddr *ifa; + long unsigned int scorebits[1]; + int scopedist; + int matchlen; +}; + +struct ipv6_saddr_dst { + const struct in6_addr *addr; + int ifindex; + int scope; + int label; + unsigned int prefs; +}; + +struct if6_iter_state { + struct seq_net_private p; + int bucket; + int offset; +}; + +enum addr_type_t { + UNICAST_ADDR = 0, + MULTICAST_ADDR = 1, + ANYCAST_ADDR = 2, +}; + +struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; + enum addr_type_t type; +}; + +enum { + DAD_PROCESS = 0, + DAD_BEGIN = 1, + DAD_ABORT = 2, +}; + +struct devlink; + +enum devlink_port_type { + DEVLINK_PORT_TYPE_NOTSET = 0, + DEVLINK_PORT_TYPE_AUTO = 1, + DEVLINK_PORT_TYPE_ETH = 2, + DEVLINK_PORT_TYPE_IB = 3, +}; + +struct ib_device; + +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0, + DEVLINK_PORT_FLAVOUR_CPU = 1, + DEVLINK_PORT_FLAVOUR_DSA = 2, + DEVLINK_PORT_FLAVOUR_PCI_PF = 3, + DEVLINK_PORT_FLAVOUR_PCI_VF = 4, + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5, + DEVLINK_PORT_FLAVOUR_UNUSED = 6, + DEVLINK_PORT_FLAVOUR_PCI_SF = 7, +}; + +struct devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct devlink_port_pci_pf_attrs { + u32 controller; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_pci_vf_attrs { + u32 controller; + u16 pf; + u16 vf; + u8 external: 1; +}; + +struct devlink_port_pci_sf_attrs { + u32 controller; + u32 sf; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_attrs { + u8 split: 1; + u8 splittable: 1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct devlink_port_phys_attrs phys; + struct devlink_port_pci_pf_attrs pci_pf; + struct devlink_port_pci_vf_attrs pci_vf; + struct 
devlink_port_pci_sf_attrs pci_sf; + }; +}; + +struct devlink_linecard; + +struct devlink_port_ops; + +struct devlink_rate; + +struct devlink_port { + struct list_head list; + struct list_head region_list; + struct devlink *devlink; + const struct devlink_port_ops *ops; + unsigned int index; + spinlock_t type_lock; + enum devlink_port_type type; + enum devlink_port_type desired_type; + union { + struct { + struct net_device *netdev; + int ifindex; + char ifname[16]; + } type_eth; + struct { + struct ib_device *ibdev; + } type_ib; + }; + struct devlink_port_attrs attrs; + u8 attrs_set: 1; + u8 switch_port: 1; + u8 registered: 1; + u8 initialized: 1; + struct delayed_work type_warn_dw; + struct list_head reporter_list; + struct devlink_rate *devlink_rate; + struct devlink_linecard *linecard; + u32 rel_index; +}; + +enum devlink_rate_type { + DEVLINK_RATE_TYPE_LEAF = 0, + DEVLINK_RATE_TYPE_NODE = 1, +}; + +enum devlink_port_fn_state { + DEVLINK_PORT_FN_STATE_INACTIVE = 0, + DEVLINK_PORT_FN_STATE_ACTIVE = 1, +}; + +enum devlink_port_fn_opstate { + DEVLINK_PORT_FN_OPSTATE_DETACHED = 0, + DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1, +}; + +struct devlink_rate { + struct list_head list; + enum devlink_rate_type type; + struct devlink *devlink; + void *priv; + long: 32; + u64 tx_share; + u64 tx_max; + struct devlink_rate *parent; + union { + struct devlink_port *devlink_port; + struct { + char *name; + refcount_t refcnt; + }; + }; + u32 tx_priority; + u32 tx_weight; + long: 32; +}; + +struct devlink_port_ops { + int (*port_split)(struct devlink *, struct devlink_port *, unsigned int, struct netlink_ext_ack *); + int (*port_unsplit)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_type_set)(struct devlink_port *, enum devlink_port_type); + int (*port_del)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_fn_hw_addr_get)(struct devlink_port *, u8 *, int *, struct netlink_ext_ack *); + int (*port_fn_hw_addr_set)(struct devlink_port *, const u8 *, int, struct netlink_ext_ack *); + int (*port_fn_roce_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_roce_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_migratable_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_migratable_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_state_get)(struct devlink_port *, enum devlink_port_fn_state *, enum devlink_port_fn_opstate *, struct netlink_ext_ack *); + int (*port_fn_state_set)(struct devlink_port *, enum devlink_port_fn_state, struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_max_io_eqs_get)(struct devlink_port *, u32 *, struct netlink_ext_ack *); + int (*port_fn_max_io_eqs_set)(struct devlink_port *, u32, struct netlink_ext_ack *); +}; + +enum net_bridge_opts { + BROPT_VLAN_ENABLED = 0, + BROPT_VLAN_STATS_ENABLED = 1, + BROPT_NF_CALL_IPTABLES = 2, + BROPT_NF_CALL_IP6TABLES = 3, + BROPT_NF_CALL_ARPTABLES = 4, + BROPT_GROUP_ADDR_SET = 5, + BROPT_MULTICAST_ENABLED = 6, + BROPT_MULTICAST_QUERY_USE_IFADDR = 7, + BROPT_MULTICAST_STATS_ENABLED = 8, + BROPT_HAS_IPV6_ADDR = 9, + 
BROPT_NEIGH_SUPPRESS_ENABLED = 10, + BROPT_MTU_SET_BY_USER = 11, + BROPT_VLAN_STATS_PER_PORT = 12, + BROPT_NO_LL_LEARN = 13, + BROPT_VLAN_BRIDGE_BINDING = 14, + BROPT_MCAST_VLAN_SNOOPING_ENABLED = 15, + BROPT_MST_ENABLED = 16, +}; + +enum mac80211_tx_control_flags { + IEEE80211_TX_CTRL_PORT_CTRL_PROTO = 1, + IEEE80211_TX_CTRL_PS_RESPONSE = 2, + IEEE80211_TX_CTRL_RATE_INJECT = 4, + IEEE80211_TX_CTRL_AMSDU = 8, + IEEE80211_TX_CTRL_FAST_XMIT = 16, + IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = 32, + IEEE80211_TX_INTCFL_NEED_TXPROCESSING = 64, + IEEE80211_TX_CTRL_NO_SEQNO = 128, + IEEE80211_TX_CTRL_DONT_REORDER = 256, + IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = 512, + IEEE80211_TX_CTRL_DONT_USE_RATE_MASK = 1024, + IEEE80211_TX_CTRL_MLO_LINK = 4026531840, +}; + +struct ieee80211_rate_status { + struct rate_info rate_idx; + u8 try_count; + u8 tx_power_idx; +}; + +struct ieee80211_tx_status { + struct ieee80211_sta *sta; + struct ieee80211_tx_info *info; + struct sk_buff *skb; + struct ieee80211_rate_status *rates; + ktime_t ack_hwtstamp; + u8 n_rates; + struct list_head *free_list; +}; + +struct ieee80211_tx_rate_control { + struct ieee80211_hw *hw; + struct ieee80211_supported_band *sband; + struct ieee80211_bss_conf *bss_conf; + struct sk_buff *skb; + struct ieee80211_tx_rate reported_rate; + bool rts; + bool short_preamble; + u32 rate_idx_mask; + u8 *rate_idx_mcs_mask; + bool bss; +}; + +enum rate_control_capabilities { + RATE_CTRL_CAPA_VHT_EXT_NSS_BW = 1, + RATE_CTRL_CAPA_AMPDU_TRIGGER = 2, +}; + +struct rate_control_ops { + long unsigned int capa; + const char *name; + void * (*alloc)(struct ieee80211_hw *); + void (*add_debugfs)(struct ieee80211_hw *, void *, struct dentry *); + void (*free)(void *); + void * (*alloc_sta)(void *, struct ieee80211_sta *, gfp_t); + void (*rate_init)(void *, struct ieee80211_supported_band *, struct cfg80211_chan_def *, struct ieee80211_sta *, void *); + void (*rate_update)(void *, struct ieee80211_supported_band *, struct cfg80211_chan_def *, struct ieee80211_sta *, void *, u32); + void (*free_sta)(void *, struct ieee80211_sta *, void *); + void (*tx_status_ext)(void *, struct ieee80211_supported_band *, void *, struct ieee80211_tx_status *); + void (*tx_status)(void *, struct ieee80211_supported_band *, struct ieee80211_sta *, void *, struct sk_buff *); + void (*get_rate)(void *, struct ieee80211_sta *, void *, struct ieee80211_tx_rate_control *); + void (*add_sta_debugfs)(void *, void *, struct dentry *); + u32 (*get_expected_throughput)(void *); +}; + +enum ieee80211_sta_info_flags { + WLAN_STA_AUTH = 0, + WLAN_STA_ASSOC = 1, + WLAN_STA_PS_STA = 2, + WLAN_STA_AUTHORIZED = 3, + WLAN_STA_SHORT_PREAMBLE = 4, + WLAN_STA_WDS = 5, + WLAN_STA_CLEAR_PS_FILT = 6, + WLAN_STA_MFP = 7, + WLAN_STA_BLOCK_BA = 8, + WLAN_STA_PS_DRIVER = 9, + WLAN_STA_PSPOLL = 10, + WLAN_STA_TDLS_PEER = 11, + WLAN_STA_TDLS_PEER_AUTH = 12, + WLAN_STA_TDLS_INITIATOR = 13, + WLAN_STA_TDLS_CHAN_SWITCH = 14, + WLAN_STA_TDLS_OFF_CHANNEL = 15, + WLAN_STA_TDLS_WIDER_BW = 16, + WLAN_STA_UAPSD = 17, + WLAN_STA_SP = 18, + WLAN_STA_4ADDR_EVENT = 19, + WLAN_STA_INSERTED = 20, + WLAN_STA_RATE_CONTROL = 21, + WLAN_STA_TOFFSET_KNOWN = 22, + WLAN_STA_MPSP_OWNER = 23, + WLAN_STA_MPSP_RECIPIENT = 24, + WLAN_STA_PS_DELIVER = 25, + WLAN_STA_USES_ENCRYPTION = 26, + WLAN_STA_DECAP_OFFLOAD = 27, + NUM_WLAN_STA_FLAGS = 28, +}; + +struct rate_control_ref { + const struct rate_control_ops *ops; + void *priv; +}; + +struct minstrel_priv { + struct ieee80211_hw *hw; + unsigned int cw_min; + unsigned int cw_max; + unsigned int 
max_retry; + unsigned int segment_size; + unsigned int update_interval; + u8 cck_rates[4]; + u8 ofdm_rates[48]; +}; + +struct mcs_group { + u16 flags; + u8 streams; + u8 shift; + u8 bw; + u16 duration[10]; +}; + +struct minstrel_rate_stats { + u16 attempts; + u16 last_attempts; + u16 success; + u16 last_success; + u32 att_hist; + u32 succ_hist; + u16 prob_avg; + u16 prob_avg_1; + u8 retry_count; + u8 retry_count_rtscts; + bool retry_updated; +}; + +enum minstrel_sample_type { + MINSTREL_SAMPLE_TYPE_INC = 0, + MINSTREL_SAMPLE_TYPE_JUMP = 1, + MINSTREL_SAMPLE_TYPE_SLOW = 2, + __MINSTREL_SAMPLE_TYPE_MAX = 3, +}; + +struct minstrel_mcs_group_data { + u8 index; + u8 column; + u16 max_group_tp_rate[4]; + u16 max_group_prob_rate; + struct minstrel_rate_stats rates[10]; +}; + +struct minstrel_sample_category { + u8 sample_group; + u16 sample_rates[5]; + u16 cur_sample_rates[5]; +}; + +struct minstrel_ht_sta { + struct ieee80211_sta *sta; + unsigned int ampdu_len; + unsigned int ampdu_packets; + unsigned int avg_ampdu_len; + u16 max_tp_rate[4]; + u16 max_prob_rate; + long unsigned int last_stats_update; + unsigned int overhead; + unsigned int overhead_rtscts; + unsigned int overhead_legacy; + unsigned int overhead_legacy_rtscts; + unsigned int total_packets; + unsigned int sample_packets; + u32 tx_flags; + bool use_short_preamble; + u8 band; + u8 sample_seq; + u16 sample_rate; + long unsigned int sample_time; + struct minstrel_sample_category sample[3]; + u16 supported[42]; + struct minstrel_mcs_group_data groups[42]; +}; + +struct posix_acl_entry { + short int e_tag; + short unsigned int e_perm; + union { + kuid_t e_uid; + kgid_t e_gid; + }; +}; + +struct posix_acl { + refcount_t a_refcount; + unsigned int a_count; + struct callback_head a_rcu; + struct posix_acl_entry a_entries[0]; +}; + +enum { + Root_NFS = 255, + Root_CIFS = 254, + Root_Generic = 253, + Root_RAM0 = 1048576, +}; + +enum rpc_display_format_t { + RPC_DISPLAY_ADDR = 0, + RPC_DISPLAY_PORT = 1, + RPC_DISPLAY_PROTO = 2, + RPC_DISPLAY_HEX_ADDR = 3, + RPC_DISPLAY_HEX_PORT = 4, + RPC_DISPLAY_NETID = 5, + RPC_DISPLAY_MAX = 6, +}; + +enum nfs_opnum4 { + OP_ACCESS = 3, + OP_CLOSE = 4, + OP_COMMIT = 5, + OP_CREATE = 6, + OP_DELEGPURGE = 7, + OP_DELEGRETURN = 8, + OP_GETATTR = 9, + OP_GETFH = 10, + OP_LINK = 11, + OP_LOCK = 12, + OP_LOCKT = 13, + OP_LOCKU = 14, + OP_LOOKUP = 15, + OP_LOOKUPP = 16, + OP_NVERIFY = 17, + OP_OPEN = 18, + OP_OPENATTR = 19, + OP_OPEN_CONFIRM = 20, + OP_OPEN_DOWNGRADE = 21, + OP_PUTFH = 22, + OP_PUTPUBFH = 23, + OP_PUTROOTFH = 24, + OP_READ = 25, + OP_READDIR = 26, + OP_READLINK = 27, + OP_REMOVE = 28, + OP_RENAME = 29, + OP_RENEW = 30, + OP_RESTOREFH = 31, + OP_SAVEFH = 32, + OP_SECINFO = 33, + OP_SETATTR = 34, + OP_SETCLIENTID = 35, + OP_SETCLIENTID_CONFIRM = 36, + OP_VERIFY = 37, + OP_WRITE = 38, + OP_RELEASE_LOCKOWNER = 39, + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + OP_ALLOCATE = 59, + OP_COPY = 60, + OP_COPY_NOTIFY = 61, + OP_DEALLOCATE = 62, + OP_IO_ADVISE = 63, + OP_LAYOUTERROR = 64, + OP_LAYOUTSTATS = 65, + OP_OFFLOAD_CANCEL = 66, + OP_OFFLOAD_STATUS = 67, + OP_READ_PLUS = 68, 
+ OP_SEEK = 69, + OP_WRITE_SAME = 70, + OP_CLONE = 71, + OP_GETXATTR = 72, + OP_SETXATTR = 73, + OP_LISTXATTRS = 74, + OP_REMOVEXATTR = 75, + OP_ILLEGAL = 10044, +}; + +struct mpu_rgn { + union { + u32 drbar; + u32 prbar; + }; + union { + u32 drsr; + u32 prlar; + }; + union { + u32 dracr; + u32 unused; + }; +}; + +struct mpu_rgn_info { + unsigned int used; + struct mpu_rgn rgns[16]; +}; + +struct secondary_data { + union { + struct mpu_rgn_info *mpu_rgn_info; + u64 pgdir; + }; + long unsigned int swapper_pg_dir; + void *stack; + struct task_struct *task; + long: 32; +}; + +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED = 0, + CPUFREQ_TABLE_SORTED_ASCENDING = 1, + CPUFREQ_TABLE_SORTED_DESCENDING = 2, +}; + +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + unsigned int transition_latency; +}; + +struct cpufreq_stats; + +struct cpufreq_governor; + +struct cpufreq_frequency_table; + +struct cpufreq_policy { + cpumask_var_t cpus; + cpumask_var_t related_cpus; + cpumask_var_t real_cpus; + unsigned int shared_type; + unsigned int cpu; + struct clk *clk; + struct cpufreq_cpuinfo cpuinfo; + unsigned int min; + unsigned int max; + unsigned int cur; + unsigned int suspend_freq; + unsigned int policy; + unsigned int last_policy; + struct cpufreq_governor *governor; + void *governor_data; + char last_governor[16]; + struct work_struct update; + struct freq_constraints constraints; + struct freq_qos_request *min_freq_req; + struct freq_qos_request *max_freq_req; + struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; + struct list_head policy_list; + struct kobject kobj; + struct completion kobj_unregister; + struct rw_semaphore rwsem; + bool fast_switch_possible; + bool fast_switch_enabled; + bool strict_target; + bool efficiencies_available; + unsigned int transition_delay_us; + bool dvfs_possible_from_any_cpu; + bool boost_enabled; + unsigned int cached_target_freq; + unsigned int cached_resolved_idx; + bool transition_ongoing; + spinlock_t transition_lock; + wait_queue_head_t transition_wait; + struct task_struct *transition_task; + struct cpufreq_stats *stats; + void *driver_data; + struct thermal_cooling_device *cdev; + struct notifier_block nb_min; + struct notifier_block nb_max; +}; + +struct cpufreq_governor { + char name[16]; + int (*init)(struct cpufreq_policy *); + void (*exit)(struct cpufreq_policy *); + int (*start)(struct cpufreq_policy *); + void (*stop)(struct cpufreq_policy *); + void (*limits)(struct cpufreq_policy *); + ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); + int (*store_setspeed)(struct cpufreq_policy *, unsigned int); + struct list_head governor_list; + struct module *owner; + u8 flags; +}; + +struct cpufreq_frequency_table { + unsigned int flags; + unsigned int driver_data; + unsigned int frequency; +}; + +struct cpufreq_freqs { + struct cpufreq_policy *policy; + unsigned int old; + unsigned int new; + u8 flags; +}; + +struct cpuinfo_arm { + u32 cpuid; + unsigned int loops_per_jiffy; +}; + +enum ipi_msg_type { + IPI_WAKEUP = 0, + IPI_TIMER = 1, + IPI_RESCHEDULE = 2, + IPI_CALL_FUNC = 3, + IPI_CPU_STOP = 4, + IPI_IRQ_WORK = 5, + IPI_COMPLETION = 6, + NR_IPI = 7, + IPI_CPU_BACKTRACE = 7, + MAX_IPI = 8, +}; + +struct optimistic_spin_node { + struct optimistic_spin_node *next; + struct optimistic_spin_node *prev; + int locked; + int cpu; +}; + +struct swsusp_info { + struct new_utsname uts; + u32 version_code; + long unsigned int num_physpages; + int cpus; + long unsigned int image_pages; + 
long unsigned int pages; + long unsigned int size; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; 
+ long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct swap_map_page { + sector_t entries[511]; + sector_t next_swap; +}; + +struct swap_map_page_list { + struct swap_map_page *map; + struct swap_map_page_list *next; +}; + +struct swap_map_handle { + struct swap_map_page *cur; + struct swap_map_page_list *maps; + sector_t cur_swap; + sector_t first_sector; + unsigned int k; + long unsigned int reqd_free_pages; + u32 crc32; + long: 32; +}; + +struct swsusp_header { + char reserved[4056]; + u32 hw_sig; + u32 crc32; + sector_t image; + unsigned int flags; + char orig_sig[10]; + char sig[10]; +}; + +struct swsusp_extent { + struct rb_node node; + long unsigned int start; + long unsigned int end; +}; + +struct hib_bio_batch { + atomic_t count; + wait_queue_head_t wait; + blk_status_t error; + long: 32; + struct blk_plug plug; +}; + +struct crc_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + unsigned int run_threads; + wait_queue_head_t go; + wait_queue_head_t done; + u32 *crc32; + size_t *unc_len[3]; + unsigned char *unc[3]; +}; + +struct cmp_data { + struct task_struct *thr; + struct crypto_comp *cc; + atomic_t ready; + atomic_t stop; + int ret; + wait_queue_head_t go; + wait_queue_head_t done; + size_t unc_len; + size_t cmp_len; + unsigned char unc[131072]; + unsigned char cmp[143360]; +}; + +struct dec_data { + struct task_struct *thr; + struct crypto_comp *cc; + atomic_t ready; + atomic_t stop; + int ret; + wait_queue_head_t go; + wait_queue_head_t done; + size_t unc_len; + size_t cmp_len; + unsigned char unc[131072]; + unsigned char cmp[143360]; +}; + +struct msi_dev_domain { + struct xarray store; + struct irq_domain *domain; +}; + +struct msi_device_data { + long unsigned int properties; + struct mutex mutex; + struct msi_dev_domain __domains[1]; + long unsigned int __iter_idx; +}; + +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY = 1, + IRQ_SET_MASK_OK_DONE = 2, +}; + +struct arch_msi_msg_addr_lo { + u32 address_lo; +}; + +typedef struct arch_msi_msg_addr_lo arch_msi_msg_addr_lo_t; + +struct arch_msi_msg_addr_hi { + u32 address_hi; +}; + +typedef struct arch_msi_msg_addr_hi arch_msi_msg_addr_hi_t; + +struct arch_msi_msg_data { + u32 data; +}; + +typedef struct arch_msi_msg_data arch_msi_msg_data_t; + +struct msi_msg { + union { + u32 address_lo; + arch_msi_msg_addr_lo_t arch_addr_lo; + }; + union { + u32 address_hi; + arch_msi_msg_addr_hi_t arch_addr_hi; + }; + union { + u32 data; + arch_msi_msg_data_t arch_data; + }; +}; + +struct pci_msi_desc { + union { + u32 msi_mask; + u32 msix_ctrl; + }; + struct { + u8 is_msix: 1; + u8 multiple: 3; + u8 multi_cap: 3; + u8 can_mask: 1; + u8 is_64: 1; + u8 is_virtual: 1; + unsigned int default_irq; + } msi_attrib; + union { + u8 mask_pos; + void *mask_base; + }; +}; + +union msi_domain_cookie { + u64 value; + void *ptr; + void *iobase; +}; + +union msi_instance_cookie { + u64 value; + void *ptr; +}; + +struct msi_desc_data { + union msi_domain_cookie dcookie; + union msi_instance_cookie icookie; +}; + +struct irq_affinity_desc; + +struct msi_desc { + unsigned int irq; + unsigned int nvec_used; + struct device *dev; + struct msi_msg msg; 
+ struct irq_affinity_desc *affinity; + struct device_attribute *sysfs_attrs; + void (*write_msi_msg)(struct msi_desc *, void *); + void *write_msi_msg_data; + u16 msi_index; + long: 32; + union { + struct pci_msi_desc pci; + struct msi_desc_data data; + }; +}; + +enum { + IRQCHIP_SET_TYPE_MASKED = 1, + IRQCHIP_EOI_IF_HANDLED = 2, + IRQCHIP_MASK_ON_SUSPEND = 4, + IRQCHIP_ONOFFLINE_ENABLED = 8, + IRQCHIP_SKIP_SET_WAKE = 16, + IRQCHIP_ONESHOT_SAFE = 32, + IRQCHIP_EOI_THREADED = 64, + IRQCHIP_SUPPORTS_LEVEL_MSI = 128, + IRQCHIP_SUPPORTS_NMI = 256, + IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = 512, + IRQCHIP_AFFINITY_PRE_STARTUP = 1024, + IRQCHIP_IMMUTABLE = 2048, +}; + +enum { + IRQ_DOMAIN_FLAG_HIERARCHY = 1, + IRQ_DOMAIN_NAME_ALLOCATED = 2, + IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, + IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, + IRQ_DOMAIN_FLAG_MSI = 16, + IRQ_DOMAIN_FLAG_ISOLATED_MSI = 32, + IRQ_DOMAIN_FLAG_NO_MAP = 64, + IRQ_DOMAIN_FLAG_MSI_PARENT = 256, + IRQ_DOMAIN_FLAG_MSI_DEVICE = 512, + IRQ_DOMAIN_FLAG_DESTROY_GC = 1024, + IRQ_DOMAIN_FLAG_NONCORE = 65536, +}; + +enum { + IRQCHIP_FWNODE_REAL = 0, + IRQCHIP_FWNODE_NAMED = 1, + IRQCHIP_FWNODE_NAMED_ID = 2, +}; + +enum msi_domain_ids { + MSI_DEFAULT_DOMAIN = 0, + MSI_MAX_DEVICE_IRQDOMAINS = 1, +}; + +struct msi_map { + int index; + int virq; +}; + +struct msi_alloc_info { + struct msi_desc *desc; + irq_hw_number_t hwirq; + long unsigned int flags; + union { + long unsigned int ul; + void *ptr; + } scratchpad[2]; +}; + +typedef struct msi_alloc_info msi_alloc_info_t; + +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed: 1; +}; + +enum msi_desc_filter { + MSI_DESC_ALL = 0, + MSI_DESC_NOTASSOCIATED = 1, + MSI_DESC_ASSOCIATED = 2, +}; + +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *); + int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *); + void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); + int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); + void (*prepare_desc)(struct irq_domain *, msi_alloc_info_t *, struct msi_desc *); + void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); + int (*domain_alloc_irqs)(struct irq_domain *, struct device *, int); + void (*domain_free_irqs)(struct irq_domain *, struct device *); + void (*msi_post_free)(struct irq_domain *, struct device *); + int (*msi_translate)(struct irq_domain *, struct irq_fwspec *, irq_hw_number_t *, unsigned int *); +}; + +struct msi_domain_info { + u32 flags; + enum irq_domain_bus_token bus_token; + unsigned int hwsize; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +struct msi_domain_template { + char name[48]; + struct irq_chip chip; + struct msi_domain_ops ops; + struct msi_domain_info info; +}; + +enum { + MSI_FLAG_USE_DEF_DOM_OPS = 1, + MSI_FLAG_USE_DEF_CHIP_OPS = 2, + MSI_FLAG_ACTIVATE_EARLY = 4, + MSI_FLAG_MUST_REACTIVATE = 8, + MSI_FLAG_DEV_SYSFS = 16, + MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = 32, + MSI_FLAG_FREE_MSI_DESCS = 64, + MSI_FLAG_USE_DEV_FWNODE = 128, + MSI_FLAG_PARENT_PM_DEV = 256, + MSI_FLAG_PCI_MSI_MASK_PARENT = 512, + MSI_GENERIC_FLAGS_MASK = 65535, + MSI_DOMAIN_FLAGS_MASK = 4294901760, + MSI_FLAG_MULTI_PCI_MSI = 65536, + MSI_FLAG_PCI_MSIX = 131072, + MSI_FLAG_LEVEL_CAPABLE = 262144, + MSI_FLAG_MSIX_CONTIGUOUS = 524288, + MSI_FLAG_PCI_MSIX_ALLOC_DYN = 1048576, + 
MSI_FLAG_NO_AFFINITY = 2097152, +}; + +struct msi_ctrl { + unsigned int domid; + unsigned int first; + unsigned int last; + unsigned int nirqs; +}; + +struct trace_bprintk_fmt { + struct list_head list; + const char *fmt; +}; + +struct btf_id_dtor_kfunc { + u32 btf_id; + u32 kfunc_btf_id; +}; + +struct bpf_cpumask { + cpumask_t cpumask; + refcount_t usage; +}; + +struct anon_vma_name { + struct kref kref; + char name[0]; +}; + +struct vma_iterator { + struct ma_state mas; +}; + +struct vm_special_mapping { + const char *name; + struct page **pages; + vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); + int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); + void (*close)(const struct vm_special_mapping *, struct vm_area_struct *); +}; + +enum { + HUGETLB_SHMFS_INODE = 1, + HUGETLB_ANONHUGE_INODE = 2, +}; + +struct hstate {}; + +struct mempolicy {}; + +struct trace_event_raw_vm_unmapped_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int total_vm; + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; + char __data[0]; +}; + +struct trace_event_raw_vma_mas_szero { + struct trace_entry ent; + struct maple_tree *mt; + long unsigned int start; + long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_vma_store { + struct trace_entry ent; + struct maple_tree *mt; + struct vm_area_struct *vma; + long unsigned int vm_start; + long unsigned int vm_end; + char __data[0]; +}; + +struct trace_event_raw_exit_mmap { + struct trace_entry ent; + struct mm_struct *mm; + struct maple_tree *mt; + char __data[0]; +}; + +struct trace_event_data_offsets_vm_unmapped_area {}; + +struct trace_event_data_offsets_vma_mas_szero {}; + +struct trace_event_data_offsets_vma_store {}; + +struct trace_event_data_offsets_exit_mmap {}; + +typedef void (*btf_trace_vm_unmapped_area)(void *, long unsigned int, struct vm_unmapped_area_info *); + +typedef void (*btf_trace_vma_mas_szero)(void *, struct maple_tree *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_vma_store)(void *, struct maple_tree *, struct vm_area_struct *); + +typedef void (*btf_trace_exit_mmap)(void *, struct mm_struct *); + +enum vma_merge_state { + VMA_MERGE_START = 0, + VMA_MERGE_ERROR_NOMEM = 1, + VMA_MERGE_NOMERGE = 2, + VMA_MERGE_SUCCESS = 3, +}; + +enum vma_merge_flags { + VMG_FLAG_DEFAULT = 0, + VMG_FLAG_JUST_EXPAND = 1, +}; + +struct vma_merge_struct { + struct mm_struct *mm; + struct vma_iterator *vmi; + long unsigned int pgoff; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct vm_area_struct *vma; + long unsigned int start; + long unsigned int end; + long unsigned int flags; + struct file *file; + struct anon_vma *anon_vma; + struct mempolicy *policy; + struct vm_userfaultfd_ctx uffd_ctx; + struct anon_vma_name *anon_name; + enum vma_merge_flags merge_flags; + enum vma_merge_state state; +}; + +struct mmap_arg_struct { + long unsigned int addr; + long unsigned int len; + long unsigned int prot; + long unsigned int flags; + long unsigned int fd; + long unsigned int offset; +}; + +typedef struct { + void *lock; +} class_preempt_t; + +struct files_stat_struct { + long unsigned int nr_files; + long unsigned int nr_free_files; + long unsigned int max_files; +}; + +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE = 0, + FSNOTIFY_EVENT_PATH = 1, + FSNOTIFY_EVENT_INODE = 2, + 
FSNOTIFY_EVENT_DENTRY = 3, + FSNOTIFY_EVENT_ERROR = 4, +}; + +struct backing_file { + struct file file; + union { + struct path user_path; + freeptr_t bf_freeptr; + }; +}; + +struct wait_bit_queue_entry { + struct wait_bit_key key; + struct wait_queue_entry wq_entry; +}; + +enum wb_state { + WB_registered = 0, + WB_writeback_running = 1, + WB_has_dirty_io = 2, + WB_start_all = 3, +}; + +struct wb_writeback_work { + long int nr_pages; + struct super_block *sb; + enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages: 1; + unsigned int for_kupdate: 1; + unsigned int range_cyclic: 1; + unsigned int for_background: 1; + unsigned int for_sync: 1; + unsigned int auto_free: 1; + enum wb_reason reason; + struct list_head list; + struct wb_completion *done; +}; + +struct trace_event_raw_writeback_folio_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_writeback_dirty_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_inode_foreign_history { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t cgroup_ino; + unsigned int history; + char __data[0]; +}; + +struct trace_event_raw_inode_switch_wbs { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t old_cgroup_ino; + ino_t new_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_track_foreign_dirty { + struct trace_entry ent; + char name[32]; + u64 bdi_id; + ino_t ino; + unsigned int memcg_id; + ino_t cgroup_ino; + ino_t page_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_flush_foreign { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + unsigned int frn_bdi_id; + unsigned int frn_memcg_id; + char __data[0]; +}; + +struct trace_event_raw_writeback_write_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + int sync_mode; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_work_class { + struct trace_entry ent; + char name[32]; + long int nr_pages; + dev_t sb_dev; + int sync_mode; + int for_kupdate; + int range_cyclic; + int for_background; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_pages_written { + struct trace_entry ent; + long int pages; + char __data[0]; +}; + +struct trace_event_raw_writeback_class { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_bdi_register { + struct trace_entry ent; + char name[32]; + char __data[0]; +}; + +struct trace_event_raw_wbc_class { + struct trace_entry ent; + char name[32]; + long int nr_to_write; + long int pages_skipped; + int sync_mode; + int for_kupdate; + int for_background; + int for_reclaim; + int range_cyclic; + long int range_start; + long int range_end; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_queue_io { + struct trace_entry ent; + char name[32]; + long unsigned int older; + long int age; + int moved; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_global_dirty_state { + struct trace_entry ent; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int background_thresh; + long unsigned int dirty_thresh; + long unsigned int dirty_limit; + long unsigned int nr_dirtied; + long unsigned int nr_written; + char __data[0]; +}; + +struct 
trace_event_raw_bdi_dirty_ratelimit { + struct trace_entry ent; + char bdi[32]; + long unsigned int write_bw; + long unsigned int avg_write_bw; + long unsigned int dirty_rate; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + long unsigned int balanced_dirty_ratelimit; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_balance_dirty_pages { + struct trace_entry ent; + char bdi[32]; + long unsigned int limit; + long unsigned int setpoint; + long unsigned int dirty; + long unsigned int bdi_setpoint; + long unsigned int bdi_dirty; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + unsigned int dirtied; + unsigned int dirtied_pause; + long unsigned int paused; + long int pause; + long unsigned int period; + long int think; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_sb_inodes_requeue { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_single_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + long unsigned int writeback_index; + long int nr_to_write; + long unsigned int wrote; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_inode_template { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int state; + __u16 mode; + long unsigned int dirtied_when; + char __data[0]; +}; + +struct trace_event_data_offsets_writeback_folio_template {}; + +struct trace_event_data_offsets_writeback_dirty_inode_template {}; + +struct trace_event_data_offsets_inode_foreign_history {}; + +struct trace_event_data_offsets_inode_switch_wbs {}; + +struct trace_event_data_offsets_track_foreign_dirty {}; + +struct trace_event_data_offsets_flush_foreign {}; + +struct trace_event_data_offsets_writeback_write_inode_template {}; + +struct trace_event_data_offsets_writeback_work_class {}; + +struct trace_event_data_offsets_writeback_pages_written {}; + +struct trace_event_data_offsets_writeback_class {}; + +struct trace_event_data_offsets_writeback_bdi_register {}; + +struct trace_event_data_offsets_wbc_class {}; + +struct trace_event_data_offsets_writeback_queue_io {}; + +struct trace_event_data_offsets_global_dirty_state {}; + +struct trace_event_data_offsets_bdi_dirty_ratelimit {}; + +struct trace_event_data_offsets_balance_dirty_pages {}; + +struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; + +struct trace_event_data_offsets_writeback_single_inode_template {}; + +struct trace_event_data_offsets_writeback_inode_template {}; + +typedef void (*btf_trace_writeback_dirty_folio)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_folio_wait_writeback)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, struct writeback_control *, unsigned int); + +typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, struct bdi_writeback *); + +typedef void (*btf_trace_track_foreign_dirty)(void *, struct folio *, struct bdi_writeback *); + +typedef void (*btf_trace_flush_foreign)(void 
*, struct bdi_writeback *, unsigned int, unsigned int); + +typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_queue)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_pages_written)(void *, long int); + +typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *); + +typedef void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *); + +typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, struct backing_dev_info *); + +typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, struct wb_writeback_work *, long unsigned int, int); + +typedef void (*btf_trace_global_dirty_state)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long int, long unsigned int); + +typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *); + +typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *); + +typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *); + +typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *); + +typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *); + +typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *); + +struct inode_switch_wbs_context { + struct rcu_work work; + struct bdi_writeback *new_wb; + struct inode *inodes[0]; +}; + +typedef u32 nlink_t; + +enum { + PROC_ENTRY_PERMANENT = 1, +}; + +typedef int (*proc_write_t)(struct file *, char *, size_t); + +struct proc_dir_entry { + atomic_t in_use; + refcount_t refcnt; + struct list_head pde_openers; + spinlock_t pde_unload_lock; + struct completion *pde_unload_completion; + const struct inode_operations *proc_iops; + union { + const struct proc_ops *proc_ops; + const struct file_operations *proc_dir_ops; + }; + const struct dentry_operations *proc_dops; + union { + const struct seq_operations *seq_ops; + int (*single_show)(struct seq_file *, void *); + }; + proc_write_t write; + void *data; + unsigned int state_size; + unsigned int low_ino; + nlink_t nlink; + kuid_t uid; + kgid_t gid; + long: 32; + loff_t size; + struct proc_dir_entry *parent; + struct rb_root subdir; + struct rb_node subdir_node; + char *name; + umode_t mode; + u8 flags; + u8 namelen; + char inline_name[0]; 
+ long: 32; +}; + +struct kernel_cpustat { + u64 cpustat[11]; +}; + +struct ext4_group_desc { + __le32 bg_block_bitmap_lo; + __le32 bg_inode_bitmap_lo; + __le32 bg_inode_table_lo; + __le16 bg_free_blocks_count_lo; + __le16 bg_free_inodes_count_lo; + __le16 bg_used_dirs_count_lo; + __le16 bg_flags; + __le32 bg_exclude_bitmap_lo; + __le16 bg_block_bitmap_csum_lo; + __le16 bg_inode_bitmap_csum_lo; + __le16 bg_itable_unused_lo; + __le16 bg_checksum; + __le32 bg_block_bitmap_hi; + __le32 bg_inode_bitmap_hi; + __le32 bg_inode_table_hi; + __le16 bg_free_blocks_count_hi; + __le16 bg_free_inodes_count_hi; + __le16 bg_used_dirs_count_hi; + __le16 bg_itable_unused_hi; + __le32 bg_exclude_bitmap_hi; + __le16 bg_block_bitmap_csum_hi; + __le16 bg_inode_bitmap_csum_hi; + __u32 bg_reserved; +}; + +struct ext4_locality_group { + struct mutex lg_mutex; + struct list_head lg_prealloc_list[10]; + spinlock_t lg_prealloc_lock; +}; + +typedef int (*ext4_mballoc_query_range_fn)(struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t, void *); + +struct ext4_getfsmap_info { + struct ext4_fsmap_head *gfi_head; + ext4_fsmap_format_t gfi_formatter; + void *gfi_format_arg; + long: 32; + ext4_fsblk_t gfi_next_fsblk; + u32 gfi_dev; + ext4_group_t gfi_agno; + struct ext4_fsmap gfi_low; + struct ext4_fsmap gfi_high; + struct ext4_fsmap gfi_lastfree; + struct list_head gfi_meta_list; + bool gfi_last; + long: 32; +}; + +struct ext4_getfsmap_dev { + int (*gfd_fn)(struct super_block *, struct ext4_fsmap *, struct ext4_getfsmap_info *); + u32 gfd_dev; +}; + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +typedef int get_block_t(struct inode *, sector_t, struct buffer_head *, int); + +typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *); + +struct kstatfs { + long int f_type; + long int f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + __kernel_fsid_t f_fsid; + long int f_namelen; + long int f_frsize; + long int f_flags; + long int f_spare[4]; + long: 32; +}; + +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + S_VERSION = 8, +}; + +enum { + DIO_LOCKING = 1, + DIO_SKIP_HOLES = 2, +}; + +struct fat_boot_sector { + __u8 ignored[3]; + __u8 system_id[8]; + __u8 sector_size[2]; + __u8 sec_per_clus; + __le16 reserved; + __u8 fats; + __u8 dir_entries[2]; + __u8 sectors[2]; + __u8 media; + __le16 fat_length; + __le16 secs_track; + __le16 heads; + __le32 hidden; + __le32 total_sect; + union { + struct { + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat16; + struct { + __le32 length; + __le16 flags; + __u8 version[2]; + __le32 root_cluster; + __le16 info_sector; + __le16 backup_boot; + __le16 reserved2[6]; + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat32; + }; +}; + +struct fat_boot_fsinfo { + __le32 signature1; + __le32 reserved1[120]; + __le32 signature2; + __le32 free_clusters; + __le32 next_cluster; + __le32 reserved2[4]; +}; + +struct constant_table { + const char *name; + int value; +}; + +struct fat_bios_param_block { + u16 fat_sector_size; + u8 fat_sec_per_clus; + u16 fat_reserved; + u8 fat_fats; + u16 fat_dir_entries; + u16 fat_sectors; + u16 fat_fat_length; + u32 fat_total_sect; + u8 fat16_state; + u32 fat16_vol_id; + u32 fat32_length; + u32 fat32_root_cluster; + u16 fat32_info_sector; + u8 fat32_state; + u32 fat32_vol_id; +}; + +struct fat_floppy_defaults { + unsigned int nr_sectors; + unsigned int 
sec_per_clus; + unsigned int dir_entries; + unsigned int media; + unsigned int fat_length; +}; + +enum { + Opt_check = 0, + Opt_uid = 1, + Opt_gid = 2, + Opt_umask = 3, + Opt_dmask = 4, + Opt_fmask = 5, + Opt_allow_utime = 6, + Opt_codepage = 7, + Opt_usefree = 8, + Opt_nocase = 9, + Opt_quiet = 10, + Opt_showexec = 11, + Opt_debug = 12, + Opt_immutable = 13, + Opt_dots = 14, + Opt_dotsOK = 15, + Opt_charset = 16, + Opt_shortname = 17, + Opt_utf8 = 18, + Opt_utf8_bool = 19, + Opt_uni_xl = 20, + Opt_uni_xl_bool = 21, + Opt_nonumtail = 22, + Opt_nonumtail_bool = 23, + Opt_obsolete = 24, + Opt_flush = 25, + Opt_tz = 26, + Opt_rodir = 27, + Opt_errors = 28, + Opt_discard = 29, + Opt_nfs = 30, + Opt_nfs_enum = 31, + Opt_time_offset = 32, + Opt_dos1xfloppy = 33, +}; + +struct trace_print_flags_u64 { + long long unsigned int mask; + const char *name; + long: 32; +}; + +struct btrfs_ioctl_vol_args { + __s64 fd; + char name[4088]; +}; + +enum btrfs_dev_stat_values { + BTRFS_DEV_STAT_WRITE_ERRS = 0, + BTRFS_DEV_STAT_READ_ERRS = 1, + BTRFS_DEV_STAT_FLUSH_ERRS = 2, + BTRFS_DEV_STAT_CORRUPTION_ERRS = 3, + BTRFS_DEV_STAT_GENERATION_ERRS = 4, + BTRFS_DEV_STAT_VALUES_MAX = 5, +}; + +struct btrfs_dir_item { + struct btrfs_disk_key location; + __le64 transid; + __le16 data_len; + __le16 name_len; + __u8 type; +} __attribute__((packed)); + +enum { + IO_TREE_FS_PINNED_EXTENTS = 0, + IO_TREE_FS_EXCLUDED_EXTENTS = 1, + IO_TREE_BTREE_INODE_IO = 2, + IO_TREE_INODE_IO = 3, + IO_TREE_RELOC_BLOCKS = 4, + IO_TREE_TRANS_DIRTY_PAGES = 5, + IO_TREE_ROOT_DIRTY_LOG_PAGES = 6, + IO_TREE_INODE_FILE_EXTENT = 7, + IO_TREE_LOG_CSUM_RANGE = 8, + IO_TREE_SELFTEST = 9, + IO_TREE_DEVICE_ALLOC_STATE = 10, +}; + +struct btrfs_workqueue { + struct workqueue_struct *normal_wq; + struct btrfs_fs_info *fs_info; + struct list_head ordered_list; + spinlock_t list_lock; + atomic_t pending; + int limit_active; + int current_active; + int thresh; + unsigned int count; + spinlock_t thres_lock; +}; + +struct fscrypt_str { + unsigned char *name; + u32 len; +}; + +enum { + BTRFS_ORDERED_REGULAR = 0, + BTRFS_ORDERED_NOCOW = 1, + BTRFS_ORDERED_PREALLOC = 2, + BTRFS_ORDERED_COMPRESSED = 3, + BTRFS_ORDERED_DIRECT = 4, + BTRFS_ORDERED_IO_DONE = 5, + BTRFS_ORDERED_COMPLETE = 6, + BTRFS_ORDERED_IOERR = 7, + BTRFS_ORDERED_TRUNCATED = 8, + BTRFS_ORDERED_LOGGED = 9, + BTRFS_ORDERED_LOGGED_CSUM = 10, + BTRFS_ORDERED_PENDING = 11, + BTRFS_ORDERED_ENCODED = 12, +}; + +enum { + BTRFS_STAT_CURR = 0, + BTRFS_STAT_PREV = 1, + BTRFS_STAT_NR_ENTRIES = 2, +}; + +struct extent_inode_elem; + +struct prelim_ref { + struct rb_node rbnode; + long: 32; + u64 root_id; + struct btrfs_key key_for_search; + u8 level; + int count; + struct extent_inode_elem *inode_list; + long: 32; + u64 parent; + u64 wanted_disk_byte; +}; + +struct btrfs_device_info { + struct btrfs_device *dev; + long: 32; + u64 dev_offset; + u64 max_avail; + u64 total_avail; +}; + +struct btrfs_raid_attr { + u8 sub_stripes; + u8 dev_stripes; + u8 devs_max; + u8 devs_min; + u8 tolerated_failures; + u8 devs_increment; + u8 ncopies; + u8 nparity; + u8 mindev_error; + const char raid_name[8]; + long: 32; + u64 bg_flag; +}; + +enum btrfs_flush_state { + FLUSH_DELAYED_ITEMS_NR = 1, + FLUSH_DELAYED_ITEMS = 2, + FLUSH_DELAYED_REFS_NR = 3, + FLUSH_DELAYED_REFS = 4, + FLUSH_DELALLOC = 5, + FLUSH_DELALLOC_WAIT = 6, + FLUSH_DELALLOC_FULL = 7, + ALLOC_CHUNK = 8, + ALLOC_CHUNK_FORCE = 9, + RUN_DELAYED_IPUTS = 10, + COMMIT_TRANS = 11, +}; + +enum btrfs_rbio_ops { + BTRFS_RBIO_WRITE = 0, + BTRFS_RBIO_READ_REBUILD = 1, + 
BTRFS_RBIO_PARITY_SCRUB = 2, +}; + +struct sector_ptr; + +struct btrfs_raid_bio { + struct btrfs_io_context *bioc; + struct list_head hash_list; + struct list_head stripe_cache; + struct work_struct work; + struct bio_list bio_list; + spinlock_t bio_list_lock; + struct list_head plug_list; + long unsigned int flags; + enum btrfs_rbio_ops operation; + u16 nr_pages; + u16 nr_sectors; + u8 nr_data; + u8 real_stripes; + u8 stripe_npages; + u8 stripe_nsectors; + u8 scrubp; + int bio_list_bytes; + refcount_t refs; + atomic_t stripes_pending; + wait_queue_head_t io_wait; + long unsigned int dbitmap; + long unsigned int finish_pbitmap; + struct page **stripe_pages; + struct sector_ptr *bio_sectors; + struct sector_ptr *stripe_sectors; + void **finish_pointers; + long unsigned int *error_bitmap; + u8 *csum_buf; + long unsigned int *csum_bitmap; +}; + +struct raid56_bio_trace_info { + u64 devid; + u32 offset; + u8 stripe_nr; +}; + +struct trace_event_raw_btrfs_transaction_commit { + struct trace_entry ent; + u8 fsid[16]; + u64 generation; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs__inode { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 blocks; + u64 disk_i_size; + u64 generation; + u64 last_trans; + u64 logged_trans; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_get_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 ino; + u64 start; + u64 len; + u32 flags; + int refs; + char __data[0]; +}; + +struct trace_event_raw_btrfs_handle_em_exist { + struct trace_entry ent; + u8 fsid[16]; + u64 e_start; + u64 e_len; + u64 map_start; + u64 map_len; + u64 start; + u64 len; + char __data[0]; +}; + +struct trace_event_raw_btrfs__file_extent_item_regular { + struct trace_entry ent; + u8 fsid[16]; + u64 root_obj; + u64 ino; + loff_t isize; + u64 disk_isize; + u64 num_bytes; + u64 ram_bytes; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 extent_offset; + u8 extent_type; + u8 compression; + long: 32; + u64 extent_start; + u64 extent_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs__file_extent_item_inline { + struct trace_entry ent; + u8 fsid[16]; + u64 root_obj; + u64 ino; + loff_t isize; + u64 disk_isize; + u8 extent_type; + u8 compression; + long: 32; + u64 extent_start; + u64 extent_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs__ordered_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 file_offset; + u64 start; + u64 len; + u64 disk_len; + u64 bytes_left; + long unsigned int flags; + int compress_type; + int refs; + long: 32; + u64 root_objectid; + u64 truncated_len; + char __data[0]; +}; + +struct trace_event_raw_btrfs_finish_ordered_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 start; + u64 len; + bool uptodate; + long: 32; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs__writepage { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + long unsigned int index; + long int nr_to_write; + long int pages_skipped; + long: 32; + loff_t range_start; + loff_t range_end; + char for_kupdate; + char for_reclaim; + char range_cyclic; + long unsigned int writeback_index; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_writepage_end_io_hook { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 start; + u64 end; + int uptodate; + long: 32; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sync_file { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 parent; + int datasync; + 
long: 32; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sync_fs { + struct trace_entry ent; + u8 fsid[16]; + int wait; + char __data[0]; +}; + +struct trace_event_raw_btrfs_add_block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 offset; + u64 size; + u64 flags; + u64 bytes_used; + u64 bytes_super; + int create; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_delayed_tree_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + long: 32; + u64 parent; + u64 ref_root; + int level; + int type; + u64 seq; + char __data[0]; +}; + +struct trace_event_raw_btrfs_delayed_data_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + long: 32; + u64 parent; + u64 ref_root; + u64 owner; + u64 offset; + int type; + long: 32; + u64 seq; + char __data[0]; +}; + +struct trace_event_raw_btrfs_delayed_ref_head { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + int is_data; + char __data[0]; +}; + +struct trace_event_raw_btrfs__chunk { + struct trace_entry ent; + u8 fsid[16]; + int num_stripes; + long: 32; + u64 type; + int sub_stripes; + long: 32; + u64 offset; + u64 size; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_cow_block { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 buf_start; + int refs; + long: 32; + u64 cow_start; + int buf_level; + int cow_level; + char __data[0]; +}; + +struct trace_event_raw_btrfs_space_reservation { + struct trace_entry ent; + u8 fsid[16]; + u32 __data_loc_type; + long: 32; + u64 val; + u64 bytes; + int reserve; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_trigger_flush { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 bytes; + int flush; + u32 __data_loc_reason; + char __data[0]; +}; + +struct trace_event_raw_btrfs_flush_space { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 num_bytes; + int state; + int ret; + bool for_preempt; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs__reserved_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 start; + u64 len; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent_search_loop { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + u64 loop; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent_have_block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + u64 loop; + bool hinted; + long: 32; + u64 bg_start; + u64 bg_flags; + char __data[0]; +}; + +struct trace_event_raw_btrfs__reserve_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + int bg_size_class; + long: 32; + u64 start; + u64 len; + u64 loop; + bool hinted; + int size_class; + char __data[0]; +}; + +struct trace_event_raw_btrfs_find_cluster { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + u64 start; + u64 bytes; + u64 empty_size; + u64 min_bytes; + char __data[0]; +}; + +struct trace_event_raw_btrfs_failed_cluster_setup { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_setup_cluster { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + 
u64 flags; + u64 start; + u64 max_size; + u64 size; + int bitmap; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_alloc_extent_state { + struct trace_entry ent; + const struct extent_state *state; + long unsigned int mask; + const void *ip; + char __data[0]; +}; + +struct trace_event_raw_free_extent_state { + struct trace_entry ent; + const struct extent_state *state; + const void *ip; + char __data[0]; +}; + +struct trace_event_raw_btrfs__work { + struct trace_entry ent; + u8 fsid[16]; + const void *work; + const void *wq; + const void *func; + const void *ordered_func; + const void *normal_work; + char __data[0]; +}; + +struct trace_event_raw_btrfs__work__done { + struct trace_entry ent; + u8 fsid[16]; + const void *wtag; + char __data[0]; +}; + +struct trace_event_raw_btrfs_workqueue { + struct trace_entry ent; + u8 fsid[16]; + const void *wq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_btrfs_workqueue_done { + struct trace_entry ent; + u8 fsid[16]; + const void *wq; + char __data[0]; +}; + +struct trace_event_raw_btrfs__qgroup_rsv_data { + struct trace_entry ent; + u8 fsid[16]; + u64 rootid; + u64 ino; + u64 start; + u64 len; + u64 reserved; + int op; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_qgroup_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + char __data[0]; +}; + +struct trace_event_raw_qgroup_num_dirty_extents { + struct trace_entry ent; + u8 fsid[16]; + u64 transid; + u64 num_dirty_extents; + char __data[0]; +}; + +struct trace_event_raw_btrfs_qgroup_account_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 transid; + u64 bytenr; + u64 num_bytes; + u64 nr_old_roots; + u64 nr_new_roots; + char __data[0]; +}; + +struct trace_event_raw_qgroup_update_counters { + struct trace_entry ent; + u8 fsid[16]; + u64 qgid; + u64 old_rfer; + u64 old_excl; + u64 cur_old_count; + u64 cur_new_count; + char __data[0]; +}; + +struct trace_event_raw_qgroup_update_reserve { + struct trace_entry ent; + u8 fsid[16]; + u64 qgid; + u64 cur_reserved; + s64 diff; + int type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_qgroup_meta_reserve { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + int type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_qgroup_meta_convert { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + char __data[0]; +}; + +struct trace_event_raw_qgroup_meta_free_all_pertrans { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + int type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs__prelim_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 root_id; + u64 objectid; + u8 type; + long: 32; + u64 offset; + int level; + int old_count; + u64 parent; + u64 bytenr; + int mod_count; + long: 32; + u64 tree_size; + char __data[0]; +}; + +struct trace_event_raw_btrfs_inode_mod_outstanding_extents { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 ino; + int mod; + unsigned int outstanding; + char __data[0]; +}; + +struct trace_event_raw_btrfs__block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 len; + u64 used; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_btrfs_set_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + long: 32; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int set_bits; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_clear_extent_bit { + struct trace_entry ent; + 
u8 fsid[16]; + unsigned int owner; + long: 32; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int clear_bits; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_convert_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + long: 32; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int set_bits; + unsigned int clear_bits; + char __data[0]; +}; + +struct trace_event_raw_btrfs_dump_space_info { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 total_bytes; + u64 bytes_used; + u64 bytes_pinned; + u64 bytes_reserved; + u64 bytes_may_use; + u64 bytes_readonly; + u64 reclaim_size; + int clamp; + long: 32; + u64 global_reserved; + u64 trans_reserved; + u64 delayed_refs_reserved; + u64 delayed_reserved; + u64 free_chunk_space; + u64 delalloc_bytes; + u64 ordered_bytes; + char __data[0]; +}; + +struct trace_event_raw_btrfs_reserve_ticket { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 bytes; + u64 start_ns; + int flush; + int error; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sleep_tree_lock { + struct trace_entry ent; + u8 fsid[16]; + u64 block; + u64 generation; + u64 start_ns; + u64 end_ns; + u64 diff_ns; + u64 owner; + int is_log_tree; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_locking_events { + struct trace_entry ent; + u8 fsid[16]; + u64 block; + u64 generation; + u64 owner; + int is_log_tree; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs__space_info_update { + struct trace_entry ent; + u8 fsid[16]; + u64 type; + u64 old; + s64 diff; + char __data[0]; +}; + +struct trace_event_raw_btrfs_raid56_bio { + struct trace_entry ent; + u8 fsid[16]; + u64 full_stripe; + u64 physical; + u64 devid; + u32 offset; + u32 len; + u8 opf; + u8 total_stripes; + u8 real_stripes; + u8 nr_data; + u8 stripe_nr; + char __data[0]; +}; + +struct trace_event_raw_btrfs_insert_one_raid_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 logical; + u64 length; + int num_stripes; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_raid_extent_delete { + struct trace_entry ent; + u8 fsid[16]; + u64 start; + u64 end; + u64 found_start; + u64 found_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs_get_raid_extent_offset { + struct trace_entry ent; + u8 fsid[16]; + u64 logical; + u64 length; + u64 physical; + u64 devid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_count { + struct trace_entry ent; + u8 fsid[16]; + long int nr; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_scan_enter { + struct trace_entry ent; + u8 fsid[16]; + long int nr_to_scan; + long int nr; + u64 last_root_id; + u64 last_ino; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_scan_exit { + struct trace_entry ent; + u8 fsid[16]; + long int nr_dropped; + long int nr; + u64 last_root_id; + u64 last_ino; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_remove_em { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 root_id; + u64 start; + u64 len; + u32 flags; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_btrfs_transaction_commit {}; + +struct trace_event_data_offsets_btrfs__inode {}; + +struct trace_event_data_offsets_btrfs_get_extent {}; + +struct trace_event_data_offsets_btrfs_handle_em_exist {}; + +struct trace_event_data_offsets_btrfs__file_extent_item_regular {}; + +struct trace_event_data_offsets_btrfs__file_extent_item_inline {}; + +struct 
trace_event_data_offsets_btrfs__ordered_extent {}; + +struct trace_event_data_offsets_btrfs_finish_ordered_extent {}; + +struct trace_event_data_offsets_btrfs__writepage {}; + +struct trace_event_data_offsets_btrfs_writepage_end_io_hook {}; + +struct trace_event_data_offsets_btrfs_sync_file {}; + +struct trace_event_data_offsets_btrfs_sync_fs {}; + +struct trace_event_data_offsets_btrfs_add_block_group {}; + +struct trace_event_data_offsets_btrfs_delayed_tree_ref {}; + +struct trace_event_data_offsets_btrfs_delayed_data_ref {}; + +struct trace_event_data_offsets_btrfs_delayed_ref_head {}; + +struct trace_event_data_offsets_btrfs__chunk {}; + +struct trace_event_data_offsets_btrfs_cow_block {}; + +struct trace_event_data_offsets_btrfs_space_reservation { + u32 type; + const void *type_ptr_; +}; + +struct trace_event_data_offsets_btrfs_trigger_flush { + u32 reason; + const void *reason_ptr_; +}; + +struct trace_event_data_offsets_btrfs_flush_space {}; + +struct trace_event_data_offsets_btrfs__reserved_extent {}; + +struct trace_event_data_offsets_find_free_extent {}; + +struct trace_event_data_offsets_find_free_extent_search_loop {}; + +struct trace_event_data_offsets_find_free_extent_have_block_group {}; + +struct trace_event_data_offsets_btrfs__reserve_extent {}; + +struct trace_event_data_offsets_btrfs_find_cluster {}; + +struct trace_event_data_offsets_btrfs_failed_cluster_setup {}; + +struct trace_event_data_offsets_btrfs_setup_cluster {}; + +struct trace_event_data_offsets_alloc_extent_state {}; + +struct trace_event_data_offsets_free_extent_state {}; + +struct trace_event_data_offsets_btrfs__work {}; + +struct trace_event_data_offsets_btrfs__work__done {}; + +struct trace_event_data_offsets_btrfs_workqueue { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_btrfs_workqueue_done {}; + +struct trace_event_data_offsets_btrfs__qgroup_rsv_data {}; + +struct trace_event_data_offsets_btrfs_qgroup_extent {}; + +struct trace_event_data_offsets_qgroup_num_dirty_extents {}; + +struct trace_event_data_offsets_btrfs_qgroup_account_extent {}; + +struct trace_event_data_offsets_qgroup_update_counters {}; + +struct trace_event_data_offsets_qgroup_update_reserve {}; + +struct trace_event_data_offsets_qgroup_meta_reserve {}; + +struct trace_event_data_offsets_qgroup_meta_convert {}; + +struct trace_event_data_offsets_qgroup_meta_free_all_pertrans {}; + +struct trace_event_data_offsets_btrfs__prelim_ref {}; + +struct trace_event_data_offsets_btrfs_inode_mod_outstanding_extents {}; + +struct trace_event_data_offsets_btrfs__block_group {}; + +struct trace_event_data_offsets_btrfs_set_extent_bit {}; + +struct trace_event_data_offsets_btrfs_clear_extent_bit {}; + +struct trace_event_data_offsets_btrfs_convert_extent_bit {}; + +struct trace_event_data_offsets_btrfs_dump_space_info {}; + +struct trace_event_data_offsets_btrfs_reserve_ticket {}; + +struct trace_event_data_offsets_btrfs_sleep_tree_lock {}; + +struct trace_event_data_offsets_btrfs_locking_events {}; + +struct trace_event_data_offsets_btrfs__space_info_update {}; + +struct trace_event_data_offsets_btrfs_raid56_bio {}; + +struct trace_event_data_offsets_btrfs_insert_one_raid_extent {}; + +struct trace_event_data_offsets_btrfs_raid_extent_delete {}; + +struct trace_event_data_offsets_btrfs_get_raid_extent_offset {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_count {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_scan_enter {}; + +struct 
trace_event_data_offsets_btrfs_extent_map_shrinker_scan_exit {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_remove_em {}; + +typedef void (*btf_trace_btrfs_transaction_commit)(void *, const struct btrfs_fs_info *); + +typedef void (*btf_trace_btrfs_inode_new)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_inode_request)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_inode_evict)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_get_extent)(void *, const struct btrfs_root *, const struct btrfs_inode *, const struct extent_map *); + +typedef void (*btf_trace_btrfs_handle_em_exist)(void *, const struct btrfs_fs_info *, const struct extent_map *, const struct extent_map *, u64, u64); + +typedef void (*btf_trace_btrfs_get_extent_show_fi_regular)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, u64); + +typedef void (*btf_trace_btrfs_truncate_show_fi_regular)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, u64); + +typedef void (*btf_trace_btrfs_get_extent_show_fi_inline)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, int, u64); + +typedef void (*btf_trace_btrfs_truncate_show_fi_inline)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, int, u64); + +typedef void (*btf_trace_btrfs_ordered_extent_add)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_remove)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_start)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_put)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_range)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_first_range)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_for_logging)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_first)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_split)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_dec_test_pending)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_mark_finished)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_finish_ordered_extent)(void *, const struct btrfs_inode *, u64, u64, bool); + +typedef void (*btf_trace_extent_writepage)(void *, const struct folio *, const struct inode *, const struct writeback_control *); + +typedef void (*btf_trace_btrfs_writepage_end_io_hook)(void *, const struct btrfs_inode *, u64, u64, int); + +typedef void (*btf_trace_btrfs_sync_file)(void *, const struct file *, int); + +typedef void (*btf_trace_btrfs_sync_fs)(void *, 
const struct btrfs_fs_info *, int); + +typedef void (*btf_trace_btrfs_add_block_group)(void *, const struct btrfs_fs_info *, const struct btrfs_block_group *, int); + +typedef void (*btf_trace_add_delayed_tree_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_run_delayed_tree_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_add_delayed_data_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_run_delayed_data_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_add_delayed_ref_head)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_head *, int); + +typedef void (*btf_trace_run_delayed_ref_head)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_head *, int); + +typedef void (*btf_trace_btrfs_chunk_alloc)(void *, const struct btrfs_fs_info *, const struct btrfs_chunk_map *, u64, u64); + +typedef void (*btf_trace_btrfs_chunk_free)(void *, const struct btrfs_fs_info *, const struct btrfs_chunk_map *, u64, u64); + +typedef void (*btf_trace_btrfs_cow_block)(void *, const struct btrfs_root *, const struct extent_buffer *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_space_reservation)(void *, const struct btrfs_fs_info *, const char *, u64, u64, int); + +typedef void (*btf_trace_btrfs_trigger_flush)(void *, const struct btrfs_fs_info *, u64, u64, int, const char *); + +typedef void (*btf_trace_btrfs_flush_space)(void *, const struct btrfs_fs_info *, u64, u64, int, int, bool); + +typedef void (*btf_trace_btrfs_reserved_extent_alloc)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_btrfs_reserved_extent_free)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_find_free_extent)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_find_free_extent_search_loop)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_find_free_extent_have_block_group)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_reserve_extent)(void *, const struct btrfs_block_group *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_btrfs_reserve_extent_cluster)(void *, const struct btrfs_block_group *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_btrfs_find_cluster)(void *, const struct btrfs_block_group *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_failed_cluster_setup)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_setup_cluster)(void *, const struct btrfs_block_group *, const struct btrfs_free_cluster *, u64, int); + +typedef void (*btf_trace_alloc_extent_state)(void *, const struct extent_state *, gfp_t, long unsigned int); + +typedef void (*btf_trace_free_extent_state)(void *, const struct extent_state *, long unsigned int); + +typedef void (*btf_trace_btrfs_work_queued)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_work_sched)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_all_work_done)(void *, const struct btrfs_fs_info *, const void *); + +typedef void (*btf_trace_btrfs_ordered_sched)(void *, const struct btrfs_work *); + +typedef void 
(*btf_trace_btrfs_workqueue_alloc)(void *, const struct btrfs_workqueue *, const char *); + +typedef void (*btf_trace_btrfs_workqueue_destroy)(void *, const struct btrfs_workqueue *); + +typedef void (*btf_trace_btrfs_qgroup_reserve_data)(void *, const struct inode *, u64, u64, u64, int); + +typedef void (*btf_trace_btrfs_qgroup_release_data)(void *, const struct inode *, u64, u64, u64, int); + +typedef void (*btf_trace_btrfs_qgroup_account_extents)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup_extent_record *, u64); + +typedef void (*btf_trace_btrfs_qgroup_trace_extent)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup_extent_record *, u64); + +typedef void (*btf_trace_qgroup_num_dirty_extents)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_btrfs_qgroup_account_extent)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64, u64); + +typedef void (*btf_trace_qgroup_update_counters)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup *, u64, u64); + +typedef void (*btf_trace_qgroup_update_reserve)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup *, s64, int); + +typedef void (*btf_trace_qgroup_meta_reserve)(void *, const struct btrfs_root *, s64, int); + +typedef void (*btf_trace_qgroup_meta_convert)(void *, const struct btrfs_root *, s64); + +typedef void (*btf_trace_qgroup_meta_free_all_pertrans)(void *, struct btrfs_root *); + +typedef void (*btf_trace_btrfs_prelim_ref_merge)(void *, const struct btrfs_fs_info *, const struct prelim_ref *, const struct prelim_ref *, u64); + +typedef void (*btf_trace_btrfs_prelim_ref_insert)(void *, const struct btrfs_fs_info *, const struct prelim_ref *, const struct prelim_ref *, u64); + +typedef void (*btf_trace_btrfs_inode_mod_outstanding_extents)(void *, const struct btrfs_root *, u64, int, unsigned int); + +typedef void (*btf_trace_btrfs_remove_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_add_unused_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_add_reclaim_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_reclaim_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_skip_unused_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_set_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int); + +typedef void (*btf_trace_btrfs_clear_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int); + +typedef void (*btf_trace_btrfs_convert_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int, unsigned int); + +typedef void (*btf_trace_btrfs_done_preemptive_reclaim)(void *, struct btrfs_fs_info *, const struct btrfs_space_info *); + +typedef void (*btf_trace_btrfs_fail_all_tickets)(void *, struct btrfs_fs_info *, const struct btrfs_space_info *); + +typedef void (*btf_trace_btrfs_reserve_ticket)(void *, const struct btrfs_fs_info *, u64, u64, u64, int, int); + +typedef void (*btf_trace_btrfs_tree_read_lock)(void *, const struct extent_buffer *, u64); + +typedef void (*btf_trace_btrfs_tree_lock)(void *, const struct extent_buffer *, u64); + +typedef void (*btf_trace_btrfs_tree_unlock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_unlock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_unlock_blocking)(void *, const struct extent_buffer *); + 
+typedef void (*btf_trace_btrfs_set_lock_blocking_read)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_set_lock_blocking_write)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_try_tree_read_lock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_lock_atomic)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_update_bytes_may_use)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_update_bytes_pinned)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_update_bytes_zone_unusable)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_raid56_read)(void *, const struct btrfs_raid_bio *, const struct bio *, const struct raid56_bio_trace_info *); + +typedef void (*btf_trace_raid56_write)(void *, const struct btrfs_raid_bio *, const struct bio *, const struct raid56_bio_trace_info *); + +typedef void (*btf_trace_btrfs_insert_one_raid_extent)(void *, const struct btrfs_fs_info *, u64, u64, int); + +typedef void (*btf_trace_btrfs_raid_extent_delete)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_get_raid_extent_offset)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_count)(void *, const struct btrfs_fs_info *, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_scan_enter)(void *, const struct btrfs_fs_info *, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_scan_exit)(void *, const struct btrfs_fs_info *, long int, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_remove_em)(void *, const struct btrfs_inode *, const struct extent_map *); + +struct btrfs_fs_context { + char *subvol_name; + long: 32; + u64 subvol_objectid; + u64 max_inline; + u32 commit_interval; + u32 metadata_ratio; + u32 thread_pool_size; + long: 32; + long long unsigned int mount_opt; + long unsigned int compress_type: 4; + unsigned int compress_level; + refcount_t refs; + long: 32; +}; + +enum { + Opt_acl = 0, + Opt_clear_cache = 1, + Opt_commit_interval = 2, + Opt_compress = 3, + Opt_compress_force = 4, + Opt_compress_force_type = 5, + Opt_compress_type = 6, + Opt_degraded = 7, + Opt_device = 8, + Opt_fatal_errors = 9, + Opt_flushoncommit = 10, + Opt_max_inline = 11, + Opt_barrier = 12, + Opt_datacow = 13, + Opt_datasum = 14, + Opt_defrag = 15, + Opt_discard___2 = 16, + Opt_discard_mode = 17, + Opt_ratio = 18, + Opt_rescan_uuid_tree = 19, + Opt_skip_balance = 20, + Opt_space_cache = 21, + Opt_space_cache_version = 22, + Opt_ssd = 23, + Opt_ssd_spread = 24, + Opt_subvol = 25, + Opt_subvol_empty = 26, + Opt_subvolid = 27, + Opt_thread_pool = 28, + Opt_treelog = 29, + Opt_user_subvol_rm_allowed = 30, + Opt_norecovery = 31, + Opt_rescue = 32, + Opt_usebackuproot = 33, + Opt_nologreplay = 34, + Opt_enospc_debug = 35, + Opt_err = 36, +}; + +enum { + Opt_fatal_errors_panic = 0, + Opt_fatal_errors_bug = 1, +}; + +enum { + Opt_discard_sync = 0, + Opt_discard_async = 1, +}; + +enum { + Opt_space_cache_v1 = 0, + Opt_space_cache_v2 = 1, +}; + +enum { + Opt_rescue_usebackuproot = 0, + Opt_rescue_nologreplay = 1, + Opt_rescue_ignorebadroots = 2, + Opt_rescue_ignoredatacsums = 3, + Opt_rescue_ignoremetacsums = 4, + Opt_rescue_ignoresuperflags = 5, + Opt_rescue_parameter_all = 6, +}; + +struct init_sequence { + int 
(*init_func)(); + void (*exit_func)(); +}; + +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; + __u8 statelen; + __u8 blocklen_bytes; + char cra_name[128]; + char backend_cra_name[128]; +}; + +struct drbg_state; + +struct drbg_state_ops { + int (*update)(struct drbg_state *, struct list_head *, int); + int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); + int (*crypto_init)(struct drbg_state *); + int (*crypto_fini)(struct drbg_state *); +}; + +enum drbg_seed_state { + DRBG_SEED_STATE_UNSEEDED = 0, + DRBG_SEED_STATE_PARTIAL = 1, + DRBG_SEED_STATE_FULL = 2, +}; + +struct drbg_state { + struct mutex drbg_mutex; + unsigned char *V; + unsigned char *Vbuf; + unsigned char *C; + unsigned char *Cbuf; + size_t reseed_ctr; + size_t reseed_threshold; + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; + struct crypto_skcipher *ctr_handle; + struct skcipher_request *ctr_req; + __u8 *outscratchpadbuf; + __u8 *outscratchpad; + struct crypto_wait ctr_wait; + struct scatterlist sg_in; + struct scatterlist sg_out; + enum drbg_seed_state seeded; + long unsigned int last_seed_time; + bool pr; + bool fips_primed; + unsigned char *prev; + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; +}; + +enum drbg_prefixes { + DRBG_PREFIX0 = 0, + DRBG_PREFIX1 = 1, + DRBG_PREFIX2 = 2, + DRBG_PREFIX3 = 3, +}; + +struct sdesc { + struct shash_desc shash; + char ctx[0]; +}; + +typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); + +struct sbq_wait { + struct sbitmap_queue *sbq; + struct wait_queue_entry wait; +}; + +enum { + BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_INACTIVE = 3, + BLK_MQ_S_MAX = 4, +}; + +enum { + BLK_MQ_REQ_NOWAIT = 1, + BLK_MQ_REQ_RESERVED = 2, + BLK_MQ_REQ_PM = 4, +}; + +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = 65535, +}; + +enum { + BLK_MQ_NO_TAG = 4294967295, + BLK_MQ_TAG_MIN = 1, + BLK_MQ_TAG_MAX = 4294967294, +}; + +struct bt_iter_data { + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + busy_tag_iter_fn *fn; + void *data; + bool reserved; +}; + +struct bt_tags_iter_data { + struct blk_mq_tags *tags; + busy_tag_iter_fn *fn; + void *data; + unsigned int flags; +}; + +struct rusage { + struct __kernel_old_timeval ru_utime; + struct __kernel_old_timeval ru_stime; + __kernel_long_t ru_maxrss; + __kernel_long_t ru_ixrss; + __kernel_long_t ru_idrss; + __kernel_long_t ru_isrss; + __kernel_long_t ru_minflt; + __kernel_long_t ru_majflt; + __kernel_long_t ru_nswap; + __kernel_long_t ru_inblock; + __kernel_long_t ru_oublock; + __kernel_long_t ru_msgsnd; + __kernel_long_t ru_msgrcv; + __kernel_long_t ru_nsignals; + __kernel_long_t ru_nvcsw; + __kernel_long_t ru_nivcsw; +}; + +struct io_sqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 flags; + __u32 dropped; + __u32 array; + __u32 resv1; + __u64 user_addr; +}; + +struct io_cqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 overflow; + __u32 cqes; + __u32 flags; + __u32 resv1; + __u64 user_addr; +}; + +struct io_uring_params { + __u32 sq_entries; + __u32 cq_entries; + __u32 flags; + __u32 sq_thread_cpu; + __u32 sq_thread_idle; + __u32 features; + __u32 wq_fd; + __u32 resv[3]; + struct io_sqring_offsets sq_off; + struct 
io_cqring_offsets cq_off; +}; + +struct io_sq_data { + refcount_t refs; + atomic_t park_pending; + struct mutex lock; + struct list_head ctx_list; + struct task_struct *thread; + struct wait_queue_head wait; + unsigned int sq_thread_idle; + int sq_cpu; + pid_t task_pid; + pid_t task_tgid; + long: 32; + u64 work_time; + long unsigned int state; + struct completion exited; + long: 32; +}; + +enum { + IO_SQ_THREAD_SHOULD_STOP = 0, + IO_SQ_THREAD_SHOULD_PARK = 1, +}; + +struct node_groups { + unsigned int id; + union { + unsigned int ngroups; + unsigned int ncpus; + }; +}; + +struct resource_constraint { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_alignf alignf; + void *alignf_data; +}; + +struct pci_dev_resource { + struct list_head list; + struct resource *res; + struct pci_dev *dev; + resource_size_t start; + resource_size_t end; + resource_size_t add_size; + resource_size_t min_align; + long unsigned int flags; +}; + +enum release_type { + leaf_only = 0, + whole_subtree = 1, +}; + +enum enable_type { + undefined = -1, + user_disabled = 0, + auto_disabled = 1, + user_enabled = 2, + auto_enabled = 3, +}; + +struct devm_clk_state { + struct clk *clk; + void (*exit)(struct clk *); +}; + +struct clk_bulk_devres { + struct clk_bulk_data *clks; + int num_clks; +}; + +struct clk_core; + +struct clk_hw; + +struct clk_rate_request { + struct clk_core *core; + long unsigned int rate; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int best_parent_rate; + struct clk_hw *best_parent_hw; +}; + +struct clk_init_data; + +struct clk_hw { + struct clk_core *core; + struct clk *clk; + const struct clk_init_data *init; +}; + +struct clk_duty { + unsigned int num; + unsigned int den; +}; + +struct clk_ops { + int (*prepare)(struct clk_hw *); + void (*unprepare)(struct clk_hw *); + int (*is_prepared)(struct clk_hw *); + void (*unprepare_unused)(struct clk_hw *); + int (*enable)(struct clk_hw *); + void (*disable)(struct clk_hw *); + int (*is_enabled)(struct clk_hw *); + void (*disable_unused)(struct clk_hw *); + int (*save_context)(struct clk_hw *); + void (*restore_context)(struct clk_hw *); + long unsigned int (*recalc_rate)(struct clk_hw *, long unsigned int); + long int (*round_rate)(struct clk_hw *, long unsigned int, long unsigned int *); + int (*determine_rate)(struct clk_hw *, struct clk_rate_request *); + int (*set_parent)(struct clk_hw *, u8); + u8 (*get_parent)(struct clk_hw *); + int (*set_rate)(struct clk_hw *, long unsigned int, long unsigned int); + int (*set_rate_and_parent)(struct clk_hw *, long unsigned int, long unsigned int, u8); + long unsigned int (*recalc_accuracy)(struct clk_hw *, long unsigned int); + int (*get_phase)(struct clk_hw *); + int (*set_phase)(struct clk_hw *, int); + int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*init)(struct clk_hw *); + void (*terminate)(struct clk_hw *); + void (*debug_init)(struct clk_hw *, struct dentry *); +}; + +struct clk_parent_data { + const struct clk_hw *hw; + const char *fw_name; + const char *name; + int index; +}; + +struct clk_init_data { + const char *name; + const struct clk_ops *ops; + const char * const *parent_names; + const struct clk_parent_data *parent_data; + const struct clk_hw **parent_hws; + u8 num_parents; + long unsigned int flags; +}; + +struct clk_mux { + struct clk_hw hw; + void *reg; + const u32 *table; + u32 mask; + u8 shift; + u8 flags; + spinlock_t *lock; +}; + +struct 
vt_notifier_param { + struct vc_data *vc; + unsigned int c; +}; + +struct vcs_poll_data { + struct notifier_block notifier; + unsigned int cons_num; + int event; + wait_queue_head_t waitq; + struct fasync_struct *fasync; +}; + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + +struct software_node; + +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 args[8]; +}; + +struct software_node { + const char *name; + const struct software_node *parent; + const struct property_entry *properties; +}; + +struct swnode { + struct kobject kobj; + struct fwnode_handle fwnode; + const struct software_node *node; + int id; + struct ida child_ids; + struct list_head entry; + struct list_head children; + struct swnode *parent; + unsigned int allocated: 1; + unsigned int managed: 1; +}; + +struct dma_fence; + +struct dma_fence_unwrap { + struct dma_fence *chain; + struct dma_fence *array; + unsigned int index; +}; + +struct dma_fence_ops; + +struct dma_fence { + spinlock_t *lock; + const struct dma_fence_ops *ops; + union { + struct list_head cb_list; + ktime_t timestamp; + struct callback_head rcu; + }; + u64 context; + u64 seqno; + long unsigned int flags; + struct kref refcount; + int error; + long: 32; +}; + +struct dma_fence_ops { + bool use_64bit_seqno; + const char * (*get_driver_name)(struct dma_fence *); + const char * (*get_timeline_name)(struct dma_fence *); + bool (*enable_signaling)(struct dma_fence *); + bool (*signaled)(struct dma_fence *); + long int (*wait)(struct dma_fence *, bool, long int); + void (*release)(struct dma_fence *); + void (*fence_value_str)(struct dma_fence *, char *, int); + void (*timeline_value_str)(struct dma_fence *, char *, int); + void (*set_deadline)(struct dma_fence *, ktime_t); +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT = 0, + DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, + DMA_FENCE_FLAG_USER_BITS = 3, +}; + +struct dma_fence_cb; + +typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *); + +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +struct sync_file { + struct file *file; + char user_name[32]; + struct list_head sync_file_list; + wait_queue_head_t wq; + long unsigned int flags; + struct dma_fence *fence; + struct dma_fence_cb cb; +}; + +struct sync_merge_data { + char name[32]; + __s32 fd2; + __s32 fence; + __u32 flags; + __u32 pad; +}; + +struct sync_fence_info { + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u32 flags; + __u64 timestamp_ns; +}; + +struct sync_file_info { + char name[32]; + __s32 status; + __u32 flags; + __u32 num_fences; + __u32 pad; + __u64 sync_fence_info; +}; + +struct sync_set_deadline { + __u64 deadline_ns; + __u64 pad; +}; + +enum sam_status { + SAM_STAT_GOOD = 0, + SAM_STAT_CHECK_CONDITION = 2, + SAM_STAT_CONDITION_MET = 4, + SAM_STAT_BUSY = 8, + SAM_STAT_INTERMEDIATE = 16, + SAM_STAT_INTERMEDIATE_CONDITION_MET = 20, + SAM_STAT_RESERVATION_CONFLICT = 24, + SAM_STAT_COMMAND_TERMINATED = 34, + SAM_STAT_TASK_SET_FULL = 40, + SAM_STAT_ACA_ACTIVE = 48, + SAM_STAT_TASK_ABORTED = 64, +}; + +struct scsi_sense_hdr { + u8 response_code; + u8 sense_key; + u8 asc; + u8 ascq; + u8 byte4; + u8 byte5; + u8 byte6; + u8 additional_length; +}; + +struct sg_page_iter { + struct scatterlist *sg; + unsigned int sg_pgoffset; + unsigned int __nents; + int __pg_advance; +}; + +struct sg_mapping_iter { + struct page *page; + void *addr; + size_t length; + size_t 
consumed; + struct sg_page_iter piter; + unsigned int __offset; + unsigned int __remaining; + unsigned int __flags; +}; + +struct scsi_failure { + int result; + u8 sense; + u8 asc; + u8 ascq; + s8 allowed; + s8 retries; +}; + +struct scsi_failures { + int total_allowed; + int total_retries; + struct scsi_failure *failure_definitions; +}; + +struct scsi_exec_args { + unsigned char *sense; + unsigned int sense_len; + struct scsi_sense_hdr *sshdr; + blk_mq_req_flags_t req_flags; + int scmd_flags; + int *resid; + struct scsi_failures *failures; +}; + +enum ata_prot_flags { + ATA_PROT_FLAG_PIO = 1, + ATA_PROT_FLAG_DMA = 2, + ATA_PROT_FLAG_NCQ = 4, + ATA_PROT_FLAG_ATAPI = 8, + ATA_PROT_UNKNOWN = 255, + ATA_PROT_NODATA = 0, + ATA_PROT_PIO = 1, + ATA_PROT_DMA = 2, + ATA_PROT_NCQ_NODATA = 4, + ATA_PROT_NCQ = 6, + ATAPI_PROT_NODATA = 8, + ATAPI_PROT_PIO = 9, + ATAPI_PROT_DMA = 10, +}; + +enum ata_link_iter_mode { + ATA_LITER_EDGE = 0, + ATA_LITER_HOST_FIRST = 1, + ATA_LITER_PMP_FIRST = 2, +}; + +enum ata_dev_iter_mode { + ATA_DITER_ENABLED = 0, + ATA_DITER_ENABLED_REVERSE = 1, + ATA_DITER_ALL = 2, + ATA_DITER_ALL_REVERSE = 3, +}; + +typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *); + +enum iwl_cfg_trans_ltr_delay { + IWL_CFG_TRANS_LTR_DELAY_NONE = 0, + IWL_CFG_TRANS_LTR_DELAY_200US = 1, + IWL_CFG_TRANS_LTR_DELAY_2500US = 2, + IWL_CFG_TRANS_LTR_DELAY_1820US = 3, +}; + +struct iwl_soc_configuration_cmd { + __le32 flags; + __le32 latency; +}; + +struct iwl_rfh_queue_data { + u8 q_num; + u8 enable; + __le16 reserved; + __le64 urbd_stts_wrptr; + __le64 fr_bd_cb; + __le64 ur_bd_cb; + __le32 fr_bd_wid; +}; + +struct iwl_rfh_queue_config { + u8 num_queues; + u8 reserved[3]; + struct iwl_rfh_queue_data data[0]; +}; + +struct iwl_mvm_quota_iterator_data { + int n_interfaces[4]; + int colors[4]; + int low_latency[4]; + int n_low_latency_bindings; + struct ieee80211_vif *disabled_vif; +}; + +struct ieee80211_bar { + __le16 frame_control; + __le16 duration; + __u8 ra[6]; + __u8 ta[6]; + __le16 control; + __le16 start_seq_num; +}; + +enum rt2x00_dump_type { + DUMP_FRAME_RXDONE = 1, + DUMP_FRAME_TX = 2, + DUMP_FRAME_TXDONE = 3, + DUMP_FRAME_BEACON = 4, +}; + +enum skb_frame_desc_flags { + SKBDESC_DMA_MAPPED_RX = 1, + SKBDESC_DMA_MAPPED_TX = 2, + SKBDESC_IV_STRIPPED = 4, + SKBDESC_NOT_MAC80211 = 8, + SKBDESC_DESC_IN_SKB = 16, +}; + +struct skb_frame_desc { + u8 flags; + u8 desc_len; + u8 tx_rate_idx; + u8 tx_rate_flags; + void *desc; + __le32 iv[2]; + dma_addr_t skb_dma; + struct ieee80211_sta *sta; +}; + +enum txentry_desc_flags { + ENTRY_TXD_RTS_FRAME = 0, + ENTRY_TXD_CTS_FRAME = 1, + ENTRY_TXD_GENERATE_SEQ = 2, + ENTRY_TXD_FIRST_FRAGMENT = 3, + ENTRY_TXD_MORE_FRAG = 4, + ENTRY_TXD_REQ_TIMESTAMP = 5, + ENTRY_TXD_BURST = 6, + ENTRY_TXD_ACK = 7, + ENTRY_TXD_RETRY_MODE = 8, + ENTRY_TXD_ENCRYPT = 9, + ENTRY_TXD_ENCRYPT_PAIRWISE = 10, + ENTRY_TXD_ENCRYPT_IV = 11, + ENTRY_TXD_ENCRYPT_MMIC = 12, + ENTRY_TXD_HT_AMPDU = 13, + ENTRY_TXD_HT_BW_40 = 14, + ENTRY_TXD_HT_SHORT_GI = 15, + ENTRY_TXD_HT_MIMO_PS = 16, +}; + +enum data_queue_flags { + QUEUE_STARTED = 0, + QUEUE_PAUSED = 1, +}; + +struct rt2x00_bar_list_entry { + struct list_head list; + struct callback_head head; + struct queue_entry *entry; + int block_acked; + __u8 ra[6]; + __u8 ta[6]; + __le16 control; + __le16 start_seq_num; +}; + +struct rt2x00_rate { + short unsigned int flags; + short unsigned int bitrate; + short unsigned int ratemask; + short unsigned int plcp; + short unsigned int mcs; +}; + +struct netdev_hw_addr { + struct list_head list; 
+ struct rb_node node; + unsigned char addr[32]; + unsigned char type; + bool global_use; + int sync_cnt; + int refcount; + int synced; + struct callback_head callback_head; +}; + +enum gro_result { + GRO_MERGED = 0, + GRO_MERGED_FREE = 1, + GRO_HELD = 2, + GRO_NORMAL = 3, + GRO_CONSUMED = 4, +}; + +typedef enum gro_result gro_result_t; + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF = 0, + __QUEUE_STATE_STACK_XOFF = 1, + __QUEUE_STATE_FROZEN = 2, +}; + +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS = 1, + ETH_SS_PRIV_FLAGS = 2, + ETH_SS_NTUPLE_FILTERS = 3, + ETH_SS_FEATURES = 4, + ETH_SS_RSS_HASH_FUNCS = 5, + ETH_SS_TUNABLES = 6, + ETH_SS_PHY_STATS = 7, + ETH_SS_PHY_TUNABLES = 8, + ETH_SS_LINK_MODES = 9, + ETH_SS_MSG_CLASSES = 10, + ETH_SS_WOL_MODES = 11, + ETH_SS_SOF_TIMESTAMPING = 12, + ETH_SS_TS_TX_TYPES = 13, + ETH_SS_TS_RX_FILTERS = 14, + ETH_SS_UDP_TUNNEL_TYPES = 15, + ETH_SS_STATS_STD = 16, + ETH_SS_STATS_ETH_PHY = 17, + ETH_SS_STATS_ETH_MAC = 18, + ETH_SS_STATS_ETH_CTRL = 19, + ETH_SS_STATS_RMON = 20, + ETH_SS_COUNT = 21, +}; + +struct mii_ioctl_data { + __u16 phy_id; + __u16 reg_num; + __u16 val_in; + __u16 val_out; +}; + +struct mii_if_info { + int phy_id; + int advertising; + int phy_id_mask; + int reg_num_mask; + unsigned int full_duplex: 1; + unsigned int force_media: 1; + unsigned int supports_gmii: 1; + struct net_device *dev; + int (*mdio_read)(struct net_device *, int, int); + void (*mdio_write)(struct net_device *, int, int, int); +}; + +struct usb_dynids { + struct list_head list; +}; + +struct usb_driver { + const char *name; + int (*probe)(struct usb_interface *, const struct usb_device_id *); + void (*disconnect)(struct usb_interface *); + int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *); + int (*suspend)(struct usb_interface *, pm_message_t); + int (*resume)(struct usb_interface *); + int (*reset_resume)(struct usb_interface *); + int (*pre_reset)(struct usb_interface *); + int (*post_reset)(struct usb_interface *); + void (*shutdown)(struct usb_interface *); + const struct usb_device_id *id_table; + const struct attribute_group **dev_groups; + struct usb_dynids dynids; + struct device_driver driver; + unsigned int no_dynamic_id: 1; + unsigned int supports_autosuspend: 1; + unsigned int disable_hub_initiated_lpm: 1; + unsigned int soft_unbind: 1; +}; + +typedef u32 acpi_size; + +typedef u64 acpi_io_address; + +typedef u32 acpi_status; + +typedef char *acpi_string; + +typedef void *acpi_handle; + +typedef u32 acpi_object_type; + +union acpi_object { + acpi_object_type type; + struct { + acpi_object_type type; + long: 32; + u64 value; + } integer; + struct { + acpi_object_type type; + u32 length; + char *pointer; + } string; + struct { + acpi_object_type type; + u32 length; + u8 *pointer; + } buffer; + struct { + acpi_object_type type; + u32 count; + union acpi_object *elements; + } package; + struct { + acpi_object_type type; + acpi_object_type actual_type; + acpi_handle handle; + } reference; + struct { + acpi_object_type type; + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + long: 32; + } processor; + struct { + acpi_object_type type; + u32 system_level; + u32 resource_order; + } power_resource; +}; + +struct acpi_object_list { + u32 count; + union acpi_object *pointer; +}; + +struct acpi_buffer { + acpi_size length; + void *pointer; +}; + +enum spd_duplex { + NWAY_10M_HALF = 0, + NWAY_10M_FULL = 1, + NWAY_100M_HALF = 2, + NWAY_100M_FULL = 3, + NWAY_1000M_FULL = 4, + FORCE_10M_HALF = 5, + FORCE_10M_FULL = 
6, + FORCE_100M_HALF = 7, + FORCE_100M_FULL = 8, + FORCE_1000M_FULL = 9, + NWAY_2500M_FULL = 10, +}; + +enum rtl_register_content { + _2500bps = 1024, + _1250bps = 512, + _500bps = 256, + _tx_flow = 64, + _rx_flow = 32, + _1000bps = 16, + _100bps = 8, + _10bps = 4, + LINK_STATUS = 2, + FULL_DUP = 1, +}; + +enum rtl8152_flags { + RTL8152_INACCESSIBLE = 0, + RTL8152_SET_RX_MODE = 1, + WORK_ENABLE = 2, + RTL8152_LINK_CHG = 3, + SELECTIVE_SUSPEND = 4, + PHY_RESET = 5, + SCHEDULE_TASKLET = 6, + GREEN_ETHERNET = 7, + RX_EPROTO = 8, + IN_PRE_RESET = 9, + PROBED_WITH_NO_ERRORS = 10, + PROBE_SHOULD_RETRY = 11, +}; + +struct tally_counter { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underrun; +}; + +struct rx_desc { + __le32 opts1; + __le32 opts2; + __le32 opts3; + __le32 opts4; + __le32 opts5; + __le32 opts6; +}; + +struct tx_desc { + __le32 opts1; + __le32 opts2; +}; + +struct r8152; + +struct rx_agg { + struct list_head list; + struct list_head info_list; + struct urb *urb; + struct r8152 *context; + struct page *page; + void *buffer; +}; + +struct tx_agg { + struct list_head list; + struct urb *urb; + struct r8152 *context; + void *buffer; + void *head; + u32 skb_num; + u32 skb_len; +}; + +struct rtl_ops { + void (*init)(struct r8152 *); + int (*enable)(struct r8152 *); + void (*disable)(struct r8152 *); + void (*up)(struct r8152 *); + void (*down)(struct r8152 *); + void (*unload)(struct r8152 *); + int (*eee_get)(struct r8152 *, struct ethtool_keee *); + int (*eee_set)(struct r8152 *, struct ethtool_keee *); + bool (*in_nway)(struct r8152 *); + void (*hw_phy_cfg)(struct r8152 *); + void (*autosuspend_en)(struct r8152 *, bool); + void (*change_mtu)(struct r8152 *); +}; + +struct ups_info { + u32 r_tune: 1; + u32 _10m_ckdiv: 1; + u32 _250m_ckdiv: 1; + u32 aldps: 1; + u32 lite_mode: 2; + u32 speed_duplex: 4; + u32 eee: 1; + u32 eee_lite: 1; + u32 eee_ckdiv: 1; + u32 eee_plloff_100: 1; + u32 eee_plloff_giga: 1; + u32 eee_cmod_lv: 1; + u32 green: 1; + u32 flow_control: 1; + u32 ctap_short_off: 1; +}; + +struct rtl_fw { + const char *fw_name; + const struct firmware *fw; + char version[32]; + int (*pre_fw)(struct r8152 *); + int (*post_fw)(struct r8152 *); + bool retry; +}; + +struct r8152 { + long unsigned int flags; + struct usb_device *udev; + struct napi_struct napi; + struct usb_interface *intf; + struct net_device *netdev; + struct urb *intr_urb; + struct tx_agg tx_info[4]; + struct list_head rx_info; + struct list_head rx_used; + struct list_head rx_done; + struct list_head tx_free; + struct sk_buff_head tx_queue; + struct sk_buff_head rx_queue; + spinlock_t rx_lock; + spinlock_t tx_lock; + struct delayed_work schedule; + struct delayed_work hw_phy_work; + struct mii_if_info mii; + struct mutex control; + struct notifier_block pm_notifier; + struct tasklet_struct tx_tl; + struct rtl_ops rtl_ops; + struct ups_info ups_info; + struct rtl_fw rtl_fw; + atomic_t rx_count; + bool eee_en; + int intr_interval; + u32 saved_wolopts; + u32 msg_enable; + u32 tx_qlen; + u32 coalesce; + u32 advertising; + u32 rx_buf_sz; + u32 rx_copybreak; + u32 rx_pending; + u32 fc_pause_on; + u32 fc_pause_off; + unsigned int pipe_in; + unsigned int pipe_out; + unsigned int pipe_intr; + unsigned int pipe_ctrl_in; + unsigned int pipe_ctrl_out; + u32 support_2500full: 1; + u32 
lenovo_macpassthru: 1; + u32 dell_tb_rx_agg_bug: 1; + u16 ocp_base; + u16 speed; + u16 eee_adv; + u8 *intr_buff; + u8 version; + u8 duplex; + u8 autoneg; + unsigned int reg_access_reset_count; +}; + +struct fw_block { + __le32 type; + __le32 length; +}; + +struct fw_header { + u8 checksum[32]; + char version[32]; + struct fw_block blocks[0]; +}; + +enum rtl8152_fw_flags { + FW_FLAGS_USB = 0, + FW_FLAGS_PLA = 1, + FW_FLAGS_START = 2, + FW_FLAGS_STOP = 3, + FW_FLAGS_NC = 4, + FW_FLAGS_NC1 = 5, + FW_FLAGS_NC2 = 6, + FW_FLAGS_UC2 = 7, + FW_FLAGS_UC = 8, + FW_FLAGS_SPEED_UP = 9, + FW_FLAGS_VER = 10, +}; + +enum rtl8152_fw_fixup_cmd { + FW_FIXUP_AND = 0, + FW_FIXUP_OR = 1, + FW_FIXUP_NOT = 2, + FW_FIXUP_XOR = 3, +}; + +struct fw_phy_set { + __le16 addr; + __le16 data; +}; + +struct fw_phy_speed_up { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 version; + __le16 fw_reg; + __le16 reserved; + char info[0]; +}; + +struct fw_phy_ver { + struct fw_block blk_hdr; + struct fw_phy_set ver; + __le32 reserved; +}; + +struct fw_phy_fixup { + struct fw_block blk_hdr; + struct fw_phy_set setting; + __le16 bit_cmd; + __le16 reserved; +}; + +struct fw_phy_union { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + struct fw_phy_set pre_set[2]; + struct fw_phy_set bp[8]; + struct fw_phy_set bp_en; + u8 pre_num; + u8 bp_num; + char info[0]; +} __attribute__((packed)); + +struct fw_mac { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + __le16 bp_ba_addr; + __le16 bp_ba_value; + __le16 bp_en_addr; + __le16 bp_en_value; + __le16 bp_start; + __le16 bp_num; + __le16 bp[16]; + __le32 reserved; + __le16 fw_ver_reg; + u8 fw_ver_data; + char info[0]; +} __attribute__((packed)); + +struct fw_phy_patch_key { + struct fw_block blk_hdr; + __le16 key_reg; + __le16 key_data; + __le32 reserved; +}; + +struct fw_phy_nc { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + __le16 ba_reg; + __le16 ba_data; + __le16 patch_en_addr; + __le16 patch_en_value; + __le16 mode_reg; + __le16 mode_pre; + __le16 mode_post; + __le16 reserved; + __le16 bp_start; + __le16 bp_num; + __le16 bp[4]; + char info[0]; +}; + +enum rtl_fw_type { + RTL_FW_END = 0, + RTL_FW_PLA = 1, + RTL_FW_USB = 2, + RTL_FW_PHY_START = 3, + RTL_FW_PHY_STOP = 4, + RTL_FW_PHY_NC = 5, + RTL_FW_PHY_FIXUP = 6, + RTL_FW_PHY_UNION_NC = 7, + RTL_FW_PHY_UNION_NC1 = 8, + RTL_FW_PHY_UNION_NC2 = 9, + RTL_FW_PHY_UNION_UC2 = 10, + RTL_FW_PHY_UNION_UC = 11, + RTL_FW_PHY_UNION_MISC = 12, + RTL_FW_PHY_SPEED_UP = 13, + RTL_FW_PHY_VER = 14, +}; + +enum rtl_version { + RTL_VER_UNKNOWN = 0, + RTL_VER_01 = 1, + RTL_VER_02 = 2, + RTL_VER_03 = 3, + RTL_VER_04 = 4, + RTL_VER_05 = 5, + RTL_VER_06 = 6, + RTL_VER_07 = 7, + RTL_VER_08 = 8, + RTL_VER_09 = 9, + RTL_TEST_01 = 10, + RTL_VER_10 = 11, + RTL_VER_11 = 12, + RTL_VER_12 = 13, + RTL_VER_13 = 14, + RTL_VER_14 = 15, + RTL_VER_15 = 16, + RTL_VER_MAX = 17, +}; + +enum tx_csum_stat { + TX_CSUM_SUCCESS = 0, + TX_CSUM_TSO = 1, + TX_CSUM_NONE = 2, +}; + +typedef struct thermal_cooling_device *class_cooling_dev_t; + +struct thermal_instance { + int id; + char name[20]; + struct thermal_cooling_device *cdev; + const struct thermal_trip *trip; + bool initialized; + long unsigned int upper; + long unsigned int lower; + long unsigned int target; + char attr_name[20]; + struct device_attribute attr; + char weight_attr_name[20]; + struct device_attribute weight_attr; + struct list_head trip_node; + struct list_head cdev_node; + unsigned int weight; + bool upper_no_limit; +}; + +struct stripe { + struct 
dm_dev *dev; + long: 32; + sector_t physical_start; + atomic_t error_count; + long: 32; +}; + +struct stripe_c { + uint32_t stripes; + int stripes_shift; + sector_t stripe_width; + uint32_t chunk_size; + int chunk_size_shift; + struct dm_target *ti; + struct work_struct trigger_event; + long: 32; + struct stripe stripe[0]; +}; + +typedef s64 int64_t; + +struct menu_device { + int needs_update; + int tick_wakeup; + u64 next_timer_ns; + unsigned int bucket; + unsigned int correction_factor[6]; + unsigned int intervals[8]; + int interval_ptr; +}; + +struct debugfs_blob_wrapper { + void *data; + long unsigned int size; +}; + +typedef efi_status_t efi_query_variable_store_t(u32, long unsigned int, bool); + +typedef struct { + efi_guid_t guid; + u64 table; +} efi_config_table_64_t; + +struct efi_unaccepted_memory { + u32 version; + u32 unit_size; + u64 phys_base; + u64 size; + long unsigned int bitmap[0]; +}; + +typedef struct { + u16 version; + u16 length; + u32 runtime_services_supported; +} efi_rt_properties_table_t; + +struct efivar_operations { + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_store_t *query_variable_store; + efi_query_variable_info_t *query_variable_info; +}; + +struct efivars { + struct kset *kset; + const struct efivar_operations *ops; +}; + +struct linux_efi_random_seed { + u32 size; + u8 bits[0]; +}; + +struct linux_efi_memreserve { + int size; + atomic_t count; + phys_addr_t next; + struct { + phys_addr_t base; + phys_addr_t size; + } entry[0]; +}; + +struct linux_efi_initrd { + long unsigned int base; + long unsigned int size; +}; + +enum { + TCA_FLOWER_KEY_CT_FLAGS_NEW = 1, + TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 2, + TCA_FLOWER_KEY_CT_FLAGS_RELATED = 4, + TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 8, + TCA_FLOWER_KEY_CT_FLAGS_INVALID = 16, + TCA_FLOWER_KEY_CT_FLAGS_REPLY = 32, + __TCA_FLOWER_KEY_CT_FLAGS_MAX = 33, +}; + +enum flow_dissector_ctrl_flags { + FLOW_DIS_IS_FRAGMENT = 1, + FLOW_DIS_FIRST_FRAG = 2, + FLOW_DIS_F_TUNNEL_CSUM = 4, + FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = 8, + FLOW_DIS_F_TUNNEL_OAM = 16, + FLOW_DIS_F_TUNNEL_CRIT_OPT = 32, + FLOW_DIS_ENCAPSULATION = 64, +}; + +enum flow_dissect_ret { + FLOW_DISSECT_RET_OUT_GOOD = 0, + FLOW_DISSECT_RET_OUT_BAD = 1, + FLOW_DISSECT_RET_PROTO_AGAIN = 2, + FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, + FLOW_DISSECT_RET_CONTINUE = 4, +}; + +struct flow_dissector_mpls_lse { + u32 mpls_ttl: 8; + u32 mpls_bos: 1; + u32 mpls_tc: 3; + u32 mpls_label: 20; +}; + +struct flow_dissector_key_mpls { + struct flow_dissector_mpls_lse ls[7]; + u8 used_lses; +}; + +struct flow_dissector_key_enc_opts { + u8 data[255]; + u8 len; + u32 dst_opt_type; +}; + +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[6]; + unsigned char tha[6]; +}; + +struct flow_dissector_key_eth_addrs { + unsigned char dst[6]; + unsigned char src[6]; +}; + +struct flow_dissector_key_tcp { + __be16 flags; +}; + +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + +struct flow_dissector_key_meta { + int ingress_ifindex; + u16 ingress_iftype; + u8 l2_miss; +}; + +struct flow_dissector_key_ct { + u16 ct_state; + u16 ct_zone; + u32 ct_mark; + u32 ct_labels[4]; +}; + +struct flow_dissector_key_hash { + u32 hash; +}; + +struct flow_dissector_key_num_of_vlans { + u8 num_of_vlans; +}; + +struct flow_dissector_key_pppoe { + __be16 session_id; + __be16 ppp_proto; + __be16 type; +}; + +struct 
flow_dissector_key_l2tpv3 { + __be32 session_id; +}; + +struct flow_dissector_key_ipsec { + __be32 spi; +}; + +struct flow_dissector_key_cfm { + u8 mdl_ver; + u8 opcode; +}; + +struct flow_dissector_key { + enum flow_dissector_key_id key_id; + size_t offset; +}; + +struct flow_dissector { + long long unsigned int used_keys; + short unsigned int offset[33]; + long: 32; +}; + +struct flow_keys_basic { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; +}; + +struct flow_keys_digest { + u8 data[16]; +}; + +struct ip_auth_hdr { + __u8 nexthdr; + __u8 hdrlen; + __be16 reserved; + __be32 spi; + __be32 seq_no; + __u8 auth_data[0]; +}; + +struct ip_esp_hdr { + __be32 spi; + __be32 seq_no; + __u8 enc_data[0]; +}; + +enum bpf_ret_code { + BPF_OK = 0, + BPF_DROP = 2, + BPF_REDIRECT = 7, + BPF_LWT_REROUTE = 128, + BPF_FLOW_DISSECTOR_CONTINUE = 129, +}; + +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1, + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2, + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4, +}; + +union tcp_word_hdr { + struct tcphdr hdr; + __be32 words[5]; +}; + +struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; +}; + +struct arphdr { + __be16 ar_hrd; + __be16 ar_pro; + unsigned char ar_hln; + unsigned char ar_pln; + __be16 ar_op; +}; + +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +}; + +struct gre_full_hdr { + struct gre_base_hdr fixed_header; + __be16 csum; + __be16 reserved1; + __be32 key; + __be32 seq; +}; + +struct pptp_gre_header { + struct gre_base_hdr gre_hd; + __be16 payload_len; + __be16 call_id; + __be32 seq; + __be32 ack; +}; + +struct tipc_basic_hdr { + __be32 w[4]; +}; + +enum sctp_msg_flags { + MSG_NOTIFICATION = 32768, +}; + +enum dccp_state { + DCCP_OPEN = 1, + DCCP_REQUESTING = 2, + DCCP_LISTEN = 10, + DCCP_RESPOND = 3, + DCCP_ACTIVE_CLOSEREQ = 4, + DCCP_PASSIVE_CLOSE = 8, + DCCP_CLOSING = 11, + DCCP_TIME_WAIT = 6, + DCCP_CLOSED = 7, + DCCP_NEW_SYN_RECV = 12, + DCCP_PARTOPEN = 14, + DCCP_PASSIVE_CLOSEREQ = 15, + DCCP_MAX_STATES = 16, +}; + +struct pppoe_tag { + __be16 tag_type; + __be16 tag_len; + char tag_data[0]; +}; + +struct pppoe_hdr { + __u8 type: 4; + __u8 ver: 4; + __u8 code; + __be16 sid; + __be16 length; + struct pppoe_tag tag[0]; +}; + +struct hsr_tag { + __be16 path_and_LSDU_size; + __be16 sequence_nr; + __be16 encap_proto; +}; + +struct mpls_label { + __be32 entry; +}; + +struct clock_identity { + u8 id[8]; +}; + +struct port_identity { + struct clock_identity clock_identity; + __be16 port_number; +}; + +struct ptp_header { + u8 tsmt; + u8 ver; + __be16 message_length; + u8 domain_number; + u8 reserved1; + u8 flag_field[2]; + __be64 correction; + __be32 reserved2; + struct port_identity source_port_identity; + __be16 sequence_id; + u8 control; + u8 log_message_interval; +} __attribute__((packed)); + +enum batadv_packettype { + BATADV_IV_OGM = 0, + BATADV_BCAST = 1, + BATADV_CODED = 2, + BATADV_ELP = 3, + BATADV_OGM2 = 4, + BATADV_MCAST = 5, + BATADV_UNICAST = 64, + BATADV_UNICAST_FRAG = 65, + BATADV_UNICAST_4ADDR = 66, + BATADV_ICMP = 67, + BATADV_UNICAST_TVLV = 68, +}; + +struct batadv_unicast_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 ttvn; + __u8 dest[6]; +}; + +struct nf_conn_labels { + long unsigned int bits[4]; +}; + +struct _flow_keys_digest_data { + __be16 n_proto; + u8 ip_proto; + u8 padding; + __be32 ports; + __be32 src; + __be32 dst; +}; + +struct update_classid_context { + u32 classid; + unsigned int batch; +}; + +enum netlink_attribute_type { + 
NL_ATTR_TYPE_INVALID = 0, + NL_ATTR_TYPE_FLAG = 1, + NL_ATTR_TYPE_U8 = 2, + NL_ATTR_TYPE_U16 = 3, + NL_ATTR_TYPE_U32 = 4, + NL_ATTR_TYPE_U64 = 5, + NL_ATTR_TYPE_S8 = 6, + NL_ATTR_TYPE_S16 = 7, + NL_ATTR_TYPE_S32 = 8, + NL_ATTR_TYPE_S64 = 9, + NL_ATTR_TYPE_BINARY = 10, + NL_ATTR_TYPE_STRING = 11, + NL_ATTR_TYPE_NUL_STRING = 12, + NL_ATTR_TYPE_NESTED = 13, + NL_ATTR_TYPE_NESTED_ARRAY = 14, + NL_ATTR_TYPE_BITFIELD32 = 15, + NL_ATTR_TYPE_SINT = 16, + NL_ATTR_TYPE_UINT = 17, +}; + +enum netlink_policy_type_attr { + NL_POLICY_TYPE_ATTR_UNSPEC = 0, + NL_POLICY_TYPE_ATTR_TYPE = 1, + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2, + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3, + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4, + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5, + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6, + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7, + NL_POLICY_TYPE_ATTR_POLICY_IDX = 8, + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9, + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10, + NL_POLICY_TYPE_ATTR_PAD = 11, + NL_POLICY_TYPE_ATTR_MASK = 12, + __NL_POLICY_TYPE_ATTR_MAX = 13, + NL_POLICY_TYPE_ATTR_MAX = 12, +}; + +struct netlink_policy_dump_state { + unsigned int policy_idx; + unsigned int attr_idx; + unsigned int n_alloc; + struct { + const struct nla_policy *policy; + unsigned int maxtype; + } policies[0]; +}; + +struct ethtool_cmd { + __u32 cmd; + __u32 supported; + __u32 advertising; + __u16 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 transceiver; + __u8 autoneg; + __u8 mdio_support; + __u32 maxtxpkt; + __u32 maxrxpkt; + __u16 speed_hi; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __u32 lp_advertising; + __u32 reserved[2]; +}; + +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC = 0, + ETHTOOL_TUNABLE_U8 = 1, + ETHTOOL_TUNABLE_U16 = 2, + ETHTOOL_TUNABLE_U32 = 3, + ETHTOOL_TUNABLE_U64 = 4, + ETHTOOL_TUNABLE_STRING = 5, + ETHTOOL_TUNABLE_S8 = 6, + ETHTOOL_TUNABLE_S16 = 7, + ETHTOOL_TUNABLE_S32 = 8, + ETHTOOL_TUNABLE_S64 = 9, +}; + +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +struct ethtool_gstrings { + __u32 cmd; + __u32 string_set; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; + __u32 reserved; + __u64 sset_mask; + __u32 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; + __u32 size; + __u8 data[0]; +}; + +enum ethtool_flags { + ETH_FLAG_TXVLAN = 128, + ETH_FLAG_RXVLAN = 256, + ETH_FLAG_LRO = 32768, + ETH_FLAG_NTUPLE = 134217728, + ETH_FLAG_RXHASH = 268435456, +}; + +struct ethtool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u8 hfunc; + __u8 input_xfrm; + __u8 rsvd8[2]; + __u32 rsvd32; + __u32 rss_config[0]; +}; + +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT = 0, + ETHTOOL_F_WISH__BIT = 1, + ETHTOOL_F_COMPAT__BIT = 2, +}; + 
+struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[128]; + char data[0]; +}; + +struct flow_rule; + +struct ethtool_rx_flow_rule { + struct flow_rule *rule; + long unsigned int priv[0]; +}; + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP = 1, + FLOW_ACTION_TRAP = 2, + FLOW_ACTION_GOTO = 3, + FLOW_ACTION_REDIRECT = 4, + FLOW_ACTION_MIRRED = 5, + FLOW_ACTION_REDIRECT_INGRESS = 6, + FLOW_ACTION_MIRRED_INGRESS = 7, + FLOW_ACTION_VLAN_PUSH = 8, + FLOW_ACTION_VLAN_POP = 9, + FLOW_ACTION_VLAN_MANGLE = 10, + FLOW_ACTION_TUNNEL_ENCAP = 11, + FLOW_ACTION_TUNNEL_DECAP = 12, + FLOW_ACTION_MANGLE = 13, + FLOW_ACTION_ADD = 14, + FLOW_ACTION_CSUM = 15, + FLOW_ACTION_MARK = 16, + FLOW_ACTION_PTYPE = 17, + FLOW_ACTION_PRIORITY = 18, + FLOW_ACTION_RX_QUEUE_MAPPING = 19, + FLOW_ACTION_WAKE = 20, + FLOW_ACTION_QUEUE = 21, + FLOW_ACTION_SAMPLE = 22, + FLOW_ACTION_POLICE = 23, + FLOW_ACTION_CT = 24, + FLOW_ACTION_CT_METADATA = 25, + FLOW_ACTION_MPLS_PUSH = 26, + FLOW_ACTION_MPLS_POP = 27, + FLOW_ACTION_MPLS_MANGLE = 28, + FLOW_ACTION_GATE = 29, + FLOW_ACTION_PPPOE_PUSH = 30, + FLOW_ACTION_JUMP = 31, + FLOW_ACTION_PIPE = 32, + FLOW_ACTION_VLAN_PUSH_ETH = 33, + FLOW_ACTION_VLAN_POP_ETH = 34, + FLOW_ACTION_CONTINUE = 35, + NUM_FLOW_ACTIONS = 36, +}; + +typedef void (*action_destr)(void *); + +enum flow_action_mangle_base { + FLOW_ACT_MANGLE_UNSPEC = 0, + FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1, + FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2, + FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3, + FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4, + FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5, +}; + +struct psample_group; + +struct action_gate_entry; + +struct nf_flowtable; + +struct flow_action_cookie; + +struct flow_action_entry { + enum flow_action_id id; + u32 hw_index; + long unsigned int cookie; + long: 32; + u64 miss_cookie; + enum flow_action_hw_stats hw_stats; + action_destr destructor; + void *destructor_priv; + long: 32; + union { + u32 chain_index; + struct net_device *dev; + struct { + u16 vid; + __be16 proto; + u8 prio; + } vlan; + struct { + unsigned char dst[6]; + unsigned char src[6]; + } vlan_push_eth; + struct { + enum flow_action_mangle_base htype; + u32 offset; + u32 mask; + u32 val; + } mangle; + struct ip_tunnel_info *tunnel; + u32 csum_flags; + u32 mark; + u16 ptype; + u16 rx_queue; + u32 priority; + struct { + u32 ctx; + u32 index; + u8 vf; + } queue; + struct { + struct psample_group *psample_group; + u32 rate; + u32 trunc_size; + bool truncate; + } sample; + struct { + u32 burst; + long: 32; + u64 rate_bytes_ps; + u64 peakrate_bytes_ps; + u32 avrate; + u16 overhead; + u64 burst_pkt; + u64 rate_pkt_ps; + u32 mtu; + struct { + enum flow_action_id act_id; + u32 extval; + } exceed; + struct { + enum flow_action_id act_id; + u32 extval; + } notexceed; + long: 32; + } police; + struct { + int action; + u16 zone; + struct nf_flowtable *flow_table; + } ct; + struct { + long unsigned int cookie; + u32 mark; + u32 labels[4]; + bool orig_dir; + } ct_metadata; + struct { + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { + __be16 proto; + } mpls_pop; + struct { + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; + struct { + s32 prio; + long: 32; + u64 basetime; + u64 cycletime; + u64 cycletimeext; + u32 num_entries; + struct action_gate_entry *entries; + } gate; + struct { + u16 sid; + } pppoe; + }; + struct flow_action_cookie *user_cookie; + long: 32; +}; + +struct flow_action { + 
unsigned int num_entries; + long: 32; + struct flow_action_entry entries[0]; +}; + +struct flow_rule { + struct flow_match match; + long: 32; + struct flow_action action; +}; + +struct ethtool_rx_flow_spec_input { + const struct ethtool_rx_flow_spec *fs; + u32 rss_ctx; +}; + +struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *); + int (*get_strings)(struct phy_device *, u8 *); + int (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *, struct netlink_ext_ack *); + int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); + int (*start_cable_test)(struct phy_device *, struct netlink_ext_ack *); + int (*start_cable_test_tdr)(struct phy_device *, struct netlink_ext_ack *, const struct phy_tdr_config *); +}; + +struct flow_action_cookie { + u32 cookie_len; + u8 cookie[0]; +}; + +struct ethtool_devlink_compat { + struct devlink *devlink; + union { + struct ethtool_flash efl; + struct ethtool_drvinfo info; + }; +}; + +struct ethtool_link_usettings { + struct ethtool_link_settings base; + struct { + __u32 supported[4]; + __u32 advertising[4]; + __u32 lp_advertising[4]; + } link_modes; +}; + +struct ethtool_rx_flow_key { + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_eth_addrs eth_addrs; +}; + +struct ethtool_rx_flow_match { + struct flow_dissector dissector; + struct ethtool_rx_flow_key key; + struct ethtool_rx_flow_key mask; +}; + +struct nf_bridge_info { + enum { + BRNF_PROTO_UNCHANGED = 0, + BRNF_PROTO_8021Q = 1, + BRNF_PROTO_PPPOE = 2, + } orig_proto: 8; + u8 pkt_otherhost: 1; + u8 in_prerouting: 1; + u8 bridged_dnat: 1; + u8 sabotage_in_done: 1; + __u16 frag_max_size; + int physinif; + struct net_device *physoutdev; + union { + __be32 ipv4_daddr; + struct in6_addr ipv6_daddr; + char neigh_header[8]; + }; +}; + +struct nfnl_ct_hook { + size_t (*build_size)(const struct nf_conn *); + int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, u_int16_t, u_int16_t); + int (*parse)(const struct nlattr *, struct nf_conn *); + int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32); + void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32); +}; + +enum nfulnl_msg_types { + NFULNL_MSG_PACKET = 0, + NFULNL_MSG_CONFIG = 1, + NFULNL_MSG_MAX = 2, +}; + +struct nfulnl_msg_packet_hdr { + __be16 hw_protocol; + __u8 hook; + __u8 _pad; +}; + +struct nfulnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfulnl_msg_packet_timestamp { + __be64 sec; + __be64 usec; +}; + +enum nfulnl_vlan_attr { + NFULA_VLAN_UNSPEC = 0, + NFULA_VLAN_PROTO = 1, + NFULA_VLAN_TCI = 2, + __NFULA_VLAN_MAX = 3, +}; + +enum nfulnl_attr_type { + NFULA_UNSPEC = 0, + NFULA_PACKET_HDR = 1, + NFULA_MARK = 2, + NFULA_TIMESTAMP = 3, + NFULA_IFINDEX_INDEV = 4, + NFULA_IFINDEX_OUTDEV = 5, + NFULA_IFINDEX_PHYSINDEV = 6, + NFULA_IFINDEX_PHYSOUTDEV = 7, + NFULA_HWADDR = 8, + NFULA_PAYLOAD = 9, + NFULA_PREFIX = 10, + NFULA_UID = 11, + NFULA_SEQ = 12, + NFULA_SEQ_GLOBAL = 13, + NFULA_GID = 14, + NFULA_HWTYPE = 15, + NFULA_HWHEADER = 16, + NFULA_HWLEN = 17, + NFULA_CT = 18, + NFULA_CT_INFO = 19, + NFULA_VLAN = 20, + NFULA_L2HDR = 21, + 
__NFULA_MAX = 22, +}; + +enum nfulnl_msg_config_cmds { + NFULNL_CFG_CMD_NONE = 0, + NFULNL_CFG_CMD_BIND = 1, + NFULNL_CFG_CMD_UNBIND = 2, + NFULNL_CFG_CMD_PF_BIND = 3, + NFULNL_CFG_CMD_PF_UNBIND = 4, +}; + +struct nfulnl_msg_config_cmd { + __u8 command; +}; + +struct nfulnl_msg_config_mode { + __be32 copy_range; + __u8 copy_mode; + __u8 _pad; +} __attribute__((packed)); + +enum nfulnl_attr_config { + NFULA_CFG_UNSPEC = 0, + NFULA_CFG_CMD = 1, + NFULA_CFG_MODE = 2, + NFULA_CFG_NLBUFSIZ = 3, + NFULA_CFG_TIMEOUT = 4, + NFULA_CFG_QTHRESH = 5, + NFULA_CFG_FLAGS = 6, + __NFULA_CFG_MAX = 7, +}; + +struct nfulnl_instance { + struct hlist_node hlist; + spinlock_t lock; + refcount_t use; + unsigned int qlen; + struct sk_buff *skb; + struct timer_list timer; + struct net *net; + netns_tracker ns_tracker; + struct user_namespace *peer_user_ns; + u32 peer_portid; + unsigned int flushtimeout; + unsigned int nlbufsiz; + unsigned int qthreshold; + u_int32_t copy_range; + u_int32_t seq; + u_int16_t group_num; + u_int16_t flags; + u_int8_t copy_mode; + struct callback_head rcu; +}; + +struct nfnl_log_net { + spinlock_t instances_lock; + struct hlist_head instance_table[16]; + atomic_t global_seq; +}; + +struct iter_state { + struct seq_net_private p; + unsigned int bucket; +}; + +enum nft_bitwise_ops { + NFT_BITWISE_MASK_XOR = 0, + NFT_BITWISE_LSHIFT = 1, + NFT_BITWISE_RSHIFT = 2, + NFT_BITWISE_AND = 3, + NFT_BITWISE_OR = 4, + NFT_BITWISE_XOR = 5, +}; + +enum nft_bitwise_attributes { + NFTA_BITWISE_UNSPEC = 0, + NFTA_BITWISE_SREG = 1, + NFTA_BITWISE_DREG = 2, + NFTA_BITWISE_LEN = 3, + NFTA_BITWISE_MASK = 4, + NFTA_BITWISE_XOR = 5, + NFTA_BITWISE_OP = 6, + NFTA_BITWISE_DATA = 7, + NFTA_BITWISE_SREG2 = 8, + __NFTA_BITWISE_MAX = 9, +}; + +struct nf_flowtable_type; + +struct nf_flowtable { + unsigned int flags; + int priority; + struct rhashtable rhashtable; + struct list_head list; + const struct nf_flowtable_type *type; + struct delayed_work gc_work; + struct flow_block flow_block; + struct rw_semaphore flow_block_lock; + possible_net_t net; +}; + +enum flow_block_command { + FLOW_BLOCK_BIND = 0, + FLOW_BLOCK_UNBIND = 1, +}; + +struct nf_flow_key { + struct flow_dissector_key_meta meta; + struct flow_dissector_key_control control; + struct flow_dissector_key_control enc_control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_keyid enc_key_id; + union { + struct flow_dissector_key_ipv4_addrs enc_ipv4; + struct flow_dissector_key_ipv6_addrs enc_ipv6; + }; + struct flow_dissector_key_tcp tcp; + struct flow_dissector_key_ports tp; +}; + +struct nf_flow_match { + struct flow_dissector dissector; + struct nf_flow_key key; + struct nf_flow_key mask; +}; + +struct nf_flow_rule { + struct nf_flow_match match; + struct flow_rule *rule; + long: 32; +}; + +enum flow_offload_tuple_dir { + FLOW_OFFLOAD_DIR_ORIGINAL = 0, + FLOW_OFFLOAD_DIR_REPLY = 1, +}; + +struct flow_offload; + +struct nf_flowtable_type { + struct list_head list; + int family; + int (*init)(struct nf_flowtable *); + bool (*gc)(const struct flow_offload *); + int (*setup)(struct nf_flowtable *, struct net_device *, enum flow_block_command); + int (*action)(struct net *, struct flow_offload *, enum flow_offload_tuple_dir, struct nf_flow_rule *); + void (*free)(struct nf_flowtable *); + void (*get)(struct nf_flowtable *); + void (*put)(struct nf_flowtable 
*); + nf_hookfn *hook; + struct module *owner; +}; + +struct flow_offload_tuple { + union { + struct in_addr src_v4; + struct in6_addr src_v6; + }; + union { + struct in_addr dst_v4; + struct in6_addr dst_v6; + }; + struct { + __be16 src_port; + __be16 dst_port; + }; + int iifidx; + u8 l3proto; + u8 l4proto; + struct { + u16 id; + __be16 proto; + } encap[2]; + struct {} __hash; + u8 dir: 2; + u8 xmit_type: 3; + u8 encap_num: 2; + char: 1; + u8 in_vlan_ingress: 2; + u16 mtu; + union { + struct { + struct dst_entry *dst_cache; + u32 dst_cookie; + }; + struct { + u32 ifidx; + u32 hw_ifidx; + u8 h_source[6]; + u8 h_dest[6]; + } out; + struct { + u32 iifidx; + } tc; + }; +}; + +struct flow_offload_tuple_rhash { + struct rhash_head node; + struct flow_offload_tuple tuple; +}; + +struct flow_offload { + struct flow_offload_tuple_rhash tuplehash[2]; + struct nf_conn *ct; + long unsigned int flags; + u16 type; + u32 timeout; + struct callback_head callback_head; +}; + +struct nft_data_desc { + enum nft_data_types type; + unsigned int size; + unsigned int len; + unsigned int flags; +}; + +enum nft_offload_dep_type { + NFT_OFFLOAD_DEP_UNSPEC = 0, + NFT_OFFLOAD_DEP_NETWORK = 1, + NFT_OFFLOAD_DEP_TRANSPORT = 2, +}; + +struct nft_offload_reg { + u32 key; + u32 len; + u32 base_offset; + u32 offset; + u32 flags; + long: 32; + struct nft_data data; + struct nft_data mask; +}; + +struct nft_offload_ctx { + struct { + enum nft_offload_dep_type type; + __be16 l3num; + u8 protonum; + } dep; + unsigned int num_actions; + struct net *net; + struct nft_offload_reg regs[24]; +}; + +struct nft_flow_key { + struct flow_dissector_key_basic basic; + struct flow_dissector_key_control control; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_eth_addrs eth_addrs; + struct flow_dissector_key_meta meta; +}; + +struct nft_flow_match { + struct flow_dissector dissector; + struct nft_flow_key key; + struct nft_flow_key mask; +}; + +struct nft_flow_rule { + __be16 proto; + long: 32; + struct nft_flow_match match; + struct flow_rule *rule; + long: 32; +}; + +struct nft_bitwise_fast_expr { + u32 mask; + u32 xor; + u8 sreg; + u8 dreg; +}; + +struct nft_bitwise { + u8 sreg; + u8 sreg2; + u8 dreg; + enum nft_bitwise_ops op: 8; + u8 len; + struct nft_data mask; + struct nft_data xor; + struct nft_data data; +}; + +enum nft_nat_types { + NFT_NAT_SNAT = 0, + NFT_NAT_DNAT = 1, +}; + +enum nft_nat_attributes { + NFTA_NAT_UNSPEC = 0, + NFTA_NAT_TYPE = 1, + NFTA_NAT_FAMILY = 2, + NFTA_NAT_REG_ADDR_MIN = 3, + NFTA_NAT_REG_ADDR_MAX = 4, + NFTA_NAT_REG_PROTO_MIN = 5, + NFTA_NAT_REG_PROTO_MAX = 6, + NFTA_NAT_FLAGS = 7, + __NFTA_NAT_MAX = 8, +}; + +struct nf_nat_range { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; +}; + +struct nft_nat { + u8 sreg_addr_min; + u8 sreg_addr_max; + u8 sreg_proto_min; + u8 sreg_proto_max; + enum nf_nat_manip_type type: 8; + u8 family; + u16 flags; +}; + +enum lwtunnel_ip_t { + LWTUNNEL_IP_UNSPEC = 0, + LWTUNNEL_IP_ID = 1, + LWTUNNEL_IP_DST = 2, + LWTUNNEL_IP_SRC = 3, + LWTUNNEL_IP_TTL = 4, + LWTUNNEL_IP_TOS = 5, + LWTUNNEL_IP_FLAGS = 6, + LWTUNNEL_IP_PAD = 7, + LWTUNNEL_IP_OPTS = 8, + __LWTUNNEL_IP_MAX = 9, +}; + +enum lwtunnel_ip6_t { + LWTUNNEL_IP6_UNSPEC = 0, 
+ LWTUNNEL_IP6_ID = 1, + LWTUNNEL_IP6_DST = 2, + LWTUNNEL_IP6_SRC = 3, + LWTUNNEL_IP6_HOPLIMIT = 4, + LWTUNNEL_IP6_TC = 5, + LWTUNNEL_IP6_FLAGS = 6, + LWTUNNEL_IP6_PAD = 7, + LWTUNNEL_IP6_OPTS = 8, + __LWTUNNEL_IP6_MAX = 9, +}; + +enum { + LWTUNNEL_IP_OPTS_UNSPEC = 0, + LWTUNNEL_IP_OPTS_GENEVE = 1, + LWTUNNEL_IP_OPTS_VXLAN = 2, + LWTUNNEL_IP_OPTS_ERSPAN = 3, + __LWTUNNEL_IP_OPTS_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0, + LWTUNNEL_IP_OPT_GENEVE_CLASS = 1, + LWTUNNEL_IP_OPT_GENEVE_TYPE = 2, + LWTUNNEL_IP_OPT_GENEVE_DATA = 3, + __LWTUNNEL_IP_OPT_GENEVE_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_VXLAN_GBP = 1, + __LWTUNNEL_IP_OPT_VXLAN_MAX = 2, +}; + +enum { + LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_ERSPAN_VER = 1, + LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2, + LWTUNNEL_IP_OPT_ERSPAN_DIR = 3, + LWTUNNEL_IP_OPT_ERSPAN_HWID = 4, + __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5, +}; + +struct lwtunnel_encap_ops { + int (*build_state)(struct net *, struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *); + void (*destroy_state)(struct lwtunnel_state *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + int (*input)(struct sk_buff *); + int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); + int (*get_encap_size)(struct lwtunnel_state *); + int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); + int (*xmit)(struct sk_buff *); + struct module *owner; +}; + +enum { + IFLA_IPTUN_UNSPEC = 0, + IFLA_IPTUN_LINK = 1, + IFLA_IPTUN_LOCAL = 2, + IFLA_IPTUN_REMOTE = 3, + IFLA_IPTUN_TTL = 4, + IFLA_IPTUN_TOS = 5, + IFLA_IPTUN_ENCAP_LIMIT = 6, + IFLA_IPTUN_FLOWINFO = 7, + IFLA_IPTUN_FLAGS = 8, + IFLA_IPTUN_PROTO = 9, + IFLA_IPTUN_PMTUDISC = 10, + IFLA_IPTUN_6RD_PREFIX = 11, + IFLA_IPTUN_6RD_RELAY_PREFIX = 12, + IFLA_IPTUN_6RD_PREFIXLEN = 13, + IFLA_IPTUN_6RD_RELAY_PREFIXLEN = 14, + IFLA_IPTUN_ENCAP_TYPE = 15, + IFLA_IPTUN_ENCAP_FLAGS = 16, + IFLA_IPTUN_ENCAP_SPORT = 17, + IFLA_IPTUN_ENCAP_DPORT = 18, + IFLA_IPTUN_COLLECT_METADATA = 19, + IFLA_IPTUN_FWMARK = 20, + __IFLA_IPTUN_MAX = 21, +}; + +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); + int (*err_handler)(struct sk_buff *, u32); +}; + +struct udp_tunnel_info { + short unsigned int type; + sa_family_t sa_family; + __be16 port; + u8 hw_priv; +}; + +struct udp_tunnel_nic_shared { + struct udp_tunnel_nic *udp_tunnel_nic_info; + struct list_head devices; +}; + +struct geneve_opt { + __be16 opt_class; + u8 type; + u8 length: 5; + u8 r3: 1; + u8 r2: 1; + u8 r1: 1; + u8 opt_data[0]; +}; + +struct vxlan_metadata { + u32 gbp; +}; + +struct erspan_md2 { + __be32 timestamp; + __be16 sgt; + __u8 hwid_upper: 2; + __u8 ft: 5; + __u8 p: 1; + __u8 o: 1; + __u8 gra: 2; + __u8 dir: 1; + __u8 hwid: 4; +}; + +struct erspan_metadata { + int version; + union { + __be32 index; + struct erspan_md2 md2; + } u; +}; + +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; + struct { + enum bpf_cgroup_iter_order order; + __u32 cgroup_fd; + __u64 cgroup_id; + } cgroup; + struct { + __u32 tid; + __u32 pid; + __u32 pid_fd; + } task; +}; + +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *, union bpf_iter_link_info *, struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *, struct seq_file *); + +typedef int 
(*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *, struct bpf_link_info *); + +typedef const struct bpf_func_proto * (*bpf_iter_get_func_proto_t)(enum bpf_func_id, const struct bpf_prog *); + +struct bpf_iter_reg { + const char *target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; + bpf_iter_show_fdinfo_t show_fdinfo; + bpf_iter_fill_link_info_t fill_link_info; + bpf_iter_get_func_proto_t get_func_proto; + u32 ctx_arg_info_size; + u32 feature; + struct bpf_ctx_arg_aux ctx_arg_info[2]; + const struct bpf_iter_seq_info *seq_info; +}; + +struct bpf_iter_meta { + union { + struct seq_file *seq; + }; + u64 session_id; + u64 seq_num; +}; + +enum netdev_reg_state { + NETREG_UNINITIALIZED = 0, + NETREG_REGISTERED = 1, + NETREG_UNREGISTERING = 2, + NETREG_UNREGISTERED = 3, + NETREG_RELEASED = 4, + NETREG_DUMMY = 5, +}; + +struct rtmsg { + unsigned char rtm_family; + unsigned char rtm_dst_len; + unsigned char rtm_src_len; + unsigned char rtm_tos; + unsigned char rtm_table; + unsigned char rtm_protocol; + unsigned char rtm_scope; + unsigned char rtm_type; + unsigned int rtm_flags; +}; + +enum rtattr_type_t { + RTA_UNSPEC = 0, + RTA_DST = 1, + RTA_SRC = 2, + RTA_IIF = 3, + RTA_OIF = 4, + RTA_GATEWAY = 5, + RTA_PRIORITY = 6, + RTA_PREFSRC = 7, + RTA_METRICS = 8, + RTA_MULTIPATH = 9, + RTA_PROTOINFO = 10, + RTA_FLOW = 11, + RTA_CACHEINFO = 12, + RTA_SESSION = 13, + RTA_MP_ALGO = 14, + RTA_TABLE = 15, + RTA_MARK = 16, + RTA_MFC_STATS = 17, + RTA_VIA = 18, + RTA_NEWDST = 19, + RTA_PREF = 20, + RTA_ENCAP_TYPE = 21, + RTA_ENCAP = 22, + RTA_EXPIRES = 23, + RTA_PAD = 24, + RTA_UID = 25, + RTA_TTL_PROPAGATE = 26, + RTA_IP_PROTO = 27, + RTA_SPORT = 28, + RTA_DPORT = 29, + RTA_NH_ID = 30, + __RTA_MAX = 31, +}; + +struct rtnexthop { + short unsigned int rtnh_len; + unsigned char rtnh_flags; + unsigned char rtnh_hops; + int rtnh_ifindex; +}; + +struct uncached_list { + spinlock_t lock; + struct list_head head; +}; + +enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE = 0, + FIB_EVENT_ENTRY_APPEND = 1, + FIB_EVENT_ENTRY_ADD = 2, + FIB_EVENT_ENTRY_DEL = 3, + FIB_EVENT_RULE_ADD = 4, + FIB_EVENT_RULE_DEL = 5, + FIB_EVENT_NH_ADD = 6, + FIB_EVENT_NH_DEL = 7, + FIB_EVENT_VIF_ADD = 8, + FIB_EVENT_VIF_DEL = 9, +}; + +struct fib_dump_filter { + u32 table_id; + bool filter_set; + bool dump_routes; + bool dump_exceptions; + bool rtnl_held; + unsigned char protocol; + unsigned char rt_type; + unsigned int flags; + struct net_device *dev; +}; + +struct in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + __u32 rtmsg_type; + __u16 rtmsg_dst_len; + __u16 rtmsg_src_len; + __u32 rtmsg_metric; + long unsigned int rtmsg_info; + __u32 rtmsg_flags; + int rtmsg_ifindex; +}; + +struct fib6_gc_args { + int timeout; + int more; +}; + +struct rt6_exception { + struct hlist_node hlist; + struct rt6_info *rt6i; + long unsigned int stamp; + struct callback_head rcu; +}; + +typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); + +struct rt6_rtnl_dump_arg { + struct sk_buff *skb; + struct netlink_callback *cb; + struct net *net; + struct fib_dump_filter filter; +}; + +struct netevent_redirect { + struct dst_entry *old; + struct dst_entry *new; + struct neighbour *neigh; + const void *daddr; +}; + +struct trace_event_raw_fib6_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[16]; + __u8 
dst[16]; + u16 sport; + u16 dport; + u8 proto; + u8 rt_type; + char name[16]; + __u8 gw[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib6_table_lookup {}; + +typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, const struct fib6_result *, struct fib6_table *, const struct flowi6 *); + +enum rt6_nud_state { + RT6_NUD_FAIL_HARD = -3, + RT6_NUD_FAIL_PROBE = -2, + RT6_NUD_FAIL_DO_RR = -1, + RT6_NUD_SUCCEED = 1, +}; + +struct fib6_nh_dm_arg { + struct net *net; + const struct in6_addr *saddr; + int oif; + int flags; + struct fib6_nh *nh; +}; + +struct fib6_nh_frl_arg { + u32 flags; + int oif; + int strict; + int *mpri; + bool *do_rr; + struct fib6_nh *nh; +}; + +struct fib6_nh_excptn_arg { + struct rt6_info *rt; + int plen; +}; + +struct fib6_nh_match_arg { + const struct net_device *dev; + const struct in6_addr *gw; + struct fib6_nh *match; +}; + +struct fib6_nh_age_excptn_arg { + struct fib6_gc_args *gc_args; + long unsigned int now; +}; + +struct fib6_nh_rd_arg { + struct fib6_result *res; + struct flowi6 *fl6; + const struct in6_addr *gw; + struct rt6_info **ret; +}; + +struct ip6rd_flowi { + struct flowi6 fl6; + struct in6_addr gateway; +}; + +struct fib6_nh_del_cached_rt_arg { + struct fib6_config *cfg; + struct fib6_info *f6i; +}; + +struct arg_dev_net_ip { + struct net *net; + struct in6_addr *addr; +}; + +struct arg_netdev_event { + const struct net_device *dev; + union { + unsigned char nh_flags; + long unsigned int event; + }; +}; + +struct rt6_mtu_change_arg { + struct net_device *dev; + unsigned int mtu; + struct fib6_info *f6i; +}; + +struct rt6_nh { + struct fib6_info *fib6_info; + struct fib6_config r_cfg; + struct list_head next; +}; + +struct fib6_nh_exception_dump_walker { + struct rt6_rtnl_dump_arg *dump; + struct fib6_info *rt; + unsigned int flags; + unsigned int skip; + unsigned int count; +}; + +enum { + MDBA_UNSPEC = 0, + MDBA_MDB = 1, + MDBA_ROUTER = 2, + __MDBA_MAX = 3, +}; + +enum { + MDBA_MDB_UNSPEC = 0, + MDBA_MDB_ENTRY = 1, + __MDBA_MDB_MAX = 2, +}; + +enum { + MDBA_MDB_ENTRY_UNSPEC = 0, + MDBA_MDB_ENTRY_INFO = 1, + __MDBA_MDB_ENTRY_MAX = 2, +}; + +enum { + MDBA_MDB_EATTR_UNSPEC = 0, + MDBA_MDB_EATTR_TIMER = 1, + MDBA_MDB_EATTR_SRC_LIST = 2, + MDBA_MDB_EATTR_GROUP_MODE = 3, + MDBA_MDB_EATTR_SOURCE = 4, + MDBA_MDB_EATTR_RTPROT = 5, + MDBA_MDB_EATTR_DST = 6, + MDBA_MDB_EATTR_DST_PORT = 7, + MDBA_MDB_EATTR_VNI = 8, + MDBA_MDB_EATTR_IFINDEX = 9, + MDBA_MDB_EATTR_SRC_VNI = 10, + __MDBA_MDB_EATTR_MAX = 11, +}; + +enum { + MDBA_MDB_SRCLIST_UNSPEC = 0, + MDBA_MDB_SRCLIST_ENTRY = 1, + __MDBA_MDB_SRCLIST_MAX = 2, +}; + +enum { + MDBA_MDB_SRCATTR_UNSPEC = 0, + MDBA_MDB_SRCATTR_ADDRESS = 1, + MDBA_MDB_SRCATTR_TIMER = 2, + __MDBA_MDB_SRCATTR_MAX = 3, +}; + +enum { + MDBA_ROUTER_UNSPEC = 0, + MDBA_ROUTER_PORT = 1, + __MDBA_ROUTER_MAX = 2, +}; + +enum { + MDBA_ROUTER_PATTR_UNSPEC = 0, + MDBA_ROUTER_PATTR_TIMER = 1, + MDBA_ROUTER_PATTR_TYPE = 2, + MDBA_ROUTER_PATTR_INET_TIMER = 3, + MDBA_ROUTER_PATTR_INET6_TIMER = 4, + MDBA_ROUTER_PATTR_VID = 5, + __MDBA_ROUTER_PATTR_MAX = 6, +}; + +struct br_port_msg { + __u8 family; + __u32 ifindex; +}; + +struct br_mdb_entry { + __u32 ifindex; + __u8 state; + __u8 flags; + __u16 vid; + struct { + union { + __be32 ip4; + struct in6_addr ip6; + unsigned char mac_addr[6]; + } u; + __be16 proto; + } addr; +}; + +enum { + MDBA_SET_ENTRY_UNSPEC = 0, + MDBA_SET_ENTRY = 1, + MDBA_SET_ENTRY_ATTRS = 2, + __MDBA_SET_ENTRY_MAX = 3, +}; + +enum { + MDBA_GET_ENTRY_UNSPEC = 0, + MDBA_GET_ENTRY = 1, + MDBA_GET_ENTRY_ATTRS = 
2, + __MDBA_GET_ENTRY_MAX = 3, +}; + +enum { + MDBE_ATTR_UNSPEC = 0, + MDBE_ATTR_SOURCE = 1, + MDBE_ATTR_SRC_LIST = 2, + MDBE_ATTR_GROUP_MODE = 3, + MDBE_ATTR_RTPROT = 4, + MDBE_ATTR_DST = 5, + MDBE_ATTR_DST_PORT = 6, + MDBE_ATTR_VNI = 7, + MDBE_ATTR_IFINDEX = 8, + MDBE_ATTR_SRC_VNI = 9, + MDBE_ATTR_STATE_MASK = 10, + __MDBE_ATTR_MAX = 11, +}; + +enum { + MDBE_SRC_LIST_UNSPEC = 0, + MDBE_SRC_LIST_ENTRY = 1, + __MDBE_SRC_LIST_MAX = 2, +}; + +enum { + MDBE_SRCATTR_UNSPEC = 0, + MDBE_SRCATTR_ADDRESS = 1, + __MDBE_SRCATTR_MAX = 2, +}; + +struct br_mdb_src_entry { + struct br_ip addr; +}; + +struct br_mdb_config { + struct net_bridge *br; + struct net_bridge_port *p; + struct br_mdb_entry *entry; + struct br_ip group; + bool src_entry; + u8 filter_mode; + u16 nlflags; + struct br_mdb_src_entry *src_entries; + int num_src_entries; + u8 rt_protocol; +}; + +enum { + BR_VLFLAG_PER_PORT_STATS = 1, + BR_VLFLAG_ADDED_BY_SWITCHDEV = 2, + BR_VLFLAG_MCAST_ENABLED = 4, + BR_VLFLAG_GLOBAL_MCAST_ENABLED = 8, + BR_VLFLAG_NEIGH_SUPPRESS_ENABLED = 16, +}; + +struct net_bridge_vlan_group { + struct rhashtable vlan_hash; + struct rhashtable tunnel_hash; + struct list_head vlan_list; + u16 num_vlans; + u16 pvid; + u8 pvid_state; +}; + +struct net_bridge_mcast_gc { + struct hlist_node gc_node; + void (*destroy)(struct net_bridge_mcast_gc *); +}; + +struct net_bridge_port_group; + +struct net_bridge_group_src { + struct hlist_node node; + struct br_ip addr; + struct net_bridge_port_group *pg; + u8 flags; + u8 src_query_rexmit_cnt; + struct timer_list timer; + struct net_bridge *br; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct net_bridge_port_group_sg_key { + struct net_bridge_port *port; + struct br_ip addr; +}; + +struct net_bridge_port_group { + struct net_bridge_port_group *next; + struct net_bridge_port_group_sg_key key; + unsigned char eth_addr[6]; + unsigned char flags; + unsigned char filter_mode; + unsigned char grp_query_rexmit_cnt; + unsigned char rt_protocol; + struct hlist_head src_list; + unsigned int src_ents; + struct timer_list timer; + struct timer_list rexmit_timer; + struct hlist_node mglist; + struct rb_root eht_set_tree; + struct rb_root eht_host_tree; + struct rhash_head rhnode; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct net_bridge_mdb_entry { + struct rhash_head rhnode; + struct net_bridge *br; + struct net_bridge_port_group *ports; + struct br_ip addr; + bool host_joined; + struct timer_list timer; + struct hlist_node mdb_node; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct br_mdb_flush_desc { + u32 port_ifindex; + u16 vid; + u8 rt_protocol; + u8 state; + u8 state_mask; +}; + +enum ieee80211_category { + WLAN_CATEGORY_SPECTRUM_MGMT = 0, + WLAN_CATEGORY_QOS = 1, + WLAN_CATEGORY_DLS = 2, + WLAN_CATEGORY_BACK = 3, + WLAN_CATEGORY_PUBLIC = 4, + WLAN_CATEGORY_RADIO_MEASUREMENT = 5, + WLAN_CATEGORY_FAST_BBS_TRANSITION = 6, + WLAN_CATEGORY_HT = 7, + WLAN_CATEGORY_SA_QUERY = 8, + WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, + WLAN_CATEGORY_WNM = 10, + WLAN_CATEGORY_WNM_UNPROTECTED = 11, + WLAN_CATEGORY_TDLS = 12, + WLAN_CATEGORY_MESH_ACTION = 13, + WLAN_CATEGORY_MULTIHOP_ACTION = 14, + WLAN_CATEGORY_SELF_PROTECTED = 15, + WLAN_CATEGORY_DMG = 16, + WLAN_CATEGORY_WMM = 17, + WLAN_CATEGORY_FST = 18, + WLAN_CATEGORY_UNPROT_DMG = 20, + WLAN_CATEGORY_VHT = 21, + WLAN_CATEGORY_S1G = 22, + WLAN_CATEGORY_PROTECTED_EHT = 37, + WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, + WLAN_CATEGORY_VENDOR_SPECIFIC 
= 127, +}; + +enum ieee80211_ht_actioncode { + WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0, + WLAN_HT_ACTION_SMPS = 1, + WLAN_HT_ACTION_PSMP = 2, + WLAN_HT_ACTION_PCO_PHASE = 3, + WLAN_HT_ACTION_CSI = 4, + WLAN_HT_ACTION_NONCOMPRESSED_BF = 5, + WLAN_HT_ACTION_COMPRESSED_BF = 6, + WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7, +}; + +enum ieee80211_back_actioncode { + WLAN_ACTION_ADDBA_REQ = 0, + WLAN_ACTION_ADDBA_RESP = 1, + WLAN_ACTION_DELBA = 2, +}; + +enum ieee80211_back_parties { + WLAN_BACK_RECIPIENT = 0, + WLAN_BACK_INITIATOR = 1, +}; + +struct codel_stats { + u32 maxpacket; + u32 drop_count; + u32 drop_len; + u32 ecn_mark; + u32 ce_mark; +}; + +enum ieee80211_agg_stop_reason { + AGG_STOP_DECLINED = 0, + AGG_STOP_LOCAL_REQUEST = 1, + AGG_STOP_PEER_REQUEST = 2, + AGG_STOP_DESTROY_STA = 3, +}; + +enum ieee80211_status_data { + IEEE80211_STATUS_TYPE_MASK = 15, + IEEE80211_STATUS_TYPE_INVALID = 0, + IEEE80211_STATUS_TYPE_SMPS = 1, + IEEE80211_STATUS_TYPE_NEG_TTLM = 2, + IEEE80211_STATUS_SUBDATA_MASK = 8176, +}; + +enum txq_info_flags { + IEEE80211_TXQ_STOP = 0, + IEEE80211_TXQ_AMPDU = 1, + IEEE80211_TXQ_NO_AMSDU = 2, + IEEE80211_TXQ_DIRTY = 3, +}; + +struct txq_info { + struct fq_tin tin; + struct codel_vars def_cvars; + struct codel_stats cstats; + u16 schedule_round; + struct list_head schedule_order; + struct sk_buff_head frags; + long unsigned int flags; + struct ieee80211_txq txq; +}; + +typedef initcall_t initcall_entry_t; + +typedef int (*parse_unknown_fn)(char *, char *, const char *, void *); + +struct binfmt_misc { + struct list_head entries; + rwlock_t entries_lock; + bool enabled; +}; + +struct trace_event_raw_initcall_level { + struct trace_entry ent; + u32 __data_loc_level; + char __data[0]; +}; + +struct trace_event_raw_initcall_start { + struct trace_entry ent; + initcall_t func; + char __data[0]; +}; + +struct trace_event_raw_initcall_finish { + struct trace_entry ent; + initcall_t func; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_initcall_level { + u32 level; + const void *level_ptr_; +}; + +struct trace_event_data_offsets_initcall_start {}; + +struct trace_event_data_offsets_initcall_finish {}; + +typedef void (*btf_trace_initcall_level)(void *, const char *); + +typedef void (*btf_trace_initcall_start)(void *, initcall_t); + +typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int); + +struct blacklist_entry { + struct list_head next; + char *buf; +}; + +struct sysrq_key_op { + void (* const handler)(u8); + const char * const help_msg; + const char * const action_msg; + const int enable_mask; +}; + +enum { + CSD_FLAG_LOCK = 1, + IRQ_WORK_PENDING = 1, + IRQ_WORK_BUSY = 2, + IRQ_WORK_LAZY = 4, + IRQ_WORK_HARD_IRQ = 8, + IRQ_WORK_CLAIMED = 3, + CSD_TYPE_ASYNC = 0, + CSD_TYPE_SYNC = 16, + CSD_TYPE_IRQ_WORK = 32, + CSD_TYPE_TTWU = 48, + CSD_FLAG_TYPE_MASK = 240, +}; + +typedef struct { + seqcount_t seqcount; +} seqcount_latch_t; + +struct dev_printk_info { + char subsystem[16]; + char device[48]; +}; + +struct printk_buffers { + char outbuf[2048]; + char scratchbuf[1024]; +}; + +enum con_flush_mode { + CONSOLE_FLUSH_PENDING = 0, + CONSOLE_REPLAY_ALL = 1, +}; + +struct syscore_ops { + struct list_head node; + int (*suspend)(); + void (*resume)(); + void (*shutdown)(); +}; + +enum kmsg_dump_reason { + KMSG_DUMP_UNDEF = 0, + KMSG_DUMP_PANIC = 1, + KMSG_DUMP_OOPS = 2, + KMSG_DUMP_EMERG = 3, + KMSG_DUMP_SHUTDOWN = 4, + KMSG_DUMP_MAX = 5, +}; + +struct kmsg_dump_iter { + u64 cur_seq; + u64 next_seq; +}; + +struct kmsg_dump_detail { + enum kmsg_dump_reason 
reason; + const char *description; +}; + +struct kmsg_dumper { + struct list_head list; + void (*dump)(struct kmsg_dumper *, struct kmsg_dump_detail *); + enum kmsg_dump_reason max_reason; + bool registered; +}; + +struct trace_event_raw_console { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_console { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_console)(void *, const char *, size_t); + +struct printk_info { + u64 seq; + u64 ts_nsec; + u16 text_len; + u8 facility; + u8 flags: 5; + u8 level: 3; + u32 caller_id; + struct dev_printk_info dev_info; +}; + +struct printk_record { + struct printk_info *info; + char *text_buf; + unsigned int text_buf_size; +}; + +struct prb_data_blk_lpos { + long unsigned int begin; + long unsigned int next; +}; + +struct prb_desc { + atomic_long_t state_var; + struct prb_data_blk_lpos text_blk_lpos; +}; + +struct prb_data_ring { + unsigned int size_bits; + char *data; + atomic_long_t head_lpos; + atomic_long_t tail_lpos; +}; + +struct prb_desc_ring { + unsigned int count_bits; + struct prb_desc *descs; + struct printk_info *infos; + atomic_long_t head_id; + atomic_long_t tail_id; + atomic_long_t last_finalized_seq; +}; + +struct printk_ringbuffer { + struct prb_desc_ring desc_ring; + struct prb_data_ring text_data_ring; + atomic_long_t fail; +}; + +struct prb_reserved_entry { + struct printk_ringbuffer *rb; + long unsigned int irqflags; + long unsigned int id; + unsigned int text_space; +}; + +enum desc_state { + desc_miss = -1, + desc_reserved = 0, + desc_committed = 1, + desc_finalized = 2, + desc_reusable = 3, +}; + +struct console_cmdline { + char name[16]; + int index; + char devname[32]; + bool user_specified; + char *options; +}; + +enum printk_info_flags { + LOG_FORCE_CON = 1, + LOG_NEWLINE = 2, + LOG_CONT = 8, +}; + +struct console_flush_type { + bool nbcon_atomic; + bool nbcon_offload; + bool legacy_direct; + bool legacy_offload; +}; + +struct printk_message { + struct printk_buffers *pbufs; + unsigned int outbuf_len; + u64 seq; + long unsigned int dropped; + long: 32; +}; + +enum devkmsg_log_bits { + __DEVKMSG_LOG_BIT_ON = 0, + __DEVKMSG_LOG_BIT_OFF = 1, + __DEVKMSG_LOG_BIT_LOCK = 2, +}; + +enum devkmsg_log_masks { + DEVKMSG_LOG_MASK_ON = 1, + DEVKMSG_LOG_MASK_OFF = 2, + DEVKMSG_LOG_MASK_LOCK = 4, +}; + +enum con_msg_format_flags { + MSG_FORMAT_DEFAULT = 0, + MSG_FORMAT_SYSLOG = 1, +}; + +struct latched_seq { + seqcount_latch_t latch; + long: 32; + u64 val[2]; +}; + +struct devkmsg_user { + atomic64_t seq; + struct ratelimit_state rs; + struct mutex lock; + struct printk_buffers pbufs; +}; + +struct ctx_switch_entry { + struct trace_entry ent; + unsigned int prev_pid; + unsigned int next_pid; + unsigned int next_cpu; + unsigned char prev_prio; + unsigned char prev_state; + unsigned char next_prio; + unsigned char next_state; +}; + +struct userstack_entry { + struct trace_entry ent; + unsigned int tgid; + long unsigned int caller[8]; +}; + +struct hwlat_entry { + struct trace_entry ent; + u64 duration; + u64 outer_duration; + u64 nmi_total_ts; + struct timespec64 timestamp; + unsigned int nmi_count; + unsigned int seqnum; + unsigned int count; + long: 32; +}; + +struct osnoise_entry { + struct trace_entry ent; + u64 noise; + u64 runtime; + u64 max_sample; + unsigned int hw_count; + unsigned int nmi_count; + unsigned int irq_count; + unsigned int softirq_count; + unsigned int thread_count; + long: 32; +}; + +struct timerlat_entry { + struct trace_entry ent; + unsigned int 
seqnum; + int context; + u64 timer_latency; +}; + +struct ftrace_event_field { + struct list_head link; + const char *name; + const char *type; + int filter_type; + int offset; + int size; + int is_signed; + int len; +}; + +struct trace_mark { + long long unsigned int val; + char sym; + long: 32; +}; + +struct bpf_mprog_fp { + struct bpf_prog *prog; +}; + +struct bpf_mprog_cp { + struct bpf_link *link; +}; + +struct bpf_mprog_bundle; + +struct bpf_mprog_entry { + struct bpf_mprog_fp fp_items[64]; + struct bpf_mprog_bundle *parent; +}; + +struct bpf_mprog_bundle { + struct bpf_mprog_entry a; + struct bpf_mprog_entry b; + struct bpf_mprog_cp cp_items[64]; + struct bpf_prog *ref; + long: 32; + atomic64_t revision; + u32 count; + long: 32; +}; + +struct bpf_tuple { + struct bpf_prog *prog; + struct bpf_link *link; +}; + +struct swap_slots_cache { + bool lock_initialized; + struct mutex alloc_lock; + swp_entry_t *slots; + int nr; + int cur; + spinlock_t free_lock; + swp_entry_t *slots_ret; + int n_ret; +}; + +enum zpool_mapmode { + ZPOOL_MM_RW = 0, + ZPOOL_MM_RO = 1, + ZPOOL_MM_WO = 2, + ZPOOL_MM_DEFAULT = 0, +}; + +struct zpool_driver { + char *type; + struct module *owner; + atomic_t refcount; + struct list_head list; + void * (*create)(const char *, gfp_t); + void (*destroy)(void *); + bool malloc_support_movable; + int (*malloc)(void *, size_t, gfp_t, long unsigned int *); + void (*free)(void *, long unsigned int); + bool sleep_mapped; + void * (*map)(void *, long unsigned int, enum zpool_mapmode); + void (*unmap)(void *, long unsigned int); + u64 (*total_pages)(void *); +}; + +struct zbud_pool { + spinlock_t lock; + union { + struct list_head buddied; + struct list_head unbuddied[63]; + }; + long: 32; + u64 pages_nr; +}; + +struct zbud_header { + struct list_head buddy; + unsigned int first_chunks; + unsigned int last_chunks; +}; + +enum buddy { + FIRST = 0, + LAST = 1, +}; + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct fsuuid2 { + __u8 len; + __u8 uuid[16]; +}; + +struct fs_sysfs_path { + __u8 len; + __u8 name[128]; +}; + +struct file_dedupe_range_info { + __s64 dest_fd; + __u64 dest_offset; + __u64 bytes_deduped; + __s32 status; + __u32 reserved; +}; + +struct file_dedupe_range { + __u64 src_offset; + __u64 src_length; + __u16 dest_count; + __u16 reserved1; + __u32 reserved2; + struct file_dedupe_range_info info[0]; +}; + +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + __u32 fsx_cowextsize; + unsigned char fsx_pad[8]; +}; + +struct space_resv { + __s16 l_type; + __s16 l_whence; + long: 32; + __s64 l_start; + __s64 l_len; + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; +}; + +typedef int class_get_unused_fd_t; + +typedef struct { + void *lock; +} class_rcu_t; + +struct mount; + +struct mnt_namespace { + struct ns_common ns; + struct mount *root; + struct rb_root mounts; + struct user_namespace *user_ns; + struct ucounts *ucounts; + u64 seq; + wait_queue_head_t poll; + long: 32; + u64 event; + unsigned int nr_mounts; + unsigned int pending_mounts; + struct rb_node mnt_ns_tree_node; + refcount_t passive; +}; + +struct pseudo_fs_context { + const struct super_operations *ops; + const struct xattr_handler * const *xattr; + const struct dentry_operations *dops; + long unsigned int magic; +}; + +typedef struct ns_common *ns_get_path_helper_t(void *); + +struct fs_pin { + wait_queue_head_t wait; + int done; + struct hlist_node s_list; + struct hlist_node m_list; + 
void (*kill)(struct fs_pin *); +}; + +struct mnt_ns_info { + __u32 size; + __u32 nr_mounts; + __u64 mnt_ns_id; +}; + +struct mnt_pcp; + +struct mountpoint; + +struct mount { + struct hlist_node mnt_hash; + struct mount *mnt_parent; + struct dentry *mnt_mountpoint; + struct vfsmount mnt; + union { + struct callback_head mnt_rcu; + struct llist_node mnt_llist; + }; + struct mnt_pcp *mnt_pcp; + struct list_head mnt_mounts; + struct list_head mnt_child; + struct list_head mnt_instance; + const char *mnt_devname; + union { + struct rb_node mnt_node; + struct list_head mnt_list; + }; + struct list_head mnt_expire; + struct list_head mnt_share; + struct list_head mnt_slave_list; + struct list_head mnt_slave; + struct mount *mnt_master; + struct mnt_namespace *mnt_ns; + struct mountpoint *mnt_mp; + union { + struct hlist_node mnt_mp_list; + struct hlist_node mnt_umount; + }; + struct list_head mnt_umounting; + struct fsnotify_mark_connector *mnt_fsnotify_marks; + __u32 mnt_fsnotify_mask; + int mnt_id; + long: 32; + u64 mnt_id_unique; + int mnt_group_id; + int mnt_expiry_mark; + struct hlist_head mnt_pins; + struct hlist_head mnt_stuck_children; +}; + +struct mnt_pcp { + int mnt_count; + int mnt_writers; +}; + +struct mountpoint { + struct hlist_node m_hash; + struct dentry *m_dentry; + struct hlist_head m_list; + int m_count; +}; + +struct stashed_operations { + void (*put_data)(void *); + int (*init_inode)(struct inode *, void *); +}; + +struct ns_get_path_task_args { + const struct proc_ns_operations *ns_ops; + struct task_struct *task; +}; + +struct fsnotify_group; + +struct fsnotify_iter_info; + +struct fsnotify_mark; + +struct fsnotify_event; + +struct fsnotify_ops { + int (*handle_event)(struct fsnotify_group *, u32, const void *, int, struct inode *, const struct qstr *, u32, struct fsnotify_iter_info *); + int (*handle_inode_event)(struct fsnotify_mark *, u32, struct inode *, struct inode *, const struct qstr *, u32); + void (*free_group_priv)(struct fsnotify_group *); + void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); + void (*free_event)(struct fsnotify_group *, struct fsnotify_event *); + void (*free_mark)(struct fsnotify_mark *); +}; + +struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + struct ucounts *ucounts; +}; + +struct fsnotify_group { + const struct fsnotify_ops *ops; + refcount_t refcnt; + spinlock_t notification_lock; + struct list_head notification_list; + wait_queue_head_t notification_waitq; + unsigned int q_len; + unsigned int max_events; + enum fsnotify_group_prio priority; + bool shutdown; + int flags; + unsigned int owner_flags; + struct mutex mark_mutex; + atomic_t user_waits; + struct list_head marks_list; + struct fasync_struct *fsn_fa; + struct fsnotify_event *overflow_event; + struct mem_cgroup *memcg; + union { + void *private; + struct inotify_group_private_data inotify_data; + }; +}; + +struct fsnotify_iter_info { + struct fsnotify_mark *marks[5]; + struct fsnotify_group *current_group; + unsigned int report_mask; + int srcu_idx; +}; + +struct fsnotify_mark { + __u32 mask; + refcount_t refcnt; + struct fsnotify_group *group; + struct list_head g_list; + spinlock_t lock; + struct hlist_node obj_list; + struct fsnotify_mark_connector *connector; + __u32 ignore_mask; + unsigned int flags; +}; + +struct fsnotify_event { + struct list_head list; +}; + +struct inotify_event_info { + struct fsnotify_event fse; + u32 mask; + int wd; + u32 sync_cookie; + int name_len; + char name[0]; +}; + +struct inotify_inode_mark { + 
struct fsnotify_mark fsn_mark; + int wd; +}; + +enum { + VERBOSE_STATUS = 1, +}; + +enum { + Enabled = 0, + Magic = 1, +}; + +typedef struct { + struct list_head list; + long unsigned int flags; + int offset; + int size; + char *magic; + char *mask; + const char *interpreter; + char *name; + struct dentry *dentry; + struct file *interp_file; + refcount_t users; +} Node; + +enum kernfs_node_flag { + KERNFS_ACTIVATED = 16, + KERNFS_NS = 32, + KERNFS_HAS_SEQ_SHOW = 64, + KERNFS_HAS_MMAP = 128, + KERNFS_LOCKDEP = 256, + KERNFS_HIDDEN = 512, + KERNFS_SUICIDAL = 1024, + KERNFS_SUICIDED = 2048, + KERNFS_EMPTY_DIR = 4096, + KERNFS_HAS_RELEASE = 8192, + KERNFS_REMOVING = 16384, +}; + +struct kernfs_root { + struct kernfs_node *kn; + unsigned int flags; + struct idr ino_idr; + u32 last_id_lowbits; + u32 id_highbits; + struct kernfs_syscall_ops *syscall_ops; + struct list_head supers; + wait_queue_head_t deactivate_waitq; + struct rw_semaphore kernfs_rwsem; + struct rw_semaphore kernfs_iattr_rwsem; + struct rw_semaphore kernfs_supers_rwsem; + struct callback_head rcu; +}; + +struct simple_xattrs { + struct rb_root rb_root; + rwlock_t lock; +}; + +struct kernfs_iattrs { + kuid_t ia_uid; + kgid_t ia_gid; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct simple_xattrs xattrs; + atomic_t nr_user_xattrs; + atomic_t user_xattr_size; +}; + +struct migrate_struct { + ext4_lblk_t first_block; + ext4_lblk_t last_block; + ext4_lblk_t curr_block; + long: 32; + ext4_fsblk_t first_pblock; + ext4_fsblk_t last_pblock; +}; + +typedef unsigned int autofs_wqt_t; + +struct autofs_sb_info; + +struct autofs_info { + struct dentry *dentry; + int flags; + struct completion expire_complete; + struct list_head active; + struct list_head expiring; + struct autofs_sb_info *sbi; + long unsigned int exp_timeout; + long unsigned int last_used; + int count; + kuid_t uid; + kgid_t gid; + struct callback_head rcu; +}; + +struct autofs_wait_queue; + +struct autofs_sb_info { + u32 magic; + int pipefd; + struct file *pipe; + struct pid *oz_pgrp; + int version; + int sub_version; + int min_proto; + int max_proto; + unsigned int flags; + long unsigned int exp_timeout; + unsigned int type; + struct super_block *sb; + struct mutex wq_mutex; + struct mutex pipe_mutex; + spinlock_t fs_lock; + struct autofs_wait_queue *queues; + spinlock_t lookup_lock; + struct list_head active_list; + struct list_head expiring_list; + struct callback_head rcu; +}; + +struct autofs_wait_queue { + wait_queue_head_t queue; + struct autofs_wait_queue *next; + autofs_wqt_t wait_queue_token; + long: 32; + struct qstr name; + u32 offset; + u32 dev; + u64 ino; + kuid_t uid; + kgid_t gid; + pid_t pid; + pid_t tgid; + int status; + unsigned int wait_ctr; +}; + +enum { + Opt_direct = 0, + Opt_fd = 1, + Opt_gid___2 = 2, + Opt_ignore = 3, + Opt_indirect = 4, + Opt_maxproto = 5, + Opt_minproto = 6, + Opt_offset = 7, + Opt_pgrp = 8, + Opt_strictexpire = 9, + Opt_uid___2 = 10, +}; + +struct autofs_fs_context { + kuid_t uid; + kgid_t gid; + int pgrp; + bool pgrp_set; +}; + +struct btrfs_csum_item { + __u8 csum; +}; + +struct btrfs_bio; + +typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *); + +struct btrfs_ordered_sum; + +struct btrfs_bio { + struct btrfs_inode *inode; + long: 32; + u64 file_offset; + union { + struct { + u8 *csum; + u8 csum_inline[64]; + struct bvec_iter saved_iter; + }; + struct { + struct btrfs_ordered_extent *ordered; + struct btrfs_ordered_sum *sums; + u64 orig_physical; + }; + struct 
btrfs_tree_parent_check parent_check; + }; + btrfs_bio_end_io_t end_io; + void *private; + unsigned int mirror_num; + atomic_t pending_ios; + struct work_struct end_io_work; + struct btrfs_fs_info *fs_info; + blk_status_t status; + struct bio bio; +}; + +struct btrfs_ordered_sum { + u64 logical; + u32 len; + struct list_head list; + u8 sums[0]; + long: 32; +}; + +typedef enum { + ZSTD_error_no_error = 0, + ZSTD_error_GENERIC = 1, + ZSTD_error_prefix_unknown = 10, + ZSTD_error_version_unsupported = 12, + ZSTD_error_frameParameter_unsupported = 14, + ZSTD_error_frameParameter_windowTooLarge = 16, + ZSTD_error_corruption_detected = 20, + ZSTD_error_checksum_wrong = 22, + ZSTD_error_dictionary_corrupted = 30, + ZSTD_error_dictionary_wrong = 32, + ZSTD_error_dictionaryCreation_failed = 34, + ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_outOfBound = 42, + ZSTD_error_tableLog_tooLarge = 44, + ZSTD_error_maxSymbolValue_tooLarge = 46, + ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_stage_wrong = 60, + ZSTD_error_init_missing = 62, + ZSTD_error_memory_allocation = 64, + ZSTD_error_workSpace_tooSmall = 66, + ZSTD_error_dstSize_tooSmall = 70, + ZSTD_error_srcSize_wrong = 72, + ZSTD_error_dstBuffer_null = 74, + ZSTD_error_frameIndex_tooLarge = 100, + ZSTD_error_seekableIO = 102, + ZSTD_error_dstBuffer_wrong = 104, + ZSTD_error_srcBuffer_wrong = 105, + ZSTD_error_maxCode = 120, +} ZSTD_ErrorCode; + +typedef enum { + ZSTDcs_created = 0, + ZSTDcs_init = 1, + ZSTDcs_ongoing = 2, + ZSTDcs_ending = 3, +} ZSTD_compressionStage_e; + +typedef enum { + ZSTD_f_zstd1 = 0, + ZSTD_f_zstd1_magicless = 1, +} ZSTD_format_e; + +typedef struct { + int contentSizeFlag; + int checksumFlag; + int noDictIDFlag; +} ZSTD_frameParameters; + +typedef enum { + ZSTD_dictDefaultAttach = 0, + ZSTD_dictForceAttach = 1, + ZSTD_dictForceCopy = 2, + ZSTD_dictForceLoad = 3, +} ZSTD_dictAttachPref_e; + +typedef struct { + ZSTD_paramSwitch_e enableLdm; + U32 hashLog; + U32 bucketSizeLog; + U32 minMatchLength; + U32 hashRateLog; + U32 windowLog; +} ldmParams_t; + +typedef enum { + ZSTD_bm_buffered = 0, + ZSTD_bm_stable = 1, +} ZSTD_bufferMode_e; + +typedef enum { + ZSTD_sf_noBlockDelimiters = 0, + ZSTD_sf_explicitBlockDelimiters = 1, +} ZSTD_sequenceFormat_e; + +struct ZSTD_CCtx_params_s { + ZSTD_format_e format; + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; + int compressionLevel; + int forceWindow; + size_t targetCBlockSize; + int srcSizeHint; + ZSTD_dictAttachPref_e attachDictPref; + ZSTD_paramSwitch_e literalCompressionMode; + int nbWorkers; + size_t jobSize; + int overlapLog; + int rsyncable; + ldmParams_t ldmParams; + int enableDedicatedDictSearch; + ZSTD_bufferMode_e inBufferMode; + ZSTD_bufferMode_e outBufferMode; + ZSTD_sequenceFormat_e blockDelimiters; + int validateSequences; + ZSTD_paramSwitch_e useBlockSplitter; + ZSTD_paramSwitch_e useRowMatchFinder; + int deterministicRefPrefix; + ZSTD_customMem customMem; +}; + +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params; + +typedef enum { + ZSTD_cwksp_alloc_objects = 0, + ZSTD_cwksp_alloc_buffers = 1, + ZSTD_cwksp_alloc_aligned = 2, +} ZSTD_cwksp_alloc_phase_e; + +typedef enum { + ZSTD_cwksp_dynamic_alloc = 0, + ZSTD_cwksp_static_alloc = 1, +} ZSTD_cwksp_static_alloc_e; + +typedef struct { + void *workspace; + void *workspaceEnd; + void *objectEnd; + void *tableEnd; + void *tableValidEnd; + void *allocStart; + BYTE allocFailed; + int workspaceOversizedDuration; + ZSTD_cwksp_alloc_phase_e phase; + ZSTD_cwksp_static_alloc_e isStatic; +} 
ZSTD_cwksp; + +struct POOL_ctx_s; + +typedef struct POOL_ctx_s ZSTD_threadPool; + +typedef struct { + unsigned int offset; + unsigned int litLength; + unsigned int matchLength; + unsigned int rep; +} ZSTD_Sequence; + +typedef struct { + int collectSequences; + ZSTD_Sequence *seqStart; + size_t seqIndex; + size_t maxSequences; +} SeqCollector; + +typedef struct { + U32 offset; + U32 checksum; +} ldmEntry_t; + +typedef struct { + const BYTE *split; + U32 hash; + U32 checksum; + ldmEntry_t *bucket; +} ldmMatchCandidate_t; + +typedef struct { + ZSTD_window_t window; + ldmEntry_t *hashTable; + U32 loadedDictEnd; + BYTE *bucketOffsets; + size_t splitIndices[64]; + ldmMatchCandidate_t matchCandidates[64]; +} ldmState_t; + +typedef struct { + ZSTD_entropyCTables_t entropy; + U32 rep[3]; +} ZSTD_compressedBlockState_t; + +typedef struct { + ZSTD_compressedBlockState_t *prevCBlock; + ZSTD_compressedBlockState_t *nextCBlock; + ZSTD_matchState_t matchState; +} ZSTD_blockState_t; + +typedef enum { + ZSTDb_not_buffered = 0, + ZSTDb_buffered = 1, +} ZSTD_buffered_policy_e; + +typedef enum { + zcss_init = 0, + zcss_load = 1, + zcss_flush = 2, +} ZSTD_cStreamStage; + +struct ZSTD_inBuffer_s { + const void *src; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_inBuffer_s ZSTD_inBuffer; + +typedef enum { + ZSTD_dct_auto = 0, + ZSTD_dct_rawContent = 1, + ZSTD_dct_fullDict = 2, +} ZSTD_dictContentType_e; + +struct ZSTD_CDict_s; + +typedef struct ZSTD_CDict_s ZSTD_CDict; + +typedef struct { + void *dictBuffer; + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; + ZSTD_CDict *cdict; +} ZSTD_localDict; + +struct ZSTD_prefixDict_s { + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; +}; + +typedef struct ZSTD_prefixDict_s ZSTD_prefixDict; + +typedef enum { + set_basic = 0, + set_rle = 1, + set_compressed = 2, + set_repeat = 3, +} symbolEncodingType_e; + +typedef struct { + symbolEncodingType_e hType; + BYTE hufDesBuffer[128]; + size_t hufDesSize; +} ZSTD_hufCTablesMetadata_t; + +typedef struct { + symbolEncodingType_e llType; + symbolEncodingType_e ofType; + symbolEncodingType_e mlType; + BYTE fseTablesBuffer[133]; + size_t fseTablesSize; + size_t lastCountSize; +} ZSTD_fseCTablesMetadata_t; + +typedef struct { + ZSTD_hufCTablesMetadata_t hufMetadata; + ZSTD_fseCTablesMetadata_t fseMetadata; +} ZSTD_entropyCTablesMetadata_t; + +typedef struct { + seqStore_t fullSeqStoreChunk; + seqStore_t firstHalfSeqStore; + seqStore_t secondHalfSeqStore; + seqStore_t currSeqStore; + seqStore_t nextSeqStore; + U32 partitions[196]; + ZSTD_entropyCTablesMetadata_t entropyMetadata; +} ZSTD_blockSplitCtx; + +struct ZSTD_CCtx_s { + ZSTD_compressionStage_e stage; + int cParamsChanged; + int bmi2; + ZSTD_CCtx_params requestedParams; + ZSTD_CCtx_params appliedParams; + ZSTD_CCtx_params simpleApiParams; + U32 dictID; + size_t dictContentSize; + ZSTD_cwksp workspace; + size_t blockSize; + long long unsigned int pledgedSrcSizePlusOne; + long long unsigned int consumedSrcSize; + long long unsigned int producedCSize; + struct xxh64_state xxhState; + ZSTD_customMem customMem; + ZSTD_threadPool *pool; + size_t staticSize; + SeqCollector seqCollector; + int isFirstBlock; + int initialized; + seqStore_t seqStore; + ldmState_t ldmState; + rawSeq *ldmSequences; + size_t maxNbLdmSequences; + rawSeqStore_t externSeqStore; + ZSTD_blockState_t blockState; + U32 *entropyWorkspace; + ZSTD_buffered_policy_e bufferedPolicy; + char *inBuff; + size_t inBuffSize; + size_t inToCompress; + size_t 
inBuffPos; + size_t inBuffTarget; + char *outBuff; + size_t outBuffSize; + size_t outBuffContentSize; + size_t outBuffFlushedSize; + ZSTD_cStreamStage streamStage; + U32 frameEnded; + ZSTD_inBuffer expectedInBuffer; + size_t expectedOutBufferSize; + ZSTD_localDict localDict; + const ZSTD_CDict *cdict; + ZSTD_prefixDict prefixDict; + ZSTD_blockSplitCtx blockSplitCtx; +}; + +typedef struct ZSTD_CCtx_s ZSTD_CCtx; + +typedef struct { + U16 nextState; + BYTE nbAdditionalBits; + BYTE nbBits; + U32 baseValue; +} ZSTD_seqSymbol; + +typedef U32 HUF_DTable; + +typedef struct { + ZSTD_seqSymbol LLTable[513]; + ZSTD_seqSymbol OFTable[257]; + ZSTD_seqSymbol MLTable[513]; + HUF_DTable hufTable[4097]; + U32 rep[3]; + U32 workspace[157]; +} ZSTD_entropyDTables_t; + +typedef enum { + ZSTD_frame = 0, + ZSTD_skippableFrame = 1, +} ZSTD_frameType_e; + +typedef struct { + long long unsigned int frameContentSize; + long long unsigned int windowSize; + unsigned int blockSizeMax; + ZSTD_frameType_e frameType; + unsigned int headerSize; + unsigned int dictID; + unsigned int checksumFlag; + long: 32; +} ZSTD_frameHeader; + +typedef enum { + bt_raw = 0, + bt_rle = 1, + bt_compressed = 2, + bt_reserved = 3, +} blockType_e; + +typedef enum { + ZSTDds_getFrameHeaderSize = 0, + ZSTDds_decodeFrameHeader = 1, + ZSTDds_decodeBlockHeader = 2, + ZSTDds_decompressBlock = 3, + ZSTDds_decompressLastBlock = 4, + ZSTDds_checkChecksum = 5, + ZSTDds_decodeSkippableHeader = 6, + ZSTDds_skipFrame = 7, +} ZSTD_dStage; + +typedef enum { + ZSTD_d_validateChecksum = 0, + ZSTD_d_ignoreChecksum = 1, +} ZSTD_forceIgnoreChecksum_e; + +typedef enum { + ZSTD_use_indefinitely = -1, + ZSTD_dont_use = 0, + ZSTD_use_once = 1, +} ZSTD_dictUses_e; + +struct ZSTD_DDict_s; + +typedef struct ZSTD_DDict_s ZSTD_DDict; + +typedef struct { + const ZSTD_DDict **ddictPtrTable; + size_t ddictPtrTableSize; + size_t ddictPtrCount; +} ZSTD_DDictHashSet; + +typedef enum { + ZSTD_rmd_refSingleDDict = 0, + ZSTD_rmd_refMultipleDDicts = 1, +} ZSTD_refMultipleDDicts_e; + +typedef enum { + zdss_init = 0, + zdss_loadHeader = 1, + zdss_read = 2, + zdss_load = 3, + zdss_flush = 4, +} ZSTD_dStreamStage; + +struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_outBuffer_s ZSTD_outBuffer; + +typedef enum { + ZSTD_not_in_dst = 0, + ZSTD_in_dst = 1, + ZSTD_split = 2, +} ZSTD_litLocation_e; + +struct ZSTD_DCtx_s { + const ZSTD_seqSymbol *LLTptr; + const ZSTD_seqSymbol *MLTptr; + const ZSTD_seqSymbol *OFTptr; + const HUF_DTable *HUFptr; + ZSTD_entropyDTables_t entropy; + U32 workspace[640]; + const void *previousDstEnd; + const void *prefixStart; + const void *virtualStart; + const void *dictEnd; + size_t expected; + ZSTD_frameHeader fParams; + U64 processedCSize; + U64 decodedSize; + blockType_e bType; + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + struct xxh64_state xxhState; + size_t headerSize; + ZSTD_format_e format; + ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; + U32 validateChecksum; + const BYTE *litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + size_t staticSize; + ZSTD_DDict *ddictLocal; + const ZSTD_DDict *ddict; + U32 dictID; + int ddictIsCold; + ZSTD_dictUses_e dictUses; + ZSTD_DDictHashSet *ddictSet; + ZSTD_refMultipleDDicts_e refMultipleDDicts; + ZSTD_dStreamStage streamStage; + char *inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char *outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t lhSize; + U32 hostageByte; + int 
noForwardProgress; + ZSTD_bufferMode_e outBufferMode; + ZSTD_outBuffer expectedOutBuffer; + BYTE *litBuffer; + const BYTE *litBufferEnd; + ZSTD_litLocation_e litBufferLocation; + BYTE litExtraBuffer[65568]; + BYTE headerBuffer[18]; + size_t oversizedDuration; + long: 32; +}; + +typedef struct ZSTD_DCtx_s ZSTD_DCtx; + +typedef ZSTD_CCtx ZSTD_CStream; + +typedef ZSTD_DCtx ZSTD_DStream; + +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +typedef ZSTD_ErrorCode zstd_error_code; + +typedef ZSTD_compressionParameters zstd_compression_parameters; + +typedef ZSTD_parameters zstd_parameters; + +typedef ZSTD_inBuffer zstd_in_buffer; + +typedef ZSTD_outBuffer zstd_out_buffer; + +typedef ZSTD_CStream zstd_cstream; + +typedef ZSTD_DStream zstd_dstream; + +struct compressed_bio { + unsigned int nr_folios; + struct folio **compressed_folios; + u64 start; + unsigned int len; + unsigned int compressed_len; + u8 compress_type; + bool writeback; + union { + struct btrfs_bio *orig_bbio; + struct work_struct write_end_work; + }; + long: 32; + struct btrfs_bio bbio; +}; + +struct workspace_manager { + struct list_head idle_ws; + spinlock_t ws_lock; + int free_ws; + atomic_t total_ws; + wait_queue_head_t ws_wait; +}; + +struct btrfs_compress_op { + struct workspace_manager *workspace_manager; + unsigned int max_level; + unsigned int default_level; +}; + +struct workspace { + void *mem; + size_t size; + char *buf; + unsigned int level; + unsigned int req_level; + long unsigned int last_used; + struct list_head list; + struct list_head lru_list; + zstd_in_buffer in_buf; + zstd_out_buffer out_buf; +}; + +struct zstd_workspace_manager { + const struct btrfs_compress_op *ops; + spinlock_t lock; + struct list_head lru_list; + struct list_head idle_ws[15]; + long unsigned int active_map; + wait_queue_head_t wait; + struct timer_list timer; +}; + +enum { + btrfs_bitmap_nr_uptodate = 0, + btrfs_bitmap_nr_dirty = 1, + btrfs_bitmap_nr_writeback = 2, + btrfs_bitmap_nr_ordered = 3, + btrfs_bitmap_nr_checked = 4, + btrfs_bitmap_nr_locked = 5, + btrfs_bitmap_nr_max = 6, +}; + +struct btrfs_subpage { + spinlock_t lock; + union { + atomic_t eb_refs; + atomic_t nr_locked; + }; + long unsigned int bitmaps[0]; +}; + +enum btrfs_subpage_type { + BTRFS_SUBPAGE_METADATA = 0, + BTRFS_SUBPAGE_DATA = 1, +}; + +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *, const u8 *, unsigned int); + int (*encrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); + int (*decrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); + int (*init)(struct crypto_lskcipher *); + void (*exit)(struct crypto_lskcipher *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct skcipher_alg_common co; +}; + +struct lskcipher_instance { + void (*free)(struct lskcipher_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct lskcipher_alg alg; + }; +}; + +struct io_uring_cmd { + struct file *file; + const struct io_uring_sqe *sqe; + void (*task_work_cb)(struct io_uring_cmd *, unsigned int); + u32 cmd_op; + u32 flags; + u8 pdu[32]; +}; + +struct blkpg_ioctl_arg { + int op; + int flags; + int datalen; + void *data; +}; + +struct blkpg_partition { 
+ long long int start; + long long int length; + int pno; + char devname[64]; + char volname[64]; + long: 32; +}; + +struct pr_reservation { + __u64 key; + __u32 type; + __u32 flags; +}; + +struct pr_registration { + __u64 old_key; + __u64 new_key; + __u32 flags; + __u32 __pad; +}; + +struct pr_preempt { + __u64 old_key; + __u64 new_key; + __u32 type; + __u32 flags; +}; + +struct pr_clear { + __u64 key; + __u32 flags; + __u32 __pad; +}; + +struct pr_keys { + u32 generation; + u32 num_keys; + u64 keys[0]; +}; + +struct pr_held_reservation { + u64 key; + u32 generation; + enum pr_type type; +}; + +enum { + IOU_F_TWQ_LAZY_WAKE = 1, +}; + +struct blk_iou_cmd { + int res; + bool nowait; +}; + +struct io_notif_data { + struct file *file; + struct ubuf_info uarg; + struct io_notif_data *next; + struct io_notif_data *head; + unsigned int account_pages; + bool zc_report; + bool zc_used; + bool zc_copied; +}; + +struct ZSTD_DDict_s { + void *dictBuffer; + const void *dictContent; + size_t dictSize; + ZSTD_entropyDTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; + +typedef enum { + ZSTD_dlm_byCopy = 0, + ZSTD_dlm_byRef = 1, +} ZSTD_dictLoadMethod_e; + +enum bus_notifier_event { + BUS_NOTIFY_ADD_DEVICE = 0, + BUS_NOTIFY_DEL_DEVICE = 1, + BUS_NOTIFY_REMOVED_DEVICE = 2, + BUS_NOTIFY_BIND_DRIVER = 3, + BUS_NOTIFY_BOUND_DRIVER = 4, + BUS_NOTIFY_UNBIND_DRIVER = 5, + BUS_NOTIFY_UNBOUND_DRIVER = 6, + BUS_NOTIFY_DRIVER_NOT_BOUND = 7, +}; + +struct vga_device { + struct list_head list; + struct pci_dev *pdev; + unsigned int decodes; + unsigned int owns; + unsigned int locks; + unsigned int io_lock_cnt; + unsigned int mem_lock_cnt; + unsigned int io_norm_cnt; + unsigned int mem_norm_cnt; + bool bridge_has_one_vga; + bool is_firmware_default; + unsigned int (*set_decode)(struct pci_dev *, bool); +}; + +struct vga_arb_user_card { + struct pci_dev *pdev; + unsigned int mem_cnt; + unsigned int io_cnt; +}; + +struct vga_arb_private { + struct list_head list; + struct pci_dev *target; + struct vga_arb_user_card cards[16]; + spinlock_t lock; +}; + +struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[16]; +}; + +typedef void (*of_init_fn_1)(struct device_node *); + +struct clk_fixed_factor { + struct clk_hw hw; + unsigned int mult; + unsigned int div; + long unsigned int acc; + unsigned int flags; +}; + +struct splice_desc { + size_t total_len; + unsigned int len; + unsigned int flags; + union { + void *userptr; + struct file *file; + void *data; + } u; + void (*splice_eof)(struct splice_desc *); + long: 32; + loff_t pos; + loff_t *opos; + size_t num_spliced; + bool need_wakeup; + long: 32; +}; + +typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); + +struct memdev { + const char *name; + const struct file_operations *fops; + fmode_t fmode; + umode_t mode; +}; + +struct trace_event_raw_dma_fence { + struct trace_entry ent; + u32 __data_loc_driver; + u32 __data_loc_timeline; + unsigned int context; + unsigned int seqno; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_fence { + u32 driver; + const void *driver_ptr_; + u32 timeline; + const void *timeline_ptr_; +}; + +typedef void (*btf_trace_dma_fence_emit)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_init)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_destroy)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_enable_signal)(void *, struct dma_fence *); + +typedef void 
(*btf_trace_dma_fence_signaled)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_start)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_end)(void *, struct dma_fence *); + +struct default_wait_cb { + struct dma_fence_cb base; + struct task_struct *task; +}; + +enum pci_ers_result { + PCI_ERS_RESULT_NONE = 1, + PCI_ERS_RESULT_CAN_RECOVER = 2, + PCI_ERS_RESULT_NEED_RESET = 3, + PCI_ERS_RESULT_DISCONNECT = 4, + PCI_ERS_RESULT_RECOVERED = 5, + PCI_ERS_RESULT_NO_AER_DRIVER = 6, +}; + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE = 0, + PKT_HASH_TYPE_L2 = 1, + PKT_HASH_TYPE_L3 = 2, + PKT_HASH_TYPE_L4 = 3, +}; + +enum netdev_queue_type { + NETDEV_QUEUE_TYPE_RX = 0, + NETDEV_QUEUE_TYPE_TX = 1, +}; + +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; + +union e1000_rx_desc_packet_split { + struct { + __le64 buffer_addr[4]; + } read; + struct { + struct { + __le32 mrq; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length0; + __le16 vlan; + } middle; + struct { + __le16 header_status; + __le16 length[3]; + } upper; + __le64 reserved; + } wb; +}; + +struct e1000_tx_desc { + __le64 buffer_addr; + union { + __le32 data; + struct { + __le16 length; + u8 cso; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; + u8 css; + __le16 special; + } fields; + } upper; +}; + +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; + u8 ipcso; + __le16 ipcse; + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; + u8 tucso; + __le16 tucse; + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; + u8 hdr_len; + __le16 mss; + } fields; + } tcp_seg_setup; +}; + +enum e1000_boards { + board_82571 = 0, + board_82572 = 1, + board_82573 = 2, + board_82574 = 3, + board_82583 = 4, + board_80003es2lan = 5, + board_ich8lan = 6, + board_ich9lan = 7, + board_ich10lan = 8, + board_pchlan = 9, + board_pch2lan = 10, + board_pch_lpt = 11, + board_pch_spt = 12, + board_pch_cnp = 13, + board_pch_tgp = 14, + board_pch_adp = 15, + board_pch_mtp = 16, +}; + +enum e1000_state_t___2 { + __E1000_TESTING = 0, + __E1000_RESETTING = 1, + __E1000_ACCESS_SHARED_RESOURCE = 2, + __E1000_DOWN = 3, +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255, +}; + +struct trace_event_raw_e1000e_trace_mac_register { + struct trace_entry ent; + uint32_t reg; + char __data[0]; +}; + +struct trace_event_data_offsets_e1000e_trace_mac_register {}; + +typedef void (*btf_trace_e1000e_trace_mac_register)(void *, uint32_t); + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + +struct my_u0 { + __le64 a; + __le64 b; +}; + +struct my_u1 { + __le64 a; + __le64 b; + __le64 c; + __le64 d; +}; + +enum iwl_disable_11n { + IWL_DISABLE_HT_ALL = 1, + IWL_DISABLE_HT_TXAGG = 2, + IWL_DISABLE_HT_RXAGG = 4, + IWL_ENABLE_HT_TXAGG = 8, +}; + +struct iwl_eeprom_calib_hdr { + u8 version; + u8 pa_type; + __le16 voltage; +}; + +enum eeprom_sku_bits { + EEPROM_SKU_CAP_BAND_24GHZ = 16, + EEPROM_SKU_CAP_BAND_52GHZ = 32, + EEPROM_SKU_CAP_11N_ENABLE = 64, + 
EEPROM_SKU_CAP_AMT_ENABLE = 128, + EEPROM_SKU_CAP_IPAN_ENABLE = 256, +}; + +enum iwl_eeprom_channel_flags { + EEPROM_CHANNEL_VALID = 1, + EEPROM_CHANNEL_IBSS = 2, + EEPROM_CHANNEL_ACTIVE = 8, + EEPROM_CHANNEL_RADAR = 16, + EEPROM_CHANNEL_WIDE = 32, + EEPROM_CHANNEL_DFS = 128, +}; + +struct iwl_eeprom_channel { + u8 flags; + s8 max_power_avg; +}; + +enum iwl_eeprom_enhanced_txpwr_flags { + IWL_EEPROM_ENH_TXP_FL_VALID = 1, + IWL_EEPROM_ENH_TXP_FL_BAND_52G = 2, + IWL_EEPROM_ENH_TXP_FL_OFDM = 4, + IWL_EEPROM_ENH_TXP_FL_40MHZ = 8, + IWL_EEPROM_ENH_TXP_FL_HT_AP = 16, + IWL_EEPROM_ENH_TXP_FL_RES1 = 32, + IWL_EEPROM_ENH_TXP_FL_RES2 = 64, + IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = 128, +}; + +struct iwl_eeprom_enhanced_txpwr { + u8 flags; + u8 channel; + s8 chain_a_max; + s8 chain_b_max; + s8 chain_c_max; + u8 delta_20_in_40; + s8 mimo2_max; + s8 mimo3_max; +}; + +enum iwl_phy_ops_subcmd_ids { + CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0, + CTDP_CONFIG_CMD = 3, + TEMP_REPORTING_THRESHOLDS_CMD = 4, + PER_CHAIN_LIMIT_OFFSET_CMD = 5, + PER_PLATFORM_ANT_GAIN_CMD = 7, + AP_TX_POWER_CONSTRAINTS_CMD = 12, + CT_KILL_NOTIFICATION = 254, + DTS_MEASUREMENT_NOTIF_WIDE = 255, +}; + +enum iwl_dts_measurement_flags { + DTS_TRIGGER_CMD_FLAGS_TEMP = 1, + DTS_TRIGGER_CMD_FLAGS_VOLT = 2, +}; + +struct iwl_dts_measurement_cmd { + __le32 flags; +}; + +enum iwl_dts_control_measurement_mode { + DTS_AUTOMATIC = 0, + DTS_REQUEST_READ = 1, + DTS_OVER_WRITE = 2, + DTS_DIRECT_WITHOUT_MEASURE = 3, +}; + +struct iwl_ext_dts_measurement_cmd { + __le32 control_mode; + __le32 temperature; + __le32 sensor; + __le32 avg_factor; + __le32 bit_mode; + __le32 step_duration; +}; + +struct iwl_dts_measurement_notif_v1 { + __le32 temp; + __le32 voltage; +}; + +struct iwl_dts_measurement_notif_v2 { + __le32 temp; + __le32 voltage; + __le32 threshold_idx; +}; + +struct iwl_dts_measurement_resp { + __le32 temp; +}; + +struct ct_kill_notif { + __le16 temperature; + u8 dts; + u8 scheme; +}; + +enum iwl_mvm_ctdp_cmd_operation { + CTDP_CMD_OPERATION_START = 1, + CTDP_CMD_OPERATION_STOP = 2, + CTDP_CMD_OPERATION_REPORT = 4, +}; + +struct iwl_mvm_ctdp_cmd { + __le32 operation; + __le32 budget; + __le32 window_size; +}; + +struct temp_report_ths_cmd { + __le32 num_temps; + __le16 thresholds[8]; +}; + +enum iwl_mvm_init_status { + IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = 1, + IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = 2, +}; + +struct iwl_trip_walk_data { + __le16 *thresholds; + int count; +}; + +struct rtw_coex_hid_info_a { + u8 cmd_id; + u8 len; + u8 subid; + u8 handle; + u8 vendor; + u8 name[3]; +}; + +enum coex_mp_info_op { + BT_MP_INFO_OP_PATCH_VER = 0, + BT_MP_INFO_OP_READ_REG = 17, + BT_MP_INFO_OP_SUPP_FEAT = 42, + BT_MP_INFO_OP_SUPP_VER = 43, + BT_MP_INFO_OP_SCAN_TYPE = 45, + BT_MP_INFO_OP_LNA_CONSTRAINT = 50, +}; + +enum coex_set_ant_phase { + COEX_SET_ANT_INIT = 0, + COEX_SET_ANT_WONLY = 1, + COEX_SET_ANT_WOFF = 2, + COEX_SET_ANT_2G = 3, + COEX_SET_ANT_5G = 4, + COEX_SET_ANT_POWERON = 5, + COEX_SET_ANT_2G_WLBT = 6, + COEX_SET_ANT_2G_FREERUN = 7, + COEX_SET_ANT_MAX = 8, +}; + +enum coex_runreason { + COEX_RSN_2GSCANSTART = 0, + COEX_RSN_5GSCANSTART = 1, + COEX_RSN_SCANFINISH = 2, + COEX_RSN_2GSWITCHBAND = 3, + COEX_RSN_5GSWITCHBAND = 4, + COEX_RSN_2GCONSTART = 5, + COEX_RSN_5GCONSTART = 6, + COEX_RSN_2GCONFINISH = 7, + COEX_RSN_5GCONFINISH = 8, + COEX_RSN_2GMEDIA = 9, + COEX_RSN_5GMEDIA = 10, + COEX_RSN_MEDIADISCON = 11, + COEX_RSN_BTINFO = 12, + COEX_RSN_LPS = 13, + COEX_RSN_WLSTATUS = 14, + COEX_RSN_BTSTATUS = 15, + COEX_RSN_MAX = 16, +}; + +enum 
coex_gnt_setup_state { + COEX_GNT_SET_HW_PTA = 0, + COEX_GNT_SET_SW_LOW = 1, + COEX_GNT_SET_SW_HIGH = 3, +}; + +enum coex_ext_ant_switch_pos_type { + COEX_SWITCH_TO_BT = 0, + COEX_SWITCH_TO_WLG = 1, + COEX_SWITCH_TO_WLA = 2, + COEX_SWITCH_TO_NOCARE = 3, + COEX_SWITCH_TO_WLG_BT = 4, + COEX_SWITCH_TO_MAX = 5, +}; + +enum coex_ext_ant_switch_ctrl_type { + COEX_SWITCH_CTRL_BY_BBSW = 0, + COEX_SWITCH_CTRL_BY_PTA = 1, + COEX_SWITCH_CTRL_BY_ANTDIV = 2, + COEX_SWITCH_CTRL_BY_MAC = 3, + COEX_SWITCH_CTRL_BY_BT = 4, + COEX_SWITCH_CTRL_BY_FW = 5, + COEX_SWITCH_CTRL_MAX = 6, +}; + +enum coex_algorithm { + COEX_ALGO_NOPROFILE = 0, + COEX_ALGO_HFP = 1, + COEX_ALGO_HID = 2, + COEX_ALGO_A2DP = 3, + COEX_ALGO_PAN = 4, + COEX_ALGO_A2DP_HID = 5, + COEX_ALGO_A2DP_PAN = 6, + COEX_ALGO_PAN_HID = 7, + COEX_ALGO_A2DP_PAN_HID = 8, + COEX_ALGO_MAX = 9, +}; + +enum coex_bt_profile { + BPM_NOPROFILE = 0, + BPM_HFP = 1, + BPM_HID = 2, + BPM_A2DP = 4, + BPM_PAN = 8, + BPM_HID_HFP = 3, + BPM_A2DP_HFP = 5, + BPM_A2DP_HID = 6, + BPM_A2DP_HID_HFP = 7, + BPM_PAN_HFP = 9, + BPM_PAN_HID = 10, + BPM_PAN_HID_HFP = 11, + BPM_PAN_A2DP = 12, + BPM_PAN_A2DP_HFP = 13, + BPM_PAN_A2DP_HID = 14, + BPM_PAN_A2DP_HID_HFP = 15, +}; + +enum coex_wl_link_mode { + COEX_WLINK_2G1PORT = 0, + COEX_WLINK_5G = 3, + COEX_WLINK_2GFREE = 7, + COEX_WLINK_MAX = 8, +}; + +enum coex_wl2bt_scoreboard { + COEX_SCBD_ACTIVE = 1, + COEX_SCBD_ONOFF = 2, + COEX_SCBD_SCAN = 4, + COEX_SCBD_UNDERTEST = 8, + COEX_SCBD_RXGAIN = 16, + COEX_SCBD_BT_RFK = 32, + COEX_SCBD_WLBUSY = 64, + COEX_SCBD_EXTFEM = 256, + COEX_SCBD_TDMA = 512, + COEX_SCBD_FIX2M = 1024, + COEX_SCBD_ALL = 65535, +}; + +enum coex_power_save_type { + COEX_PS_WIFI_NATIVE = 0, + COEX_PS_LPS_ON = 1, + COEX_PS_LPS_OFF = 2, +}; + +enum coex_rssi_state { + COEX_RSSI_STATE_HIGH = 0, + COEX_RSSI_STATE_MEDIUM = 1, + COEX_RSSI_STATE_LOW = 2, + COEX_RSSI_STATE_STAY_HIGH = 3, + COEX_RSSI_STATE_STAY_MEDIUM = 4, + COEX_RSSI_STATE_STAY_LOW = 5, +}; + +enum coex_notify_type_ips { + COEX_IPS_LEAVE = 0, + COEX_IPS_ENTER = 1, +}; + +enum coex_notify_type_lps { + COEX_LPS_DISABLE = 0, + COEX_LPS_ENABLE = 1, +}; + +enum coex_notify_type_scan { + COEX_SCAN_FINISH = 0, + COEX_SCAN_START = 1, + COEX_SCAN_START_2G = 2, + COEX_SCAN_START_5G = 3, +}; + +enum coex_notify_type_switchband { + COEX_NOT_SWITCH = 0, + COEX_SWITCH_TO_24G = 1, + COEX_SWITCH_TO_5G = 2, + COEX_SWITCH_TO_24G_NOFORSCAN = 3, +}; + +enum coex_notify_type_associate { + COEX_ASSOCIATE_FINISH = 0, + COEX_ASSOCIATE_START = 1, + COEX_ASSOCIATE_5G_FINISH = 2, + COEX_ASSOCIATE_5G_START = 3, +}; + +enum coex_notify_type_media_status { + COEX_MEDIA_DISCONNECT = 0, + COEX_MEDIA_CONNECT = 1, + COEX_MEDIA_CONNECT_5G = 2, +}; + +enum coex_bt_status { + COEX_BTSTATUS_NCON_IDLE = 0, + COEX_BTSTATUS_CON_IDLE = 1, + COEX_BTSTATUS_INQ_PAGE = 2, + COEX_BTSTATUS_ACL_BUSY = 3, + COEX_BTSTATUS_SCO_BUSY = 4, + COEX_BTSTATUS_ACL_SCO_BUSY = 5, + COEX_BTSTATUS_MAX = 6, +}; + +enum coex_wl_tput_dir { + COEX_WL_TPUT_TX = 0, + COEX_WL_TPUT_RX = 1, + COEX_WL_TPUT_MAX = 2, +}; + +enum coex_wl_priority_mask { + COEX_WLPRI_RX_RSP = 2, + COEX_WLPRI_TX_RSP = 3, + COEX_WLPRI_TX_BEACON = 4, + COEX_WLPRI_TX_OFDM = 11, + COEX_WLPRI_TX_CCK = 12, + COEX_WLPRI_TX_BEACONQ = 27, + COEX_WLPRI_RX_CCK = 28, + COEX_WLPRI_RX_OFDM = 29, + COEX_WLPRI_MAX = 30, +}; + +enum coex_pstdma_type { + COEX_PSTDMA_FORCE_LPSOFF = 0, + COEX_PSTDMA_FORCE_LPSON = 1, + COEX_PSTDMA_MAX = 2, +}; + +enum coex_btrssi_type { + COEX_BTRSSI_RATIO = 0, + COEX_BTRSSI_DBM = 1, + COEX_BTRSSI_MAX = 2, +}; + +struct rtw_c2h_cmd { + u8 
id; + u8 seq; + u8 payload[0]; +}; + +struct rtw_coex_info_req { + u8 seq; + u8 op_code; + u8 para1; + u8 para2; + u8 para3; +}; + +struct ehci_stats { + long unsigned int normal; + long unsigned int error; + long unsigned int iaa; + long unsigned int lost_iaa; + long unsigned int complete; + long unsigned int unlink; +}; + +struct ehci_per_sched { + struct usb_device *udev; + struct usb_host_endpoint *ep; + struct list_head ps_list; + u16 tt_usecs; + u16 cs_mask; + u16 period; + u16 phase; + u8 bw_phase; + u8 phase_uf; + u8 usecs; + u8 c_usecs; + u8 bw_uperiod; + u8 bw_period; +}; + +enum ehci_rh_state { + EHCI_RH_HALTED = 0, + EHCI_RH_SUSPENDED = 1, + EHCI_RH_RUNNING = 2, + EHCI_RH_STOPPING = 3, +}; + +enum ehci_hrtimer_event { + EHCI_HRTIMER_POLL_ASS = 0, + EHCI_HRTIMER_POLL_PSS = 1, + EHCI_HRTIMER_POLL_DEAD = 2, + EHCI_HRTIMER_UNLINK_INTR = 3, + EHCI_HRTIMER_FREE_ITDS = 4, + EHCI_HRTIMER_ACTIVE_UNLINK = 5, + EHCI_HRTIMER_START_UNLINK_INTR = 6, + EHCI_HRTIMER_ASYNC_UNLINKS = 7, + EHCI_HRTIMER_IAA_WATCHDOG = 8, + EHCI_HRTIMER_DISABLE_PERIODIC = 9, + EHCI_HRTIMER_DISABLE_ASYNC = 10, + EHCI_HRTIMER_IO_WATCHDOG = 11, + EHCI_HRTIMER_NUM_EVENTS = 12, +}; + +struct ehci_caps; + +struct ehci_regs; + +struct ehci_dbg_port; + +struct ehci_qh; + +union ehci_shadow; + +struct ehci_itd; + +struct ehci_sitd; + +struct ehci_hcd { + enum ehci_hrtimer_event next_hrtimer_event; + unsigned int enabled_hrtimer_events; + ktime_t hr_timeouts[12]; + struct hrtimer hrtimer; + int PSS_poll_count; + int ASS_poll_count; + int died_poll_count; + struct ehci_caps *caps; + struct ehci_regs *regs; + struct ehci_dbg_port *debug; + __u32 hcs_params; + spinlock_t lock; + enum ehci_rh_state rh_state; + bool scanning: 1; + bool need_rescan: 1; + bool intr_unlinking: 1; + bool iaa_in_progress: 1; + bool async_unlinking: 1; + bool shutdown: 1; + struct ehci_qh *qh_scan_next; + struct ehci_qh *async; + struct ehci_qh *dummy; + struct list_head async_unlink; + struct list_head async_idle; + unsigned int async_unlink_cycle; + unsigned int async_count; + __le32 old_current; + __le32 old_token; + unsigned int periodic_size; + __le32 *periodic; + dma_addr_t periodic_dma; + struct list_head intr_qh_list; + unsigned int i_thresh; + union ehci_shadow *pshadow; + struct list_head intr_unlink_wait; + struct list_head intr_unlink; + unsigned int intr_unlink_wait_cycle; + unsigned int intr_unlink_cycle; + unsigned int now_frame; + unsigned int last_iso_frame; + unsigned int intr_count; + unsigned int isoc_count; + unsigned int periodic_count; + unsigned int uframe_periodic_max; + struct list_head cached_itd_list; + struct ehci_itd *last_itd_to_free; + struct list_head cached_sitd_list; + struct ehci_sitd *last_sitd_to_free; + long unsigned int reset_done[15]; + long unsigned int bus_suspended; + long unsigned int companion_ports; + long unsigned int owned_ports; + long unsigned int port_c_suspend; + long unsigned int suspended_ports; + long unsigned int resuming_ports; + struct dma_pool *qh_pool; + struct dma_pool *qtd_pool; + struct dma_pool *itd_pool; + struct dma_pool *sitd_pool; + unsigned int random_frame; + long unsigned int next_statechange; + long: 32; + ktime_t last_periodic_enable; + u32 command; + unsigned int no_selective_suspend: 1; + unsigned int has_fsl_port_bug: 1; + unsigned int has_fsl_hs_errata: 1; + unsigned int has_fsl_susp_errata: 1; + unsigned int has_ci_pec_bug: 1; + unsigned int big_endian_mmio: 1; + unsigned int big_endian_desc: 1; + unsigned int big_endian_capbase: 1; + unsigned int has_amcc_usb23: 1; + 
unsigned int need_io_watchdog: 1; + unsigned int amd_pll_fix: 1; + unsigned int use_dummy_qh: 1; + unsigned int has_synopsys_hc_bug: 1; + unsigned int frame_index_bug: 1; + unsigned int need_oc_pp_cycle: 1; + unsigned int imx28_write_fix: 1; + unsigned int spurious_oc: 1; + unsigned int is_aspeed: 1; + unsigned int zx_wakeup_clear_needed: 1; + __le32 *ohci_hcctrl_reg; + unsigned int has_hostpc: 1; + unsigned int has_tdi_phy_lpm: 1; + unsigned int has_ppcd: 1; + u8 sbrn; + struct ehci_stats stats; + struct dentry *debug_dir; + u8 bandwidth[64]; + u8 tt_budget[64]; + struct list_head tt_list; + long: 32; + long unsigned int priv[0]; +}; + +struct ehci_caps { + u32 hc_capbase; + u32 hcs_params; + u32 hcc_params; + u8 portroute[8]; +}; + +struct ehci_regs { + u32 command; + u32 status; + u32 intr_enable; + u32 frame_index; + u32 segment; + u32 frame_list; + u32 async_next; + u32 reserved1[2]; + u32 txfill_tuning; + u32 reserved2[6]; + u32 configured_flag; + union { + u32 port_status[15]; + struct { + u32 reserved3[9]; + u32 usbmode; + }; + }; + union { + struct { + u32 reserved4; + u32 hostpc[15]; + }; + u32 brcm_insnreg[4]; + }; + u32 reserved5[2]; + u32 usbmode_ex; +}; + +struct ehci_dbg_port { + u32 control; + u32 pids; + u32 data03; + u32 data47; + u32 address; +}; + +struct ehci_fstn; + +union ehci_shadow { + struct ehci_qh *qh; + struct ehci_itd *itd; + struct ehci_sitd *sitd; + struct ehci_fstn *fstn; + __le32 *hw_next; + void *ptr; +}; + +struct ehci_qh_hw; + +struct ehci_qtd; + +struct ehci_qh { + struct ehci_qh_hw *hw; + dma_addr_t qh_dma; + union ehci_shadow qh_next; + struct list_head qtd_list; + struct list_head intr_node; + struct ehci_qtd *dummy; + struct list_head unlink_node; + struct ehci_per_sched ps; + unsigned int unlink_cycle; + u8 qh_state; + u8 xacterrs; + u8 unlink_reason; + u8 gap_uf; + unsigned int is_out: 1; + unsigned int clearing_tt: 1; + unsigned int dequeue_during_giveback: 1; + unsigned int should_be_inactive: 1; +}; + +struct ehci_iso_stream; + +struct ehci_itd { + __le32 hw_next; + __le32 hw_transaction[8]; + __le32 hw_bufp[7]; + __le32 hw_bufp_hi[7]; + dma_addr_t itd_dma; + union ehci_shadow itd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head itd_list; + unsigned int frame; + unsigned int pg; + unsigned int index[8]; + long: 32; +}; + +struct ehci_sitd { + __le32 hw_next; + __le32 hw_fullspeed_ep; + __le32 hw_uframe; + __le32 hw_results; + __le32 hw_buf[2]; + __le32 hw_backpointer; + __le32 hw_buf_hi[2]; + dma_addr_t sitd_dma; + union ehci_shadow sitd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head sitd_list; + unsigned int frame; + unsigned int index; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_qtd { + __le32 hw_next; + __le32 hw_alt_next; + __le32 hw_token; + __le32 hw_buf[5]; + __le32 hw_buf_hi[5]; + dma_addr_t qtd_dma; + struct list_head qtd_list; + struct urb *urb; + size_t length; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_fstn { + __le32 hw_next; + __le32 hw_prev; + dma_addr_t fstn_dma; + union ehci_shadow fstn_next; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_qh_hw { + __le32 hw_next; + __le32 hw_info1; + __le32 hw_info2; + __le32 hw_current; + __le32 hw_qtd_next; + __le32 hw_alt_next; + __le32 hw_token; + __le32 hw_buf[5]; + __le32 hw_buf_hi[5]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_iso_stream { + 
struct ehci_qh_hw *hw; + u8 bEndpointAddress; + u8 highspeed; + struct list_head td_list; + struct list_head free_list; + struct ehci_per_sched ps; + unsigned int next_uframe; + __le32 splits; + u16 uperiod; + u16 maxp; + unsigned int bandwidth; + __le32 buf0; + __le32 buf1; + __le32 buf2; + __le32 address; +}; + +struct ehci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*port_power)(struct usb_hcd *, int, bool); +}; + +struct vivaldi_data { + u32 function_row_physmap[24]; + unsigned int num_function_row_keys; +}; + +typedef struct serio *class_serio_pause_rx_t; + +enum ps2_disposition { + PS2_PROCESS = 0, + PS2_IGNORE = 1, + PS2_ERROR = 2, +}; + +struct ps2dev; + +typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8, unsigned int); + +typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8); + +struct ps2dev { + struct serio *serio; + struct mutex cmd_mutex; + wait_queue_head_t wait; + long unsigned int flags; + u8 cmdbuf[8]; + u8 cmdcnt; + u8 nak; + ps2_pre_receive_handler_t pre_receive_handler; + ps2_receive_handler_t receive_handler; +}; + +struct atkbd { + struct ps2dev ps2dev; + struct input_dev *dev; + char name[64]; + char phys[32]; + short unsigned int id; + short unsigned int keycode[512]; + long unsigned int force_release_mask[16]; + unsigned char set; + bool translated; + bool extra; + bool write; + bool softrepeat; + bool softraw; + bool scroll; + bool enabled; + unsigned char emul; + bool resend; + bool release; + long unsigned int xl_bit; + unsigned int last; + long unsigned int time; + long unsigned int err_count; + struct delayed_work event_work; + long unsigned int event_jiffies; + long unsigned int event_mask; + struct mutex mutex; + struct vivaldi_data vdata; +}; + +struct thermal_hwmon_device { + char type[20]; + struct device *device; + int count; + struct list_head tz_list; + struct list_head node; +}; + +struct thermal_hwmon_attr { + struct device_attribute attr; + char name[16]; +}; + +struct thermal_hwmon_temp { + struct list_head hwmon_node; + struct thermal_zone_device *tz; + struct thermal_hwmon_attr temp_input; + struct thermal_hwmon_attr temp_crit; +}; + +struct subsys_interface { + const char *name; + const struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *, struct subsys_interface *); + void (*remove_dev)(struct device *, struct subsys_interface *); +}; + +struct cpufreq_policy_data { + struct cpufreq_cpuinfo cpuinfo; + struct cpufreq_frequency_table *freq_table; + unsigned int cpu; + unsigned int min; + unsigned int max; +}; + +struct freq_attr { + struct attribute attr; + ssize_t (*show)(struct cpufreq_policy *, char *); + ssize_t (*store)(struct cpufreq_policy *, const char *, size_t); +}; + +struct cpufreq_driver { + char name[16]; + u16 flags; + void *driver_data; + int (*init)(struct cpufreq_policy *); + int (*verify)(struct cpufreq_policy_data *); + int (*setpolicy)(struct cpufreq_policy *); + int (*target)(struct cpufreq_policy *, unsigned int, unsigned int); + int (*target_index)(struct cpufreq_policy *, unsigned int); + unsigned int (*fast_switch)(struct cpufreq_policy *, unsigned int); + void (*adjust_perf)(unsigned int, long unsigned int, long unsigned int, long unsigned int); + unsigned int (*get_intermediate)(struct cpufreq_policy *, unsigned int); + int (*target_intermediate)(struct cpufreq_policy *, unsigned int); + unsigned int (*get)(unsigned int); + void (*update_limits)(unsigned int); + int (*bios_limit)(int, unsigned int *); + int 
(*online)(struct cpufreq_policy *); + int (*offline)(struct cpufreq_policy *); + void (*exit)(struct cpufreq_policy *); + int (*suspend)(struct cpufreq_policy *); + int (*resume)(struct cpufreq_policy *); + void (*ready)(struct cpufreq_policy *); + struct freq_attr **attr; + bool boost_enabled; + int (*set_boost)(struct cpufreq_policy *, int); + void (*register_em)(struct cpufreq_policy *); +}; + +enum cpu_pm_event { + CPU_PM_ENTER = 0, + CPU_PM_ENTER_FAILED = 1, + CPU_PM_EXIT = 2, + CPU_CLUSTER_PM_ENTER = 3, + CPU_CLUSTER_PM_ENTER_FAILED = 4, + CPU_CLUSTER_PM_EXIT = 5, +}; + +enum perf_hw_id { + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + PERF_COUNT_HW_MAX = 10, +}; + +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + PERF_COUNT_HW_CACHE_MAX = 7, +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_MAX = 3, +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + PERF_COUNT_HW_CACHE_RESULT_MAX = 2, +}; + +struct arm_pmu; + +struct pmu_hw_events { + struct perf_event *events[32]; + long unsigned int used_mask[1]; + struct arm_pmu *percpu_pmu; + int irq; +}; + +struct arm_pmu { + struct pmu pmu; + cpumask_t supported_cpus; + char *name; + int pmuver; + irqreturn_t (*handle_irq)(struct arm_pmu *); + void (*enable)(struct perf_event *); + void (*disable)(struct perf_event *); + int (*get_event_idx)(struct pmu_hw_events *, struct perf_event *); + void (*clear_event_idx)(struct pmu_hw_events *, struct perf_event *); + int (*set_event_filter)(struct hw_perf_event *, struct perf_event_attr *); + u64 (*read_counter)(struct perf_event *); + void (*write_counter)(struct perf_event *, u64); + void (*start)(struct arm_pmu *); + void (*stop)(struct arm_pmu *); + void (*reset)(void *); + int (*map_event)(struct perf_event *); + long unsigned int cntr_mask[1]; + bool secure_access; + long unsigned int pmceid_bitmap[2]; + long unsigned int pmceid_ext_bitmap[2]; + struct platform_device *plat_device; + struct pmu_hw_events *hw_events; + struct hlist_node node; + struct notifier_block cpu_pm_nb; + const struct attribute_group *attr_groups[5]; + long: 32; + u64 reg_pmmir; + long unsigned int acpi_cpuid; + long: 32; +}; + +enum armpmu_attr_groups { + ARMPMU_ATTR_GROUP_COMMON = 0, + ARMPMU_ATTR_GROUP_EVENTS = 1, + ARMPMU_ATTR_GROUP_FORMATS = 2, + ARMPMU_ATTR_GROUP_CAPS = 3, + ARMPMU_NR_ATTR_GROUPS = 4, +}; + +struct pmu_irq_ops { + void (*enable_pmuirq)(unsigned int); + void (*disable_pmuirq)(unsigned int); + void (*free_pmuirq)(unsigned int, int, void *); +}; + +enum netdev_xdp_act { + NETDEV_XDP_ACT_BASIC = 1, + NETDEV_XDP_ACT_REDIRECT = 2, + NETDEV_XDP_ACT_NDO_XMIT = 4, + NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, + NETDEV_XDP_ACT_HW_OFFLOAD = 16, + NETDEV_XDP_ACT_RX_SG = 32, + NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + NETDEV_XDP_ACT_MASK = 127, +}; + +enum netdev_xdp_rx_metadata { + NETDEV_XDP_RX_METADATA_TIMESTAMP = 1, + 
NETDEV_XDP_RX_METADATA_HASH = 2, + NETDEV_XDP_RX_METADATA_VLAN_TAG = 4, +}; + +enum netdev_xsk_flags { + NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, + NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, +}; + +enum netdev_qstats_scope { + NETDEV_QSTATS_SCOPE_QUEUE = 1, +}; + +enum { + NETDEV_A_DEV_IFINDEX = 1, + NETDEV_A_DEV_PAD = 2, + NETDEV_A_DEV_XDP_FEATURES = 3, + NETDEV_A_DEV_XDP_ZC_MAX_SEGS = 4, + NETDEV_A_DEV_XDP_RX_METADATA_FEATURES = 5, + NETDEV_A_DEV_XSK_FEATURES = 6, + __NETDEV_A_DEV_MAX = 7, + NETDEV_A_DEV_MAX = 6, +}; + +enum { + NETDEV_A_PAGE_POOL_ID = 1, + NETDEV_A_PAGE_POOL_IFINDEX = 2, + NETDEV_A_PAGE_POOL_NAPI_ID = 3, + NETDEV_A_PAGE_POOL_INFLIGHT = 4, + NETDEV_A_PAGE_POOL_INFLIGHT_MEM = 5, + NETDEV_A_PAGE_POOL_DETACH_TIME = 6, + NETDEV_A_PAGE_POOL_DMABUF = 7, + __NETDEV_A_PAGE_POOL_MAX = 8, + NETDEV_A_PAGE_POOL_MAX = 7, +}; + +enum { + NETDEV_A_NAPI_IFINDEX = 1, + NETDEV_A_NAPI_ID = 2, + NETDEV_A_NAPI_IRQ = 3, + NETDEV_A_NAPI_PID = 4, + NETDEV_A_NAPI_DEFER_HARD_IRQS = 5, + NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT = 6, + NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT = 7, + __NETDEV_A_NAPI_MAX = 8, + NETDEV_A_NAPI_MAX = 7, +}; + +enum { + NETDEV_A_QUEUE_ID = 1, + NETDEV_A_QUEUE_IFINDEX = 2, + NETDEV_A_QUEUE_TYPE = 3, + NETDEV_A_QUEUE_NAPI_ID = 4, + NETDEV_A_QUEUE_DMABUF = 5, + __NETDEV_A_QUEUE_MAX = 6, + NETDEV_A_QUEUE_MAX = 5, +}; + +enum { + NETDEV_A_QSTATS_IFINDEX = 1, + NETDEV_A_QSTATS_QUEUE_TYPE = 2, + NETDEV_A_QSTATS_QUEUE_ID = 3, + NETDEV_A_QSTATS_SCOPE = 4, + NETDEV_A_QSTATS_RX_PACKETS = 8, + NETDEV_A_QSTATS_RX_BYTES = 9, + NETDEV_A_QSTATS_TX_PACKETS = 10, + NETDEV_A_QSTATS_TX_BYTES = 11, + NETDEV_A_QSTATS_RX_ALLOC_FAIL = 12, + NETDEV_A_QSTATS_RX_HW_DROPS = 13, + NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS = 14, + NETDEV_A_QSTATS_RX_CSUM_COMPLETE = 15, + NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY = 16, + NETDEV_A_QSTATS_RX_CSUM_NONE = 17, + NETDEV_A_QSTATS_RX_CSUM_BAD = 18, + NETDEV_A_QSTATS_RX_HW_GRO_PACKETS = 19, + NETDEV_A_QSTATS_RX_HW_GRO_BYTES = 20, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS = 21, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES = 22, + NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS = 23, + NETDEV_A_QSTATS_TX_HW_DROPS = 24, + NETDEV_A_QSTATS_TX_HW_DROP_ERRORS = 25, + NETDEV_A_QSTATS_TX_CSUM_NONE = 26, + NETDEV_A_QSTATS_TX_NEEDS_CSUM = 27, + NETDEV_A_QSTATS_TX_HW_GSO_PACKETS = 28, + NETDEV_A_QSTATS_TX_HW_GSO_BYTES = 29, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS = 30, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES = 31, + NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS = 32, + NETDEV_A_QSTATS_TX_STOP = 33, + NETDEV_A_QSTATS_TX_WAKE = 34, + __NETDEV_A_QSTATS_MAX = 35, + NETDEV_A_QSTATS_MAX = 34, +}; + +enum { + NETDEV_A_DMABUF_IFINDEX = 1, + NETDEV_A_DMABUF_QUEUES = 2, + NETDEV_A_DMABUF_FD = 3, + NETDEV_A_DMABUF_ID = 4, + __NETDEV_A_DMABUF_MAX = 5, + NETDEV_A_DMABUF_MAX = 4, +}; + +enum { + NETDEV_CMD_DEV_GET = 1, + NETDEV_CMD_DEV_ADD_NTF = 2, + NETDEV_CMD_DEV_DEL_NTF = 3, + NETDEV_CMD_DEV_CHANGE_NTF = 4, + NETDEV_CMD_PAGE_POOL_GET = 5, + NETDEV_CMD_PAGE_POOL_ADD_NTF = 6, + NETDEV_CMD_PAGE_POOL_DEL_NTF = 7, + NETDEV_CMD_PAGE_POOL_CHANGE_NTF = 8, + NETDEV_CMD_PAGE_POOL_STATS_GET = 9, + NETDEV_CMD_QUEUE_GET = 10, + NETDEV_CMD_NAPI_GET = 11, + NETDEV_CMD_QSTATS_GET = 12, + NETDEV_CMD_BIND_RX = 13, + NETDEV_CMD_NAPI_SET = 14, + __NETDEV_CMD_MAX = 15, + NETDEV_CMD_MAX = 14, +}; + +struct pp_memory_provider_params { + void *mp_priv; +}; + +struct rps_map; + +struct rps_dev_flow_table; + +struct netdev_rx_queue { + struct xdp_rxq_info xdp_rxq; + struct rps_map *rps_map; + struct rps_dev_flow_table *rps_flow_table; + struct kobject kobj; + struct 
net_device *dev; + netdevice_tracker dev_tracker; + struct napi_struct *napi; + struct pp_memory_provider_params mp_params; + long: 32; + long: 32; +}; + +struct netdev_queue_stats_rx { + u64 bytes; + u64 packets; + u64 alloc_fail; + u64 hw_drops; + u64 hw_drop_overruns; + u64 csum_unnecessary; + u64 csum_none; + u64 csum_bad; + u64 hw_gro_packets; + u64 hw_gro_bytes; + u64 hw_gro_wire_packets; + u64 hw_gro_wire_bytes; + u64 hw_drop_ratelimits; +}; + +struct netdev_queue_stats_tx { + u64 bytes; + u64 packets; + u64 hw_drops; + u64 hw_drop_errors; + u64 csum_none; + u64 needs_csum; + u64 hw_gso_packets; + u64 hw_gso_bytes; + u64 hw_gso_wire_packets; + u64 hw_gso_wire_bytes; + u64 hw_drop_ratelimits; + u64 stop; + u64 wake; +}; + +struct genl_dumpit_info { + struct genl_split_ops op; + struct genl_info info; +}; + +enum { + NETDEV_NLGRP_MGMT = 0, + NETDEV_NLGRP_PAGE_POOL = 1, +}; + +struct netdev_nl_dump_ctx { + long unsigned int ifindex; + unsigned int rxq_idx; + unsigned int txq_idx; + unsigned int napi_id; +}; + +struct packet_type { + __be16 type; + bool ignore_outgoing; + struct net_device *dev; + netdevice_tracker dev_tracker; + int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); + bool (*id_match)(struct packet_type *, struct sock *); + struct net *af_packet_net; + void *af_packet_priv; + struct list_head list; +}; + +struct llc_addr { + unsigned char lsap; + unsigned char mac[6]; +}; + +struct llc_sap { + unsigned char state; + unsigned char p_bit; + unsigned char f_bit; + refcount_t refcnt; + int (*rcv_func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + struct llc_addr laddr; + struct list_head node; + spinlock_t sk_lock; + int sk_count; + struct hlist_nulls_head sk_laddr_hash[64]; + struct hlist_head sk_dev_hash[64]; + struct callback_head rcu; +}; + +struct llc_pdu_un { + u8 dsap; + u8 ssap; + u8 ctrl_1; +}; + +struct stp_proto { + unsigned char group_address[6]; + void (*rcv)(const struct stp_proto *, struct sk_buff *, struct net_device *); + void *data; +}; + +enum { + ETHTOOL_A_TS_STAT_UNSPEC = 0, + ETHTOOL_A_TS_STAT_TX_PKTS = 1, + ETHTOOL_A_TS_STAT_TX_LOST = 2, + ETHTOOL_A_TS_STAT_TX_ERR = 3, + __ETHTOOL_A_TS_STAT_CNT = 4, + ETHTOOL_A_TS_STAT_MAX = 3, +}; + +struct tsinfo_reply_data { + struct ethnl_reply_data base; + struct kernel_ethtool_ts_info ts_info; + struct ethtool_ts_stats stats; +}; + +struct nf_conntrack_zone { + u16 id; + u8 flags; + u8 dir; +}; + +enum nft_verdicts { + NFT_CONTINUE = -1, + NFT_BREAK = -2, + NFT_JUMP = -3, + NFT_GOTO = -4, + NFT_RETURN = -5, +}; + +enum nft_range_ops { + NFT_RANGE_EQ = 0, + NFT_RANGE_NEQ = 1, +}; + +enum nft_range_attributes { + NFTA_RANGE_UNSPEC = 0, + NFTA_RANGE_SREG = 1, + NFTA_RANGE_OP = 2, + NFTA_RANGE_FROM_DATA = 3, + NFTA_RANGE_TO_DATA = 4, + __NFTA_RANGE_MAX = 5, +}; + +struct nft_range_expr { + struct nft_data data_from; + struct nft_data data_to; + u8 sreg; + u8 len; + enum nft_range_ops op: 8; + long: 32; +}; + +enum nft_set_flags { + NFT_SET_ANONYMOUS = 1, + NFT_SET_CONSTANT = 2, + NFT_SET_INTERVAL = 4, + NFT_SET_MAP = 8, + NFT_SET_TIMEOUT = 16, + NFT_SET_EVAL = 32, + NFT_SET_OBJECT = 64, + NFT_SET_CONCAT = 128, + NFT_SET_EXPR = 256, +}; + +struct nft_timeout { + u64 timeout; + u64 expiration; +}; + +struct nft_trans_gc { + struct list_head list; + struct net *net; + struct nft_set *set; + u32 seq; + u16 count; + struct nft_elem_priv *priv[256]; + struct 
callback_head rcu; +}; + +struct nftables_pernet { + struct list_head tables; + struct list_head commit_list; + struct list_head commit_set_list; + struct list_head binding_list; + struct list_head module_list; + struct list_head notify_list; + struct mutex commit_mutex; + long: 32; + u64 table_handle; + u64 tstamp; + unsigned int base_seq; + unsigned int gc_seq; + u8 validate_state; + long: 32; +}; + +struct nft_pipapo_elem; + +union nft_pipapo_map_bucket { + struct { + long unsigned int to: 24; + long unsigned int n: 8; + }; + struct nft_pipapo_elem *e; +}; + +struct nft_pipapo_elem { + struct nft_elem_priv priv; + struct nft_set_ext ext; +}; + +struct nft_pipapo_field { + unsigned int rules; + unsigned int bsize; + unsigned int rules_alloc; + u8 groups; + u8 bb; + long unsigned int *lt; + union nft_pipapo_map_bucket *mt; +}; + +struct nft_pipapo_scratch { + u8 map_index; + u32 align_off; + long unsigned int map[0]; +}; + +struct nft_pipapo_match { + u8 field_count; + unsigned int bsize_max; + struct nft_pipapo_scratch **scratch; + struct callback_head rcu; + struct nft_pipapo_field f[0]; +}; + +struct nft_pipapo { + struct nft_pipapo_match *match; + struct nft_pipapo_match *clone; + int width; + long unsigned int last_gc; +}; + +enum { + SKB_FCLONE_UNAVAILABLE = 0, + SKB_FCLONE_ORIG = 1, + SKB_FCLONE_CLONE = 2, +}; + +struct sk_buff_fclones { + struct sk_buff skb1; + struct sk_buff skb2; + refcount_t fclone_ref; + long: 32; +}; + +enum { + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, +}; + +enum sk_pacing { + SK_PACING_NONE = 0, + SK_PACING_NEEDED = 1, + SK_PACING_FQ = 2, +}; + +enum tsq_flags { + TSQF_THROTTLED = 1, + TSQF_QUEUED = 2, + TCPF_TSQ_DEFERRED = 4, + TCPF_WRITE_TIMER_DEFERRED = 8, + TCPF_DELACK_TIMER_DEFERRED = 16, + TCPF_MTU_REDUCED_DEFERRED = 32, + TCPF_ACK_DEFERRED = 64, +}; + +struct tcp_ao_key { + struct hlist_node node; + union tcp_ao_addr addr; + u8 key[80]; + unsigned int tcp_sigpool_id; + unsigned int digest_size; + int l3index; + u8 prefixlen; + u8 family; + u8 keylen; + u8 keyflags; + u8 sndid; + u8 rcvid; + u8 maclen; + struct callback_head rcu; + long: 32; + atomic64_t pkt_good; + atomic64_t pkt_bad; + u8 traffic_keys[0]; +}; + +struct mptcp_out_options {}; + +enum tcp_queue { + TCP_FRAG_IN_WRITE_QUEUE = 0, + TCP_FRAG_IN_RTX_QUEUE = 1, +}; + +struct tcp_key { + union { + struct { + struct tcp_ao_key *ao_key; + char *traffic_key; + u32 sne; + u8 rcv_next; + }; + struct tcp_md5sig_key *md5_key; + }; + enum { + TCP_KEY_NONE = 0, + TCP_KEY_MD5 = 1, + TCP_KEY_AO = 2, + } type; +}; + +struct tcp_out_options { + u16 options; + u16 mss; + u8 ws; + u8 num_sack_blocks; + u8 hash_size; + u8 bpf_opt_len; + __u8 *hash_location; + __u32 tsval; + __u32 tsecr; + struct tcp_fastopen_cookie *fastopen_cookie; + struct mptcp_out_options mptcp; +}; + +struct tsq_tasklet { + struct tasklet_struct tasklet; + struct list_head head; +}; + +struct socket_alloc { + struct socket socket; + struct inode vfs_inode; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct unix_skb_parms { + struct pid *pid; + kuid_t uid; + kgid_t gid; + struct scm_fp_list *fp; + u32 consumed; +}; + +enum unix_vertex_index { + UNIX_VERTEX_INDEX_MARK1 = 0, + UNIX_VERTEX_INDEX_MARK2 = 1, + UNIX_VERTEX_INDEX_START = 2, +}; + +struct ipv6_rpl_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u32 cmpre: 4; + __u32 cmpri: 4; + __u32 reserved: 4; + __u32 pad: 
4; + __u32 reserved1: 16; + union { + struct { + struct {} __empty_addr; + struct in6_addr addr[0]; + }; + struct { + struct {} __empty_data; + __u8 data[0]; + }; + } segments; +}; + +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +struct ioam6_trace_hdr { + __be16 namespace_id; + char: 2; + __u8 overflow: 1; + __u8 nodelen: 5; + __u8 remlen: 7; + union { + __be32 type_be32; + struct { + __u32 bit7: 1; + __u32 bit6: 1; + __u32 bit5: 1; + __u32 bit4: 1; + __u32 bit3: 1; + __u32 bit2: 1; + __u32 bit1: 1; + __u32 bit0: 1; + __u32 bit15: 1; + __u32 bit14: 1; + __u32 bit13: 1; + __u32 bit12: 1; + __u32 bit11: 1; + __u32 bit10: 1; + __u32 bit9: 1; + __u32 bit8: 1; + __u32 bit23: 1; + __u32 bit22: 1; + __u32 bit21: 1; + __u32 bit20: 1; + __u32 bit19: 1; + __u32 bit18: 1; + __u32 bit17: 1; + __u32 bit16: 1; + } type; + }; + __u8 data[0]; +}; + +enum { + IOAM6_ATTR_UNSPEC = 0, + IOAM6_ATTR_NS_ID = 1, + IOAM6_ATTR_NS_DATA = 2, + IOAM6_ATTR_NS_DATA_WIDE = 3, + IOAM6_ATTR_SC_ID = 4, + IOAM6_ATTR_SC_DATA = 5, + IOAM6_ATTR_SC_NONE = 6, + IOAM6_ATTR_PAD = 7, + __IOAM6_ATTR_MAX = 8, +}; + +enum { + IOAM6_CMD_UNSPEC = 0, + IOAM6_CMD_ADD_NAMESPACE = 1, + IOAM6_CMD_DEL_NAMESPACE = 2, + IOAM6_CMD_DUMP_NAMESPACES = 3, + IOAM6_CMD_ADD_SCHEMA = 4, + IOAM6_CMD_DEL_SCHEMA = 5, + IOAM6_CMD_DUMP_SCHEMAS = 6, + IOAM6_CMD_NS_SET_SCHEMA = 7, + __IOAM6_CMD_MAX = 8, +}; + +enum ioam6_event_type { + IOAM6_EVENT_UNSPEC = 0, + IOAM6_EVENT_TRACE = 1, +}; + +enum ioam6_event_attr { + IOAM6_EVENT_ATTR_UNSPEC = 0, + IOAM6_EVENT_ATTR_TRACE_NAMESPACE = 1, + IOAM6_EVENT_ATTR_TRACE_NODELEN = 2, + IOAM6_EVENT_ATTR_TRACE_TYPE = 3, + IOAM6_EVENT_ATTR_TRACE_DATA = 4, + __IOAM6_EVENT_ATTR_MAX = 5, +}; + +struct ioam6_pernet_data { + struct mutex lock; + struct rhashtable namespaces; + struct rhashtable schemas; +}; + +struct ioam6_schema; + +struct ioam6_namespace { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_schema *schema; + __be16 id; + __be32 data; + __be64 data_wide; +}; + +struct ioam6_schema { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_namespace *ns; + u32 id; + int len; + __be32 hdr; + u8 data[0]; +}; + +struct br_config_bpdu { + unsigned int topology_change: 1; + unsigned int topology_change_ack: 1; + bridge_id root; + int root_path_cost; + bridge_id bridge_id; + port_id port_id; + int message_age; + int max_age; + int hello_time; + int forward_delay; +}; + +enum ieee80211_sdata_state_bits { + SDATA_STATE_RUNNING = 0, + SDATA_STATE_OFFCHANNEL = 1, + SDATA_STATE_OFFCHANNEL_BEACON_STOPPED = 2, +}; + +struct link_container { + struct ieee80211_link_data data; + struct ieee80211_bss_conf conf; +}; + +struct delay_timer { + long unsigned int (*read_current_timer)(); + long unsigned int freq; +}; + +struct fdt_errtabent { + const char *str; +}; + +struct klist_waiter { + struct list_head list; + struct klist_node *node; + struct task_struct *process; + int woken; +}; + +typedef struct { + long unsigned int key[2]; +} hsiphash_key_t; + +enum scx_public_consts { + SCX_OPS_NAME_LEN = 128ULL, + SCX_SLICE_DFL = 20000000ULL, + SCX_SLICE_INF = 18446744073709551615ULL, +}; + +enum scx_dsq_id_flags { + SCX_DSQ_FLAG_BUILTIN = 9223372036854775808ULL, + SCX_DSQ_FLAG_LOCAL_ON = 4611686018427387904ULL, + SCX_DSQ_INVALID = 9223372036854775808ULL, + 
SCX_DSQ_GLOBAL = 9223372036854775809ULL, + SCX_DSQ_LOCAL = 9223372036854775810ULL, + SCX_DSQ_LOCAL_ON = 13835058055282163712ULL, + SCX_DSQ_LOCAL_CPU_MASK = 4294967295ULL, +}; + +struct ww_acquire_ctx; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +}; + +struct ww_acquire_ctx { + struct task_struct *task; + long unsigned int stamp; + unsigned int acquired; + short unsigned int wounded; + short unsigned int is_wait_die; +}; + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +struct trace_event_raw_contention_begin { + struct trace_entry ent; + void *lock_addr; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_contention_end { + struct trace_entry ent; + void *lock_addr; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_contention_begin {}; + +struct trace_event_data_offsets_contention_end {}; + +typedef void (*btf_trace_contention_begin)(void *, void *, unsigned int); + +typedef void (*btf_trace_contention_end)(void *, void *, int); + +struct mutex_waiter { + struct list_head list; + struct task_struct *task; + struct ww_acquire_ctx *ww_ctx; +}; + +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED = 1, +}; + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + const char *path; + char **argv; + char **envp; + int wait; + int retval; + int (*init)(struct subprocess_info *, struct cred *); + void (*cleanup)(struct subprocess_info *); + void *data; +}; + +enum tick_dep_bits { + TICK_DEP_BIT_POSIX_TIMER = 0, + TICK_DEP_BIT_PERF_EVENTS = 1, + TICK_DEP_BIT_SCHED = 2, + TICK_DEP_BIT_CLOCK_UNSTABLE = 3, + TICK_DEP_BIT_RCU = 4, + TICK_DEP_BIT_RCU_EXP = 5, +}; + +enum posix_timer_state { + POSIX_TIMER_DISARMED = 0, + POSIX_TIMER_ARMED = 1, + POSIX_TIMER_REQUEUE_PENDING = 2, +}; + +struct action_cache { + long unsigned int allow_native[15]; +}; + +struct notification; + +struct seccomp_filter { + refcount_t refs; + refcount_t users; + bool log; + bool wait_killable_recv; + struct action_cache cache; + struct seccomp_filter *prev; + struct bpf_prog *prog; + struct notification *notif; + struct mutex notify_lock; + wait_queue_head_t wqh; +}; + +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + __u16 seccomp_data; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +struct seccomp_notif_addfd { + __u64 id; + __u32 flags; + __u32 srcfd; + __u32 newfd; + __u32 newfd_flags; +}; + +enum notify_state { + SECCOMP_NOTIFY_INIT = 0, + SECCOMP_NOTIFY_SENT = 1, + SECCOMP_NOTIFY_REPLIED = 2, +}; + +struct seccomp_knotif { + struct task_struct *task; + long: 32; + u64 id; + const struct seccomp_data *data; + enum notify_state state; + int error; + long int val; + u32 flags; + struct completion ready; + struct list_head list; + struct list_head addfd; + long: 32; +}; + +struct seccomp_kaddfd { + struct file *file; + int fd; + unsigned int flags; + __u32 ioctl_flags; + union { + bool setfd; + int ret; + }; + struct completion completion; + struct list_head list; +}; + +struct notification { + atomic_t requests; + u32 flags; + u64 next_id; + struct list_head notifications; +}; + +struct seccomp_log_name { + u32 log; + const char *name; +}; + +struct bpf_iter_num { + __u64 __opaque[1]; +}; + +enum { + BPF_MAX_LOOPS = 
8388608, +}; + +enum bpf_iter_feature { + BPF_ITER_RESCHED = 1, +}; + +struct bpf_iter_target_info { + struct list_head list; + const struct bpf_iter_reg *reg_info; + u32 btf_id; +}; + +struct bpf_iter_link { + struct bpf_link link; + struct bpf_iter_aux_info aux; + struct bpf_iter_target_info *tinfo; +}; + +struct bpf_iter_priv_data { + struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; + struct bpf_prog *prog; + long: 32; + u64 session_id; + u64 seq_num; + bool done_stop; + long: 32; + u8 target_private[0]; +}; + +typedef u64 (*btf_bpf_for_each_map_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_loop)(u32, void *, void *, u64); + +struct bpf_iter_num_kern { + int cur; + int end; +}; + +enum { + BPF_ANY = 0, + BPF_NOEXIST = 1, + BPF_EXIST = 2, + BPF_F_LOCK = 4, +}; + +struct bpf_devmap_val { + __u32 ifindex; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct xdp_dev_bulk_queue { + struct xdp_frame *q[16]; + struct list_head flush_node; + struct net_device *dev; + struct net_device *dev_rx; + struct bpf_prog *xdp_prog; + unsigned int count; +}; + +struct bpf_dtab_netdev { + struct net_device *dev; + struct hlist_node index_hlist; + struct bpf_prog *xdp_prog; + struct callback_head rcu; + unsigned int idx; + struct bpf_devmap_val val; +}; + +struct bpf_dtab { + struct bpf_map map; + struct bpf_dtab_netdev **netdev_map; + struct list_head list; + struct hlist_head *dev_index_head; + spinlock_t index_lock; + unsigned int items; + u32 n_buckets; + long: 32; +}; + +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; + struct rb_node rb; + long unsigned int rb_subtree_last; +}; + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *, struct rb_node *); + void (*copy)(struct rb_node *, struct rb_node *); + void (*rotate)(struct rb_node *, struct rb_node *); +}; + +enum page_walk_lock { + PGWALK_RDLOCK = 0, + PGWALK_WRLOCK = 1, + PGWALK_WRLOCK_VERIFY = 2, +}; + +struct mm_walk; + +struct mm_walk_ops { + int (*pgd_entry)(pgd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*p4d_entry)(p4d_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pud_entry)(pud_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_hole)(long unsigned int, long unsigned int, int, struct mm_walk *); + int (*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *); + int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *); + int (*pre_vma)(long unsigned int, long unsigned int, struct mm_walk *); + void (*post_vma)(struct mm_walk *); + int (*install_pte)(long unsigned int, long unsigned int, pte_t *, struct mm_walk *); + enum page_walk_lock walk_lock; +}; + +enum page_walk_action { + ACTION_SUBTREE = 0, + ACTION_CONTINUE = 1, + ACTION_AGAIN = 2, +}; + +struct mm_walk { + const struct mm_walk_ops *ops; + struct mm_struct *mm; + pgd_t *pgd; + struct vm_area_struct *vma; + enum page_walk_action action; + bool no_vma; + void *private; +}; + +struct sysinfo { + __kernel_long_t uptime; + __kernel_ulong_t loads[3]; + __kernel_ulong_t totalram; + __kernel_ulong_t freeram; + __kernel_ulong_t sharedram; + __kernel_ulong_t bufferram; + __kernel_ulong_t totalswap; + __kernel_ulong_t freeswap; + __u16 procs; + __u16 pad; + 
__kernel_ulong_t totalhigh; + __kernel_ulong_t freehigh; + __u32 mem_unit; + char _f[8]; +}; + +enum { + PERCPU_REF_INIT_ATOMIC = 1, + PERCPU_REF_INIT_DEAD = 2, + PERCPU_REF_ALLOW_REINIT = 4, +}; + +typedef int filler_t(struct file *, struct folio *); + +union swap_header { + struct { + char reserved[4086]; + char magic[10]; + } magic; + struct { + char bootbits[1024]; + __u32 version; + __u32 last_page; + __u32 nr_badpages; + unsigned char sws_uuid[16]; + unsigned char sws_volume[16]; + __u32 padding[117]; + __u32 badpages[1]; + } info; +}; + +struct swap_extent { + struct rb_node rb_node; + long unsigned int start_page; + long unsigned int nr_pages; + long: 32; + sector_t start_block; +}; + +struct dnotify_struct { + struct dnotify_struct *dn_next; + __u32 dn_mask; + int dn_fd; + struct file *dn_filp; + fl_owner_t dn_owner; +}; + +enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_ANY = -1, + FSNOTIFY_OBJ_TYPE_INODE = 0, + FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1, + FSNOTIFY_OBJ_TYPE_SB = 2, + FSNOTIFY_OBJ_TYPE_COUNT = 3, + FSNOTIFY_OBJ_TYPE_DETACHED = 3, +}; + +struct dnotify_mark { + struct fsnotify_mark fsn_mark; + struct dnotify_struct *dn; +}; + +enum hash_algo { + HASH_ALGO_MD4 = 0, + HASH_ALGO_MD5 = 1, + HASH_ALGO_SHA1 = 2, + HASH_ALGO_RIPE_MD_160 = 3, + HASH_ALGO_SHA256 = 4, + HASH_ALGO_SHA384 = 5, + HASH_ALGO_SHA512 = 6, + HASH_ALGO_SHA224 = 7, + HASH_ALGO_RIPE_MD_128 = 8, + HASH_ALGO_RIPE_MD_256 = 9, + HASH_ALGO_RIPE_MD_320 = 10, + HASH_ALGO_WP_256 = 11, + HASH_ALGO_WP_384 = 12, + HASH_ALGO_WP_512 = 13, + HASH_ALGO_TGR_128 = 14, + HASH_ALGO_TGR_160 = 15, + HASH_ALGO_TGR_192 = 16, + HASH_ALGO_SM3_256 = 17, + HASH_ALGO_STREEBOG_256 = 18, + HASH_ALGO_STREEBOG_512 = 19, + HASH_ALGO_SHA3_256 = 20, + HASH_ALGO_SHA3_384 = 21, + HASH_ALGO_SHA3_512 = 22, + HASH_ALGO__LAST = 23, +}; + +enum criteria { + CR_POWER2_ALIGNED = 0, + CR_GOAL_LEN_FAST = 1, + CR_BEST_AVAIL_LEN = 2, + CR_GOAL_LEN_SLOW = 3, + CR_ANY_FREE = 4, + EXT4_MB_NUM_CRS = 5, +}; + +struct ext4_map_blocks { + ext4_fsblk_t m_pblk; + ext4_lblk_t m_lblk; + unsigned int m_len; + unsigned int m_flags; + long: 32; +}; + +struct ext4_dir_entry_hash { + __le32 hash; + __le32 minor_hash; +}; + +struct ext4_dir_entry_2 { + __le32 inode; + __le16 rec_len; + __u8 name_len; + __u8 file_type; + char name[255]; +}; + +struct fname; + +struct dir_private_info { + struct rb_root root; + struct rb_node *curr_node; + struct fname *extra_fname; + long: 32; + loff_t last_pos; + __u32 curr_hash; + __u32 curr_minor_hash; + __u32 next_hash; + long: 32; + u64 cookie; + bool initialized; + long: 32; +}; + +struct fname { + __u32 hash; + __u32 minor_hash; + struct rb_node rb_hash; + struct fname *next; + __u32 inode; + __u8 name_len; + __u8 file_type; + char name[0]; +}; + +struct jbd2_journal_block_tail { + __be32 t_checksum; +}; + +struct jbd2_journal_revoke_header_s { + journal_header_t r_header; + __be32 r_count; +}; + +typedef struct jbd2_journal_revoke_header_s jbd2_journal_revoke_header_t; + +struct recovery_info { + tid_t start_transaction; + tid_t end_transaction; + long unsigned int head_block; + int nr_replays; + int nr_revokes; + int nr_revoke_hits; +}; + +struct getdents_callback { + struct dir_context ctx; + char *name; + long: 32; + u64 ino; + int found; + int sequence; +}; + +struct btrfs_inode_extref { + __le64 parent_objectid; + __le64 index; + __le16 name_len; + __u8 name[0]; +} __attribute__((packed)); + +struct btrfs_truncate_control { + struct btrfs_inode *inode; + long: 32; + u64 new_size; + u64 extents_found; + u64 last_size; + u64 sub_bytes; 
+ u64 ino; + u32 min_type; + bool skip_ref_updates; + bool clear_extent_range; +}; + +struct reserve_ticket { + u64 bytes; + int error; + bool steal; + struct list_head list; + wait_queue_head_t wait; + long: 32; +}; + +typedef int __kernel_key_t; + +typedef short unsigned int __kernel_mode_t; + +typedef short unsigned int __kernel_uid_t; + +typedef short unsigned int __kernel_gid_t; + +typedef __kernel_key_t key_t; + +typedef short unsigned int ushort; + +struct ipc_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + short unsigned int seq; +}; + +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned char __pad1[2]; + short unsigned int seq; + short unsigned int __pad2; + __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; +}; + +struct kern_ipc_perm { + spinlock_t lock; + bool deleted; + int id; + key_t key; + kuid_t uid; + kgid_t gid; + kuid_t cuid; + kgid_t cgid; + umode_t mode; + long unsigned int seq; + void *security; + struct rhash_head khtnode; + struct callback_head rcu; + refcount_t refcount; + long: 32; +}; + +struct sem; + +struct sem_queue; + +struct sem_undo; + +struct semid_ds { + struct ipc_perm sem_perm; + __kernel_old_time_t sem_otime; + __kernel_old_time_t sem_ctime; + struct sem *sem_base; + struct sem_queue *sem_pending; + struct sem_queue **sem_pending_last; + struct sem_undo *undo; + short unsigned int sem_nsems; +}; + +struct sem { + int semval; + struct pid *sempid; + spinlock_t lock; + struct list_head pending_alter; + struct list_head pending_const; + long: 32; + time64_t sem_otime; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sembuf; + +struct sem_queue { + struct list_head list; + struct task_struct *sleeper; + struct sem_undo *undo; + struct pid *pid; + int status; + struct sembuf *sops; + struct sembuf *blocking; + int nsops; + bool alter; + bool dupsop; +}; + +struct sem_undo { + struct list_head list_proc; + struct callback_head rcu; + struct sem_undo_list *ulp; + struct list_head list_id; + int semid; + short int semadj[0]; +}; + +struct semid64_ds { + struct ipc64_perm sem_perm; + long unsigned int sem_otime; + long unsigned int sem_otime_high; + long unsigned int sem_ctime; + long unsigned int sem_ctime_high; + long unsigned int sem_nsems; + long unsigned int __unused3; + long unsigned int __unused4; +}; + +struct sembuf { + short unsigned int sem_num; + short int sem_op; + short int sem_flg; +}; + +struct seminfo { + int semmap; + int semmni; + int semmns; + int semmnu; + int semmsl; + int semopm; + int semume; + int semusz; + int semvmx; + int semaem; +}; + +struct sem_undo_list { + refcount_t refcnt; + spinlock_t lock; + struct list_head list_proc; +}; + +struct ipc_ids { + int in_use; + short unsigned int seq; + struct rw_semaphore rwsem; + struct idr ipcs_idr; + int max_idx; + int last_idx; + struct rhashtable key_ht; +}; + +struct ipc_namespace { + struct ipc_ids ids[3]; + int sem_ctls[4]; + int used_sems; + unsigned int msg_ctlmax; + unsigned int msg_ctlmnb; + unsigned int msg_ctlmni; + long: 32; + struct percpu_counter percpu_msg_bytes; + struct percpu_counter percpu_msg_hdrs; + size_t shm_ctlmax; + size_t shm_ctlall; + long unsigned int shm_tot; + int shm_ctlmni; + int shm_rmid_forced; + struct notifier_block ipcns_nb; + struct vfsmount *mq_mnt; + unsigned int mq_queues_count; + unsigned 
int mq_queues_max; + unsigned int mq_msg_max; + unsigned int mq_msgsize_max; + unsigned int mq_msg_default; + unsigned int mq_msgsize_default; + struct ctl_table_set mq_set; + struct ctl_table_header *mq_sysctls; + struct ctl_table_set ipc_set; + struct ctl_table_header *ipc_sysctls; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct llist_node mnt_llist; + struct ns_common ns; +}; + +struct ipc_params { + key_t key; + int flg; + union { + size_t size; + int nsems; + } u; +}; + +struct ipc_ops { + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); +}; + +struct sem_array { + struct kern_ipc_perm sem_perm; + time64_t sem_ctime; + struct list_head pending_alter; + struct list_head pending_const; + struct list_head list_id; + int sem_nsems; + int complex_count; + unsigned int use_global_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sem sems[0]; +}; + +struct rtattr { + short unsigned int rta_len; + short unsigned int rta_type; +}; + +struct aead_instance { + void (*free)(struct aead_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[64]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct ahash_request { + struct crypto_async_request base; + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + void *priv; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_ahash { + bool using_shash; + unsigned int statesize; + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + unsigned int authkeylen; + unsigned int enckeylen; +}; + +struct authenc_esn_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; +}; + +struct crypto_authenc_esn_ctx { + unsigned int reqoff; + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_esn_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; +}; + +struct subsys_private; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; + struct subsys_private *sp; +}; + +struct disk_stats { + u64 nsecs[4]; + long unsigned int sectors[4]; + long unsigned int ios[4]; + long unsigned int merges[4]; + long unsigned int io_ticks; + local_t in_flight[2]; + long: 32; +}; + +enum stat_group { + STAT_READ = 0, + STAT_WRITE = 1, + STAT_DISCARD = 2, + STAT_FLUSH = 3, + NR_STAT_GROUPS = 4, +}; + +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = 0, + RSEQ_EVENT_SIGNAL_BIT = 1, + RSEQ_EVENT_MIGRATE_BIT = 2, +}; + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ = 0, + BLKG_RWSTAT_WRITE = 1, + BLKG_RWSTAT_SYNC = 2, + BLKG_RWSTAT_ASYNC = 3, + BLKG_RWSTAT_DISCARD = 4, + BLKG_RWSTAT_NR = 5, + BLKG_RWSTAT_TOTAL = 5, +}; + +struct open_how { + __u64 flags; + __u64 mode; + __u64 resolve; +}; + +struct open_flags { + int open_flag; + umode_t mode; + int acc_mode; + int intent; + int lookup_flags; +}; + +struct io_open { + 
struct file *file; + int dfd; + u32 file_slot; + struct filename *filename; + struct open_how how; + long unsigned int nofile; + long: 32; +}; + +struct io_close { + struct file *file; + int fd; + u32 file_slot; +}; + +struct io_fixed_install { + struct file *file; + unsigned int o_flags; +}; + +struct genradix_root; + +struct __genradix { + struct genradix_root *root; +}; + +struct genradix_node { + union { + struct genradix_node *children[128]; + u8 data[512]; + }; +}; + +struct genradix_iter { + size_t offset; + size_t pos; +}; + +struct sha256_state { + u32 state[8]; + u64 count; + u8 buf[64]; +}; + +typedef void sha256_block_fn(struct sha256_state *, const u8 *, int); + +typedef struct { + size_t error; + int lowerBound; + int upperBound; +} ZSTD_bounds; + +typedef enum { + ZSTD_reset_session_only = 1, + ZSTD_reset_parameters = 2, + ZSTD_reset_session_and_parameters = 3, +} ZSTD_ResetDirective; + +typedef enum { + ZSTD_d_windowLogMax = 100, + ZSTD_d_experimentalParam1 = 1000, + ZSTD_d_experimentalParam2 = 1001, + ZSTD_d_experimentalParam3 = 1002, + ZSTD_d_experimentalParam4 = 1003, +} ZSTD_dParameter; + +typedef enum { + ZSTDnit_frameHeader = 0, + ZSTDnit_blockHeader = 1, + ZSTDnit_block = 2, + ZSTDnit_lastBlock = 3, + ZSTDnit_checksum = 4, + ZSTDnit_skippableFrame = 5, +} ZSTD_nextInputType_e; + +typedef struct { + size_t compressedSize; + long: 32; + long long unsigned int decompressedBound; +} ZSTD_frameSizeInfo; + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +typedef enum { + not_streaming = 0, + is_streaming = 1, +} streaming_operation; + +typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); + +enum pci_bus_speed { + PCI_SPEED_33MHz = 0, + PCI_SPEED_66MHz = 1, + PCI_SPEED_66MHz_PCIX = 2, + PCI_SPEED_100MHz_PCIX = 3, + PCI_SPEED_133MHz_PCIX = 4, + PCI_SPEED_66MHz_PCIX_ECC = 5, + PCI_SPEED_100MHz_PCIX_ECC = 6, + PCI_SPEED_133MHz_PCIX_ECC = 7, + PCI_SPEED_66MHz_PCIX_266 = 9, + PCI_SPEED_100MHz_PCIX_266 = 10, + PCI_SPEED_133MHz_PCIX_266 = 11, + AGP_UNKNOWN = 12, + AGP_1X = 13, + AGP_2X = 14, + AGP_4X = 15, + AGP_8X = 16, + PCI_SPEED_66MHz_PCIX_533 = 17, + PCI_SPEED_100MHz_PCIX_533 = 18, + PCI_SPEED_133MHz_PCIX_533 = 19, + PCIE_SPEED_2_5GT = 20, + PCIE_SPEED_5_0GT = 21, + PCIE_SPEED_8_0GT = 22, + PCIE_SPEED_16_0GT = 23, + PCIE_SPEED_32_0GT = 24, + PCIE_SPEED_64_0GT = 25, + PCI_SPEED_UNKNOWN = 255, +}; + +struct pci_slot_attribute { + struct attribute attr; + ssize_t (*show)(struct pci_slot *, char *); + ssize_t (*store)(struct pci_slot *, const char *, size_t); +}; + +struct ldsem_waiter { + struct list_head list; + struct task_struct *task; +}; + +struct wake_irq { + struct device *dev; + unsigned int status; + int irq; + const char *name; +}; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(const struct bus_type *, char *); + ssize_t (*store)(const struct bus_type *, const char *, size_t); +}; + +struct subsys_private { + struct kset subsys; + struct kset *devices_kset; + struct list_head interfaces; + struct mutex mutex; + struct kset *drivers_kset; + struct klist klist_devices; + struct klist klist_drivers; + struct blocking_notifier_head bus_notifier; + unsigned int drivers_autoprobe: 1; + const struct bus_type *bus; + struct device *dev_root; + struct kset glue_dirs; + const struct class *class; + struct lock_class_key lock_key; +}; + +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +struct msdos_partition { + u8 boot_ind; + u8 head; + u8 
sector; + u8 cyl; + u8 sys_ind; + u8 end_head; + u8 end_sector; + u8 end_cyl; + __le32 start_sect; + __le32 nr_sects; +}; + +enum scsi_disposition { + NEEDS_RETRY = 8193, + SUCCESS = 8194, + FAILED = 8195, + QUEUED = 8196, + SOFT_ERROR = 8197, + ADD_TO_MLQUEUE = 8198, + TIMEOUT_ERROR = 8199, + SCSI_RETURN_NOT_HANDLED = 8200, + FAST_IO_FAIL = 8201, +}; + +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = 1, + ETH_TEST_FL_FAILED = 2, + ETH_TEST_FL_EXTERNAL_LB = 4, + ETH_TEST_FL_EXTERNAL_LB_DONE = 8, +}; + +typedef enum { + e1000_undefined___2 = 0, + e1000_82542_rev2_0 = 1, + e1000_82542_rev2_1 = 2, + e1000_82543 = 3, + e1000_82544 = 4, + e1000_82540 = 5, + e1000_82545 = 6, + e1000_82545_rev_3 = 7, + e1000_82546 = 8, + e1000_ce4100 = 9, + e1000_82546_rev_3 = 10, + e1000_82541 = 11, + e1000_82541_rev_2 = 12, + e1000_82547 = 13, + e1000_82547_rev_2 = 14, + e1000_num_macs___2 = 15, +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi = 1, + e1000_eeprom_microwire = 2, + e1000_eeprom_flash = 3, + e1000_eeprom_none = 4, + e1000_num_eeprom_types = 5, +} e1000_eeprom_type; + +typedef enum { + e1000_media_type_copper___2 = 0, + e1000_media_type_fiber___2 = 1, + e1000_media_type_internal_serdes___2 = 2, + e1000_num_media_types___2 = 3, +} e1000_media_type; + +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 255, +} e1000_fc_type; + +struct e1000_shadow_ram___2 { + u16 eeprom_word; + bool modified; +}; + +typedef enum { + e1000_bus_type_unknown___2 = 0, + e1000_bus_type_pci___2 = 1, + e1000_bus_type_pcix___2 = 2, + e1000_bus_type_reserved___2 = 3, +} e1000_bus_type; + +typedef enum { + e1000_bus_speed_unknown___2 = 0, + e1000_bus_speed_33___2 = 1, + e1000_bus_speed_66___2 = 2, + e1000_bus_speed_100___2 = 3, + e1000_bus_speed_120___2 = 4, + e1000_bus_speed_133___2 = 5, + e1000_bus_speed_reserved___2 = 6, +} e1000_bus_speed; + +typedef enum { + e1000_bus_width_unknown___2 = 0, + e1000_bus_width_32___2 = 1, + e1000_bus_width_64___2 = 2, + e1000_bus_width_reserved___2 = 3, +} e1000_bus_width; + +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80 = 1, + e1000_cable_length_80_110 = 2, + e1000_cable_length_110_140 = 3, + e1000_cable_length_140 = 4, + e1000_cable_length_undefined = 255, +} e1000_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower = 1, + e1000_10bt_ext_dist_enable_undefined = 255, +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal___2 = 0, + e1000_rev_polarity_reversed___2 = 1, + e1000_rev_polarity_undefined___2 = 255, +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated = 1, + e1000_downshift_undefined = 255, +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default___2 = 0, + e1000_smart_speed_on___2 = 1, + e1000_smart_speed_off___2 = 2, +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled = 1, + e1000_polarity_reversal_undefined = 255, +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix = 1, + e1000_auto_x_mode_auto1 = 2, + e1000_auto_x_mode_auto2 = 3, + e1000_auto_x_mode_undefined = 255, +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok___2 = 0, + e1000_1000t_rx_status_ok___2 = 1, + e1000_1000t_rx_status_undefined___2 = 255, +} e1000_1000t_rx_status; + +typedef enum { + 
e1000_phy_m88___3 = 0, + e1000_phy_igp___3 = 1, + e1000_phy_8211 = 2, + e1000_phy_8201 = 3, + e1000_phy_undefined = 255, +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default___2 = 0, + e1000_ms_force_master___2 = 1, + e1000_ms_force_slave___2 = 2, + e1000_ms_auto___2 = 3, +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active = 1, + e1000_ffe_config_blocked = 2, +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled = 1, + e1000_dsp_config_activated = 2, + e1000_dsp_config_undefined = 255, +} e1000_dsp_config; + +struct e1000_phy_info___3 { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +struct e1000_rx_desc { + __le64 buffer_addr; + __le16 length; + __le16 csum; + u8 status; + u8 errors; + __le16 special; +}; + +struct e1000_hw_stats___3 { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_hw___3 { + u8 *hw_addr; + u8 *flash_address; + void *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram___2 *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + long unsigned int io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 
forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[6]; + u8 perm_mac_addr[6]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + long unsigned int time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + short unsigned int segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; + u8 *data; + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + void *desc; + dma_addr_t dma; + unsigned int size; + unsigned int count; + unsigned int next_to_use; + unsigned int next_to_clean; + struct e1000_tx_buffer *buffer_info; + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + void *desc; + dma_addr_t dma; + unsigned int size; + unsigned int count; + unsigned int next_to_use; + unsigned int next_to_clean; + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + int cpu; + u16 rdh; + u16 rdt; +}; + +struct e1000_adapter___2 { + long unsigned int active_vlans[128]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + u8 fc_autoneg; + struct e1000_tx_ring *tx_ring; + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + bool (*clean_rx)(struct e1000_adapter___2 *, struct e1000_rx_ring *, int *, int); + void (*alloc_rx_buf)(struct e1000_adapter___2 *, struct e1000_rx_ring *, int); + struct e1000_rx_ring *rx_ring; + struct napi_struct napi; + int num_tx_queues; + int num_rx_queues; + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + long: 32; + u64 gorcl_old; + struct net_device *netdev; + struct pci_dev *pdev; + struct e1000_hw___3 hw; + long: 32; + struct e1000_hw_stats___3 stats; + struct e1000_phy_info___3 phy_info; + struct e1000_phy_stats phy_stats; + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + int msg_enable; + bool tso_force; + bool smart_power_down; + bool quad_port_a; + long unsigned int flags; + u32 eeprom_wol; + int bars; + int need_ioport; + bool discarding; + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; +}; + +enum e1000_state_t___3 { + __E1000_TESTING___2 = 0, + __E1000_RESETTING___2 = 1, + __E1000_DOWN___2 = 2, + __E1000_DISABLED = 3, +}; + +enum { + NETDEV_STATS = 0, + E1000_STATS = 1, 
+}; + +struct e1000_stats { + char stat_string[32]; + int type; + int sizeof_stat; + int stat_offset; +}; + +enum iwl_fw_phy_cfg { + FW_PHY_CFG_RADIO_TYPE_POS = 0, + FW_PHY_CFG_RADIO_TYPE = 3, + FW_PHY_CFG_RADIO_STEP_POS = 2, + FW_PHY_CFG_RADIO_STEP = 12, + FW_PHY_CFG_RADIO_DASH_POS = 4, + FW_PHY_CFG_RADIO_DASH = 48, + FW_PHY_CFG_TX_CHAIN_POS = 16, + FW_PHY_CFG_TX_CHAIN = 983040, + FW_PHY_CFG_RX_CHAIN_POS = 20, + FW_PHY_CFG_RX_CHAIN = 15728640, + FW_PHY_CFG_CHAIN_SAD_POS = 23, + FW_PHY_CFG_CHAIN_SAD_ENABLED = 8388608, + FW_PHY_CFG_CHAIN_SAD_ANT_A = 16777216, + FW_PHY_CFG_CHAIN_SAD_ANT_B = 33554432, + FW_PHY_CFG_SHARED_CLK = 2147483648, +}; + +struct cmd { + u8 cmd_id; + u8 group_id; +}; + +struct iwl_fw_dbg_trigger_cmd { + struct cmd cmds[16]; +}; + +enum iwl_mvm_dqa_txq { + IWL_MVM_DQA_CMD_QUEUE = 0, + IWL_MVM_DQA_AUX_QUEUE = 1, + IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, + IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2, + IWL_MVM_DQA_GCAST_QUEUE = 3, + IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, + IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, + IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, + IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, + IWL_MVM_DQA_MIN_DATA_QUEUE = 10, + IWL_MVM_DQA_MAX_DATA_QUEUE = 30, +}; + +enum iwl_mvm_tx_fifo { + IWL_MVM_TX_FIFO_BK = 0, + IWL_MVM_TX_FIFO_BE = 1, + IWL_MVM_TX_FIFO_VI = 2, + IWL_MVM_TX_FIFO_VO = 3, + IWL_MVM_TX_FIFO_MCAST = 5, + IWL_MVM_TX_FIFO_CMD = 7, +}; + +enum iwl_mvm_fw_esr_recommendation { + ESR_RECOMMEND_LEAVE = 0, + ESR_FORCE_LEAVE = 1, + ESR_RECOMMEND_ENTER = 2, +}; + +struct iwl_mvm_esr_mode_notif { + __le32 action; +}; + +struct iwl_esr_trans_fail_notif { + __le32 link_id; + __le32 activation; + __le32 err_code; +}; + +enum iwl_datapath_monitor_notif_type { + IWL_DP_MON_NOTIF_TYPE_EXT_CCA = 0, +}; + +struct iwl_datapath_monitor_notif { + __le32 type; + u8 mac_id; + u8 reserved[3]; +}; + +enum iwl_thermal_dual_chain_req_events { + THERMAL_DUAL_CHAIN_REQ_ENABLE = 0, + THERMAL_DUAL_CHAIN_REQ_DISABLE = 1, +}; + +struct iwl_thermal_dual_chain_request { + __le32 event; +}; + +enum iwl_statistics_subcmd_ids { + STATISTICS_OPER_NOTIF = 0, + STATISTICS_OPER_PART1_NOTIF = 1, +}; + +enum iwl_regulatory_and_nvm_subcmd_ids { + NVM_ACCESS_COMPLETE = 0, + LARI_CONFIG_CHANGE = 1, + NVM_GET_INFO = 2, + TAS_CONFIG = 3, + SAR_OFFSET_MAPPING_TABLE_CMD = 4, + MCC_ALLOWED_AP_TYPE_CMD = 5, + PNVM_INIT_COMPLETE_NTFY = 254, +}; + +enum iwl_nvm_section_type { + NVM_SECTION_TYPE_SW = 1, + NVM_SECTION_TYPE_REGULATORY = 3, + NVM_SECTION_TYPE_CALIBRATION = 4, + NVM_SECTION_TYPE_PRODUCTION = 5, + NVM_SECTION_TYPE_REGULATORY_SDP = 8, + NVM_SECTION_TYPE_MAC_OVERRIDE = 11, + NVM_SECTION_TYPE_PHY_SKU = 12, + NVM_MAX_NUM_SECTIONS = 13, +}; + +struct iwl_rx_mpdu_desc_v1 { + union { + __le32 rss_hash; + __le32 phy_data2; + }; + union { + __le32 filter_match; + __le32 phy_data3; + }; + __le32 rate_n_flags; + u8 energy_a; + u8 energy_b; + u8 channel; + u8 mac_context; + __le32 gp2_on_air_rise; + union { + __le64 tsf_on_air_rise; + struct { + __le32 phy_data0; + __le32 phy_data1; + }; + }; +}; + +struct iwl_rx_mpdu_desc_v3 { + union { + __le32 filter_match; + __le32 phy_data3; + }; + union { + __le32 rss_hash; + __le32 phy_data2; + }; + __le32 partial_hash; + __be16 raw_xsum; + __le16 reserved_xsum; + __le32 rate_n_flags; + u8 energy_a; + u8 energy_b; + u8 channel; + u8 mac_context; + __le32 gp2_on_air_rise; + union { + __le64 tsf_on_air_rise; + struct { + __le32 phy_data0; + __le32 phy_data1; + }; + }; + __le32 phy_data5; + __le32 reserved[1]; +}; + +struct iwl_rx_mpdu_desc { + __le16 mpdu_len; + u8 mac_flags1; + u8 mac_flags2; + u8 
amsdu_info; + __le16 phy_info; + u8 mac_phy_idx; + union { + struct { + __le16 raw_csum; + union { + __le16 l3l4_flags; + __le16 phy_data4; + }; + }; + __le32 phy_eht_data4; + }; + __le32 status; + __le32 reorder_data; + union { + struct iwl_rx_mpdu_desc_v1 v1; + struct iwl_rx_mpdu_desc_v3 v3; + }; +} __attribute__((packed)); + +struct iwl_mvm_mgmt_mcast_key_cmd { + __le32 ctrl_flags; + u8 igtk[32]; + __le32 key_id; + __le32 sta_id; + __le64 receive_seq_cnt; +}; + +enum iwl_location_subcmd_ids { + TOF_RANGE_REQ_CMD = 0, + TOF_CONFIG_CMD = 1, + TOF_RANGE_ABORT_CMD = 2, + TOF_RANGE_REQ_EXT_CMD = 3, + TOF_RESPONDER_CONFIG_CMD = 4, + TOF_RESPONDER_DYN_CONFIG_CMD = 5, + CSI_HEADER_NOTIFICATION = 250, + CSI_CHUNKS_NOTIFICATION = 251, + TOF_LC_NOTIF = 252, + TOF_RESPONDER_STATS = 253, + TOF_MCSI_DEBUG_NOTIF = 254, + TOF_RANGE_RESPONSE_NOTIF = 255, +}; + +struct iwl_mei_ops { + void (*me_conn_status)(void *, const struct iwl_mei_conn_info *); + void (*rfkill)(void *, bool, bool); + void (*roaming_forbidden)(void *, bool); + void (*sap_connected)(void *); + void (*nic_stolen)(void *); +}; + +struct iwl_mvm_txq { + struct list_head list; + u16 txq_id; + atomic_t tx_request; + long unsigned int state; +}; + +enum iwl_rx_handler_context { + RX_HANDLER_SYNC = 0, + RX_HANDLER_ASYNC_LOCKED = 1, + RX_HANDLER_ASYNC_UNLOCKED = 2, + RX_HANDLER_ASYNC_LOCKED_WIPHY = 3, +}; + +struct iwl_rx_handlers { + u16 cmd_id; + u16 min_size; + enum iwl_rx_handler_context context; + void (*fn)(struct iwl_mvm *, struct iwl_rx_cmd_buffer *); +}; + +struct iwl_mvm_frob_txf_data { + u8 *buf; + size_t buflen; +}; + +struct iwl_async_handler_entry { + struct list_head list; + struct iwl_rx_cmd_buffer rxb; + enum iwl_rx_handler_context context; + void (*fn)(struct iwl_mvm *, struct iwl_rx_cmd_buffer *); +}; + +struct iwl_mvm_reprobe { + struct device *dev; + struct work_struct work; +}; + +enum rxdone_entry_desc_flags { + RXDONE_SIGNAL_PLCP = 1, + RXDONE_SIGNAL_BITRATE = 2, + RXDONE_SIGNAL_MCS = 4, + RXDONE_MY_BSS = 8, + RXDONE_CRYPTO_IV = 16, + RXDONE_CRYPTO_ICV = 32, + RXDONE_L2PAD = 64, +}; + +struct netdev_lag_lower_state_info { + u8 link_up: 1; + u8 tx_enabled: 1; +}; + +struct failover_ops { + int (*slave_pre_register)(struct net_device *, struct net_device *); + int (*slave_register)(struct net_device *, struct net_device *); + int (*slave_pre_unregister)(struct net_device *, struct net_device *); + int (*slave_unregister)(struct net_device *, struct net_device *); + int (*slave_link_change)(struct net_device *, struct net_device *); + int (*slave_name_change)(struct net_device *, struct net_device *); + rx_handler_result_t (*slave_handle_frame)(struct sk_buff **); +}; + +struct failover { + struct list_head list; + struct net_device *failover_dev; + netdevice_tracker dev_tracker; + struct failover_ops *ops; +}; + +struct net_failover_info { + struct net_device *primary_dev; + struct net_device *standby_dev; + struct rtnl_link_stats64 primary_stats; + struct rtnl_link_stats64 standby_stats; + struct rtnl_link_stats64 failover_stats; + spinlock_t stats_lock; + long: 32; +}; + +enum serio_event_type { + SERIO_RESCAN_PORT = 0, + SERIO_RECONNECT_PORT = 1, + SERIO_RECONNECT_SUBTREE = 2, + SERIO_REGISTER_PORT = 3, + SERIO_ATTACH_DRIVER = 4, +}; + +struct serio_event { + enum serio_event_type type; + void *object; + struct module *owner; + struct list_head node; +}; + +struct itco_wdt_platform_data { + char name[32]; + unsigned int version; + bool no_reboot_use_pmc; +}; + +struct i801_priv { + struct i2c_adapter adapter; + long 
unsigned int smba; + unsigned char original_hstcfg; + unsigned char original_hstcnt; + unsigned char original_slvcmd; + struct pci_dev *pci_dev; + unsigned int features; + struct completion done; + u8 status; + u8 cmd; + bool is_read; + int count; + int len; + u8 *data; + struct platform_device *tco_pdev; + bool acpi_reserved; +}; + +struct kthread_work; + +typedef void (*kthread_work_func_t)(struct kthread_work *); + +struct kthread_worker; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + struct kthread_worker *worker; + int canceling; +}; + +struct kthread_worker { + unsigned int flags; + raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; + struct kthread_work *current_work; +}; + +struct kthread_delayed_work { + struct kthread_work work; + struct timer_list timer; +}; + +struct posix_clock; + +struct posix_clock_context; + +struct posix_clock_operations { + struct module *owner; + int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *); + int (*clock_gettime)(struct posix_clock *, struct timespec64 *); + int (*clock_getres)(struct posix_clock *, struct timespec64 *); + int (*clock_settime)(struct posix_clock *, const struct timespec64 *); + long int (*ioctl)(struct posix_clock_context *, unsigned int, long unsigned int); + int (*open)(struct posix_clock_context *, fmode_t); + __poll_t (*poll)(struct posix_clock_context *, struct file *, poll_table *); + int (*release)(struct posix_clock_context *); + ssize_t (*read)(struct posix_clock_context *, uint, char *, size_t); +}; + +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct device *dev; + struct rw_semaphore rwsem; + bool zombie; +}; + +struct posix_clock_context { + struct posix_clock *clk; + void *private_clkdata; +}; + +struct ptp_extts_event { + struct ptp_clock_time t; + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct pps_ktime { + __s64 sec; + __s32 nsec; + __u32 flags; +}; + +struct pps_kparams { + int api_version; + int mode; + struct pps_ktime assert_off_tu; + struct pps_ktime clear_off_tu; +}; + +struct pps_device; + +struct pps_source_info { + char name[32]; + char path[32]; + int mode; + void (*echo)(struct pps_device *, int, void *); + struct module *owner; + struct device *dev; +}; + +struct pps_device { + struct pps_source_info info; + struct pps_kparams params; + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; + unsigned int last_ev; + wait_queue_head_t queue; + unsigned int id; + const void *lookup_cookie; + struct cdev cdev; + struct device *dev; + struct fasync_struct *async_queue; + spinlock_t lock; + long: 32; +}; + +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + +struct timestamp_event_queue { + struct ptp_extts_event buf[128]; + int head; + int tail; + spinlock_t lock; + struct list_head qlist; + long unsigned int *mask; + struct dentry *debugfs_instance; + struct debugfs_u32_array dfs_bitmap; + long: 32; +}; + +struct ptp_clock { + struct posix_clock clock; + long: 32; + struct device dev; + struct ptp_clock_info *info; + dev_t devid; + int index; + struct pps_device *pps_source; + long int dialed_frequency; + struct list_head tsevqs; + spinlock_t tsevqs_lock; + struct mutex pincfg_mux; + wait_queue_head_t tsev_wq; + int defunct; + struct device_attribute *pin_dev_attr; + struct attribute **pin_attr; + struct attribute_group pin_attr_group; + 
const struct attribute_group *pin_attr_groups[2]; + struct kthread_worker *kworker; + struct kthread_delayed_work aux_work; + unsigned int max_vclocks; + unsigned int n_vclocks; + int *vclock_index; + struct mutex n_vclocks_mux; + bool is_virtual_clock; + bool has_cycles; + struct dentry *debugfs_root; +}; + +struct ptp_vclock { + struct ptp_clock *pclock; + struct ptp_clock_info info; + struct ptp_clock *clock; + struct hlist_node vclock_hash_node; + struct cyclecounter cc; + struct timecounter tc; + struct mutex lock; + long: 32; +}; + +struct update_util_data { + void (*func)(struct update_util_data *, u64, unsigned int); +}; + +struct gov_attr_set { + struct kobject kobj; + struct list_head policy_list; + struct mutex update_lock; + int usage_count; +}; + +struct dbs_governor; + +struct dbs_data { + struct gov_attr_set attr_set; + struct dbs_governor *gov; + void *tuners; + unsigned int ignore_nice_load; + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int io_is_busy; +}; + +struct policy_dbs_info; + +struct dbs_governor { + struct cpufreq_governor gov; + struct kobj_type kobj_type; + struct dbs_data *gdbs_data; + unsigned int (*gov_dbs_update)(struct cpufreq_policy *); + struct policy_dbs_info * (*alloc)(); + void (*free)(struct policy_dbs_info *); + int (*init)(struct dbs_data *); + void (*exit)(struct dbs_data *); + void (*start)(struct cpufreq_policy *); +}; + +struct policy_dbs_info { + struct cpufreq_policy *policy; + struct mutex update_mutex; + u64 last_sample_time; + s64 sample_delay_ns; + atomic_t work_count; + struct irq_work irq_work; + struct work_struct work; + struct dbs_data *dbs_data; + struct list_head list; + unsigned int rate_mult; + unsigned int idle_periods; + bool is_shared; + bool work_in_progress; + long: 32; +}; + +struct cpu_dbs_info { + u64 prev_cpu_idle; + u64 prev_update_time; + u64 prev_cpu_nice; + unsigned int prev_load; + struct update_util_data update_util; + struct policy_dbs_info *policy_dbs; + long: 32; +}; + +enum efi_rts_ids { + EFI_NONE = 0, + EFI_GET_TIME = 1, + EFI_SET_TIME = 2, + EFI_GET_WAKEUP_TIME = 3, + EFI_SET_WAKEUP_TIME = 4, + EFI_GET_VARIABLE = 5, + EFI_GET_NEXT_VARIABLE = 6, + EFI_SET_VARIABLE = 7, + EFI_QUERY_VARIABLE_INFO = 8, + EFI_GET_NEXT_HIGH_MONO_COUNT = 9, + EFI_RESET_SYSTEM = 10, + EFI_UPDATE_CAPSULE = 11, + EFI_QUERY_CAPSULE_CAPS = 12, + EFI_ACPI_PRM_HANDLER = 13, +}; + +union efi_rts_args; + +struct efi_runtime_work { + union efi_rts_args *args; + efi_status_t status; + struct work_struct work; + enum efi_rts_ids efi_rts_id; + struct completion efi_rts_comp; + const void *caller; +}; + +union efi_rts_args { + struct { + efi_time_t *time; + efi_time_cap_t *capabilities; + } GET_TIME; + struct { + efi_time_t *time; + } SET_TIME; + struct { + efi_bool_t *enabled; + efi_bool_t *pending; + efi_time_t *time; + } GET_WAKEUP_TIME; + struct { + efi_bool_t enable; + efi_time_t *time; + } SET_WAKEUP_TIME; + struct { + efi_char16_t *name; + efi_guid_t *vendor; + u32 *attr; + long unsigned int *data_size; + void *data; + } GET_VARIABLE; + struct { + long unsigned int *name_size; + efi_char16_t *name; + efi_guid_t *vendor; + } GET_NEXT_VARIABLE; + struct { + efi_char16_t *name; + efi_guid_t *vendor; + u32 attr; + long unsigned int data_size; + void *data; + } SET_VARIABLE; + struct { + u32 attr; + u64 *storage_space; + u64 *remaining_space; + u64 *max_variable_size; + } QUERY_VARIABLE_INFO; + struct { + u32 *high_count; + } GET_NEXT_HIGH_MONO_COUNT; + struct { + 
efi_capsule_header_t **capsules; + long unsigned int count; + long unsigned int sg_list; + } UPDATE_CAPSULE; + struct { + efi_capsule_header_t **capsules; + long unsigned int count; + u64 *max_size; + int *reset_type; + } QUERY_CAPSULE_CAPS; + struct { + efi_status_t (*acpi_prm_handler)(u64, void *); + long: 32; + u64 param_buffer_addr; + void *context; + long: 32; + } ACPI_PRM_HANDLER; +}; + +enum hid_class_request { + HID_REQ_GET_REPORT = 1, + HID_REQ_GET_IDLE = 2, + HID_REQ_GET_PROTOCOL = 3, + HID_REQ_SET_REPORT = 9, + HID_REQ_SET_IDLE = 10, + HID_REQ_SET_PROTOCOL = 11, +}; + +struct hidraw_devinfo { + __u32 bustype; + __s16 vendor; + __s16 product; +}; + +struct hidraw { + unsigned int minor; + int exist; + int open; + wait_queue_head_t wait; + struct hid_device *hid; + struct device *dev; + spinlock_t list_lock; + struct list_head list; +}; + +struct hidraw_report { + __u8 *value; + int len; +}; + +struct hidraw_list { + struct hidraw_report buffer[64]; + int head; + int tail; + struct fasync_struct *fasync; + struct hidraw *hidraw; + struct list_head node; + struct mutex read_mutex; + bool revoked; +}; + +struct __kernel_sockaddr_storage { + union { + struct { + __kernel_sa_family_t ss_family; + char __data[126]; + }; + void *__align; + }; +}; + +struct net_packet_attrs { + const unsigned char *src; + const unsigned char *dst; + u32 ip_src; + u32 ip_dst; + bool tcp; + u16 sport; + u16 dport; + int timeout; + int size; + int max_size; + u8 id; + u16 queue_mapping; +}; + +struct net_test_priv { + struct net_packet_attrs *packet; + struct packet_type pt; + struct completion comp; + int double_vlan; + int vlan_id; + int ok; +}; + +struct netsfhdr { + __be32 version; + __be64 magic; + u8 id; +} __attribute__((packed)); + +struct net_test { + char name[32]; + int (*fn)(struct net_device *); +}; + +struct privflags_reply_data { + struct ethnl_reply_data base; + const char (*priv_flag_names)[32]; + unsigned int n_priv_flags; + u32 priv_flags; +}; + +enum ip_conntrack_events { + IPCT_NEW = 0, + IPCT_RELATED = 1, + IPCT_DESTROY = 2, + IPCT_REPLY = 3, + IPCT_ASSURED = 4, + IPCT_PROTOINFO = 5, + IPCT_HELPER = 6, + IPCT_MARK = 7, + IPCT_SEQADJ = 8, + IPCT_NATSEQADJ = 8, + IPCT_SECMARK = 9, + IPCT_LABEL = 10, + IPCT_SYNPROXY = 11, + __IPCT_MAX = 12, +}; + +enum nft_rt_keys { + NFT_RT_CLASSID = 0, + NFT_RT_NEXTHOP4 = 1, + NFT_RT_NEXTHOP6 = 2, + NFT_RT_TCPMSS = 3, + NFT_RT_XFRM = 4, + __NFT_RT_MAX = 5, +}; + +enum nft_rt_attributes { + NFTA_RT_UNSPEC = 0, + NFTA_RT_DREG = 1, + NFTA_RT_KEY = 2, + __NFTA_RT_MAX = 3, +}; + +struct nft_rt { + enum nft_rt_keys key: 8; + u8 dreg; +}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; + struct in_addr imr_address; + int imr_ifindex; +}; + +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + +struct ip_sf_list; + +struct ip_mc_list { + struct in_device *interface; + __be32 multiaddr; + unsigned int sfmode; + struct ip_sf_list *sources; + struct ip_sf_list *tomb; + long unsigned int sfcount[2]; + union { + struct ip_mc_list *next; + struct ip_mc_list *next_rcu; + }; + struct ip_mc_list *next_hash; + struct timer_list timer; + int users; + refcount_t refcnt; + spinlock_t lock; + char tm_running; + char reporter; + char unsolicit_count; + char loaded; + unsigned char gsquery; + unsigned char crcount; + struct callback_head rcu; +}; + +struct ip_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + __be32 sl_addr[0]; +}; + +struct ip_mc_socklist { + struct ip_mc_socklist *next_rcu; + struct 
ip_mreqn multi; + unsigned int sfmode; + struct ip_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ip_sf_list { + struct ip_sf_list *sf_next; + long unsigned int sf_count[2]; + __be32 sf_inaddr; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; +}; + +struct fib_rt_info { + struct fib_info *fi; + u32 tb_id; + __be32 dst; + int dst_len; + dscp_t dscp; + u8 type; + u8 offload: 1; + u8 trap: 1; + u8 offload_failed: 1; + u8 unused: 5; +}; + +struct rt_cache_stat { + unsigned int in_slow_tot; + unsigned int in_slow_mc; + unsigned int in_no_route; + unsigned int in_brd; + unsigned int in_martian_dst; + unsigned int in_martian_src; + unsigned int out_slow_tot; + unsigned int out_slow_mc; +}; + +struct fib_alias { + struct hlist_node fa_list; + struct fib_info *fa_info; + dscp_t fa_dscp; + u8 fa_type; + u8 fa_state; + u8 fa_slen; + u32 tb_id; + s16 fa_default; + u8 offload; + u8 trap; + u8 offload_failed; + struct callback_head rcu; +}; + +struct fib_prop { + int error; + u8 scope; +}; + +enum { + INET_FRAG_FIRST_IN = 1, + INET_FRAG_LAST_IN = 2, + INET_FRAG_COMPLETE = 4, + INET_FRAG_HASH_DEAD = 8, + INET_FRAG_DROP = 16, +}; + +struct ipfrag_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + }; + struct sk_buff *next_frag; + int frag_run_len; + int ip_defrag_offset; +}; + +struct ac6_iter_state { + struct seq_net_private p; + struct net_device *dev; +}; + +enum { + IP6_FH_F_FRAG = 1, + IP6_FH_F_AUTH = 2, + IP6_FH_F_SKIP_RH = 4, +}; + +struct vlan_ethhdr { + union { + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + }; + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + } addrs; + }; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct nf_ct_hook { + int (*update)(struct net *, struct sk_buff *); + void (*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); + void (*attach)(struct sk_buff *, const struct sk_buff *); + void (*set_closing)(struct nf_conntrack *); + int (*confirm)(struct sk_buff *); +}; + +enum nf_br_hook_priorities { + NF_BR_PRI_FIRST = -2147483648, + NF_BR_PRI_NAT_DST_BRIDGED = -300, + NF_BR_PRI_FILTER_BRIDGED = -200, + NF_BR_PRI_BRNF = 0, + NF_BR_PRI_NAT_DST_OTHER = 100, + NF_BR_PRI_FILTER_OTHER = 200, + NF_BR_PRI_NAT_SRC = 300, + NF_BR_PRI_LAST = 2147483647, +}; + +enum nf_ip6_hook_priorities { + NF_IP6_PRI_FIRST = -2147483648, + NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450, + NF_IP6_PRI_CONNTRACK_DEFRAG = -400, + NF_IP6_PRI_RAW = -300, + NF_IP6_PRI_SELINUX_FIRST = -225, + NF_IP6_PRI_CONNTRACK = -200, + NF_IP6_PRI_MANGLE = -150, + NF_IP6_PRI_NAT_DST = -100, + NF_IP6_PRI_FILTER = 0, + NF_IP6_PRI_SECURITY = 50, + NF_IP6_PRI_NAT_SRC = 100, + NF_IP6_PRI_SELINUX_LAST = 225, + NF_IP6_PRI_CONNTRACK_HELPER = 300, + NF_IP6_PRI_LAST = 2147483647, +}; + +struct nf_queue_entry; + +struct nf_ipv6_ops { + void (*route_input)(struct sk_buff *); + int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); +}; + +struct nf_queue_entry { + struct list_head list; + struct sk_buff *skb; + unsigned int id; + unsigned int hook_index; + struct net_device *physin; + struct net_device *physout; + struct nf_hook_state state; + u16 size; +}; + +struct br_input_skb_cb { + struct net_device *brdev; + u16 frag_max_size; + u8 igmp; + u8 mrouters_only: 1; + u8 proxyarp_replied: 1; + u8 
src_port_isolated: 1; + u8 promisc: 1; + u8 br_netfilter_broute: 1; + u32 backup_nhid; +}; + +struct nf_br_ops { + int (*br_dev_xmit_hook)(struct sk_buff *); +}; + +struct brnf_net { + bool enabled; + struct ctl_table_header *ctl_hdr; + int call_iptables; + int call_ip6tables; + int call_arptables; + int filter_vlan_tagged; + int filter_pppoe_tagged; + int pass_vlan_indev; +}; + +struct brnf_frag_data { + local_lock_t bh_lock; + char mac[22]; + u8 encap_size; + u8 size; + u16 vlan_tci; + __be16 vlan_proto; +}; + +enum ieee80211_vht_actioncode { + WLAN_VHT_ACTION_COMPRESSED_BF = 0, + WLAN_VHT_ACTION_GROUPID_MGMT = 1, + WLAN_VHT_ACTION_OPMODE_NOTIF = 2, +}; + +enum ieee80211_protected_eht_actioncode { + WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0, + WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1, + WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2, +}; + +enum ieee80211_s1g_actioncode { + WLAN_S1G_AID_SWITCH_REQUEST = 0, + WLAN_S1G_AID_SWITCH_RESPONSE = 1, + WLAN_S1G_SYNC_CONTROL = 2, + WLAN_S1G_STA_INFO_ANNOUNCE = 3, + WLAN_S1G_EDCA_PARAM_SET = 4, + WLAN_S1G_EL_OPERATION = 5, + WLAN_S1G_TWT_SETUP = 6, + WLAN_S1G_TWT_TEARDOWN = 7, + WLAN_S1G_SECT_GROUP_ID_LIST = 8, + WLAN_S1G_SECT_ID_FEEDBACK = 9, + WLAN_S1G_TWT_INFORMATION = 11, +}; + +struct ieee80211_radiotap_header_fixed { + uint8_t it_version; + uint8_t it_pad; + __le16 it_len; + __le32 it_present; +}; + +struct ieee80211_radiotap_header { + union { + struct { + uint8_t it_version; + uint8_t it_pad; + __le16 it_len; + __le32 it_present; + }; + struct ieee80211_radiotap_header_fixed hdr; + }; + __le32 it_optional[0]; +}; + +enum ieee80211_conf_flags { + IEEE80211_CONF_MONITOR = 1, + IEEE80211_CONF_PS = 2, + IEEE80211_CONF_IDLE = 4, + IEEE80211_CONF_OFFCHANNEL = 8, +}; + +enum ieee80211_conf_changed { + IEEE80211_CONF_CHANGE_SMPS = 2, + IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = 4, + IEEE80211_CONF_CHANGE_MONITOR = 8, + IEEE80211_CONF_CHANGE_PS = 16, + IEEE80211_CONF_CHANGE_POWER = 32, + IEEE80211_CONF_CHANGE_CHANNEL = 64, + IEEE80211_CONF_CHANGE_RETRY_LIMITS = 128, + IEEE80211_CONF_CHANGE_IDLE = 256, +}; + +enum ieee80211_offload_flags { + IEEE80211_OFFLOAD_ENCAP_ENABLED = 1, + IEEE80211_OFFLOAD_ENCAP_4ADDR = 2, + IEEE80211_OFFLOAD_DECAP_ENABLED = 4, +}; + +enum ieee80211_tpt_led_trigger_flags { + IEEE80211_TPT_LEDTRIG_FL_RADIO = 1, + IEEE80211_TPT_LEDTRIG_FL_WORK = 2, + IEEE80211_TPT_LEDTRIG_FL_CONNECTED = 4, +}; + +struct ieee80211_roc_work { + struct list_head list; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_channel *chan; + bool started; + bool abort; + bool hw_begun; + bool notified; + bool on_channel; + long unsigned int start_time; + u32 duration; + u32 req_duration; + struct sk_buff *frame; + u64 cookie; + u64 mgmt_tx_cookie; + enum ieee80211_roc_type type; + long: 32; +}; + +struct mesh_rmc { + struct hlist_head bucket[256]; + u32 idx_mask; +}; + +enum mac80211_scan_flags { + SCAN_SW_SCANNING = 0, + SCAN_HW_SCANNING = 1, + SCAN_ONCHANNEL_SCANNING = 2, + SCAN_COMPLETED = 3, + SCAN_ABORTED = 4, + SCAN_HW_CANCELLED = 5, + SCAN_BEACON_WAIT = 6, + SCAN_BEACON_DONE = 7, +}; + +struct uf_node { + struct uf_node *parent; + unsigned int rank; +}; + +struct __kernel_old_itimerval { + struct __kernel_old_timeval it_interval; + struct __kernel_old_timeval it_value; +}; + +enum { + FTRACE_OPS_FL_ENABLED = 1, + FTRACE_OPS_FL_DYNAMIC = 2, + FTRACE_OPS_FL_SAVE_REGS = 4, + FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 8, + FTRACE_OPS_FL_RECURSION = 16, + FTRACE_OPS_FL_STUB = 32, + FTRACE_OPS_FL_INITIALIZED = 64, + FTRACE_OPS_FL_DELETED = 128, + 
FTRACE_OPS_FL_ADDING = 256, + FTRACE_OPS_FL_REMOVING = 512, + FTRACE_OPS_FL_MODIFYING = 1024, + FTRACE_OPS_FL_ALLOC_TRAMP = 2048, + FTRACE_OPS_FL_IPMODIFY = 4096, + FTRACE_OPS_FL_PID = 8192, + FTRACE_OPS_FL_RCU = 16384, + FTRACE_OPS_FL_TRACE_ARRAY = 32768, + FTRACE_OPS_FL_PERMANENT = 65536, + FTRACE_OPS_FL_DIRECT = 131072, + FTRACE_OPS_FL_SUBOP = 262144, +}; + +enum { + FTRACE_UPDATE_CALLS = 1, + FTRACE_DISABLE_CALLS = 2, + FTRACE_UPDATE_TRACE_FUNC = 4, + FTRACE_START_FUNC_RET = 8, + FTRACE_STOP_FUNC_RET = 16, + FTRACE_MAY_SLEEP = 32, +}; + +struct ftrace_ret_stack { + long unsigned int ret; + long unsigned int func; + long unsigned int fp; + long unsigned int *retp; +}; + +enum { + FGRAPH_TYPE_RESERVED = 0, + FGRAPH_TYPE_BITMAP = 1, + FGRAPH_TYPE_DATA = 2, +}; + +struct fgraph_ret_regs; + +struct bpf_tramp_run_ctx; + +typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *, struct bpf_tramp_run_ctx *); + +struct bpf_tramp_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + struct bpf_run_ctx *saved_run_ctx; + long: 32; +}; + +typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *, u64, struct bpf_tramp_run_ctx *); + +struct bpf_attach_target_info { + struct btf_func_model fmodel; + long int tgt_addr; + struct module *tgt_mod; + const char *tgt_name; + const struct btf_type *tgt_type; +}; + +enum bpf_text_poke_type { + BPF_MOD_CALL = 0, + BPF_MOD_JUMP = 1, +}; + +enum perf_record_ksymbol_type { + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, + PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + PERF_RECORD_KSYMBOL_TYPE_OOL = 2, + PERF_RECORD_KSYMBOL_TYPE_MAX = 3, +}; + +enum kasan_report_type { + KASAN_REPORT_ACCESS = 0, + KASAN_REPORT_INVALID_FREE = 1, + KASAN_REPORT_DOUBLE_FREE = 2, +}; + +enum execmem_type { + EXECMEM_DEFAULT = 0, + EXECMEM_MODULE_TEXT = 0, + EXECMEM_KPROBES = 1, + EXECMEM_FTRACE = 2, + EXECMEM_BPF = 3, + EXECMEM_MODULE_DATA = 4, + EXECMEM_TYPE_MAX = 5, +}; + +enum execmem_range_flags { + EXECMEM_KASAN_SHADOW = 1, + EXECMEM_ROX_CACHE = 2, +}; + +struct execmem_range { + long unsigned int start; + long unsigned int end; + long unsigned int fallback_start; + long unsigned int fallback_end; + pgprot_t pgprot; + unsigned int alignment; + enum execmem_range_flags flags; +}; + +struct execmem_info { + struct execmem_range ranges[5]; +}; + +enum dentry_d_lock_class { + DENTRY_D_LOCK_NORMAL = 0, + DENTRY_D_LOCK_NESTED = 1, +}; + +struct name_snapshot { + struct qstr name; + unsigned char inline_name[36]; + long: 32; +}; + +struct dentry_stat_t { + long int nr_dentry; + long int nr_unused; + long int age_limit; + long int want_pages; + long int nr_negative; + long int dummy; +}; + +struct external_name { + union { + atomic_t count; + struct callback_head head; + } u; + unsigned char name[0]; +}; + +enum d_walk_ret { + D_WALK_CONTINUE = 0, + D_WALK_QUIT = 1, + D_WALK_NORETRY = 2, + D_WALK_SKIP = 3, +}; + +struct check_mount { + struct vfsmount *mnt; + unsigned int mounted; +}; + +struct select_data { + struct dentry *start; + union { + long int found; + struct dentry *victim; + }; + struct list_head dispose; +}; + +struct file_handle { + __u32 handle_bytes; + int handle_type; + unsigned char f_handle[0]; +}; + +enum { + MBE_REFERENCED_B = 0, + MBE_REUSABLE_B = 1, +}; + +struct mb_cache_entry { + struct list_head e_list; + struct hlist_bl_node e_hash_list; + atomic_t e_refcnt; + u32 e_key; + long unsigned int e_flags; + long: 32; + u64 e_value; +}; + +struct mb_cache { + struct hlist_bl_head *c_hash; + int c_bucket_bits; + long unsigned int c_max_entries; + spinlock_t c_list_lock; + struct 
list_head c_list; + long unsigned int c_entry_count; + struct shrinker *c_shrink; + struct work_struct c_shrink_work; +}; + +struct page_region { + __u64 start; + __u64 end; + __u64 categories; +}; + +struct pm_scan_arg { + __u64 size; + __u64 flags; + __u64 start; + __u64 end; + __u64 walk_end; + __u64 vec; + __u64 vec_len; + __u64 max_pages; + __u64 category_inverted; + __u64 category_mask; + __u64 category_anyof_mask; + __u64 return_mask; +}; + +enum procmap_query_flags { + PROCMAP_QUERY_VMA_READABLE = 1, + PROCMAP_QUERY_VMA_WRITABLE = 2, + PROCMAP_QUERY_VMA_EXECUTABLE = 4, + PROCMAP_QUERY_VMA_SHARED = 8, + PROCMAP_QUERY_COVERING_OR_NEXT_VMA = 16, + PROCMAP_QUERY_FILE_BACKED_VMA = 32, +}; + +struct procmap_query { + __u64 size; + __u64 query_flags; + __u64 query_addr; + __u64 vma_start; + __u64 vma_end; + __u64 vma_flags; + __u64 vma_page_size; + __u64 vma_offset; + __u64 inode; + __u32 dev_major; + __u32 dev_minor; + __u32 vma_name_size; + __u32 build_id_size; + __u64 vma_name_addr; + __u64 build_id_addr; +}; + +union proc_op { + int (*proc_get_link)(struct dentry *, struct path *); + int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); + int lsmid; +}; + +struct proc_inode { + struct pid *pid; + unsigned int fd; + union proc_op op; + struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + const struct ctl_table *sysctl_entry; + struct hlist_node sibling_inodes; + const struct proc_ns_operations *ns_ops; + long: 32; + struct inode vfs_inode; +}; + +struct proc_maps_private { + struct inode *inode; + struct task_struct *task; + struct mm_struct *mm; + struct vma_iterator iter; +}; + +struct mem_size_stats { + long unsigned int resident; + long unsigned int shared_clean; + long unsigned int shared_dirty; + long unsigned int private_clean; + long unsigned int private_dirty; + long unsigned int referenced; + long unsigned int anonymous; + long unsigned int lazyfree; + long unsigned int anonymous_thp; + long unsigned int shmem_thp; + long unsigned int file_thp; + long unsigned int swap; + long unsigned int shared_hugetlb; + long unsigned int private_hugetlb; + long unsigned int ksm; + long: 32; + u64 pss; + u64 pss_anon; + u64 pss_file; + u64 pss_shmem; + u64 pss_dirty; + u64 pss_locked; + u64 swap_pss; +}; + +enum clear_refs_types { + CLEAR_REFS_ALL = 1, + CLEAR_REFS_ANON = 2, + CLEAR_REFS_MAPPED = 3, + CLEAR_REFS_SOFT_DIRTY = 4, + CLEAR_REFS_MM_HIWATER_RSS = 5, + CLEAR_REFS_LAST = 6, +}; + +struct clear_refs_private { + enum clear_refs_types type; +}; + +typedef struct { + u64 pme; +} pagemap_entry_t; + +struct pagemapread { + int pos; + int len; + pagemap_entry_t *buffer; + bool show_pfn; +}; + +struct pagemap_scan_private { + struct pm_scan_arg arg; + long unsigned int masks_of_interest; + long unsigned int cur_vma_category; + struct page_region *vec_buf; + long unsigned int vec_buf_len; + long unsigned int vec_buf_index; + long unsigned int found_pages; + struct page_region *vec_out; + long: 32; +}; + +struct iso_directory_record { + __u8 length[1]; + __u8 ext_attr_length[1]; + __u8 extent[8]; + __u8 size[8]; + __u8 date[7]; + __u8 flags[1]; + __u8 file_unit_size[1]; + __u8 interleave[1]; + __u8 volume_sequence_number[4]; + __u8 name_len[1]; + char name[0]; +}; + +enum isofs_file_format { + isofs_file_normal = 0, + isofs_file_sparse = 1, + isofs_file_compressed = 2, +}; + +struct iso_inode_info { + long unsigned int i_iget5_block; + long unsigned int i_iget5_offset; + unsigned int i_first_extent; + unsigned char i_file_format; + 
unsigned char i_format_parm[3]; + long unsigned int i_next_section_block; + long unsigned int i_next_section_offset; + off_t i_section_size; + long: 32; + struct inode vfs_inode; +}; + +struct isofs_sb_info { + long unsigned int s_ninodes; + long unsigned int s_nzones; + long unsigned int s_firstdatazone; + long unsigned int s_log_zone_size; + long unsigned int s_max_size; + int s_rock_offset; + s32 s_sbsector; + unsigned char s_joliet_level; + unsigned char s_mapping; + unsigned char s_check; + unsigned char s_session; + unsigned int s_high_sierra: 1; + unsigned int s_rock: 2; + unsigned int s_cruft: 1; + unsigned int s_nocompress: 1; + unsigned int s_hide: 1; + unsigned int s_showassoc: 1; + unsigned int s_overriderockperm: 1; + unsigned int s_uid_set: 1; + unsigned int s_gid_set: 1; + umode_t s_fmode; + umode_t s_dmode; + kgid_t s_gid; + kuid_t s_uid; + struct nls_table *s_nls_iocharset; +}; + +struct SU_SP_s { + __u8 magic[2]; + __u8 skip; +}; + +struct SU_CE_s { + __u8 extent[8]; + __u8 offset[8]; + __u8 size[8]; +}; + +struct SU_ER_s { + __u8 len_id; + __u8 len_des; + __u8 len_src; + __u8 ext_ver; + __u8 data[0]; +}; + +struct RR_RR_s { + __u8 flags[1]; +}; + +struct RR_PX_s { + __u8 mode[8]; + __u8 n_links[8]; + __u8 uid[8]; + __u8 gid[8]; +}; + +struct RR_PN_s { + __u8 dev_high[8]; + __u8 dev_low[8]; +}; + +struct SL_component { + __u8 flags; + __u8 len; + __u8 text[0]; +}; + +struct RR_SL_s { + __u8 flags; + struct SL_component link; +}; + +struct RR_NM_s { + __u8 flags; + char name[0]; +}; + +struct RR_CL_s { + __u8 location[8]; +}; + +struct RR_PL_s { + __u8 location[8]; +}; + +struct stamp { + __u8 time[7]; +}; + +struct RR_TF_s { + __u8 flags; + struct stamp times[0]; +}; + +struct RR_ZF_s { + __u8 algorithm[2]; + __u8 parms[2]; + __u8 real_size[8]; +}; + +struct rock_ridge { + __u8 signature[2]; + __u8 len; + __u8 version; + union { + struct SU_SP_s SP; + struct SU_CE_s CE; + struct SU_ER_s ER; + struct RR_RR_s RR; + struct RR_PX_s PX; + struct RR_PN_s PN; + struct RR_SL_s SL; + struct RR_NM_s NM; + struct RR_CL_s CL; + struct RR_PL_s PL; + struct RR_TF_s TF; + struct RR_ZF_s ZF; + } u; +}; + +struct rock_state { + void *buffer; + unsigned char *chr; + int len; + int cont_size; + int cont_extent; + int cont_offset; + int cont_loops; + struct inode *inode; +}; + +struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; + bool is_nokey_name; +}; + +enum btrfs_delayed_ref_flags { + BTRFS_DELAYED_REFS_FLUSHING = 0, +}; + +enum { + ____TRANS_FREEZABLE_BIT = 0, + __TRANS_FREEZABLE = 1, + ____TRANS_FREEZABLE_SEQ = 0, + ____TRANS_START_BIT = 1, + __TRANS_START = 2, + ____TRANS_START_SEQ = 1, + ____TRANS_ATTACH_BIT = 2, + __TRANS_ATTACH = 4, + ____TRANS_ATTACH_SEQ = 2, + ____TRANS_JOIN_BIT = 3, + __TRANS_JOIN = 8, + ____TRANS_JOIN_SEQ = 3, + ____TRANS_JOIN_NOLOCK_BIT = 4, + __TRANS_JOIN_NOLOCK = 16, + ____TRANS_JOIN_NOLOCK_SEQ = 4, + ____TRANS_DUMMY_BIT = 5, + __TRANS_DUMMY = 32, + ____TRANS_DUMMY_SEQ = 5, + ____TRANS_JOIN_NOSTART_BIT = 6, + __TRANS_JOIN_NOSTART = 64, + ____TRANS_JOIN_NOSTART_SEQ = 6, +}; + +struct request_key_auth { + struct callback_head rcu; + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +}; + +struct shash_instance { + void (*free)(struct shash_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct shash_alg alg; + }; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +enum { + DISK_EVENT_MEDIA_CHANGE = 1, + DISK_EVENT_EJECT_REQUEST = 2, +}; + +enum { + DISK_EVENT_FLAG_POLL = 1, + DISK_EVENT_FLAG_UEVENT = 2, + DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 4, +}; + +enum mapping_flags { + AS_EIO = 0, + AS_ENOSPC = 1, + AS_MM_ALL_LOCKS = 2, + AS_UNEVICTABLE = 3, + AS_EXITING = 4, + AS_NO_WRITEBACK_TAGS = 5, + AS_RELEASE_ALWAYS = 6, + AS_STABLE_WRITES = 7, + AS_INACCESSIBLE = 8, + AS_FOLIO_ORDER_BITS = 5, + AS_FOLIO_ORDER_MIN = 16, + AS_FOLIO_ORDER_MAX = 21, +}; + +struct bdev_inode { + struct block_device bdev; + struct inode vfs_inode; +}; + +struct io_poll { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + int retries; + struct wait_queue_entry wait; +}; + +struct async_poll { + struct io_poll poll; + struct io_poll *double_poll; +}; + +struct io_issue_def { + unsigned int needs_file: 1; + unsigned int plug: 1; + unsigned int hash_reg_file: 1; + unsigned int unbound_nonreg_file: 1; + unsigned int pollin: 1; + unsigned int pollout: 1; + unsigned int poll_exclusive: 1; + unsigned int buffer_select: 1; + unsigned int audit_skip: 1; + unsigned int ioprio: 1; + unsigned int iopoll: 1; + unsigned int iopoll_queue: 1; + unsigned int vectored: 1; + short unsigned int async_size; + int (*issue)(struct io_kiocb *, unsigned int); + int (*prep)(struct io_kiocb *, const struct io_uring_sqe *); +}; + +struct io_cold_def { + const char *name; + void (*cleanup)(struct io_kiocb *); + void (*fail)(struct io_kiocb *); +}; + +struct once_work { + struct work_struct work; + struct static_key_true *key; + struct module *module; +}; + +struct ZSTD_CDict_s { + const void *dictContent; + size_t dictContentSize; + ZSTD_dictContentType_e dictContentType; + U32 *entropyWorkspace; + ZSTD_cwksp workspace; + ZSTD_matchState_t matchState; + ZSTD_compressedBlockState_t cBlockState; + ZSTD_customMem customMem; + U32 dictID; + int compressionLevel; + ZSTD_paramSwitch_e useRowMatchFinder; +}; + +typedef struct { + U32 litLength; + U32 matchLength; +} ZSTD_sequenceLength; + +struct repcodes_s { + U32 rep[3]; +}; + +typedef struct repcodes_s repcodes_t; + +struct word_at_a_time { + const long unsigned int one_bits; + const long unsigned int high_bits; +}; + +enum support_mode { + ALLOW_LEGACY = 0, + DENY_LEGACY = 1, +}; + +struct eeprom_93cx6 { + void *data; + void (*register_read)(struct eeprom_93cx6 *); + void (*register_write)(struct eeprom_93cx6 *); + int width; + unsigned int quirks; + char drive_data; + char reg_data_in; + char reg_data_out; + char reg_data_clock; + char reg_chip_select; +}; + +enum cti_port_type { + CTI_PORT_TYPE_NONE = 0, + CTI_PORT_TYPE_RS232 = 1, + CTI_PORT_TYPE_RS422_485 = 2, + CTI_PORT_TYPE_RS232_422_485_HW = 3, + CTI_PORT_TYPE_RS232_422_485_SW = 4, + CTI_PORT_TYPE_RS232_422_485_4B = 5, + CTI_PORT_TYPE_RS232_422_485_2B = 6, + CTI_PORT_TYPE_MAX = 7, +}; + +struct exar8250_platform { + int (*rs485_config)(struct uart_port *, struct ktermios *, struct serial_rs485 *); + const struct serial_rs485 *rs485_supported; + int (*register_gpio)(struct pci_dev *, struct uart_8250_port *); + void (*unregister_gpio)(struct uart_8250_port *); +}; + +struct exar8250; + +struct exar8250_board { + unsigned int num_ports; + unsigned int reg_shift; + int (*setup)(struct exar8250 *, struct pci_dev *, struct uart_8250_port *, int); + 
void (*exit)(struct pci_dev *); +}; + +struct exar8250 { + unsigned int nr; + unsigned int osc_freq; + struct exar8250_board *board; + struct eeprom_93cx6 eeprom; + void *virt; + int line[0]; +}; + +struct builtin_fw { + char *name; + void *data; + long unsigned int size; +}; + +typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); + +struct scsi_lun { + __u8 scsi_lun[8]; +}; + +enum scsi_pr_type { + SCSI_PR_WRITE_EXCLUSIVE = 1, + SCSI_PR_EXCLUSIVE_ACCESS = 3, + SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 5, + SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 6, + SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 7, + SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 8, +}; + +struct execute_work { + struct work_struct work; +}; + +typedef s32 compat_int_t; + +typedef u32 compat_uint_t; + +struct rq_map_data { + struct page **pages; + long unsigned int offset; + short unsigned int page_order; + short unsigned int nr_entries; + bool null_mapped; + bool from_user; +}; + +enum blktrace_act { + __BLK_TA_QUEUE = 1, + __BLK_TA_BACKMERGE = 2, + __BLK_TA_FRONTMERGE = 3, + __BLK_TA_GETRQ = 4, + __BLK_TA_SLEEPRQ = 5, + __BLK_TA_REQUEUE = 6, + __BLK_TA_ISSUE = 7, + __BLK_TA_COMPLETE = 8, + __BLK_TA_PLUG = 9, + __BLK_TA_UNPLUG_IO = 10, + __BLK_TA_UNPLUG_TIMER = 11, + __BLK_TA_INSERT = 12, + __BLK_TA_SPLIT = 13, + __BLK_TA_BOUNCE = 14, + __BLK_TA_REMAP = 15, + __BLK_TA_ABORT = 16, + __BLK_TA_DRV_DATA = 17, + __BLK_TA_CGROUP = 256, +}; + +enum scsi_msg_byte { + COMMAND_COMPLETE = 0, + EXTENDED_MESSAGE = 1, + SAVE_POINTERS = 2, + RESTORE_POINTERS = 3, + DISCONNECT = 4, + INITIATOR_ERROR = 5, + ABORT_TASK_SET = 6, + MESSAGE_REJECT = 7, + NOP = 8, + MSG_PARITY_ERROR = 9, + LINKED_CMD_COMPLETE = 10, + LINKED_FLG_CMD_COMPLETE = 11, + TARGET_RESET = 12, + ABORT_TASK = 13, + CLEAR_TASK_SET = 14, + INITIATE_RECOVERY = 15, + RELEASE_RECOVERY = 16, + TERMINATE_IO_PROC = 17, + CLEAR_ACA = 22, + LOGICAL_UNIT_RESET = 23, + SIMPLE_QUEUE_TAG = 32, + HEAD_OF_QUEUE_TAG = 33, + ORDERED_QUEUE_TAG = 34, + IGNORE_WIDE_RESIDUE = 35, + ACA = 36, + QAS_REQUEST = 85, + BUS_DEVICE_RESET = 12, + ABORT = 6, +}; + +struct sg_io_hdr { + int interface_id; + int dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + unsigned int dxfer_len; + void *dxferp; + unsigned char *cmdp; + void *sbp; + unsigned int timeout; + unsigned int flags; + int pack_id; + void *usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + int resid; + unsigned int duration; + unsigned int info; +}; + +typedef struct sg_io_hdr sg_io_hdr_t; + +struct compat_sg_io_hdr { + compat_int_t interface_id; + compat_int_t dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + compat_uint_t dxfer_len; + compat_uint_t dxferp; + compat_uptr_t cmdp; + compat_uptr_t sbp; + compat_uint_t timeout; + compat_uint_t flags; + compat_int_t pack_id; + compat_uptr_t usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + compat_int_t resid; + compat_uint_t duration; + compat_uint_t info; +}; + +struct sg_scsi_id { + int host_no; + int channel; + int scsi_id; + int lun; + int scsi_type; + short int h_cmd_per_lun; + short int d_queue_depth; + int unused[2]; +}; + +typedef struct sg_scsi_id sg_scsi_id_t; + +struct sg_req_info { + char req_state; + char 
orphan; + char sg_io_owned; + char problem; + int pack_id; + void *usr_ptr; + unsigned int duration; + int unused; +}; + +typedef struct sg_req_info sg_req_info_t; + +struct sg_header { + int pack_len; + int reply_len; + int pack_id; + int result; + unsigned int twelve_byte: 1; + unsigned int target_status: 5; + unsigned int host_status: 8; + unsigned int driver_status: 8; + unsigned int other_flags: 10; + unsigned char sense_buffer[16]; +}; + +struct sg_scatter_hold { + short unsigned int k_use_sg; + unsigned int sglist_len; + unsigned int bufflen; + struct page **pages; + int page_order; + char dio_in_use; + unsigned char cmd_opcode; +}; + +typedef struct sg_scatter_hold Sg_scatter_hold; + +struct sg_fd; + +struct sg_request { + struct list_head entry; + struct sg_fd *parentfp; + Sg_scatter_hold data; + sg_io_hdr_t header; + unsigned char sense_b[96]; + char res_used; + char orphan; + char sg_io_owned; + char done; + struct request *rq; + struct bio *bio; + struct execute_work ew; +}; + +typedef struct sg_request Sg_request; + +struct sg_device; + +struct sg_fd { + struct list_head sfd_siblings; + struct sg_device *parentdp; + wait_queue_head_t read_wait; + rwlock_t rq_list_lock; + struct mutex f_mutex; + int timeout; + int timeout_user; + Sg_scatter_hold reserve; + struct list_head rq_list; + struct fasync_struct *async_qp; + Sg_request req_arr[16]; + char force_packid; + char cmd_q; + unsigned char next_cmd_len; + char keep_orphan; + char mmap_called; + char res_in_use; + struct kref f_ref; + struct execute_work ew; +}; + +struct sg_device { + struct scsi_device *device; + wait_queue_head_t open_wait; + struct mutex open_rel_lock; + int sg_tablesize; + u32 index; + struct list_head sfds; + rwlock_t sfd_lock; + atomic_t detaching; + bool exclude; + int open_cnt; + char sgdebug; + char name[32]; + struct cdev *cdev; + struct kref d_ref; +}; + +typedef struct sg_fd Sg_fd; + +typedef struct sg_device Sg_device; + +enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3, +}; + +struct my_u { + __le64 a; + __le64 b; +}; + +enum iwl_bt_coex_profile_traffic_load { + IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0, + IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1, + IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2, + IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3, +}; + +struct iwl_rate_info { + u8 plcp; + u8 plcp_siso; + u8 plcp_mimo2; + u8 plcp_mimo3; + u8 ieee; + u8 prev_ieee; + u8 next_ieee; + u8 prev_rs; + u8 next_rs; + u8 prev_rs_tgg; + u8 next_rs_tgg; +}; + +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_60M_PLCP = 3, + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, +}; + +enum { + IWL_RATE_SISO_6M_PLCP = 0, + IWL_RATE_SISO_12M_PLCP = 1, + IWL_RATE_SISO_18M_PLCP = 2, + IWL_RATE_SISO_24M_PLCP = 3, + IWL_RATE_SISO_36M_PLCP = 4, + IWL_RATE_SISO_48M_PLCP = 5, + IWL_RATE_SISO_54M_PLCP = 6, + IWL_RATE_SISO_60M_PLCP = 7, + IWL_RATE_MIMO2_6M_PLCP = 8, + IWL_RATE_MIMO2_12M_PLCP = 9, + IWL_RATE_MIMO2_18M_PLCP = 10, + IWL_RATE_MIMO2_24M_PLCP = 11, + IWL_RATE_MIMO2_36M_PLCP = 12, + IWL_RATE_MIMO2_48M_PLCP = 13, + IWL_RATE_MIMO2_54M_PLCP = 14, + IWL_RATE_MIMO2_60M_PLCP = 15, + IWL_RATE_MIMO3_6M_PLCP = 16, + IWL_RATE_MIMO3_12M_PLCP = 17, + IWL_RATE_MIMO3_18M_PLCP = 18, + IWL_RATE_MIMO3_24M_PLCP = 19, + IWL_RATE_MIMO3_36M_PLCP = 20, + IWL_RATE_MIMO3_48M_PLCP = 21, + IWL_RATE_MIMO3_54M_PLCP = 22, 
+ IWL_RATE_MIMO3_60M_PLCP = 23, + IWL_RATE_SISO_INVM_PLCP = 24, + IWL_RATE_MIMO2_INVM_PLCP = 24, + IWL_RATE_MIMO3_INVM_PLCP = 24, +}; + +enum { + IWL_RATE_6M_IEEE = 12, + IWL_RATE_9M_IEEE = 18, + IWL_RATE_12M_IEEE = 24, + IWL_RATE_18M_IEEE = 36, + IWL_RATE_24M_IEEE = 48, + IWL_RATE_36M_IEEE = 72, + IWL_RATE_48M_IEEE = 96, + IWL_RATE_54M_IEEE = 108, + IWL_RATE_60M_IEEE = 120, + IWL_RATE_1M_IEEE = 2, + IWL_RATE_2M_IEEE = 4, + IWL_RATE_5M_IEEE = 11, + IWL_RATE_11M_IEEE = 22, +}; + +enum iwl_table_type___2 { + LQ_NONE___2 = 0, + LQ_G = 1, + LQ_A = 2, + LQ_SISO = 3, + LQ_MIMO2 = 4, + LQ_MIMO3 = 5, + LQ_MAX___2 = 6, +}; + +struct iwl_rate_scale_data___2 { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; + long unsigned int stamp; + long: 32; +}; + +struct iwl_scale_tbl_info___2 { + enum iwl_table_type___2 lq_type; + u8 ant_type; + u8 is_SGI; + u8 is_ht40; + u8 is_dup; + u8 action; + u8 max_search; + const u16 *expected_tpt; + u32 current_rate; + long: 32; + struct iwl_rate_scale_data___2 win[13]; +}; + +struct iwl_traffic_load { + long unsigned int time_stamp; + u32 packet_count[20]; + u32 total; + u8 queue_count; + u8 head; +}; + +struct iwl_lq_sta___2 { + u8 active_tbl; + u8 enable_counter; + u8 stay_in_tbl; + u8 search_better_tbl; + s32 last_tpt; + u32 table_count_limit; + u32 max_failure_limit; + u32 max_success_limit; + u32 table_count; + u32 total_failed; + u32 total_success; + u64 flush_timer; + u8 action_counter; + u8 is_green; + u8 is_dup; + long: 0; + enum nl80211_band band; + u32 supp_rates; + u16 active_legacy_rate; + u16 active_siso_rate; + u16 active_mimo2_rate; + u16 active_mimo3_rate; + s8 max_rate_idx; + u8 missed_rate_counter; + struct iwl_link_quality_cmd lq; + long: 0; + struct iwl_scale_tbl_info___2 lq_info[2]; + struct iwl_traffic_load load[8]; + u8 tx_agg_tid_en; + long: 0; + struct iwl_priv *drv; + int last_txrate_idx; + u32 last_rate_n_flags; + u8 is_agg; + u8 last_bt_traffic; + long: 32; +} __attribute__((packed)); + +struct iwl_station_priv { + struct iwl_rxon_context *ctx; + long: 32; + struct iwl_lq_sta___2 lq_sta; + atomic_t pending_frames; + bool client; + bool asleep; + u8 max_agg_bufsize; + u8 sta_id; +}; + +enum ieee80211_vht_chanwidth { + IEEE80211_VHT_CHANWIDTH_USE_HT = 0, + IEEE80211_VHT_CHANWIDTH_80MHZ = 1, + IEEE80211_VHT_CHANWIDTH_160MHZ = 2, + IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3, +}; + +struct iwl_rate_mcs_info { + char mbps[12]; + char mcs[12]; +}; + +enum { + IWL_RATE_6M_PLCP___2 = 13, + IWL_RATE_9M_PLCP___2 = 15, + IWL_RATE_12M_PLCP___2 = 5, + IWL_RATE_18M_PLCP___2 = 7, + IWL_RATE_24M_PLCP___2 = 9, + IWL_RATE_36M_PLCP___2 = 11, + IWL_RATE_48M_PLCP___2 = 1, + IWL_RATE_54M_PLCP___2 = 3, + IWL_RATE_1M_PLCP___2 = 10, + IWL_RATE_2M_PLCP___2 = 20, + IWL_RATE_5M_PLCP___2 = 55, + IWL_RATE_11M_PLCP___2 = 110, + IWL_RATE_INVM_PLCP = -1, +}; + +struct iwl_rs_rate_info { + u8 plcp; + u8 plcp_ht_siso; + u8 plcp_ht_mimo2; + u8 plcp_vht_siso; + u8 plcp_vht_mimo2; + u8 prev_rs; + u8 next_rs; +}; + +enum { + IWL_RATE_HT_SISO_MCS_0_PLCP = 0, + IWL_RATE_HT_SISO_MCS_1_PLCP = 1, + IWL_RATE_HT_SISO_MCS_2_PLCP = 2, + IWL_RATE_HT_SISO_MCS_3_PLCP = 3, + IWL_RATE_HT_SISO_MCS_4_PLCP = 4, + IWL_RATE_HT_SISO_MCS_5_PLCP = 5, + IWL_RATE_HT_SISO_MCS_6_PLCP = 6, + IWL_RATE_HT_SISO_MCS_7_PLCP = 7, + IWL_RATE_HT_MIMO2_MCS_0_PLCP = 8, + IWL_RATE_HT_MIMO2_MCS_1_PLCP = 9, + IWL_RATE_HT_MIMO2_MCS_2_PLCP = 10, + IWL_RATE_HT_MIMO2_MCS_3_PLCP = 11, + IWL_RATE_HT_MIMO2_MCS_4_PLCP = 12, + IWL_RATE_HT_MIMO2_MCS_5_PLCP = 13, + IWL_RATE_HT_MIMO2_MCS_6_PLCP 
= 14, + IWL_RATE_HT_MIMO2_MCS_7_PLCP = 15, + IWL_RATE_VHT_SISO_MCS_0_PLCP = 0, + IWL_RATE_VHT_SISO_MCS_1_PLCP = 1, + IWL_RATE_VHT_SISO_MCS_2_PLCP = 2, + IWL_RATE_VHT_SISO_MCS_3_PLCP = 3, + IWL_RATE_VHT_SISO_MCS_4_PLCP = 4, + IWL_RATE_VHT_SISO_MCS_5_PLCP = 5, + IWL_RATE_VHT_SISO_MCS_6_PLCP = 6, + IWL_RATE_VHT_SISO_MCS_7_PLCP = 7, + IWL_RATE_VHT_SISO_MCS_8_PLCP = 8, + IWL_RATE_VHT_SISO_MCS_9_PLCP = 9, + IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 16, + IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 17, + IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 18, + IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 19, + IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 20, + IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 21, + IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 22, + IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 23, + IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 24, + IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 25, + IWL_RATE_HT_SISO_MCS_INV_PLCP = 26, + IWL_RATE_HT_MIMO2_MCS_INV_PLCP = 26, + IWL_RATE_VHT_SISO_MCS_INV_PLCP = 26, + IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = 26, + IWL_RATE_HT_SISO_MCS_8_PLCP = 26, + IWL_RATE_HT_SISO_MCS_9_PLCP = 26, + IWL_RATE_HT_MIMO2_MCS_8_PLCP = 26, + IWL_RATE_HT_MIMO2_MCS_9_PLCP = 26, +}; + +enum { + RS_STATE_SEARCH_CYCLE_STARTED = 0, + RS_STATE_SEARCH_CYCLE_ENDED = 1, + RS_STATE_STAY_IN_COLUMN = 2, +}; + +enum rs_action { + RS_ACTION_STAY = 0, + RS_ACTION_DOWNSCALE = -1, + RS_ACTION_UPSCALE = 1, +}; + +enum rs_column_mode { + RS_INVALID = 0, + RS_LEGACY = 1, + RS_SISO = 2, + RS_MIMO2 = 3, +}; + +struct rs_tx_column; + +typedef bool (*allow_column_func_t)(struct iwl_mvm *, struct ieee80211_sta *, struct rs_rate *, const struct rs_tx_column *); + +struct rs_tx_column { + enum rs_column_mode mode; + u8 ant; + bool sgi; + enum rs_column next_columns[7]; + allow_column_func_t checks[3]; +}; + +enum tpc_action { + TPC_ACTION_STAY = 0, + TPC_ACTION_DECREASE = 1, + TPC_ACTION_INCREASE = 2, + TPC_ACTION_NO_RESTIRCTION = 3, +}; + +struct rs_bfer_active_iter_data { + struct ieee80211_sta *exclude_sta; + struct iwl_mvm_sta *bfer_mvmsta; +}; + +struct rtw_hw_reg_desc { + u32 addr; + u32 mask; + const char *desc; +}; + +struct rtw_chan_info { + int pri_ch_idx; + int action_id; + int bw; + u8 extra_info; + u8 channel; + u16 timeout; +}; + +struct rtw_chan_list { + u32 buf_size; + u32 ch_num; + u32 size; + u16 addr; +}; + +enum rtw_c2h_cmd_id { + C2H_CCX_TX_RPT = 3, + C2H_BT_INFO = 9, + C2H_BT_MP_INFO = 11, + C2H_BT_HID_INFO = 69, + C2H_RA_RPT = 12, + C2H_HW_FEATURE_REPORT = 25, + C2H_WLAN_INFO = 39, + C2H_WLAN_RFON = 50, + C2H_BCN_FILTER_NOTIFY = 54, + C2H_ADAPTIVITY = 55, + C2H_SCAN_RESULT = 56, + C2H_HW_FEATURE_DUMP = 253, + C2H_HALMAC = 255, +}; + +enum rtw_c2h_cmd_id_ext { + C2H_SCAN_STATUS_RPT = 3, + C2H_CCX_RPT = 15, + C2H_CHAN_SWITCH = 34, +}; + +struct rtw_c2h_adaptivity { + u8 density; + u8 igi; + u8 l2h_th_init; + u8 l2h; + u8 h2l; + u8 option; +}; + +struct rtw_c2h_ra_rpt { + u8 rate_sgi; + u8 mac_id; + u8 byte2; + u8 status; + u8 byte4; + u8 ra_ratio; + u8 bw; +}; + +struct rtw_h2c_register { + u32 w0; + u32 w1; +}; + +struct rtw_h2c_cmd { + __le32 msg; + __le32 msg_ext; +}; + +enum rtw_rsvd_packet_type { + RSVD_BEACON = 0, + RSVD_DUMMY = 1, + RSVD_PS_POLL = 2, + RSVD_PROBE_RESP = 3, + RSVD_NULL = 4, + RSVD_QOS_NULL = 5, + RSVD_LPS_PG_DPK = 6, + RSVD_LPS_PG_INFO = 7, + RSVD_PROBE_REQ = 8, + RSVD_NLO_INFO = 9, + RSVD_CH_INFO = 10, +}; + +enum rtw_fw_rf_type { + FW_RF_1T2R = 0, + FW_RF_2T4R = 1, + FW_RF_2T2R = 2, + FW_RF_2T3R = 3, + FW_RF_1T1R = 4, + FW_RF_2T2R_GREEN = 5, + FW_RF_3T3R = 6, + FW_RF_3T4R = 7, + FW_RF_4T4R = 8, + FW_RF_MAX_TYPE = 15, +}; + +enum rtw_fw_feature_ext { + FW_FEATURE_EXT_OLD_PAGE_NUM 
= 1, +}; + +enum rtw_beacon_filter_offload_mode { + BCN_FILTER_OFFLOAD_MODE_0 = 0, + BCN_FILTER_OFFLOAD_MODE_1 = 1, + BCN_FILTER_OFFLOAD_MODE_2 = 2, + BCN_FILTER_OFFLOAD_MODE_3 = 3, + BCN_FILTER_OFFLOAD_MODE_DEFAULT = 0, +}; + +struct rtw_iqk_para { + u8 clear; + u8 segment_iqk; +}; + +struct rtw_lps_pg_dpk_hdr { + u16 dpk_path_ok; + u8 dpk_txagc[2]; + u16 dpk_gs[2]; + u32 coef[40]; + u8 dpk_ch; +} __attribute__((packed)); + +struct rtw_lps_pg_info_hdr { + u8 macid; + u8 mbssid; + u8 pattern_count; + u8 mu_tab_group_id; + u8 sec_cam_count; + u8 tx_bu_page_count; + u16 rsvd; + u8 sec_cam[8]; +}; + +struct rtw_rsvd_page { + struct list_head vif_list; + struct rtw_vif *rtwvif; + struct list_head build_list; + struct sk_buff *skb; + enum rtw_rsvd_packet_type type; + u8 page; + u16 tim_offset; + bool add_txdesc; + struct cfg80211_ssid *ssid; + u16 probe_req_size; +}; + +enum rtw_keep_alive_pkt_type { + KEEP_ALIVE_NULL_PKT = 0, + KEEP_ALIVE_ARP_RSP = 1, +}; + +struct rtw_nlo_info_hdr { + u8 nlo_count; + u8 hidden_ap_count; + u8 rsvd1[2]; + u8 pattern_check[4]; + u8 rsvd2[8]; + u8 ssid_len[16]; + u8 chiper[16]; + u8 rsvd3[16]; + u8 location[8]; +}; + +enum rtw_packet_type { + RTW_PACKET_PROBE_REQ = 0, + RTW_PACKET_UNDEFINE = 2147483647, +}; + +struct rtw_fw_wow_keep_alive_para { + bool adopt; + u8 pkt_type; + u8 period; +}; + +struct rtw_fw_wow_disconnect_para { + bool adopt; + u8 period; + u8 retry_count; +}; + +enum rtw_channel_type { + RTW_CHANNEL_PASSIVE = 0, + RTW_CHANNEL_ACTIVE = 1, + RTW_CHANNEL_RADAR = 2, +}; + +enum rtw_scan_extra_id { + RTW_SCAN_EXTRA_ID_DFS = 0, +}; + +enum rtw_scan_extra_info { + RTW_SCAN_EXTRA_ACTION_SCAN = 0, +}; + +enum rtw_scan_report_code { + RTW_SCAN_REPORT_SUCCESS = 0, + RTW_SCAN_REPORT_ERR_PHYDM = 1, + RTW_SCAN_REPORT_ERR_ID = 2, + RTW_SCAN_REPORT_ERR_TX = 3, + RTW_SCAN_REPORT_CANCELED = 16, + RTW_SCAN_REPORT_CANCELED_EXT = 17, + RTW_SCAN_REPORT_FW_DISABLED = 240, +}; + +enum rtw_scan_notify_id { + RTW_SCAN_NOTIFY_ID_PRESWITCH = 0, + RTW_SCAN_NOTIFY_ID_POSTSWITCH = 1, + RTW_SCAN_NOTIFY_ID_PROBE_PRETX = 2, + RTW_SCAN_NOTIFY_ID_PROBE_ISSUETX = 3, + RTW_SCAN_NOTIFY_ID_NULL0_PRETX = 4, + RTW_SCAN_NOTIFY_ID_NULL0_ISSUETX = 5, + RTW_SCAN_NOTIFY_ID_NULL0_POSTTX = 6, + RTW_SCAN_NOTIFY_ID_NULL1_PRETX = 7, + RTW_SCAN_NOTIFY_ID_NULL1_ISSUETX = 8, + RTW_SCAN_NOTIFY_ID_NULL1_POSTTX = 9, + RTW_SCAN_NOTIFY_ID_DWELLEXT = 10, +}; + +struct rtw_ch_switch_option { + u8 periodic_option; + u32 tsf_high; + u32 tsf_low; + u8 dest_ch_en; + u8 absolute_time_en; + u8 dest_ch; + u8 normal_period; + u8 normal_period_sel; + u8 normal_cycle; + u8 slow_period; + u8 slow_period_sel; + u8 nlo_en; + bool switch_en; + bool back_op_en; +}; + +struct rtw_fw_iter_ra_data { + struct rtw_dev *rtwdev; + u8 *payload; + u8 length; +}; + +struct rtw_beacon_filter_iter_data { + struct rtw_dev *rtwdev; + u8 *payload; +}; + +struct bulk_cb_wrap { + __le32 Signature; + __u32 Tag; + __le32 DataTransferLength; + __u8 Flags; + __u8 Lun; + __u8 Length; + __u8 CDB[16]; +}; + +struct bulk_cs_wrap { + __le32 Signature; + __u32 Tag; + __le32 Residue; + __u8 Status; +}; + +struct i2c_board_info { + char type[20]; + short unsigned int flags; + short unsigned int addr; + const char *dev_name; + void *platform_data; + struct device_node *of_node; + struct fwnode_handle *fwnode; + const struct software_node *swnode; + const struct resource *resources; + unsigned int num_resources; + int irq; +}; + +struct i2c_smbus_alert_setup { + int irq; +}; + +struct trace_event_raw_smbus_write { + struct trace_entry ent; + int 
adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_read { + struct trace_entry ent; + int adapter_nr; + __u16 flags; + __u16 addr; + __u8 command; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_reply { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_result { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 read_write; + __u8 command; + __s16 res; + __u32 protocol; + char __data[0]; +}; + +struct trace_event_data_offsets_smbus_write {}; + +struct trace_event_data_offsets_smbus_read {}; + +struct trace_event_data_offsets_smbus_reply {}; + +struct trace_event_data_offsets_smbus_result {}; + +typedef void (*btf_trace_smbus_write)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *); + +typedef void (*btf_trace_smbus_read)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int); + +typedef void (*btf_trace_smbus_reply)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *, int); + +typedef void (*btf_trace_smbus_result)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, int); + +struct md_cluster_operations { + int (*join)(struct mddev *, int); + int (*leave)(struct mddev *); + int (*slot_number)(struct mddev *); + int (*resync_info_update)(struct mddev *, sector_t, sector_t); + int (*resync_start_notify)(struct mddev *); + int (*resync_status_get)(struct mddev *); + void (*resync_info_get)(struct mddev *, sector_t *, sector_t *); + int (*metadata_update_start)(struct mddev *); + int (*metadata_update_finish)(struct mddev *); + void (*metadata_update_cancel)(struct mddev *); + int (*resync_start)(struct mddev *); + int (*resync_finish)(struct mddev *); + int (*area_resyncing)(struct mddev *, int, sector_t, sector_t); + int (*add_new_disk)(struct mddev *, struct md_rdev *); + void (*add_new_disk_cancel)(struct mddev *); + int (*new_disk_ack)(struct mddev *, bool); + int (*remove_disk)(struct mddev *, struct md_rdev *); + void (*load_bitmaps)(struct mddev *, int); + int (*gather_bitmaps)(struct md_rdev *); + int (*resize_bitmaps)(struct mddev *, sector_t, sector_t); + int (*lock_all_bitmaps)(struct mddev *); + void (*unlock_all_bitmaps)(struct mddev *); + void (*update_size)(struct mddev *, sector_t); +}; + +typedef __u16 bitmap_counter_t; + +enum bitmap_state { + BITMAP_STALE = 1, + BITMAP_WRITE_ERROR = 2, + BITMAP_HOSTENDIAN = 15, +}; + +struct bitmap_super_s { + __le32 magic; + __le32 version; + __u8 uuid[16]; + __le64 events; + __le64 events_cleared; + __le64 sync_size; + __le32 state; + __le32 chunksize; + __le32 daemon_sleep; + __le32 write_behind; + __le32 sectors_reserved; + __le32 nodes; + __u8 cluster_name[64]; + __u8 pad[120]; +}; + +typedef struct bitmap_super_s bitmap_super_t; + +struct bitmap_page { + char *map; + unsigned int hijacked: 1; + unsigned int pending: 1; + unsigned int count: 30; +}; + +struct bitmap_counts { + spinlock_t lock; + struct bitmap_page *bp; + long unsigned int pages; + long unsigned int missing_pages; + long unsigned int chunkshift; + long unsigned int chunks; +}; + +struct bitmap_storage { + struct file *file; + struct page *sb_page; + long unsigned int sb_index; + struct page **filemap; + long 
unsigned int *filemap_attr; + long unsigned int file_pages; + long unsigned int bytes; +}; + +struct bitmap { + struct bitmap_counts counts; + struct mddev *mddev; + long: 32; + __u64 events_cleared; + int need_sync; + struct bitmap_storage storage; + long unsigned int flags; + int allclean; + atomic_t behind_writes; + long unsigned int behind_writes_used; + long unsigned int daemon_lastrun; + long unsigned int last_end_sync; + atomic_t pending_writes; + wait_queue_head_t write_wait; + wait_queue_head_t overflow_wait; + wait_queue_head_t behind_wait; + struct kernfs_node *sysfs_can_clear; + int cluster_slot; +}; + +enum bitmap_page_attr { + BITMAP_PAGE_DIRTY = 0, + BITMAP_PAGE_PENDING = 1, + BITMAP_PAGE_NEEDWRITE = 2, +}; + +struct bitmap_unplug_work { + struct work_struct work; + struct bitmap *bitmap; + struct completion *done; +}; + +struct of_phandle_iterator { + const char *cells_name; + int cell_count; + const struct device_node *parent; + const __be32 *list_end; + const __be32 *phandle_end; + const __be32 *cur; + uint32_t cur_count; + phandle phandle; + struct device_node *node; +}; + +struct alias_prop { + struct list_head link; + const char *alias; + struct device_node *np; + int id; + char stem[0]; +}; + +struct bpf_stab { + struct bpf_map map; + struct sock **sks; + struct sk_psock_progs progs; + spinlock_t lock; +}; + +typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_map)(struct sk_buff *, struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64); + +struct sock_map_seq_info { + struct bpf_map *map; + struct sock *sk; + u32 index; +}; + +struct bpf_iter__sockmap { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + struct sock *sk; + }; +}; + +struct bpf_shtab_elem { + struct callback_head rcu; + u32 hash; + struct sock *sk; + struct hlist_node node; + u8 key[0]; +}; + +struct bpf_shtab_bucket { + struct hlist_head head; + spinlock_t lock; +}; + +struct bpf_shtab { + struct bpf_map map; + struct bpf_shtab_bucket *buckets; + u32 buckets_num; + u32 elem_size; + struct sk_psock_progs progs; + atomic_t count; +}; + +typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64); + +struct sock_hash_seq_info { + struct bpf_map *map; + struct bpf_shtab *htab; + u32 bucket_id; +}; + +struct sockmap_link { + struct bpf_link link; + struct bpf_map *map; + enum bpf_attach_type attach_type; +}; + +struct features_reply_data { + struct ethnl_reply_data base; + u32 hw[2]; + u32 wanted[2]; + u32 active[2]; + u32 nochange[2]; + u32 all[2]; +}; + +struct nf_defrag_hook { + struct module *owner; + int (*enable)(struct net *); + void (*disable)(struct net *); +}; + +struct bpf_nf_ctx { + const struct nf_hook_state *state; + struct sk_buff *skb; +}; + +struct bpf_nf_link { + struct bpf_link link; + struct nf_hook_ops hook_ops; + netns_tracker ns_tracker; + struct net *net; + u32 dead; + const struct nf_defrag_hook *defrag_hook; + long: 32; +}; + +enum nft_cmp_ops { + NFT_CMP_EQ = 0, + NFT_CMP_NEQ = 1, + NFT_CMP_LT = 2, + NFT_CMP_LTE = 3, + NFT_CMP_GT = 4, + NFT_CMP_GTE = 5, +}; + +enum nft_cmp_attributes { + NFTA_CMP_UNSPEC = 0, + NFTA_CMP_SREG = 1, + NFTA_CMP_OP = 
2, + NFTA_CMP_DATA = 3, + __NFTA_CMP_MAX = 4, +}; + +struct nft_cmp_fast_expr { + u32 data; + u32 mask; + u8 sreg; + u8 len; + bool inv; +}; + +struct nft_cmp16_fast_expr { + struct nft_data data; + struct nft_data mask; + u8 sreg; + u8 len; + bool inv; + long: 32; +}; + +enum nft_offload_reg_flags { + NFT_OFFLOAD_F_NETWORK2HOST = 1, +}; + +struct nft_cmp_expr { + struct nft_data data; + u8 sreg; + u8 len; + enum nft_cmp_ops op: 8; + long: 32; +}; + +union nft_cmp_offload_data { + u16 val16; + u32 val32; + u64 val64; +}; + +struct xt_cgroup_info_v0 { + __u32 id; + __u32 invert; +}; + +struct xt_cgroup_info_v1 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + char path[4096]; + __u32 classid; + void *priv; + long: 32; +}; + +struct xt_cgroup_info_v2 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + union { + char path[512]; + __u32 classid; + }; + long: 32; + void *priv; + long: 32; +}; + +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; + +typedef struct sock * (*udp_lookup_t)(const struct sk_buff *, __be16, __be16); + +typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *); + +typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); + +struct iptable_nat_pernet { + struct nf_hook_ops *nf_nat_ops; +}; + +struct in6_flowlabel_req { + struct in6_addr flr_dst; + __be32 flr_label; + __u8 flr_action; + __u8 flr_share; + __u16 flr_flags; + __u16 flr_expires; + __u16 flr_linger; + __u32 __flr_pad; +}; + +struct ip6fl_iter_state { + struct seq_net_private p; + struct pid_namespace *pid_ns; + int bucket; +}; + +struct hop_jumbo_hdr { + u8 nexthdr; + u8 hdrlen; + u8 tlv_type; + u8 tlv_len; + __be32 jumbo_payload_len; +}; + +struct cfg80211_per_bw_puncturing_values { + u8 len; + const u16 *valid_values; +}; + +struct ieee80211_hdr_3addr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; +}; + +enum ieee80211_ht_chanwidth_values { + IEEE80211_HT_CHANWIDTH_20MHZ = 0, + IEEE80211_HT_CHANWIDTH_ANY = 1, +}; + +struct ieee80211_mmie { + u8 element_id; + u8 length; + __le16 key_id; + u8 sequence_number[6]; + u8 mic[8]; +}; + +struct ieee80211_mmie_16 { + u8 element_id; + u8 length; + __le16 key_id; + u8 sequence_number[6]; + u8 mic[16]; +}; + +struct ieee80211_tdls_data { + u8 da[6]; + u8 sa[6]; + __be16 ether_type; + u8 payload_type; + u8 category; + u8 action_code; + union { + struct { + u8 dialog_token; + __le16 capability; + u8 variable[0]; + } __attribute__((packed)) setup_req; + struct { + __le16 status_code; + u8 dialog_token; + __le16 capability; + u8 variable[0]; + } __attribute__((packed)) setup_resp; + struct { + __le16 status_code; + u8 dialog_token; + u8 variable[0]; + } __attribute__((packed)) setup_cfm; + struct { + __le16 reason_code; + u8 variable[0]; + } teardown; + struct { + u8 dialog_token; + u8 variable[0]; + } discover_req; + struct { + u8 target_channel; + u8 oper_class; + u8 variable[0]; + } chan_switch_req; + struct { + __le16 status_code; + u8 variable[0]; + } chan_switch_resp; + } u; +}; + +enum ieee80211_spectrum_mgmt_actioncode { + WLAN_ACTION_SPCT_MSR_REQ = 0, + WLAN_ACTION_SPCT_MSR_RPRT = 1, + WLAN_ACTION_SPCT_TPC_REQ = 2, + WLAN_ACTION_SPCT_TPC_RPRT = 3, + WLAN_ACTION_SPCT_CHL_SWITCH = 4, +}; + +enum ieee80211_self_protected_actioncode { + WLAN_SP_RESERVED = 0, + WLAN_SP_MESH_PEERING_OPEN = 1, + 
WLAN_SP_MESH_PEERING_CONFIRM = 2, + WLAN_SP_MESH_PEERING_CLOSE = 3, + WLAN_SP_MGK_INFORM = 4, + WLAN_SP_MGK_ACK = 5, +}; + +enum ieee80211_unprotected_wnm_actioncode { + WLAN_UNPROTECTED_WNM_ACTION_TIM = 0, + WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1, +}; + +enum ieee80211_pub_actioncode { + WLAN_PUB_ACTION_20_40_BSS_COEX = 0, + WLAN_PUB_ACTION_DSE_ENABLEMENT = 1, + WLAN_PUB_ACTION_DSE_DEENABLEMENT = 2, + WLAN_PUB_ACTION_DSE_REG_LOC_ANN = 3, + WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, + WLAN_PUB_ACTION_DSE_MSMT_REQ = 5, + WLAN_PUB_ACTION_DSE_MSMT_RESP = 6, + WLAN_PUB_ACTION_MSMT_PILOT = 7, + WLAN_PUB_ACTION_DSE_PC = 8, + WLAN_PUB_ACTION_VENDOR_SPECIFIC = 9, + WLAN_PUB_ACTION_GAS_INITIAL_REQ = 10, + WLAN_PUB_ACTION_GAS_INITIAL_RESP = 11, + WLAN_PUB_ACTION_GAS_COMEBACK_REQ = 12, + WLAN_PUB_ACTION_GAS_COMEBACK_RESP = 13, + WLAN_PUB_ACTION_TDLS_DISCOVER_RES = 14, + WLAN_PUB_ACTION_LOC_TRACK_NOTI = 15, + WLAN_PUB_ACTION_QAB_REQUEST_FRAME = 16, + WLAN_PUB_ACTION_QAB_RESPONSE_FRAME = 17, + WLAN_PUB_ACTION_QMF_POLICY = 18, + WLAN_PUB_ACTION_QMF_POLICY_CHANGE = 19, + WLAN_PUB_ACTION_QLOAD_REQUEST = 20, + WLAN_PUB_ACTION_QLOAD_REPORT = 21, + WLAN_PUB_ACTION_HCCA_TXOP_ADVERT = 22, + WLAN_PUB_ACTION_HCCA_TXOP_RESPONSE = 23, + WLAN_PUB_ACTION_PUBLIC_KEY = 24, + WLAN_PUB_ACTION_CHANNEL_AVAIL_QUERY = 25, + WLAN_PUB_ACTION_CHANNEL_SCHEDULE_MGMT = 26, + WLAN_PUB_ACTION_CONTACT_VERI_SIGNAL = 27, + WLAN_PUB_ACTION_GDD_ENABLEMENT_REQ = 28, + WLAN_PUB_ACTION_GDD_ENABLEMENT_RESP = 29, + WLAN_PUB_ACTION_NETWORK_CHANNEL_CONTROL = 30, + WLAN_PUB_ACTION_WHITE_SPACE_MAP_ANN = 31, + WLAN_PUB_ACTION_FTM_REQUEST = 32, + WLAN_PUB_ACTION_FTM_RESPONSE = 33, + WLAN_PUB_ACTION_FILS_DISCOVERY = 34, +}; + +enum ieee80211_tdls_actioncode { + WLAN_TDLS_SETUP_REQUEST = 0, + WLAN_TDLS_SETUP_RESPONSE = 1, + WLAN_TDLS_SETUP_CONFIRM = 2, + WLAN_TDLS_TEARDOWN = 3, + WLAN_TDLS_PEER_TRAFFIC_INDICATION = 4, + WLAN_TDLS_CHANNEL_SWITCH_REQUEST = 5, + WLAN_TDLS_CHANNEL_SWITCH_RESPONSE = 6, + WLAN_TDLS_PEER_PSM_REQUEST = 7, + WLAN_TDLS_PEER_PSM_RESPONSE = 8, + WLAN_TDLS_PEER_TRAFFIC_RESPONSE = 9, + WLAN_TDLS_DISCOVERY_REQUEST = 10, +}; + +enum ieee80211_sa_query_action { + WLAN_ACTION_SA_QUERY_REQUEST = 0, + WLAN_ACTION_SA_QUERY_RESPONSE = 1, +}; + +enum nl80211_he_ru_alloc { + NL80211_RATE_INFO_HE_RU_ALLOC_26 = 0, + NL80211_RATE_INFO_HE_RU_ALLOC_52 = 1, + NL80211_RATE_INFO_HE_RU_ALLOC_106 = 2, + NL80211_RATE_INFO_HE_RU_ALLOC_242 = 3, + NL80211_RATE_INFO_HE_RU_ALLOC_484 = 4, + NL80211_RATE_INFO_HE_RU_ALLOC_996 = 5, + NL80211_RATE_INFO_HE_RU_ALLOC_2x996 = 6, +}; + +enum nl80211_eht_gi { + NL80211_RATE_INFO_EHT_GI_0_8 = 0, + NL80211_RATE_INFO_EHT_GI_1_6 = 1, + NL80211_RATE_INFO_EHT_GI_3_2 = 2, +}; + +enum ieee80211_radiotap_presence { + IEEE80211_RADIOTAP_TSFT = 0, + IEEE80211_RADIOTAP_FLAGS = 1, + IEEE80211_RADIOTAP_RATE = 2, + IEEE80211_RADIOTAP_CHANNEL = 3, + IEEE80211_RADIOTAP_FHSS = 4, + IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5, + IEEE80211_RADIOTAP_DBM_ANTNOISE = 6, + IEEE80211_RADIOTAP_LOCK_QUALITY = 7, + IEEE80211_RADIOTAP_TX_ATTENUATION = 8, + IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9, + IEEE80211_RADIOTAP_DBM_TX_POWER = 10, + IEEE80211_RADIOTAP_ANTENNA = 11, + IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12, + IEEE80211_RADIOTAP_DB_ANTNOISE = 13, + IEEE80211_RADIOTAP_RX_FLAGS = 14, + IEEE80211_RADIOTAP_TX_FLAGS = 15, + IEEE80211_RADIOTAP_RTS_RETRIES = 16, + IEEE80211_RADIOTAP_DATA_RETRIES = 17, + IEEE80211_RADIOTAP_MCS = 19, + IEEE80211_RADIOTAP_AMPDU_STATUS = 20, + IEEE80211_RADIOTAP_VHT = 21, + IEEE80211_RADIOTAP_TIMESTAMP = 22, + 
IEEE80211_RADIOTAP_HE = 23, + IEEE80211_RADIOTAP_HE_MU = 24, + IEEE80211_RADIOTAP_ZERO_LEN_PSDU = 26, + IEEE80211_RADIOTAP_LSIG = 27, + IEEE80211_RADIOTAP_TLV = 28, + IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, + IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30, + IEEE80211_RADIOTAP_EXT = 31, + IEEE80211_RADIOTAP_EHT_USIG = 33, + IEEE80211_RADIOTAP_EHT = 34, +}; + +enum ieee80211_radiotap_flags { + IEEE80211_RADIOTAP_F_CFP = 1, + IEEE80211_RADIOTAP_F_SHORTPRE = 2, + IEEE80211_RADIOTAP_F_WEP = 4, + IEEE80211_RADIOTAP_F_FRAG = 8, + IEEE80211_RADIOTAP_F_FCS = 16, + IEEE80211_RADIOTAP_F_DATAPAD = 32, + IEEE80211_RADIOTAP_F_BADFCS = 64, +}; + +enum ieee80211_radiotap_channel_flags { + IEEE80211_CHAN_CCK = 32, + IEEE80211_CHAN_OFDM = 64, + IEEE80211_CHAN_2GHZ = 128, + IEEE80211_CHAN_5GHZ = 256, + IEEE80211_CHAN_DYN = 1024, + IEEE80211_CHAN_HALF = 16384, + IEEE80211_CHAN_QUARTER = 32768, +}; + +enum ieee80211_radiotap_rx_flags { + IEEE80211_RADIOTAP_F_RX_BADPLCP = 2, +}; + +enum ieee80211_radiotap_mcs_have { + IEEE80211_RADIOTAP_MCS_HAVE_BW = 1, + IEEE80211_RADIOTAP_MCS_HAVE_MCS = 2, + IEEE80211_RADIOTAP_MCS_HAVE_GI = 4, + IEEE80211_RADIOTAP_MCS_HAVE_FMT = 8, + IEEE80211_RADIOTAP_MCS_HAVE_FEC = 16, + IEEE80211_RADIOTAP_MCS_HAVE_STBC = 32, +}; + +enum ieee80211_radiotap_mcs_flags { + IEEE80211_RADIOTAP_MCS_BW_MASK = 3, + IEEE80211_RADIOTAP_MCS_BW_20 = 0, + IEEE80211_RADIOTAP_MCS_BW_40 = 1, + IEEE80211_RADIOTAP_MCS_BW_20L = 2, + IEEE80211_RADIOTAP_MCS_BW_20U = 3, + IEEE80211_RADIOTAP_MCS_SGI = 4, + IEEE80211_RADIOTAP_MCS_FMT_GF = 8, + IEEE80211_RADIOTAP_MCS_FEC_LDPC = 16, + IEEE80211_RADIOTAP_MCS_STBC_MASK = 96, + IEEE80211_RADIOTAP_MCS_STBC_1 = 1, + IEEE80211_RADIOTAP_MCS_STBC_2 = 2, + IEEE80211_RADIOTAP_MCS_STBC_3 = 3, + IEEE80211_RADIOTAP_MCS_STBC_SHIFT = 5, +}; + +enum ieee80211_radiotap_ampdu_flags { + IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN = 1, + IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN = 2, + IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN = 4, + IEEE80211_RADIOTAP_AMPDU_IS_LAST = 8, + IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR = 16, + IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN = 32, + IEEE80211_RADIOTAP_AMPDU_EOF = 64, + IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN = 128, +}; + +enum ieee80211_radiotap_vht_flags { + IEEE80211_RADIOTAP_VHT_FLAG_STBC = 1, + IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA = 2, + IEEE80211_RADIOTAP_VHT_FLAG_SGI = 4, + IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 = 8, + IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM = 16, + IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED = 32, +}; + +enum ieee80211_radiotap_vht_coding { + IEEE80211_RADIOTAP_CODING_LDPC_USER0 = 1, + IEEE80211_RADIOTAP_CODING_LDPC_USER1 = 2, + IEEE80211_RADIOTAP_CODING_LDPC_USER2 = 4, + IEEE80211_RADIOTAP_CODING_LDPC_USER3 = 8, +}; + +enum ieee80211_radiotap_timestamp_flags { + IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT = 0, + IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT = 1, + IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 2, +}; + +struct ieee80211_radiotap_he { + __le16 data1; + __le16 data2; + __le16 data3; + __le16 data4; + __le16 data5; + __le16 data6; +}; + +enum ieee80211_radiotap_he_bits { + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3, + IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 4, + IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 8, + IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 16, + IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 32, + 
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 64, + IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 128, + IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 256, + IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 512, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 1024, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 2048, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 4096, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 8192, + IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 16384, + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 32768, + IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 1, + IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 2, + IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 4, + IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 8, + IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 16, + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 32, + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 64, + IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 128, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 16128, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 16384, + IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 32768, + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 63, + IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 64, + IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 128, + IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 3840, + IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 4096, + IEEE80211_RADIOTAP_HE_DATA3_CODING = 8192, + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 16384, + IEEE80211_RADIOTAP_HE_DATA3_STBC = 32768, + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 15, + IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 32752, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 15, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 240, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 3840, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 61440, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 15, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10, + IEEE80211_RADIOTAP_HE_DATA5_GI = 48, + IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0, + IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1, + IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 192, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3, + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 1792, + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 12288, + IEEE80211_RADIOTAP_HE_DATA5_TXBF = 16384, + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 32768, + IEEE80211_RADIOTAP_HE_DATA6_NSTS = 15, + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 16, + IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN = 32, + IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW = 192, + IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_20MHZ = 0, + IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_40MHZ = 1, + IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_80MHZ = 2, + IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_160MHZ = 3, + IEEE80211_RADIOTAP_HE_DATA6_TXOP = 32512, + IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 
32768, +}; + +struct ieee80211_radiotap_he_mu { + __le16 flags1; + __le16 flags2; + u8 ru_ch1[4]; + u8 ru_ch2[4]; +}; + +struct ieee80211_radiotap_lsig { + __le16 data1; + __le16 data2; +}; + +enum mac80211_rx_flags { + RX_FLAG_MMIC_ERROR = 1, + RX_FLAG_DECRYPTED = 2, + RX_FLAG_ONLY_MONITOR = 4, + RX_FLAG_MMIC_STRIPPED = 8, + RX_FLAG_IV_STRIPPED = 16, + RX_FLAG_FAILED_FCS_CRC = 32, + RX_FLAG_FAILED_PLCP_CRC = 64, + RX_FLAG_MACTIME_IS_RTAP_TS64 = 128, + RX_FLAG_NO_SIGNAL_VAL = 256, + RX_FLAG_AMPDU_DETAILS = 512, + RX_FLAG_PN_VALIDATED = 1024, + RX_FLAG_DUP_VALIDATED = 2048, + RX_FLAG_AMPDU_LAST_KNOWN = 4096, + RX_FLAG_AMPDU_IS_LAST = 8192, + RX_FLAG_AMPDU_DELIM_CRC_ERROR = 16384, + RX_FLAG_MACTIME = 196608, + RX_FLAG_MACTIME_PLCP_START = 65536, + RX_FLAG_MACTIME_START = 131072, + RX_FLAG_MACTIME_END = 196608, + RX_FLAG_SKIP_MONITOR = 262144, + RX_FLAG_AMSDU_MORE = 524288, + RX_FLAG_RADIOTAP_TLV_AT_END = 1048576, + RX_FLAG_MIC_STRIPPED = 2097152, + RX_FLAG_ALLOW_SAME_PN = 4194304, + RX_FLAG_ICV_STRIPPED = 8388608, + RX_FLAG_AMPDU_EOF_BIT = 16777216, + RX_FLAG_AMPDU_EOF_BIT_KNOWN = 33554432, + RX_FLAG_RADIOTAP_HE = 67108864, + RX_FLAG_RADIOTAP_HE_MU = 134217728, + RX_FLAG_RADIOTAP_LSIG = 268435456, + RX_FLAG_NO_PSDU = 536870912, + RX_FLAG_8023 = 1073741824, +}; + +enum mac80211_rx_encoding_flags { + RX_ENC_FLAG_SHORTPRE = 1, + RX_ENC_FLAG_SHORT_GI = 4, + RX_ENC_FLAG_HT_GF = 8, + RX_ENC_FLAG_STBC_MASK = 48, + RX_ENC_FLAG_LDPC = 64, + RX_ENC_FLAG_BF = 128, +}; + +enum ieee80211_rate_control_changed { + IEEE80211_RC_BW_CHANGED = 1, + IEEE80211_RC_SMPS_CHANGED = 2, + IEEE80211_RC_SUPP_RATES_CHANGED = 4, + IEEE80211_RC_NSS_CHANGED = 8, +}; + +enum sta_stats_type { + STA_STATS_RATE_TYPE_INVALID = 0, + STA_STATS_RATE_TYPE_LEGACY = 1, + STA_STATS_RATE_TYPE_HT = 2, + STA_STATS_RATE_TYPE_VHT = 3, + STA_STATS_RATE_TYPE_HE = 4, + STA_STATS_RATE_TYPE_S1G = 5, + STA_STATS_RATE_TYPE_EHT = 6, +}; + +enum mac80211_drop_reason { + RX_CONTINUE = 1, + RX_QUEUED = 0, + RX_DROP_MONITOR = 131072, + RX_DROP_M_UNEXPECTED_4ADDR_FRAME = 131073, + RX_DROP_M_BAD_BCN_KEYIDX = 131074, + RX_DROP_M_BAD_MGMT_KEYIDX = 131075, + RX_DROP_U_MIC_FAIL = 65537, + RX_DROP_U_REPLAY = 65538, + RX_DROP_U_BAD_MMIE = 65539, + RX_DROP_U_DUP = 65540, + RX_DROP_U_SPURIOUS = 65541, + RX_DROP_U_DECRYPT_FAIL = 65542, + RX_DROP_U_NO_KEY_ID = 65543, + RX_DROP_U_BAD_CIPHER = 65544, + RX_DROP_U_OOM = 65545, + RX_DROP_U_NONSEQ_PN = 65546, + RX_DROP_U_BAD_KEY_COLOR = 65547, + RX_DROP_U_BAD_4ADDR = 65548, + RX_DROP_U_BAD_AMSDU = 65549, + RX_DROP_U_BAD_AMSDU_CIPHER = 65550, + RX_DROP_U_INVALID_8023 = 65551, + RX_DROP_U_RUNT_ACTION = 65552, + RX_DROP_U_UNPROT_ACTION = 65553, + RX_DROP_U_UNPROT_DUAL = 65554, + RX_DROP_U_UNPROT_UCAST_MGMT = 65555, + RX_DROP_U_UNPROT_MCAST_MGMT = 65556, + RX_DROP_U_UNPROT_BEACON = 65557, + RX_DROP_U_UNPROT_UNICAST_PUB_ACTION = 65558, + RX_DROP_U_UNPROT_ROBUST_ACTION = 65559, + RX_DROP_U_ACTION_UNKNOWN_SRC = 65560, + RX_DROP_U_REJECTED_ACTION_RESPONSE = 65561, + RX_DROP_U_EXPECT_DEFRAG_PROT = 65562, + RX_DROP_U_WEP_DEC_FAIL = 65563, + RX_DROP_U_NO_IV = 65564, + RX_DROP_U_NO_ICV = 65565, + RX_DROP_U_AP_RX_GROUPCAST = 65566, + RX_DROP_U_SHORT_MMIC = 65567, + RX_DROP_U_MMIC_FAIL = 65568, + RX_DROP_U_SHORT_TKIP = 65569, + RX_DROP_U_TKIP_FAIL = 65570, + RX_DROP_U_SHORT_CCMP = 65571, + RX_DROP_U_SHORT_CCMP_MIC = 65572, + RX_DROP_U_SHORT_GCMP = 65573, + RX_DROP_U_SHORT_GCMP_MIC = 65574, + RX_DROP_U_SHORT_CMAC = 65575, + RX_DROP_U_SHORT_CMAC256 = 65576, + RX_DROP_U_SHORT_GMAC = 65577, + RX_DROP_U_UNEXPECTED_VLAN_4ADDR = 65578, + 
RX_DROP_U_UNEXPECTED_STA_4ADDR = 65579, + RX_DROP_U_UNEXPECTED_VLAN_MCAST = 65580, + RX_DROP_U_NOT_PORT_CONTROL = 65581, + RX_DROP_U_UNKNOWN_ACTION_REJECTED = 65582, +}; + +enum ieee80211_packet_rx_flags { + IEEE80211_RX_AMSDU = 8, + IEEE80211_RX_MALFORMED_ACTION_FRM = 16, + IEEE80211_RX_DEFERRED_RELEASE = 32, +}; + +enum ieee80211_rx_flags { + IEEE80211_RX_CMNTR = 1, + IEEE80211_RX_BEACON_REPORTED = 2, +}; + +struct ieee80211_rx_data { + struct list_head *list; + struct sk_buff *skb; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_link_data *link; + struct sta_info *sta; + struct link_sta_info *link_sta; + struct ieee80211_key *key; + unsigned int flags; + int seqno_idx; + int security_idx; + int link_id; + union { + struct { + u32 iv32; + u16 iv16; + } tkip; + struct { + u8 pn[6]; + } ccm_gcm; + }; +}; + +enum { + IEEE80211_RX_MSG = 1, + IEEE80211_TX_STATUS_MSG = 2, +}; + +struct taint_flag { + char c_true; + char c_false; + bool module; + const char *desc; +}; + +struct latch_tree_root { + seqcount_latch_t seq; + struct rb_root tree[2]; +}; + +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source; + struct module *target; +}; + +enum kernel_load_data_id { + LOADING_UNKNOWN = 0, + LOADING_FIRMWARE = 1, + LOADING_MODULE = 2, + LOADING_KEXEC_IMAGE = 3, + LOADING_KEXEC_INITRAMFS = 4, + LOADING_POLICY = 5, + LOADING_X509_CERTIFICATE = 6, + LOADING_MAX_ID = 7, +}; + +enum mod_license { + NOT_GPL_ONLY = 0, + GPL_ONLY = 1, +}; + +struct find_symbol_arg { + const char *name; + bool gplok; + bool warn; + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + +enum fail_dup_mod_reason { + FAIL_DUP_MOD_BECOMING = 0, + FAIL_DUP_MOD_LOAD = 1, +}; + +struct mod_tree_root { + struct latch_tree_root root; + long unsigned int addr_min; + long unsigned int addr_max; +}; + +struct trace_event_raw_module_load { + struct trace_entry ent; + unsigned int taints; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_free { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_refcnt { + struct trace_entry ent; + long unsigned int ip; + int refcnt; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_request { + struct trace_entry ent; + long unsigned int ip; + bool wait; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_data_offsets_module_load { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_free { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_refcnt { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_request { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_module_load)(void *, struct module *); + +typedef void (*btf_trace_module_free)(void *, struct module *); + +typedef void (*btf_trace_module_get)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_put)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_request)(void *, char *, bool, long unsigned int); + +struct symsearch { + const struct kernel_symbol *start; + const struct kernel_symbol *stop; + const s32 *crcs; + enum mod_license license; +}; + +struct mod_initfree { + struct llist_node node; + void *init_text; + void *init_data; + void *init_rodata; +}; + +struct idempotent { + const void *cookie; + struct 
hlist_node entry; + struct completion complete; + int ret; +}; + +struct cgroupstats { + __u64 nr_sleeping; + __u64 nr_running; + __u64 nr_stopped; + __u64 nr_uninterruptible; + __u64 nr_io_wait; +}; + +enum cgroup_filetype { + CGROUP_FILE_PROCS = 0, + CGROUP_FILE_TASKS = 1, +}; + +struct cgroup_pidlist { + struct { + enum cgroup_filetype type; + struct pid_namespace *ns; + } key; + pid_t *list; + int length; + struct list_head links; + struct cgroup *owner; + struct delayed_work destroy_dwork; +}; + +enum cgroup1_param { + Opt_all = 0, + Opt_clone_children = 1, + Opt_cpuset_v2_mode = 2, + Opt_name = 3, + Opt_none = 4, + Opt_noprefix = 5, + Opt_release_agent = 6, + Opt_xattr = 7, + Opt_favordynmods___2 = 8, + Opt_nofavordynmods = 9, +}; + +typedef int (*eventfs_callback)(const char *, umode_t *, void **, const struct file_operations **); + +typedef void (*eventfs_release)(const char *, void *); + +struct eventfs_entry { + const char *name; + eventfs_callback callback; + eventfs_release release; +}; + +enum { + TRACE_PIDS = 1, + TRACE_NO_PIDS = 2, +}; + +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(struct trace_array *, struct ftrace_hash *, char *, char *, char *, int); +}; + +struct ftrace_probe_ops { + void (*func)(long unsigned int, long unsigned int, struct trace_array *, struct ftrace_probe_ops *, void *); + int (*init)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *, void **); + void (*free)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *); + int (*print)(struct seq_file *, long unsigned int, struct ftrace_probe_ops *, void *); +}; + +typedef int (*ftrace_mapper_func)(void *); + +struct module_string { + struct list_head next; + struct module *module; + char *str; +}; + +enum { + FORMAT_HEADER = 1, + FORMAT_FIELD_SEPERATOR = 2, + FORMAT_PRINTFMT = 3, +}; + +struct boot_triggers { + const char *event; + char *trigger; +}; + +struct event_probe_data { + struct trace_event_file *file; + long unsigned int count; + int ref; + bool enable; +}; + +enum ctx_state { + CT_STATE_DISABLED = -1, + CT_STATE_KERNEL = 0, + CT_STATE_IDLE = 1, + CT_STATE_USER = 2, + CT_STATE_GUEST = 3, + CT_STATE_MAX = 4, +}; + +struct context_tracking { + bool active; + int recursion; + atomic_t state; + long int nesting; + long int nmi_nesting; +}; + +struct trace_event_raw_context_tracking_user { + struct trace_entry ent; + int dummy; + char __data[0]; +}; + +struct trace_event_data_offsets_context_tracking_user {}; + +typedef void (*btf_trace_user_enter)(void *, int); + +typedef void (*btf_trace_user_exit)(void *, int); + +typedef struct folio *new_folio_t(struct folio *, long unsigned int); + +typedef void free_folio_t(struct folio *, long unsigned int); + +struct migration_target_control { + int nid; + nodemask_t *nmask; + gfp_t gfp_mask; + enum migrate_reason reason; +}; + +enum { + FOLL_TOUCH = 65536, + FOLL_TRIED = 131072, + FOLL_REMOTE = 262144, + FOLL_PIN = 524288, + FOLL_FAST_ONLY = 1048576, + FOLL_UNLOCKABLE = 2097152, + FOLL_MADV_POPULATE = 4194304, +}; + +struct follow_page_context { + struct dev_pagemap *pgmap; + unsigned int page_mask; +}; + +struct pages_or_folios { + union { + struct page **pages; + struct folio **folios; + void **entries; + }; + bool has_folios; + long int nr_entries; +}; + +enum memcg_stat_item { + MEMCG_SWAP = 44, + MEMCG_SOCK = 45, + MEMCG_PERCPU_B = 46, + MEMCG_VMALLOC = 47, + MEMCG_KMEM = 48, + MEMCG_ZSWAP_B = 49, + MEMCG_ZSWAPPED = 50, + MEMCG_NR_STAT = 51, +}; + +struct 
mem_cgroup_reclaim_cookie { + pg_data_t *pgdat; + int generation; +}; + +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_acomp { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct comp_alg_common { + struct crypto_alg base; +}; + +struct crypto_acomp_ctx { + struct crypto_acomp *acomp; + struct acomp_req *req; + struct crypto_wait wait; + u8 *buffer; + struct mutex mutex; + bool is_sleepable; +}; + +struct zpool; + +struct zswap_pool { + struct zpool *zpool; + struct crypto_acomp_ctx *acomp_ctx; + struct percpu_ref ref; + struct list_head list; + struct work_struct release_work; + struct hlist_node node; + char tfm_name[128]; +}; + +struct zswap_entry { + swp_entry_t swpentry; + unsigned int length; + bool referenced; + struct zswap_pool *pool; + long unsigned int handle; + struct obj_cgroup *objcg; + struct list_head lru; +}; + +enum zswap_init_type { + ZSWAP_UNINIT = 0, + ZSWAP_INIT_SUCCEED = 1, + ZSWAP_INIT_FAILED = 2, +}; + +typedef struct kobject *kobj_probe_t(dev_t, int *, void *); + +struct char_device_struct { + struct char_device_struct *next; + unsigned int major; + unsigned int baseminor; + int minorct; + char name[64]; + struct cdev *cdev; +}; + +typedef __kernel_uid_t __kernel_old_uid_t; + +typedef __kernel_gid_t __kernel_old_gid_t; + +typedef __kernel_old_uid_t old_uid_t; + +typedef __kernel_old_gid_t old_gid_t; + +struct siginfo { + union { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; + int _si_pad[32]; + }; +}; + +typedef struct siginfo siginfo_t; + +typedef long unsigned int elf_greg_t; + +typedef elf_greg_t elf_gregset_t[18]; + +struct elf32_phdr { + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; +}; + +struct core_vma_metadata; + +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct file *file; + long unsigned int limit; + long unsigned int mm_flags; + int cpu; + long: 32; + loff_t written; + loff_t pos; + loff_t to_skip; + int vma_count; + size_t vma_data_size; + struct core_vma_metadata *vma_meta; + long: 32; +}; + +struct elf_siginfo { + int si_signo; + int si_code; + int si_errno; +}; + +struct elf_prstatus_common { + struct elf_siginfo pr_info; + short int pr_cursig; + long unsigned int pr_sigpend; + long unsigned int pr_sighold; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; + struct __kernel_old_timeval pr_stime; + struct __kernel_old_timeval pr_cutime; + struct __kernel_old_timeval pr_cstime; +}; + +struct elf_prstatus { + struct elf_prstatus_common common; + elf_gregset_t pr_reg; + int pr_fpvalid; +}; + +struct elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + long unsigned int pr_flag; + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +struct core_vma_metadata { + long unsigned int 
start; + long unsigned int end; + long unsigned int flags; + long unsigned int dump_size; + long unsigned int pgoff; + struct file *file; +}; + +struct arch_elf_state {}; + +struct memelfnote { + const char *name; + int type; + unsigned int datasz; + void *data; +}; + +struct membuf { + void *p; + size_t left; +}; + +struct user_regset; + +typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); + +typedef int user_regset_get2_fn(struct task_struct *, const struct user_regset *, struct membuf); + +typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *); + +typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); + +struct user_regset { + user_regset_get2_fn *regset_get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + +struct elf_thread_core_info { + struct elf_thread_core_info *next; + struct task_struct *task; + struct elf_prstatus prstatus; + struct memelfnote notes[0]; +}; + +struct elf_note_info { + struct elf_thread_core_info *thread; + struct memelfnote psinfo; + struct memelfnote signote; + struct memelfnote auxv; + struct memelfnote files; + siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +struct ext4_allocation_request { + struct inode *inode; + unsigned int len; + ext4_lblk_t logical; + ext4_lblk_t lleft; + ext4_lblk_t lright; + long: 32; + ext4_fsblk_t goal; + ext4_fsblk_t pleft; + ext4_fsblk_t pright; + unsigned int flags; + long: 32; +}; + +struct fuse_submount_lookup { + refcount_t count; + long: 32; + u64 nodeid; + struct fuse_forget_link *forget; + long: 32; +}; + +struct fuse_inode { + struct inode inode; + u64 nodeid; + u64 nlookup; + struct fuse_forget_link *forget; + long: 32; + u64 i_time; + u32 inval_mask; + umode_t orig_i_mode; + struct timespec64 i_btime; + u64 orig_ino; + u64 attr_version; + union { + struct { + struct list_head write_files; + struct list_head queued_writes; + int writectr; + int iocachectr; + wait_queue_head_t page_waitq; + wait_queue_head_t direct_io_waitq; + struct rb_root writepages; + }; + struct { + bool cached; + long: 32; + loff_t size; + loff_t pos; + u64 version; + struct timespec64 mtime; + u64 iversion; + spinlock_t lock; + long: 32; + } rdc; + }; + long unsigned int state; + struct mutex mutex; + spinlock_t lock; + struct fuse_submount_lookup *submount_lookup; + struct fuse_backing *fb; + long: 32; +}; + +enum { + FUSE_I_ADVISE_RDPLUS = 0, + FUSE_I_INIT_RDPLUS = 1, + FUSE_I_SIZE_UNSTABLE = 2, + FUSE_I_BAD = 3, + FUSE_I_BTIME = 4, + FUSE_I_CACHE_IO_MODE = 5, +}; + +enum btrfs_feature_set { + FEAT_COMPAT = 0, + FEAT_COMPAT_RO = 1, + FEAT_INCOMPAT = 2, + FEAT_MAX = 3, +}; + +struct btrfs_feature_attr { + struct kobj_attribute kobj_attr; + enum btrfs_feature_set feature_set; + long: 32; + u64 feature_bit; +}; + +struct raid_kobject { + u64 flags; + struct kobject kobj; + long: 32; +}; + +struct vfs_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; +}; + +struct vfs_ns_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; + __le32 rootid; +}; + +struct cpu_vfs_cap_data { + __u32 
magic_etc; + kuid_t rootid; + kernel_cap_t permitted; + kernel_cap_t inheritable; +}; + +struct timezone { + int tz_minuteswest; + int tz_dsttime; +}; + +typedef struct { + __be64 a; + __be64 b; +} be128; + +struct gf128mul_4k { + be128 t[256]; +}; + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[16]; + u32 bytes; +}; + +struct queue_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct gendisk *, char *); + ssize_t (*store)(struct gendisk *, const char *, size_t); + void (*load_module)(struct gendisk *, const char *, size_t); +}; + +enum dd_data_dir { + DD_READ = 0, + DD_WRITE = 1, +}; + +enum { + DD_DIR_COUNT = 2, +}; + +enum dd_prio { + DD_RT_PRIO = 0, + DD_BE_PRIO = 1, + DD_IDLE_PRIO = 2, + DD_PRIO_MAX = 2, +}; + +enum { + DD_PRIO_COUNT = 3, +}; + +struct io_stats_per_prio { + uint32_t inserted; + uint32_t merged; + uint32_t dispatched; + atomic_t completed; +}; + +struct dd_per_prio { + struct list_head dispatch; + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + sector_t latest_pos[2]; + struct io_stats_per_prio stats; +}; + +struct deadline_data { + struct dd_per_prio per_prio[3]; + enum dd_data_dir last_dir; + unsigned int batching; + unsigned int starved; + int fifo_expire[2]; + int fifo_batch; + int writes_starved; + int front_merges; + u32 async_depth; + int prio_aging_expire; + spinlock_t lock; + long: 32; +}; + +struct io_sync { + struct file *file; + long: 32; + loff_t len; + loff_t off; + int flags; + int mode; +}; + +struct region { + unsigned int start; + unsigned int off; + unsigned int group_len; + unsigned int end; + unsigned int nbits; +}; + +struct gen_pool_chunk { + struct list_head next_chunk; + atomic_long_t avail; + phys_addr_t phys_addr; + void *owner; + long unsigned int start_addr; + long unsigned int end_addr; + long unsigned int bits[0]; +}; + +struct genpool_data_align { + int align; +}; + +struct genpool_data_fixed { + long unsigned int offset; +}; + +struct font_data { + unsigned int extra[4]; + const unsigned char data[0]; +}; + +enum { + LOGIC_PIO_INDIRECT = 0, + LOGIC_PIO_CPU_MMIO = 1, +}; + +struct logic_pio_host_ops; + +struct logic_pio_hwaddr { + struct list_head list; + const struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; + long unsigned int flags; + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *, long unsigned int, size_t); + void (*out)(void *, long unsigned int, u32, size_t); + u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int); + void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int); +}; + +struct hotplug_slot_ops; + +struct hotplug_slot { + const struct hotplug_slot_ops *ops; + struct list_head slot_list; + struct pci_slot *pci_slot; + struct module *owner; + const char *mod_name; +}; + +enum pcie_reset_state { + pcie_deassert_reset = 1, + pcie_warm_reset = 2, + pcie_hot_reset = 3, +}; + +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = 1, + PCI_BUS_FLAGS_NO_MMRBC = 2, + PCI_BUS_FLAGS_NO_AERSID = 4, + PCI_BUS_FLAGS_NO_EXTCFG = 8, +}; + +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0, + PCIE_LNK_X1 = 1, + PCIE_LNK_X2 = 2, + PCIE_LNK_X4 = 4, + PCIE_LNK_X8 = 8, + PCIE_LNK_X12 = 12, + PCIE_LNK_X16 = 16, + PCIE_LNK_X32 = 32, + PCIE_LNK_WIDTH_UNKNOWN = 255, +}; + +enum { + PCI_REASSIGN_ALL_RSRC = 1, + PCI_REASSIGN_ALL_BUS = 2, + PCI_PROBE_ONLY = 4, + PCI_CAN_SKIP_ISA_ALIGN = 8, + PCI_ENABLE_PROC_DOMAINS = 16, + 
PCI_COMPAT_DOMAIN_0 = 32, + PCI_SCAN_ALL_PCIE_DEVS = 64, +}; + +enum pcie_bus_config_types { + PCIE_BUS_TUNE_OFF = 0, + PCIE_BUS_DEFAULT = 1, + PCIE_BUS_SAFE = 2, + PCIE_BUS_PERFORMANCE = 3, + PCIE_BUS_PEER2PEER = 4, +}; + +typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); + +struct hotplug_slot_ops { + int (*enable_slot)(struct hotplug_slot *); + int (*disable_slot)(struct hotplug_slot *); + int (*set_attention_status)(struct hotplug_slot *, u8); + int (*hardware_test)(struct hotplug_slot *, u32); + int (*get_power_status)(struct hotplug_slot *, u8 *); + int (*get_attention_status)(struct hotplug_slot *, u8 *); + int (*get_latch_status)(struct hotplug_slot *, u8 *); + int (*get_adapter_status)(struct hotplug_slot *, u8 *); + int (*reset_slot)(struct hotplug_slot *, bool); +}; + +struct pcie_tlp_log { + u32 dw[4]; +}; + +struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +struct pci_reset_fn_method { + int (*reset_fn)(struct pci_dev *, bool); + char *name; +}; + +struct pci_pme_device { + struct list_head list; + struct pci_dev *dev; +}; + +struct pci_acs { + u16 cap; + u16 ctrl; + u16 fw_ctrl; +}; + +struct pci_saved_state { + u32 config_space[16]; + struct pci_cap_saved_data cap[0]; +}; + +struct clk_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct clk *clk; + struct clk_hw *clk_hw; +}; + +struct clk_lookup_alloc { + struct clk_lookup cl; + char dev_id[24]; + char con_id[16]; +}; + +struct virtio_driver { + struct device_driver driver; + const struct virtio_device_id *id_table; + const unsigned int *feature_table; + unsigned int feature_table_size; + const unsigned int *feature_table_legacy; + unsigned int feature_table_size_legacy; + int (*validate)(struct virtio_device *); + int (*probe)(struct virtio_device *); + void (*scan)(struct virtio_device *); + void (*remove)(struct virtio_device *); + void (*config_changed)(struct virtio_device *); + int (*freeze)(struct virtio_device *); + int (*restore)(struct virtio_device *); +}; + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(const struct class *, const struct class_attribute *, char *); + ssize_t (*store)(const struct class *, const struct class_attribute *, const char *, size_t); +}; + +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +struct class_compat { + struct kobject *kobj; +}; + +enum scsi_scan_mode { + SCSI_SCAN_INITIAL = 0, + SCSI_SCAN_RESCAN = 1, + SCSI_SCAN_MANUAL = 2, +}; + +struct scsi_event { + enum scsi_device_event evt_type; + struct list_head node; +}; + +typedef void (*activate_complete)(void *, int); + +struct scsi_device_handler { + struct list_head list; + struct module *module; + const char *name; + enum scsi_disposition (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); + int (*attach)(struct scsi_device *); + void (*detach)(struct scsi_device *); + int (*activate)(struct scsi_device *, activate_complete, void *); + blk_status_t (*prep_fn)(struct scsi_device *, struct request *); + int (*set_params)(struct scsi_device *, const char *); + void (*rescan)(struct scsi_device *); +}; + +struct phylib_stubs { + int (*hwtstamp_get)(struct phy_device *, struct kernel_hwtstamp_config *); + int (*hwtstamp_set)(struct phy_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); +}; + +enum phy_state_work { + PHY_STATE_WORK_NONE = 
0, + PHY_STATE_WORK_ANEG = 1, + PHY_STATE_WORK_SUSPEND = 2, +}; + +enum mac_version { + RTL_GIGA_MAC_VER_02 = 0, + RTL_GIGA_MAC_VER_03 = 1, + RTL_GIGA_MAC_VER_04 = 2, + RTL_GIGA_MAC_VER_05 = 3, + RTL_GIGA_MAC_VER_06 = 4, + RTL_GIGA_MAC_VER_07 = 5, + RTL_GIGA_MAC_VER_08 = 6, + RTL_GIGA_MAC_VER_09 = 7, + RTL_GIGA_MAC_VER_10 = 8, + RTL_GIGA_MAC_VER_11 = 9, + RTL_GIGA_MAC_VER_14 = 10, + RTL_GIGA_MAC_VER_17 = 11, + RTL_GIGA_MAC_VER_18 = 12, + RTL_GIGA_MAC_VER_19 = 13, + RTL_GIGA_MAC_VER_20 = 14, + RTL_GIGA_MAC_VER_21 = 15, + RTL_GIGA_MAC_VER_22 = 16, + RTL_GIGA_MAC_VER_23 = 17, + RTL_GIGA_MAC_VER_24 = 18, + RTL_GIGA_MAC_VER_25 = 19, + RTL_GIGA_MAC_VER_26 = 20, + RTL_GIGA_MAC_VER_28 = 21, + RTL_GIGA_MAC_VER_29 = 22, + RTL_GIGA_MAC_VER_30 = 23, + RTL_GIGA_MAC_VER_31 = 24, + RTL_GIGA_MAC_VER_32 = 25, + RTL_GIGA_MAC_VER_33 = 26, + RTL_GIGA_MAC_VER_34 = 27, + RTL_GIGA_MAC_VER_35 = 28, + RTL_GIGA_MAC_VER_36 = 29, + RTL_GIGA_MAC_VER_37 = 30, + RTL_GIGA_MAC_VER_38 = 31, + RTL_GIGA_MAC_VER_39 = 32, + RTL_GIGA_MAC_VER_40 = 33, + RTL_GIGA_MAC_VER_42 = 34, + RTL_GIGA_MAC_VER_43 = 35, + RTL_GIGA_MAC_VER_44 = 36, + RTL_GIGA_MAC_VER_46 = 37, + RTL_GIGA_MAC_VER_48 = 38, + RTL_GIGA_MAC_VER_51 = 39, + RTL_GIGA_MAC_VER_52 = 40, + RTL_GIGA_MAC_VER_53 = 41, + RTL_GIGA_MAC_VER_61 = 42, + RTL_GIGA_MAC_VER_63 = 43, + RTL_GIGA_MAC_VER_64 = 44, + RTL_GIGA_MAC_VER_65 = 45, + RTL_GIGA_MAC_VER_66 = 46, + RTL_GIGA_MAC_NONE = 47, +}; + +struct rtl8169_private; + +typedef void (*rtl_fw_write_t)(struct rtl8169_private *, int, int); + +enum rtl_dash_type { + RTL_DASH_NONE = 0, + RTL_DASH_DP = 1, + RTL_DASH_EP = 2, +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_tc_offsets { + bool inited; + long: 32; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +struct r8169_led_classdev; + +struct TxDesc; + +struct RxDesc; + +struct rtl8169_counters; + +struct rtl_fw___2; + +struct rtl8169_private { + void *mmio_addr; + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; + u32 cur_tx; + u32 dirty_tx; + struct TxDesc *TxDescArray; + struct RxDesc *RxDescArray; + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[256]; + struct ring_info tx_skb[256]; + u16 cp_cmd; + u16 tx_lpi_timer; + u32 irq_mask; + int irq; + struct clk *clk; + struct { + long unsigned int flags[1]; + struct work_struct work; + } wk; + raw_spinlock_t mac_ocp_lock; + struct mutex led_lock; + unsigned int supports_gmii: 1; + unsigned int aspm_manageable: 1; + unsigned int dash_enabled: 1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + long: 32; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + const char *fw_name; + struct rtl_fw___2 *rtl_fw; + struct r8169_led_classdev *leds; + u32 ocp_base; + long: 32; +}; + +typedef int (*rtl_fw_read_t)(struct rtl8169_private *, int); + +struct rtl_fw_phy_action { + __le32 *code; + size_t size; +}; + +struct rtl_fw___2 { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + char version[32]; + struct rtl_fw_phy_action phy_action; +}; + +enum rtl_registers { + MAC0 = 0, + MAC4 = 4, + MAR0 = 8, + CounterAddrLow = 16, + CounterAddrHigh = 20, + TxDescStartAddrLow = 32, + TxDescStartAddrHigh = 36, + TxHDescStartAddrLow = 40, + 
TxHDescStartAddrHigh = 44, + FLASH = 48, + ERSR = 54, + ChipCmd = 55, + TxPoll = 56, + IntrMask = 60, + IntrStatus = 62, + TxConfig = 64, + RxConfig = 68, + Cfg9346 = 80, + Config0 = 81, + Config1 = 82, + Config2 = 83, + Config3 = 84, + Config4 = 85, + Config5 = 86, + PHYAR = 96, + PHYstatus = 108, + RxMaxSize = 218, + CPlusCmd = 224, + IntrMitigate = 226, + RxDescAddrLow = 228, + RxDescAddrHigh = 232, + EarlyTxThres = 236, + MaxTxPacketSize = 236, + FuncEvent = 240, + FuncEventMask = 244, + FuncPresetState = 248, + IBCR0 = 248, + IBCR2 = 249, + IBIMR0 = 250, + IBISR0 = 251, + FuncForceEvent = 252, +}; + +enum rtl8168_8101_registers { + CSIDR = 100, + CSIAR = 104, + PMCH = 111, + EPHYAR = 128, + DLLPR = 208, + DBG_REG = 209, + TWSI = 210, + MCU = 211, + EFUSEAR = 220, + MISC_1 = 242, +}; + +enum rtl8168_registers { + LED_CTRL = 24, + LED_FREQ = 26, + EEE_LED = 27, + ERIDR = 112, + ERIAR = 116, + EPHY_RXER_NUM = 124, + OCPDR = 176, + OCPAR = 180, + GPHY_OCP = 184, + RDSAR1 = 208, + MISC = 240, +}; + +enum rtl8125_registers { + LEDSEL0 = 24, + INT_CFG0_8125 = 52, + IntrMask_8125 = 56, + IntrStatus_8125 = 60, + INT_CFG1_8125 = 122, + LEDSEL2 = 132, + LEDSEL1 = 134, + TxPoll_8125 = 144, + LEDSEL3 = 150, + MAC0_BKP = 6624, + RSS_CTRL_8125 = 17664, + Q_NUM_CTRL_8125 = 18432, + EEE_TXIDLE_TIMER_8125 = 24648, +}; + +enum rtl_register_content___2 { + SYSErr = 32768, + PCSTimeout = 16384, + SWInt = 256, + TxDescUnavail = 128, + RxFIFOOver = 64, + LinkChg = 32, + RxOverflow = 16, + TxErr = 8, + TxOK = 4, + RxErr = 2, + RxOK = 1, + RxRWT = 4194304, + RxRES = 2097152, + RxRUNT = 1048576, + RxCRC = 524288, + StopReq = 128, + CmdReset = 16, + CmdRxEnb = 8, + CmdTxEnb = 4, + RxBufEmpty = 1, + HPQ = 128, + NPQ = 64, + FSWInt = 1, + Cfg9346_Lock = 0, + Cfg9346_Unlock = 192, + AcceptErr = 32, + AcceptRunt = 16, + AcceptBroadcast = 8, + AcceptMulticast = 4, + AcceptMyPhys = 2, + AcceptAllPhys = 1, + TxInterFrameGapShift = 24, + TxDMAShift = 8, + LEDS1 = 128, + LEDS0 = 64, + Speed_down = 16, + MEMMAP = 8, + IOMAP = 4, + VPD = 2, + PMEnable = 1, + ClkReqEn = 128, + MSIEnable = 32, + PCI_Clock_66MHz = 1, + PCI_Clock_33MHz = 0, + MagicPacket = 32, + LinkUp = 16, + Jumbo_En0 = 4, + Rdy_to_L23 = 2, + Beacon_en = 1, + Jumbo_En1 = 2, + BWF = 64, + MWF = 32, + UWF = 16, + Spi_en = 8, + LanWake = 2, + PMEStatus = 1, + ASPM_en = 1, + EnableBist = 32768, + Mac_dbgo_oe = 16384, + EnAnaPLL = 16384, + Normal_mode = 8192, + Force_half_dup = 4096, + Force_rxflow_en = 2048, + Force_txflow_en = 1024, + Cxpl_dbg_sel = 512, + ASF = 256, + PktCntrDisable = 128, + Mac_dbgo_sel = 28, + RxVlan = 64, + RxChkSum = 32, + PCIDAC = 16, + PCIMulRW = 8, + TBI_Enable = 128, + TxFlowCtrl = 64, + RxFlowCtrl = 32, + _1000bpsF = 16, + _100bps___2 = 8, + _10bps___2 = 4, + LinkStatus = 2, + FullDup = 1, + CounterReset = 1, + CounterDump = 8, + MagicPacket_v2 = 65536, +}; + +enum rtl_desc_bit { + DescOwn = -2147483648, + RingEnd = 1073741824, + FirstFrag = 536870912, + LastFrag = 268435456, +}; + +enum rtl_tx_desc_bit { + TD_LSO = 134217728, + TxVlanTag = 131072, +}; + +enum rtl_tx_desc_bit_0 { + TD0_TCP_CS = 65536, + TD0_UDP_CS = 131072, + TD0_IP_CS = 262144, +}; + +enum rtl_tx_desc_bit_1 { + TD1_GTSENV4 = 67108864, + TD1_GTSENV6 = 33554432, + TD1_IPv6_CS = 268435456, + TD1_IPv4_CS = 536870912, + TD1_TCP_CS = 1073741824, + TD1_UDP_CS = -2147483648, +}; + +enum rtl_rx_desc_bit { + PID1 = 262144, + PID0 = 131072, + IPFail = 65536, + UDPFail = 32768, + TCPFail = 16384, + RxVlanTag = 65536, +}; + +struct TxDesc { + __le32 opts1; + __le32 opts2; + 
__le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underrun; + __le64 tx_octets; + __le64 rx_octets; + __le64 rx_multicast64; + __le64 tx_unicast64; + __le64 tx_broadcast64; + __le64 tx_multicast64; + __le32 tx_pause_on; + __le32 tx_pause_off; + __le32 tx_pause_all; + __le32 tx_deferred; + __le32 tx_late_collision; + __le32 tx_all_collision; + __le32 tx_aborted32; + __le32 align_errors32; + __le32 rx_frame_too_long; + __le32 rx_runt; + __le32 rx_pause_on; + __le32 rx_pause_off; + __le32 rx_pause_all; + __le32 rx_unknown_opcode; + __le32 rx_mac_error; + __le32 tx_underrun32; + __le32 rx_mac_missed; + __le32 rx_tcam_dropped; + __le32 tdu; + __le32 rdu; +}; + +enum rtl_flag { + RTL_FLAG_TASK_RESET_PENDING = 0, + RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE = 1, + RTL_FLAG_TASK_TX_TIMEOUT = 2, + RTL_FLAG_MAX = 3, +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *); + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; +}; + +struct iwl5000_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + __le32 reserved[52]; +}; + +struct iwl6000_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + __le32 reserved[78]; +}; + +struct iwl_rfi_lut_entry { + __le16 freq; + u8 channels[15]; + u8 bands[15]; +}; + +struct iwl_rfi_config_cmd { + struct iwl_rfi_lut_entry table[24]; + u8 oem; + u8 reserved[3]; +}; + +struct iwl_rfi_freq_table_resp_cmd { + struct iwl_rfi_lut_entry table[4]; + __le32 status; +}; + +struct iwl_rfi_deactivate_notif { + __le32 reason; +}; + +struct rtw_regd_alternative_t { + bool set; + u8 alt; +}; + +typedef void (*companion_fn)(struct pci_dev *, struct usb_hcd *, struct pci_dev *, struct usb_hcd *); + +enum rc6_mode { + RC6_MODE_0 = 0, + RC6_MODE_6A = 1, + RC6_MODE_UNKNOWN = 2, +}; + +enum rc6_state { + STATE_INACTIVE___2 = 0, + STATE_PREFIX_SPACE = 1, + STATE_HEADER_BIT_START = 2, + STATE_HEADER_BIT_END = 3, + STATE_TOGGLE_START = 4, + STATE_TOGGLE_END = 5, + STATE_BODY_BIT_START = 6, + STATE_BODY_BIT_END = 7, + STATE_FINISHED___2 = 8, +}; + +enum { + BIOSET_NEED_BVECS = 1, + BIOSET_NEED_RESCUER = 2, + BIOSET_PERCPU_CACHE = 4, +}; + +enum blk_crypto_mode_num { + BLK_ENCRYPTION_MODE_INVALID = 0, + BLK_ENCRYPTION_MODE_AES_256_XTS = 1, + BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV = 2, + BLK_ENCRYPTION_MODE_ADIANTUM = 3, + BLK_ENCRYPTION_MODE_SM4_XTS = 4, + BLK_ENCRYPTION_MODE_MAX = 5, +}; + +struct blk_crypto_config { + enum blk_crypto_mode_num crypto_mode; + unsigned int data_unit_size; + unsigned int dun_bytes; +}; + +struct blk_crypto_key { + struct blk_crypto_config crypto_cfg; + unsigned int data_unit_size_bits; + unsigned int size; + u8 raw[64]; +}; + +struct blk_crypto_profile; + +struct blk_crypto_ll_ops { + int (*keyslot_program)(struct blk_crypto_profile *, const struct blk_crypto_key *, unsigned int); + int 
(*keyslot_evict)(struct blk_crypto_profile *, const struct blk_crypto_key *, unsigned int); +}; + +struct blk_crypto_keyslot; + +struct blk_crypto_profile { + struct blk_crypto_ll_ops ll_ops; + unsigned int max_dun_bytes_supported; + unsigned int modes_supported[5]; + struct device *dev; + unsigned int num_slots; + struct rw_semaphore lock; + struct lock_class_key lockdep_key; + wait_queue_head_t idle_slots_wait_queue; + struct list_head idle_slots; + spinlock_t idle_slots_lock; + struct hlist_head *slot_hashtable; + unsigned int log_slot_ht_size; + struct blk_crypto_keyslot *slots; +}; + +struct dm_arg_set { + unsigned int argc; + char **argv; +}; + +struct dm_arg { + unsigned int min; + unsigned int max; + char *error; +}; + +struct dm_dev_internal { + struct list_head list; + refcount_t count; + struct dm_dev *dm_dev; +}; + +enum suspend_mode { + PRESUSPEND = 0, + PRESUSPEND_UNDO = 1, + POSTSUSPEND = 2, +}; + +struct hid_item { + unsigned int format; + __u8 size; + __u8 type; + __u8 tag; + union { + __u8 u8; + __s8 s8; + __u16 u16; + __s16 s16; + __u32 u32; + __s32 s32; + const __u8 *longdata; + } data; +}; + +struct hid_global { + unsigned int usage_page; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned int unit; + unsigned int report_id; + unsigned int report_size; + unsigned int report_count; +}; + +struct hid_local { + unsigned int usage[12288]; + u8 usage_size[12288]; + unsigned int collection_index[12288]; + unsigned int usage_index; + unsigned int usage_minimum; + unsigned int delimiter_depth; + unsigned int delimiter_branch; +}; + +struct hid_parser { + struct hid_global global; + struct hid_global global_stack[4]; + unsigned int global_stack_ptr; + struct hid_local local; + unsigned int *collection_stack; + unsigned int collection_stack_ptr; + unsigned int collection_stack_size; + struct hid_device *device; + unsigned int scan_flags; +}; + +struct hiddev { + int minor; + int exist; + int open; + struct mutex existancelock; + wait_queue_head_t wait; + struct hid_device *hid; + struct list_head list; + spinlock_t list_lock; + bool initialized; +}; + +struct hid_dynid { + struct list_head list; + struct hid_device_id id; +}; + +enum { + ETHTOOL_A_MM_STAT_UNSPEC = 0, + ETHTOOL_A_MM_STAT_PAD = 1, + ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS = 2, + ETHTOOL_A_MM_STAT_SMD_ERRORS = 3, + ETHTOOL_A_MM_STAT_REASSEMBLY_OK = 4, + ETHTOOL_A_MM_STAT_RX_FRAG_COUNT = 5, + ETHTOOL_A_MM_STAT_TX_FRAG_COUNT = 6, + ETHTOOL_A_MM_STAT_HOLD_COUNT = 7, + __ETHTOOL_A_MM_STAT_CNT = 8, + ETHTOOL_A_MM_STAT_MAX = 7, +}; + +struct mm_reply_data { + struct ethnl_reply_data base; + struct ethtool_mm_state state; + long: 32; + struct ethtool_mm_stats stats; +}; + +struct netlink_dump_control { + int (*start)(struct netlink_callback *); + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + struct netlink_ext_ack *extack; + void *data; + struct module *module; + u32 min_dump_alloc; + int flags; +}; + +struct nf_conntrack_expect_policy; + +struct nf_conntrack_helper { + struct hlist_node hnode; + char name[16]; + refcount_t refcnt; + struct module *me; + const struct nf_conntrack_expect_policy *expect_policy; + struct nf_conntrack_tuple tuple; + int (*help)(struct sk_buff *, unsigned int, struct nf_conn *, enum ip_conntrack_info); + void (*destroy)(struct nf_conn *); + int (*from_nlattr)(struct nlattr *, struct nf_conn *); + int (*to_nlattr)(struct sk_buff *, const struct nf_conn *); + 
unsigned int expect_class_max; + unsigned int flags; + unsigned int queue_num; + u16 data_len; + char nat_mod_name[16]; +}; + +struct nf_conntrack_expect_policy { + unsigned int max_expected; + unsigned int timeout; + char name[16]; +}; + +enum nf_ct_helper_flags { + NF_CT_HELPER_F_USERSPACE = 1, + NF_CT_HELPER_F_CONFIGURED = 2, +}; + +struct nf_conn_help { + struct nf_conntrack_helper *helper; + struct hlist_head expectations; + u8 expecting[4]; + long: 32; + char data[32]; +}; + +enum ctattr_help { + CTA_HELP_UNSPEC = 0, + CTA_HELP_NAME = 1, + CTA_HELP_INFO = 2, + __CTA_HELP_MAX = 3, +}; + +enum nfnl_cthelper_msg_types { + NFNL_MSG_CTHELPER_NEW = 0, + NFNL_MSG_CTHELPER_GET = 1, + NFNL_MSG_CTHELPER_DEL = 2, + NFNL_MSG_CTHELPER_MAX = 3, +}; + +enum nfnl_cthelper_type { + NFCTH_UNSPEC = 0, + NFCTH_NAME = 1, + NFCTH_TUPLE = 2, + NFCTH_QUEUE_NUM = 3, + NFCTH_POLICY = 4, + NFCTH_PRIV_DATA_LEN = 5, + NFCTH_STATUS = 6, + __NFCTH_MAX = 7, +}; + +enum nfnl_cthelper_policy_type { + NFCTH_POLICY_SET_UNSPEC = 0, + NFCTH_POLICY_SET_NUM = 1, + NFCTH_POLICY_SET = 2, + NFCTH_POLICY_SET1 = 2, + NFCTH_POLICY_SET2 = 3, + NFCTH_POLICY_SET3 = 4, + NFCTH_POLICY_SET4 = 5, + __NFCTH_POLICY_SET_MAX = 6, +}; + +enum nfnl_cthelper_pol_type { + NFCTH_POLICY_UNSPEC = 0, + NFCTH_POLICY_NAME = 1, + NFCTH_POLICY_EXPECT_MAX = 2, + NFCTH_POLICY_EXPECT_TIMEOUT = 3, + __NFCTH_POLICY_MAX = 4, +}; + +enum nfnl_cthelper_tuple_type { + NFCTH_TUPLE_UNSPEC = 0, + NFCTH_TUPLE_L3PROTONUM = 1, + NFCTH_TUPLE_L4PROTONUM = 2, + __NFCTH_TUPLE_MAX = 3, +}; + +struct nfnl_cthelper { + struct list_head list; + struct nf_conntrack_helper helper; +}; + +struct nf_nat_ipv4_range { + unsigned int flags; + __be32 min_ip; + __be32 max_ip; + union nf_conntrack_man_proto min; + union nf_conntrack_man_proto max; +}; + +struct nf_nat_ipv4_multi_range_compat { + unsigned int rangesize; + struct nf_nat_ipv4_range range[1]; +}; + +enum tcp_metric_index { + TCP_METRIC_RTT = 0, + TCP_METRIC_RTTVAR = 1, + TCP_METRIC_SSTHRESH = 2, + TCP_METRIC_CWND = 3, + TCP_METRIC_REORDERING = 4, + TCP_METRIC_RTT_US = 5, + TCP_METRIC_RTTVAR_US = 6, + __TCP_METRIC_MAX = 7, +}; + +enum { + TCP_METRICS_ATTR_UNSPEC = 0, + TCP_METRICS_ATTR_ADDR_IPV4 = 1, + TCP_METRICS_ATTR_ADDR_IPV6 = 2, + TCP_METRICS_ATTR_AGE = 3, + TCP_METRICS_ATTR_TW_TSVAL = 4, + TCP_METRICS_ATTR_TW_TS_STAMP = 5, + TCP_METRICS_ATTR_VALS = 6, + TCP_METRICS_ATTR_FOPEN_MSS = 7, + TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, + TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, + TCP_METRICS_ATTR_FOPEN_COOKIE = 10, + TCP_METRICS_ATTR_SADDR_IPV4 = 11, + TCP_METRICS_ATTR_SADDR_IPV6 = 12, + TCP_METRICS_ATTR_PAD = 13, + __TCP_METRICS_ATTR_MAX = 14, +}; + +enum { + TCP_METRICS_CMD_UNSPEC = 0, + TCP_METRICS_CMD_GET = 1, + TCP_METRICS_CMD_DEL = 2, + __TCP_METRICS_CMD_MAX = 3, +}; + +struct tcp_fastopen_metrics { + u16 mss; + u16 syn_loss: 10; + u16 try_exp: 2; + long unsigned int last_syn_loss; + struct tcp_fastopen_cookie cookie; +}; + +struct tcp_metrics_block { + struct tcp_metrics_block *tcpm_next; + struct net *tcpm_net; + struct inetpeer_addr tcpm_saddr; + struct inetpeer_addr tcpm_daddr; + long unsigned int tcpm_stamp; + u32 tcpm_lock; + u32 tcpm_vals[5]; + long: 32; + struct tcp_fastopen_metrics tcpm_fastopen; + struct callback_head callback_head; +}; + +struct tcpm_hash_bucket { + struct tcp_metrics_block *chain; +}; + +struct rtgenmsg { + unsigned char rtgen_family; +}; + +struct fib_notifier_info { + int family; + struct netlink_ext_ack *extack; +}; + +enum fib6_walk_state { + FWS_L = 0, + FWS_R = 1, + FWS_C = 2, + FWS_U = 
3, +}; + +struct fib6_walker { + struct list_head lh; + struct fib6_node *root; + struct fib6_node *node; + struct fib6_info *leaf; + enum fib6_walk_state state; + unsigned int skip; + unsigned int count; + unsigned int skip_in_node; + int (*func)(struct fib6_walker *); + void *args; +}; + +struct fib6_entry_notifier_info { + struct fib_notifier_info info; + struct fib6_info *rt; + unsigned int nsiblings; +}; + +struct ipv6_route_iter { + struct seq_net_private p; + struct fib6_walker w; + loff_t skip; + struct fib6_table *tbl; + int sernum; +}; + +struct bpf_iter__ipv6_route { + union { + struct bpf_iter_meta *meta; + }; + union { + struct fib6_info *rt; + }; +}; + +struct fib6_cleaner { + struct fib6_walker w; + struct net *net; + int (*func)(struct fib6_info *, void *); + int sernum; + void *arg; + bool skip_notify; +}; + +enum { + FIB6_NO_SERNUM_CHANGE = 0, +}; + +struct fib6_dump_arg { + struct net *net; + struct notifier_block *nb; + struct netlink_ext_ack *extack; +}; + +struct fib6_nh_pcpu_arg { + struct fib6_info *from; + const struct fib6_table *table; +}; + +struct lookup_args { + int offset; + const struct in6_addr *addr; +}; + +enum reg_request_treatment { + REG_REQ_OK = 0, + REG_REQ_IGNORE = 1, + REG_REQ_INTERSECT = 2, + REG_REQ_ALREADY_SET = 3, +}; + +struct reg_beacon { + struct list_head list; + struct ieee80211_channel chan; +}; + +struct reg_regdb_apply_request { + struct list_head list; + const struct ieee80211_regdomain *regdom; +}; + +struct fwdb_country { + u8 alpha2[2]; + __be16 coll_ptr; +}; + +struct fwdb_header { + __be32 magic; + __be32 version; + struct fwdb_country country[0]; +}; + +struct fwdb_collection { + u8 len; + u8 n_rules; + u8 dfs_region; + long: 0; +}; + +enum fwdb_flags { + FWDB_FLAG_NO_OFDM = 1, + FWDB_FLAG_NO_OUTDOOR = 2, + FWDB_FLAG_DFS = 4, + FWDB_FLAG_NO_IR = 8, + FWDB_FLAG_AUTO_BW = 16, +}; + +struct fwdb_wmm_ac { + u8 ecw; + u8 aifsn; + __be16 cot; +}; + +struct fwdb_wmm_rule { + struct fwdb_wmm_ac client[4]; + struct fwdb_wmm_ac ap[4]; +}; + +struct fwdb_rule { + u8 len; + u8 flags; + __be16 max_eirp; + __be32 start; + __be32 end; + __be32 max_bw; + __be16 cac_timeout; + __be16 wmm_ptr; +}; + +struct clk { + struct clk_core *core; + struct device *dev; + const char *dev_id; + const char *con_id; + long unsigned int min_rate; + long unsigned int max_rate; + unsigned int exclusive_count; + struct hlist_node clks_node; +}; + +enum format_type { + FORMAT_TYPE_NONE = 0, + FORMAT_TYPE_WIDTH = 1, + FORMAT_TYPE_PRECISION = 2, + FORMAT_TYPE_CHAR = 3, + FORMAT_TYPE_STR = 4, + FORMAT_TYPE_PTR = 5, + FORMAT_TYPE_PERCENT_CHAR = 6, + FORMAT_TYPE_INVALID = 7, + FORMAT_TYPE_LONG_LONG = 8, + FORMAT_TYPE_ULONG = 9, + FORMAT_TYPE_LONG = 10, + FORMAT_TYPE_UBYTE = 11, + FORMAT_TYPE_BYTE = 12, + FORMAT_TYPE_USHORT = 13, + FORMAT_TYPE_SHORT = 14, + FORMAT_TYPE_UINT = 15, + FORMAT_TYPE_INT = 16, + FORMAT_TYPE_SIZE_T = 17, + FORMAT_TYPE_PTRDIFF = 18, +}; + +struct printf_spec { + unsigned int type: 8; + int field_width: 24; + unsigned int flags: 8; + unsigned int base: 8; + int precision: 16; +}; + +struct page_flags_fields { + int width; + int shift; + int mask; + const struct printf_spec *spec; + const char *name; +}; + +enum { + PER_LINUX = 0, + PER_LINUX_32BIT = 8388608, + PER_LINUX_FDPIC = 524288, + PER_SVR4 = 68157441, + PER_SVR3 = 83886082, + PER_SCOSVR3 = 117440515, + PER_OSR5 = 100663299, + PER_WYSEV386 = 83886084, + PER_ISCR4 = 67108869, + PER_BSD = 6, + PER_SUNOS = 67108870, + PER_XENIX = 83886087, + PER_LINUX32 = 8, + PER_LINUX32_3GB = 134217736, + 
PER_IRIX32 = 67108873, + PER_IRIXN32 = 67108874, + PER_IRIX64 = 67108875, + PER_RISCOS = 12, + PER_SOLARIS = 67108877, + PER_UW7 = 68157454, + PER_OSF4 = 15, + PER_HPUX = 16, + PER_MASK = 255, +}; + +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING = 1, + UMH_DISABLED = 2, +}; + +enum { + AFFINITY = 0, + AFFINITY_LIST = 1, + EFFECTIVE = 2, + EFFECTIVE_LIST = 3, +}; + +enum misc_res_type { + MISC_CG_RES_TYPES = 0, +}; + +struct misc_res { + u64 max; + atomic64_t watermark; + atomic64_t usage; + atomic64_t events; + atomic64_t events_local; +}; + +struct misc_cg { + struct cgroup_subsys_state css; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct misc_res res[0]; +}; + +enum blktrace_cat { + BLK_TC_READ = 1, + BLK_TC_WRITE = 2, + BLK_TC_FLUSH = 4, + BLK_TC_SYNC = 8, + BLK_TC_SYNCIO = 8, + BLK_TC_QUEUE = 16, + BLK_TC_REQUEUE = 32, + BLK_TC_ISSUE = 64, + BLK_TC_COMPLETE = 128, + BLK_TC_FS = 256, + BLK_TC_PC = 512, + BLK_TC_NOTIFY = 1024, + BLK_TC_AHEAD = 2048, + BLK_TC_META = 4096, + BLK_TC_DISCARD = 8192, + BLK_TC_DRV_DATA = 16384, + BLK_TC_FUA = 32768, + BLK_TC_END = 32768, +}; + +enum blktrace_notify { + __BLK_TN_PROCESS = 0, + __BLK_TN_TIMESTAMP = 1, + __BLK_TN_MESSAGE = 2, + __BLK_TN_CGROUP = 256, +}; + +struct blk_io_trace { + __u32 magic; + __u32 sequence; + __u64 time; + __u64 sector; + __u32 bytes; + __u32 action; + __u32 pid; + __u32 device; + __u32 cpu; + __u16 error; + __u16 pdu_len; +}; + +struct blk_io_trace_remap { + __be32 device_from; + __be32 device_to; + __be64 sector_from; +}; + +enum { + Blktrace_setup = 1, + Blktrace_running = 2, + Blktrace_stopped = 3, +}; + +struct blk_user_trace_setup { + char name[32]; + __u16 act_mask; + __u32 buf_size; + __u32 buf_nr; + long: 32; + __u64 start_lba; + __u64 end_lba; + __u32 pid; + long: 32; +}; + +typedef void blk_log_action_t(struct trace_iterator *, const char *, bool); + +struct perf_event_mmap_page { + __u32 version; + __u32 compat_version; + __u32 lock; + __u32 index; + __s64 offset; + __u64 time_enabled; + __u64 time_running; + union { + __u64 capabilities; + struct { + __u64 cap_bit0: 1; + __u64 cap_bit0_is_deprecated: 1; + __u64 cap_user_rdpmc: 1; + __u64 cap_user_time: 1; + __u64 cap_user_time_zero: 1; + __u64 cap_user_time_short: 1; + __u64 cap_____res: 58; + }; + }; + __u16 pmc_width; + __u16 time_shift; + __u32 time_mult; + __u64 time_offset; + __u64 time_zero; + __u32 size; + __u32 __reserved_1; + __u64 time_cycles; + __u64 time_mask; + __u8 __reserved[928]; + __u64 data_head; + __u64 data_tail; + __u64 data_offset; + __u64 data_size; + __u64 aux_head; + __u64 aux_tail; + __u64 aux_offset; + __u64 aux_size; +}; + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_AUX_OUTPUT_HW_ID = 21, + PERF_RECORD_MAX = 22, +}; + +struct perf_buffer { + refcount_t refcount; + struct callback_head callback_head; + struct work_struct work; + int page_order; + int nr_pages; + int overwrite; + 
int paused; + atomic_t poll; + local_t head; + unsigned int nest; + local_t events; + local_t wakeup; + local_t lost; + long int watermark; + long int aux_watermark; + spinlock_t event_lock; + struct list_head event_list; + atomic_t mmap_count; + long unsigned int mmap_locked; + struct user_struct *mmap_user; + struct mutex aux_mutex; + long int aux_head; + unsigned int aux_nest; + long int aux_wakeup; + long unsigned int aux_pgoff; + int aux_nr_pages; + int aux_overwrite; + atomic_t aux_mmap_count; + long unsigned int aux_mmap_locked; + void (*free_aux)(void *); + refcount_t aux_refcount; + int aux_in_sampling; + int aux_in_pause_resume; + void **aux_pages; + void *aux_priv; + struct perf_event_mmap_page *user_page; + void *data_pages[0]; +}; + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 524288, + AOP_TRUNCATED_PAGE = 524289, +}; + +enum iter_type { + ITER_UBUF = 0, + ITER_IOVEC = 1, + ITER_BVEC = 2, + ITER_KVEC = 3, + ITER_FOLIOQ = 4, + ITER_XARRAY = 5, + ITER_DISCARD = 6, +}; + +struct shared_policy {}; + +struct simple_xattr { + struct rb_node rb_node; + char *name; + size_t size; + char value[0]; +}; + +struct shmem_inode_info { + spinlock_t lock; + unsigned int seals; + long unsigned int flags; + long unsigned int alloced; + long unsigned int swapped; + union { + struct offset_ctx dir_offsets; + struct { + struct list_head shrinklist; + struct list_head swaplist; + }; + }; + long: 32; + struct timespec64 i_crtime; + struct shared_policy policy; + struct simple_xattrs xattrs; + long unsigned int fallocend; + unsigned int fsflags; + atomic_t stop_eviction; + long: 32; + struct inode vfs_inode; +}; + +struct shmem_quota_limits { + qsize_t usrquota_bhardlimit; + qsize_t usrquota_ihardlimit; + qsize_t grpquota_bhardlimit; + qsize_t grpquota_ihardlimit; +}; + +struct shmem_sb_info { + long unsigned int max_blocks; + long: 32; + struct percpu_counter used_blocks; + long unsigned int max_inodes; + long unsigned int free_ispace; + raw_spinlock_t stat_lock; + umode_t mode; + unsigned char huge; + kuid_t uid; + kgid_t gid; + bool full_inums; + bool noswap; + ino_t next_ino; + ino_t *ino_batch; + struct mempolicy *mpol; + spinlock_t shrinklist_lock; + struct list_head shrinklist; + long unsigned int shrinklist_len; + struct shmem_quota_limits qlimits; +}; + +enum sgp_type { + SGP_READ = 0, + SGP_NOALLOC = 1, + SGP_CACHE = 2, + SGP_WRITE = 3, + SGP_FALLOC = 4, +}; + +struct shmem_falloc { + wait_queue_head_t *waitq; + long unsigned int start; + long unsigned int next; + long unsigned int nr_falloced; + long unsigned int nr_unswapped; +}; + +struct shmem_options { + long long unsigned int blocks; + long long unsigned int inodes; + struct mempolicy *mpol; + kuid_t uid; + kgid_t gid; + umode_t mode; + bool full_inums; + int huge; + int seen; + bool noswap; + short unsigned int quota_types; + long: 32; + struct shmem_quota_limits qlimits; +}; + +enum shmem_param { + Opt_gid___3 = 0, + Opt_huge = 1, + Opt_mode = 2, + Opt_mpol = 3, + Opt_nr_blocks = 4, + Opt_nr_inodes = 5, + Opt_size = 6, + Opt_uid___3 = 7, + Opt_inode32 = 8, + Opt_inode64 = 9, + Opt_noswap = 10, + Opt_quota = 11, + Opt_usrquota = 12, + Opt_grpquota = 13, + Opt_usrquota_block_hardlimit = 14, + Opt_usrquota_inode_hardlimit = 15, + Opt_grpquota_block_hardlimit = 16, + Opt_grpquota_inode_hardlimit = 17, + Opt_casefold_version = 18, + Opt_casefold = 19, + Opt_strict_encoding = 20, +}; + +struct reserve_mem_table { + char name[16]; + phys_addr_t start; + phys_addr_t size; +}; + +struct simple_transaction_argresp { + ssize_t 
size; + char data[0]; +}; + +enum { + DIR_OFFSET_MIN = 2, +}; + +struct simple_attr { + int (*get)(void *, u64 *); + int (*set)(void *, u64); + char get_buf[24]; + char set_buf[24]; + void *data; + const char *fmt; + struct mutex mutex; +}; + +enum siginfo_layout { + SIL_KILL = 0, + SIL_TIMER = 1, + SIL_POLL = 2, + SIL_FAULT = 3, + SIL_FAULT_TRAPNO = 4, + SIL_FAULT_MCEERR = 5, + SIL_FAULT_BNDERR = 6, + SIL_FAULT_PKUERR = 7, + SIL_FAULT_PERF_EVENT = 8, + SIL_CHLD = 9, + SIL_RT = 10, + SIL_SYS = 11, +}; + +struct signalfd_siginfo { + __u32 ssi_signo; + __s32 ssi_errno; + __s32 ssi_code; + __u32 ssi_pid; + __u32 ssi_uid; + __s32 ssi_fd; + __u32 ssi_tid; + __u32 ssi_band; + __u32 ssi_overrun; + __u32 ssi_trapno; + __s32 ssi_status; + __s32 ssi_int; + __u64 ssi_ptr; + __u64 ssi_utime; + __u64 ssi_stime; + __u64 ssi_addr; + __u16 ssi_addr_lsb; + __u16 __pad2; + __s32 ssi_syscall; + __u64 ssi_call_addr; + __u32 ssi_arch; + __u8 __pad[28]; +}; + +struct signalfd_ctx { + sigset_t sigmask; +}; + +typedef struct { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +} ext4_acl_entry; + +typedef struct { + __le32 a_version; +} ext4_acl_header; + +struct btrfs_dir_log_item { + __le64 end; +}; + +struct btrfs_map_token { + struct extent_buffer *eb; + char *kaddr; + long unsigned int offset; +}; + +struct btrfs_drop_extents_args { + struct btrfs_path *path; + long: 32; + u64 start; + u64 end; + bool drop_cache; + bool replace_extent; + u32 extent_item_size; + u64 drop_end; + u64 bytes_found; + bool extent_inserted; + long: 32; +}; + +enum btrfs_delayed_item_type { + BTRFS_DELAYED_INSERTION_ITEM = 0, + BTRFS_DELAYED_DELETION_ITEM = 1, +}; + +struct btrfs_delayed_item { + struct rb_node rb_node; + long: 32; + u64 index; + struct list_head tree_list; + struct list_head readdir_list; + struct list_head log_list; + u64 bytes_reserved; + struct btrfs_delayed_node *delayed_node; + refcount_t refs; + enum btrfs_delayed_item_type type: 8; + bool logged; + u16 data_len; + char data[0]; + long: 32; +}; + +struct btrfs_log_ctx { + int log_ret; + int log_transid; + bool log_new_dentries; + bool logging_new_name; + bool logging_new_delayed_dentries; + bool logged_before; + struct btrfs_inode *inode; + struct list_head list; + struct list_head ordered_extents; + struct list_head conflict_inodes; + int num_conflict_inodes; + bool logging_conflict_inodes; + struct extent_buffer *scratch_eb; +}; + +enum { + LOG_INODE_ALL = 0, + LOG_INODE_EXISTS = 1, +}; + +enum { + LOG_WALK_PIN_ONLY = 0, + LOG_WALK_REPLAY_INODES = 1, + LOG_WALK_REPLAY_DIR_INDEX = 2, + LOG_WALK_REPLAY_ALL = 3, +}; + +struct walk_control___2 { + int free; + int pin; + int stage; + bool ignore_cur_inode; + struct btrfs_root *replay_dest; + struct btrfs_trans_handle *trans; + int (*process_func)(struct btrfs_root *, struct extent_buffer *, struct walk_control___2 *, u64, int); +}; + +struct btrfs_dir_list { + u64 ino; + struct list_head list; +}; + +struct btrfs_ino_list { + u64 ino; + u64 parent; + struct list_head list; +}; + +typedef int __kernel_mqd_t; + +typedef __kernel_mqd_t mqd_t; + +struct sigevent { + sigval_t sigev_value; + int sigev_signo; + int sigev_notify; + union { + int _pad[13]; + int _tid; + struct { + void (*_function)(sigval_t); + void *_attribute; + } _sigev_thread; + } _sigev_un; +}; + +enum inode_i_mutex_lock_class { + I_MUTEX_NORMAL = 0, + I_MUTEX_PARENT = 1, + I_MUTEX_CHILD = 2, + I_MUTEX_XATTR = 3, + I_MUTEX_NONDIR2 = 4, + I_MUTEX_PARENT2 = 5, +}; + +struct mq_attr { + __kernel_long_t mq_flags; + __kernel_long_t mq_maxmsg; + 
__kernel_long_t mq_msgsize; + __kernel_long_t mq_curmsgs; + __kernel_long_t __reserved[4]; +}; + +struct msg_msgseg; + +struct msg_msg { + struct list_head m_list; + long int m_type; + size_t m_ts; + struct msg_msgseg *next; + void *security; +}; + +struct mqueue_fs_context { + struct ipc_namespace *ipc_ns; + bool newns; +}; + +struct posix_msg_tree_node { + struct rb_node rb_node; + struct list_head msg_list; + int priority; +}; + +struct ext_wait_queue { + struct task_struct *task; + struct list_head list; + struct msg_msg *msg; + int state; +}; + +struct mqueue_inode_info { + spinlock_t lock; + long: 32; + struct inode vfs_inode; + wait_queue_head_t wait_q; + struct rb_root msg_tree; + struct rb_node *msg_tree_rightmost; + struct posix_msg_tree_node *node_cache; + struct mq_attr attr; + struct sigevent notify; + struct pid *notify_owner; + u32 notify_self_exec_id; + struct user_namespace *notify_user_ns; + struct ucounts *ucounts; + struct sock *notify_sock; + struct sk_buff *notify_cookie; + struct ext_wait_queue e_wait_q[2]; + long unsigned int qsize; + long: 32; +}; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +enum xen_domain_type { + XEN_NATIVE = 0, + XEN_PV_DOMAIN = 1, + XEN_HVM_DOMAIN = 2, +}; + +enum bio_merge_status { + BIO_MERGE_OK = 0, + BIO_MERGE_NONE = 1, + BIO_MERGE_FAILED = 2, +}; + +struct io_uring_rsrc_update { + __u32 offset; + __u32 resv; + __u64 data; +}; + +struct io_tctx_node { + struct list_head ctx_node; + struct task_struct *task; + struct io_ring_ctx *ctx; +}; + +typedef unsigned int USItype; + +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +typedef struct { + size_t bitContainer; + unsigned int bitPos; + char *startPtr; + char *ptr; + char *endPtr; +} BIT_CStream_t; + +typedef struct { + ptrdiff_t value; + const void *stateTable; + const void *symbolTT; + unsigned int stateLog; +} FSE_CState_t; + +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; + +typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1, +} ZSTD_defaultPolicy_e; + +typedef struct { + S16 norm[53]; + U32 wksp[285]; +} ZSTD_BuildCTableWksp; + +struct dmt_videomode { + u32 dmt_id; + u32 std_2byte_code; + u32 cvt_3byte_code; + const struct fb_videomode *mode; +}; + +struct broken_edid { + u8 manufacturer[4]; + u32 model; + u32 fix; +}; + +struct __fb_timings { + u32 dclk; + u32 hfreq; + u32 vfreq; + u32 hactive; + u32 vactive; + u32 hblank; + u32 vblank; + u32 htotal; + u32 vtotal; +}; + +struct termios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; +}; + +struct termios2 { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct termio { + short unsigned int c_iflag; + short unsigned int c_oflag; + short unsigned int c_cflag; + short unsigned int c_lflag; + unsigned char c_line; + unsigned char c_cc[8]; +}; + +enum tty_flow_change { + TTY_FLOW_NO_CHANGE = 0, + TTY_THROTTLE_SAFE = 1, + TTY_UNTHROTTLE_SAFE = 2, +}; + +enum scsi_prot_operations { + SCSI_PROT_NORMAL = 0, + SCSI_PROT_READ_INSERT = 1, + SCSI_PROT_WRITE_STRIP = 2, + SCSI_PROT_READ_STRIP = 3, + SCSI_PROT_WRITE_INSERT = 4, + SCSI_PROT_READ_PASS = 5, + SCSI_PROT_WRITE_PASS = 6, +}; + +struct scsi_driver { + struct device_driver gendrv; + int 
(*resume)(struct device *); + void (*rescan)(struct device *); + blk_status_t (*init_command)(struct scsi_cmnd *); + void (*uninit_command)(struct scsi_cmnd *); + int (*done)(struct scsi_cmnd *); + int (*eh_action)(struct scsi_cmnd *, int); + void (*eh_reset)(struct scsi_cmnd *); +}; + +struct scsi_eh_save { + int result; + unsigned int resid_len; + int eh_eflags; + enum dma_data_direction data_direction; + unsigned int underflow; + unsigned char cmd_len; + unsigned char prot_op; + unsigned char cmnd[32]; + struct scsi_data_buffer sdb; + struct scatterlist sense_sgl; +}; + +enum scsi_ml_status { + SCSIML_STAT_OK = 0, + SCSIML_STAT_RESV_CONFLICT = 1, + SCSIML_STAT_NOSPC = 2, + SCSIML_STAT_MED_ERROR = 3, + SCSIML_STAT_TGT_FAILURE = 4, + SCSIML_STAT_DL_TIMEOUT = 5, +}; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +struct mdio_driver { + struct mdio_driver_common mdiodrv; + int (*probe)(struct mdio_device *); + void (*remove)(struct mdio_device *); + void (*shutdown)(struct mdio_device *); +}; + +struct mdio_board_info { + const char *bus_id; + char modalias[32]; + int mdio_addr; + const void *platform_data; +}; + +enum reset_control_flags { + RESET_CONTROL_EXCLUSIVE = 4, + RESET_CONTROL_EXCLUSIVE_DEASSERTED = 12, + RESET_CONTROL_EXCLUSIVE_RELEASED = 0, + RESET_CONTROL_SHARED = 1, + RESET_CONTROL_SHARED_DEASSERTED = 9, + RESET_CONTROL_OPTIONAL_EXCLUSIVE = 6, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED = 14, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED = 2, + RESET_CONTROL_OPTIONAL_SHARED = 3, + RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED = 11, +}; + +struct trace_event_raw_mdio_access { + struct trace_entry ent; + char busid[61]; + char read; + u8 addr; + u16 val; + unsigned int regnum; + char __data[0]; +}; + +struct trace_event_data_offsets_mdio_access {}; + +typedef void (*btf_trace_mdio_access)(void *, struct mii_bus *, char, u8, unsigned int, u16, int); + +struct mdio_bus_stat_attr { + int addr; + unsigned int field_offset; +}; + +enum iwl_power_level { + IWL_POWER_INDEX_1 = 0, + IWL_POWER_INDEX_2 = 1, + IWL_POWER_INDEX_3 = 2, + IWL_POWER_INDEX_4 = 3, + IWL_POWER_INDEX_5 = 4, + IWL_POWER_NUM = 5, +}; + +enum iwl_uapsd_disable { + IWL_DISABLE_UAPSD_BSS = 1, + IWL_DISABLE_UAPSD_P2P_CLIENT = 2, +}; + +struct iwl_fw_ini_region_dev_addr_range { + __le32 offset; +}; + +struct iwl_fw_ini_region_dev_addr { + __le32 size; + __le32 offset; +}; + +struct iwl_fw_ini_region_fifos { + __le32 fid[2]; + __le32 hdr_only; + __le32 offset; +}; + +struct iwl_fw_ini_region_err_table { + __le32 version; + __le32 base_addr; + __le32 size; + __le32 offset; +}; + +struct iwl_fw_ini_region_special_device_memory { + __le16 type; + __le16 version; + __le32 base_addr; + __le32 size; + __le32 offset; +}; + +struct iwl_fw_ini_region_internal_buffer { + __le32 alloc_id; + __le32 base_addr; + __le32 size; +}; + +struct iwl_fw_ini_region_tlv { + struct iwl_fw_ini_header hdr; + __le32 id; + u8 type; + u8 sub_type; + u8 sub_type_ver; + u8 reserved; + u8 name[32]; + union { + struct iwl_fw_ini_region_dev_addr dev_addr; + struct iwl_fw_ini_region_dev_addr_range dev_addr_range; + struct iwl_fw_ini_region_fifos fifos; + struct iwl_fw_ini_region_err_table err_table; + struct iwl_fw_ini_region_internal_buffer internal_buffer; + struct iwl_fw_ini_region_special_device_memory special_mem; + __le32 dram_alloc_id; + __le32 tlv_mask; + }; + __le32 addrs[0]; +}; + +enum iwl_fw_ini_region_type { + IWL_FW_INI_REGION_INVALID = 0, + IWL_FW_INI_REGION_TLV = 1, + IWL_FW_INI_REGION_INTERNAL_BUFFER = 2, + 
IWL_FW_INI_REGION_DRAM_BUFFER = 3, + IWL_FW_INI_REGION_TXF = 4, + IWL_FW_INI_REGION_RXF = 5, + IWL_FW_INI_REGION_LMAC_ERROR_TABLE = 6, + IWL_FW_INI_REGION_UMAC_ERROR_TABLE = 7, + IWL_FW_INI_REGION_RSP_OR_NOTIF = 8, + IWL_FW_INI_REGION_DEVICE_MEMORY = 9, + IWL_FW_INI_REGION_PERIPHERY_MAC = 10, + IWL_FW_INI_REGION_PERIPHERY_PHY = 11, + IWL_FW_INI_REGION_PERIPHERY_AUX = 12, + IWL_FW_INI_REGION_PAGING = 13, + IWL_FW_INI_REGION_CSR = 14, + IWL_FW_INI_REGION_DRAM_IMR = 15, + IWL_FW_INI_REGION_PCI_IOSF_CONFIG = 16, + IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY = 17, + IWL_FW_INI_REGION_DBGI_SRAM = 18, + IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE = 19, + IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE = 20, + IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP = 21, + IWL_FW_INI_REGION_NUM = 22, +}; + +enum iwl_fw_ini_region_device_memory_subtype { + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM = 1, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE = 5, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE = 7, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE = 10, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE = 14, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE = 16, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE = 18, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE = 20, +}; + +struct iwl_ucode_header { + __le32 ver; + union { + struct { + __le32 inst_size; + __le32 data_size; + __le32 init_size; + __le32 init_data_size; + __le32 boot_size; + u8 data[0]; + } v1; + struct { + __le32 build; + __le32 inst_size; + __le32 data_size; + __le32 init_size; + __le32 init_data_size; + __le32 boot_size; + u8 data[0]; + } v2; + } u; +}; + +enum iwl_ucode_tlv_type { + IWL_UCODE_TLV_INVALID = 0, + IWL_UCODE_TLV_INST = 1, + IWL_UCODE_TLV_DATA = 2, + IWL_UCODE_TLV_INIT = 3, + IWL_UCODE_TLV_INIT_DATA = 4, + IWL_UCODE_TLV_BOOT = 5, + IWL_UCODE_TLV_PROBE_MAX_LEN = 6, + IWL_UCODE_TLV_PAN = 7, + IWL_UCODE_TLV_MEM_DESC = 7, + IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8, + IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9, + IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10, + IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11, + IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12, + IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13, + IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14, + IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15, + IWL_UCODE_TLV_WOWLAN_INST = 16, + IWL_UCODE_TLV_WOWLAN_DATA = 17, + IWL_UCODE_TLV_FLAGS = 18, + IWL_UCODE_TLV_SEC_RT = 19, + IWL_UCODE_TLV_SEC_INIT = 20, + IWL_UCODE_TLV_SEC_WOWLAN = 21, + IWL_UCODE_TLV_DEF_CALIB = 22, + IWL_UCODE_TLV_PHY_SKU = 23, + IWL_UCODE_TLV_SECURE_SEC_RT = 24, + IWL_UCODE_TLV_SECURE_SEC_INIT = 25, + IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26, + IWL_UCODE_TLV_NUM_OF_CPU = 27, + IWL_UCODE_TLV_CSCHEME = 28, + IWL_UCODE_TLV_API_CHANGES_SET = 29, + IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, + IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, + IWL_UCODE_TLV_PAGING = 32, + IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, + IWL_UCODE_TLV_FW_VERSION = 36, + IWL_UCODE_TLV_FW_DBG_DEST = 38, + IWL_UCODE_TLV_FW_DBG_CONF = 39, + IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, + IWL_UCODE_TLV_CMD_VERSIONS = 48, + IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, + IWL_UCODE_TLV_FW_MEM_SEG = 51, + IWL_UCODE_TLV_IML = 52, + IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54, + IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55, + IWL_UCODE_TLV_FW_RECOVERY_INFO = 57, + IWL_UCODE_TLV_HW_TYPE = 58, + IWL_UCODE_TLV_FW_FSEQ_VERSION = 60, + IWL_UCODE_TLV_PHY_INTEGRATION_VERSION = 61, + IWL_UCODE_TLV_PNVM_VERSION = 62, + IWL_UCODE_TLV_PNVM_SKU = 64, + IWL_UCODE_TLV_SEC_TABLE_ADDR = 66, + IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67, + 
IWL_UCODE_TLV_CURRENT_PC = 68, + IWL_UCODE_TLV_FW_NUM_STATIONS = 256, + IWL_UCODE_TLV_FW_NUM_BEACONS = 258, + IWL_UCODE_TLV_TYPE_DEBUG_INFO = 16777221, + IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = 16777222, + IWL_UCODE_TLV_TYPE_HCMD = 16777223, + IWL_UCODE_TLV_TYPE_REGIONS = 16777224, + IWL_UCODE_TLV_TYPE_TRIGGERS = 16777225, + IWL_UCODE_TLV_TYPE_CONF_SET = 16777226, + IWL_UCODE_TLV_DEBUG_MAX = 16777225, + IWL_UCODE_TLV_FW_DBG_DUMP_LST = 4096, +}; + +struct iwl_tlv_ucode_header { + __le32 zero; + __le32 magic; + u8 human_readable[64]; + __le32 ver; + __le32 build; + __le64 ignore; + u8 data[0]; +}; + +struct iwl_ucode_api { + __le32 api_index; + __le32 api_flags; +}; + +struct iwl_ucode_capa { + __le32 api_index; + __le32 api_capa; +}; + +struct iwl_fw_dbg_dest_tlv { + u8 version; + u8 monitor_mode; + u8 size_power; + u8 reserved; + __le32 cfg_reg; + __le32 write_ptr_reg; + __le32 wrap_count; + u8 base_shift; + u8 size_shift; + struct iwl_fw_dbg_reg_op reg_ops[0]; +} __attribute__((packed)); + +struct iwl_fw_dump_exclude { + __le32 addr; + __le32 size; +}; + +enum iwl_ucode_sec { + IWL_UCODE_SECTION_DATA = 0, + IWL_UCODE_SECTION_INST = 1, +}; + +enum iwl_error_event_table_status { + IWL_ERROR_EVENT_TABLE_LMAC1 = 1, + IWL_ERROR_EVENT_TABLE_LMAC2 = 2, + IWL_ERROR_EVENT_TABLE_UMAC = 4, + IWL_ERROR_EVENT_TABLE_TCM1 = 8, + IWL_ERROR_EVENT_TABLE_TCM2 = 16, + IWL_ERROR_EVENT_TABLE_RCM1 = 32, + IWL_ERROR_EVENT_TABLE_RCM2 = 64, +}; + +struct iwl_drv { + struct list_head list; + struct iwl_fw fw; + struct iwl_op_mode *op_mode; + struct iwl_trans *trans; + struct device *dev; + int fw_index; + char firmware_name[64]; + struct completion request_firmware_complete; +}; + +struct iwl_lmac_debug_addrs { + __le32 error_event_table_ptr; + __le32 log_event_table_ptr; + __le32 cpu_register_ptr; + __le32 dbgm_config_ptr; + __le32 alive_counter_ptr; + __le32 scd_base_ptr; + __le32 st_fwrd_addr; + __le32 st_fwrd_size; +}; + +struct iwl_umac_debug_addrs { + __le32 error_info_addr; + __le32 dbg_print_buff_addr; +}; + +enum { + DVM_OP_MODE = 0, + MVM_OP_MODE = 1, +}; + +struct iwlwifi_opmode_table { + const char *name; + const struct iwl_op_mode_ops *ops; + struct list_head drv; +}; + +struct fw_sec { + const void *data; + size_t size; + u32 offset; +}; + +struct fw_img_parsing { + struct fw_sec *sec; + int sec_counter; +}; + +struct fw_sec_parsing { + __le32 offset; + const u8 data[0]; +}; + +struct iwl_tlv_calib_data { + __le32 ucode_type; + struct iwl_tlv_calib_ctrl calib; +}; + +struct iwl_firmware_pieces { + struct fw_img_parsing img[4]; + u32 init_evtlog_ptr; + u32 init_evtlog_size; + u32 init_errlog_ptr; + u32 inst_evtlog_ptr; + u32 inst_evtlog_size; + u32 inst_errlog_ptr; + bool dbg_dest_tlv_init; + const u8 *dbg_dest_ver; + union { + const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1; + }; + const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[32]; + size_t dbg_conf_tlv_len[32]; + const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[17]; + size_t dbg_trigger_tlv_len[17]; + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; + size_t n_mem_tlv; +}; + +enum ieee80211_vif_flags { + IEEE80211_VIF_BEACON_FILTER = 1, + IEEE80211_VIF_SUPPORTS_CQM_RSSI = 2, + IEEE80211_VIF_SUPPORTS_UAPSD = 4, + IEEE80211_VIF_GET_NOA_UPDATE = 8, + IEEE80211_VIF_EML_ACTIVE = 16, + IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW = 32, +}; + +enum iwl_link_ctx_modify_flags { + LINK_CONTEXT_MODIFY_ACTIVE = 1, + LINK_CONTEXT_MODIFY_RATES_INFO = 2, + LINK_CONTEXT_MODIFY_PROTECT_FLAGS = 4, + 
LINK_CONTEXT_MODIFY_QOS_PARAMS = 8, + LINK_CONTEXT_MODIFY_BEACON_TIMING = 16, + LINK_CONTEXT_MODIFY_HE_PARAMS = 32, + LINK_CONTEXT_MODIFY_BSS_COLOR_DISABLE = 64, + LINK_CONTEXT_MODIFY_EHT_PARAMS = 128, + LINK_CONTEXT_MODIFY_BANDWIDTH = 256, + LINK_CONTEXT_MODIFY_ALL = 255, +}; + +enum iwl_6ghz_ap_type { + IWL_6GHZ_AP_TYPE_LPI = 0, + IWL_6GHZ_AP_TYPE_SP = 1, + IWL_6GHZ_AP_TYPE_VLP = 2, +}; + +struct iwl_txpower_constraints_cmd { + __le16 link_id; + __le16 ap_type; + __s8 eirp_pwr[5]; + __s8 psd_pwr[16]; + u8 reserved[3]; +}; + +struct iwl_mvm_sta_state_ops { + int (*add_sta)(struct iwl_mvm *, struct ieee80211_vif *, struct ieee80211_sta *); + int (*update_sta)(struct iwl_mvm *, struct ieee80211_vif *, struct ieee80211_sta *); + int (*rm_sta)(struct iwl_mvm *, struct ieee80211_vif *, struct ieee80211_sta *); + int (*mac_ctxt_changed)(struct iwl_mvm *, struct ieee80211_vif *, bool); +}; + +struct iwl_mvm_roc_ops { + int (*add_aux_sta_for_hs20)(struct iwl_mvm *, u32); + int (*link)(struct iwl_mvm *, struct ieee80211_vif *); +}; + +struct iwl_mvm_switch_vif_chanctx_ops { + int (*__assign_vif_chanctx)(struct iwl_mvm *, struct ieee80211_vif *, struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf *, bool); + void (*__unassign_vif_chanctx)(struct iwl_mvm *, struct ieee80211_vif *, struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf *, bool); +}; + +enum rtw_tx_queue_type { + RTW_TX_QUEUE_BK = 0, + RTW_TX_QUEUE_BE = 1, + RTW_TX_QUEUE_VI = 2, + RTW_TX_QUEUE_VO = 3, + RTW_TX_QUEUE_BCN = 4, + RTW_TX_QUEUE_MGMT = 5, + RTW_TX_QUEUE_HI0 = 6, + RTW_TX_QUEUE_H2C = 7, + RTK_MAX_TX_QUEUE_NUM = 8, +}; + +enum rtw_rx_queue_type { + RTW_RX_QUEUE_MPDU = 0, + RTW_RX_QUEUE_C2H = 1, + RTK_MAX_RX_QUEUE_NUM = 2, +}; + +enum rtw_pci_flags { + RTW_PCI_FLAG_NAPI_RUNNING = 0, + NUM_OF_RTW_PCI_FLAGS = 1, +}; + +struct xhci_cap_regs { + __le32 hc_capbase; + __le32 hcs_params1; + __le32 hcs_params2; + __le32 hcs_params3; + __le32 hcc_params; + __le32 db_off; + __le32 run_regs_off; + __le32 hcc_params2; +}; + +struct xhci_op_regs { + __le32 command; + __le32 status; + __le32 page_size; + __le32 reserved1; + __le32 reserved2; + __le32 dev_notification; + __le64 cmd_ring; + __le32 reserved3[4]; + __le64 dcbaa_ptr; + __le32 config_reg; + __le32 reserved4[241]; + __le32 port_status_base; + __le32 port_power_base; + __le32 port_link_base; + __le32 reserved5; + __le32 reserved6[1016]; +}; + +struct xhci_intr_reg { + __le32 irq_pending; + __le32 irq_control; + __le32 erst_size; + __le32 rsvd; + __le64 erst_base; + __le64 erst_dequeue; +}; + +struct xhci_run_regs { + __le32 microframe_index; + __le32 rsvd[7]; + struct xhci_intr_reg ir_set[128]; +}; + +struct xhci_doorbell_array { + __le32 doorbell[256]; +}; + +struct xhci_container_ctx { + unsigned int type; + int size; + u8 *bytes; + dma_addr_t dma; +}; + +struct xhci_slot_ctx { + __le32 dev_info; + __le32 dev_info2; + __le32 tt_info; + __le32 dev_state; + __le32 reserved[4]; +}; + +struct xhci_ep_ctx { + __le32 ep_info; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + __le32 reserved[3]; +}; + +struct xhci_input_control_ctx { + __le32 drop_flags; + __le32 add_flags; + __le32 rsvd2[6]; +}; + +union xhci_trb; + +struct xhci_command { + struct xhci_container_ctx *in_ctx; + u32 status; + int slot_id; + struct completion *completion; + union xhci_trb *command_trb; + struct list_head cmd_list; + unsigned int timeout_ms; +}; + +struct xhci_link_trb { + __le64 segment_ptr; + __le32 intr_target; + __le32 control; +}; + +struct xhci_transfer_event { + __le64 buffer; + __le32 
transfer_len; + __le32 flags; +}; + +struct xhci_event_cmd { + __le64 cmd_trb; + __le32 status; + __le32 flags; +}; + +struct xhci_generic_trb { + __le32 field[4]; +}; + +union xhci_trb { + struct xhci_link_trb link; + struct xhci_transfer_event trans_event; + struct xhci_event_cmd event_cmd; + struct xhci_generic_trb generic; +}; + +struct xhci_stream_ctx { + __le64 stream_ring; + __le32 reserved[2]; +}; + +struct xhci_ring; + +struct xhci_stream_info { + struct xhci_ring **stream_rings; + unsigned int num_streams; + struct xhci_stream_ctx *stream_ctx_array; + unsigned int num_stream_ctxs; + dma_addr_t ctx_array_dma; + struct xarray trb_address_map; + struct xhci_command *free_streams_command; +}; + +enum xhci_ring_type { + TYPE_CTRL = 0, + TYPE_ISOC = 1, + TYPE_BULK = 2, + TYPE_INTR = 3, + TYPE_STREAM = 4, + TYPE_COMMAND = 5, + TYPE_EVENT = 6, +}; + +struct xhci_segment; + +struct xhci_ring { + struct xhci_segment *first_seg; + struct xhci_segment *last_seg; + union xhci_trb *enqueue; + struct xhci_segment *enq_seg; + union xhci_trb *dequeue; + struct xhci_segment *deq_seg; + struct list_head td_list; + u32 cycle_state; + unsigned int stream_id; + unsigned int num_segs; + unsigned int num_trbs_free; + unsigned int bounce_buf_len; + enum xhci_ring_type type; + bool last_td_was_short; + struct xarray *trb_address_map; +}; + +struct xhci_bw_info { + unsigned int ep_interval; + unsigned int mult; + unsigned int num_packets; + unsigned int max_packet_size; + unsigned int max_esit_payload; + unsigned int type; +}; + +struct xhci_virt_device; + +struct xhci_hcd; + +struct xhci_virt_ep { + struct xhci_virt_device *vdev; + unsigned int ep_index; + struct xhci_ring *ring; + struct xhci_stream_info *stream_info; + struct xhci_ring *new_ring; + unsigned int err_count; + unsigned int ep_state; + struct list_head cancelled_td_list; + struct xhci_hcd *xhci; + struct xhci_segment *queued_deq_seg; + union xhci_trb *queued_deq_ptr; + bool skip; + struct xhci_bw_info bw_info; + struct list_head bw_endpoint_list; + long unsigned int stop_time; + int next_frame_id; + bool use_extended_tbc; +}; + +struct xhci_port; + +struct xhci_interval_bw_table; + +struct xhci_tt_bw_info; + +struct xhci_virt_device { + int slot_id; + struct usb_device *udev; + struct xhci_container_ctx *out_ctx; + struct xhci_container_ctx *in_ctx; + struct xhci_virt_ep eps[31]; + struct xhci_port *rhub_port; + struct xhci_interval_bw_table *bw_table; + struct xhci_tt_bw_info *tt_info; + long unsigned int flags; + u16 current_mel; + void *debugfs_private; +}; + +struct s3_save { + u32 command; + u32 dev_nt; + u64 dcbaa_ptr; + u32 config_reg; + long: 32; +}; + +struct xhci_bus_state { + long unsigned int bus_suspended; + long unsigned int next_statechange; + u32 port_c_suspend; + u32 suspended_ports; + u32 port_remote_wakeup; + long unsigned int resuming_ports; +}; + +struct xhci_hub { + struct xhci_port **ports; + unsigned int num_ports; + struct usb_hcd *hcd; + struct xhci_bus_state bus_state; + u8 maj_rev; + u8 min_rev; +}; + +struct xhci_device_context_array; + +struct xhci_interrupter; + +struct xhci_scratchpad; + +struct xhci_root_port_bw_info; + +struct xhci_port_cap; + +struct xhci_hcd { + struct usb_hcd *main_hcd; + struct usb_hcd *shared_hcd; + struct xhci_cap_regs *cap_regs; + struct xhci_op_regs *op_regs; + struct xhci_run_regs *run_regs; + struct xhci_doorbell_array *dba; + __u32 hcs_params1; + __u32 hcs_params2; + __u32 hcs_params3; + __u32 hcc_params; + __u32 hcc_params2; + spinlock_t lock; + u16 hci_version; + u16 
max_interrupters; + u32 imod_interval; + int page_size; + int page_shift; + int nvecs; + struct clk *clk; + struct clk *reg_clk; + struct reset_control *reset; + struct xhci_device_context_array *dcbaa; + struct xhci_interrupter **interrupters; + struct xhci_ring *cmd_ring; + unsigned int cmd_ring_state; + struct list_head cmd_list; + unsigned int cmd_ring_reserved_trbs; + struct delayed_work cmd_timer; + struct completion cmd_ring_stop_completion; + struct xhci_command *current_cmd; + struct xhci_scratchpad *scratchpad; + struct mutex mutex; + struct xhci_virt_device *devs[256]; + struct xhci_root_port_bw_info *rh_bw; + struct dma_pool *device_pool; + struct dma_pool *segment_pool; + struct dma_pool *small_streams_pool; + struct dma_pool *medium_streams_pool; + unsigned int xhc_state; + long unsigned int run_graceperiod; + struct s3_save s3; + long long unsigned int quirks; + unsigned int num_active_eps; + unsigned int limit_active_eps; + struct xhci_port *hw_ports; + struct xhci_hub usb2_rhub; + struct xhci_hub usb3_rhub; + unsigned int hw_lpm_support: 1; + unsigned int broken_suspend: 1; + unsigned int allow_single_roothub: 1; + struct xhci_port_cap *port_caps; + unsigned int num_port_caps; + struct timer_list comp_mode_recovery_timer; + u32 port_status_u0; + u16 test_mode; + struct dentry *debugfs_root; + struct dentry *debugfs_slots; + struct list_head regset_list; + void *dbc; + long unsigned int priv[0]; +}; + +struct xhci_segment { + union xhci_trb *trbs; + struct xhci_segment *next; + unsigned int num; + dma_addr_t dma; + dma_addr_t bounce_dma; + void *bounce_buf; + unsigned int bounce_offs; + unsigned int bounce_len; +}; + +struct xhci_interval_bw { + unsigned int num_packets; + struct list_head endpoints; + unsigned int overhead[3]; +}; + +struct xhci_interval_bw_table { + unsigned int interval0_esit_payload; + struct xhci_interval_bw interval_bw[16]; + unsigned int bw_used; + unsigned int ss_bw_in; + unsigned int ss_bw_out; +}; + +struct xhci_port { + __le32 *addr; + int hw_portnum; + int hcd_portnum; + struct xhci_hub *rhub; + struct xhci_port_cap *port_cap; + unsigned int lpm_incapable: 1; + long unsigned int resume_timestamp; + bool rexit_active; + int slot_id; + struct completion rexit_done; + struct completion u3exit_done; +}; + +struct xhci_tt_bw_info { + struct list_head tt_list; + int slot_id; + int ttport; + struct xhci_interval_bw_table bw_table; + int active_eps; +}; + +struct xhci_root_port_bw_info { + struct list_head tts; + unsigned int num_active_tts; + struct xhci_interval_bw_table bw_table; +}; + +struct xhci_device_context_array { + __le64 dev_context_ptrs[256]; + dma_addr_t dma; + long: 32; +}; + +enum xhci_cancelled_td_status { + TD_DIRTY = 0, + TD_HALTED = 1, + TD_CLEARING_CACHE = 2, + TD_CLEARING_CACHE_DEFERRED = 3, + TD_CLEARED = 4, +}; + +struct xhci_td { + struct list_head td_list; + struct list_head cancelled_td_list; + int status; + enum xhci_cancelled_td_status cancel_status; + struct urb *urb; + struct xhci_segment *start_seg; + union xhci_trb *start_trb; + struct xhci_segment *end_seg; + union xhci_trb *end_trb; + struct xhci_segment *bounce_seg; + bool urb_length_set; + bool error_mid_td; +}; + +struct xhci_erst_entry { + __le64 seg_addr; + __le32 seg_size; + __le32 rsvd; +}; + +struct xhci_erst { + struct xhci_erst_entry *entries; + unsigned int num_entries; + dma_addr_t erst_dma_addr; +}; + +struct xhci_scratchpad { + u64 *sp_array; + dma_addr_t sp_dma; + void **sp_buffers; +}; + +struct urb_priv { + int num_tds; + int num_tds_done; + struct 
xhci_td td[0]; +}; + +struct xhci_interrupter { + struct xhci_ring *event_ring; + struct xhci_erst erst; + struct xhci_intr_reg *ir_set; + unsigned int intr_num; + bool ip_autoclear; + u32 isoc_bei_interval; + u32 s3_irq_pending; + u32 s3_irq_control; + u32 s3_erst_size; + long: 32; + u64 s3_erst_base; + u64 s3_erst_dequeue; +}; + +struct xhci_port_cap { + u32 *psi; + u8 psi_count; + u8 psi_uid_count; + u8 maj_rev; + u8 min_rev; + u32 protocol_caps; +}; + +struct mdu_array_info_s { + int major_version; + int minor_version; + int patch_version; + unsigned int ctime; + int level; + int size; + int nr_disks; + int raid_disks; + int md_minor; + int not_persistent; + unsigned int utime; + int state; + int active_disks; + int working_disks; + int failed_disks; + int spare_disks; + int layout; + int chunk_size; +}; + +struct mdu_disk_info_s { + int number; + int major; + int minor; + int raid_disk; + int state; +}; + +struct md_setup_args { + int minor; + int partitioned; + int level; + int chunk; + char *device_names; +}; + +struct cpuidle_state_kobj { + struct cpuidle_state *state; + struct cpuidle_state_usage *state_usage; + struct completion kobj_unregister; + struct kobject kobj; + struct cpuidle_device *device; +}; + +struct cpuidle_device_kobj { + struct cpuidle_device *dev; + struct completion kobj_unregister; + struct kobject kobj; +}; + +struct cpuidle_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_device *, char *); + ssize_t (*store)(struct cpuidle_device *, const char *, size_t); +}; + +struct cpuidle_state_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_state *, struct cpuidle_state_usage *, char *); + ssize_t (*store)(struct cpuidle_state *, struct cpuidle_state_usage *, const char *, size_t); +}; + +enum { + NDA_UNSPEC = 0, + NDA_DST = 1, + NDA_LLADDR = 2, + NDA_CACHEINFO = 3, + NDA_PROBES = 4, + NDA_VLAN = 5, + NDA_PORT = 6, + NDA_VNI = 7, + NDA_IFINDEX = 8, + NDA_MASTER = 9, + NDA_LINK_NETNSID = 10, + NDA_SRC_VNI = 11, + NDA_PROTOCOL = 12, + NDA_NH_ID = 13, + NDA_FDB_EXT_ATTRS = 14, + NDA_FLAGS_EXT = 15, + NDA_NDM_STATE_MASK = 16, + NDA_NDM_FLAGS_MASK = 17, + __NDA_MAX = 18, +}; + +struct rtnl_link_stats { + __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; + __u32 collisions; + __u32 rx_length_errors; + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; + __u32 tx_aborted_errors; + __u32 tx_carrier_errors; + __u32 tx_fifo_errors; + __u32 tx_heartbeat_errors; + __u32 tx_window_errors; + __u32 rx_compressed; + __u32 tx_compressed; + __u32 rx_nohandler; +}; + +struct rtnl_link_ifmap { + __u64 mem_start; + __u64 mem_end; + __u64 base_addr; + __u16 irq; + __u8 dma; + __u8 port; + long: 32; +}; + +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC = 0, + IFLA_PROTO_DOWN_REASON_MASK = 1, + IFLA_PROTO_DOWN_REASON_VALUE = 2, + __IFLA_PROTO_DOWN_REASON_CNT = 3, + IFLA_PROTO_DOWN_REASON_MAX = 2, +}; + +enum { + IFLA_BRPORT_UNSPEC = 0, + IFLA_BRPORT_STATE = 1, + IFLA_BRPORT_PRIORITY = 2, + IFLA_BRPORT_COST = 3, + IFLA_BRPORT_MODE = 4, + IFLA_BRPORT_GUARD = 5, + IFLA_BRPORT_PROTECT = 6, + IFLA_BRPORT_FAST_LEAVE = 7, + IFLA_BRPORT_LEARNING = 8, + IFLA_BRPORT_UNICAST_FLOOD = 9, + IFLA_BRPORT_PROXYARP = 10, + IFLA_BRPORT_LEARNING_SYNC = 11, + IFLA_BRPORT_PROXYARP_WIFI = 12, + IFLA_BRPORT_ROOT_ID = 13, + IFLA_BRPORT_BRIDGE_ID = 14, + IFLA_BRPORT_DESIGNATED_PORT = 15, + 
IFLA_BRPORT_DESIGNATED_COST = 16, + IFLA_BRPORT_ID = 17, + IFLA_BRPORT_NO = 18, + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, + IFLA_BRPORT_CONFIG_PENDING = 20, + IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, + IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, + IFLA_BRPORT_HOLD_TIMER = 23, + IFLA_BRPORT_FLUSH = 24, + IFLA_BRPORT_MULTICAST_ROUTER = 25, + IFLA_BRPORT_PAD = 26, + IFLA_BRPORT_MCAST_FLOOD = 27, + IFLA_BRPORT_MCAST_TO_UCAST = 28, + IFLA_BRPORT_VLAN_TUNNEL = 29, + IFLA_BRPORT_BCAST_FLOOD = 30, + IFLA_BRPORT_GROUP_FWD_MASK = 31, + IFLA_BRPORT_NEIGH_SUPPRESS = 32, + IFLA_BRPORT_ISOLATED = 33, + IFLA_BRPORT_BACKUP_PORT = 34, + IFLA_BRPORT_MRP_RING_OPEN = 35, + IFLA_BRPORT_MRP_IN_OPEN = 36, + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 37, + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 38, + IFLA_BRPORT_LOCKED = 39, + IFLA_BRPORT_MAB = 40, + IFLA_BRPORT_MCAST_N_GROUPS = 41, + IFLA_BRPORT_MCAST_MAX_GROUPS = 42, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 43, + IFLA_BRPORT_BACKUP_NHID = 44, + __IFLA_BRPORT_MAX = 45, +}; + +enum { + IFLA_INFO_UNSPEC = 0, + IFLA_INFO_KIND = 1, + IFLA_INFO_DATA = 2, + IFLA_INFO_XSTATS = 3, + IFLA_INFO_SLAVE_KIND = 4, + IFLA_INFO_SLAVE_DATA = 5, + __IFLA_INFO_MAX = 6, +}; + +enum { + IFLA_VF_INFO_UNSPEC = 0, + IFLA_VF_INFO = 1, + __IFLA_VF_INFO_MAX = 2, +}; + +enum { + IFLA_VF_UNSPEC = 0, + IFLA_VF_MAC = 1, + IFLA_VF_VLAN = 2, + IFLA_VF_TX_RATE = 3, + IFLA_VF_SPOOFCHK = 4, + IFLA_VF_LINK_STATE = 5, + IFLA_VF_RATE = 6, + IFLA_VF_RSS_QUERY_EN = 7, + IFLA_VF_STATS = 8, + IFLA_VF_TRUST = 9, + IFLA_VF_IB_NODE_GUID = 10, + IFLA_VF_IB_PORT_GUID = 11, + IFLA_VF_VLAN_LIST = 12, + IFLA_VF_BROADCAST = 13, + __IFLA_VF_MAX = 14, +}; + +struct ifla_vf_mac { + __u32 vf; + __u8 mac[32]; +}; + +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + +struct ifla_vf_vlan { + __u32 vf; + __u32 vlan; + __u32 qos; +}; + +enum { + IFLA_VF_VLAN_INFO_UNSPEC = 0, + IFLA_VF_VLAN_INFO = 1, + __IFLA_VF_VLAN_INFO_MAX = 2, +}; + +struct ifla_vf_vlan_info { + __u32 vf; + __u32 vlan; + __u32 qos; + __be16 vlan_proto; +}; + +struct ifla_vf_tx_rate { + __u32 vf; + __u32 rate; +}; + +struct ifla_vf_rate { + __u32 vf; + __u32 min_tx_rate; + __u32 max_tx_rate; +}; + +struct ifla_vf_spoofchk { + __u32 vf; + __u32 setting; +}; + +struct ifla_vf_link_state { + __u32 vf; + __u32 link_state; +}; + +struct ifla_vf_rss_query_en { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_STATS_RX_PACKETS = 0, + IFLA_VF_STATS_TX_PACKETS = 1, + IFLA_VF_STATS_RX_BYTES = 2, + IFLA_VF_STATS_TX_BYTES = 3, + IFLA_VF_STATS_BROADCAST = 4, + IFLA_VF_STATS_MULTICAST = 5, + IFLA_VF_STATS_PAD = 6, + IFLA_VF_STATS_RX_DROPPED = 7, + IFLA_VF_STATS_TX_DROPPED = 8, + __IFLA_VF_STATS_MAX = 9, +}; + +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_PORT_UNSPEC = 0, + IFLA_VF_PORT = 1, + __IFLA_VF_PORT_MAX = 2, +}; + +enum { + IFLA_PORT_UNSPEC = 0, + IFLA_PORT_VF = 1, + IFLA_PORT_PROFILE = 2, + IFLA_PORT_VSI_TYPE = 3, + IFLA_PORT_INSTANCE_UUID = 4, + IFLA_PORT_HOST_UUID = 5, + IFLA_PORT_REQUEST = 6, + IFLA_PORT_RESPONSE = 7, + __IFLA_PORT_MAX = 8, +}; + +struct if_stats_msg { + __u8 family; + __u8 pad1; + __u16 pad2; + __u32 ifindex; + __u32 filter_mask; +}; + +enum { + IFLA_STATS_UNSPEC = 0, + IFLA_STATS_LINK_64 = 1, + IFLA_STATS_LINK_XSTATS = 2, + IFLA_STATS_LINK_XSTATS_SLAVE = 3, + IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, + IFLA_STATS_AF_SPEC = 5, + __IFLA_STATS_MAX = 6, +}; + +enum { + IFLA_STATS_GETSET_UNSPEC = 0, + IFLA_STATS_GET_FILTERS = 1, + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 2, + __IFLA_STATS_GETSET_MAX = 3, +}; + +enum { + 
IFLA_OFFLOAD_XSTATS_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 2, + IFLA_OFFLOAD_XSTATS_L3_STATS = 3, + __IFLA_OFFLOAD_XSTATS_MAX = 4, +}; + +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 2, + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX = 3, +}; + +enum { + XDP_ATTACHED_NONE = 0, + XDP_ATTACHED_DRV = 1, + XDP_ATTACHED_SKB = 2, + XDP_ATTACHED_HW = 3, + XDP_ATTACHED_MULTI = 4, +}; + +enum { + IFLA_XDP_UNSPEC = 0, + IFLA_XDP_FD = 1, + IFLA_XDP_ATTACHED = 2, + IFLA_XDP_FLAGS = 3, + IFLA_XDP_PROG_ID = 4, + IFLA_XDP_DRV_PROG_ID = 5, + IFLA_XDP_SKB_PROG_ID = 6, + IFLA_XDP_HW_PROG_ID = 7, + IFLA_XDP_EXPECTED_FD = 8, + __IFLA_XDP_MAX = 9, +}; + +enum { + IFLA_EVENT_NONE = 0, + IFLA_EVENT_REBOOT = 1, + IFLA_EVENT_FEATURES = 2, + IFLA_EVENT_BONDING_FAILOVER = 3, + IFLA_EVENT_NOTIFY_PEERS = 4, + IFLA_EVENT_IGMP_RESEND = 5, + IFLA_EVENT_BONDING_OPTIONS = 6, +}; + +enum netdev_offload_xstats_type { + NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, +}; + +enum { + IFLA_BRIDGE_FLAGS = 0, + IFLA_BRIDGE_MODE = 1, + IFLA_BRIDGE_VLAN_INFO = 2, + IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, + IFLA_BRIDGE_MRP = 4, + IFLA_BRIDGE_CFM = 5, + IFLA_BRIDGE_MST = 6, + __IFLA_BRIDGE_MAX = 7, +}; + +struct rta_cacheinfo { + __u32 rta_clntref; + __u32 rta_lastuse; + __s32 rta_expires; + __u32 rta_error; + __u32 rta_used; + __u32 rta_id; + __u32 rta_ts; + __u32 rta_tsage; +}; + +enum rtnl_kinds { + RTNL_KIND_NEW = 0, + RTNL_KIND_DEL = 1, + RTNL_KIND_GET = 2, + RTNL_KIND_SET = 3, +}; + +struct rtnl_link { + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + struct module *owner; + unsigned int flags; + struct callback_head rcu; +}; + +struct rtnl_nets { + struct net *net[3]; + unsigned char len; +}; + +struct rtnl_newlink_tbs { + struct nlattr *tb[67]; + struct nlattr *linkinfo[6]; + struct nlattr *attr[51]; + struct nlattr *slave_attr[45]; +}; + +struct rtnl_offload_xstats_request_used { + bool request; + bool used; +}; + +struct rtnl_stats_dump_filters { + u32 mask[6]; +}; + +struct rtnl_mdb_dump_ctx { + long int idx; +}; + +struct bpf_cg_run_ctx { + struct bpf_run_ctx run_ctx; + const struct bpf_prog_array_item *prog_item; + int retval; +}; + +struct bpf_trace_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + bool is_uprobe; + long: 32; +}; + +struct page_pool_params { + union { + struct { + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; + }; + struct page_pool_params_fast fast; + }; + union { + struct { + struct net_device *netdev; + unsigned int queue_idx; + unsigned int flags; + void (*init_callback)(netmem_ref, void *); + void *init_arg; + }; + struct page_pool_params_slow slow; + }; +}; + +struct trace_event_raw_bpf_trigger_tp { + struct trace_entry ent; + int nonce; + char __data[0]; +}; + +struct trace_event_raw_bpf_test_finish { + struct trace_entry ent; + int err; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trigger_tp {}; + +struct trace_event_data_offsets_bpf_test_finish {}; + +typedef void (*btf_trace_bpf_trigger_tp)(void *, int); + +typedef void (*btf_trace_bpf_test_finish)(void *, int *); + +struct bpf_test_timer { + enum { + NO_PREEMPT = 0, + NO_MIGRATE = 1, + } mode; + u32 i; + u64 time_start; + u64 time_spent; +}; + +struct xdp_page_head { + struct xdp_buff orig_ctx; + struct xdp_buff ctx; + union { + struct { + struct {} __empty_frame; + struct 
xdp_frame frame[0]; + }; + struct { + struct {} __empty_data; + u8 data[0]; + }; + }; +}; + +struct xdp_test_data { + struct xdp_buff *orig_ctx; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xdp_rxq_info rxq; + struct net_device *dev; + struct page_pool *pp; + struct xdp_frame **frames; + struct sk_buff **skbs; + struct xdp_mem_info mem; + u32 batch_size; + u32 frame_cnt; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_fentry_test_t { + struct bpf_fentry_test_t *a; +}; + +struct prog_test_member1 { + int a; +}; + +struct prog_test_member { + struct prog_test_member1 m; + int c; +}; + +struct prog_test_ref_kfunc { + int a; + int b; + struct prog_test_member memb; + struct prog_test_ref_kfunc *next; + refcount_t cnt; +}; + +struct bpf_raw_tp_test_run_info { + struct bpf_prog *prog; + void *ctx; + u32 retval; +}; + +struct nf_sockopt_ops { + struct list_head list; + u_int8_t pf; + int set_optmin; + int set_optmax; + int (*set)(struct sock *, int, sockptr_t, unsigned int); + int get_optmin; + int get_optmax; + int (*get)(struct sock *, int, void *, int *); + struct module *owner; +}; + +enum nf_tables_msg_types { + NFT_MSG_NEWTABLE = 0, + NFT_MSG_GETTABLE = 1, + NFT_MSG_DELTABLE = 2, + NFT_MSG_NEWCHAIN = 3, + NFT_MSG_GETCHAIN = 4, + NFT_MSG_DELCHAIN = 5, + NFT_MSG_NEWRULE = 6, + NFT_MSG_GETRULE = 7, + NFT_MSG_DELRULE = 8, + NFT_MSG_NEWSET = 9, + NFT_MSG_GETSET = 10, + NFT_MSG_DELSET = 11, + NFT_MSG_NEWSETELEM = 12, + NFT_MSG_GETSETELEM = 13, + NFT_MSG_DELSETELEM = 14, + NFT_MSG_NEWGEN = 15, + NFT_MSG_GETGEN = 16, + NFT_MSG_TRACE = 17, + NFT_MSG_NEWOBJ = 18, + NFT_MSG_GETOBJ = 19, + NFT_MSG_DELOBJ = 20, + NFT_MSG_GETOBJ_RESET = 21, + NFT_MSG_NEWFLOWTABLE = 22, + NFT_MSG_GETFLOWTABLE = 23, + NFT_MSG_DELFLOWTABLE = 24, + NFT_MSG_GETRULE_RESET = 25, + NFT_MSG_DESTROYTABLE = 26, + NFT_MSG_DESTROYCHAIN = 27, + NFT_MSG_DESTROYRULE = 28, + NFT_MSG_DESTROYSET = 29, + NFT_MSG_DESTROYSETELEM = 30, + NFT_MSG_DESTROYOBJ = 31, + NFT_MSG_DESTROYFLOWTABLE = 32, + NFT_MSG_GETSETELEM_RESET = 33, + NFT_MSG_MAX = 34, +}; + +enum nft_list_attributes { + NFTA_LIST_UNSPEC = 0, + NFTA_LIST_ELEM = 1, + __NFTA_LIST_MAX = 2, +}; + +enum nft_hook_attributes { + NFTA_HOOK_UNSPEC = 0, + NFTA_HOOK_HOOKNUM = 1, + NFTA_HOOK_PRIORITY = 2, + NFTA_HOOK_DEV = 3, + NFTA_HOOK_DEVS = 4, + __NFTA_HOOK_MAX = 5, +}; + +enum nft_table_flags { + NFT_TABLE_F_DORMANT = 1, + NFT_TABLE_F_OWNER = 2, + NFT_TABLE_F_PERSIST = 4, +}; + +enum nft_table_attributes { + NFTA_TABLE_UNSPEC = 0, + NFTA_TABLE_NAME = 1, + NFTA_TABLE_FLAGS = 2, + NFTA_TABLE_USE = 3, + NFTA_TABLE_HANDLE = 4, + NFTA_TABLE_PAD = 5, + NFTA_TABLE_USERDATA = 6, + NFTA_TABLE_OWNER = 7, + __NFTA_TABLE_MAX = 8, +}; + +enum nft_chain_flags { + NFT_CHAIN_BASE = 1, + NFT_CHAIN_HW_OFFLOAD = 2, + NFT_CHAIN_BINDING = 4, +}; + +enum nft_chain_attributes { + NFTA_CHAIN_UNSPEC = 0, + NFTA_CHAIN_TABLE = 1, + NFTA_CHAIN_HANDLE = 2, + NFTA_CHAIN_NAME = 3, + NFTA_CHAIN_HOOK = 4, + NFTA_CHAIN_POLICY = 5, + NFTA_CHAIN_USE = 6, + NFTA_CHAIN_TYPE = 7, + NFTA_CHAIN_COUNTERS = 8, + NFTA_CHAIN_PAD = 9, + NFTA_CHAIN_FLAGS = 10, + NFTA_CHAIN_ID = 11, + NFTA_CHAIN_USERDATA = 12, + __NFTA_CHAIN_MAX = 13, +}; + +enum nft_rule_attributes { + NFTA_RULE_UNSPEC = 0, + NFTA_RULE_TABLE = 1, + NFTA_RULE_CHAIN = 2, + NFTA_RULE_HANDLE = 3, + NFTA_RULE_EXPRESSIONS = 4, + NFTA_RULE_COMPAT = 5, + 
NFTA_RULE_POSITION = 6, + NFTA_RULE_USERDATA = 7, + NFTA_RULE_PAD = 8, + NFTA_RULE_ID = 9, + NFTA_RULE_POSITION_ID = 10, + NFTA_RULE_CHAIN_ID = 11, + __NFTA_RULE_MAX = 12, +}; + +enum nft_set_policies { + NFT_SET_POL_PERFORMANCE = 0, + NFT_SET_POL_MEMORY = 1, +}; + +enum nft_set_desc_attributes { + NFTA_SET_DESC_UNSPEC = 0, + NFTA_SET_DESC_SIZE = 1, + NFTA_SET_DESC_CONCAT = 2, + __NFTA_SET_DESC_MAX = 3, +}; + +enum nft_set_field_attributes { + NFTA_SET_FIELD_UNSPEC = 0, + NFTA_SET_FIELD_LEN = 1, + __NFTA_SET_FIELD_MAX = 2, +}; + +enum nft_set_elem_flags { + NFT_SET_ELEM_INTERVAL_END = 1, + NFT_SET_ELEM_CATCHALL = 2, +}; + +enum nft_set_elem_attributes { + NFTA_SET_ELEM_UNSPEC = 0, + NFTA_SET_ELEM_KEY = 1, + NFTA_SET_ELEM_DATA = 2, + NFTA_SET_ELEM_FLAGS = 3, + NFTA_SET_ELEM_TIMEOUT = 4, + NFTA_SET_ELEM_EXPIRATION = 5, + NFTA_SET_ELEM_USERDATA = 6, + NFTA_SET_ELEM_EXPR = 7, + NFTA_SET_ELEM_PAD = 8, + NFTA_SET_ELEM_OBJREF = 9, + NFTA_SET_ELEM_KEY_END = 10, + NFTA_SET_ELEM_EXPRESSIONS = 11, + __NFTA_SET_ELEM_MAX = 12, +}; + +enum nft_set_elem_list_attributes { + NFTA_SET_ELEM_LIST_UNSPEC = 0, + NFTA_SET_ELEM_LIST_TABLE = 1, + NFTA_SET_ELEM_LIST_SET = 2, + NFTA_SET_ELEM_LIST_ELEMENTS = 3, + NFTA_SET_ELEM_LIST_SET_ID = 4, + __NFTA_SET_ELEM_LIST_MAX = 5, +}; + +enum nft_data_attributes { + NFTA_DATA_UNSPEC = 0, + NFTA_DATA_VALUE = 1, + NFTA_DATA_VERDICT = 2, + __NFTA_DATA_MAX = 3, +}; + +enum nft_verdict_attributes { + NFTA_VERDICT_UNSPEC = 0, + NFTA_VERDICT_CODE = 1, + NFTA_VERDICT_CHAIN = 2, + NFTA_VERDICT_CHAIN_ID = 3, + __NFTA_VERDICT_MAX = 4, +}; + +enum nft_expr_attributes { + NFTA_EXPR_UNSPEC = 0, + NFTA_EXPR_NAME = 1, + NFTA_EXPR_DATA = 2, + __NFTA_EXPR_MAX = 3, +}; + +enum nft_counter_attributes { + NFTA_COUNTER_UNSPEC = 0, + NFTA_COUNTER_BYTES = 1, + NFTA_COUNTER_PACKETS = 2, + NFTA_COUNTER_PAD = 3, + __NFTA_COUNTER_MAX = 4, +}; + +enum nft_gen_attributes { + NFTA_GEN_UNSPEC = 0, + NFTA_GEN_ID = 1, + NFTA_GEN_PROC_PID = 2, + NFTA_GEN_PROC_NAME = 3, + __NFTA_GEN_MAX = 4, +}; + +enum nft_object_attributes { + NFTA_OBJ_UNSPEC = 0, + NFTA_OBJ_TABLE = 1, + NFTA_OBJ_NAME = 2, + NFTA_OBJ_TYPE = 3, + NFTA_OBJ_DATA = 4, + NFTA_OBJ_USE = 5, + NFTA_OBJ_HANDLE = 6, + NFTA_OBJ_PAD = 7, + NFTA_OBJ_USERDATA = 8, + __NFTA_OBJ_MAX = 9, +}; + +enum nft_flowtable_flags { + NFT_FLOWTABLE_HW_OFFLOAD = 1, + NFT_FLOWTABLE_COUNTER = 2, + NFT_FLOWTABLE_MASK = 3, +}; + +enum nft_flowtable_attributes { + NFTA_FLOWTABLE_UNSPEC = 0, + NFTA_FLOWTABLE_TABLE = 1, + NFTA_FLOWTABLE_NAME = 2, + NFTA_FLOWTABLE_HOOK = 3, + NFTA_FLOWTABLE_USE = 4, + NFTA_FLOWTABLE_HANDLE = 5, + NFTA_FLOWTABLE_PAD = 6, + NFTA_FLOWTABLE_FLAGS = 7, + __NFTA_FLOWTABLE_MAX = 8, +}; + +enum nft_flowtable_hook_attributes { + NFTA_FLOWTABLE_HOOK_UNSPEC = 0, + NFTA_FLOWTABLE_HOOK_NUM = 1, + NFTA_FLOWTABLE_HOOK_PRIORITY = 2, + NFTA_FLOWTABLE_HOOK_DEVS = 3, + __NFTA_FLOWTABLE_HOOK_MAX = 4, +}; + +enum nft_devices_attributes { + NFTA_DEVICE_UNSPEC = 0, + NFTA_DEVICE_NAME = 1, + __NFTA_DEVICE_MAX = 2, +}; + +enum nf_dev_hooks { + NF_NETDEV_INGRESS = 0, + NF_NETDEV_EGRESS = 1, + NF_NETDEV_NUMHOOKS = 2, +}; + +enum nft_data_desc_flags { + NFT_DATA_DESC_SETELEM = 1, +}; + +struct nft_userdata { + u8 len; + unsigned char data[0]; +}; + +struct nft_set_elem_expr { + u8 size; + long: 32; + unsigned char data[0]; +}; + +struct nft_set_binding { + struct list_head list; + const struct nft_chain *chain; + u32 flags; +}; + +struct nft_set_ext_type { + u8 len; + u8 align; +}; + +struct nft_set_ext_tmpl { + u16 len; + u8 offset[8]; + u8 ext_len[8]; +}; + +struct 
nft_rule { + struct list_head list; + u64 handle: 42; + u64 genmask: 2; + u64 dlen: 12; + u64 udata: 1; + unsigned char data[0]; +}; + +struct nft_rule_dp { + u64 is_last: 1; + u64 dlen: 12; + u64 handle: 42; + long: 0; + unsigned char data[0]; +}; + +struct nft_rule_dp_last { + struct nft_rule_dp end; + struct callback_head h; + struct nft_rule_blob *blob; + const struct nft_chain *chain; +}; + +struct nft_stats { + u64 bytes; + u64 pkts; + struct u64_stats_sync syncp; + long: 32; +}; + +struct nft_hook { + struct list_head list; + struct nf_hook_ops ops; + struct callback_head rcu; +}; + +struct nft_base_chain { + struct nf_hook_ops ops; + struct list_head hook_list; + const struct nft_chain_type *type; + u8 policy; + u8 flags; + struct nft_stats *stats; + long: 32; + struct nft_chain chain; + struct flow_block flow_block; +}; + +struct nft_object_hash_key { + const char *name; + const struct nft_table *table; +}; + +struct nft_object_ops; + +struct nft_object { + struct list_head list; + struct rhlist_head rhlhead; + struct nft_object_hash_key key; + u32 genmask: 2; + u32 use; + u64 handle; + u16 udlen; + u8 *udata; + long: 32; + long: 32; + long: 32; + long: 32; + const struct nft_object_ops *ops; + long: 32; + unsigned char data[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nft_object_type; + +struct nft_object_ops { + void (*eval)(struct nft_object *, struct nft_regs *, const struct nft_pktinfo *); + unsigned int size; + int (*init)(const struct nft_ctx *, const struct nlattr * const *, struct nft_object *); + void (*destroy)(const struct nft_ctx *, struct nft_object *); + int (*dump)(struct sk_buff *, struct nft_object *, bool); + void (*update)(struct nft_object *, struct nft_object *); + const struct nft_object_type *type; +}; + +struct nft_object_type { + const struct nft_object_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); + const struct nft_object_ops *ops; + struct list_head list; + u32 type; + unsigned int maxattr; + u8 family; + struct module *owner; + const struct nla_policy *policy; +}; + +struct nft_flowtable { + struct list_head list; + struct nft_table *table; + char *name; + int hooknum; + int ops_len; + u32 genmask: 2; + u32 use; + u64 handle; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct list_head hook_list; + struct nf_flowtable data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nft_trans { + struct list_head list; + struct net *net; + struct nft_table *table; + int msg_type; + u32 seq; + u16 flags; + u8 report: 1; + u8 put_net: 1; +}; + +struct nft_trans_binding { + struct nft_trans nft_trans; + struct list_head binding_list; +}; + +struct nft_trans_rule { + struct nft_trans nft_trans; + struct nft_rule *rule; + struct nft_chain *chain; + struct nft_flow_rule *flow; + u32 rule_id; + bool bound; +}; + +struct nft_trans_set { + struct nft_trans_binding nft_trans_binding; + struct list_head list_trans_newset; + struct nft_set *set; + u32 set_id; + u32 gc_int; + u64 timeout; + bool update; + bool bound; + u32 size; +}; + +struct nft_trans_chain { + struct nft_trans_binding nft_trans_binding; + struct nft_chain *chain; + char *name; + struct nft_stats *stats; + u8 policy; + bool update; + bool bound; + u32 chain_id; + 
struct nft_base_chain *basechain; + struct list_head hook_list; +}; + +struct nft_trans_table { + struct nft_trans nft_trans; + bool update; +}; + +enum nft_trans_elem_flags { + NFT_TRANS_UPD_TIMEOUT = 1, + NFT_TRANS_UPD_EXPIRATION = 2, +}; + +struct nft_elem_update { + u64 timeout; + u64 expiration; + u8 flags; + long: 32; +}; + +struct nft_trans_one_elem { + struct nft_elem_priv *priv; + struct nft_elem_update *update; +}; + +struct nft_trans_elem { + struct nft_trans nft_trans; + struct nft_set *set; + bool bound; + unsigned int nelems; + struct nft_trans_one_elem elems[0]; +}; + +struct nft_trans_obj { + struct nft_trans nft_trans; + struct nft_object *obj; + struct nft_object *newobj; + bool update; +}; + +struct nft_trans_flowtable { + struct nft_trans nft_trans; + struct nft_flowtable *flowtable; + struct list_head hook_list; + u32 flags; + bool update; +}; + +enum { + NFT_VALIDATE_SKIP = 0, + NFT_VALIDATE_NEED = 1, + NFT_VALIDATE_DO = 2, +}; + +struct nft_audit_data { + struct nft_table *table; + int entries; + int op; + struct list_head list; +}; + +struct nft_set_elem_catchall { + struct list_head list; + struct callback_head rcu; + struct nft_elem_priv *elem; +}; + +struct nft_module_request { + struct list_head list; + char module[60]; + bool done; +}; + +struct nftnl_skb_parms { + bool report; +}; + +struct nft_chain_hook { + u32 num; + s32 priority; + const struct nft_chain_type *type; + struct list_head list; +}; + +struct nft_expr_info { + const struct nft_expr_ops *ops; + const struct nlattr *attr; + struct nlattr *tb[17]; +}; + +struct nft_rule_dump_ctx { + unsigned int s_idx; + char *table; + char *chain; + bool reset; +}; + +struct nft_set_dump_args { + const struct netlink_callback *cb; + struct nft_set_iter iter; + struct sk_buff *skb; + bool reset; +}; + +struct nft_set_dump_ctx { + const struct nft_set *set; + struct nft_ctx ctx; + bool reset; +}; + +struct nft_obj_dump_ctx { + unsigned int s_idx; + char *table; + u32 type; + bool reset; +}; + +struct nft_flowtable_hook { + u32 num; + int priority; + struct list_head list; +}; + +struct nft_flowtable_filter { + char *table; +}; + +struct net_proto_family { + int family; + int (*create)(struct net *, struct socket *, int, int); + struct module *owner; +}; + +struct rtentry { + long unsigned int rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + short unsigned int rt_flags; + short int rt_pad2; + long unsigned int rt_pad3; + void *rt_pad4; + short int rt_metric; + char *rt_dev; + long unsigned int rt_mtu; + long unsigned int rt_window; + short unsigned int rt_irtt; +}; + +struct pingv6_ops { + int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); + void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + int (*icmpv6_err_convert)(u8, u8, int *); + void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); +}; + +struct bictcp { + u32 cnt; + u32 last_max_cwnd; + u32 last_cwnd; + u32 last_time; + u32 bic_origin_point; + u32 bic_K; + u32 delay_min; + u32 epoch_start; + u32 ack_cnt; + u32 tcp_cwnd; + u16 unused; + u8 sample_cnt; + u8 found; + u32 round_start; + u32 end_seq; + u32 last_ack; + u32 curr_rtt; +}; + +enum ieee80211_twt_setup_cmd { + TWT_SETUP_CMD_REQUEST = 0, + TWT_SETUP_CMD_SUGGEST = 1, + TWT_SETUP_CMD_DEMAND = 
2, + TWT_SETUP_CMD_GROUPING = 3, + TWT_SETUP_CMD_ACCEPT = 4, + TWT_SETUP_CMD_ALTERNATE = 5, + TWT_SETUP_CMD_DICTATE = 6, + TWT_SETUP_CMD_REJECT = 7, +}; + +struct ieee80211_twt_params { + __le16 req_type; + __le64 twt; + u8 min_twt_dur; + __le16 mantissa; + u8 channel; +} __attribute__((packed)); + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +enum cpuhp_smt_control { + CPU_SMT_ENABLED = 0, + CPU_SMT_DISABLED = 1, + CPU_SMT_FORCE_DISABLED = 2, + CPU_SMT_NOT_SUPPORTED = 3, + CPU_SMT_NOT_IMPLEMENTED = 4, +}; + +typedef int (*cpu_stop_fn_t)(void *); + +struct trace_event_raw_cpuhp_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_multi_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_exit { + struct trace_entry ent; + unsigned int cpu; + int state; + int idx; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_cpuhp_enter {}; + +struct trace_event_data_offsets_cpuhp_multi_enter {}; + +struct trace_event_data_offsets_cpuhp_exit {}; + +typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int)); + +typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, int (*)(unsigned int, struct hlist_node *), struct hlist_node *); + +typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int); + +struct cpuhp_cpu_state { + enum cpuhp_state state; + enum cpuhp_state target; + enum cpuhp_state fail; + struct task_struct *thread; + bool should_run; + bool rollback; + bool single; + bool bringup; + struct hlist_node *node; + struct hlist_node *last; + enum cpuhp_state cb_state; + int result; + atomic_t ap_sync_state; + struct completion done_up; + struct completion done_down; +}; + +struct cpuhp_step { + const char *name; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } startup; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } teardown; + struct hlist_head list; + bool cant_stop; + bool multi_instance; +}; + +enum cpuhp_sync_state { + SYNC_STATE_DEAD = 0, + SYNC_STATE_KICKED = 1, + SYNC_STATE_SHOULD_DIE = 2, + SYNC_STATE_ALIVE = 3, + SYNC_STATE_SHOULD_ONLINE = 4, + SYNC_STATE_ONLINE = 5, +}; + +struct cpu_down_work { + unsigned int cpu; + enum cpuhp_state target; +}; + +enum cpu_mitigations { + CPU_MITIGATIONS_OFF = 0, + CPU_MITIGATIONS_AUTO = 1, + CPU_MITIGATIONS_AUTO_NOSMT = 2, +}; + +enum clocksource_ids { + CSID_GENERIC = 0, + CSID_ARM_ARCH_COUNTER = 1, + CSID_S390_TOD = 2, + CSID_X86_TSC_EARLY = 3, + CSID_X86_TSC = 4, + CSID_X86_KVM_CLK = 5, + CSID_X86_ART = 6, + CSID_MAX = 7, +}; + +struct ktime_timestamps { + u64 mono; + u64 boot; + u64 real; +}; + +struct system_time_snapshot { + u64 cycles; + ktime_t real; + ktime_t boot; + ktime_t raw; + enum clocksource_ids cs_id; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + long: 32; +}; + +struct system_counterval_t { + u64 cycles; + enum clocksource_ids cs_id; + bool use_nsecs; +}; + +typedef struct { + raw_spinlock_t *lock; + long unsigned int flags; +} class_raw_spinlock_irqsave_t; + +enum vdso_clock_mode { + VDSO_CLOCKMODE_NONE = 0, + VDSO_CLOCKMODE_MAX = 1, + VDSO_CLOCKMODE_TIMENS = 2147483647, +}; + +struct clocksource_base; + +struct clocksource { + u64 (*read)(struct clocksource *); + long: 32; + u64 mask; + u32 mult; + 
u32 shift; + u64 max_idle_ns; + u32 maxadj; + u32 uncertainty_margin; + u64 max_cycles; + const char *name; + struct list_head list; + u32 freq_khz; + int rating; + enum clocksource_ids id; + enum vdso_clock_mode vdso_clock_mode; + long unsigned int flags; + struct clocksource_base *base; + int (*enable)(struct clocksource *); + void (*disable)(struct clocksource *); + void (*suspend)(struct clocksource *); + void (*resume)(struct clocksource *); + void (*mark_unstable)(struct clocksource *); + void (*tick_stable)(struct clocksource *); + struct module *owner; +}; + +struct clocksource_base { + enum clocksource_ids id; + u32 freq_khz; + u64 offset; + u32 numerator; + u32 denominator; +}; + +struct tk_read_base { + struct clocksource *clock; + long: 32; + u64 mask; + u64 cycle_last; + u32 mult; + u32 shift; + u64 xtime_nsec; + ktime_t base; + u64 base_real; +}; + +struct timekeeper { + struct tk_read_base tkr_mono; + u64 xtime_sec; + long unsigned int ktime_sec; + long: 32; + struct timespec64 wall_to_monotonic; + ktime_t offs_real; + ktime_t offs_boot; + ktime_t offs_tai; + s32 tai_offset; + long: 32; + struct tk_read_base tkr_raw; + u64 raw_sec; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + struct timespec64 monotonic_to_boot; + u64 cycle_interval; + u64 xtime_interval; + s64 xtime_remainder; + u64 raw_interval; + ktime_t next_leap_ktime; + u64 ntp_tick; + s64 ntp_error; + u32 ntp_error_shift; + u32 ntp_err_mult; + u32 skip_second_overflow; + long: 32; +}; + +struct audit_ntp_data {}; + +enum timekeeping_adv_mode { + TK_ADV_TICK = 0, + TK_ADV_FREQ = 1, +}; + +struct tk_data { + seqcount_raw_spinlock_t seq; + long: 32; + struct timekeeper timekeeper; + struct timekeeper shadow_timekeeper; + raw_spinlock_t lock; + long: 32; +}; + +struct tk_fast { + seqcount_latch_t seq; + long: 32; + struct tk_read_base base[2]; +}; + +struct kimage_arch { + u32 kernel_r2; +}; + +typedef long unsigned int kimage_entry_t; + +struct kexec_segment { + union { + void *buf; + void *kbuf; + }; + size_t bufsz; + long unsigned int mem; + size_t memsz; +}; + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + long unsigned int start; + struct page *control_code_page; + struct page *swap_page; + void *vmcoreinfo_data_copy; + long unsigned int nr_segments; + struct kexec_segment segment[16]; + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unusable_pages; + long unsigned int control_page; + unsigned int type: 1; + unsigned int preserve_context: 1; + unsigned int file_mode: 1; + struct kimage_arch arch; + void *elf_headers; + long unsigned int elf_headers_sz; + long unsigned int elf_load_addr; +}; + +struct kexec_load_limit { + struct mutex mutex; + int limit; +}; + +enum { + TRACE_FTRACE_BIT = 0, + TRACE_FTRACE_NMI_BIT = 1, + TRACE_FTRACE_IRQ_BIT = 2, + TRACE_FTRACE_SIRQ_BIT = 3, + TRACE_FTRACE_TRANSITION_BIT = 4, + TRACE_INTERNAL_BIT = 5, + TRACE_INTERNAL_NMI_BIT = 6, + TRACE_INTERNAL_IRQ_BIT = 7, + TRACE_INTERNAL_SIRQ_BIT = 8, + TRACE_INTERNAL_TRANSITION_BIT = 9, + TRACE_BRANCH_BIT = 10, + TRACE_IRQ_BIT = 11, + TRACE_RECORD_RECURSION_BIT = 12, +}; + +struct ftrace_func_entry { + struct hlist_node hlist; + long unsigned int ip; + long unsigned int direct; +}; + +struct ftrace_graph_ent_entry { + struct trace_entry ent; + struct ftrace_graph_ent graph_ent; +}; + +struct ftrace_graph_ret_entry { + struct trace_entry ent; + struct ftrace_graph_ret ret; +}; + +enum { + FTRACE_HASH_FL_MOD = 1, +}; + +enum { + TRACE_GRAPH_FL = 
1, + TRACE_GRAPH_DEPTH_START_BIT = 2, + TRACE_GRAPH_DEPTH_END_BIT = 3, + TRACE_GRAPH_NOTRACE_BIT = 4, +}; + +struct fgraph_cpu_data { + pid_t last_pid; + int depth; + int depth_irq; + int ignore; + long unsigned int enter_funcs[50]; +}; + +struct fgraph_data { + struct fgraph_cpu_data *cpu_data; + union { + struct ftrace_graph_ent_entry ent; + struct ftrace_graph_ent_entry rent; + } ent; + struct ftrace_graph_ret_entry ret; + int failed; + int cpu; +}; + +enum { + FLAGS_FILL_FULL = 268435456, + FLAGS_FILL_START = 536870912, + FLAGS_FILL_END = 805306368, +}; + +struct fgraph_times { + long long unsigned int calltime; + long long unsigned int sleeptime; +}; + +typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); + +enum fetch_op { + FETCH_OP_NOP = 0, + FETCH_OP_REG = 1, + FETCH_OP_STACK = 2, + FETCH_OP_STACKP = 3, + FETCH_OP_RETVAL = 4, + FETCH_OP_IMM = 5, + FETCH_OP_COMM = 6, + FETCH_OP_ARG = 7, + FETCH_OP_FOFFS = 8, + FETCH_OP_DATA = 9, + FETCH_OP_EDATA = 10, + FETCH_OP_DEREF = 11, + FETCH_OP_UDEREF = 12, + FETCH_OP_ST_RAW = 13, + FETCH_OP_ST_MEM = 14, + FETCH_OP_ST_UMEM = 15, + FETCH_OP_ST_STRING = 16, + FETCH_OP_ST_USTRING = 17, + FETCH_OP_ST_SYMSTR = 18, + FETCH_OP_ST_EDATA = 19, + FETCH_OP_MOD_BF = 20, + FETCH_OP_LP_ARRAY = 21, + FETCH_OP_TP_ARG = 22, + FETCH_OP_END = 23, + FETCH_NOP_SYMBOL = 24, +}; + +struct fetch_insn { + enum fetch_op op; + union { + unsigned int param; + struct { + unsigned int size; + int offset; + }; + struct { + unsigned char basesize; + unsigned char lshift; + unsigned char rshift; + }; + long unsigned int immediate; + void *data; + }; +}; + +struct fetch_type { + const char *name; + size_t size; + bool is_signed; + bool is_string; + print_type_func_t print; + const char *fmt; + const char *fmttype; +}; + +struct probe_arg { + struct fetch_insn *code; + bool dynamic; + unsigned int offset; + unsigned int count; + const char *name; + const char *comm; + char *fmt; + const struct fetch_type *type; +}; + +struct probe_entry_arg { + struct fetch_insn *code; + unsigned int size; +}; + +struct trace_uprobe_filter { + rwlock_t rwlock; + int nr_systemwide; + struct list_head perf_events; +}; + +struct trace_probe_event { + unsigned int flags; + struct trace_event_class class; + struct trace_event_call call; + struct list_head files; + struct list_head probes; + struct trace_uprobe_filter filter[0]; +}; + +struct trace_probe { + struct list_head list; + struct trace_probe_event *event; + ssize_t size; + unsigned int nr_args; + struct probe_entry_arg *entry_arg; + struct probe_arg args[0]; +}; + +struct event_file_link { + struct trace_event_file *file; + struct list_head list; +}; + +struct traceprobe_parse_context { + struct trace_event_call *event; + const char *funcname; + const struct btf_type *proto; + const struct btf_param *params; + s32 nr_params; + struct btf *btf; + const struct btf_type *last_type; + u32 last_bitoffs; + u32 last_bitsize; + struct trace_probe *tp; + unsigned int flags; + int offset; +}; + +enum probe_print_type { + PROBE_PRINT_NORMAL = 0, + PROBE_PRINT_RETURN = 1, + PROBE_PRINT_EVENT = 2, +}; + +enum { + TP_ERR_FILE_NOT_FOUND = 0, + TP_ERR_NO_REGULAR_FILE = 1, + TP_ERR_BAD_REFCNT = 2, + TP_ERR_REFCNT_OPEN_BRACE = 3, + TP_ERR_BAD_REFCNT_SUFFIX = 4, + TP_ERR_BAD_UPROBE_OFFS = 5, + TP_ERR_BAD_MAXACT_TYPE = 6, + TP_ERR_BAD_MAXACT = 7, + TP_ERR_MAXACT_TOO_BIG = 8, + TP_ERR_BAD_PROBE_ADDR = 9, + TP_ERR_NON_UNIQ_SYMBOL = 10, + TP_ERR_BAD_RETPROBE = 11, + TP_ERR_NO_TRACEPOINT = 12, + TP_ERR_BAD_ADDR_SUFFIX = 13, + 
TP_ERR_NO_GROUP_NAME = 14, + TP_ERR_GROUP_TOO_LONG = 15, + TP_ERR_BAD_GROUP_NAME = 16, + TP_ERR_NO_EVENT_NAME = 17, + TP_ERR_EVENT_TOO_LONG = 18, + TP_ERR_BAD_EVENT_NAME = 19, + TP_ERR_EVENT_EXIST = 20, + TP_ERR_RETVAL_ON_PROBE = 21, + TP_ERR_NO_RETVAL = 22, + TP_ERR_BAD_STACK_NUM = 23, + TP_ERR_BAD_ARG_NUM = 24, + TP_ERR_BAD_VAR = 25, + TP_ERR_BAD_REG_NAME = 26, + TP_ERR_BAD_MEM_ADDR = 27, + TP_ERR_BAD_IMM = 28, + TP_ERR_IMMSTR_NO_CLOSE = 29, + TP_ERR_FILE_ON_KPROBE = 30, + TP_ERR_BAD_FILE_OFFS = 31, + TP_ERR_SYM_ON_UPROBE = 32, + TP_ERR_TOO_MANY_OPS = 33, + TP_ERR_DEREF_NEED_BRACE = 34, + TP_ERR_BAD_DEREF_OFFS = 35, + TP_ERR_DEREF_OPEN_BRACE = 36, + TP_ERR_COMM_CANT_DEREF = 37, + TP_ERR_BAD_FETCH_ARG = 38, + TP_ERR_ARRAY_NO_CLOSE = 39, + TP_ERR_BAD_ARRAY_SUFFIX = 40, + TP_ERR_BAD_ARRAY_NUM = 41, + TP_ERR_ARRAY_TOO_BIG = 42, + TP_ERR_BAD_TYPE = 43, + TP_ERR_BAD_STRING = 44, + TP_ERR_BAD_SYMSTRING = 45, + TP_ERR_BAD_BITFIELD = 46, + TP_ERR_ARG_NAME_TOO_LONG = 47, + TP_ERR_NO_ARG_NAME = 48, + TP_ERR_BAD_ARG_NAME = 49, + TP_ERR_USED_ARG_NAME = 50, + TP_ERR_ARG_TOO_LONG = 51, + TP_ERR_NO_ARG_BODY = 52, + TP_ERR_BAD_INSN_BNDRY = 53, + TP_ERR_FAIL_REG_PROBE = 54, + TP_ERR_DIFF_PROBE_TYPE = 55, + TP_ERR_DIFF_ARG_TYPE = 56, + TP_ERR_SAME_PROBE = 57, + TP_ERR_NO_EVENT_INFO = 58, + TP_ERR_BAD_ATTACH_EVENT = 59, + TP_ERR_BAD_ATTACH_ARG = 60, + TP_ERR_NO_EP_FILTER = 61, + TP_ERR_NOSUP_BTFARG = 62, + TP_ERR_NO_BTFARG = 63, + TP_ERR_NO_BTF_ENTRY = 64, + TP_ERR_BAD_VAR_ARGS = 65, + TP_ERR_NOFENTRY_ARGS = 66, + TP_ERR_DOUBLE_ARGS = 67, + TP_ERR_ARGS_2LONG = 68, + TP_ERR_ARGIDX_2BIG = 69, + TP_ERR_NO_PTR_STRCT = 70, + TP_ERR_NOSUP_DAT_ARG = 71, + TP_ERR_BAD_HYPHEN = 72, + TP_ERR_NO_BTF_FIELD = 73, + TP_ERR_BAD_BTF_TID = 74, + TP_ERR_BAD_TYPE4STR = 75, + TP_ERR_NEED_STRING_TYPE = 76, +}; + +struct trace_probe_log { + const char *subsystem; + const char **argv; + int argc; + int index; +}; + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +struct bpf_iter__bpf_prog { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_prog *prog; + }; +}; + +struct bpf_iter__cgroup { + union { + struct bpf_iter_meta *meta; + }; + union { + struct cgroup *cgroup; + }; +}; + +struct cgroup_iter_priv { + struct cgroup_subsys_state *start_css; + bool visited_all; + bool terminate; + int order; +}; + +struct bpf_iter_css { + __u64 __opaque[3]; +}; + +struct bpf_iter_css_kern { + struct cgroup_subsys_state *start; + struct cgroup_subsys_state *pos; + unsigned int flags; + long: 32; +}; + +enum mm_cid_state { + MM_CID_UNSET = 4294967295, + MM_CID_LAZY_PUT = 2147483648, +}; + +struct user_arg_ptr { + union { + const char * const *native; + } ptr; +}; + +struct postprocess_bh_ctx { + struct work_struct work; + struct buffer_head *bh; +}; + +struct bh_lru { + struct buffer_head *bhs[16]; +}; + +struct bh_accounting { + int nr; + int ratelimit; +}; + +struct fsverity_info; + +typedef short unsigned int __kernel_uid16_t; + +typedef short unsigned int __kernel_gid16_t; + +typedef __kernel_uid16_t uid16_t; + +typedef __kernel_gid16_t gid16_t; + +struct ext4_io_end_vec { + struct list_head list; + loff_t offset; + ssize_t size; + long: 32; +}; + +struct ext4_io_end { + struct list_head list; + handle_t *handle; + struct inode *inode; + struct bio *bio; + unsigned int flag; + refcount_t count; + struct list_head list_vec; +}; + +typedef struct ext4_io_end ext4_io_end_t; + +struct ext4_io_submit { + struct writeback_control *io_wbc; + struct bio *io_bio; + ext4_io_end_t *io_end; + long: 32; + sector_t io_next_block; 
+}; + +struct ext4_inode { + __le16 i_mode; + __le16 i_uid; + __le32 i_size_lo; + __le32 i_atime; + __le32 i_ctime; + __le32 i_mtime; + __le32 i_dtime; + __le16 i_gid; + __le16 i_links_count; + __le32 i_blocks_lo; + __le32 i_flags; + union { + struct { + __le32 l_i_version; + } linux1; + struct { + __u32 h_i_translator; + } hurd1; + struct { + __u32 m_i_reserved1; + } masix1; + } osd1; + __le32 i_block[15]; + __le32 i_generation; + __le32 i_file_acl_lo; + __le32 i_size_high; + __le32 i_obso_faddr; + union { + struct { + __le16 l_i_blocks_high; + __le16 l_i_file_acl_high; + __le16 l_i_uid_high; + __le16 l_i_gid_high; + __le16 l_i_checksum_lo; + __le16 l_i_reserved; + } linux2; + struct { + __le16 h_i_reserved1; + __u16 h_i_mode_high; + __u16 h_i_uid_high; + __u16 h_i_gid_high; + __u32 h_i_author; + } hurd2; + struct { + __le16 h_i_reserved1; + __le16 m_i_file_acl_high; + __u32 m_i_reserved2[2]; + } masix2; + } osd2; + __le16 i_extra_isize; + __le16 i_checksum_hi; + __le32 i_ctime_extra; + __le32 i_mtime_extra; + __le32 i_atime_extra; + __le32 i_crtime; + __le32 i_crtime_extra; + __le32 i_version_hi; + __le32 i_projid; +}; + +enum { + ES_WRITTEN_B = 0, + ES_UNWRITTEN_B = 1, + ES_DELAYED_B = 2, + ES_HOLE_B = 3, + ES_REFERENCED_B = 4, + ES_FLAGS = 5, +}; + +struct ext4_xattr_ibody_header { + __le32 h_magic; +}; + +struct ext4_xattr_inode_array { + unsigned int count; + struct inode *inodes[0]; +}; + +struct mpage_da_data { + struct inode *inode; + struct writeback_control *wbc; + unsigned int can_map: 1; + long unsigned int first_page; + long unsigned int next_page; + long unsigned int last_page; + struct ext4_map_blocks map; + struct ext4_io_submit io_submit; + unsigned int do_map: 1; + unsigned int scanned_until_end: 1; + unsigned int journalled_more_data: 1; + long: 32; +}; + +struct fscrypt_inode_info; + +enum { + TRACEFS_EVENT_INODE = 2, + TRACEFS_GID_PERM_SET = 4, + TRACEFS_UID_PERM_SET = 8, + TRACEFS_INSTANCE_INODE = 16, +}; + +struct tracefs_inode { + struct inode vfs_inode; + struct list_head list; + long unsigned int flags; + void *private; +}; + +struct eventfs_attr { + int mode; + kuid_t uid; + kgid_t gid; +}; + +struct eventfs_inode { + union { + struct list_head list; + struct callback_head rcu; + }; + struct list_head children; + const struct eventfs_entry *entries; + const char *name; + struct eventfs_attr *entry_attrs; + void *data; + struct eventfs_attr attr; + struct kref kref; + unsigned int is_freed: 1; + unsigned int is_events: 1; + unsigned int nr_entries: 30; + unsigned int ino; +}; + +struct tracefs_dir_ops { + int (*mkdir)(const char *); + int (*rmdir)(const char *); +}; + +struct tracefs_fs_info { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +enum { + Opt_uid___4 = 0, + Opt_gid___4 = 1, + Opt_mode___2 = 2, +}; + +struct btrfs_data_container { + __u32 bytes_left; + __u32 bytes_missing; + __u32 elem_cnt; + __u32 elem_missed; + __u64 val[0]; +}; + +struct rb_simple_node { + struct rb_node rb_node; + long: 32; + u64 bytenr; +}; + +struct inode_fs_paths { + struct btrfs_path *btrfs_path; + struct btrfs_root *fs_root; + struct btrfs_data_container *fspath; +}; + +struct extent_inode_elem { + u64 inum; + u64 offset; + u64 num_bytes; + struct extent_inode_elem *next; + long: 32; +}; + +struct btrfs_backref_iter { + u64 bytenr; + struct btrfs_path *path; + struct btrfs_fs_info *fs_info; + struct btrfs_key cur_key; + u32 item_ptr; + u32 cur_ptr; + u32 end_ptr; +}; + +struct btrfs_backref_node { + struct { + struct rb_node rb_node; + long: 32; + u64 
bytenr; + }; + u64 new_bytenr; + u64 owner; + struct list_head list; + struct list_head upper; + struct list_head lower; + struct btrfs_root *root; + struct extent_buffer *eb; + unsigned int level: 8; + unsigned int cowonly: 1; + unsigned int lowest: 1; + unsigned int locked: 1; + unsigned int processed: 1; + unsigned int checked: 1; + unsigned int pending: 1; + unsigned int detached: 1; + unsigned int is_reloc_root: 1; + long: 32; +}; + +struct btrfs_backref_edge { + struct list_head list[2]; + struct btrfs_backref_node *node[2]; +}; + +struct btrfs_backref_cache { + struct rb_root rb_root; + struct btrfs_backref_node *path[8]; + struct list_head pending[8]; + struct list_head leaves; + struct list_head changed; + struct list_head detached; + long: 32; + u64 last_trans; + int nr_nodes; + int nr_edges; + struct list_head pending_edge; + struct list_head useless_node; + struct btrfs_fs_info *fs_info; + bool is_reloc; +}; + +struct btrfs_seq_list { + struct list_head list; + u64 seq; +}; + +struct preftree { + struct rb_root_cached root; + unsigned int count; +}; + +struct preftrees { + struct preftree direct; + struct preftree indirect; + struct preftree indirect_missing_keys; +}; + +struct share_check { + struct btrfs_backref_share_check_ctx *ctx; + struct btrfs_root *root; + u64 inum; + u64 data_bytenr; + u64 data_extent_gen; + int share_count; + int self_ref_count; + bool have_delayed_delete_refs; + long: 32; +}; + +typedef short unsigned int __kernel_ipc_pid_t; + +struct shmid_ds { + struct ipc_perm shm_perm; + int shm_segsz; + __kernel_old_time_t shm_atime; + __kernel_old_time_t shm_dtime; + __kernel_old_time_t shm_ctime; + __kernel_ipc_pid_t shm_cpid; + __kernel_ipc_pid_t shm_lpid; + short unsigned int shm_nattch; + short unsigned int shm_unused; + void *shm_unused2; + void *shm_unused3; +}; + +struct shmid64_ds { + struct ipc64_perm shm_perm; + __kernel_size_t shm_segsz; + long unsigned int shm_atime; + long unsigned int shm_atime_high; + long unsigned int shm_dtime; + long unsigned int shm_dtime_high; + long unsigned int shm_ctime; + long unsigned int shm_ctime_high; + __kernel_pid_t shm_cpid; + __kernel_pid_t shm_lpid; + long unsigned int shm_nattch; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct shminfo64 { + long unsigned int shmmax; + long unsigned int shmmin; + long unsigned int shmmni; + long unsigned int shmseg; + long unsigned int shmall; + long unsigned int __unused1; + long unsigned int __unused2; + long unsigned int __unused3; + long unsigned int __unused4; +}; + +struct shminfo { + int shmmax; + int shmmin; + int shmmni; + int shmseg; + int shmall; +}; + +struct shm_info { + int used_ids; + __kernel_ulong_t shm_tot; + __kernel_ulong_t shm_rss; + __kernel_ulong_t shm_swp; + __kernel_ulong_t swap_attempts; + __kernel_ulong_t swap_successes; +}; + +struct shmid_kernel { + struct kern_ipc_perm shm_perm; + struct file *shm_file; + long unsigned int shm_nattch; + long unsigned int shm_segsz; + long: 32; + time64_t shm_atim; + time64_t shm_dtim; + time64_t shm_ctim; + struct pid *shm_cprid; + struct pid *shm_lprid; + struct ucounts *mlock_ucounts; + struct task_struct *shm_creator; + struct list_head shm_clist; + struct ipc_namespace *ns; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct shm_file_data { + int id; + struct ipc_namespace *ns; + struct file *file; + const struct vm_operations_struct *vm_ops; 
+}; + +struct crypto_lskcipher_spawn { + struct crypto_spawn base; +}; + +struct crypto_scomp { + struct crypto_tfm base; +}; + +struct scomp_alg { + void * (*alloc_ctx)(struct crypto_scomp *); + void (*free_ctx)(struct crypto_scomp *, void *); + int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + struct crypto_alg base; + }; + struct comp_alg_common calg; + }; +}; + +struct lzorle_ctx { + void *lzorle_comp_mem; +}; + +struct blk_mq_hw_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_hw_ctx *, char *); +}; + +struct blkg_rwstat { + struct percpu_counter cpu_cnt[5]; + atomic64_t aux_cnt[5]; +}; + +struct bfq_entity; + +struct bfq_service_tree { + struct rb_root active; + struct rb_root idle; + struct bfq_entity *first_idle; + struct bfq_entity *last_idle; + u64 vtime; + long unsigned int wsum; + long: 32; +}; + +struct bfq_sched_data; + +struct bfq_queue; + +struct bfq_entity { + struct rb_node rb_node; + bool on_st_or_in_serv; + u64 start; + u64 finish; + struct rb_root *tree; + long: 32; + u64 min_start; + int service; + int budget; + int allocated; + int dev_weight; + int weight; + int new_weight; + int orig_weight; + struct bfq_entity *parent; + struct bfq_sched_data *my_sched_data; + struct bfq_sched_data *sched_data; + int prio_changed; + bool in_groups_with_pending_reqs; + struct bfq_queue *last_bfqq_created; + long: 32; +}; + +struct bfq_sched_data { + struct bfq_entity *in_service_entity; + struct bfq_entity *next_in_service; + struct bfq_service_tree service_tree[3]; + long unsigned int bfq_class_idle_last_service; + long: 32; +}; + +struct bfq_weight_counter { + unsigned int weight; + unsigned int num_active; + struct rb_node weights_node; +}; + +struct bfq_ttime { + u64 last_end_request; + u64 ttime_total; + long unsigned int ttime_samples; + long: 32; + u64 ttime_mean; +}; + +struct bfq_data; + +struct bfq_io_cq; + +struct bfq_queue { + int ref; + int stable_ref; + struct bfq_data *bfqd; + short unsigned int ioprio; + short unsigned int ioprio_class; + short unsigned int new_ioprio; + short unsigned int new_ioprio_class; + long: 32; + u64 last_serv_time_ns; + unsigned int inject_limit; + long unsigned int decrease_time_jif; + struct bfq_queue *new_bfqq; + struct rb_node pos_node; + struct rb_root *pos_root; + struct rb_root sort_list; + struct request *next_rq; + int queued[2]; + int meta_pending; + struct list_head fifo; + struct bfq_entity entity; + struct bfq_weight_counter *weight_counter; + int max_budget; + long unsigned int budget_timeout; + int dispatched; + long unsigned int flags; + struct list_head bfqq_list; + long: 32; + struct bfq_ttime ttime; + u64 io_start_time; + u64 tot_idle_time; + u32 seek_history; + struct hlist_node burst_list_node; + long: 32; + sector_t last_request_pos; + unsigned int requests_within_timer; + pid_t pid; + struct bfq_io_cq *bic; + long unsigned int wr_cur_max_time; + long unsigned int soft_rt_next_start; + long unsigned int last_wr_start_finish; + unsigned int wr_coeff; + long unsigned int last_idle_bklogged; + long unsigned int service_from_backlogged; + long unsigned int service_from_wr; + long unsigned int wr_start_at_switch_to_srt; + long unsigned int split_time; + long unsigned int first_IO_time; + 
long unsigned int creation_time; + struct bfq_queue *waker_bfqq; + struct bfq_queue *tentative_waker_bfqq; + unsigned int num_waker_detections; + long: 32; + u64 waker_detection_started; + struct hlist_node woken_list_node; + struct hlist_head woken_list; + unsigned int actuator_idx; +}; + +struct bfq_group; + +struct bfq_data { + struct request_queue *queue; + struct list_head dispatch; + struct bfq_group *root_group; + struct rb_root_cached queue_weights_tree; + unsigned int num_groups_with_pending_reqs; + unsigned int busy_queues[3]; + int wr_busy_queues; + int queued; + int tot_rq_in_driver; + int rq_in_driver[8]; + bool nonrot_with_queueing; + int max_rq_in_driver; + int hw_tag_samples; + int hw_tag; + int budgets_assigned; + struct hrtimer idle_slice_timer; + struct bfq_queue *in_service_queue; + long: 32; + sector_t last_position; + sector_t in_serv_last_pos; + u64 last_completion; + struct bfq_queue *last_completed_rq_bfqq; + struct bfq_queue *last_bfqq_created; + u64 last_empty_occupied_ns; + bool wait_dispatch; + struct request *waited_rq; + bool rqs_injected; + long: 32; + u64 first_dispatch; + u64 last_dispatch; + ktime_t last_budget_start; + ktime_t last_idling_start; + long unsigned int last_idling_start_jiffies; + int peak_rate_samples; + u32 sequential_samples; + long: 32; + u64 tot_sectors_dispatched; + u32 last_rq_max_size; + long: 32; + u64 delta_from_first; + u32 peak_rate; + int bfq_max_budget; + struct list_head active_list[8]; + struct list_head idle_list; + u64 bfq_fifo_expire[2]; + unsigned int bfq_back_penalty; + unsigned int bfq_back_max; + u32 bfq_slice_idle; + int bfq_user_max_budget; + unsigned int bfq_timeout; + bool strict_guarantees; + long unsigned int last_ins_in_burst; + long unsigned int bfq_burst_interval; + int burst_size; + struct bfq_entity *burst_parent_entity; + long unsigned int bfq_large_burst_thresh; + bool large_burst; + struct hlist_head burst_list; + bool low_latency; + unsigned int bfq_wr_coeff; + unsigned int bfq_wr_rt_max_time; + unsigned int bfq_wr_min_idle_time; + long unsigned int bfq_wr_min_inter_arr_async; + unsigned int bfq_wr_max_softrt_rate; + long: 32; + u64 rate_dur_prod; + struct bfq_queue oom_bfqq; + spinlock_t lock; + struct bfq_io_cq *bio_bic; + struct bfq_queue *bio_bfqq; + unsigned int word_depths[4]; + unsigned int full_depth_shift; + unsigned int num_actuators; + long: 32; + sector_t sector[8]; + sector_t nr_sectors[8]; + struct blk_independent_access_range ia_ranges[8]; + unsigned int actuator_load_threshold; + long: 32; +}; + +struct bfq_iocq_bfqq_data { + bool saved_has_short_ttime; + bool saved_IO_bound; + long: 32; + u64 saved_io_start_time; + u64 saved_tot_idle_time; + bool saved_in_large_burst; + bool was_in_burst_list; + unsigned int saved_weight; + long unsigned int saved_wr_coeff; + long unsigned int saved_last_wr_start_finish; + long unsigned int saved_service_from_wr; + long unsigned int saved_wr_start_at_switch_to_srt; + unsigned int saved_wr_cur_max_time; + long: 32; + struct bfq_ttime saved_ttime; + u64 saved_last_serv_time_ns; + unsigned int saved_inject_limit; + long unsigned int saved_decrease_time_jif; + struct bfq_queue *stable_merge_bfqq; + bool stably_merged; +}; + +struct bfq_io_cq { + struct io_cq icq; + struct bfq_queue *bfqq[16]; + int ioprio; + uint64_t blkcg_serial_nr; + struct bfq_iocq_bfqq_data bfqq_data[8]; + unsigned int requests; + long: 32; +}; + +struct bfqg_stats { + struct blkg_rwstat bytes; + struct blkg_rwstat ios; +}; + +struct bfq_group { + struct blkg_policy_data pd; + refcount_t 
ref; + struct bfq_entity entity; + struct bfq_sched_data sched_data; + struct bfq_data *bfqd; + struct bfq_queue *async_bfqq[128]; + struct bfq_queue *async_idle_bfqq[8]; + struct bfq_entity *my_entity; + int active_entities; + int num_queues_with_pending_reqs; + struct rb_root rq_pos_tree; + long: 32; + struct bfqg_stats stats; +}; + +enum bfqq_state_flags { + BFQQF_just_created = 0, + BFQQF_busy = 1, + BFQQF_wait_request = 2, + BFQQF_non_blocking_wait_rq = 3, + BFQQF_fifo_expire = 4, + BFQQF_has_short_ttime = 5, + BFQQF_sync = 6, + BFQQF_IO_bound = 7, + BFQQF_in_large_burst = 8, + BFQQF_softrt_update = 9, + BFQQF_coop = 10, + BFQQF_split_coop = 11, +}; + +enum bfqq_expiration { + BFQQE_TOO_IDLE = 0, + BFQQE_BUDGET_TIMEOUT = 1, + BFQQE_BUDGET_EXHAUSTED = 2, + BFQQE_NO_MORE_REQUESTS = 3, + BFQQE_PREEMPTED = 4, +}; + +typedef struct { + spinlock_t *lock; +} class_spinlock_t; + +enum io_uring_napi_op { + IO_URING_NAPI_REGISTER_OP = 0, + IO_URING_NAPI_STATIC_ADD_ID = 1, + IO_URING_NAPI_STATIC_DEL_ID = 2, +}; + +enum io_uring_napi_tracking_strategy { + IO_URING_NAPI_TRACKING_DYNAMIC = 0, + IO_URING_NAPI_TRACKING_STATIC = 1, + IO_URING_NAPI_TRACKING_INACTIVE = 255, +}; + +struct io_uring_napi { + __u32 busy_poll_to; + __u8 prefer_busy_poll; + __u8 opcode; + __u8 pad[2]; + __u32 op_param; + __u32 resv; +}; + +struct io_wait_queue { + struct wait_queue_entry wq; + struct io_ring_ctx *ctx; + unsigned int cq_tail; + unsigned int cq_min_tail; + unsigned int nr_timeouts; + int hit_timeout; + ktime_t min_timeout; + ktime_t timeout; + struct hrtimer t; + ktime_t napi_busy_poll_dt; + bool napi_prefer_busy_poll; + long: 32; +}; + +enum { + IO_CHECK_CQ_OVERFLOW_BIT = 0, + IO_CHECK_CQ_DROPPED_BIT = 1, +}; + +struct io_napi_entry { + unsigned int napi_id; + struct list_head list; + long unsigned int timeout; + struct hlist_node node; + struct callback_head rcu; +}; + +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +enum vp_vq_vector_policy { + VP_VQ_VECTOR_POLICY_EACH = 0, + VP_VQ_VECTOR_POLICY_SHARED_SLOW = 1, + VP_VQ_VECTOR_POLICY_SHARED = 2, +}; + +struct req { + struct req *next; + struct completion done; + int err; + const char *name; + umode_t mode; + kuid_t uid; + kgid_t gid; + struct device *dev; +}; + +struct nvme_user_io { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; + long: 32; +}; + +struct nvme_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result; +}; + +struct nvme_passthru_cmd64 { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + union { + __u32 data_len; + __u32 vec_cnt; + }; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; + __u64 result; +}; + +struct nvme_uring_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; +}; + +enum nvme_subsys_type { + NVME_NQN_DISC = 1, + NVME_NQN_NVME = 2, + 
NVME_NQN_CURR = 3, +}; + +enum nvme_ctrl_type { + NVME_CTRL_IO = 1, + NVME_CTRL_DISC = 2, + NVME_CTRL_ADMIN = 3, +}; + +enum nvme_dctype { + NVME_DCTYPE_NOT_REPORTED = 0, + NVME_DCTYPE_DDC = 1, + NVME_DCTYPE_CDC = 2, +}; + +struct nvme_id_power_state { + __le16 max_power; + __u8 rsvd2; + __u8 flags; + __le32 entry_lat; + __le32 exit_lat; + __u8 read_tput; + __u8 read_lat; + __u8 write_tput; + __u8 write_lat; + __le16 idle_power; + __u8 idle_scale; + __u8 rsvd19; + __le16 active_power; + __u8 active_work_scale; + __u8 rsvd23[9]; +}; + +enum { + NVME_CTRL_CMIC_MULTI_PORT = 1, + NVME_CTRL_CMIC_MULTI_CTRL = 2, + NVME_CTRL_CMIC_ANA = 8, + NVME_CTRL_ONCS_COMPARE = 1, + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 2, + NVME_CTRL_ONCS_DSM = 4, + NVME_CTRL_ONCS_WRITE_ZEROES = 8, + NVME_CTRL_ONCS_RESERVATIONS = 32, + NVME_CTRL_ONCS_TIMESTAMP = 64, + NVME_CTRL_VWC_PRESENT = 1, + NVME_CTRL_OACS_SEC_SUPP = 1, + NVME_CTRL_OACS_NS_MNGT_SUPP = 8, + NVME_CTRL_OACS_DIRECTIVES = 32, + NVME_CTRL_OACS_DBBUF_SUPP = 256, + NVME_CTRL_LPA_CMD_EFFECTS_LOG = 2, + NVME_CTRL_CTRATT_128_ID = 1, + NVME_CTRL_CTRATT_NON_OP_PSP = 2, + NVME_CTRL_CTRATT_NVM_SETS = 4, + NVME_CTRL_CTRATT_READ_RECV_LVLS = 8, + NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 16, + NVME_CTRL_CTRATT_PREDICTABLE_LAT = 32, + NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 128, + NVME_CTRL_CTRATT_UUID_LIST = 512, + NVME_CTRL_SGLS_BYTE_ALIGNED = 1, + NVME_CTRL_SGLS_DWORD_ALIGNED = 2, + NVME_CTRL_SGLS_KSDBDS = 4, + NVME_CTRL_SGLS_MSDS = 524288, + NVME_CTRL_SGLS_SAOS = 1048576, +}; + +enum { + NVME_ID_CNS_NS = 0, + NVME_ID_CNS_CTRL = 1, + NVME_ID_CNS_NS_ACTIVE_LIST = 2, + NVME_ID_CNS_NS_DESC_LIST = 3, + NVME_ID_CNS_CS_NS = 5, + NVME_ID_CNS_CS_CTRL = 6, + NVME_ID_CNS_NS_ACTIVE_LIST_CS = 7, + NVME_ID_CNS_NS_CS_INDEP = 8, + NVME_ID_CNS_NS_PRESENT_LIST = 16, + NVME_ID_CNS_NS_PRESENT = 17, + NVME_ID_CNS_CTRL_NS_LIST = 18, + NVME_ID_CNS_CTRL_LIST = 19, + NVME_ID_CNS_SCNDRY_CTRL_LIST = 21, + NVME_ID_CNS_NS_GRANULARITY = 22, + NVME_ID_CNS_UUID_LIST = 23, + NVME_ID_CNS_ENDGRP_LIST = 25, +}; + +enum { + NVME_CMD_EFFECTS_CSUPP = 1, + NVME_CMD_EFFECTS_LBCC = 2, + NVME_CMD_EFFECTS_NCC = 4, + NVME_CMD_EFFECTS_NIC = 8, + NVME_CMD_EFFECTS_CCC = 16, + NVME_CMD_EFFECTS_CSER_MASK = 49152, + NVME_CMD_EFFECTS_CSE_MASK = 458752, + NVME_CMD_EFFECTS_UUID_SEL = 524288, + NVME_CMD_EFFECTS_SCOPE_MASK = 4293918720, +}; + +struct nvme_effects_log { + __le32 acs[256]; + __le32 iocs[256]; + __u8 resv[2048]; +}; + +enum nvme_opcode { + nvme_cmd_flush = 0, + nvme_cmd_write = 1, + nvme_cmd_read = 2, + nvme_cmd_write_uncor = 4, + nvme_cmd_compare = 5, + nvme_cmd_write_zeroes = 8, + nvme_cmd_dsm = 9, + nvme_cmd_verify = 12, + nvme_cmd_resv_register = 13, + nvme_cmd_resv_report = 14, + nvme_cmd_resv_acquire = 17, + nvme_cmd_resv_release = 21, + nvme_cmd_zone_mgmt_send = 121, + nvme_cmd_zone_mgmt_recv = 122, + nvme_cmd_zone_append = 125, + nvme_cmd_vendor_start = 128, +}; + +struct nvme_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +struct nvme_keyed_sgl_desc { + __le64 addr; + __u8 length[3]; + __u8 key[4]; + __u8 type; +}; + +union nvme_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct nvme_sgl_desc sgl; + struct nvme_keyed_sgl_desc ksgl; +}; + +struct nvme_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + union { + struct { + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; + }; + struct { + __le32 cdw10; + __le32 cdw11; + 
__le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; + } cdws; + }; +}; + +struct nvme_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2; + __le32 cdw3; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 lbat; + __le16 lbatm; +}; + +enum { + NVME_RW_LR = 32768, + NVME_RW_FUA = 16384, + NVME_RW_APPEND_PIREMAP = 512, + NVME_RW_DSM_FREQ_UNSPEC = 0, + NVME_RW_DSM_FREQ_TYPICAL = 1, + NVME_RW_DSM_FREQ_RARE = 2, + NVME_RW_DSM_FREQ_READS = 3, + NVME_RW_DSM_FREQ_WRITES = 4, + NVME_RW_DSM_FREQ_RW = 5, + NVME_RW_DSM_FREQ_ONCE = 6, + NVME_RW_DSM_FREQ_PREFETCH = 7, + NVME_RW_DSM_FREQ_TEMP = 8, + NVME_RW_DSM_LATENCY_NONE = 0, + NVME_RW_DSM_LATENCY_IDLE = 16, + NVME_RW_DSM_LATENCY_NORM = 32, + NVME_RW_DSM_LATENCY_LOW = 48, + NVME_RW_DSM_SEQ_REQ = 64, + NVME_RW_DSM_COMPRESSED = 128, + NVME_RW_PRINFO_PRCHK_REF = 1024, + NVME_RW_PRINFO_PRCHK_APP = 2048, + NVME_RW_PRINFO_PRCHK_GUARD = 4096, + NVME_RW_PRINFO_PRACT = 8192, + NVME_RW_DTYPE_STREAMS = 16, + NVME_WZ_DEAC = 512, +}; + +struct nvme_dsm_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 nr; + __le32 attributes; + __u32 rsvd12[4]; +}; + +struct nvme_write_zeroes_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 lbat; + __le16 lbatm; +}; + +struct nvme_zone_mgmt_send_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le32 cdw12; + __u8 zsa; + __u8 select_all; + __u8 rsvd13[2]; + __le32 cdw14[2]; +}; + +struct nvme_zone_mgmt_recv_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd2[2]; + union nvme_data_ptr dptr; + __le64 slba; + __le32 numd; + __u8 zra; + __u8 zrasf; + __u8 pr; + __u8 rsvd13; + __le32 cdw14[2]; +}; + +enum nvme_admin_opcode { + nvme_admin_delete_sq = 0, + nvme_admin_create_sq = 1, + nvme_admin_get_log_page = 2, + nvme_admin_delete_cq = 4, + nvme_admin_create_cq = 5, + nvme_admin_identify = 6, + nvme_admin_abort_cmd = 8, + nvme_admin_set_features = 9, + nvme_admin_get_features = 10, + nvme_admin_async_event = 12, + nvme_admin_ns_mgmt = 13, + nvme_admin_activate_fw = 16, + nvme_admin_download_fw = 17, + nvme_admin_dev_self_test = 20, + nvme_admin_ns_attach = 21, + nvme_admin_keep_alive = 24, + nvme_admin_directive_send = 25, + nvme_admin_directive_recv = 26, + nvme_admin_virtual_mgmt = 28, + nvme_admin_nvme_mi_send = 29, + nvme_admin_nvme_mi_recv = 30, + nvme_admin_dbbuf = 124, + nvme_admin_format_nvm = 128, + nvme_admin_security_send = 129, + nvme_admin_security_recv = 130, + nvme_admin_sanitize_nvm = 132, + nvme_admin_get_lba_status = 134, + nvme_admin_vendor_start = 192, +}; + +struct nvme_identify { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 cns; + __u8 rsvd3; + __le16 ctrlid; + __le16 cnssid; + __u8 rsvd11; + __u8 csi; + __u32 rsvd12[4]; +}; + +struct nvme_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 fid; + __le32 dword11; + __le32 dword12; + __le32 dword13; + __le32 dword14; + __le32 dword15; +}; + +struct nvme_create_cq { + __u8 opcode; + __u8 flags; + __u16 
command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct nvme_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct nvme_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct nvme_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 sqid; + __u16 cid; + __u32 rsvd11[5]; +}; + +struct nvme_download_firmware { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + union nvme_data_ptr dptr; + __le32 numd; + __le32 offset; + __u32 rsvd12[4]; +}; + +struct nvme_format_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[4]; + __le32 cdw10; + __u32 rsvd11[5]; +}; + +struct nvme_get_log_page_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 lid; + __u8 lsp; + __le16 numdl; + __le16 numdu; + __le16 lsi; + union { + struct { + __le32 lpol; + __le32 lpou; + }; + __le64 lpo; + }; + __u8 rsvd14[3]; + __u8 csi; + __u32 rsvd15; +}; + +struct nvme_directive_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 numd; + __u8 doper; + __u8 dtype; + __le16 dspec; + __u8 endir; + __u8 tdtype; + __u16 rsvd15; + __u32 rsvd16[3]; +}; + +enum nvmf_fabrics_opcode { + nvme_fabrics_command = 127, +}; + +struct nvmf_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 ts[24]; +}; + +struct nvmf_connect_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __le16 recfmt; + __le16 qid; + __le16 sqsize; + __u8 cattr; + __u8 resv3; + __le32 kato; + __u8 resv4[12]; +}; + +struct nvmf_property_set_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __le64 value; + __u8 resv4[8]; +}; + +struct nvmf_property_get_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __u8 resv4[16]; +}; + +struct nvmf_auth_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al_tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_send_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_receive_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al; + __u8 resv4[16]; +}; + +struct nvme_dbbuf { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __le64 prp2; + __u32 rsvd12[6]; +}; + +struct nvme_command { + union { + struct nvme_common_command common; + struct nvme_rw_command rw; + struct nvme_identify identify; + struct nvme_features features; + struct nvme_create_cq create_cq; + struct nvme_create_sq create_sq; + 
struct nvme_delete_queue delete_queue; + struct nvme_download_firmware dlfw; + struct nvme_format_cmd format; + struct nvme_dsm_cmd dsm; + struct nvme_write_zeroes_cmd write_zeroes; + struct nvme_zone_mgmt_send_cmd zms; + struct nvme_zone_mgmt_recv_cmd zmr; + struct nvme_abort_cmd abort; + struct nvme_get_log_page_command get_log_page; + struct nvmf_common_command fabrics; + struct nvmf_connect_command connect; + struct nvmf_property_set_command prop_set; + struct nvmf_property_get_command prop_get; + struct nvmf_auth_common_command auth_common; + struct nvmf_auth_send_command auth_send; + struct nvmf_auth_receive_command auth_receive; + struct nvme_dbbuf dbbuf; + struct nvme_directive_cmd directive; + }; +}; + +union nvme_result { + __le16 u16; + __le32 u32; + __le64 u64; +}; + +struct nvme_ctrl; + +struct nvme_request { + struct nvme_command *cmd; + long: 32; + union nvme_result result; + u8 genctr; + u8 retries; + u8 flags; + u16 status; + struct nvme_ctrl *ctrl; + long: 32; +}; + +enum nvme_ctrl_state { + NVME_CTRL_NEW = 0, + NVME_CTRL_LIVE = 1, + NVME_CTRL_RESETTING = 2, + NVME_CTRL_CONNECTING = 3, + NVME_CTRL_DELETING = 4, + NVME_CTRL_DELETING_NOIO = 5, + NVME_CTRL_DEAD = 6, +}; + +struct opal_dev; + +struct nvme_fault_inject {}; + +struct nvme_ctrl_ops; + +struct nvme_subsystem; + +struct nvmf_ctrl_options; + +struct nvme_ctrl { + bool comp_seen; + bool identified; + bool passthru_err_log_enabled; + enum nvme_ctrl_state state; + spinlock_t lock; + struct mutex scan_lock; + const struct nvme_ctrl_ops *ops; + struct request_queue *admin_q; + struct request_queue *connect_q; + struct request_queue *fabrics_q; + struct device *dev; + int instance; + int numa_node; + struct blk_mq_tag_set *tagset; + struct blk_mq_tag_set *admin_tagset; + struct list_head namespaces; + struct mutex namespaces_lock; + struct srcu_struct srcu; + long: 32; + struct device ctrl_device; + struct device *device; + struct cdev cdev; + struct work_struct reset_work; + struct work_struct delete_work; + wait_queue_head_t state_wq; + struct nvme_subsystem *subsys; + struct list_head subsys_entry; + struct opal_dev *opal_dev; + u16 cntlid; + u16 mtfa; + u32 ctrl_config; + u32 queue_count; + u64 cap; + u32 max_hw_sectors; + u32 max_segments; + u32 max_integrity_segments; + u32 max_zeroes_sectors; + u16 crdt[3]; + u16 oncs; + u8 dmrl; + u32 dmrsl; + u16 oacs; + u16 sqsize; + u32 max_namespaces; + atomic_t abort_limit; + u8 vwc; + u32 vs; + u32 sgls; + u16 kas; + u8 npss; + u8 apsta; + u16 wctemp; + u16 cctemp; + u32 oaes; + u32 aen_result; + u32 ctratt; + unsigned int shutdown_timeout; + unsigned int kato; + bool subsystem; + long unsigned int quirks; + struct nvme_id_power_state psd[32]; + struct nvme_effects_log *effects; + struct xarray cels; + struct work_struct scan_work; + struct work_struct async_event_work; + struct delayed_work ka_work; + struct delayed_work failfast_work; + long: 32; + struct nvme_command ka_cmd; + long unsigned int ka_last_check_time; + struct work_struct fw_act_work; + long unsigned int events; + key_serial_t tls_pskid; + long: 32; + u64 ps_max_latency_us; + bool apst_enabled; + u16 hmmaxd; + u32 hmpre; + u32 hmmin; + u32 hmminds; + u32 ioccsz; + u32 iorcsz; + u16 icdoff; + u16 maxcmd; + int nr_reconnects; + long unsigned int flags; + struct nvmf_ctrl_options *opts; + struct page *discard_page; + long unsigned int discard_page_busy; + struct nvme_fault_inject fault_inject; + enum nvme_ctrl_type cntrltype; + enum nvme_dctype dctype; +}; + +enum { + NVME_REQ_CANCELLED = 1, + NVME_REQ_USERCMD 
= 2, + NVME_MPATH_IO_STATS = 4, + NVME_MPATH_CNT_ACTIVE = 8, +}; + +struct nvme_ctrl_ops { + const char *name; + struct module *module; + unsigned int flags; + const struct attribute_group **dev_attr_groups; + int (*reg_read32)(struct nvme_ctrl *, u32, u32 *); + int (*reg_write32)(struct nvme_ctrl *, u32, u32); + int (*reg_read64)(struct nvme_ctrl *, u32, u64 *); + void (*free_ctrl)(struct nvme_ctrl *); + void (*submit_async_event)(struct nvme_ctrl *); + int (*subsystem_reset)(struct nvme_ctrl *); + void (*delete_ctrl)(struct nvme_ctrl *); + void (*stop_ctrl)(struct nvme_ctrl *); + int (*get_address)(struct nvme_ctrl *, char *, int); + void (*print_device_info)(struct nvme_ctrl *); + bool (*supports_pci_p2pdma)(struct nvme_ctrl *); +}; + +struct nvme_subsystem { + int instance; + long: 32; + struct device dev; + struct kref ref; + struct list_head entry; + struct mutex lock; + struct list_head ctrls; + struct list_head nsheads; + char subnqn[223]; + char serial[20]; + char model[40]; + char firmware_rev[8]; + u8 cmic; + enum nvme_subsys_type subtype; + u16 vendor_id; + u16 awupf; + struct ida ns_ida; +}; + +struct nvme_ns_ids { + u8 eui64[8]; + u8 nguid[16]; + uuid_t uuid; + u8 csi; +}; + +struct nvme_ns_head { + struct list_head list; + struct srcu_struct srcu; + struct nvme_subsystem *subsys; + struct nvme_ns_ids ids; + u8 lba_shift; + u16 ms; + u16 pi_size; + u8 pi_type; + u8 guard_type; + struct list_head entry; + struct kref ref; + bool shared; + bool rotational; + bool passthru_err_log_enabled; + struct nvme_effects_log *effects; + long: 32; + u64 nuse; + unsigned int ns_id; + int instance; + long unsigned int features; + struct ratelimit_state rs_nuse; + struct cdev cdev; + long: 32; + struct device cdev_device; + struct gendisk *disk; + long: 32; +}; + +enum nvme_ns_features { + NVME_NS_EXT_LBAS = 1, + NVME_NS_METADATA_SUPPORTED = 2, + NVME_NS_DEAC = 4, +}; + +struct nvme_ns { + struct list_head list; + struct nvme_ctrl *ctrl; + struct request_queue *queue; + struct gendisk *disk; + struct list_head siblings; + struct kref kref; + struct nvme_ns_head *head; + long unsigned int flags; + struct cdev cdev; + long: 32; + struct device cdev_device; + struct nvme_fault_inject fault_inject; +}; + +enum { + NVME_IOCTL_VEC = 1, + NVME_IOCTL_PARTITION = 2, +}; + +struct nvme_uring_data { + __u64 metadata; + __u64 addr; + __u32 data_len; + __u32 metadata_len; + __u32 timeout_ms; + long: 32; +}; + +struct nvme_uring_cmd_pdu { + struct request *req; + struct bio *bio; + u64 result; + int status; + long: 32; +}; + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 255, +}; + +enum phy___2 { + phy_100a = 992, + phy_100c = 55575208, + phy_82555_tx = 22020776, + phy_nsc_tx = 1543512064, + phy_82562_et = 53478056, + phy_82562_em = 52429480, + phy_82562_ek = 51380904, + phy_82562_eh = 24117928, + phy_82552_v = 3496017997, + phy_unknown = 4294967295, +}; + +struct csr___2 { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 8, + rus_ready = 16, + rus_mask = 60, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack 
{ + stat_ack_not_ours = 0, + stat_ack_sw_gen = 4, + stat_ack_rnr = 16, + stat_ack_cu_idle = 32, + stat_ack_frame_rx = 64, + stat_ack_cu_cmd_done = 128, + stat_ack_not_present = 255, + stat_ack_rx = 84, + stat_ack_tx = 160, +}; + +enum scb_cmd_hi { + irq_mask_none = 0, + irq_mask_all = 1, + irq_sw_gen = 2, +}; + +enum scb_cmd_lo { + cuc_nop = 0, + ruc_start = 1, + ruc_load_base = 6, + cuc_start = 16, + cuc_resume = 32, + cuc_dump_addr = 64, + cuc_dump_stats = 80, + cuc_load_base = 96, + cuc_dump_reset = 112, +}; + +enum cuc_dump { + cuc_dump_complete = 40965, + cuc_dump_reset_complete = 40967, +}; + +enum port { + software_reset = 0, + selftest = 1, + selective_reset = 2, +}; + +enum eeprom_ctrl_lo { + eesk = 1, + eecs = 2, + eedi = 4, + eedo = 8, +}; + +enum mdi_ctrl { + mdi_write = 67108864, + mdi_read = 134217728, + mdi_ready = 268435456, +}; + +enum eeprom_op { + op_write = 5, + op_read = 6, + op_ewds = 16, + op_ewen = 19, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 3, + eeprom_phy_iface = 6, + eeprom_id = 10, + eeprom_config_asf = 13, + eeprom_smbus_addr = 144, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 128, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB = 1, + I82553C = 2, + I82503 = 3, + DP83840 = 4, + S80C240 = 5, + S80C24 = 6, + I82555 = 7, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 32, +}; + +enum eeprom_config_asf { + eeprom_asf = 32768, + eeprom_gcl = 16384, +}; + +enum cb_status { + cb_complete = 32768, + cb_ok = 8192, +}; + +enum cb_command { + cb_nop = 0, + cb_iaaddr = 1, + cb_config = 2, + cb_multi = 3, + cb_tx = 4, + cb_ucode = 5, + cb_dump = 6, + cb_tx_sf = 8, + cb_tx_nc = 16, + cb_cid = 7936, + cb_i = 8192, + cb_s = 16384, + cb_el = 32768, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next; + struct rx *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +struct config { + u8 byte_count: 6; + u8 pad0: 2; + u8 rx_fifo_limit: 4; + u8 tx_fifo_limit: 3; + u8 pad1: 1; + u8 adaptive_ifs; + u8 mwi_enable: 1; + u8 type_enable: 1; + u8 read_align_enable: 1; + u8 term_write_cache_line: 1; + u8 pad3: 4; + u8 rx_dma_max_count: 7; + u8 pad4: 1; + u8 tx_dma_max_count: 7; + u8 dma_max_count_enable: 1; + u8 late_scb_update: 1; + u8 direct_rx_dma: 1; + u8 tno_intr: 1; + u8 cna_intr: 1; + u8 standard_tcb: 1; + u8 standard_stat_counter: 1; + u8 rx_save_overruns: 1; + u8 rx_save_bad_frames: 1; + u8 rx_discard_short_frames: 1; + u8 tx_underrun_retry: 2; + u8 pad7: 2; + u8 rx_extended_rfd: 1; + u8 tx_two_frames_in_fifo: 1; + u8 tx_dynamic_tbd: 1; + u8 mii_mode: 1; + u8 pad8: 6; + u8 csma_disabled: 1; + u8 rx_tcpudp_checksum: 1; + u8 pad9: 3; + u8 vlan_arp_tco: 1; + u8 link_status_wake: 1; + u8 arp_wake: 1; + u8 mcmatch_wake: 1; + u8 pad10: 3; + u8 no_source_addr_insertion: 1; + u8 preamble_length: 2; + u8 loopback: 2; + u8 linear_priority: 3; + u8 pad11: 5; + u8 linear_priority_mode: 1; + u8 pad12: 3; + u8 ifs: 4; + u8 ip_addr_lo; + u8 ip_addr_hi; + u8 promiscuous_mode: 1; + u8 broadcast_disabled: 1; + u8 wait_after_win: 1; + u8 pad15_1: 1; + u8 ignore_ul_bit: 1; + u8 crc_16_bit: 1; + u8 pad15_2: 1; + u8 crs_or_cdt: 1; + u8 fc_delay_lo; + u8 fc_delay_hi; + u8 rx_stripping: 1; + u8 tx_padding: 1; + u8 rx_crc_transfer: 1; + u8 rx_long_ok: 1; + u8 fc_priority_threshold: 3; + u8 pad18: 1; + u8 addr_wake: 1; + u8 magic_packet_disable: 1; + u8 fc_disable: 1; + u8 fc_restop: 1; + u8 fc_restart: 1; + u8 fc_reject: 1; + u8 full_duplex_force: 1; + 
u8 full_duplex_pin: 1; + u8 pad20_1: 5; + u8 fc_priority_location: 1; + u8 multi_ia: 1; + u8 pad20_2: 1; + u8 pad21_1: 3; + u8 multicast_all: 1; + u8 pad21_2: 4; + u8 rx_d102_mode: 1; + u8 rx_vlan_drop: 1; + u8 pad22: 6; + u8 pad_d102[9]; +}; + +struct multi { + __le16 count; + u8 addr[386]; +}; + +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[6]; + __le32 ucode[134]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next; + struct cb *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, + lb_mac = 1, + lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames; + __le32 tx_max_collisions; + __le32 tx_late_collisions; + __le32 tx_underruns; + __le32 tx_lost_crs; + __le32 tx_deferred; + __le32 tx_single_collisions; + __le32 tx_multiple_collisions; + __le32 tx_total_collisions; + __le32 rx_good_frames; + __le32 rx_crc_errors; + __le32 rx_alignment_errors; + __le32 rx_resource_errors; + __le32 rx_overrun_errors; + __le32 rx_cdt_errors; + __le32 rx_short_frame_errors; + __le32 fc_xmt_pause; + __le32 fc_rcv_pause; + __le32 fc_rcv_unsupported; + __le16 xmt_tco_frames; + __le16 rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + u32 msg_enable; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *, u32, u32, u32, u16); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct rx *rxs; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t cb_lock; + spinlock_t cmd_lock; + struct csr___2 *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + long: 32; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + enum { + ich = 1, + promiscuous = 2, + multicast_all = 4, + wol_magic = 8, + ich_10h_workaround = 16, + } flags; + enum mac mac; + enum phy___2 phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + struct mem *mem; + dma_addr_t dma_addr; + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + long: 32; +}; + +enum led_state { + led_on = 1, + led_off = 4, + led_on_559 = 5, + led_on_557 = 7, +}; + +struct iwl_pnvm_image { + struct { + const void *data; + u32 len; + } chunks[64]; + u32 n_chunks; + u32 version; +}; + +enum 
iwl_prph_scratch_mtr_format { + IWL_PRPH_MTR_FORMAT_16B = 0, + IWL_PRPH_MTR_FORMAT_32B = 262144, + IWL_PRPH_MTR_FORMAT_64B = 524288, + IWL_PRPH_MTR_FORMAT_256B = 786432, +}; + +enum iwl_prph_scratch_flags { + IWL_PRPH_SCRATCH_IMR_DEBUG_EN = 2, + IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = 16, + IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = 256, + IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = 512, + IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = 1024, + IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = 2048, + IWL_PRPH_SCRATCH_RB_SIZE_4K = 65536, + IWL_PRPH_SCRATCH_MTR_MODE = 131072, + IWL_PRPH_SCRATCH_MTR_FORMAT = 786432, + IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 15728640, + IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8388608, + IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9437184, + IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10485760, + IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = 536870912, +}; + +struct iwl_prph_scrath_mem_desc_addr_array { + __le64 mem_descs[64]; +}; + +enum iwl_ltr_config_flags { + LTR_CFG_FLAG_FEATURE_ENABLE = 1, + LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = 2, + LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = 4, + LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = 8, + LTR_CFG_FLAG_SW_SET_SHORT = 16, + LTR_CFG_FLAG_SW_SET_LONG = 32, + LTR_CFG_FLAG_DENIE_C10_ON_PD = 64, + LTR_CFG_FLAG_UPDATE_VALUES = 128, +}; + +struct iwl_ltr_config_cmd { + __le32 flags; + __le32 static_long; + __le32 static_short; + __le32 ltr_cfg_values[4]; + __le32 ltr_short_idle_timeout; +}; + +enum iwl_dev_tx_power_cmd_mode { + IWL_TX_POWER_MODE_SET_MAC = 0, + IWL_TX_POWER_MODE_SET_DEVICE = 1, + IWL_TX_POWER_MODE_SET_CHAINS = 2, + IWL_TX_POWER_MODE_SET_ACK = 3, + IWL_TX_POWER_MODE_SET_SAR_TIMER = 4, + IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5, +}; + +struct iwl_dev_tx_power_common { + __le32 set_mode; + __le32 mac_context_id; + __le16 pwr_restriction; +} __attribute__((packed)); + +struct iwl_dev_tx_power_cmd_v3 { + __le16 per_chain[10]; +}; + +struct iwl_dev_tx_power_cmd_v4 { + __le16 per_chain[10]; + u8 enable_ack_reduction; + u8 reserved[3]; +}; + +struct iwl_dev_tx_power_cmd_v5 { + __le16 per_chain[10]; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; +}; + +struct iwl_dev_tx_power_cmd_v6 { + __le16 per_chain[44]; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; +}; + +struct iwl_dev_tx_power_cmd_v7 { + __le16 per_chain[44]; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; + __le32 flags; +}; + +struct iwl_dev_tx_power_cmd_v8 { + __le16 per_chain[44]; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; + __le32 flags; + __le32 tpc_vlp_backoff_level; +}; + +struct iwl_dev_tx_power_cmd_per_band { + __le16 dev_24; + __le16 dev_52_low; + __le16 dev_52_high; +}; + +struct iwl_dev_tx_power_cmd_v3_v8 { + struct iwl_dev_tx_power_common common; + struct iwl_dev_tx_power_cmd_per_band per_band; + union { + struct iwl_dev_tx_power_cmd_v3 v3; + struct iwl_dev_tx_power_cmd_v4 v4; + struct iwl_dev_tx_power_cmd_v5 v5; + struct iwl_dev_tx_power_cmd_v6 v6; + struct iwl_dev_tx_power_cmd_v7 v7; + struct iwl_dev_tx_power_cmd_v8 v8; + }; +}; + +struct iwl_dev_tx_power_cmd_v9 { + __le16 reserved; + __le16 per_chain[10]; + u8 per_chain_restriction_changed; + u8 reserved1[3]; + __le32 timer_period; +} __attribute__((packed)); + +struct iwl_dev_tx_power_cmd_v10 { + __le16 per_chain[44]; + u8 per_chain_restriction_changed; + u8 reserved; + __le32 timer_period; + __le32 flags; +} __attribute__((packed)); + +struct 
iwl_dev_tx_power_cmd { + struct iwl_dev_tx_power_common common; + union { + struct iwl_dev_tx_power_cmd_v9 v9; + struct iwl_dev_tx_power_cmd_v10 v10; + }; +}; + +enum iwl_geo_per_chain_offset_operation { + IWL_PER_CHAIN_OFFSET_SET_TABLES = 0, + IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE = 1, +}; + +struct iwl_per_chain_offset { + __le16 max_tx_power; + u8 chain_a; + u8 chain_b; +}; + +struct iwl_geo_tx_power_profiles_cmd_v1 { + __le32 ops; + struct iwl_per_chain_offset table[6]; +}; + +struct iwl_geo_tx_power_profiles_cmd_v2 { + __le32 ops; + struct iwl_per_chain_offset table[6]; + __le32 table_revision; +}; + +struct iwl_geo_tx_power_profiles_cmd_v3 { + __le32 ops; + struct iwl_per_chain_offset table[9]; + __le32 table_revision; +}; + +struct iwl_geo_tx_power_profiles_cmd_v4 { + __le32 ops; + struct iwl_per_chain_offset table[16]; + __le32 table_revision; +}; + +struct iwl_geo_tx_power_profiles_cmd_v5 { + __le32 ops; + struct iwl_per_chain_offset table[24]; + __le32 table_revision; +}; + +union iwl_geo_tx_power_profiles_cmd { + struct iwl_geo_tx_power_profiles_cmd_v1 v1; + struct iwl_geo_tx_power_profiles_cmd_v2 v2; + struct iwl_geo_tx_power_profiles_cmd_v3 v3; + struct iwl_geo_tx_power_profiles_cmd_v4 v4; + struct iwl_geo_tx_power_profiles_cmd_v5 v5; +}; + +struct iwl_geo_tx_power_profiles_resp { + __le32 profile_idx; +}; + +union iwl_ppag_table_cmd { + struct { + __le32 flags; + s8 gain[10]; + s8 reserved[2]; + } v1; + struct { + __le32 flags; + s8 gain[22]; + s8 reserved[2]; + } v2; +}; + +struct iwl_dqa_enable_cmd { + __le32 cmd_queue; +}; + +struct iwl_tx_ant_cfg_cmd { + __le32 valid; +}; + +struct iwl_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +}; + +struct iwl_phy_specific_cfg { + __le32 filter_cfg_chains[4]; +}; + +struct iwl_phy_cfg_cmd_v3 { + __le32 phy_cfg; + struct iwl_calib_ctrl calib_control; + struct iwl_phy_specific_cfg phy_specific_cfg; +}; + +struct iwl_nvm_access_complete_cmd { + __le32 reserved; +}; + +struct iwl_tas_config_cmd_common { + __le32 block_list_size; + __le32 block_list_array[16]; +}; + +struct iwl_tas_config_cmd_v3 { + __le16 override_tas_iec; + __le16 enable_tas_iec; +}; + +struct iwl_tas_config_cmd_v4 { + u8 override_tas_iec; + u8 enable_tas_iec; + u8 usa_tas_uhb_allowed; + u8 reserved; +}; + +struct iwl_tas_config_cmd { + struct iwl_tas_config_cmd_common common; + union { + struct iwl_tas_config_cmd_v3 v3; + struct iwl_tas_config_cmd_v4 v4; + }; +}; + +struct iwl_lari_config_change_cmd { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; + __le32 oem_unii4_allow_bitmap; + __le32 chan_state_active_bitmap; + __le32 force_disable_channels_bitmap; + __le32 edt_bitmap; + __le32 oem_320mhz_allow_bitmap; + __le32 oem_11be_allow_bitmap; +}; + +struct iwl_tas_data { + __le32 block_list_size; + __le32 block_list_array[16]; + u8 override_tas_iec; + u8 enable_tas_iec; + u8 usa_tas_uhb_allowed; +}; + +enum iwl_dsm_funcs { + DSM_FUNC_QUERY = 0, + DSM_FUNC_DISABLE_SRD = 1, + DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, + DSM_FUNC_ENABLE_6E = 3, + DSM_FUNC_REGULATORY_CONFIG = 4, + DSM_FUNC_11AX_ENABLEMENT = 6, + DSM_FUNC_ENABLE_UNII4_CHAN = 7, + DSM_FUNC_ACTIVATE_CHANNEL = 8, + DSM_FUNC_FORCE_DISABLE_CHANNELS = 9, + DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10, + DSM_FUNC_RFI_CONFIG = 11, + DSM_FUNC_ENABLE_11BE = 12, + DSM_FUNC_NUM_FUNCS = 13, +}; + +enum iwl_dsm_values_rfi { + DSM_VALUE_RFI_DLVR_DISABLE = 1, + DSM_VALUE_RFI_DDR_DISABLE = 2, +}; + +enum iwl_time_sync_protocol_type { + IWL_TIME_SYNC_PROTOCOL_TM = 1, + 
IWL_TIME_SYNC_PROTOCOL_FTM = 2, +}; + +struct iwl_lmac_alive { + __le32 ucode_major; + __le32 ucode_minor; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le32 timestamp; + struct iwl_lmac_debug_addrs dbg_ptrs; +}; + +struct iwl_umac_alive { + __le32 umac_major; + __le32 umac_minor; + struct iwl_umac_debug_addrs dbg_ptrs; +}; + +struct iwl_sku_id { + __le32 data[3]; +}; + +struct iwl_alive_ntf_v3 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data; + struct iwl_umac_alive umac_data; +}; + +struct iwl_alive_ntf_v4 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; +}; + +struct iwl_alive_ntf_v5 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; + struct iwl_sku_id sku_id; +}; + +struct iwl_imr_alive_info { + __le64 base_addr; + __le32 size; + __le32 enabled; +}; + +struct iwl_alive_ntf_v6 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; + struct iwl_sku_id sku_id; + struct iwl_imr_alive_info imr; +}; + +enum iwl_extended_cfg_flags { + IWL_INIT_DEBUG_CFG = 0, + IWL_INIT_NVM = 1, + IWL_INIT_PHY = 2, +}; + +struct iwl_init_extended_cfg_cmd { + __le32 init_flags; +}; + +enum iwl_error_recovery_flags { + ERROR_RECOVERY_UPDATE_DB = 1, + ERROR_RECOVERY_END_OF_RECOVERY = 2, +}; + +struct iwl_fw_error_recovery_cmd { + __le32 flags; + __le32 buf_size; +}; + +enum iwl_rss_hash_func_en { + IWL_RSS_HASH_TYPE_IPV4_TCP = 0, + IWL_RSS_HASH_TYPE_IPV4_UDP = 1, + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD = 2, + IWL_RSS_HASH_TYPE_IPV6_TCP = 3, + IWL_RSS_HASH_TYPE_IPV6_UDP = 4, + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD = 5, +}; + +struct iwl_rss_config_cmd { + __le32 flags; + u8 hash_mask; + u8 reserved[3]; + __le32 secret_key[10]; + u8 indirection_table[128]; +}; + +struct iwl_mfuart_load_notif { + __le32 installed_ver; + __le32 external_ver; + __le32 status; + __le32 duration; + __le32 image_size; +}; + +struct iwl_mfu_assert_dump_notif { + __le32 assert_id; + __le32 curr_reset_num; + __le16 index_num; + __le16 parts_num; + __le32 data_size; + __le32 data[0]; +}; + +struct iwl_mvm_alive_data { + bool valid; + u32 scd_base_addr; +}; + +struct ieee80211_tim_ie { + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + union { + u8 required_octet; + struct { + struct {} __empty_virtual_map; + u8 virtual_map[0]; + }; + }; +}; + +struct ieee80211_tpt_blink { + int throughput; + int blink_time; +}; + +enum txdone_entry_desc_flags { + TXDONE_UNKNOWN = 0, + TXDONE_SUCCESS = 1, + TXDONE_FALLBACK = 2, + TXDONE_FAILURE = 3, + TXDONE_EXCESSIVE_RETRY = 4, + TXDONE_AMPDU = 5, + TXDONE_NO_ACK_REQ = 6, +}; + +struct txdone_entry_desc { + long unsigned int flags; + int retry; +}; + +struct usb_otg_caps { + u16 otg_rev; + bool hnp_support; + bool srp_support; + bool adp_support; +}; + +enum usb_dr_mode { + USB_DR_MODE_UNKNOWN = 0, + USB_DR_MODE_HOST = 1, + USB_DR_MODE_PERIPHERAL = 2, + USB_DR_MODE_OTG = 3, +}; + +struct xhci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int 
(*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); +}; + +typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); + +struct io_err_c { + struct dm_dev *dev; + long: 32; + sector_t start; +}; + +struct linux_efi_tpm_eventlog { + u32 size; + u32 final_events_preboot_size; + u8 version; + u8 log[0]; +}; + +struct efi_tcg2_final_events_table { + u64 version; + u64 nr_events; + u8 events[0]; +}; + +struct tpm_digest { + u16 alg_id; + u8 digest[64]; +}; + +enum tpm_duration { + TPM_SHORT = 0, + TPM_MEDIUM = 1, + TPM_LONG = 2, + TPM_LONG_LONG = 3, + TPM_UNDEFINED = 4, + TPM_NUM_DURATIONS = 4, +}; + +enum tcpa_event_types { + PREBOOT = 0, + POST_CODE = 1, + UNUSED = 2, + NO_ACTION = 3, + SEPARATOR = 4, + ACTION = 5, + EVENT_TAG = 6, + SCRTM_CONTENTS = 7, + SCRTM_VERSION = 8, + CPU_MICROCODE = 9, + PLATFORM_CONFIG_FLAGS = 10, + TABLE_OF_DEVICES = 11, + COMPACT_HASH = 12, + IPL = 13, + IPL_PARTITION_DATA = 14, + NONHOST_CODE = 15, + NONHOST_CONFIG = 16, + NONHOST_INFO = 17, +}; + +struct tcg_efi_specid_event_algs { + u16 alg_id; + u16 digest_size; +}; + +struct tcg_efi_specid_event_head { + u8 signature[16]; + u32 platform_class; + u8 spec_version_minor; + u8 spec_version_major; + u8 spec_errata; + u8 uintnsize; + u32 num_algs; + struct tcg_efi_specid_event_algs digest_sizes[0]; +}; + +struct tcg_pcr_event { + u32 pcr_idx; + u32 event_type; + u8 digest[20]; + u32 event_size; + u8 event[0]; +}; + +struct tcg_event_field { + u32 event_size; + u8 event[0]; +}; + +struct tcg_pcr_event2_head { + u32 pcr_idx; + u32 event_type; + u32 count; + struct tpm_digest digests[0]; +}; + +struct efi_system_resource_entry_v1 { + efi_guid_t fw_class; + u32 fw_type; + u32 fw_version; + u32 lowest_supported_fw_version; + u32 capsule_flags; + u32 last_attempt_version; + u32 last_attempt_status; +}; + +struct efi_system_resource_table { + u32 fw_resource_count; + u32 fw_resource_count_max; + u64 fw_resource_version; + u8 entries[0]; +}; + +struct esre_entry { + union { + struct efi_system_resource_entry_v1 *esre1; + } esre; + struct kobject kobj; + struct list_head list; +}; + +struct esre_attribute { + struct attribute attr; + ssize_t (*show)(struct esre_entry *, char *); + ssize_t (*store)(struct esre_entry *, const char *, size_t); +}; + +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; + long: 32; +}; + +typedef int (*armpmu_init_fn)(struct arm_pmu *); + +struct pmu_probe_info { + unsigned int cpuid; + unsigned int mask; + armpmu_init_fn init; +}; + +enum { + NETDEV_A_PAGE_POOL_STATS_INFO = 1, + NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW = 9, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER = 10, + NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY = 11, + NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL = 12, + NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE = 13, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED = 14, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL = 15, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING = 16, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL = 17, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT = 18, + __NETDEV_A_PAGE_POOL_STATS_MAX = 19, + NETDEV_A_PAGE_POOL_STATS_MAX = 18, +}; + +enum { + BPF_LOCAL_STORAGE_GET_F_CREATE = 1, + BPF_SK_STORAGE_GET_F_CREATE = 1, +}; + +struct bpf_local_storage_elem { + struct hlist_node map_node; + struct hlist_node snode; + struct bpf_local_storage *local_storage; + union { + struct callback_head rcu; + struct hlist_node free_node; + }; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + struct bpf_local_storage_data sdata; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_local_storage_cache { + spinlock_t idx_lock; + long: 32; + u64 idx_usage_counts[16]; +}; + +enum { + SK_DIAG_BPF_STORAGE_REQ_NONE = 0, + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1, + __SK_DIAG_BPF_STORAGE_REQ_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_REP_NONE = 0, + SK_DIAG_BPF_STORAGE = 1, + __SK_DIAG_BPF_STORAGE_REP_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_NONE = 0, + SK_DIAG_BPF_STORAGE_PAD = 1, + SK_DIAG_BPF_STORAGE_MAP_ID = 2, + SK_DIAG_BPF_STORAGE_MAP_VALUE = 3, + __SK_DIAG_BPF_STORAGE_MAX = 4, +}; + +typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *); + +typedef u64 (*btf_bpf_sk_storage_get_tracing)(struct bpf_map *, struct sock *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete_tracing)(struct bpf_map *, struct sock *); + +struct bpf_sk_storage_diag { + u32 nr_maps; + struct bpf_map *maps[0]; +}; + +struct bpf_iter_seq_sk_storage_map_info { + struct bpf_map *map; + unsigned int bucket_id; + unsigned int skip_elems; +}; + +struct bpf_iter__bpf_sk_storage_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + struct sock *sk; + }; + union { + void *value; + }; +}; + +enum ethtool_supported_ring_param { + ETHTOOL_RING_USE_RX_BUF_LEN = 1, + ETHTOOL_RING_USE_CQE_SIZE = 2, + ETHTOOL_RING_USE_TX_PUSH = 4, + ETHTOOL_RING_USE_RX_PUSH = 8, + ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = 16, + ETHTOOL_RING_USE_TCP_DATA_SPLIT = 32, +}; + +enum { + ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, + ETHTOOL_TCP_DATA_SPLIT_DISABLED = 1, + ETHTOOL_TCP_DATA_SPLIT_ENABLED = 2, +}; + +struct rings_reply_data { + struct ethnl_reply_data base; + struct ethtool_ringparam ringparam; + struct kernel_ethtool_ringparam kernel_ringparam; + u32 supported_ring_params; +}; + +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + +enum nfqnl_msg_types { + NFQNL_MSG_PACKET = 0, + NFQNL_MSG_VERDICT = 1, + NFQNL_MSG_CONFIG = 2, + NFQNL_MSG_VERDICT_BATCH = 3, + NFQNL_MSG_MAX = 4, +}; + +struct nfqnl_msg_packet_hdr { + __be32 packet_id; + __be16 hw_protocol; + __u8 hook; +} __attribute__((packed)); + +struct nfqnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfqnl_msg_packet_timestamp { + __be64 sec; + __be64 usec; +}; + +enum nfqnl_vlan_attr { + NFQA_VLAN_UNSPEC = 0, + NFQA_VLAN_PROTO = 1, + NFQA_VLAN_TCI = 2, + __NFQA_VLAN_MAX = 3, +}; + +enum nfqnl_attr_type { + NFQA_UNSPEC = 0, + NFQA_PACKET_HDR = 1, + NFQA_VERDICT_HDR = 2, + NFQA_MARK = 3, + NFQA_TIMESTAMP = 4, + NFQA_IFINDEX_INDEV = 5, + NFQA_IFINDEX_OUTDEV = 6, + NFQA_IFINDEX_PHYSINDEV = 7, + NFQA_IFINDEX_PHYSOUTDEV = 8, + NFQA_HWADDR = 9, + NFQA_PAYLOAD = 10, + NFQA_CT = 11, + NFQA_CT_INFO = 12, + NFQA_CAP_LEN = 13, + NFQA_SKB_INFO = 14, + NFQA_EXP = 15, + NFQA_UID = 16, + NFQA_GID = 17, + NFQA_SECCTX = 18, + NFQA_VLAN = 19, + NFQA_L2HDR = 20, + NFQA_PRIORITY = 21, + NFQA_CGROUP_CLASSID = 22, + __NFQA_MAX = 23, +}; + +struct nfqnl_msg_verdict_hdr { + __be32 verdict; + __be32 id; +}; + +enum nfqnl_msg_config_cmds { + NFQNL_CFG_CMD_NONE = 0, + NFQNL_CFG_CMD_BIND = 1, + NFQNL_CFG_CMD_UNBIND = 2, + NFQNL_CFG_CMD_PF_BIND = 3, + NFQNL_CFG_CMD_PF_UNBIND = 4, +}; + +struct 
nfqnl_msg_config_cmd { + __u8 command; + __u8 _pad; + __be16 pf; +}; + +enum nfqnl_config_mode { + NFQNL_COPY_NONE = 0, + NFQNL_COPY_META = 1, + NFQNL_COPY_PACKET = 2, +}; + +struct nfqnl_msg_config_params { + __be32 copy_range; + __u8 copy_mode; +} __attribute__((packed)); + +enum nfqnl_attr_config { + NFQA_CFG_UNSPEC = 0, + NFQA_CFG_CMD = 1, + NFQA_CFG_PARAMS = 2, + NFQA_CFG_QUEUE_MAXLEN = 3, + NFQA_CFG_MASK = 4, + NFQA_CFG_FLAGS = 5, + __NFQA_CFG_MAX = 6, +}; + +struct nf_queue_handler { + int (*outfn)(struct nf_queue_entry *, unsigned int); + void (*nf_hook_drop)(struct net *); +}; + +struct nfqnl_instance { + struct hlist_node hlist; + struct callback_head rcu; + u32 peer_portid; + unsigned int queue_maxlen; + unsigned int copy_range; + unsigned int queue_dropped; + unsigned int queue_user_dropped; + u_int16_t queue_num; + u_int8_t copy_mode; + u_int32_t flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t lock; + unsigned int queue_total; + unsigned int id_sequence; + struct list_head queue_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, long unsigned int); + +struct nfnl_queue_net { + spinlock_t instances_lock; + struct hlist_head instance_table[16]; +}; + +enum nft_meta_keys { + NFT_META_LEN = 0, + NFT_META_PROTOCOL = 1, + NFT_META_PRIORITY = 2, + NFT_META_MARK = 3, + NFT_META_IIF = 4, + NFT_META_OIF = 5, + NFT_META_IIFNAME = 6, + NFT_META_OIFNAME = 7, + NFT_META_IFTYPE = 8, + NFT_META_OIFTYPE = 9, + NFT_META_SKUID = 10, + NFT_META_SKGID = 11, + NFT_META_NFTRACE = 12, + NFT_META_RTCLASSID = 13, + NFT_META_SECMARK = 14, + NFT_META_NFPROTO = 15, + NFT_META_L4PROTO = 16, + NFT_META_BRI_IIFNAME = 17, + NFT_META_BRI_OIFNAME = 18, + NFT_META_PKTTYPE = 19, + NFT_META_CPU = 20, + NFT_META_IIFGROUP = 21, + NFT_META_OIFGROUP = 22, + NFT_META_CGROUP = 23, + NFT_META_PRANDOM = 24, + NFT_META_SECPATH = 25, + NFT_META_IIFKIND = 26, + NFT_META_OIFKIND = 27, + NFT_META_BRI_IIFPVID = 28, + NFT_META_BRI_IIFVPROTO = 29, + NFT_META_TIME_NS = 30, + NFT_META_TIME_DAY = 31, + NFT_META_TIME_HOUR = 32, + NFT_META_SDIF = 33, + NFT_META_SDIFNAME = 34, + NFT_META_BRI_BROUTE = 35, + __NFT_META_IIFTYPE = 36, +}; + +enum nft_meta_attributes { + NFTA_META_UNSPEC = 0, + NFTA_META_DREG = 1, + NFTA_META_KEY = 2, + NFTA_META_SREG = 3, + __NFTA_META_MAX = 4, +}; + +enum { + NFT_PAYLOAD_CTX_INNER_TUN = 1, + NFT_PAYLOAD_CTX_INNER_LL = 2, + NFT_PAYLOAD_CTX_INNER_NH = 4, + NFT_PAYLOAD_CTX_INNER_TH = 8, +}; + +struct nft_inner_tun_ctx { + u16 type; + u16 inner_tunoff; + u16 inner_lloff; + u16 inner_nhoff; + u16 inner_thoff; + __be16 llproto; + u8 l4proto; + u8 flags; +}; + +struct nft_meta { + enum nft_meta_keys key: 8; + u8 len; + union { + u8 dreg; + u8 sreg; + }; +}; + +struct udp_tunnel_nic_ops { + void (*get_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + void (*set_port_priv)(struct net_device *, unsigned int, unsigned int, u8); + void (*add_port)(struct net_device *, struct udp_tunnel_info *); + void (*del_port)(struct net_device *, struct udp_tunnel_info *); + void (*reset_ntf)(struct net_device *); + size_t (*dump_size)(struct net_device *, unsigned int); + int (*dump_write)(struct net_device *, unsigned int, struct sk_buff *); +}; + +struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; + int ipv6mr_ifindex; +}; + +struct group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; 
+ +struct group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct group_filter { + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + }; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + }; + }; +}; + +struct compat_group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct compat_group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct compat_group_filter { + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + }; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + }; + }; +}; + +struct ip6_mtuinfo { + struct sockaddr_in6 ip6m_addr; + __u32 ip6m_mtu; +}; + +struct ip6_ra_chain { + struct ip6_ra_chain *next; + struct sock *sk; + int sel; + void (*destructor)(struct sock *); +}; + +struct ieee80211_multi_link_elem { + __le16 control; + u8 variable[0]; +}; + +struct ieee80211_mle_basic_common_info { + u8 len; + u8 mld_mac_addr[6]; + u8 variable[0]; +}; + +struct cfg80211_assoc_failure { + const u8 *ap_mld_addr; + struct cfg80211_bss *bss[15]; + bool timeout; +}; + +struct cfg80211_mgmt_registration { + struct list_head list; + struct wireless_dev *wdev; + u32 nlportid; + int match_len; + __le16 frame_type; + bool multicast_rx; + u8 match[0]; +}; + +enum sysctl_writes_mode { + SYSCTL_WRITES_LEGACY = -1, + SYSCTL_WRITES_WARN = 0, + SYSCTL_WRITES_STRICT = 1, +}; + +struct do_proc_dointvec_minmax_conv_param { + int *min; + int *max; +}; + +struct do_proc_douintvec_minmax_conv_param { + unsigned int *min; + unsigned int *max; +}; + +enum audit_ntp_type { + AUDIT_NTP_OFFSET = 0, + AUDIT_NTP_FREQ = 1, + AUDIT_NTP_STATUS = 2, + AUDIT_NTP_TAI = 3, + AUDIT_NTP_TICK = 4, + AUDIT_NTP_ADJUST = 5, + AUDIT_NTP_NVALS = 6, +}; + +struct ntp_data { + long unsigned int tick_usec; + long: 32; + u64 tick_length; + u64 tick_length_base; + int time_state; + int time_status; + s64 time_offset; + long int time_constant; + long int time_maxerror; + long int time_esterror; + long: 32; + s64 time_freq; + time64_t time_reftime; + long int time_adjust; + long: 32; + s64 ntp_tick_adj; + time64_t ntp_next_leap_sec; +}; + +struct objpool_slot { + uint32_t head; + uint32_t tail; + uint32_t last; + uint32_t mask; + void *entries[0]; +}; + +typedef int (*objpool_init_obj_cb)(void *, void *); + +struct objpool_head; + +typedef int (*objpool_fini_cb)(struct objpool_head *, void *); + +struct objpool_head { + int obj_size; + int nr_objs; + int nr_possible_cpus; + int capacity; + gfp_t gfp; + refcount_t ref; + long unsigned int flags; + struct objpool_slot **cpu_slots; + objpool_fini_cb release; + void *context; +}; + +typedef u32 kprobe_opcode_t; + +struct kprobe; + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); + +typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int); + +struct 
kprobe { + struct hlist_node hlist; + struct list_head list; + long unsigned int nmissed; + kprobe_opcode_t *addr; + const char *symbol_name; + unsigned int offset; + kprobe_pre_handler_t pre_handler; + kprobe_post_handler_t post_handler; + kprobe_opcode_t opcode; + struct arch_probes_insn ainsn; + u32 flags; +}; + +struct kprobe_ctlblk { + unsigned int kprobe_status; + struct prev_kprobe prev_kprobe; +}; + +struct arch_optimized_insn { + kprobe_opcode_t copied_insn[1]; + kprobe_opcode_t *insn; +}; + +struct kretprobe_instance; + +typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); + +struct kretprobe_holder; + +struct kretprobe_instance { + struct callback_head rcu; + struct llist_node llist; + struct kretprobe_holder *rph; + kprobe_opcode_t *ret_addr; + void *fp; + char data[0]; +}; + +struct kretprobe; + +struct kretprobe_holder { + struct kretprobe *rp; + struct objpool_head pool; +}; + +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + kretprobe_handler_t entry_handler; + int maxactive; + int nmissed; + size_t data_size; + struct kretprobe_holder *rph; +}; + +struct kprobe_blacklist_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; +}; + +struct kprobe_insn_cache { + struct mutex mutex; + void * (*alloc)(); + void (*free)(void *); + const char *sym; + struct list_head pages; + size_t insn_size; + int nr_garbage; +}; + +struct optimized_kprobe { + struct kprobe kp; + struct list_head list; + struct arch_optimized_insn optinsn; +}; + +struct kprobe_insn_page { + struct list_head list; + kprobe_opcode_t *insns; + struct kprobe_insn_cache *cache; + int nused; + int ngarbage; + char slot_used[0]; +}; + +enum kprobe_slot_state { + SLOT_CLEAN = 0, + SLOT_DIRTY = 1, + SLOT_USED = 2, +}; + +enum dynevent_type { + DYNEVENT_TYPE_SYNTH = 1, + DYNEVENT_TYPE_KPROBE = 2, + DYNEVENT_TYPE_NONE = 3, +}; + +struct dynevent_cmd; + +typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *); + +struct dynevent_cmd { + struct seq_buf seq; + const char *event_name; + unsigned int n_fields; + enum dynevent_type type; + dynevent_create_fn_t run_command; + void *private_data; +}; + +struct dyn_event; + +struct dyn_event_operations { + struct list_head list; + int (*create)(const char *); + int (*show)(struct seq_file *, struct dyn_event *); + bool (*is_busy)(struct dyn_event *); + int (*free)(struct dyn_event *); + bool (*match)(const char *, const char *, int, const char **, struct dyn_event *); +}; + +struct dyn_event { + struct list_head list; + struct dyn_event_operations *ops; +}; + +typedef int (*dynevent_check_arg_fn_t)(void *); + +struct dynevent_arg { + const char *str; + char separator; +}; + +struct dynevent_arg_pair { + const char *lhs; + const char *rhs; + char operator; + char separator; +}; + +struct bpf_iter_seq_map_info { + u32 map_id; +}; + +struct bpf_iter__bpf_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; +}; + +struct mini_Qdisc { + struct tcf_proto *filter_list; + struct tcf_block *block; + struct gnet_stats_basic_sync *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + long unsigned int rcu_state; +}; + +struct tcx_entry { + struct mini_Qdisc *miniq; + long: 32; + struct bpf_mprog_bundle bundle; + u32 miniq_active; + struct callback_head rcu; + long: 32; +}; + +struct tcx_link { + struct bpf_link link; + struct net_device *dev; + u32 location; +}; + +struct trace_event_raw_mmap_lock { + struct trace_entry ent; + struct mm_struct *mm; + u32 
__data_loc_memcg_path; + bool write; + char __data[0]; +}; + +struct trace_event_raw_mmap_lock_acquire_returned { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + bool success; + char __data[0]; +}; + +struct trace_event_data_offsets_mmap_lock { + u32 memcg_path; + const void *memcg_path_ptr_; +}; + +struct trace_event_data_offsets_mmap_lock_acquire_returned { + u32 memcg_path; + const void *memcg_path_ptr_; +}; + +typedef void (*btf_trace_mmap_lock_start_locking)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_released)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_acquire_returned)(void *, struct mm_struct *, const char *, bool, bool); + +struct swap_iocb { + struct kiocb iocb; + struct bio_vec bvec[32]; + int pages; + int len; +}; + +typedef int __kernel_daddr_t; + +struct ustat { + __kernel_daddr_t f_tfree; + long unsigned int f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +struct statfs { + __u32 f_type; + __u32 f_bsize; + __u32 f_blocks; + __u32 f_bfree; + __u32 f_bavail; + __u32 f_files; + __u32 f_ffree; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_frsize; + __u32 f_flags; + __u32 f_spare[4]; +}; + +struct statfs64 { + __u32 f_type; + __u32 f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_frsize; + __u32 f_flags; + __u32 f_spare[4]; +}; + +struct inotify_event { + __s32 wd; + __u32 mask; + __u32 cookie; + __u32 len; + char name[0]; +}; + +typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); + +typedef struct { + __le32 *p; + __le32 key; + struct buffer_head *bh; +} Indirect; + +struct fat_entry { + int entry; + union { + u8 *ent12_p[2]; + __le16 *ent16_p; + __le32 *ent32_p; + } u; + int nr_bhs; + struct buffer_head *bhs[2]; + struct inode *fat_inode; +}; + +struct fatent_ra { + sector_t cur; + sector_t limit; + unsigned int ra_blocks; + long: 32; + sector_t ra_advance; + sector_t ra_next; + sector_t ra_limit; +}; + +enum fuse_opcode { + FUSE_LOOKUP = 1, + FUSE_FORGET = 2, + FUSE_GETATTR = 3, + FUSE_SETATTR = 4, + FUSE_READLINK = 5, + FUSE_SYMLINK = 6, + FUSE_MKNOD = 8, + FUSE_MKDIR = 9, + FUSE_UNLINK = 10, + FUSE_RMDIR = 11, + FUSE_RENAME = 12, + FUSE_LINK = 13, + FUSE_OPEN = 14, + FUSE_READ = 15, + FUSE_WRITE = 16, + FUSE_STATFS = 17, + FUSE_RELEASE = 18, + FUSE_FSYNC = 20, + FUSE_SETXATTR = 21, + FUSE_GETXATTR = 22, + FUSE_LISTXATTR = 23, + FUSE_REMOVEXATTR = 24, + FUSE_FLUSH = 25, + FUSE_INIT = 26, + FUSE_OPENDIR = 27, + FUSE_READDIR = 28, + FUSE_RELEASEDIR = 29, + FUSE_FSYNCDIR = 30, + FUSE_GETLK = 31, + FUSE_SETLK = 32, + FUSE_SETLKW = 33, + FUSE_ACCESS = 34, + FUSE_CREATE = 35, + FUSE_INTERRUPT = 36, + FUSE_BMAP = 37, + FUSE_DESTROY = 38, + FUSE_IOCTL = 39, + FUSE_POLL = 40, + FUSE_NOTIFY_REPLY = 41, + FUSE_BATCH_FORGET = 42, + FUSE_FALLOCATE = 43, + FUSE_READDIRPLUS = 44, + FUSE_RENAME2 = 45, + FUSE_LSEEK = 46, + FUSE_COPY_FILE_RANGE = 47, + FUSE_SETUPMAPPING = 48, + FUSE_REMOVEMAPPING = 49, + FUSE_SYNCFS = 50, + FUSE_TMPFILE = 51, + FUSE_STATX = 52, + CUSE_INIT = 4096, + CUSE_INIT_BSWAP_RESERVED = 1048576, + FUSE_INIT_BSWAP_RESERVED = 436207616, +}; + +struct fuse_setxattr_in { + uint32_t size; + uint32_t flags; + uint32_t setxattr_flags; + uint32_t padding; +}; + +struct fuse_getxattr_in { + uint32_t size; + uint32_t padding; +}; + +struct fuse_getxattr_out { + uint32_t size; + uint32_t padding; 
+}; + +enum { + WORK_DONE_BIT = 0, + WORK_ORDER_DONE_BIT = 1, +}; + +struct bvec_iter_all { + struct bio_vec bv; + int idx; + unsigned int done; +}; + +struct btrfs_stripe_hash { + struct list_head hash_list; + spinlock_t lock; +}; + +struct btrfs_stripe_hash_table { + struct list_head stripe_cache; + spinlock_t cache_lock; + int cache_size; + struct btrfs_stripe_hash table[0]; +}; + +struct sector_ptr { + struct page *page; + unsigned int pgoff: 24; + unsigned int uptodate: 8; +}; + +struct btrfs_plug_cb { + struct blk_plug_cb cb; + struct btrfs_fs_info *info; + struct list_head rbio_list; +}; + +struct assoc_array_ops { + long unsigned int (*get_key_chunk)(const void *, int); + long unsigned int (*get_object_key_chunk)(const void *, int); + bool (*compare_object)(const void *, const void *); + int (*diff_objects)(const void *, const void *); + void (*free_object)(void *); +}; + +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[16]; + long unsigned int nr_leaves_on_branch; +}; + +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + long unsigned int index_key[0]; +}; + +struct assoc_array_edit { + struct callback_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[16]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long int adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[17]; +}; + +enum key_notification_subtype { + NOTIFY_KEY_INSTANTIATED = 0, + NOTIFY_KEY_UPDATED = 1, + NOTIFY_KEY_LINKED = 2, + NOTIFY_KEY_UNLINKED = 3, + NOTIFY_KEY_CLEARED = 4, + NOTIFY_KEY_REVOKED = 5, + NOTIFY_KEY_INVALIDATED = 6, + NOTIFY_KEY_SETATTR = 7, +}; + +struct keyring_read_iterator_context { + size_t buflen; + size_t count; + key_serial_t *buffer; +}; + +struct sha512_state { + u64 state[8]; + u64 count[2]; + u8 buf[128]; +}; + +typedef void sha512_block_fn(struct sha512_state *, const u8 *, int); + +struct xor_block_template { + struct xor_block_template *next; + const char *name; + int speed; + void (*do_2)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict); + void (*do_3)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); + void (*do_4)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); + void (*do_5)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); +}; + +struct pkcs7_signed_info { + struct pkcs7_signed_info *next; + struct x509_certificate *signer; + unsigned int index; + bool unsupported_crypto; + bool blacklisted; + const void *msgdigest; + unsigned int msgdigest_len; + unsigned int authattrs_len; + const void *authattrs; + long unsigned int aa_set; + long: 32; + time64_t 
signing_time; + struct public_key_signature *sig; + long: 32; +}; + +struct pkcs7_message { + struct x509_certificate *certs; + struct x509_certificate *crl; + struct pkcs7_signed_info *signed_infos; + u8 version; + bool have_authattrs; + enum OID data_type; + size_t data_len; + size_t data_hdrlen; + const void *data; +}; + +struct mq_inflight { + struct block_device *part; + unsigned int inflight[2]; +}; + +struct blk_rq_wait { + struct completion done; + blk_status_t ret; +}; + +struct blk_expired_data { + bool has_timedout_rq; + long unsigned int next; + long unsigned int timeout_start; +}; + +struct flush_busy_ctx_data { + struct blk_mq_hw_ctx *hctx; + struct list_head *list; +}; + +struct dispatch_rq_data { + struct blk_mq_hw_ctx *hctx; + struct request *rq; +}; + +enum prep_dispatch { + PREP_DISPATCH_OK = 0, + PREP_DISPATCH_NO_TAG = 1, + PREP_DISPATCH_NO_BUDGET = 2, +}; + +struct rq_iter_data { + struct blk_mq_hw_ctx *hctx; + bool has_rq; +}; + +struct blk_mq_qe_pair { + struct list_head node; + struct request_queue *q; + struct elevator_type *type; +}; + +enum { + IORING_MEM_REGION_TYPE_USER = 1, +}; + +struct io_uring_region_desc { + __u64 user_addr; + __u64 size; + __u32 flags; + __u32 id; + __u64 mmap_offset; + __u64 __resv[4]; +}; + +typedef ZSTD_customMem zstd_custom_mem; + +typedef ZSTD_DCtx zstd_dctx; + +typedef ZSTD_DDict zstd_ddict; + +typedef ZSTD_frameHeader zstd_frame_header; + +typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); + +typedef void sg_free_fn(struct scatterlist *, unsigned int); + +struct sg_pool { + size_t size; + char *name; + struct kmem_cache *slab; + mempool_t *pool; +}; + +struct resource_entry { + struct list_head node; + struct resource *res; + resource_size_t offset; + struct resource __res; +}; + +struct clk_composite { + struct clk_hw hw; + struct clk_ops ops; + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + const struct clk_ops *mux_ops; + const struct clk_ops *rate_ops; + const struct clk_ops *gate_ops; +}; + +struct kbdiacruc { + unsigned int diacr; + unsigned int base; + unsigned int result; +}; + +struct cpu { + int node_id; + int hotpluggable; + struct device dev; +}; + +typedef u32 note_buf_t[45]; + +struct cpu_attr { + struct device_attribute attr; + const struct cpumask * const map; +}; + +typedef __u16 __virtio16; + +typedef __u32 __virtio32; + +typedef __u64 __virtio64; + +struct virtio_blk_geometry { + __virtio16 cylinders; + __u8 heads; + __u8 sectors; +}; + +struct virtio_blk_zoned_characteristics { + __virtio32 zone_sectors; + __virtio32 max_open_zones; + __virtio32 max_active_zones; + __virtio32 max_append_sectors; + __virtio32 write_granularity; + __u8 model; + __u8 unused2[3]; +}; + +struct virtio_blk_config { + __virtio64 capacity; + __virtio32 size_max; + __virtio32 seg_max; + struct virtio_blk_geometry geometry; + __virtio32 blk_size; + __u8 physical_block_exp; + __u8 alignment_offset; + __virtio16 min_io_size; + __virtio32 opt_io_size; + __u8 wce; + __u8 unused; + __virtio16 num_queues; + __virtio32 max_discard_sectors; + __virtio32 max_discard_seg; + __virtio32 discard_sector_alignment; + __virtio32 max_write_zeroes_sectors; + __virtio32 max_write_zeroes_seg; + __u8 write_zeroes_may_unmap; + __u8 unused1[3]; + __virtio32 max_secure_erase_sectors; + __virtio32 max_secure_erase_seg; + __virtio32 secure_erase_sector_alignment; + struct virtio_blk_zoned_characteristics zoned; +}; + +struct virtio_blk_outhdr { + __virtio32 type; + __virtio32 ioprio; + __virtio64 sector; +}; + +struct 
virtio_blk_discard_write_zeroes { + __le64 sector; + __le32 num_sectors; + __le32 flags; +}; + +enum string_size_units { + STRING_UNITS_10 = 0, + STRING_UNITS_2 = 1, + STRING_UNITS_MASK = 1, + STRING_UNITS_NO_SPACE = 1073741824, + STRING_UNITS_NO_BYTES = 2147483648, +}; + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[16]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct virtio_blk { + struct mutex vdev_mutex; + struct virtio_device *vdev; + struct gendisk *disk; + struct blk_mq_tag_set tag_set; + struct work_struct config_work; + int index; + int num_vqs; + int io_queues[3]; + struct virtio_blk_vq *vqs; + unsigned int zone_sectors; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + union { + u8 status; + struct { + __virtio64 sector; + u8 status; + long: 32; + } zone_append; + } in_hdr; + size_t in_hdr_len; + struct sg_table sg_table; + struct scatterlist sg[0]; +}; + +struct pci_bits { + unsigned int reg; + unsigned int width; + long unsigned int mask; + long unsigned int val; +}; + +enum { + PIIX_IOCFG = 84, + ICH5_PMR = 144, + ICH5_PCS = 146, + PIIX_SIDPR_BAR = 5, + PIIX_SIDPR_LEN = 16, + PIIX_SIDPR_IDX = 0, + PIIX_SIDPR_DATA = 4, + PIIX_FLAG_CHECKINTR = 268435456, + PIIX_FLAG_SIDPR = 536870912, + PIIX_PATA_FLAGS = 1, + PIIX_SATA_FLAGS = 268435458, + PIIX_FLAG_PIO16 = 1073741824, + PIIX_80C_PRI = 48, + PIIX_80C_SEC = 192, + P0 = 0, + P1 = 1, + P2 = 2, + P3 = 3, + IDE = -1, + NA = -2, + RV = -3, + PIIX_AHCI_DEVICE = 6, + PIIX_HOST_BROKEN_SUSPEND = 16777216, +}; + +enum piix_controller_ids { + piix_pata_mwdma = 0, + piix_pata_33 = 1, + ich_pata_33 = 2, + ich_pata_66 = 3, + ich_pata_100 = 4, + ich_pata_100_nomwdma1 = 5, + ich5_sata = 6, + ich6_sata = 7, + ich6m_sata = 8, + ich8_sata = 9, + ich8_2port_sata = 10, + ich8m_apple_sata = 11, + tolapai_sata = 12, + piix_pata_vmw = 13, + ich8_sata_snb = 14, + ich8_2port_sata_snb = 15, + ich8_2port_sata_byt = 16, +}; + +struct piix_map_db { + const u32 mask; + const u16 port_enable; + const int map[0]; +}; + +struct piix_host_priv { + const int *map; + u32 saved_iocfg; + void *sidpr; +}; + +struct ich_laptop { + u16 device; + u16 subvendor; + u16 subdevice; +}; + +struct e1000_option___2 { + enum { + enable_option___2 = 0, + range_option___2 = 1, + list_option___2 = 2, + } type; + const char *name; + const char *err; + int def; + union { + struct { + int min; + int max; + } r; + struct { + int nr; + const struct e1000_opt_list *p; + } l; + } arg; +}; + +struct iwl_calib_res_notif_phy_db { + __le16 type; + __le16 length; + u8 data[0]; +}; + +struct iwl_phy_db_cmd { + __le16 type; + __le16 length; + u8 data[0]; +}; + +struct iwl_phy_db_entry { + u16 size; + u8 *data; +}; + +struct iwl_phy_db { + struct iwl_phy_db_entry cfg; + struct iwl_phy_db_entry calib_nch; + int n_group_papd; + struct iwl_phy_db_entry *calib_ch_group_papd; + int n_group_txp; + struct iwl_phy_db_entry *calib_ch_group_txp; + struct iwl_trans *trans; +}; + +enum iwl_phy_db_section_type { + IWL_PHY_DB_CFG = 1, + IWL_PHY_DB_CALIB_NCH = 2, + IWL_PHY_DB_UNUSED = 3, + IWL_PHY_DB_CALIB_CHG_PAPD = 4, + IWL_PHY_DB_CALIB_CHG_TXP = 5, + IWL_PHY_DB_MAX = 6, +}; + +struct iwl_phy_db_chg_txp { + __le32 space; + __le16 max_channel_idx; +} __attribute__((packed)); + +enum iwl_ibss_manager { + IWL_NOT_IBSS_MANAGER = 0, + IWL_IBSS_MANAGER = 1, +}; + +enum iwlagn_wowlan_wakeup_filters { + IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET = 1, + IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH = 2, + 
IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = 4, + IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = 8, + IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = 16, + IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = 32, + IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = 64, + IWLAGN_WOWLAN_WAKEUP_ALWAYS = 128, + IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = 256, +}; + +struct iwlagn_tkip_rsc_tsc { + struct tkip_sc unicast_rsc[16]; + struct tkip_sc multicast_rsc[16]; + struct tkip_sc tsc; +}; + +struct iwlagn_aes_rsc_tsc { + struct aes_sc unicast_rsc[16]; + struct aes_sc multicast_rsc[16]; + struct aes_sc tsc; +}; + +union iwlagn_all_tsc_rsc { + struct iwlagn_tkip_rsc_tsc tkip; + struct iwlagn_aes_rsc_tsc aes; +}; + +struct iwlagn_wowlan_status { + __le64 replay_ctr; + __le32 rekey_status; + __le32 wakeup_reason; + u8 pattern_number; + u8 reserved1; + __le16 qos_seq_ctr[8]; + __le16 non_qos_seq_ctr; + __le16 reserved2; + union iwlagn_all_tsc_rsc tsc_rsc; + __le16 reserved3; +} __attribute__((packed)); + +struct iwl_vif_priv { + struct iwl_rxon_context *ctx; + u8 ibss_bssid_sta_id; +}; + +struct iwl_resume_data { + struct iwl_priv *priv; + struct iwlagn_wowlan_status *cmd; + bool valid; +}; + +enum { + NONE = 0, + ADD = 1, + REMOVE = 2, + HT_RATE_INIT = 3, + ADD_RATE_INIT = 4, +}; + +struct error_table_start { + u32 valid; + u32 error_id; +}; + +enum iwl_tof_bandwidth { + IWL_TOF_BW_20_LEGACY = 0, + IWL_TOF_BW_20_HT = 1, + IWL_TOF_BW_40 = 2, + IWL_TOF_BW_80 = 3, + IWL_TOF_BW_160 = 4, + IWL_TOF_BW_NUM = 5, +}; + +enum iwl_tof_responder_cmd_valid_field { + IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = 1, + IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET = 2, + IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB = 4, + IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB = 8, + IWL_TOF_RESPONDER_CMD_VALID_BSSID = 16, + IWL_TOF_RESPONDER_CMD_VALID_TX_ANT = 32, + IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE = 64, + IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT = 128, + IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT = 256, + IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT = 512, + IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = 1024, + IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = 2048, + IWL_TOF_RESPONDER_CMD_VALID_STA_ID = 4096, + IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT = 4194304, + IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS = 8388608, + IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK = 16777216, + IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID = 33554432, + IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR = 67108864, + IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR = 134217728, +}; + +struct iwl_tof_responder_config_cmd { + __le32 cmd_valid_fields; + __le32 responder_cfg_flags; + u8 format_bw; + u8 bss_color; + u8 channel_num; + u8 ctrl_ch_position; + u8 sta_id; + u8 band; + __le16 toa_offset; + __le16 common_calib; + __le16 specific_calib; + u8 bssid[6]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + __le16 min_time_between_msr; + __le16 max_time_between_msr; +}; + +struct iwl_tof_responder_dyn_config_cmd_v2 { + __le32 lci_len; + __le32 civic_len; + u8 lci_civic[0]; +}; + +enum iwl_responder_dyn_cfg_valid_flags { + IWL_RESPONDER_DYN_CFG_VALID_LCI = 1, + IWL_RESPONDER_DYN_CFG_VALID_CIVIC = 2, + IWL_RESPONDER_DYN_CFG_VALID_PASN_STA = 4, +}; + +struct iwl_tof_responder_dyn_config_cmd { + u8 cipher; + u8 valid_flags; + u8 lci_len; + u8 civic_len; + u8 lci_buf[160]; + u8 civic_buf[160]; + u8 hltk_buf[32]; + u8 addr[6]; + u8 reserved[2]; +}; + +enum iwl_location_frame_format { + IWL_LOCATION_FRAME_FORMAT_LEGACY = 0, + IWL_LOCATION_FRAME_FORMAT_HT = 1, + IWL_LOCATION_FRAME_FORMAT_VHT = 2, + IWL_LOCATION_FRAME_FORMAT_HE = 3, +}; + 
+enum iwl_location_bw { + IWL_LOCATION_BW_20MHZ = 0, + IWL_LOCATION_BW_40MHZ = 1, + IWL_LOCATION_BW_80MHZ = 2, + IWL_LOCATION_BW_160MHZ = 3, +}; + +enum iwl_location_cipher { + IWL_LOCATION_CIPHER_CCMP_128 = 0, + IWL_LOCATION_CIPHER_GCMP_128 = 1, + IWL_LOCATION_CIPHER_GCMP_256 = 2, + IWL_LOCATION_CIPHER_INVALID = 3, + IWL_LOCATION_CIPHER_MAX = 4, +}; + +enum ftm_responder_stats_flags { + FTM_RESP_STAT_NON_ASAP_STARTED = 1, + FTM_RESP_STAT_NON_ASAP_IN_WIN = 2, + FTM_RESP_STAT_NON_ASAP_OUT_WIN = 4, + FTM_RESP_STAT_TRIGGER_DUP = 8, + FTM_RESP_STAT_DUP = 16, + FTM_RESP_STAT_DUP_IN_WIN = 32, + FTM_RESP_STAT_DUP_OUT_WIN = 64, + FTM_RESP_STAT_SCHED_SUCCESS = 128, + FTM_RESP_STAT_ASAP_REQ = 256, + FTM_RESP_STAT_NON_ASAP_REQ = 512, + FTM_RESP_STAT_ASAP_RESP = 1024, + FTM_RESP_STAT_NON_ASAP_RESP = 2048, + FTM_RESP_STAT_FAIL_INITIATOR_INACTIVE = 4096, + FTM_RESP_STAT_FAIL_INITIATOR_OUT_WIN = 8192, + FTM_RESP_STAT_FAIL_INITIATOR_RETRY_LIM = 16384, + FTM_RESP_STAT_FAIL_NEXT_SERVED = 32768, + FTM_RESP_STAT_FAIL_TRIGGER_ERR = 65536, + FTM_RESP_STAT_FAIL_GC = 131072, + FTM_RESP_STAT_SUCCESS = 262144, + FTM_RESP_STAT_INTEL_IE = 524288, + FTM_RESP_STAT_INITIATOR_ACTIVE = 1048576, + FTM_RESP_STAT_MEASUREMENTS_AVAILABLE = 2097152, + FTM_RESP_STAT_TRIGGER_UNKNOWN = 4194304, + FTM_RESP_STAT_PROCESS_FAIL = 8388608, + FTM_RESP_STAT_ACK = 16777216, + FTM_RESP_STAT_NACK = 33554432, + FTM_RESP_STAT_INVALID_INITIATOR_ID = 67108864, + FTM_RESP_STAT_TIMER_MIN_DELTA = 134217728, + FTM_RESP_STAT_INITIATOR_REMOVED = 268435456, + FTM_RESP_STAT_INITIATOR_ADDED = 536870912, + FTM_RESP_STAT_ERR_LIST_FULL = 1073741824, + FTM_RESP_STAT_INITIATOR_SCHED_NOW = 2147483648, +}; + +struct iwl_ftm_responder_stats { + u8 addr[6]; + u8 success_ftm; + u8 ftm_per_burst; + __le32 flags; + __le32 duration; + __le32 allocated_duration; + u8 bw; + u8 rate; + __le16 reserved; +}; + +struct iwl_mvm_pasn_sta { + struct list_head list; + struct iwl_mvm_int_sta int_sta; + u8 addr[6]; + long: 32; + struct ieee80211_key_conf keyconf; +}; + +struct iwl_mvm_pasn_hltk_data { + u8 *addr; + u8 cipher; + u8 *hltk; +}; + +enum rtw_hw_key_type { + RTW_CAM_NONE = 0, + RTW_CAM_WEP40 = 1, + RTW_CAM_TKIP = 2, + RTW_CAM_AES = 4, + RTW_CAM_WEP104 = 5, +}; + +enum rtw_wow_pattern_type { + RTW_PATTERN_BROADCAST = 0, + RTW_PATTERN_MULTICAST = 1, + RTW_PATTERN_UNICAST = 2, + RTW_PATTERN_VALID = 3, + RTW_PATTERN_INVALID = 4, +}; + +enum rtw_wake_reason { + RTW_WOW_RSN_RX_PTK_REKEY = 1, + RTW_WOW_RSN_RX_GTK_REKEY = 2, + RTW_WOW_RSN_RX_DEAUTH = 8, + RTW_WOW_RSN_DISCONNECT = 16, + RTW_WOW_RSN_RX_MAGIC_PKT = 33, + RTW_WOW_RSN_RX_PATTERN_MATCH = 35, + RTW_WOW_RSN_RX_NLO = 85, +}; + +struct rtw_fw_media_status_iter_data { + struct rtw_dev *rtwdev; + u8 connect; +}; + +struct rtw_fw_key_type_iter_data { + struct rtw_dev *rtwdev; + u8 group_key_type; + u8 pairwise_key_type; +}; + +struct usb_phy_roothub { + struct phy *phy; + struct list_head list; +}; + +struct input_mt_slot { + int abs[14]; + unsigned int frame; + unsigned int key; +}; + +struct input_mt { + int trkid; + int num_slots; + int slot; + unsigned int flags; + unsigned int frame; + int *red; + struct input_mt_slot slots[0]; +}; + +struct input_mt_pos { + s16 x; + s16 y; +}; + +struct lirc_scancode { + __u64 timestamp; + __u16 flags; + __u16 rc_proto; + __u32 keycode; + __u64 scancode; +}; + +enum mce_kbd_mode { + MCIR2_MODE_KEYBOARD = 0, + MCIR2_MODE_MOUSE = 1, + MCIR2_MODE_UNKNOWN = 2, +}; + +enum mce_kbd_state { + STATE_INACTIVE___3 = 0, + STATE_HEADER_BIT_START___2 = 1, + STATE_HEADER_BIT_END___2 = 2, + 
STATE_BODY_BIT_START___2 = 3, + STATE_BODY_BIT_END___2 = 4, + STATE_FINISHED___3 = 5, +}; + +struct r5l_payload_header { + __le16 type; + __le16 flags; +}; + +enum r5l_payload_type { + R5LOG_PAYLOAD_DATA = 0, + R5LOG_PAYLOAD_PARITY = 1, + R5LOG_PAYLOAD_FLUSH = 2, +}; + +struct r5l_payload_data_parity { + struct r5l_payload_header header; + __le32 size; + __le64 location; + __le32 checksum[0]; +}; + +struct r5l_payload_flush { + struct r5l_payload_header header; + __le32 size; + __le64 flush_stripes[0]; +}; + +struct r5l_meta_block { + __le32 magic; + __le32 checksum; + __u8 version; + __u8 __zero_pading_1; + __le16 __zero_pading_2; + __le32 meta_size; + __le64 seq; + __le64 position; + struct r5l_payload_header payloads[0]; +}; + +struct r5l_io_unit { + struct r5l_log *log; + struct page *meta_page; + int meta_offset; + struct bio *current_bio; + atomic_t pending_stripe; + long: 32; + u64 seq; + sector_t log_start; + sector_t log_end; + struct list_head log_sibling; + struct list_head stripe_list; + int state; + bool need_split_bio; + struct bio *split_bio; + unsigned int has_flush: 1; + unsigned int has_fua: 1; + unsigned int has_null_flush: 1; + unsigned int has_flush_payload: 1; + unsigned int io_deferred: 1; + struct bio_list flush_barriers; +}; + +enum r5c_journal_mode { + R5C_JOURNAL_MODE_WRITE_THROUGH = 0, + R5C_JOURNAL_MODE_WRITE_BACK = 1, +}; + +struct r5l_log { + struct md_rdev *rdev; + u32 uuid_checksum; + sector_t device_size; + sector_t max_free_space; + sector_t last_checkpoint; + u64 last_cp_seq; + sector_t log_start; + u64 seq; + sector_t next_checkpoint; + struct mutex io_mutex; + struct r5l_io_unit *current_io; + spinlock_t io_list_lock; + struct list_head running_ios; + struct list_head io_end_ios; + struct list_head flushing_ios; + struct list_head finished_ios; + long: 32; + struct bio flush_bio; + struct list_head no_mem_stripes; + struct kmem_cache *io_kc; + mempool_t io_pool; + struct bio_set bs; + mempool_t meta_pool; + struct md_thread *reclaim_thread; + long unsigned int reclaim_target; + wait_queue_head_t iounit_wait; + struct list_head no_space_stripes; + spinlock_t no_space_stripes_lock; + bool need_cache_flush; + enum r5c_journal_mode r5c_journal_mode; + struct list_head stripe_in_journal_list; + spinlock_t stripe_in_journal_lock; + atomic_t stripe_in_journal_count; + struct work_struct deferred_io_work; + struct work_struct disable_writeback_work; + spinlock_t tree_lock; + struct xarray big_stripe_tree; + long: 32; +}; + +enum r5l_io_unit_state { + IO_UNIT_RUNNING = 0, + IO_UNIT_IO_START = 1, + IO_UNIT_IO_END = 2, + IO_UNIT_STRIPE_END = 3, +}; + +struct r5l_recovery_ctx { + struct page *meta_page; + long: 32; + sector_t meta_total_blocks; + sector_t pos; + u64 seq; + int data_parity_stripes; + int data_only_stripes; + struct list_head cached_list; + struct page *ra_pool[256]; + struct bio_vec ra_bvec[256]; + sector_t pool_offset; + int total_pages; + int valid_pages; +}; + +enum { + IF_LINK_MODE_DEFAULT = 0, + IF_LINK_MODE_DORMANT = 1, + IF_LINK_MODE_TESTING = 2, +}; + +enum lw_bits { + LW_URGENT = 0, +}; + +struct datalink_proto { + unsigned char type[8]; + struct llc_sap *sap; + short unsigned int header_length; + int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + int (*request)(struct datalink_proto *, struct sk_buff *, const unsigned char *); + struct list_head node; +}; + +enum { + ETHTOOL_A_CABLE_PAIR_A = 0, + ETHTOOL_A_CABLE_PAIR_B = 1, + ETHTOOL_A_CABLE_PAIR_C = 2, + ETHTOOL_A_CABLE_PAIR_D = 3, +}; + 
+enum { + ETHTOOL_A_CABLE_INF_SRC_UNSPEC = 0, + ETHTOOL_A_CABLE_INF_SRC_TDR = 1, + ETHTOOL_A_CABLE_INF_SRC_ALCD = 2, +}; + +enum { + ETHTOOL_A_CABLE_RESULT_UNSPEC = 0, + ETHTOOL_A_CABLE_RESULT_PAIR = 1, + ETHTOOL_A_CABLE_RESULT_CODE = 2, + ETHTOOL_A_CABLE_RESULT_SRC = 3, + __ETHTOOL_A_CABLE_RESULT_CNT = 4, + ETHTOOL_A_CABLE_RESULT_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0, + ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1, + ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2, + ETHTOOL_A_CABLE_FAULT_LENGTH_SRC = 3, + __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 4, + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2, +}; + +enum { + ETHTOOL_A_CABLE_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_NEST_RESULT = 1, + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2, + __ETHTOOL_A_CABLE_NEST_CNT = 3, + ETHTOOL_A_CABLE_NEST_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2, + ETHTOOL_A_CABLE_TEST_NTF_NEST = 3, + __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4, + ETHTOOL_A_CABLE_TEST_NTF_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2, + ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3, + ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4, + __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5, + ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4, +}; + +enum { + ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0, + ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1, + ETHTOOL_A_CABLE_AMPLITUDE_mV = 2, + __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3, + ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_PULSE_UNSPEC = 0, + ETHTOOL_A_CABLE_PULSE_mV = 1, + __ETHTOOL_A_CABLE_PULSE_CNT = 2, + ETHTOOL_A_CABLE_PULSE_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_STEP_UNSPEC = 0, + ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1, + ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2, + ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3, + __ETHTOOL_A_CABLE_STEP_CNT = 4, + ETHTOOL_A_CABLE_STEP_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TDR_NEST_STEP = 1, + ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2, + ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3, + __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4, + ETHTOOL_A_CABLE_TDR_NEST_MAX = 3, +}; + +struct nft_rhash { + struct rhashtable ht; + struct delayed_work gc_work; +}; + +struct nft_rhash_elem { + struct nft_elem_priv priv; + struct rhash_head node; + struct nft_set_ext ext; +}; + +struct nft_rhash_cmp_arg { + const struct nft_set *set; + const u32 *key; + u8 genmask; + long: 32; + u64 tstamp; +}; + +struct nft_rhash_ctx { + const struct nft_ctx ctx; + const struct nft_set *set; +}; + +struct nft_hash { + u32 seed; + u32 buckets; + struct hlist_head table[0]; +}; + +struct nft_hash_elem { + struct nft_elem_priv priv; + struct hlist_node node; + struct nft_set_ext ext; +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + union { + __be32 imsf_slist[1]; + struct { + struct {} __empty_imsf_slist_flex; + __be32 imsf_slist_flex[0]; + }; + }; +}; + +struct igmphdr { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; +}; + +struct igmp_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *in_dev; +}; + +struct igmp_mcf_iter_state { + struct seq_net_private p; + struct net_device 
*dev; + struct in_device *idev; + struct ip_mc_list *im; +}; + +typedef u64 (*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32); + +struct bpf_struct_ops_tcp_congestion_ops { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct tcp_congestion_ops data; +}; + +struct __bridge_info { + __u64 designated_root; + __u64 bridge_id; + __u32 root_path_cost; + __u32 max_age; + __u32 hello_time; + __u32 forward_delay; + __u32 bridge_max_age; + __u32 bridge_hello_time; + __u32 bridge_forward_delay; + __u8 topology_change; + __u8 topology_change_detected; + __u8 root_port; + __u8 stp_enabled; + __u32 ageing_time; + __u32 gc_interval; + __u32 hello_timer_value; + __u32 tcn_timer_value; + __u32 topology_change_timer_value; + __u32 gc_timer_value; +}; + +struct __port_info { + __u64 designated_root; + __u64 designated_bridge; + __u16 port_id; + __u16 designated_port; + __u32 path_cost; + __u32 designated_cost; + __u8 state; + __u8 top_change_ack; + __u8 config_pending; + __u8 unused0; + __u32 message_age_timer_value; + __u32 forward_delay_timer_value; + __u32 hold_timer_value; + long: 32; +}; + +enum nl80211_peer_measurement_status { + NL80211_PMSR_STATUS_SUCCESS = 0, + NL80211_PMSR_STATUS_REFUSED = 1, + NL80211_PMSR_STATUS_TIMEOUT = 2, + NL80211_PMSR_STATUS_FAILURE = 3, +}; + +enum nl80211_peer_measurement_resp { + __NL80211_PMSR_RESP_ATTR_INVALID = 0, + NL80211_PMSR_RESP_ATTR_DATA = 1, + NL80211_PMSR_RESP_ATTR_STATUS = 2, + NL80211_PMSR_RESP_ATTR_HOST_TIME = 3, + NL80211_PMSR_RESP_ATTR_AP_TSF = 4, + NL80211_PMSR_RESP_ATTR_FINAL = 5, + NL80211_PMSR_RESP_ATTR_PAD = 6, + NUM_NL80211_PMSR_RESP_ATTRS = 7, + NL80211_PMSR_RESP_ATTR_MAX = 6, +}; + +enum nl80211_peer_measurement_ftm_failure_reasons { + NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = 0, + NL80211_PMSR_FTM_FAILURE_NO_RESPONSE = 1, + NL80211_PMSR_FTM_FAILURE_REJECTED = 2, + NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL = 3, + NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE = 4, + NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP = 5, + NL80211_PMSR_FTM_FAILURE_PEER_BUSY = 6, + NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS = 7, +}; + +enum nl80211_peer_measurement_ftm_resp { + __NL80211_PMSR_FTM_RESP_ATTR_INVALID = 0, + NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON = 1, + NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX = 2, + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS = 3, + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES = 4, + NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME = 5, + NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP = 6, + NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION = 7, + NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST = 8, + NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG = 9, + NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD = 10, + NL80211_PMSR_FTM_RESP_ATTR_TX_RATE = 11, + NL80211_PMSR_FTM_RESP_ATTR_RX_RATE = 12, + NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG = 13, + NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE = 14, + NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD = 15, + NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG = 16, + NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE = 17, + NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD = 18, + NL80211_PMSR_FTM_RESP_ATTR_LCI = 19, + NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC = 20, + NL80211_PMSR_FTM_RESP_ATTR_PAD = 21, + NUM_NL80211_PMSR_FTM_RESP_ATTR = 22, + NL80211_PMSR_FTM_RESP_ATTR_MAX = 21, +}; + +struct cfg80211_pmsr_ftm_result { + const u8 *lci; + const u8 *civicloc; + unsigned int lci_len; + unsigned int civicloc_len; + enum 
nl80211_peer_measurement_ftm_failure_reasons failure_reason; + u32 num_ftmr_attempts; + u32 num_ftmr_successes; + s16 burst_index; + u8 busy_retry_time; + u8 num_bursts_exp; + u8 burst_duration; + u8 ftms_per_burst; + s32 rssi_avg; + s32 rssi_spread; + struct rate_info tx_rate; + struct rate_info rx_rate; + s64 rtt_avg; + s64 rtt_variance; + s64 rtt_spread; + s64 dist_avg; + s64 dist_variance; + s64 dist_spread; + u16 num_ftmr_attempts_valid: 1; + u16 num_ftmr_successes_valid: 1; + u16 rssi_avg_valid: 1; + u16 rssi_spread_valid: 1; + u16 tx_rate_valid: 1; + u16 rx_rate_valid: 1; + u16 rtt_avg_valid: 1; + u16 rtt_variance_valid: 1; + u16 rtt_spread_valid: 1; + u16 dist_avg_valid: 1; + u16 dist_variance_valid: 1; + u16 dist_spread_valid: 1; + long: 32; +}; + +struct cfg80211_pmsr_result { + u64 host_time; + u64 ap_tsf; + enum nl80211_peer_measurement_status status; + u8 addr[6]; + u8 final: 1; + u8 ap_tsf_valid: 1; + enum nl80211_peer_measurement_type type; + union { + struct cfg80211_pmsr_ftm_result ftm; + }; +}; + +struct ieee80211_csa_settings { + const u16 *counter_offsets_beacon; + const u16 *counter_offsets_presp; + int n_counter_offsets_beacon; + int n_counter_offsets_presp; + u8 count; +}; + +struct ieee80211_color_change_settings { + u16 counter_offset_beacon; + u16 counter_offset_presp; + u8 count; +}; + +enum sta_link_apply_mode { + STA_LINK_MODE_NEW = 0, + STA_LINK_MODE_STA_MODIFY = 1, + STA_LINK_MODE_LINK_MODIFY = 2, +}; + +struct svc_pt_regs { + struct pt_regs regs; + u32 dacr; + u32 ttbcr; +}; + +enum cpu_led_event { + CPU_LED_IDLE_START = 0, + CPU_LED_IDLE_END = 1, + CPU_LED_START = 2, + CPU_LED_STOP = 3, + CPU_LED_HALTED = 4, +}; + +struct stackframe { + long unsigned int fp; + long unsigned int sp; + long unsigned int lr; + long unsigned int pc; + long unsigned int *lr_addr; + struct llist_node *kr_cur; + struct task_struct *tsk; +}; + +struct tlb_args { + struct vm_area_struct *ta_vma; + long unsigned int ta_start; + long unsigned int ta_end; +}; + +struct robust_list { + struct robust_list *next; +}; + +struct robust_list_head { + struct robust_list list; + long int futex_offset; + struct robust_list *list_op_pending; +}; + +struct waitid_info { + pid_t pid; + uid_t uid; + int status; + int cause; +}; + +struct wait_opts { + enum pid_type wo_type; + int wo_flags; + struct pid *wo_pid; + struct waitid_info *wo_info; + int wo_stat; + struct rusage *wo_rusage; + wait_queue_entry_t child_wait; + int notask_error; +}; + +struct die_args { + struct pt_regs *regs; + const char *str; + long int err; + int trapnr; + int signr; +}; + +struct trace_event_raw_notifier_info { + struct trace_entry ent; + void *cb; + char __data[0]; +}; + +struct trace_event_data_offsets_notifier_info {}; + +typedef void (*btf_trace_notifier_register)(void *, void *); + +typedef void (*btf_trace_notifier_unregister)(void *, void *); + +typedef void (*btf_trace_notifier_run)(void *, void *); + +struct prb_data_block { + long unsigned int id; + char data[0]; +}; + +struct rt_waiter_node { + struct rb_node entry; + int prio; + u64 deadline; +}; + +struct rt_mutex_waiter { + struct rt_waiter_node tree; + struct rt_waiter_node pi_tree; + struct task_struct *task; + struct rt_mutex_base *lock; + unsigned int wake_state; + struct ww_acquire_ctx *ww_ctx; +}; + +union futex_key { + struct { + u64 i_seq; + long unsigned int pgoff; + unsigned int offset; + } shared; + struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; + long unsigned int address; + unsigned int offset; + } private; + struct { + u64 ptr; 
+ long unsigned int word; + unsigned int offset; + } both; +}; + +struct futex_pi_state { + struct list_head list; + struct rt_mutex_base pi_mutex; + struct task_struct *owner; + refcount_t refcount; + union futex_key key; +}; + +enum { + FUTEX_STATE_OK = 0, + FUTEX_STATE_EXITING = 1, + FUTEX_STATE_DEAD = 2, +}; + +struct futex_hash_bucket { + atomic_t waiters; + spinlock_t lock; + struct plist_head chain; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct futex_q; + +typedef void futex_wake_fn(struct wake_q_head *, struct futex_q *); + +struct futex_q { + struct plist_node list; + struct task_struct *task; + spinlock_t *lock_ptr; + futex_wake_fn *wake; + void *wake_data; + long: 32; + union futex_key key; + struct futex_pi_state *pi_state; + struct rt_mutex_waiter *rt_waiter; + union futex_key *requeue_pi_key; + u32 bitset; + atomic_t requeue_state; + long: 32; +}; + +enum futex_access { + FUTEX_READ = 0, + FUTEX_WRITE = 1, +}; + +enum { + TRACE_CTX_NMI = 0, + TRACE_CTX_IRQ = 1, + TRACE_CTX_SOFTIRQ = 2, + TRACE_CTX_NORMAL = 3, + TRACE_CTX_TRANSITION = 4, +}; + +struct dyn_arch_ftrace { + struct module *mod; +}; + +struct __arch_ftrace_regs { + struct pt_regs regs; +}; + +enum ftrace_bug_type { + FTRACE_BUG_UNKNOWN = 0, + FTRACE_BUG_INIT = 1, + FTRACE_BUG_NOP = 2, + FTRACE_BUG_CALL = 3, + FTRACE_BUG_UPDATE = 4, +}; + +enum { + FTRACE_FL_ENABLED = 2147483648, + FTRACE_FL_REGS = 1073741824, + FTRACE_FL_REGS_EN = 536870912, + FTRACE_FL_TRAMP = 268435456, + FTRACE_FL_TRAMP_EN = 134217728, + FTRACE_FL_IPMODIFY = 67108864, + FTRACE_FL_DISABLED = 33554432, + FTRACE_FL_DIRECT = 16777216, + FTRACE_FL_DIRECT_EN = 8388608, + FTRACE_FL_CALL_OPS = 4194304, + FTRACE_FL_CALL_OPS_EN = 2097152, + FTRACE_FL_TOUCHED = 1048576, + FTRACE_FL_MODIFIED = 524288, +}; + +struct dyn_ftrace { + long unsigned int ip; + long unsigned int flags; + struct dyn_arch_ftrace arch; +}; + +enum { + FTRACE_UPDATE_IGNORE = 0, + FTRACE_UPDATE_MAKE_CALL = 1, + FTRACE_UPDATE_MODIFY_CALL = 2, + FTRACE_UPDATE_MAKE_NOP = 3, +}; + +enum { + FTRACE_ITER_FILTER = 1, + FTRACE_ITER_NOTRACE = 2, + FTRACE_ITER_PRINTALL = 4, + FTRACE_ITER_DO_PROBES = 8, + FTRACE_ITER_PROBE = 16, + FTRACE_ITER_MOD = 32, + FTRACE_ITER_ENABLED = 64, + FTRACE_ITER_TOUCHED = 128, + FTRACE_ITER_ADDRS = 256, +}; + +struct ftrace_mod_load { + struct list_head list; + char *func; + char *module; + int enable; +}; + +enum regex_type { + MATCH_FULL = 0, + MATCH_FRONT_ONLY = 1, + MATCH_MIDDLE_ONLY = 2, + MATCH_END_ONLY = 3, + MATCH_GLOB = 4, + MATCH_INDEX = 5, +}; + +enum { + FTRACE_MODIFY_ENABLE_FL = 1, + FTRACE_MODIFY_MAY_SLEEP_FL = 2, +}; + +struct ftrace_func_probe { + struct ftrace_probe_ops *probe_ops; + struct ftrace_ops ops; + struct trace_array *tr; + struct list_head list; + void *data; + int ref; +}; + +struct ftrace_page { + struct ftrace_page *next; + struct dyn_ftrace *records; + int index; + int order; +}; + +struct ftrace_rec_iter { + struct ftrace_page *pg; + int index; +}; + +struct ftrace_iterator { + loff_t pos; + loff_t func_pos; + loff_t mod_pos; + struct ftrace_page *pg; + struct dyn_ftrace *func; + struct ftrace_func_probe *probe; + struct ftrace_func_entry *probe_entry; + struct trace_parser parser; + struct ftrace_hash *hash; + struct ftrace_ops *ops; + struct trace_array *tr; + struct list_head *mod_list; + int pidx; + int idx; + unsigned int flags; + long: 32; +}; + +struct ftrace_glob { + char *search; + unsigned int len; + int type; +}; 
+ +struct ftrace_func_map { + struct ftrace_func_entry entry; + void *data; +}; + +struct ftrace_func_mapper { + struct ftrace_hash hash; +}; + +enum graph_filter_type { + GRAPH_FILTER_NOTRACE = 0, + GRAPH_FILTER_FUNCTION = 1, +}; + +struct ftrace_graph_data { + struct ftrace_hash *hash; + struct ftrace_func_entry *entry; + int idx; + enum graph_filter_type type; + struct ftrace_hash *new_hash; + const struct seq_operations *seq_ops; + struct trace_parser parser; +}; + +struct ftrace_mod_func { + struct list_head list; + char *name; + long unsigned int ip; + unsigned int size; +}; + +struct ftrace_mod_map { + struct callback_head rcu; + struct list_head list; + struct module *mod; + long unsigned int start_addr; + long unsigned int end_addr; + struct list_head funcs; + unsigned int num_funcs; +}; + +struct ftrace_init_func { + struct list_head list; + long unsigned int ip; +}; + +struct kallsyms_data { + long unsigned int *addrs; + const char **syms; + size_t cnt; + size_t found; +}; + +struct trace_event_raw_benchmark_event { + struct trace_entry ent; + char str[128]; + u64 delta; + char __data[0]; +}; + +struct trace_event_data_offsets_benchmark_event {}; + +typedef void (*btf_trace_benchmark_event)(void *, const char *, u64); + +struct bpf_iter_seq_link_info { + u32 link_id; +}; + +struct bpf_iter__bpf_link { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_link *link; + }; +}; + +typedef struct kmem_cache *kmem_buckets[14]; + +struct page_vma_mapped_walk { + long unsigned int pfn; + long unsigned int nr_pages; + long unsigned int pgoff; + struct vm_area_struct *vma; + long unsigned int address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; + +typedef unsigned int isolate_mode_t; + +struct movable_operations { + bool (*isolate_page)(struct page *, isolate_mode_t); + int (*migrate_page)(struct page *, struct page *, enum migrate_mode); + void (*putback_page)(struct page *); +}; + +enum ttu_flags { + TTU_SPLIT_HUGE_PMD = 4, + TTU_IGNORE_MLOCK = 8, + TTU_SYNC = 16, + TTU_HWPOISON = 32, + TTU_BATCH_FLUSH = 64, + TTU_RMAP_LOCKED = 128, +}; + +enum rmp_flags { + RMP_LOCKED = 1, + RMP_USE_SHARED_ZEROPAGE = 2, +}; + +struct rmap_walk_control { + void *arg; + bool try_lock; + bool contended; + bool (*rmap_one)(struct folio *, struct vm_area_struct *, long unsigned int, void *); + int (*done)(struct folio *); + struct anon_vma * (*anon_lock)(const struct folio *, struct rmap_walk_control *); + bool (*invalid_vma)(struct vm_area_struct *, void *); +}; + +struct rmap_walk_arg { + struct folio *folio; + bool map_unused_to_zeropage; +}; + +enum { + PAGE_WAS_MAPPED = 1, + PAGE_WAS_MLOCKED = 2, + PAGE_OLD_STATES = 3, +}; + +struct migrate_pages_stats { + int nr_succeeded; + int nr_failed_pages; + int nr_thp_succeeded; + int nr_thp_failed; + int nr_thp_split; + int nr_split; +}; + +struct hugepage_subpool; + +typedef struct task_struct *class_task_lock_t; + +struct pidfd_info { + __u64 mask; + __u64 cgroupid; + __u32 pid; + __u32 tgid; + __u32 ppid; + __u32 ruid; + __u32 rgid; + __u32 euid; + __u32 egid; + __u32 suid; + __u32 sgid; + __u32 fsuid; + __u32 fsgid; + __u32 spare0[1]; +}; + +struct core_name { + char *corename; + int used; + int size; +}; + +struct dx_hash_info { + u32 hash; + u32 minor_hash; + int hash_version; + u32 *seed; +}; + +struct ext4_filename { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + struct dx_hash_info hinfo; +}; + +struct ext4_xattr_entry { + __u8 e_name_len; + __u8 e_name_index; + __le16 e_value_offs; + 
__le32 e_value_inum; + __le32 e_value_size; + __le32 e_hash; + char e_name[0]; +}; + +struct ext4_xattr_info { + const char *name; + const void *value; + size_t value_len; + int name_index; + int in_inode; +}; + +struct ext4_xattr_search { + struct ext4_xattr_entry *first; + void *base; + void *end; + struct ext4_xattr_entry *here; + int not_found; +}; + +struct ext4_xattr_ibody_find { + struct ext4_xattr_search s; + struct ext4_iloc iloc; +}; + +struct msdos_dir_slot { + __u8 id; + __u8 name0_4[10]; + __u8 attr; + __u8 reserved; + __u8 alias_checksum; + __u8 name5_10[12]; + __le16 start; + __u8 name11_12[4]; +}; + +struct shortname_info { + unsigned char lower: 1; + unsigned char upper: 1; + unsigned char valid: 1; +}; + +struct debugfs_short_fops { + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + loff_t (*llseek)(struct file *, loff_t, int); +}; + +struct debugfs_cancellation { + struct list_head list; + void (*cancel)(struct dentry *, void *); + void *cancel_data; +}; + +struct debugfs_fsdata { + const struct file_operations *real_fops; + const struct debugfs_short_fops *short_fops; + union { + debugfs_automount_t automount; + struct { + refcount_t active_users; + struct completion active_users_drained; + struct mutex cancellations_mtx; + struct list_head cancellations; + }; + }; +}; + +struct debugfs_fs_info { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +enum { + Opt_uid___5 = 0, + Opt_gid___5 = 1, + Opt_mode___3 = 2, + Opt_source = 3, +}; + +struct file_extent_cluster { + u64 start; + u64 end; + u64 boundary[128]; + unsigned int nr; + long: 32; + u64 owning_root; +}; + +struct mapping_tree { + struct rb_root rb_root; + spinlock_t lock; +}; + +enum reloc_stage { + MOVE_DATA_EXTENTS = 0, + UPDATE_DATA_PTRS = 1, +}; + +struct reloc_control { + struct btrfs_block_group *block_group; + struct btrfs_root *extent_root; + struct inode *data_inode; + struct btrfs_block_rsv *block_rsv; + struct btrfs_backref_cache backref_cache; + struct file_extent_cluster cluster; + struct extent_io_tree processed_blocks; + struct mapping_tree reloc_root_tree; + struct list_head reloc_roots; + struct list_head dirty_subvol_roots; + u64 merging_rsv_size; + u64 nodes_relocated; + u64 reserved_bytes; + u64 search_start; + u64 extents_found; + enum reloc_stage stage; + bool create_reloc_tree; + bool merge_reloc_tree; + bool found_file_extent; +}; + +struct mapping_node { + struct { + struct rb_node rb_node; + long: 32; + u64 bytenr; + }; + void *data; + long: 32; +}; + +struct tree_block { + struct { + struct rb_node rb_node; + long: 32; + u64 bytenr; + }; + u64 owner; + struct btrfs_key key; + u8 level; + bool key_ready; + long: 32; +}; + +struct keyctl_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; + __u32 __spare[10]; +}; + +struct keyctl_pkey_params { + __s32 key_id; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + __u32 __spare[7]; +}; + +enum { + Opt_err___2 = 0, + Opt_enc = 1, + Opt_hash = 2, +}; + +struct akcipher_instance { + void (*free)(struct akcipher_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[64]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { 
+ struct crypto_spawn base; +}; + +struct crypto_akcipher_sync_data { + struct crypto_akcipher *tfm; + const void *src; + void *dst; + unsigned int slen; + unsigned int dlen; + struct akcipher_request *req; + struct crypto_wait cwait; + struct scatterlist sg; + u8 *buf; +}; + +typedef unsigned char u8___2; + +struct rand_data { + void *hash_state; + long: 32; + __u64 prev_time; + __u64 last_delta; + __s64 last_delta2; + unsigned int flags; + unsigned int osr; + unsigned char *mem; + unsigned int memlocation; + unsigned int memblocks; + unsigned int memblocksize; + unsigned int memaccessloops; + unsigned int rct_count; + unsigned int apt_cutoff; + unsigned int apt_cutoff_permanent; + unsigned int apt_observations; + unsigned int apt_count; + unsigned int apt_base; + unsigned int health_failure; + unsigned int apt_base_set: 1; + long: 32; +}; + +struct jitterentropy { + spinlock_t jent_lock; + struct rand_data *entropy_collector; + struct crypto_shash *tfm; + struct shash_desc *sdesc; +}; + +typedef unsigned int iov_iter_extraction_t; + +struct bio_map_data { + bool is_our_pages: 1; + bool is_null_mapped: 1; + long: 32; + struct iov_iter iter; + struct iovec iov[0]; +}; + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + long: 32; + u64 batch; +}; + +struct blk_iolatency { + struct rq_qos rqos; + struct timer_list timer; + bool enabled; + atomic_t enable_cnt; + struct work_struct enable_work; +}; + +struct iolatency_grp; + +struct child_latency_info { + spinlock_t lock; + long: 32; + u64 last_scale_event; + u64 scale_lat; + u64 nr_samples; + struct iolatency_grp *scale_grp; + atomic_t scale_cookie; +}; + +struct percentile_stats { + u64 total; + u64 missed; +}; + +struct latency_stat { + union { + struct percentile_stats ps; + struct blk_rq_stat rqs; + }; +}; + +struct iolatency_grp { + struct blkg_policy_data pd; + struct latency_stat *stats; + struct latency_stat cur_stat; + struct blk_iolatency *blkiolat; + unsigned int max_depth; + struct rq_wait rq_wait; + atomic64_t window_start; + atomic_t scale_cookie; + long: 32; + u64 min_lat_nsec; + u64 cur_win_nsec; + u64 lat_avg; + u64 nr_samples; + bool ssd; + long: 32; + struct child_latency_info child_lat; +}; + +struct io_splice { + struct file *file_out; + long: 32; + loff_t off_out; + loff_t off_in; + u64 len; + int splice_fd_in; + unsigned int flags; + struct io_rsrc_node *rsrc_node; + long: 32; +}; + +union nested_table { + union nested_table *table; + struct rhash_lock_head *bucket; +}; + +typedef struct { + size_t bitContainer; + unsigned int bitsConsumed; + const char *ptr; + const char *start; + const char *limitPtr; +} BIT_DStream_t; + +typedef enum { + BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3, +} BIT_DStream_status; + +typedef struct { + BYTE maxTableLog; + BYTE tableType; + BYTE tableLog; + BYTE reserved; +} DTableDesc; + +typedef struct { + BYTE nbBits; + BYTE byte; +} HUF_DEltX1; + +typedef struct { + U32 rankVal[13]; + U32 rankStart[13]; + U32 statsWksp[218]; + BYTE symbols[256]; + BYTE huffWeight[256]; +} HUF_ReadDTableX1_Workspace; + +typedef struct { + U16 sequence; + BYTE nbBits; + BYTE length; +} HUF_DEltX2; + +typedef struct { + BYTE symbol; +} sortedSymbol_t; + +typedef U32 rankValCol_t[13]; + +typedef struct { + U32 rankVal[156]; + U32 rankStats[13]; + U32 rankStart0[15]; + sortedSymbol_t sortedSymbol[256]; + BYTE weightList[256]; + U32 calleeWksp[218]; +} HUF_ReadDTableX2_Workspace; + +typedef struct { + U32 tableTime; + 
U32 decode256Time; +} algo_time_t; + +struct simple_pm_bus { + struct clk_bulk_data *clks; + int num_clks; +}; + +enum pci_mmap_state { + pci_mmap_io = 0, + pci_mmap_mem = 1, +}; + +enum pci_mmap_api { + PCI_MMAP_SYSFS = 0, + PCI_MMAP_PROCFS = 1, +}; + +struct pci_filp_private { + enum pci_mmap_state mmap_state; + int write_combine; +}; + +struct clk_notifier { + struct clk *clk; + struct srcu_notifier_head notifier_head; + struct list_head node; +}; + +struct clk_notifier_data { + struct clk *clk; + long unsigned int old_rate; + long unsigned int new_rate; +}; + +struct clk_parent_map; + +struct clk_core { + const char *name; + const struct clk_ops *ops; + struct clk_hw *hw; + struct module *owner; + struct device *dev; + struct hlist_node rpm_node; + struct device_node *of_node; + struct clk_core *parent; + struct clk_parent_map *parents; + u8 num_parents; + u8 new_parent_index; + long unsigned int rate; + long unsigned int req_rate; + long unsigned int new_rate; + struct clk_core *new_parent; + struct clk_core *new_child; + long unsigned int flags; + bool orphan; + bool rpm_enabled; + unsigned int enable_count; + unsigned int prepare_count; + unsigned int protect_count; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int accuracy; + int phase; + struct clk_duty duty; + struct hlist_head children; + struct hlist_node child_node; + struct hlist_head clks; + unsigned int notifier_count; + struct dentry *dentry; + struct hlist_node debug_node; + struct kref ref; +}; + +struct clk_onecell_data { + struct clk **clks; + unsigned int clk_num; +}; + +struct clk_hw_onecell_data { + unsigned int num; + struct clk_hw *hws[0]; +}; + +struct clk_parent_map { + const struct clk_hw *hw; + struct clk_core *core; + const char *fw_name; + const char *name; + int index; +}; + +struct trace_event_raw_clk { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_clk_rate { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int rate; + char __data[0]; +}; + +struct trace_event_raw_clk_rate_range { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int min; + long unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_clk_parent { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + char __data[0]; +}; + +struct trace_event_raw_clk_phase { + struct trace_entry ent; + u32 __data_loc_name; + int phase; + char __data[0]; +}; + +struct trace_event_raw_clk_duty_cycle { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int num; + unsigned int den; + char __data[0]; +}; + +struct trace_event_raw_clk_rate_request { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + long unsigned int min; + long unsigned int max; + long unsigned int prate; + char __data[0]; +}; + +struct trace_event_data_offsets_clk { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate_range { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_parent { + u32 name; + const void *name_ptr_; + u32 pname; + const void *pname_ptr_; +}; + +struct trace_event_data_offsets_clk_phase { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_duty_cycle { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate_request { + u32 name; + const void *name_ptr_; + u32 pname; + const void *pname_ptr_; +}; + 
+typedef void (*btf_trace_clk_enable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_enable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_set_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_complete)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_min_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_max_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_range)(void *, struct clk_core *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_clk_set_parent)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_parent_complete)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_phase)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_phase_complete)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_duty_cycle)(void *, struct clk_core *, struct clk_duty *); + +typedef void (*btf_trace_clk_set_duty_cycle_complete)(void *, struct clk_core *, struct clk_duty *); + +typedef void (*btf_trace_clk_rate_request_start)(void *, struct clk_rate_request *); + +typedef void (*btf_trace_clk_rate_request_done)(void *, struct clk_rate_request *); + +struct clk_notifier_devres { + struct clk *clk; + struct notifier_block *nb; +}; + +struct of_clk_provider { + struct list_head link; + struct device_node *node; + struct clk * (*get)(struct of_phandle_args *, void *); + struct clk_hw * (*get_hw)(struct of_phandle_args *, void *); + void *data; +}; + +struct clock_provider { + void (*clk_init_cb)(struct device_node *); + struct device_node *np; + struct list_head node; +}; + +struct transport_class { + struct class class; + int (*setup)(struct transport_container *, struct device *, struct device *); + int (*configure)(struct transport_container *, struct device *, struct device *); + int (*remove)(struct transport_container *, struct device *, struct device *); +}; + +struct ata_internal { + struct scsi_transport_template t; + struct device_attribute private_port_attrs[3]; + struct device_attribute private_link_attrs[3]; + struct device_attribute private_dev_attrs[9]; + struct transport_container link_attr_cont; + struct transport_container dev_attr_cont; + struct device_attribute *link_attrs[4]; + struct device_attribute *port_attrs[4]; + struct device_attribute *dev_attrs[10]; +}; + +struct ata_show_ering_arg { + char *buf; + int written; +}; + +enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + 
e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180, +}; + +struct iwl_trans_txq_scd_cfg { + u8 fifo; + u8 sta_id; + u8 tid; + bool aggregate; + int frame_limit; +}; + +enum { + TX_STATUS_SUCCESS = 1, + TX_STATUS_DIRECT_DONE = 2, + TX_STATUS_POSTPONE_DELAY = 64, + TX_STATUS_POSTPONE_FEW_BYTES = 65, + TX_STATUS_POSTPONE_BT_PRIO = 66, + TX_STATUS_POSTPONE_QUIET_PERIOD = 67, + TX_STATUS_POSTPONE_CALC_TTAK = 68, + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 129, + TX_STATUS_FAIL_SHORT_LIMIT = 130, + TX_STATUS_FAIL_LONG_LIMIT = 131, + TX_STATUS_FAIL_FIFO_UNDERRUN = 132, + TX_STATUS_FAIL_DRAIN_FLOW = 133, + TX_STATUS_FAIL_RFKILL_FLUSH = 134, + TX_STATUS_FAIL_LIFE_EXPIRE = 135, + TX_STATUS_FAIL_DEST_PS = 136, + TX_STATUS_FAIL_HOST_ABORTED = 137, + TX_STATUS_FAIL_BT_RETRY = 138, + TX_STATUS_FAIL_STA_INVALID = 139, + TX_STATUS_FAIL_FRAG_DROPPED = 140, + TX_STATUS_FAIL_TID_DISABLE = 141, + TX_STATUS_FAIL_FIFO_FLUSHED = 142, + TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 143, + TX_STATUS_FAIL_PASSIVE_NO_RX = 144, + TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 145, +}; + +enum { + TX_STATUS_MSK = 255, + TX_STATUS_DELAY_MSK = 64, + TX_STATUS_ABORT_MSK = 128, + TX_PACKET_MODE_MSK = 65280, + TX_FIFO_NUMBER_MSK = 458752, + TX_RESERVED = 7864320, + TX_POWER_PA_DETECT_MSK = 2139095040, + TX_ABORT_REQUIRED_MSK = 2147483648, +}; + +enum { + AGG_TX_STATE_TRANSMITTED = 0, + AGG_TX_STATE_UNDERRUN_MSK = 1, + AGG_TX_STATE_BT_PRIO_MSK = 2, + AGG_TX_STATE_FEW_BYTES_MSK = 4, + AGG_TX_STATE_ABORT_MSK = 8, + AGG_TX_STATE_LAST_SENT_TTL_MSK = 16, + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 32, + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 64, + AGG_TX_STATE_SCD_QUERY_MSK = 128, + AGG_TX_STATE_TEST_BAD_CRC32_MSK = 256, + AGG_TX_STATE_RESPONSE_MSK = 511, + AGG_TX_STATE_DUMP_TX_MSK = 512, + AGG_TX_STATE_DELAY_TX_MSK = 1024, +}; + +struct agg_tx_status { + __le16 status; + __le16 sequence; +}; + +struct iwlagn_tx_resp { + u8 frame_count; + u8 bt_kill_count; + u8 failure_rts; + u8 failure_frame; + __le32 rate_n_flags; + __le16 wireless_media_time; + u8 pa_status; + u8 pa_integ_res_a[3]; + u8 pa_integ_res_b[3]; + u8 pa_integ_res_C[3]; + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_info; + u8 ra_tid; + __le16 frame_ctrl; + struct agg_tx_status status; +}; + +struct iwl_compressed_ba_resp { + __le32 sta_addr_lo32; + __le16 sta_addr_hi16; + __le16 reserved; + u8 sta_id; + u8 tid; + __le16 seq_ctl; + __le64 bitmap; + __le16 scd_flow; + __le16 scd_ssn; + u8 txed; + u8 txed_2_done; + __le16 reserved1; +}; + +enum iwl_proto_offloads { + IWL_D3_PROTO_OFFLOAD_ARP = 1, + IWL_D3_PROTO_OFFLOAD_NS = 2, + IWL_D3_PROTO_IPV4_VALID = 4, + IWL_D3_PROTO_IPV6_VALID = 8, + IWL_D3_PROTO_OFFLOAD_BTM = 16, +}; + +struct iwl_proto_offload_cmd_common { + __le32 enabled; + __be32 remote_ipv4_addr; + __be32 host_ipv4_addr; + u8 arp_mac_addr[6]; + __le16 reserved; +}; + +struct iwl_proto_offload_cmd_v1 { + struct iwl_proto_offload_cmd_common common; + u8 remote_ipv6_addr[16]; + u8 solicited_node_ipv6_addr[16]; + u8 target_ipv6_addr[32]; + u8 ndp_mac_addr[6]; + __le16 reserved2; +}; + +struct iwl_proto_offload_cmd_v2 { + struct iwl_proto_offload_cmd_common common; + u8 remote_ipv6_addr[16]; + u8 solicited_node_ipv6_addr[16]; + u8 target_ipv6_addr[96]; + u8 ndp_mac_addr[6]; + u8 num_valid_ipv6_addrs; + u8 reserved2[3]; +} __attribute__((packed)); + +struct iwl_ns_config { + struct in6_addr source_ipv6_addr; + struct in6_addr dest_ipv6_addr; + u8 target_mac_addr[6]; + __le16 reserved; 
+}; + +struct iwl_targ_addr { + struct in6_addr addr; + __le32 config_num; +}; + +struct iwl_proto_offload_cmd_v3_small { + struct iwl_proto_offload_cmd_common common; + __le32 num_valid_ipv6_addrs; + struct iwl_targ_addr targ_addrs[4]; + struct iwl_ns_config ns_config[2]; +}; + +struct iwl_proto_offload_cmd_v4 { + __le32 sta_id; + struct iwl_proto_offload_cmd_common common; + __le32 num_valid_ipv6_addrs; + struct iwl_targ_addr targ_addrs[12]; + struct iwl_ns_config ns_config[4]; +}; + +enum csi_seg_len { + HAL_CSI_SEG_4K = 0, + HAL_CSI_SEG_8K = 1, + HAL_CSI_SEG_11K = 2, +}; + +struct cfg_mumimo_para { + u8 sounding_sts[6]; + u16 grouping_bitmap; + u8 mu_tx_en; + u32 given_gid_tab[2]; + u32 given_user_pos[4]; +}; + +struct mu_bfer_init_para { + u16 paid; + u16 csi_para; + u16 my_aid; + enum csi_seg_len csi_length_sel; + u8 bfer_address[6]; +}; + +struct usb_descriptor_header { + __u8 bLength; + __u8 bDescriptorType; +}; + +struct usb_dev_cap_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +enum { + US_FL_SINGLE_LUN = 1, + US_FL_NEED_OVERRIDE = 2, + US_FL_SCM_MULT_TARG = 4, + US_FL_FIX_INQUIRY = 8, + US_FL_FIX_CAPACITY = 16, + US_FL_IGNORE_RESIDUE = 32, + US_FL_BULK32 = 64, + US_FL_NOT_LOCKABLE = 128, + US_FL_GO_SLOW = 256, + US_FL_NO_WP_DETECT = 512, + US_FL_MAX_SECTORS_64 = 1024, + US_FL_IGNORE_DEVICE = 2048, + US_FL_CAPACITY_HEURISTICS = 4096, + US_FL_MAX_SECTORS_MIN = 8192, + US_FL_BULK_IGNORE_TAG = 16384, + US_FL_SANE_SENSE = 32768, + US_FL_CAPACITY_OK = 65536, + US_FL_BAD_SENSE = 131072, + US_FL_NO_READ_DISC_INFO = 262144, + US_FL_NO_READ_CAPACITY_16 = 524288, + US_FL_INITIAL_READ10 = 1048576, + US_FL_WRITE_CACHE = 2097152, + US_FL_NEEDS_CAP16 = 4194304, + US_FL_IGNORE_UAS = 8388608, + US_FL_BROKEN_FUA = 16777216, + US_FL_NO_ATA_1X = 33554432, + US_FL_NO_REPORT_OPCODES = 67108864, + US_FL_MAX_SECTORS_240 = 134217728, + US_FL_NO_REPORT_LUNS = 268435456, + US_FL_ALWAYS_SYNC = 536870912, + US_FL_NO_SAME = 1073741824, + US_FL_SENSE_AFTER_SYNC = 2147483648, +}; + +struct pps_kinfo { + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; + long: 32; +}; + +struct pps_fdata { + struct pps_kinfo info; + struct pps_ktime timeout; +}; + +struct pps_bind_args { + int tsformat; + int edge; + int consumer; +}; + +struct dm_io_region { + struct block_device *bdev; + long: 32; + sector_t sector; + sector_t count; +}; + +struct page_list { + struct page_list *next; + struct page *page; +}; + +typedef void (*io_notify_fn)(long unsigned int, void *); + +enum dm_io_mem_type { + DM_IO_PAGE_LIST = 0, + DM_IO_BIO = 1, + DM_IO_VMA = 2, + DM_IO_KMEM = 3, +}; + +struct dm_io_memory { + enum dm_io_mem_type type; + unsigned int offset; + union { + struct page_list *pl; + struct bio *bio; + void *vma; + void *addr; + } ptr; +}; + +struct dm_io_notify { + io_notify_fn fn; + void *context; +}; + +struct dm_io_client; + +struct dm_io_request { + blk_opf_t bi_opf; + struct dm_io_memory mem; + struct dm_io_notify notify; + struct dm_io_client *client; +}; + +struct dm_kcopyd_throttle { + unsigned int throttle; + unsigned int num_io_jobs; + unsigned int io_period; + unsigned int total_period; + unsigned int last_jiffies; +}; + +typedef void (*dm_kcopyd_notify_fn)(int, long unsigned int, void *); + +struct dm_kcopyd_client { + struct page_list *pages; + unsigned int nr_reserved_pages; + unsigned int nr_free_pages; + unsigned int sub_job_size; + struct dm_io_client *io_client; + wait_queue_head_t destroyq; + 
mempool_t job_pool; + struct workqueue_struct *kcopyd_wq; + struct work_struct kcopyd_work; + struct dm_kcopyd_throttle *throttle; + atomic_t nr_jobs; + spinlock_t job_lock; + struct list_head callback_jobs; + struct list_head complete_jobs; + struct list_head io_jobs; + struct list_head pages_jobs; +}; + +struct kcopyd_job { + struct dm_kcopyd_client *kc; + struct list_head list; + unsigned int flags; + int read_err; + long unsigned int write_err; + enum req_op op; + long: 32; + struct dm_io_region source; + unsigned int num_dests; + long: 32; + struct dm_io_region dests[8]; + struct page_list *pages; + dm_kcopyd_notify_fn fn; + void *context; + struct mutex lock; + atomic_t sub_jobs; + long: 32; + sector_t progress; + sector_t write_offset; + struct kcopyd_job *master_job; + long: 32; +}; + +struct of_endpoint { + unsigned int port; + unsigned int id; + const struct device_node *local_node; +}; + +struct supplier_bindings { + struct device_node * (*parse_prop)(struct device_node *, const char *, int); + struct device_node * (*get_con_dev)(struct device_node *); + bool optional; + u8 fwlink_flags; +}; + +typedef size_t (*iov_step_f)(void *, size_t, size_t, void *, void *); + +typedef size_t (*iov_ustep_f)(void *, size_t, size_t, void *, void *); + +struct csum_state { + __wsum csum; + size_t off; +}; + +struct page_pool_stats { + struct page_pool_alloc_stats alloc_stats; + struct page_pool_recycle_stats recycle_stats; +}; + +typedef int (*pp_nl_fill_cb)(struct sk_buff *, const struct page_pool *, const struct genl_info *); + +struct page_pool_dump_cb { + long unsigned int ifindex; + u32 pp_id; +}; + +struct bpf_dummy_ops_state { + int val; +}; + +struct bpf_dummy_ops { + int (*test_1)(struct bpf_dummy_ops_state *); + int (*test_2)(struct bpf_dummy_ops_state *, int, short unsigned int, char, long unsigned int); + int (*test_sleepable)(struct bpf_dummy_ops_state *); +}; + +typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *, ...); + +struct bpf_dummy_ops_test_args { + u64 args[12]; + struct bpf_dummy_ops_state state; + long: 32; +}; + +struct bpf_struct_ops_bpf_dummy_ops { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct bpf_dummy_ops data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum { + ETHTOOL_A_STATS_GRP_UNSPEC = 0, + ETHTOOL_A_STATS_GRP_PAD = 1, + ETHTOOL_A_STATS_GRP_ID = 2, + ETHTOOL_A_STATS_GRP_SS_ID = 3, + ETHTOOL_A_STATS_GRP_STAT = 4, + ETHTOOL_A_STATS_GRP_HIST_RX = 5, + ETHTOOL_A_STATS_GRP_HIST_TX = 6, + ETHTOOL_A_STATS_GRP_HIST_BKT_LOW = 7, + ETHTOOL_A_STATS_GRP_HIST_BKT_HI = 8, + ETHTOOL_A_STATS_GRP_HIST_VAL = 9, + __ETHTOOL_A_STATS_GRP_CNT = 10, + ETHTOOL_A_STATS_GRP_MAX = 9, +}; + +struct stats_req_info { + struct ethnl_req_info base; + long unsigned int stat_mask[1]; + enum ethtool_mac_stats_src src; +}; + +struct stats_reply_data { + struct ethnl_reply_data base; + long: 32; + union { + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + }; + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + } stats; + }; + const struct 
ethtool_rmon_hist_range *rmon_ranges; + long: 32; +}; + +struct nf_nat_hook { + int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *); + void (*decode_session)(struct sk_buff *, struct flowi *); + void (*remove_nat_bysrc)(struct nf_conn *); +}; + +struct nf_conntrack_tuple_mask { + struct { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + } src; +}; + +struct nf_conntrack_expect { + struct hlist_node lnode; + struct hlist_node hnode; + struct nf_conntrack_tuple tuple; + struct nf_conntrack_tuple_mask mask; + refcount_t use; + unsigned int flags; + unsigned int class; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); + struct nf_conntrack_helper *helper; + struct nf_conn *master; + struct timer_list timeout; + union nf_inet_addr saved_addr; + union nf_conntrack_man_proto saved_proto; + enum ip_conntrack_dir dir; + struct callback_head rcu; +}; + +struct nf_ct_helper_expectfn { + struct list_head head; + const char *name; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); +}; + +struct nf_ct_seqadj { + u32 correction_pos; + s32 offset_before; + s32 offset_after; +}; + +struct nf_conn_seqadj { + struct nf_ct_seqadj seq[2]; +}; + +struct nf_conn_counter { + atomic64_t packets; + atomic64_t bytes; +}; + +struct nf_conn_acct { + struct nf_conn_counter counter[2]; +}; + +struct nf_conn_tstamp { + u_int64_t start; + u_int64_t stop; +}; + +struct nf_conn_synproxy { + u32 isn; + u32 its; + u32 tsoff; +}; + +enum cntl_msg_types { + IPCTNL_MSG_CT_NEW = 0, + IPCTNL_MSG_CT_GET = 1, + IPCTNL_MSG_CT_DELETE = 2, + IPCTNL_MSG_CT_GET_CTRZERO = 3, + IPCTNL_MSG_CT_GET_STATS_CPU = 4, + IPCTNL_MSG_CT_GET_STATS = 5, + IPCTNL_MSG_CT_GET_DYING = 6, + IPCTNL_MSG_CT_GET_UNCONFIRMED = 7, + IPCTNL_MSG_MAX = 8, +}; + +enum ctnl_exp_msg_types { + IPCTNL_MSG_EXP_NEW = 0, + IPCTNL_MSG_EXP_GET = 1, + IPCTNL_MSG_EXP_DELETE = 2, + IPCTNL_MSG_EXP_GET_STATS_CPU = 3, + IPCTNL_MSG_EXP_MAX = 4, +}; + +enum ctattr_type { + CTA_UNSPEC = 0, + CTA_TUPLE_ORIG = 1, + CTA_TUPLE_REPLY = 2, + CTA_STATUS = 3, + CTA_PROTOINFO = 4, + CTA_HELP = 5, + CTA_NAT_SRC = 6, + CTA_TIMEOUT = 7, + CTA_MARK = 8, + CTA_COUNTERS_ORIG = 9, + CTA_COUNTERS_REPLY = 10, + CTA_USE = 11, + CTA_ID = 12, + CTA_NAT_DST = 13, + CTA_TUPLE_MASTER = 14, + CTA_SEQ_ADJ_ORIG = 15, + CTA_NAT_SEQ_ADJ_ORIG = 15, + CTA_SEQ_ADJ_REPLY = 16, + CTA_NAT_SEQ_ADJ_REPLY = 16, + CTA_SECMARK = 17, + CTA_ZONE = 18, + CTA_SECCTX = 19, + CTA_TIMESTAMP = 20, + CTA_MARK_MASK = 21, + CTA_LABELS = 22, + CTA_LABELS_MASK = 23, + CTA_SYNPROXY = 24, + CTA_FILTER = 25, + CTA_STATUS_MASK = 26, + __CTA_MAX = 27, +}; + +enum ctattr_tuple { + CTA_TUPLE_UNSPEC = 0, + CTA_TUPLE_IP = 1, + CTA_TUPLE_PROTO = 2, + CTA_TUPLE_ZONE = 3, + __CTA_TUPLE_MAX = 4, +}; + +enum ctattr_ip { + CTA_IP_UNSPEC = 0, + CTA_IP_V4_SRC = 1, + CTA_IP_V4_DST = 2, + CTA_IP_V6_SRC = 3, + CTA_IP_V6_DST = 4, + __CTA_IP_MAX = 5, +}; + +enum ctattr_protoinfo { + CTA_PROTOINFO_UNSPEC = 0, + CTA_PROTOINFO_TCP = 1, + CTA_PROTOINFO_DCCP = 2, + CTA_PROTOINFO_SCTP = 3, + __CTA_PROTOINFO_MAX = 4, +}; + +enum ctattr_counters { + CTA_COUNTERS_UNSPEC = 0, + CTA_COUNTERS_PACKETS = 1, + CTA_COUNTERS_BYTES = 2, + CTA_COUNTERS32_PACKETS = 3, + CTA_COUNTERS32_BYTES = 4, + CTA_COUNTERS_PAD = 5, + __CTA_COUNTERS_MAX = 6, +}; + +enum ctattr_tstamp { + CTA_TIMESTAMP_UNSPEC = 0, + CTA_TIMESTAMP_START = 1, + CTA_TIMESTAMP_STOP = 2, + CTA_TIMESTAMP_PAD = 3, + __CTA_TIMESTAMP_MAX = 4, +}; + +enum ctattr_seqadj { + CTA_SEQADJ_UNSPEC = 0, + CTA_SEQADJ_CORRECTION_POS = 1, + 
CTA_SEQADJ_OFFSET_BEFORE = 2, + CTA_SEQADJ_OFFSET_AFTER = 3, + __CTA_SEQADJ_MAX = 4, +}; + +enum ctattr_synproxy { + CTA_SYNPROXY_UNSPEC = 0, + CTA_SYNPROXY_ISN = 1, + CTA_SYNPROXY_ITS = 2, + CTA_SYNPROXY_TSOFF = 3, + __CTA_SYNPROXY_MAX = 4, +}; + +enum ctattr_expect { + CTA_EXPECT_UNSPEC = 0, + CTA_EXPECT_MASTER = 1, + CTA_EXPECT_TUPLE = 2, + CTA_EXPECT_MASK = 3, + CTA_EXPECT_TIMEOUT = 4, + CTA_EXPECT_ID = 5, + CTA_EXPECT_HELP_NAME = 6, + CTA_EXPECT_ZONE = 7, + CTA_EXPECT_FLAGS = 8, + CTA_EXPECT_CLASS = 9, + CTA_EXPECT_NAT = 10, + CTA_EXPECT_FN = 11, + __CTA_EXPECT_MAX = 12, +}; + +enum ctattr_expect_nat { + CTA_EXPECT_NAT_UNSPEC = 0, + CTA_EXPECT_NAT_DIR = 1, + CTA_EXPECT_NAT_TUPLE = 2, + __CTA_EXPECT_NAT_MAX = 3, +}; + +enum ctattr_stats_cpu { + CTA_STATS_UNSPEC = 0, + CTA_STATS_SEARCHED = 1, + CTA_STATS_FOUND = 2, + CTA_STATS_NEW = 3, + CTA_STATS_INVALID = 4, + CTA_STATS_IGNORE = 5, + CTA_STATS_DELETE = 6, + CTA_STATS_DELETE_LIST = 7, + CTA_STATS_INSERT = 8, + CTA_STATS_INSERT_FAILED = 9, + CTA_STATS_DROP = 10, + CTA_STATS_EARLY_DROP = 11, + CTA_STATS_ERROR = 12, + CTA_STATS_SEARCH_RESTART = 13, + CTA_STATS_CLASH_RESOLVE = 14, + CTA_STATS_CHAIN_TOOLONG = 15, + __CTA_STATS_MAX = 16, +}; + +enum ctattr_stats_global { + CTA_STATS_GLOBAL_UNSPEC = 0, + CTA_STATS_GLOBAL_ENTRIES = 1, + CTA_STATS_GLOBAL_MAX_ENTRIES = 2, + __CTA_STATS_GLOBAL_MAX = 3, +}; + +enum ctattr_expect_stats { + CTA_STATS_EXP_UNSPEC = 0, + CTA_STATS_EXP_NEW = 1, + CTA_STATS_EXP_CREATE = 2, + CTA_STATS_EXP_DELETE = 3, + __CTA_STATS_EXP_MAX = 4, +}; + +enum ctattr_filter { + CTA_FILTER_UNSPEC = 0, + CTA_FILTER_ORIG_FLAGS = 1, + CTA_FILTER_REPLY_FLAGS = 2, + __CTA_FILTER_MAX = 3, +}; + +struct ctnetlink_list_dump_ctx { + struct nf_conn *last; + unsigned int cpu; + bool done; +}; + +struct ctnetlink_filter_u32 { + u32 val; + u32 mask; +}; + +struct ctnetlink_filter { + u8 family; + bool zone_filter; + u_int32_t orig_flags; + u_int32_t reply_flags; + struct nf_conntrack_tuple orig; + struct nf_conntrack_tuple reply; + struct nf_conntrack_zone zone; + struct ctnetlink_filter_u32 mark; + struct ctnetlink_filter_u32 status; +}; + +enum nft_exthdr_flags { + NFT_EXTHDR_F_PRESENT = 1, +}; + +enum nft_exthdr_op { + NFT_EXTHDR_OP_IPV6 = 0, + NFT_EXTHDR_OP_TCPOPT = 1, + NFT_EXTHDR_OP_IPV4 = 2, + NFT_EXTHDR_OP_SCTP = 3, + NFT_EXTHDR_OP_DCCP = 4, + __NFT_EXTHDR_OP_MAX = 5, +}; + +enum nft_exthdr_attributes { + NFTA_EXTHDR_UNSPEC = 0, + NFTA_EXTHDR_DREG = 1, + NFTA_EXTHDR_TYPE = 2, + NFTA_EXTHDR_OFFSET = 3, + NFTA_EXTHDR_LEN = 4, + NFTA_EXTHDR_FLAGS = 5, + NFTA_EXTHDR_OP = 6, + NFTA_EXTHDR_SREG = 7, + __NFTA_EXTHDR_MAX = 8, +}; + +struct dccp_hdr { + __be16 dccph_sport; + __be16 dccph_dport; + __u8 dccph_doff; + __u8 dccph_cscov: 4; + __u8 dccph_ccval: 4; + __sum16 dccph_checksum; + __u8 dccph_x: 1; + __u8 dccph_type: 4; + __u8 dccph_reserved: 3; + __u8 dccph_seq2; + __be16 dccph_seq; +}; + +enum dccp_pkt_type { + DCCP_PKT_REQUEST = 0, + DCCP_PKT_RESPONSE = 1, + DCCP_PKT_DATA = 2, + DCCP_PKT_ACK = 3, + DCCP_PKT_DATAACK = 4, + DCCP_PKT_CLOSEREQ = 5, + DCCP_PKT_CLOSE = 6, + DCCP_PKT_RESET = 7, + DCCP_PKT_SYNC = 8, + DCCP_PKT_SYNCACK = 9, + DCCP_PKT_INVALID = 10, +}; + +enum { + DCCPO_PADDING = 0, + DCCPO_MANDATORY = 1, + DCCPO_MIN_RESERVED = 3, + DCCPO_MAX_RESERVED = 31, + DCCPO_CHANGE_L = 32, + DCCPO_CONFIRM_L = 33, + DCCPO_CHANGE_R = 34, + DCCPO_CONFIRM_R = 35, + DCCPO_NDP_COUNT = 37, + DCCPO_ACK_VECTOR_0 = 38, + DCCPO_ACK_VECTOR_1 = 39, + DCCPO_TIMESTAMP = 41, + DCCPO_TIMESTAMP_ECHO = 42, + DCCPO_ELAPSED_TIME = 43, + DCCPO_MAX = 45, + 
DCCPO_MIN_RX_CCID_SPECIFIC = 128, + DCCPO_MAX_RX_CCID_SPECIFIC = 191, + DCCPO_MIN_TX_CCID_SPECIFIC = 192, + DCCPO_MAX_TX_CCID_SPECIFIC = 255, +}; + +struct sctp_chunkhdr { + __u8 type; + __u8 flags; + __be16 length; +}; + +struct nft_exthdr { + u8 type; + u8 offset; + u8 len; + u8 op; + u8 dreg; + u8 sreg; + u8 flags; +}; + +enum ip_defrag_users { + IP_DEFRAG_LOCAL_DELIVER = 0, + IP_DEFRAG_CALL_RA_CHAIN = 1, + IP_DEFRAG_CONNTRACK_IN = 2, + __IP_DEFRAG_CONNTRACK_IN_END = 65537, + IP_DEFRAG_CONNTRACK_OUT = 65538, + __IP_DEFRAG_CONNTRACK_OUT_END = 131073, + IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, + __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, + IP_DEFRAG_VS_IN = 196610, + IP_DEFRAG_VS_OUT = 196611, + IP_DEFRAG_VS_FWD = 196612, + IP_DEFRAG_AF_PACKET = 196613, + IP_DEFRAG_MACVLAN = 196614, +}; + +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; + union { + u32 mtu; + } ext; +}; + +struct fib_config { + u8 fc_dst_len; + dscp_t fc_dscp; + u8 fc_protocol; + u8 fc_scope; + u8 fc_type; + u8 fc_gw_family; + u32 fc_table; + __be32 fc_dst; + union { + __be32 fc_gw4; + struct in6_addr fc_gw6; + }; + int fc_oif; + u32 fc_flags; + u32 fc_priority; + __be32 fc_prefsrc; + u32 fc_nh_id; + struct nlattr *fc_mx; + struct rtnexthop *fc_mp; + int fc_mx_len; + int fc_mp_len; + u32 fc_flow; + u32 fc_nlflags; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; +}; + +struct fib_result_nl { + __be32 fl_addr; + u32 fl_mark; + unsigned char fl_tos; + unsigned char fl_scope; + unsigned char tb_id_in; + unsigned char tb_id; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + int err; +}; + +struct netdev_nested_priv { + unsigned char flags; + void *data; +}; + +struct nd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + __u8 opt[0]; +}; + +struct sta_link_alloc { + struct link_sta_info info; + struct ieee80211_link_sta sta; + struct callback_head callback_head; +}; + +struct undef_hook { + struct list_head node; + u32 instr_mask; + u32 instr_val; + u32 cpsr_mask; + u32 cpsr_val; + int (*fn)(struct pt_regs *, unsigned int); +}; + +typedef struct { + void *lock; +} class_rcu_tasks_trace_t; + +enum { + HW_BREAKPOINT_LEN_1 = 1, + HW_BREAKPOINT_LEN_2 = 2, + HW_BREAKPOINT_LEN_3 = 3, + HW_BREAKPOINT_LEN_4 = 4, + HW_BREAKPOINT_LEN_5 = 5, + HW_BREAKPOINT_LEN_6 = 6, + HW_BREAKPOINT_LEN_7 = 7, + HW_BREAKPOINT_LEN_8 = 8, +}; + +struct trace_event_raw_sys_enter { + struct trace_entry ent; + long int id; + long unsigned int args[6]; + char __data[0]; +}; + +struct trace_event_raw_sys_exit { + struct trace_entry ent; + long int id; + long int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_sys_enter {}; + +struct trace_event_data_offsets_sys_exit {}; + +typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long int); + +typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long int); + +struct pt_regs_offset { + const char *name; + int offset; +}; + +enum arm_regset { + REGSET_GPR = 0, + REGSET_FPR = 1, +}; + +enum ptrace_syscall_dir { + PTRACE_SYSCALL_ENTER = 0, + PTRACE_SYSCALL_EXIT = 1, +}; + +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short int contexts; + bool contexts_maxed; +}; + +struct frame_tail { + struct frame_tail *fp; + long unsigned int sp; + long unsigned int lr; +}; + +typedef void (*harden_branch_predictor_fn_t)(); + +struct fsr_info { + int (*fn)(long unsigned int, unsigned int, struct pt_regs *); + int sig; + int code; + const char 
*name; +}; + +struct page_change_data { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +struct cpu_cache_fns { + void (*flush_icache_all)(); + void (*flush_kern_all)(); + void (*flush_kern_louis)(); + void (*flush_user_all)(); + void (*flush_user_range)(long unsigned int, long unsigned int, unsigned int); + void (*coherent_kern_range)(long unsigned int, long unsigned int); + int (*coherent_user_range)(long unsigned int, long unsigned int); + void (*flush_kern_dcache_area)(void *, size_t); + void (*dma_map_area)(const void *, size_t, int); + void (*dma_unmap_area)(const void *, size_t, int); + void (*dma_flush_range)(const void *, const void *); +}; + +struct proc_info_list { + unsigned int cpu_val; + unsigned int cpu_mask; + long unsigned int __cpu_mm_mmu_flags; + long unsigned int __cpu_io_mmu_flags; + long unsigned int __cpu_flush; + const char *arch_name; + const char *elf_name; + unsigned int elf_hwcap; + const char *cpu_name; + struct processor *proc; + struct cpu_tlb_fns *tlb; + struct cpu_user_fns *user; + struct cpu_cache_fns *cache; +}; + +enum { + KERNEL_PARAM_OPS_FL_NOARG = 1, +}; + +struct param_attribute { + struct module_attribute mattr; + const struct kernel_param *param; +}; + +struct module_param_attrs { + unsigned int num; + struct attribute_group grp; + struct param_attribute attrs[0]; +}; + +struct kmalloced_param { + struct list_head list; + char val[0]; +}; + +struct umd_info { + const char *driver_name; + struct file *pipe_to_umh; + struct file *pipe_from_umh; + struct path wd; + struct pid *tgid; +}; + +struct irq_domain_chip_generic_info { + const char *name; + irq_flow_handler_t handler; + unsigned int irqs_per_chip; + unsigned int num_ct; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + int (*init)(struct irq_chip_generic *); + void (*exit)(struct irq_chip_generic *); +}; + +struct irq_domain_info { + struct fwnode_handle *fwnode; + unsigned int domain_flags; + unsigned int size; + irq_hw_number_t hwirq_max; + int direct_max; + unsigned int hwirq_base; + unsigned int virq_base; + enum irq_domain_bus_token bus_token; + const char *name_suffix; + const struct irq_domain_ops *ops; + void *host_data; + struct irq_domain *parent; + struct irq_domain_chip_generic_info *dgc_info; + int (*init)(struct irq_domain *); + void (*exit)(struct irq_domain *); +}; + +struct irq_devres { + unsigned int irq; + void *dev_id; +}; + +struct irq_desc_devres { + unsigned int from; + unsigned int cnt; +}; + +struct ce_unbind { + struct clock_event_device *ce; + int res; +}; + +typedef int (*tracing_map_cmp_fn_t)(void *, void *); + +struct tracing_map_field { + tracing_map_cmp_fn_t cmp_fn; + long: 32; + union { + atomic64_t sum; + unsigned int offset; + }; +}; + +struct tracing_map; + +struct tracing_map_elt { + struct tracing_map *map; + struct tracing_map_field *fields; + atomic64_t *vars; + bool *var_set; + void *key; + void *private_data; +}; + +struct tracing_map_sort_key { + unsigned int field_idx; + bool descending; +}; + +struct tracing_map_array; + +struct tracing_map_ops; + +struct tracing_map { + unsigned int key_size; + unsigned int map_bits; + unsigned int map_size; + unsigned int max_elts; + atomic_t next_elt; + struct tracing_map_array *elts; + struct tracing_map_array *map; + const struct tracing_map_ops *ops; + void *private_data; + long: 32; + struct tracing_map_field fields[6]; + unsigned int n_fields; + int key_idx[3]; + unsigned int n_keys; + struct tracing_map_sort_key sort_key; + unsigned int n_vars; + 
atomic64_t hits; + atomic64_t drops; +}; + +struct tracing_map_entry { + u32 key; + struct tracing_map_elt *val; +}; + +struct tracing_map_sort_entry { + void *key; + struct tracing_map_elt *elt; + bool elt_copied; + bool dup; +}; + +struct tracing_map_array { + unsigned int entries_per_page; + unsigned int entry_size_shift; + unsigned int entry_shift; + unsigned int entry_mask; + unsigned int n_pages; + void **pages; +}; + +struct tracing_map_ops { + int (*elt_alloc)(struct tracing_map_elt *); + void (*elt_free)(struct tracing_map_elt *); + void (*elt_clear)(struct tracing_map_elt *); + void (*elt_init)(struct tracing_map_elt *); +}; + +enum { + TRACE_NOP_OPT_ACCEPT = 1, + TRACE_NOP_OPT_REFUSE = 2, +}; + +enum { + BTF_TRACING_TYPE_TASK = 0, + BTF_TRACING_TYPE_FILE = 1, + BTF_TRACING_TYPE_VMA = 2, + MAX_BTF_TRACING_TYPE = 3, +}; + +typedef u64 (*btf_bpf_task_storage_get_recur)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_get)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_delete_recur)(struct bpf_map *, struct task_struct *); + +typedef u64 (*btf_bpf_task_storage_delete)(struct bpf_map *, struct task_struct *); + +struct reuseport_array { + struct bpf_map map; + struct sock *ptrs[0]; +}; + +enum tlb_flush_reason { + TLB_FLUSH_ON_TASK_SWITCH = 0, + TLB_REMOTE_SHOOTDOWN = 1, + TLB_LOCAL_SHOOTDOWN = 2, + TLB_LOCAL_MM_SHOOTDOWN = 3, + TLB_REMOTE_SEND_IPI = 4, + NR_TLB_FLUSH_REASONS = 5, +}; + +struct trace_event_raw_tlb_flush { + struct trace_entry ent; + int reason; + long unsigned int pages; + char __data[0]; +}; + +struct trace_event_data_offsets_tlb_flush {}; + +typedef void (*btf_trace_tlb_flush)(void *, int, long unsigned int); + +struct trace_event_raw_mm_migrate_pages { + struct trace_entry ent; + long unsigned int succeeded; + long unsigned int failed; + long unsigned int thp_succeeded; + long unsigned int thp_failed; + long unsigned int thp_split; + long unsigned int large_folio_split; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_mm_migrate_pages_start { + struct trace_entry ent; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_migration_pte { + struct trace_entry ent; + long unsigned int addr; + long unsigned int pte; + int order; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_migrate_pages {}; + +struct trace_event_data_offsets_mm_migrate_pages_start {}; + +struct trace_event_data_offsets_migration_pte {}; + +typedef void (*btf_trace_mm_migrate_pages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, enum migrate_mode, int); + +typedef void (*btf_trace_mm_migrate_pages_start)(void *, enum migrate_mode, int); + +typedef void (*btf_trace_set_migration_pte)(void *, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_remove_migration_pte)(void *, long unsigned int, long unsigned int, int); + +struct folio_referenced_arg { + int mapcount; + int referenced; + long unsigned int vm_flags; + struct mem_cgroup *memcg; +}; + +struct zpool { + struct zpool_driver *driver; + void *pool; +}; + +enum { + EXT4_MF_MNTDIR_SAMPLED = 0, + EXT4_MF_FC_INELIGIBLE = 1, +}; + +struct ext4_dir_entry { + __le32 inode; + __le16 rec_len; + __le16 name_len; + char name[255]; +}; + +struct ext4_dir_entry_tail { + __le32 det_reserved_zero1; + __le16 det_rec_len; + __u8 det_reserved_zero2; + __u8 det_reserved_ft; + __le32 
det_checksum; +}; + +typedef enum { + EITHER = 0, + INDEX = 1, + DIRENT = 2, + DIRENT_HTREE = 3, +} dirblock_type_t; + +struct fake_dirent { + __le32 inode; + __le16 rec_len; + u8 name_len; + u8 file_type; +}; + +struct dx_countlimit { + __le16 limit; + __le16 count; +}; + +struct dx_entry { + __le32 hash; + __le32 block; +}; + +struct dx_root_info { + __le32 reserved_zero; + u8 hash_version; + u8 info_length; + u8 indirect_levels; + u8 unused_flags; +}; + +struct dx_root { + struct fake_dirent dot; + char dot_name[4]; + struct fake_dirent dotdot; + char dotdot_name[4]; + struct dx_root_info info; + struct dx_entry entries[0]; +}; + +struct dx_node { + struct fake_dirent fake; + struct dx_entry entries[0]; +}; + +struct dx_frame { + struct buffer_head *bh; + struct dx_entry *entries; + struct dx_entry *at; +}; + +struct dx_map_entry { + u32 hash; + u16 offs; + u16 size; +}; + +struct dx_tail { + u32 dt_reserved; + __le32 dt_checksum; +}; + +struct ext4_renament { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool is_dir; + int dir_nlink_delta; + struct buffer_head *bh; + struct ext4_dir_entry_2 *de; + int inlined; + struct buffer_head *dir_bh; + struct ext4_dir_entry_2 *parent_de; + int dir_inlined; +}; + +struct hash_prefix { + const char *name; + const u8 *data; + size_t size; +}; + +struct rsassa_pkcs1_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct rsassa_pkcs1_inst_ctx { + struct crypto_akcipher_spawn spawn; + const struct hash_prefix *hash_prefix; +}; + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC = 0, + CRYPTO_AUTHENC_KEYA_PARAM = 1, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +struct authenc_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; + unsigned int reqoff; +}; + +struct crypto_authenc_ctx { + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; +}; + +struct blkg_rwstat_sample { + u64 cnt[5]; +}; + +typedef __kernel_rwf_t rwf_t; + +struct iov_iter_state { + size_t iov_offset; + size_t count; + long unsigned int nr_segs; +}; + +struct wait_page_key { + struct folio *folio; + int bit_nr; + int page_match; +}; + +struct io_async_rw { + size_t bytes_done; + long: 32; + struct iov_iter iter; + struct iov_iter_state iter_state; + struct iovec fast_iov; + struct iovec *free_iovec; + int free_iov_nr; + struct wait_page_queue wpq; +}; + +struct io_rw { + struct kiocb kiocb; + u64 addr; + u32 len; + rwf_t flags; +}; + +typedef void (*swap_r_func_t)(void *, void *, int, const void *); + +typedef int (*cmp_r_func_t)(const void *, const void *, const void *); + +struct wrapper { + cmp_func_t cmp; + swap_func_t swap; +}; + +typedef struct { + FSE_CTable CTable[59]; + U32 scratchBuffer[41]; + unsigned int count[13]; + S16 norm[13]; +} HUF_CompressWeightsWksp; + +typedef struct { + HUF_CompressWeightsWksp wksp; + BYTE bitsToWeight[13]; + BYTE huffWeight[255]; +} HUF_WriteCTableWksp; + +struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +}; + +typedef struct nodeElt_s nodeElt; + +typedef struct { + U16 base; + U16 curr; +} rankPos; + +typedef nodeElt huffNodeTable[512]; + +typedef struct { + huffNodeTable huffNodeTbl; + rankPos rankPosition[192]; +} HUF_buildCTable_wksp_tables; + +typedef struct { + size_t bitContainer[2]; + size_t bitPos[2]; + BYTE *startPtr; + BYTE *ptr; + BYTE *endPtr; +} HUF_CStream_t; + +typedef 
enum { + HUF_singleStream = 0, + HUF_fourStreams = 1, +} HUF_nbStreams_e; + +typedef struct { + unsigned int count[256]; + HUF_CElt CTable[257]; + union { + HUF_buildCTable_wksp_tables buildCTable_wksp; + HUF_WriteCTableWksp writeCTable_wksp; + U32 hist_wksp[1024]; + } wksps; +} HUF_compress_tables_t; + +struct syscall_info { + __u64 sp; + struct seccomp_data data; +}; + +struct pcim_iomap_devres { + void *table[6]; +}; + +struct pcim_intx_devres { + int orig_intx; +}; + +enum pcim_addr_devres_type { + PCIM_ADDR_DEVRES_TYPE_INVALID = 0, + PCIM_ADDR_DEVRES_TYPE_REGION = 1, + PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING = 2, + PCIM_ADDR_DEVRES_TYPE_MAPPING = 3, +}; + +struct pcim_addr_devres { + enum pcim_addr_devres_type type; + void *baseaddr; + long unsigned int offset; + long unsigned int len; + int bar; +}; + +struct console_font_op { + unsigned int op; + unsigned int flags; + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +enum translation_map { + LAT1_MAP = 0, + GRAF_MAP = 1, + IBMPC_MAP = 2, + USER_MAP = 3, + FIRST_MAP = 0, + LAST_MAP = 3, +}; + +struct tiocl_selection { + short unsigned int xs; + short unsigned int ys; + short unsigned int xe; + short unsigned int ye; + short unsigned int sel_mode; +}; + +struct con_driver { + const struct consw *con; + const char *desc; + struct device *dev; + int node; + int first; + int last; + int flag; +}; + +enum { + blank_off = 0, + blank_normal_wait = 1, + blank_vesa_wait = 2, +}; + +enum { + EPecma = 0, + EPdec = 1, + EPeq = 2, + EPgt = 3, + EPlt = 4, +}; + +enum CSI_J { + CSI_J_CURSOR_TO_END = 0, + CSI_J_START_TO_CURSOR = 1, + CSI_J_VISIBLE = 2, + CSI_J_FULL = 3, +}; + +enum { + CSI_K_CURSOR_TO_LINEEND = 0, + CSI_K_LINESTART_TO_CURSOR = 1, + CSI_K_LINE = 2, +}; + +struct rgb { + u8 r; + u8 g; + u8 b; +}; + +enum { + CSI_m_DEFAULT = 0, + CSI_m_BOLD = 1, + CSI_m_HALF_BRIGHT = 2, + CSI_m_ITALIC = 3, + CSI_m_UNDERLINE = 4, + CSI_m_BLINK = 5, + CSI_m_REVERSE = 7, + CSI_m_PRI_FONT = 10, + CSI_m_ALT_FONT1 = 11, + CSI_m_ALT_FONT2 = 12, + CSI_m_DOUBLE_UNDERLINE = 21, + CSI_m_NORMAL_INTENSITY = 22, + CSI_m_NO_ITALIC = 23, + CSI_m_NO_UNDERLINE = 24, + CSI_m_NO_BLINK = 25, + CSI_m_NO_REVERSE = 27, + CSI_m_FG_COLOR_BEG = 30, + CSI_m_FG_COLOR_END = 37, + CSI_m_FG_COLOR = 38, + CSI_m_DEFAULT_FG_COLOR = 39, + CSI_m_BG_COLOR_BEG = 40, + CSI_m_BG_COLOR_END = 47, + CSI_m_BG_COLOR = 48, + CSI_m_DEFAULT_BG_COLOR = 49, + CSI_m_BRIGHT_FG_COLOR_BEG = 90, + CSI_m_BRIGHT_FG_COLOR_END = 97, + CSI_m_BRIGHT_FG_COLOR_OFF = 60, + CSI_m_BRIGHT_BG_COLOR_BEG = 100, + CSI_m_BRIGHT_BG_COLOR_END = 107, + CSI_m_BRIGHT_BG_COLOR_OFF = 60, +}; + +enum { + CSI_DEC_hl_CURSOR_KEYS = 1, + CSI_DEC_hl_132_COLUMNS = 3, + CSI_DEC_hl_REVERSE_VIDEO = 5, + CSI_DEC_hl_ORIGIN_MODE = 6, + CSI_DEC_hl_AUTOWRAP = 7, + CSI_DEC_hl_AUTOREPEAT = 8, + CSI_DEC_hl_MOUSE_X10 = 9, + CSI_DEC_hl_SHOW_CURSOR = 25, + CSI_DEC_hl_MOUSE_VT200 = 1000, +}; + +enum { + CSI_hl_DISPLAY_CTRL = 3, + CSI_hl_INSERT = 4, + CSI_hl_AUTO_NL = 20, +}; + +enum CSI_right_square_bracket { + CSI_RSB_COLOR_FOR_UNDERLINE = 1, + CSI_RSB_COLOR_FOR_HALF_BRIGHT = 2, + CSI_RSB_MAKE_CUR_COLOR_DEFAULT = 8, + CSI_RSB_BLANKING_INTERVAL = 9, + CSI_RSB_BELL_FREQUENCY = 10, + CSI_RSB_BELL_DURATION = 11, + CSI_RSB_BRING_CONSOLE_TO_FRONT = 12, + CSI_RSB_UNBLANK = 13, + CSI_RSB_VESA_OFF_INTERVAL = 14, + CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT = 15, + CSI_RSB_CURSOR_BLINK_INTERVAL = 16, +}; + +enum vc_ctl_state { + ESnormal = 0, + ESesc = 1, + ESsquare = 2, + ESgetpars = 3, + ESfunckey = 4, + EShash = 5, + 
ESsetG0 = 6, + ESsetG1 = 7, + ESpercent = 8, + EScsiignore = 9, + ESnonstd = 10, + ESpalette = 11, + ESosc = 12, + ESANSI_first = 12, + ESapc = 13, + ESpm = 14, + ESdcs = 15, + ESANSI_last = 15, +}; + +enum { + ASCII_NULL = 0, + ASCII_BELL = 7, + ASCII_BACKSPACE = 8, + ASCII_IGNORE_FIRST = 8, + ASCII_HTAB = 9, + ASCII_LINEFEED = 10, + ASCII_VTAB = 11, + ASCII_FORMFEED = 12, + ASCII_CAR_RET = 13, + ASCII_IGNORE_LAST = 13, + ASCII_SHIFTOUT = 14, + ASCII_SHIFTIN = 15, + ASCII_CANCEL = 24, + ASCII_SUBSTITUTE = 26, + ASCII_ESCAPE = 27, + ASCII_CSI_IGNORE_FIRST = 32, + ASCII_CSI_IGNORE_LAST = 63, + ASCII_DEL = 127, + ASCII_EXT_CSI = 155, +}; + +struct interval { + uint32_t first; + uint32_t last; +}; + +struct vc_draw_region { + long unsigned int from; + long unsigned int to; + int x; +}; + +struct devcd_entry { + struct device devcd_dev; + void *data; + size_t datalen; + struct mutex mutex; + bool delete_work; + struct module *owner; + ssize_t (*read)(char *, loff_t, size_t, void *, size_t); + void (*free)(void *); + struct delayed_work del_wk; + struct device *failing_dev; + long: 32; +}; + +typedef void (*async_func_t)(void *, async_cookie_t); + +struct async_domain { + struct list_head pending; + unsigned int registered: 1; +}; + +enum { + GENHD_FL_REMOVABLE = 1, + GENHD_FL_HIDDEN = 2, + GENHD_FL_NO_PART = 4, +}; + +enum blk_integrity_flags { + BLK_INTEGRITY_NOVERIFY = 1, + BLK_INTEGRITY_NOGENERATE = 2, + BLK_INTEGRITY_DEVICE_CAPABLE = 4, + BLK_INTEGRITY_REF_TAG = 8, + BLK_INTEGRITY_STACKED = 16, +}; + +enum { + NVME_REG_CAP = 0, + NVME_REG_VS = 8, + NVME_REG_INTMS = 12, + NVME_REG_INTMC = 16, + NVME_REG_CC = 20, + NVME_REG_CSTS = 28, + NVME_REG_NSSR = 32, + NVME_REG_AQA = 36, + NVME_REG_ASQ = 40, + NVME_REG_ACQ = 48, + NVME_REG_CMBLOC = 56, + NVME_REG_CMBSZ = 60, + NVME_REG_BPINFO = 64, + NVME_REG_BPRSEL = 68, + NVME_REG_BPMBL = 72, + NVME_REG_CMBMSC = 80, + NVME_REG_CRTO = 104, + NVME_REG_PMRCAP = 3584, + NVME_REG_PMRCTL = 3588, + NVME_REG_PMRSTS = 3592, + NVME_REG_PMREBS = 3596, + NVME_REG_PMRSWTP = 3600, + NVME_REG_DBS = 4096, +}; + +enum { + NVME_CC_ENABLE = 1, + NVME_CC_EN_SHIFT = 0, + NVME_CC_CSS_SHIFT = 4, + NVME_CC_MPS_SHIFT = 7, + NVME_CC_AMS_SHIFT = 11, + NVME_CC_SHN_SHIFT = 14, + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_NVM = 0, + NVME_CC_CSS_CSI = 96, + NVME_CC_CSS_MASK = 112, + NVME_CC_AMS_RR = 0, + NVME_CC_AMS_WRRU = 2048, + NVME_CC_AMS_VS = 14336, + NVME_CC_SHN_NONE = 0, + NVME_CC_SHN_NORMAL = 16384, + NVME_CC_SHN_ABRUPT = 32768, + NVME_CC_SHN_MASK = 49152, + NVME_CC_IOSQES = 393216, + NVME_CC_IOCQES = 4194304, + NVME_CC_CRIME = 16777216, +}; + +enum { + NVME_CSTS_RDY = 1, + NVME_CSTS_CFS = 2, + NVME_CSTS_NSSRO = 16, + NVME_CSTS_PP = 32, + NVME_CSTS_SHST_NORMAL = 0, + NVME_CSTS_SHST_OCCUR = 4, + NVME_CSTS_SHST_CMPLT = 8, + NVME_CSTS_SHST_MASK = 12, +}; + +enum { + NVME_CAP_CSS_NVM = 1, + NVME_CAP_CSS_CSI = 64, +}; + +enum { + NVME_CAP_CRMS_CRWMS = 576460752303423488ULL, + NVME_CAP_CRMS_CRIMS = 1152921504606846976ULL, +}; + +enum { + NVME_PS_FLAGS_MAX_POWER_SCALE = 1, + NVME_PS_FLAGS_NON_OP_STATE = 2, +}; + +enum nvme_ctrl_attr { + NVME_CTRL_ATTR_HID_128_BIT = 1, + NVME_CTRL_ATTR_TBKAS = 64, + NVME_CTRL_ATTR_ELBAS = 32768, +}; + +struct nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 cmic; + __u8 mdts; + __le16 cntlid; + __le32 ver; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __u8 rsvd100[11]; + __u8 cntrltype; + __u8 fguid[16]; + __le16 crdt1; + 
__le16 crdt2; + __le16 crdt3; + __u8 rsvd134[122]; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 avscc; + __u8 apsta; + __le16 wctemp; + __le16 cctemp; + __le16 mtfa; + __le32 hmpre; + __le32 hmmin; + __u8 tnvmcap[16]; + __u8 unvmcap[16]; + __le32 rpmbs; + __le16 edstt; + __u8 dsto; + __u8 fwug; + __le16 kas; + __le16 hctma; + __le16 mntmt; + __le16 mxtmt; + __le32 sanicap; + __le32 hmminds; + __le16 hmmaxd; + __le16 nvmsetidmax; + __le16 endgidmax; + __u8 anatt; + __u8 anacap; + __le32 anagrpmax; + __le32 nanagrpid; + __u8 rsvd352[160]; + __u8 sqes; + __u8 cqes; + __le16 maxcmd; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 nvscc; + __u8 nwpc; + __le16 acwu; + __u8 rsvd534[2]; + __le32 sgls; + __le32 mnan; + __u8 rsvd544[224]; + char subnqn[256]; + __u8 rsvd1024[768]; + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 ctrattr; + __u8 msdbd; + __u8 rsvd1804[2]; + __u8 dctype; + __u8 rsvd1807[241]; + struct nvme_id_power_state psd[32]; + __u8 vs[1024]; +}; + +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; +}; + +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __u8 dlfeat; + __le16 nawun; + __le16 nawupf; + __le16 nacwu; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __le16 noiob; + __u8 nvmcap[16]; + __le16 npwg; + __le16 npwa; + __le16 npdg; + __le16 npda; + __le16 nows; + __u8 rsvd74[18]; + __le32 anagrpid; + __u8 rsvd96[3]; + __u8 nsattr; + __le16 nvmsetid; + __le16 endgid; + __u8 nguid[16]; + __u8 eui64[8]; + struct nvme_lbaf lbaf[64]; + __u8 vs[3712]; +}; + +struct nvme_id_ns_cs_indep { + __u8 nsfeat; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __le32 anagrpid; + __u8 nsattr; + __u8 rsvd9; + __le16 nvmsetid; + __le16 endgid; + __u8 nstat; + __u8 rsvd15[4081]; +}; + +struct nvme_id_ns_nvm { + __le64 lbstm; + __u8 pic; + __u8 rsvd9[3]; + __le32 elbaf[64]; + __u8 rsvd268[3828]; +}; + +enum { + NVME_ID_NS_NVM_STS_MASK = 127, + NVME_ID_NS_NVM_GUARD_SHIFT = 7, + NVME_ID_NS_NVM_GUARD_MASK = 3, + NVME_ID_NS_NVM_QPIF_SHIFT = 9, + NVME_ID_NS_NVM_QPIF_MASK = 15, + NVME_ID_NS_NVM_QPIFS = 8, +}; + +struct nvme_id_ctrl_nvm { + __u8 vsl; + __u8 wzsl; + __u8 wusl; + __u8 dmrl; + __le32 dmrsl; + __le64 dmsl; + __u8 rsvd16[4080]; +}; + +enum { + NVME_CSI_NVM = 0, + NVME_CSI_ZNS = 2, +}; + +enum { + NVME_NS_FEAT_THIN = 1, + NVME_NS_FEAT_ATOMICS = 2, + NVME_NS_FEAT_IO_OPT = 16, + NVME_NS_ATTR_RO = 1, + NVME_NS_FLBAS_LBA_MASK = 15, + NVME_NS_FLBAS_LBA_UMASK = 96, + NVME_NS_FLBAS_LBA_SHIFT = 1, + NVME_NS_FLBAS_META_EXT = 16, + NVME_NS_NMIC_SHARED = 1, + NVME_NS_ROTATIONAL = 16, + NVME_NS_VWC_NOT_PRESENT = 32, + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, + NVME_NS_DPC_PI_LAST = 16, + NVME_NS_DPC_PI_FIRST = 8, + NVME_NS_DPC_PI_TYPE3 = 4, + NVME_NS_DPC_PI_TYPE2 = 2, + NVME_NS_DPC_PI_TYPE1 = 1, + NVME_NS_DPS_PI_FIRST = 8, + NVME_NS_DPS_PI_MASK = 7, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, +}; + +enum { + NVME_NSTAT_NRDY = 1, +}; + +enum { + NVME_NVM_NS_16B_GUARD = 0, + NVME_NVM_NS_32B_GUARD = 1, + NVME_NVM_NS_64B_GUARD = 2, + NVME_NVM_NS_QTYPE_GUARD = 3, +}; + +struct nvme_ns_id_desc { + __u8 nidt; + __u8 nidl; + __le16 reserved; +}; + +enum { + NVME_NIDT_EUI64 = 1, + NVME_NIDT_NGUID = 2, + NVME_NIDT_UUID = 3, + NVME_NIDT_CSI = 4, +}; + +struct 
nvme_fw_slot_info_log { + __u8 afi; + __u8 rsvd1[7]; + __le64 frs[7]; + __u8 rsvd64[448]; +}; + +enum { + NVME_AER_ERROR = 0, + NVME_AER_SMART = 1, + NVME_AER_NOTICE = 2, + NVME_AER_CSS = 6, + NVME_AER_VS = 7, +}; + +enum { + NVME_AER_ERROR_PERSIST_INT_ERR = 3, +}; + +enum { + NVME_AER_NOTICE_NS_CHANGED = 0, + NVME_AER_NOTICE_FW_ACT_STARTING = 1, + NVME_AER_NOTICE_ANA = 3, + NVME_AER_NOTICE_DISC_CHANGED = 240, +}; + +enum { + NVME_AEN_CFG_NS_ATTR = 256, + NVME_AEN_CFG_FW_ACT = 512, + NVME_AEN_CFG_ANA_CHANGE = 2048, + NVME_AEN_CFG_DISC_CHANGE = -2147483648, +}; + +enum { + NVME_CMD_FUSE_FIRST = 1, + NVME_CMD_FUSE_SECOND = 2, + NVME_CMD_SGL_METABUF = 64, + NVME_CMD_SGL_METASEG = 128, + NVME_CMD_SGL_ALL = 192, +}; + +enum { + NVME_DSMGMT_IDR = 1, + NVME_DSMGMT_IDW = 2, + NVME_DSMGMT_AD = 4, +}; + +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +enum nvme_zone_mgmt_action { + NVME_ZONE_CLOSE = 1, + NVME_ZONE_FINISH = 2, + NVME_ZONE_OPEN = 3, + NVME_ZONE_RESET = 4, + NVME_ZONE_OFFLINE = 5, + NVME_ZONE_SET_DESC_EXT = 16, +}; + +struct nvme_feat_auto_pst { + __le64 entries[32]; +}; + +struct nvme_feat_host_behavior { + __u8 acre; + __u8 etdas; + __u8 lbafee; + __u8 resv1[509]; +}; + +enum { + NVME_ENABLE_ACRE = 1, + NVME_ENABLE_LBAFEE = 1, +}; + +enum { + NVME_QUEUE_PHYS_CONTIG = 1, + NVME_CQ_IRQ_ENABLED = 2, + NVME_SQ_PRIO_URGENT = 0, + NVME_SQ_PRIO_HIGH = 2, + NVME_SQ_PRIO_MEDIUM = 4, + NVME_SQ_PRIO_LOW = 6, + NVME_FEAT_ARBITRATION = 1, + NVME_FEAT_POWER_MGMT = 2, + NVME_FEAT_LBA_RANGE = 3, + NVME_FEAT_TEMP_THRESH = 4, + NVME_FEAT_ERR_RECOVERY = 5, + NVME_FEAT_VOLATILE_WC = 6, + NVME_FEAT_NUM_QUEUES = 7, + NVME_FEAT_IRQ_COALESCE = 8, + NVME_FEAT_IRQ_CONFIG = 9, + NVME_FEAT_WRITE_ATOMIC = 10, + NVME_FEAT_ASYNC_EVENT = 11, + NVME_FEAT_AUTO_PST = 12, + NVME_FEAT_HOST_MEM_BUF = 13, + NVME_FEAT_TIMESTAMP = 14, + NVME_FEAT_KATO = 15, + NVME_FEAT_HCTM = 16, + NVME_FEAT_NOPSC = 17, + NVME_FEAT_RRL = 18, + NVME_FEAT_PLM_CONFIG = 19, + NVME_FEAT_PLM_WINDOW = 20, + NVME_FEAT_HOST_BEHAVIOR = 22, + NVME_FEAT_SANITIZE = 23, + NVME_FEAT_SW_PROGRESS = 128, + NVME_FEAT_HOST_ID = 129, + NVME_FEAT_RESV_MASK = 130, + NVME_FEAT_RESV_PERSIST = 131, + NVME_FEAT_WRITE_PROTECT = 132, + NVME_FEAT_VENDOR_START = 192, + NVME_FEAT_VENDOR_END = 255, + NVME_LOG_SUPPORTED = 0, + NVME_LOG_ERROR = 1, + NVME_LOG_SMART = 2, + NVME_LOG_FW_SLOT = 3, + NVME_LOG_CHANGED_NS = 4, + NVME_LOG_CMD_EFFECTS = 5, + NVME_LOG_DEVICE_SELF_TEST = 6, + NVME_LOG_TELEMETRY_HOST = 7, + NVME_LOG_TELEMETRY_CTRL = 8, + NVME_LOG_ENDURANCE_GROUP = 9, + NVME_LOG_ANA = 12, + NVME_LOG_FEATURES = 18, + NVME_LOG_RMI = 22, + NVME_LOG_DISC = 112, + NVME_LOG_RESERVATION = 128, + NVME_FWACT_REPL = 0, + NVME_FWACT_REPL_ACTV = 8, + NVME_FWACT_ACTV = 16, +}; + +enum nvmf_capsule_command { + nvme_fabrics_type_property_set = 0, + nvme_fabrics_type_connect = 1, + nvme_fabrics_type_property_get = 4, + nvme_fabrics_type_auth_send = 5, + nvme_fabrics_type_auth_receive = 6, +}; + +enum { + NVME_SCT_GENERIC = 0, + NVME_SC_SUCCESS = 0, + NVME_SC_INVALID_OPCODE = 1, + NVME_SC_INVALID_FIELD = 2, + NVME_SC_CMDID_CONFLICT = 3, + NVME_SC_DATA_XFER_ERROR = 4, + NVME_SC_POWER_LOSS = 5, + NVME_SC_INTERNAL = 6, + NVME_SC_ABORT_REQ = 7, + NVME_SC_ABORT_QUEUE = 8, + NVME_SC_FUSED_FAIL = 9, + NVME_SC_FUSED_MISSING = 10, + NVME_SC_INVALID_NS = 11, + NVME_SC_CMD_SEQ_ERROR = 12, + NVME_SC_SGL_INVALID_LAST = 13, + NVME_SC_SGL_INVALID_COUNT = 14, + NVME_SC_SGL_INVALID_DATA = 15, + NVME_SC_SGL_INVALID_METADATA = 16, + NVME_SC_SGL_INVALID_TYPE = 17, + 
NVME_SC_CMB_INVALID_USE = 18, + NVME_SC_PRP_INVALID_OFFSET = 19, + NVME_SC_ATOMIC_WU_EXCEEDED = 20, + NVME_SC_OP_DENIED = 21, + NVME_SC_SGL_INVALID_OFFSET = 22, + NVME_SC_RESERVED = 23, + NVME_SC_HOST_ID_INCONSIST = 24, + NVME_SC_KA_TIMEOUT_EXPIRED = 25, + NVME_SC_KA_TIMEOUT_INVALID = 26, + NVME_SC_ABORTED_PREEMPT_ABORT = 27, + NVME_SC_SANITIZE_FAILED = 28, + NVME_SC_SANITIZE_IN_PROGRESS = 29, + NVME_SC_SGL_INVALID_GRANULARITY = 30, + NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 31, + NVME_SC_NS_WRITE_PROTECTED = 32, + NVME_SC_CMD_INTERRUPTED = 33, + NVME_SC_TRANSIENT_TR_ERR = 34, + NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 36, + NVME_SC_INVALID_IO_CMD_SET = 44, + NVME_SC_LBA_RANGE = 128, + NVME_SC_CAP_EXCEEDED = 129, + NVME_SC_NS_NOT_READY = 130, + NVME_SC_RESERVATION_CONFLICT = 131, + NVME_SC_FORMAT_IN_PROGRESS = 132, + NVME_SCT_COMMAND_SPECIFIC = 256, + NVME_SC_CQ_INVALID = 256, + NVME_SC_QID_INVALID = 257, + NVME_SC_QUEUE_SIZE = 258, + NVME_SC_ABORT_LIMIT = 259, + NVME_SC_ABORT_MISSING = 260, + NVME_SC_ASYNC_LIMIT = 261, + NVME_SC_FIRMWARE_SLOT = 262, + NVME_SC_FIRMWARE_IMAGE = 263, + NVME_SC_INVALID_VECTOR = 264, + NVME_SC_INVALID_LOG_PAGE = 265, + NVME_SC_INVALID_FORMAT = 266, + NVME_SC_FW_NEEDS_CONV_RESET = 267, + NVME_SC_INVALID_QUEUE = 268, + NVME_SC_FEATURE_NOT_SAVEABLE = 269, + NVME_SC_FEATURE_NOT_CHANGEABLE = 270, + NVME_SC_FEATURE_NOT_PER_NS = 271, + NVME_SC_FW_NEEDS_SUBSYS_RESET = 272, + NVME_SC_FW_NEEDS_RESET = 273, + NVME_SC_FW_NEEDS_MAX_TIME = 274, + NVME_SC_FW_ACTIVATE_PROHIBITED = 275, + NVME_SC_OVERLAPPING_RANGE = 276, + NVME_SC_NS_INSUFFICIENT_CAP = 277, + NVME_SC_NS_ID_UNAVAILABLE = 278, + NVME_SC_NS_ALREADY_ATTACHED = 280, + NVME_SC_NS_IS_PRIVATE = 281, + NVME_SC_NS_NOT_ATTACHED = 282, + NVME_SC_THIN_PROV_NOT_SUPP = 283, + NVME_SC_CTRL_LIST_INVALID = 284, + NVME_SC_SELT_TEST_IN_PROGRESS = 285, + NVME_SC_BP_WRITE_PROHIBITED = 286, + NVME_SC_CTRL_ID_INVALID = 287, + NVME_SC_SEC_CTRL_STATE_INVALID = 288, + NVME_SC_CTRL_RES_NUM_INVALID = 289, + NVME_SC_RES_ID_INVALID = 290, + NVME_SC_PMR_SAN_PROHIBITED = 291, + NVME_SC_ANA_GROUP_ID_INVALID = 292, + NVME_SC_ANA_ATTACH_FAILED = 293, + NVME_SC_BAD_ATTRIBUTES = 384, + NVME_SC_INVALID_PI = 385, + NVME_SC_READ_ONLY = 386, + NVME_SC_ONCS_NOT_SUPPORTED = 387, + NVME_SC_CONNECT_FORMAT = 384, + NVME_SC_CONNECT_CTRL_BUSY = 385, + NVME_SC_CONNECT_INVALID_PARAM = 386, + NVME_SC_CONNECT_RESTART_DISC = 387, + NVME_SC_CONNECT_INVALID_HOST = 388, + NVME_SC_DISCOVERY_RESTART = 400, + NVME_SC_AUTH_REQUIRED = 401, + NVME_SC_ZONE_BOUNDARY_ERROR = 440, + NVME_SC_ZONE_FULL = 441, + NVME_SC_ZONE_READ_ONLY = 442, + NVME_SC_ZONE_OFFLINE = 443, + NVME_SC_ZONE_INVALID_WRITE = 444, + NVME_SC_ZONE_TOO_MANY_ACTIVE = 445, + NVME_SC_ZONE_TOO_MANY_OPEN = 446, + NVME_SC_ZONE_INVALID_TRANSITION = 447, + NVME_SCT_MEDIA_ERROR = 512, + NVME_SC_WRITE_FAULT = 640, + NVME_SC_READ_ERROR = 641, + NVME_SC_GUARD_CHECK = 642, + NVME_SC_APPTAG_CHECK = 643, + NVME_SC_REFTAG_CHECK = 644, + NVME_SC_COMPARE_FAILED = 645, + NVME_SC_ACCESS_DENIED = 646, + NVME_SC_UNWRITTEN_BLOCK = 647, + NVME_SCT_PATH = 768, + NVME_SC_INTERNAL_PATH_ERROR = 768, + NVME_SC_ANA_PERSISTENT_LOSS = 769, + NVME_SC_ANA_INACCESSIBLE = 770, + NVME_SC_ANA_TRANSITION = 771, + NVME_SC_CTRL_PATH_ERROR = 864, + NVME_SC_HOST_PATH_ERROR = 880, + NVME_SC_HOST_ABORTED_CMD = 881, + NVME_SC_MASK = 255, + NVME_SCT_MASK = 1792, + NVME_SCT_SC_MASK = 2047, + NVME_STATUS_CRD = 6144, + NVME_STATUS_MORE = 8192, + NVME_STATUS_DNR = 16384, +}; + +enum nvme_quirks { + NVME_QUIRK_STRIPE_SIZE = 1, + NVME_QUIRK_IDENTIFY_CNS = 2, + 
NVME_QUIRK_DEALLOCATE_ZEROES = 4, + NVME_QUIRK_DELAY_BEFORE_CHK_RDY = 8, + NVME_QUIRK_NO_APST = 16, + NVME_QUIRK_NO_DEEPEST_PS = 32, + NVME_QUIRK_QDEPTH_ONE = 64, + NVME_QUIRK_MEDIUM_PRIO_SQ = 128, + NVME_QUIRK_IGNORE_DEV_SUBNQN = 256, + NVME_QUIRK_DISABLE_WRITE_ZEROES = 512, + NVME_QUIRK_SIMPLE_SUSPEND = 1024, + NVME_QUIRK_SINGLE_VECTOR = 2048, + NVME_QUIRK_128_BYTES_SQES = 4096, + NVME_QUIRK_SHARED_TAGS = 8192, + NVME_QUIRK_NO_TEMP_THRESH_CHANGE = 16384, + NVME_QUIRK_NO_NS_DESC_LIST = 32768, + NVME_QUIRK_DMA_ADDRESS_BITS_48 = 65536, + NVME_QUIRK_SKIP_CID_GEN = 131072, + NVME_QUIRK_BOGUS_NID = 262144, + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = 524288, + NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = 1048576, + NVME_QUIRK_BROKEN_MSI = 2097152, +}; + +enum nvme_ctrl_flags { + NVME_CTRL_FAILFAST_EXPIRED = 0, + NVME_CTRL_ADMIN_Q_STOPPED = 1, + NVME_CTRL_STARTED_ONCE = 2, + NVME_CTRL_STOPPED = 3, + NVME_CTRL_SKIP_ID_CNS_CS = 4, + NVME_CTRL_DIRTY_CAPABILITY = 5, + NVME_CTRL_FROZEN = 6, +}; + +struct nvmf_host; + +struct nvmf_ctrl_options { + unsigned int mask; + int max_reconnects; + char *transport; + char *subsysnqn; + char *traddr; + char *trsvcid; + char *host_traddr; + char *host_iface; + size_t queue_size; + unsigned int nr_io_queues; + unsigned int reconnect_delay; + bool discovery_nqn; + bool duplicate_connect; + unsigned int kato; + struct nvmf_host *host; + char *dhchap_secret; + char *dhchap_ctrl_secret; + struct key *keyring; + struct key *tls_key; + bool tls; + bool disable_sqflow; + bool hdr_digest; + bool data_digest; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; + int tos; + int fast_io_fail_tmo; +}; + +typedef __u32 nvme_submit_flags_t; + +enum { + NVME_SUBMIT_AT_HEAD = 1, + NVME_SUBMIT_NOWAIT = 2, + NVME_SUBMIT_RESERVED = 4, + NVME_SUBMIT_RETRY = 8, +}; + +struct nvme_zone_info { + u64 zone_size; + unsigned int max_open_zones; + unsigned int max_active_zones; +}; + +struct nvmf_host { + struct kref ref; + struct list_head list; + char nqn[223]; + uuid_t id; +}; + +struct trace_event_raw_nvme_setup_cmd { + struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + u8 opcode; + u8 flags; + u8 fctype; + u16 cid; + u32 nsid; + bool metadata; + u8 cdw10[24]; + char __data[0]; +}; + +struct trace_event_raw_nvme_complete_rq { + struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + int cid; + long: 32; + u64 result; + u8 retries; + u8 flags; + u16 status; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_nvme_async_event { + struct trace_entry ent; + int ctrl_id; + u32 result; + char __data[0]; +}; + +struct trace_event_raw_nvme_sq { + struct trace_entry ent; + int ctrl_id; + char disk[32]; + int qid; + u16 sq_head; + u16 sq_tail; + char __data[0]; +}; + +struct trace_event_data_offsets_nvme_setup_cmd {}; + +struct trace_event_data_offsets_nvme_complete_rq {}; + +struct trace_event_data_offsets_nvme_async_event {}; + +struct trace_event_data_offsets_nvme_sq {}; + +typedef void (*btf_trace_nvme_setup_cmd)(void *, struct request *, struct nvme_command *); + +typedef void (*btf_trace_nvme_complete_rq)(void *, struct request *); + +typedef void (*btf_trace_nvme_async_event)(void *, struct nvme_ctrl *, u32); + +typedef void (*btf_trace_nvme_sq)(void *, struct request *, __le16, int); + +struct nvme_ns_info { + struct nvme_ns_ids ids; + u32 nsid; + __le32 anagrpid; + u8 pi_offset; + bool is_shared; + bool is_readonly; + bool is_ready; + bool is_removed; + bool is_rotational; + bool no_vwc; +}; + +enum nvme_disposition { + COMPLETE = 0, + RETRY = 1, + 
FAILOVER = 2, + AUTHENTICATE = 3, +}; + +struct nvme_core_quirk_entry { + u16 vid; + const char *mn; + const char *fr; + long unsigned int quirks; +}; + +struct async_scan_info { + struct nvme_ctrl *ctrl; + atomic_t next_nsid; + __le32 *ns_list; +}; + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 1, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 2, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 3, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 4, + E1000_INVM_INVALIDATED_STRUCTURE = 15, +}; + +enum iwl_context_info_flags { + IWL_CTXT_INFO_AUTO_FUNC_INIT = 1, + IWL_CTXT_INFO_EARLY_DEBUG = 2, + IWL_CTXT_INFO_ENABLE_CDMP = 4, + IWL_CTXT_INFO_RB_CB_SIZE = 240, + IWL_CTXT_INFO_TFD_FORMAT_LONG = 256, + IWL_CTXT_INFO_RB_SIZE = 7680, + IWL_CTXT_INFO_RB_SIZE_1K = 1, + IWL_CTXT_INFO_RB_SIZE_2K = 2, + IWL_CTXT_INFO_RB_SIZE_4K = 4, + IWL_CTXT_INFO_RB_SIZE_8K = 8, + IWL_CTXT_INFO_RB_SIZE_12K = 9, + IWL_CTXT_INFO_RB_SIZE_16K = 10, + IWL_CTXT_INFO_RB_SIZE_20K = 11, + IWL_CTXT_INFO_RB_SIZE_24K = 12, + IWL_CTXT_INFO_RB_SIZE_28K = 13, + IWL_CTXT_INFO_RB_SIZE_32K = 14, +}; + +enum { + CHANNEL_MODE_LEGACY = 0, + CHANNEL_MODE_PURE_40 = 1, + CHANNEL_MODE_MIXED = 2, + CHANNEL_MODE_RESERVED = 3, +}; + +struct iwl_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 reserved1; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + u8 ofdm_ht_triple_stream_basic_rates; + u8 reserved2; + __le16 rx_chain_select_flags; + __le16 acquisition_data; + __le32 reserved3; +}; + +struct iwl_calib_hdr { + u8 op_code; + u8 first_group; + u8 groups_num; + u8 data_valid; +}; + +struct iwl_calib_chain_noise_reset_cmd { + struct iwl_calib_hdr hdr; + u8 data[0]; +}; + +struct iwl_wipan_slot { + __le16 width; + u8 type; + u8 reserved; +}; + +struct iwl_wipan_params_cmd { + __le16 flags; + u8 reserved; + u8 num_slots; + struct iwl_wipan_slot slots[10]; +}; + +enum iwlagn_chain_noise_state { + IWL_CHAIN_NOISE_ALIVE = 0, + IWL_CHAIN_NOISE_ACCUMULATE = 1, + IWL_CHAIN_NOISE_CALIBRATED = 2, + IWL_CHAIN_NOISE_DONE = 3, +}; + +enum { + IWL_CALIB_ENABLE_ALL = 0, + IWL_SENSITIVITY_CALIB_DISABLED = 1, + IWL_CHAIN_NOISE_CALIB_DISABLED = 2, + IWL_TX_POWER_CALIB_DISABLED = 4, + IWL_CALIB_DISABLE_ALL = 4294967295, +}; + +struct iwl_he_pkt_ext_v2 { + u8 pkt_ext_qam_th[20]; +}; + +enum iwl_fw_sta_type { + STATION_TYPE_PEER = 0, + STATION_TYPE_BCAST_MGMT = 1, + STATION_TYPE_MCAST = 2, + STATION_TYPE_AUX = 3, +}; + +struct iwl_sta_cfg_cmd { + __le32 sta_id; + __le32 link_id; + u8 peer_mld_address[6]; + __le16 reserved_for_peer_mld_address; + u8 peer_link_address[6]; + __le16 reserved_for_peer_link_address; + __le32 station_type; + __le32 assoc_id; + __le32 beamform_flags; + __le32 mfp; + __le32 mimo; + __le32 mimo_protection; + __le32 ack_enabled; + __le32 trig_rnd_alloc; + __le32 tx_ampdu_spacing; + __le32 tx_ampdu_max_size; + __le32 sp_length; + __le32 uapsd_acs; + struct iwl_he_pkt_ext_v2 pkt_ext; + __le32 htc_flags; +}; + +struct iwl_mvm_aux_sta_cmd { + __le32 sta_id; + __le32 lmac_id; + u8 mac_addr[6]; + __le16 reserved_for_mac_addr; +}; + +struct iwl_remove_sta_cmd { + __le32 sta_id; +}; + +struct iwl_mvm_sta_disable_tx_cmd { + __le32 sta_id; + __le32 disable; +}; + +enum iwl_rx_baid_action { + 
IWL_RX_BAID_ACTION_ADD = 0, + IWL_RX_BAID_ACTION_MODIFY = 1, + IWL_RX_BAID_ACTION_REMOVE = 2, +}; + +struct iwl_rx_baid_cfg_cmd_alloc { + __le32 sta_id_mask; + u8 tid; + u8 reserved[3]; + __le16 ssn; + __le16 win_size; +}; + +struct iwl_rx_baid_cfg_cmd_modify { + __le32 old_sta_id_mask; + __le32 new_sta_id_mask; + __le32 tid; +}; + +struct iwl_rx_baid_cfg_cmd_remove_v1 { + __le32 baid; +}; + +struct iwl_rx_baid_cfg_cmd_remove { + __le32 sta_id_mask; + __le32 tid; +}; + +struct iwl_rx_baid_cfg_cmd { + __le32 action; + union { + struct iwl_rx_baid_cfg_cmd_alloc alloc; + struct iwl_rx_baid_cfg_cmd_modify modify; + struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1; + struct iwl_rx_baid_cfg_cmd_remove remove; + }; +}; + +struct ehci_iso_packet { + u64 bufp; + __le32 transaction; + u8 cross; + u32 buf1; + long: 32; +}; + +struct ehci_iso_sched { + struct list_head td_list; + unsigned int span; + unsigned int first_packet; + struct ehci_iso_packet packet[0]; +}; + +struct ehci_tt { + u16 bandwidth[8]; + struct list_head tt_list; + struct list_head ps_list; + struct usb_tt *usb_tt; + int tt_port; +}; + +struct debug_buffer { + ssize_t (*fill_func)(struct debug_buffer *); + struct usb_bus *bus; + struct mutex mutex; + size_t count; + char *output_buf; + size_t alloc_size; +}; + +enum hwmon_chip_attributes { + hwmon_chip_temp_reset_history = 0, + hwmon_chip_in_reset_history = 1, + hwmon_chip_curr_reset_history = 2, + hwmon_chip_power_reset_history = 3, + hwmon_chip_register_tz = 4, + hwmon_chip_update_interval = 5, + hwmon_chip_alarms = 6, + hwmon_chip_samples = 7, + hwmon_chip_curr_samples = 8, + hwmon_chip_in_samples = 9, + hwmon_chip_power_samples = 10, + hwmon_chip_temp_samples = 11, + hwmon_chip_beep_enable = 12, + hwmon_chip_pec = 13, +}; + +enum hwmon_energy_attributes { + hwmon_energy_enable = 0, + hwmon_energy_input = 1, + hwmon_energy_label = 2, +}; + +enum hwmon_humidity_attributes { + hwmon_humidity_enable = 0, + hwmon_humidity_input = 1, + hwmon_humidity_label = 2, + hwmon_humidity_min = 3, + hwmon_humidity_min_hyst = 4, + hwmon_humidity_max = 5, + hwmon_humidity_max_hyst = 6, + hwmon_humidity_alarm = 7, + hwmon_humidity_fault = 8, + hwmon_humidity_rated_min = 9, + hwmon_humidity_rated_max = 10, + hwmon_humidity_min_alarm = 11, + hwmon_humidity_max_alarm = 12, +}; + +enum hwmon_fan_attributes { + hwmon_fan_enable = 0, + hwmon_fan_input = 1, + hwmon_fan_label = 2, + hwmon_fan_min = 3, + hwmon_fan_max = 4, + hwmon_fan_div = 5, + hwmon_fan_pulses = 6, + hwmon_fan_target = 7, + hwmon_fan_alarm = 8, + hwmon_fan_min_alarm = 9, + hwmon_fan_max_alarm = 10, + hwmon_fan_fault = 11, + hwmon_fan_beep = 12, +}; + +enum hwmon_pwm_attributes { + hwmon_pwm_input = 0, + hwmon_pwm_enable = 1, + hwmon_pwm_mode = 2, + hwmon_pwm_freq = 3, + hwmon_pwm_auto_channels_temp = 4, +}; + +enum hwmon_intrusion_attributes { + hwmon_intrusion_alarm = 0, + hwmon_intrusion_beep = 1, +}; + +struct trace_event_raw_hwmon_attr_class { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + long int val; + char __data[0]; +}; + +struct trace_event_raw_hwmon_attr_show_string { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + u32 __data_loc_label; + char __data[0]; +}; + +struct trace_event_data_offsets_hwmon_attr_class { + u32 attr_name; + const void *attr_name_ptr_; +}; + +struct trace_event_data_offsets_hwmon_attr_show_string { + u32 attr_name; + const void *attr_name_ptr_; + u32 label; + const void *label_ptr_; +}; + +typedef void (*btf_trace_hwmon_attr_show)(void *, int, const char *, long 
int); + +typedef void (*btf_trace_hwmon_attr_store)(void *, int, const char *, long int); + +typedef void (*btf_trace_hwmon_attr_show_string)(void *, int, const char *, const char *); + +struct hwmon_device { + const char *name; + const char *label; + struct device dev; + const struct hwmon_chip_info *chip; + struct list_head tzdata; + struct attribute_group group; + const struct attribute_group **groups; +}; + +struct hwmon_device_attribute { + struct device_attribute dev_attr; + const struct hwmon_ops *ops; + enum hwmon_sensor_types type; + u32 attr; + int index; + char name[32]; +}; + +struct hwmon_thermal_data { + struct list_head node; + struct device *dev; + int index; + struct thermal_zone_device *tzd; +}; + +struct usage_priority { + __u32 usage; + bool global; + unsigned int slot_overwrite; +}; + +typedef bool (*hid_usage_cmp_t)(struct hid_usage *, unsigned int, unsigned int); + +struct nda_cacheinfo { + __u32 ndm_confirmed; + __u32 ndm_used; + __u32 ndm_updated; + __u32 ndm_refcnt; +}; + +struct ndt_stats { + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; + __u64 ndts_table_fulls; +}; + +enum { + NDTPA_UNSPEC = 0, + NDTPA_IFINDEX = 1, + NDTPA_REFCNT = 2, + NDTPA_REACHABLE_TIME = 3, + NDTPA_BASE_REACHABLE_TIME = 4, + NDTPA_RETRANS_TIME = 5, + NDTPA_GC_STALETIME = 6, + NDTPA_DELAY_PROBE_TIME = 7, + NDTPA_QUEUE_LEN = 8, + NDTPA_APP_PROBES = 9, + NDTPA_UCAST_PROBES = 10, + NDTPA_MCAST_PROBES = 11, + NDTPA_ANYCAST_DELAY = 12, + NDTPA_PROXY_DELAY = 13, + NDTPA_PROXY_QLEN = 14, + NDTPA_LOCKTIME = 15, + NDTPA_QUEUE_LENBYTES = 16, + NDTPA_MCAST_REPROBES = 17, + NDTPA_PAD = 18, + NDTPA_INTERVAL_PROBE_TIME_MS = 19, + __NDTPA_MAX = 20, +}; + +struct ndtmsg { + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config { + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; + __u32 ndtc_last_rand; + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +enum { + NDTA_UNSPEC = 0, + NDTA_NAME = 1, + NDTA_THRESH1 = 2, + NDTA_THRESH2 = 3, + NDTA_THRESH3 = 4, + NDTA_CONFIG = 5, + NDTA_PARMS = 6, + NDTA_STATS = 7, + NDTA_GC_INTERVAL = 8, + NDTA_PAD = 9, + __NDTA_MAX = 10, +}; + +struct neigh_seq_state { + struct seq_net_private p; + struct neigh_table *tbl; + struct neigh_hash_table *nht; + void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); + unsigned int bucket; + unsigned int flags; +}; + +struct neighbour_cb { + long unsigned int sched_next; + unsigned int flags; +}; + +struct neigh_dump_filter { + int master_idx; + int dev_idx; +}; + +struct neigh_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table neigh_vars[21]; +}; + +enum { + CTRL_CMD_UNSPEC = 0, + CTRL_CMD_NEWFAMILY = 1, + CTRL_CMD_DELFAMILY = 2, + CTRL_CMD_GETFAMILY = 3, + CTRL_CMD_NEWOPS = 4, + CTRL_CMD_DELOPS = 5, + CTRL_CMD_GETOPS = 6, + CTRL_CMD_NEWMCAST_GRP = 7, + CTRL_CMD_DELMCAST_GRP = 8, + CTRL_CMD_GETMCAST_GRP = 9, + CTRL_CMD_GETPOLICY = 10, + __CTRL_CMD_MAX = 11, +}; + +enum { + CTRL_ATTR_UNSPEC = 0, + CTRL_ATTR_FAMILY_ID = 1, + CTRL_ATTR_FAMILY_NAME = 2, + CTRL_ATTR_VERSION = 3, + CTRL_ATTR_HDRSIZE = 4, + CTRL_ATTR_MAXATTR = 5, + CTRL_ATTR_OPS = 6, + CTRL_ATTR_MCAST_GROUPS = 7, + CTRL_ATTR_POLICY = 8, + CTRL_ATTR_OP_POLICY = 9, + CTRL_ATTR_OP = 10, + 
__CTRL_ATTR_MAX = 11, +}; + +enum { + CTRL_ATTR_OP_UNSPEC = 0, + CTRL_ATTR_OP_ID = 1, + CTRL_ATTR_OP_FLAGS = 2, + __CTRL_ATTR_OP_MAX = 3, +}; + +enum { + CTRL_ATTR_MCAST_GRP_UNSPEC = 0, + CTRL_ATTR_MCAST_GRP_NAME = 1, + CTRL_ATTR_MCAST_GRP_ID = 2, + __CTRL_ATTR_MCAST_GRP_MAX = 3, +}; + +enum { + CTRL_ATTR_POLICY_UNSPEC = 0, + CTRL_ATTR_POLICY_DO = 1, + CTRL_ATTR_POLICY_DUMP = 2, + __CTRL_ATTR_POLICY_DUMP_MAX = 3, + CTRL_ATTR_POLICY_DUMP_MAX = 2, +}; + +struct genl_op_iter { + const struct genl_family *family; + struct genl_split_ops doit; + struct genl_split_ops dumpit; + int cmd_idx; + int entry_idx; + u32 cmd; + u8 flags; +}; + +struct genl_start_context { + const struct genl_family *family; + struct nlmsghdr *nlh; + struct netlink_ext_ack *extack; + const struct genl_split_ops *ops; + int hdrlen; +}; + +struct ctrl_dump_policy_ctx { + struct netlink_policy_dump_state *state; + const struct genl_family *rt; + struct genl_op_iter *op_iter; + u32 op; + u16 fam_id; + u8 dump_map: 1; + u8 single_op: 1; +}; + +struct phc_vclocks_reply_data { + struct ethnl_reply_data base; + int num; + int *index; +}; + +struct nf_nat_lookup_hook_priv { + struct nf_hook_entries *entries; + struct callback_head callback_head; +}; + +struct nf_nat_hooks_net { + struct nf_hook_ops *nat_hook_ops; + unsigned int users; +}; + +struct nat_net { + struct nf_nat_hooks_net nat_proto_net[11]; +}; + +struct nf_nat_proto_clean { + u8 l3proto; + u8 l4proto; +}; + +enum ctattr_nat { + CTA_NAT_UNSPEC = 0, + CTA_NAT_V4_MINIP = 1, + CTA_NAT_V4_MAXIP = 2, + CTA_NAT_PROTO = 3, + CTA_NAT_V6_MINIP = 4, + CTA_NAT_V6_MAXIP = 5, + __CTA_NAT_MAX = 6, +}; + +enum ctattr_protonat { + CTA_PROTONAT_UNSPEC = 0, + CTA_PROTONAT_PORT_MIN = 1, + CTA_PROTONAT_PORT_MAX = 2, + __CTA_PROTONAT_MAX = 3, +}; + +struct nft_counter { + u64_stats_t bytes; + u64_stats_t packets; +}; + +struct nft_counter_tot { + s64 bytes; + s64 packets; +}; + +struct nft_counter_percpu_priv { + struct nft_counter *counter; +}; + +typedef u32 inet_ehashfn_t(const struct net *, const __be32, const __u16, const __be32, const __be16); + +struct sr6_tlv { + __u8 type; + __u8 len; + __u8 data[0]; +}; + +enum { + SEG6_ATTR_UNSPEC = 0, + SEG6_ATTR_DST = 1, + SEG6_ATTR_DSTLEN = 2, + SEG6_ATTR_HMACKEYID = 3, + SEG6_ATTR_SECRET = 4, + SEG6_ATTR_SECRETLEN = 5, + SEG6_ATTR_ALGID = 6, + SEG6_ATTR_HMACINFO = 7, + __SEG6_ATTR_MAX = 8, +}; + +enum { + SEG6_CMD_UNSPEC = 0, + SEG6_CMD_SETHMAC = 1, + SEG6_CMD_DUMPHMAC = 2, + SEG6_CMD_SET_TUNSRC = 3, + SEG6_CMD_GET_TUNSRC = 4, + __SEG6_CMD_MAX = 5, +}; + +enum br_pkt_type { + BR_PKT_UNICAST = 0, + BR_PKT_MULTICAST = 1, + BR_PKT_BROADCAST = 2, +}; + +typedef void (*phys_reset_t)(long unsigned int, bool); + +typedef struct { + void *lock; + long unsigned int flags; +} class_irqsave_t; + +struct pin_cookie {}; + +enum scx_ent_flags { + SCX_TASK_QUEUED = 1, + SCX_TASK_RESET_RUNNABLE_AT = 4, + SCX_TASK_DEQD_FOR_SLEEP = 8, + SCX_TASK_STATE_SHIFT = 8, + SCX_TASK_STATE_BITS = 2, + SCX_TASK_STATE_MASK = 768, + SCX_TASK_CURSOR = -2147483648, +}; + +enum scx_task_state { + SCX_TASK_NONE = 0, + SCX_TASK_INIT = 1, + SCX_TASK_READY = 2, + SCX_TASK_ENABLED = 3, + SCX_TASK_NR_STATES = 4, +}; + +enum scx_ent_dsq_flags { + SCX_TASK_DSQ_ON_PRIQ = 1, +}; + +enum scx_kf_mask { + SCX_KF_UNLOCKED = 0, + SCX_KF_CPU_RELEASE = 1, + SCX_KF_DISPATCH = 2, + SCX_KF_ENQUEUE = 4, + SCX_KF_SELECT_CPU = 8, + SCX_KF_REST = 16, + __SCX_KF_RQ_LOCKED = 31, + __SCX_KF_TERMINAL = 28, +}; + +enum scx_dsq_lnode_flags { + SCX_DSQ_LNODE_ITER_CURSOR = 1, + __SCX_DSQ_LNODE_PRIV_SHIFT = 
16, +}; + +struct dl_bw { + raw_spinlock_t lock; + long: 32; + u64 bw; + u64 total_bw; +}; + +struct cpudl_item; + +struct cpudl { + raw_spinlock_t lock; + int size; + cpumask_var_t free_cpus; + struct cpudl_item *elements; +}; + +struct cpupri_vec { + atomic_t count; + cpumask_var_t mask; +}; + +struct cpupri { + struct cpupri_vec pri_to_cpu[101]; + int *cpu_to_pri; +}; + +struct perf_domain; + +struct root_domain { + atomic_t refcount; + atomic_t rto_count; + struct callback_head rcu; + cpumask_var_t span; + cpumask_var_t online; + bool overloaded; + bool overutilized; + cpumask_var_t dlo_mask; + atomic_t dlo_count; + long: 32; + struct dl_bw dl_bw; + struct cpudl cpudl; + u64 visit_gen; + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; + int rto_loop; + int rto_cpu; + atomic_t rto_loop_next; + atomic_t rto_loop_start; + cpumask_var_t rto_mask; + struct cpupri cpupri; + struct perf_domain *pd; +}; + +struct sched_param { + int sched_priority; +}; + +struct cfs_rq { + struct load_weight load; + unsigned int nr_running; + unsigned int h_nr_running; + unsigned int idle_nr_running; + unsigned int idle_h_nr_running; + s64 avg_vruntime; + u64 avg_load; + u64 min_vruntime; + unsigned int forceidle_seq; + long: 32; + u64 min_vruntime_fi; + struct rb_root_cached tasks_timeline; + struct sched_entity *curr; + struct sched_entity *next; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_avg avg; + u64 last_update_time_copy; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct { + raw_spinlock_t lock; + int nr; + long unsigned int load_avg; + long unsigned int util_avg; + long unsigned int runnable_avg; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + } removed; + u64 last_update_tg_load_avg; + long unsigned int tg_load_avg_contrib; + long int propagate; + long int prop_runnable_sum; + long unsigned int h_load; + u64 last_h_load_update; + struct sched_entity *h_load_next; + struct rq *rq; + int on_list; + struct list_head leaf_cfs_rq_list; + struct task_group *tg; + int idle; + int runtime_enabled; + s64 runtime_remaining; + u64 throttled_pelt_idle; + u64 throttled_pelt_idle_copy; + u64 throttled_clock; + u64 throttled_clock_pelt; + u64 throttled_clock_pelt_time; + u64 throttled_clock_self; + u64 throttled_clock_self_time; + int throttled; + int throttle_count; + struct list_head throttled_list; + struct list_head throttled_csd_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct uclamp_bucket { + long unsigned int value: 11; + long unsigned int tasks: 21; +}; + +struct uclamp_rq { + unsigned int value; + struct uclamp_bucket bucket[5]; +}; + +struct rt_prio_array { + long unsigned int bitmap[4]; + struct list_head queue[100]; +}; + +struct rt_rq { + struct rt_prio_array active; + unsigned int rt_nr_running; + unsigned int rr_nr_running; + struct { + int curr; + int next; + } highest_prio; + bool overloaded; + struct plist_head pushable_tasks; + int rt_queued; +}; + +struct dl_rq { + struct rb_root_cached root; + unsigned int dl_nr_running; + long: 32; + struct { + u64 curr; + u64 next; + } earliest_dl; + bool overloaded; + struct rb_root_cached pushable_dl_tasks_root; + long: 32; + u64 
running_bw; + u64 this_bw; + u64 extra_bw; + u64 max_bw; + u64 bw_ratio; +}; + +struct balance_callback { + struct balance_callback *next; + void (*func)(struct rq *); +}; + +struct scx_rq { + struct scx_dispatch_q local_dsq; + struct list_head runnable_list; + struct list_head ddsp_deferred_locals; + long unsigned int ops_qseq; + long: 32; + u64 extra_enq_flags; + u32 nr_running; + u32 flags; + u32 cpuperf_target; + bool cpu_released; + cpumask_var_t cpus_to_kick; + cpumask_var_t cpus_to_kick_if_idle; + cpumask_var_t cpus_to_preempt; + cpumask_var_t cpus_to_wait; + long unsigned int pnt_seq; + struct balance_callback deferred_bal_cb; + struct irq_work deferred_irq_work; + struct irq_work kick_cpus_irq_work; + long: 32; +}; + +struct cpu_stop_done; + +struct cpu_stop_work { + struct list_head list; + cpu_stop_fn_t fn; + long unsigned int caller; + void *arg; + struct cpu_stop_done *done; +}; + +struct sched_domain; + +struct rq { + raw_spinlock_t __lock; + unsigned int nr_running; + long unsigned int last_blocked_load_update_tick; + unsigned int has_blocked_load; + call_single_data_t nohz_csd; + unsigned int nohz_tick_stopped; + atomic_t nohz_flags; + unsigned int ttwu_pending; + long: 32; + u64 nr_switches; + long: 32; + long: 32; + struct uclamp_rq uclamp[2]; + unsigned int uclamp_flags; + long: 32; + long: 32; + long: 32; + struct cfs_rq cfs; + struct rt_rq rt; + struct dl_rq dl; + struct scx_rq scx; + struct sched_dl_entity fair_server; + struct list_head leaf_cfs_rq_list; + struct list_head *tmp_alone_branch; + unsigned int nr_uninterruptible; + union { + struct task_struct *donor; + struct task_struct *curr; + }; + struct sched_dl_entity *dl_server; + struct task_struct *idle; + struct task_struct *stop; + long unsigned int next_balance; + struct mm_struct *prev_mm; + unsigned int clock_update_flags; + long: 32; + u64 clock; + u64 clock_task; + u64 clock_pelt; + long unsigned int lost_idle_time; + long: 32; + u64 clock_pelt_idle; + u64 clock_idle; + u64 clock_pelt_idle_copy; + u64 clock_idle_copy; + atomic_t nr_iowait; + long: 32; + u64 last_seen_need_resched_ns; + int ticks_without_resched; + int membarrier_state; + struct root_domain *rd; + struct sched_domain *sd; + long unsigned int cpu_capacity; + struct balance_callback *balance_callback; + unsigned char nohz_idle_balance; + unsigned char idle_balance; + long unsigned int misfit_task_load; + int active_balance; + int push_cpu; + struct cpu_stop_work active_balance_work; + int cpu; + int online; + struct list_head cfs_tasks; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_avg avg_rt; + struct sched_avg avg_dl; + u64 idle_stamp; + u64 avg_idle; + u64 max_idle_balance_cost; + struct rcuwait hotplug_wait; + long unsigned int calc_load_update; + long int calc_load_active; + long: 32; + long: 32; + long: 32; + call_single_data_t hrtick_csd; + struct hrtimer hrtick_timer; + ktime_t hrtick_time; + struct cpuidle_state *idle_state; + unsigned int nr_pinned; + unsigned int push_busy; + struct cpu_stop_work push_work; + struct rq *core; + struct task_struct *core_pick; + struct sched_dl_entity *core_dl_server; + unsigned int core_enabled; + unsigned int core_sched_seq; + struct rb_root core_tree; + unsigned int core_task_seq; + unsigned int core_pick_seq; + long unsigned int core_cookie; + unsigned int core_forceidle_count; + unsigned int core_forceidle_seq; + unsigned int core_forceidle_occupation; + long: 32; + u64 core_forceidle_start; + 
cpumask_var_t scratch_mask; + long: 32; + call_single_data_t cfsb_csd; + struct list_head cfsb_csd_list; + long: 32; + long: 32; +}; + +struct cfs_bandwidth { + raw_spinlock_t lock; + long: 32; + ktime_t period; + u64 quota; + u64 runtime; + u64 burst; + u64 runtime_snap; + s64 hierarchical_quota; + u8 idle; + u8 period_active; + u8 slack_started; + long: 32; + struct hrtimer period_timer; + struct hrtimer slack_timer; + struct list_head throttled_cfs_rq; + int nr_periods; + int nr_throttled; + int nr_burst; + long: 32; + u64 throttled_time; + u64 burst_time; +}; + +struct task_group { + struct cgroup_subsys_state css; + int idle; + struct sched_entity **se; + struct cfs_rq **cfs_rq; + long unsigned int shares; + atomic_long_t load_avg; + u32 scx_flags; + u32 scx_weight; + struct callback_head rcu; + struct list_head list; + struct task_group *parent; + struct list_head siblings; + struct list_head children; + struct cfs_bandwidth cfs_bandwidth; + long: 32; + long: 32; +}; + +typedef struct { + raw_spinlock_t *lock; +} class_raw_spinlock_t; + +enum { + SD_BALANCE_NEWIDLE = 1, + SD_BALANCE_EXEC = 2, + SD_BALANCE_FORK = 4, + SD_BALANCE_WAKE = 8, + SD_WAKE_AFFINE = 16, + SD_ASYM_CPUCAPACITY = 32, + SD_ASYM_CPUCAPACITY_FULL = 64, + SD_SHARE_CPUCAPACITY = 128, + SD_CLUSTER = 256, + SD_SHARE_LLC = 512, + SD_SERIALIZE = 1024, + SD_ASYM_PACKING = 2048, + SD_PREFER_SIBLING = 4096, + SD_OVERLAP = 8192, + SD_NUMA = 16384, +}; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; + int nr_idle_scan; +}; + +struct sched_group; + +struct sched_domain { + struct sched_domain *parent; + struct sched_domain *child; + struct sched_group *groups; + long unsigned int min_interval; + long unsigned int max_interval; + unsigned int busy_factor; + unsigned int imbalance_pct; + unsigned int cache_nice_tries; + unsigned int imb_numa_nr; + int nohz_idle; + int flags; + int level; + long unsigned int last_balance; + unsigned int balance_interval; + unsigned int nr_balance_failed; + long: 32; + u64 max_newidle_lb_cost; + long unsigned int last_decay_max_lb_cost; + char *name; + union { + void *private; + struct callback_head rcu; + }; + struct sched_domain_shared *shared; + unsigned int span_weight; + long unsigned int span[0]; +}; + +struct sched_group_capacity; + +struct sched_group { + struct sched_group *next; + atomic_t ref; + unsigned int group_weight; + unsigned int cores; + struct sched_group_capacity *sgc; + int asym_prefer_cpu; + int flags; + long unsigned int cpumask[0]; +}; + +struct sched_group_capacity { + atomic_t ref; + long unsigned int capacity; + long unsigned int min_capacity; + long unsigned int max_capacity; + long unsigned int next_update; + int imbalance; + int id; + long unsigned int cpumask[0]; +}; + +struct em_perf_state { + long unsigned int performance; + long unsigned int frequency; + long unsigned int power; + long unsigned int cost; + long unsigned int flags; +}; + +struct em_perf_table { + struct callback_head rcu; + struct kref kref; + struct em_perf_state state[0]; +}; + +struct em_perf_domain { + struct em_perf_table *em_table; + int nr_perf_states; + int min_perf_state; + int max_perf_state; + long unsigned int flags; + long unsigned int cpus[0]; +}; + +enum s2idle_states { + S2IDLE_STATE_NONE = 0, + S2IDLE_STATE_ENTER = 1, + S2IDLE_STATE_WAKE = 2, +}; + +struct sched_attr { + __u32 size; + __u32 sched_policy; + __u64 sched_flags; + __s32 sched_nice; + __u32 sched_priority; + __u64 sched_runtime; + __u64 sched_deadline; + __u64 sched_period; 
+ __u32 sched_util_min; + __u32 sched_util_max; +}; + +struct bpf_bprintf_data { + u32 *bin_args; + char *buf; + bool get_bin_args; + bool get_buf; +}; + +struct cpudl_item { + u64 dl; + int cpu; + int idx; +}; + +enum scx_rq_flags { + SCX_RQ_ONLINE = 1, + SCX_RQ_CAN_STOP_TICK = 2, + SCX_RQ_BAL_PENDING = 4, + SCX_RQ_BAL_KEEP = 8, + SCX_RQ_BYPASSING = 16, + SCX_RQ_IN_WAKEUP = 65536, + SCX_RQ_IN_BALANCE = 131072, +}; + +struct perf_domain { + struct em_perf_domain *em_pd; + struct perf_domain *next; + struct callback_head rcu; +}; + +struct rq_flags { + long unsigned int flags; + struct pin_cookie cookie; + unsigned int clock_update_flags; +}; + +typedef struct { + struct task_struct *lock; + struct rq *rq; + struct rq_flags rf; +} class_task_rq_lock_t; + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_t; + +enum { + __SCHED_FEAT_PLACE_LAG = 0, + __SCHED_FEAT_PLACE_DEADLINE_INITIAL = 1, + __SCHED_FEAT_PLACE_REL_DEADLINE = 2, + __SCHED_FEAT_RUN_TO_PARITY = 3, + __SCHED_FEAT_PREEMPT_SHORT = 4, + __SCHED_FEAT_NEXT_BUDDY = 5, + __SCHED_FEAT_CACHE_HOT_BUDDY = 6, + __SCHED_FEAT_DELAY_DEQUEUE = 7, + __SCHED_FEAT_DELAY_ZERO = 8, + __SCHED_FEAT_WAKEUP_PREEMPTION = 9, + __SCHED_FEAT_HRTICK = 10, + __SCHED_FEAT_HRTICK_DL = 11, + __SCHED_FEAT_NONTASK_CAPACITY = 12, + __SCHED_FEAT_TTWU_QUEUE = 13, + __SCHED_FEAT_SIS_UTIL = 14, + __SCHED_FEAT_WARN_DOUBLE_CLOCK = 15, + __SCHED_FEAT_RT_PUSH_IPI = 16, + __SCHED_FEAT_RT_RUNTIME_SHARE = 17, + __SCHED_FEAT_LB_MIN = 18, + __SCHED_FEAT_ATTACH_AGE_LOAD = 19, + __SCHED_FEAT_WA_IDLE = 20, + __SCHED_FEAT_WA_WEIGHT = 21, + __SCHED_FEAT_WA_BIAS = 22, + __SCHED_FEAT_UTIL_EST = 23, + __SCHED_FEAT_LATENCY_WARN = 24, + __SCHED_FEAT_NR = 25, +}; + +struct affinity_context { + const struct cpumask *new_mask; + struct cpumask *user_mask; + unsigned int flags; +}; + +typedef struct { + struct rq *lock; + struct rq *lock2; +} class_double_rq_lock_t; + +struct sched_enq_and_set_ctx { + struct task_struct *p; + int queue_flags; + bool queued; + bool running; +}; + +struct idle_timer { + struct hrtimer timer; + int done; + long: 32; +}; + +typedef struct rt_rq *rt_rq_iter_t; + +enum dl_bw_request { + dl_bw_req_check_overflow = 0, + dl_bw_req_alloc = 1, + dl_bw_req_free = 2, +}; + +enum scx_consts { + SCX_DSP_DFL_MAX_BATCH = 32, + SCX_DSP_MAX_LOOPS = 32, + SCX_WATCHDOG_MAX_TIMEOUT = 30000, + SCX_EXIT_BT_LEN = 64, + SCX_EXIT_MSG_LEN = 1024, + SCX_EXIT_DUMP_DFL_LEN = 32768, + SCX_CPUPERF_ONE = 1024, + SCX_OPS_TASK_ITER_BATCH = 32, +}; + +enum scx_exit_kind { + SCX_EXIT_NONE = 0, + SCX_EXIT_DONE = 1, + SCX_EXIT_UNREG = 64, + SCX_EXIT_UNREG_BPF = 65, + SCX_EXIT_UNREG_KERN = 66, + SCX_EXIT_SYSRQ = 67, + SCX_EXIT_ERROR = 1024, + SCX_EXIT_ERROR_BPF = 1025, + SCX_EXIT_ERROR_STALL = 1026, +}; + +enum scx_exit_code { + SCX_ECODE_RSN_HOTPLUG = 4294967296ULL, + SCX_ECODE_ACT_RESTART = 281474976710656ULL, +}; + +struct scx_exit_info { + enum scx_exit_kind kind; + long: 32; + s64 exit_code; + const char *reason; + long unsigned int *bt; + u32 bt_len; + char *msg; + char *dump; + long: 32; +}; + +enum scx_ops_flags { + SCX_OPS_KEEP_BUILTIN_IDLE = 1, + SCX_OPS_ENQ_LAST = 2, + SCX_OPS_ENQ_EXITING = 4, + SCX_OPS_SWITCH_PARTIAL = 8, + SCX_OPS_HAS_CGROUP_WEIGHT = 65536, + SCX_OPS_ALL_FLAGS = 65551, +}; + +struct scx_init_task_args { + bool fork; + struct cgroup *cgroup; +}; + +struct scx_exit_task_args { + bool cancelled; +}; + +struct scx_cgroup_init_args { + u32 weight; +}; + +enum scx_cpu_preempt_reason { + SCX_CPU_PREEMPT_RT = 0, + SCX_CPU_PREEMPT_DL = 1, + 
SCX_CPU_PREEMPT_STOP = 2, + SCX_CPU_PREEMPT_UNKNOWN = 3, +}; + +struct scx_cpu_acquire_args {}; + +struct scx_cpu_release_args { + enum scx_cpu_preempt_reason reason; + struct task_struct *task; +}; + +struct scx_dump_ctx { + enum scx_exit_kind kind; + long: 32; + s64 exit_code; + const char *reason; + long: 32; + u64 at_ns; + u64 at_jiffies; +}; + +struct sched_ext_ops { + s32 (*select_cpu)(struct task_struct *, s32, u64); + void (*enqueue)(struct task_struct *, u64); + void (*dequeue)(struct task_struct *, u64); + void (*dispatch)(s32, struct task_struct *); + void (*tick)(struct task_struct *); + void (*runnable)(struct task_struct *, u64); + void (*running)(struct task_struct *); + void (*stopping)(struct task_struct *, bool); + void (*quiescent)(struct task_struct *, u64); + bool (*yield)(struct task_struct *, struct task_struct *); + bool (*core_sched_before)(struct task_struct *, struct task_struct *); + void (*set_weight)(struct task_struct *, u32); + void (*set_cpumask)(struct task_struct *, const struct cpumask *); + void (*update_idle)(s32, bool); + void (*cpu_acquire)(s32, struct scx_cpu_acquire_args *); + void (*cpu_release)(s32, struct scx_cpu_release_args *); + s32 (*init_task)(struct task_struct *, struct scx_init_task_args *); + void (*exit_task)(struct task_struct *, struct scx_exit_task_args *); + void (*enable)(struct task_struct *); + void (*disable)(struct task_struct *); + void (*dump)(struct scx_dump_ctx *); + void (*dump_cpu)(struct scx_dump_ctx *, s32, bool); + void (*dump_task)(struct scx_dump_ctx *, struct task_struct *); + s32 (*cgroup_init)(struct cgroup *, struct scx_cgroup_init_args *); + void (*cgroup_exit)(struct cgroup *); + s32 (*cgroup_prep_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_cancel_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_set_weight)(struct cgroup *, u32); + void (*cpu_online)(s32); + void (*cpu_offline)(s32); + s32 (*init)(); + void (*exit)(struct scx_exit_info *); + u32 dispatch_max_batch; + u64 flags; + u32 timeout_ms; + u32 exit_dump_len; + u64 hotplug_seq; + char name[128]; +}; + +enum scx_opi { + SCX_OPI_BEGIN = 0, + SCX_OPI_NORMAL_BEGIN = 0, + SCX_OPI_NORMAL_END = 29, + SCX_OPI_CPU_HOTPLUG_BEGIN = 29, + SCX_OPI_CPU_HOTPLUG_END = 31, + SCX_OPI_END = 31, +}; + +enum scx_wake_flags { + SCX_WAKE_FORK = 4, + SCX_WAKE_TTWU = 8, + SCX_WAKE_SYNC = 16, +}; + +enum scx_enq_flags { + SCX_ENQ_WAKEUP = 1ULL, + SCX_ENQ_HEAD = 16ULL, + SCX_ENQ_CPU_SELECTED = 1024ULL, + SCX_ENQ_PREEMPT = 4294967296ULL, + SCX_ENQ_REENQ = 1099511627776ULL, + SCX_ENQ_LAST = 2199023255552ULL, + __SCX_ENQ_INTERNAL_MASK = 18374686479671623680ULL, + SCX_ENQ_CLEAR_OPSS = 72057594037927936ULL, + SCX_ENQ_DSQ_PRIQ = 144115188075855872ULL, +}; + +enum scx_deq_flags { + SCX_DEQ_SLEEP = 1ULL, + SCX_DEQ_CORE_SCHED_EXEC = 4294967296ULL, +}; + +enum scx_pick_idle_cpu_flags { + SCX_PICK_IDLE_CORE = 1, +}; + +enum scx_kick_flags { + SCX_KICK_IDLE = 1, + SCX_KICK_PREEMPT = 2, + SCX_KICK_WAIT = 4, +}; + +enum scx_tg_flags { + SCX_TG_ONLINE = 1, + SCX_TG_INITED = 2, +}; + +enum scx_ops_enable_state { + SCX_OPS_ENABLING = 0, + SCX_OPS_ENABLED = 1, + SCX_OPS_DISABLING = 2, + SCX_OPS_DISABLED = 3, +}; + +enum scx_ops_state { + SCX_OPSS_NONE = 0, + SCX_OPSS_QUEUEING = 1, + SCX_OPSS_QUEUED = 2, + SCX_OPSS_DISPATCHING = 3, + SCX_OPSS_QSEQ_SHIFT = 2, +}; + +struct scx_dsp_buf_ent { + struct task_struct *task; + long unsigned int qseq; + u64 dsq_id; 
+ u64 enq_flags; +}; + +struct scx_dsp_ctx { + struct rq *rq; + u32 cursor; + u32 nr_tasks; + long: 32; + struct scx_dsp_buf_ent buf[0]; +}; + +struct scx_bstr_buf { + u64 data[12]; + char line[1024]; +}; + +struct scx_dump_data { + s32 cpu; + bool first; + s32 cursor; + struct seq_buf *s; + const char *prefix; + long: 32; + struct scx_bstr_buf buf; +}; + +struct trace_event_raw_sched_ext_dump { + struct trace_entry ent; + u32 __data_loc_line; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_ext_dump { + u32 line; + const void *line_ptr_; +}; + +typedef void (*btf_trace_sched_ext_dump)(void *, const char *); + +enum scx_dsq_iter_flags { + SCX_DSQ_ITER_REV = 65536, + __SCX_DSQ_ITER_HAS_SLICE = 1073741824, + __SCX_DSQ_ITER_HAS_VTIME = 2147483648, + __SCX_DSQ_ITER_USER_FLAGS = 65536, + __SCX_DSQ_ITER_ALL_FLAGS = 3221291008, +}; + +struct bpf_iter_scx_dsq_kern { + struct scx_dsq_list_node cursor; + struct scx_dispatch_q *dsq; + long: 32; + u64 slice; + u64 vtime; +}; + +struct bpf_iter_scx_dsq { + u64 __opaque[6]; +}; + +struct scx_task_iter { + struct sched_ext_entity cursor; + struct task_struct *locked; + struct rq *rq; + struct rq_flags rf; + u32 cnt; + long: 32; +}; + +typedef struct task_struct *class_find_get_task_t; + +struct bpf_struct_ops_sched_ext_ops { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_ext_ops data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef long unsigned int perf_trace_t[2048]; + +enum bpf_task_fd_type { + BPF_FD_TYPE_RAW_TRACEPOINT = 0, + BPF_FD_TYPE_TRACEPOINT = 1, + BPF_FD_TYPE_KPROBE = 2, + BPF_FD_TYPE_KRETPROBE = 3, + BPF_FD_TYPE_UPROBE = 4, + BPF_FD_TYPE_URETPROBE = 5, +}; + +struct uprobe_consumer { + int (*handler)(struct uprobe_consumer *, struct pt_regs *, __u64 *); + int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *, __u64 *); + bool (*filter)(struct uprobe_consumer *, struct mm_struct *); + struct list_head cons_node; + long: 32; + __u64 id; +}; + +typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *, const void *); + +struct trace_uprobe; + +struct uprobe_dispatch_data { + struct trace_uprobe *tu; + long unsigned int bp_addr; +}; + +struct trace_uprobe { + struct dyn_event devent; + long: 32; + struct uprobe_consumer consumer; + struct path path; + char *filename; + struct uprobe *uprobe; + long unsigned int offset; + long unsigned int ref_ctr_offset; + long unsigned int *nhits; + struct trace_probe tp; + long: 32; +}; + +struct uprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int vaddr[0]; +}; + +struct uprobe_cpu_buffer { + struct mutex mutex; + void *buf; + int dsize; +}; + +typedef bool (*filter_func_t)(struct uprobe_consumer *, struct mm_struct *); + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[8]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __u64 jited_prog_insns; + __u64 xlated_prog_insns; + __u64 load_time; + __u32 created_by_uid; + __u32 nr_map_ids; + __u64 map_ids; + char name[16]; + __u32 ifindex; + __u32 gpl_compatible: 1; + __u64 netns_dev; + __u64 netns_ino; + __u32 nr_jited_ksyms; + __u32 nr_jited_func_lens; + __u64 jited_ksyms; + __u64 jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __u64 func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __u64 line_info; + __u64 jited_line_info; + __u32 
nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __u64 prog_tags; + __u64 run_time_ns; + __u64 run_cnt; + __u64 recursion_misses; + __u32 verified_insns; + __u32 attach_btf_obj_id; + __u32 attach_btf_id; + long: 32; +}; + +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *, int, int); + int (*finalize)(struct bpf_verifier_env *); + int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *); + int (*remove_insns)(struct bpf_verifier_env *, u32, u32); + int (*prepare)(struct bpf_prog *); + int (*translate)(struct bpf_prog *); + void (*destroy)(struct bpf_prog *); +}; + +struct bpf_offload_dev { + const struct bpf_prog_offload_ops *ops; + struct list_head netdevs; + void *priv; +}; + +enum xdp_rx_metadata { + XDP_METADATA_KFUNC_RX_TIMESTAMP = 0, + XDP_METADATA_KFUNC_RX_HASH = 1, + XDP_METADATA_KFUNC_RX_VLAN_TAG = 2, + MAX_XDP_METADATA_KFUNC = 3, +}; + +struct bpf_offload_netdev { + struct rhash_head l; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + struct list_head progs; + struct list_head maps; + struct list_head offdev_netdevs; +}; + +struct ns_get_path_bpf_prog_args { + struct bpf_prog *prog; + struct bpf_prog_info *info; +}; + +struct ns_get_path_bpf_map_args { + struct bpf_offloaded_map *offmap; + struct bpf_map_info *info; +}; + +enum slab_state { + DOWN = 0, + PARTIAL = 1, + UP = 2, + FULL = 3, +}; + +struct kmalloc_info_struct { + const char *name[3]; + unsigned int size; +}; + +struct slabinfo { + long unsigned int active_objs; + long unsigned int num_objs; + long unsigned int active_slabs; + long unsigned int num_slabs; + long unsigned int shared_avail; + unsigned int limit; + unsigned int batchcount; + unsigned int shared; + unsigned int objects_per_slab; + unsigned int cache_order; +}; + +struct kmem_obj_info { + void *kp_ptr; + struct slab *kp_slab; + void *kp_objp; + long unsigned int kp_data_offset; + struct kmem_cache *kp_slab_cache; + void *kp_ret; + void *kp_stack[16]; + void *kp_free_stack[16]; +}; + +struct trace_event_raw_kmem_cache_alloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + long unsigned int gfp_flags; + int node; + bool accounted; + char __data[0]; +}; + +struct trace_event_raw_kmalloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + long unsigned int gfp_flags; + int node; + char __data[0]; +}; + +struct trace_event_raw_kfree { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + char __data[0]; +}; + +struct trace_event_raw_kmem_cache_free { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free_batched { + struct trace_entry ent; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + long unsigned int gfp_flags; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + int percpu_refill; + char __data[0]; +}; + +struct trace_event_raw_mm_page_pcpu_drain { + struct trace_entry ent; + long unsigned int pfn; + unsigned int 
order; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc_extfrag { + struct trace_entry ent; + long unsigned int pfn; + int alloc_order; + int fallback_order; + int alloc_migratetype; + int fallback_migratetype; + int change_ownership; + char __data[0]; +}; + +struct trace_event_raw_mm_alloc_contig_migrate_range_info { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + long unsigned int nr_migrated; + long unsigned int nr_reclaimed; + long unsigned int nr_mapped; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_rss_stat { + struct trace_entry ent; + unsigned int mm_id; + unsigned int curr; + int member; + long int size; + char __data[0]; +}; + +struct trace_event_data_offsets_kmem_cache_alloc {}; + +struct trace_event_data_offsets_kmalloc {}; + +struct trace_event_data_offsets_kfree {}; + +struct trace_event_data_offsets_kmem_cache_free { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_mm_page_free {}; + +struct trace_event_data_offsets_mm_page_free_batched {}; + +struct trace_event_data_offsets_mm_page_alloc {}; + +struct trace_event_data_offsets_mm_page {}; + +struct trace_event_data_offsets_mm_page_pcpu_drain {}; + +struct trace_event_data_offsets_mm_page_alloc_extfrag {}; + +struct trace_event_data_offsets_mm_alloc_contig_migrate_range_info {}; + +struct trace_event_data_offsets_rss_stat {}; + +typedef void (*btf_trace_kmem_cache_alloc)(void *, long unsigned int, const void *, struct kmem_cache *, gfp_t, int); + +typedef void (*btf_trace_kmalloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int); + +typedef void (*btf_trace_kfree)(void *, long unsigned int, const void *); + +typedef void (*btf_trace_kmem_cache_free)(void *, long unsigned int, const void *, const struct kmem_cache *); + +typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int); + +typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *); + +typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int); + +typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, int, int); + +typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int); + +typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int); + +typedef void (*btf_trace_mm_alloc_contig_migrate_range_info)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int); + +struct kasan_report_info { + enum kasan_report_type type; + const void *access_addr; + size_t access_size; + bool is_write; + long unsigned int ip; + const void *first_bad_addr; + struct kmem_cache *cache; + void *object; + size_t alloc_size; + const char *bug_type; + struct kasan_track alloc_track; + struct kasan_track free_track; +}; + +enum legacy_fs_param { + LEGACY_FS_UNSET_PARAMS = 0, + LEGACY_FS_MONOLITHIC_PARAMS = 1, + LEGACY_FS_INDIVIDUAL_PARAMS = 2, +}; + +struct legacy_fs_context { + char *legacy_data; + size_t data_size; + enum legacy_fs_param param_type; +}; + +struct kernfs_global_locks { + struct mutex open_file_mutex[1024]; +}; + +struct kernfs_open_node { + struct callback_head callback_head; + atomic_t event; + wait_queue_head_t poll; + struct list_head files; + unsigned int nr_mmapped; + unsigned int nr_to_release; +}; + +struct kernfs_super_info { + struct super_block *sb; + struct kernfs_root 
*root; + const void *ns; + struct list_head node; +}; + +typedef int (*writepage_t)(struct folio *, struct writeback_control *, void *); + +struct dax_holder_operations { + int (*notify_failure)(struct dax_device *, u64, u64, int); +}; + +struct fs_error_report { + int error; + struct inode *inode; + struct super_block *sb; +}; + +struct ext4_lazy_init { + long unsigned int li_state; + struct list_head li_request_list; + struct mutex li_list_mtx; +}; + +struct partial_cluster { + ext4_fsblk_t pclu; + ext4_lblk_t lblk; + enum { + initial = 0, + tofree = 1, + nofree = 2, + } state; +}; + +struct ext4_journal_cb_entry { + struct list_head jce_list; + void (*jce_func)(struct super_block *, struct ext4_journal_cb_entry *, int); +}; + +struct ext4_prealloc_space { + union { + struct rb_node inode_node; + struct list_head lg_list; + } pa_node; + struct list_head pa_group_list; + union { + struct list_head pa_tmp_list; + struct callback_head pa_rcu; + } u; + spinlock_t pa_lock; + atomic_t pa_count; + unsigned int pa_deleted; + ext4_fsblk_t pa_pstart; + ext4_lblk_t pa_lstart; + ext4_grpblk_t pa_len; + ext4_grpblk_t pa_free; + short unsigned int pa_type; + union { + rwlock_t *inode_lock; + spinlock_t *lg_lock; + } pa_node_lock; + struct inode *pa_inode; +}; + +struct ext4_free_extent { + ext4_lblk_t fe_logical; + ext4_grpblk_t fe_start; + ext4_group_t fe_group; + ext4_grpblk_t fe_len; +}; + +struct ext4_allocation_context { + struct inode *ac_inode; + struct super_block *ac_sb; + struct ext4_free_extent ac_o_ex; + struct ext4_free_extent ac_g_ex; + struct ext4_free_extent ac_b_ex; + struct ext4_free_extent ac_f_ex; + ext4_grpblk_t ac_orig_goal_len; + __u32 ac_flags; + __u32 ac_groups_linear_remaining; + __u16 ac_groups_scanned; + __u16 ac_found; + __u16 ac_cX_found[5]; + __u16 ac_tail; + __u16 ac_buddy; + __u8 ac_status; + __u8 ac_criteria; + __u8 ac_2order; + __u8 ac_op; + struct folio *ac_bitmap_folio; + struct folio *ac_buddy_folio; + struct ext4_prealloc_space *ac_pa; + struct ext4_locality_group *ac_lg; +}; + +struct trace_event_raw_ext4_other_inode_update_time { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t orig_ino; + uid_t uid; + gid_t gid; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_free_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + uid_t uid; + gid_t gid; + __u64 blocks; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_request_inode { + struct trace_entry ent; + dev_t dev; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_allocate_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_evict_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int nlink; + char __data[0]; +}; + +struct trace_event_raw_ext4_drop_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int drop; + char __data[0]; +}; + +struct trace_event_raw_ext4_nfs_commit_metadata { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_mark_inode_dirty { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_ext4_begin_ordered_truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t new_size; + char __data[0]; +}; + +struct trace_event_raw_ext4__write_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + char __data[0]; + long: 32; +}; + 
+struct trace_event_raw_ext4__write_end { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + unsigned int copied; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + long unsigned int writeback_index; + int sync_mode; + char for_kupdate; + char range_cyclic; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_da_write_pages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int first_page; + long int nr_to_write; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_write_pages_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 lblk; + __u32 len; + __u32 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages_result { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + int pages_written; + long int pages_skipped; + long unsigned int writeback_index; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4__folio_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_ext4_invalidate_folio_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + size_t offset; + size_t length; + char __data[0]; +}; + +struct trace_event_raw_ext4_discard_blocks { + struct trace_entry ent; + dev_t dev; + long: 32; + __u64 blk; + __u64 count; + char __data[0]; +}; + +struct trace_event_raw_ext4__mb_new_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 pa_pstart; + __u64 pa_lstart; + __u32 pa_len; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_mb_release_inode_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + __u32 count; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_mb_release_group_pa { + struct trace_entry ent; + dev_t dev; + long: 32; + __u64 pa_pstart; + __u32 pa_len; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_discard_preallocations { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_discard_preallocations { + struct trace_entry ent; + dev_t dev; + int needed; + char __data[0]; +}; + +struct trace_event_raw_ext4_request_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_allocate_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_free_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + long unsigned int count; + int flags; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_sync_file_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + int datasync; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_file_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_fs { + struct trace_entry ent; + dev_t dev; + int wait; + char __data[0]; +}; + +struct 
trace_event_raw_ext4_alloc_da_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int data_blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_alloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 goal_logical; + int goal_start; + __u32 goal_group; + int goal_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + __u16 found; + __u16 groups; + __u16 buddy; + __u16 flags; + __u16 tail; + __u8 cr; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_prealloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4__mballoc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_forget { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + int is_metadata; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_update_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int used_blocks; + int reserved_data_blocks; + int quota_claim; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int reserve_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_da_release_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int freed_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4__bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_read_block_bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + bool prefetch; + char __data[0]; +}; + +struct trace_event_raw_ext4__fallocate_mode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + int mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_fallocate_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int blocks; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + long: 32; + loff_t size; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4__truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_fastpath { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + ext4_lblk_t i_lblk; + unsigned int i_len; + ext4_fsblk_t i_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_enter { + struct 
trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + unsigned int len; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int flags; + long: 32; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + unsigned int len; + unsigned int mflags; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_load_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_load_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_sb { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + int revoke_creds; + int type; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_inode { + struct trace_entry ent; + long unsigned int ino; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + int revoke_creds; + int type; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_reserved { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4__trim { + struct trace_entry ent; + int dev_major; + int dev_minor; + __u32 group; + int start; + int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_handle_unwritten_extents { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int flags; + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + unsigned int len; + unsigned int allocated; + ext4_fsblk_t newblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_get_implied_cluster_alloc_exit { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + ext4_lblk_t lblk; + long: 32; + ext4_fsblk_t pblk; + unsigned int len; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_show_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + short unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_remove_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t from; + ext4_lblk_t to; + ext4_fsblk_t ee_pblk; + ext4_lblk_t ee_lblk; + short unsigned int ee_len; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_leaf { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t ee_lblk; + ext4_fsblk_t ee_pblk; + short int ee_len; + long: 32; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_idx { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space_done { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + long: 32; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + short unsigned int eh_entries; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4__es_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_es_remove_extent { + 
struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t lblk; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_es_lookup_extent_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_lookup_extent_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + int found; + char __data[0]; +}; + +struct trace_event_raw_ext4__es_shrink_enter { + struct trace_entry ent; + dev_t dev; + int nr_to_scan; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink_scan_exit { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_collapse_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_insert_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + long long unsigned int scan_time; + int nr_skipped; + int retried; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_insert_delayed_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + bool lclu_allocated; + bool end_allocated; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_fsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u32 agno; + long: 32; + u64 bno; + u64 len; + u64 owner; + char __data[0]; +}; + +struct trace_event_raw_ext4_getfsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u64 block; + u64 len; + u64 owner; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_shutdown { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_error { + struct trace_entry ent; + dev_t dev; + const char *function; + unsigned int line; + char __data[0]; +}; + +struct trace_event_raw_ext4_prefetch_bitmaps { + struct trace_entry ent; + dev_t dev; + __u32 group; + __u32 next; + __u32 ios; + char __data[0]; +}; + +struct trace_event_raw_ext4_lazy_itable_init { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay_scan { + struct trace_entry ent; + dev_t dev; + int error; + int off; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay { + struct trace_entry ent; + dev_t dev; + int tag; + int ino; + int priv1; + int priv2; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_start { + struct trace_entry ent; + dev_t dev; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_stop { + struct trace_entry ent; + dev_t dev; + int nblks; + int reason; + int num_fc; + int num_fc_ineligible; + int nblks_agg; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_stats { + struct trace_entry ent; + dev_t dev; + unsigned int fc_ineligible_rc[10]; + long unsigned int 
fc_commits; + long unsigned int fc_ineligible_commits; + long unsigned int fc_numblks; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_dentry { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_inode { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_range { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + long int start; + long int end; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_cleanup { + struct trace_entry ent; + dev_t dev; + int j_fc_off; + int full; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_update_sb { + struct trace_entry ent; + dev_t dev; + long: 32; + ext4_fsblk_t fsblk; + unsigned int flags; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_ext4_other_inode_update_time {}; + +struct trace_event_data_offsets_ext4_free_inode {}; + +struct trace_event_data_offsets_ext4_request_inode {}; + +struct trace_event_data_offsets_ext4_allocate_inode {}; + +struct trace_event_data_offsets_ext4_evict_inode {}; + +struct trace_event_data_offsets_ext4_drop_inode {}; + +struct trace_event_data_offsets_ext4_nfs_commit_metadata {}; + +struct trace_event_data_offsets_ext4_mark_inode_dirty {}; + +struct trace_event_data_offsets_ext4_begin_ordered_truncate {}; + +struct trace_event_data_offsets_ext4__write_begin {}; + +struct trace_event_data_offsets_ext4__write_end {}; + +struct trace_event_data_offsets_ext4_writepages {}; + +struct trace_event_data_offsets_ext4_da_write_pages {}; + +struct trace_event_data_offsets_ext4_da_write_pages_extent {}; + +struct trace_event_data_offsets_ext4_writepages_result {}; + +struct trace_event_data_offsets_ext4__folio_op {}; + +struct trace_event_data_offsets_ext4_invalidate_folio_op {}; + +struct trace_event_data_offsets_ext4_discard_blocks {}; + +struct trace_event_data_offsets_ext4__mb_new_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_inode_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_group_pa {}; + +struct trace_event_data_offsets_ext4_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_mb_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_request_blocks {}; + +struct trace_event_data_offsets_ext4_allocate_blocks {}; + +struct trace_event_data_offsets_ext4_free_blocks {}; + +struct trace_event_data_offsets_ext4_sync_file_enter {}; + +struct trace_event_data_offsets_ext4_sync_file_exit {}; + +struct trace_event_data_offsets_ext4_sync_fs {}; + +struct trace_event_data_offsets_ext4_alloc_da_blocks {}; + +struct trace_event_data_offsets_ext4_mballoc_alloc {}; + +struct trace_event_data_offsets_ext4_mballoc_prealloc {}; + +struct trace_event_data_offsets_ext4__mballoc {}; + +struct trace_event_data_offsets_ext4_forget {}; + +struct trace_event_data_offsets_ext4_da_update_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_release_space {}; + +struct trace_event_data_offsets_ext4__bitmap_load {}; + +struct trace_event_data_offsets_ext4_read_block_bitmap_load {}; + +struct trace_event_data_offsets_ext4__fallocate_mode {}; + +struct trace_event_data_offsets_ext4_fallocate_exit {}; + +struct trace_event_data_offsets_ext4_unlink_enter {}; + +struct trace_event_data_offsets_ext4_unlink_exit {}; + 
+struct trace_event_data_offsets_ext4__truncate {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_enter {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_fastpath {}; + +struct trace_event_data_offsets_ext4__map_blocks_enter {}; + +struct trace_event_data_offsets_ext4__map_blocks_exit {}; + +struct trace_event_data_offsets_ext4_ext_load_extent {}; + +struct trace_event_data_offsets_ext4_load_inode {}; + +struct trace_event_data_offsets_ext4_journal_start_sb {}; + +struct trace_event_data_offsets_ext4_journal_start_inode {}; + +struct trace_event_data_offsets_ext4_journal_start_reserved {}; + +struct trace_event_data_offsets_ext4__trim {}; + +struct trace_event_data_offsets_ext4_ext_handle_unwritten_extents {}; + +struct trace_event_data_offsets_ext4_get_implied_cluster_alloc_exit {}; + +struct trace_event_data_offsets_ext4_ext_show_extent {}; + +struct trace_event_data_offsets_ext4_remove_blocks {}; + +struct trace_event_data_offsets_ext4_ext_rm_leaf {}; + +struct trace_event_data_offsets_ext4_ext_rm_idx {}; + +struct trace_event_data_offsets_ext4_ext_remove_space {}; + +struct trace_event_data_offsets_ext4_ext_remove_space_done {}; + +struct trace_event_data_offsets_ext4__es_extent {}; + +struct trace_event_data_offsets_ext4_es_remove_extent {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_enter {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_exit {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_enter {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_exit {}; + +struct trace_event_data_offsets_ext4__es_shrink_enter {}; + +struct trace_event_data_offsets_ext4_es_shrink_scan_exit {}; + +struct trace_event_data_offsets_ext4_collapse_range {}; + +struct trace_event_data_offsets_ext4_insert_range {}; + +struct trace_event_data_offsets_ext4_es_shrink {}; + +struct trace_event_data_offsets_ext4_es_insert_delayed_extent {}; + +struct trace_event_data_offsets_ext4_fsmap_class {}; + +struct trace_event_data_offsets_ext4_getfsmap_class {}; + +struct trace_event_data_offsets_ext4_shutdown {}; + +struct trace_event_data_offsets_ext4_error {}; + +struct trace_event_data_offsets_ext4_prefetch_bitmaps {}; + +struct trace_event_data_offsets_ext4_lazy_itable_init {}; + +struct trace_event_data_offsets_ext4_fc_replay_scan {}; + +struct trace_event_data_offsets_ext4_fc_replay {}; + +struct trace_event_data_offsets_ext4_fc_commit_start {}; + +struct trace_event_data_offsets_ext4_fc_commit_stop {}; + +struct trace_event_data_offsets_ext4_fc_stats {}; + +struct trace_event_data_offsets_ext4_fc_track_dentry {}; + +struct trace_event_data_offsets_ext4_fc_track_inode {}; + +struct trace_event_data_offsets_ext4_fc_track_range {}; + +struct trace_event_data_offsets_ext4_fc_cleanup {}; + +struct trace_event_data_offsets_ext4_update_sb {}; + +typedef void (*btf_trace_ext4_other_inode_update_time)(void *, struct inode *, ino_t); + +typedef void (*btf_trace_ext4_free_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_request_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_allocate_inode)(void *, struct inode *, struct inode *, int); + +typedef void (*btf_trace_ext4_evict_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_drop_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_nfs_commit_metadata)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mark_inode_dirty)(void *, struct inode *, long unsigned int); + +typedef void 
(*btf_trace_ext4_begin_ordered_truncate)(void *, struct inode *, loff_t); + +typedef void (*btf_trace_ext4_write_begin)(void *, struct inode *, loff_t, unsigned int); + +typedef void (*btf_trace_ext4_da_write_begin)(void *, struct inode *, loff_t, unsigned int); + +typedef void (*btf_trace_ext4_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_journalled_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_da_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_writepages)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages)(void *, struct inode *, long unsigned int, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages_extent)(void *, struct inode *, struct ext4_map_blocks *); + +typedef void (*btf_trace_ext4_writepages_result)(void *, struct inode *, struct writeback_control *, int, int); + +typedef void (*btf_trace_ext4_read_folio)(void *, struct inode *, struct folio *); + +typedef void (*btf_trace_ext4_release_folio)(void *, struct inode *, struct folio *); + +typedef void (*btf_trace_ext4_invalidate_folio)(void *, struct folio *, size_t, size_t); + +typedef void (*btf_trace_ext4_journalled_invalidate_folio)(void *, struct folio *, size_t, size_t); + +typedef void (*btf_trace_ext4_discard_blocks)(void *, struct super_block *, long long unsigned int, long long unsigned int); + +typedef void (*btf_trace_ext4_mb_new_inode_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_new_group_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_release_inode_pa)(void *, struct ext4_prealloc_space *, long long unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_mb_release_group_pa)(void *, struct super_block *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_discard_preallocations)(void *, struct inode *, unsigned int); + +typedef void (*btf_trace_ext4_mb_discard_preallocations)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_request_blocks)(void *, struct ext4_allocation_request *); + +typedef void (*btf_trace_ext4_allocate_blocks)(void *, struct ext4_allocation_request *, long long unsigned int); + +typedef void (*btf_trace_ext4_free_blocks)(void *, struct inode *, __u64, long unsigned int, int); + +typedef void (*btf_trace_ext4_sync_file_enter)(void *, struct file *, int); + +typedef void (*btf_trace_ext4_sync_file_exit)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_sync_fs)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_alloc_da_blocks)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mballoc_alloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_prealloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_discard)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_mballoc_free)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_forget)(void *, struct inode *, int, __u64); + +typedef void (*btf_trace_ext4_da_update_reserve_space)(void *, struct inode *, int, int); + +typedef void (*btf_trace_ext4_da_reserve_space)(void *, struct inode *, int); + 
+typedef void (*btf_trace_ext4_da_release_space)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_mb_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_mb_buddy_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_load_inode_bitmap)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_read_block_bitmap_load)(void *, struct super_block *, long unsigned int, bool); + +typedef void (*btf_trace_ext4_fallocate_enter)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_punch_hole)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_zero_range)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_fallocate_exit)(void *, struct inode *, loff_t, unsigned int, int); + +typedef void (*btf_trace_ext4_unlink_enter)(void *, struct inode *, struct dentry *); + +typedef void (*btf_trace_ext4_unlink_exit)(void *, struct dentry *, int); + +typedef void (*btf_trace_ext4_truncate_enter)(void *, struct inode *); + +typedef void (*btf_trace_ext4_truncate_exit)(void *, struct inode *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_enter)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_fastpath)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ind_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ext_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ind_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_load_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_load_inode)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_sb)(void *, struct super_block *, int, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_inode)(void *, struct inode *, int, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_reserved)(void *, struct super_block *, int, long unsigned int); + +typedef void (*btf_trace_ext4_trim_extent)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_trim_all_free)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_ext_handle_unwritten_extents)(void *, struct inode *, struct ext4_map_blocks *, int, unsigned int, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_get_implied_cluster_alloc_exit)(void *, struct super_block *, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_show_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t, short unsigned int); + +typedef void (*btf_trace_ext4_remove_blocks)(void *, struct inode *, struct ext4_extent *, ext4_lblk_t, ext4_fsblk_t, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_leaf)(void *, struct inode *, ext4_lblk_t, struct ext4_extent *, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_idx)(void *, struct inode *, ext4_fsblk_t); + +typedef void 
(*btf_trace_ext4_ext_remove_space)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int); + +typedef void (*btf_trace_ext4_ext_remove_space_done)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, struct partial_cluster *, __le16); + +typedef void (*btf_trace_ext4_es_insert_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_cache_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_remove_extent)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_exit)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_lookup_extent_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_lookup_extent_exit)(void *, struct inode *, struct extent_status *, int); + +typedef void (*btf_trace_ext4_es_shrink_count)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_enter)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_exit)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_collapse_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_insert_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_es_shrink)(void *, struct super_block *, int, u64, int, int); + +typedef void (*btf_trace_ext4_es_insert_delayed_extent)(void *, struct inode *, struct extent_status *, bool, bool); + +typedef void (*btf_trace_ext4_fsmap_low_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_high_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_mapping)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_getfsmap_low_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_high_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_mapping)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_shutdown)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_error)(void *, struct super_block *, const char *, unsigned int); + +typedef void (*btf_trace_ext4_prefetch_bitmaps)(void *, struct super_block *, ext4_group_t, ext4_group_t, unsigned int); + +typedef void (*btf_trace_ext4_lazy_itable_init)(void *, struct super_block *, ext4_group_t); + +typedef void (*btf_trace_ext4_fc_replay_scan)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_fc_replay)(void *, struct super_block *, int, int, int, int); + +typedef void (*btf_trace_ext4_fc_commit_start)(void *, struct super_block *, tid_t); + +typedef void (*btf_trace_ext4_fc_commit_stop)(void *, struct super_block *, int, int, tid_t); + +typedef void (*btf_trace_ext4_fc_stats)(void *, struct super_block *); + +typedef void (*btf_trace_ext4_fc_track_create)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_link)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_unlink)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_inode)(void *, handle_t *, struct inode *, int); + +typedef void 
(*btf_trace_ext4_fc_track_range)(void *, handle_t *, struct inode *, long int, long int, int); + +typedef void (*btf_trace_ext4_fc_cleanup)(void *, journal_t *, int, tid_t); + +typedef void (*btf_trace_ext4_update_sb)(void *, struct super_block *, ext4_fsblk_t, unsigned int); + +struct ext4_err_translation { + int code; + int errno; +}; + +enum { + Opt_bsd_df = 0, + Opt_minix_df = 1, + Opt_grpid = 2, + Opt_nogrpid = 3, + Opt_resgid = 4, + Opt_resuid = 5, + Opt_sb = 6, + Opt_nouid32 = 7, + Opt_debug___2 = 8, + Opt_removed = 9, + Opt_user_xattr = 10, + Opt_acl___2 = 11, + Opt_auto_da_alloc = 12, + Opt_noauto_da_alloc = 13, + Opt_noload = 14, + Opt_commit = 15, + Opt_min_batch_time = 16, + Opt_max_batch_time = 17, + Opt_journal_dev = 18, + Opt_journal_path = 19, + Opt_journal_checksum = 20, + Opt_journal_async_commit = 21, + Opt_abort = 22, + Opt_data_journal = 23, + Opt_data_ordered = 24, + Opt_data_writeback = 25, + Opt_data_err_abort = 26, + Opt_data_err_ignore = 27, + Opt_test_dummy_encryption = 28, + Opt_inlinecrypt = 29, + Opt_usrjquota = 30, + Opt_grpjquota = 31, + Opt_quota___2 = 32, + Opt_noquota = 33, + Opt_barrier___2 = 34, + Opt_nobarrier = 35, + Opt_err___3 = 36, + Opt_usrquota___2 = 37, + Opt_grpquota___2 = 38, + Opt_prjquota = 39, + Opt_dax = 40, + Opt_dax_always = 41, + Opt_dax_inode = 42, + Opt_dax_never = 43, + Opt_stripe = 44, + Opt_delalloc = 45, + Opt_nodelalloc = 46, + Opt_warn_on_error = 47, + Opt_nowarn_on_error = 48, + Opt_mblk_io_submit = 49, + Opt_debug_want_extra_isize = 50, + Opt_nomblk_io_submit = 51, + Opt_block_validity = 52, + Opt_noblock_validity = 53, + Opt_inode_readahead_blks = 54, + Opt_journal_ioprio = 55, + Opt_dioread_nolock = 56, + Opt_dioread_lock = 57, + Opt_discard___3 = 58, + Opt_nodiscard = 59, + Opt_init_itable = 60, + Opt_noinit_itable = 61, + Opt_max_dir_size_kb = 62, + Opt_nojournal_checksum = 63, + Opt_nombcache = 64, + Opt_no_prefetch_block_bitmaps = 65, + Opt_mb_optimize_scan = 66, + Opt_errors___2 = 67, + Opt_data = 68, + Opt_data_err = 69, + Opt_jqfmt = 70, + Opt_dax_type = 71, +}; + +struct mount_opts { + int token; + int mount_opt; + int flags; +}; + +struct ext4_fs_context { + char *s_qf_names[3]; + struct fscrypt_dummy_policy dummy_enc_policy; + int s_jquota_fmt; + short unsigned int qname_spec; + long unsigned int vals_s_flags; + long unsigned int mask_s_flags; + long unsigned int journal_devnum; + long unsigned int s_commit_interval; + long unsigned int s_stripe; + unsigned int s_inode_readahead_blks; + unsigned int s_want_extra_isize; + unsigned int s_li_wait_mult; + unsigned int s_max_dir_size_kb; + unsigned int journal_ioprio; + unsigned int vals_s_mount_opt; + unsigned int mask_s_mount_opt; + unsigned int vals_s_mount_opt2; + unsigned int mask_s_mount_opt2; + unsigned int opt_flags; + unsigned int spec; + u32 s_max_batch_time; + u32 s_min_batch_time; + kuid_t s_resuid; + kgid_t s_resgid; + long: 32; + ext4_fsblk_t s_sb_block; +}; + +struct ext4_mount_options { + long unsigned int s_mount_opt; + long unsigned int s_mount_opt2; + kuid_t s_resuid; + kgid_t s_resgid; + long unsigned int s_commit_interval; + u32 s_min_batch_time; + u32 s_max_batch_time; +}; + +struct msg_msgseg { + struct msg_msgseg *next; +}; + +enum submit_disposition { + ASYNC_TX_SUBMITTED = 0, + ASYNC_TX_CHANNEL_SWITCH = 1, + ASYNC_TX_DIRECT_SUBMIT = 2, +}; + +struct badblocks_context { + sector_t start; + sector_t len; + int ack; + long: 32; +}; + +enum prio_policy { + POLICY_NO_CHANGE = 0, + POLICY_PROMOTE_TO_RT = 1, + POLICY_RESTRICT_TO_BE = 2, + 
POLICY_ALL_TO_IDLE = 3, + POLICY_NONE_TO_RT = 4, +}; + +struct ioprio_blkcg { + struct blkcg_policy_data cpd; + enum prio_policy prio_policy; +}; + +struct io_rename { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct io_unlink { + struct file *file; + int dfd; + int flags; + struct filename *filename; +}; + +struct io_mkdir { + struct file *file; + int dfd; + umode_t mode; + struct filename *filename; +}; + +struct io_link { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct ddebug_class_param { + union { + long unsigned int *bits; + unsigned int *lvl; + }; + char flags[8]; + const struct ddebug_class_map *map; +}; + +struct dim_cq_moder; + +struct dim_irq_moder { + u8 profile_flags; + u8 coal_flags; + u8 dim_rx_mode; + u8 dim_tx_mode; + struct dim_cq_moder *rx_profile; + struct dim_cq_moder *tx_profile; + void (*rx_dim_work)(struct work_struct *); + void (*tx_dim_work)(struct work_struct *); +}; + +struct dim_cq_moder { + u16 usec; + u16 pkts; + u16 comps; + u8 cq_period_mode; + struct callback_head rcu; +}; + +enum ib_uverbs_write_cmds { + IB_USER_VERBS_CMD_GET_CONTEXT = 0, + IB_USER_VERBS_CMD_QUERY_DEVICE = 1, + IB_USER_VERBS_CMD_QUERY_PORT = 2, + IB_USER_VERBS_CMD_ALLOC_PD = 3, + IB_USER_VERBS_CMD_DEALLOC_PD = 4, + IB_USER_VERBS_CMD_CREATE_AH = 5, + IB_USER_VERBS_CMD_MODIFY_AH = 6, + IB_USER_VERBS_CMD_QUERY_AH = 7, + IB_USER_VERBS_CMD_DESTROY_AH = 8, + IB_USER_VERBS_CMD_REG_MR = 9, + IB_USER_VERBS_CMD_REG_SMR = 10, + IB_USER_VERBS_CMD_REREG_MR = 11, + IB_USER_VERBS_CMD_QUERY_MR = 12, + IB_USER_VERBS_CMD_DEREG_MR = 13, + IB_USER_VERBS_CMD_ALLOC_MW = 14, + IB_USER_VERBS_CMD_BIND_MW = 15, + IB_USER_VERBS_CMD_DEALLOC_MW = 16, + IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = 17, + IB_USER_VERBS_CMD_CREATE_CQ = 18, + IB_USER_VERBS_CMD_RESIZE_CQ = 19, + IB_USER_VERBS_CMD_DESTROY_CQ = 20, + IB_USER_VERBS_CMD_POLL_CQ = 21, + IB_USER_VERBS_CMD_PEEK_CQ = 22, + IB_USER_VERBS_CMD_REQ_NOTIFY_CQ = 23, + IB_USER_VERBS_CMD_CREATE_QP = 24, + IB_USER_VERBS_CMD_QUERY_QP = 25, + IB_USER_VERBS_CMD_MODIFY_QP = 26, + IB_USER_VERBS_CMD_DESTROY_QP = 27, + IB_USER_VERBS_CMD_POST_SEND = 28, + IB_USER_VERBS_CMD_POST_RECV = 29, + IB_USER_VERBS_CMD_ATTACH_MCAST = 30, + IB_USER_VERBS_CMD_DETACH_MCAST = 31, + IB_USER_VERBS_CMD_CREATE_SRQ = 32, + IB_USER_VERBS_CMD_MODIFY_SRQ = 33, + IB_USER_VERBS_CMD_QUERY_SRQ = 34, + IB_USER_VERBS_CMD_DESTROY_SRQ = 35, + IB_USER_VERBS_CMD_POST_SRQ_RECV = 36, + IB_USER_VERBS_CMD_OPEN_XRCD = 37, + IB_USER_VERBS_CMD_CLOSE_XRCD = 38, + IB_USER_VERBS_CMD_CREATE_XSRQ = 39, + IB_USER_VERBS_CMD_OPEN_QP = 40, +}; + +enum ib_uverbs_wc_opcode { + IB_UVERBS_WC_SEND = 0, + IB_UVERBS_WC_RDMA_WRITE = 1, + IB_UVERBS_WC_RDMA_READ = 2, + IB_UVERBS_WC_COMP_SWAP = 3, + IB_UVERBS_WC_FETCH_ADD = 4, + IB_UVERBS_WC_BIND_MW = 5, + IB_UVERBS_WC_LOCAL_INV = 6, + IB_UVERBS_WC_TSO = 7, + IB_UVERBS_WC_FLUSH = 8, + IB_UVERBS_WC_ATOMIC_WRITE = 9, +}; + +enum ib_uverbs_create_qp_mask { + IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1, +}; + +enum ib_uverbs_wr_opcode { + IB_UVERBS_WR_RDMA_WRITE = 0, + IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, + IB_UVERBS_WR_SEND = 2, + IB_UVERBS_WR_SEND_WITH_IMM = 3, + IB_UVERBS_WR_RDMA_READ = 4, + IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, + IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, + IB_UVERBS_WR_LOCAL_INV = 7, + IB_UVERBS_WR_BIND_MW = 8, + IB_UVERBS_WR_SEND_WITH_INV = 9, + IB_UVERBS_WR_TSO = 10, + IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, + 
IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, + IB_UVERBS_WR_FLUSH = 14, + IB_UVERBS_WR_ATOMIC_WRITE = 15, +}; + +enum ib_uverbs_device_cap_flags { + IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1ULL, + IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 2ULL, + IB_UVERBS_DEVICE_BAD_QKEY_CNTR = 4ULL, + IB_UVERBS_DEVICE_RAW_MULTI = 8ULL, + IB_UVERBS_DEVICE_AUTO_PATH_MIG = 16ULL, + IB_UVERBS_DEVICE_CHANGE_PHY_PORT = 32ULL, + IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE = 64ULL, + IB_UVERBS_DEVICE_CURR_QP_STATE_MOD = 128ULL, + IB_UVERBS_DEVICE_SHUTDOWN_PORT = 256ULL, + IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT = 1024ULL, + IB_UVERBS_DEVICE_SYS_IMAGE_GUID = 2048ULL, + IB_UVERBS_DEVICE_RC_RNR_NAK_GEN = 4096ULL, + IB_UVERBS_DEVICE_SRQ_RESIZE = 8192ULL, + IB_UVERBS_DEVICE_N_NOTIFY_CQ = 16384ULL, + IB_UVERBS_DEVICE_MEM_WINDOW = 131072ULL, + IB_UVERBS_DEVICE_UD_IP_CSUM = 262144ULL, + IB_UVERBS_DEVICE_XRC = 1048576ULL, + IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS = 2097152ULL, + IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A = 8388608ULL, + IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B = 16777216ULL, + IB_UVERBS_DEVICE_RC_IP_CSUM = 33554432ULL, + IB_UVERBS_DEVICE_RAW_IP_CSUM = 67108864ULL, + IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING = 536870912ULL, + IB_UVERBS_DEVICE_RAW_SCATTER_FCS = 17179869184ULL, + IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING = 68719476736ULL, + IB_UVERBS_DEVICE_FLUSH_GLOBAL = 274877906944ULL, + IB_UVERBS_DEVICE_FLUSH_PERSISTENT = 549755813888ULL, + IB_UVERBS_DEVICE_ATOMIC_WRITE = 1099511627776ULL, +}; + +enum ib_uverbs_raw_packet_caps { + IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING = 1, + IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS = 2, + IB_UVERBS_RAW_PACKET_CAP_IP_CSUM = 4, + IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP = 8, +}; + +enum ib_uverbs_access_flags { + IB_UVERBS_ACCESS_LOCAL_WRITE = 1, + IB_UVERBS_ACCESS_REMOTE_WRITE = 2, + IB_UVERBS_ACCESS_REMOTE_READ = 4, + IB_UVERBS_ACCESS_REMOTE_ATOMIC = 8, + IB_UVERBS_ACCESS_MW_BIND = 16, + IB_UVERBS_ACCESS_ZERO_BASED = 32, + IB_UVERBS_ACCESS_ON_DEMAND = 64, + IB_UVERBS_ACCESS_HUGETLB = 128, + IB_UVERBS_ACCESS_FLUSH_GLOBAL = 256, + IB_UVERBS_ACCESS_FLUSH_PERSISTENT = 512, + IB_UVERBS_ACCESS_RELAXED_ORDERING = 1048576, + IB_UVERBS_ACCESS_OPTIONAL_RANGE = 1072693248, +}; + +enum ib_uverbs_srq_type { + IB_UVERBS_SRQT_BASIC = 0, + IB_UVERBS_SRQT_XRC = 1, + IB_UVERBS_SRQT_TM = 2, +}; + +enum ib_uverbs_wq_type { + IB_UVERBS_WQT_RQ = 0, +}; + +enum ib_uverbs_wq_flags { + IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1, + IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 2, + IB_UVERBS_WQ_FLAGS_DELAY_DROP = 4, + IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 8, +}; + +enum ib_uverbs_qp_type { + IB_UVERBS_QPT_RC = 2, + IB_UVERBS_QPT_UC = 3, + IB_UVERBS_QPT_UD = 4, + IB_UVERBS_QPT_RAW_PACKET = 8, + IB_UVERBS_QPT_XRC_INI = 9, + IB_UVERBS_QPT_XRC_TGT = 10, + IB_UVERBS_QPT_DRIVER = 255, +}; + +enum ib_uverbs_qp_create_flags { + IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2, + IB_UVERBS_QP_CREATE_SCATTER_FCS = 256, + IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 512, + IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 2048, + IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 4096, +}; + +enum ib_uverbs_gid_type { + IB_UVERBS_GID_TYPE_IB = 0, + IB_UVERBS_GID_TYPE_ROCE_V1 = 1, + IB_UVERBS_GID_TYPE_ROCE_V2 = 2, +}; + +enum ib_poll_context { + IB_POLL_SOFTIRQ = 0, + IB_POLL_WORKQUEUE = 1, + IB_POLL_UNBOUND_WORKQUEUE = 2, + IB_POLL_LAST_POOL_TYPE = 2, + IB_POLL_DIRECT = 3, +}; + +struct ddebug_table { + struct list_head link; + struct list_head maps; + const char *mod_name; + unsigned int num_ddebugs; + struct _ddebug *ddebugs; +}; + +struct ddebug_query { 
+ const char *filename; + const char *module; + const char *function; + const char *format; + const char *class_string; + unsigned int first_lineno; + unsigned int last_lineno; +}; + +struct ddebug_iter { + struct ddebug_table *table; + int idx; +}; + +struct flag_settings { + unsigned int flags; + unsigned int mask; +}; + +struct flagsbuf { + char buf[8]; +}; + +struct clk_multiplier { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +struct kbentry { + unsigned char kb_table; + unsigned char kb_index; + short unsigned int kb_value; +}; + +struct kbsentry { + unsigned char kb_func; + unsigned char kb_string[512]; +}; + +struct kbdiacr { + unsigned char diacr; + unsigned char base; + unsigned char result; +}; + +struct kbdiacrs { + unsigned int kb_cnt; + struct kbdiacr kbdiacr[256]; +}; + +struct kbdiacrsuc { + unsigned int kb_cnt; + struct kbdiacruc kbdiacruc[256]; +}; + +struct kbkeycode { + unsigned int scancode; + unsigned int keycode; +}; + +struct kbd_repeat { + int delay; + int period; +}; + +struct keyboard_notifier_param { + struct vc_data *vc; + int down; + int shift; + int ledstate; + unsigned int value; +}; + +struct kbd_struct { + unsigned char lockstate; + unsigned char slockstate; + unsigned char ledmode: 1; + unsigned char ledflagstate: 4; + char: 3; + unsigned char default_ledflagstate: 4; + unsigned char kbdmode: 3; + long: 1; + unsigned char modeflags: 5; +}; + +struct vt_spawn_console { + spinlock_t lock; + struct pid *pid; + int sig; +}; + +typedef void k_handler_fn(struct vc_data *, unsigned char, char); + +typedef void fn_handler_fn(struct vc_data *); + +struct getset_keycode_data { + struct input_keymap_entry ke; + int error; +}; + +struct dma_fence_array; + +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +struct dma_fence_array { + struct dma_fence base; + spinlock_t lock; + unsigned int num_fences; + atomic_t num_pending; + struct dma_fence **fences; + struct irq_work work; + struct dma_fence_array_cb callbacks[0]; +}; + +struct dma_fence_chain { + struct dma_fence base; + struct dma_fence *prev; + long: 32; + u64 prev_seqno; + struct dma_fence *fence; + union { + struct dma_fence_cb cb; + struct irq_work work; + }; + spinlock_t lock; +}; + +struct phy_link_topology { + struct xarray phys; + u32 next_phy_index; +}; + +enum phy_upstream { + PHY_UPSTREAM_MAC = 0, + PHY_UPSTREAM_PHY = 1, +}; + +struct phy_fixup { + struct list_head list; + char bus_id[64]; + u32 phy_uid; + u32 phy_uid_mask; + int (*run)(struct phy_device *); +}; + +struct sfp_eeprom_base { + u8 phys_id; + u8 phys_ext_id; + u8 connector; + u8 if_1x_copper_passive: 1; + u8 if_1x_copper_active: 1; + u8 if_1x_lx: 1; + u8 if_1x_sx: 1; + u8 e10g_base_sr: 1; + u8 e10g_base_lr: 1; + u8 e10g_base_lrm: 1; + u8 e10g_base_er: 1; + u8 sonet_oc3_short_reach: 1; + u8 sonet_oc3_smf_intermediate_reach: 1; + u8 sonet_oc3_smf_long_reach: 1; + u8 unallocated_5_3: 1; + u8 sonet_oc12_short_reach: 1; + u8 sonet_oc12_smf_intermediate_reach: 1; + u8 sonet_oc12_smf_long_reach: 1; + u8 unallocated_5_7: 1; + u8 sonet_oc48_short_reach: 1; + u8 sonet_oc48_intermediate_reach: 1; + u8 sonet_oc48_long_reach: 1; + u8 sonet_reach_bit2: 1; + u8 sonet_reach_bit1: 1; + u8 sonet_oc192_short_reach: 1; + u8 escon_smf_1310_laser: 1; + u8 escon_mmf_1310_led: 1; + u8 e1000_base_sx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_t: 1; + u8 e100_base_lx: 1; + u8 e100_base_fx: 1; + u8 e_base_bx10: 1; + u8 e_base_px: 1; + u8 
fc_tech_electrical_inter_enclosure: 1; + u8 fc_tech_lc: 1; + u8 fc_tech_sa: 1; + u8 fc_ll_m: 1; + u8 fc_ll_l: 1; + u8 fc_ll_i: 1; + u8 fc_ll_s: 1; + u8 fc_ll_v: 1; + u8 unallocated_8_0: 1; + u8 unallocated_8_1: 1; + u8 sfp_ct_passive: 1; + u8 sfp_ct_active: 1; + u8 fc_tech_ll: 1; + u8 fc_tech_sl: 1; + u8 fc_tech_sn: 1; + u8 fc_tech_electrical_intra_enclosure: 1; + u8 fc_media_sm: 1; + u8 unallocated_9_1: 1; + u8 fc_media_m5: 1; + u8 fc_media_m6: 1; + u8 fc_media_tv: 1; + u8 fc_media_mi: 1; + u8 fc_media_tp: 1; + u8 fc_media_tw: 1; + u8 fc_speed_100: 1; + u8 unallocated_10_1: 1; + u8 fc_speed_200: 1; + u8 fc_speed_3200: 1; + u8 fc_speed_400: 1; + u8 fc_speed_1600: 1; + u8 fc_speed_800: 1; + u8 fc_speed_1200: 1; + u8 encoding; + u8 br_nominal; + u8 rate_id; + u8 link_len[6]; + char vendor_name[16]; + u8 extended_cc; + char vendor_oui[3]; + char vendor_pn[16]; + char vendor_rev[4]; + union { + __be16 optical_wavelength; + __be16 cable_compliance; + struct { + u8 sff8431_app_e: 1; + u8 fc_pi_4_app_h: 1; + u8 reserved60_2: 6; + u8 reserved61: 8; + } passive; + struct { + u8 sff8431_app_e: 1; + u8 fc_pi_4_app_h: 1; + u8 sff8431_lim: 1; + u8 fc_pi_4_lim: 1; + u8 reserved60_4: 4; + u8 reserved61: 8; + } active; + }; + u8 reserved62; + u8 cc_base; +}; + +struct sfp_eeprom_ext { + __be16 options; + u8 br_max; + u8 br_min; + char vendor_sn[16]; + char datecode[8]; + u8 diagmon; + u8 enhopts; + u8 sff8472_compliance; + u8 cc_ext; +}; + +struct sfp_eeprom_id { + struct sfp_eeprom_base base; + struct sfp_eeprom_ext ext; +}; + +struct sfp_upstream_ops { + void (*attach)(void *, struct sfp_bus *); + void (*detach)(void *, struct sfp_bus *); + int (*module_insert)(void *, const struct sfp_eeprom_id *); + void (*module_remove)(void *); + int (*module_start)(void *); + void (*module_stop)(void *); + void (*link_down)(void *); + void (*link_up)(void *); + int (*connect_phy)(void *, struct phy_device *); + void (*disconnect_phy)(void *, struct phy_device *); +}; + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *, struct phy_device *); + +struct phy_reg { + u16 reg; + u16 val; +}; + +struct iwl_fw_paging_cmd { + __le32 flags; + __le32 block_size; + __le32 block_num; + __le32 device_phy_addr[33]; +}; + +struct tx_status { + u8 status; + u8 reserved[3]; +}; + +struct iwl_fw_dbg_trigger_tx_status { + struct tx_status statuses[16]; + __le32 reserved[2]; +}; + +enum iwl_tx_flags { + TX_CMD_FLG_PROT_REQUIRE = 1, + TX_CMD_FLG_WRITE_TX_POWER = 2, + TX_CMD_FLG_ACK = 8, + TX_CMD_FLG_STA_RATE = 16, + TX_CMD_FLG_BAR = 64, + TX_CMD_FLG_TXOP_PROT = 128, + TX_CMD_FLG_VHT_NDPA = 256, + TX_CMD_FLG_HT_NDPA = 512, + TX_CMD_FLG_CSI_FDBK2HOST = 1024, + TX_CMD_FLG_BT_PRIO_POS = 11, + TX_CMD_FLG_BT_PRIO_MASK = 6144, + TX_CMD_FLG_BT_DIS = 4096, + TX_CMD_FLG_SEQ_CTL = 8192, + TX_CMD_FLG_MORE_FRAG = 16384, + TX_CMD_FLG_TSF = 65536, + TX_CMD_FLG_CALIB = 131072, + TX_CMD_FLG_KEEP_SEQ_CTL = 262144, + TX_CMD_FLG_MH_PAD = 1048576, + TX_CMD_FLG_RESP_TO_DRV = 2097152, + TX_CMD_FLG_TKIP_MIC_DONE = 8388608, + TX_CMD_FLG_DUR = 33554432, + TX_CMD_FLG_FW_DROP = 67108864, + TX_CMD_FLG_EXEC_PAPD = 134217728, + TX_CMD_FLG_PAPD_TYPE = 268435456, + TX_CMD_FLG_HCCA_CHUNK = 2147483648, +}; + +enum iwl_tx_cmd_flags { + IWL_TX_FLAGS_CMD_RATE = 1, + IWL_TX_FLAGS_ENCRYPT_DIS = 2, + IWL_TX_FLAGS_HIGH_PRI = 4, + IWL_TX_FLAGS_RTS = 8, + IWL_TX_FLAGS_CTS = 16, +}; + +enum iwl_tx_pm_timeouts { + PM_FRAME_NONE = 0, + PM_FRAME_MGMT = 2, + PM_FRAME_ASSOC = 3, +}; + +enum iwl_tx_cmd_sec_ctrl { + TX_CMD_SEC_WEP = 1, + TX_CMD_SEC_CCM = 2, + TX_CMD_SEC_TKIP = 3, 
+ TX_CMD_SEC_EXT = 4, + TX_CMD_SEC_GCMP = 5, + TX_CMD_SEC_KEY128 = 8, + TX_CMD_SEC_KEY_FROM_TABLE = 16, +}; + +enum iwl_tx_offload_assist_flags_pos { + TX_CMD_OFFLD_IP_HDR = 0, + TX_CMD_OFFLD_L4_EN = 6, + TX_CMD_OFFLD_L3_EN = 7, + TX_CMD_OFFLD_MH_SIZE = 8, + TX_CMD_OFFLD_PAD = 13, + TX_CMD_OFFLD_AMSDU = 14, +}; + +struct iwl_tx_cmd___2 { + __le16 len; + __le16 offload_assist; + __le32 tx_flags; + struct { + u8 try_cnt; + u8 btkill_cnt; + __le16 reserved; + } scratch; + __le32 rate_n_flags; + u8 sta_id; + u8 sec_ctl; + u8 initial_rate_index; + u8 reserved2; + u8 key[16]; + __le32 reserved3; + __le32 life_time; + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 tid_tspec; + __le16 pm_frame_timeout; + __le16 reserved4; + union { + struct { + struct {} __empty_payload; + u8 payload[0]; + }; + struct { + struct {} __empty_hdr; + struct ieee80211_hdr hdr[0]; + }; + }; +}; + +enum iwl_tx_status { + TX_STATUS_MSK___2 = 255, + TX_STATUS_SUCCESS___2 = 1, + TX_STATUS_DIRECT_DONE___2 = 2, + TX_STATUS_POSTPONE_DELAY___2 = 64, + TX_STATUS_POSTPONE_FEW_BYTES___2 = 65, + TX_STATUS_POSTPONE_BT_PRIO___2 = 66, + TX_STATUS_POSTPONE_QUIET_PERIOD___2 = 67, + TX_STATUS_POSTPONE_CALC_TTAK___2 = 68, + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY___2 = 129, + TX_STATUS_FAIL_SHORT_LIMIT___2 = 130, + TX_STATUS_FAIL_LONG_LIMIT___2 = 131, + TX_STATUS_FAIL_UNDERRUN = 132, + TX_STATUS_FAIL_DRAIN_FLOW___2 = 133, + TX_STATUS_FAIL_RFKILL_FLUSH___2 = 134, + TX_STATUS_FAIL_LIFE_EXPIRE___2 = 135, + TX_STATUS_FAIL_DEST_PS___2 = 136, + TX_STATUS_FAIL_HOST_ABORTED___2 = 137, + TX_STATUS_FAIL_BT_RETRY___2 = 138, + TX_STATUS_FAIL_STA_INVALID___2 = 139, + TX_STATUS_FAIL_FRAG_DROPPED___2 = 140, + TX_STATUS_FAIL_TID_DISABLE___2 = 141, + TX_STATUS_FAIL_FIFO_FLUSHED___2 = 142, + TX_STATUS_FAIL_SMALL_CF_POLL = 143, + TX_STATUS_FAIL_FW_DROP = 144, + TX_STATUS_FAIL_STA_COLOR_MISMATCH = 145, + TX_STATUS_INTERNAL_ABORT = 146, + TX_MODE_MSK = 3840, + TX_MODE_NO_BURST = 0, + TX_MODE_IN_BURST_SEQ = 256, + TX_MODE_FIRST_IN_BURST = 512, + TX_QUEUE_NUM_MSK = 126976, + TX_NARROW_BW_MSK = 393216, + TX_NARROW_BW_1DIV2 = 131072, + TX_NARROW_BW_1DIV4 = 262144, + TX_NARROW_BW_1DIV8 = 393216, +}; + +struct iwl_tx_resp_v3 { + u8 frame_count; + u8 bt_kill_count; + u8 failure_rts; + u8 failure_frame; + __le32 initial_rate; + __le16 wireless_media_time; + u8 pa_status; + u8 pa_integ_res_a[3]; + u8 pa_integ_res_b[3]; + u8 pa_integ_res_c[3]; + __le16 measurement_req_id; + u8 reduced_tpc; + u8 reserved; + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_info; + u8 ra_tid; + __le16 frame_ctrl; + struct agg_tx_status status[0]; +}; + +struct iwl_tx_resp { + u8 frame_count; + u8 bt_kill_count; + u8 failure_rts; + u8 failure_frame; + __le32 initial_rate; + __le16 wireless_media_time; + u8 pa_status; + u8 pa_integ_res_a[3]; + u8 pa_integ_res_b[3]; + u8 pa_integ_res_c[3]; + __le16 measurement_req_id; + u8 reduced_tpc; + u8 reserved; + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_info; + u8 ra_tid; + __le16 frame_ctrl; + __le16 tx_queue; + __le16 reserved2; + struct agg_tx_status status; +}; + +struct iwl_mvm_ba_notif { + u8 sta_addr[6]; + __le16 reserved; + u8 sta_id; + u8 tid; + __le16 seq_ctl; + __le64 bitmap; + __le16 scd_flow; + __le16 scd_ssn; + u8 txed; + u8 txed_2_done; + u8 reduced_txp; + u8 reserved1; +}; + +struct iwl_mvm_compressed_ba_tfd { + __le16 q_num; + __le16 tfd_index; + u8 scd_queue; + u8 tid; + u8 reserved[2]; +}; + +struct iwl_mvm_compressed_ba_ratid { + u8 q_num; + u8 tid; + 
__le16 ssn; +}; + +struct iwl_mvm_compressed_ba_notif { + __le32 flags; + u8 sta_id; + u8 reduced_txp; + u8 tlc_rate_info; + u8 retry_cnt; + __le32 query_byte_cnt; + __le16 query_frame_cnt; + __le16 txed; + __le16 done; + u8 rts_retry_cnt; + u8 reserved; + __le32 wireless_time; + __le32 tx_rate; + __le16 tfd_cnt; + __le16 ra_tid_cnt; + union { + struct { + struct {} __empty_ra_tid; + struct iwl_mvm_compressed_ba_ratid ra_tid[0]; + }; + struct { + struct {} __empty_tfd; + struct iwl_mvm_compressed_ba_tfd tfd[0]; + }; + }; +}; + +enum iwl_dump_control { + DUMP_TX_FIFO_FLUSH = 2, +}; + +struct iwl_tx_path_flush_cmd_v1 { + __le32 queues_ctl; + __le16 flush_ctl; + __le16 reserved; +}; + +struct iwl_tx_path_flush_cmd { + __le32 sta_id; + __le16 tid_mask; + __le16 reserved; +}; + +struct iwl_flush_queue_info { + __le16 tid; + __le16 queue_num; + __le16 read_before_flush; + __le16 read_after_flush; +}; + +struct iwl_tx_path_flush_cmd_rsp { + __le16 sta_id; + __le16 num_flushed_queues; + struct iwl_flush_queue_info queues[16]; +}; + +enum rtw_rx_desc_enc { + RX_DESC_ENC_NONE = 0, + RX_DESC_ENC_WEP40 = 1, + RX_DESC_ENC_TKIP_WO_MIC = 2, + RX_DESC_ENC_TKIP_MIC = 3, + RX_DESC_ENC_AES = 4, + RX_DESC_ENC_WEP104 = 5, +}; + +struct rtw_rx_desc { + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; + __le32 w4; + __le32 w5; +}; + +struct usbdevfs_ctrltransfer { + __u8 bRequestType; + __u8 bRequest; + __u16 wValue; + __u16 wIndex; + __u16 wLength; + __u32 timeout; + void *data; +}; + +struct usbdevfs_bulktransfer { + unsigned int ep; + unsigned int len; + unsigned int timeout; + void *data; +}; + +struct usbdevfs_setinterface { + unsigned int interface; + unsigned int altsetting; +}; + +struct usbdevfs_disconnectsignal { + unsigned int signr; + void *context; +}; + +struct usbdevfs_getdriver { + unsigned int interface; + char driver[256]; +}; + +struct usbdevfs_connectinfo { + unsigned int devnum; + unsigned char slow; +}; + +struct usbdevfs_conninfo_ex { + __u32 size; + __u32 busnum; + __u32 devnum; + __u32 speed; + __u8 num_ports; + __u8 ports[7]; +}; + +struct usbdevfs_iso_packet_desc { + unsigned int length; + unsigned int actual_length; + unsigned int status; +}; + +struct usbdevfs_urb { + unsigned char type; + unsigned char endpoint; + int status; + unsigned int flags; + void *buffer; + int buffer_length; + int actual_length; + int start_frame; + union { + int number_of_packets; + unsigned int stream_id; + }; + int error_count; + unsigned int signr; + void *usercontext; + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; +}; + +struct usbdevfs_ioctl { + int ifno; + int ioctl_code; + void *data; +}; + +struct usbdevfs_disconnect_claim { + unsigned int interface; + unsigned int flags; + char driver[256]; +}; + +struct usbdevfs_streams { + unsigned int num_streams; + unsigned int num_eps; + unsigned char eps[0]; +}; + +struct usb_dev_state { + struct list_head list; + struct usb_device *dev; + struct file *file; + spinlock_t lock; + struct list_head async_pending; + struct list_head async_completed; + struct list_head memory_list; + wait_queue_head_t wait; + wait_queue_head_t wait_for_resume; + unsigned int discsignr; + struct pid *disc_pid; + const struct cred *cred; + sigval_t disccontext; + long unsigned int ifclaimed; + u32 disabled_bulk_eps; + long unsigned int interface_allowed_mask; + int not_yet_resumed; + bool suspend_allowed; + bool privileges_dropped; +}; + +struct usb_memory { + struct list_head memlist; + int vma_use_count; + int urb_use_count; + u32 size; + void *mem; + dma_addr_t 
dma_handle; + long unsigned int vm_start; + struct usb_dev_state *ps; +}; + +struct async { + struct list_head asynclist; + struct usb_dev_state *ps; + struct pid *pid; + const struct cred *cred; + unsigned int signr; + unsigned int ifnum; + void *userbuffer; + void *userurb; + sigval_t userurb_sigval; + struct urb *urb; + struct usb_memory *usbm; + unsigned int mem_usage; + int status; + u8 bulk_addr; + u8 bulk_status; +}; + +enum snoop_when { + SUBMIT = 0, + COMPLETE___2 = 1, +}; + +enum { + POWER_SUPPLY_STATUS_UNKNOWN = 0, + POWER_SUPPLY_STATUS_CHARGING = 1, + POWER_SUPPLY_STATUS_DISCHARGING = 2, + POWER_SUPPLY_STATUS_NOT_CHARGING = 3, + POWER_SUPPLY_STATUS_FULL = 4, +}; + +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE = 1, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE = 2, + POWER_SUPPLY_CHARGE_TYPE_FAST = 3, + POWER_SUPPLY_CHARGE_TYPE_STANDARD = 4, + POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE = 5, + POWER_SUPPLY_CHARGE_TYPE_CUSTOM = 6, + POWER_SUPPLY_CHARGE_TYPE_LONGLIFE = 7, + POWER_SUPPLY_CHARGE_TYPE_BYPASS = 8, +}; + +enum { + POWER_SUPPLY_HEALTH_UNKNOWN = 0, + POWER_SUPPLY_HEALTH_GOOD = 1, + POWER_SUPPLY_HEALTH_OVERHEAT = 2, + POWER_SUPPLY_HEALTH_DEAD = 3, + POWER_SUPPLY_HEALTH_OVERVOLTAGE = 4, + POWER_SUPPLY_HEALTH_UNSPEC_FAILURE = 5, + POWER_SUPPLY_HEALTH_COLD = 6, + POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE = 7, + POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE = 8, + POWER_SUPPLY_HEALTH_OVERCURRENT = 9, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED = 10, + POWER_SUPPLY_HEALTH_WARM = 11, + POWER_SUPPLY_HEALTH_COOL = 12, + POWER_SUPPLY_HEALTH_HOT = 13, + POWER_SUPPLY_HEALTH_NO_BATTERY = 14, +}; + +enum { + POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, + POWER_SUPPLY_TECHNOLOGY_NiMH = 1, + POWER_SUPPLY_TECHNOLOGY_LION = 2, + POWER_SUPPLY_TECHNOLOGY_LIPO = 3, + POWER_SUPPLY_TECHNOLOGY_LiFe = 4, + POWER_SUPPLY_TECHNOLOGY_NiCd = 5, + POWER_SUPPLY_TECHNOLOGY_LiMn = 6, +}; + +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1, + POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4, + POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5, +}; + +enum { + POWER_SUPPLY_SCOPE_UNKNOWN = 0, + POWER_SUPPLY_SCOPE_SYSTEM = 1, + POWER_SUPPLY_SCOPE_DEVICE = 2, +}; + +enum power_supply_usb_type { + POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, + POWER_SUPPLY_USB_TYPE_SDP = 1, + POWER_SUPPLY_USB_TYPE_DCP = 2, + POWER_SUPPLY_USB_TYPE_CDP = 3, + POWER_SUPPLY_USB_TYPE_ACA = 4, + POWER_SUPPLY_USB_TYPE_C = 5, + POWER_SUPPLY_USB_TYPE_PD = 6, + POWER_SUPPLY_USB_TYPE_PD_DRP = 7, + POWER_SUPPLY_USB_TYPE_PD_PPS = 8, + POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, +}; + +enum power_supply_charge_behaviour { + POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0, + POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE = 1, + POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE = 2, +}; + +struct power_supply_attr { + const char *prop_name; + char attr_name[31]; + struct device_attribute dev_attr; + const char * const *text_values; + int text_values_len; +}; + +struct dm_stats_last_position { + sector_t last_sector; + unsigned int last_rw; + long: 32; +}; + +struct dm_stat_percpu { + long long unsigned int sectors[2]; + long long unsigned int ios[2]; + long long unsigned int merges[2]; + long long unsigned int ticks[2]; + long long unsigned int io_ticks[2]; + long long unsigned int io_ticks_total; + long long unsigned int time_in_queue; + long long unsigned int *histogram; + long: 32; +}; + +struct dm_stat_shared { + atomic_t in_flight[2]; + long long unsigned int stamp; + 
struct dm_stat_percpu tmp; +}; + +struct dm_stat { + struct list_head list_entry; + int id; + unsigned int stat_flags; + size_t n_entries; + long: 32; + sector_t start; + sector_t end; + sector_t step; + unsigned int n_histogram_entries; + long long unsigned int *histogram_boundaries; + const char *program_id; + const char *aux_data; + struct callback_head callback_head; + size_t shared_alloc_size; + size_t percpu_alloc_size; + size_t histogram_alloc_size; + struct dm_stat_percpu *stat_percpu[32]; + long: 32; + struct dm_stat_shared stat_shared[0]; +}; + +struct net_device_devres { + struct net_device *ndev; +}; + +struct sockaddr_nl { + __kernel_sa_family_t nl_family; + short unsigned int nl_pad; + __u32 nl_pid; + __u32 nl_groups; +}; + +struct nlmsgerr { + int error; + struct nlmsghdr msg; +}; + +enum nlmsgerr_attrs { + NLMSGERR_ATTR_UNUSED = 0, + NLMSGERR_ATTR_MSG = 1, + NLMSGERR_ATTR_OFFS = 2, + NLMSGERR_ATTR_COOKIE = 3, + NLMSGERR_ATTR_POLICY = 4, + NLMSGERR_ATTR_MISS_TYPE = 5, + NLMSGERR_ATTR_MISS_NEST = 6, + __NLMSGERR_ATTR_MAX = 7, + NLMSGERR_ATTR_MAX = 6, +}; + +struct nl_pktinfo { + __u32 group; +}; + +enum { + NETLINK_UNCONNECTED = 0, + NETLINK_CONNECTED = 1, +}; + +enum netlink_skb_flags { + NETLINK_SKB_DST = 8, +}; + +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +struct trace_event_raw_netlink_extack { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_netlink_extack { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_netlink_extack)(void *, const char *); + +enum { + NETLINK_F_KERNEL_SOCKET = 0, + NETLINK_F_RECV_PKTINFO = 1, + NETLINK_F_BROADCAST_SEND_ERROR = 2, + NETLINK_F_RECV_NO_ENOBUFS = 3, + NETLINK_F_LISTEN_ALL_NSID = 4, + NETLINK_F_CAP_ACK = 5, + NETLINK_F_EXT_ACK = 6, + NETLINK_F_STRICT_CHK = 7, +}; + +struct netlink_sock { + struct sock sk; + long unsigned int flags; + u32 portid; + u32 dst_portid; + u32 dst_group; + u32 subscriptions; + u32 ngroups; + long unsigned int *groups; + long unsigned int state; + size_t max_recvmsg_len; + wait_queue_head_t wait; + bool bound; + bool cb_running; + int dump_done_errno; + struct netlink_callback cb; + struct mutex nl_cb_mutex; + void (*netlink_rcv)(struct sk_buff *); + int (*netlink_bind)(struct net *, int); + void (*netlink_unbind)(struct net *, int); + void (*netlink_release)(struct sock *, long unsigned int *); + struct module *module; + struct rhash_head node; + struct callback_head rcu; +}; + +struct listeners; + +struct netlink_table { + struct rhashtable hash; + struct hlist_head mc_list; + struct listeners *listeners; + unsigned int flags; + unsigned int groups; + struct mutex *cb_mutex; + struct module *module; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, long unsigned int *); + int registered; +}; + +struct listeners { + struct callback_head rcu; + long unsigned int masks[0]; +}; + +struct netlink_tap_net { + struct list_head netlink_tap_all; + struct mutex netlink_tap_lock; +}; + +struct netlink_compare_arg { + possible_net_t pnet; + u32 portid; +}; + +struct netlink_broadcast_data { + struct sock *exclude_sk; + struct net *net; + u32 portid; + u32 group; + int failure; + int delivery_failure; + int congested; + int delivered; + gfp_t allocation; + struct sk_buff *skb; + struct sk_buff *skb2; + int (*tx_filter)(struct sock *, struct sk_buff *, void *); + void *tx_data; +}; + +struct netlink_set_err_data { + struct sock 
*exclude_sk; + u32 portid; + u32 group; + int code; +}; + +struct nl_seq_iter { + struct seq_net_private p; + struct rhashtable_iter hti; + int link; +}; + +struct bpf_iter__netlink { + union { + struct bpf_iter_meta *meta; + }; + union { + struct netlink_sock *sk; + }; +}; + +enum nfnl_acct_msg_types { + NFNL_MSG_ACCT_NEW = 0, + NFNL_MSG_ACCT_GET = 1, + NFNL_MSG_ACCT_GET_CTRZERO = 2, + NFNL_MSG_ACCT_DEL = 3, + NFNL_MSG_ACCT_OVERQUOTA = 4, + NFNL_MSG_ACCT_MAX = 5, +}; + +enum nfnl_acct_flags { + NFACCT_F_QUOTA_PKTS = 1, + NFACCT_F_QUOTA_BYTES = 2, + NFACCT_F_OVERQUOTA = 4, +}; + +enum nfnl_acct_type { + NFACCT_UNSPEC = 0, + NFACCT_NAME = 1, + NFACCT_PKTS = 2, + NFACCT_BYTES = 3, + NFACCT_USE = 4, + NFACCT_FLAGS = 5, + NFACCT_QUOTA = 6, + NFACCT_FILTER = 7, + NFACCT_PAD = 8, + __NFACCT_MAX = 9, +}; + +enum nfnl_attr_filter_type { + NFACCT_FILTER_UNSPEC = 0, + NFACCT_FILTER_MASK = 1, + NFACCT_FILTER_VALUE = 2, + __NFACCT_FILTER_MAX = 3, +}; + +enum { + NFACCT_NO_QUOTA = -1, + NFACCT_UNDERQUOTA = 0, + NFACCT_OVERQUOTA = 1, +}; + +struct nf_acct { + atomic64_t pkts; + atomic64_t bytes; + long unsigned int flags; + struct list_head head; + refcount_t refcnt; + char name[32]; + struct callback_head callback_head; + char data[0]; +}; + +struct nfacct_filter { + u32 value; + u32 mask; +}; + +struct nfnl_acct_net { + struct list_head nfnl_acct_list; +}; + +struct seqcount_rwlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_rwlock seqcount_rwlock_t; + +struct nft_rbtree { + struct rb_root root; + rwlock_t lock; + seqcount_rwlock_t count; + long unsigned int last_gc; +}; + +struct nft_rbtree_elem { + struct nft_elem_priv priv; + struct rb_node node; + struct nft_set_ext ext; +}; + +enum tcp_tw_status { + TCP_TW_SUCCESS = 0, + TCP_TW_RST = 1, + TCP_TW_ACK = 2, + TCP_TW_SYN = 3, +}; + +struct ip_tunnel_parm { + char name[16]; + int link; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + struct iphdr iph; +}; + +enum tunnel_encap_types { + TUNNEL_ENCAP_NONE = 0, + TUNNEL_ENCAP_FOU = 1, + TUNNEL_ENCAP_GUE = 2, + TUNNEL_ENCAP_MPLS = 3, +}; + +struct gro_cell; + +struct gro_cells { + struct gro_cell *cells; +}; + +struct ip_tunnel_prl_entry { + struct ip_tunnel_prl_entry *next; + __be32 addr; + u16 flags; + struct callback_head callback_head; +}; + +struct ip_tunnel { + struct ip_tunnel *next; + struct hlist_node hash_node; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net *net; + long unsigned int err_time; + int err_count; + u32 i_seqno; + atomic_t o_seqno; + int tun_hlen; + u32 index; + u8 erspan_ver; + u8 dir; + u16 hwid; + struct dst_cache dst_cache; + struct ip_tunnel_parm_kern parms; + int mlink; + int encap_hlen; + int hlen; + struct ip_tunnel_encap encap; + struct ip_tunnel_prl_entry *prl; + unsigned int prl_count; + unsigned int ip_tnl_net_id; + struct gro_cells gro_cells; + __u32 fwmark; + bool collect_md; + bool ignore_df; +}; + +struct tnl_ptk_info { + long unsigned int flags[1]; + __be16 proto; + __be32 key; + __be32 seq; + int hdr_len; +}; + +struct ip_tunnel_net { + struct net_device *fb_tunnel_dev; + struct rtnl_link_ops *rtnl_link_ops; + struct hlist_head tunnels[128]; + struct ip_tunnel *collect_md_tun; + int type; +}; + +struct igmpv3_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + __be32 grec_mca; + __be32 grec_src[0]; +}; + +struct igmpv3_report { + __u8 type; + __u8 resv1; + __sum16 csum; + __be16 resv2; + __be16 ngrec; + struct igmpv3_grec grec[0]; +}; + +struct igmpv3_query { + __u8 type; + __u8 code; + 
__sum16 csum; + __be32 group; + __u8 qrv: 3; + __u8 suppress: 1; + __u8 resv: 4; + __u8 qqic; + __be16 nsrcs; + __be32 srcs[0]; +}; + +enum { + PIM_TYPE_HELLO = 0, + PIM_TYPE_REGISTER = 1, + PIM_TYPE_REGISTER_STOP = 2, + PIM_TYPE_JOIN_PRUNE = 3, + PIM_TYPE_BOOTSTRAP = 4, + PIM_TYPE_ASSERT = 5, + PIM_TYPE_GRAFT = 6, + PIM_TYPE_GRAFT_ACK = 7, + PIM_TYPE_CANDIDATE_RP_ADV = 8, +}; + +struct pimhdr { + __u8 type; + __u8 reserved; + __be16 csum; +}; + +enum switchdev_attr_id { + SWITCHDEV_ATTR_ID_UNDEFINED = 0, + SWITCHDEV_ATTR_ID_PORT_STP_STATE = 1, + SWITCHDEV_ATTR_ID_PORT_MST_STATE = 2, + SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 3, + SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS = 4, + SWITCHDEV_ATTR_ID_PORT_MROUTER = 5, + SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 6, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 7, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL = 8, + SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 9, + SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 10, + SWITCHDEV_ATTR_ID_BRIDGE_MST = 11, + SWITCHDEV_ATTR_ID_MRP_PORT_ROLE = 12, + SWITCHDEV_ATTR_ID_VLAN_MSTI = 13, +}; + +struct switchdev_mst_state { + u16 msti; + u8 state; +}; + +struct switchdev_brport_flags { + long unsigned int val; + long unsigned int mask; +}; + +struct switchdev_vlan_msti { + u16 vid; + u16 msti; +}; + +struct switchdev_attr { + struct net_device *orig_dev; + enum switchdev_attr_id id; + u32 flags; + void *complete_priv; + void (*complete)(struct net_device *, int, void *); + union { + u8 stp_state; + struct switchdev_mst_state mst_state; + struct switchdev_brport_flags brport_flags; + bool mrouter; + clock_t ageing_time; + bool vlan_filtering; + u16 vlan_protocol; + bool mst; + bool mc_disabled; + u8 mrp_port_role; + struct switchdev_vlan_msti vlan_msti; + } u; +}; + +struct mld_msg { + struct icmp6hdr mld_hdr; + struct in6_addr mld_mca; +}; + +struct mld2_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + struct in6_addr grec_mca; + struct in6_addr grec_src[0]; +}; + +struct mld2_report { + struct icmp6hdr mld2r_hdr; + struct mld2_grec mld2r_grec[0]; +}; + +struct mld2_query { + struct icmp6hdr mld2q_hdr; + struct in6_addr mld2q_mca; + __u8 mld2q_qrv: 3; + __u8 mld2q_suppress: 1; + __u8 mld2q_resv2: 4; + __u8 mld2q_qqic; + __be16 mld2q_nsrcs; + struct in6_addr mld2q_srcs[0]; +}; + +enum { + MDB_RTR_TYPE_DISABLED = 0, + MDB_RTR_TYPE_TEMP_QUERY = 1, + MDB_RTR_TYPE_PERM = 2, + MDB_RTR_TYPE_TEMP = 3, +}; + +enum { + BRIDGE_QUERIER_UNSPEC = 0, + BRIDGE_QUERIER_IP_ADDRESS = 1, + BRIDGE_QUERIER_IP_PORT = 2, + BRIDGE_QUERIER_IP_OTHER_TIMER = 3, + BRIDGE_QUERIER_PAD = 4, + BRIDGE_QUERIER_IPV6_ADDRESS = 5, + BRIDGE_QUERIER_IPV6_PORT = 6, + BRIDGE_QUERIER_IPV6_OTHER_TIMER = 7, + __BRIDGE_QUERIER_MAX = 8, +}; + +struct br_ip_list { + struct list_head list; + struct br_ip addr; +}; + +struct rate_control_alg { + struct list_head list; + const struct rate_control_ops *ops; +}; + +typedef __u64 Elf64_Addr; + +typedef __u16 Elf64_Half; + +typedef __u64 Elf64_Off; + +typedef __u32 Elf64_Word; + +typedef __u64 Elf64_Xword; + +struct elf64_hdr { + unsigned char e_ident[16]; + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; + Elf64_Off e_phoff; + Elf64_Off e_shoff; + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +}; + +typedef struct elf64_hdr Elf64_Ehdr; + +typedef struct elf32_phdr Elf32_Phdr; + +struct elf64_phdr { + Elf64_Word p_type; + Elf64_Word p_flags; + Elf64_Off p_offset; + 
Elf64_Addr p_vaddr; + Elf64_Addr p_paddr; + Elf64_Xword p_filesz; + Elf64_Xword p_memsz; + Elf64_Xword p_align; +}; + +typedef struct elf64_phdr Elf64_Phdr; + +typedef struct elf32_note Elf32_Nhdr; + +struct freader { + void *buf; + u32 buf_sz; + int err; + long: 32; + union { + struct { + struct file *file; + struct folio *folio; + void *addr; + long: 32; + loff_t folio_off; + bool may_fault; + long: 32; + }; + struct { + const char *data; + long: 32; + u64 data_sz; + }; + }; +}; + +union decode_item { + u32 bits; + const union decode_item *table; + int action; +}; + +enum probes_insn { + INSN_REJECTED = 0, + INSN_GOOD = 1, + INSN_GOOD_NO_SLOT = 2, +}; + +struct decode_header; + +typedef enum probes_insn probes_custom_decode_t(probes_opcode_t, struct arch_probes_insn *, const struct decode_header *); + +struct decode_header { + union decode_item type_regs; + union decode_item mask; + union decode_item value; +}; + +union decode_action { + probes_insn_handler_t *handler; + probes_custom_decode_t *decoder; +}; + +typedef enum probes_insn probes_check_t(probes_opcode_t, struct arch_probes_insn *, const struct decode_header *); + +struct decode_checker { + probes_check_t *checker; +}; + +enum { + STACK_USE_NONE = 0, + STACK_USE_UNKNOWN = 1, + STACK_USE_FIXED_X0X = 2, + STACK_USE_FIXED_XXX = 3, + STACK_USE_STMDX = 4, + NUM_STACK_USE_TYPES = 5, +}; + +enum probes_arm_action { + PROBES_PRELOAD_IMM = 0, + PROBES_PRELOAD_REG = 1, + PROBES_BRANCH_IMM = 2, + PROBES_BRANCH_REG = 3, + PROBES_MRS = 4, + PROBES_CLZ = 5, + PROBES_SATURATING_ARITHMETIC = 6, + PROBES_MUL1 = 7, + PROBES_MUL2 = 8, + PROBES_SWP = 9, + PROBES_LDRSTRD = 10, + PROBES_LOAD = 11, + PROBES_STORE = 12, + PROBES_LOAD_EXTRA = 13, + PROBES_STORE_EXTRA = 14, + PROBES_MOV_IP_SP = 15, + PROBES_DATA_PROCESSING_REG = 16, + PROBES_DATA_PROCESSING_IMM = 17, + PROBES_MOV_HALFWORD = 18, + PROBES_SEV = 19, + PROBES_WFE = 20, + PROBES_SATURATE = 21, + PROBES_REV = 22, + PROBES_MMI = 23, + PROBES_PACK = 24, + PROBES_EXTEND = 25, + PROBES_EXTEND_ADD = 26, + PROBES_MUL_ADD_LONG = 27, + PROBES_MUL_ADD = 28, + PROBES_BITFIELD = 29, + PROBES_BRANCH = 30, + PROBES_LDMSTM = 31, + NUM_PROBES_ARM_ACTIONS = 32, +}; + +enum decode_type { + DECODE_TYPE_END = 0, + DECODE_TYPE_TABLE = 1, + DECODE_TYPE_CUSTOM = 2, + DECODE_TYPE_SIMULATE = 3, + DECODE_TYPE_EMULATE = 4, + DECODE_TYPE_OR = 5, + DECODE_TYPE_REJECT = 6, + NUM_DECODE_TYPES = 7, +}; + +struct bpf_binary_header { + u32 size; + long: 32; + u8 image[0]; +}; + +typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); + +enum { + BPF_R2_HI = 0, + BPF_R2_LO = 1, + BPF_R3_HI = 2, + BPF_R3_LO = 3, + BPF_R4_HI = 4, + BPF_R4_LO = 5, + BPF_R5_HI = 6, + BPF_R5_LO = 7, + BPF_R7_HI = 8, + BPF_R7_LO = 9, + BPF_R8_HI = 10, + BPF_R8_LO = 11, + BPF_R9_HI = 12, + BPF_R9_LO = 13, + BPF_FP_HI = 14, + BPF_FP_LO = 15, + BPF_TC_HI = 16, + BPF_TC_LO = 17, + BPF_AX_HI = 18, + BPF_AX_LO = 19, + BPF_JIT_SCRATCH_REGS = 20, +}; + +struct jit_ctx { + const struct bpf_prog *prog; + unsigned int idx; + unsigned int prologue_bytes; + unsigned int epilogue_offset; + unsigned int cpu_architecture; + u32 flags; + u32 *offsets; + u32 *target; + u32 stack_size; +}; + +typedef struct { + rwlock_t *lock; +} class_write_lock_irq_t; + +struct ptrace_peeksiginfo_args { + __u64 off; + __u32 flags; + __s32 nr; +}; + +struct ptrace_syscall_info { + __u8 op; + __u8 pad[3]; + __u32 arch; + __u64 instruction_pointer; + __u64 stack_pointer; + union { + struct { + __u64 nr; + __u64 args[6]; + } entry; + struct { + __s64 rval; + __u8 is_error; + long: 
32; + } exit; + struct { + __u64 nr; + __u64 args[6]; + __u32 ret_data; + long: 32; + } seccomp; + }; +}; + +struct ptrace_rseq_configuration { + __u64 rseq_abi_pointer; + __u32 rseq_abi_size; + __u32 signature; + __u32 flags; + __u32 pad; +}; + +enum { + KTW_FREEZABLE = 1, +}; + +struct kthread_create_info { + char *full_name; + int (*threadfn)(void *); + void *data; + int node; + struct task_struct *result; + struct completion *done; + struct list_head list; +}; + +struct kthread { + long unsigned int flags; + unsigned int cpu; + int result; + int (*threadfn)(void *); + void *data; + struct completion parked; + struct completion exited; + struct cgroup_subsys_state *blkcg_css; + char *full_name; +}; + +enum KTHREAD_BITS { + KTHREAD_IS_PER_CPU = 0, + KTHREAD_SHOULD_STOP = 1, + KTHREAD_SHOULD_PARK = 2, +}; + +struct kthread_flush_work { + struct kthread_work work; + struct completion done; +}; + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; + +enum { + __SD_BALANCE_NEWIDLE = 0, + __SD_BALANCE_EXEC = 1, + __SD_BALANCE_FORK = 2, + __SD_BALANCE_WAKE = 3, + __SD_WAKE_AFFINE = 4, + __SD_ASYM_CPUCAPACITY = 5, + __SD_ASYM_CPUCAPACITY_FULL = 6, + __SD_SHARE_CPUCAPACITY = 7, + __SD_CLUSTER = 8, + __SD_SHARE_LLC = 9, + __SD_SERIALIZE = 10, + __SD_ASYM_PACKING = 11, + __SD_PREFER_SIBLING = 12, + __SD_OVERLAP = 13, + __SD_NUMA = 14, + __SD_FLAG_CNT = 15, +}; + +struct sd_flag_debug { + unsigned int meta_flags; + char *name; +}; + +struct sched_domain_attr { + int relax_domain_level; +}; + +typedef const struct cpumask * (*sched_domain_mask_f)(int); + +typedef int (*sched_domain_flags_f)(); + +struct sd_data { + struct sched_domain **sd; + struct sched_domain_shared **sds; + struct sched_group **sg; + struct sched_group_capacity **sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; + char *name; +}; + +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, + MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, + MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, +}; + +enum { + MEMBARRIER_FLAG_SYNC_CORE = 1, + MEMBARRIER_FLAG_RSEQ = 2, +}; + +struct governor_attr { + struct attribute attr; + ssize_t (*show)(struct gov_attr_set *, char *); + ssize_t (*store)(struct gov_attr_set *, const char *, size_t); +}; + +enum membarrier_cmd { + MEMBARRIER_CMD_QUERY = 0, + MEMBARRIER_CMD_GLOBAL = 1, + MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, + MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, + MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = 128, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = 256, + MEMBARRIER_CMD_GET_REGISTRATIONS = 512, + MEMBARRIER_CMD_SHARED = 1, +}; + +enum membarrier_cmd_flag { + MEMBARRIER_CMD_FLAG_CPU = 1, +}; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE = 0, + SCHED_TUNABLESCALING_LOG = 1, + SCHED_TUNABLESCALING_LINEAR = 2, + SCHED_TUNABLESCALING_END = 3, +}; + +struct asym_cap_data { + struct list_head link; + struct callback_head rcu; + long unsigned int capacity; + long unsigned int cpus[0]; +}; + +typedef 
struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_irqsave_t; + +enum cpuacct_stat_index { + CPUACCT_STAT_USER = 0, + CPUACCT_STAT_SYSTEM = 1, + CPUACCT_STAT_NSTATS = 2, +}; + +struct cpuacct { + struct cgroup_subsys_state css; + u64 *cpuusage; + struct kernel_cpustat *cpustat; +}; + +struct sugov_tunables { + struct gov_attr_set attr_set; + unsigned int rate_limit_us; +}; + +struct sugov_policy { + struct cpufreq_policy *policy; + struct sugov_tunables *tunables; + struct list_head tunables_hook; + raw_spinlock_t update_lock; + long: 32; + u64 last_freq_update_time; + s64 freq_update_delay_ns; + unsigned int next_freq; + unsigned int cached_raw_freq; + struct irq_work irq_work; + struct kthread_work work; + struct mutex work_lock; + struct kthread_worker worker; + struct task_struct *thread; + bool work_in_progress; + bool limits_changed; + bool need_freq_update; +}; + +struct sugov_cpu { + struct update_util_data update_util; + struct sugov_policy *sg_policy; + unsigned int cpu; + bool iowait_boost_pending; + unsigned int iowait_boost; + long: 32; + u64 last_update; + long unsigned int util; + long unsigned int bw_min; + long unsigned int saved_idle_calls; + long: 32; +}; + +enum dl_param { + DL_RUNTIME = 0, + DL_PERIOD = 1, +}; + +struct s_data { + struct sched_domain **sd; + struct root_domain *rd; +}; + +enum s_alloc { + sa_rootdomain = 0, + sa_sd = 1, + sa_sd_storage = 2, + sa_none = 3, +}; + +struct sched_core_cookie { + refcount_t refcnt; +}; + +enum hk_flags { + HK_FLAG_TIMER = 1, + HK_FLAG_RCU = 2, + HK_FLAG_MISC = 4, + HK_FLAG_SCHED = 8, + HK_FLAG_TICK = 16, + HK_FLAG_DOMAIN = 32, + HK_FLAG_WQ = 64, + HK_FLAG_MANAGED_IRQ = 128, + HK_FLAG_KTHREAD = 256, +}; + +struct housekeeping { + struct cpumask cpumasks[9]; + long unsigned int flags; +}; + +struct rt_wake_q_head { + struct wake_q_head head; + struct task_struct *rtlock_task; +}; + +struct crash_mem { + unsigned int max_nr_ranges; + unsigned int nr_ranges; + struct range ranges[0]; +}; + +struct synth_field; + +struct synth_event { + struct dyn_event devent; + int ref; + char *name; + struct synth_field **fields; + unsigned int n_fields; + struct synth_field **dynamic_fields; + unsigned int n_dynamic_fields; + unsigned int n_u64; + struct trace_event_class class; + struct trace_event_call call; + struct tracepoint *tp; + struct module *mod; +}; + +struct event_trigger_ops; + +struct event_command; + +struct event_trigger_data { + long unsigned int count; + int ref; + int flags; + struct event_trigger_ops *ops; + struct event_command *cmd_ops; + struct event_filter *filter; + char *filter_str; + void *private_data; + bool paused; + bool paused_tmp; + struct list_head list; + char *name; + struct list_head named_list; + struct event_trigger_data *named_data; +}; + +struct event_trigger_ops { + void (*trigger)(struct event_trigger_data *, struct trace_buffer *, void *, struct ring_buffer_event *); + int (*init)(struct event_trigger_data *); + void (*free)(struct event_trigger_data *); + int (*print)(struct seq_file *, struct event_trigger_data *); +}; + +struct event_command { + struct list_head list; + char *name; + enum event_trigger_type trigger_type; + int flags; + int (*parse)(struct event_command *, struct trace_event_file *, char *, char *, char *); + int (*reg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg_all)(struct trace_event_file *); + int (*set_filter)(char *, struct event_trigger_data 
*, struct trace_event_file *); + struct event_trigger_ops * (*get_trigger_ops)(char *, char *); +}; + +struct enable_trigger_data { + struct trace_event_file *file; + bool enable; + bool hist; +}; + +enum event_command_flags { + EVENT_CMD_FL_POST_TRIGGER = 1, + EVENT_CMD_FL_NEEDS_REC = 2, +}; + +struct synth_field { + char *type; + char *name; + size_t size; + unsigned int offset; + unsigned int field_pos; + bool is_signed; + bool is_string; + bool is_dynamic; + bool is_stack; +}; + +enum { + HIST_ERR_NONE = 0, + HIST_ERR_DUPLICATE_VAR = 1, + HIST_ERR_VAR_NOT_UNIQUE = 2, + HIST_ERR_TOO_MANY_VARS = 3, + HIST_ERR_MALFORMED_ASSIGNMENT = 4, + HIST_ERR_NAMED_MISMATCH = 5, + HIST_ERR_TRIGGER_EEXIST = 6, + HIST_ERR_TRIGGER_ENOENT_CLEAR = 7, + HIST_ERR_SET_CLOCK_FAIL = 8, + HIST_ERR_BAD_FIELD_MODIFIER = 9, + HIST_ERR_TOO_MANY_SUBEXPR = 10, + HIST_ERR_TIMESTAMP_MISMATCH = 11, + HIST_ERR_TOO_MANY_FIELD_VARS = 12, + HIST_ERR_EVENT_FILE_NOT_FOUND = 13, + HIST_ERR_HIST_NOT_FOUND = 14, + HIST_ERR_HIST_CREATE_FAIL = 15, + HIST_ERR_SYNTH_VAR_NOT_FOUND = 16, + HIST_ERR_SYNTH_EVENT_NOT_FOUND = 17, + HIST_ERR_SYNTH_TYPE_MISMATCH = 18, + HIST_ERR_SYNTH_COUNT_MISMATCH = 19, + HIST_ERR_FIELD_VAR_PARSE_FAIL = 20, + HIST_ERR_VAR_CREATE_FIND_FAIL = 21, + HIST_ERR_ONX_NOT_VAR = 22, + HIST_ERR_ONX_VAR_NOT_FOUND = 23, + HIST_ERR_ONX_VAR_CREATE_FAIL = 24, + HIST_ERR_FIELD_VAR_CREATE_FAIL = 25, + HIST_ERR_TOO_MANY_PARAMS = 26, + HIST_ERR_PARAM_NOT_FOUND = 27, + HIST_ERR_INVALID_PARAM = 28, + HIST_ERR_ACTION_NOT_FOUND = 29, + HIST_ERR_NO_SAVE_PARAMS = 30, + HIST_ERR_TOO_MANY_SAVE_ACTIONS = 31, + HIST_ERR_ACTION_MISMATCH = 32, + HIST_ERR_NO_CLOSING_PAREN = 33, + HIST_ERR_SUBSYS_NOT_FOUND = 34, + HIST_ERR_INVALID_SUBSYS_EVENT = 35, + HIST_ERR_INVALID_REF_KEY = 36, + HIST_ERR_VAR_NOT_FOUND = 37, + HIST_ERR_FIELD_NOT_FOUND = 38, + HIST_ERR_EMPTY_ASSIGNMENT = 39, + HIST_ERR_INVALID_SORT_MODIFIER = 40, + HIST_ERR_EMPTY_SORT_FIELD = 41, + HIST_ERR_TOO_MANY_SORT_FIELDS = 42, + HIST_ERR_INVALID_SORT_FIELD = 43, + HIST_ERR_INVALID_STR_OPERAND = 44, + HIST_ERR_EXPECT_NUMBER = 45, + HIST_ERR_UNARY_MINUS_SUBEXPR = 46, + HIST_ERR_DIVISION_BY_ZERO = 47, + HIST_ERR_NEED_NOHC_VAL = 48, +}; + +enum hist_field_fn { + HIST_FIELD_FN_NOP = 0, + HIST_FIELD_FN_VAR_REF = 1, + HIST_FIELD_FN_COUNTER = 2, + HIST_FIELD_FN_CONST = 3, + HIST_FIELD_FN_LOG2 = 4, + HIST_FIELD_FN_BUCKET = 5, + HIST_FIELD_FN_TIMESTAMP = 6, + HIST_FIELD_FN_CPU = 7, + HIST_FIELD_FN_STRING = 8, + HIST_FIELD_FN_DYNSTRING = 9, + HIST_FIELD_FN_RELDYNSTRING = 10, + HIST_FIELD_FN_PSTRING = 11, + HIST_FIELD_FN_S64 = 12, + HIST_FIELD_FN_U64 = 13, + HIST_FIELD_FN_S32 = 14, + HIST_FIELD_FN_U32 = 15, + HIST_FIELD_FN_S16 = 16, + HIST_FIELD_FN_U16 = 17, + HIST_FIELD_FN_S8 = 18, + HIST_FIELD_FN_U8 = 19, + HIST_FIELD_FN_UMINUS = 20, + HIST_FIELD_FN_MINUS = 21, + HIST_FIELD_FN_PLUS = 22, + HIST_FIELD_FN_DIV = 23, + HIST_FIELD_FN_MULT = 24, + HIST_FIELD_FN_DIV_POWER2 = 25, + HIST_FIELD_FN_DIV_NOT_POWER2 = 26, + HIST_FIELD_FN_DIV_MULT_SHIFT = 27, + HIST_FIELD_FN_EXECNAME = 28, + HIST_FIELD_FN_STACK = 29, +}; + +struct hist_trigger_data; + +struct hist_var { + char *name; + struct hist_trigger_data *hist_data; + unsigned int idx; +}; + +enum field_op_id { + FIELD_OP_NONE = 0, + FIELD_OP_PLUS = 1, + FIELD_OP_MINUS = 2, + FIELD_OP_UNARY_MINUS = 3, + FIELD_OP_DIV = 4, + FIELD_OP_MULT = 5, +}; + +struct hist_field { + struct ftrace_event_field *field; + long unsigned int flags; + long unsigned int buckets; + const char *type; + struct hist_field *operands[2]; + struct hist_trigger_data 
*hist_data; + enum hist_field_fn fn_num; + unsigned int ref; + unsigned int size; + unsigned int offset; + unsigned int is_signed; + struct hist_var var; + enum field_op_id operator; + char *system; + char *event_name; + char *name; + unsigned int var_ref_idx; + bool read_once; + unsigned int var_str_idx; + u64 constant; + u64 div_multiplier; +}; + +struct hist_trigger_attrs; + +struct action_data; + +struct field_var; + +struct field_var_hist; + +struct hist_trigger_data { + struct hist_field *fields[22]; + unsigned int n_vals; + unsigned int n_keys; + unsigned int n_fields; + unsigned int n_vars; + unsigned int n_var_str; + unsigned int key_size; + struct tracing_map_sort_key sort_keys[2]; + unsigned int n_sort_keys; + struct trace_event_file *event_file; + struct hist_trigger_attrs *attrs; + struct tracing_map *map; + bool enable_timestamps; + bool remove; + struct hist_field *var_refs[16]; + unsigned int n_var_refs; + struct action_data *actions[8]; + unsigned int n_actions; + struct field_var *field_vars[64]; + unsigned int n_field_vars; + unsigned int n_field_var_str; + struct field_var_hist *field_var_hists[64]; + unsigned int n_field_var_hists; + struct field_var *save_vars[64]; + unsigned int n_save_vars; + unsigned int n_save_var_str; +}; + +enum hist_field_flags { + HIST_FIELD_FL_HITCOUNT = 1, + HIST_FIELD_FL_KEY = 2, + HIST_FIELD_FL_STRING = 4, + HIST_FIELD_FL_HEX = 8, + HIST_FIELD_FL_SYM = 16, + HIST_FIELD_FL_SYM_OFFSET = 32, + HIST_FIELD_FL_EXECNAME = 64, + HIST_FIELD_FL_SYSCALL = 128, + HIST_FIELD_FL_STACKTRACE = 256, + HIST_FIELD_FL_LOG2 = 512, + HIST_FIELD_FL_TIMESTAMP = 1024, + HIST_FIELD_FL_TIMESTAMP_USECS = 2048, + HIST_FIELD_FL_VAR = 4096, + HIST_FIELD_FL_EXPR = 8192, + HIST_FIELD_FL_VAR_REF = 16384, + HIST_FIELD_FL_CPU = 32768, + HIST_FIELD_FL_ALIAS = 65536, + HIST_FIELD_FL_BUCKET = 131072, + HIST_FIELD_FL_CONST = 262144, + HIST_FIELD_FL_PERCENT = 524288, + HIST_FIELD_FL_GRAPH = 1048576, +}; + +struct var_defs { + unsigned int n_vars; + char *name[16]; + char *expr[16]; +}; + +struct hist_trigger_attrs { + char *keys_str; + char *vals_str; + char *sort_key_str; + char *name; + char *clock; + bool pause; + bool cont; + bool clear; + bool ts_in_usecs; + bool no_hitcount; + unsigned int map_bits; + char *assignment_str[16]; + unsigned int n_assignments; + char *action_str[8]; + unsigned int n_actions; + struct var_defs var_defs; +}; + +struct field_var { + struct hist_field *var; + struct hist_field *val; +}; + +struct field_var_hist { + struct hist_trigger_data *hist_data; + char *cmd; +}; + +enum handler_id { + HANDLER_ONMATCH = 1, + HANDLER_ONMAX = 2, + HANDLER_ONCHANGE = 3, +}; + +enum action_id { + ACTION_SAVE = 1, + ACTION_TRACE = 2, + ACTION_SNAPSHOT = 3, +}; + +typedef void (*action_fn_t)(struct hist_trigger_data *, struct tracing_map_elt *, struct trace_buffer *, void *, struct ring_buffer_event *, void *, struct action_data *, u64 *); + +typedef bool (*check_track_val_fn_t)(u64, u64); + +struct action_data { + enum handler_id handler; + enum action_id action; + char *action_name; + action_fn_t fn; + unsigned int n_params; + char *params[64]; + unsigned int var_ref_idx[64]; + struct synth_event *synth_event; + bool use_trace_keyword; + char *synth_event_name; + union { + struct { + char *event; + char *event_system; + } match_data; + struct { + char *var_str; + struct hist_field *var_ref; + struct hist_field *track_var; + check_track_val_fn_t check_val; + action_fn_t save_data; + } track_data; + }; +}; + +struct track_data { + u64 track_val; + bool updated; + 
unsigned int key_len; + void *key; + struct tracing_map_elt elt; + struct action_data *action_data; + struct hist_trigger_data *hist_data; + long: 32; +}; + +struct hist_elt_data { + char *comm; + u64 *var_ref_vals; + char **field_var_str; + int n_field_var_str; +}; + +typedef void (*synth_probe_func_t)(void *, u64 *, unsigned int *); + +struct hist_var_data { + struct list_head list; + struct hist_trigger_data *hist_data; +}; + +struct hist_val_stat { + u64 max; + u64 total; +}; + +struct mmap_unlock_irq_work { + struct irq_work irq_work; + struct mm_struct *mm; +}; + +struct bpf_iter_seq_task_common { + struct pid_namespace *ns; + enum bpf_iter_task_type type; + u32 pid; + u32 pid_visiting; +}; + +struct bpf_iter_seq_task_info { + struct bpf_iter_seq_task_common common; + u32 tid; +}; + +struct bpf_iter__task { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; +}; + +struct bpf_iter_seq_task_file_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + u32 tid; + u32 fd; +}; + +struct bpf_iter__task_file { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + u32 fd; + long: 32; + union { + struct file *file; + }; +}; + +struct bpf_iter_seq_task_vma_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + struct mm_struct *mm; + struct vm_area_struct *vma; + u32 tid; + long unsigned int prev_vm_start; + long unsigned int prev_vm_end; +}; + +enum bpf_task_vma_iter_find_op { + task_vma_iter_first_vma = 0, + task_vma_iter_next_vma = 1, + task_vma_iter_find_vma = 2, +}; + +struct bpf_iter__task_vma { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + union { + struct vm_area_struct *vma; + }; +}; + +typedef u64 (*btf_bpf_find_vma)(struct task_struct *, u64, bpf_callback_t, void *, u64); + +struct bpf_iter_task_vma_kern_data { + struct task_struct *task; + struct mm_struct *mm; + struct mmap_unlock_irq_work *work; + struct vma_iterator vmi; +}; + +struct bpf_iter_task_vma { + __u64 __opaque[1]; +}; + +struct bpf_iter_task_vma_kern { + struct bpf_iter_task_vma_kern_data *data; + long: 32; +}; + +struct bpf_iter_css_task { + __u64 __opaque[1]; +}; + +struct bpf_iter_css_task_kern { + struct css_task_iter *css_it; + long: 32; +}; + +struct bpf_iter_task { + __u64 __opaque[3]; +}; + +struct bpf_iter_task_kern { + struct task_struct *task; + struct task_struct *pos; + unsigned int flags; + long: 32; +}; + +enum { + BPF_TASK_ITER_ALL_PROCS = 0, + BPF_TASK_ITER_ALL_THREADS = 1, + BPF_TASK_ITER_PROC_THREADS = 2, +}; + +struct bpf_queue_stack { + struct bpf_map map; + raw_spinlock_t lock; + u32 head; + u32 tail; + u32 size; + char elements[0]; +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = 18446744073709551584ULL, + PERF_CONTEXT_KERNEL = 18446744073709551488ULL, + PERF_CONTEXT_USER = 18446744073709551104ULL, + PERF_CONTEXT_GUEST = 18446744073709549568ULL, + PERF_CONTEXT_GUEST_KERNEL = 18446744073709549440ULL, + PERF_CONTEXT_GUEST_USER = 18446744073709549056ULL, + PERF_CONTEXT_MAX = 18446744073709547521ULL, +}; + +struct callchain_cpus_entries { + struct callback_head callback_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +struct mlock_fbatch { + local_lock_t lock; + struct folio_batch fbatch; +}; + +typedef struct { + long unsigned int fds_bits[32]; +} __kernel_fd_set; + +typedef __kernel_fd_set fd_set; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +struct poll_table_entry { + struct file *filp; + 
__poll_t key; + wait_queue_entry_t wait; + wait_queue_head_t *wait_address; +}; + +struct poll_table_page; + +struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[18]; +}; + +struct poll_table_page { + struct poll_table_page *next; + struct poll_table_entry *entry; + struct poll_table_entry entries[0]; +}; + +enum poll_time_type { + PT_TIMEVAL = 0, + PT_OLD_TIMEVAL = 1, + PT_TIMESPEC = 2, + PT_OLD_TIMESPEC = 3, +}; + +typedef struct { + long unsigned int *in; + long unsigned int *out; + long unsigned int *ex; + long unsigned int *res_in; + long unsigned int *res_out; + long unsigned int *res_ex; +} fd_set_bits; + +struct sigset_argpack { + sigset_t *p; + size_t size; +}; + +struct sel_arg_struct { + long unsigned int n; + fd_set *inp; + fd_set *outp; + fd_set *exp; + struct __kernel_old_timeval *tvp; +}; + +struct poll_list { + struct poll_list *next; + unsigned int len; + struct pollfd entries[0]; +}; + +struct posix_acl_xattr_entry { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +}; + +struct posix_acl_xattr_header { + __le32 a_version; +}; + +enum SHIFT_DIRECTION { + SHIFT_LEFT = 0, + SHIFT_RIGHT = 1, +}; + +struct ext4_extent_tail { + __le32 et_checksum; +}; + +struct autofs_packet_hdr { + int proto_version; + int type; +}; + +struct autofs_packet_missing { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[256]; +}; + +struct autofs_packet_expire { + struct autofs_packet_hdr hdr; + int len; + char name[256]; +}; + +enum autofs_notify { + NFY_NONE = 0, + NFY_MOUNT = 1, + NFY_EXPIRE = 2, +}; + +struct autofs_packet_expire_multi { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[256]; +}; + +union autofs_packet_union { + struct autofs_packet_hdr hdr; + struct autofs_packet_missing missing; + struct autofs_packet_expire expire; + struct autofs_packet_expire_multi expire_multi; +}; + +struct autofs_v5_packet { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + __u32 dev; + __u64 ino; + __u32 uid; + __u32 gid; + __u32 pid; + __u32 tgid; + __u32 len; + char name[256]; + long: 32; +}; + +typedef struct autofs_v5_packet autofs_packet_missing_indirect_t; + +typedef struct autofs_v5_packet autofs_packet_expire_indirect_t; + +typedef struct autofs_v5_packet autofs_packet_missing_direct_t; + +typedef struct autofs_v5_packet autofs_packet_expire_direct_t; + +union autofs_v5_packet_union { + struct autofs_packet_hdr hdr; + struct autofs_v5_packet v5_packet; + autofs_packet_missing_indirect_t missing_indirect; + autofs_packet_expire_indirect_t expire_indirect; + autofs_packet_missing_direct_t missing_direct; + autofs_packet_expire_direct_t expire_direct; +}; + +struct btrfs_file_extent { + u64 disk_bytenr; + u64 disk_num_bytes; + u64 num_bytes; + u64 ram_bytes; + u64 offset; + u8 compression; + long: 32; +}; + +struct btrfs_stripe { + __le64 devid; + __le64 offset; + __u8 dev_uuid[16]; +}; + +struct btrfs_chunk { + __le64 length; + __le64 owner; + __le64 stripe_len; + __le64 type; + __le32 io_align; + __le32 io_width; + __le32 sector_size; + __le16 num_stripes; + __le16 sub_stripes; + struct btrfs_stripe stripe; +}; + +struct btrfs_dev_extent { + __le64 chunk_tree; + __le64 chunk_objectid; + __le64 chunk_offset; + __le64 length; + __u8 chunk_tree_uuid[16]; +}; + +struct btrfs_block_group_item { + __le64 used; + __le64 chunk_objectid; + __le64 flags; +}; + +enum 
btrfs_tree_block_status { + BTRFS_TREE_BLOCK_CLEAN = 0, + BTRFS_TREE_BLOCK_INVALID_NRITEMS = 1, + BTRFS_TREE_BLOCK_INVALID_PARENT_KEY = 2, + BTRFS_TREE_BLOCK_BAD_KEY_ORDER = 3, + BTRFS_TREE_BLOCK_INVALID_LEVEL = 4, + BTRFS_TREE_BLOCK_INVALID_FREE_SPACE = 5, + BTRFS_TREE_BLOCK_INVALID_OFFSETS = 6, + BTRFS_TREE_BLOCK_INVALID_BLOCKPTR = 7, + BTRFS_TREE_BLOCK_INVALID_ITEM = 8, + BTRFS_TREE_BLOCK_INVALID_OWNER = 9, + BTRFS_TREE_BLOCK_WRITTEN_NOT_SET = 10, +}; + +enum key_lookup_flag { + KEY_LOOKUP_CREATE = 1, + KEY_LOOKUP_PARTIAL = 2, + KEY_LOOKUP_ALL = 3, +}; + +enum x509_actions { + ACT_x509_extract_key_data = 0, + ACT_x509_extract_name_segment___2 = 1, + ACT_x509_note_OID___2 = 2, + ACT_x509_note_issuer = 3, + ACT_x509_note_not_after = 4, + ACT_x509_note_not_before = 5, + ACT_x509_note_params = 6, + ACT_x509_note_serial = 7, + ACT_x509_note_sig_algo = 8, + ACT_x509_note_signature = 9, + ACT_x509_note_subject = 10, + ACT_x509_note_tbs_certificate = 11, + ACT_x509_process_extension = 12, + NR__x509_actions = 13, +}; + +enum io_uring_socket_op { + SOCKET_URING_OP_SIOCINQ = 0, + SOCKET_URING_OP_SIOCOUTQ = 1, + SOCKET_URING_OP_GETSOCKOPT = 2, + SOCKET_URING_OP_SETSOCKOPT = 3, +}; + +struct uring_cache { + struct io_uring_sqe sqes[2]; +}; + +struct ei_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; + int etype; + void *priv; +}; + +struct vring_desc { + __virtio64 addr; + __virtio32 len; + __virtio16 flags; + __virtio16 next; +}; + +struct vring_avail { + __virtio16 flags; + __virtio16 idx; + __virtio16 ring[0]; +}; + +struct vring_used_elem { + __virtio32 id; + __virtio32 len; +}; + +typedef struct vring_used_elem vring_used_elem_t; + +struct vring_used { + __virtio16 flags; + __virtio16 idx; + vring_used_elem_t ring[0]; +}; + +typedef struct vring_desc vring_desc_t; + +typedef struct vring_avail vring_avail_t; + +typedef struct vring_used vring_used_t; + +struct vring { + unsigned int num; + vring_desc_t *desc; + vring_avail_t *avail; + vring_used_t *used; +}; + +struct vring_packed_desc_event { + __le16 off_wrap; + __le16 flags; +}; + +struct vring_packed_desc { + __le64 addr; + __le32 len; + __le16 id; + __le16 flags; +}; + +struct vring_desc_state_split { + void *data; + struct vring_desc *indir_desc; +}; + +struct vring_desc_state_packed { + void *data; + struct vring_packed_desc *indir_desc; + u16 num; + u16 last; +}; + +struct vring_desc_extra { + dma_addr_t addr; + u32 len; + u16 flags; + u16 next; +}; + +struct vring_virtqueue_split { + struct vring vring; + u16 avail_flags_shadow; + u16 avail_idx_shadow; + struct vring_desc_state_split *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + u32 vring_align; + bool may_reduce_num; +}; + +struct vring_virtqueue_packed { + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + bool avail_wrap_counter; + u16 avail_used_flags; + u16 next_avail_idx; + u16 event_flags_shadow; + struct vring_desc_state_packed *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; +}; + +struct vring_virtqueue { + struct virtqueue vq; + bool packed_ring; + bool use_dma_api; + bool weak_barriers; + bool broken; + bool indirect; + bool event; + unsigned int free_head; + unsigned int num_added; + u16 
last_used_idx; + bool event_triggered; + union { + struct vring_virtqueue_split split; + struct vring_virtqueue_packed packed; + }; + bool (*notify)(struct virtqueue *); + bool we_own_ring; + struct device *dma_dev; +}; + +enum suspend_stat_step { + SUSPEND_WORKING = 0, + SUSPEND_FREEZE = 1, + SUSPEND_PREPARE = 2, + SUSPEND_SUSPEND = 3, + SUSPEND_SUSPEND_LATE = 4, + SUSPEND_SUSPEND_NOIRQ = 5, + SUSPEND_RESUME_NOIRQ = 6, + SUSPEND_RESUME_EARLY = 7, + SUSPEND_RESUME = 8, +}; + +typedef int (*pm_callback_t)(struct device *); + +struct fixed_phy_status { + int link; + int speed; + int duplex; + int pause; + int asym_pause; +}; + +struct reg { + u32 addr; + bool is64; +}; + +struct iwl_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; + __le16 reserved2; + __le32 log_event_table_ptr; + __le32 error_event_table_ptr; + __le32 timestamp; + __le32 is_valid; +}; + +enum { + IWL_PHY_CALIBRATE_DC_CMD = 8, + IWL_PHY_CALIBRATE_LO_CMD = 9, + IWL_PHY_CALIBRATE_TX_IQ_CMD = 11, + IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15, + IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16, + IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17, + IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD = 18, +}; + +struct iwl_calib_cmd { + struct iwl_calib_hdr hdr; + u8 data[0]; +}; + +struct iwl_calib_xtal_freq_cmd { + struct iwl_calib_hdr hdr; + u8 cap_pin1; + u8 cap_pin2; + u8 pad[2]; +}; + +struct iwl_calib_temperature_offset_cmd { + struct iwl_calib_hdr hdr; + __le16 radio_sensor_offset; + __le16 reserved; +}; + +struct iwl_calib_temperature_offset_v2_cmd { + struct iwl_calib_hdr hdr; + __le16 radio_sensor_offset_high; + __le16 radio_sensor_offset_low; + __le16 burntVoltageRef; + __le16 reserved; +}; + +struct iwl_wimax_coex_event_entry { + u8 request_prio; + u8 win_medium_prio; + u8 reserved; + u8 flags; +}; + +struct iwl_wimax_coex_cmd { + u8 flags; + u8 reserved[3]; + struct iwl_wimax_coex_event_entry sta_prio[16]; +}; + +enum bt_coex_prio_table_priorities { + BT_COEX_PRIO_TBL_DISABLED = 0, + BT_COEX_PRIO_TBL_PRIO_LOW = 1, + BT_COEX_PRIO_TBL_PRIO_HIGH = 2, + BT_COEX_PRIO_TBL_PRIO_BYPASS = 3, + BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4, + BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5, + BT_COEX_PRIO_TBL_PRIO_RSRVD1 = 6, + BT_COEX_PRIO_TBL_PRIO_RSRVD2 = 7, + BT_COEX_PRIO_TBL_MAX = 8, +}; + +struct iwl_bt_coex_prio_table_cmd { + u8 prio_tbl[16]; +}; + +struct iwl_bt_coex_prot_env_cmd { + u8 action; + u8 type; + u8 reserved[2]; +}; + +struct iwl_alive_data { + bool valid; + u8 subtype; +}; + +enum iwl_mac_filter_flags { + MAC_FILTER_IN_PROMISC = 1, + MAC_FILTER_IN_CONTROL_AND_MGMT = 2, + MAC_FILTER_ACCEPT_GRP = 4, + MAC_FILTER_DIS_DECRYPT = 8, + MAC_FILTER_DIS_GRP_DECRYPT = 16, + MAC_FILTER_IN_BEACON = 64, + MAC_FILTER_OUT_BCAST = 256, + MAC_FILTER_IN_CRC32 = 2048, + MAC_FILTER_IN_PROBE_REQUEST = 4096, + MAC_FILTER_IN_11AX = 16384, +}; + +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[32]; +}; + +enum scan_framework_client { + SCAN_CLIENT_SCHED_SCAN = 1, + SCAN_CLIENT_NETDETECT = 2, + SCAN_CLIENT_ASSET_TRACKING = 4, +}; + +struct iwl_scan_offload_blocklist { + u8 ssid[6]; + u8 reported_rssi; + u8 client_bitmap; +}; + +enum iwl_scan_offload_network_type { + IWL_NETWORK_TYPE_BSS = 1, + IWL_NETWORK_TYPE_IBSS = 2, + IWL_NETWORK_TYPE_ANY = 3, +}; + +enum iwl_scan_offload_band_selection { + IWL_SCAN_OFFLOAD_SELECT_2_4 = 4, + IWL_SCAN_OFFLOAD_SELECT_5_2 = 8, + IWL_SCAN_OFFLOAD_SELECT_ANY = 12, +}; + +enum iwl_scan_offload_auth_alg { + IWL_AUTH_ALGO_UNSUPPORTED = 0, + IWL_AUTH_ALGO_NONE = 1, + IWL_AUTH_ALGO_PSK = 2, + 
IWL_AUTH_ALGO_8021X = 4, + IWL_AUTH_ALGO_SAE = 8, + IWL_AUTH_ALGO_8021X_SHA384 = 16, + IWL_AUTH_ALGO_OWE = 32, +}; + +struct iwl_scan_offload_profile { + u8 ssid_index; + u8 unicast_cipher; + u8 auth_alg; + u8 network_type; + u8 band_selection; + u8 client_bitmap; + u8 reserved[2]; +}; + +struct iwl_scan_offload_profile_cfg_data { + u8 blocklist_len; + u8 num_profiles; + u8 match_notify; + u8 pass_match; + u8 active_clients; + u8 any_beacon_notify; + u8 reserved[2]; +}; + +struct iwl_scan_offload_profile_cfg_v1 { + struct iwl_scan_offload_profile profiles[11]; + struct iwl_scan_offload_profile_cfg_data data; +}; + +struct iwl_scan_offload_profile_cfg { + struct iwl_scan_offload_profile profiles[8]; + struct iwl_scan_offload_profile_cfg_data data; +}; + +struct iwl_scan_schedule_lmac { + __le16 delay; + u8 iterations; + u8 full_scan_mul; +}; + +enum iwl_scan_offload_complete_status { + IWL_SCAN_OFFLOAD_COMPLETED = 1, + IWL_SCAN_OFFLOAD_ABORTED = 2, +}; + +enum iwl_scan_ebs_status { + IWL_SCAN_EBS_SUCCESS = 0, + IWL_SCAN_EBS_FAILED = 1, + IWL_SCAN_EBS_CHAN_NOT_FOUND = 2, + IWL_SCAN_EBS_INACTIVE = 3, +}; + +struct iwl_scan_req_tx_cmd { + __le32 tx_flags; + __le32 rate_n_flags; + u8 sta_id; + u8 reserved[3]; +}; + +enum iwl_scan_channel_flags_lmac { + IWL_UNIFIED_SCAN_CHANNEL_FULL = 134217728, + IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = 268435456, +}; + +struct iwl_scan_channel_cfg_lmac { + __le32 flags; + __le16 channel_num; + __le16 iter_count; + __le32 iter_interval; +}; + +struct iwl_scan_probe_segment { + __le16 offset; + __le16 len; +}; + +struct iwl_scan_probe_req_v1 { + struct iwl_scan_probe_segment mac_header; + struct iwl_scan_probe_segment band_data[2]; + struct iwl_scan_probe_segment common_data; + u8 buf[512]; +}; + +struct iwl_scan_probe_req { + struct iwl_scan_probe_segment mac_header; + struct iwl_scan_probe_segment band_data[3]; + struct iwl_scan_probe_segment common_data; + u8 buf[512]; +}; + +enum iwl_scan_channel_flags { + IWL_SCAN_CHANNEL_FLAG_EBS = 1, + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = 2, + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = 4, + IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = 8, + IWL_SCAN_CHANNEL_FLAG_FORCE_EBS = 16, + IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER = 32, + IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = 64, +}; + +struct iwl_scan_channel_opt { + __le16 flags; + __le16 non_ebs_ratio; +}; + +enum iwl_mvm_lmac_scan_flags { + IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = 1, + IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = 2, + IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = 4, + IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = 8, + IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = 16, + IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = 32, + IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = 64, + IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = 128, + IWL_MVM_LMAC_SCAN_FLAG_MATCH = 512, +}; + +enum iwl_scan_priority_ext { + IWL_SCAN_PRIORITY_EXT_0_LOWEST = 0, + IWL_SCAN_PRIORITY_EXT_1 = 1, + IWL_SCAN_PRIORITY_EXT_2 = 2, + IWL_SCAN_PRIORITY_EXT_3 = 3, + IWL_SCAN_PRIORITY_EXT_4 = 4, + IWL_SCAN_PRIORITY_EXT_5 = 5, + IWL_SCAN_PRIORITY_EXT_6 = 6, + IWL_SCAN_PRIORITY_EXT_7_HIGHEST = 7, +}; + +struct iwl_scan_req_lmac { + __le32 reserved1; + u8 n_channels; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + u8 extended_dwell; + u8 reserved2; + __le16 rx_chain_select; + __le32 scan_flags; + __le32 max_out_time; + __le32 suspend_time; + __le32 flags; + __le32 filter_flags; + struct iwl_scan_req_tx_cmd tx_cmd[2]; + struct iwl_ssid_ie direct_scan[20]; + __le32 scan_prio; + __le32 iter_num; + __le32 delay; + struct iwl_scan_schedule_lmac schedule[2]; + struct 
iwl_scan_channel_opt channel_opt[2]; + u8 data[0]; +}; + +struct iwl_scan_results_notif { + u8 channel; + u8 band; + u8 probe_status; + u8 num_probe_not_sent; + __le32 duration; +}; + +struct iwl_lmac_scan_complete_notif { + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; + struct iwl_scan_results_notif results[0]; +}; + +struct iwl_periodic_scan_complete { + u8 last_schedule_line; + u8 last_schedule_iteration; + u8 status; + u8 ebs_status; + __le32 time_after_last_iter; + __le32 reserved; +}; + +enum scan_config_flags { + SCAN_CONFIG_FLAG_ACTIVATE = 1, + SCAN_CONFIG_FLAG_DEACTIVATE = 2, + SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = 4, + SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = 8, + SCAN_CONFIG_FLAG_SET_TX_CHAINS = 256, + SCAN_CONFIG_FLAG_SET_RX_CHAINS = 512, + SCAN_CONFIG_FLAG_SET_AUX_STA_ID = 1024, + SCAN_CONFIG_FLAG_SET_ALL_TIMES = 2048, + SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = 4096, + SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = 8192, + SCAN_CONFIG_FLAG_SET_LEGACY_RATES = 16384, + SCAN_CONFIG_FLAG_SET_MAC_ADDR = 32768, + SCAN_CONFIG_FLAG_SET_FRAGMENTED = 65536, + SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = 131072, + SCAN_CONFIG_FLAG_SET_CAM_MODE = 262144, + SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = 524288, + SCAN_CONFIG_FLAG_SET_PROMISC_MODE = 1048576, + SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = 2097152, + SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = 4194304, + SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = 8388608, +}; + +enum scan_config_rates { + SCAN_CONFIG_RATE_6M = 1, + SCAN_CONFIG_RATE_9M = 2, + SCAN_CONFIG_RATE_12M = 4, + SCAN_CONFIG_RATE_18M = 8, + SCAN_CONFIG_RATE_24M = 16, + SCAN_CONFIG_RATE_36M = 32, + SCAN_CONFIG_RATE_48M = 64, + SCAN_CONFIG_RATE_54M = 128, + SCAN_CONFIG_RATE_1M = 256, + SCAN_CONFIG_RATE_2M = 512, + SCAN_CONFIG_RATE_5M = 1024, + SCAN_CONFIG_RATE_11M = 2048, +}; + +enum iwl_channel_flags { + IWL_CHANNEL_FLAG_EBS = 1, + IWL_CHANNEL_FLAG_ACCURATE_EBS = 2, + IWL_CHANNEL_FLAG_EBS_ADD = 4, + IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = 8, +}; + +enum iwl_uhb_chan_cfg_flags { + IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES = 16777216, + IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN = 33554432, + IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE = 67108864, +}; + +struct iwl_scan_dwell { + u8 active; + u8 passive; + u8 fragmented; + u8 extended; +}; + +struct iwl_scan_config_v1 { + __le32 flags; + __le32 tx_chains; + __le32 rx_chains; + __le32 legacy_rates; + __le32 out_of_channel_time; + __le32 suspend_time; + struct iwl_scan_dwell dwell; + u8 mac_addr[6]; + u8 bcast_sta_id; + u8 channel_flags; + u8 channel_array[0]; +}; + +struct iwl_scan_config_v2 { + __le32 flags; + __le32 tx_chains; + __le32 rx_chains; + __le32 legacy_rates; + __le32 out_of_channel_time[2]; + __le32 suspend_time[2]; + struct iwl_scan_dwell dwell; + u8 mac_addr[6]; + u8 bcast_sta_id; + u8 channel_flags; + u8 channel_array[0]; +}; + +struct iwl_scan_config { + u8 enable_cam_mode; + u8 enable_promiscouos_mode; + u8 bcast_sta_id; + u8 reserved; + __le32 tx_chains; + __le32 rx_chains; +}; + +enum iwl_umac_scan_flags { + IWL_UMAC_SCAN_FLAG_PREEMPTIVE = 1, + IWL_UMAC_SCAN_FLAG_START_NOTIF = 2, +}; + +enum iwl_umac_scan_general_flags { + IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = 1, + IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = 2, + IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = 4, + IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = 8, + IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = 16, + IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = 32, + IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = 64, + IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = 128, + IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = 256, + 
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = 512, + IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = 1024, + IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP = 1024, + IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = 2048, + IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = 8192, + IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME = 16384, + IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE = 32768, +}; + +enum iwl_umac_scan_general_flags2 { + IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = 1, + IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = 2, + IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS = 8, +}; + +enum iwl_umac_scan_general_flags_v2 { + IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = 1, + IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL = 2, + IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE = 4, + IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1 = 8, + IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2 = 16, + IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH = 32, + IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS = 64, + IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL = 128, + IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE = 256, + IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START = 512, + IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID = 1024, + IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE = 2048, + IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = 4096, + IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = 8192, + IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = 16384, + IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE = 32768, +}; + +enum iwl_umac_scan_general_params_flags2 { + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB = 1, + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB = 2, + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT = 4, +}; + +struct iwl_scan_channel_cfg_umac { + __le32 flags; + u8 channel_num; + union { + struct { + u8 iter_count; + __le16 iter_interval; + } __attribute__((packed)) v1; + struct { + u8 band; + u8 iter_count; + u8 iter_interval; + } v2; + struct { + u8 psd_20; + u8 iter_count; + u8 iter_interval; + } v5; + }; +}; + +struct iwl_scan_umac_schedule { + __le16 interval; + u8 iter_count; + u8 reserved; +}; + +struct iwl_scan_req_umac_tail_v1 { + struct iwl_scan_umac_schedule schedule[2]; + __le16 delay; + __le16 reserved; + struct iwl_scan_probe_req_v1 preq; + struct iwl_ssid_ie direct_scan[20]; +}; + +struct iwl_scan_req_umac_tail_v2 { + struct iwl_scan_umac_schedule schedule[2]; + __le16 delay; + __le16 reserved; + struct iwl_scan_probe_req preq; + struct iwl_ssid_ie direct_scan[20]; +}; + +struct iwl_scan_umac_chan_param { + u8 flags; + u8 count; + __le16 reserved; +}; + +struct iwl_scan_req_umac { + __le32 flags; + __le32 uid; + __le32 ooc_priority; + __le16 general_flags; + u8 reserved; + u8 scan_start_mac_id; + union { + struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + __le32 max_out_time; + __le32 suspend_time; + __le32 scan_priority; + struct iwl_scan_umac_chan_param channel; + u8 data[0]; + } v1; + struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + __le32 max_out_time[2]; + __le32 suspend_time[2]; + __le32 scan_priority; + struct iwl_scan_umac_chan_param channel; + u8 data[0]; + } v6; + struct { + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + u8 adwell_default_n_aps; + u8 adwell_default_n_aps_social; + u8 reserved3; + __le16 adwell_max_budget; + __le32 max_out_time[2]; + __le32 suspend_time[2]; + __le32 scan_priority; + struct iwl_scan_umac_chan_param channel; + u8 data[0]; + } v7; + struct { + u8 active_dwell[2]; + u8 reserved2; + u8 adwell_default_n_aps; + u8 adwell_default_n_aps_social; + u8 general_flags2; + 
__le16 adwell_max_budget; + __le32 max_out_time[2]; + __le32 suspend_time[2]; + __le32 scan_priority; + u8 passive_dwell[2]; + u8 num_of_fragments[2]; + struct iwl_scan_umac_chan_param channel; + u8 data[0]; + } v8; + struct { + u8 active_dwell[2]; + u8 adwell_default_hb_n_aps; + u8 adwell_default_lb_n_aps; + u8 adwell_default_n_aps_social; + u8 general_flags2; + __le16 adwell_max_budget; + __le32 max_out_time[2]; + __le32 suspend_time[2]; + __le32 scan_priority; + u8 passive_dwell[2]; + u8 num_of_fragments[2]; + struct iwl_scan_umac_chan_param channel; + u8 data[0]; + } v9; + }; +}; + +struct iwl_scan_probe_params_v3 { + struct iwl_scan_probe_req preq; + u8 ssid_num; + u8 short_ssid_num; + u8 bssid_num; + u8 reserved; + struct iwl_ssid_ie direct_scan[20]; + __le32 short_ssid[8]; + u8 bssid_array[96]; +}; + +struct iwl_scan_probe_params_v4 { + struct iwl_scan_probe_req preq; + u8 short_ssid_num; + u8 bssid_num; + __le16 reserved; + struct iwl_ssid_ie direct_scan[20]; + __le32 short_ssid[8]; + u8 bssid_array[96]; +}; + +struct iwl_scan_channel_params_v4 { + u8 flags; + u8 count; + u8 num_of_aps_override; + u8 reserved; + struct iwl_scan_channel_cfg_umac channel_config[67]; + u8 adwell_ch_override_bitmap[16]; +}; + +struct iwl_scan_channel_params_v7 { + u8 flags; + u8 count; + u8 n_aps_override[2]; + struct iwl_scan_channel_cfg_umac channel_config[67]; +}; + +struct iwl_scan_general_params_v11 { + __le16 flags; + u8 reserved; + u8 scan_start_mac_or_link_id; + u8 active_dwell[2]; + u8 adwell_default_2g; + u8 adwell_default_5g; + u8 adwell_default_social_chn; + u8 flags2; + __le16 adwell_max_budget; + __le32 max_out_of_time[2]; + __le32 suspend_time[2]; + __le32 scan_priority; + u8 passive_dwell[2]; + u8 num_of_fragments[2]; +}; + +struct iwl_scan_periodic_parms_v1 { + struct iwl_scan_umac_schedule schedule[2]; + __le16 delay; + __le16 reserved; +}; + +struct iwl_scan_req_params_v12 { + struct iwl_scan_general_params_v11 general_params; + struct iwl_scan_channel_params_v4 channel_params; + struct iwl_scan_periodic_parms_v1 periodic_params; + struct iwl_scan_probe_params_v3 probe_params; +}; + +struct iwl_scan_req_params_v17 { + struct iwl_scan_general_params_v11 general_params; + struct iwl_scan_channel_params_v7 channel_params; + struct iwl_scan_periodic_parms_v1 periodic_params; + struct iwl_scan_probe_params_v4 probe_params; +}; + +struct iwl_scan_req_umac_v12 { + __le32 uid; + __le32 ooc_priority; + struct iwl_scan_req_params_v12 scan_params; +}; + +struct iwl_scan_req_umac_v17 { + __le32 uid; + __le32 ooc_priority; + struct iwl_scan_req_params_v17 scan_params; +}; + +struct iwl_umac_scan_abort { + __le32 uid; + __le32 flags; +}; + +enum iwl_umac_scan_abort_status { + IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS = 0, + IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS = 1, + IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND = 2, +}; + +struct iwl_umac_scan_complete { + __le32 uid; + u8 last_schedule; + u8 last_iter; + u8 status; + u8 ebs_status; + __le32 time_from_last_iter; + __le32 reserved; +}; + +struct iwl_umac_scan_iter_complete_notif { + __le32 uid; + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le64 start_tsf; + struct iwl_scan_results_notif results[0]; +}; + +struct iwl_umac_scan_channel_survey_notif { + __le32 channel; + __le32 band; + u8 noise[22]; + u8 reserved[2]; + __le32 active_time; + __le32 busy_time; + __le32 tx_time; + __le32 rx_time; +}; + +struct iwl_mvm_scan_timing_params { + u32 suspend_time; + u32 max_out_time; +}; + +struct iwl_mvm_scan_params { + enum iwl_mvm_scan_type 
type; + enum iwl_mvm_scan_type hb_type; + u32 n_channels; + u16 delay; + int n_ssids; + struct cfg80211_ssid *ssids; + struct ieee80211_channel **channels; + u32 flags; + u8 *mac_addr; + u8 *mac_addr_mask; + bool no_cck; + bool pass_all; + int n_match_sets; + struct iwl_scan_probe_req preq; + struct cfg80211_match_set *match_sets; + int n_scan_plans; + struct cfg80211_sched_scan_plan *scan_plans; + bool iter_notif; + struct cfg80211_scan_6ghz_params *scan_6ghz_params; + u32 n_6ghz_params; + bool scan_6ghz; + bool enable_6ghz_passive; + bool respect_p2p_go; + bool respect_p2p_go_hb; + s8 tsf_report_link_id; + short: 0; + u8 bssid[6]; +}; + +struct iwl_mvm_scan_iter_data { + u32 global_cnt; + struct ieee80211_vif *current_vif; + bool is_dcm_with_p2p_go; +}; + +struct iwl_mvm_scan_channel_segment { + u8 start_idx; + u8 end_idx; + u8 first_channel_id; + u8 last_channel_id; + u8 channel_spacing_shift; + u8 band; +}; + +struct iwl_scan_umac_handler { + u8 version; + int (*handler)(struct iwl_mvm *, struct ieee80211_vif *, struct iwl_mvm_scan_params *, int, int); +}; + +struct iwl_mvm_scan_respect_p2p_go_iter_data { + struct ieee80211_vif *current_vif; + bool p2p_go; + enum nl80211_band band; +}; + +enum rtw_rate_index { + RTW_RATEID_BGN_40M_2SS = 0, + RTW_RATEID_BGN_40M_1SS = 1, + RTW_RATEID_BGN_20M_2SS = 2, + RTW_RATEID_BGN_20M_1SS = 3, + RTW_RATEID_GN_N2SS = 4, + RTW_RATEID_GN_N1SS = 5, + RTW_RATEID_BG = 6, + RTW_RATEID_G = 7, + RTW_RATEID_B_20M = 8, + RTW_RATEID_ARFR0_AC_2SS = 9, + RTW_RATEID_ARFR1_AC_1SS = 10, + RTW_RATEID_ARFR2_AC_2G_1SS = 11, + RTW_RATEID_ARFR3_AC_2G_2SS = 12, + RTW_RATEID_ARFR4_AC_3SS = 13, + RTW_RATEID_ARFR5_N_3SS = 14, + RTW_RATEID_ARFR7_N_4SS = 15, + RTW_RATEID_ARFR6_AC_4SS = 16, +}; + +enum rtw_txq_flags { + RTW_TXQ_AMPDU = 0, + RTW_TXQ_BLOCK_BA = 1, +}; + +struct rtw_txq { + struct list_head list; + long unsigned int flags; +}; + +struct rtw_tx_desc { + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; + __le32 w4; + __le32 w5; + __le32 w6; + __le32 w7; + __le32 w8; + __le32 w9; +}; + +enum rtw_tx_desc_queue_select { + TX_DESC_QSEL_TID0 = 0, + TX_DESC_QSEL_TID1 = 1, + TX_DESC_QSEL_TID2 = 2, + TX_DESC_QSEL_TID3 = 3, + TX_DESC_QSEL_TID4 = 4, + TX_DESC_QSEL_TID5 = 5, + TX_DESC_QSEL_TID6 = 6, + TX_DESC_QSEL_TID7 = 7, + TX_DESC_QSEL_TID8 = 8, + TX_DESC_QSEL_TID9 = 9, + TX_DESC_QSEL_TID10 = 10, + TX_DESC_QSEL_TID11 = 11, + TX_DESC_QSEL_TID12 = 12, + TX_DESC_QSEL_TID13 = 13, + TX_DESC_QSEL_TID14 = 14, + TX_DESC_QSEL_TID15 = 15, + TX_DESC_QSEL_BEACON = 16, + TX_DESC_QSEL_HIGH = 17, + TX_DESC_QSEL_MGMT = 18, + TX_DESC_QSEL_H2C = 19, +}; + +struct ep_device { + struct usb_endpoint_descriptor *desc; + struct usb_device *udev; + struct device dev; +}; + +typedef struct thermal_zone_device *class_thermal_zone_reverse_t; + +struct trace_event_raw_thermal_temperature { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int temp_prev; + int temp; + char __data[0]; +}; + +struct trace_event_raw_cdev_update { + struct trace_entry ent; + u32 __data_loc_type; + long unsigned int target; + char __data[0]; +}; + +struct trace_event_raw_thermal_zone_trip { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int trip; + enum thermal_trip_type trip_type; + char __data[0]; +}; + +struct trace_event_data_offsets_thermal_temperature { + u32 thermal_zone; + const void *thermal_zone_ptr_; +}; + +struct trace_event_data_offsets_cdev_update { + u32 type; + const void *type_ptr_; +}; + +struct trace_event_data_offsets_thermal_zone_trip { + u32 thermal_zone; + const 
void *thermal_zone_ptr_; +}; + +typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *); + +typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, long unsigned int); + +typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, enum thermal_trip_type); + +struct of_bus; + +struct of_pci_range_parser { + struct device_node *node; + const struct of_bus *bus; + const __be32 *range; + const __be32 *end; + int na; + int ns; + int pna; + bool dma; +}; + +struct of_bus { + const char *name; + const char *addresses; + int (*match)(struct device_node *); + void (*count_cells)(struct device_node *, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int, int); + int (*translate)(__be32 *, u64, int); + int flag_cells; + unsigned int (*get_flags)(const __be32 *); +}; + +struct of_pci_range { + union { + u64 pci_addr; + u64 bus_addr; + }; + u64 cpu_addr; + u64 size; + u32 flags; + long: 32; +}; + +enum { + M_I17 = 0, + M_I20 = 1, + M_I20_SR = 2, + M_I24 = 3, + M_I24_8_1 = 4, + M_I24_10_1 = 5, + M_I27_11_1 = 6, + M_MINI = 7, + M_MINI_3_1 = 8, + M_MINI_4_1 = 9, + M_MB = 10, + M_MB_2 = 11, + M_MB_3 = 12, + M_MB_5_1 = 13, + M_MB_6_1 = 14, + M_MB_7_1 = 15, + M_MB_SR = 16, + M_MBA = 17, + M_MBA_3 = 18, + M_MBP = 19, + M_MBP_2 = 20, + M_MBP_2_2 = 21, + M_MBP_SR = 22, + M_MBP_4 = 23, + M_MBP_5_1 = 24, + M_MBP_5_2 = 25, + M_MBP_5_3 = 26, + M_MBP_6_1 = 27, + M_MBP_6_2 = 28, + M_MBP_7_1 = 29, + M_MBP_8_2 = 30, + M_UNKNOWN = 31, +}; + +struct efifb_dmi_info { + char *optname; + long unsigned int base; + int stride; + int width; + int height; + int flags; +}; + +enum { + OVERRIDE_NONE = 0, + OVERRIDE_BASE = 1, + OVERRIDE_STRIDE = 2, + OVERRIDE_HEIGHT = 4, + OVERRIDE_WIDTH = 8, +}; + +enum { + SOCK_WAKE_IO = 0, + SOCK_WAKE_WAITD = 1, + SOCK_WAKE_SPACE = 2, + SOCK_WAKE_URG = 3, +}; + +struct tc_qopt_offload_stats { + struct gnet_stats_basic_sync *bstats; + struct gnet_stats_queue *qstats; +}; + +enum tc_mq_command { + TC_MQ_CREATE = 0, + TC_MQ_DESTROY = 1, + TC_MQ_STATS = 2, + TC_MQ_GRAFT = 3, +}; + +struct tc_mq_opt_offload_graft_params { + long unsigned int queue; + u32 child_handle; +}; + +struct tc_mq_qopt_offload { + enum tc_mq_command command; + u32 handle; + union { + struct tc_qopt_offload_stats stats; + struct tc_mq_opt_offload_graft_params graft_params; + }; +}; + +struct mq_sched { + struct Qdisc **qdiscs; +}; + +enum ethtool_module_fw_flash_status { + ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED = 1, + ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS = 2, + ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED = 3, + ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR = 4, +}; + +enum { + SFF8024_ID_UNK = 0, + SFF8024_ID_SFF_8472 = 2, + SFF8024_ID_SFP = 3, + SFF8024_ID_DWDM_SFP = 11, + SFF8024_ID_QSFP_8438 = 12, + SFF8024_ID_QSFP_8436_8636 = 13, + SFF8024_ID_QSFP28_8636 = 17, + SFF8024_ID_QSFP_DD = 24, + SFF8024_ID_OSFP = 25, + SFF8024_ID_DSFP = 27, + SFF8024_ID_QSFP_PLUS_CMIS = 30, + SFF8024_ID_SFP_DD_CMIS = 31, + SFF8024_ID_SFP_PLUS_CMIS = 32, + SFF8024_ENCODING_UNSPEC = 0, + SFF8024_ENCODING_8B10B = 1, + SFF8024_ENCODING_4B5B = 2, + SFF8024_ENCODING_NRZ = 3, + SFF8024_ENCODING_8472_MANCHESTER = 4, + SFF8024_ENCODING_8472_SONET = 5, + SFF8024_ENCODING_8472_64B66B = 6, + SFF8024_ENCODING_8436_MANCHESTER = 6, + SFF8024_ENCODING_8436_SONET = 4, + SFF8024_ENCODING_8436_64B66B = 5, + SFF8024_ENCODING_256B257B = 7, + SFF8024_ENCODING_PAM4 = 8, + SFF8024_CONNECTOR_UNSPEC = 0, + SFF8024_CONNECTOR_SC = 1, + SFF8024_CONNECTOR_FIBERJACK = 6, + 
SFF8024_CONNECTOR_LC = 7, + SFF8024_CONNECTOR_MT_RJ = 8, + SFF8024_CONNECTOR_MU = 9, + SFF8024_CONNECTOR_SG = 10, + SFF8024_CONNECTOR_OPTICAL_PIGTAIL = 11, + SFF8024_CONNECTOR_MPO_1X12 = 12, + SFF8024_CONNECTOR_MPO_2X16 = 13, + SFF8024_CONNECTOR_HSSDC_II = 32, + SFF8024_CONNECTOR_COPPER_PIGTAIL = 33, + SFF8024_CONNECTOR_RJ45 = 34, + SFF8024_CONNECTOR_NOSEPARATE = 35, + SFF8024_CONNECTOR_MXC_2X16 = 36, + SFF8024_ECC_UNSPEC = 0, + SFF8024_ECC_100G_25GAUI_C2M_AOC = 1, + SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 2, + SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 3, + SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 4, + SFF8024_ECC_100GBASE_SR10 = 5, + SFF8024_ECC_100GBASE_CR4 = 11, + SFF8024_ECC_25GBASE_CR_S = 12, + SFF8024_ECC_25GBASE_CR_N = 13, + SFF8024_ECC_10GBASE_T_SFI = 22, + SFF8024_ECC_10GBASE_T_SR = 28, + SFF8024_ECC_5GBASE_T = 29, + SFF8024_ECC_2_5GBASE_T = 30, +}; + +enum { + SFP_PHYS_ID = 0, + SFP_PHYS_EXT_ID = 1, + SFP_PHYS_EXT_ID_SFP = 4, + SFP_CONNECTOR = 2, + SFP_COMPLIANCE = 3, + SFP_ENCODING = 11, + SFP_BR_NOMINAL = 12, + SFP_RATE_ID = 13, + SFF_RID_8079 = 1, + SFF_RID_8431_RX_ONLY = 2, + SFF_RID_8431_TX_ONLY = 4, + SFF_RID_8431 = 6, + SFF_RID_10G8G = 14, + SFP_LINK_LEN_SM_KM = 14, + SFP_LINK_LEN_SM_100M = 15, + SFP_LINK_LEN_50UM_OM2_10M = 16, + SFP_LINK_LEN_62_5UM_OM1_10M = 17, + SFP_LINK_LEN_COPPER_1M = 18, + SFP_LINK_LEN_50UM_OM4_10M = 18, + SFP_LINK_LEN_50UM_OM3_10M = 19, + SFP_VENDOR_NAME = 20, + SFP_VENDOR_OUI = 37, + SFP_VENDOR_PN = 40, + SFP_VENDOR_REV = 56, + SFP_OPTICAL_WAVELENGTH_MSB = 60, + SFP_OPTICAL_WAVELENGTH_LSB = 61, + SFP_CABLE_SPEC = 60, + SFP_CC_BASE = 63, + SFP_OPTIONS = 64, + SFP_OPTIONS_HIGH_POWER_LEVEL = 8192, + SFP_OPTIONS_PAGING_A2 = 4096, + SFP_OPTIONS_RETIMER = 2048, + SFP_OPTIONS_COOLED_XCVR = 1024, + SFP_OPTIONS_POWER_DECL = 512, + SFP_OPTIONS_RX_LINEAR_OUT = 256, + SFP_OPTIONS_RX_DECISION_THRESH = 128, + SFP_OPTIONS_TUNABLE_TX = 64, + SFP_OPTIONS_RATE_SELECT = 32, + SFP_OPTIONS_TX_DISABLE = 16, + SFP_OPTIONS_TX_FAULT = 8, + SFP_OPTIONS_LOS_INVERTED = 4, + SFP_OPTIONS_LOS_NORMAL = 2, + SFP_BR_MAX = 66, + SFP_BR_MIN = 67, + SFP_VENDOR_SN = 68, + SFP_DATECODE = 84, + SFP_DIAGMON = 92, + SFP_DIAGMON_DDM = 64, + SFP_DIAGMON_INT_CAL = 32, + SFP_DIAGMON_EXT_CAL = 16, + SFP_DIAGMON_RXPWR_AVG = 8, + SFP_DIAGMON_ADDRMODE = 4, + SFP_ENHOPTS = 93, + SFP_ENHOPTS_ALARMWARN = 128, + SFP_ENHOPTS_SOFT_TX_DISABLE = 64, + SFP_ENHOPTS_SOFT_TX_FAULT = 32, + SFP_ENHOPTS_SOFT_RX_LOS = 16, + SFP_ENHOPTS_SOFT_RATE_SELECT = 8, + SFP_ENHOPTS_APP_SELECT_SFF8079 = 4, + SFP_ENHOPTS_SOFT_RATE_SFF8431 = 2, + SFP_SFF8472_COMPLIANCE = 94, + SFP_SFF8472_COMPLIANCE_NONE = 0, + SFP_SFF8472_COMPLIANCE_REV9_3 = 1, + SFP_SFF8472_COMPLIANCE_REV9_5 = 2, + SFP_SFF8472_COMPLIANCE_REV10_2 = 3, + SFP_SFF8472_COMPLIANCE_REV10_4 = 4, + SFP_SFF8472_COMPLIANCE_REV11_0 = 5, + SFP_SFF8472_COMPLIANCE_REV11_3 = 6, + SFP_SFF8472_COMPLIANCE_REV11_4 = 7, + SFP_SFF8472_COMPLIANCE_REV12_0 = 8, + SFP_CC_EXT = 95, +}; + +enum ethnl_sock_type { + ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH = 0, +}; + +struct ethnl_sock_priv { + struct net_device *dev; + u32 portid; + enum ethnl_sock_type type; +}; + +struct ethnl_module_fw_flash_ntf_params { + u32 portid; + u32 seq; + bool closed_sock; +}; + +struct ethtool_module_fw_flash_params { + __be32 password; + u8 password_valid: 1; +}; + +struct ethtool_cmis_fw_update_params { + struct net_device *dev; + struct ethtool_module_fw_flash_params params; + struct ethnl_module_fw_flash_ntf_params ntf_params; + const struct firmware *fw; +}; + +struct ethtool_module_fw_flash { + struct list_head list; 
+ netdevice_tracker dev_tracker; + struct work_struct work; + struct ethtool_cmis_fw_update_params fw_update; +}; + +struct module_reply_data { + struct ethnl_reply_data base; + struct ethtool_module_power_mode_params power; +}; + +struct skb_checksum_ops { + __wsum (*update)(const void *, int, __wsum); + __wsum (*combine)(__wsum, __wsum, int, int); +}; + +enum nft_payload_bases { + NFT_PAYLOAD_LL_HEADER = 0, + NFT_PAYLOAD_NETWORK_HEADER = 1, + NFT_PAYLOAD_TRANSPORT_HEADER = 2, + NFT_PAYLOAD_INNER_HEADER = 3, + NFT_PAYLOAD_TUN_HEADER = 4, +}; + +enum nft_inner_type { + NFT_INNER_UNSPEC = 0, + NFT_INNER_VXLAN = 1, + NFT_INNER_GENEVE = 2, +}; + +enum nft_inner_flags { + NFT_INNER_HDRSIZE = 1, + NFT_INNER_LL = 2, + NFT_INNER_NH = 4, + NFT_INNER_TH = 8, +}; + +enum nft_inner_attributes { + NFTA_INNER_UNSPEC = 0, + NFTA_INNER_NUM = 1, + NFTA_INNER_TYPE = 2, + NFTA_INNER_FLAGS = 3, + NFTA_INNER_HDRSIZE = 4, + NFTA_INNER_EXPR = 5, + __NFTA_INNER_MAX = 6, +}; + +struct nft_payload { + enum nft_payload_bases base: 8; + u8 offset; + u8 len; + u8 dreg; +}; + +struct genevehdr { + u8 opt_len: 6; + u8 ver: 2; + u8 rsvd1: 6; + u8 critical: 1; + u8 oam: 1; + __be16 proto_type; + u8 vni[3]; + u8 rsvd2; + u8 options[0]; +}; + +struct __nft_expr { + const struct nft_expr_ops *ops; + long: 32; + union { + struct nft_payload payload; + struct nft_meta meta; + }; +}; + +enum { + NFT_INNER_EXPR_PAYLOAD = 0, + NFT_INNER_EXPR_META = 1, +}; + +struct nft_inner { + u8 flags; + u8 hdrsize; + u8 type; + u8 expr_type; + long: 32; + struct __nft_expr expr; +}; + +struct sock_ee_data_rfc4884 { + __u16 len; + __u8 flags; + __u8 reserved; +}; + +struct sock_extended_err { + __u32 ee_errno; + __u8 ee_origin; + __u8 ee_type; + __u8 ee_code; + __u8 ee_pad; + __u32 ee_info; + union { + __u32 ee_data; + struct sock_ee_data_rfc4884 ee_rfc4884; + }; +}; + +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + struct in_addr ipi_addr; +}; + +struct ipcm_cookie { + struct sockcm_cookie sockc; + __be32 addr; + int oif; + struct ip_options_rcu *opt; + __u8 protocol; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + long: 32; +}; + +struct sock_exterr_skb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + struct sock_extended_err ee; + u16 addr_offset; + __be16 port; + u8 opt_stats: 1; + u8 unused: 7; +}; + +struct fib_nh_notifier_info { + struct fib_notifier_info info; + struct fib_nh *fib_nh; +}; + +struct tx_work { + struct delayed_work work; + struct sock *sk; +}; + +struct tls_rec; + +struct tls_sw_context_tx { + struct crypto_aead *aead_send; + struct crypto_wait async_wait; + struct tx_work tx_work; + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; + u8 async_capable: 1; + long unsigned int tx_bitmask; +}; + +enum { + TCP_BPF_IPV4 = 0, + TCP_BPF_IPV6 = 1, + TCP_BPF_NUM_PROTS = 2, +}; + +enum { + TCP_BPF_BASE = 0, + TCP_BPF_TX = 1, + TCP_BPF_RX = 2, + TCP_BPF_TXRX = 3, + TCP_BPF_NUM_CFGS = 4, +}; + +struct ip6_fraglist_iter { + struct ipv6hdr *tmp_hdr; + struct sk_buff *frag; + int offset; + unsigned int hlen; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_frag_state { + u8 *prevhdr; + unsigned int hlen; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + int hroom; + int troom; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_bridge_frag_data; + +typedef unsigned int ieee80211_tx_result; + +struct ieee80211_tx_data { + 
struct sk_buff *skb; + struct sk_buff_head skbs; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + struct ieee80211_key *key; + struct ieee80211_tx_rate rate; + unsigned int flags; +}; + +struct stack { + u32 irq[4]; + u32 abt[4]; + u32 und[4]; + u32 fiq[4]; +}; + +struct tmigr_event { + struct timerqueue_node nextevt; + unsigned int cpu; + bool ignore; +}; + +struct tmigr_group { + raw_spinlock_t lock; + struct tmigr_group *parent; + struct tmigr_event groupevt; + u64 next_expiry; + struct timerqueue_head events; + atomic_t migr_state; + unsigned int level; + int numa_node; + unsigned int num_children; + u8 groupmask; + struct list_head list; + long: 32; +}; + +struct tmigr_cpu { + raw_spinlock_t lock; + bool online; + bool idle; + bool remote; + struct tmigr_group *tmgroup; + u8 groupmask; + u64 wakeup; + struct tmigr_event cpuevt; +}; + +union tmigr_state { + u32 state; + struct { + u8 active; + u8 migrator; + u16 seq; + }; +}; + +struct timer_events { + u64 local; + u64 global; +}; + +struct trace_event_raw_tmigr_group_set { + struct trace_entry ent; + void *group; + unsigned int lvl; + unsigned int numa_node; + char __data[0]; +}; + +struct trace_event_raw_tmigr_connect_child_parent { + struct trace_entry ent; + void *child; + void *parent; + unsigned int lvl; + unsigned int numa_node; + unsigned int num_children; + u32 groupmask; + char __data[0]; +}; + +struct trace_event_raw_tmigr_connect_cpu_parent { + struct trace_entry ent; + void *parent; + unsigned int cpu; + unsigned int lvl; + unsigned int numa_node; + unsigned int num_children; + u32 groupmask; + char __data[0]; +}; + +struct trace_event_raw_tmigr_group_and_cpu { + struct trace_entry ent; + void *group; + void *parent; + unsigned int lvl; + unsigned int numa_node; + u32 childmask; + u8 active; + u8 migrator; + char __data[0]; +}; + +struct trace_event_raw_tmigr_cpugroup { + struct trace_entry ent; + u64 wakeup; + void *parent; + unsigned int cpu; + char __data[0]; +}; + +struct trace_event_raw_tmigr_idle { + struct trace_entry ent; + u64 nextevt; + u64 wakeup; + void *parent; + unsigned int cpu; + char __data[0]; +}; + +struct trace_event_raw_tmigr_update_events { + struct trace_entry ent; + void *child; + void *group; + u64 nextevt; + u64 group_next_expiry; + u64 child_evt_expiry; + unsigned int group_lvl; + unsigned int child_evtcpu; + u8 child_active; + u8 group_active; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_tmigr_handle_remote { + struct trace_entry ent; + void *group; + unsigned int lvl; + char __data[0]; +}; + +struct trace_event_data_offsets_tmigr_group_set {}; + +struct trace_event_data_offsets_tmigr_connect_child_parent {}; + +struct trace_event_data_offsets_tmigr_connect_cpu_parent {}; + +struct trace_event_data_offsets_tmigr_group_and_cpu {}; + +struct trace_event_data_offsets_tmigr_cpugroup {}; + +struct trace_event_data_offsets_tmigr_idle {}; + +struct trace_event_data_offsets_tmigr_update_events {}; + +struct trace_event_data_offsets_tmigr_handle_remote {}; + +typedef void (*btf_trace_tmigr_group_set)(void *, struct tmigr_group *); + +typedef void (*btf_trace_tmigr_connect_child_parent)(void *, struct tmigr_group *); + +typedef void (*btf_trace_tmigr_connect_cpu_parent)(void *, struct tmigr_cpu *); + +typedef void (*btf_trace_tmigr_group_set_cpu_inactive)(void *, struct tmigr_group *, union tmigr_state, u32); + +typedef void (*btf_trace_tmigr_group_set_cpu_active)(void *, struct tmigr_group *, union tmigr_state, u32); + +typedef void 
(*btf_trace_tmigr_cpu_new_timer)(void *, struct tmigr_cpu *); + +typedef void (*btf_trace_tmigr_cpu_active)(void *, struct tmigr_cpu *); + +typedef void (*btf_trace_tmigr_cpu_online)(void *, struct tmigr_cpu *); + +typedef void (*btf_trace_tmigr_cpu_offline)(void *, struct tmigr_cpu *); + +typedef void (*btf_trace_tmigr_handle_remote_cpu)(void *, struct tmigr_cpu *); + +typedef void (*btf_trace_tmigr_cpu_idle)(void *, struct tmigr_cpu *, u64); + +typedef void (*btf_trace_tmigr_cpu_new_timer_idle)(void *, struct tmigr_cpu *, u64); + +typedef void (*btf_trace_tmigr_update_events)(void *, struct tmigr_group *, struct tmigr_group *, union tmigr_state, union tmigr_state, u64); + +typedef void (*btf_trace_tmigr_handle_remote)(void *, struct tmigr_group *); + +struct tmigr_walk { + u64 nextexp; + u64 firstexp; + struct tmigr_event *evt; + u8 childmask; + bool remote; + long unsigned int basej; + long: 32; + u64 now; + bool check; + bool tmc_active; + long: 32; +}; + +typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *); + +struct trace_event_raw_rpm_internal { + struct trace_entry ent; + u32 __data_loc_name; + int flags; + int usage_count; + int disable_depth; + int runtime_auto; + int request_pending; + int irq_safe; + int child_count; + char __data[0]; +}; + +struct trace_event_raw_rpm_return_int { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int ip; + int ret; + char __data[0]; +}; + +struct trace_event_raw_rpm_status { + struct trace_entry ent; + u32 __data_loc_name; + int status; + char __data[0]; +}; + +struct trace_event_data_offsets_rpm_internal { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_rpm_return_int { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_rpm_status { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_rpm_suspend)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_resume)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_idle)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_usage)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_return_int)(void *, struct device *, long unsigned int, int); + +typedef void (*btf_trace_rpm_status)(void *, struct device *, enum rpm_status); + +enum bpf_jit_poke_reason { + BPF_POKE_REASON_TAIL_CALL = 0, +}; + +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct callback_head rcu; +}; + +struct bpf_iter__bpf_map_elem { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + void *value; + }; +}; + +struct bpf_iter_seq_array_map_info { + struct bpf_map *map; + void *percpu_value_buf; + u32 index; +}; + +struct prog_poke_elem { + struct list_head list; + struct bpf_prog_aux *aux; +}; + +struct bpf_crypto_type { + void * (*alloc_tfm)(const char *); + void (*free_tfm)(void *); + int (*has_algo)(const char *); + int (*setkey)(void *, const u8 *, unsigned int); + int (*setauthsize)(void *, unsigned int); + int (*encrypt)(void *, const u8 *, u8 *, unsigned int, u8 *); + int (*decrypt)(void *, const u8 *, u8 *, unsigned int, u8 *); + unsigned int (*ivsize)(void *); + unsigned int (*statesize)(void *); + u32 (*get_flags)(void *); + struct module *owner; + char name[14]; +}; + +struct bpf_crypto_type_list { + const struct bpf_crypto_type *type; + struct list_head list; +}; + +struct bpf_crypto_params { + char type[14]; + u8 reserved[2]; + char 
algo[128]; + u8 key[256]; + u32 key_len; + u32 authsize; +}; + +struct bpf_crypto_ctx { + const struct bpf_crypto_type *type; + void *tfm; + u32 siv_len; + struct callback_head rcu; + refcount_t usage; +}; + +struct slabobj_ext { + struct obj_cgroup *objcg; + long: 32; +}; + +typedef u64 freelist_full_t; + +typedef union { + struct { + void *freelist; + long unsigned int counter; + }; + freelist_full_t full; +} freelist_aba_t; + +struct kmem_cache_cpu { + union { + struct { + void **freelist; + long unsigned int tid; + }; + freelist_aba_t freelist_tid; + }; + struct slab *slab; + struct slab *partial; + local_lock_t lock; +}; + +struct kmem_cache_node { + spinlock_t list_lock; + long unsigned int nr_partial; + struct list_head partial; + atomic_long_t nr_slabs; + atomic_long_t total_objects; + struct list_head full; +}; + +struct memory_notify { + long unsigned int altmap_start_pfn; + long unsigned int altmap_nr_pages; + long unsigned int start_pfn; + long unsigned int nr_pages; + int status_change_nid_normal; + int status_change_nid; +}; + +struct partial_context { + gfp_t flags; + unsigned int orig_size; + void *object; +}; + +struct track { + long unsigned int addr; + depot_stack_handle_t handle; + int cpu; + int pid; + long unsigned int when; +}; + +enum track_item { + TRACK_ALLOC = 0, + TRACK_FREE = 1, +}; + +enum stat_item { + ALLOC_FASTPATH = 0, + ALLOC_SLOWPATH = 1, + FREE_FASTPATH = 2, + FREE_SLOWPATH = 3, + FREE_FROZEN = 4, + FREE_ADD_PARTIAL = 5, + FREE_REMOVE_PARTIAL = 6, + ALLOC_FROM_PARTIAL = 7, + ALLOC_SLAB = 8, + ALLOC_REFILL = 9, + ALLOC_NODE_MISMATCH = 10, + FREE_SLAB = 11, + CPUSLAB_FLUSH = 12, + DEACTIVATE_FULL = 13, + DEACTIVATE_EMPTY = 14, + DEACTIVATE_TO_HEAD = 15, + DEACTIVATE_TO_TAIL = 16, + DEACTIVATE_REMOTE_FREES = 17, + DEACTIVATE_BYPASS = 18, + ORDER_FALLBACK = 19, + CMPXCHG_DOUBLE_CPU_FAIL = 20, + CMPXCHG_DOUBLE_FAIL = 21, + CPU_PARTIAL_ALLOC = 22, + CPU_PARTIAL_FREE = 23, + CPU_PARTIAL_NODE = 24, + CPU_PARTIAL_DRAIN = 25, + NR_SLUB_STAT_ITEMS = 26, +}; + +struct rcu_delayed_free { + struct callback_head head; + void *object; +}; + +struct slub_flush_work { + struct work_struct work; + struct kmem_cache *s; + bool skip; +}; + +struct detached_freelist { + struct slab *slab; + void *tail; + void *freelist; + int cnt; + struct kmem_cache *s; +}; + +struct location { + depot_stack_handle_t handle; + long unsigned int count; + long unsigned int addr; + long unsigned int waste; + long long int sum_time; + long int min_time; + long int max_time; + long int min_pid; + long int max_pid; + long unsigned int cpus[1]; + nodemask_t nodes; +}; + +struct loc_track { + long unsigned int max; + long unsigned int count; + struct location *loc; + long: 32; + loff_t idx; +}; + +enum slab_stat_type { + SL_ALL = 0, + SL_PARTIAL = 1, + SL_CPU = 2, + SL_OBJECTS = 3, + SL_TOTAL = 4, +}; + +struct slab_attribute { + struct attribute attr; + ssize_t (*show)(struct kmem_cache *, char *); + ssize_t (*store)(struct kmem_cache *, const char *, size_t); +}; + +struct saved_alias { + struct kmem_cache *s; + const char *name; + struct saved_alias *next; +}; + +struct mnt_idmap { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + refcount_t count; +}; + +struct __kernel_itimerspec { + struct __kernel_timespec it_interval; + struct __kernel_timespec it_value; +}; + +struct timerfd_ctx { + union { + struct hrtimer tmr; + struct alarm alarm; + } t; + ktime_t tintv; + ktime_t moffs; + wait_queue_head_t wqh; + long: 32; + u64 ticks; + int clockid; + short unsigned int expired; + short 
unsigned int settime_flags; + struct callback_head rcu; + struct list_head clist; + spinlock_t cancel_lock; + bool might_cancel; +}; + +struct ext4_xattr_header { + __le32 h_magic; + __le32 h_refcount; + __le32 h_blocks; + __le32 h_hash; + __le32 h_checksum; + __u32 h_reserved[3]; +}; + +struct ext4_xattr_block_find { + struct ext4_xattr_search s; + struct buffer_head *bh; +}; + +enum btrfs_csum_type { + BTRFS_CSUM_TYPE_CRC32 = 0, + BTRFS_CSUM_TYPE_XXHASH = 1, + BTRFS_CSUM_TYPE_SHA256 = 2, + BTRFS_CSUM_TYPE_BLAKE2 = 3, +}; + +enum btrfs_mod_log_op { + BTRFS_MOD_LOG_KEY_REPLACE = 0, + BTRFS_MOD_LOG_KEY_ADD = 1, + BTRFS_MOD_LOG_KEY_REMOVE = 2, + BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING = 3, + BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING = 4, + BTRFS_MOD_LOG_MOVE_KEYS = 5, + BTRFS_MOD_LOG_ROOT_REPLACE = 6, +}; + +struct btrfs_csums { + u16 size; + const char name[10]; + const char driver[12]; +}; + +struct prop_handler { + struct hlist_node node; + const char *xattr_name; + int (*validate)(const struct btrfs_inode *, const char *, size_t); + int (*apply)(struct inode *, const char *, size_t); + const char * (*extract)(const struct inode *); + bool (*ignore)(const struct btrfs_inode *); + int inheritable; +}; + +struct user_key_payload { + struct callback_head rcu; + short unsigned int datalen; + long: 32; + char data[0]; +}; + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP = 2, + IOPRIO_WHO_USER = 3, +}; + +struct io_uring_file_index_range { + __u32 off; + __u32 len; + __u64 resv; +}; + +enum { + REQ_F_FIXED_FILE_BIT = 0, + REQ_F_IO_DRAIN_BIT = 1, + REQ_F_LINK_BIT = 2, + REQ_F_HARDLINK_BIT = 3, + REQ_F_FORCE_ASYNC_BIT = 4, + REQ_F_BUFFER_SELECT_BIT = 5, + REQ_F_CQE_SKIP_BIT = 6, + REQ_F_FAIL_BIT = 8, + REQ_F_INFLIGHT_BIT = 9, + REQ_F_CUR_POS_BIT = 10, + REQ_F_NOWAIT_BIT = 11, + REQ_F_LINK_TIMEOUT_BIT = 12, + REQ_F_NEED_CLEANUP_BIT = 13, + REQ_F_POLLED_BIT = 14, + REQ_F_HYBRID_IOPOLL_STATE_BIT = 15, + REQ_F_BUFFER_SELECTED_BIT = 16, + REQ_F_BUFFER_RING_BIT = 17, + REQ_F_REISSUE_BIT = 18, + REQ_F_CREDS_BIT = 19, + REQ_F_REFCOUNT_BIT = 20, + REQ_F_ARM_LTIMEOUT_BIT = 21, + REQ_F_ASYNC_DATA_BIT = 22, + REQ_F_SKIP_LINK_CQES_BIT = 23, + REQ_F_SINGLE_POLL_BIT = 24, + REQ_F_DOUBLE_POLL_BIT = 25, + REQ_F_APOLL_MULTISHOT_BIT = 26, + REQ_F_CLEAR_POLLIN_BIT = 27, + REQ_F_SUPPORT_NOWAIT_BIT = 28, + REQ_F_ISREG_BIT = 29, + REQ_F_POLL_NO_LAZY_BIT = 30, + REQ_F_CAN_POLL_BIT = 31, + REQ_F_BL_EMPTY_BIT = 32, + REQ_F_BL_NO_RECYCLE_BIT = 33, + REQ_F_BUFFERS_COMMIT_BIT = 34, + REQ_F_BUF_NODE_BIT = 35, + __REQ_F_LAST_BIT = 36, +}; + +enum { + IORING_RSRC_FILE = 0, + IORING_RSRC_BUFFER = 1, +}; + +struct rnd_state { + __u32 s1; + __u32 s2; + __u32 s3; + __u32 s4; +}; + +struct sg_append_table { + struct sg_table sgt; + struct scatterlist *prv; + unsigned int total_nents; +}; + +struct sg_dma_page_iter { + struct sg_page_iter base; +}; + +struct karatsuba_ctx { + struct karatsuba_ctx *next; + mpi_ptr_t tspace; + mpi_size_t tspace_size; + mpi_ptr_t tp; + mpi_size_t tp_size; +}; + +enum pci_bar_type { + pci_bar_unknown = 0, + pci_bar_io = 1, + pci_bar_mem32 = 2, + pci_bar_mem64 = 3, +}; + +struct pci_domain_busn_res { + struct list_head list; + struct resource res; + int domain_nr; +}; + +struct fb_cmap_user { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +struct scsi_mode_data { + __u32 length; + __u16 block_descriptor_length; + __u8 medium_type; + __u8 device_specific; + __u8 header_length; + __u8 longlba: 1; +}; + +enum scsi_host_prot_capabilities { + 
SHOST_DIF_TYPE1_PROTECTION = 1, + SHOST_DIF_TYPE2_PROTECTION = 2, + SHOST_DIF_TYPE3_PROTECTION = 4, + SHOST_DIX_TYPE0_PROTECTION = 8, + SHOST_DIX_TYPE1_PROTECTION = 16, + SHOST_DIX_TYPE2_PROTECTION = 32, + SHOST_DIX_TYPE3_PROTECTION = 64, +}; + +enum { + ACTION_FAIL = 0, + ACTION_REPREP = 1, + ACTION_DELAYED_REPREP = 2, + ACTION_RETRY = 3, + ACTION_DELAYED_RETRY = 4, +}; + +struct iwl_fw_ini_addr_size { + __le32 addr; + __le32 size; +}; + +struct iwl_fw_ini_debug_info_tlv { + struct iwl_fw_ini_header hdr; + __le32 image_type; + u8 debug_cfg_name[64]; +}; + +enum iwl_fw_ini_trigger_apply_policy { + IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = 1, + IWL_FW_INI_APPLY_POLICY_MATCH_DATA = 2, + IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = 256, + IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = 512, + IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = 1024, + IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = 65536, +}; + +enum iwl_fw_ini_trigger_reset_fw_policy { + IWL_FW_INI_RESET_FW_MODE_NOTHING = 0, + IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY = 1, + IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW = 2, +}; + +enum iwl_fw_ini_dump_policy { + IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = 1, + IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = 2, + IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = 4, +}; + +enum iwl_fw_ini_dump_type { + IWL_FW_INI_DUMP_BRIEF = 0, + IWL_FW_INI_DUMP_MEDIUM = 1, + IWL_FW_INI_DUMP_VERBOSE = 2, +}; + +enum iwl_fw_dbg_trigger_flags { + IWL_FW_DBG_FORCE_RESTART = 1, +}; + +struct iwl_dump_file_name_info { + __le32 type; + __le32 len; + __u8 data[0]; +}; + +struct iwl_fw_error_dump_file { + __le32 barker; + __le32 file_len; + u8 data[0]; +}; + +struct iwl_fw_error_dump_fifo { + __le32 fifo_num; + __le32 available_bytes; + __le32 wr_ptr; + __le32 rd_ptr; + __le32 fence_ptr; + __le32 fence_mode; + u8 data[0]; +}; + +struct iwl_fw_error_dump_info { + __le32 hw_type; + __le32 hw_step; + u8 fw_human_readable[64]; + u8 dev_human_readable[64]; + u8 bus_human_readable[8]; + u8 num_of_lmacs; + __le32 umac_err_id; + __le32 lmac_err_id[2]; +} __attribute__((packed)); + +struct iwl_fw_error_dump_smem_cfg { + __le32 num_lmacs; + __le32 num_txfifo_entries; + struct { + __le32 txfifo_size[15]; + __le32 rxfifo1_size; + } lmac[2]; + __le32 rxfifo2_size; + __le32 internal_txfifo_addr; + __le32 internal_txfifo_size[6]; +}; + +struct iwl_fw_error_dump_prph { + __le32 prph_start; + __le32 data[0]; +}; + +enum iwl_fw_error_dump_mem_type { + IWL_FW_ERROR_DUMP_MEM_SRAM = 0, + IWL_FW_ERROR_DUMP_MEM_SMEM = 1, + IWL_FW_ERROR_DUMP_MEM_NAMED_MEM = 10, +}; + +struct iwl_fw_error_dump_mem { + __le32 type; + __le32 offset; + u8 data[0]; +}; + +struct iwl_fw_ini_error_dump_data { + u8 type; + u8 sub_type; + u8 sub_type_ver; + u8 reserved; + __le32 len; + __u8 data[0]; +}; + +struct iwl_fw_ini_dump_entry { + struct list_head list; + u32 size; + u8 data[0]; +}; + +struct iwl_fw_ini_dump_file_hdr { + __le32 barker; + __le32 file_len; +}; + +struct iwl_fw_ini_fifo_hdr { + __le32 fifo_num; + __le32 num_of_registers; +}; + +struct iwl_fw_ini_error_dump_range { + __le32 range_data_size; + union { + __le32 internal_base_addr; + __le64 dram_base_addr; + __le32 page_num; + struct iwl_fw_ini_fifo_hdr fifo_hdr; + struct iwl_cmd_header fw_pkt_hdr; + }; + __le32 data[0]; +}; + +struct iwl_fw_ini_error_dump_header { + __le32 version; + __le32 region_id; + __le32 num_of_ranges; + __le32 name_len; + u8 name[32]; +}; + +struct iwl_fw_ini_error_dump { + struct iwl_fw_ini_error_dump_header header; + u8 data[0]; +}; + +struct iwl_fw_ini_error_dump_register { + __le32 addr; + 
__le32 data; +}; + +struct iwl_fw_ini_dump_cfg_name { + __le32 image_type; + __le32 cfg_name_len; + u8 cfg_name[64]; +}; + +struct iwl_fw_ini_dump_info { + __le32 version; + __le32 time_point; + __le32 trigger_reason; + __le32 external_cfg_state; + __le32 ver_type; + __le32 ver_subtype; + __le32 hw_step; + __le32 hw_type; + __le32 rf_id_flavor; + __le32 rf_id_dash; + __le32 rf_id_step; + __le32 rf_id_type; + __le32 lmac_major; + __le32 lmac_minor; + __le32 umac_major; + __le32 umac_minor; + __le32 fw_mon_mode; + __le64 regions_mask; + __le32 build_tag_len; + u8 build_tag[64]; + __le32 num_of_cfg_names; + struct iwl_fw_ini_dump_cfg_name cfg_names[0]; +}; + +struct iwl_fw_ini_err_table_dump { + struct iwl_fw_ini_error_dump_header header; + __le32 version; + u8 data[0]; +}; + +struct iwl_fw_ini_monitor_dump { + struct iwl_fw_ini_error_dump_header header; + __le32 write_ptr; + __le32 cycle_cnt; + __le32 cur_frag; + u8 data[0]; +}; + +struct iwl_fw_ini_special_device_memory { + struct iwl_fw_ini_error_dump_header header; + __le16 type; + __le16 version; + u8 data[0]; +}; + +struct iwl_dbg_tlv_node { + struct list_head list; + struct iwl_ucode_tlv tlv; +}; + +enum iwl_dbg_suspend_resume_cmds { + DBGC_RESUME_CMD = 0, + DBGC_SUSPEND_CMD = 1, +}; + +enum iwl_mvm_marker_id { + MARKER_ID_TX_FRAME_LATENCY = 1, + MARKER_ID_SYNC_CLOCK = 2, +}; + +struct iwl_mvm_marker { + u8 dw_len; + u8 marker_id; + __le16 reserved; + __le64 timestamp; + __le32 metadata[0]; +}; + +struct iwl_mvm_marker_rsp { + __le32 gp2; +}; + +struct iwl_dbg_suspend_resume_cmd { + __le32 operation; +}; + +struct iwl_dbg_dump_complete_cmd { + __le32 tp; + __le32 tp_data; +}; + +enum iwl_fw_dbg_config_cmd_type { + DEBUG_TOKEN_CONFIG_TYPE = 43, +}; + +struct iwl_fw_dbg_config_cmd { + __le32 type; + __le32 conf; +}; + +struct iwl_fw_dump_ptrs { + struct iwl_trans_dump_data *trans_ptr; + void *fwrt_ptr; + u32 fwrt_len; +}; + +struct iwl_prph_range { + u32 start; + u32 end; +}; + +struct iwl_dump_ini_region_data { + struct iwl_ucode_tlv *reg_tlv; + struct iwl_fwrt_dump_data *dump_data; +}; + +struct iwl_ini_rxf_data { + u32 fifo_num; + u32 size; + u32 offset; +}; + +struct iwl_dump_ini_mem_ops { + u32 (*get_num_of_ranges)(struct iwl_fw_runtime *, struct iwl_dump_ini_region_data *); + u32 (*get_size)(struct iwl_fw_runtime *, struct iwl_dump_ini_region_data *); + void * (*fill_mem_hdr)(struct iwl_fw_runtime *, struct iwl_dump_ini_region_data *, void *, u32); + int (*fill_range)(struct iwl_fw_runtime *, struct iwl_dump_ini_region_data *, void *, u32, int); +}; + +struct iwl_fw_dbg_trigger_time_event { + struct { + __le32 id; + __le32 action_bitmap; + __le32 status_bitmap; + } time_events[16]; +}; + +enum iwl_time_event_type { + TE_BSS_STA_AGGRESSIVE_ASSOC = 0, + TE_BSS_STA_ASSOC = 1, + TE_BSS_EAP_DHCP_PROT = 2, + TE_BSS_QUIET_PERIOD = 3, + TE_P2P_DEVICE_DISCOVERABLE = 4, + TE_P2P_DEVICE_LISTEN = 5, + TE_P2P_DEVICE_ACTION_SCAN = 6, + TE_P2P_DEVICE_FULL_SCAN = 7, + TE_P2P_CLIENT_AGGRESSIVE_ASSOC = 8, + TE_P2P_CLIENT_ASSOC = 9, + TE_P2P_CLIENT_QUIET_PERIOD = 10, + TE_P2P_GO_ASSOC_PROT = 11, + TE_P2P_GO_REPETITIVET_NOA = 12, + TE_P2P_GO_CT_WINDOW = 13, + TE_WIDI_TX_SYNC = 14, + TE_CHANNEL_SWITCH_PERIOD = 15, + TE_MAX = 16, +}; + +enum { + TE_V2_FRAG_NONE = 0, + TE_V2_FRAG_SINGLE = 1, + TE_V2_FRAG_DUAL = 2, + TE_V2_FRAG_MAX = 254, + TE_V2_FRAG_ENDLESS = 255, +}; + +enum iwl_time_event_policy { + TE_V2_DEFAULT_POLICY = 0, + TE_V2_NOTIF_HOST_EVENT_START = 1, + TE_V2_NOTIF_HOST_EVENT_END = 2, + TE_V2_NOTIF_INTERNAL_EVENT_START = 4, + 
TE_V2_NOTIF_INTERNAL_EVENT_END = 8, + TE_V2_NOTIF_HOST_FRAG_START = 16, + TE_V2_NOTIF_HOST_FRAG_END = 32, + TE_V2_NOTIF_INTERNAL_FRAG_START = 64, + TE_V2_NOTIF_INTERNAL_FRAG_END = 128, + TE_V2_START_IMMEDIATELY = 2048, + TE_V2_DEP_OTHER = 4096, + TE_V2_DEP_TSF = 8192, + TE_V2_EVENT_SOCIOPATHIC = 16384, + TE_V2_ABSENCE = 32768, +}; + +struct iwl_time_event_cmd { + __le32 id_and_color; + __le32 action; + __le32 id; + __le32 apply_time; + __le32 max_delay; + __le32 depends_on; + __le32 interval; + __le32 duration; + u8 repeat; + u8 max_frags; + __le16 policy; +}; + +struct iwl_time_event_resp { + __le32 status; + __le32 id; + __le32 unique_id; + __le32 id_and_color; +}; + +struct iwl_time_event_notif { + __le32 timestamp; + __le32 session_id; + __le32 unique_id; + __le32 id_and_color; + __le32 action; + __le32 status; +}; + +struct iwl_hs20_roc_req_tail { + u8 node_addr[6]; + __le16 reserved; + __le32 apply_time; + __le32 apply_time_max_delay; + __le32 duration; +}; + +struct iwl_hs20_roc_req { + __le32 id_and_color; + __le32 action; + __le32 event_unique_id; + __le32 sta_id_and_color; + struct iwl_fw_channel_info channel_info; + struct iwl_hs20_roc_req_tail tail; +}; + +struct iwl_roc_req { + __le32 action; + __le32 activity; + __le32 sta_id; + struct iwl_fw_channel_info channel_info; + u8 node_addr[6]; + __le16 reserved; + __le32 max_delay; + __le32 duration; +}; + +struct iwl_roc_notif { + __le32 success; + __le32 started; + __le32 activity; +}; + +enum iwl_mvm_session_prot_conf_id { + SESSION_PROTECT_CONF_ASSOC = 0, + SESSION_PROTECT_CONF_GO_CLIENT_ASSOC = 1, + SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV = 2, + SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION = 3, + SESSION_PROTECT_CONF_MAX_ID = 4, +}; + +struct iwl_mvm_session_prot_cmd { + __le32 id_and_color; + __le32 action; + __le32 conf_id; + __le32 duration_tu; + __le32 repetition_count; + __le32 interval; +}; + +struct iwl_mvm_session_prot_notif { + __le32 mac_link_id; + __le32 status; + __le32 start; + __le32 conf_id; +}; + +struct iwl_mvm_rx_roc_iterator_data { + u32 activity; + bool end_activity; + bool found; +}; + +enum ieee80211_max_ampdu_length_exp { + IEEE80211_HT_MAX_AMPDU_8K = 0, + IEEE80211_HT_MAX_AMPDU_16K = 1, + IEEE80211_HT_MAX_AMPDU_32K = 2, + IEEE80211_HT_MAX_AMPDU_64K = 3, +}; + +enum rtw_sc_offset { + RTW_SC_DONT_CARE = 0, + RTW_SC_20_UPPER = 1, + RTW_SC_20_LOWER = 2, + RTW_SC_20_UPMOST = 3, + RTW_SC_20_LOWEST = 4, + RTW_SC_40_UPPER = 9, + RTW_SC_40_LOWER = 10, +}; + +enum rtw_wireless_set { + WIRELESS_CCK = 1, + WIRELESS_OFDM = 2, + WIRELESS_HT = 4, + WIRELESS_VHT = 8, +}; + +struct rtw_channel_params { + u8 center_chan; + u8 primary_chan; + u8 bandwidth; +}; + +enum rtw_vif_port_set { + PORT_SET_MAC_ADDR = 1, + PORT_SET_BSSID = 2, + PORT_SET_NET_TYPE = 4, + PORT_SET_AID = 8, + PORT_SET_BCN_CTRL = 16, +}; + +enum rtw_fwcd_item { + RTW_FWCD_TLV = 0, + RTW_FWCD_REG = 1, + RTW_FWCD_ROM = 2, + RTW_FWCD_IMEM = 3, + RTW_FWCD_DMEM = 4, + RTW_FWCD_EMEM = 5, +}; + +struct rtw_fw_hdr { + __le16 signature; + u8 category; + u8 function; + __le16 version; + u8 subversion; + u8 subindex; + __le32 rsvd; + __le32 feature; + u8 month; + u8 day; + u8 hour; + u8 min; + __le16 year; + __le16 rsvd3; + u8 mem_usage; + u8 rsvd4[3]; + __le16 h2c_fmt_ver; + __le16 rsvd5; + __le32 dmem_addr; + __le32 dmem_size; + __le32 rsvd6; + __le32 rsvd7; + __le32 imem_size; + __le32 emem_size; + __le32 emem_addr; + __le32 imem_addr; +}; + +struct rtw_fw_hdr_legacy { + __le16 signature; + u8 category; + u8 function; + __le16 version; + u8 subversion1; + u8 
subversion2; + u8 month; + u8 day; + u8 hour; + u8 minute; + __le16 size; + __le16 rsvd2; + __le32 idx; + __le32 rsvd3; + __le32 rsvd4; + __le32 rsvd5; +}; + +struct rtw_watch_dog_iter_data { + struct rtw_dev *rtwdev; + struct rtw_vif *rtwvif; +}; + +struct rtw_fwcd_hdr { + u32 item; + u32 size; + u32 padding1; + u32 padding2; +}; + +struct rtw_txq_ba_iter_data {}; + +struct rtw_iter_port_switch_data { + struct rtw_dev *rtwdev; + struct rtw_vif *rtwvif_ap; +}; + +struct usb_cdc_header_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdCDC; +} __attribute__((packed)); + +struct usb_cdc_call_mgmt_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; + __u8 bDataInterface; +}; + +struct usb_cdc_acm_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; +}; + +struct usb_cdc_union_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bMasterInterface0; + __u8 bSlaveInterface0; +}; + +struct usb_cdc_country_functional_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 iCountryCodeRelDate; + __le16 wCountyCode0; +}; + +struct usb_cdc_network_terminal_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bEntityId; + __u8 iName; + __u8 bChannelIndex; + __u8 bPhysicalInterface; +}; + +struct usb_cdc_ether_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 iMACAddress; + __le32 bmEthernetStatistics; + __le16 wMaxSegmentSize; + __le16 wNumberMCFilters; + __u8 bNumberPowerFilters; +} __attribute__((packed)); + +struct usb_cdc_dmm_desc { + __u8 bFunctionLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u16 bcdVersion; + __le16 wMaxCommand; +} __attribute__((packed)); + +struct usb_cdc_mdlm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; + __u8 bGUID[16]; +} __attribute__((packed)); + +struct usb_cdc_mdlm_detail_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bGuidDescriptorType; + __u8 bDetailData[0]; +}; + +struct usb_cdc_obex_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; +} __attribute__((packed)); + +struct usb_cdc_ncm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdNcmVersion; + __u8 bmNetworkCapabilities; +} __attribute__((packed)); + +struct usb_cdc_mbim_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMVersion; + __le16 wMaxControlMessage; + __u8 bNumberFilters; + __u8 bMaxFilterSize; + __le16 wMaxSegmentSize; + __u8 bmNetworkCapabilities; +} __attribute__((packed)); + +struct usb_cdc_mbim_extended_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMExtendedVersion; + __u8 bMaxOutstandingCommandMessages; + __le16 wMTU; +} __attribute__((packed)); + +struct usb_cdc_parsed_header { + struct usb_cdc_union_desc *usb_cdc_union_desc; + struct usb_cdc_header_desc *usb_cdc_header_desc; + struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; + struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; + struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; + struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; + struct usb_cdc_ether_desc *usb_cdc_ether_desc; + struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; + struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; + struct 
usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; + struct usb_cdc_obex_desc *usb_cdc_obex_desc; + struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; + struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; + struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; + bool phonet_magic_present; +}; + +struct api_context { + struct completion done; + int status; +}; + +struct set_config_request { + struct usb_device *udev; + int config; + struct work_struct work; + struct list_head node; +}; + +struct pps_event_time { + struct timespec64 ts_real; +}; + +enum ptp_clock_events { + PTP_CLOCK_ALARM = 0, + PTP_CLOCK_EXTTS = 1, + PTP_CLOCK_EXTOFF = 2, + PTP_CLOCK_PPS = 3, + PTP_CLOCK_PPSUSR = 4, +}; + +struct ptp_clock_event { + int type; + int index; + union { + u64 timestamp; + s64 offset; + struct pps_event_time pps_times; + }; +}; + +enum { + OD_NORMAL_SAMPLE = 0, + OD_SUB_SAMPLE = 1, +}; + +struct od_ops { + unsigned int (*powersave_bias_target)(struct cpufreq_policy *, unsigned int, unsigned int); +}; + +struct od_policy_dbs_info { + struct policy_dbs_info policy_dbs; + unsigned int freq_lo; + unsigned int freq_lo_delay_us; + unsigned int freq_hi_delay_us; + unsigned int sample_type: 1; +}; + +struct od_dbs_tuners { + unsigned int powersave_bias; +}; + +typedef u16 ucs2_char_t; + +struct of_bus___2 { + void (*count_cells)(const void *, int, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int); + int (*translate)(__be32 *, u64, int); +}; + +struct mbox_client { + struct device *dev; + bool tx_block; + long unsigned int tx_tout; + bool knows_txdone; + void (*rx_callback)(struct mbox_client *, void *); + void (*tx_prepare)(struct mbox_client *, void *); + void (*tx_done)(struct mbox_client *, void *, int); +}; + +struct mbox_chan; + +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *, void *); + int (*flush)(struct mbox_chan *, long unsigned int); + int (*startup)(struct mbox_chan *); + void (*shutdown)(struct mbox_chan *); + bool (*last_tx_done)(struct mbox_chan *); + bool (*peek_data)(struct mbox_chan *); +}; + +struct mbox_controller; + +struct mbox_chan { + struct mbox_controller *mbox; + unsigned int txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned int msg_count; + unsigned int msg_free; + void *msg_data[20]; + spinlock_t lock; + void *con_priv; +}; + +struct mbox_controller { + struct device *dev; + const struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned int txpoll_period; + struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); + long: 32; + struct hrtimer poll_hrt; + spinlock_t poll_hrt_lock; + struct list_head node; + long: 32; +}; + +struct gro_cell { + struct sk_buff_head napi_skbs; + struct napi_struct napi; +}; + +struct percpu_free_defer { + struct callback_head rcu; + void *ptr; +}; + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN = 0, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE = 1, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE = 2, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT = 1, + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE = 2, + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT = 3, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE = 1, + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES = 2, + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY = 3, + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT = 4, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = 3, +}; + 
+enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE = 1, + __ETHTOOL_A_TUNNEL_UDP_CNT = 2, + ETHTOOL_A_TUNNEL_UDP_MAX = 1, +}; + +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN = 1, + UDP_TUNNEL_TYPE_GENEVE = 2, + UDP_TUNNEL_TYPE_VXLAN_GPE = 4, +}; + +enum udp_tunnel_nic_info_flags { + UDP_TUNNEL_NIC_INFO_MAY_SLEEP = 1, + UDP_TUNNEL_NIC_INFO_OPEN_ONLY = 2, + UDP_TUNNEL_NIC_INFO_IPV4_ONLY = 4, + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = 8, +}; + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + long unsigned int ifindex; +}; + +enum { + BPF_F_CURRENT_NETNS = -1, +}; + +struct nf_conn___init { + struct nf_conn ct; +}; + +struct bpf_ct_opts { + s32 netns_id; + s32 error; + u8 l4proto; + u8 dir; + u16 ct_zone_id; + u8 ct_zone_dir; + u8 reserved[3]; +}; + +enum { + NF_BPF_CT_OPTS_SZ = 16, +}; + +enum nft_ct_keys { + NFT_CT_STATE = 0, + NFT_CT_DIRECTION = 1, + NFT_CT_STATUS = 2, + NFT_CT_MARK = 3, + NFT_CT_SECMARK = 4, + NFT_CT_EXPIRATION = 5, + NFT_CT_HELPER = 6, + NFT_CT_L3PROTOCOL = 7, + NFT_CT_SRC = 8, + NFT_CT_DST = 9, + NFT_CT_PROTOCOL = 10, + NFT_CT_PROTO_SRC = 11, + NFT_CT_PROTO_DST = 12, + NFT_CT_LABELS = 13, + NFT_CT_PKTS = 14, + NFT_CT_BYTES = 15, + NFT_CT_AVGPKT = 16, + NFT_CT_ZONE = 17, + NFT_CT_EVENTMASK = 18, + NFT_CT_SRC_IP = 19, + NFT_CT_DST_IP = 20, + NFT_CT_SRC_IP6 = 21, + NFT_CT_DST_IP6 = 22, + NFT_CT_ID = 23, + __NFT_CT_MAX = 24, +}; + +enum nft_ct_attributes { + NFTA_CT_UNSPEC = 0, + NFTA_CT_DREG = 1, + NFTA_CT_KEY = 2, + NFTA_CT_DIRECTION = 3, + NFTA_CT_SREG = 4, + __NFTA_CT_MAX = 5, +}; + +enum nft_ct_helper_attributes { + NFTA_CT_HELPER_UNSPEC = 0, + NFTA_CT_HELPER_NAME = 1, + NFTA_CT_HELPER_L3PROTO = 2, + NFTA_CT_HELPER_L4PROTO = 3, + __NFTA_CT_HELPER_MAX = 4, +}; + +enum nft_ct_expectation_attributes { + NFTA_CT_EXPECT_UNSPEC = 0, + NFTA_CT_EXPECT_L3PROTO = 1, + NFTA_CT_EXPECT_L4PROTO = 2, + NFTA_CT_EXPECT_DPORT = 3, + NFTA_CT_EXPECT_TIMEOUT = 4, + NFTA_CT_EXPECT_SIZE = 5, + __NFTA_CT_EXPECT_MAX = 6, +}; + +struct nft_ct { + enum nft_ct_keys key: 8; + enum ip_conntrack_dir dir: 8; + u8 len; + union { + u8 dreg; + u8 sreg; + }; +}; + +struct nft_ct_helper_obj { + struct nf_conntrack_helper *helper4; + struct nf_conntrack_helper *helper6; + u8 l4proto; +}; + +struct nft_ct_expect_obj { + u16 l3num; + __be16 dport; + u8 l4proto; + u8 size; + u32 timeout; +}; + +struct sock_bh_locked { + struct sock *sock; + local_lock_t bh_lock; +}; + +struct ip_reply_arg { + struct kvec iov[1]; + int flags; + __wsum csum; + int csumoffset; + int bound_dev_if; + u8 tos; + kuid_t uid; +}; + +struct icmp_err { + int errno; + unsigned int fatal: 1; +}; + +enum tcp_seq_states { + TCP_SEQ_STATE_LISTENING = 0, + TCP_SEQ_STATE_ESTABLISHED = 1, +}; + +struct tcp_seq_afinfo { + sa_family_t family; +}; + +struct tcp_iter_state { + struct seq_net_private p; + enum tcp_seq_states state; + struct sock *syn_wait_sk; + int bucket; + int offset; + int sbucket; + int num; + long: 32; + loff_t last_pos; +}; + +struct bpf_tcp_iter_state { + struct tcp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; + long: 32; +}; + +struct bpf_iter__tcp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct sock_common *sk_common; + }; + uid_t uid; + long: 32; +}; + +enum sock_shutdown_cmd { + SHUT_RD = 0, + SHUT_WR = 1, + SHUT_RDWR = 2, +}; + +struct unix_stream_read_state { + int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); + struct socket 
*socket; + struct msghdr *msg; + struct pipe_inode_info *pipe; + size_t size; + int flags; + unsigned int splice_flags; +}; + +struct bpf_unix_iter_state { + struct seq_net_private p; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; +}; + +struct bpf_iter__unix { + union { + struct bpf_iter_meta *meta; + }; + union { + struct unix_sock *unix_sk; + }; + uid_t uid; + long: 32; +}; + +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER = 0, + IP6_DEFRAG_CONNTRACK_IN = 1, + __IP6_DEFRAG_CONNTRACK_IN = 65536, + IP6_DEFRAG_CONNTRACK_OUT = 65537, + __IP6_DEFRAG_CONNTRACK_OUT = 131072, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608, +}; + +enum { + IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC = 0, + IFLA_BRIDGE_VLAN_TUNNEL_ID = 1, + IFLA_BRIDGE_VLAN_TUNNEL_VID = 2, + IFLA_BRIDGE_VLAN_TUNNEL_FLAGS = 3, + __IFLA_BRIDGE_VLAN_TUNNEL_MAX = 4, +}; + +struct vtunnel_info { + u32 tunid; + u16 vid; + u16 flags; +}; + +enum qdisc_state_t { + __QDISC_STATE_SCHED = 0, + __QDISC_STATE_DEACTIVATED = 1, + __QDISC_STATE_MISSED = 2, + __QDISC_STATE_DRAINING = 3, +}; + +struct ieee80211_channel_sw_ie { + u8 mode; + u8 new_ch_num; + u8 count; +}; + +struct ieee80211_sec_chan_offs_ie { + u8 sec_chan_offs; +}; + +struct ieee80211_mesh_chansw_params_ie { + u8 mesh_ttl; + u8 mesh_flags; + __le16 mesh_reason; + __le16 mesh_pre_value; +}; + +struct ieee80211_wide_bw_chansw_ie { + u8 new_channel_width; + u8 new_center_freq_seg0; + u8 new_center_freq_seg1; +}; + +struct ieee80211_rann_ie { + u8 rann_flags; + u8 rann_hopcount; + u8 rann_ttl; + u8 rann_addr[6]; + __le32 rann_seq; + __le32 rann_interval; + __le32 rann_metric; +} __attribute__((packed)); + +struct ieee80211_addba_ext_ie { + u8 data; +}; + +struct ieee80211_s1g_bcn_compat_ie { + __le16 compat_info; + __le16 beacon_int; + __le32 tsf_completion; +}; + +struct ieee80211_s1g_oper_ie { + u8 ch_width; + u8 oper_class; + u8 primary_ch; + u8 oper_ch; + __le16 basic_mcs_nss; +}; + +struct ieee80211_aid_response_ie { + __le16 aid; + u8 switch_count; + __le16 response_int; +} __attribute__((packed)); + +struct ieee80211_ttlm_elem { + u8 control; + u8 optional[0]; +}; + +struct ieee80211_ch_switch_timing { + __le16 switch_time; + __le16 switch_timeout; +}; + +struct ieee80211_tdls_lnkie { + u8 ie_type; + u8 ie_len; + u8 bssid[6]; + u8 init_sta[6]; + u8 resp_sta[6]; +}; + +struct ieee80211_ht_operation { + u8 primary_chan; + u8 ht_param; + __le16 operation_mode; + __le16 stbc_param; + u8 basic_set[16]; +}; + +struct ieee80211_vht_operation { + u8 chan_width; + u8 center_freq_seg0_idx; + u8 center_freq_seg1_idx; + __le16 basic_mcs_set; +} __attribute__((packed)); + +struct ieee80211_he_spr { + u8 he_sr_control; + u8 optional[0]; +}; + +struct ieee80211_mu_edca_param_set { + u8 mu_qos_info; + struct ieee80211_he_mu_edca_param_ac_rec ac_be; + struct ieee80211_he_mu_edca_param_ac_rec ac_bk; + struct ieee80211_he_mu_edca_param_ac_rec ac_vi; + struct ieee80211_he_mu_edca_param_ac_rec ac_vo; +}; + +struct ieee80211_eht_operation_info { + u8 control; + u8 ccfs0; + u8 ccfs1; + u8 optional[0]; +}; + +struct ieee80211_bandwidth_indication { + u8 params; + struct ieee80211_eht_operation_info info; +}; + +struct ieee80211_timeout_interval_ie { + u8 type; + __le32 value; +} __attribute__((packed)); + +struct ieee80211_bss_max_idle_period_ie { + __le16 max_idle_period; + u8 idle_options; +} __attribute__((packed)); + +struct ieee80211_bssid_index { + u8 bssid_index; + u8 dtim_period; + u8 dtim_count; 
+}; + +struct ieee80211_multiple_bssid_configuration { + u8 bssid_count; + u8 profile_periodicity; +}; + +struct ieee80211_mle_per_sta_profile { + __le16 control; + u8 sta_info_len; + u8 variable[0]; +} __attribute__((packed)); + +struct cfg80211_inform_bss { + struct ieee80211_channel *chan; + s32 signal; + u64 boottime_ns; + u64 parent_tsf; + u8 parent_bssid[6]; + u8 chains; + s8 chain_signal[4]; + u8 restrict_use: 1; + u8 use_for: 7; + u8 cannot_use_reasons; + void *drv_data; + long: 32; +}; + +struct ieee80211_bss { + u32 device_ts_beacon; + u32 device_ts_presp; + bool wmm_used; + bool uapsd_supported; + u8 supp_rates[32]; + size_t supp_rates_len; + struct ieee80211_rate *beacon_rate; + u32 vht_cap_info; + bool has_erp_value; + u8 erp_value; + u8 corrupt_data; + u8 valid_data; +}; + +enum ieee80211_bss_corrupt_data_flags { + IEEE80211_BSS_CORRUPT_BEACON = 1, + IEEE80211_BSS_CORRUPT_PROBE_RESP = 2, +}; + +enum ieee80211_bss_valid_data_flags { + IEEE80211_BSS_VALID_WMM = 2, + IEEE80211_BSS_VALID_RATES = 4, + IEEE80211_BSS_VALID_ERP = 8, +}; + +enum ieee80211_sta_flags { + IEEE80211_STA_CONNECTION_POLL = 2, + IEEE80211_STA_CONTROL_PORT = 4, + IEEE80211_STA_MFP_ENABLED = 64, + IEEE80211_STA_UAPSD_ENABLED = 128, + IEEE80211_STA_NULLFUNC_ACKED = 256, + IEEE80211_STA_ENABLE_RRM = 32768, +}; + +struct ieee802_11_elems { + const u8 *ie_start; + size_t total_len; + u32 crc; + const struct ieee80211_tdls_lnkie *lnk_id; + const struct ieee80211_ch_switch_timing *ch_sw_timing; + const u8 *ext_capab; + const u8 *ssid; + const u8 *supp_rates; + const u8 *ds_params; + const struct ieee80211_tim_ie *tim; + const u8 *rsn; + const u8 *rsnx; + const u8 *erp_info; + const u8 *ext_supp_rates; + const u8 *wmm_info; + const u8 *wmm_param; + const struct ieee80211_ht_cap *ht_cap_elem; + const struct ieee80211_ht_operation *ht_operation; + const struct ieee80211_vht_cap *vht_cap_elem; + const struct ieee80211_vht_operation *vht_operation; + const struct ieee80211_meshconf_ie *mesh_config; + const u8 *he_cap; + const struct ieee80211_he_operation *he_operation; + const struct ieee80211_he_spr *he_spr; + const struct ieee80211_mu_edca_param_set *mu_edca_param_set; + const struct ieee80211_he_6ghz_capa *he_6ghz_capa; + const u8 *uora_element; + const u8 *mesh_id; + const u8 *peering; + const __le16 *awake_window; + const u8 *preq; + const u8 *prep; + const u8 *perr; + const struct ieee80211_rann_ie *rann; + const struct ieee80211_channel_sw_ie *ch_switch_ie; + const struct ieee80211_ext_chansw_ie *ext_chansw_ie; + const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; + const u8 *max_channel_switch_time; + const u8 *country_elem; + const u8 *pwr_constr_elem; + const u8 *cisco_dtpc_elem; + const struct ieee80211_timeout_interval_ie *timeout_int; + const u8 *opmode_notif; + const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; + struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; + const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; + const struct ieee80211_multiple_bssid_configuration *mbssid_config_ie; + const struct ieee80211_bssid_index *bssid_index; + u8 max_bssid_indicator; + u8 dtim_count; + u8 dtim_period; + const struct ieee80211_addba_ext_ie *addba_ext_ie; + const struct ieee80211_s1g_cap *s1g_capab; + const struct ieee80211_s1g_oper_ie *s1g_oper; + const struct ieee80211_s1g_bcn_compat_ie *s1g_bcn_compat; + const struct ieee80211_aid_response_ie *aid_resp; + const struct ieee80211_eht_cap_elem *eht_cap; + const struct ieee80211_eht_operation *eht_operation; + const struct 
ieee80211_multi_link_elem *ml_basic; + const struct ieee80211_multi_link_elem *ml_reconf; + const struct ieee80211_bandwidth_indication *bandwidth_indication; + const struct ieee80211_ttlm_elem *ttlm[2]; + struct ieee80211_parsed_tpe tpe; + struct ieee80211_parsed_tpe csa_tpe; + u8 ext_capab_len; + u8 ssid_len; + u8 supp_rates_len; + u8 tim_len; + u8 rsn_len; + u8 rsnx_len; + u8 ext_supp_rates_len; + u8 wmm_info_len; + u8 wmm_param_len; + u8 he_cap_len; + u8 mesh_id_len; + u8 peering_len; + u8 preq_len; + u8 prep_len; + u8 perr_len; + u8 country_elem_len; + u8 bssid_index_len; + u8 eht_cap_len; + size_t ml_basic_len; + size_t ml_reconf_len; + u8 ttlm_num; + struct ieee80211_mle_per_sta_profile *prof; + size_t sta_prof_len; + u8 parse_error; +}; + +struct ieee80211_elems_parse_params { + enum ieee80211_conn_mode mode; + const u8 *start; + size_t len; + bool action; + u64 filter; + u32 crc; + struct cfg80211_bss *bss; + int link_id; + bool from_ap; +}; + +enum { + IEEE80211_PROBE_FLAG_DIRECTED = 1, + IEEE80211_PROBE_FLAG_MIN_CONTENT = 2, + IEEE80211_PROBE_FLAG_RANDOM_SN = 4, +}; + +struct inform_bss_update_data { + struct ieee80211_rx_status *rx_status; + bool beacon; +}; + +struct ieee80211_wmm_ac_param { + u8 aci_aifsn; + u8 cw; + __le16 txop_limit; +}; + +struct ieee80211_wmm_param_ie { + u8 element_id; + u8 len; + u8 oui[3]; + u8 oui_type; + u8 oui_subtype; + u8 version; + u8 qos_info; + u8 reserved; + struct ieee80211_wmm_ac_param ac[4]; +}; + +struct sigaltstack { + void *ss_sp; + int ss_flags; + __kernel_size_t ss_size; +}; + +typedef struct sigaltstack stack_t; + +struct sigcontext { + long unsigned int trap_no; + long unsigned int error_code; + long unsigned int oldmask; + long unsigned int arm_r0; + long unsigned int arm_r1; + long unsigned int arm_r2; + long unsigned int arm_r3; + long unsigned int arm_r4; + long unsigned int arm_r5; + long unsigned int arm_r6; + long unsigned int arm_r7; + long unsigned int arm_r8; + long unsigned int arm_r9; + long unsigned int arm_r10; + long unsigned int arm_fp; + long unsigned int arm_ip; + long unsigned int arm_sp; + long unsigned int arm_lr; + long unsigned int arm_pc; + long unsigned int arm_cpsr; + long unsigned int fault_address; +}; + +struct ucontext { + long unsigned int uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; + int __unused[30]; + long unsigned int uc_regspace[128]; +}; + +struct aux_sigframe { + long unsigned int end_magic; + long: 32; +}; + +struct sigframe { + struct ucontext uc; + long unsigned int retcode[4]; +}; + +struct rt_sigframe { + struct siginfo info; + struct sigframe sig; +}; + +struct irqchip_fwid { + struct fwnode_handle fwnode; + unsigned int type; + char *name; + phys_addr_t *pa; +}; + +struct trace_event_raw_csd_queue_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *func; + void *csd; + char __data[0]; +}; + +struct trace_event_raw_csd_function { + struct trace_entry ent; + void *func; + void *csd; + char __data[0]; +}; + +struct trace_event_data_offsets_csd_queue_cpu {}; + +struct trace_event_data_offsets_csd_function {}; + +typedef void (*btf_trace_csd_queue_cpu)(void *, const unsigned int, long unsigned int, smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_entry)(void *, smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_exit)(void *, smp_call_func_t, call_single_data_t *); + +struct call_function_data { + call_single_data_t *csd; + cpumask_var_t 
cpumask; + cpumask_var_t cpumask_ipi; +}; + +struct smp_call_on_cpu_struct { + struct work_struct work; + struct completion done; + int (*func)(void *); + void *data; + int ret; + int cpu; +}; + +struct cpu_stop_done { + atomic_t nr_todo; + int ret; + struct completion completion; +}; + +struct cpu_stopper { + struct task_struct *thread; + raw_spinlock_t lock; + bool enabled; + struct list_head works; + struct cpu_stop_work stop_work; + long unsigned int caller; + cpu_stop_fn_t fn; +}; + +enum multi_stop_state { + MULTI_STOP_NONE = 0, + MULTI_STOP_PREPARE = 1, + MULTI_STOP_DISABLE_IRQ = 2, + MULTI_STOP_RUN = 3, + MULTI_STOP_EXIT = 4, +}; + +struct multi_stop_data { + cpu_stop_fn_t fn; + void *data; + unsigned int num_threads; + const struct cpumask *active_cpus; + enum multi_stop_state state; + atomic_t thread_ack; +}; + +enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + BPF_PROG_BIND_MAP = 35, + BPF_TOKEN_CREATE = 36, + __MAX_BPF_CMD = 37, +}; + +struct btf_enum { + __u32 name_off; + __s32 val; +}; + +struct bpf_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; + long: 32; + u64 delegate_cmds; + u64 delegate_maps; + u64 delegate_progs; + u64 delegate_attachs; +}; + +struct bpf_preload_info { + char link_name[16]; + struct bpf_link *link; +}; + +struct bpf_preload_ops { + int (*preload)(struct bpf_preload_info *); + struct module *owner; +}; + +enum bpf_type { + BPF_TYPE_UNSPEC = 0, + BPF_TYPE_PROG = 1, + BPF_TYPE_MAP = 2, + BPF_TYPE_LINK = 3, +}; + +struct map_iter { + void *key; + bool done; +}; + +struct bpffs_btf_enums { + const struct btf *btf; + const struct btf_type *cmd_t; + const struct btf_type *map_t; + const struct btf_type *prog_t; + const struct btf_type *attach_t; +}; + +enum { + OPT_UID = 0, + OPT_GID = 1, + OPT_MODE = 2, + OPT_DELEGATE_CMDS = 3, + OPT_DELEGATE_MAPS = 4, + OPT_DELEGATE_PROGS = 5, + OPT_DELEGATE_ATTACHS = 6, +}; + +struct bpf_cpumap_val { + __u32 qsize; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct xdp_cpumap_stats { + unsigned int redirect; + unsigned int pass; + unsigned int drop; +}; + +struct bpf_cpu_map_entry; + +struct xdp_bulk_queue { + void *q[8]; + struct list_head flush_node; + struct bpf_cpu_map_entry *obj; + unsigned int count; +}; + +struct bpf_cpu_map_entry { + u32 cpu; + int map_id; + struct xdp_bulk_queue *bulkq; + struct ptr_ring *queue; + struct task_struct *kthread; + struct bpf_cpumap_val value; + struct bpf_prog *prog; + struct completion kthread_running; + struct rcu_work free_work; +}; + +struct bpf_cpu_map { + struct bpf_map map; + struct bpf_cpu_map_entry **cpu_map; + long: 32; +}; + +typedef union { +} 
release_pages_arg; + +struct trace_event_raw_mm_lru_insertion { + struct trace_entry ent; + struct folio *folio; + long unsigned int pfn; + enum lru_list lru; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_mm_lru_activate { + struct trace_entry ent; + struct folio *folio; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_lru_insertion {}; + +struct trace_event_data_offsets_mm_lru_activate {}; + +typedef void (*btf_trace_mm_lru_insertion)(void *, struct folio *); + +typedef void (*btf_trace_mm_lru_activate)(void *, struct folio *); + +struct cpu_fbatches { + local_lock_t lock; + struct folio_batch lru_add; + struct folio_batch lru_deactivate_file; + struct folio_batch lru_deactivate; + struct folio_batch lru_lazyfree; + struct folio_batch lru_activate; + local_lock_t lock_irq; + struct folio_batch lru_move_tail; +}; + +typedef void (*move_fn_t)(struct lruvec *, struct folio *); + +struct dma_block { + struct dma_block *next_block; + dma_addr_t dma; +}; + +struct dma_pool { + struct list_head page_list; + spinlock_t lock; + struct dma_block *next_block; + size_t nr_blocks; + size_t nr_active; + size_t nr_pages; + struct device *dev; + unsigned int size; + unsigned int allocation; + unsigned int boundary; + char name[32]; + struct list_head pools; +}; + +struct dma_page { + struct list_head page_list; + void *vaddr; + dma_addr_t dma; +}; + +struct saved { + struct path link; + struct delayed_call done; + const char *name; + unsigned int seq; +}; + +struct nameidata { + struct path path; + struct qstr last; + struct path root; + struct inode *inode; + unsigned int flags; + unsigned int state; + unsigned int seq; + unsigned int next_seq; + unsigned int m_seq; + unsigned int r_seq; + int last_type; + unsigned int depth; + int total_link_count; + struct saved *stack; + struct saved internal[2]; + struct filename *name; + const char *pathname; + struct nameidata *saved; + unsigned int root_seq; + int dfd; + vfsuid_t dir_vfsuid; + umode_t dir_mode; +}; + +struct renamedata { + struct mnt_idmap *old_mnt_idmap; + struct inode *old_dir; + struct dentry *old_dentry; + struct mnt_idmap *new_mnt_idmap; + struct inode *new_dir; + struct dentry *new_dentry; + struct inode **delegated_inode; + unsigned int flags; +}; + +enum { + LAST_NORM = 0, + LAST_ROOT = 1, + LAST_DOT = 2, + LAST_DOTDOT = 3, +}; + +enum { + WALK_TRAILING = 1, + WALK_MORE = 2, + WALK_NOFOLLOW = 4, +}; + +typedef __kernel_mode_t mode_t; + +struct orlov_stats { + __u64 free_clusters; + __u32 free_inodes; + __u32 used_dirs; +}; + +struct isofs_fid { + u32 block; + u16 offset; + u16 parent_offset; + u32 generation; + u32 parent_block; + u32 parent_generation; +}; + +struct debugfs_reg32 { + char *name; + long unsigned int offset; +}; + +struct debugfs_regset32 { + const struct debugfs_reg32 *regs; + int nregs; + void *base; + struct device *dev; +}; + +struct debugfs_devm_entry { + int (*read)(struct seq_file *, void *); + struct device *dev; +}; + +struct btrfs_ioctl_qgroup_limit_args { + __u64 qgroupid; + struct btrfs_qgroup_limit lim; +}; + +struct btrfs_ioctl_vol_args_v2 { + __s64 fd; + __u64 transid; + __u64 flags; + union { + struct { + __u64 size; + struct btrfs_qgroup_inherit *qgroup_inherit; + long: 32; + }; + __u64 unused[4]; + }; + union { + char name[4040]; + __u64 devid; + __u64 subvolid; + }; +}; + +struct btrfs_ioctl_scrub_args { + __u64 devid; + __u64 start; + __u64 end; + __u64 flags; + struct btrfs_scrub_progress progress; + __u64 unused[109]; +}; + +struct 
btrfs_ioctl_dev_replace_start_params { + __u64 srcdevid; + __u64 cont_reading_from_srcdev_mode; + __u8 srcdev_name[1025]; + __u8 tgtdev_name[1025]; + long: 32; +}; + +struct btrfs_ioctl_dev_replace_status_params { + __u64 replace_state; + __u64 progress_1000; + __u64 time_started; + __u64 time_stopped; + __u64 num_write_errors; + __u64 num_uncorrectable_read_errors; +}; + +struct btrfs_ioctl_dev_replace_args { + __u64 cmd; + __u64 result; + union { + struct btrfs_ioctl_dev_replace_start_params start; + struct btrfs_ioctl_dev_replace_status_params status; + }; + __u64 spare[64]; +}; + +struct btrfs_ioctl_dev_info_args { + __u64 devid; + __u8 uuid[16]; + __u64 bytes_used; + __u64 total_bytes; + __u8 fsid[16]; + __u64 unused[377]; + __u8 path[1024]; +}; + +struct btrfs_ioctl_fs_info_args { + __u64 max_id; + __u64 num_devices; + __u8 fsid[16]; + __u32 nodesize; + __u32 sectorsize; + __u32 clone_alignment; + __u16 csum_type; + __u16 csum_size; + __u64 flags; + __u64 generation; + __u8 metadata_uuid[16]; + __u8 reserved[944]; +}; + +struct btrfs_ioctl_feature_flags { + __u64 compat_flags; + __u64 compat_ro_flags; + __u64 incompat_flags; +}; + +struct btrfs_ioctl_balance_args { + __u64 flags; + __u64 state; + struct btrfs_balance_args data; + struct btrfs_balance_args meta; + struct btrfs_balance_args sys; + struct btrfs_balance_progress stat; + __u64 unused[72]; +}; + +struct btrfs_ioctl_ino_lookup_args { + __u64 treeid; + __u64 objectid; + char name[4080]; +}; + +struct btrfs_ioctl_ino_lookup_user_args { + __u64 dirid; + __u64 treeid; + char name[256]; + char path[3824]; +}; + +struct btrfs_ioctl_search_key { + __u64 tree_id; + __u64 min_objectid; + __u64 max_objectid; + __u64 min_offset; + __u64 max_offset; + __u64 min_transid; + __u64 max_transid; + __u32 min_type; + __u32 max_type; + __u32 nr_items; + __u32 unused; + __u64 unused1; + __u64 unused2; + __u64 unused3; + __u64 unused4; +}; + +struct btrfs_ioctl_search_header { + __u64 transid; + __u64 objectid; + __u64 offset; + __u32 type; + __u32 len; +}; + +struct btrfs_ioctl_search_args { + struct btrfs_ioctl_search_key key; + char buf[3992]; +}; + +struct btrfs_ioctl_search_args_v2 { + struct btrfs_ioctl_search_key key; + __u64 buf_size; + __u64 buf[0]; +}; + +struct btrfs_ioctl_space_info { + __u64 flags; + __u64 total_bytes; + __u64 used_bytes; +}; + +struct btrfs_ioctl_space_args { + __u64 space_slots; + __u64 total_spaces; + struct btrfs_ioctl_space_info spaces[0]; +}; + +struct btrfs_ioctl_ino_path_args { + __u64 inum; + __u64 size; + __u64 reserved[4]; + __u64 fspath; +}; + +struct btrfs_ioctl_logical_ino_args { + __u64 logical; + __u64 size; + __u64 reserved[3]; + __u64 flags; + __u64 inodes; +}; + +struct btrfs_ioctl_get_dev_stats { + __u64 devid; + __u64 nr_items; + __u64 flags; + __u64 values[5]; + __u64 unused[121]; +}; + +struct btrfs_ioctl_quota_rescan_args { + __u64 flags; + __u64 progress; + __u64 reserved[6]; +}; + +struct btrfs_ioctl_qgroup_assign_args { + __u64 assign; + __u64 src; + __u64 dst; +}; + +struct btrfs_ioctl_qgroup_create_args { + __u64 create; + __u64 qgroupid; +}; + +struct btrfs_ioctl_timespec { + __u64 sec; + __u32 nsec; + long: 32; +}; + +struct btrfs_ioctl_received_subvol_args { + char uuid[16]; + __u64 stransid; + __u64 rtransid; + struct btrfs_ioctl_timespec stime; + struct btrfs_ioctl_timespec rtime; + __u64 flags; + __u64 reserved[16]; +}; + +struct btrfs_ioctl_send_args { + __s64 send_fd; + __u64 clone_sources_count; + __u64 *clone_sources; + long: 32; + __u64 parent_root; + __u64 flags; + __u32 
version; + __u8 reserved[28]; +}; + +struct btrfs_ioctl_get_subvol_info_args { + __u64 treeid; + char name[256]; + __u64 parent_id; + __u64 dirid; + __u64 generation; + __u64 flags; + __u8 uuid[16]; + __u8 parent_uuid[16]; + __u8 received_uuid[16]; + __u64 ctransid; + __u64 otransid; + __u64 stransid; + __u64 rtransid; + struct btrfs_ioctl_timespec ctime; + struct btrfs_ioctl_timespec otime; + struct btrfs_ioctl_timespec stime; + struct btrfs_ioctl_timespec rtime; + __u64 reserved[8]; +}; + +struct btrfs_ioctl_get_subvol_rootref_args { + __u64 min_treeid; + struct { + __u64 treeid; + __u64 dirid; + } rootref[255]; + __u8 num_items; + __u8 align[7]; +}; + +struct btrfs_ioctl_encoded_io_args { + const struct iovec *iov; + long unsigned int iovcnt; + __s64 offset; + __u64 flags; + __u64 len; + __u64 unencoded_len; + __u64 unencoded_offset; + __u32 compression; + __u32 encryption; + __u8 reserved[64]; +}; + +struct btrfs_ioctl_subvol_wait { + __u64 subvolid; + __u32 mode; + __u32 count; +}; + +enum btrfs_err_code { + BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1, + BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET = 2, + BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET = 3, + BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET = 4, + BTRFS_ERROR_DEV_TGT_REPLACE = 5, + BTRFS_ERROR_DEV_MISSING_NOT_FOUND = 6, + BTRFS_ERROR_DEV_ONLY_WRITABLE = 7, + BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS = 8, + BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET = 9, + BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET = 10, +}; + +struct btrfs_new_inode_args { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool orphan; + bool subvol; + struct posix_acl *default_acl; + struct posix_acl *acl; + struct fscrypt_name fname; +}; + +struct btrfs_dev_lookup_args { + u64 devid; + u8 *uuid; + u8 *fsid; + bool missing; + long: 32; +}; + +struct btrfs_uring_priv { + struct io_uring_cmd *cmd; + struct page **pages; + long unsigned int nr_pages; + long: 32; + struct kiocb iocb; + struct iovec *iov; + long: 32; + struct iov_iter iter; + struct extent_state *cached_state; + long: 32; + u64 count; + u64 start; + u64 lockend; + int err; + bool compressed; +}; + +struct io_btrfs_cmd { + struct btrfs_uring_priv *priv; +}; + +struct keyctl_dh_params { + union { + __s32 private; + __s32 priv; + }; + __s32 prime; + __s32 base; +}; + +struct keyctl_kdf_params { + char *hashname; + char *otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct crypto_attr_alg { + char name[128]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +enum { + CRYPTO_MSG_ALG_REQUEST = 0, + CRYPTO_MSG_ALG_REGISTER = 1, + CRYPTO_MSG_ALG_LOADED = 2, +}; + +struct crypto_larval { + struct crypto_alg alg; + struct crypto_alg *adult; + struct completion completion; + u32 mask; + bool test_started; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum { + CRYPTOA_UNSPEC = 0, + CRYPTOA_ALG = 1, + CRYPTOA_TYPE = 2, + __CRYPTOA_MAX = 3, +}; + +struct cryptomgr_param { + struct rtattr *tb[34]; + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } attrs[32]; + char template[128]; + struct crypto_larval *larval; + u32 otype; + u32 omask; +}; + +struct crypto_test_param { + char driver[128]; + char alg[128]; + u32 type; +}; + +struct io_fadvise { + struct file *file; + long: 32; + u64 offset; + u64 len; + u32 advice; + long: 32; +}; + +struct io_madvise { + struct file *file; + long: 32; + u64 addr; + u64 len; + u32 advice; + long: 32; +}; + +typedef unsigned int 
uInt; + +typedef unsigned char uch; + +typedef short unsigned int ush; + +typedef long unsigned int ulg; + +struct ct_data_s { + union { + ush freq; + ush code; + } fc; + union { + ush dad; + ush len; + } dl; +}; + +typedef struct ct_data_s ct_data; + +struct static_tree_desc_s { + const ct_data *static_tree; + const int *extra_bits; + int extra_base; + int elems; + int max_length; +}; + +typedef struct static_tree_desc_s static_tree_desc; + +struct tree_desc_s { + ct_data *dyn_tree; + int max_code; + static_tree_desc *stat_desc; +}; + +typedef ush Pos; + +typedef unsigned int IPos; + +struct deflate_state { + z_streamp strm; + int status; + Byte *pending_buf; + ulg pending_buf_size; + Byte *pending_out; + int pending; + int noheader; + Byte data_type; + Byte method; + int last_flush; + uInt w_size; + uInt w_bits; + uInt w_mask; + Byte *window; + ulg window_size; + Pos *prev; + Pos *head; + uInt ins_h; + uInt hash_size; + uInt hash_bits; + uInt hash_mask; + uInt hash_shift; + long int block_start; + uInt match_length; + IPos prev_match; + int match_available; + uInt strstart; + uInt match_start; + uInt lookahead; + uInt prev_length; + uInt max_chain_length; + uInt max_lazy_match; + int level; + int strategy; + uInt good_match; + int nice_match; + struct ct_data_s dyn_ltree[573]; + struct ct_data_s dyn_dtree[61]; + struct ct_data_s bl_tree[39]; + struct tree_desc_s l_desc; + struct tree_desc_s d_desc; + struct tree_desc_s bl_desc; + ush bl_count[16]; + int heap[573]; + int heap_len; + int heap_max; + uch depth[573]; + uch *l_buf; + uInt lit_bufsize; + uInt last_lit; + ush *d_buf; + ulg opt_len; + ulg static_len; + ulg compressed_len; + uInt matches; + int last_eob_len; + ush bi_buf; + int bi_valid; +}; + +typedef struct deflate_state deflate_state; + +typedef enum { + need_more = 0, + block_done = 1, + finish_started = 2, + finish_done = 3, +} block_state; + +typedef block_state (*compress_func)(deflate_state *, int); + +struct deflate_workspace { + deflate_state deflate_memory; + Byte *window_memory; + Pos *prev_memory; + Pos *head_memory; + char *overlay_memory; +}; + +typedef struct deflate_workspace deflate_workspace; + +struct config_s { + ush good_length; + ush max_lazy; + ush nice_length; + ush max_chain; + compress_func func; +}; + +typedef struct config_s config; + +typedef ZSTD_ErrorCode ERR_enum; + +struct pcie_link_state { + struct pci_dev *pdev; + struct pci_dev *downstream; + struct pcie_link_state *root; + struct pcie_link_state *parent; + struct list_head sibling; + u32 aspm_support: 7; + u32 aspm_enabled: 7; + u32 aspm_capable: 7; + u32 aspm_default: 7; + long: 4; + u32 aspm_disable: 7; + u32 clkpm_capable: 1; + u32 clkpm_enabled: 1; + u32 clkpm_default: 1; + u32 clkpm_disable: 1; +}; + +struct vc_selection { + struct mutex lock; + struct vc_data *cons; + char *buffer; + unsigned int buf_len; + volatile int start; + int end; +}; + +struct device_attach_data { + struct device *dev; + bool check_async; + bool want_async; + bool have_async; +}; + +struct trace_event_raw_scsi_dispatch_cmd_start { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_dispatch_cmd_error { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + 
int rtn; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_cmd_done_timeout_template { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + int result; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + u8 sense_key; + u8 asc; + u8 ascq; + char __data[0]; +}; + +struct trace_event_raw_scsi_eh_wakeup { + struct trace_entry ent; + unsigned int host_no; + char __data[0]; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_start { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_error { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_cmd_done_timeout_template { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_eh_wakeup {}; + +typedef void (*btf_trace_scsi_dispatch_cmd_start)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_error)(void *, struct scsi_cmnd *, int); + +typedef void (*btf_trace_scsi_dispatch_cmd_done)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_timeout)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_eh_wakeup)(void *, struct Scsi_Host *); + +enum scsi_vpd_parameters { + SCSI_VPD_HEADER_SIZE = 4, + SCSI_VPD_LIST_SIZE = 36, +}; + +struct phy_device_node { + enum phy_upstream upstream_type; + union { + struct net_device *netdev; + struct phy_device *phydev; + } upstream; + struct sfp_bus *parent_sfp_bus; + struct phy_device *phy; +}; + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf = 1, + e1000_mng_mode_pt = 2, + e1000_mng_mode_ipmi = 3, + e1000_mng_mode_host_if_only = 4, +}; + +struct iwl_shared_mem_cfg_v2 { + __le32 shared_mem_addr; + __le32 shared_mem_size; + __le32 sample_buff_addr; + __le32 sample_buff_size; + __le32 txfifo_addr; + __le32 txfifo_size[8]; + __le32 rxfifo_size[2]; + __le32 page_buff_addr; + __le32 page_buff_size; + __le32 rxfifo_addr; + __le32 internal_txfifo_addr; + __le32 internal_txfifo_size[6]; +}; + +struct iwl_shared_mem_lmac_cfg { + __le32 txfifo_addr; + __le32 txfifo_size[15]; + __le32 rxfifo1_addr; + __le32 rxfifo1_size; +}; + +struct iwl_shared_mem_cfg { + __le32 shared_mem_addr; + __le32 shared_mem_size; + __le32 sample_buff_addr; + __le32 sample_buff_size; + __le32 rxfifo2_addr; + __le32 rxfifo2_size; + __le32 page_buff_addr; + __le32 page_buff_size; + __le32 lmac_num; + struct iwl_shared_mem_lmac_cfg lmac_smem[3]; + __le32 rxfifo2_control_addr; + __le32 rxfifo2_control_size; +}; + +enum nl80211_eht_ru_alloc { + NL80211_RATE_INFO_EHT_RU_ALLOC_26 = 0, + NL80211_RATE_INFO_EHT_RU_ALLOC_52 = 1, + NL80211_RATE_INFO_EHT_RU_ALLOC_52P26 = 2, + NL80211_RATE_INFO_EHT_RU_ALLOC_106 = 3, + NL80211_RATE_INFO_EHT_RU_ALLOC_106P26 = 4, + NL80211_RATE_INFO_EHT_RU_ALLOC_242 = 5, + NL80211_RATE_INFO_EHT_RU_ALLOC_484 = 6, + NL80211_RATE_INFO_EHT_RU_ALLOC_484P242 = 7, + NL80211_RATE_INFO_EHT_RU_ALLOC_996 = 8, + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484 = 9, + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242 = 10, + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996 = 11, + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484 = 12, + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996 = 13, + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484 
= 14, + NL80211_RATE_INFO_EHT_RU_ALLOC_4x996 = 15, +}; + +struct iwl_fw_dbg_trigger_low_rssi { + __le32 rssi; +}; + +enum ieee80211_radiotap_he_mu_bits { + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 15, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 16, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 32, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 64, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 128, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 256, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 512, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 4096, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 8192, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 16384, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 32768, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 3, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 1, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 2, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 3, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 4, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 8, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 240, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 768, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN = 1024, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 2048, +}; + +enum ieee80211_radiotap_lsig_data1 { + IEEE80211_RADIOTAP_LSIG_DATA1_RATE_KNOWN = 1, + IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN = 2, +}; + +enum ieee80211_radiotap_lsig_data2 { + IEEE80211_RADIOTAP_LSIG_DATA2_RATE = 15, + IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH = 65520, +}; + +enum ieee80211_radiotap_zero_len_psdu_type { + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING = 0, + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED = 1, + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR = 255, +}; + +struct ieee80211_radiotap_tlv { + __le16 type; + __le16 len; + u8 data[0]; +}; + +struct ieee80211_radiotap_vendor_content { + u8 oui[3]; + u8 oui_subtype; + __le16 vendor_type; + __le16 reserved; + u8 data[0]; +}; + +struct ieee80211_radiotap_eht_usig { + __le32 common; + __le32 value; + __le32 mask; +}; + +struct ieee80211_radiotap_eht { + __le32 known; + __le32 data[9]; + __le32 user_info[0]; +}; + +enum ieee80211_radiotap_eht_known { + IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE = 2, + IEEE80211_RADIOTAP_EHT_KNOWN_GI = 4, + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF = 16, + IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM = 32, + IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM = 64, + IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM = 128, + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O = 256, + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S = 512, + IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 = 8192, + IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1 = 16384, + IEEE80211_RADIOTAP_EHT_KNOWN_CRC2_O = 32768, + IEEE80211_RADIOTAP_EHT_KNOWN_TAIL2_O = 65536, + IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S = 131072, + IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S = 262144, + IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M = 524288, + IEEE80211_RADIOTAP_EHT_KNOWN_ENCODING_BLOCK_CRC_M = 1048576, + IEEE80211_RADIOTAP_EHT_KNOWN_ENCODING_BLOCK_TAIL_M = 2097152, + IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM = 4194304, + IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM = 8388608, + IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT = 16777216, + IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80 = 33554432, +}; + +enum ieee80211_radiotap_eht_data { + 
IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE = 120, + IEEE80211_RADIOTAP_EHT_DATA0_GI = 384, + IEEE80211_RADIOTAP_EHT_DATA0_LTF = 1536, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF = 14336, + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM = 16384, + IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM = 98304, + IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM = 131072, + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S = 786432, + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O = 3932160, + IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O = 62914560, + IEEE80211_RADIOTAP_EHT_DATA0_TAIL1_O = 4227858432, + IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE = 31, + IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX = 8160, + IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1 = 4186112, + IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN = 4194304, + IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80 = 3221225472, + IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_2_1_1 = 511, + IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_2_1_1_KNOWN = 512, + IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2 = 523264, + IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN = 524288, + IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_2_1_2 = 535822336, + IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_2_1_2_KNOWN = 536870912, + IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1 = 511, + IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN = 512, + IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_2_2_1 = 523264, + IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_2_2_1_KNOWN = 524288, + IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2 = 535822336, + IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN = 536870912, + IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_2 = 511, + IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_2_KNOWN = 512, + IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3 = 523264, + IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN = 524288, + IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_3 = 535822336, + IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_2_2_3_KNOWN = 536870912, + IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4 = 511, + IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN = 512, + IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_2_2_4 = 523264, + IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_2_2_4_KNOWN = 524288, + IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5 = 535822336, + IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN = 536870912, + IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_5 = 511, + IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_5_KNOWN = 512, + IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6 = 523264, + IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN = 524288, + IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_6 = 535822336, + IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_2_2_6_KNOWN = 536870912, + IEEE80211_RADIOTAP_EHT_DATA7_CRC2_O = 15, + IEEE80211_RADIOTAP_EHT_DATA7_TAIL_2_O = 1008, + IEEE80211_RADIOTAP_EHT_DATA7_NSS_S = 61440, + IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S = 65536, + IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS = 917504, + IEEE80211_RADIOTAP_EHT_DATA7_USER_ENCODING_BLOCK_CRC = 15728640, + IEEE80211_RADIOTAP_EHT_DATA7_USER_ENCODING_BLOCK_TAIL = 1056964608, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160 = 1, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0 = 2, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1 = 508, +}; + +enum ieee80211_radiotap_eht_user_info { + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN = 1, + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN = 2, + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN = 4, + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O = 16, + 
IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O = 32, + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M = 64, + IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER = 128, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID = 524032, + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING = 524288, + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS = 15728640, + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O = 251658240, + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O = 536870912, + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M = 1056964608, + IEEE80211_RADIOTAP_EHT_USER_INFO_RESEVED_c0000000 = 3221225472, +}; + +enum ieee80211_radiotap_eht_usig_common { + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN = 1, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN = 2, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN = 4, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN = 8, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN = 16, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC = 32, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED = 64, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK = 128, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER = 28672, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW = 229376, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ = 0, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ = 1, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ = 2, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ = 3, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1 = 4, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_2 = 5, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL = 262144, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR = 33030144, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP = 4261412864, +}; + +enum ieee80211_radiotap_eht_usig_mu { + IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD = 31, + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE = 32, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE = 192, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE = 256, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO = 15872, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE = 16384, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS = 98304, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS = 4063232, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC = 62914560, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL = 4227858432, +}; + +enum ieee80211_radiotap_eht_usig_tb { + IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD = 31, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE = 192, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE = 256, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1 = 7680, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2 = 122880, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD = 4063232, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC = 62914560, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL = 4227858432, +}; + +enum iwl_mvm_rx_status { + RX_MPDU_RES_STATUS_CRC_OK = 1, + RX_MPDU_RES_STATUS_OVERRUN_OK = 2, + RX_MPDU_RES_STATUS_SRC_STA_FOUND = 4, + RX_MPDU_RES_STATUS_KEY_VALID = 8, + RX_MPDU_RES_STATUS_ICV_OK = 32, + RX_MPDU_RES_STATUS_MIC_OK = 64, + RX_MPDU_RES_STATUS_TTAK_OK = 128, + RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = 128, + RX_MPDU_RES_STATUS_SEC_NO_ENC = 0, + RX_MPDU_RES_STATUS_SEC_WEP_ENC = 256, + RX_MPDU_RES_STATUS_SEC_CCM_ENC = 512, + RX_MPDU_RES_STATUS_SEC_TKIP_ENC = 768, + RX_MPDU_RES_STATUS_SEC_EXT_ENC = 1024, + RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC = 1536, + RX_MPDU_RES_STATUS_SEC_ENC_ERR = 1792, + RX_MPDU_RES_STATUS_SEC_ENC_MSK = 1792, + RX_MPDU_RES_STATUS_DEC_DONE = 2048, + 
RX_MPDU_RES_STATUS_CSUM_DONE = 65536, + RX_MPDU_RES_STATUS_CSUM_OK = 131072, + RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24, + RX_MPDU_RES_STATUS_STA_ID_MSK = 520093696, +}; + +enum iwl_rx_mpdu_mac_flags1 { + IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK = 3, + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK = 240, + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT = 3, +}; + +enum iwl_rx_mpdu_mac_flags2 { + IWL_RX_MPDU_MFLG2_HDR_LEN_MASK = 31, + IWL_RX_MPDU_MFLG2_PAD = 32, + IWL_RX_MPDU_MFLG2_AMSDU = 64, +}; + +enum iwl_rx_mpdu_amsdu_info { + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 127, + IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 128, +}; + +enum iwl_rx_l3_proto_values { + IWL_RX_L3_TYPE_NONE = 0, + IWL_RX_L3_TYPE_IPV4 = 1, + IWL_RX_L3_TYPE_IPV4_FRAG = 2, + IWL_RX_L3_TYPE_IPV6_FRAG = 3, + IWL_RX_L3_TYPE_IPV6 = 4, + IWL_RX_L3_TYPE_IPV6_IN_IPV4 = 5, + IWL_RX_L3_TYPE_ARP = 6, + IWL_RX_L3_TYPE_EAPOL = 7, +}; + +enum iwl_rx_l3l4_flags { + IWL_RX_L3L4_IP_HDR_CSUM_OK = 1, + IWL_RX_L3L4_TCP_UDP_CSUM_OK = 2, + IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = 4, + IWL_RX_L3L4_TCP_ACK = 8, + IWL_RX_L3L4_L3_PROTO_MASK = 240, + IWL_RX_L3L4_L4_PROTO_MASK = 3840, + IWL_RX_L3L4_RSS_HASH_MASK = 61440, +}; + +enum iwl_rx_mpdu_status { + IWL_RX_MPDU_STATUS_CRC_OK = 1, + IWL_RX_MPDU_STATUS_OVERRUN_OK = 2, + IWL_RX_MPDU_STATUS_SRC_STA_FOUND = 4, + IWL_RX_MPDU_STATUS_KEY_VALID = 8, + IWL_RX_MPDU_STATUS_ICV_OK = 32, + IWL_RX_MPDU_STATUS_MIC_OK = 64, + IWL_RX_MPDU_RES_STATUS_TTAK_OK = 128, + IWL_RX_MPDU_STATUS_REPLAY_ERROR = 128, + IWL_RX_MPDU_STATUS_SEC_MASK = 1792, + IWL_RX_MPDU_STATUS_SEC_UNKNOWN = 1792, + IWL_RX_MPDU_STATUS_SEC_NONE = 0, + IWL_RX_MPDU_STATUS_SEC_WEP = 256, + IWL_RX_MPDU_STATUS_SEC_CCM = 512, + IWL_RX_MPDU_STATUS_SEC_TKIP = 768, + IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 1024, + IWL_RX_MPDU_STATUS_SEC_GCM = 1280, + IWL_RX_MPDU_STATUS_DECRYPTED = 2048, + IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = 32768, + IWL_RX_MPDU_STATUS_DUPLICATE = 4194304, + IWL_RX_MPDU_STATUS_STA_ID = 520093696, +}; + +enum iwl_rx_mpdu_reorder_data { + IWL_RX_MPDU_REORDER_NSSN_MASK = 4095, + IWL_RX_MPDU_REORDER_SN_MASK = 16773120, + IWL_RX_MPDU_REORDER_SN_SHIFT = 12, + IWL_RX_MPDU_REORDER_BAID_MASK = 2130706432, + IWL_RX_MPDU_REORDER_BAID_SHIFT = 24, + IWL_RX_MPDU_REORDER_BA_OLD_SN = 2147483648, +}; + +enum iwl_rx_mpdu_phy_info { + IWL_RX_MPDU_PHY_AMPDU = 32, + IWL_RX_MPDU_PHY_AMPDU_TOGGLE = 64, + IWL_RX_MPDU_PHY_SHORT_PREAMBLE = 128, + IWL_RX_MPDU_PHY_NCCK_ADDTL_NTFY = 128, + IWL_RX_MPDU_PHY_TSF_OVERLOAD = 256, +}; + +enum iwl_rx_phy_he_data0 { + IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 1, + IWL_RX_PHY_DATA0_HE_UPLINK = 2, + IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK = 252, + IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK = 3840, + IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK = 1040384, + IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM = 1048576, + IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK = 6291456, + IWL_RX_PHY_DATA0_HE_PE_DISAMBIG = 8388608, + IWL_RX_PHY_DATA0_HE_DOPPLER = 16777216, + IWL_RX_PHY_DATA0_HE_DELIM_EOF = 2147483648, +}; + +enum iwl_rx_phy_eht_data0 { + IWL_RX_PHY_DATA0_EHT_VALIDATE = 1, + IWL_RX_PHY_DATA0_EHT_UPLINK = 2, + IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK = 252, + IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK = 3840, + IWL_RX_PHY_DATA0_EHT_PS160 = 4096, + IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK = 1040384, + IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM = 1048576, + IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK = 6291456, + IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG = 8388608, + IWL_RX_PHY_DATA0_EHT_BW320_SLOT = 16777216, + IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK = 33554432, + IWL_RX_PHY_DATA0_EHT_PHY_VER = 469762048, + IWL_RX_PHY_DATA0_EHT_DELIM_EOF = 2147483648, +}; + +enum 
iwl_rx_phy_info_type { + IWL_RX_PHY_INFO_TYPE_NONE = 0, + IWL_RX_PHY_INFO_TYPE_CCK = 1, + IWL_RX_PHY_INFO_TYPE_OFDM_LGCY = 2, + IWL_RX_PHY_INFO_TYPE_HT = 3, + IWL_RX_PHY_INFO_TYPE_VHT_SU = 4, + IWL_RX_PHY_INFO_TYPE_VHT_MU = 5, + IWL_RX_PHY_INFO_TYPE_HE_SU = 6, + IWL_RX_PHY_INFO_TYPE_HE_MU = 7, + IWL_RX_PHY_INFO_TYPE_HE_TB = 8, + IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9, + IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10, + IWL_RX_PHY_INFO_TYPE_EHT_MU = 11, + IWL_RX_PHY_INFO_TYPE_EHT_TB = 12, + IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT = 13, + IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT = 14, +}; + +enum iwl_rx_phy_common_data1 { + IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 4026531840, + IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 268369920, +}; + +enum iwl_rx_phy_he_data1 { + IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 1, + IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 30, + IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK = 224, + IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80 = 256, + IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK = 65024, + IWL_RX_PHY_DATA1_HE_TB_PILOT_TYPE = 1, + IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 14, +}; + +enum iwl_rx_phy_eht_data1 { + IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2 = 31, + IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE = 1, + IWL_RX_PHY_DATA1_EHT_TB_LOW_SS = 30, + IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM = 224, + IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0 = 256, + IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7 = 65024, +}; + +enum iwl_rx_phy_he_data2 { + IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 255, + IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2 = 65280, + IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0 = 16711680, + IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2 = 4278190080, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1 = 15, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2 = 240, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3 = 3840, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 61440, +}; + +enum iwl_rx_phy_he_data3 { + IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 255, + IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 65280, + IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1 = 16711680, + IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3 = 4278190080, +}; + +enum iwl_rx_phy_he_he_data4 { + IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 1, + IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 2, + IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK = 4, + IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK = 8, + IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK = 240, + IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM = 256, + IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 1536, +}; + +enum iwl_rx_phy_eht_data2 { + IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A1 = 511, + IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A2 = 261632, + IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_B1 = 133955584, + IWL_RX_PHY_DATA2_EHT_TB_EXT_TRIG_SIGA1 = 4294967295, +}; + +enum iwl_rx_phy_eht_data3 { + IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C1 = 261632, + IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C2 = 133955584, +}; + +enum iwl_rx_phy_eht_data4 { + IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D1 = 511, + IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D2 = 261632, + IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS = 786432, + IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_B2 = 535822336, +}; + +enum iwl_rx_phy_data5 { + IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP = 3, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1 = 60, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2 = 960, + IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE = 124, + IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR = 262016, + IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA = 1835008, + IWL_RX_PHY_DATA5_EHT_MU_SPATIAL_CONF_USR_FIELD = 266338304, +}; + +enum iwl_rx_usig_a1 { + IWL_RX_USIG_A1_ENHANCED_WIFI_VER_ID = 7, + IWL_RX_USIG_A1_BANDWIDTH = 56, + IWL_RX_USIG_A1_UL_FLAG = 64, + IWL_RX_USIG_A1_BSS_COLOR = 8064, + 
IWL_RX_USIG_A1_TXOP_DURATION = 1040384, + IWL_RX_USIG_A1_DISREGARD = 32505856, + IWL_RX_USIG_A1_VALIDATE = 33554432, + IWL_RX_USIG_A1_EHT_BW320_SLOT = 67108864, + IWL_RX_USIG_A1_EHT_TYPE = 402653184, + IWL_RX_USIG_A1_RDY = 2147483648, +}; + +enum iwl_rx_usig_a2_eht { + IWL_RX_USIG_A2_EHT_PPDU_TYPE = 3, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2 = 4, + IWL_RX_USIG_A2_EHT_PUNC_CHANNEL = 248, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8 = 256, + IWL_RX_USIG_A2_EHT_SIG_MCS = 1536, + IWL_RX_USIG_A2_EHT_SIG_SYM_NUM = 63488, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1 = 983040, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2 = 15728640, + IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD = 520093696, + IWL_RX_USIG_A2_EHT_CRC_OK = 1073741824, + IWL_RX_USIG_A2_EHT_RDY = 2147483648, +}; + +struct iwl_rx_no_data_ver_3 { + __le32 info; + __le32 rssi; + __le32 on_air_rise_time; + __le32 fr_time; + __le32 rate; + __le32 phy_info[2]; + __le32 rx_vec[4]; +}; + +struct iwl_frame_release { + u8 baid; + u8 reserved; + __le16 nssn; +}; + +enum iwl_bar_frame_release_sta_tid { + IWL_BAR_FRAME_RELEASE_TID_MASK = 15, + IWL_BAR_FRAME_RELEASE_STA_MASK = 496, +}; + +enum iwl_bar_frame_release_ba_info { + IWL_BAR_FRAME_RELEASE_NSSN_MASK = 4095, + IWL_BAR_FRAME_RELEASE_SN_MASK = 16773120, + IWL_BAR_FRAME_RELEASE_BAID_MASK = 1056964608, +}; + +struct iwl_bar_frame_release { + __le32 sta_tid; + __le32 ba_info; +}; + +struct iwl_rxq_sync_notification { + __le32 count; + u8 payload[0]; +}; + +enum iwl_mvm_rxq_notif_type { + IWL_MVM_RXQ_EMPTY = 0, + IWL_MVM_RXQ_NOTIF_DEL_BA = 1, +}; + +struct iwl_mvm_internal_rxq_notif { + u16 type; + u16 sync; + u32 cookie; + u8 data[0]; +}; + +struct iwl_mvm_delba_data { + u32 baid; +}; + +struct iwl_mvm_rx_phy_data { + enum iwl_rx_phy_info_type info_type; + __le32 d0; + __le32 d1; + __le32 d2; + __le32 d3; + __le32 eht_d4; + __le32 d5; + __le16 d4; + bool with_data; + bool first_subframe; + __le32 rx_vec[4]; + u32 rate_n_flags; + u32 gp2_on_air_rise; + u16 phy_info; + u8 energy_a; + u8 energy_b; + u8 channel; +}; + +struct iwl_rx_sta_csa { + bool all_sta_unblocked; + struct ieee80211_vif *vif; +}; + +enum led_mode { + LED_MODE_DEFAULT = 0, + LED_MODE_TXRX_ACTIVITY = 1, + LED_MODE_SIGNAL_STRENGTH = 2, + LED_MODE_ASUS = 3, + LED_MODE_ALPHA = 4, +}; + +enum firmware_errors { + FW_OK = 0, + FW_BAD_CRC = 1, + FW_BAD_LENGTH = 2, + FW_BAD_VERSION = 3, +}; + +struct rt2x00_field8 { + u8 bit_offset; + u8 bit_mask; +}; + +struct rt2x00_field16 { + u16 bit_offset; + u16 bit_mask; +}; + +struct rt2x00_field32 { + u32 bit_offset; + u32 bit_mask; +}; + +struct rf_reg_pair { + u8 bank; + u8 reg; + u8 value; +}; + +struct rt2800_drv_data { + u8 calibration_bw20; + u8 calibration_bw40; + s8 rx_calibration_bw20; + s8 rx_calibration_bw40; + s8 tx_calibration_bw20; + s8 tx_calibration_bw40; + u8 bbp25; + u8 bbp26; + u8 txmixer_gain_24g; + u8 txmixer_gain_5g; + u8 max_psdu; + unsigned int tbtt_tick; + unsigned int ampdu_factor_cnt[4]; + long unsigned int sta_ids[6]; + struct ieee80211_sta *wcid_to_sta[191]; +}; + +struct rt2800_ops { + u32 (*register_read)(struct rt2x00_dev *, const unsigned int); + u32 (*register_read_lock)(struct rt2x00_dev *, const unsigned int); + void (*register_write)(struct rt2x00_dev *, const unsigned int, u32); + void (*register_write_lock)(struct rt2x00_dev *, const unsigned int, u32); + void (*register_multiread)(struct rt2x00_dev *, const unsigned int, void *, const u32); + void (*register_multiwrite)(struct rt2x00_dev *, const unsigned int, const void *, const u32); + int (*regbusy_read)(struct rt2x00_dev 
*, const unsigned int, const struct rt2x00_field32, u32 *); + int (*read_eeprom)(struct rt2x00_dev *); + bool (*hwcrypt_disabled)(struct rt2x00_dev *); + int (*drv_write_firmware)(struct rt2x00_dev *, const u8 *, const size_t); + int (*drv_init_registers)(struct rt2x00_dev *); + __le32 * (*drv_get_txwi)(struct queue_entry *); + unsigned int (*drv_get_dma_done)(struct data_queue *); +}; + +struct mac_wcid_entry { + u8 mac[6]; + u8 reserved[2]; +}; + +struct hw_key_entry { + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +}; + +struct mac_iveiv_entry { + u8 iv[8]; +}; + +enum rt2800_eeprom_word { + EEPROM_CHIP_ID = 0, + EEPROM_VERSION = 1, + EEPROM_MAC_ADDR_0 = 2, + EEPROM_MAC_ADDR_1 = 3, + EEPROM_MAC_ADDR_2 = 4, + EEPROM_NIC_CONF0 = 5, + EEPROM_NIC_CONF1 = 6, + EEPROM_FREQ = 7, + EEPROM_LED_AG_CONF = 8, + EEPROM_LED_ACT_CONF = 9, + EEPROM_LED_POLARITY = 10, + EEPROM_NIC_CONF2 = 11, + EEPROM_LNA = 12, + EEPROM_RSSI_BG = 13, + EEPROM_RSSI_BG2 = 14, + EEPROM_TXMIXER_GAIN_BG = 15, + EEPROM_RSSI_A = 16, + EEPROM_RSSI_A2 = 17, + EEPROM_TXMIXER_GAIN_A = 18, + EEPROM_EIRP_MAX_TX_POWER = 19, + EEPROM_TXPOWER_DELTA = 20, + EEPROM_TXPOWER_BG1 = 21, + EEPROM_TXPOWER_BG2 = 22, + EEPROM_TSSI_BOUND_BG1 = 23, + EEPROM_TSSI_BOUND_BG2 = 24, + EEPROM_TSSI_BOUND_BG3 = 25, + EEPROM_TSSI_BOUND_BG4 = 26, + EEPROM_TSSI_BOUND_BG5 = 27, + EEPROM_TXPOWER_A1 = 28, + EEPROM_TXPOWER_A2 = 29, + EEPROM_TXPOWER_INIT = 30, + EEPROM_TSSI_BOUND_A1 = 31, + EEPROM_TSSI_BOUND_A2 = 32, + EEPROM_TSSI_BOUND_A3 = 33, + EEPROM_TSSI_BOUND_A4 = 34, + EEPROM_TSSI_BOUND_A5 = 35, + EEPROM_TXPOWER_BYRATE = 36, + EEPROM_BBP_START = 37, + EEPROM_EXT_LNA2 = 38, + EEPROM_EXT_TXPOWER_BG3 = 39, + EEPROM_EXT_TXPOWER_A3 = 40, + EEPROM_WORD_COUNT = 41, +}; + +enum { + TX_PWR_CFG_0_IDX = 0, + TX_PWR_CFG_1_IDX = 1, + TX_PWR_CFG_2_IDX = 2, + TX_PWR_CFG_3_IDX = 3, + TX_PWR_CFG_4_IDX = 4, + TX_PWR_CFG_5_IDX = 5, + TX_PWR_CFG_6_IDX = 6, + TX_PWR_CFG_7_IDX = 7, + TX_PWR_CFG_8_IDX = 8, + TX_PWR_CFG_9_IDX = 9, + TX_PWR_CFG_0_EXT_IDX = 10, + TX_PWR_CFG_1_EXT_IDX = 11, + TX_PWR_CFG_2_EXT_IDX = 12, + TX_PWR_CFG_3_EXT_IDX = 13, + TX_PWR_CFG_4_EXT_IDX = 14, + TX_PWR_CFG_IDX_COUNT = 15, +}; + +struct ir_raw_timings_pd { + unsigned int header_pulse; + unsigned int header_space; + unsigned int bit_pulse; + unsigned int bit_space[2]; + unsigned int trailer_pulse; + unsigned int trailer_space; + unsigned int msb_first: 1; +}; + +enum jvc_state { + STATE_INACTIVE___4 = 0, + STATE_HEADER_SPACE = 1, + STATE_BIT_PULSE = 2, + STATE_BIT_SPACE = 3, + STATE_TRAILER_PULSE = 4, + STATE_TRAILER_SPACE = 5, + STATE_CHECK_REPEAT = 6, +}; + +struct mdp_superblock_1 { + __le32 magic; + __le32 major_version; + __le32 feature_map; + __le32 pad0; + __u8 set_uuid[16]; + char set_name[32]; + __le64 ctime; + __le32 level; + __le32 layout; + __le64 size; + __le32 chunksize; + __le32 raid_disks; + union { + __le32 bitmap_offset; + struct { + __le16 offset; + __le16 size; + } ppl; + }; + __le32 new_level; + __le64 reshape_position; + __le32 delta_disks; + __le32 new_layout; + __le32 new_chunk; + __le32 new_offset; + __le64 data_offset; + __le64 data_size; + __le64 super_offset; + union { + __le64 recovery_offset; + __le64 journal_tail; + }; + __le32 dev_number; + __le32 cnt_corrected_read; + __u8 device_uuid[16]; + __u8 devflags; + __u8 bblog_shift; + __le16 bblog_size; + __le32 bblog_offset; + __le64 utime; + __le64 events; + __le64 resync_offset; + __le32 sb_csum; + __le32 max_dev; + __u8 pad3[32]; + __le16 dev_roles[0]; +}; + +struct raid10_info { + struct md_rdev *rdev; + struct md_rdev 
*replacement; + sector_t head_position; + int recovery_disabled; + long: 32; +}; + +struct geom { + int raid_disks; + int near_copies; + int far_copies; + int far_offset; + sector_t stride; + int far_set_size; + int chunk_shift; + sector_t chunk_mask; +}; + +struct r10conf { + struct mddev *mddev; + struct raid10_info *mirrors; + struct raid10_info *mirrors_new; + struct raid10_info *mirrors_old; + spinlock_t device_lock; + long: 32; + struct geom prev; + struct geom geo; + int copies; + long: 32; + sector_t dev_sectors; + sector_t reshape_progress; + sector_t reshape_safe; + long unsigned int reshape_checkpoint; + long: 32; + sector_t offset_diff; + struct list_head retry_list; + struct list_head bio_end_io_list; + struct bio_list pending_bio_list; + seqlock_t resync_lock; + atomic_t nr_pending; + int nr_waiting; + int nr_queued; + int barrier; + int array_freeze_pending; + long: 32; + sector_t next_resync; + int fullsync; + int have_replacement; + wait_queue_head_t wait_barrier; + mempool_t r10bio_pool; + mempool_t r10buf_pool; + struct page *tmppage; + struct bio_set bio_split; + struct md_thread *thread; + long: 32; + sector_t cluster_sync_low; + sector_t cluster_sync_high; +}; + +struct r10dev { + struct bio *bio; + union { + struct bio *repl_bio; + struct md_rdev *rdev; + }; + sector_t addr; + int devnum; + long: 32; +}; + +struct r10bio { + atomic_t remaining; + long: 32; + sector_t sector; + int sectors; + long unsigned int state; + struct mddev *mddev; + struct bio *master_bio; + int read_slot; + struct list_head retry_list; + long: 32; + struct r10dev devs[0]; +}; + +enum r10bio_state { + R10BIO_Uptodate = 0, + R10BIO_IsSync = 1, + R10BIO_IsRecover = 2, + R10BIO_IsReshape = 3, + R10BIO_Degraded = 4, + R10BIO_ReadError = 5, + R10BIO_MadeGood = 6, + R10BIO_WriteError = 7, + R10BIO_Previous = 8, + R10BIO_FailFast = 9, + R10BIO_Discard = 10, +}; + +struct resync_pages { + void *raid_bio; + struct page *pages[16]; +}; + +struct raid1_plug_cb { + struct blk_plug_cb cb; + struct bio_list pending; + unsigned int count; +}; + +enum geo_type { + geo_new = 0, + geo_old = 1, + geo_start = 2, +}; + +struct rsync_pages; + +struct tc_skb_cb { + struct qdisc_skb_cb qdisc_cb; + u32 drop_reason; + u16 zone; + u16 mru; + u8 post_ct: 1; + u8 post_ct_snat: 1; + u8 post_ct_dnat: 1; +}; + +struct sch_frag_data { + long unsigned int dst; + struct qdisc_skb_cb cb; + __be16 inner_protocol; + u16 vlan_tci; + __be16 vlan_proto; + unsigned int l2_len; + u8 l2_data[18]; + int (*xmit)(struct sk_buff *); +}; + +struct eeprom_req_info { + struct ethnl_req_info base; + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; +}; + +struct eeprom_reply_data { + struct ethnl_reply_data base; + u32 length; + u8 *data; +}; + +struct tcp_sack_block_wire { + __be32 start_seq; + __be32 end_seq; +}; + +enum nft_last_attributes { + NFTA_LAST_UNSPEC = 0, + NFTA_LAST_SET = 1, + NFTA_LAST_MSECS = 2, + NFTA_LAST_PAD = 3, + __NFTA_LAST_MAX = 4, +}; + +struct nft_last { + long unsigned int jiffies; + unsigned int set; +}; + +struct nft_last_priv { + struct nft_last *last; +}; + +struct fib_entry_notifier_info { + struct fib_notifier_info info; + u32 dst; + int dst_len; + struct fib_info *fi; + dscp_t dscp; + u8 type; + u32 tb_id; +}; + +typedef unsigned int t_key; + +struct key_vector { + t_key key; + unsigned char pos; + unsigned char bits; + unsigned char slen; + union { + struct hlist_head leaf; + struct { + struct {} __empty_tnode; + struct key_vector *tnode[0]; + }; + }; +}; + +struct tnode { + struct 
callback_head rcu; + t_key empty_children; + t_key full_children; + struct key_vector *parent; + struct key_vector kv[1]; +}; + +struct trie_stat { + unsigned int totdepth; + unsigned int maxdepth; + unsigned int tnodes; + unsigned int leaves; + unsigned int nullpointers; + unsigned int prefixes; + unsigned int nodesizes[32]; +}; + +struct trie { + struct key_vector kv[1]; +}; + +struct fib_trie_iter { + struct seq_net_private p; + struct fib_table *tb; + struct key_vector *tnode; + unsigned int index; + unsigned int depth; +}; + +struct fib_route_iter { + struct seq_net_private p; + struct fib_table *main_tb; + struct key_vector *tnode; + long: 32; + loff_t pos; + t_key key; + long: 32; +}; + +enum { + IFLA_BR_UNSPEC = 0, + IFLA_BR_FORWARD_DELAY = 1, + IFLA_BR_HELLO_TIME = 2, + IFLA_BR_MAX_AGE = 3, + IFLA_BR_AGEING_TIME = 4, + IFLA_BR_STP_STATE = 5, + IFLA_BR_PRIORITY = 6, + IFLA_BR_VLAN_FILTERING = 7, + IFLA_BR_VLAN_PROTOCOL = 8, + IFLA_BR_GROUP_FWD_MASK = 9, + IFLA_BR_ROOT_ID = 10, + IFLA_BR_BRIDGE_ID = 11, + IFLA_BR_ROOT_PORT = 12, + IFLA_BR_ROOT_PATH_COST = 13, + IFLA_BR_TOPOLOGY_CHANGE = 14, + IFLA_BR_TOPOLOGY_CHANGE_DETECTED = 15, + IFLA_BR_HELLO_TIMER = 16, + IFLA_BR_TCN_TIMER = 17, + IFLA_BR_TOPOLOGY_CHANGE_TIMER = 18, + IFLA_BR_GC_TIMER = 19, + IFLA_BR_GROUP_ADDR = 20, + IFLA_BR_FDB_FLUSH = 21, + IFLA_BR_MCAST_ROUTER = 22, + IFLA_BR_MCAST_SNOOPING = 23, + IFLA_BR_MCAST_QUERY_USE_IFADDR = 24, + IFLA_BR_MCAST_QUERIER = 25, + IFLA_BR_MCAST_HASH_ELASTICITY = 26, + IFLA_BR_MCAST_HASH_MAX = 27, + IFLA_BR_MCAST_LAST_MEMBER_CNT = 28, + IFLA_BR_MCAST_STARTUP_QUERY_CNT = 29, + IFLA_BR_MCAST_LAST_MEMBER_INTVL = 30, + IFLA_BR_MCAST_MEMBERSHIP_INTVL = 31, + IFLA_BR_MCAST_QUERIER_INTVL = 32, + IFLA_BR_MCAST_QUERY_INTVL = 33, + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL = 34, + IFLA_BR_MCAST_STARTUP_QUERY_INTVL = 35, + IFLA_BR_NF_CALL_IPTABLES = 36, + IFLA_BR_NF_CALL_IP6TABLES = 37, + IFLA_BR_NF_CALL_ARPTABLES = 38, + IFLA_BR_VLAN_DEFAULT_PVID = 39, + IFLA_BR_PAD = 40, + IFLA_BR_VLAN_STATS_ENABLED = 41, + IFLA_BR_MCAST_STATS_ENABLED = 42, + IFLA_BR_MCAST_IGMP_VERSION = 43, + IFLA_BR_MCAST_MLD_VERSION = 44, + IFLA_BR_VLAN_STATS_PER_PORT = 45, + IFLA_BR_MULTI_BOOLOPT = 46, + IFLA_BR_MCAST_QUERIER_STATE = 47, + IFLA_BR_FDB_N_LEARNED = 48, + IFLA_BR_FDB_MAX_LEARNED = 49, + __IFLA_BR_MAX = 50, +}; + +enum { + LINK_XSTATS_TYPE_UNSPEC = 0, + LINK_XSTATS_TYPE_BRIDGE = 1, + LINK_XSTATS_TYPE_BOND = 2, + __LINK_XSTATS_TYPE_MAX = 3, +}; + +struct bridge_vlan_info { + __u16 flags; + __u16 vid; +}; + +struct bridge_vlan_xstats { + __u64 rx_bytes; + __u64 rx_packets; + __u64 tx_bytes; + __u64 tx_packets; + __u16 vid; + __u16 flags; + __u32 pad2; +}; + +enum { + BRIDGE_XSTATS_UNSPEC = 0, + BRIDGE_XSTATS_VLAN = 1, + BRIDGE_XSTATS_MCAST = 2, + BRIDGE_XSTATS_PAD = 3, + BRIDGE_XSTATS_STP = 4, + __BRIDGE_XSTATS_MAX = 5, +}; + +struct br_boolopt_multi { + __u32 optval; + __u32 optmask; +}; + +enum { + BR_GROUPFWD_STP = 1, + BR_GROUPFWD_MACPAUSE = 2, + BR_GROUPFWD_LACP = 4, +}; + +enum { + BR_FDB_LOCAL = 0, + BR_FDB_STATIC = 1, + BR_FDB_STICKY = 2, + BR_FDB_ADDED_BY_USER = 3, + BR_FDB_ADDED_BY_EXT_LEARN = 4, + BR_FDB_OFFLOADED = 5, + BR_FDB_NOTIFY = 6, + BR_FDB_NOTIFY_INACTIVE = 7, + BR_FDB_LOCKED = 8, + BR_FDB_DYNAMIC_LEARNED = 9, +}; + +struct net_bridge_fdb_flush_desc { + long unsigned int flags; + long unsigned int flags_mask; + int port_ifindex; + u16 vlan_id; +}; + +struct cpio_data { + void *data; + size_t size; + char name[18]; +}; + +enum cpio_fields { + C_MAGIC = 0, + C_INO = 1, + C_MODE = 2, + C_UID = 3, + C_GID = 
4, + C_NLINK = 5, + C_MTIME = 6, + C_FILESIZE = 7, + C_MAJ = 8, + C_MIN = 9, + C_RMAJ = 10, + C_RMIN = 11, + C_NAMESIZE = 12, + C_CHKSUM = 13, + C_NFIELDS = 14, +}; + +typedef bool (*stack_trace_consume_fn)(void *, long unsigned int); + +struct rlimit64 { + __u64 rlim_cur; + __u64 rlim_max; +}; + +typedef int (*proc_visitor)(struct task_struct *, void *); + +enum uts_proc { + UTS_PROC_ARCH = 0, + UTS_PROC_OSTYPE = 1, + UTS_PROC_OSRELEASE = 2, + UTS_PROC_VERSION = 3, + UTS_PROC_HOSTNAME = 4, + UTS_PROC_DOMAINNAME = 5, +}; + +struct prctl_mm_map { + __u64 start_code; + __u64 end_code; + __u64 start_data; + __u64 end_data; + __u64 start_brk; + __u64 brk; + __u64 start_stack; + __u64 arg_start; + __u64 arg_end; + __u64 env_start; + __u64 env_end; + __u64 *auxv; + __u32 auxv_size; + __u32 exe_fd; + long: 32; +}; + +struct tms { + __kernel_clock_t tms_utime; + __kernel_clock_t tms_stime; + __kernel_clock_t tms_cutime; + __kernel_clock_t tms_cstime; +}; + +struct getcpu_cache { + long unsigned int blob[32]; +}; + +enum reboot_type { + BOOT_TRIPLE = 116, + BOOT_KBD = 107, + BOOT_BIOS = 98, + BOOT_ACPI = 97, + BOOT_EFI = 101, + BOOT_CF9_FORCE = 112, + BOOT_CF9_SAFE = 113, +}; + +enum sys_off_mode { + SYS_OFF_MODE_POWER_OFF_PREPARE = 0, + SYS_OFF_MODE_POWER_OFF = 1, + SYS_OFF_MODE_RESTART_PREPARE = 2, + SYS_OFF_MODE_RESTART = 3, +}; + +struct sys_off_data { + int mode; + void *cb_data; + const char *cmd; + struct device *dev; +}; + +struct sys_off_handler { + struct notifier_block nb; + int (*sys_off_cb)(struct sys_off_data *); + void *cb_data; + enum sys_off_mode mode; + bool blocking; + void *list; + struct device *dev; +}; + +struct filter_pred; + +struct prog_entry { + int target; + int when_to_branch; + struct filter_pred *pred; +}; + +struct regex; + +typedef int (*regex_match_func)(char *, struct regex *, int); + +struct regex { + char pattern[256]; + int len; + int field_len; + regex_match_func match; +}; + +enum filter_op_ids { + OP_GLOB = 0, + OP_NE = 1, + OP_EQ = 2, + OP_LE = 3, + OP_LT = 4, + OP_GE = 5, + OP_GT = 6, + OP_BAND = 7, + OP_MAX = 8, +}; + +enum filter_pred_fn { + FILTER_PRED_FN_NOP = 0, + FILTER_PRED_FN_64 = 1, + FILTER_PRED_FN_64_CPUMASK = 2, + FILTER_PRED_FN_S64 = 3, + FILTER_PRED_FN_U64 = 4, + FILTER_PRED_FN_32 = 5, + FILTER_PRED_FN_32_CPUMASK = 6, + FILTER_PRED_FN_S32 = 7, + FILTER_PRED_FN_U32 = 8, + FILTER_PRED_FN_16 = 9, + FILTER_PRED_FN_16_CPUMASK = 10, + FILTER_PRED_FN_S16 = 11, + FILTER_PRED_FN_U16 = 12, + FILTER_PRED_FN_8 = 13, + FILTER_PRED_FN_8_CPUMASK = 14, + FILTER_PRED_FN_S8 = 15, + FILTER_PRED_FN_U8 = 16, + FILTER_PRED_FN_COMM = 17, + FILTER_PRED_FN_STRING = 18, + FILTER_PRED_FN_STRLOC = 19, + FILTER_PRED_FN_STRRELLOC = 20, + FILTER_PRED_FN_PCHAR_USER = 21, + FILTER_PRED_FN_PCHAR = 22, + FILTER_PRED_FN_CPU = 23, + FILTER_PRED_FN_CPU_CPUMASK = 24, + FILTER_PRED_FN_CPUMASK = 25, + FILTER_PRED_FN_CPUMASK_CPU = 26, + FILTER_PRED_FN_FUNCTION = 27, + FILTER_PRED_FN_ = 28, + FILTER_PRED_TEST_VISITED = 29, +}; + +struct filter_pred { + struct regex *regex; + struct cpumask *mask; + short unsigned int *ops; + struct ftrace_event_field *field; + u64 val; + u64 val2; + enum filter_pred_fn fn_num; + int offset; + int not; + int op; +}; + +enum { + FILT_ERR_NONE = 0, + FILT_ERR_INVALID_OP = 1, + FILT_ERR_TOO_MANY_OPEN = 2, + FILT_ERR_TOO_MANY_CLOSE = 3, + FILT_ERR_MISSING_QUOTE = 4, + FILT_ERR_MISSING_BRACE_OPEN = 5, + FILT_ERR_MISSING_BRACE_CLOSE = 6, + FILT_ERR_OPERAND_TOO_LONG = 7, + FILT_ERR_EXPECT_STRING = 8, + FILT_ERR_EXPECT_DIGIT = 9, + FILT_ERR_ILLEGAL_FIELD_OP = 
10, + FILT_ERR_FIELD_NOT_FOUND = 11, + FILT_ERR_ILLEGAL_INTVAL = 12, + FILT_ERR_BAD_SUBSYS_FILTER = 13, + FILT_ERR_TOO_MANY_PREDS = 14, + FILT_ERR_INVALID_FILTER = 15, + FILT_ERR_INVALID_CPULIST = 16, + FILT_ERR_IP_FIELD_ONLY = 17, + FILT_ERR_INVALID_VALUE = 18, + FILT_ERR_NO_FUNCTION = 19, + FILT_ERR_ERRNO = 20, + FILT_ERR_NO_FILTER = 21, +}; + +struct filter_parse_error { + int lasterr; + int lasterr_pos; +}; + +typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **); + +enum { + INVERT = 1, + PROCESS_AND = 2, + PROCESS_OR = 4, +}; + +struct ustring_buffer { + char buffer[1024]; +}; + +enum { + TOO_MANY_CLOSE = -1, + TOO_MANY_OPEN = -2, + MISSING_QUOTE = -3, +}; + +struct filter_list { + struct list_head list; + struct event_filter *filter; +}; + +struct function_filter_data { + struct ftrace_ops *ops; + int first_filter; + int first_notrace; +}; + +enum vmscan_throttle_state { + VMSCAN_THROTTLE_WRITEBACK = 0, + VMSCAN_THROTTLE_ISOLATED = 1, + VMSCAN_THROTTLE_NOPROGRESS = 2, + VMSCAN_THROTTLE_CONGESTED = 3, + NR_VMSCAN_THROTTLE = 4, +}; + +struct latch_tree_ops { + bool (*less)(struct latch_tree_node *, struct latch_tree_node *); + int (*comp)(void *, struct latch_tree_node *); +}; + +enum page_size_enum { + __PAGE_SIZE = 4096, +}; + +struct bpf_prog_pack { + struct list_head list; + void *ptr; + long unsigned int bitmap[0]; +}; + +struct bpf_prog_dummy { + struct bpf_prog prog; +}; + +typedef u64 (*btf_bpf_user_rnd_u32)(); + +typedef u64 (*btf_bpf_get_raw_cpu_id)(); + +struct _bpf_dtab_netdev { + struct net_device *dev; +}; + +struct xdp_mem_allocator { + struct xdp_mem_info mem; + union { + void *allocator; + struct page_pool *page_pool; + }; + struct rhash_head node; + struct callback_head rcu; +}; + +struct trace_event_raw_xdp_exception { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_xdp_bulk_tx { + struct trace_entry ent; + int ifindex; + u32 act; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_xdp_redirect_template { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + int err; + int to_ifindex; + u32 map_id; + int map_index; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_kthread { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int sched; + unsigned int xdp_pass; + unsigned int xdp_drop; + unsigned int xdp_redirect; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_enqueue { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int to_cpu; + char __data[0]; +}; + +struct trace_event_raw_xdp_devmap_xmit { + struct trace_entry ent; + int from_ifindex; + u32 act; + int to_ifindex; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_mem_disconnect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + char __data[0]; +}; + +struct trace_event_raw_mem_connect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + const struct xdp_rxq_info *rxq; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_mem_return_failed { + struct trace_entry ent; + const struct page *page; + u32 mem_id; + u32 mem_type; + char __data[0]; +}; + +struct trace_event_raw_bpf_xdp_link_attach_failed { + struct trace_entry ent; + 
u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_xdp_exception {}; + +struct trace_event_data_offsets_xdp_bulk_tx {}; + +struct trace_event_data_offsets_xdp_redirect_template {}; + +struct trace_event_data_offsets_xdp_cpumap_kthread {}; + +struct trace_event_data_offsets_xdp_cpumap_enqueue {}; + +struct trace_event_data_offsets_xdp_devmap_xmit {}; + +struct trace_event_data_offsets_mem_disconnect {}; + +struct trace_event_data_offsets_mem_connect {}; + +struct trace_event_data_offsets_mem_return_failed {}; + +struct trace_event_data_offsets_bpf_xdp_link_attach_failed { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, const struct bpf_prog *, u32); + +typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_cpumap_kthread)(void *, int, unsigned int, unsigned int, int, struct xdp_cpumap_stats *); + +typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_mem_disconnect)(void *, const struct xdp_mem_allocator *); + +typedef void (*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, const struct xdp_rxq_info *); + +typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, const struct page *); + +typedef void (*btf_trace_bpf_xdp_link_attach_failed)(void *, const char *); + +typedef u64 (*btf_bpf_cgrp_storage_get)(struct bpf_map *, struct cgroup *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_cgrp_storage_delete)(struct bpf_map *, struct cgroup *); + +struct list_lru_memcg { + struct callback_head rcu; + struct list_lru_one node[0]; +}; + +struct page_frag_cache { + long unsigned int encoded_page; + __u16 offset; + __u16 pagecnt_bias; +}; + +enum vmpressure_levels { + VMPRESSURE_LOW = 0, + VMPRESSURE_MEDIUM = 1, + VMPRESSURE_CRITICAL = 2, + VMPRESSURE_NUM_LEVELS = 3, +}; + +enum vmpressure_modes { + VMPRESSURE_NO_PASSTHROUGH = 0, + VMPRESSURE_HIERARCHY = 1, + VMPRESSURE_LOCAL = 2, + VMPRESSURE_NUM_MODES = 3, +}; + +struct vmpressure_event { + struct eventfd_ctx *efd; + enum vmpressure_levels level; + enum vmpressure_modes mode; + struct list_head node; +}; + +typedef struct { + rwlock_t *lock; +} class_read_lock_t; + +typedef struct { + rwlock_t *lock; +} class_write_lock_t; + +typedef struct rw_semaphore *class_rwsem_read_t; + +struct mount_attr { + __u64 attr_set; + __u64 attr_clr; + __u64 propagation; + __u64 userns_fd; +}; + +struct statmount { + __u32 size; + __u32 mnt_opts; + __u64 mask; + __u32 sb_dev_major; + __u32 sb_dev_minor; + __u64 sb_magic; + __u32 sb_flags; + __u32 fs_type; + __u64 mnt_id; + __u64 mnt_parent_id; + __u32 mnt_id_old; + __u32 mnt_parent_id_old; + __u64 mnt_attr; + __u64 
mnt_propagation; + __u64 mnt_peer_group; + __u64 mnt_master; + __u64 propagate_from; + __u32 mnt_root; + __u32 mnt_point; + __u64 mnt_ns_id; + __u32 fs_subtype; + __u32 sb_source; + __u32 opt_num; + __u32 opt_array; + __u32 opt_sec_num; + __u32 opt_sec_array; + __u64 __spare2[46]; + char str[0]; +}; + +struct mnt_id_req { + __u32 size; + __u32 spare; + __u64 mnt_id; + __u64 param; + __u64 mnt_ns_id; +}; + +struct proc_mounts { + struct mnt_namespace *ns; + struct path root; + int (*show)(struct seq_file *, struct vfsmount *); +}; + +struct mount_kattr { + unsigned int attr_set; + unsigned int attr_clr; + unsigned int propagation; + unsigned int lookup_flags; + bool recurse; + struct user_namespace *mnt_userns; + struct mnt_idmap *mnt_idmap; +}; + +enum umount_tree_flags { + UMOUNT_SYNC = 1, + UMOUNT_PROPAGATE = 2, + UMOUNT_CONNECTED = 4, +}; + +enum mnt_tree_flags_t { + MNT_TREE_MOVE = 1, + MNT_TREE_BENEATH = 2, +}; + +struct kstatmount { + struct statmount *buf; + size_t bufsize; + struct vfsmount *mnt; + long: 32; + u64 mask; + struct path root; + struct statmount sm; + struct seq_file seq; +}; + +struct folio_iter { + struct folio *folio; + size_t offset; + size_t length; + struct folio *_next; + size_t _seg_count; + int _i; +}; + +struct eventfs_root_inode { + struct eventfs_inode ei; + struct dentry *events_dir; +}; + +enum { + EVENTFS_SAVE_MODE = 65536, + EVENTFS_SAVE_UID = 131072, + EVENTFS_SAVE_GID = 262144, +}; + +struct workspace___2 { + z_stream strm; + char *buf; + unsigned int buf_size; + struct list_head list; + int level; +}; + +enum btrfs_disk_cache_state { + BTRFS_DC_WRITTEN = 0, + BTRFS_DC_ERROR = 1, + BTRFS_DC_CLEAR = 2, + BTRFS_DC_SETUP = 3, +}; + +typedef struct { + __le64 b; + __le64 a; +} le128; + +struct xts_tfm_ctx { + struct crypto_skcipher *child; + struct crypto_cipher *tweak; +}; + +struct xts_instance_ctx { + struct crypto_skcipher_spawn spawn; + struct crypto_cipher_spawn tweak_spawn; +}; + +struct xts_request_ctx { + le128 t; + struct scatterlist *tail; + struct scatterlist sg[2]; + long: 32; + long: 32; + long: 32; + struct skcipher_request subreq; +}; + +enum io_uring_sqe_flags_bit { + IOSQE_FIXED_FILE_BIT = 0, + IOSQE_IO_DRAIN_BIT = 1, + IOSQE_IO_LINK_BIT = 2, + IOSQE_IO_HARDLINK_BIT = 3, + IOSQE_ASYNC_BIT = 4, + IOSQE_BUFFER_SELECT_BIT = 5, + IOSQE_CQE_SKIP_SUCCESS_BIT = 6, +}; + +enum io_uring_register_op { + IORING_REGISTER_BUFFERS = 0, + IORING_UNREGISTER_BUFFERS = 1, + IORING_REGISTER_FILES = 2, + IORING_UNREGISTER_FILES = 3, + IORING_REGISTER_EVENTFD = 4, + IORING_UNREGISTER_EVENTFD = 5, + IORING_REGISTER_FILES_UPDATE = 6, + IORING_REGISTER_EVENTFD_ASYNC = 7, + IORING_REGISTER_PROBE = 8, + IORING_REGISTER_PERSONALITY = 9, + IORING_UNREGISTER_PERSONALITY = 10, + IORING_REGISTER_RESTRICTIONS = 11, + IORING_REGISTER_ENABLE_RINGS = 12, + IORING_REGISTER_FILES2 = 13, + IORING_REGISTER_FILES_UPDATE2 = 14, + IORING_REGISTER_BUFFERS2 = 15, + IORING_REGISTER_BUFFERS_UPDATE = 16, + IORING_REGISTER_IOWQ_AFF = 17, + IORING_UNREGISTER_IOWQ_AFF = 18, + IORING_REGISTER_IOWQ_MAX_WORKERS = 19, + IORING_REGISTER_RING_FDS = 20, + IORING_UNREGISTER_RING_FDS = 21, + IORING_REGISTER_PBUF_RING = 22, + IORING_UNREGISTER_PBUF_RING = 23, + IORING_REGISTER_SYNC_CANCEL = 24, + IORING_REGISTER_FILE_ALLOC_RANGE = 25, + IORING_REGISTER_PBUF_STATUS = 26, + IORING_REGISTER_NAPI = 27, + IORING_UNREGISTER_NAPI = 28, + IORING_REGISTER_CLOCK = 29, + IORING_REGISTER_CLONE_BUFFERS = 30, + IORING_REGISTER_SEND_MSG_RING = 31, + IORING_REGISTER_RESIZE_RINGS = 33, + 
IORING_REGISTER_MEM_REGION = 34, + IORING_REGISTER_LAST = 35, + IORING_REGISTER_USE_REGISTERED_RING = 2147483648, +}; + +enum { + DIO_SHOULD_DIRTY = 1, + DIO_IS_SYNC = 2, +}; + +struct blkdev_dio { + union { + struct kiocb *iocb; + struct task_struct *waiter; + }; + size_t size; + atomic_t ref; + unsigned int flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct bio bio; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct user_msghdr { + void *msg_name; + int msg_namelen; + struct iovec *msg_iov; + __kernel_size_t msg_iovlen; + void *msg_control; + __kernel_size_t msg_controllen; + unsigned int msg_flags; +}; + +typedef u32 compat_size_t; + +struct compat_msghdr { + compat_uptr_t msg_name; + compat_int_t msg_namelen; + compat_uptr_t msg_iov; + compat_size_t msg_iovlen; + compat_uptr_t msg_control; + compat_size_t msg_controllen; + compat_uint_t msg_flags; +}; + +struct io_uring_recvmsg_out { + __u32 namelen; + __u32 controllen; + __u32 payloadlen; + __u32 flags; +}; + +struct io_async_msghdr { + struct iovec fast_iov; + struct iovec *free_iov; + int free_iov_nr; + int namelen; + __kernel_size_t controllen; + __kernel_size_t payloadlen; + struct sockaddr *uaddr; + struct msghdr msg; + struct __kernel_sockaddr_storage addr; +}; + +struct io_shutdown { + struct file *file; + int how; +}; + +struct io_accept { + struct file *file; + struct sockaddr *addr; + int *addr_len; + int flags; + int iou_flags; + u32 file_slot; + long unsigned int nofile; +}; + +struct io_socket { + struct file *file; + int domain; + int type; + int protocol; + int flags; + u32 file_slot; + long unsigned int nofile; +}; + +struct io_connect { + struct file *file; + struct sockaddr *addr; + int addr_len; + bool in_progress; + bool seen_econnaborted; +}; + +struct io_bind { + struct file *file; + int addr_len; +}; + +struct io_listen { + struct file *file; + int backlog; +}; + +struct io_sr_msg { + struct file *file; + union { + struct compat_msghdr *umsg_compat; + struct user_msghdr *umsg; + void *buf; + }; + int len; + unsigned int done_io; + unsigned int msg_flags; + unsigned int nr_multishot_loops; + u16 flags; + u16 buf_group; + u16 buf_index; + void *msg_control; + struct io_kiocb *notif; +}; + +struct io_recvmsg_multishot_hdr { + struct io_uring_recvmsg_out msg; + struct __kernel_sockaddr_storage addr; +}; + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +enum devm_ioremap_type { + DEVM_IOREMAP = 0, + DEVM_IOREMAP_UC = 1, + DEVM_IOREMAP_WC = 2, + DEVM_IOREMAP_NP = 3, +}; + +struct arch_io_reserve_memtype_wc_devres { + resource_size_t start; + resource_size_t size; +}; + +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +struct dim_stats { + int ppms; + int bpms; + int epms; + int cpms; + int cpe_ratio; +}; + +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; + long: 32; +}; + +enum dim_tune_state { + DIM_PARKING_ON_TOP = 0, + DIM_PARKING_TIRED = 1, + DIM_GOING_RIGHT = 2, + DIM_GOING_LEFT = 3, +}; + +enum dmi_device_type { + DMI_DEV_TYPE_ANY = 0, + DMI_DEV_TYPE_OTHER = 1, + DMI_DEV_TYPE_UNKNOWN = 2, + DMI_DEV_TYPE_VIDEO = 3, + DMI_DEV_TYPE_SCSI = 4, + DMI_DEV_TYPE_ETHERNET = 5, + 
DMI_DEV_TYPE_TOKENRING = 6, + DMI_DEV_TYPE_SOUND = 7, + DMI_DEV_TYPE_PATA = 8, + DMI_DEV_TYPE_SATA = 9, + DMI_DEV_TYPE_SAS = 10, + DMI_DEV_TYPE_IPMI = -1, + DMI_DEV_TYPE_OEM_STRING = -2, + DMI_DEV_TYPE_DEV_ONBOARD = -3, + DMI_DEV_TYPE_DEV_SLOT = -4, +}; + +struct dmi_device { + struct list_head list; + int type; + const char *name; + void *device_data; +}; + +struct dmi_dev_onboard { + struct dmi_device dev; + int instance; + int segment; + int bus; + int devfn; +}; + +enum smbios_attr_enum { + SMBIOS_ATTR_NONE = 0, + SMBIOS_ATTR_LABEL_SHOW = 1, + SMBIOS_ATTR_INSTANCE_SHOW = 2, +}; + +struct pnp_device_id { + __u8 id[8]; + kernel_ulong_t driver_data; +}; + +struct pnp_card_device_id { + __u8 id[8]; + kernel_ulong_t driver_data; + struct { + __u8 id[8]; + } devs[8]; +}; + +struct pnp_protocol; + +struct pnp_id; + +struct pnp_card { + struct device dev; + unsigned char number; + struct list_head global_list; + struct list_head protocol_list; + struct list_head devices; + struct pnp_protocol *protocol; + struct pnp_id *id; + char name[50]; + unsigned char pnpver; + unsigned char productver; + unsigned int serial; + unsigned char checksum; + struct proc_dir_entry *procdir; + long: 32; +}; + +struct pnp_dev; + +struct pnp_protocol { + struct list_head protocol_list; + char *name; + int (*get)(struct pnp_dev *); + int (*set)(struct pnp_dev *); + int (*disable)(struct pnp_dev *); + bool (*can_wakeup)(struct pnp_dev *); + int (*suspend)(struct pnp_dev *, pm_message_t); + int (*resume)(struct pnp_dev *); + unsigned char number; + struct device dev; + struct list_head cards; + struct list_head devices; +}; + +struct pnp_id { + char id[8]; + struct pnp_id *next; +}; + +struct pnp_card_driver; + +struct pnp_card_link { + struct pnp_card *card; + struct pnp_card_driver *driver; + void *driver_data; + pm_message_t pm_state; +}; + +struct pnp_driver { + const char *name; + const struct pnp_device_id *id_table; + unsigned int flags; + int (*probe)(struct pnp_dev *, const struct pnp_device_id *); + void (*remove)(struct pnp_dev *); + void (*shutdown)(struct pnp_dev *); + int (*suspend)(struct pnp_dev *, pm_message_t); + int (*resume)(struct pnp_dev *); + struct device_driver driver; +}; + +struct pnp_card_driver { + struct list_head global_list; + char *name; + const struct pnp_card_device_id *id_table; + unsigned int flags; + int (*probe)(struct pnp_card_link *, const struct pnp_card_device_id *); + void (*remove)(struct pnp_card_link *); + int (*suspend)(struct pnp_card_link *, pm_message_t); + int (*resume)(struct pnp_card_link *); + struct pnp_driver link; +}; + +struct pnp_dev { + struct device dev; + u64 dma_mask; + unsigned int number; + int status; + struct list_head global_list; + struct list_head protocol_list; + struct list_head card_list; + struct list_head rdev_list; + struct pnp_protocol *protocol; + struct pnp_card *card; + struct pnp_driver *driver; + struct pnp_card_link *card_link; + struct pnp_id *id; + int active; + int capabilities; + unsigned int num_dependent_sets; + struct list_head resources; + struct list_head options; + char name[50]; + int flags; + struct proc_dir_entry *procent; + void *data; +}; + +struct probe { + struct probe *next; + dev_t dev; + long unsigned int range; + struct module *owner; + kobj_probe_t *get; + int (*lock)(dev_t, void *); + void *data; +}; + +struct kobj_map { + struct probe *probes[255]; + struct mutex *lock; +}; + +struct dev_pm_domain_attach_data { + const char * const *pd_names; + const u32 num_pd_names; + const u32 pd_flags; +}; + +struct 
dev_pm_domain_list { + struct device **pd_devs; + struct device_link **pd_links; + u32 *opp_tokens; + u32 num_pds; +}; + +struct opp_table; + +struct dev_pm_opp; + +typedef int (*config_regulators_t)(struct device *, struct dev_pm_opp *, struct dev_pm_opp *, struct regulator **, unsigned int); + +typedef int (*config_clks_t)(struct device *, struct opp_table *, struct dev_pm_opp *, void *, bool); + +struct dev_pm_opp_config { + const char * const *clk_names; + config_clks_t config_clks; + const char *prop_name; + config_regulators_t config_regulators; + const unsigned int *supported_hw; + unsigned int supported_hw_count; + const char * const *regulator_names; + struct device *required_dev; + unsigned int required_dev_index; +}; + +struct ww_class { + atomic_long_t stamp; + struct lock_class_key acquire_key; + struct lock_class_key mutex_key; + const char *acquire_name; + const char *mutex_name; + unsigned int is_wait_die; +}; + +enum dma_resv_usage { + DMA_RESV_USAGE_KERNEL = 0, + DMA_RESV_USAGE_WRITE = 1, + DMA_RESV_USAGE_READ = 2, + DMA_RESV_USAGE_BOOKKEEP = 3, +}; + +struct dma_resv_list; + +struct dma_resv { + struct ww_mutex lock; + struct dma_resv_list *fences; +}; + +struct dma_resv_list { + struct callback_head rcu; + u32 num_fences; + u32 max_fences; + struct dma_fence *table[0]; +}; + +struct dma_resv_iter { + struct dma_resv *obj; + enum dma_resv_usage usage; + struct dma_fence *fence; + enum dma_resv_usage fence_usage; + unsigned int index; + struct dma_resv_list *fences; + unsigned int num_fences; + bool is_restarted; +}; + +enum { + NVME_CMBSZ_SQS = 1, + NVME_CMBSZ_CQS = 2, + NVME_CMBSZ_LISTS = 4, + NVME_CMBSZ_RDS = 8, + NVME_CMBSZ_WDS = 16, + NVME_CMBSZ_SZ_SHIFT = 12, + NVME_CMBSZ_SZ_MASK = 1048575, + NVME_CMBSZ_SZU_SHIFT = 8, + NVME_CMBSZ_SZU_MASK = 15, +}; + +enum { + NVME_CMBMSC_CRE = 1, + NVME_CMBMSC_CMSE = 2, +}; + +enum { + NVME_SGL_FMT_DATA_DESC = 0, + NVME_SGL_FMT_SEG_DESC = 2, + NVME_SGL_FMT_LAST_SEG_DESC = 3, + NVME_KEY_SGL_FMT_DATA_DESC = 4, + NVME_TRANSPORT_SGL_DATA_DESC = 5, +}; + +enum { + NVME_HOST_MEM_ENABLE = 1, + NVME_HOST_MEM_RETURN = 2, +}; + +struct nvme_host_mem_buf_desc { + __le64 addr; + __le32 size; + __u32 rsvd; +}; + +struct nvme_completion { + union nvme_result result; + __le16 sq_head; + __le16 sq_id; + __u16 command_id; + __le16 status; +}; + +struct nvme_queue; + +struct nvme_dev { + struct nvme_queue *queues; + struct blk_mq_tag_set tagset; + struct blk_mq_tag_set admin_tagset; + u32 *dbs; + struct device *dev; + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool; + unsigned int online_queues; + unsigned int max_qid; + unsigned int io_queues[3]; + unsigned int num_vecs; + u32 q_depth; + int io_sqes; + u32 db_stride; + void *bar; + long unsigned int bar_mapped_size; + struct mutex shutdown_lock; + bool subsystem; + u64 cmb_size; + bool cmb_use_sqes; + u32 cmbsz; + u32 cmbloc; + long: 32; + struct nvme_ctrl ctrl; + u32 last_ps; + bool hmb; + struct sg_table *hmb_sgt; + mempool_t *iod_mempool; + mempool_t *iod_meta_mempool; + __le32 *dbbuf_dbs; + dma_addr_t dbbuf_dbs_dma_addr; + __le32 *dbbuf_eis; + dma_addr_t dbbuf_eis_dma_addr; + long: 32; + u64 host_mem_size; + u32 nr_host_mem_descs; + u32 host_mem_descs_size; + dma_addr_t host_mem_descs_dma; + struct nvme_host_mem_buf_desc *host_mem_descs; + void **host_mem_desc_bufs; + unsigned int nr_allocated_queues; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; +}; + +struct nvme_queue { + struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + long: 32; + long: 32; 
+ long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t cq_poll_lock; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 *q_db; + u32 q_depth; + u16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + long unsigned int flags; + __le32 *dbbuf_sq_db; + __le32 *dbbuf_cq_db; + __le32 *dbbuf_sq_ei; + __le32 *dbbuf_cq_ei; + struct completion delete_done; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +union nvme_descriptor { + struct nvme_sgl_desc *sg_list; + __le64 *prp_list; +}; + +struct nvme_iod { + struct nvme_request req; + struct nvme_command cmd; + bool aborted; + s8 nr_allocations; + unsigned int dma_len; + dma_addr_t first_dma; + dma_addr_t meta_dma; + struct sg_table sgt; + struct sg_table meta_sgt; + union nvme_descriptor meta_list; + union nvme_descriptor list[5]; +}; + +enum IWL_TLC_MNG_NSS { + IWL_TLC_NSS_1 = 0, + IWL_TLC_NSS_2 = 1, + IWL_TLC_NSS_MAX = 2, +}; + +enum IWL_TLC_MCS_PER_BW { + IWL_TLC_MCS_PER_BW_80 = 0, + IWL_TLC_MCS_PER_BW_160 = 1, + IWL_TLC_MCS_PER_BW_320 = 2, + IWL_TLC_MCS_PER_BW_NUM_V3 = 2, + IWL_TLC_MCS_PER_BW_NUM_V4 = 3, +}; + +enum iwl_ppag_flags { + IWL_PPAG_ETSI_MASK = 1, + IWL_PPAG_CHINA_MASK = 2, + IWL_PPAG_ETSI_LPI_UHB_MASK = 4, + IWL_PPAG_ETSI_VLP_UHB_MASK = 8, + IWL_PPAG_ETSI_SP_UHB_MASK = 16, + IWL_PPAG_USA_LPI_UHB_MASK = 32, + IWL_PPAG_USA_VLP_UHB_MASK = 64, + IWL_PPAG_USA_SP_UHB_MASK = 128, + IWL_PPAG_CANADA_LPI_UHB_MASK = 256, + IWL_PPAG_CANADA_VLP_UHB_MASK = 512, + IWL_PPAG_CANADA_SP_UHB_MASK = 1024, +}; + +enum iwl_lari_config_masks { + LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = 1, + LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = 2, + LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = 4, + LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = 8, + LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK = 128, +}; + +enum iwl_dsm_values_srd { + DSM_VALUE_SRD_ACTIVE = 0, + DSM_VALUE_SRD_PASSIVE = 1, + DSM_VALUE_SRD_DISABLE = 2, + DSM_VALUE_SRD_MAX = 3, +}; + +enum iwl_dsm_values_indonesia { + DSM_VALUE_INDONESIA_DISABLE = 0, + DSM_VALUE_INDONESIA_ENABLE = 1, + DSM_VALUE_INDONESIA_RESERVED = 2, + DSM_VALUE_INDONESIA_MAX = 3, +}; + +enum iwl_dsm_unii4_bitmap { + DSM_VALUE_UNII4_US_OVERRIDE_MSK = 1, + DSM_VALUE_UNII4_US_EN_MSK = 2, + DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK = 4, + DSM_VALUE_UNII4_ETSI_EN_MSK = 8, + DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK = 16, + DSM_VALUE_UNII4_CANADA_EN_MSK = 32, +}; + +enum iwl_dsm_masks_reg { + DSM_MASK_CHINA_22_REG = 4, +}; + +enum iwl_uefi_cnv_puncturing_flags { + IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK = 1, + IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK = 2, +}; + +struct ieee80211_mutable_offsets { + u16 tim_offset; + u16 tim_length; + u16 cntdwn_counter_offs[2]; + u16 mbssid_off; +}; + +struct iwl_fw_dbg_trigger_missed_bcon { + __le32 stop_consec_missed_bcon; + __le32 stop_consec_missed_bcon_since_rx; + __le32 reserved2[2]; + __le32 start_consec_missed_bcon; + __le32 start_consec_missed_bcon_since_rx; + __le32 reserved1[2]; +}; + +enum iwl_gen2_tx_fifo { + IWL_GEN2_TX_FIFO_CMD = 0, + IWL_GEN2_EDCA_TX_FIFO_BK = 1, + IWL_GEN2_EDCA_TX_FIFO_BE = 2, + IWL_GEN2_EDCA_TX_FIFO_VI = 3, + IWL_GEN2_EDCA_TX_FIFO_VO = 4, + IWL_GEN2_TRIG_TX_FIFO_BK = 5, + IWL_GEN2_TRIG_TX_FIFO_BE = 6, + IWL_GEN2_TRIG_TX_FIFO_VI = 7, + IWL_GEN2_TRIG_TX_FIFO_VO = 8, +}; + +enum iwl_bz_tx_fifo { + 
IWL_BZ_EDCA_TX_FIFO_BK = 0, + IWL_BZ_EDCA_TX_FIFO_BE = 1, + IWL_BZ_EDCA_TX_FIFO_VI = 2, + IWL_BZ_EDCA_TX_FIFO_VO = 3, + IWL_BZ_TRIG_TX_FIFO_BK = 4, + IWL_BZ_TRIG_TX_FIFO_BE = 5, + IWL_BZ_TRIG_TX_FIFO_VI = 6, + IWL_BZ_TRIG_TX_FIFO_VO = 7, +}; + +struct iwl_mac_beacon_cmd_v6 { + struct iwl_tx_cmd___2 tx; + __le32 template_id; + __le32 tim_idx; + __le32 tim_size; + struct ieee80211_hdr frame[0]; +}; + +struct iwl_mac_beacon_cmd_v7 { + struct iwl_tx_cmd___2 tx; + __le32 template_id; + __le32 tim_idx; + __le32 tim_size; + __le32 ecsa_offset; + __le32 csa_offset; + struct ieee80211_hdr frame[0]; +}; + +enum iwl_mac_beacon_flags_v1 { + IWL_MAC_BEACON_CCK_V1 = 256, + IWL_MAC_BEACON_ANT_A_V1 = 512, + IWL_MAC_BEACON_ANT_B_V1 = 1024, + IWL_MAC_BEACON_FILS_V1 = 4096, +}; + +enum iwl_mac_beacon_flags { + IWL_MAC_BEACON_CCK = 32, + IWL_MAC_BEACON_ANT_A = 64, + IWL_MAC_BEACON_ANT_B = 128, + IWL_MAC_BEACON_FILS = 256, +}; + +struct iwl_mac_beacon_cmd { + __le16 byte_cnt; + __le16 flags; + __le32 short_ssid; + __le32 reserved; + __le32 link_id; + __le32 tim_idx; + union { + __le32 tim_size; + __le32 btwt_offset; + }; + __le32 ecsa_offset; + __le32 csa_offset; + struct ieee80211_hdr frame[0]; +}; + +struct iwl_extended_beacon_notif_v5 { + struct iwl_tx_resp beacon_notify_hdr; + __le64 tsf; + __le32 ibss_mgr_status; + __le32 gp2; +}; + +struct iwl_extended_beacon_notif { + __le32 status; + __le64 tsf; + __le32 ibss_mgr_status; + __le32 gp2; +}; + +enum iwl_mac_protection_flags { + MAC_PROT_FLG_TGG_PROTECT = 8, + MAC_PROT_FLG_HT_PROT = 8388608, + MAC_PROT_FLG_FAT_PROT = 16777216, + MAC_PROT_FLG_SELF_CTS_EN = 1073741824, +}; + +enum iwl_mac_types { + FW_MAC_TYPE_FIRST = 1, + FW_MAC_TYPE_AUX = 1, + FW_MAC_TYPE_LISTENER = 2, + FW_MAC_TYPE_PIBSS = 3, + FW_MAC_TYPE_IBSS = 4, + FW_MAC_TYPE_BSS_STA = 5, + FW_MAC_TYPE_P2P_DEVICE = 6, + FW_MAC_TYPE_P2P_STA = 7, + FW_MAC_TYPE_GO = 8, + FW_MAC_TYPE_TEST = 9, + FW_MAC_TYPE_MAX = 9, +}; + +struct iwl_mac_data_ap { + __le32 beacon_time; + __le64 beacon_tsf; + __le32 bi; + __le32 reserved1; + __le32 dtim_interval; + __le32 reserved2; + __le32 mcast_qid; + __le32 beacon_template; +}; + +struct iwl_mac_data_ibss { + __le32 beacon_time; + __le64 beacon_tsf; + __le32 bi; + __le32 reserved; + __le32 beacon_template; +}; + +enum iwl_mac_data_policy { + TWT_SUPPORTED = 1, + MORE_DATA_ACK_SUPPORTED = 2, + FLEXIBLE_TWT_SUPPORTED = 4, + PROTECTED_TWT_SUPPORTED = 8, + BROADCAST_TWT_SUPPORTED = 16, + COEX_HIGH_PRIORITY_ENABLE = 32, +}; + +struct iwl_mac_data_sta { + __le32 is_assoc; + __le32 dtim_time; + __le64 dtim_tsf; + __le32 bi; + __le32 reserved1; + __le32 dtim_interval; + __le32 data_policy; + __le32 listen_interval; + __le32 assoc_id; + __le32 assoc_beacon_arrive_time; +}; + +struct iwl_mac_data_go { + struct iwl_mac_data_ap ap; + __le32 ctwin; + __le32 opp_ps_enabled; +}; + +struct iwl_mac_data_p2p_sta { + struct iwl_mac_data_sta sta; + __le32 ctwin; +}; + +struct iwl_mac_data_pibss { + __le32 stats_interval; +}; + +struct iwl_mac_data_p2p_dev { + __le32 is_disc_extended; +}; + +enum iwl_mac_qos_flags { + MAC_QOS_FLG_UPDATE_EDCA = 1, + MAC_QOS_FLG_TGN = 2, + MAC_QOS_FLG_TXOP_TYPE = 16, +}; + +struct iwl_ac_qos___2 { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 fifos_mask; + __le16 edca_txop; +}; + +struct iwl_mac_ctx_cmd { + __le32 id_and_color; + __le32 action; + __le32 mac_type; + __le32 tsf_id; + u8 node_addr[6]; + __le16 reserved_for_node_addr; + u8 bssid_addr[6]; + __le16 reserved_for_bssid_addr; + __le32 cck_rates; + __le32 ofdm_rates; + __le32 protection_flags; + 
__le32 cck_short_preamble; + __le32 short_slot; + __le32 filter_flags; + __le32 qos_flags; + struct iwl_ac_qos___2 ac[5]; + union { + struct iwl_mac_data_ap ap; + struct iwl_mac_data_go go; + struct iwl_mac_data_sta sta; + struct iwl_mac_data_p2p_sta p2p_sta; + struct iwl_mac_data_p2p_dev p2p_dev; + struct iwl_mac_data_pibss pibss; + struct iwl_mac_data_ibss ibss; + }; +}; + +struct iwl_missed_beacons_notif_v4 { + __le32 link_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +}; + +struct iwl_missed_vap_notif { + __le32 mac_id; + u8 num_beacon_intervals_elapsed; + u8 profile_periodicity; + u8 reserved[2]; +}; + +struct iwl_channel_switch_start_notif_v1 { + __le32 id_and_color; +}; + +struct iwl_channel_switch_start_notif { + __le32 link_id; +}; + +struct iwl_channel_switch_error_notif { + __le32 link_id; + __le32 csa_err_mask; +}; + +struct iwl_missed_beacons_notif { + __le32 link_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 other_link_id; + __le32 consec_missed_beacons_other_link; +}; + +struct iwl_stored_beacon_notif_common { + __le32 system_time; + __le64 tsf; + __le32 beacon_timestamp; + __le16 band; + __le16 channel; + __le32 rates; + __le32 byte_count; +}; + +struct iwl_stored_beacon_notif_v2 { + struct iwl_stored_beacon_notif_common common; + u8 data[600]; +}; + +struct iwl_stored_beacon_notif_v3 { + struct iwl_stored_beacon_notif_common common; + u8 sta_id; + u8 reserved[3]; + u8 data[600]; +}; + +enum iwl_rx_phy_flags { + RX_RES_PHY_FLAGS_BAND_24 = 1, + RX_RES_PHY_FLAGS_MOD_CCK = 2, + RX_RES_PHY_FLAGS_SHORT_PREAMBLE = 4, + RX_RES_PHY_FLAGS_NARROW_BAND = 8, + RX_RES_PHY_FLAGS_ANTENNA = 112, + RX_RES_PHY_FLAGS_ANTENNA_POS = 4, + RX_RES_PHY_FLAGS_AGG = 128, + RX_RES_PHY_FLAGS_OFDM_HT = 256, + RX_RES_PHY_FLAGS_OFDM_GF = 512, + RX_RES_PHY_FLAGS_OFDM_VHT = 1024, +}; + +enum iwl_sta_type { + IWL_STA_LINK = 0, + IWL_STA_GENERAL_PURPOSE = 1, + IWL_STA_MULTICAST = 2, + IWL_STA_TDLS_LINK = 3, + IWL_STA_AUX_ACTIVITY = 4, +}; + +struct iwl_mvm_mac_iface_iterator_data { + struct iwl_mvm *mvm; + struct ieee80211_vif *vif; + long unsigned int available_mac_ids[1]; + long unsigned int available_tsf_ids[1]; + enum iwl_tsf_id preferred_tsf; + bool found_vif; +}; + +struct iwl_mvm_go_iterator_data { + bool go_active; +}; + +struct iwl_mvm_mac_ap_iterator_data { + struct iwl_mvm *mvm; + struct ieee80211_vif *vif; + u32 beacon_device_ts; + u16 beacon_int; +}; + +enum rtw_chip_ver { + RTW_CHIP_VER_CUT_A = 0, + RTW_CHIP_VER_CUT_B = 1, + RTW_CHIP_VER_CUT_C = 2, + RTW_CHIP_VER_CUT_D = 3, + RTW_CHIP_VER_CUT_E = 4, + RTW_CHIP_VER_CUT_F = 5, + RTW_CHIP_VER_CUT_G = 6, +}; + +enum rtw_ip_sel { + RTW_IP_SEL_PHY = 0, + RTW_IP_SEL_MAC = 1, + RTW_IP_SEL_DBI = 2, + RTW_IP_SEL_UNDEF = 65535, +}; + +struct rtw_pci_tx_buffer_desc { + __le16 buf_size; + __le16 psb_len; + __le32 dma; +}; + +struct rtw_pci_tx_data { + dma_addr_t dma; + u8 sn; +}; + +struct rtw_pci_ring { + u8 *head; + dma_addr_t dma; + u8 desc_size; + u32 len; + u32 wp; + u32 rp; +}; + +struct rtw_pci_tx_ring { + struct rtw_pci_ring r; + struct sk_buff_head queue; + bool queue_stopped; +}; + +struct rtw_pci_rx_buffer_desc { + __le16 buf_size; + __le16 total_pkt_size; + __le32 dma; +}; + +struct rtw_pci_rx_ring { + struct rtw_pci_ring r; + struct sk_buff *buf[512]; +}; + +struct rtw_pci { + struct pci_dev *pdev; + spinlock_t hwirq_lock; + spinlock_t irq_lock; + u32 irq_mask[4]; + bool irq_enabled; + bool running; + struct 
net_device *netdev; + long: 32; + struct napi_struct napi; + u16 rx_tag; + long unsigned int tx_queued[1]; + struct rtw_pci_tx_ring tx_rings[8]; + struct rtw_pci_rx_ring rx_rings[2]; + u16 link_ctrl; + atomic_t link_usage; + bool rx_no_aspm; + long unsigned int flags[1]; + void *mmap; + long: 32; +}; + +struct input_event { + __kernel_ulong_t __sec; + __kernel_ulong_t __usec; + __u16 type; + __u16 code; + __s32 value; +}; + +enum xmp_state { + STATE_INACTIVE___5 = 0, + STATE_LEADER_PULSE = 1, + STATE_NIBBLE_SPACE = 2, +}; + +struct dm_ioctl { + __u32 version[3]; + __u32 data_size; + __u32 data_start; + __u32 target_count; + __s32 open_count; + __u32 flags; + __u32 event_nr; + __u32 padding; + __u64 dev; + char name[128]; + char uuid[129]; + char data[7]; +}; + +struct dm_target_spec { + __u64 sector_start; + __u64 length; + __s32 status; + __u32 next; + char target_type[16]; +}; + +struct dm_target_deps { + __u32 count; + __u32 padding; + __u64 dev[0]; +}; + +struct dm_name_list { + __u64 dev; + __u32 next; + char name[0]; + long: 32; +}; + +struct dm_target_versions { + __u32 next; + __u32 version[3]; + char name[0]; +}; + +struct dm_target_msg { + __u64 sector; + char message[0]; +}; + +enum { + DM_VERSION_CMD = 0, + DM_REMOVE_ALL_CMD = 1, + DM_LIST_DEVICES_CMD = 2, + DM_DEV_CREATE_CMD = 3, + DM_DEV_REMOVE_CMD = 4, + DM_DEV_RENAME_CMD = 5, + DM_DEV_SUSPEND_CMD = 6, + DM_DEV_STATUS_CMD = 7, + DM_DEV_WAIT_CMD = 8, + DM_TABLE_LOAD_CMD = 9, + DM_TABLE_CLEAR_CMD = 10, + DM_TABLE_DEPS_CMD = 11, + DM_TABLE_STATUS_CMD = 12, + DM_LIST_VERSIONS_CMD = 13, + DM_TARGET_MSG_CMD = 14, + DM_DEV_SET_GEOMETRY_CMD = 15, + DM_DEV_ARM_POLL_CMD = 16, + DM_GET_TARGET_VERSION_CMD = 17, +}; + +struct dm_file { + volatile unsigned int global_event_nr; +}; + +struct hash_cell { + struct rb_node name_node; + struct rb_node uuid_node; + bool name_set; + bool uuid_set; + char *name; + char *uuid; + struct mapped_device *md; + struct dm_table *new_map; +}; + +struct vers_iter { + size_t param_size; + struct dm_target_versions *vers; + struct dm_target_versions *old_vers; + char *end; + uint32_t flags; +}; + +typedef int (*ioctl_fn)(struct file *, struct dm_ioctl *, size_t); + +struct debug_reply_data { + struct ethnl_reply_data base; + u32 msg_mask; +}; + +enum nft_immediate_attributes { + NFTA_IMMEDIATE_UNSPEC = 0, + NFTA_IMMEDIATE_DREG = 1, + NFTA_IMMEDIATE_DATA = 2, + __NFTA_IMMEDIATE_MAX = 3, +}; + +struct nft_immediate_expr { + struct nft_data data; + u8 dreg; + u8 dlen; + long: 32; +}; + +struct icmp6_filter { + __u32 data[8]; +}; + +struct raw6_sock { + struct inet_sock inet; + __u32 checksum; + __u32 offset; + struct icmp6_filter filter; + __u32 ip6mr_table; + struct ipv6_pinfo inet6; +}; + +struct raw_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct raw6_frag_vec { + struct msghdr *msg; + int hlen; + char c[4]; +}; + +enum ieee80211_radiotap_vht_known { + IEEE80211_RADIOTAP_VHT_KNOWN_STBC = 1, + IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA = 2, + IEEE80211_RADIOTAP_VHT_KNOWN_GI = 4, + IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS = 8, + IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM = 16, + IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED = 32, + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH = 64, + IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID = 128, + IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID = 256, +}; + +struct drop_reason_list { + const char * const *reasons; + size_t n_reasons; +}; + +struct mcs_group___2 { + u8 shift; + u16 duration[14]; +}; + +struct __user_cap_header_struct { + __u32 version; + int pid; +}; + 
+typedef struct __user_cap_header_struct *cap_user_header_t; + +struct __user_cap_data_struct { + __u32 effective; + __u32 permitted; + __u32 inheritable; +}; + +typedef struct __user_cap_data_struct *cap_user_data_t; + +enum cpu_idle_type { + __CPU_NOT_IDLE = 0, + CPU_IDLE = 1, + CPU_NEWLY_IDLE = 2, + CPU_MAX_IDLE_TYPES = 3, +}; + +typedef int (*tg_visitor)(struct task_group *, void *); + +struct energy_env { + long unsigned int task_busy_time; + long unsigned int pd_busy_time; + long unsigned int cpu_cap; + long unsigned int pd_cap; +}; + +enum fbq_type { + regular = 0, + remote = 1, + all = 2, +}; + +enum group_type { + group_has_spare = 0, + group_fully_busy = 1, + group_misfit_task = 2, + group_smt_balance = 3, + group_asym_packing = 4, + group_imbalanced = 5, + group_overloaded = 6, +}; + +enum migration_type { + migrate_load = 0, + migrate_util = 1, + migrate_task = 2, + migrate_misfit = 3, +}; + +struct lb_env { + struct sched_domain *sd; + struct rq *src_rq; + int src_cpu; + int dst_cpu; + struct rq *dst_rq; + struct cpumask *dst_grpmask; + int new_dst_cpu; + enum cpu_idle_type idle; + long int imbalance; + struct cpumask *cpus; + unsigned int flags; + unsigned int loop; + unsigned int loop_break; + unsigned int loop_max; + enum fbq_type fbq_type; + enum migration_type migration_type; + struct list_head tasks; +}; + +struct sg_lb_stats { + long unsigned int avg_load; + long unsigned int group_load; + long unsigned int group_capacity; + long unsigned int group_util; + long unsigned int group_runnable; + unsigned int sum_nr_running; + unsigned int sum_h_nr_running; + unsigned int idle_cpus; + unsigned int group_weight; + enum group_type group_type; + unsigned int group_asym_packing; + unsigned int group_smt_balance; + long unsigned int group_misfit_task_load; +}; + +struct sd_lb_stats { + struct sched_group *busiest; + struct sched_group *local; + long unsigned int total_load; + long unsigned int total_capacity; + long unsigned int avg_load; + unsigned int prefer_sibling; + struct sg_lb_stats busiest_stat; + struct sg_lb_stats local_stat; +}; + +struct trace_buffer_meta { + __u32 meta_page_size; + __u32 meta_struct_len; + __u32 subbuf_size; + __u32 nr_subbufs; + struct { + __u64 lost_events; + __u32 id; + __u32 read; + } reader; + __u64 flags; + __u64 entries; + __u64 overrun; + __u64 read; + __u64 Reserved1; + __u64 Reserved2; +}; + +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING = 29, + RINGBUF_TYPE_TIME_EXTEND = 30, + RINGBUF_TYPE_TIME_STAMP = 31, +}; + +struct ring_buffer_per_cpu; + +struct buffer_page; + +struct ring_buffer_iter { + struct ring_buffer_per_cpu *cpu_buffer; + long unsigned int head; + long unsigned int next_event; + struct buffer_page *head_page; + struct buffer_page *cache_reader_page; + long unsigned int cache_read; + long unsigned int cache_pages_removed; + long: 32; + u64 read_stamp; + u64 page_stamp; + struct ring_buffer_event *event; + size_t event_size; + int missed_events; + long: 32; +}; + +struct rb_irq_work { + struct irq_work work; + wait_queue_head_t waiters; + wait_queue_head_t full_waiters; + atomic_t seq; + bool waiters_pending; + bool full_waiters_pending; + bool wakeup_full; +}; + +struct trace_buffer { + unsigned int flags; + int cpus; + atomic_t record_disabled; + atomic_t resizing; + cpumask_var_t cpumask; + struct lock_class_key *reader_lock_key; + struct mutex mutex; + struct ring_buffer_per_cpu **buffers; + struct hlist_node node; + u64 (*clock)(); + struct rb_irq_work irq_work; + bool time_stamp_abs; 
+ long unsigned int range_addr_start; + long unsigned int range_addr_end; + long int last_text_delta; + long int last_data_delta; + unsigned int subbuf_size; + unsigned int subbuf_order; + unsigned int max_data_size; +}; + +struct ring_buffer_meta { + int magic; + int struct_size; + long unsigned int text_addr; + long unsigned int data_addr; + long unsigned int first_buffer; + long unsigned int head_buffer; + long unsigned int commit_buffer; + __u32 subbuf_size; + __u32 nr_subbufs; + int buffers[0]; +}; + +enum { + RB_LEN_TIME_EXTEND = 8, + RB_LEN_TIME_STAMP = 8, +}; + +struct buffer_data_page { + u64 time_stamp; + local_t commit; + unsigned char data[0]; + long: 32; +}; + +struct buffer_data_read_page { + unsigned int order; + struct buffer_data_page *data; +}; + +struct buffer_page { + struct list_head list; + local_t write; + unsigned int read; + local_t entries; + long unsigned int real_end; + unsigned int order; + u32 id: 30; + u32 range: 1; + struct buffer_data_page *page; +}; + +struct rb_event_info { + u64 ts; + u64 delta; + u64 before; + u64 after; + long unsigned int length; + struct buffer_page *tail_page; + int add_timestamp; + long: 32; +}; + +enum { + RB_ADD_STAMP_NONE = 0, + RB_ADD_STAMP_EXTEND = 2, + RB_ADD_STAMP_ABSOLUTE = 4, + RB_ADD_STAMP_FORCE = 8, +}; + +enum { + RB_CTX_TRANSITION = 0, + RB_CTX_NMI = 1, + RB_CTX_IRQ = 2, + RB_CTX_SOFTIRQ = 3, + RB_CTX_NORMAL = 4, + RB_CTX_MAX = 5, +}; + +struct rb_time_struct { + local64_t time; +}; + +typedef struct rb_time_struct rb_time_t; + +struct ring_buffer_per_cpu { + int cpu; + atomic_t record_disabled; + atomic_t resize_disabled; + struct trace_buffer *buffer; + raw_spinlock_t reader_lock; + arch_spinlock_t lock; + struct lock_class_key lock_key; + struct buffer_data_page *free_page; + long unsigned int nr_pages; + unsigned int current_context; + struct list_head *pages; + long unsigned int cnt; + struct buffer_page *head_page; + struct buffer_page *tail_page; + struct buffer_page *commit_page; + struct buffer_page *reader_page; + long unsigned int lost_events; + long unsigned int last_overrun; + long unsigned int nest; + local_t entries_bytes; + local_t entries; + local_t overrun; + local_t commit_overrun; + local_t dropped_events; + local_t committing; + local_t commits; + local_t pages_touched; + local_t pages_lost; + local_t pages_read; + long int last_pages_touch; + size_t shortest_full; + long unsigned int read; + long unsigned int read_bytes; + rb_time_t write_stamp; + rb_time_t before_stamp; + u64 event_stamp[5]; + u64 read_stamp; + long unsigned int pages_removed; + unsigned int mapped; + unsigned int user_mapped; + struct mutex mapping_lock; + long unsigned int *subbuf_ids; + struct trace_buffer_meta *meta_page; + struct ring_buffer_meta *ring_meta; + long int nr_pages_to_update; + struct list_head new_pages; + struct work_struct update_pages_work; + struct completion update_done; + struct rb_irq_work irq_work; +}; + +struct rb_wait_data { + struct rb_irq_work *irq_work; + int seq; +}; + +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1, + PERF_SAMPLE_BRANCH_KERNEL = 2, + PERF_SAMPLE_BRANCH_HV = 4, + PERF_SAMPLE_BRANCH_ANY = 8, + PERF_SAMPLE_BRANCH_ANY_CALL = 16, + PERF_SAMPLE_BRANCH_ANY_RETURN = 32, + PERF_SAMPLE_BRANCH_IND_CALL = 64, + PERF_SAMPLE_BRANCH_ABORT_TX = 128, + PERF_SAMPLE_BRANCH_IN_TX = 256, + PERF_SAMPLE_BRANCH_NO_TX = 512, + PERF_SAMPLE_BRANCH_COND = 1024, + PERF_SAMPLE_BRANCH_CALL_STACK = 2048, + PERF_SAMPLE_BRANCH_IND_JUMP = 4096, + PERF_SAMPLE_BRANCH_CALL = 8192, + 
PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, + PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, + PERF_SAMPLE_BRANCH_HW_INDEX = 131072, + PERF_SAMPLE_BRANCH_PRIV_SAVE = 262144, + PERF_SAMPLE_BRANCH_COUNTERS = 524288, + PERF_SAMPLE_BRANCH_MAX = 1048576, +}; + +enum perf_sample_regs_abi { + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, +}; + +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1, + PERF_FORMAT_TOTAL_TIME_RUNNING = 2, + PERF_FORMAT_ID = 4, + PERF_FORMAT_GROUP = 8, + PERF_FORMAT_LOST = 16, + PERF_FORMAT_MAX = 32, +}; + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1, +}; + +struct perf_ns_link_info { + __u64 dev; + __u64 ino; +}; + +enum { + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + NR_NAMESPACES = 7, +}; + +enum perf_bpf_event_type { + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX = 3, +}; + +typedef struct pt_regs bpf_user_pt_regs_t; + +enum perf_pmu_scope { + PERF_PMU_SCOPE_NONE = 0, + PERF_PMU_SCOPE_CORE = 1, + PERF_PMU_SCOPE_DIE = 2, + PERF_PMU_SCOPE_CLUSTER = 3, + PERF_PMU_SCOPE_PKG = 4, + PERF_PMU_SCOPE_SYS_WIDE = 5, + PERF_PMU_MAX_SCOPE = 6, +}; + +enum perf_addr_filter_action_t { + PERF_ADDR_FILTER_ACTION_STOP = 0, + PERF_ADDR_FILTER_ACTION_START = 1, + PERF_ADDR_FILTER_ACTION_FILTER = 2, +}; + +struct perf_addr_filter { + struct list_head entry; + struct path path; + long unsigned int offset; + long unsigned int size; + enum perf_addr_filter_action_t action; +}; + +struct swevent_hlist { + struct hlist_head heads[256]; + struct callback_head callback_head; +}; + +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int online; + struct perf_cgroup *cgrp; + int heap_size; + struct perf_event **heap; + struct perf_event *heap_default[2]; + long: 32; +}; + +struct bpf_perf_event_data_kern { + bpf_user_pt_regs_t *regs; + struct perf_sample_data *data; + struct perf_event *event; +}; + +struct min_heap_char { + int nr; + int size; + char *data; + char preallocated[0]; +}; + +typedef struct min_heap_char min_heap_char; + +struct min_heap_callbacks { + bool (*less)(const void *, const void *, void *); + void (*swp)(void *, void *, void *); +}; + +typedef int (*remote_function_f)(void *); + +struct remote_function_call { + struct task_struct *p; + remote_function_f func; + void *info; + int ret; +}; + +enum event_type_t { + EVENT_FLEXIBLE = 1, + EVENT_PINNED = 2, + EVENT_TIME = 4, + EVENT_FROZEN = 8, + EVENT_CPU = 16, + EVENT_CGROUP = 32, + EVENT_ALL = 3, + EVENT_TIME_FROZEN = 12, +}; + +typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *); + +struct event_function_struct { + struct perf_event *event; + event_f func; + void *data; +}; + +struct __group_key { + int cpu; + struct pmu *pmu; + struct cgroup *cgroup; +}; + +struct stop_event_data { + struct perf_event *event; + unsigned int restart; +}; + +struct perf_event_min_heap { + int nr; + int size; + struct perf_event **data; + struct perf_event *preallocated[0]; +}; + +struct perf_read_data { + struct perf_event *event; + bool group; + int ret; +}; + +struct perf_read_event { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +typedef void perf_iterate_f(struct perf_event *, 
void *); + +struct remote_output { + struct perf_buffer *rb; + int err; +}; + +struct perf_task_event { + struct task_struct *task; + struct perf_event_context *task_ctx; + struct { + struct perf_event_header header; + u32 pid; + u32 ppid; + u32 tid; + u32 ptid; + u64 time; + } event_id; +}; + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + } event_id; +}; + +struct perf_namespaces_event { + struct task_struct *task; + long: 32; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 nr_namespaces; + struct perf_ns_link_info link_info[7]; + } event_id; +}; + +struct perf_cgroup_event { + char *path; + int path_size; + struct { + struct perf_event_header header; + u64 id; + char path[0]; + } event_id; +}; + +struct perf_mmap_event { + struct vm_area_struct *vma; + const char *file_name; + int file_size; + int maj; + int min; + long: 32; + u64 ino; + u64 ino_generation; + u32 prot; + u32 flags; + u8 build_id[20]; + u32 build_id_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event_id; +}; + +struct perf_switch_event { + struct task_struct *task; + struct task_struct *next_prev; + struct { + struct perf_event_header header; + u32 next_prev_pid; + u32 next_prev_tid; + } event_id; +}; + +struct perf_ksymbol_event { + const char *name; + int name_len; + struct { + struct perf_event_header header; + u64 addr; + u32 len; + u16 ksym_type; + u16 flags; + } event_id; +}; + +struct perf_bpf_event { + struct bpf_prog *prog; + struct { + struct perf_event_header header; + u16 type; + u16 flags; + u32 id; + u8 tag[8]; + } event_id; +}; + +struct perf_text_poke_event { + const void *old_bytes; + const void *new_bytes; + size_t pad; + u16 old_len; + u16 new_len; + struct { + struct perf_event_header header; + u64 addr; + } event_id; +}; + +struct swevent_htable { + struct swevent_hlist *swevent_hlist; + struct mutex hlist_mutex; + int hlist_refcount; +}; + +enum perf_probe_config { + PERF_PROBE_CONFIG_IS_RETPROBE = 1, + PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, + PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32, +}; + +enum { + IF_ACT_NONE = -1, + IF_ACT_FILTER = 0, + IF_ACT_START = 1, + IF_ACT_STOP = 2, + IF_SRC_FILE = 3, + IF_SRC_KERNEL = 4, + IF_SRC_FILEADDR = 5, + IF_SRC_KERNELADDR = 6, +}; + +enum { + IF_STATE_ACTION = 0, + IF_STATE_SOURCE = 1, + IF_STATE_END = 2, +}; + +struct perf_aux_event { + struct perf_event_header header; + u64 hw_id; +}; + +struct perf_aux_event___2 { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +struct perf_aux_event___3 { + struct perf_event_header header; + u64 offset; + u64 size; + u64 flags; +}; + +typedef unsigned int kasan_vmalloc_flags_t; + +struct swap_cgroup_ctrl { + struct page **map; + long unsigned int length; + spinlock_t lock; +}; + +struct swap_cgroup { + short unsigned int id; +}; + +struct f_owner_ex { + int type; + __kernel_pid_t pid; +}; + +struct pts_mount_opts { + int setuid; + int setgid; + kuid_t uid; + kgid_t gid; + umode_t mode; + umode_t ptmxmode; + int reserve; + int max; +}; + +enum { + Opt_uid___6 = 0, + Opt_gid___6 = 1, + Opt_mode___4 = 2, + Opt_ptmxmode = 3, + Opt_newinstance = 4, + Opt_max = 5, + Opt_err___4 = 6, +}; + +struct pts_fs_info { + struct ida allocated_ptys; + struct pts_mount_opts mount_opts; + struct super_block *sb; + struct dentry *ptmx_dentry; +}; + +struct trace_event_raw_jbd2_checkpoint { + struct trace_entry ent; + dev_t dev; + int 
result; + char __data[0]; +}; + +struct trace_event_raw_jbd2_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + tid_t transaction; + char __data[0]; +}; + +struct trace_event_raw_jbd2_end_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + tid_t transaction; + tid_t head; + char __data[0]; +}; + +struct trace_event_raw_jbd2_submit_inode_data { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_start_class { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_extend { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int buffer_credits; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int interval; + int sync; + int requested_blocks; + int dirtied_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_run_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + long unsigned int wait; + long unsigned int request_delay; + long unsigned int running; + long unsigned int locked; + long unsigned int flushing; + long unsigned int logging; + __u32 handle_count; + __u32 blocks; + __u32 blocks_logged; + char __data[0]; +}; + +struct trace_event_raw_jbd2_checkpoint_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + long unsigned int chp_time; + __u32 forced_to_close; + __u32 written; + __u32 dropped; + char __data[0]; +}; + +struct trace_event_raw_jbd2_update_log_tail { + struct trace_entry ent; + dev_t dev; + tid_t tail_sequence; + tid_t first_tid; + long unsigned int block_nr; + long unsigned int freed; + char __data[0]; +}; + +struct trace_event_raw_jbd2_write_superblock { + struct trace_entry ent; + dev_t dev; + blk_opf_t write_flags; + char __data[0]; +}; + +struct trace_event_raw_jbd2_lock_buffer_stall { + struct trace_entry ent; + dev_t dev; + long unsigned int stall_ms; + char __data[0]; +}; + +struct trace_event_raw_jbd2_journal_shrink { + struct trace_entry ent; + dev_t dev; + long unsigned int nr_to_scan; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_jbd2_shrink_scan_exit { + struct trace_entry ent; + dev_t dev; + long unsigned int nr_to_scan; + long unsigned int nr_shrunk; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_jbd2_shrink_checkpoint_list { + struct trace_entry ent; + dev_t dev; + tid_t first_tid; + tid_t tid; + tid_t last_tid; + long unsigned int nr_freed; + tid_t next_tid; + char __data[0]; +}; + +struct trace_event_data_offsets_jbd2_checkpoint {}; + +struct trace_event_data_offsets_jbd2_commit {}; + +struct trace_event_data_offsets_jbd2_end_commit {}; + +struct trace_event_data_offsets_jbd2_submit_inode_data {}; + +struct trace_event_data_offsets_jbd2_handle_start_class {}; + +struct trace_event_data_offsets_jbd2_handle_extend {}; + +struct trace_event_data_offsets_jbd2_handle_stats {}; + +struct trace_event_data_offsets_jbd2_run_stats {}; + +struct trace_event_data_offsets_jbd2_checkpoint_stats {}; + +struct trace_event_data_offsets_jbd2_update_log_tail {}; + +struct trace_event_data_offsets_jbd2_write_superblock {}; + +struct trace_event_data_offsets_jbd2_lock_buffer_stall {}; + +struct trace_event_data_offsets_jbd2_journal_shrink {}; + +struct 
trace_event_data_offsets_jbd2_shrink_scan_exit {}; + +struct trace_event_data_offsets_jbd2_shrink_checkpoint_list {}; + +typedef void (*btf_trace_jbd2_checkpoint)(void *, journal_t *, int); + +typedef void (*btf_trace_jbd2_start_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_locking)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_flushing)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_logging)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_drop_transaction)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_end_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_submit_inode_data)(void *, struct inode *); + +typedef void (*btf_trace_jbd2_handle_start)(void *, dev_t, tid_t, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_restart)(void *, dev_t, tid_t, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_extend)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int); + +typedef void (*btf_trace_jbd2_handle_stats)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int, int, int); + +typedef void (*btf_trace_jbd2_run_stats)(void *, dev_t, tid_t, struct transaction_run_stats_s *); + +typedef void (*btf_trace_jbd2_checkpoint_stats)(void *, dev_t, tid_t, struct transaction_chp_stats_s *); + +typedef void (*btf_trace_jbd2_update_log_tail)(void *, journal_t *, tid_t, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_write_superblock)(void *, journal_t *, blk_opf_t); + +typedef void (*btf_trace_jbd2_lock_buffer_stall)(void *, dev_t, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_count)(void *, journal_t *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_scan_enter)(void *, journal_t *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_scan_exit)(void *, journal_t *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_checkpoint_list)(void *, journal_t *, tid_t, tid_t, tid_t, long unsigned int, tid_t); + +struct jbd2_stats_proc_session { + journal_t *journal; + struct transaction_stats_s *stats; + int start; + int max; +}; + +enum rsapubkey_actions { + ACT_rsa_get_e = 0, + ACT_rsa_get_n = 1, + NR__rsapubkey_actions = 2, +}; + +enum rsaprivkey_actions { + ACT_rsa_get_d = 0, + ACT_rsa_get_dp = 1, + ACT_rsa_get_dq = 2, + ACT_rsa_get_e___2 = 3, + ACT_rsa_get_n___2 = 4, + ACT_rsa_get_p = 5, + ACT_rsa_get_q = 6, + ACT_rsa_get_qinv = 7, + NR__rsaprivkey_actions = 8, +}; + +struct pkcs1pad_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct pkcs1pad_inst_ctx { + struct crypto_akcipher_spawn spawn; +}; + +struct pkcs1pad_request { + struct scatterlist in_sg[2]; + struct scatterlist out_sg[1]; + uint8_t *in_buf; + uint8_t *out_buf; + long: 32; + long: 32; + struct akcipher_request child_req; +}; + +struct x509_parse_context { + struct x509_certificate *cert; + long unsigned int data; + const void *key; + size_t key_size; + const void *params; + size_t params_size; + enum OID key_algo; + enum OID last_oid; + enum OID sig_algo; + u8 o_size; + u8 cn_size; + u8 email_size; + u16 o_offset; + u16 cn_offset; + u16 email_offset; + unsigned int raw_akid_size; + const void *raw_akid; + const void *akid_raw_issuer; + unsigned int akid_raw_issuer_size; +}; + +enum { + REQ_FSEQ_PREFLUSH = 1, + REQ_FSEQ_DATA = 2, + 
REQ_FSEQ_POSTFLUSH = 4, + REQ_FSEQ_DONE = 8, + REQ_FSEQ_ACTIONS = 7, + FLUSH_PENDING_TIMEOUT = 5000, +}; + +struct bfq_group_data { + struct blkcg_policy_data pd; + unsigned int weight; +}; + +struct io_uring_sync_cancel_reg { + __u64 addr; + __s32 fd; + __u32 flags; + struct __kernel_timespec timeout; + __u8 opcode; + __u8 pad[7]; + __u64 pad2[3]; +}; + +struct io_cancel { + struct file *file; + long: 32; + u64 addr; + u32 flags; + s32 fd; + u8 opcode; + long: 32; +}; + +typedef long int mpi_limb_signed_t; + +typedef struct tree_desc_s tree_desc; + +struct nla_bitfield32 { + __u32 value; + __u32 selector; +}; + +struct clk_div_table { + unsigned int val; + unsigned int div; +}; + +struct clk_divider { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u16 flags; + const struct clk_div_table *table; + spinlock_t *lock; +}; + +struct unipair { + short unsigned int unicode; + short unsigned int fontpos; +}; + +struct uni_pagedict { + u16 **uni_pgdir[32]; + long unsigned int refcount; + long unsigned int sum; + unsigned char *inverse_translations[4]; + u16 *inverse_trans_unicode; +}; + +typedef void * (*devcon_match_fn_t)(const struct fwnode_handle *, const char *, void *); + +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1, + BIP_MAPPED_INTEGRITY = 2, + BIP_CTRL_NOCHECK = 4, + BIP_DISK_NOCHECK = 8, + BIP_IP_CHECKSUM = 16, + BIP_COPY_USER = 32, +}; + +enum pr_status { + PR_STS_SUCCESS = 0, + PR_STS_IOERR = 2, + PR_STS_RESERVATION_CONFLICT = 24, + PR_STS_RETRY_PATH_FAILURE = 917504, + PR_STS_PATH_FAST_FAILED = 983040, + PR_STS_PATH_FAILED = 65536, +}; + +enum t10_dif_type { + T10_PI_TYPE0_PROTECTION = 0, + T10_PI_TYPE1_PROTECTION = 1, + T10_PI_TYPE2_PROTECTION = 2, + T10_PI_TYPE3_PROTECTION = 3, +}; + +struct scsi_io_group_descriptor { + u8 ic_enable: 1; + u8 cs_enble: 1; + u8 st_enble: 1; + u8 reserved1: 3; + u8 io_advice_hints_mode: 2; + u8 reserved2[3]; + u8 lbm_descriptor_type: 4; + u8 rlbsr: 2; + u8 reserved3: 1; + u8 acdlu: 1; + u8 params[2]; + u8 reserved4; + u8 reserved5[8]; +}; + +struct scsi_stream_status { + u8 reserved1: 7; + u8 perm: 1; + u8 reserved2; + __be16 stream_identifier; + u8 rel_lifetime: 6; + u8 reserved3: 2; + u8 reserved4[3]; +}; + +struct scsi_stream_status_header { + __be32 len; + u16 reserved; + __be16 number_of_open_streams; + struct { + struct {} __empty_stream_status; + struct scsi_stream_status stream_status[0]; + }; +}; + +enum scsi_prot_flags { + SCSI_PROT_TRANSFER_PI = 1, + SCSI_PROT_GUARD_CHECK = 2, + SCSI_PROT_REF_CHECK = 4, + SCSI_PROT_REF_INCREMENT = 8, + SCSI_PROT_IP_CHECKSUM = 16, +}; + +enum { + SD_EXT_CDB_SIZE = 32, + SD_MEMPOOL_SIZE = 2, +}; + +enum { + SD_DEF_XFER_BLOCKS = 65535, + SD_MAX_XFER_BLOCKS = 4294967295, + SD_MAX_WS10_BLOCKS = 65535, + SD_MAX_WS16_BLOCKS = 8388607, +}; + +enum { + SD_LBP_FULL = 0, + SD_LBP_UNMAP = 1, + SD_LBP_WS16 = 2, + SD_LBP_WS10 = 3, + SD_LBP_ZERO = 4, + SD_LBP_DISABLE = 5, +}; + +enum { + SD_ZERO_WRITE = 0, + SD_ZERO_WS = 1, + SD_ZERO_WS16_UNMAP = 2, + SD_ZERO_WS10_UNMAP = 3, +}; + +struct scsi_disk { + struct scsi_device *device; + long: 32; + struct device disk_dev; + struct gendisk *disk; + struct opal_dev *opal_dev; + atomic_t openers; + long: 32; + sector_t capacity; + int max_retries; + u32 min_xfer_blocks; + u32 max_xfer_blocks; + u32 opt_xfer_blocks; + u32 max_ws_blocks; + u32 max_unmap_blocks; + u32 unmap_granularity; + u32 unmap_alignment; + u32 max_atomic; + u32 atomic_alignment; + u32 atomic_granularity; + u32 max_atomic_with_boundary; + u32 max_atomic_boundary; + u32 index; + unsigned int 
physical_block_size; + unsigned int max_medium_access_timeouts; + unsigned int medium_access_timed_out; + u16 permanent_stream_count; + u8 media_present; + u8 write_prot; + u8 protection_type; + u8 provisioning_mode; + u8 zeroing_mode; + u8 nr_actuators; + bool suspended; + unsigned int ATO: 1; + unsigned int cache_override: 1; + unsigned int WCE: 1; + unsigned int RCD: 1; + unsigned int DPOFUA: 1; + unsigned int first_scan: 1; + unsigned int lbpme: 1; + unsigned int lbprz: 1; + unsigned int lbpu: 1; + unsigned int lbpws: 1; + unsigned int lbpws10: 1; + unsigned int lbpvpd: 1; + unsigned int ws10: 1; + unsigned int ws16: 1; + unsigned int rc_basis: 2; + unsigned int zoned: 2; + unsigned int urswrz: 1; + unsigned int security: 1; + unsigned int ignore_medium_access_errors: 1; + unsigned int rscs: 1; + unsigned int use_atomic_write_boundary: 1; +}; + +enum { + VETH_INFO_UNSPEC = 0, + VETH_INFO_PEER = 1, + __VETH_INFO_MAX = 2, +}; + +struct veth_stats { + u64 rx_drops; + u64 xdp_packets; + u64 xdp_bytes; + u64 xdp_redirect; + u64 xdp_drops; + u64 xdp_tx; + u64 xdp_tx_err; + u64 peer_tq_xdp_xmit; + u64 peer_tq_xdp_xmit_err; +}; + +struct veth_rq_stats { + struct veth_stats vs; + struct u64_stats_sync syncp; + long: 32; +}; + +struct veth_rq { + struct napi_struct xdp_napi; + struct napi_struct *napi; + struct net_device *dev; + struct bpf_prog *xdp_prog; + struct xdp_mem_info xdp_mem; + long: 32; + struct veth_rq_stats stats; + bool rx_notify_masked; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ptr_ring xdp_ring; + struct xdp_rxq_info xdp_rxq; + struct page_pool *page_pool; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct veth_priv { + struct net_device *peer; + long: 32; + atomic64_t dropped; + struct bpf_prog *_xdp_prog; + struct veth_rq *rq; + unsigned int requested_headroom; + long: 32; +}; + +struct veth_xdp_tx_bq { + struct xdp_frame *q[16]; + unsigned int count; +}; + +struct veth_q_stat_desc { + char desc[32]; + size_t offset; +}; + +struct veth_xdp_buff { + struct xdp_buff xdp; + struct sk_buff *skb; +}; + +struct iwl_dev_info { + u16 device; + u16 subdevice; + u16 mac_type; + u16 rf_type; + u8 mac_step; + u8 rf_step; + u8 rf_id; + u8 no_160; + u8 cores; + u8 cdb; + u8 jacket; + const struct iwl_cfg *cfg; + const char *name; +}; + +enum { + WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 2147483648, +}; + +struct iwl_error_resp___2 { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_info; + __le64 timestamp; +}; + +struct iwl_csa_notification { + __le16 band; + __le16 channel; + __le32 status; +}; + +struct iwlagn_non_cfg_phy { + __le32 non_cfg_phy[8]; +}; + +struct iwl_rx_mpdu_res_start { + __le16 byte_count; + __le16 reserved; +}; + +struct iwl_card_state_notif { + __le32 flags; +}; + +struct iwlagn_beacon_notif { + struct iwlagn_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +}; + +struct statistics_rx_non_phy_bt { + struct statistics_rx_non_phy common; + __le32 num_bt_kills; + __le32 reserved[2]; +}; + +struct statistics_rx { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy general; + struct statistics_rx_ht_phy ofdm_ht; +}; + +struct statistics_rx_bt { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy_bt general; + struct 
statistics_rx_ht_phy ofdm_ht; +}; + +struct statistics_bt_activity { + __le32 hi_priority_tx_req_cnt; + __le32 hi_priority_tx_denied_cnt; + __le32 lo_priority_tx_req_cnt; + __le32 lo_priority_tx_denied_cnt; + __le32 hi_priority_rx_req_cnt; + __le32 hi_priority_rx_denied_cnt; + __le32 lo_priority_rx_req_cnt; + __le32 lo_priority_rx_denied_cnt; +}; + +struct statistics_general { + struct statistics_general_common common; + __le32 reserved2; + __le32 reserved3; +}; + +struct statistics_general_bt { + struct statistics_general_common common; + struct statistics_bt_activity activity; + __le32 reserved2; + __le32 reserved3; +}; + +struct iwl_notif_statistics { + __le32 flag; + struct statistics_rx rx; + struct statistics_tx tx; + struct statistics_general general; +}; + +struct iwl_bt_notif_statistics { + __le32 flag; + struct statistics_rx_bt rx; + struct statistics_tx tx; + struct statistics_general_bt general; +}; + +struct iwl_missed_beacon_notif { + __le32 consecutive_missed_beacons; + __le32 total_missed_becons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +}; + +struct iwl_wipan_noa_descriptor { + u8 count; + __le32 duration; + __le32 interval; + __le32 starttime; +} __attribute__((packed)); + +struct iwl_wipan_noa_attribute { + u8 id; + __le16 length; + u8 index; + u8 ct_window; + struct iwl_wipan_noa_descriptor descr0; + struct iwl_wipan_noa_descriptor descr1; + u8 reserved; +} __attribute__((packed)); + +struct iwl_wipan_noa_notification { + u32 noa_active; + struct iwl_wipan_noa_attribute noa_attribute; +}; + +enum { + MEASUREMENT_READY = 1, + MEASUREMENT_ACTIVE = 2, +}; + +struct iwl_mac_client_data { + u8 is_assoc; + u8 esr_transition_timeout; + __le16 medium_sync_delay; + __le16 assoc_id; + __le16 reserved1; + __le16 data_policy; + __le16 reserved2; + __le32 ctwin; +}; + +struct iwl_mac_p2p_dev_data { + __le32 is_disc_extended; +}; + +enum iwl_mac_config_filter_flags { + MAC_CFG_FILTER_PROMISC = 1, + MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT = 2, + MAC_CFG_FILTER_ACCEPT_GRP = 4, + MAC_CFG_FILTER_ACCEPT_BEACON = 8, + MAC_CFG_FILTER_ACCEPT_BCAST_PROBE_RESP = 16, + MAC_CFG_FILTER_ACCEPT_PROBE_REQ = 32, +}; + +struct iwl_mac_config_cmd { + __le32 id_and_color; + __le32 action; + __le32 mac_type; + u8 local_mld_addr[6]; + __le16 reserved_for_local_mld_addr; + __le32 filter_flags; + __le16 he_support; + __le16 he_ap_support; + __le32 eht_support; + __le32 nic_not_ack_enabled; + union { + struct iwl_mac_client_data client; + struct iwl_mac_p2p_dev_data p2p_dev; + }; +}; + +enum xhci_ep_reset_type { + EP_HARD_RESET = 0, + EP_SOFT_RESET = 1, +}; + +enum xhci_setup_dev { + SETUP_CONTEXT_ONLY = 0, + SETUP_CONTEXT_ADDRESS = 1, +}; + +enum sanyo_state { + STATE_INACTIVE___6 = 0, + STATE_HEADER_SPACE___2 = 1, + STATE_BIT_PULSE___2 = 2, + STATE_BIT_SPACE___2 = 3, + STATE_TRAILER_PULSE___2 = 4, + STATE_TRAILER_SPACE___2 = 5, +}; + +struct dm_sysfs_attr { + struct attribute attr; + ssize_t (*show)(struct mapped_device *, char *); + ssize_t (*store)(struct mapped_device *, const char *, size_t); +}; + +struct dmi_device_attribute { + struct device_attribute dev_attr; + int field; +}; + +struct mafield { + const char *prefix; + int field; +}; + +struct hid_control_fifo { + unsigned char dir; + struct hid_report *report; + char *raw_report; +}; + +struct hid_output_fifo { + struct hid_report *report; + char *raw_report; +}; + +struct hid_class_descriptor { + __u8 bDescriptorType; + __le16 wDescriptorLength; +} __attribute__((packed)); + +struct hid_descriptor { + __u8 bLength; + __u8 
bDescriptorType; + __le16 bcdHID; + __u8 bCountryCode; + __u8 bNumDescriptors; + struct hid_class_descriptor desc[1]; +} __attribute__((packed)); + +struct usbhid_device { + struct hid_device *hid; + struct usb_interface *intf; + int ifnum; + unsigned int bufsize; + struct urb *urbin; + char *inbuf; + dma_addr_t inbuf_dma; + struct urb *urbctrl; + struct usb_ctrlrequest *cr; + struct hid_control_fifo ctrl[256]; + unsigned char ctrlhead; + unsigned char ctrltail; + char *ctrlbuf; + dma_addr_t ctrlbuf_dma; + long unsigned int last_ctrl; + struct urb *urbout; + struct hid_output_fifo out[256]; + unsigned char outhead; + unsigned char outtail; + char *outbuf; + dma_addr_t outbuf_dma; + long unsigned int last_out; + struct mutex mutex; + spinlock_t lock; + long unsigned int iofl; + struct timer_list io_retry; + long unsigned int stop_retry; + unsigned int retry_delay; + struct work_struct reset_work; + wait_queue_head_t wait; +}; + +struct xdp_frame_bulk { + int count; + void *xa; + void *q[16]; +}; + +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +struct channels_reply_data { + struct ethnl_reply_data base; + struct ethtool_channels channels; +}; + +struct nf_ct_tcp_flags { + __u8 flags; + __u8 mask; +}; + +enum nf_ct_tcp_action { + NFCT_TCP_IGNORE = 0, + NFCT_TCP_INVALID = 1, + NFCT_TCP_ACCEPT = 2, +}; + +enum tcp_bit_set { + TCP_SYN_SET = 0, + TCP_SYNACK_SET = 1, + TCP_FIN_SET = 2, + TCP_ACK_SET = 3, + TCP_RST_SET = 4, + TCP_NONE_SET = 5, +}; + +enum ctattr_protoinfo_tcp { + CTA_PROTOINFO_TCP_UNSPEC = 0, + CTA_PROTOINFO_TCP_STATE = 1, + CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2, + CTA_PROTOINFO_TCP_WSCALE_REPLY = 3, + CTA_PROTOINFO_TCP_FLAGS_ORIGINAL = 4, + CTA_PROTOINFO_TCP_FLAGS_REPLY = 5, + __CTA_PROTOINFO_TCP_MAX = 6, +}; + +enum nft_payload_csum_types { + NFT_PAYLOAD_CSUM_NONE = 0, + NFT_PAYLOAD_CSUM_INET = 1, + NFT_PAYLOAD_CSUM_SCTP = 2, +}; + +enum nft_payload_csum_flags { + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 1, +}; + +enum nft_payload_attributes { + NFTA_PAYLOAD_UNSPEC = 0, + NFTA_PAYLOAD_DREG = 1, + NFTA_PAYLOAD_BASE = 2, + NFTA_PAYLOAD_OFFSET = 3, + NFTA_PAYLOAD_LEN = 4, + NFTA_PAYLOAD_SREG = 5, + NFTA_PAYLOAD_CSUM_TYPE = 6, + NFTA_PAYLOAD_CSUM_OFFSET = 7, + NFTA_PAYLOAD_CSUM_FLAGS = 8, + __NFTA_PAYLOAD_MAX = 9, +}; + +struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +}; + +struct nft_payload_set { + enum nft_payload_bases base: 8; + u8 offset; + u8 len; + u8 sreg; + u8 csum_type; + u8 csum_offset; + u8 csum_flags; +}; + +struct nft_payload_vlan_hdr { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; +}; + +struct sock_skb_cb { + u32 dropcount; +}; + +struct ip_options_data { + struct ip_options_rcu opt; + char data[40]; +}; + +struct udp_dev_scratch { + u32 _tsize_state; +}; + +struct bpf_iter__udp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct udp_sock *udp_sk; + }; + uid_t uid; + long: 32; + int bucket; + long: 32; +}; + +struct bpf_udp_iter_state { + struct udp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + int offset; + struct sock **batch; + bool st_bucket_done; +}; + +struct ip_tunnel_prl { + __be32 addr; + __u16 flags; + __u16 __reserved; + __u32 datalen; + __u32 __reserved2; +}; + +struct xfrm_tunnel { + int (*handler)(struct sk_buff *); + int (*cb_handler)(struct sk_buff *, int); + int (*err_handler)(struct sk_buff *, u32); + struct xfrm_tunnel *next; + int priority; +}; + +struct sit_net { + struct ip_tunnel *tunnels_r_l[16]; + struct ip_tunnel *tunnels_r[16]; + 
struct ip_tunnel *tunnels_l[16]; + struct ip_tunnel *tunnels_wc[1]; + struct ip_tunnel **tunnels[4]; + struct net_device *fb_tunnel_dev; +}; + +typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, long unsigned int), long int (*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *)); + +struct compress_format { + unsigned char magic[2]; + const char *name; + decompress_fn decompressor; +}; + +typedef void (*clock_access_fn)(struct timespec64 *); + +struct patch { + void *addr; + unsigned int insn; +}; + +enum proc_cn_event { + PROC_EVENT_NONE = 0, + PROC_EVENT_FORK = 1, + PROC_EVENT_EXEC = 2, + PROC_EVENT_UID = 4, + PROC_EVENT_GID = 64, + PROC_EVENT_SID = 128, + PROC_EVENT_PTRACE = 256, + PROC_EVENT_COMM = 512, + PROC_EVENT_NONZERO_EXIT = 536870912, + PROC_EVENT_COREDUMP = 1073741824, + PROC_EVENT_EXIT = 2147483648, +}; + +struct nbcon_state { + union { + unsigned int atom; + struct { + unsigned int prio: 2; + unsigned int req_prio: 2; + unsigned int unsafe: 1; + unsigned int unsafe_takeover: 1; + unsigned int cpu: 24; + }; + }; +}; + +struct rcu_cblist { + struct callback_head *head; + struct callback_head **tail; + long int len; +}; + +typedef int (*task_call_f)(struct task_struct *, void *); + +enum tick_broadcast_mode { + TICK_BROADCAST_OFF = 0, + TICK_BROADCAST_ON = 1, + TICK_BROADCAST_FORCE = 2, +}; + +enum tick_broadcast_state { + TICK_BROADCAST_EXIT = 0, + TICK_BROADCAST_ENTER = 1, +}; + +struct kallsym_iter { + loff_t pos; + loff_t pos_mod_end; + loff_t pos_ftrace_mod_end; + loff_t pos_bpf_end; + long unsigned int value; + unsigned int nameoff; + char type; + char name[512]; + char module_name[60]; + int exported; + int show_value; +}; + +struct bpf_iter__ksym { + union { + struct bpf_iter_meta *meta; + }; + union { + struct kallsym_iter *ksym; + }; +}; + +struct trace_dynamic_info { + u16 offset; + u16 len; +}; + +struct synth_field_desc { + const char *type; + const char *name; +}; + +struct synth_trace_event; + +struct synth_event_trace_state { + struct trace_event_buffer fbuffer; + struct synth_trace_event *entry; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int cur_field; + unsigned int n_u64; + bool disabled; + bool add_next; + bool add_name; +}; + +union trace_synth_field { + u8 as_u8; + u16 as_u16; + u32 as_u32; + u64 as_u64; + struct trace_dynamic_info as_dynamic; +}; + +struct synth_trace_event { + struct trace_entry ent; + union trace_synth_field fields[0]; +}; + +enum { + SYNTH_ERR_BAD_NAME = 0, + SYNTH_ERR_INVALID_CMD = 1, + SYNTH_ERR_INVALID_DYN_CMD = 2, + SYNTH_ERR_EVENT_EXISTS = 3, + SYNTH_ERR_TOO_MANY_FIELDS = 4, + SYNTH_ERR_INCOMPLETE_TYPE = 5, + SYNTH_ERR_INVALID_TYPE = 6, + SYNTH_ERR_INVALID_FIELD = 7, + SYNTH_ERR_INVALID_ARRAY_SPEC = 8, +}; + +struct bpf_spin_lock { + __u32 val; +}; + +struct bpf_timer { + __u64 __opaque[2]; +}; + +struct bpf_wq { + __u64 __opaque[2]; +}; + +struct bpf_list_head { + __u64 __opaque[2]; +}; + +struct bpf_list_node { + __u64 __opaque[3]; +}; + +struct bpf_rb_root { + __u64 __opaque[2]; +}; + +struct bpf_rb_node { + __u64 __opaque[4]; +}; + +struct bpf_refcount { + __u32 __opaque[1]; +}; + +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; + +enum { + BPF_F_TIMER_ABS = 1, + BPF_F_TIMER_CPU_PIN = 2, +}; + +enum bpf_kfunc_flags { + BPF_F_PAD_ZEROS = 1, +}; + +struct bpf_rb_node_kern { + struct rb_node rb_node; + void *owner; +}; + +struct bpf_list_node_kern { + struct list_head list_head; + void *owner; + long: 32; +}; + +typedef u64 
(*btf_bpf_map_lookup_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + +typedef u64 (*btf_bpf_get_smp_processor_id)(); + +typedef u64 (*btf_bpf_get_numa_node_id)(); + +typedef u64 (*btf_bpf_ktime_get_ns)(); + +typedef u64 (*btf_bpf_ktime_get_boot_ns)(); + +typedef u64 (*btf_bpf_ktime_get_coarse_ns)(); + +typedef u64 (*btf_bpf_ktime_get_tai_ns)(); + +typedef u64 (*btf_bpf_get_current_pid_tgid)(); + +typedef u64 (*btf_bpf_get_current_uid_gid)(); + +typedef u64 (*btf_bpf_get_current_comm)(char *, u32); + +typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_spin_unlock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_jiffies64)(); + +typedef u64 (*btf_bpf_get_current_cgroup_id)(); + +typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int); + +typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, s64 *); + +typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, u64 *); + +typedef u64 (*btf_bpf_strncmp)(const char *, u32, const char *); + +typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, u32); + +typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_copy_from_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_copy_from_user_task)(void *, u32, const void *, struct task_struct *, u64); + +typedef u64 (*btf_bpf_per_cpu_ptr)(const void *, u32); + +typedef u64 (*btf_bpf_this_cpu_ptr)(const void *); + +struct bpf_bprintf_buffers { + char bin_args[512]; + char buf[1024]; +}; + +typedef u64 (*btf_bpf_snprintf)(char *, u32, char *, const void *, u32); + +struct bpf_async_cb { + struct bpf_map *map; + struct bpf_prog *prog; + void *callback_fn; + void *value; + union { + struct callback_head rcu; + struct work_struct delete_work; + }; + u64 flags; +}; + +struct bpf_hrtimer { + struct bpf_async_cb cb; + struct hrtimer timer; + atomic_t cancelling; + long: 32; +}; + +struct bpf_work { + struct bpf_async_cb cb; + struct work_struct work; + struct work_struct delete_work; +}; + +struct bpf_async_kern { + union { + struct bpf_async_cb *cb; + struct bpf_hrtimer *timer; + struct bpf_work *work; + }; + struct bpf_spin_lock lock; +}; + +enum bpf_async_type { + BPF_ASYNC_TYPE_TIMER = 0, + BPF_ASYNC_TYPE_WQ = 1, +}; + +typedef u64 (*btf_bpf_timer_init)(struct bpf_async_kern *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_timer_set_callback)(struct bpf_async_kern *, void *, struct bpf_prog_aux *); + +typedef u64 (*btf_bpf_timer_start)(struct bpf_async_kern *, u64, u64); + +typedef u64 (*btf_bpf_timer_cancel)(struct bpf_async_kern *); + +typedef u64 (*btf_bpf_kptr_xchg)(void *, void *); + +typedef u64 (*btf_bpf_dynptr_from_mem)(void *, u32, u64, struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_dynptr_read)(void *, u32, const struct bpf_dynptr_kern *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_write)(const struct bpf_dynptr_kern *, u32, void *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_data)(const struct bpf_dynptr_kern *, u32, u32); + +typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32); + +struct bpf_throw_ctx { + struct bpf_prog_aux 
*aux; + long: 32; + u64 sp; + u64 bp; + int cnt; + long: 32; +}; + +struct bpf_iter_bits { + __u64 __opaque[2]; +}; + +struct bpf_iter_bits_kern { + union { + __u64 *bits; + __u64 bits_copy; + }; + int nr_bits; + int bit; +}; + +enum bpf_cond_pseudo_jmp { + BPF_MAY_GOTO = 0, +}; + +enum bpf_addr_space_cast { + BPF_ADDR_SPACE_CAST = 1, +}; + +typedef void (*bpf_insn_print_t)(void *, const char *, ...); + +typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); + +typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); + +struct bpf_insn_cbs { + bpf_insn_print_t cb_print; + bpf_insn_revmap_call_t cb_call; + bpf_insn_print_imm_t cb_imm; + void *private_data; +}; + +struct bpf_mem_cache { + struct llist_head free_llist; + local_t active; + struct llist_head free_llist_extra; + struct irq_work refill_work; + struct obj_cgroup *objcg; + int unit_size; + int free_cnt; + int low_watermark; + int high_watermark; + int batch; + int percpu_size; + bool draining; + struct bpf_mem_cache *tgt; + struct llist_head free_by_rcu; + struct llist_node *free_by_rcu_tail; + struct llist_head waiting_for_gp; + struct llist_node *waiting_for_gp_tail; + struct callback_head rcu; + atomic_t call_rcu_in_progress; + struct llist_head free_llist_extra_rcu; + struct llist_head free_by_rcu_ttrace; + struct llist_head waiting_for_gp_ttrace; + struct callback_head rcu_ttrace; + atomic_t call_rcu_ttrace_in_progress; +}; + +struct bpf_mem_caches { + struct bpf_mem_cache cache[11]; +}; + +enum { + XA_CHECK_SCHED = 4096, +}; + +struct wb_lock_cookie { + bool locked; + long unsigned int flags; +}; + +struct dirty_throttle_control { + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + long unsigned int avail; + long unsigned int dirty; + long unsigned int thresh; + long unsigned int bg_thresh; + long unsigned int wb_dirty; + long unsigned int wb_thresh; + long unsigned int wb_bg_thresh; + long unsigned int pos_ratio; + bool freerun; + bool dirty_exceeded; +}; + +struct stat { + long unsigned int st_dev; + long unsigned int st_ino; + short unsigned int st_mode; + short unsigned int st_nlink; + short unsigned int st_uid; + short unsigned int st_gid; + long unsigned int st_rdev; + long unsigned int st_size; + long unsigned int st_blksize; + long unsigned int st_blocks; + long unsigned int st_atime; + long unsigned int st_atime_nsec; + long unsigned int st_mtime; + long unsigned int st_mtime_nsec; + long unsigned int st_ctime; + long unsigned int st_ctime_nsec; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct stat64 { + long long unsigned int st_dev; + unsigned char __pad0[4]; + long unsigned int __st_ino; + unsigned int st_mode; + unsigned int st_nlink; + long unsigned int st_uid; + long unsigned int st_gid; + long long unsigned int st_rdev; + unsigned char __pad3[4]; + long: 32; + long long int st_size; + long unsigned int st_blksize; + long: 32; + long long unsigned int st_blocks; + long unsigned int st_atime; + long unsigned int st_atime_nsec; + long unsigned int st_mtime; + long unsigned int st_mtime_nsec; + long unsigned int st_ctime; + long unsigned int st_ctime_nsec; + long long unsigned int st_ino; +}; + +struct statx_timestamp { + __s64 tv_sec; + __u32 tv_nsec; + __s32 __reserved; +}; + +struct statx { + __u32 stx_mask; + __u32 stx_blksize; + __u64 stx_attributes; + __u32 stx_nlink; + __u32 stx_uid; + __u32 stx_gid; + __u16 stx_mode; + __u16 __spare0[1]; + 
__u64 stx_ino; + __u64 stx_size; + __u64 stx_blocks; + __u64 stx_attributes_mask; + struct statx_timestamp stx_atime; + struct statx_timestamp stx_btime; + struct statx_timestamp stx_ctime; + struct statx_timestamp stx_mtime; + __u32 stx_rdev_major; + __u32 stx_rdev_minor; + __u32 stx_dev_major; + __u32 stx_dev_minor; + __u64 stx_mnt_id; + __u32 stx_dio_mem_align; + __u32 stx_dio_offset_align; + __u64 stx_subvol; + __u32 stx_atomic_write_unit_min; + __u32 stx_atomic_write_unit_max; + __u32 stx_atomic_write_segments_max; + __u32 __spare1[1]; + __u64 __spare3[9]; +}; + +struct pde_opener { + struct list_head lh; + struct file *file; + bool closing; + struct completion *c; +}; + +enum { + BIAS = 2147483648, +}; + +struct vmcore { + struct list_head list; + long long unsigned int paddr; + long long unsigned int size; + loff_t offset; +}; + +struct vmcore_cb { + bool (*pfn_is_ram)(struct vmcore_cb *, long unsigned int); + struct list_head next; +}; + +enum cc_attr { + CC_ATTR_MEM_ENCRYPT = 0, + CC_ATTR_HOST_MEM_ENCRYPT = 1, + CC_ATTR_GUEST_MEM_ENCRYPT = 2, + CC_ATTR_GUEST_STATE_ENCRYPT = 3, + CC_ATTR_GUEST_UNROLL_STRING_IO = 4, + CC_ATTR_GUEST_SEV_SNP = 5, + CC_ATTR_HOST_SEV_SNP = 6, +}; + +enum { + BLOCK_BITMAP = 0, + INODE_BITMAP = 1, + INODE_TABLE = 2, + GROUP_TABLE_COUNT = 3, +}; + +struct ext4_rcu_ptr { + struct callback_head rcu; + void *ptr; +}; + +struct ext4_new_flex_group_data { + struct ext4_new_group_data *groups; + __u16 *bg_flags; + ext4_group_t resize_bg; + ext4_group_t count; +}; + +struct btrfs_disk_balance_args { + __le64 profiles; + union { + __le64 usage; + struct { + __le32 usage_min; + __le32 usage_max; + }; + }; + __le64 devid; + __le64 pstart; + __le64 pend; + __le64 vstart; + __le64 vend; + __le64 target; + __le64 flags; + union { + __le64 limit; + struct { + __le32 limit_min; + __le32 limit_max; + }; + }; + __le32 stripes_min; + __le32 stripes_max; + __le64 unused[6]; +}; + +struct btrfs_balance_item { + __le64 flags; + struct btrfs_disk_balance_args data; + struct btrfs_disk_balance_args meta; + struct btrfs_disk_balance_args sys; + __le64 unused[4]; +}; + +struct btrfs_dev_stats_item { + __le64 values[5]; +}; + +struct btrfs_swapfile_pin { + struct rb_node node; + void *ptr; + struct inode *inode; + bool is_block_group; + int bg_extent_count; +}; + +enum btrfs_map_op { + BTRFS_MAP_READ = 0, + BTRFS_MAP_WRITE = 1, + BTRFS_MAP_GET_READ_MIRRORS = 2, +}; + +struct btrfs_io_geometry { + u32 stripe_index; + u32 stripe_nr; + int mirror_num; + int num_stripes; + u64 stripe_offset; + u64 raid56_full_stripe_start; + int max_errors; + enum btrfs_map_op op; +}; + +struct alloc_chunk_ctl { + u64 start; + u64 type; + int num_stripes; + int sub_stripes; + int dev_stripes; + int devs_max; + int devs_min; + int devs_increment; + int ncopies; + int nparity; + u64 max_stripe_size; + u64 max_chunk_size; + u64 dev_extent_min; + u64 stripe_size; + u64 chunk_size; + int ndevs; + long: 32; +}; + +struct msgbuf { + __kernel_long_t mtype; + char mtext[1]; +}; + +struct msg; + +struct msqid_ds { + struct ipc_perm msg_perm; + struct msg *msg_first; + struct msg *msg_last; + __kernel_old_time_t msg_stime; + __kernel_old_time_t msg_rtime; + __kernel_old_time_t msg_ctime; + long unsigned int msg_lcbytes; + long unsigned int msg_lqbytes; + short unsigned int msg_cbytes; + short unsigned int msg_qnum; + short unsigned int msg_qbytes; + __kernel_ipc_pid_t msg_lspid; + __kernel_ipc_pid_t msg_lrpid; +}; + +struct msqid64_ds { + struct ipc64_perm msg_perm; + long unsigned int msg_stime; + long unsigned 
int msg_stime_high; + long unsigned int msg_rtime; + long unsigned int msg_rtime_high; + long unsigned int msg_ctime; + long unsigned int msg_ctime_high; + long unsigned int msg_cbytes; + long unsigned int msg_qnum; + long unsigned int msg_qbytes; + __kernel_pid_t msg_lspid; + __kernel_pid_t msg_lrpid; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct msginfo { + int msgpool; + int msgmap; + int msgmax; + int msgmnb; + int msgmni; + int msgssz; + int msgtql; + short unsigned int msgseg; +}; + +struct msg_queue { + struct kern_ipc_perm q_perm; + time64_t q_stime; + time64_t q_rtime; + time64_t q_ctime; + long unsigned int q_cbytes; + long unsigned int q_qnum; + long unsigned int q_qbytes; + struct pid *q_lspid; + struct pid *q_lrpid; + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct msg_receiver { + struct list_head r_list; + struct task_struct *r_tsk; + int r_mode; + long int r_msgtype; + long int r_maxsize; + struct msg_msg *r_msg; +}; + +struct msg_sender { + struct list_head list; + struct task_struct *tsk; + size_t msgsz; +}; + +struct cmac_tfm_ctx { + struct crypto_cipher *child; + long: 32; + __be64 consts[0]; +}; + +struct cmac_desc_ctx { + unsigned int len; + u8 odds[0]; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +struct ccm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn mac; +}; + +struct crypto_ccm_ctx { + struct crypto_ahash *mac; + struct crypto_skcipher *ctr; +}; + +struct crypto_rfc4309_ctx { + struct crypto_aead *child; + u8 nonce[3]; +}; + +struct crypto_rfc4309_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct aead_request subreq; +}; + +struct crypto_ccm_req_priv_ctx { + u8 odata[16]; + u8 idata[16]; + u8 auth_tag[16]; + u32 flags; + struct scatterlist src[3]; + struct scatterlist dst[3]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + }; +}; + +struct cbcmac_tfm_ctx { + struct crypto_cipher *child; +}; + +struct cbcmac_desc_ctx { + unsigned int len; + u8 dg[0]; +}; + +struct blk_major_name { + struct blk_major_name *next; + int major; + char name[16]; + void (*probe)(dev_t); +}; + +enum { + IORING_REG_WAIT_TS = 1, +}; + +struct io_uring_reg_wait { + struct __kernel_timespec ts; + __u32 min_wait_usec; + __u32 flags; + __u64 sigmask; + __u32 sigmask_sz; + __u32 pad[3]; + __u64 pad2[2]; +}; + +struct io_uring_getevents_arg { + __u64 sigmask; + __u32 sigmask_sz; + __u32 min_wait_usec; + __u64 ts; +}; + +struct io_overflow_cqe { + struct list_head list; + struct io_uring_cqe cqe; +}; + +struct trace_event_raw_io_uring_create { + struct trace_entry ent; + int fd; + void *ctx; + u32 sq_entries; + u32 cq_entries; + u32 flags; + char __data[0]; +}; + +struct trace_event_raw_io_uring_register { + struct trace_entry ent; + void *ctx; + unsigned int opcode; + unsigned int nr_files; + unsigned int nr_bufs; + long int ret; + char __data[0]; +}; + +struct trace_event_raw_io_uring_file_get { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int fd; + char __data[0]; + long: 
32; +}; + +struct trace_event_raw_io_uring_queue_async_work { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + u8 opcode; + long: 32; + long long unsigned int flags; + struct io_wq_work *work; + int rw; + u32 __data_loc_op_str; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_defer { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int data; + u8 opcode; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_link { + struct trace_entry ent; + void *ctx; + void *req; + void *target_req; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqring_wait { + struct trace_entry ent; + void *ctx; + int min_events; + char __data[0]; +}; + +struct trace_event_raw_io_uring_fail_link { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + void *link; + u32 __data_loc_op_str; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_complete { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int res; + unsigned int cflags; + u64 extra1; + u64 extra2; + char __data[0]; +}; + +struct trace_event_raw_io_uring_submit_req { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + long: 32; + long long unsigned int flags; + bool sq_thread; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_poll_arm { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + int mask; + int events; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_add { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + int mask; + u32 __data_loc_op_str; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_req_failed { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + u8 flags; + u8 ioprio; + long: 32; + u64 off; + u64 addr; + u32 len; + u32 op_flags; + u16 buf_index; + u16 personality; + u32 file_index; + u64 pad1; + u64 addr3; + int error; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqe_overflow { + struct trace_entry ent; + void *ctx; + long: 32; + long long unsigned int user_data; + s32 res; + u32 cflags; + void *ocqe; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_task_work_run { + struct trace_entry ent; + void *tctx; + unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_io_uring_short_write { + struct trace_entry ent; + void *ctx; + long: 32; + u64 fpos; + u64 wanted; + u64 got; + char __data[0]; +}; + +struct trace_event_raw_io_uring_local_work_run { + struct trace_entry ent; + void *ctx; + int count; + unsigned int loops; + char __data[0]; +}; + +struct trace_event_data_offsets_io_uring_create {}; + +struct trace_event_data_offsets_io_uring_register {}; + +struct trace_event_data_offsets_io_uring_file_get {}; + +struct trace_event_data_offsets_io_uring_queue_async_work { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_defer { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_link {}; + +struct trace_event_data_offsets_io_uring_cqring_wait {}; + +struct trace_event_data_offsets_io_uring_fail_link { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_complete {}; + +struct 
trace_event_data_offsets_io_uring_submit_req { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_poll_arm { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_task_add { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_req_failed { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_cqe_overflow {}; + +struct trace_event_data_offsets_io_uring_task_work_run {}; + +struct trace_event_data_offsets_io_uring_short_write {}; + +struct trace_event_data_offsets_io_uring_local_work_run {}; + +typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32); + +typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, unsigned int, long int); + +typedef void (*btf_trace_io_uring_file_get)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_queue_async_work)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_defer)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int); + +typedef void (*btf_trace_io_uring_fail_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_complete)(void *, struct io_ring_ctx *, void *, struct io_uring_cqe *); + +typedef void (*btf_trace_io_uring_submit_req)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_poll_arm)(void *, struct io_kiocb *, int, int); + +typedef void (*btf_trace_io_uring_task_add)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_req_failed)(void *, const struct io_uring_sqe *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_cqe_overflow)(void *, void *, long long unsigned int, s32, u32, void *); + +typedef void (*btf_trace_io_uring_task_work_run)(void *, void *, unsigned int); + +typedef void (*btf_trace_io_uring_short_write)(void *, void *, u64, u64, u64); + +typedef void (*btf_trace_io_uring_local_work_run)(void *, void *, int, unsigned int); + +enum { + IO_APOLL_OK = 0, + IO_APOLL_ABORTED = 1, + IO_APOLL_READY = 2, +}; + +struct io_defer_entry { + struct list_head list; + struct io_kiocb *req; + u32 seq; +}; + +struct ext_arg { + size_t argsz; + long: 32; + struct timespec64 ts; + const sigset_t *sig; + long: 32; + ktime_t min_time; + bool ts_set; + long: 32; +}; + +struct io_tctx_exit { + struct callback_head task_work; + struct completion completion; + struct io_ring_ctx *ctx; +}; + +struct io_task_cancel { + struct io_uring_task *tctx; + bool all; +}; + +struct creds; + +struct tty_file_private { + struct tty_struct *tty; + struct file *file; + struct list_head list; +}; + +struct irq_affinity_devres { + unsigned int count; + unsigned int irq[0]; +}; + +struct platform_object { + struct platform_device pdev; + char name[0]; +}; + +struct iosys_map { + union { + void *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; +}; + +struct dma_buf_ops { + bool cache_sgt_mapping; + int (*attach)(struct dma_buf *, struct dma_buf_attachment *); + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + int (*pin)(struct dma_buf_attachment *); + void (*unpin)(struct dma_buf_attachment *); + struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); + void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); + void (*release)(struct dma_buf *); + 
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*mmap)(struct dma_buf *, struct vm_area_struct *); + int (*vmap)(struct dma_buf *, struct iosys_map *); + void (*vunmap)(struct dma_buf *, struct iosys_map *); +}; + +struct dma_buf_poll_cb_t { + struct dma_fence_cb cb; + wait_queue_head_t *poll; + __poll_t active; +}; + +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + unsigned int vmapping_counter; + struct iosys_map vmap_ptr; + const char *exp_name; + const char *name; + spinlock_t name_lock; + struct module *owner; + struct list_head list_node; + void *priv; + struct dma_resv *resv; + wait_queue_head_t poll; + struct dma_buf_poll_cb_t cb_in; + struct dma_buf_poll_cb_t cb_out; +}; + +struct dma_buf_attach_ops; + +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + struct sg_table *sgt; + enum dma_data_direction dir; + bool peer2peer; + const struct dma_buf_attach_ops *importer_ops; + void *importer_priv; + void *priv; +}; + +struct dma_buf_attach_ops { + bool allow_peer2peer; + void (*move_notify)(struct dma_buf_attachment *); +}; + +struct dma_buf_export_info { + const char *exp_name; + struct module *owner; + const struct dma_buf_ops *ops; + size_t size; + int flags; + struct dma_resv *resv; + void *priv; +}; + +struct dma_buf_sync { + __u64 flags; +}; + +struct dma_buf_export_sync_file { + __u32 flags; + __s32 fd; +}; + +struct dma_buf_import_sync_file { + __u32 flags; + __s32 fd; +}; + +enum ata_xfer_mask { + ATA_MASK_PIO = 127, + ATA_MASK_MWDMA = 3968, + ATA_MASK_UDMA = 1044480, +}; + +enum hsm_task_states { + HSM_ST_IDLE = 0, + HSM_ST_FIRST = 1, + HSM_ST = 2, + HSM_ST_LAST = 3, + HSM_ST_ERR = 4, +}; + +struct trace_event_raw_ata_qc_issue_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char cmd; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char feature; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + unsigned char proto; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_qc_complete_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char status; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char error; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_tf_load { + struct trace_entry ent; + unsigned int ata_port; + unsigned char cmd; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char feature; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char proto; + char __data[0]; +}; + +struct trace_event_raw_ata_exec_command_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int tag; + unsigned char cmd; + unsigned char feature; + unsigned char hob_nsect; + unsigned char proto; + char __data[0]; 
+}; + +struct trace_event_raw_ata_bmdma_status { + struct trace_entry ent; + unsigned int ata_port; + unsigned int tag; + unsigned char host_stat; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int eh_action; + unsigned int eh_err_mask; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy_qc { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int qc_flags; + unsigned int eh_err_mask; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_action_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int eh_action; + char __data[0]; +}; + +struct trace_event_raw_ata_link_reset_begin_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int class[2]; + long unsigned int deadline; + char __data[0]; +}; + +struct trace_event_raw_ata_link_reset_end_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int class[2]; + int rc; + char __data[0]; +}; + +struct trace_event_raw_ata_port_eh_begin_template { + struct trace_entry ent; + unsigned int ata_port; + char __data[0]; +}; + +struct trace_event_raw_ata_sff_hsm_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int qc_flags; + unsigned int protocol; + unsigned int hsm_state; + unsigned char dev_state; + char __data[0]; +}; + +struct trace_event_raw_ata_transfer_data_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int flags; + unsigned int offset; + unsigned int bytes; + char __data[0]; +}; + +struct trace_event_raw_ata_sff_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned char hsm_state; + char __data[0]; +}; + +struct trace_event_data_offsets_ata_qc_issue_template {}; + +struct trace_event_data_offsets_ata_qc_complete_template {}; + +struct trace_event_data_offsets_ata_tf_load {}; + +struct trace_event_data_offsets_ata_exec_command_template {}; + +struct trace_event_data_offsets_ata_bmdma_status {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy_qc {}; + +struct trace_event_data_offsets_ata_eh_action_template {}; + +struct trace_event_data_offsets_ata_link_reset_begin_template {}; + +struct trace_event_data_offsets_ata_link_reset_end_template {}; + +struct trace_event_data_offsets_ata_port_eh_begin_template {}; + +struct trace_event_data_offsets_ata_sff_hsm_template {}; + +struct trace_event_data_offsets_ata_transfer_data_template {}; + +struct trace_event_data_offsets_ata_sff_template {}; + +typedef void (*btf_trace_ata_qc_prep)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_issue)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_internal)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_failed)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_done)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_tf_load)(void *, struct ata_port *, const struct ata_taskfile *); + +typedef void (*btf_trace_ata_exec_command)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_setup)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_start)(void *, 
struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_stop)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_status)(void *, struct ata_port *, unsigned int); + +typedef void (*btf_trace_ata_eh_link_autopsy)(void *, struct ata_device *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_eh_link_autopsy_qc)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_eh_about_to_do)(void *, struct ata_link *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_eh_done)(void *, struct ata_link *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_link_hardreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_slave_hardreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_link_softreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_link_hardreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_slave_hardreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_link_softreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_link_postreset)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_slave_postreset)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_std_sched_eh)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_port_freeze)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_port_thaw)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_sff_hsm_state)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_hsm_command_complete)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_port_intr)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_pio_transfer_data)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_atapi_pio_transfer_data)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_atapi_send_cdb)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_sff_flush_pio_task)(void *, struct ata_port *); + +enum { + ATA_READID_POSTRESET = 1, + ATA_DNXFER_PIO = 0, + ATA_DNXFER_DMA = 1, + ATA_DNXFER_40C = 2, + ATA_DNXFER_FORCE_PIO = 3, + ATA_DNXFER_FORCE_PIO0 = 4, + ATA_DNXFER_QUIET = -2147483648, +}; + +struct ata_force_param { + const char *name; + u8 cbl; + u8 spd_limit; + unsigned int xfer_mask; + unsigned int quirk_on; + unsigned int quirk_off; + u16 lflags_on; + u16 lflags_off; +}; + +struct ata_force_ent { + int port; + int device; + struct ata_force_param param; +}; + +struct ata_xfer_ent { + int shift; + int bits; + u8 base; +}; + +struct ata_dev_quirks_entry { + const char *model_num; + const char *model_rev; + unsigned int quirks; +}; + +enum rtl_fw_opcode { + PHY_READ = 0, + PHY_DATA_OR = 1, + PHY_DATA_AND = 2, + PHY_BJMPN = 3, + PHY_MDIO_CHG = 4, + PHY_CLEAR_READCOUNT = 7, + PHY_WRITE = 8, + PHY_READCOUNT_EQ_SKIP = 9, + PHY_COMP_EQ_SKIPN = 10, + PHY_COMP_NEQ_SKIPN = 11, + PHY_WRITE_PREVIOUS = 12, + PHY_SKIPN = 13, + PHY_DELAY_MS = 14, +}; + +struct fw_info { + u32 magic; + char version[32]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __attribute__((packed)); + +enum 
ieee80211_vht_max_ampdu_length_exp { + IEEE80211_VHT_MAX_AMPDU_8K = 0, + IEEE80211_VHT_MAX_AMPDU_16K = 1, + IEEE80211_VHT_MAX_AMPDU_32K = 2, + IEEE80211_VHT_MAX_AMPDU_64K = 3, + IEEE80211_VHT_MAX_AMPDU_128K = 4, + IEEE80211_VHT_MAX_AMPDU_256K = 5, + IEEE80211_VHT_MAX_AMPDU_512K = 6, + IEEE80211_VHT_MAX_AMPDU_1024K = 7, +}; + +enum iwl_mei_nvm_caps { + MEI_NVM_CAPS_LARI_SUPPORT = 1, + MEI_NVM_CAPS_11AX_SUPPORT = 2, +}; + +enum iwl_nvm_sbands_flags { + IWL_NVM_SBANDS_FLAGS_LAR = 1, + IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = 2, +}; + +struct iwl_nvm_get_info { + __le32 reserved; +}; + +enum iwl_nvm_info_general_flags { + NVM_GENERAL_FLAGS_EMPTY_OTP = 1, +}; + +struct iwl_nvm_get_info_general { + __le32 flags; + __le16 nvm_version; + u8 board_type; + u8 n_hw_addrs; +}; + +enum iwl_nvm_mac_sku_flags { + NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = 1, + NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = 2, + NVM_MAC_SKU_FLAGS_802_11N_ENABLED = 4, + NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = 8, + NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = 16, + NVM_MAC_SKU_FLAGS_MIMO_DISABLED = 32, + NVM_MAC_SKU_FLAGS_WAPI_ENABLED = 256, + NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = 16384, + NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = 32768, +}; + +struct iwl_nvm_get_info_sku { + __le32 mac_sku_flags; +}; + +struct iwl_nvm_get_info_phy { + __le32 tx_chains; + __le32 rx_chains; +}; + +struct iwl_nvm_get_info_regulatory_v1 { + __le32 lar_enabled; + __le16 channel_profile[51]; + __le16 reserved; +}; + +struct iwl_nvm_get_info_regulatory { + __le32 lar_enabled; + __le32 n_channels; + __le32 channel_profile[110]; +}; + +struct iwl_nvm_get_info_rsp_v3 { + struct iwl_nvm_get_info_general general; + struct iwl_nvm_get_info_sku mac_sku; + struct iwl_nvm_get_info_phy phy_sku; + struct iwl_nvm_get_info_regulatory_v1 regulatory; +}; + +struct iwl_nvm_get_info_rsp { + struct iwl_nvm_get_info_general general; + struct iwl_nvm_get_info_sku mac_sku; + struct iwl_nvm_get_info_phy phy_sku; + struct iwl_nvm_get_info_regulatory regulatory; +}; + +enum iwl_geo_information { + GEO_NO_INFO = 0, + GEO_WMM_ETSI_5GHZ_INFO = 1, +}; + +enum nvm_offsets { + SUBSYSTEM_ID = 10, + HW_ADDR = 21, + NVM_SW_SECTION = 448, + NVM_VERSION = 0, + RADIO_CFG = 1, + SKU = 2, + N_HW_ADDRS = 3, + NVM_CHANNELS = 32, + NVM_CHANNELS_SDP = 0, +}; + +enum ext_nvm_offsets { + MAC_ADDRESS_OVERRIDE_EXT_NVM = 1, + NVM_VERSION_EXT_NVM = 0, + N_HW_ADDRS_FAMILY_8000 = 3, + RADIO_CFG_FAMILY_EXT_NVM = 0, + SKU_FAMILY_8000 = 2, + NVM_CHANNELS_EXTENDED = 0, + NVM_LAR_OFFSET_OLD = 1223, + NVM_LAR_OFFSET = 1287, + NVM_LAR_ENABLED = 7, +}; + +enum nvm_sku_bits { + NVM_SKU_CAP_BAND_24GHZ = 1, + NVM_SKU_CAP_BAND_52GHZ = 2, + NVM_SKU_CAP_11N_ENABLE = 4, + NVM_SKU_CAP_11AC_ENABLE = 8, + NVM_SKU_CAP_MIMO_DISABLE = 32, +}; + +enum iwl_nvm_channel_flags { + NVM_CHANNEL_VALID = 1, + NVM_CHANNEL_IBSS = 2, + NVM_CHANNEL_ACTIVE = 8, + NVM_CHANNEL_RADAR = 16, + NVM_CHANNEL_INDOOR_ONLY = 32, + NVM_CHANNEL_GO_CONCURRENT = 64, + NVM_CHANNEL_UNIFORM = 128, + NVM_CHANNEL_20MHZ = 256, + NVM_CHANNEL_40MHZ = 512, + NVM_CHANNEL_80MHZ = 1024, + NVM_CHANNEL_160MHZ = 2048, + NVM_CHANNEL_DC_HIGH = 4096, + NVM_CHANNEL_VLP = 8192, + NVM_CHANNEL_AFC = 16384, +}; + +enum iwl_reg_capa_flags_v1 { + REG_CAPA_V1_BF_CCD_LOW_BAND = 1, + REG_CAPA_V1_BF_CCD_HIGH_BAND = 2, + REG_CAPA_V1_160MHZ_ALLOWED = 4, + REG_CAPA_V1_80MHZ_ALLOWED = 8, + REG_CAPA_V1_MCS_8_ALLOWED = 16, + REG_CAPA_V1_MCS_9_ALLOWED = 32, + REG_CAPA_V1_40MHZ_FORBIDDEN = 128, + REG_CAPA_V1_DC_HIGH_ENABLED = 512, + REG_CAPA_V1_11AX_DISABLED = 1024, +}; + +enum iwl_reg_capa_flags_v2 { + 
REG_CAPA_V2_STRADDLE_DISABLED = 1, + REG_CAPA_V2_BF_CCD_LOW_BAND = 2, + REG_CAPA_V2_BF_CCD_HIGH_BAND = 4, + REG_CAPA_V2_160MHZ_ALLOWED = 8, + REG_CAPA_V2_80MHZ_ALLOWED = 16, + REG_CAPA_V2_MCS_8_ALLOWED = 32, + REG_CAPA_V2_MCS_9_ALLOWED = 64, + REG_CAPA_V2_WEATHER_DISABLED = 128, + REG_CAPA_V2_40MHZ_ALLOWED = 256, + REG_CAPA_V2_11AX_DISABLED = 1024, +}; + +enum iwl_reg_capa_flags_v4 { + REG_CAPA_V4_160MHZ_ALLOWED = 8, + REG_CAPA_V4_80MHZ_ALLOWED = 16, + REG_CAPA_V4_MCS_12_ALLOWED = 32, + REG_CAPA_V4_MCS_13_ALLOWED = 64, + REG_CAPA_V4_11BE_DISABLED = 256, + REG_CAPA_V4_11AX_DISABLED = 8192, + REG_CAPA_V4_320MHZ_ALLOWED = 65536, +}; + +struct iwl_reg_capa { + bool allow_40mhz; + bool allow_80mhz; + bool allow_160mhz; + bool allow_320mhz; + bool disable_11ax; + bool disable_11be; +}; + +struct iwl_scan_channel { + __le32 type; + __le16 channel; + u8 tx_gain; + u8 dsp_atten; + __le16 active_dwell; + __le16 passive_dwell; +}; + +struct iwl_scan_cmd { + __le16 len; + u8 scan_flags; + u8 channel_count; + __le16 quiet_time; + __le16 quiet_plcp_th; + __le16 good_CRC_th; + __le16 rx_chain; + __le32 max_out_time; + __le32 suspend_time; + __le32 flags; + __le32 filter_flags; + struct iwl_tx_cmd tx_cmd; + struct iwl_ssid_ie direct_scan[20]; + u8 data[0]; +}; + +struct iwl_scanstart_notification { + __le32 tsf_low; + __le32 tsf_high; + __le32 beacon_timer; + u8 channel; + u8 band; + u8 reserved[2]; + __le32 status; +}; + +struct iwl_scancomplete_notification { + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; +}; + +enum iwl_synced_time_operation { + IWL_SYNCED_TIME_OPERATION_READ_ARTB = 1, + IWL_SYNCED_TIME_OPERATION_READ_GP2 = 2, + IWL_SYNCED_TIME_OPERATION_READ_BOTH = 3, +}; + +struct iwl_synced_time_cmd { + __le32 operation; +}; + +struct iwl_synced_time_rsp { + __le32 operation; + __le32 platform_timestamp_hi; + __le32 platform_timestamp_lo; + __le32 gp2_timestamp_hi; + __le32 gp2_timestamp_lo; +}; + +struct ignore_entry { + u16 vid; + u16 pid; + u16 bcdmin; + u16 bcdmax; +}; + +struct serial_info { + struct rb_node node; + long: 32; + sector_t start; + sector_t last; + sector_t _subtree_last; +}; + +struct raid1_info { + struct md_rdev *rdev; + long: 32; + sector_t head_position; + sector_t next_seq_sect; + sector_t seq_start; +}; + +struct pool_info { + struct mddev *mddev; + int raid_disks; +}; + +struct r1conf { + struct mddev *mddev; + struct raid1_info *mirrors; + int raid_disks; + int nonrot_disks; + spinlock_t device_lock; + struct list_head retry_list; + struct list_head bio_end_io_list; + struct bio_list pending_bio_list; + wait_queue_head_t wait_barrier; + spinlock_t resync_lock; + atomic_t nr_sync_pending; + atomic_t *nr_pending; + atomic_t *nr_waiting; + atomic_t *nr_queued; + atomic_t *barrier; + int array_frozen; + int fullsync; + int recovery_disabled; + struct pool_info *poolinfo; + mempool_t r1bio_pool; + mempool_t r1buf_pool; + struct bio_set bio_split; + struct page *tmppage; + struct md_thread *thread; + sector_t cluster_sync_low; + sector_t cluster_sync_high; +}; + +struct r1bio { + atomic_t remaining; + atomic_t behind_remaining; + sector_t sector; + int sectors; + long unsigned int state; + struct mddev *mddev; + struct bio *master_bio; + int read_disk; + struct list_head retry_list; + struct bio *behind_master_bio; + struct bio *bios[0]; +}; + +enum r1bio_state { + R1BIO_Uptodate = 0, + R1BIO_IsSync = 1, + R1BIO_Degraded = 2, + R1BIO_BehindIO = 3, + R1BIO_ReadError = 4, + R1BIO_Returned = 5, + R1BIO_MadeGood = 6, + 
R1BIO_WriteError = 7, + R1BIO_FailFast = 8, +}; + +struct read_balance_ctl { + sector_t closest_dist; + int closest_dist_disk; + int min_pending; + int min_pending_disk; + int sequential_disk; + int readable_disks; + long: 32; +}; + +struct quirks_list_struct { + struct hid_device_id hid_bl_item; + struct list_head node; +}; + +enum tcx_action_base { + TCX_NEXT = -1, + TCX_PASS = 0, + TCX_DROP = 2, + TCX_REDIRECT = 7, +}; + +struct ifbond { + __s32 bond_mode; + __s32 num_slaves; + __s32 miimon; +}; + +typedef struct ifbond ifbond; + +struct ifslave { + __s32 slave_id; + char slave_name[16]; + __s8 link; + __s8 state; + __u32 link_failure_count; +}; + +typedef struct ifslave ifslave; + +enum { + NAPIF_STATE_SCHED = 1, + NAPIF_STATE_MISSED = 2, + NAPIF_STATE_DISABLE = 4, + NAPIF_STATE_NPSVC = 8, + NAPIF_STATE_LISTED = 16, + NAPIF_STATE_NO_BUSY_POLL = 32, + NAPIF_STATE_IN_BUSY_POLL = 64, + NAPIF_STATE_PREFER_BUSY_POLL = 128, + NAPIF_STATE_THREADED = 256, + NAPIF_STATE_SCHED_THREADED = 512, +}; + +struct net_device_path_stack { + int num_paths; + struct net_device_path path[5]; +}; + +struct bpf_xdp_link { + struct bpf_link link; + struct net_device *dev; + int flags; +}; + +struct netdev_net_notifier { + struct list_head list; + struct notifier_block *nb; +}; + +struct cpu_rmap { + struct kref refcount; + u16 size; + void **obj; + struct { + u16 index; + u16 dist; + } near[0]; +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; + void *lower_state_info; +}; + +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; + const unsigned char *dev_addr; +}; + +struct netdev_notifier_offload_xstats_rd { + struct rtnl_hw_stats64 stats; + bool used; + long: 32; +}; + +struct netdev_notifier_offload_xstats_ru { + bool used; +}; + +struct netdev_notifier_offload_xstats_info { + struct netdev_notifier_info info; + enum netdev_offload_xstats_type type; + union { + struct netdev_notifier_offload_xstats_rd *report_delta; + struct netdev_notifier_offload_xstats_ru *report_used; + }; +}; + +enum { + NESTED_SYNC_IMM_BIT = 0, + NESTED_SYNC_TODO_BIT = 1, +}; + +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; + struct netdev_bonding_info bonding_info; +}; + +enum qdisc_state2_t { + __QDISC_STATE2_RUNNING = 0, +}; + +enum { + SCM_TSTAMP_SND = 0, + SCM_TSTAMP_SCHED = 1, + SCM_TSTAMP_ACK = 2, +}; + +struct rps_map { + unsigned int len; + struct callback_head rcu; + u16 cpus[0]; +}; + +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; + +struct rps_dev_flow_table { + unsigned int mask; + struct callback_head rcu; + struct rps_dev_flow flows[0]; +}; + +typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *); + +struct dev_kfree_skb_cb { + enum skb_drop_reason reason; +}; + +enum { + NAPI_F_PREFER_BUSY_POLL = 1, + NAPI_F_END_ON_RESCHED = 2, +}; + +struct netdev_adjacent { + struct net_device *dev; + netdevice_tracker dev_tracker; + bool master; + bool ignore; + u16 ref_nr; + void *private; + struct list_head list; + struct callback_head rcu; +}; + +struct ethtool_cmis_cdb { + u8 cmis_rev; + u8 read_write_len_ext; + u16 max_completion_time; +}; + +enum ethtool_cmis_cdb_cmd_id { + ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS = 0, + ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES = 64, + ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 65, + ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 257, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 259, + 
ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL = 260, + ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 263, + ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 265, + ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 266, +}; + +struct ethtool_cmis_cdb_request { + __be16 id; + union { + struct { + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[120]; + }; + struct { + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[120]; + } body; + }; + u8 *epl; +}; + +struct ethtool_cmis_cdb_cmd_args { + struct ethtool_cmis_cdb_request req; + u16 max_duration; + u8 read_write_len_ext; + u8 msleep_pre_rpl; + u8 rpl_exp_len; + u8 flags; + char *err_msg; +}; + +struct ethtool_cmis_cdb_rpl_hdr { + u8 rpl_len; + u8 rpl_chk_code; +}; + +struct ethtool_cmis_cdb_rpl { + struct ethtool_cmis_cdb_rpl_hdr hdr; + u8 payload[120]; +}; + +struct cmis_rev_rpl { + u8 rev; +}; + +struct cmis_cdb_advert_rpl { + u8 inst_supported; + u8 read_write_len_ext; + u8 resv1; + u8 resv2; +}; + +struct cmis_password_entry_pl { + __be32 password; +}; + +struct cmis_cdb_query_status_pl { + u16 response_delay; +}; + +struct cmis_cdb_query_status_rpl { + u8 length; + u8 status; +}; + +struct cmis_cdb_module_features_rpl { + u8 resv1[34]; + __be16 max_completion_time; +}; + +struct cmis_wait_for_cond_rpl { + u8 state; +}; + +enum nft_lookup_flags { + NFT_LOOKUP_F_INV = 1, +}; + +enum nft_lookup_attributes { + NFTA_LOOKUP_UNSPEC = 0, + NFTA_LOOKUP_SET = 1, + NFTA_LOOKUP_SREG = 2, + NFTA_LOOKUP_DREG = 3, + NFTA_LOOKUP_SET_ID = 4, + NFTA_LOOKUP_FLAGS = 5, + __NFTA_LOOKUP_MAX = 6, +}; + +struct nft_lookup { + struct nft_set *set; + u8 sreg; + u8 dreg; + bool dreg_set; + bool invert; + struct nft_set_binding binding; +}; + +enum { + IFLA_INET_UNSPEC = 0, + IFLA_INET_CONF = 1, + __IFLA_INET_MAX = 2, +}; + +struct in_validator_info { + __be32 ivi_addr; + struct in_device *ivi_dev; + struct netlink_ext_ack *extack; +}; + +struct inet_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; +}; + +struct devinet_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table devinet_vars[33]; +}; + +enum { + UDP_BPF_IPV4 = 0, + UDP_BPF_IPV6 = 1, + UDP_BPF_NUM_PROTS = 2, +}; + +struct switchdev_brport { + struct net_device *dev; + const void *ctx; + struct notifier_block *atomic_nb; + struct notifier_block *blocking_nb; + bool tx_fwd_offload; +}; + +enum switchdev_notifier_type { + SWITCHDEV_FDB_ADD_TO_BRIDGE = 1, + SWITCHDEV_FDB_DEL_TO_BRIDGE = 2, + SWITCHDEV_FDB_ADD_TO_DEVICE = 3, + SWITCHDEV_FDB_DEL_TO_DEVICE = 4, + SWITCHDEV_FDB_OFFLOADED = 5, + SWITCHDEV_FDB_FLUSH_TO_BRIDGE = 6, + SWITCHDEV_PORT_OBJ_ADD = 7, + SWITCHDEV_PORT_OBJ_DEL = 8, + SWITCHDEV_PORT_ATTR_SET = 9, + SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE = 10, + SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE = 11, + SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE = 12, + SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE = 13, + SWITCHDEV_VXLAN_FDB_OFFLOADED = 14, + SWITCHDEV_BRPORT_OFFLOADED = 15, + SWITCHDEV_BRPORT_UNOFFLOADED = 16, + SWITCHDEV_BRPORT_REPLAY = 17, +}; + +struct switchdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; + const void *ctx; +}; + +struct switchdev_notifier_fdb_info { + struct switchdev_notifier_info info; + const unsigned char *addr; + u16 vid; + u8 added_by_user: 1; + u8 is_local: 1; + u8 locked: 1; + u8 offloaded: 1; +}; + +struct switchdev_notifier_brport_info { + struct switchdev_notifier_info info; + const struct switchdev_brport brport; +}; + +enum br_boolopt_id { + 
BR_BOOLOPT_NO_LL_LEARN = 0, + BR_BOOLOPT_MCAST_VLAN_SNOOPING = 1, + BR_BOOLOPT_MST_ENABLE = 2, + BR_BOOLOPT_MAX = 3, +}; + +struct cfg80211_conn { + struct cfg80211_connect_params params; + enum { + CFG80211_CONN_SCANNING = 0, + CFG80211_CONN_SCAN_AGAIN = 1, + CFG80211_CONN_AUTHENTICATE_NEXT = 2, + CFG80211_CONN_AUTHENTICATING = 3, + CFG80211_CONN_AUTH_FAILED_TIMEOUT = 4, + CFG80211_CONN_ASSOCIATE_NEXT = 5, + CFG80211_CONN_ASSOCIATING = 6, + CFG80211_CONN_ASSOC_FAILED = 7, + CFG80211_CONN_ASSOC_FAILED_TIMEOUT = 8, + CFG80211_CONN_DEAUTH = 9, + CFG80211_CONN_ABANDON = 10, + CFG80211_CONN_CONNECTED = 11, + } state; + u8 bssid[6]; + u8 prev_bssid[6]; + const u8 *ie; + size_t ie_len; + bool auto_auth; + bool prev_bssid_valid; +}; + +enum cfg80211_event_type { + EVENT_CONNECT_RESULT = 0, + EVENT_ROAMED = 1, + EVENT_DISCONNECTED = 2, + EVENT_IBSS_JOINED = 3, + EVENT_STOPPED = 4, + EVENT_PORT_AUTHORIZED = 5, +}; + +struct cfg80211_event { + struct list_head list; + enum cfg80211_event_type type; + union { + struct cfg80211_connect_resp_params cr; + struct cfg80211_roam_info rm; + struct { + const u8 *ie; + size_t ie_len; + u16 reason; + bool locally_generated; + } dc; + struct { + u8 bssid[6]; + struct ieee80211_channel *channel; + } ij; + struct { + u8 peer_addr[6]; + const u8 *td_bitmap; + u8 td_bitmap_len; + } pa; + }; +}; + +enum perf_event_arm_regs { + PERF_REG_ARM_R0 = 0, + PERF_REG_ARM_R1 = 1, + PERF_REG_ARM_R2 = 2, + PERF_REG_ARM_R3 = 3, + PERF_REG_ARM_R4 = 4, + PERF_REG_ARM_R5 = 5, + PERF_REG_ARM_R6 = 6, + PERF_REG_ARM_R7 = 7, + PERF_REG_ARM_R8 = 8, + PERF_REG_ARM_R9 = 9, + PERF_REG_ARM_R10 = 10, + PERF_REG_ARM_FP = 11, + PERF_REG_ARM_IP = 12, + PERF_REG_ARM_SP = 13, + PERF_REG_ARM_LR = 14, + PERF_REG_ARM_PC = 15, + PERF_REG_ARM_MAX = 16, +}; + +struct section_perm { + const char *name; + long unsigned int start; + long unsigned int end; + pmdval_t mask; + pmdval_t prot; + pmdval_t clear; +}; + +struct clone_args { + __u64 flags; + __u64 pidfd; + __u64 child_tid; + __u64 parent_tid; + __u64 exit_signal; + __u64 stack; + __u64 stack_size; + __u64 tls; + __u64 set_tid; + __u64 set_tid_size; + __u64 cgroup; +}; + +struct multiprocess_signals { + sigset_t signal; + struct hlist_node node; +}; + +struct fd_range { + unsigned int from; + unsigned int to; +}; + +struct trace_event_raw_task_newtask { + struct trace_entry ent; + pid_t pid; + char comm[16]; + long unsigned int clone_flags; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_task_rename { + struct trace_entry ent; + pid_t pid; + char oldcomm[16]; + char newcomm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_data_offsets_task_newtask {}; + +struct trace_event_data_offsets_task_rename {}; + +typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, long unsigned int); + +typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *); + +struct vm_stack { + struct callback_head rcu; + struct vm_struct *stack_vm_area; +}; + +struct audit_context; + +struct linked_page { + struct linked_page *next; + char data[4092]; +}; + +struct chain_allocator { + struct linked_page *chain; + unsigned int used_space; + gfp_t gfp_mask; + int safe_needed; +}; + +struct rtree_node { + struct list_head list; + long unsigned int *data; +}; + +struct mem_zone_bm_rtree { + struct list_head list; + struct list_head nodes; + struct list_head leaves; + long unsigned int start_pfn; + long unsigned int end_pfn; + struct rtree_node *rtree; + int levels; + unsigned int blocks; +}; + 
+struct bm_position { + struct mem_zone_bm_rtree *zone; + struct rtree_node *node; + long unsigned int node_pfn; + long unsigned int cur_pfn; + int node_bit; +}; + +struct memory_bitmap { + struct list_head zones; + struct linked_page *p_list; + struct bm_position cur; +}; + +struct mem_extent { + struct list_head hook; + long unsigned int start; + long unsigned int end; +}; + +struct nosave_region { + struct list_head list; + long unsigned int start_pfn; + long unsigned int end_pfn; +}; + +struct trace_event_raw_timer_class { + struct trace_entry ent; + void *timer; + char __data[0]; +}; + +struct trace_event_raw_timer_start { + struct trace_entry ent; + void *timer; + void *function; + long unsigned int expires; + long unsigned int bucket_expiry; + long unsigned int now; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_timer_expire_entry { + struct trace_entry ent; + void *timer; + long unsigned int now; + void *function; + long unsigned int baseclk; + char __data[0]; +}; + +struct trace_event_raw_timer_base_idle { + struct trace_entry ent; + bool is_idle; + unsigned int cpu; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_init { + struct trace_entry ent; + void *hrtimer; + clockid_t clockid; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_start { + struct trace_entry ent; + void *hrtimer; + void *function; + s64 expires; + s64 softexpires; + enum hrtimer_mode mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_hrtimer_expire_entry { + struct trace_entry ent; + void *hrtimer; + long: 32; + s64 now; + void *function; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_hrtimer_class { + struct trace_entry ent; + void *hrtimer; + char __data[0]; +}; + +struct trace_event_raw_itimer_state { + struct trace_entry ent; + int which; + long: 32; + long long unsigned int expires; + long int value_sec; + long int value_nsec; + long int interval_sec; + long int interval_nsec; + char __data[0]; +}; + +struct trace_event_raw_itimer_expire { + struct trace_entry ent; + int which; + pid_t pid; + long long unsigned int now; + char __data[0]; +}; + +struct trace_event_raw_tick_stop { + struct trace_entry ent; + int success; + int dependency; + char __data[0]; +}; + +struct trace_event_data_offsets_timer_class {}; + +struct trace_event_data_offsets_timer_start {}; + +struct trace_event_data_offsets_timer_expire_entry {}; + +struct trace_event_data_offsets_timer_base_idle {}; + +struct trace_event_data_offsets_hrtimer_init {}; + +struct trace_event_data_offsets_hrtimer_start {}; + +struct trace_event_data_offsets_hrtimer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_class {}; + +struct trace_event_data_offsets_itimer_state {}; + +struct trace_event_data_offsets_itimer_expire {}; + +struct trace_event_data_offsets_tick_stop {}; + +typedef void (*btf_trace_timer_init)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_start)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_entry)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_cancel)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_base_idle)(void *, bool, unsigned int); + +typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum hrtimer_mode); + +typedef void 
(*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *); + +typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *); + +typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *); + +typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 * const, long long unsigned int); + +typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, long long unsigned int); + +typedef void (*btf_trace_tick_stop)(void *, int, int); + +struct timer_base { + raw_spinlock_t lock; + struct timer_list *running_timer; + long unsigned int clk; + long unsigned int next_expiry; + unsigned int cpu; + bool next_expiry_recalc; + bool is_idle; + bool timers_pending; + long unsigned int pending_map[18]; + struct hlist_head vectors[576]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct fmeter { + int cnt; + int val; + time64_t time; + spinlock_t lock; + long: 32; +}; + +enum prs_errcode { + PERR_NONE = 0, + PERR_INVCPUS = 1, + PERR_INVPARENT = 2, + PERR_NOTPART = 3, + PERR_NOTEXCL = 4, + PERR_NOCPUS = 5, + PERR_HOTPLUG = 6, + PERR_CPUSEMPTY = 7, + PERR_HKEEPING = 8, + PERR_ACCESS = 9, +}; + +typedef enum { + CS_ONLINE = 0, + CS_CPU_EXCLUSIVE = 1, + CS_MEM_EXCLUSIVE = 2, + CS_MEM_HARDWALL = 3, + CS_MEMORY_MIGRATE = 4, + CS_SCHED_LOAD_BALANCE = 5, + CS_SPREAD_PAGE = 6, + CS_SPREAD_SLAB = 7, +} cpuset_flagbits_t; + +typedef enum { + FILE_MEMORY_MIGRATE = 0, + FILE_CPULIST = 1, + FILE_MEMLIST = 2, + FILE_EFFECTIVE_CPULIST = 3, + FILE_EFFECTIVE_MEMLIST = 4, + FILE_SUBPARTS_CPULIST = 5, + FILE_EXCLUSIVE_CPULIST = 6, + FILE_EFFECTIVE_XCPULIST = 7, + FILE_ISOLATED_CPULIST = 8, + FILE_CPU_EXCLUSIVE = 9, + FILE_MEM_EXCLUSIVE = 10, + FILE_MEM_HARDWALL = 11, + FILE_SCHED_LOAD_BALANCE = 12, + FILE_PARTITION_ROOT = 13, + FILE_SCHED_RELAX_DOMAIN_LEVEL = 14, + FILE_MEMORY_PRESSURE_ENABLED = 15, + FILE_MEMORY_PRESSURE = 16, + FILE_SPREAD_PAGE = 17, + FILE_SPREAD_SLAB = 18, +} cpuset_filetype_t; + +struct cpuset { + struct cgroup_subsys_state css; + long unsigned int flags; + cpumask_var_t cpus_allowed; + nodemask_t mems_allowed; + cpumask_var_t effective_cpus; + nodemask_t effective_mems; + cpumask_var_t effective_xcpus; + cpumask_var_t exclusive_cpus; + nodemask_t old_mems_allowed; + struct fmeter fmeter; + int attach_in_progress; + int relax_domain_level; + int nr_subparts; + int partition_root_state; + int nr_deadline_tasks; + int nr_migrate_dl_tasks; + u64 sum_migrate_dl_bw; + enum prs_errcode prs_err; + struct cgroup_file partition_file; + struct list_head remote_sibling; + struct uf_node node; +}; + +struct tmpmasks { + cpumask_var_t addmask; + cpumask_var_t delmask; + cpumask_var_t new_cpus; +}; + +enum partition_cmd { + partcmd_enable = 0, + partcmd_enablei = 1, + partcmd_disable = 2, + partcmd_update = 3, + partcmd_invalidate = 4, +}; + +struct cpuset_migrate_mm_work { + struct work_struct work; + struct mm_struct *mm; + nodemask_t from; + nodemask_t to; +}; + +enum error_detector { + ERROR_DETECTOR_KFENCE = 0, + ERROR_DETECTOR_KASAN = 1, + ERROR_DETECTOR_WARN = 2, +}; + +struct trace_event_raw_error_report_template { + struct trace_entry ent; + enum error_detector error_detector; + long unsigned int id; + char __data[0]; +}; + +struct trace_event_data_offsets_error_report_template {}; + +typedef void (*btf_trace_error_report_end)(void *, enum error_detector, long unsigned int); + +enum bpf_stack_slot_type { + STACK_INVALID = 0, + STACK_SPILL = 1, + STACK_MISC = 2, + STACK_ZERO = 3, + STACK_DYNPTR = 4, 
+ STACK_ITER = 5, +}; + +struct btf_array { + __u32 type; + __u32 index_type; + __u32 nelems; +}; + +enum { + BTF_VAR_STATIC = 0, + BTF_VAR_GLOBAL_ALLOCATED = 1, + BTF_VAR_GLOBAL_EXTERN = 2, +}; + +enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + BTF_FUNC_EXTERN = 2, +}; + +struct btf_var { + __u32 linkage; +}; + +struct btf_var_secinfo { + __u32 type; + __u32 offset; + __u32 size; +}; + +struct btf_decl_tag { + __s32 component_idx; +}; + +struct btf_enum64 { + __u32 name_off; + __u32 val_lo32; + __u32 val_hi32; +}; + +struct bpf_btf_info { + __u64 btf; + __u32 btf_size; + __u32 id; + __u64 name; + __u32 name_len; + __u32 kernel_btf; +}; + +struct bpf_cgroup_dev_ctx { + __u32 access_type; + __u32 major; + __u32 minor; +}; + +struct bpf_raw_tracepoint_args { + __u64 args[0]; +}; + +struct bpf_sysctl { + __u32 write; + __u32 file_pos; +}; + +struct bpf_sockopt { + union { + struct bpf_sock *sk; + }; + union { + void *optval; + }; + union { + void *optval_end; + }; + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + +enum { + BTF_F_COMPACT = 1, + BTF_F_NONAME = 2, + BTF_F_PTR_RAW = 4, + BTF_F_ZERO = 8, +}; + +enum bpf_core_relo_kind { + BPF_CORE_FIELD_BYTE_OFFSET = 0, + BPF_CORE_FIELD_BYTE_SIZE = 1, + BPF_CORE_FIELD_EXISTS = 2, + BPF_CORE_FIELD_SIGNED = 3, + BPF_CORE_FIELD_LSHIFT_U64 = 4, + BPF_CORE_FIELD_RSHIFT_U64 = 5, + BPF_CORE_TYPE_ID_LOCAL = 6, + BPF_CORE_TYPE_ID_TARGET = 7, + BPF_CORE_TYPE_EXISTS = 8, + BPF_CORE_TYPE_SIZE = 9, + BPF_CORE_ENUMVAL_EXISTS = 10, + BPF_CORE_ENUMVAL_VALUE = 11, + BPF_CORE_TYPE_MATCHES = 12, +}; + +struct bpf_core_relo { + __u32 insn_off; + __u32 type_id; + __u32 access_str_off; + enum bpf_core_relo_kind kind; +}; + +struct bpf_perf_event_data { + bpf_user_pt_regs_t regs; + __u64 sample_period; + __u64 addr; +}; + +struct btf_id_set { + u32 cnt; + u32 ids[0]; +}; + +struct btf_struct_metas { + u32 cnt; + struct btf_struct_meta types[0]; +}; + +enum { + BTF_FIELDS_MAX = 11, +}; + +struct bpf_core_ctx { + struct bpf_verifier_log *log; + const struct btf *btf; +}; + +struct bpf_sysctl_kern { + struct ctl_table_header *head; + const struct ctl_table *table; + void *cur_val; + size_t cur_len; + void *new_val; + size_t new_len; + int new_updated; + int write; + loff_t *ppos; + long: 32; + u64 tmp_reg; +}; + +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + struct task_struct *current_task; + long: 32; + u64 tmp_reg; +}; + +struct bpf_core_cand { + const struct btf *btf; + __u32 id; +}; + +struct bpf_core_cand_list { + struct bpf_core_cand *cands; + int len; +}; + +struct bpf_core_accessor { + __u32 type_id; + __u32 idx; + const char *name; +}; + +struct bpf_core_spec { + const struct btf *btf; + struct bpf_core_accessor spec[64]; + __u32 root_type_id; + enum bpf_core_relo_kind relo_kind; + int len; + int raw_spec[64]; + int raw_len; + __u32 bit_offset; +}; + +struct bpf_core_relo_res { + __u64 orig_val; + __u64 new_val; + bool poison; + bool validate; + bool fail_memsz_adjust; + __u32 orig_sz; + __u32 orig_type_id; + __u32 new_sz; + __u32 new_type_id; + long: 32; +}; + +enum btf_kfunc_hook { + BTF_KFUNC_HOOK_COMMON = 0, + BTF_KFUNC_HOOK_XDP = 1, + BTF_KFUNC_HOOK_TC = 2, + BTF_KFUNC_HOOK_STRUCT_OPS = 3, + BTF_KFUNC_HOOK_TRACING = 4, + BTF_KFUNC_HOOK_SYSCALL = 5, + BTF_KFUNC_HOOK_FMODRET = 6, + BTF_KFUNC_HOOK_CGROUP = 7, + BTF_KFUNC_HOOK_SCHED_ACT = 8, + BTF_KFUNC_HOOK_SK_SKB = 9, + BTF_KFUNC_HOOK_SOCKET_FILTER = 10, + BTF_KFUNC_HOOK_LWT = 11, + 
BTF_KFUNC_HOOK_NETFILTER = 12, + BTF_KFUNC_HOOK_KPROBE = 13, + BTF_KFUNC_HOOK_MAX = 14, +}; + +enum { + BTF_KFUNC_SET_MAX_CNT = 256, + BTF_DTOR_KFUNC_MAX_CNT = 256, + BTF_KFUNC_FILTER_MAX_CNT = 16, +}; + +struct btf_kfunc_hook_filter { + btf_kfunc_filter_t filters[16]; + u32 nr_filters; +}; + +struct btf_kfunc_set_tab { + struct btf_id_set8 *sets[14]; + struct btf_kfunc_hook_filter hook_filters[14]; +}; + +struct btf_id_dtor_kfunc_tab { + u32 cnt; + struct btf_id_dtor_kfunc dtors[0]; +}; + +struct btf_struct_ops_tab { + u32 cnt; + u32 capacity; + struct bpf_struct_ops_desc ops[0]; +}; + +enum verifier_phase { + CHECK_META = 0, + CHECK_TYPE = 1, +}; + +struct resolve_vertex { + const struct btf_type *t; + u32 type_id; + u16 next_member; +}; + +enum visit_state { + NOT_VISITED = 0, + VISITED = 1, + RESOLVED = 2, +}; + +enum resolve_mode { + RESOLVE_TBD = 0, + RESOLVE_PTR = 1, + RESOLVE_STRUCT_OR_ARRAY = 2, +}; + +struct btf_sec_info { + u32 off; + u32 len; +}; + +struct btf_verifier_env { + struct btf *btf; + u8 *visit_states; + struct resolve_vertex stack[32]; + struct bpf_verifier_log log; + u32 log_type_id; + u32 top_stack; + enum verifier_phase phase; + enum resolve_mode resolve_mode; +}; + +struct btf_show { + u64 flags; + void *target; + void (*showfn)(struct btf_show *, const char *, va_list); + const struct btf *btf; + struct { + u8 depth; + u8 depth_to_show; + u8 depth_check; + u8 array_member: 1; + u8 array_terminated: 1; + u16 array_encoding; + u32 type_id; + int status; + const struct btf_type *type; + const struct btf_member *member; + char name[80]; + } state; + struct { + u32 size; + void *head; + void *data; + u8 safe[32]; + } obj; +}; + +struct btf_kind_operations { + s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); + int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); + int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + void (*log_details)(struct btf_verifier_env *, const struct btf_type *); + void (*show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct btf_show *); +}; + +enum { + BTF_FIELD_IGNORE = 0, + BTF_FIELD_FOUND = 1, +}; + +struct btf_field_info { + enum btf_field_type type; + u32 off; + union { + struct { + u32 type_id; + } kptr; + struct { + const char *node_name; + u32 value_btf_id; + } graph_root; + }; +}; + +struct bpf_ctx_convert { + struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog; + struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern; + struct xdp_md BPF_PROG_TYPE_XDP_prog; + struct xdp_buff BPF_PROG_TYPE_XDP_kern; + struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog; + struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern; + struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog; + struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern; + struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog; + struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog; + struct sk_buff BPF_PROG_TYPE_LWT_IN_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern; 
+ struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog; + struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern; + struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog; + struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern; + struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog; + struct sk_buff BPF_PROG_TYPE_SK_SKB_kern; + struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog; + struct sk_msg BPF_PROG_TYPE_SK_MSG_kern; + struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog; + struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern; + bpf_user_pt_regs_t BPF_PROG_TYPE_KPROBE_prog; + struct pt_regs BPF_PROG_TYPE_KPROBE_kern; + __u64 BPF_PROG_TYPE_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_TRACEPOINT_kern; + struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog; + struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern; + long: 32; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern; + void *BPF_PROG_TYPE_TRACING_prog; + void *BPF_PROG_TYPE_TRACING_kern; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern; + struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog; + struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern; + struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog; + struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern; + struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog; + struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern; + struct bpf_sk_lookup BPF_PROG_TYPE_SK_LOOKUP_prog; + struct bpf_sk_lookup_kern BPF_PROG_TYPE_SK_LOOKUP_kern; + void *BPF_PROG_TYPE_STRUCT_OPS_prog; + void *BPF_PROG_TYPE_STRUCT_OPS_kern; + void *BPF_PROG_TYPE_EXT_prog; + void *BPF_PROG_TYPE_EXT_kern; + void *BPF_PROG_TYPE_SYSCALL_prog; + void *BPF_PROG_TYPE_SYSCALL_kern; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_prog; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_kern; + long: 32; +}; + +enum { + __ctx_convertBPF_PROG_TYPE_SOCKET_FILTER = 0, + __ctx_convertBPF_PROG_TYPE_SCHED_CLS = 1, + __ctx_convertBPF_PROG_TYPE_SCHED_ACT = 2, + __ctx_convertBPF_PROG_TYPE_XDP = 3, + __ctx_convertBPF_PROG_TYPE_CGROUP_SKB = 4, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK = 5, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK_ADDR = 6, + __ctx_convertBPF_PROG_TYPE_LWT_IN = 7, + __ctx_convertBPF_PROG_TYPE_LWT_OUT = 8, + __ctx_convertBPF_PROG_TYPE_LWT_XMIT = 9, + __ctx_convertBPF_PROG_TYPE_LWT_SEG6LOCAL = 10, + __ctx_convertBPF_PROG_TYPE_SOCK_OPS = 11, + __ctx_convertBPF_PROG_TYPE_SK_SKB = 12, + __ctx_convertBPF_PROG_TYPE_SK_MSG = 13, + __ctx_convertBPF_PROG_TYPE_FLOW_DISSECTOR = 14, + __ctx_convertBPF_PROG_TYPE_KPROBE = 15, + __ctx_convertBPF_PROG_TYPE_TRACEPOINT = 16, + __ctx_convertBPF_PROG_TYPE_PERF_EVENT = 17, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT = 18, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 19, + __ctx_convertBPF_PROG_TYPE_TRACING = 20, + __ctx_convertBPF_PROG_TYPE_CGROUP_DEVICE = 21, + __ctx_convertBPF_PROG_TYPE_CGROUP_SYSCTL = 22, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCKOPT = 23, + __ctx_convertBPF_PROG_TYPE_SK_REUSEPORT = 24, + __ctx_convertBPF_PROG_TYPE_SK_LOOKUP = 25, + __ctx_convertBPF_PROG_TYPE_STRUCT_OPS = 26, + __ctx_convertBPF_PROG_TYPE_EXT = 27, + __ctx_convertBPF_PROG_TYPE_SYSCALL = 28, + __ctx_convertBPF_PROG_TYPE_NETFILTER = 29, + __ctx_convert_unused = 30, +}; + +enum bpf_struct_walk_result { + WALK_SCALAR = 0, + WALK_PTR = 1, + WALK_STRUCT = 2, +}; + +struct 
bpf_cand_cache { + const char *name; + u32 name_len; + u16 kind; + u16 cnt; + struct { + const struct btf *btf; + u32 id; + } cands[0]; +}; + +enum btf_arg_tag { + ARG_TAG_CTX = 1, + ARG_TAG_NONNULL = 2, + ARG_TAG_TRUSTED = 4, + ARG_TAG_NULLABLE = 8, + ARG_TAG_ARENA = 16, +}; + +struct btf_show_snprintf { + struct btf_show show; + int len_left; + int len; +}; + +enum { + BTF_MODULE_F_LIVE = 1, +}; + +struct btf_module { + struct list_head list; + struct module *module; + struct btf *btf; + struct bin_attribute *sysfs_attr; + int flags; +}; + +typedef u64 (*btf_bpf_btf_find_by_name_kind)(char *, int, u32, int); + +union thread_union { + struct task_struct task; + long unsigned int stack[4096]; +}; + +enum kasan_arg_fault { + KASAN_ARG_FAULT_DEFAULT = 0, + KASAN_ARG_FAULT_REPORT = 1, + KASAN_ARG_FAULT_PANIC = 2, + KASAN_ARG_FAULT_PANIC_ON_WRITE = 3, +}; + +typedef struct fd class_fd_pos_t; + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + short unsigned int d_reclen; + unsigned char d_type; + char d_name[0]; + long: 32; +}; + +struct linux_dirent { + long unsigned int d_ino; + long unsigned int d_off; + short unsigned int d_reclen; + char d_name[0]; +}; + +struct getdents_callback___2 { + struct dir_context ctx; + struct linux_dirent *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct getdents_callback64 { + struct dir_context ctx; + struct linux_dirent64 *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct kioctx; + +struct kioctx_table { + struct callback_head rcu; + unsigned int nr; + struct kioctx *table[0]; +}; + +typedef __kernel_ulong_t aio_context_t; + +enum { + IOCB_CMD_PREAD = 0, + IOCB_CMD_PWRITE = 1, + IOCB_CMD_FSYNC = 2, + IOCB_CMD_FDSYNC = 3, + IOCB_CMD_POLL = 5, + IOCB_CMD_NOOP = 6, + IOCB_CMD_PREADV = 7, + IOCB_CMD_PWRITEV = 8, +}; + +struct io_event { + __u64 data; + __u64 obj; + __s64 res; + __s64 res2; +}; + +struct iocb { + __u64 aio_data; + __u32 aio_key; + __kernel_rwf_t aio_rw_flags; + __u16 aio_lio_opcode; + __s16 aio_reqprio; + __u32 aio_fildes; + __u64 aio_buf; + __u64 aio_nbytes; + __s64 aio_offset; + __u64 aio_reserved2; + __u32 aio_flags; + __u32 aio_resfd; +}; + +typedef int kiocb_cancel_fn(struct kiocb *); + +struct aio_ring { + unsigned int id; + unsigned int nr; + unsigned int head; + unsigned int tail; + unsigned int magic; + unsigned int compat_features; + unsigned int incompat_features; + unsigned int header_length; + struct io_event io_events[0]; +}; + +struct kioctx_cpu; + +struct ctx_rq_wait; + +struct kioctx { + struct percpu_ref users; + atomic_t dead; + struct percpu_ref reqs; + long unsigned int user_id; + struct kioctx_cpu *cpu; + unsigned int req_batch; + unsigned int max_reqs; + unsigned int nr_events; + long unsigned int mmap_base; + long unsigned int mmap_size; + struct folio **ring_folios; + long int nr_pages; + struct rcu_work free_rwork; + struct ctx_rq_wait *rq_wait; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct { + atomic_t reqs_available; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + spinlock_t ctx_lock; + struct list_head active_reqs; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + struct mutex ring_lock; + wait_queue_head_t wait; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + unsigned int tail; + unsigned int completed_events; + spinlock_t completion_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct folio *internal_folios[8]; + struct file *aio_ring_file; + unsigned int id; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct kioctx_cpu { + unsigned int reqs_available; +}; + +struct ctx_rq_wait { + struct completion comp; + atomic_t count; +}; + +struct fsync_iocb { + struct file *file; + struct work_struct work; + bool datasync; + struct cred *creds; +}; + +struct poll_iocb { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + bool cancelled; + bool work_scheduled; + bool work_need_resched; + struct wait_queue_entry wait; + struct work_struct work; +}; + +struct aio_kiocb { + union { + struct file *ki_filp; + struct kiocb rw; + struct fsync_iocb fsync; + struct poll_iocb poll; + }; + struct kioctx *ki_ctx; + kiocb_cancel_fn *ki_cancel; + struct io_event ki_res; + struct list_head ki_list; + refcount_t ki_refcnt; + struct eventfd_ctx *ki_eventfd; +}; + +struct aio_waiter { + struct wait_queue_entry w; + size_t min_nr; +}; + +struct aio_poll_table { + struct poll_table_struct pt; + struct aio_kiocb *iocb; + bool queued; + int error; +}; + +struct __aio_sigset { + const sigset_t *sigmask; + size_t sigsetsize; +}; + +struct pending_reservation { + struct rb_node rb_node; + ext4_lblk_t lclu; +}; + +struct rsvd_count { + int ndelayed; + bool first_do_lblk_found; + ext4_lblk_t first_do_lblk; + ext4_lblk_t last_do_lblk; + struct extent_status *left_es; + bool partial; + ext4_lblk_t lclu; +}; + +struct ramfs_mount_opts { + umode_t mode; +}; + +struct ramfs_fs_info { + struct ramfs_mount_opts mount_opts; +}; + +enum ramfs_param { + Opt_mode___5 = 0, +}; + +enum fuse_notify_code { + FUSE_NOTIFY_POLL = 1, + FUSE_NOTIFY_INVAL_INODE = 2, + FUSE_NOTIFY_INVAL_ENTRY = 3, + FUSE_NOTIFY_STORE = 4, + FUSE_NOTIFY_RETRIEVE = 5, + FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_RESEND = 7, + FUSE_NOTIFY_CODE_MAX = 8, +}; + +struct fuse_forget_in { + uint64_t nlookup; +}; + +struct fuse_batch_forget_in { + uint32_t count; + uint32_t dummy; +}; + +struct fuse_interrupt_in { + uint64_t unique; +}; + +struct fuse_notify_poll_wakeup_out { + uint64_t kh; +}; + +struct fuse_notify_inval_inode_out { + uint64_t ino; + int64_t off; + int64_t len; +}; + +struct fuse_notify_inval_entry_out { + uint64_t parent; + uint32_t namelen; + uint32_t flags; +}; + +struct fuse_notify_delete_out { + uint64_t parent; + uint64_t child; + uint32_t namelen; + uint32_t padding; +}; + +struct fuse_notify_store_out { + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_out { + uint64_t notify_unique; + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_in { + uint64_t dummy1; + uint64_t offset; + uint32_t size; + uint32_t dummy2; + uint64_t dummy3; + uint64_t dummy4; +}; + +struct fuse_folio_desc { + unsigned int length; + unsigned int offset; +}; + +struct fuse_args_pages { + struct fuse_args args; + struct folio **folios; + struct fuse_folio_desc *descs; + unsigned int num_folios; + long: 32; +}; + +enum fuse_req_flag { + FR_ISREPLY = 0, + FR_FORCE = 1, + FR_BACKGROUND = 2, + FR_WAITING = 3, + FR_ABORTED = 4, + FR_INTERRUPTED = 
5, + FR_LOCKED = 6, + FR_PENDING = 7, + FR_SENT = 8, + FR_FINISHED = 9, + FR_PRIVATE = 10, + FR_ASYNC = 11, +}; + +struct fuse_pqueue { + unsigned int connected; + spinlock_t lock; + struct list_head *processing; + struct list_head io; +}; + +struct fuse_dev { + struct fuse_conn *fc; + struct fuse_pqueue pq; + struct list_head entry; +}; + +struct trace_event_raw_fuse_request_send { + struct trace_entry ent; + dev_t connection; + long: 32; + uint64_t unique; + enum fuse_opcode opcode; + uint32_t len; + char __data[0]; +}; + +struct trace_event_raw_fuse_request_end { + struct trace_entry ent; + dev_t connection; + long: 32; + uint64_t unique; + uint32_t len; + int32_t error; + char __data[0]; +}; + +struct trace_event_data_offsets_fuse_request_send {}; + +struct trace_event_data_offsets_fuse_request_end {}; + +typedef void (*btf_trace_fuse_request_send)(void *, const struct fuse_req *); + +typedef void (*btf_trace_fuse_request_end)(void *, const struct fuse_req *); + +struct fuse_copy_state { + int write; + struct fuse_req *req; + struct iov_iter *iter; + struct pipe_buffer *pipebufs; + struct pipe_buffer *currbuf; + struct pipe_inode_info *pipe; + long unsigned int nr_segs; + struct page *pg; + unsigned int len; + unsigned int offset; + unsigned int move_pages: 1; +}; + +struct fuse_retrieve_args { + struct fuse_args_pages ap; + struct fuse_notify_retrieve_in inarg; +}; + +struct btrfs_async_delayed_work { + struct btrfs_delayed_root *delayed_root; + int nr; + struct btrfs_work work; +}; + +struct sha3_state { + u64 st[25]; + unsigned int rsiz; + unsigned int rsizw; + unsigned int partial; + u8 buf[144]; + long: 32; +}; + +struct chksum_ctx { + u32 key; +}; + +struct chksum_desc_ctx { + u32 crc; +}; + +struct uuidcmp { + const char *uuid; + int len; +}; + +struct blk_stat_callback { + struct list_head list; + struct timer_list timer; + struct blk_rq_stat *cpu_stat; + int (*bucket_fn)(const struct request *); + unsigned int buckets; + struct blk_rq_stat *stat; + void (*timer_fn)(struct blk_stat_callback *); + void *data; + struct callback_head rcu; +}; + +struct trace_event_raw_wbt_stat { + struct trace_entry ent; + char name[32]; + s64 rmean; + u64 rmin; + u64 rmax; + s64 rnr_samples; + s64 rtime; + s64 wmean; + u64 wmin; + u64 wmax; + s64 wnr_samples; + s64 wtime; + char __data[0]; +}; + +struct trace_event_raw_wbt_lat { + struct trace_entry ent; + char name[32]; + long unsigned int lat; + char __data[0]; +}; + +struct trace_event_raw_wbt_step { + struct trace_entry ent; + char name[32]; + const char *msg; + int step; + long unsigned int window; + unsigned int bg; + unsigned int normal; + unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_wbt_timer { + struct trace_entry ent; + char name[32]; + unsigned int status; + int step; + unsigned int inflight; + char __data[0]; +}; + +struct trace_event_data_offsets_wbt_stat {}; + +struct trace_event_data_offsets_wbt_lat {}; + +struct trace_event_data_offsets_wbt_step {}; + +struct trace_event_data_offsets_wbt_timer {}; + +typedef void (*btf_trace_wbt_stat)(void *, struct backing_dev_info *, struct blk_rq_stat *); + +typedef void (*btf_trace_wbt_lat)(void *, struct backing_dev_info *, long unsigned int); + +typedef void (*btf_trace_wbt_step)(void *, struct backing_dev_info *, const char *, int, long unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_wbt_timer)(void *, struct backing_dev_info *, unsigned int, int, unsigned int); + +enum wbt_flags { + WBT_TRACKED = 1, + WBT_READ = 2, + WBT_SWAP = 4, + 
WBT_DISCARD = 8, + WBT_NR_BITS = 4, +}; + +enum { + WBT_RWQ_BG = 0, + WBT_RWQ_SWAP = 1, + WBT_RWQ_DISCARD = 2, + WBT_NUM_RWQ = 3, +}; + +enum { + WBT_STATE_ON_DEFAULT = 1, + WBT_STATE_ON_MANUAL = 2, + WBT_STATE_OFF_DEFAULT = 3, + WBT_STATE_OFF_MANUAL = 4, +}; + +struct rq_wb { + unsigned int wb_background; + unsigned int wb_normal; + short int enable_state; + unsigned int unknown_cnt; + u64 win_nsec; + u64 cur_win_nsec; + struct blk_stat_callback *cb; + long: 32; + u64 sync_issue; + void *sync_cookie; + long unsigned int last_issue; + long unsigned int last_comp; + long unsigned int min_lat_nsec; + struct rq_qos rqos; + struct rq_wait rq_wait[3]; + struct rq_depth rq_depth; +}; + +enum { + RWB_DEF_DEPTH = 16, + RWB_WINDOW_NSEC = 100000000, + RWB_MIN_WRITE_SAMPLES = 3, + RWB_UNKNOWN_BUMP = 5, +}; + +enum { + LAT_OK = 1, + LAT_UNKNOWN = 2, + LAT_UNKNOWN_WRITES = 3, + LAT_EXCEEDED = 4, +}; + +struct wbt_wait_data { + struct rq_wb *rwb; + enum wbt_flags wb_acct; + blk_opf_t opf; +}; + +struct lwq { + spinlock_t lock; + struct llist_node *ready; + struct llist_head new; +}; + +typedef enum { + ZSTD_noDict = 0, + ZSTD_extDict = 1, + ZSTD_dictMatchState = 2, + ZSTD_dedicatedDictSearch = 3, +} ZSTD_dictMode_e; + +typedef U64 ZSTD_VecMask; + +typedef enum { + search_hashChain = 0, + search_binaryTree = 1, + search_rowHash = 2, +} searchMethod_e; + +struct unimapdesc { + short unsigned int entry_ct; + struct unipair *entries; +}; + +struct vt_stat { + short unsigned int v_active; + short unsigned int v_signal; + short unsigned int v_state; +}; + +struct vt_sizes { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_scrollsize; +}; + +struct vt_consize { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_vlin; + short unsigned int v_clin; + short unsigned int v_vcol; + short unsigned int v_ccol; +}; + +struct vt_event { + unsigned int event; + unsigned int oldev; + unsigned int newev; + unsigned int pad[4]; +}; + +struct vt_setactivate { + unsigned int console; + struct vt_mode mode; +}; + +struct vt_event_wait { + struct list_head list; + struct vt_event event; + int done; +}; + +struct devres_node { + struct list_head entry; + dr_release_t release; + const char *name; + size_t size; +}; + +struct devres { + struct devres_node node; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u8 data[0]; +}; + +struct devres_group { + struct devres_node node[2]; + void *id; + int color; +}; + +struct action_devres { + void *data; + void (*action)(void *); +}; + +struct pages_devres { + long unsigned int addr; + unsigned int order; +}; + +struct scsi_varlen_cdb_hdr { + __u8 opcode; + __u8 control; + __u8 misc[5]; + __u8 additional_cdb_length; + __be16 service_action; +}; + +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; + __le64 hdr_addr; + } read; + struct { + struct { + struct { + __le16 pkt_info; + __le16 hdr_info; + } lo_dword; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; + +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 1, + IGB_FILTER_FLAG_VLAN_TCI = 2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 8, +}; + +struct igb_nfc_input { + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[6]; + u8 dst_addr[6]; +}; + +struct igb_nfc_filter { + struct hlist_node 
nfc_node; + struct igb_nfc_input filter; + long unsigned int cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + +struct igb_stats { + char stat_string[32]; + int sizeof_stat; + int stat_offset; +}; + +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP = 1, + TEST_IRQ = 2, + TEST_LOOP = 3, + TEST_LINK = 4, +}; + +struct igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +struct iwl_pnvm_init_complete_ntfy { + __le32 status; +}; + +struct pnvm_sku_package { + u8 rev; + u32 total_size; + u8 n_skus; + u32 reserved[2]; + u8 data[0]; +} __attribute__((packed)); + +struct iwl_pnvm_section { + __le32 offset; + const u8 data[0]; +}; + +enum ieee80211_radiotap_timestamp_unit_spos { + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK = 15, + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS = 0, + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US = 1, + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS = 3, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK = 240, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU = 0, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ = 16, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU = 32, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU = 48, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN = 240, +}; + +enum ieee80211_chanctx_change { + IEEE80211_CHANCTX_CHANGE_WIDTH = 1, + IEEE80211_CHANCTX_CHANGE_RX_CHAINS = 2, + IEEE80211_CHANCTX_CHANGE_RADAR = 4, + IEEE80211_CHANCTX_CHANGE_CHANNEL = 8, + IEEE80211_CHANCTX_CHANGE_MIN_DEF = 16, + IEEE80211_CHANCTX_CHANGE_AP = 32, + IEEE80211_CHANCTX_CHANGE_PUNCTURING = 64, +}; + +struct iwl_fw_dbg_trigger_tdls { + u8 action_bitmap; + u8 peer_mode; + u8 peer[6]; + u8 reserved[4]; +}; + +struct iwl_he_backoff_conf { + __le16 cwmin; + __le16 cwmax; + __le16 aifsn; + __le16 mu_time; +}; + +enum iwl_he_pkt_ext_constellations { + IWL_HE_PKT_EXT_BPSK = 0, + IWL_HE_PKT_EXT_QPSK = 1, + IWL_HE_PKT_EXT_16QAM = 2, + IWL_HE_PKT_EXT_64QAM = 3, + IWL_HE_PKT_EXT_256QAM = 4, + IWL_HE_PKT_EXT_1024QAM = 5, + IWL_HE_PKT_EXT_4096QAM = 6, + IWL_HE_PKT_EXT_NONE = 7, +}; + +struct iwl_he_pkt_ext_v1 { + u8 pkt_ext_qam_th[16]; +}; + +enum iwl_he_sta_ctxt_flags { + STA_CTXT_HE_REF_BSSID_VALID = 16, + STA_CTXT_HE_BSS_COLOR_DIS = 32, + STA_CTXT_HE_PARTIAL_BSS_COLOR = 64, + STA_CTXT_HE_32BIT_BA_BITMAP = 128, + STA_CTXT_HE_PACKET_EXT = 256, + STA_CTXT_HE_TRIG_RND_ALLOC = 512, + STA_CTXT_HE_CONST_TRIG_RND_ALLOC = 1024, + STA_CTXT_HE_ACK_ENABLED = 2048, + STA_CTXT_HE_MU_EDCA_CW = 4096, + STA_CTXT_HE_NIC_NOT_ACK_ENABLED = 8192, + STA_CTXT_HE_RU_2MHZ_BLOCK = 16384, + STA_CTXT_HE_NDP_FEEDBACK_ENABLED = 32768, + STA_CTXT_EHT_PUNCTURE_MASK_VALID = 65536, + STA_CTXT_EHT_LONG_PPE_ENABLED = 131072, +}; + +enum iwl_he_htc_flags { + IWL_HE_HTC_SUPPORT = 1, + IWL_HE_HTC_UL_MU_RESP_SCHED = 8, + IWL_HE_HTC_BSR_SUPP = 16, + IWL_HE_HTC_OMI_SUPP = 32, + IWL_HE_HTC_BQR_SUPP = 64, +}; + +struct iwl_he_sta_context_cmd_v2 { + u8 sta_id; + u8 tid_limit; + u8 reserved1; + u8 reserved2; + __le32 flags; + u8 ref_bssid_addr[6]; + __le16 reserved0; + __le32 htc_flags; + u8 frag_flags; + u8 frag_level; + u8 frag_max_num; + u8 frag_min_size; + struct iwl_he_pkt_ext_v1 pkt_ext; + u8 bss_color; + u8 htc_trig_based_pkt_ext; + __le16 frame_time_rts_th; + u8 rand_alloc_ecwmin; + u8 rand_alloc_ecwmax; + __le16 reserved3; + struct iwl_he_backoff_conf trig_based_txf[4]; + u8 max_bssid_indicator; + u8 bssid_index; + u8 ema_ap; + u8 profile_periodicity; + u8 bssid_count; + u8 reserved4[3]; +}; + +struct iwl_he_sta_context_cmd_v3 { + u8 sta_id; + u8 tid_limit; + u8 reserved1; + u8 reserved2; + 
__le32 flags; + u8 ref_bssid_addr[6]; + __le16 reserved0; + __le32 htc_flags; + u8 frag_flags; + u8 frag_level; + u8 frag_max_num; + u8 frag_min_size; + struct iwl_he_pkt_ext_v2 pkt_ext; + u8 bss_color; + u8 htc_trig_based_pkt_ext; + __le16 frame_time_rts_th; + u8 rand_alloc_ecwmin; + u8 rand_alloc_ecwmax; + __le16 puncture_mask; + struct iwl_he_backoff_conf trig_based_txf[4]; + u8 max_bssid_indicator; + u8 bssid_index; + u8 ema_ap; + u8 profile_periodicity; + u8 bssid_count; + u8 reserved4[3]; +}; + +struct iwl_chan_switch_te_cmd { + __le32 mac_id; + __le32 action; + __le32 tsf; + u8 cs_count; + u8 cs_delayed_bcn_count; + u8 cs_mode; + u8 reserved; +}; + +struct iwl_hs20_roc_res { + __le32 event_unique_id; + __le32 status; +}; + +struct iwl_mu_group_mgmt_cmd { + __le32 reserved; + __le32 membership_status[2]; + __le32 user_position[4]; +}; + +struct iwl_mu_group_mgmt_notif { + __le32 membership_status[2]; + __le32 user_position[4]; +}; + +struct iwl_mcc_update_resp_v8 { + __le32 status; + __le16 mcc; + u8 padding[2]; + __le32 cap; + __le16 time; + __le16 geo_info; + u8 source_id; + u8 reserved[3]; + __le32 n_channels; + __le32 channels[0]; +}; + +enum iwl_mcc_update_status { + MCC_RESP_NEW_CHAN_PROFILE = 0, + MCC_RESP_SAME_CHAN_PROFILE = 1, + MCC_RESP_INVALID = 2, + MCC_RESP_NVM_DISABLED = 3, + MCC_RESP_ILLEGAL = 4, + MCC_RESP_LOW_PRIORITY = 5, + MCC_RESP_TEST_MODE_ACTIVE = 6, + MCC_RESP_TEST_MODE_NOT_ACTIVE = 7, + MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE = 8, +}; + +struct iwl_rxq_sync_cmd { + __le32 flags; + __le32 rxq_mask; + __le32 count; + u8 payload[0]; +}; + +enum iwl_mvm_pm_event { + IWL_MVM_PM_EVENT_AWAKE = 0, + IWL_MVM_PM_EVENT_ASLEEP = 1, + IWL_MVM_PM_EVENT_UAPSD = 2, + IWL_MVM_PM_EVENT_PS_POLL = 3, +}; + +struct iwl_mvm_pm_state_notification { + u8 sta_id; + u8 type; + __le16 reserved; +}; + +struct iwl_mvm_mc_iter_data { + struct iwl_mvm *mvm; + int port_id; +}; + +struct iwl_mvm_he_obss_narrow_bw_ru_data { + bool tolerated; +}; + +struct iwl_mvm_ftm_responder_iter_data { + bool responder; + struct ieee80211_chanctx_conf *ctx; +}; + +struct usb_dynid { + struct list_head node; + struct usb_device_id id; +}; + +struct xhci_regset { + char name[32]; + struct debugfs_regset32 regset; + size_t nregs; + struct list_head list; +}; + +struct xhci_file_map { + const char *name; + int (*show)(struct seq_file *, void *); +}; + +struct xhci_ep_priv { + char name[32]; + struct dentry *root; + struct xhci_stream_info *stream_info; + struct xhci_ring *show_ring; + unsigned int stream_id; +}; + +struct xhci_slot_priv { + char name[32]; + struct dentry *root; + struct xhci_ep_priv *eps[31]; + struct xhci_virt_device *dev; +}; + +typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_cell_post_process_t)(void *, const char *, int, unsigned int, void *, size_t); + +enum nvmem_type { + NVMEM_TYPE_UNKNOWN = 0, + NVMEM_TYPE_EEPROM = 1, + NVMEM_TYPE_OTP = 2, + NVMEM_TYPE_BATTERY_BACKED = 3, + NVMEM_TYPE_FRAM = 4, +}; + +struct nvmem_keepout { + unsigned int start; + unsigned int end; + unsigned char value; +}; + +struct nvmem_cell_info { + const char *name; + unsigned int offset; + size_t raw_len; + unsigned int bytes; + unsigned int bit_offset; + unsigned int nbits; + struct device_node *np; + nvmem_cell_post_process_t read_post_process; + void *priv; +}; + +struct nvmem_device; + +struct nvmem_layout; + +struct nvmem_config { + struct device *dev; + const char *name; + int id; + struct 
module *owner; + const struct nvmem_cell_info *cells; + int ncells; + bool add_legacy_fixed_of_cells; + void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + enum nvmem_type type; + bool read_only; + bool root_only; + bool ignore_wp; + struct nvmem_layout *layout; + struct device_node *of_node; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; + bool compat; + struct device *base_dev; +}; + +struct nvmem_layout { + struct device dev; + struct nvmem_device *nvmem; + int (*add_cells)(struct nvmem_layout *); +}; + +struct linear_c { + struct dm_dev *dev; + long: 32; + sector_t start; +}; + +struct fw_cfg_file { + __be32 size; + __be16 select; + __u16 reserved; + char name[56]; +}; + +struct fw_cfg_dma_access { + __be32 control; + __be32 length; + __be64 address; +}; + +struct fw_cfg_vmcoreinfo { + __le16 host_format; + __le16 guest_format; + __le32 size; + __le64 paddr; +}; + +struct fw_cfg_sysfs_entry { + struct kobject kobj; + u32 size; + u16 select; + char name[56]; + struct list_head list; +}; + +struct fw_cfg_sysfs_attribute { + struct attribute attr; + ssize_t (*show)(struct fw_cfg_sysfs_entry *, char *); +}; + +struct nvmem_cell_lookup { + const char *nvmem_name; + const char *cell_name; + const char *dev_id; + const char *con_id; + struct list_head node; +}; + +enum { + NVMEM_ADD = 1, + NVMEM_REMOVE = 2, + NVMEM_CELL_ADD = 3, + NVMEM_CELL_REMOVE = 4, + NVMEM_LAYOUT_ADD = 5, + NVMEM_LAYOUT_REMOVE = 6, +}; + +struct nvmem_device { + struct module *owner; + long: 32; + struct device dev; + struct list_head node; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + struct nvmem_layout *layout; + void *priv; + bool sysfs_cells_populated; + long: 32; +}; + +struct nvmem_cell_table { + const char *nvmem_name; + const struct nvmem_cell_info *cells; + size_t ncells; + struct list_head node; +}; + +struct nvmem_cell_entry { + const char *name; + int offset; + size_t raw_len; + int bytes; + int bit_offset; + int nbits; + nvmem_cell_post_process_t read_post_process; + void *priv; + struct device_node *np; + struct nvmem_device *nvmem; + struct list_head node; +}; + +struct nvmem_cell { + struct nvmem_cell_entry *entry; + const char *id; + int index; +}; + +struct flow_dissector_key_ports_range { + union { + struct flow_dissector_key_ports tp; + struct { + struct flow_dissector_key_ports tp_min; + struct flow_dissector_key_ports tp_max; + }; + }; +}; + +struct flow_match_meta { + struct flow_dissector_key_meta *key; + struct flow_dissector_key_meta *mask; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key; + struct flow_dissector_key_basic *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key; + struct flow_dissector_key_control *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key; + struct flow_dissector_key_eth_addrs *mask; +}; + +struct flow_match_vlan { + struct flow_dissector_key_vlan *key; + struct flow_dissector_key_vlan *mask; +}; + +struct 
flow_match_arp { + struct flow_dissector_key_arp *key; + struct flow_dissector_key_arp *mask; +}; + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key; + struct flow_dissector_key_ipv4_addrs *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key; + struct flow_dissector_key_ipv6_addrs *mask; +}; + +struct flow_match_ip { + struct flow_dissector_key_ip *key; + struct flow_dissector_key_ip *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key; + struct flow_dissector_key_ports *mask; +}; + +struct flow_match_ports_range { + struct flow_dissector_key_ports_range *key; + struct flow_dissector_key_ports_range *mask; +}; + +struct flow_match_icmp { + struct flow_dissector_key_icmp *key; + struct flow_dissector_key_icmp *mask; +}; + +struct flow_match_tcp { + struct flow_dissector_key_tcp *key; + struct flow_dissector_key_tcp *mask; +}; + +struct flow_match_ipsec { + struct flow_dissector_key_ipsec *key; + struct flow_dissector_key_ipsec *mask; +}; + +struct flow_match_mpls { + struct flow_dissector_key_mpls *key; + struct flow_dissector_key_mpls *mask; +}; + +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key; + struct flow_dissector_key_keyid *mask; +}; + +struct flow_match_enc_opts { + struct flow_dissector_key_enc_opts *key; + struct flow_dissector_key_enc_opts *mask; +}; + +struct flow_match_ct { + struct flow_dissector_key_ct *key; + struct flow_dissector_key_ct *mask; +}; + +struct flow_match_pppoe { + struct flow_dissector_key_pppoe *key; + struct flow_dissector_key_pppoe *mask; +}; + +struct flow_match_l2tpv3 { + struct flow_dissector_key_l2tpv3 *key; + struct flow_dissector_key_l2tpv3 *mask; +}; + +enum flow_block_binder_type { + FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP = 3, + FLOW_BLOCK_BINDER_TYPE_RED_MARK = 4, +}; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + bool block_shared; + bool unlocked_driver_cb; + struct net *net; + struct flow_block *block; + struct list_head cb_list; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; + struct Qdisc *sch; + struct list_head *cb_list_head; +}; + +struct flow_block_cb; + +struct flow_block_indr { + struct list_head list; + struct net_device *dev; + struct Qdisc *sch; + enum flow_block_binder_type binder_type; + void *data; + void *cb_priv; + void (*cleanup)(struct flow_block_cb *); +}; + +struct flow_block_cb { + struct list_head driver_list; + struct list_head list; + flow_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *); + struct flow_block_indr indr; + unsigned int refcnt; +}; + +enum offload_act_command { + FLOW_ACT_REPLACE = 0, + FLOW_ACT_DESTROY = 1, + FLOW_ACT_STATS = 2, +}; + +struct flow_offload_action { + struct netlink_ext_ack *extack; + enum offload_act_command command; + enum flow_action_id id; + u32 index; + long unsigned int cookie; + long: 32; + struct flow_stats stats; + struct flow_action action; +}; + +typedef int flow_indr_block_bind_cb_t(struct net_device *, struct Qdisc *, void *, enum tc_setup_type, void *, void *, void (*)(struct flow_block_cb *)); + +struct flow_indr_dev { + struct list_head list; + flow_indr_block_bind_cb_t *cb; + void *cb_priv; + refcount_t refcnt; +}; + +struct flow_indir_dev_info { + void *data; + struct net_device *dev; + struct Qdisc *sch; + enum tc_setup_type 
type; + void (*cleanup)(struct flow_block_cb *); + struct list_head list; + enum flow_block_command command; + enum flow_block_binder_type binder_type; + struct list_head *cb_list; +}; + +enum { + ETHTOOL_A_STRING_UNSPEC = 0, + ETHTOOL_A_STRING_INDEX = 1, + ETHTOOL_A_STRING_VALUE = 2, + __ETHTOOL_A_STRING_CNT = 3, + ETHTOOL_A_STRING_MAX = 2, +}; + +enum { + ETHTOOL_A_STRINGS_UNSPEC = 0, + ETHTOOL_A_STRINGS_STRING = 1, + __ETHTOOL_A_STRINGS_CNT = 2, + ETHTOOL_A_STRINGS_MAX = 1, +}; + +enum { + ETHTOOL_A_STRINGSET_UNSPEC = 0, + ETHTOOL_A_STRINGSET_ID = 1, + ETHTOOL_A_STRINGSET_COUNT = 2, + ETHTOOL_A_STRINGSET_STRINGS = 3, + __ETHTOOL_A_STRINGSET_CNT = 4, + ETHTOOL_A_STRINGSET_MAX = 3, +}; + +enum { + ETHTOOL_A_STRINGSETS_UNSPEC = 0, + ETHTOOL_A_STRINGSETS_STRINGSET = 1, + __ETHTOOL_A_STRINGSETS_CNT = 2, + ETHTOOL_A_STRINGSETS_MAX = 1, +}; + +struct strset_info { + bool per_dev; + bool free_strings; + unsigned int count; + const char (*strings)[32]; +}; + +struct strset_req_info { + struct ethnl_req_info base; + u32 req_ids; + bool counts_only; +}; + +struct strset_reply_data { + struct ethnl_reply_data base; + struct strset_info sets[21]; +}; + +struct nf_hook_entries_rcu_head { + struct callback_head head; + void *allocation; +}; + +struct arppayload { + unsigned char mac_src[6]; + unsigned char ip_src[4]; + unsigned char mac_dst[6]; + unsigned char ip_dst[4]; +}; + +struct nhmsg { + unsigned char nh_family; + unsigned char nh_scope; + unsigned char nh_protocol; + unsigned char resvd; + unsigned int nh_flags; +}; + +struct nexthop_grp { + __u32 id; + __u8 weight; + __u8 weight_high; + __u16 resvd2; +}; + +enum { + NEXTHOP_GRP_TYPE_MPATH = 0, + NEXTHOP_GRP_TYPE_RES = 1, + __NEXTHOP_GRP_TYPE_MAX = 2, +}; + +enum { + NHA_UNSPEC = 0, + NHA_ID = 1, + NHA_GROUP = 2, + NHA_GROUP_TYPE = 3, + NHA_BLACKHOLE = 4, + NHA_OIF = 5, + NHA_GATEWAY = 6, + NHA_ENCAP_TYPE = 7, + NHA_ENCAP = 8, + NHA_GROUPS = 9, + NHA_MASTER = 10, + NHA_FDB = 11, + NHA_RES_GROUP = 12, + NHA_RES_BUCKET = 13, + NHA_OP_FLAGS = 14, + NHA_GROUP_STATS = 15, + NHA_HW_STATS_ENABLE = 16, + NHA_HW_STATS_USED = 17, + __NHA_MAX = 18, +}; + +enum { + NHA_RES_GROUP_UNSPEC = 0, + NHA_RES_GROUP_PAD = 0, + NHA_RES_GROUP_BUCKETS = 1, + NHA_RES_GROUP_IDLE_TIMER = 2, + NHA_RES_GROUP_UNBALANCED_TIMER = 3, + NHA_RES_GROUP_UNBALANCED_TIME = 4, + __NHA_RES_GROUP_MAX = 5, +}; + +enum { + NHA_RES_BUCKET_UNSPEC = 0, + NHA_RES_BUCKET_PAD = 0, + NHA_RES_BUCKET_INDEX = 1, + NHA_RES_BUCKET_IDLE_TIME = 2, + NHA_RES_BUCKET_NH_ID = 3, + __NHA_RES_BUCKET_MAX = 4, +}; + +enum { + NHA_GROUP_STATS_UNSPEC = 0, + NHA_GROUP_STATS_ENTRY = 1, + __NHA_GROUP_STATS_MAX = 2, +}; + +enum { + NHA_GROUP_STATS_ENTRY_UNSPEC = 0, + NHA_GROUP_STATS_ENTRY_ID = 1, + NHA_GROUP_STATS_ENTRY_PACKETS = 2, + NHA_GROUP_STATS_ENTRY_PACKETS_HW = 3, + __NHA_GROUP_STATS_ENTRY_MAX = 4, +}; + +struct nh_config { + u32 nh_id; + u8 nh_family; + u8 nh_protocol; + u8 nh_blackhole; + u8 nh_fdb; + u32 nh_flags; + int nh_ifindex; + struct net_device *dev; + union { + __be32 ipv4; + struct in6_addr ipv6; + } gw; + struct nlattr *nh_grp; + u16 nh_grp_type; + u16 nh_grp_res_num_buckets; + long unsigned int nh_grp_res_idle_timer; + long unsigned int nh_grp_res_unbalanced_timer; + bool nh_grp_res_has_num_buckets; + bool nh_grp_res_has_idle_timer; + bool nh_grp_res_has_unbalanced_timer; + bool nh_hw_stats; + struct nlattr *nh_encap; + u16 nh_encap_type; + u32 nlflags; + struct nl_info nlinfo; +}; + +enum nexthop_event_type { + NEXTHOP_EVENT_DEL = 0, + NEXTHOP_EVENT_REPLACE = 1, + 
NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE = 2, + NEXTHOP_EVENT_BUCKET_REPLACE = 3, + NEXTHOP_EVENT_HW_STATS_REPORT_DELTA = 4, +}; + +enum nh_notifier_info_type { + NH_NOTIFIER_INFO_TYPE_SINGLE = 0, + NH_NOTIFIER_INFO_TYPE_GRP = 1, + NH_NOTIFIER_INFO_TYPE_RES_TABLE = 2, + NH_NOTIFIER_INFO_TYPE_RES_BUCKET = 3, + NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS = 4, +}; + +struct nh_notifier_single_info { + struct net_device *dev; + u8 gw_family; + union { + __be32 ipv4; + struct in6_addr ipv6; + }; + u32 id; + u8 is_reject: 1; + u8 is_fdb: 1; + u8 has_encap: 1; +}; + +struct nh_notifier_grp_entry_info { + u16 weight; + struct nh_notifier_single_info nh; +}; + +struct nh_notifier_grp_info { + u16 num_nh; + bool is_fdb; + bool hw_stats; + struct nh_notifier_grp_entry_info nh_entries[0]; +}; + +struct nh_notifier_res_bucket_info { + u16 bucket_index; + unsigned int idle_timer_ms; + bool force; + struct nh_notifier_single_info old_nh; + struct nh_notifier_single_info new_nh; +}; + +struct nh_notifier_res_table_info { + u16 num_nh_buckets; + bool hw_stats; + struct nh_notifier_single_info nhs[0]; +}; + +struct nh_notifier_grp_hw_stats_entry_info { + u32 id; + long: 32; + u64 packets; +}; + +struct nh_notifier_grp_hw_stats_info { + u16 num_nh; + bool hw_stats_used; + long: 32; + struct nh_notifier_grp_hw_stats_entry_info stats[0]; +}; + +struct nh_notifier_info { + struct net *net; + struct netlink_ext_ack *extack; + u32 id; + enum nh_notifier_info_type type; + union { + struct nh_notifier_single_info *nh; + struct nh_notifier_grp_info *nh_grp; + struct nh_notifier_res_table_info *nh_res_table; + struct nh_notifier_res_bucket_info *nh_res_bucket; + struct nh_notifier_grp_hw_stats_info *nh_grp_hw_stats; + }; +}; + +struct nh_dump_filter { + u32 nh_id; + int dev_idx; + int master_idx; + bool group_filter; + bool fdb_filter; + u32 res_bucket_nh_id; + u32 op_flags; +}; + +struct rtm_dump_nh_ctx { + u32 idx; +}; + +struct rtm_dump_res_bucket_ctx { + struct rtm_dump_nh_ctx nh; + u16 bucket_index; +}; + +struct rtm_dump_nexthop_bucket_data { + struct rtm_dump_res_bucket_ctx *ctx; + struct nh_dump_filter filter; +}; + +struct snmp_mib { + const char *name; + int entry; +}; + +union net_bridge_eht_addr { + __be32 ip4; + struct in6_addr ip6; +}; + +struct net_bridge_group_eht_host { + struct rb_node rb_node; + union net_bridge_eht_addr h_addr; + struct hlist_head set_entries; + unsigned int num_entries; + unsigned char filter_mode; + struct net_bridge_port_group *pg; +}; + +struct net_bridge_group_eht_set; + +struct net_bridge_group_eht_set_entry { + struct rb_node rb_node; + struct hlist_node host_list; + union net_bridge_eht_addr h_addr; + struct timer_list timer; + struct net_bridge *br; + struct net_bridge_group_eht_set *eht_set; + struct net_bridge_group_eht_host *h_parent; + struct net_bridge_mcast_gc mcast_gc; +}; + +struct net_bridge_group_eht_set { + struct rb_node rb_node; + union net_bridge_eht_addr src_addr; + struct rb_root entry_tree; + struct timer_list timer; + struct net_bridge_port_group *pg; + struct net_bridge *br; + struct net_bridge_mcast_gc mcast_gc; +}; + +struct freq_band_range { + u64 start; + u64 end; +}; + +struct wbrf_ranges_in_out { + u64 num_of_ranges; + struct freq_band_range band_list[11]; +}; + +struct dma_coherent_mem { + void *virt_base; + dma_addr_t device_base; + long unsigned int pfn_base; + int size; + long unsigned int *bitmap; + spinlock_t spinlock; + bool use_dev_dma_pfn_offset; +}; + +struct reserved_mem_ops; + +struct reserved_mem { + const char *name; + long unsigned int fdt_node; 
+ const struct reserved_mem_ops *ops; + phys_addr_t base; + phys_addr_t size; + void *priv; +}; + +struct reserved_mem_ops { + int (*device_init)(struct reserved_mem *, struct device *); + void (*device_release)(struct reserved_mem *, struct device *); +}; + +typedef int (*reservedmem_of_init_fn)(struct reserved_mem *); + +typedef __kernel_long_t __kernel_suseconds_t; + +typedef __kernel_suseconds_t suseconds_t; + +struct old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 it_value; +}; + +struct syscall_trace_enter { + struct trace_entry ent; + int nr; + long unsigned int args[0]; +}; + +struct syscall_trace_exit { + struct trace_entry ent; + int nr; + long int ret; +}; + +struct syscall_tp_t { + struct trace_entry ent; + int syscall_nr; + long unsigned int ret; +}; + +struct syscall_tp_t___2 { + struct trace_entry ent; + int syscall_nr; + long unsigned int args[6]; + long: 32; +}; + +enum bpf_stack_build_id_status { + BPF_STACK_BUILD_ID_EMPTY = 0, + BPF_STACK_BUILD_ID_VALID = 1, + BPF_STACK_BUILD_ID_IP = 2, +}; + +struct bpf_stack_build_id { + __s32 status; + unsigned char build_id[20]; + union { + __u64 offset; + __u64 ip; + }; +}; + +enum { + BPF_F_SKIP_FIELD_MASK = 255, + BPF_F_USER_STACK = 256, + BPF_F_FAST_STACK_CMP = 512, + BPF_F_REUSE_STACKID = 1024, + BPF_F_USER_BUILD_ID = 2048, +}; + +struct stack_map_bucket { + struct pcpu_freelist_node fnode; + u32 hash; + u32 nr; + long: 32; + u64 data[0]; +}; + +struct bpf_stack_map { + struct bpf_map map; + void *elems; + struct pcpu_freelist freelist; + u32 n_buckets; + struct stack_map_bucket *buckets[0]; + long: 32; +}; + +typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stackid_pe)(struct bpf_perf_event_data_kern *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_sleepable)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack_sleepable)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_pe)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +typedef int cydp_t; + +struct madvise_walk_private { + struct mmu_gather *tlb; + bool pageout; +}; + +enum fsconfig_command { + FSCONFIG_SET_FLAG = 0, + FSCONFIG_SET_STRING = 1, + FSCONFIG_SET_BINARY = 2, + FSCONFIG_SET_PATH = 3, + FSCONFIG_SET_PATH_EMPTY = 4, + FSCONFIG_SET_FD = 5, + FSCONFIG_CMD_CREATE = 6, + FSCONFIG_CMD_RECONFIGURE = 7, + FSCONFIG_CMD_CREATE_EXCL = 8, +}; + +struct trace_event_raw_iomap_readpage_class { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + int nr_pages; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iomap_range_class { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + loff_t size; + loff_t offset; + u64 length; + char __data[0]; +}; + +struct trace_event_raw_iomap_class { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_writepage_map { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + u64 pos; + u64 dirty_len; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_iter { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + loff_t pos; + u64 length; + s64 processed; + unsigned 
int flags; + const void *ops; + long unsigned int caller; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iomap_dio_rw_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + size_t count; + size_t done_before; + int ki_flags; + unsigned int dio_flags; + bool aio; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iomap_dio_complete { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + int ki_flags; + bool aio; + int error; + ssize_t ret; + char __data[0]; +}; + +struct trace_event_data_offsets_iomap_readpage_class {}; + +struct trace_event_data_offsets_iomap_range_class {}; + +struct trace_event_data_offsets_iomap_class {}; + +struct trace_event_data_offsets_iomap_writepage_map {}; + +struct trace_event_data_offsets_iomap_iter {}; + +struct trace_event_data_offsets_iomap_dio_rw_begin {}; + +struct trace_event_data_offsets_iomap_dio_complete {}; + +typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_release_folio)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_invalidate_folio)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_invalidate_fail)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_rw_queued)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_iter_dstmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_iter_srcmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_writepage_map)(void *, struct inode *, u64, unsigned int, struct iomap *); + +typedef void (*btf_trace_iomap_iter)(void *, struct iomap_iter *, const void *, long unsigned int); + +typedef void (*btf_trace_iomap_dio_rw_begin)(void *, struct kiocb *, struct iov_iter *, unsigned int, size_t); + +typedef void (*btf_trace_iomap_dio_complete)(void *, struct kiocb *, int, ssize_t); + +enum { + __PAGE_UNLOCK_BIT = 0, + PAGE_UNLOCK = 1, + __PAGE_UNLOCK_SEQ = 0, + __PAGE_START_WRITEBACK_BIT = 1, + PAGE_START_WRITEBACK = 2, + __PAGE_START_WRITEBACK_SEQ = 1, + __PAGE_END_WRITEBACK_BIT = 2, + PAGE_END_WRITEBACK = 4, + __PAGE_END_WRITEBACK_SEQ = 2, + __PAGE_SET_ORDERED_BIT = 3, + PAGE_SET_ORDERED = 8, + __PAGE_SET_ORDERED_SEQ = 3, +}; + +struct btrfs_replace_extent_info { + u64 disk_offset; + u64 disk_len; + u64 data_offset; + u64 data_len; + u64 file_offset; + char *extent_buf; + bool is_new_extent; + bool update_times; + int qgroup_reserved; + int insertions; +}; + +struct btrfs_file_private { + void *filldir_buf; + long: 32; + u64 last_index; + struct extent_state *llseek_cached_state; + struct task_struct *owner_task; +}; + +struct btrfs_iget_args { + u64 ino; + struct btrfs_root *root; + long: 32; +}; + +struct btrfs_rename_ctx { + u64 index; +}; + +struct data_reloc_warn { + struct btrfs_path path; + struct btrfs_fs_info *fs_info; + u64 extent_item_size; + u64 logical; + int mirror_num; + long: 32; +}; + +struct async_extent { + u64 start; + u64 ram_size; + u64 compressed_size; + struct folio **folios; + long unsigned int nr_folios; + int compress_type; + struct list_head list; + long: 32; +}; + +struct async_cow; + +struct async_chunk { + struct btrfs_inode *inode; + struct folio *locked_folio; + u64 start; + u64 end; + blk_opf_t write_flags; + struct list_head extents; + struct 
cgroup_subsys_state *blkcg_css; + struct btrfs_work work; + struct async_cow *async_cow; + long: 32; +}; + +struct async_cow { + atomic_t num_chunks; + long: 32; + struct async_chunk chunks[0]; +}; + +struct can_nocow_file_extent_args { + u64 start; + u64 end; + bool writeback_path; + bool strict; + bool free_path; + long: 32; + struct btrfs_file_extent file_extent; +}; + +struct btrfs_writepage_fixup { + struct folio *folio; + struct btrfs_inode *inode; + struct btrfs_work work; +}; + +struct dir_entry { + u64 ino; + u64 offset; + unsigned int type; + int name_len; +}; + +struct btrfs_delalloc_work { + struct inode *inode; + struct completion completion; + struct list_head list; + struct btrfs_work work; +}; + +struct btrfs_encoded_read_private { + wait_queue_head_t wait; + void *uring_ctx; + atomic_t pending; + blk_status_t status; +}; + +struct btrfs_swap_info { + u64 start; + u64 block_start; + u64 block_len; + u64 lowest_ppage; + u64 highest_ppage; + long unsigned int nr_pages; + int nr_extents; +}; + +struct efi_generic_dev_path { + u8 type; + u8 sub_type; + u16 length; +}; + +struct variable_validate { + efi_guid_t vendor; + char *name; + bool (*validate)(efi_char16_t *, int, u8 *, long unsigned int); +}; + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[0]; +}; + +enum blake2b_lengths { + BLAKE2B_BLOCK_SIZE = 128, + BLAKE2B_HASH_SIZE = 64, + BLAKE2B_KEY_SIZE = 64, + BLAKE2B_160_HASH_SIZE = 20, + BLAKE2B_256_HASH_SIZE = 32, + BLAKE2B_384_HASH_SIZE = 48, + BLAKE2B_512_HASH_SIZE = 64, +}; + +struct blake2b_state { + u64 h[8]; + u64 t[2]; + u64 f[2]; + u8 buf[128]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2b_iv { + BLAKE2B_IV0 = 7640891576956012808ULL, + BLAKE2B_IV1 = 13503953896175478587ULL, + BLAKE2B_IV2 = 4354685564936845355ULL, + BLAKE2B_IV3 = 11912009170470909681ULL, + BLAKE2B_IV4 = 5840696475078001361ULL, + BLAKE2B_IV5 = 11170449401992604703ULL, + BLAKE2B_IV6 = 2270897969802886507ULL, + BLAKE2B_IV7 = 6620516959819538809ULL, +}; + +typedef void (*blake2b_compress_t)(struct blake2b_state *, const u8 *, size_t, u32); + +struct blake2b_tfm_ctx { + u8 key[64]; + unsigned int keylen; +}; + +struct lzo_ctx { + void *lzo_comp_mem; +}; + +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + struct filename *filename; + struct statx *buffer; +}; + +typedef struct { + unsigned char op; + unsigned char bits; + short unsigned int val; +} code; + +typedef enum { + HEAD = 0, + FLAGS = 1, + TIME = 2, + OS = 3, + EXLEN = 4, + EXTRA = 5, + NAME = 6, + COMMENT = 7, + HCRC = 8, + DICTID = 9, + DICT = 10, + TYPE = 11, + TYPEDO = 12, + STORED = 13, + COPY = 14, + TABLE = 15, + LENLENS = 16, + CODELENS = 17, + LEN = 18, + LENEXT = 19, + DIST = 20, + DISTEXT = 21, + MATCH = 22, + LIT = 23, + CHECK = 24, + LENGTH = 25, + DONE = 26, + BAD = 27, + MEM = 28, + SYNC = 29, +} inflate_mode; + +struct inflate_state { + inflate_mode mode; + int last; + int wrap; + int havedict; + int flags; + unsigned int dmax; + long unsigned int check; + long unsigned int total; + unsigned int wbits; + unsigned int wsize; + unsigned int whave; + unsigned int write; + unsigned char *window; + long unsigned int hold; + unsigned int bits; + unsigned int length; + unsigned int offset; + unsigned int extra; + const code *lencode; + const code *distcode; + unsigned int lenbits; + unsigned int distbits; + unsigned int ncode; + unsigned int nlen; + unsigned int ndist; + unsigned int have; + code 
*next; + short unsigned int lens[320]; + short unsigned int work[288]; + code codes[2048]; +}; + +union uu { + short unsigned int us; + unsigned char b[2]; +}; + +typedef size_t (*ZSTD_blockCompressor)(ZSTD_matchState_t *, seqStore_t *, U32 *, const void *, size_t); + +typedef struct { + U64 rolling; + U64 stopMask; +} ldmRollingHashState_t; + +enum dim_state { + DIM_START_MEASURE = 0, + DIM_MEASURE_IN_PROGRESS = 1, + DIM_APPLY_NEW_PROFILE = 2, +}; + +enum dim_stats_state { + DIM_STATS_WORSE = 0, + DIM_STATS_SAME = 1, + DIM_STATS_BETTER = 2, +}; + +enum dim_step_result { + DIM_STEPPED = 0, + DIM_TOO_TIRED = 1, + DIM_ON_EDGE = 2, +}; + +struct clk_gate { + struct clk_hw hw; + void *reg; + u8 bit_idx; + u8 flags; + spinlock_t *lock; +}; + +struct sysrq_state { + struct input_handle handle; + struct work_struct reinject_work; + long unsigned int key_down[24]; + unsigned int alt; + unsigned int alt_use; + unsigned int shift; + unsigned int shift_use; + bool active; + bool need_reinject; + bool reinjecting; + bool reset_canceled; + bool reset_requested; + long unsigned int reset_keybit[24]; + int reset_seq_len; + int reset_seq_cnt; + int reset_seq_version; + struct timer_list keyreset_timer; +}; + +struct cache_type_info { + const char *size_prop; + const char *line_size_props[2]; + const char *nr_sets_prop; +}; + +typedef struct { + spinlock_t *lock; +} class_spinlock_irq_t; + +enum { + K2_FLAG_SATA_8_PORTS = 16777216, + K2_FLAG_NO_ATAPI_DMA = 33554432, + K2_FLAG_BAR_POS_3 = 67108864, + K2_SATA_TF_CMD_OFFSET = 0, + K2_SATA_TF_DATA_OFFSET = 0, + K2_SATA_TF_ERROR_OFFSET = 4, + K2_SATA_TF_NSECT_OFFSET = 8, + K2_SATA_TF_LBAL_OFFSET = 12, + K2_SATA_TF_LBAM_OFFSET = 16, + K2_SATA_TF_LBAH_OFFSET = 20, + K2_SATA_TF_DEVICE_OFFSET = 24, + K2_SATA_TF_CMDSTAT_OFFSET = 28, + K2_SATA_TF_CTL_OFFSET = 32, + K2_SATA_DMA_CMD_OFFSET = 48, + K2_SATA_SCR_STATUS_OFFSET = 64, + K2_SATA_SCR_ERROR_OFFSET = 68, + K2_SATA_SCR_CONTROL_OFFSET = 72, + K2_SATA_SICR1_OFFSET = 128, + K2_SATA_SICR2_OFFSET = 132, + K2_SATA_SIM_OFFSET = 136, + K2_SATA_PORT_OFFSET = 256, + chip_svw4 = 0, + chip_svw8 = 1, + chip_svw42 = 2, + chip_svw43 = 3, +}; + +struct ich8_hsfsts { + u16 flcdone: 1; + u16 flcerr: 1; + u16 dael: 1; + u16 berasesz: 2; + u16 flcinprog: 1; + u16 reserved1: 2; + u16 reserved2: 6; + u16 fldesvalid: 1; + u16 flockdn: 1; +}; + +union ich8_hws_flash_status { + struct ich8_hsfsts hsf_status; + u16 regval; +}; + +struct ich8_hsflctl { + u16 flcgo: 1; + u16 flcycle: 2; + u16 reserved: 5; + u16 fldbcount: 2; + u16 flockdn: 6; +}; + +union ich8_hws_flash_ctrl { + struct ich8_hsflctl hsf_ctrl; + u16 regval; +}; + +struct ich8_pr { + u32 base: 13; + u32 reserved1: 2; + u32 rpe: 1; + u32 limit: 13; + u32 reserved2: 2; + u32 wpe: 1; +}; + +union ich8_flash_protected_range { + struct ich8_pr range; + u32 regval; +}; + +struct uefi_cnv_wlan_sgom_data { + u8 revision; + u8 offset_map[338]; +}; + +struct uefi_cnv_wlan_uats_data { + u8 revision; + u8 offset_map[338]; +}; + +struct uefi_cnv_common_step_data { + u8 revision; + u8 step_mode; + u8 cnvi_eq_channel; + u8 cnvr_eq_channel; + u8 radio1; + u8 radio2; +}; + +struct uefi_sar_profile { + struct iwl_sar_profile_chain chains[4]; +}; + +struct uefi_cnv_var_wrds { + u8 revision; + u32 mode; + struct uefi_sar_profile sar_profile; +} __attribute__((packed)); + +struct uefi_cnv_var_ewrd { + u8 revision; + u32 mode; + u32 num_profiles; + struct uefi_sar_profile sar_profiles[3]; +} __attribute__((packed)); + +struct uefi_cnv_var_wgds { + u8 revision; + u8 num_profiles; + struct 
iwl_geo_profile geo_profiles[8]; +}; + +struct uefi_cnv_var_ppag { + u8 revision; + u32 ppag_modes; + struct iwl_ppag_chain ppag_chains[2]; +} __attribute__((packed)); + +struct uefi_cnv_var_wtas { + u8 revision; + u32 tas_selection; + u8 black_list_size; + u16 black_list[16]; +} __attribute__((packed)); + +struct uefi_cnv_var_splc { + u8 revision; + u32 default_pwr_limit; +} __attribute__((packed)); + +struct uefi_cnv_var_wrdd { + u8 revision; + u32 mcc; +} __attribute__((packed)); + +struct uefi_cnv_var_eckv { + u8 revision; + u32 ext_clock_valid; +} __attribute__((packed)); + +struct uefi_cnv_var_general_cfg { + u8 revision; + u32 functions[32]; +} __attribute__((packed)); + +struct uefi_cnv_wlan_wbem_data { + u8 revision; + u32 wbem_320mhz_per_mcc; +} __attribute__((packed)); + +struct uefi_cnv_var_puncturing_data { + u8 revision; + u32 puncturing; +} __attribute__((packed)); + +struct iwl_uefi_pnvm_mem_desc { + __le32 addr; + __le32 size; + const u8 data[0]; +}; + +enum iwl_sf_scenario { + SF_SCENARIO_SINGLE_UNICAST = 0, + SF_SCENARIO_AGG_UNICAST = 1, + SF_SCENARIO_MULTICAST = 2, + SF_SCENARIO_BA_RESP = 3, + SF_SCENARIO_TX_RESP = 4, + SF_NUM_SCENARIO = 5, +}; + +struct iwl_sf_cfg_cmd { + __le32 state; + __le32 watermark[2]; + __le32 long_delay_timeouts[10]; + __le32 full_on_timeouts[10]; +}; + +struct iwl_mvm_active_iface_iterator_data { + struct ieee80211_vif *ignore_vif; + struct ieee80211_sta *sta_vif_ap_sta; + enum iwl_sf_state sta_vif_state; + u32 num_active_macs; +}; + +struct usb_qualifier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __u8 bNumConfigurations; + __u8 bRESERVED; +}; + +struct usb_set_sel_req { + __u8 u1_sel; + __u8 u1_pel; + __le16 u2_sel; + __le16 u2_pel; +}; + +enum usb_port_connect_type { + USB_PORT_CONNECT_TYPE_UNKNOWN = 0, + USB_PORT_CONNECT_TYPE_HOT_PLUG = 1, + USB_PORT_CONNECT_TYPE_HARD_WIRED = 2, + USB_PORT_NOT_USED = 3, +}; + +struct usbdevfs_hub_portinfo { + char nports; + char port[127]; +}; + +struct usb_port_status { + __le16 wPortStatus; + __le16 wPortChange; + __le32 dwExtPortStatus; +}; + +struct usb_hub_status { + __le16 wHubStatus; + __le16 wHubChange; +}; + +enum hub_led_mode { + INDICATOR_AUTO = 0, + INDICATOR_CYCLE = 1, + INDICATOR_GREEN_BLINK = 2, + INDICATOR_GREEN_BLINK_OFF = 3, + INDICATOR_AMBER_BLINK = 4, + INDICATOR_AMBER_BLINK_OFF = 5, + INDICATOR_ALT_BLINK = 6, + INDICATOR_ALT_BLINK_OFF = 7, +} __attribute__((mode(byte))); + +struct usb_tt_clear { + struct list_head clear_list; + unsigned int tt; + u16 devinfo; + struct usb_hcd *hcd; + struct usb_host_endpoint *ep; +}; + +struct typec_connector { + void (*attach)(struct typec_connector *, struct device *); + void (*deattach)(struct typec_connector *, struct device *); +}; + +typedef u32 usb_port_location_t; + +struct usb_port; + +struct usb_hub { + struct device *intfdev; + struct usb_device *hdev; + struct kref kref; + struct urb *urb; + u8 (*buffer)[8]; + union { + struct usb_hub_status hub; + struct usb_port_status port; + } *status; + struct mutex status_mutex; + int error; + int nerrors; + long unsigned int event_bits[1]; + long unsigned int change_bits[1]; + long unsigned int removed_bits[1]; + long unsigned int wakeup_bits[1]; + long unsigned int power_bits[1]; + long unsigned int child_usage_bits[1]; + long unsigned int warm_reset_bits[1]; + struct usb_hub_descriptor *descriptor; + struct usb_tt tt; + unsigned int mA_per_port; + unsigned int 
wakeup_enabled_descendants; + unsigned int limited_power: 1; + unsigned int quiescing: 1; + unsigned int disconnected: 1; + unsigned int in_reset: 1; + unsigned int quirk_disable_autosuspend: 1; + unsigned int quirk_check_port_auto_suspend: 1; + unsigned int has_indicators: 1; + u8 indicator[31]; + struct delayed_work leds; + struct delayed_work init_work; + struct work_struct events; + spinlock_t irq_urb_lock; + struct timer_list irq_urb_retry; + struct usb_port **ports; + struct list_head onboard_devs; +}; + +struct usb_port { + struct usb_device *child; + long: 32; + struct device dev; + struct usb_dev_state *port_owner; + struct usb_port *peer; + struct typec_connector *connector; + struct dev_pm_qos_request *req; + enum usb_port_connect_type connect_type; + enum usb_device_state state; + struct kernfs_node *state_kn; + usb_port_location_t location; + struct mutex status_lock; + u32 over_current_count; + u8 portnum; + u32 quirks; + unsigned int early_stop: 1; + unsigned int ignore_event: 1; + unsigned int is_superspeed: 1; + unsigned int usb3_lpm_u1_permit: 1; + unsigned int usb3_lpm_u2_permit: 1; + long: 32; +}; + +enum hub_activation_type { + HUB_INIT = 0, + HUB_INIT2 = 1, + HUB_INIT3 = 2, + HUB_POST_RESET = 3, + HUB_RESUME = 4, + HUB_RESET_RESUME = 5, +}; + +enum hub_quiescing_type { + HUB_DISCONNECT = 0, + HUB_PRE_RESET = 1, + HUB_SUSPEND = 2, +}; + +struct swoc_info { + __u8 rev; + __u8 reserved[8]; + __u16 LinuxSKU; + __u16 LinuxVer; + __u8 reserved2[47]; +} __attribute__((packed)); + +struct ir_raw_timings_pl { + unsigned int header_pulse; + unsigned int bit_space; + unsigned int bit_pulse[2]; + unsigned int trailer_space; + unsigned int msb_first: 1; +}; + +enum sony_state { + STATE_INACTIVE___7 = 0, + STATE_HEADER_SPACE___3 = 1, + STATE_BIT_PULSE___3 = 2, + STATE_BIT_SPACE___3 = 3, + STATE_FINISHED___4 = 4, +}; + +enum { + DM_TIO_INSIDE_DM_IO = 0, + DM_TIO_IS_DUPLICATE_BIO = 1, +}; + +enum { + DM_IO_ACCOUNTED = 0, + DM_IO_WAS_SPLIT = 1, + DM_IO_BLK_STAT = 2, +}; + +struct dax_operations { + long int (*direct_access)(struct dax_device *, long unsigned int, long int, enum dax_access_mode, void **, pfn_t *); + int (*zero_page_range)(struct dax_device *, long unsigned int, size_t); + size_t (*recovery_write)(struct dax_device *, long unsigned int, void *, size_t, struct iov_iter *); +}; + +struct clone_info { + struct dm_table *map; + struct bio *bio; + struct dm_io *io; + long: 32; + sector_t sector; + unsigned int sector_count; + bool is_abnormal_io: 1; + bool submit_as_polled: 1; +}; + +struct table_device { + struct list_head list; + refcount_t count; + struct dm_dev dm_dev; +}; + +struct dm_blkdev_id { + u8 *id; + enum blk_unique_id type; +}; + +struct dm_pr { + u64 old_key; + u64 new_key; + u32 flags; + bool abort; + bool fail_early; + int ret; + enum pr_type type; + struct pr_keys *read_keys; + struct pr_held_reservation *rsv; +}; + +struct gnet_stats_rate_est64 { + __u64 bps; + __u64 pps; +}; + +struct gnet_estimator { + signed char interval; + unsigned char ewma_log; +}; + +struct net_rate_estimator { + struct gnet_stats_basic_sync *bstats; + spinlock_t *stats_lock; + bool running; + struct gnet_stats_basic_sync *cpu_bstats; + u8 ewma_log; + u8 intvl_log; + seqcount_t seq; + u64 last_packets; + u64 last_bytes; + u64 avpps; + u64 avbps; + long unsigned int next_jiffies; + struct timer_list timer; + struct callback_head rcu; +}; + +struct ethtool_forced_speed_map { + u32 speed; + long unsigned int caps[4]; + const u32 *cap_arr; + u32 arr_size; +}; + +struct link_mode_info { 
+ int speed; + u8 lanes; + u8 duplex; +}; + +struct phy_req_info { + struct ethnl_req_info base; + struct phy_device_node *pdn; +}; + +struct ethnl_phy_dump_ctx { + struct phy_req_info *phy_req_info; + long unsigned int ifindex; + long unsigned int phy_index; +}; + +enum nft_trace_attributes { + NFTA_TRACE_UNSPEC = 0, + NFTA_TRACE_TABLE = 1, + NFTA_TRACE_CHAIN = 2, + NFTA_TRACE_RULE_HANDLE = 3, + NFTA_TRACE_TYPE = 4, + NFTA_TRACE_VERDICT = 5, + NFTA_TRACE_ID = 6, + NFTA_TRACE_LL_HEADER = 7, + NFTA_TRACE_NETWORK_HEADER = 8, + NFTA_TRACE_TRANSPORT_HEADER = 9, + NFTA_TRACE_IIF = 10, + NFTA_TRACE_IIFTYPE = 11, + NFTA_TRACE_OIF = 12, + NFTA_TRACE_OIFTYPE = 13, + NFTA_TRACE_MARK = 14, + NFTA_TRACE_NFPROTO = 15, + NFTA_TRACE_POLICY = 16, + NFTA_TRACE_PAD = 17, + __NFTA_TRACE_MAX = 18, +}; + +enum nft_trace_types { + NFT_TRACETYPE_UNSPEC = 0, + NFT_TRACETYPE_POLICY = 1, + NFT_TRACETYPE_RETURN = 2, + NFT_TRACETYPE_RULE = 3, + __NFT_TRACETYPE_MAX = 4, +}; + +struct nft_traceinfo { + bool trace; + bool nf_trace; + bool packet_dumped; + enum nft_trace_types type: 8; + u32 skbid; + const struct nft_base_chain *basechain; +}; + +struct tcp_plb_state { + u8 consec_cong_rounds: 5; + u8 unused: 3; + u32 pause_until; +}; + +struct igmp6_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + +struct igmp6_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; + struct ifmcaddr6 *im; +}; + +struct brport_attribute { + struct attribute attr; + ssize_t (*show)(struct net_bridge_port *, char *); + int (*store)(struct net_bridge_port *, long unsigned int); + int (*store_raw)(struct net_bridge_port *, char *); +}; + +struct buffer { + size_t size; + char data[0]; +}; + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP = 1, +}; + +struct kexec_relocate_data { + long unsigned int kexec_start_address; + long unsigned int kexec_indirection_page; + long unsigned int kexec_mach_type; + long unsigned int kexec_r2; +}; + +enum unwind_reason_code { + URC_OK = 0, + URC_CONTINUE_UNWIND = 8, + URC_FAILURE = 9, +}; + +struct unwind_ctrl_block { + long unsigned int vrs[16]; + const long unsigned int *insn; + long unsigned int sp_high; + long unsigned int *lr_addr; + int check_each_pop; + int entries; + int byte; +}; + +enum regs { + FP = 11, + SP = 13, + LR = 14, + PC = 15, +}; + +struct of_cpu_method { + const char *method; + const struct smp_operations *ops; +}; + +enum rtmutex_chainwalk { + RT_MUTEX_MIN_CHAINWALK = 0, + RT_MUTEX_FULL_CHAINWALK = 1, +}; + +enum pci_p2pdma_map_type { + PCI_P2PDMA_MAP_UNKNOWN = 0, + PCI_P2PDMA_MAP_NOT_SUPPORTED = 1, + PCI_P2PDMA_MAP_BUS_ADDR = 2, + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE = 3, +}; + +struct pci_p2pdma_map_state { + struct dev_pagemap *pgmap; + int map; + u64 bus_off; +}; + +struct io_tlb_pool; + +struct stacktrace_cookie { + long unsigned int *store; + unsigned int size; + unsigned int skip; + unsigned int len; +}; + +struct posix_clock_desc { + struct file *fp; + struct posix_clock *clk; +}; + +typedef __u16 comp_t; + +typedef __u32 comp2_t; + +struct acct { + char ac_flag; + char ac_version; + __u16 ac_uid16; + __u16 ac_gid16; + __u16 ac_tty; + __u32 ac_btime; + comp_t ac_utime; + comp_t ac_stime; + comp_t ac_etime; + comp_t ac_mem; + comp_t ac_io; + comp_t ac_rw; + comp_t ac_minflt; + comp_t ac_majflt; + comp_t ac_swaps; + __u16 ac_ahz; + __u32 ac_exitcode; + char ac_comm[17]; + __u8 ac_etime_hi; + __u16 ac_etime_lo; + __u32 ac_uid; + __u32 ac_gid; +}; + +typedef struct acct acct_t; 
+ +struct bsd_acct_struct { + struct fs_pin pin; + atomic_long_t count; + struct callback_head rcu; + struct mutex lock; + int active; + long unsigned int needcheck; + struct file *file; + struct pid_namespace *ns; + struct work_struct work; + struct completion done; +}; + +enum { + TRACE_FUNC_NO_OPTS = 0, + TRACE_FUNC_OPT_STACK = 1, + TRACE_FUNC_OPT_NO_REPEATS = 2, + TRACE_FUNC_OPT_HIGHEST_BIT = 4, +}; + +struct bucket { + struct hlist_nulls_head head; + raw_spinlock_t raw_lock; +}; + +struct htab_elem; + +struct bpf_htab { + struct bpf_map map; + struct bpf_mem_alloc ma; + struct bpf_mem_alloc pcpu_ma; + struct bucket *buckets; + void *elems; + union { + struct pcpu_freelist freelist; + struct bpf_lru lru; + }; + struct htab_elem **extra_elems; + long: 32; + struct percpu_counter pcount; + atomic_t count; + bool use_percpu_counter; + u32 n_buckets; + u32 elem_size; + u32 hashrnd; + struct lock_class_key lockdep_key; + int *map_locked[8]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct htab_elem { + union { + struct hlist_nulls_node hash_node; + struct { + void *padding; + union { + struct pcpu_freelist_node fnode; + struct htab_elem *batch_flink; + }; + }; + }; + union { + void *ptr_to_pptr; + struct bpf_lru_node lru_node; + }; + u32 hash; + char key[0]; +}; + +struct bpf_iter_seq_hash_map_info { + struct bpf_map *map; + struct bpf_htab *htab; + void *percpu_value_buf; + u32 bucket_id; + u32 skip_elems; +}; + +struct static_key_mod { + struct static_key_mod *next; + struct jump_entry *entries; + struct module *mod; +}; + +struct static_key_deferred { + struct static_key key; + long unsigned int timeout; + struct delayed_work work; +}; + +struct cachestat_range { + __u64 off; + __u64 len; +}; + +struct cachestat { + __u64 nr_cache; + __u64 nr_dirty; + __u64 nr_writeback; + __u64 nr_evicted; + __u64 nr_recently_evicted; +}; + +struct trace_event_raw_mm_filemap_op_page_cache { + struct trace_entry ent; + long unsigned int pfn; + long unsigned int i_ino; + long unsigned int index; + dev_t s_dev; + unsigned char order; + char __data[0]; +}; + +struct trace_event_raw_mm_filemap_op_page_cache_range { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + long unsigned int index; + long unsigned int last_index; + char __data[0]; +}; + +struct trace_event_raw_mm_filemap_fault { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_filemap_set_wb_err { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + errseq_t errseq; + char __data[0]; +}; + +struct trace_event_raw_file_check_and_advance_wb_err { + struct trace_entry ent; + struct file *file; + long unsigned int i_ino; + dev_t s_dev; + errseq_t old; + errseq_t new; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_filemap_op_page_cache {}; + +struct trace_event_data_offsets_mm_filemap_op_page_cache_range {}; + +struct trace_event_data_offsets_mm_filemap_fault {}; + +struct trace_event_data_offsets_filemap_set_wb_err {}; + +struct trace_event_data_offsets_file_check_and_advance_wb_err {}; + +typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_mm_filemap_get_pages)(void *, struct address_space *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_filemap_map_pages)(void *, struct 
address_space *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_filemap_fault)(void *, struct address_space *, long unsigned int); + +typedef void (*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t); + +typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t); + +enum behavior { + EXCLUSIVE = 0, + SHARED = 1, + DROP = 2, +}; + +struct dio_submit { + struct bio *bio; + unsigned int blkbits; + unsigned int blkfactor; + unsigned int start_zero_done; + int pages_in_io; + long: 32; + sector_t block_in_file; + unsigned int blocks_available; + int reap_counter; + sector_t final_block_in_request; + int boundary; + get_block_t *get_block; + loff_t logical_offset_in_bio; + sector_t final_block_in_bio; + sector_t next_block_for_io; + struct page *cur_page; + unsigned int cur_page_offset; + unsigned int cur_page_len; + long: 32; + sector_t cur_page_block; + loff_t cur_page_fs_offset; + struct iov_iter *iter; + unsigned int head; + unsigned int tail; + size_t from; + size_t to; + long: 32; +}; + +struct dio { + int flags; + blk_opf_t opf; + struct gendisk *bio_disk; + struct inode *inode; + loff_t i_size; + dio_iodone_t *end_io; + bool is_pinned; + void *private; + spinlock_t bio_lock; + int page_errors; + int is_async; + bool defer_completion; + bool should_dirty; + int io_error; + long unsigned int refcount; + struct bio *bio_list; + struct task_struct *waiter; + struct kiocb *iocb; + ssize_t result; + union { + struct page *pages[64]; + struct work_struct complete_work; + }; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct backing_aio { + struct kiocb iocb; + refcount_t ref; + struct kiocb *orig_iocb; + void (*end_write)(struct kiocb *, ssize_t); + struct work_struct work; + long int res; +}; + +struct sysctl_alias { + const char *kernel_param; + const char *sysctl_param; +}; + +struct args_protover { + __u32 version; +}; + +struct args_protosubver { + __u32 sub_version; +}; + +struct args_openmount { + __u32 devid; +}; + +struct args_ready { + __u32 token; +}; + +struct args_fail { + __u32 token; + __s32 status; +}; + +struct args_setpipefd { + __s32 pipefd; +}; + +struct args_timeout { + __u64 timeout; +}; + +struct args_requester { + __u32 uid; + __u32 gid; +}; + +struct args_expire { + __u32 how; +}; + +struct args_askumount { + __u32 may_umount; +}; + +struct args_in { + __u32 type; +}; + +struct args_out { + __u32 devid; + __u32 magic; +}; + +struct args_ismountpoint { + union { + struct args_in in; + struct args_out out; + }; +}; + +struct autofs_dev_ioctl { + __u32 ver_major; + __u32 ver_minor; + __u32 size; + __s32 ioctlfd; + union { + struct args_protover protover; + struct args_protosubver protosubver; + struct args_openmount openmount; + struct args_ready ready; + struct args_fail fail; + struct args_setpipefd setpipefd; + struct args_timeout timeout; + struct args_requester requester; + struct args_expire expire; + struct args_askumount askumount; + struct args_ismountpoint ismountpoint; + }; + char path[0]; +}; + +enum { + AUTOFS_DEV_IOCTL_VERSION_CMD = 113, + AUTOFS_DEV_IOCTL_PROTOVER_CMD = 114, + AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD = 115, + AUTOFS_DEV_IOCTL_OPENMOUNT_CMD = 116, + AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD = 117, + AUTOFS_DEV_IOCTL_READY_CMD = 118, + AUTOFS_DEV_IOCTL_FAIL_CMD = 119, + AUTOFS_DEV_IOCTL_SETPIPEFD_CMD = 120, + AUTOFS_DEV_IOCTL_CATATONIC_CMD = 121, + 
AUTOFS_DEV_IOCTL_TIMEOUT_CMD = 122, + AUTOFS_DEV_IOCTL_REQUESTER_CMD = 123, + AUTOFS_DEV_IOCTL_EXPIRE_CMD = 124, + AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD = 125, + AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD = 126, +}; + +typedef int (*ioctl_fn___2)(struct file *, struct autofs_sb_info *, struct autofs_dev_ioctl *); + +struct btrfs_eb_write_context { + struct writeback_control *wbc; + struct extent_buffer *eb; + struct btrfs_block_group *zoned_bg; +}; + +struct btrfs_bio_ctrl { + struct btrfs_bio *bbio; + enum btrfs_compression_type compress_type; + u32 len_to_oe_boundary; + blk_opf_t opf; + btrfs_bio_end_io_t end_io_func; + struct writeback_control *wbc; + long unsigned int submit_bitmap; +}; + +struct btrfs_raid_stride { + __le64 devid; + __le64 physical; +}; + +struct btrfs_stripe_extent { + struct { + struct {} __empty_strides; + struct btrfs_raid_stride strides[0]; + }; +}; + +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_kpp { + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); + int (*generate_public_key)(struct kpp_request *); + int (*compute_shared_secret)(struct kpp_request *); + unsigned int (*max_size)(struct crypto_kpp *); + int (*init)(struct crypto_kpp *); + void (*exit)(struct crypto_kpp *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct kpp_instance { + void (*free)(struct kpp_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[64]; + struct crypto_instance base; + } s; + struct kpp_alg alg; + }; +}; + +struct crypto_kpp_spawn { + struct crypto_spawn base; +}; + +struct disk_events { + struct list_head node; + struct gendisk *disk; + spinlock_t lock; + struct mutex block_mutex; + int block; + unsigned int pending; + unsigned int clearing; + long int poll_msecs; + struct delayed_work dwork; +}; + +enum io_uring_msg_ring_flags { + IORING_MSG_DATA = 0, + IORING_MSG_SEND_FD = 1, +}; + +struct io_msg { + struct file *file; + struct file *src_file; + struct callback_head tw; + u64 user_data; + u32 len; + u32 cmd; + u32 src_fd; + union { + u32 dst_fd; + u32 cqe_flags; + }; + u32 flags; + long: 32; +}; + +typedef mpi_limb_t UWtype; + +typedef enum { + CODES = 0, + LENS = 1, + DISTS = 2, +} codetype; + +typedef enum { + ZSTD_c_compressionLevel = 100, + ZSTD_c_windowLog = 101, + ZSTD_c_hashLog = 102, + ZSTD_c_chainLog = 103, + ZSTD_c_searchLog = 104, + ZSTD_c_minMatch = 105, + ZSTD_c_targetLength = 106, + ZSTD_c_strategy = 107, + ZSTD_c_enableLongDistanceMatching = 160, + ZSTD_c_ldmHashLog = 161, + ZSTD_c_ldmMinMatch = 162, + ZSTD_c_ldmBucketSizeLog = 163, + ZSTD_c_ldmHashRateLog = 164, + ZSTD_c_contentSizeFlag = 200, + ZSTD_c_checksumFlag = 201, + ZSTD_c_dictIDFlag = 202, + ZSTD_c_nbWorkers = 400, + ZSTD_c_jobSize = 401, + ZSTD_c_overlapLog = 402, + ZSTD_c_experimentalParam1 = 500, + ZSTD_c_experimentalParam2 = 10, + 
ZSTD_c_experimentalParam3 = 1000, + ZSTD_c_experimentalParam4 = 1001, + ZSTD_c_experimentalParam5 = 1002, + ZSTD_c_experimentalParam6 = 1003, + ZSTD_c_experimentalParam7 = 1004, + ZSTD_c_experimentalParam8 = 1005, + ZSTD_c_experimentalParam9 = 1006, + ZSTD_c_experimentalParam10 = 1007, + ZSTD_c_experimentalParam11 = 1008, + ZSTD_c_experimentalParam12 = 1009, + ZSTD_c_experimentalParam13 = 1010, + ZSTD_c_experimentalParam14 = 1011, + ZSTD_c_experimentalParam15 = 1012, +} ZSTD_cParameter; + +typedef ZSTD_CCtx zstd_cctx; + +typedef ZSTD_CDict zstd_cdict; + +typedef unsigned int FSE_DTable; + +typedef struct { + size_t state; + const void *table; +} FSE_DState_t; + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; + +typedef struct { + short unsigned int newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; + +typedef struct { + short int ncount[256]; + FSE_DTable dtable[0]; +} FSE_DecompressWksp; + +struct pci_bus_resource { + struct list_head list; + struct resource *res; +}; + +struct earlycon_device { + struct console *con; + long: 32; + struct uart_port port; + char options[32]; + unsigned int baud; + long: 32; +}; + +struct earlycon_id { + char name[15]; + char name_term; + char compatible[128]; + int (*setup)(struct earlycon_device *, const char *); +}; + +struct container_dev { + struct device dev; + int (*offline)(struct container_dev *); + long: 32; +}; + +struct trace_event_raw_devres { + struct trace_entry ent; + u32 __data_loc_devname; + struct device *dev; + const char *op; + void *node; + u32 __data_loc_name; + size_t size; + char __data[0]; +}; + +struct trace_event_data_offsets_devres { + u32 devname; + const void *devname_ptr_; + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_devres_log)(void *, struct device *, const char *, void *, const char *, size_t); + +enum ata_lpm_hints { + ATA_LPM_EMPTY = 1, + ATA_LPM_HIPM = 2, + ATA_LPM_WAKE_ONLY = 4, +}; + +enum { + ATA_EH_SPDN_NCQ_OFF = 1, + ATA_EH_SPDN_SPEED_DOWN = 2, + ATA_EH_SPDN_FALLBACK_TO_PIO = 4, + ATA_EH_SPDN_KEEP_ERRORS = 8, + ATA_EFLAG_IS_IO = 1, + ATA_EFLAG_DUBIOUS_XFER = 2, + ATA_EFLAG_OLD_ER = -2147483648, + ATA_ECAT_NONE = 0, + ATA_ECAT_ATA_BUS = 1, + ATA_ECAT_TOUT_HSM = 2, + ATA_ECAT_UNK_DEV = 3, + ATA_ECAT_DUBIOUS_NONE = 4, + ATA_ECAT_DUBIOUS_ATA_BUS = 5, + ATA_ECAT_DUBIOUS_TOUT_HSM = 6, + ATA_ECAT_DUBIOUS_UNK_DEV = 7, + ATA_ECAT_NR = 8, + ATA_EH_CMD_DFL_TIMEOUT = 5000, + ATA_EH_RESET_COOL_DOWN = 5000, + ATA_EH_PRERESET_TIMEOUT = 10000, + ATA_EH_FASTDRAIN_INTERVAL = 3000, + ATA_EH_UA_TRIES = 5, + ATA_EH_PROBE_TRIAL_INTERVAL = 60000, + ATA_EH_PROBE_TRIALS = 2, +}; + +struct ata_eh_cmd_timeout_ent { + const u8 *commands; + const unsigned int *timeouts; +}; + +struct speed_down_verdict_arg { + u64 since; + int xfer_ok; + int nr_errors[8]; + long: 32; +}; + +struct iwl_power_vec_entry { + struct iwl_powertable_cmd cmd; + u8 no_dtim; +} __attribute__((packed)); + +enum iwl_tof_algo_type { + IWL_TOF_ALGO_TYPE_MAX_LIKE = 0, + IWL_TOF_ALGO_TYPE_LINEAR_REG = 1, + IWL_TOF_ALGO_TYPE_FFT = 2, + IWL_TOF_ALGO_TYPE_INVALID = 3, +}; + +enum iwl_tof_location_query { + IWL_TOF_LOC_LCI = 1, + IWL_TOF_LOC_CIVIC = 2, +}; + +struct iwl_tof_range_req_ap_entry_v2 { + u8 channel_num; + u8 bandwidth; + u8 tsf_delta_direction; + u8 ctrl_ch_position; + u8 bssid[6]; + u8 measure_type; + u8 num_of_bursts; + __le16 burst_period; + u8 samples_per_burst; + u8 retries_per_sample; + __le32 tsf_delta; + u8 location_req; + u8 asap_mode; + u8 enable_dyn_ack; + s8 rssi; + u8 algo_type; + u8 
notify_mcsi; + __le16 reserved; +}; + +enum iwl_initiator_ap_flags { + IWL_INITIATOR_AP_FLAGS_ASAP = 2, + IWL_INITIATOR_AP_FLAGS_LCI_REQUEST = 4, + IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST = 8, + IWL_INITIATOR_AP_FLAGS_DYN_ACK = 16, + IWL_INITIATOR_AP_FLAGS_ALGO_LR = 32, + IWL_INITIATOR_AP_FLAGS_ALGO_FFT = 64, + IWL_INITIATOR_AP_FLAGS_MCSI_REPORT = 256, + IWL_INITIATOR_AP_FLAGS_NON_TB = 512, + IWL_INITIATOR_AP_FLAGS_TB = 1024, + IWL_INITIATOR_AP_FLAGS_SECURED = 2048, + IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = 4096, + IWL_INITIATOR_AP_FLAGS_USE_CALIB = 8192, + IWL_INITIATOR_AP_FLAGS_PMF = 16384, + IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = 32768, + IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC = 65536, + IWL_INITIATOR_AP_FLAGS_TEST_BAD_SLTF = 131072, +}; + +struct iwl_tof_range_req_ap_entry_v3 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 bandwidth; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[6]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + __le16 reserved; + __le32 tsf_delta; +}; + +struct iwl_tof_range_req_ap_entry_v4 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[6]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + __le16 reserved; + u8 hltk[32]; + u8 tk[32]; +}; + +struct iwl_tof_range_req_ap_entry_v6 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[6]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[32]; + u8 tk[32]; + __le16 calib[5]; + __le16 beacon_interval; +}; + +struct iwl_tof_range_req_ap_entry_v7 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[6]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[32]; + u8 tk[32]; + __le16 calib[5]; + __le16 beacon_interval; + u8 rx_pn[6]; + u8 tx_pn[6]; +}; + +struct iwl_tof_range_req_ap_entry_v8 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[6]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[32]; + u8 tk[32]; + __le16 calib[5]; + __le16 beacon_interval; + u8 rx_pn[6]; + u8 tx_pn[6]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + u8 r2i_max_total_ltf; + u8 i2r_max_total_ltf; +}; + +struct iwl_tof_range_req_ap_entry_v9 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[6]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[32]; + u8 tk[32]; + __le16 calib[5]; + u16 beacon_interval; + u8 rx_pn[6]; + u8 tx_pn[6]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + u8 r2i_max_total_ltf; + u8 i2r_max_total_ltf; + u8 bss_color; + u8 band; + __le16 min_time_between_msr; +}; + +struct iwl_tof_range_req_ap_entry_v10 { + __le32 initiator_ap_flags; + u8 band; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 bssid[6]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[32]; + u8 tk[32]; + __le16 calib[5]; + __le16 beacon_interval; + u8 rx_pn[6]; + u8 tx_pn[6]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + __le16 min_time_between_msr; +}; + +enum iwl_tof_initiator_flags { + IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED = 1, + IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A = 2, + 
IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B = 4, + IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C = 8, + IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A = 16, + IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B = 32, + IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C = 64, + IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM = 128, + IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB = 32768, + IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB = 65536, + IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = 1048576, +}; + +struct iwl_tof_range_req_cmd_v5 { + __le32 initiator_flags; + u8 request_id; + u8 initiator; + u8 one_sided_los_disable; + u8 req_timeout; + u8 report_policy; + u8 reserved0; + u8 num_of_ap; + u8 macaddr_random; + u8 range_req_bssid[6]; + u8 macaddr_template[6]; + u8 macaddr_mask[6]; + u8 ftm_rx_chains; + u8 ftm_tx_chains; + __le16 common_calib; + __le16 specific_calib; + struct iwl_tof_range_req_ap_entry_v2 ap[5]; +}; + +struct iwl_tof_range_req_cmd_v7 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[6]; + u8 macaddr_mask[6]; + u8 macaddr_template[6]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + __le16 common_calib; + __le16 specific_calib; + struct iwl_tof_range_req_ap_entry_v3 ap[5]; +}; + +struct iwl_tof_range_req_cmd_v8 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[6]; + u8 macaddr_mask[6]; + u8 macaddr_template[6]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + __le16 common_calib; + __le16 specific_calib; + struct iwl_tof_range_req_ap_entry_v4 ap[5]; +}; + +struct iwl_tof_range_req_cmd_v9 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[6]; + u8 macaddr_mask[6]; + u8 macaddr_template[6]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v6 ap[5]; +}; + +struct iwl_tof_range_req_cmd_v11 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[6]; + u8 macaddr_mask[6]; + u8 macaddr_template[6]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v7 ap[5]; +}; + +struct iwl_tof_range_req_cmd_v12 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[6]; + u8 macaddr_mask[6]; + u8 macaddr_template[6]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v8 ap[5]; +}; + +struct iwl_tof_range_req_cmd_v13 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[6]; + u8 macaddr_mask[6]; + u8 macaddr_template[6]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v9 ap[5]; +}; + +struct iwl_tof_range_req_cmd_v14 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[6]; + u8 macaddr_mask[6]; + u8 macaddr_template[6]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v10 ap[5]; +}; + +enum iwl_tof_range_request_status { + IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS = 0, + IWL_TOF_RANGE_REQUEST_STATUS_BUSY = 1, +}; + +enum iwl_tof_entry_status { + IWL_TOF_ENTRY_SUCCESS = 0, + IWL_TOF_ENTRY_GENERAL_FAILURE = 1, + IWL_TOF_ENTRY_NO_RESPONSE = 2, + IWL_TOF_ENTRY_REQUEST_REJECTED = 3, + IWL_TOF_ENTRY_NOT_SCHEDULED = 4, + IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT = 5, + IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE = 6, + IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED = 7, + IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON = 8, + IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP = 9, + IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE = 10, + IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED = 11, + IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE = 12, + 
IWL_TOF_ENTRY_BAD_REQUEST_ARGS = 13, + IWL_TOF_ENTRY_WIFI_NOT_ENABLED = 14, + IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS = 15, +}; + +struct iwl_tof_range_rsp_ap_entry_ntfy_v3 { + u8 bssid[6]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + u8 reserved; + u8 refusal_period; + __le32 range; + __le32 range_variance; + __le32 timestamp; + __le32 t2t3_initiator; + __le32 t1t4_responder; + __le16 common_calib; + __le16 specific_calib; + __le32 papd_calib_output; +}; + +struct iwl_tof_range_rsp_ap_entry_ntfy_v4 { + u8 bssid[6]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + u8 last_burst; + u8 refusal_period; + __le32 timestamp; + __le32 start_tsf; + __le32 rx_rate_n_flags; + __le32 tx_rate_n_flags; + __le32 t2t3_initiator; + __le32 t1t4_responder; + __le16 common_calib; + __le16 specific_calib; + __le32 papd_calib_output; +}; + +struct iwl_tof_range_rsp_ap_entry_ntfy_v5 { + u8 bssid[6]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + u8 last_burst; + u8 refusal_period; + __le32 timestamp; + __le32 start_tsf; + __le32 rx_rate_n_flags; + __le32 tx_rate_n_flags; + __le32 t2t3_initiator; + __le32 t1t4_responder; + __le16 common_calib; + __le16 specific_calib; + __le32 papd_calib_output; + u8 rttConfidence; + u8 reserved[3]; +}; + +struct iwl_tof_range_rsp_ap_entry_ntfy_v6 { + u8 bssid[6]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + u8 last_burst; + u8 refusal_period; + __le32 timestamp; + __le32 start_tsf; + __le32 rx_rate_n_flags; + __le32 tx_rate_n_flags; + __le32 t2t3_initiator; + __le32 t1t4_responder; + __le16 common_calib; + __le16 specific_calib; + __le32 papd_calib_output; + u8 rttConfidence; + u8 reserved[3]; + u8 rx_pn[6]; + u8 tx_pn[6]; +}; + +struct iwl_tof_range_rsp_ntfy_v5 { + u8 request_id; + u8 request_status; + u8 last_in_batch; + u8 num_of_aps; + struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[5]; +}; + +struct iwl_tof_range_rsp_ntfy_v6 { + u8 request_id; + u8 num_of_aps; + u8 last_report; + u8 reserved; + struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[5]; +}; + +struct iwl_tof_range_rsp_ntfy_v7 { + u8 request_id; + u8 num_of_aps; + u8 last_report; + u8 reserved; + struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[5]; +}; + +struct iwl_tof_range_rsp_ntfy_v8 { + u8 request_id; + u8 num_of_aps; + u8 last_report; + u8 reserved; + struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[5]; +}; + +struct iwl_tof_range_abort_cmd { + u8 request_id; + u8 reserved[3]; +}; + +struct iwl_mvm_loc_entry { + struct list_head list; + u8 addr[6]; + u8 lci_len; + u8 civic_len; + u8 buf[0]; +}; + +struct iwl_mvm_smooth_entry { + struct list_head list; + u8 addr[6]; + s64 rtt_avg; + u64 host_time; +}; + +enum iwl_mvm_pasn_flags { + IWL_MVM_PASN_FLAG_HAS_HLTK = 1, +}; + +struct iwl_mvm_ftm_pasn_entry { + struct list_head list; + u8 addr[6]; + u8 hltk[32]; + u8 tk[32]; + u8 cipher; + u8 tx_pn[6]; + u8 rx_pn[6]; + u32 flags; +}; + +struct iwl_mvm_ftm_iter_data { + u8 *cipher; + u8 *bssid; + u8 *tk; +}; + +struct rtw_vif_recalc_lps_iter_data { + struct rtw_dev *rtwdev; + struct ieee80211_vif *found_vif; + int count; +}; + +enum pm_qos_flags_status { + PM_QOS_FLAGS_UNDEFINED = -1, + PM_QOS_FLAGS_NONE = 0, + PM_QOS_FLAGS_SOME = 1, + PM_QOS_FLAGS_ALL = 2, +}; + +struct component_ops { + int (*bind)(struct device *, 
struct device *, void *); + void (*unbind)(struct device *, struct device *, void *); +}; + +struct touchscreen_properties { + unsigned int max_x; + unsigned int max_y; + bool invert_x; + bool invert_y; + bool swap_x_y; +}; + +enum nec_state { + STATE_INACTIVE___8 = 0, + STATE_HEADER_SPACE___4 = 1, + STATE_BIT_PULSE___4 = 2, + STATE_BIT_SPACE___4 = 3, + STATE_TRAILER_PULSE___3 = 4, + STATE_TRAILER_SPACE___3 = 5, +}; + +struct mdp_device_descriptor_s { + __u32 number; + __u32 major; + __u32 minor; + __u32 raid_disk; + __u32 state; + __u32 reserved[27]; +}; + +typedef struct mdp_device_descriptor_s mdp_disk_t; + +struct mdp_superblock_s { + __u32 md_magic; + __u32 major_version; + __u32 minor_version; + __u32 patch_version; + __u32 gvalid_words; + __u32 set_uuid0; + __u32 ctime; + __u32 level; + __u32 size; + __u32 nr_disks; + __u32 raid_disks; + __u32 md_minor; + __u32 not_persistent; + __u32 set_uuid1; + __u32 set_uuid2; + __u32 set_uuid3; + __u32 gstate_creserved[16]; + __u32 utime; + __u32 state; + __u32 active_disks; + __u32 working_disks; + __u32 failed_disks; + __u32 spare_disks; + __u32 sb_csum; + __u32 events_lo; + __u32 events_hi; + __u32 cp_events_lo; + __u32 cp_events_hi; + __u32 recovery_cp; + __u64 reshape_position; + __u32 new_level; + __u32 delta_disks; + __u32 new_layout; + __u32 new_chunk; + __u32 gstate_sreserved[14]; + __u32 layout; + __u32 chunk_size; + __u32 root_pv; + __u32 root_block; + __u32 pstate_reserved[60]; + mdp_disk_t disks[27]; + __u32 reserved[0]; + mdp_disk_t this_disk; +}; + +typedef struct mdp_superblock_s mdp_super_t; + +struct mdu_version_s { + int major; + int minor; + int patchlevel; +}; + +typedef struct mdu_version_s mdu_version_t; + +typedef struct mdu_array_info_s mdu_array_info_t; + +typedef struct mdu_disk_info_s mdu_disk_info_t; + +struct mdu_bitmap_file_s { + char pathname[4096]; +}; + +typedef struct mdu_bitmap_file_s mdu_bitmap_file_t; + +enum { + MD_RESYNC_NONE = 0, + MD_RESYNC_YIELDED = 1, + MD_RESYNC_DELAYED = 2, + MD_RESYNC_ACTIVE = 3, +}; + +enum md_ro_state { + MD_RDWR = 0, + MD_RDONLY = 1, + MD_AUTO_READ = 2, + MD_MAX_STATE = 3, +}; + +struct md_io_clone { + struct mddev *mddev; + struct bio *orig_bio; + long unsigned int start_time; + long: 32; + struct bio bio_clone; +}; + +struct super_type { + char *name; + struct module *owner; + int (*load_super)(struct md_rdev *, struct md_rdev *, int); + int (*validate_super)(struct mddev *, struct md_rdev *, struct md_rdev *); + void (*sync_super)(struct mddev *, struct md_rdev *); + long long unsigned int (*rdev_size_change)(struct md_rdev *, sector_t); + int (*allow_new_offset)(struct md_rdev *, long long unsigned int); +}; + +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct md_rdev *, char *); + ssize_t (*store)(struct md_rdev *, const char *, size_t); +}; + +enum array_state { + clear = 0, + inactive = 1, + suspended = 2, + readonly = 3, + read_auto = 4, + clean = 5, + active = 6, + write_pending = 7, + active_idle = 8, + broken = 9, + bad_word = 10, +}; + +struct detected_devices_node { + struct list_head list; + dev_t dev; +}; + +struct dmabuf_token { + __u32 token_start; + __u32 token_count; +}; + +struct linger { + int l_onoff; + int l_linger; +}; + +struct so_timestamping { + int flags; + int bind_phc; +}; + +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = 1, + SOF_TXTIME_REPORT_ERRORS = 2, + SOF_TXTIME_FLAGS_LAST = 2, + SOF_TXTIME_FLAGS_MASK = 3, +}; + +struct sock_txtime { + __kernel_clockid_t clockid; + __u32 flags; +}; + +enum { + 
ETHTOOL_A_BITSET_BIT_UNSPEC = 0, + ETHTOOL_A_BITSET_BIT_INDEX = 1, + ETHTOOL_A_BITSET_BIT_NAME = 2, + ETHTOOL_A_BITSET_BIT_VALUE = 3, + __ETHTOOL_A_BITSET_BIT_CNT = 4, + ETHTOOL_A_BITSET_BIT_MAX = 3, +}; + +enum { + ETHTOOL_A_BITSET_BITS_UNSPEC = 0, + ETHTOOL_A_BITSET_BITS_BIT = 1, + __ETHTOOL_A_BITSET_BITS_CNT = 2, + ETHTOOL_A_BITSET_BITS_MAX = 1, +}; + +enum { + ETHTOOL_A_BITSET_UNSPEC = 0, + ETHTOOL_A_BITSET_NOMASK = 1, + ETHTOOL_A_BITSET_SIZE = 2, + ETHTOOL_A_BITSET_BITS = 3, + ETHTOOL_A_BITSET_VALUE = 4, + ETHTOOL_A_BITSET_MASK = 5, + __ETHTOOL_A_BITSET_CNT = 6, + ETHTOOL_A_BITSET_MAX = 5, +}; + +enum ethtool_reset_flags { + ETH_RESET_MGMT = 1, + ETH_RESET_IRQ = 2, + ETH_RESET_DMA = 4, + ETH_RESET_FILTER = 8, + ETH_RESET_OFFLOAD = 16, + ETH_RESET_MAC = 32, + ETH_RESET_PHY = 64, + ETH_RESET_RAM = 128, + ETH_RESET_AP = 256, + ETH_RESET_DEDICATED = 65535, + ETH_RESET_ALL = 4294967295, +}; + +struct cmis_fw_update_fw_mng_features { + u8 start_cmd_payload_size; + u8 write_mechanism; + u16 max_duration_start; + u16 max_duration_write; + u16 max_duration_complete; +}; + +struct cmis_cdb_fw_mng_features_rpl { + u8 resv1; + u8 resv2; + u8 start_cmd_payload_size; + u8 resv3; + u8 read_write_len_ext; + u8 write_mechanism; + u8 resv4; + u8 resv5; + __be16 max_duration_start; + __be16 resv6; + __be16 max_duration_write; + __be16 max_duration_complete; + __be16 resv7; +}; + +enum cmis_cdb_fw_write_mechanism { + CMIS_CDB_FW_WRITE_MECHANISM_NONE = 0, + CMIS_CDB_FW_WRITE_MECHANISM_LPL = 1, + CMIS_CDB_FW_WRITE_MECHANISM_EPL = 16, + CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 17, +}; + +struct cmis_cdb_start_fw_download_pl_h { + __be32 image_size; + __be32 resv1; +}; + +struct cmis_cdb_start_fw_download_pl { + union { + struct { + __be32 image_size; + __be32 resv1; + }; + struct cmis_cdb_start_fw_download_pl_h head; + }; + u8 vendor_data[112]; +}; + +struct cmis_cdb_write_fw_block_lpl_pl { + __be32 block_address; + u8 fw_block[116]; +}; + +struct cmis_cdb_write_fw_block_epl_pl { + u8 fw_block[2048]; +}; + +enum { + CMIS_MODULE_LOW_PWR = 1, + CMIS_MODULE_READY = 3, +}; + +struct cmis_cdb_run_fw_image_pl { + u8 resv1; + u8 image_to_run; + u16 delay_to_reset; +}; + +struct xt_ecn_info { + __u8 operation; + __u8 invert; + __u8 ip_ect; + union { + struct { + __u8 ect; + } tcp; + } proto; +}; + +struct ip6t_ip6 { + struct in6_addr src; + struct in6_addr dst; + struct in6_addr smsk; + struct in6_addr dmsk; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u16 proto; + __u8 tos; + __u8 flags; + __u8 invflags; +}; + +struct nduseroptmsg { + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + short unsigned int nduseropt_opts_len; + int nduseropt_ifindex; + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + short unsigned int nduseropt_pad2; + unsigned int nduseropt_pad3; +}; + +enum { + NDUSEROPT_UNSPEC = 0, + NDUSEROPT_SRCADDR = 1, + __NDUSEROPT_MAX = 2, +}; + +struct rs_msg { + struct icmp6hdr icmph; + __u8 opt[0]; +}; + +struct ra_msg { + struct icmp6hdr icmph; + __be32 reachable_time; + __be32 retrans_timer; +}; + +enum ieee80211_vht_opmode_bits { + IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3, + IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 4, + IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 112, + IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4, + 
IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 128, +}; + +struct ieee80211_ext { + __le16 frame_control; + __le16 duration; + union { + struct { + u8 sa[6]; + __le32 timestamp; + u8 change_seq; + u8 variable[0]; + } __attribute__((packed)) s1g_beacon; + struct { + u8 sa[6]; + __le32 timestamp; + u8 change_seq; + u8 next_tbtt[3]; + u8 variable[0]; + } __attribute__((packed)) s1g_short_beacon; + } u; +}; + +struct ieee80211_pspoll { + __le16 frame_control; + __le16 aid; + u8 bssid[6]; + u8 ta[6]; +}; + +enum ieee80211_p2p_attr_id { + IEEE80211_P2P_ATTR_STATUS = 0, + IEEE80211_P2P_ATTR_MINOR_REASON = 1, + IEEE80211_P2P_ATTR_CAPABILITY = 2, + IEEE80211_P2P_ATTR_DEVICE_ID = 3, + IEEE80211_P2P_ATTR_GO_INTENT = 4, + IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT = 5, + IEEE80211_P2P_ATTR_LISTEN_CHANNEL = 6, + IEEE80211_P2P_ATTR_GROUP_BSSID = 7, + IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING = 8, + IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR = 9, + IEEE80211_P2P_ATTR_MANAGABILITY = 10, + IEEE80211_P2P_ATTR_CHANNEL_LIST = 11, + IEEE80211_P2P_ATTR_ABSENCE_NOTICE = 12, + IEEE80211_P2P_ATTR_DEVICE_INFO = 13, + IEEE80211_P2P_ATTR_GROUP_INFO = 14, + IEEE80211_P2P_ATTR_GROUP_ID = 15, + IEEE80211_P2P_ATTR_INTERFACE = 16, + IEEE80211_P2P_ATTR_OPER_CHANNEL = 17, + IEEE80211_P2P_ATTR_INVITE_FLAGS = 18, + IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221, + IEEE80211_P2P_ATTR_MAX = 222, +}; + +struct ieee80211_he_6ghz_oper { + u8 primary; + u8 control; + u8 ccfs0; + u8 ccfs1; + u8 minrate; +}; + +struct ieee80211_country_ie_triplet { + union { + struct { + u8 first_channel; + u8 num_channels; + s8 max_power; + } chans; + struct { + u8 reg_extension_id; + u8 reg_class; + u8 coverage_class; + } ext; + }; +}; + +enum ieee80211_timeout_interval_type { + WLAN_TIMEOUT_REASSOC_DEADLINE = 1, + WLAN_TIMEOUT_KEY_LIFETIME = 2, + WLAN_TIMEOUT_ASSOC_COMEBACK = 3, +}; + +enum ieee80211_idle_options { + WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE = 1, +}; + +struct ieee80211_neighbor_ap_info { + u8 tbtt_info_hdr; + u8 tbtt_info_len; + u8 op_class; + u8 channel; +}; + +struct ieee80211_rnr_mld_params { + u8 mld_id; + __le16 params; +} __attribute__((packed)); + +struct ieee80211_tbtt_info_ge_11 { + u8 tbtt_offset; + u8 bssid[6]; + __le32 short_ssid; + u8 bss_params; + s8 psd_20; + struct ieee80211_rnr_mld_params mld_params; +} __attribute__((packed)); + +enum ieee80211_mle_subelems { + IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0, + IEEE80211_MLE_SUBELEM_FRAGMENT = 254, +}; + +enum cfg80211_rnr_iter_ret { + RNR_ITER_CONTINUE = 0, + RNR_ITER_BREAK = 1, + RNR_ITER_ERROR = 2, +}; + +struct ieee80211_csa_ie { + struct ieee80211_chan_req chanreq; + u8 mode; + u8 count; + u8 ttl; + u16 pre_value; + u16 reason_code; + u32 max_switch_time; +}; + +enum ieee80211_elems_parse_error { + IEEE80211_PARSE_ERR_INVALID_END = 1, + IEEE80211_PARSE_ERR_DUP_ELEM = 2, + IEEE80211_PARSE_ERR_BAD_ELEM_SIZE = 4, + IEEE80211_PARSE_ERR_UNEXPECTED_ELEM = 8, + IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC = 16, +}; + +struct sta_csa_rnr_iter_data { + struct ieee80211_link_data *link; + struct ieee80211_channel *chan; + u8 mld_id; +}; + +enum ieee80211_csa_source { + IEEE80211_CSA_SOURCE_BEACON = 0, + IEEE80211_CSA_SOURCE_OTHER_LINK = 1, + IEEE80211_CSA_SOURCE_PROT_ACTION = 2, + IEEE80211_CSA_SOURCE_UNPROT_ACTION = 3, +}; + +struct sta_bss_param_ch_cnt_data { + struct ieee80211_sub_if_data *sdata; + u8 reporting_link_id; + u8 mld_id; +}; + +enum assoc_status { + ASSOC_SUCCESS = 0, + ASSOC_REJECTED = 1, + ASSOC_TIMEOUT = 2, + ASSOC_ABANDON = 3, +}; + +struct trace_event_raw_dma_map { + struct trace_entry ent; 
+ u32 __data_loc_device; + long: 32; + u64 phys_addr; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_dma_unmap { + struct trace_entry ent; + u32 __data_loc_device; + long: 32; + u64 addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_dma_alloc_class { + struct trace_entry ent; + u32 __data_loc_device; + void *virt_addr; + u64 dma_addr; + size_t size; + gfp_t flags; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_alloc_sgt { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + gfp_t flags; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_free_class { + struct trace_entry ent; + u32 __data_loc_device; + void *virt_addr; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_dma_free_sgt { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_raw_dma_map_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u32 __data_loc_dma_addrs; + u32 __data_loc_lengths; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_map_sg_err { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + int err; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_unmap_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_addrs; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_sync_single { + struct trace_entry ent; + u32 __data_loc_device; + long: 32; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_raw_dma_sync_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_dma_addrs; + u32 __data_loc_lengths; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_map { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_unmap { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_alloc_class { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_alloc_sgt { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_free_class { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_free_sgt { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_map_sg { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; + u32 dma_addrs; + const void *dma_addrs_ptr_; + u32 lengths; + const void *lengths_ptr_; +}; + +struct trace_event_data_offsets_dma_map_sg_err { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_unmap_sg { + u32 device; + const void *device_ptr_; + u32 addrs; + const void *addrs_ptr_; 
+}; + +struct trace_event_data_offsets_dma_sync_single { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_sync_sg { + u32 device; + const void *device_ptr_; + u32 dma_addrs; + const void *dma_addrs_ptr_; + u32 lengths; + const void *lengths_ptr_; +}; + +typedef void (*btf_trace_dma_map_page)(void *, struct device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_map_resource)(void *, struct device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_page)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_resource)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_alloc)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_pages)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_sgt_err)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_sgt)(void *, struct device *, struct sg_table *, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_free)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_free_pages)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_free_sgt)(void *, struct device *, struct sg_table *, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_map_sg)(void *, struct device *, struct scatterlist *, int, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_map_sg_err)(void *, struct device *, struct scatterlist *, int, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_sg)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_sync_single_for_cpu)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_single_for_device)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_sg_for_cpu)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_sg_for_device)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); + +struct dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; + long unsigned int attrs; +}; + +struct futex_waitv { + __u64 val; + __u64 uaddr; + __u32 flags; + __u32 __reserved; +}; + +struct futex_vector { + struct futex_waitv w; + struct futex_q q; +}; + +enum { + TASKSTATS_TYPE_UNSPEC = 0, + TASKSTATS_TYPE_PID = 1, + TASKSTATS_TYPE_TGID = 2, + TASKSTATS_TYPE_STATS = 3, + TASKSTATS_TYPE_AGGR_PID = 4, + TASKSTATS_TYPE_AGGR_TGID = 5, + TASKSTATS_TYPE_NULL = 6, + __TASKSTATS_TYPE_MAX = 7, +}; + +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID = 1, + TASKSTATS_CMD_ATTR_TGID = 2, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, + __TASKSTATS_CMD_ATTR_MAX = 5, +}; + +enum { + 
CGROUPSTATS_CMD_UNSPEC = 3, + CGROUPSTATS_CMD_GET = 4, + CGROUPSTATS_CMD_NEW = 5, + __CGROUPSTATS_CMD_MAX = 6, +}; + +enum { + CGROUPSTATS_TYPE_UNSPEC = 0, + CGROUPSTATS_TYPE_CGROUP_STATS = 1, + __CGROUPSTATS_TYPE_MAX = 2, +}; + +enum { + CGROUPSTATS_CMD_ATTR_UNSPEC = 0, + CGROUPSTATS_CMD_ATTR_FD = 1, + __CGROUPSTATS_CMD_ATTR_MAX = 2, +}; + +struct listener { + struct list_head list; + pid_t pid; + char valid; +}; + +struct listener_list { + struct rw_semaphore sem; + struct list_head list; +}; + +enum actions { + REGISTER = 0, + DEREGISTER = 1, + CPU_DONT_CARE = 2, +}; + +struct bpf_bloom_filter { + struct bpf_map map; + u32 bitset_mask; + u32 hash_seed; + u32 nr_hash_funcs; + long unsigned int bitset[0]; + long: 32; +}; + +enum btf_field_iter_kind { + BTF_FIELD_ITER_IDS = 0, + BTF_FIELD_ITER_STRS = 1, +}; + +struct btf_field_desc { + int t_off_cnt; + int t_offs[2]; + int m_sz; + int m_off_cnt; + int m_offs[1]; +}; + +struct btf_field_iter { + struct btf_field_desc desc; + void *p; + int m_idx; + int off_idx; + int vlen; +}; + +struct btf_relocate { + struct btf *btf; + const struct btf *base_btf; + const struct btf *dist_base_btf; + unsigned int nr_base_types; + unsigned int nr_split_types; + unsigned int nr_dist_base_types; + int dist_str_len; + int base_str_len; + __u32 *id_map; + __u32 *str_map; +}; + +struct btf_name_info { + const char *name; + bool needs_size: 1; + unsigned int size: 31; + __u32 id; +}; + +struct bpf_iter_kmem_cache { + __u64 __opaque[1]; +}; + +struct bpf_iter_kmem_cache_kern { + struct kmem_cache *pos; + long: 32; +}; + +struct bpf_iter__kmem_cache { + union { + struct bpf_iter_meta *meta; + }; + union { + struct kmem_cache *s; + }; +}; + +union kmem_cache_iter_priv { + struct bpf_iter_kmem_cache it; + struct bpf_iter_kmem_cache_kern kit; +}; + +enum lruvec_flags { + LRUVEC_CGROUP_CONGESTED = 0, + LRUVEC_NODE_CONGESTED = 1, +}; + +enum pgdat_flags { + PGDAT_DIRTY = 0, + PGDAT_WRITEBACK = 1, + PGDAT_RECLAIM_LOCKED = 2, +}; + +struct reclaim_stat { + unsigned int nr_dirty; + unsigned int nr_unqueued_dirty; + unsigned int nr_congested; + unsigned int nr_writeback; + unsigned int nr_immediate; + unsigned int nr_pageout; + unsigned int nr_activate[2]; + unsigned int nr_ref_keep; + unsigned int nr_unmap_fail; + unsigned int nr_lazyfree_fail; + unsigned int nr_demoted; +}; + +struct trace_event_raw_mm_vmscan_kswapd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_kswapd_wake { + struct trace_entry ent; + int nid; + int zid; + int order; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_wakeup_kswapd { + struct trace_entry ent; + int nid; + int zid; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { + struct trace_entry ent; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_end_template { + struct trace_entry ent; + long unsigned int nr_reclaimed; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_start { + struct trace_entry ent; + struct shrinker *shr; + void *shrink; + int nid; + long int nr_objects_to_shrink; + long unsigned int gfp_flags; + long unsigned int cache_items; + long long unsigned int delta; + long unsigned int total_scan; + int priority; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_end { + struct trace_entry ent; + struct shrinker *shr; + int nid; + void *shrink; + long int unused_scan; + long int new_scan; + int 
retval; + long int total_scan; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_isolate { + struct trace_entry ent; + int highest_zoneidx; + int order; + long unsigned int nr_requested; + long unsigned int nr_scanned; + long unsigned int nr_skipped; + long unsigned int nr_taken; + int lru; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_write_folio { + struct trace_entry ent; + long unsigned int pfn; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_reclaim_pages { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_inactive { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_active { + struct trace_entry ent; + int nid; + long unsigned int nr_taken; + long unsigned int nr_active; + long unsigned int nr_deactivated; + long unsigned int nr_referenced; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_node_reclaim_begin { + struct trace_entry ent; + int nid; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_throttled { + struct trace_entry ent; + int nid; + int usec_timeout; + int usec_delayed; + int reason; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; + +struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; + +struct trace_event_data_offsets_mm_shrink_slab_start {}; + +struct trace_event_data_offsets_mm_shrink_slab_end {}; + +struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; + +struct trace_event_data_offsets_mm_vmscan_write_folio {}; + +struct trace_event_data_offsets_mm_vmscan_reclaim_pages {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; + +struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {}; + +struct trace_event_data_offsets_mm_vmscan_throttled {}; + +typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int); + +typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int); + +typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, long unsigned int); + +typedef 
void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, struct shrink_control *, long int, long unsigned int, long long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long int, long int, long int); + +typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_vmscan_write_folio)(void *, struct folio *); + +typedef void (*btf_trace_mm_vmscan_reclaim_pages)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *, int, int); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int, int); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_throttled)(void *, int, int, int, int); + +struct scan_control { + long unsigned int nr_to_reclaim; + nodemask_t *nodemask; + struct mem_cgroup *target_mem_cgroup; + long unsigned int anon_cost; + long unsigned int file_cost; + int *proactive_swappiness; + unsigned int may_deactivate: 2; + unsigned int force_deactivate: 1; + unsigned int skipped_deactivate: 1; + unsigned int may_writepage: 1; + unsigned int may_unmap: 1; + unsigned int may_swap: 1; + unsigned int no_cache_trim_mode: 1; + unsigned int cache_trim_mode_failed: 1; + unsigned int proactive: 1; + unsigned int memcg_low_reclaim: 1; + unsigned int memcg_low_skipped: 1; + unsigned int memcg_full_walk: 1; + unsigned int hibernation_mode: 1; + unsigned int compaction_ready: 1; + unsigned int cache_trim_mode: 1; + unsigned int file_is_tiny: 1; + unsigned int no_demotion: 1; + s8 order; + s8 priority; + s8 reclaim_idx; + gfp_t gfp_mask; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + struct { + unsigned int dirty; + unsigned int unqueued_dirty; + unsigned int congested; + unsigned int writeback; + unsigned int immediate; + unsigned int file_taken; + unsigned int taken; + } nr; + struct reclaim_state reclaim_state; +}; + +typedef enum { + PAGE_KEEP = 0, + PAGE_ACTIVATE = 1, + PAGE_SUCCESS = 2, + PAGE_CLEAN = 3, +} pageout_t; + +enum folio_references { + FOLIOREF_RECLAIM = 0, + FOLIOREF_RECLAIM_CLEAN = 1, + FOLIOREF_KEEP = 2, + FOLIOREF_ACTIVATE = 3, +}; + +enum scan_balance { + SCAN_EQUAL = 0, + SCAN_FRACT = 1, + SCAN_ANON = 2, + SCAN_FILE = 3, +}; + +struct qlist_head { + struct qlist_node *head; + struct qlist_node *tail; + size_t bytes; + bool offline; +}; + +struct cpu_shrink_qlist { + raw_spinlock_t lock; + struct qlist_head qlist; +}; + +struct proc_fs_opts { + int flag; + const char *str; +}; + +enum handle_to_path_flags { + HANDLE_CHECK_PERMS = 1, + HANDLE_CHECK_SUBTREE = 2, +}; + +struct handle_to_path_ctx { + struct path root; + enum handle_to_path_flags flags; + unsigned int fh_flags; +}; + +struct mmp_struct { + __le32 mmp_magic; + __le32 mmp_seq; + __le64 mmp_time; + char mmp_nodename[64]; + char mmp_bdevname[32]; + __le16 mmp_check_interval; + __le16 mmp_pad1; + __le32 mmp_pad2[226]; + __le32 mmp_checksum; +}; + +struct jbd2_revoke_table_s { + int hash_size; + int hash_shift; + struct 
list_head *hash_table; +}; + +struct jbd2_revoke_record_s { + struct list_head hash; + tid_t sequence; + long: 32; + long long unsigned int blocknr; +}; + +struct fuse_attr { + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint32_t rdev; + uint32_t blksize; + uint32_t flags; +}; + +struct fuse_sx_time { + int64_t tv_sec; + uint32_t tv_nsec; + int32_t __reserved; +}; + +struct fuse_statx { + uint32_t mask; + uint32_t blksize; + uint64_t attributes; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint16_t mode; + uint16_t __spare0[1]; + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t attributes_mask; + struct fuse_sx_time atime; + struct fuse_sx_time btime; + struct fuse_sx_time ctime; + struct fuse_sx_time mtime; + uint32_t rdev_major; + uint32_t rdev_minor; + uint32_t dev_major; + uint32_t dev_minor; + uint64_t __spare2[14]; +}; + +enum fuse_ext_type { + FUSE_MAX_NR_SECCTX = 31, + FUSE_EXT_GROUPS = 32, +}; + +struct fuse_entry_out { + uint64_t nodeid; + uint64_t generation; + uint64_t entry_valid; + uint64_t attr_valid; + uint32_t entry_valid_nsec; + uint32_t attr_valid_nsec; + struct fuse_attr attr; +}; + +struct fuse_getattr_in { + uint32_t getattr_flags; + uint32_t dummy; + uint64_t fh; +}; + +struct fuse_attr_out { + uint64_t attr_valid; + uint32_t attr_valid_nsec; + uint32_t dummy; + struct fuse_attr attr; +}; + +struct fuse_statx_in { + uint32_t getattr_flags; + uint32_t reserved; + uint64_t fh; + uint32_t sx_flags; + uint32_t sx_mask; +}; + +struct fuse_statx_out { + uint64_t attr_valid; + uint32_t attr_valid_nsec; + uint32_t flags; + uint64_t spare[2]; + struct fuse_statx stat; +}; + +struct fuse_mknod_in { + uint32_t mode; + uint32_t rdev; + uint32_t umask; + uint32_t padding; +}; + +struct fuse_mkdir_in { + uint32_t mode; + uint32_t umask; +}; + +struct fuse_rename2_in { + uint64_t newdir; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_link_in { + uint64_t oldnodeid; +}; + +struct fuse_setattr_in { + uint32_t valid; + uint32_t padding; + uint64_t fh; + uint64_t size; + uint64_t lock_owner; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t unused4; + uint32_t uid; + uint32_t gid; + uint32_t unused5; +}; + +struct fuse_create_in { + uint32_t flags; + uint32_t mode; + uint32_t umask; + uint32_t open_flags; +}; + +struct fuse_access_in { + uint32_t mask; + uint32_t padding; +}; + +struct fuse_secctx { + uint32_t size; + uint32_t padding; +}; + +struct fuse_secctx_header { + uint32_t size; + uint32_t nr_secctx; +}; + +struct fuse_ext_header { + uint32_t size; + uint32_t type; +}; + +struct fuse_supp_groups { + uint32_t nr_groups; + uint32_t groups[0]; +}; + +union fuse_dentry { + u64 time; + struct callback_head rcu; +}; + +struct workspace___3 { + void *mem; + void *buf; + void *cbuf; + struct list_head list; +}; + +struct tree_mod_root { + u64 logical; + u8 level; + long: 32; +}; + +struct tree_mod_elem { + struct rb_node node; + long: 32; + u64 logical; + u64 seq; + enum btrfs_mod_log_op op; + int slot; + u64 generation; + struct btrfs_disk_key key; + long: 32; + u64 blockptr; + struct { + int dst_slot; + int nr_items; + } move; + struct tree_mod_root old_root; +}; + +struct ipc_proc_iface { + const char *path; + const char *header; + int ids; + int (*show)(struct 
seq_file *, void *); +}; + +struct ipc_proc_iter { + struct ipc_namespace *ns; + struct pid_namespace *pid_ns; + struct ipc_proc_iface *iface; +}; + +struct acomp_alg { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + int (*init)(struct crypto_acomp *); + void (*exit)(struct crypto_acomp *); + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + struct crypto_alg base; + }; + struct comp_alg_common calg; + }; +}; + +struct sg_io_v4 { + __s32 guard; + __u32 protocol; + __u32 subprotocol; + __u32 request_len; + __u64 request; + __u64 request_tag; + __u32 request_attr; + __u32 request_priority; + __u32 request_extra; + __u32 max_response_len; + __u64 response; + __u32 dout_iovec_count; + __u32 dout_xfer_len; + __u32 din_iovec_count; + __u32 din_xfer_len; + __u64 dout_xferp; + __u64 din_xferp; + __u32 timeout; + __u32 flags; + __u64 usr_ptr; + __u32 spare_in; + __u32 driver_status; + __u32 transport_status; + __u32 device_status; + __u32 retry_delay; + __u32 info; + __u32 duration; + __u32 response_len; + __s32 din_resid; + __s32 dout_resid; + __u64 generated_tag; + __u32 spare_out; + __u32 padding; +}; + +typedef int bsg_sg_io_fn(struct request_queue *, struct sg_io_v4 *, bool, unsigned int); + +struct bsg_device { + struct request_queue *queue; + long: 32; + struct device device; + struct cdev cdev; + int max_queue; + unsigned int timeout; + unsigned int reserved_size; + bsg_sg_io_fn *sg_io_fn; + long: 32; +}; + +struct io_uring_rsrc_register { + __u32 nr; + __u32 flags; + __u64 resv2; + __u64 data; + __u64 tags; +}; + +struct io_uring_rsrc_update2 { + __u32 offset; + __u32 resv; + __u64 data; + __u64 tags; + __u32 nr; + __u32 resv2; +}; + +enum { + IORING_REGISTER_SRC_REGISTERED = 1, + IORING_REGISTER_DST_REPLACE = 2, +}; + +struct io_uring_clone_buffers { + __u32 src_fd; + __u32 flags; + __u32 src_off; + __u32 dst_off; + __u32 nr; + __u32 pad[3]; +}; + +struct io_imu_folio_data { + unsigned int nr_pages_head; + unsigned int nr_pages_mid; + unsigned int folio_shift; +}; + +struct io_rsrc_update { + struct file *file; + long: 32; + u64 arg; + u32 nr_args; + u32 offset; +}; + +struct io_futex { + struct file *file; + union { + u32 *uaddr; + struct futex_waitv *uwaitv; + }; + long unsigned int futex_val; + long unsigned int futex_mask; + long unsigned int futexv_owned; + u32 futex_flags; + unsigned int futex_nr; + bool futexv_unqueued; +}; + +struct io_futex_data { + struct futex_q q; + struct io_kiocb *req; + long: 32; +}; + +struct inflate_workspace { + struct inflate_state inflate_state; + unsigned char working_window[32768]; +}; + +typedef struct { + U32 f1c; + U32 f1d; + U32 f7b; + U32 f7c; +} ZSTD_cpuid_t; + +typedef enum { + ZSTD_e_continue = 0, + ZSTD_e_flush = 1, + ZSTD_e_end = 2, +} ZSTD_EndDirective; + +typedef struct { + long long unsigned int ingested; + long long unsigned int consumed; + long long unsigned int produced; + long long unsigned int flushed; + unsigned int currentJobID; + unsigned int nbActiveWorkers; +} ZSTD_frameProgression; + +typedef enum { + ZSTD_cpm_noAttachDict = 0, + ZSTD_cpm_attachDict = 1, + ZSTD_cpm_createCDict = 2, + ZSTD_cpm_unknown = 3, +} ZSTD_cParamMode_e; + +typedef enum { + ZSTDcrp_makeClean = 0, + ZSTDcrp_leaveDirty = 1, +} ZSTD_compResetPolicy_e; + +typedef enum { + ZSTDirp_continue = 0, + ZSTDirp_reset = 1, +} ZSTD_indexResetPolicy_e; + +typedef enum { + 
ZSTD_resetTarget_CDict = 0, + ZSTD_resetTarget_CCtx = 1, +} ZSTD_resetTarget_e; + +typedef struct { + U32 LLtype; + U32 Offtype; + U32 MLtype; + size_t size; + size_t lastCountSize; +} ZSTD_symbolEncodingTypeStats_t; + +enum { + ZSTDbss_compress = 0, + ZSTDbss_noCompress = 1, +}; + +typedef struct { + U32 *splitLocations; + size_t idx; +} seqStoreSplits; + +typedef struct { + U32 idx; + U32 posInSequence; + size_t posInSrc; +} ZSTD_sequencePosition; + +typedef size_t (*ZSTD_sequenceCopier)(ZSTD_CCtx *, ZSTD_sequencePosition *, const ZSTD_Sequence * const, size_t, const void *, size_t); + +struct n_tty_data { + size_t read_head; + size_t commit_head; + size_t canon_head; + size_t echo_head; + size_t echo_commit; + size_t echo_mark; + long unsigned int char_map[8]; + long unsigned int overrun_time; + unsigned int num_overrun; + bool no_room; + unsigned char lnext: 1; + unsigned char erasing: 1; + unsigned char raw: 1; + unsigned char real_raw: 1; + unsigned char icanon: 1; + unsigned char push: 1; + u8 read_buf[4096]; + long unsigned int read_flags[128]; + u8 echo_buf[4096]; + size_t read_tail; + size_t line_start; + size_t lookahead_count; + unsigned int column; + unsigned int canon_column; + size_t echo_tail; + struct mutex atomic_read_lock; + struct mutex output_lock; +}; + +enum { + ERASE = 0, + WERASE = 1, + KILL = 2, +}; + +struct pci1xxxx_8250 { + unsigned int nr; + u8 dev_rev; + u8 pad[3]; + void *membase; + int line[0]; +}; + +struct request_sense; + +struct cdrom_generic_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct request_sense *sense; + unsigned char data_direction; + int quiet; + int timeout; + union { + void *reserved[1]; + void *unused; + }; +}; + +struct request_sense { + __u8 error_code: 7; + __u8 valid: 1; + __u8 segment_number; + __u8 sense_key: 4; + __u8 reserved2: 1; + __u8 ili: 1; + __u8 reserved1: 2; + __u8 information[4]; + __u8 add_sense_len; + __u8 command_info[4]; + __u8 asc; + __u8 ascq; + __u8 fruc; + __u8 sks[3]; + __u8 asb[46]; +}; + +struct scsi_ioctl_command { + unsigned int inlen; + unsigned int outlen; + unsigned char data[0]; +}; + +struct scsi_idlun { + __u32 dev_id; + __u32 host_unique_id; +}; + +struct mdio_board_entry { + struct list_head list; + struct mdio_board_info board_info; +}; + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct iwl_binding_cmd { + __le32 id_and_color; + __le32 action; + __le32 macs[3]; + __le32 phy; + __le32 lmac_id; +}; + +struct iwl_mvm_iface_iterator_data { + struct ieee80211_vif *ignore_vif; + int idx; + struct iwl_mvm_phy_ctxt *phyctxt; + u16 ids[3]; + u16 colors[3]; +}; + +struct find_interface_arg { + int minor; + struct device_driver *drv; +}; + +struct each_dev_arg { + void *data; + int (*fn)(struct usb_device *, void *); +}; + +enum xfer_buf_dir { + TO_XFER_BUF = 0, + FROM_XFER_BUF = 1, +}; + +struct i2c_devinfo { + struct list_head list; + int busnum; + struct i2c_board_info board_info; +}; + +enum dmi_entry_type { + DMI_ENTRY_BIOS = 0, + DMI_ENTRY_SYSTEM = 1, + DMI_ENTRY_BASEBOARD = 2, + DMI_ENTRY_CHASSIS = 3, + DMI_ENTRY_PROCESSOR = 4, + DMI_ENTRY_MEM_CONTROLLER = 5, + DMI_ENTRY_MEM_MODULE = 6, + DMI_ENTRY_CACHE = 7, + DMI_ENTRY_PORT_CONNECTOR = 8, + DMI_ENTRY_SYSTEM_SLOT = 9, + DMI_ENTRY_ONBOARD_DEVICE = 10, + DMI_ENTRY_OEMSTRINGS = 11, + DMI_ENTRY_SYSCONF = 12, + DMI_ENTRY_BIOS_LANG = 13, + DMI_ENTRY_GROUP_ASSOC = 14, + DMI_ENTRY_SYSTEM_EVENT_LOG = 15, + 
DMI_ENTRY_PHYS_MEM_ARRAY = 16, + DMI_ENTRY_MEM_DEVICE = 17, + DMI_ENTRY_32_MEM_ERROR = 18, + DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR = 19, + DMI_ENTRY_MEM_DEV_MAPPED_ADDR = 20, + DMI_ENTRY_BUILTIN_POINTING_DEV = 21, + DMI_ENTRY_PORTABLE_BATTERY = 22, + DMI_ENTRY_SYSTEM_RESET = 23, + DMI_ENTRY_HW_SECURITY = 24, + DMI_ENTRY_SYSTEM_POWER_CONTROLS = 25, + DMI_ENTRY_VOLTAGE_PROBE = 26, + DMI_ENTRY_COOLING_DEV = 27, + DMI_ENTRY_TEMP_PROBE = 28, + DMI_ENTRY_ELECTRICAL_CURRENT_PROBE = 29, + DMI_ENTRY_OOB_REMOTE_ACCESS = 30, + DMI_ENTRY_BIS_ENTRY = 31, + DMI_ENTRY_SYSTEM_BOOT = 32, + DMI_ENTRY_MGMT_DEV = 33, + DMI_ENTRY_MGMT_DEV_COMPONENT = 34, + DMI_ENTRY_MGMT_DEV_THRES = 35, + DMI_ENTRY_MEM_CHANNEL = 36, + DMI_ENTRY_IPMI_DEV = 37, + DMI_ENTRY_SYS_POWER_SUPPLY = 38, + DMI_ENTRY_ADDITIONAL = 39, + DMI_ENTRY_ONBOARD_DEV_EXT = 40, + DMI_ENTRY_MGMT_CONTROLLER_HOST = 41, + DMI_ENTRY_INACTIVE = 126, + DMI_ENTRY_END_OF_TABLE = 127, +}; + +struct dmi_header { + u8 type; + u8 length; + u16 handle; +}; + +struct dmi_memdev_info { + const char *device; + const char *bank; + u64 size; + u16 handle; + u8 type; + long: 32; +}; + +struct rmem_assigned_device { + struct device *dev; + struct reserved_mem *rmem; + struct list_head list; +}; + +struct linkinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; +}; + +enum tcp_fastopen_client_fail { + TFO_STATUS_UNSPEC = 0, + TFO_COOKIE_UNAVAILABLE = 1, + TFO_DATA_NOT_ACKED = 2, + TFO_SYN_RETRANSMITTED = 3, +}; + +enum tcp_ca_ack_event_flags { + CA_ACK_SLOWPATH = 1, + CA_ACK_WIN_UPDATE = 2, + CA_ACK_ECE = 4, +}; + +struct tcp_sacktag_state { + u64 first_sackt; + u64 last_sackt; + u32 reord; + u32 sack_delivered; + int flag; + unsigned int mss_now; + struct rate_sample *rate; + long: 32; +}; + +enum { + LWTUNNEL_XMIT_DONE = 0, + LWTUNNEL_XMIT_CONTINUE = 256, +}; + +struct sockaddr_pkt { + short unsigned int spkt_family; + unsigned char spkt_device[14]; + __be16 spkt_protocol; +}; + +struct sockaddr_ll { + short unsigned int sll_family; + __be16 sll_protocol; + int sll_ifindex; + short unsigned int sll_hatype; + unsigned char sll_pkttype; + unsigned char sll_halen; + unsigned char sll_addr[8]; +}; + +struct tpacket_stats { + unsigned int tp_packets; + unsigned int tp_drops; +}; + +struct tpacket_stats_v3 { + unsigned int tp_packets; + unsigned int tp_drops; + unsigned int tp_freeze_q_cnt; +}; + +struct tpacket_rollover_stats { + __u64 tp_all; + __u64 tp_huge; + __u64 tp_failed; +}; + +union tpacket_stats_u { + struct tpacket_stats stats1; + struct tpacket_stats_v3 stats3; +}; + +struct tpacket_auxdata { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; +}; + +struct tpacket_hdr { + long unsigned int tp_status; + unsigned int tp_len; + unsigned int tp_snaplen; + short unsigned int tp_mac; + short unsigned int tp_net; + unsigned int tp_sec; + unsigned int tp_usec; +}; + +struct tpacket2_hdr { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u32 tp_sec; + __u32 tp_nsec; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u8 tp_padding[4]; +}; + +struct tpacket_hdr_variant1 { + __u32 tp_rxhash; + __u32 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u16 tp_padding; +}; + +struct tpacket3_hdr { + __u32 tp_next_offset; + __u32 tp_sec; + __u32 tp_nsec; + __u32 tp_snaplen; + __u32 tp_len; + __u32 tp_status; + __u16 tp_mac; + __u16 tp_net; + union { + struct tpacket_hdr_variant1 hv1; + }; + __u8 
tp_padding[8]; +}; + +struct tpacket_bd_ts { + unsigned int ts_sec; + union { + unsigned int ts_usec; + unsigned int ts_nsec; + }; +}; + +struct tpacket_hdr_v1 { + __u32 block_status; + __u32 num_pkts; + __u32 offset_to_first_pkt; + __u32 blk_len; + __u64 seq_num; + struct tpacket_bd_ts ts_first_pkt; + struct tpacket_bd_ts ts_last_pkt; +}; + +union tpacket_bd_header_u { + struct tpacket_hdr_v1 bh1; +}; + +struct tpacket_block_desc { + __u32 version; + __u32 offset_to_priv; + union tpacket_bd_header_u hdr; +}; + +enum tpacket_versions { + TPACKET_V1 = 0, + TPACKET_V2 = 1, + TPACKET_V3 = 2, +}; + +struct tpacket_req { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; +}; + +struct tpacket_req3 { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; + unsigned int tp_retire_blk_tov; + unsigned int tp_sizeof_priv; + unsigned int tp_feature_req_word; +}; + +union tpacket_req_u { + struct tpacket_req req; + struct tpacket_req3 req3; +}; + +struct fanout_args { + __u16 id; + __u16 type_flags; + __u32 max_num_members; +}; + +struct virtio_net_hdr { + __u8 flags; + __u8 gso_type; + __virtio16 hdr_len; + __virtio16 gso_size; + __virtio16 csum_start; + __virtio16 csum_offset; +}; + +struct virtio_net_hdr_mrg_rxbuf { + struct virtio_net_hdr hdr; + __virtio16 num_buffers; +}; + +struct packet_mclist { + struct packet_mclist *next; + int ifindex; + int count; + short unsigned int type; + short unsigned int alen; + unsigned char addr[32]; +}; + +struct pgv; + +struct tpacket_kbdq_core { + struct pgv *pkbdq; + unsigned int feature_req_word; + unsigned int hdrlen; + unsigned char reset_pending_on_curr_blk; + unsigned char delete_blk_timer; + short unsigned int kactive_blk_num; + short unsigned int blk_sizeof_priv; + short unsigned int last_kactive_blk_num; + char *pkblk_start; + char *pkblk_end; + int kblk_size; + unsigned int max_frame_len; + unsigned int knum_blocks; + uint64_t knxt_seq_num; + char *prev; + char *nxt_offset; + struct sk_buff *skb; + rwlock_t blk_fill_in_prog_lock; + short unsigned int retire_blk_tov; + short unsigned int version; + long unsigned int tov_in_jiffies; + struct timer_list retire_blk_timer; + long: 32; +}; + +struct pgv { + char *buffer; +}; + +struct packet_ring_buffer { + struct pgv *pg_vec; + unsigned int head; + unsigned int frames_per_block; + unsigned int frame_size; + unsigned int frame_max; + unsigned int pg_vec_order; + unsigned int pg_vec_pages; + unsigned int pg_vec_len; + unsigned int *pending_refcnt; + long: 32; + union { + long unsigned int *rx_owner_map; + struct tpacket_kbdq_core prb_bdqc; + }; +}; + +struct packet_fanout { + possible_net_t net; + unsigned int num_members; + u32 max_num_members; + u16 id; + u8 type; + u8 flags; + union { + atomic_t rr_cur; + struct bpf_prog *bpf_prog; + }; + struct list_head list; + spinlock_t lock; + refcount_t sk_ref; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct packet_type prot_hook; + struct sock *arr[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct packet_rollover { + int sock; + atomic_long_t num; + atomic_long_t num_huge; + atomic_long_t num_failed; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u32 history[16]; +}; + +struct packet_sock { + struct sock sk; + struct packet_fanout *fanout; + union 
tpacket_stats_u stats; + struct packet_ring_buffer rx_ring; + struct packet_ring_buffer tx_ring; + int copy_thresh; + spinlock_t bind_lock; + struct mutex pg_vec_lock; + long unsigned int flags; + int ifindex; + u8 vnet_hdr_sz; + __be16 num; + struct packet_rollover *rollover; + struct packet_mclist *mclist; + atomic_long_t mapped; + enum tpacket_versions tp_version; + unsigned int tp_hdrlen; + unsigned int tp_reserve; + unsigned int tp_tstamp; + struct completion skb_completion; + struct net_device *cached_dev; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct packet_type prot_hook; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_t tp_drops; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum packet_sock_flags { + PACKET_SOCK_ORIGDEV = 0, + PACKET_SOCK_AUXDATA = 1, + PACKET_SOCK_TX_HAS_OFF = 2, + PACKET_SOCK_TP_LOSS = 3, + PACKET_SOCK_RUNNING = 4, + PACKET_SOCK_PRESSURE = 5, + PACKET_SOCK_QDISC_BYPASS = 6, +}; + +struct packet_mreq_max { + int mr_ifindex; + short unsigned int mr_type; + short unsigned int mr_alen; + unsigned char mr_address[32]; +}; + +union tpacket_uhdr { + struct tpacket_hdr *h1; + struct tpacket2_hdr *h2; + struct tpacket3_hdr *h3; + void *raw; +}; + +struct packet_skb_cb { + union { + struct sockaddr_pkt pkt; + union { + unsigned int origlen; + struct sockaddr_ll ll; + }; + } sa; +}; + +struct trace_vif_entry { + enum nl80211_iftype vif_type; + bool p2p; + char vif_name[16]; +} __attribute__((packed)); + +struct trace_chandef_entry { + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; +}; + +struct trace_switch_entry { + struct trace_vif_entry vif; + unsigned int link_id; + struct trace_chandef_entry old_chandef; + struct trace_chandef_entry new_chandef; +} __attribute__((packed)); + +struct trace_event_raw_local_only_evt { + struct trace_entry ent; + char wiphy_name[32]; + char __data[0]; +}; + +struct trace_event_raw_local_sdata_addr_evt { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char addr[6]; + char __data[0]; +}; + +struct trace_event_raw_local_u32_evt { + struct trace_entry ent; + char wiphy_name[32]; + u32 value; + char __data[0]; +}; + +struct trace_event_raw_local_sdata_evt { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char __data[0]; +}; + +struct trace_event_raw_drv_return_int { + struct trace_entry ent; + char wiphy_name[32]; + int ret; + char __data[0]; +}; + +struct trace_event_raw_drv_return_bool { + struct trace_entry ent; + char wiphy_name[32]; + bool ret; + char __data[0]; +}; + +struct trace_event_raw_drv_return_u32 { + struct trace_entry ent; + char wiphy_name[32]; + u32 ret; + char __data[0]; +}; + +struct trace_event_raw_drv_return_u64 { + struct trace_entry ent; + char wiphy_name[32]; + u64 ret; + char __data[0]; +}; + +struct trace_event_raw_drv_set_wakeup { + struct trace_entry ent; + char wiphy_name[32]; + bool enabled; + char __data[0]; +}; + +struct trace_event_raw_drv_stop { + struct trace_entry ent; + char wiphy_name[32]; + bool suspend; + char __data[0]; +}; + +struct trace_event_raw_drv_change_interface { + struct trace_entry 
ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 new_type; + bool new_p2p; + char __data[0]; +}; + +struct trace_event_raw_drv_config { + struct trace_entry ent; + char wiphy_name[32]; + u32 changed; + u32 flags; + int power_level; + int dynamic_ps_timeout; + u16 listen_interval; + u8 long_frame_max_tx_count; + u8 short_frame_max_tx_count; + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + int smps; + char __data[0]; +}; + +struct trace_event_raw_drv_vif_cfg_changed { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u64 changed; + bool assoc; + bool ibss_joined; + bool ibss_creator; + u16 aid; + u32 __data_loc_arp_addr_list; + int arp_addr_cnt; + u32 __data_loc_ssid; + int s1g; + bool idle; + bool ps; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_drv_link_info_changed { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u64 changed; + int link_id; + bool cts; + bool shortpre; + bool shortslot; + bool enable_beacon; + u8 dtimper; + u16 bcnint; + u16 assoc_cap; + u64 sync_tsf; + u32 sync_device_ts; + u8 sync_dtim_count; + u32 basic_rates; + int mcast_rate[6]; + u16 ht_operation_mode; + s32 cqm_rssi_thold; + s32 cqm_rssi_hyst; + u32 channel_width; + u32 channel_cfreq1; + u32 channel_cfreq1_offset; + bool qos; + bool hidden_ssid; + int txpower; + u8 p2p_oppps_ctwindow; + char __data[0]; +}; + +struct trace_event_raw_drv_prepare_multicast { + struct trace_entry ent; + char wiphy_name[32]; + int mc_count; + char __data[0]; +}; + +struct trace_event_raw_drv_configure_filter { + struct trace_entry ent; + char wiphy_name[32]; + unsigned int changed; + unsigned int total; + u64 multicast; + char __data[0]; +}; + +struct trace_event_raw_drv_config_iface_filter { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + unsigned int filter_flags; + unsigned int changed_flags; + char __data[0]; +}; + +struct trace_event_raw_drv_set_tim { + struct trace_entry ent; + char wiphy_name[32]; + char sta_addr[6]; + bool set; + char __data[0]; +}; + +struct trace_event_raw_drv_set_key { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + u32 cmd; + u32 cipher; + u8 hw_key_idx; + u8 flags; + s8 keyidx; + char __data[0]; +}; + +struct trace_event_raw_drv_update_tkip_key { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + u32 iv32; + char __data[0]; +}; + +struct trace_event_raw_drv_sw_scan_start { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char mac_addr[6]; + char __data[0]; +}; + +struct trace_event_raw_drv_get_stats { + struct trace_entry ent; + char wiphy_name[32]; + int ret; + unsigned int ackfail; + unsigned int rtsfail; + unsigned int fcserr; + unsigned int rtssucc; + char __data[0]; +}; + +struct trace_event_raw_drv_get_key_seq { + struct trace_entry ent; + char wiphy_name[32]; + u32 cipher; + u8 hw_key_idx; + u8 flags; + s8 keyidx; + char __data[0]; +}; + +struct trace_event_raw_drv_set_coverage_class { + 
struct trace_entry ent; + char wiphy_name[32]; + s16 value; + char __data[0]; +}; + +struct trace_event_raw_drv_sta_notify { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + u32 cmd; + char __data[0]; +}; + +struct trace_event_raw_drv_sta_state { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + u32 old_state; + u32 new_state; + char __data[0]; +}; + +struct trace_event_raw_drv_sta_set_txpwr { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + s16 txpwr; + u8 type; + char __data[0]; +}; + +struct trace_event_raw_drv_link_sta_rc_update { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + u32 changed; + u32 link_id; + char __data[0]; +}; + +struct trace_event_raw_sta_event { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + char __data[0]; +}; + +struct trace_event_raw_drv_conf_tx { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + unsigned int link_id; + u16 ac; + u16 txop; + u16 cw_min; + u16 cw_max; + u8 aifs; + bool uapsd; + char __data[0]; +}; + +struct trace_event_raw_drv_set_tsf { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u64 tsf; + char __data[0]; +}; + +struct trace_event_raw_drv_offset_tsf { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + s64 tsf_offset; + char __data[0]; +}; + +struct trace_event_raw_drv_ampdu_action { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + enum ieee80211_ampdu_mlme_action ieee80211_ampdu_mlme_action; + char sta_addr[6]; + u16 tid; + u16 ssn; + u16 buf_size; + bool amsdu; + u16 timeout; + u16 action; + char __data[0]; +}; + +struct trace_event_raw_drv_get_survey { + struct trace_entry ent; + char wiphy_name[32]; + int idx; + char __data[0]; +}; + +struct trace_event_raw_drv_flush { + struct trace_entry ent; + char wiphy_name[32]; + bool drop; + u32 queues; + char __data[0]; +}; + +struct trace_event_raw_chanswitch_evt { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u64 timestamp; + u32 device_timestamp; + bool block_tx; + u8 count; + u8 link_id; + char __data[0]; +}; + +struct trace_event_raw_drv_set_antenna { + struct trace_entry ent; + char wiphy_name[32]; + u32 tx_ant; + u32 rx_ant; + int ret; + char __data[0]; +}; + +struct trace_event_raw_drv_get_antenna { + struct trace_entry ent; + char wiphy_name[32]; + u32 tx_ant; + u32 rx_ant; + int ret; + char __data[0]; +}; + +struct trace_event_raw_drv_remain_on_channel { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + int center_freq; + int 
freq_offset; + unsigned int duration; + u32 type; + char __data[0]; +}; + +struct trace_event_raw_drv_set_ringparam { + struct trace_entry ent; + char wiphy_name[32]; + u32 tx; + u32 rx; + char __data[0]; +}; + +struct trace_event_raw_drv_get_ringparam { + struct trace_entry ent; + char wiphy_name[32]; + u32 tx; + u32 tx_max; + u32 rx; + u32 rx_max; + char __data[0]; +}; + +struct trace_event_raw_drv_set_bitrate_mask { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 legacy_2g; + u32 legacy_5g; + char __data[0]; +}; + +struct trace_event_raw_drv_set_rekey_data { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 kek[16]; + u8 kck[16]; + u8 replay_ctr[8]; + char __data[0]; +}; + +struct trace_event_raw_drv_event_callback { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 type; + char __data[0]; +}; + +struct trace_event_raw_release_evt { + struct trace_entry ent; + char wiphy_name[32]; + char sta_addr[6]; + u16 tids; + int num_frames; + int reason; + bool more_data; + char __data[0]; +}; + +struct trace_event_raw_mgd_prepare_complete_tx_evt { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 duration; + u16 subtype; + u8 success; + char __data[0]; +}; + +struct trace_event_raw_local_chanctx { + struct trace_entry ent; + char wiphy_name[32]; + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u32 min_control_freq; + u32 min_freq_offset; + u32 min_chan_width; + u32 min_center_freq1; + u32 min_freq1_offset; + u32 min_center_freq2; + u32 ap_control_freq; + u32 ap_freq_offset; + u32 ap_chan_width; + u32 ap_center_freq1; + u32 ap_freq1_offset; + u32 ap_center_freq2; + u8 rx_chains_static; + u8 rx_chains_dynamic; + char __data[0]; +}; + +struct trace_event_raw_drv_change_chanctx { + struct trace_entry ent; + char wiphy_name[32]; + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u32 min_control_freq; + u32 min_freq_offset; + u32 min_chan_width; + u32 min_center_freq1; + u32 min_freq1_offset; + u32 min_center_freq2; + u32 ap_control_freq; + u32 ap_freq_offset; + u32 ap_chan_width; + u32 ap_center_freq1; + u32 ap_freq1_offset; + u32 ap_center_freq2; + u8 rx_chains_static; + u8 rx_chains_dynamic; + u32 changed; + char __data[0]; +}; + +struct trace_event_raw_drv_switch_vif_chanctx { + struct trace_entry ent; + char wiphy_name[32]; + int n_vifs; + u32 mode; + u32 __data_loc_vifs; + char __data[0]; +}; + +struct trace_event_raw_local_sdata_chanctx { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u32 min_control_freq; + u32 min_freq_offset; + u32 min_chan_width; + u32 min_center_freq1; + u32 min_freq1_offset; + u32 min_center_freq2; + u32 ap_control_freq; + u32 ap_freq_offset; + u32 ap_chan_width; + u32 ap_center_freq1; + u32 ap_freq1_offset; + u32 ap_center_freq2; + u8 rx_chains_static; + u8 rx_chains_dynamic; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_drv_start_ap { + struct 
trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 link_id; + u8 dtimper; + u16 bcnint; + u32 __data_loc_ssid; + bool hidden_ssid; + char __data[0]; +}; + +struct trace_event_raw_drv_stop_ap { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 link_id; + char __data[0]; +}; + +struct trace_event_raw_drv_reconfig_complete { + struct trace_entry ent; + char wiphy_name[32]; + u8 reconfig_type; + char __data[0]; +}; + +struct trace_event_raw_drv_join_ibss { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 dtimper; + u16 bcnint; + u32 __data_loc_ssid; + char __data[0]; +}; + +struct trace_event_raw_drv_get_expected_throughput { + struct trace_entry ent; + char sta_addr[6]; + char __data[0]; +}; + +struct trace_event_raw_drv_start_nan { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 master_pref; + u8 bands; + char __data[0]; +}; + +struct trace_event_raw_drv_stop_nan { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char __data[0]; +}; + +struct trace_event_raw_drv_nan_change_conf { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 master_pref; + u8 bands; + u32 changes; + char __data[0]; +}; + +struct trace_event_raw_drv_add_nan_func { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 type; + u8 inst_id; + char __data[0]; +}; + +struct trace_event_raw_drv_del_nan_func { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 instance_id; + char __data[0]; +}; + +struct trace_event_raw_drv_set_default_unicast_key { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + int key_idx; + char __data[0]; +}; + +struct trace_event_raw_drv_channel_switch_beacon { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + char __data[0]; +}; + +struct trace_event_raw_drv_get_txpower { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + int dbm; + int ret; + char __data[0]; +}; + +struct trace_event_raw_drv_tdls_channel_switch { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + u8 oper_class; + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + char __data[0]; +}; + +struct trace_event_raw_drv_tdls_cancel_channel_switch { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + char __data[0]; +}; + +struct trace_event_raw_drv_tdls_recv_channel_switch { + struct trace_entry ent; + char 
wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 action_code; + char sta_addr[6]; + u32 control_freq; + u32 freq_offset; + u32 chan_width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u32 status; + bool peer_initiator; + u32 timestamp; + u16 switch_time; + u16 switch_timeout; + char __data[0]; +}; + +struct trace_event_raw_drv_wake_tx_queue { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + u8 ac; + u8 tid; + char __data[0]; +}; + +struct trace_event_raw_drv_get_ftm_responder_stats { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char __data[0]; +}; + +struct trace_event_raw_sta_flag_evt { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + bool enabled; + char __data[0]; +}; + +struct trace_event_raw_drv_add_twt_setup { + struct trace_entry ent; + char wiphy_name[32]; + char sta_addr[6]; + u8 dialog_token; + u8 control; + __le16 req_type; + long: 32; + __le64 twt; + u8 duration; + __le16 mantissa; + u8 channel; + char __data[0]; +}; + +struct trace_event_raw_drv_twt_teardown_request { + struct trace_entry ent; + char wiphy_name[32]; + char sta_addr[6]; + u8 flowid; + char __data[0]; +}; + +struct trace_event_raw_drv_net_setup_tc { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 type; + char __data[0]; +}; + +struct trace_event_raw_drv_can_activate_links { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u16 active_links; + char __data[0]; +}; + +struct trace_event_raw_drv_change_vif_links { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u16 old_links; + u16 new_links; + char __data[0]; +}; + +struct trace_event_raw_drv_change_sta_links { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char sta_addr[6]; + u16 old_links; + u16 new_links; + char __data[0]; +}; + +struct trace_event_raw_api_start_tx_ba_session { + struct trace_entry ent; + char sta_addr[6]; + u16 tid; + char __data[0]; +}; + +struct trace_event_raw_api_start_tx_ba_cb { + struct trace_entry ent; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 ra[6]; + u16 tid; + char __data[0]; +}; + +struct trace_event_raw_api_stop_tx_ba_session { + struct trace_entry ent; + char sta_addr[6]; + u16 tid; + char __data[0]; +}; + +struct trace_event_raw_api_stop_tx_ba_cb { + struct trace_entry ent; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 ra[6]; + u16 tid; + char __data[0]; +}; + +struct trace_event_raw_api_beacon_loss { + struct trace_entry ent; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char __data[0]; +}; + +struct trace_event_raw_api_connection_loss { + struct trace_entry ent; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + char __data[0]; +}; + +struct trace_event_raw_api_disconnect { + struct trace_entry ent; + enum nl80211_iftype vif_type; + 
void *sdata; + bool p2p; + u32 __data_loc_vif_name; + int reconnect; + char __data[0]; +}; + +struct trace_event_raw_api_cqm_rssi_notify { + struct trace_entry ent; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 rssi_event; + s32 rssi_level; + char __data[0]; +}; + +struct trace_event_raw_api_scan_completed { + struct trace_entry ent; + char wiphy_name[32]; + bool aborted; + char __data[0]; +}; + +struct trace_event_raw_api_sched_scan_results { + struct trace_entry ent; + char wiphy_name[32]; + char __data[0]; +}; + +struct trace_event_raw_api_sched_scan_stopped { + struct trace_entry ent; + char wiphy_name[32]; + char __data[0]; +}; + +struct trace_event_raw_api_sta_block_awake { + struct trace_entry ent; + char wiphy_name[32]; + char sta_addr[6]; + bool block; + char __data[0]; +}; + +struct trace_event_raw_api_chswitch_done { + struct trace_entry ent; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + bool success; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_api_gtk_rekey_notify { + struct trace_entry ent; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u8 bssid[6]; + u8 replay_ctr[8]; + char __data[0]; +}; + +struct trace_event_raw_api_enable_rssi_reports { + struct trace_entry ent; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + int rssi_min_thold; + int rssi_max_thold; + char __data[0]; +}; + +struct trace_event_raw_api_eosp { + struct trace_entry ent; + char wiphy_name[32]; + char sta_addr[6]; + char __data[0]; +}; + +struct trace_event_raw_api_send_eosp_nullfunc { + struct trace_entry ent; + char wiphy_name[32]; + char sta_addr[6]; + u8 tid; + char __data[0]; +}; + +struct trace_event_raw_api_sta_set_buffered { + struct trace_entry ent; + char wiphy_name[32]; + char sta_addr[6]; + u8 tid; + bool buffered; + char __data[0]; +}; + +struct trace_event_raw_api_radar_detected { + struct trace_entry ent; + char wiphy_name[32]; + char __data[0]; +}; + +struct trace_event_raw_api_request_smps { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + int link_id; + u32 smps_mode; + char __data[0]; +}; + +struct trace_event_raw_wake_queue { + struct trace_entry ent; + char wiphy_name[32]; + u16 queue; + u32 reason; + char __data[0]; +}; + +struct trace_event_raw_stop_queue { + struct trace_entry ent; + char wiphy_name[32]; + u16 queue; + u32 reason; + char __data[0]; +}; + +struct trace_event_raw_drv_can_neg_ttlm { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u16 downlink[16]; + u16 uplink[16]; + char __data[0]; +}; + +struct trace_event_raw_drv_neg_ttlm_res { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_iftype vif_type; + void *sdata; + bool p2p; + u32 __data_loc_vif_name; + u32 res; + u16 downlink[16]; + u16 uplink[16]; + char __data[0]; +}; + +struct trace_event_raw_drv_prep_add_interface { + struct trace_entry ent; + char wiphy_name[32]; + u32 type; + char __data[0]; +}; + +struct trace_event_data_offsets_local_only_evt {}; + +struct trace_event_data_offsets_local_sdata_addr_evt { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_local_u32_evt {}; + +struct trace_event_data_offsets_local_sdata_evt { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct 
trace_event_data_offsets_drv_return_int {}; + +struct trace_event_data_offsets_drv_return_bool {}; + +struct trace_event_data_offsets_drv_return_u32 {}; + +struct trace_event_data_offsets_drv_return_u64 {}; + +struct trace_event_data_offsets_drv_set_wakeup {}; + +struct trace_event_data_offsets_drv_stop {}; + +struct trace_event_data_offsets_drv_change_interface { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_config {}; + +struct trace_event_data_offsets_drv_vif_cfg_changed { + u32 vif_name; + const void *vif_name_ptr_; + u32 arp_addr_list; + const void *arp_addr_list_ptr_; + u32 ssid; + const void *ssid_ptr_; +}; + +struct trace_event_data_offsets_drv_link_info_changed { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_prepare_multicast {}; + +struct trace_event_data_offsets_drv_configure_filter {}; + +struct trace_event_data_offsets_drv_config_iface_filter { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_set_tim {}; + +struct trace_event_data_offsets_drv_set_key { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_update_tkip_key { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_sw_scan_start { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_get_stats {}; + +struct trace_event_data_offsets_drv_get_key_seq {}; + +struct trace_event_data_offsets_drv_set_coverage_class {}; + +struct trace_event_data_offsets_drv_sta_notify { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_sta_state { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_sta_set_txpwr { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_link_sta_rc_update { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_sta_event { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_conf_tx { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_set_tsf { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_offset_tsf { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_ampdu_action { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_get_survey {}; + +struct trace_event_data_offsets_drv_flush {}; + +struct trace_event_data_offsets_chanswitch_evt { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_set_antenna {}; + +struct trace_event_data_offsets_drv_get_antenna {}; + +struct trace_event_data_offsets_drv_remain_on_channel { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_set_ringparam {}; + +struct trace_event_data_offsets_drv_get_ringparam {}; + +struct trace_event_data_offsets_drv_set_bitrate_mask { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_set_rekey_data { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_event_callback { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_release_evt {}; + +struct trace_event_data_offsets_mgd_prepare_complete_tx_evt { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_local_chanctx {}; + +struct 
trace_event_data_offsets_drv_change_chanctx {}; + +struct trace_event_data_offsets_drv_switch_vif_chanctx { + u32 vifs; + const void *vifs_ptr_; +}; + +struct trace_event_data_offsets_local_sdata_chanctx { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_start_ap { + u32 vif_name; + const void *vif_name_ptr_; + u32 ssid; + const void *ssid_ptr_; +}; + +struct trace_event_data_offsets_drv_stop_ap { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_reconfig_complete {}; + +struct trace_event_data_offsets_drv_join_ibss { + u32 vif_name; + const void *vif_name_ptr_; + u32 ssid; + const void *ssid_ptr_; +}; + +struct trace_event_data_offsets_drv_get_expected_throughput {}; + +struct trace_event_data_offsets_drv_start_nan { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_stop_nan { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_nan_change_conf { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_add_nan_func { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_del_nan_func { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_set_default_unicast_key { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_channel_switch_beacon { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_get_txpower { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_tdls_channel_switch { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_tdls_cancel_channel_switch { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_tdls_recv_channel_switch { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_wake_tx_queue { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_get_ftm_responder_stats { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_sta_flag_evt { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_add_twt_setup {}; + +struct trace_event_data_offsets_drv_twt_teardown_request {}; + +struct trace_event_data_offsets_drv_net_setup_tc { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_can_activate_links { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_change_vif_links { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_change_sta_links { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_api_start_tx_ba_session {}; + +struct trace_event_data_offsets_api_start_tx_ba_cb { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_api_stop_tx_ba_session {}; + +struct trace_event_data_offsets_api_stop_tx_ba_cb { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_api_beacon_loss { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_api_connection_loss { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_api_disconnect { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_api_cqm_rssi_notify { + u32 vif_name; + const void *vif_name_ptr_; +}; + 
+struct trace_event_data_offsets_api_scan_completed {}; + +struct trace_event_data_offsets_api_sched_scan_results {}; + +struct trace_event_data_offsets_api_sched_scan_stopped {}; + +struct trace_event_data_offsets_api_sta_block_awake {}; + +struct trace_event_data_offsets_api_chswitch_done { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_api_gtk_rekey_notify { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_api_enable_rssi_reports { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_api_eosp {}; + +struct trace_event_data_offsets_api_send_eosp_nullfunc {}; + +struct trace_event_data_offsets_api_sta_set_buffered {}; + +struct trace_event_data_offsets_api_radar_detected {}; + +struct trace_event_data_offsets_api_request_smps { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_wake_queue {}; + +struct trace_event_data_offsets_stop_queue {}; + +struct trace_event_data_offsets_drv_can_neg_ttlm { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_neg_ttlm_res { + u32 vif_name; + const void *vif_name_ptr_; +}; + +struct trace_event_data_offsets_drv_prep_add_interface {}; + +typedef void (*btf_trace_drv_return_void)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_drv_return_int)(void *, struct ieee80211_local *, int); + +typedef void (*btf_trace_drv_return_bool)(void *, struct ieee80211_local *, bool); + +typedef void (*btf_trace_drv_return_u32)(void *, struct ieee80211_local *, u32); + +typedef void (*btf_trace_drv_return_u64)(void *, struct ieee80211_local *, u64); + +typedef void (*btf_trace_drv_start)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_drv_get_et_strings)(void *, struct ieee80211_local *, u32); + +typedef void (*btf_trace_drv_get_et_sset_count)(void *, struct ieee80211_local *, u32); + +typedef void (*btf_trace_drv_get_et_stats)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_drv_suspend)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_drv_resume)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_drv_set_wakeup)(void *, struct ieee80211_local *, bool); + +typedef void (*btf_trace_drv_stop)(void *, struct ieee80211_local *, bool); + +typedef void (*btf_trace_drv_add_interface)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_change_interface)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, enum nl80211_iftype, bool); + +typedef void (*btf_trace_drv_remove_interface)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_config)(void *, struct ieee80211_local *, u32); + +typedef void (*btf_trace_drv_vif_cfg_changed)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, u64); + +typedef void (*btf_trace_drv_link_info_changed)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_bss_conf *, u64); + +typedef void (*btf_trace_drv_prepare_multicast)(void *, struct ieee80211_local *, int); + +typedef void (*btf_trace_drv_configure_filter)(void *, struct ieee80211_local *, unsigned int, unsigned int *, u64); + +typedef void (*btf_trace_drv_config_iface_filter)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, unsigned int, unsigned int); + +typedef void (*btf_trace_drv_set_tim)(void *, struct ieee80211_local *, struct ieee80211_sta *, bool); + +typedef void 
(*btf_trace_drv_set_key)(void *, struct ieee80211_local *, enum set_key_cmd, struct ieee80211_sub_if_data *, struct ieee80211_sta *, struct ieee80211_key_conf *); + +typedef void (*btf_trace_drv_update_tkip_key)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_key_conf *, struct ieee80211_sta *, u32); + +typedef void (*btf_trace_drv_hw_scan)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_cancel_hw_scan)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_sched_scan_start)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_sched_scan_stop)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_sw_scan_start)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, const u8 *); + +typedef void (*btf_trace_drv_sw_scan_complete)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_get_stats)(void *, struct ieee80211_local *, struct ieee80211_low_level_stats *, int); + +typedef void (*btf_trace_drv_get_key_seq)(void *, struct ieee80211_local *, struct ieee80211_key_conf *); + +typedef void (*btf_trace_drv_set_frag_threshold)(void *, struct ieee80211_local *, u32); + +typedef void (*btf_trace_drv_set_rts_threshold)(void *, struct ieee80211_local *, u32); + +typedef void (*btf_trace_drv_set_coverage_class)(void *, struct ieee80211_local *, s16); + +typedef void (*btf_trace_drv_sta_notify)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, enum sta_notify_cmd, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_sta_state)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *, enum ieee80211_sta_state, enum ieee80211_sta_state); + +typedef void (*btf_trace_drv_sta_set_txpwr)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_link_sta_rc_update)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_link_sta *, u32); + +typedef void (*btf_trace_drv_sta_statistics)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_sta_add)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_sta_remove)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_sta_pre_rcu_remove)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_sync_rx_queues)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_sta_rate_tbl_update)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_conf_tx)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, unsigned int, u16, const struct ieee80211_tx_queue_params *); + +typedef void (*btf_trace_drv_get_tsf)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_set_tsf)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, u64); + +typedef void (*btf_trace_drv_offset_tsf)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, s64); + +typedef void 
(*btf_trace_drv_reset_tsf)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_tx_last_beacon)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_drv_ampdu_action)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_ampdu_params *); + +typedef void (*btf_trace_drv_get_survey)(void *, struct ieee80211_local *, int, struct survey_info *); + +typedef void (*btf_trace_drv_flush)(void *, struct ieee80211_local *, u32, bool); + +typedef void (*btf_trace_drv_flush_sta)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_channel_switch)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_channel_switch *); + +typedef void (*btf_trace_drv_set_antenna)(void *, struct ieee80211_local *, u32, u32, int); + +typedef void (*btf_trace_drv_get_antenna)(void *, struct ieee80211_local *, u32, u32, int); + +typedef void (*btf_trace_drv_remain_on_channel)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_channel *, unsigned int, enum ieee80211_roc_type); + +typedef void (*btf_trace_drv_cancel_remain_on_channel)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_set_ringparam)(void *, struct ieee80211_local *, u32, u32); + +typedef void (*btf_trace_drv_get_ringparam)(void *, struct ieee80211_local *, u32 *, u32 *, u32 *, u32 *); + +typedef void (*btf_trace_drv_tx_frames_pending)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_drv_offchannel_tx_cancel_wait)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_drv_set_bitrate_mask)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, const struct cfg80211_bitrate_mask *); + +typedef void (*btf_trace_drv_set_rekey_data)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct cfg80211_gtk_rekey_data *); + +typedef void (*btf_trace_drv_event_callback)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, const struct ieee80211_event *); + +typedef void (*btf_trace_drv_release_buffered_frames)(void *, struct ieee80211_local *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool); + +typedef void (*btf_trace_drv_allow_buffered_frames)(void *, struct ieee80211_local *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool); + +typedef void (*btf_trace_drv_mgd_prepare_tx)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, u16, u16, bool); + +typedef void (*btf_trace_drv_mgd_complete_tx)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, u16, u16, bool); + +typedef void (*btf_trace_drv_mgd_protect_tdls_discover)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_add_chanctx)(void *, struct ieee80211_local *, struct ieee80211_chanctx *); + +typedef void (*btf_trace_drv_remove_chanctx)(void *, struct ieee80211_local *, struct ieee80211_chanctx *); + +typedef void (*btf_trace_drv_change_chanctx)(void *, struct ieee80211_local *, struct ieee80211_chanctx *, u32); + +typedef void (*btf_trace_drv_switch_vif_chanctx)(void *, struct ieee80211_local *, struct ieee80211_vif_chanctx_switch *, int, enum ieee80211_chanctx_switch_mode); + +typedef void (*btf_trace_drv_assign_vif_chanctx)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_bss_conf *, struct ieee80211_chanctx *); + +typedef void 
(*btf_trace_drv_unassign_vif_chanctx)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_bss_conf *, struct ieee80211_chanctx *); + +typedef void (*btf_trace_drv_start_ap)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_bss_conf *); + +typedef void (*btf_trace_drv_stop_ap)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_bss_conf *); + +typedef void (*btf_trace_drv_reconfig_complete)(void *, struct ieee80211_local *, enum ieee80211_reconfig_type); + +typedef void (*btf_trace_drv_ipv6_addr_change)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_join_ibss)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_bss_conf *); + +typedef void (*btf_trace_drv_leave_ibss)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_get_expected_throughput)(void *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_start_nan)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct cfg80211_nan_conf *); + +typedef void (*btf_trace_drv_stop_nan)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_nan_change_conf)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct cfg80211_nan_conf *, u32); + +typedef void (*btf_trace_drv_add_nan_func)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, const struct cfg80211_nan_func *); + +typedef void (*btf_trace_drv_del_nan_func)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, u8); + +typedef void (*btf_trace_drv_start_pmsr)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_abort_pmsr)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_set_default_unicast_key)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, int); + +typedef void (*btf_trace_drv_channel_switch_beacon)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct cfg80211_chan_def *); + +typedef void (*btf_trace_drv_pre_channel_switch)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_channel_switch *); + +typedef void (*btf_trace_drv_post_channel_switch)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_abort_channel_switch)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_channel_switch_rx_beacon)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_channel_switch *); + +typedef void (*btf_trace_drv_get_txpower)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, int, int); + +typedef void (*btf_trace_drv_tdls_channel_switch)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *, u8, struct cfg80211_chan_def *); + +typedef void (*btf_trace_drv_tdls_cancel_channel_switch)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_tdls_recv_channel_switch)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_tdls_ch_sw_params *); + +typedef void (*btf_trace_drv_wake_tx_queue)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct txq_info *); + +typedef void (*btf_trace_drv_get_ftm_responder_stats)(void *, struct 
ieee80211_local *, struct ieee80211_sub_if_data *, struct cfg80211_ftm_responder_stats *); + +typedef void (*btf_trace_drv_update_vif_offload)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_drv_sta_set_4addr)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *, bool); + +typedef void (*btf_trace_drv_sta_set_decap_offload)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *, bool); + +typedef void (*btf_trace_drv_add_twt_setup)(void *, struct ieee80211_local *, struct ieee80211_sta *, struct ieee80211_twt_setup *, struct ieee80211_twt_params *); + +typedef void (*btf_trace_drv_twt_teardown_request)(void *, struct ieee80211_local *, struct ieee80211_sta *, u8); + +typedef void (*btf_trace_drv_net_fill_forward_path)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *); + +typedef void (*btf_trace_drv_net_setup_tc)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, u8); + +typedef void (*btf_trace_drv_can_activate_links)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, u16); + +typedef void (*btf_trace_drv_change_vif_links)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, u16, u16); + +typedef void (*btf_trace_drv_change_sta_links)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_sta *, u16, u16); + +typedef void (*btf_trace_api_start_tx_ba_session)(void *, struct ieee80211_sta *, u16); + +typedef void (*btf_trace_api_start_tx_ba_cb)(void *, struct ieee80211_sub_if_data *, const u8 *, u16); + +typedef void (*btf_trace_api_stop_tx_ba_session)(void *, struct ieee80211_sta *, u16); + +typedef void (*btf_trace_api_stop_tx_ba_cb)(void *, struct ieee80211_sub_if_data *, const u8 *, u16); + +typedef void (*btf_trace_api_restart_hw)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_api_beacon_loss)(void *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_api_connection_loss)(void *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_api_disconnect)(void *, struct ieee80211_sub_if_data *, bool); + +typedef void (*btf_trace_api_cqm_rssi_notify)(void *, struct ieee80211_sub_if_data *, enum nl80211_cqm_rssi_threshold_event, s32); + +typedef void (*btf_trace_api_cqm_beacon_loss_notify)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *); + +typedef void (*btf_trace_api_scan_completed)(void *, struct ieee80211_local *, bool); + +typedef void (*btf_trace_api_sched_scan_results)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_api_sched_scan_stopped)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_api_sta_block_awake)(void *, struct ieee80211_local *, struct ieee80211_sta *, bool); + +typedef void (*btf_trace_api_chswitch_done)(void *, struct ieee80211_sub_if_data *, bool, unsigned int); + +typedef void (*btf_trace_api_ready_on_channel)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_api_remain_on_channel_expired)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_api_gtk_rekey_notify)(void *, struct ieee80211_sub_if_data *, const u8 *, const u8 *); + +typedef void (*btf_trace_api_enable_rssi_reports)(void *, struct ieee80211_sub_if_data *, int, int); + +typedef void (*btf_trace_api_eosp)(void *, struct ieee80211_local *, struct ieee80211_sta *); + +typedef void (*btf_trace_api_send_eosp_nullfunc)(void *, struct ieee80211_local *, struct ieee80211_sta *, u8); + 
+typedef void (*btf_trace_api_sta_set_buffered)(void *, struct ieee80211_local *, struct ieee80211_sta *, u8, bool); + +typedef void (*btf_trace_api_radar_detected)(void *, struct ieee80211_local *); + +typedef void (*btf_trace_api_request_smps)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_link_data *, enum ieee80211_smps_mode); + +typedef void (*btf_trace_wake_queue)(void *, struct ieee80211_local *, u16, enum queue_stop_reason); + +typedef void (*btf_trace_stop_queue)(void *, struct ieee80211_local *, u16, enum queue_stop_reason); + +typedef void (*btf_trace_drv_can_neg_ttlm)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, struct ieee80211_neg_ttlm *); + +typedef void (*btf_trace_drv_neg_ttlm_res)(void *, struct ieee80211_local *, struct ieee80211_sub_if_data *, enum ieee80211_neg_ttlm_res, struct ieee80211_neg_ttlm *); + +typedef void (*btf_trace_drv_prep_add_interface)(void *, struct ieee80211_local *, enum nl80211_iftype); + +struct wq_flusher; + +struct worker; + +struct workqueue_attrs; + +struct pool_workqueue; + +struct wq_device; + +struct wq_node_nr_active; + +struct workqueue_struct { + struct list_head pwqs; + struct list_head list; + struct mutex mutex; + int work_color; + int flush_color; + atomic_t nr_pwqs_to_flush; + struct wq_flusher *first_flusher; + struct list_head flusher_queue; + struct list_head flusher_overflow; + struct list_head maydays; + struct worker *rescuer; + int nr_drainers; + int max_active; + int min_active; + int saved_max_active; + int saved_min_active; + struct workqueue_attrs *unbound_attrs; + struct pool_workqueue *dfl_pwq; + struct wq_device *wq_dev; + char name[32]; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + unsigned int flags; + struct pool_workqueue **cpu_pwq; + struct wq_node_nr_active *node_nr_active[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum wq_affn_scope { + WQ_AFFN_DFL = 0, + WQ_AFFN_CPU = 1, + WQ_AFFN_SMT = 2, + WQ_AFFN_CACHE = 3, + WQ_AFFN_NUMA = 4, + WQ_AFFN_SYSTEM = 5, + WQ_AFFN_NR_TYPES = 6, +}; + +struct workqueue_attrs { + int nice; + cpumask_var_t cpumask; + cpumask_var_t __pod_cpumask; + bool affn_strict; + enum wq_affn_scope affn_scope; + bool ordered; +}; + +enum wq_consts { + WQ_MAX_ACTIVE = 2048, + WQ_UNBOUND_MAX_ACTIVE = 2048, + WQ_DFL_ACTIVE = 1024, + WQ_DFL_MIN_ACTIVE = 8, +}; + +struct worker_pool; + +struct worker { + union { + struct list_head entry; + struct hlist_node hentry; + }; + struct work_struct *current_work; + work_func_t current_func; + struct pool_workqueue *current_pwq; + long: 32; + u64 current_at; + unsigned int current_color; + int sleeping; + work_func_t last_func; + struct list_head scheduled; + struct task_struct *task; + struct worker_pool *pool; + struct list_head node; + long unsigned int last_active; + unsigned int flags; + int id; + char desc[32]; + struct workqueue_struct *rescue_wq; + long: 32; +}; + +struct pool_workqueue { + struct worker_pool *pool; + struct workqueue_struct *wq; + int work_color; + int flush_color; + int refcnt; + int nr_in_flight[16]; + bool plugged; + int nr_active; + struct list_head inactive_works; + struct list_head pending_node; + struct list_head pwqs_node; + struct list_head mayday_node; + long: 32; + u64 stats[8]; + struct kthread_work release_work; + struct 
callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct worker_pool { + raw_spinlock_t lock; + int cpu; + int node; + int id; + unsigned int flags; + long unsigned int watchdog_ts; + bool cpu_stall; + int nr_running; + struct list_head worklist; + int nr_workers; + int nr_idle; + struct list_head idle_list; + struct timer_list idle_timer; + struct work_struct idle_cull_work; + struct timer_list mayday_timer; + struct hlist_head busy_hash[64]; + struct worker *manager; + struct list_head workers; + struct ida worker_ida; + struct workqueue_attrs *attrs; + struct hlist_node hash_node; + int refcnt; + struct callback_head rcu; +}; + +enum worker_pool_flags { + POOL_BH = 1, + POOL_MANAGER_ACTIVE = 2, + POOL_DISASSOCIATED = 4, + POOL_BH_DRAINING = 8, +}; + +enum worker_flags { + WORKER_DIE = 2, + WORKER_IDLE = 4, + WORKER_PREP = 8, + WORKER_CPU_INTENSIVE = 64, + WORKER_UNBOUND = 128, + WORKER_REBOUND = 256, + WORKER_NOT_RUNNING = 456, +}; + +enum work_cancel_flags { + WORK_CANCEL_DELAYED = 1, + WORK_CANCEL_DISABLE = 2, +}; + +enum wq_internal_consts { + NR_STD_WORKER_POOLS = 2, + UNBOUND_POOL_HASH_ORDER = 6, + BUSY_WORKER_HASH_ORDER = 6, + MAX_IDLE_WORKERS_RATIO = 4, + IDLE_WORKER_TIMEOUT = 300000, + MAYDAY_INITIAL_TIMEOUT = 10, + MAYDAY_INTERVAL = 100, + CREATE_COOLDOWN = 1000, + RESCUER_NICE_LEVEL = -20, + HIGHPRI_NICE_LEVEL = -20, + WQ_NAME_LEN = 32, + WORKER_ID_LEN = 42, +}; + +enum pool_workqueue_stats { + PWQ_STAT_STARTED = 0, + PWQ_STAT_COMPLETED = 1, + PWQ_STAT_CPU_TIME = 2, + PWQ_STAT_CPU_INTENSIVE = 3, + PWQ_STAT_CM_WAKEUP = 4, + PWQ_STAT_REPATRIATED = 5, + PWQ_STAT_MAYDAY = 6, + PWQ_STAT_RESCUED = 7, + PWQ_NR_STATS = 8, +}; + +struct wq_flusher { + struct list_head list; + int flush_color; + struct completion done; +}; + +struct wq_node_nr_active { + int max; + atomic_t nr; + raw_spinlock_t lock; + struct list_head pending_pwqs; +}; + +struct wq_device { + struct workqueue_struct *wq; + long: 32; + struct device dev; +}; + +struct wq_pod_type { + int nr_pods; + cpumask_var_t *pod_cpus; + int *pod_node; + int *cpu_pod; +}; + +struct work_offq_data { + u32 pool_id; + u32 disable; + u32 flags; +}; + +struct trace_event_raw_workqueue_queue_work { + struct trace_entry ent; + void *work; + void *function; + u32 __data_loc_workqueue; + int req_cpu; + int cpu; + char __data[0]; +}; + +struct trace_event_raw_workqueue_activate_work { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_data_offsets_workqueue_queue_work { + u32 workqueue; + const void *workqueue_ptr_; +}; + +struct trace_event_data_offsets_workqueue_activate_work {}; + +struct trace_event_data_offsets_workqueue_execute_start {}; + +struct trace_event_data_offsets_workqueue_execute_end {}; + +typedef void (*btf_trace_workqueue_queue_work)(void *, int, struct pool_workqueue *, struct work_struct *); + +typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t); + +struct wq_drain_dead_softirq_work { + struct work_struct work; + struct 
worker_pool *pool; + struct completion done; +}; + +struct wq_barrier { + struct work_struct work; + struct completion done; + struct task_struct *task; +}; + +struct apply_wqattrs_ctx { + struct workqueue_struct *wq; + struct workqueue_attrs *attrs; + struct list_head list; + struct pool_workqueue *dfl_pwq; + struct pool_workqueue *pwq_tbl[0]; +}; + +struct pr_cont_work_struct { + bool comma; + work_func_t func; + long int ctr; +}; + +struct work_for_cpu { + struct work_struct work; + long int (*fn)(void *); + void *arg; + long int ret; +}; + +typedef struct sigevent sigevent_t; + +enum { + EVENT_TRIGGER_FL_PROBE = 1, +}; + +enum bpf_perf_event_type { + BPF_PERF_EVENT_UNSPEC = 0, + BPF_PERF_EVENT_UPROBE = 1, + BPF_PERF_EVENT_URETPROBE = 2, + BPF_PERF_EVENT_KPROBE = 3, + BPF_PERF_EVENT_KRETPROBE = 4, + BPF_PERF_EVENT_TRACEPOINT = 5, + BPF_PERF_EVENT_EVENT = 6, +}; + +enum bpf_stats_type { + BPF_STATS_RUN_TIME = 0, +}; + +struct bpf_tracing_link { + struct bpf_tramp_link link; + enum bpf_attach_type attach_type; + struct bpf_trampoline *trampoline; + struct bpf_prog *tgt_prog; + long: 32; +}; + +enum bpf_audit { + BPF_AUDIT_LOAD = 0, + BPF_AUDIT_UNLOAD = 1, + BPF_AUDIT_MAX = 2, +}; + +struct bpf_prog_kstats { + u64 nsecs; + u64 cnt; + u64 misses; +}; + +struct bpf_perf_link { + struct bpf_link link; + struct file *perf_file; + long: 32; +}; + +typedef u64 (*btf_bpf_sys_bpf)(int, union bpf_attr *, u32); + +typedef u64 (*btf_bpf_sys_close)(u32); + +typedef u64 (*btf_bpf_kallsyms_lookup_name)(const char *, int, int, u64 *); + +struct audit_buffer; + +enum rseq_cpu_id_state { + RSEQ_CPU_ID_UNINITIALIZED = -1, + RSEQ_CPU_ID_REGISTRATION_FAILED = -2, +}; + +enum rseq_flags { + RSEQ_FLAG_UNREGISTER = 1, +}; + +enum rseq_cs_flags { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4, +}; + +struct rseq_cs { + __u32 version; + __u32 flags; + __u64 start_ip; + __u64 post_commit_offset; + __u64 abort_ip; +}; + +struct trace_event_raw_rseq_update { + struct trace_entry ent; + s32 cpu_id; + s32 node_id; + s32 mm_cid; + char __data[0]; +}; + +struct trace_event_raw_rseq_ip_fixup { + struct trace_entry ent; + long unsigned int regs_ip; + long unsigned int start_ip; + long unsigned int post_commit_offset; + long unsigned int abort_ip; + char __data[0]; +}; + +struct trace_event_data_offsets_rseq_update {}; + +struct trace_event_data_offsets_rseq_ip_fixup {}; + +typedef void (*btf_trace_rseq_update)(void *, struct task_struct *); + +typedef void (*btf_trace_rseq_ip_fixup)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +struct trace_event_raw_mm_compaction_isolate_template { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int nr_scanned; + long unsigned int nr_taken; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_migratepages { + struct trace_entry ent; + long unsigned int nr_migrated; + long unsigned int nr_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_begin { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_end { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + int status; + char __data[0]; +}; + +struct 
trace_event_raw_mm_compaction_try_to_compact_pages { + struct trace_entry ent; + int order; + long unsigned int gfp_mask; + int prio; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_suitable_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + int ret; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_defer_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + unsigned int considered; + unsigned int defer_shift; + int order_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_kcompactd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_kcompactd_wake_template { + struct trace_entry ent; + int nid; + int order; + enum zone_type highest_zoneidx; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_compaction_isolate_template {}; + +struct trace_event_data_offsets_mm_compaction_migratepages {}; + +struct trace_event_data_offsets_mm_compaction_begin {}; + +struct trace_event_data_offsets_mm_compaction_end {}; + +struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; + +struct trace_event_data_offsets_mm_compaction_suitable_template {}; + +struct trace_event_data_offsets_mm_compaction_defer_template {}; + +struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; + +struct trace_event_data_offsets_kcompactd_wake_template {}; + +typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_fast_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_migratepages)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_mm_compaction_begin)(void *, struct compact_control *, long unsigned int, long unsigned int, bool); + +typedef void (*btf_trace_mm_compaction_end)(void *, struct compact_control *, long unsigned int, long unsigned int, bool, int); + +typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, int, gfp_t, int); + +typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, int); + +typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum zone_type); + +typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type); + +typedef enum { + ISOLATE_ABORT = 0, + ISOLATE_NONE = 1, + ISOLATE_SUCCESS = 2, +} isolate_migrate_t; + +struct mpage_readpage_args { + struct bio *bio; + struct folio *folio; + unsigned int nr_pages; + bool is_readahead; + sector_t last_block_in_bio; + struct buffer_head map_bh; + long unsigned int first_logical_block; + get_block_t *get_block; +}; + +struct mpage_data { + struct bio *bio; + long: 32; + sector_t last_block_in_bio; + get_block_t *get_block; + long: 32; +}; + +struct epoll_event { + __poll_t events; + long: 32; + __u64 data; +}; + 
+struct epoll_params { + __u32 busy_poll_usecs; + __u16 busy_poll_budget; + __u8 prefer_busy_poll; + __u8 __pad; +}; + +struct epoll_filefd { + struct file *file; + int fd; +}; + +struct epitem; + +struct eppoll_entry { + struct eppoll_entry *next; + struct epitem *base; + wait_queue_entry_t wait; + wait_queue_head_t *whead; +}; + +struct eventpoll; + +struct epitem { + union { + struct rb_node rbn; + struct callback_head rcu; + }; + struct list_head rdllink; + struct epitem *next; + struct epoll_filefd ffd; + bool dying; + struct eppoll_entry *pwqlist; + struct eventpoll *ep; + struct hlist_node fllink; + struct wakeup_source *ws; + struct epoll_event event; +}; + +struct eventpoll { + struct mutex mtx; + wait_queue_head_t wq; + wait_queue_head_t poll_wait; + struct list_head rdllist; + rwlock_t lock; + struct rb_root_cached rbr; + struct epitem *ovflist; + struct wakeup_source *ws; + struct user_struct *user; + struct file *file; + u64 gen; + struct hlist_head refs; + refcount_t refcount; + unsigned int napi_id; + u32 busy_poll_usecs; + u16 busy_poll_budget; + bool prefer_busy_poll; + long: 32; +}; + +struct ep_pqueue { + poll_table pt; + struct epitem *epi; +}; + +struct epitems_head { + struct hlist_head epitems; + struct epitems_head *next; +}; + +enum { + attr_noop = 0, + attr_delayed_allocation_blocks = 1, + attr_session_write_kbytes = 2, + attr_lifetime_write_kbytes = 3, + attr_reserved_clusters = 4, + attr_sra_exceeded_retry_limit = 5, + attr_inode_readahead = 6, + attr_trigger_test_error = 7, + attr_first_error_time = 8, + attr_last_error_time = 9, + attr_clusters_in_group = 10, + attr_mb_order = 11, + attr_feature = 12, + attr_pointer_pi = 13, + attr_pointer_ui = 14, + attr_pointer_ul = 15, + attr_pointer_u64 = 16, + attr_pointer_u8 = 17, + attr_pointer_string = 18, + attr_pointer_atomic = 19, + attr_journal_task = 20, +}; + +enum { + ptr_explicit = 0, + ptr_ext4_sb_info_offset = 1, + ptr_ext4_super_block_offset = 2, +}; + +struct ext4_attr { + struct attribute attr; + short int attr_id; + short int attr_ptr; + short unsigned int attr_size; + union { + int offset; + void *explicit_ptr; + } u; +}; + +struct fuse_file_lock { + uint64_t start; + uint64_t end; + uint32_t type; + uint32_t pid; +}; + +struct fuse_open_in { + uint32_t flags; + uint32_t open_flags; +}; + +struct fuse_flush_in { + uint64_t fh; + uint32_t unused; + uint32_t padding; + uint64_t lock_owner; +}; + +struct fuse_read_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t read_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t write_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_out { + uint32_t size; + uint32_t padding; +}; + +struct fuse_fsync_in { + uint64_t fh; + uint32_t fsync_flags; + uint32_t padding; +}; + +struct fuse_lk_in { + uint64_t fh; + uint64_t owner; + struct fuse_file_lock lk; + uint32_t lk_flags; + uint32_t padding; +}; + +struct fuse_lk_out { + struct fuse_file_lock lk; +}; + +struct fuse_bmap_in { + uint64_t block; + uint32_t blocksize; + uint32_t padding; +}; + +struct fuse_bmap_out { + uint64_t block; +}; + +struct fuse_poll_in { + uint64_t fh; + uint64_t kh; + uint32_t flags; + uint32_t events; +}; + +struct fuse_poll_out { + uint32_t revents; + uint32_t padding; +}; + +struct fuse_fallocate_in { + uint64_t fh; + uint64_t offset; + uint64_t length; + uint32_t mode; + uint32_t padding; +}; + +struct fuse_lseek_in { + 
uint64_t fh; + uint64_t offset; + uint32_t whence; + uint32_t padding; +}; + +struct fuse_lseek_out { + uint64_t offset; +}; + +struct fuse_copy_file_range_in { + uint64_t fh_in; + uint64_t off_in; + uint64_t nodeid_out; + uint64_t fh_out; + uint64_t off_out; + uint64_t len; + uint64_t flags; +}; + +struct fuse_io_priv { + struct kref refcnt; + int async; + spinlock_t lock; + unsigned int reqs; + ssize_t bytes; + size_t size; + __u64 offset; + bool write; + bool should_dirty; + int err; + struct kiocb *iocb; + struct completion *done; + bool blocking; + long: 32; +}; + +struct fuse_io_args { + union { + struct { + struct fuse_read_in in; + u64 attr_ver; + } read; + struct { + struct fuse_write_in in; + struct fuse_write_out out; + bool folio_locked; + long: 32; + } write; + }; + struct fuse_args_pages ap; + struct fuse_io_priv *io; + struct fuse_file *ff; +}; + +struct fuse_writepage_args { + struct fuse_io_args ia; + struct rb_node writepages_entry; + struct list_head queue_entry; + struct fuse_writepage_args *next; + struct inode *inode; + struct fuse_sync_bucket *bucket; +}; + +struct fuse_fill_wb_data { + struct fuse_writepage_args *wpa; + struct fuse_file *ff; + struct inode *inode; + struct folio **orig_folios; + unsigned int max_folios; +}; + +struct scrub_sector_verification; + +struct scrub_stripe { + struct scrub_ctx *sctx; + struct btrfs_block_group *bg; + struct page *pages[16]; + struct scrub_sector_verification *sectors; + struct btrfs_device *dev; + u64 logical; + u64 physical; + u16 mirror_num; + u16 nr_sectors; + u16 nr_data_extents; + u16 nr_meta_extents; + atomic_t pending_io; + wait_queue_head_t io_wait; + wait_queue_head_t repair_wait; + long unsigned int state; + long unsigned int extent_sector_bitmap; + long unsigned int init_error_bitmap; + unsigned int init_nr_io_errors; + unsigned int init_nr_csum_errors; + unsigned int init_nr_meta_errors; + long unsigned int error_bitmap; + long unsigned int io_error_bitmap; + long unsigned int csum_error_bitmap; + long unsigned int meta_error_bitmap; + long unsigned int write_error_bitmap; + spinlock_t write_error_lock; + u8 *csums; + struct work_struct work; +}; + +struct scrub_ctx { + struct scrub_stripe stripes[128]; + struct scrub_stripe *raid56_data_stripes; + struct btrfs_fs_info *fs_info; + struct btrfs_path extent_path; + struct btrfs_path csum_path; + int first_free; + int cur_stripe; + atomic_t cancel_req; + int readonly; + ktime_t throttle_deadline; + u64 throttle_sent; + int is_dev_replace; + long: 32; + u64 write_pointer; + struct mutex wr_lock; + struct btrfs_device *wr_tgtdev; + struct btrfs_scrub_progress stat; + spinlock_t stat_lock; + refcount_t refs; +}; + +struct scrub_sector_verification { + bool is_metadata; + long: 32; + union { + u8 *csum; + u64 generation; + }; +}; + +enum scrub_stripe_flags { + SCRUB_STRIPE_FLAG_INITIALIZED = 0, + SCRUB_STRIPE_FLAG_REPAIR_DONE = 1, + SCRUB_STRIPE_FLAG_NO_REPORT = 2, +}; + +struct scrub_warning { + struct btrfs_path *path; + long: 32; + u64 extent_item_size; + const char *errstr; + long: 32; + u64 physical; + u64 logical; + struct btrfs_device *dev; + long: 32; +}; + +struct scomp_scratch { + spinlock_t lock; + void *src; + void *dst; +}; + +struct bio_alloc_cache { + struct bio *free_list; + struct bio *free_list_irq; + unsigned int nr; + unsigned int nr_irq; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; + 
+struct io_ev_fd { + struct eventfd_ctx *cq_ev_fd; + unsigned int eventfd_async; + unsigned int last_cq_tail; + refcount_t refs; + atomic_t ops; + struct callback_head rcu; +}; + +enum { + IO_EVENTFD_OP_SIGNAL_BIT = 0, +}; + +struct io_ftrunc { + struct file *file; + long: 32; + loff_t len; +}; + +typedef U32 (*ZSTD_getAllMatchesFn)(ZSTD_match_t *, ZSTD_matchState_t *, U32 *, const BYTE *, const BYTE *, const U32 *, const U32, const U32); + +typedef struct { + rawSeqStore_t seqStore; + U32 startPosInBlock; + U32 endPosInBlock; + U32 offset; +} ZSTD_optLdm_t; + +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY = 0, + BACKLIGHT_UPDATE_SYSFS = 1, +}; + +enum backlight_type { + BACKLIGHT_RAW = 1, + BACKLIGHT_PLATFORM = 2, + BACKLIGHT_FIRMWARE = 3, + BACKLIGHT_TYPE_MAX = 4, +}; + +enum backlight_scale { + BACKLIGHT_SCALE_UNKNOWN = 0, + BACKLIGHT_SCALE_LINEAR = 1, + BACKLIGHT_SCALE_NON_LINEAR = 2, +}; + +struct backlight_device; + +struct backlight_ops { + unsigned int options; + int (*update_status)(struct backlight_device *); + int (*get_brightness)(struct backlight_device *); + bool (*controls_device)(struct backlight_device *, struct device *); +}; + +struct backlight_properties { + int brightness; + int max_brightness; + int power; + enum backlight_type type; + unsigned int state; + enum backlight_scale scale; +}; + +struct backlight_device { + struct backlight_properties props; + struct mutex update_lock; + struct mutex ops_lock; + const struct backlight_ops *ops; + struct notifier_block fb_notif; + struct list_head entry; + struct device dev; + bool fb_bl_on[32]; + int use_count; + long: 32; +}; + +struct virtio_admin_cmd { + __le16 opcode; + __le16 group_type; + long: 32; + __le64 group_member_id; + struct scatterlist *data_sg; + struct scatterlist *result_sg; + struct completion completion; + u32 result_sg_size; + int ret; +}; + +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + __le16 queue_notify_data; + __le16 queue_reset; + __le16 admin_queue_index; + __le16 admin_queue_num; +}; + +struct virtio_admin_cmd_hdr { + __le16 opcode; + __le16 group_type; + __u8 reserved1[12]; + __le64 group_member_id; +}; + +struct virtio_admin_cmd_status { + __le16 status; + __le16 status_qualifier; + __u8 reserved2[4]; +}; + +struct virtio_dev_parts_cap { + __u8 get_parts_resource_objects_limit; + __u8 set_parts_resource_objects_limit; +}; + +struct virtio_admin_cmd_query_cap_id_result { + __le64 supported_caps[1]; +}; + +struct virtio_admin_cmd_cap_get_data { + __le16 id; + __u8 reserved[6]; +}; + +struct virtio_admin_cmd_cap_set_data { + __le16 id; + __u8 reserved[6]; + __u8 cap_specific_data[0]; +}; + +struct virtio_admin_cmd_resource_obj_cmd_hdr { + __le16 type; + __u8 reserved[2]; + __le32 id; +}; + +struct virtio_admin_cmd_resource_obj_create_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __le64 flags; + __u8 resource_obj_specific_data[0]; +}; + +struct virtio_resource_obj_dev_parts { + __u8 type; + __u8 reserved[7]; +}; + +struct virtio_admin_cmd_dev_parts_metadata_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; +}; + +struct virtio_dev_part_hdr { + __le16 part_type; + __u8 flags; + __u8 reserved; + union { + struct { + __le32 offset; + __le32 reserved; + } pci_common_cfg; + struct { + __le16 index; + __u8 reserved[6]; + } vq_index; + } selector; + __le32 length; +}; + +struct virtio_admin_cmd_dev_parts_metadata_result { + union { + struct { + __le32 size; + __le32 reserved; + } parts_size; + struct { + 
__le32 count; + __le32 reserved; + } hdr_list_count; + struct { + __le32 count; + __le32 reserved; + struct virtio_dev_part_hdr hdrs[0]; + } hdr_list; + }; +}; + +struct virtio_admin_cmd_dev_parts_get_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; + struct virtio_dev_part_hdr hdr_list[0]; +}; + +struct virtio_admin_cmd_dev_mode_set_data { + __u8 flags; +}; + +struct pericom8250 { + void *virt; + unsigned int nr; + int line[0]; +}; + +struct pm_clk_notifier_block { + struct notifier_block nb; + struct dev_pm_domain *pm_domain; + char *con_ids[0]; +}; + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED = 1, + PCE_STATUS_PREPARED = 2, + PCE_STATUS_ENABLED = 3, + PCE_STATUS_ERROR = 4, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; + bool enabled_when_prepared; +}; + +enum scsi_timeouts { + SCSI_DEFAULT_EH_TIMEOUT = 10000, +}; + +struct async_scan_data { + struct list_head list; + struct Scsi_Host *shost; + struct completion prev_finished; +}; + +struct mdiobus_devres { + struct mii_bus *mii; +}; + +struct iwl_fw_ini_hcmd { + u8 id; + u8 group; + __le16 reserved; + u8 data[0]; +}; + +struct iwl_fw_ini_hcmd_tlv { + struct iwl_fw_ini_header hdr; + __le32 time_point; + __le32 period_msec; + struct iwl_fw_ini_hcmd hcmd; +}; + +struct iwl_fw_ini_addr_val { + __le32 address; + __le32 value; +}; + +struct iwl_fw_ini_conf_set_tlv { + struct iwl_fw_ini_header hdr; + __le32 time_point; + __le32 set_type; + __le32 addr_offset; + struct iwl_fw_ini_addr_val addr_val[0]; +}; + +enum iwl_fw_ini_config_set_type { + IWL_FW_INI_CONFIG_SET_TYPE_INVALID = 0, + IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC = 1, + IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_PHY = 2, + IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_AUX = 3, + IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY = 4, + IWL_FW_INI_CONFIG_SET_TYPE_CSR = 5, + IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR = 6, + IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM = 7, + IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM = 8, +} __attribute__((mode(byte))); + +struct iwl_buf_alloc_frag { + __le64 addr; + __le32 size; +}; + +struct iwl_buf_alloc_cmd { + __le32 alloc_id; + __le32 buf_location; + __le32 num_frags; + struct iwl_buf_alloc_frag frags[16]; +}; + +struct iwl_dram_info { + __le32 first_word; + __le32 second_word; + struct iwl_buf_alloc_cmd dram_frags[4]; +}; + +struct iwl_dbgc1_info { + __le32 first_word; + __le32 dbgc1_add_lsb; + __le32 dbgc1_add_msb; + __le32 dbgc1_size; +}; + +enum iwl_dbg_tlv_type { + IWL_DBG_TLV_TYPE_DEBUG_INFO = 0, + IWL_DBG_TLV_TYPE_BUF_ALLOC = 1, + IWL_DBG_TLV_TYPE_HCMD = 2, + IWL_DBG_TLV_TYPE_REGION = 3, + IWL_DBG_TLV_TYPE_TRIGGER = 4, + IWL_DBG_TLV_TYPE_CONF_SET = 5, + IWL_DBG_TLV_TYPE_NUM = 6, +}; + +struct iwl_dbg_tlv_ver_data { + int min_ver; + int max_ver; +}; + +struct iwl_dbg_tlv_timer_node { + struct list_head list; + struct timer_list timer; + struct iwl_fw_runtime *fwrt; + struct iwl_ucode_tlv *tlv; +}; + +struct iwl_nvm_access_cmd { + u8 op_code; + u8 target; + __le16 type; + __le16 offset; + __le16 length; + u8 data[0]; +}; + +struct iwl_nvm_access_resp { + __le16 offset; + __le16 length; + __le16 type; + __le16 status; + u8 data[0]; +}; + +struct iwl_mcc_update_cmd { + __le16 mcc; + u8 source_id; + u8 reserved; + __le32 key; + u8 reserved2[20]; +}; + +struct iwl_mcc_update_resp_v3 { + __le32 status; + __le16 mcc; + u8 cap; + u8 source_id; + __le16 time; + __le16 geo_info; + __le32 n_channels; + __le32 channels[0]; +}; + 
+struct iwl_mcc_update_resp_v4 { + __le32 status; + __le16 mcc; + __le16 cap; + __le16 time; + __le16 geo_info; + u8 source_id; + u8 reserved[3]; + __le32 n_channels; + __le32 channels[0]; +}; + +struct iwl_mcc_chub_notif { + __le16 mcc; + u8 source_id; + u8 reserved1; +}; + +enum { + READ_NVM_CHUNK_SUCCEED = 0, + READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1, +}; + +struct iwl_time_sync_cfg_cmd { + __le32 protocols; + u8 peer_addr[6]; + u8 reserved[2]; +}; + +struct iwl_time_msmt_ptp_ctx { + union { + struct { + u8 element_id; + u8 length; + __le16 reserved; + u8 data[128]; + } ftm; + struct { + u8 element_id; + u8 length; + u8 data[128]; + } tm; + }; +}; + +struct iwl_time_msmt_notify { + u8 peer_addr[6]; + u8 reserved[2]; + __le32 dialog_token; + __le32 followup_dialog_token; + __le32 t1_hi; + __le32 t1_lo; + __le32 t1_max_err; + __le32 t4_hi; + __le32 t4_lo; + __le32 t4_max_err; + __le32 t2_hi; + __le32 t2_lo; + __le32 t2_max_err; + __le32 t3_hi; + __le32 t3_lo; + __le32 t3_max_err; + struct iwl_time_msmt_ptp_ctx ptp; +}; + +struct iwl_time_msmt_cfm_notify { + u8 peer_addr[6]; + u8 reserved[2]; + __le32 dialog_token; + __le32 t1_hi; + __le32 t1_lo; + __le32 t1_max_err; + __le32 t4_hi; + __le32 t4_lo; + __le32 t4_max_err; +}; + +struct iommu_fault_page_request { + u32 flags; + u32 pasid; + u32 grpid; + u32 perm; + u64 addr; + u64 private_data[2]; +}; + +struct iommu_fault { + u32 type; + long: 32; + struct iommu_fault_page_request prm; +}; + +struct iopf_fault { + struct iommu_fault fault; + struct list_head list; +}; + +struct iommu_attach_handle; + +struct iommu_fault_param; + +struct iopf_group { + struct iopf_fault last_fault; + struct list_head faults; + size_t fault_count; + struct list_head pending_node; + struct work_struct work; + struct iommu_attach_handle *attach_handle; + struct iommu_fault_param *fault_param; + struct list_head node; + u32 cookie; +}; + +struct iommu_fault_param {}; + +struct iommu_domain; + +typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, long unsigned int, int, void *); + +struct iommu_domain_ops; + +struct iommu_domain_geometry { + dma_addr_t aperture_start; + dma_addr_t aperture_end; + bool force_aperture; +}; + +struct iommu_dma_cookie; + +struct iommu_dirty_ops; + +struct iommu_ops; + +struct iommu_domain { + unsigned int type; + const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + const struct iommu_ops *owner; + long unsigned int pgsize_bitmap; + struct iommu_domain_geometry geometry; + struct iommu_dma_cookie *iova_cookie; + int (*iopf_handler)(struct iopf_group *); + void *fault_data; + union { + struct { + iommu_fault_handler_t handler; + void *handler_token; + }; + struct { + struct mm_struct *mm; + int users; + struct list_head next; + }; + }; +}; + +struct iommu_dirty_ops {}; + +struct iommu_ops {}; + +enum xhci_overhead_type { + LS_OVERHEAD_TYPE = 0, + FS_OVERHEAD_TYPE = 1, + HS_OVERHEAD_TYPE = 2, +}; + +struct md5_state { + u32 hash[4]; + u32 block[16]; + u64 byte_count; +}; + +struct convert_context { + struct completion restart; + struct bio *bio_in; + struct bvec_iter iter_in; + struct bio *bio_out; + struct bvec_iter iter_out; + atomic_t cc_pending; + long: 32; + u64 cc_sector; + union { + struct skcipher_request *req; + struct aead_request *req_aead; + } r; + bool aead_recheck; + bool aead_failed; +}; + +struct crypt_config; + +struct dm_crypt_io { + struct crypt_config *cc; + struct bio *base_bio; + u8 *integrity_metadata; + bool integrity_metadata_from_pool: 1; + struct work_struct work; 
+ struct convert_context ctx; + atomic_t io_pending; + blk_status_t error; + sector_t sector; + struct bvec_iter saved_bi_iter; + struct rb_node rb_node; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct iv_benbi_private { + int shift; +}; + +struct iv_lmk_private { + struct crypto_shash *hash_tfm; + u8 *seed; +}; + +struct iv_tcw_private { + struct crypto_shash *crc32_tfm; + u8 *iv_seed; + u8 *whitening; +}; + +struct iv_elephant_private { + struct crypto_skcipher *tfm; +}; + +struct crypt_iv_operations; + +struct crypt_config { + struct dm_dev *dev; + long: 32; + sector_t start; + struct percpu_counter n_allocated_pages; + struct workqueue_struct *io_queue; + struct workqueue_struct *crypt_queue; + spinlock_t write_thread_lock; + struct task_struct *write_thread; + struct rb_root write_tree; + char *cipher_string; + char *cipher_auth; + char *key_string; + const struct crypt_iv_operations *iv_gen_ops; + union { + struct iv_benbi_private benbi; + struct iv_lmk_private lmk; + struct iv_tcw_private tcw; + struct iv_elephant_private elephant; + } iv_gen_private; + u64 iv_offset; + unsigned int iv_size; + short unsigned int sector_size; + unsigned char sector_shift; + union { + struct crypto_skcipher **tfms; + struct crypto_aead **tfms_aead; + } cipher_tfm; + unsigned int tfms_count; + int workqueue_id; + long unsigned int cipher_flags; + unsigned int dmreq_start; + unsigned int per_bio_data_size; + long unsigned int flags; + unsigned int key_size; + unsigned int key_parts; + unsigned int key_extra_size; + unsigned int key_mac_size; + unsigned int integrity_tag_size; + unsigned int integrity_iv_size; + unsigned int used_tag_size; + unsigned int tuple_size; + unsigned int tag_pool_max_sectors; + mempool_t tag_pool; + mempool_t req_pool; + mempool_t page_pool; + struct bio_set bs; + struct mutex bio_alloc_lock; + u8 *authenc_key; + u8 key[0]; +}; + +struct dm_crypt_request { + struct convert_context *ctx; + struct scatterlist sg_in[4]; + struct scatterlist sg_out[4]; + long: 32; + u64 iv_sector; +}; + +struct crypt_iv_operations { + int (*ctr)(struct crypt_config *, struct dm_target *, const char *); + void (*dtr)(struct crypt_config *); + int (*init)(struct crypt_config *); + int (*wipe)(struct crypt_config *); + int (*generator)(struct crypt_config *, u8 *, struct dm_crypt_request *); + int (*post)(struct crypt_config *, u8 *, struct dm_crypt_request *); +}; + +enum flags { + DM_CRYPT_SUSPENDED = 0, + DM_CRYPT_KEY_VALID = 1, + DM_CRYPT_SAME_CPU = 2, + DM_CRYPT_HIGH_PRIORITY = 3, + DM_CRYPT_NO_OFFLOAD = 4, + DM_CRYPT_NO_READ_WORKQUEUE = 5, + DM_CRYPT_NO_WRITE_WORKQUEUE = 6, + DM_CRYPT_WRITE_INLINE = 7, +}; + +enum cipher_flags { + CRYPT_MODE_INTEGRITY_AEAD = 0, + CRYPT_IV_LARGE_SECTORS = 1, + CRYPT_ENCRYPT_PREPROCESS = 2, + CRYPT_KEY_MAC_SIZE_SET = 3, +}; + +enum { + NETNSA_NONE = 0, + NETNSA_NSID = 1, + NETNSA_PID = 2, + NETNSA_FD = 3, + NETNSA_TARGET_NSID = 4, + NETNSA_CURRENT_NSID = 5, + __NETNSA_MAX = 6, +}; + +struct net_fill_args { + u32 portid; + u32 seq; + int flags; + int cmd; + int nsid; + bool add_ref; + int ref_nsid; +}; + +struct rtnl_net_dump_cb { + struct net *tgt_net; + struct net *ref_net; + struct sk_buff *skb; + struct net_fill_args fillargs; + int idx; + int s_idx; +}; + +enum netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN = 0, + NETDEV_LAG_TX_TYPE_RANDOM = 1, + NETDEV_LAG_TX_TYPE_BROADCAST = 2, + NETDEV_LAG_TX_TYPE_ROUNDROBIN = 3, + NETDEV_LAG_TX_TYPE_ACTIVEBACKUP = 4, + NETDEV_LAG_TX_TYPE_HASH = 5, +}; + +enum netdev_lag_hash { + 
NETDEV_LAG_HASH_NONE = 0, + NETDEV_LAG_HASH_L2 = 1, + NETDEV_LAG_HASH_L34 = 2, + NETDEV_LAG_HASH_L23 = 3, + NETDEV_LAG_HASH_E23 = 4, + NETDEV_LAG_HASH_E34 = 5, + NETDEV_LAG_HASH_VLAN_SRCMAC = 6, + NETDEV_LAG_HASH_UNKNOWN = 7, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; +}; + +enum { + ETHTOOL_A_PROFILE_UNSPEC = 0, + ETHTOOL_A_PROFILE_IRQ_MODERATION = 1, + __ETHTOOL_A_PROFILE_CNT = 2, + ETHTOOL_A_PROFILE_MAX = 1, +}; + +enum { + ETHTOOL_A_IRQ_MODERATION_UNSPEC = 0, + ETHTOOL_A_IRQ_MODERATION_USEC = 1, + ETHTOOL_A_IRQ_MODERATION_PKTS = 2, + ETHTOOL_A_IRQ_MODERATION_COMPS = 3, + __ETHTOOL_A_IRQ_MODERATION_CNT = 4, + ETHTOOL_A_IRQ_MODERATION_MAX = 3, +}; + +struct coalesce_reply_data { + struct ethnl_reply_data base; + struct ethtool_coalesce coalesce; + struct kernel_ethtool_coalesce kernel_coalesce; + u32 supported_params; +}; + +struct nf_conntrack_net { + atomic_t count; + unsigned int expect_count; + unsigned int users4; + unsigned int users6; + unsigned int users_bridge; + struct ctl_table_header *sysctl_header; +}; + +struct ct_iter_state { + struct seq_net_private p; + struct hlist_nulls_head *hash; + unsigned int htable_size; + unsigned int bucket; + u_int64_t time_now; +}; + +enum nf_ct_sysctl_index { + NF_SYSCTL_CT_MAX = 0, + NF_SYSCTL_CT_COUNT = 1, + NF_SYSCTL_CT_BUCKETS = 2, + NF_SYSCTL_CT_CHECKSUM = 3, + NF_SYSCTL_CT_LOG_INVALID = 4, + NF_SYSCTL_CT_EXPECT_MAX = 5, + NF_SYSCTL_CT_ACCT = 6, + NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC = 7, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT = 8, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV = 9, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED = 10, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT = 11, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT = 12, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK = 13, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT = 14, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE = 15, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS = 16, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK = 17, + NF_SYSCTL_CT_PROTO_TCP_LOOSE = 18, + NF_SYSCTL_CT_PROTO_TCP_LIBERAL = 19, + NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST = 20, + NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS = 21, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP = 22, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM = 23, + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP = 24, + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6 = 25, + NF_SYSCTL_CT_LAST_SYSCTL = 26, +}; + +enum nft_rule_compat_flags { + NFT_RULE_COMPAT_F_UNUSED = 1, + NFT_RULE_COMPAT_F_INV = 2, + NFT_RULE_COMPAT_F_MASK = 2, +}; + +enum nft_rule_compat_attributes { + NFTA_RULE_COMPAT_UNSPEC = 0, + NFTA_RULE_COMPAT_PROTO = 1, + NFTA_RULE_COMPAT_FLAGS = 2, + __NFTA_RULE_COMPAT_MAX = 3, +}; + +enum nft_target_attributes { + NFTA_TARGET_UNSPEC = 0, + NFTA_TARGET_NAME = 1, + NFTA_TARGET_REV = 2, + NFTA_TARGET_INFO = 3, + __NFTA_TARGET_MAX = 4, +}; + +enum nft_match_attributes { + NFTA_MATCH_UNSPEC = 0, + NFTA_MATCH_NAME = 1, + NFTA_MATCH_REV = 2, + NFTA_MATCH_INFO = 3, + __NFTA_MATCH_MAX = 4, +}; + +enum { + NFNL_MSG_COMPAT_GET = 0, + NFNL_MSG_COMPAT_MAX = 1, +}; + +enum { + NFTA_COMPAT_UNSPEC = 0, + NFTA_COMPAT_NAME = 1, + NFTA_COMPAT_REV = 2, + NFTA_COMPAT_TYPE = 3, + __NFTA_COMPAT_MAX = 4, +}; + +struct ip6t_entry { + struct ip6t_ip6 ipv6; + unsigned int nfcache; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + long: 32; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct ebt_entry { + unsigned int bitmask; + unsigned int invflags; + __be16 ethproto; + char in[16]; + char logical_in[16]; + char out[16]; + char 
logical_out[16]; + unsigned char sourcemac[6]; + unsigned char sourcemsk[6]; + unsigned char destmac[6]; + unsigned char destmsk[6]; + union { + struct { + unsigned int watchers_offset; + unsigned int target_offset; + unsigned int next_offset; + }; + struct { + unsigned int watchers_offset; + unsigned int target_offset; + unsigned int next_offset; + } offsets; + }; + unsigned char elems[0]; +}; + +struct arpt_devaddr_info { + char addr[16]; + char mask[16]; +}; + +struct arpt_arp { + struct in_addr src; + struct in_addr tgt; + struct in_addr smsk; + struct in_addr tmsk; + __u8 arhln; + __u8 arhln_mask; + struct arpt_devaddr_info src_devaddr; + struct arpt_devaddr_info tgt_devaddr; + __be16 arpop; + __be16 arpop_mask; + __be16 arhrd; + __be16 arhrd_mask; + __be16 arpro; + __be16 arpro_mask; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u8 flags; + __u16 invflags; +}; + +struct arpt_entry { + struct arpt_arp arp; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + long: 32; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct nft_xt_match_priv { + void *info; +}; + +union nft_entry { + struct ipt_entry e4; + struct ip6t_entry e6; + struct ebt_entry ebt; + struct arpt_entry arp; +}; + +struct rt0_hdr { + struct ipv6_rt_hdr rt_hdr; + __u32 reserved; + struct in6_addr addr[0]; +}; + +struct ioam6_hdr { + __u8 opt_type; + __u8 opt_len; + char: 8; + __u8 type; +}; + +enum ieee80211_radiotap_tx_flags { + IEEE80211_RADIOTAP_F_TX_FAIL = 1, + IEEE80211_RADIOTAP_F_TX_CTS = 2, + IEEE80211_RADIOTAP_F_TX_RTS = 4, + IEEE80211_RADIOTAP_F_TX_NOACK = 8, + IEEE80211_RADIOTAP_F_TX_NOSEQNO = 16, + IEEE80211_RADIOTAP_F_TX_ORDER = 32, +}; + +enum mac80211_tx_status_flags { + IEEE80211_TX_STATUS_ACK_SIGNAL_VALID = 1, +}; + +struct uevent_sock { + struct list_head list; + struct sock *sk; +}; + +struct cpuidle_ops { + int (*suspend)(long unsigned int); + int (*init)(struct device_node *, int); +}; + +struct of_cpuidle_method { + const char *method; + const struct cpuidle_ops *ops; +}; + +struct arm_dma_alloc_args { + struct device *dev; + size_t size; + gfp_t gfp; + pgprot_t prot; + const void *caller; + bool want_vaddr; + int coherent_flag; +}; + +struct arm_dma_free_args { + struct device *dev; + size_t size; + void *cpu_addr; + struct page *page; + bool want_vaddr; +}; + +struct arm_dma_allocator { + void * (*alloc)(struct arm_dma_alloc_args *, struct page **); + void (*free)(struct arm_dma_free_args *); +}; + +struct arm_dma_buffer { + struct list_head list; + void *virt; + struct arm_dma_allocator *allocator; +}; + +struct cma; + +enum arm_smccc_conduit { + SMCCC_CONDUIT_NONE = 0, + SMCCC_CONDUIT_SMC = 1, + SMCCC_CONDUIT_HVC = 2, +}; + +struct arm_smccc_res { + long unsigned int a0; + long unsigned int a1; + long unsigned int a2; + long unsigned int a3; +}; + +struct platform_suspend_ops { + int (*valid)(suspend_state_t); + int (*begin)(suspend_state_t); + int (*prepare)(); + int (*prepare_late)(); + int (*enter)(suspend_state_t); + void (*wake)(); + void (*finish)(); + bool (*suspend_again)(); + void (*end)(); + void (*recover)(); +}; + +struct platform_s2idle_ops { + int (*begin)(); + int (*prepare)(); + int (*prepare_late)(); + void (*check)(); + bool (*wake)(); + void (*restore_early)(); + void (*restore)(); + void (*end)(); +}; + +struct module_sect_attr { + struct bin_attribute battr; + long unsigned int address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; 
+ struct module_sect_attr attrs[0]; +}; + +struct module_notes_attrs { + struct kobject *dir; + unsigned int notes; + struct bin_attribute attrs[0]; +}; + +struct clock_read_data { + u64 epoch_ns; + u64 epoch_cyc; + u64 sched_clock_mask; + u64 (*read_sched_clock)(); + u32 mult; + u32 shift; + long: 32; +}; + +struct clock_data { + seqcount_latch_t seq; + long: 32; + struct clock_read_data read_data[2]; + ktime_t wrap_kt; + long unsigned int rate; + u64 (*actual_read_sched_clock)(); +}; + +enum pidcg_event { + PIDCG_MAX = 0, + PIDCG_FORKFAIL = 1, + NR_PIDCG_EVENTS = 2, +}; + +struct pids_cgroup { + struct cgroup_subsys_state css; + atomic64_t counter; + atomic64_t limit; + int64_t watermark; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + atomic64_t events[2]; + atomic64_t events_local[2]; +}; + +struct saved_cmdlines_buffer { + unsigned int map_pid_to_cmdline[32769]; + unsigned int *map_cmdline_to_pid; + unsigned int cmdline_num; + int cmdline_idx; + char saved_cmdlines[0]; +}; + +struct pcpu_group_info { + int nr_units; + long unsigned int base_offset; + unsigned int *cpu_map; +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; + int nr_groups; + struct pcpu_group_info groups[0]; +}; + +typedef int pcpu_fc_cpu_to_node_fn_t(int); + +typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); + +struct trace_event_raw_percpu_alloc_percpu { + struct trace_entry ent; + long unsigned int call_site; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + void *base_addr; + int off; + void *ptr; + size_t bytes_alloc; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_percpu_free_percpu { + struct trace_entry ent; + void *base_addr; + int off; + void *ptr; + char __data[0]; +}; + +struct trace_event_raw_percpu_alloc_percpu_fail { + struct trace_entry ent; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + char __data[0]; +}; + +struct trace_event_raw_percpu_create_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_raw_percpu_destroy_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_data_offsets_percpu_alloc_percpu {}; + +struct trace_event_data_offsets_percpu_free_percpu {}; + +struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; + +struct trace_event_data_offsets_percpu_create_chunk {}; + +struct trace_event_data_offsets_percpu_destroy_chunk {}; + +typedef void (*btf_trace_percpu_alloc_percpu)(void *, long unsigned int, bool, bool, size_t, size_t, void *, int, void *, size_t, gfp_t); + +typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, void *); + +typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, size_t); + +typedef void (*btf_trace_percpu_create_chunk)(void *, void *); + +typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *); + +struct pcpu_block_md { + int scan_hint; + int scan_hint_start; + int contig_hint; + int contig_hint_start; + int left_free; + int right_free; + int first_free; + int nr_bits; +}; + +struct pcpuobj_ext { + struct obj_cgroup *cgroup; +}; + +struct pcpu_chunk { + struct list_head list; + int free_bytes; + struct pcpu_block_md chunk_md; + long unsigned int *bound_map; + long: 32; + long: 32; + long: 32; + long: 32; + void *base_addr; + long unsigned int *alloc_map; + struct pcpu_block_md *md_blocks; + void 
*data; + bool immutable; + bool isolated; + int start_offset; + int end_offset; + struct pcpuobj_ext *obj_exts; + int nr_pages; + int nr_populated; + int nr_empty_pop_pages; + long unsigned int populated[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct fd_data { + fmode_t mode; + unsigned int fd; +}; + +struct iomap_dio_ops { + int (*end_io)(struct kiocb *, ssize_t, int, unsigned int); + void (*submit_io)(const struct iomap_iter *, struct bio *, loff_t); + struct bio_set *bio_set; +}; + +struct __fat_dirent { + long int d_ino; + __kernel_off_t d_off; + short unsigned int d_reclen; + char d_name[256]; +}; + +enum { + PARSE_INVALID = 1, + PARSE_NOT_LONGNAME = 2, + PARSE_EOF = 3, +}; + +struct fat_ioctl_filldir_callback { + struct dir_context ctx; + void *dirent; + int result; + const char *longname; + int long_len; + const char *shortname; + int short_len; +}; + +struct root_name_map { + u64 id; + const char *name; + long: 32; +}; + +struct gcm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn ghash; +}; + +struct crypto_gcm_ctx { + struct crypto_skcipher *ctr; + struct crypto_ahash *ghash; +}; + +struct crypto_rfc4106_ctx { + struct crypto_aead *child; + u8 nonce[4]; +}; + +struct crypto_rfc4106_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct aead_request subreq; +}; + +struct crypto_rfc4543_instance_ctx { + struct crypto_aead_spawn aead; +}; + +struct crypto_rfc4543_ctx { + struct crypto_aead *child; + struct crypto_sync_skcipher *null; + u8 nonce[4]; +}; + +struct crypto_rfc4543_req_ctx { + struct aead_request subreq; +}; + +struct crypto_gcm_ghash_ctx { + unsigned int cryptlen; + struct scatterlist *src; + int (*complete)(struct aead_request *, u32); +}; + +struct crypto_gcm_req_priv_ctx { + u8 iv[16]; + u8 auth_tag[16]; + u8 iauth_tag[16]; + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct scatterlist sg; + struct crypto_gcm_ghash_ctx ghash_ctx; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + } u; +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; + struct list_head queued[2]; + unsigned int nr_queued[2]; + struct rb_root_cached pending_tree; + unsigned int nr_pending; + long unsigned int first_pending_disptime; + struct timer_list pending_timer; +}; + +struct throtl_data { + struct throtl_service_queue service_queue; + struct request_queue *queue; + unsigned int nr_queued[2]; + unsigned int throtl_slice; + struct work_struct dispatch_work; + bool track_bio_latency; +}; + +struct throtl_grp; + +struct throtl_qnode { + struct list_head node; + struct bio_list bios; + struct throtl_grp *tg; +}; + +struct throtl_grp { + struct blkg_policy_data pd; + struct rb_node rb_node; + struct throtl_data *td; + struct throtl_service_queue service_queue; + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + long unsigned int disptime; + unsigned int flags; + bool has_rules_bps[2]; + bool has_rules_iops[2]; + uint64_t bps[2]; + unsigned int iops[2]; + uint64_t bytes_disp[2]; + unsigned int io_disp[2]; + uint64_t last_bytes_disp[2]; + unsigned int last_io_disp[2]; + long long int carryover_bytes[2]; + int carryover_ios[2]; + long unsigned int last_check_time; + long unsigned int slice_start[2]; + long unsigned int slice_end[2]; + long: 32; + struct blkg_rwstat 
stat_bytes; + struct blkg_rwstat stat_ios; +}; + +enum tg_state_flags { + THROTL_TG_PENDING = 1, + THROTL_TG_WAS_EMPTY = 2, + THROTL_TG_CANCELING = 4, +}; + +struct io_epoll { + struct file *file; + int epfd; + int op; + int fd; + struct epoll_event event; +}; + +struct reciprocal_value_adv { + u32 m; + u8 sh; + u8 exp; + bool is_wide_m; +}; + +enum assoc_array_walk_status { + assoc_array_walk_tree_empty = 0, + assoc_array_walk_found_terminal_node = 1, + assoc_array_walk_found_wrong_shortcut = 2, +}; + +struct assoc_array_walk_result { + struct { + struct assoc_array_node *node; + int level; + int slot; + } terminal_node; + struct { + struct assoc_array_shortcut *shortcut; + int level; + int sc_level; + long unsigned int sc_segments; + long unsigned int dissimilarity; + } wrong_shortcut; +}; + +struct assoc_array_delete_collapse_context { + struct assoc_array_node *node; + const void *skip_leaf; + int slot; +}; + +struct anon_transport_class { + struct transport_class tclass; + struct attribute_container container; +}; + +enum fw_opt { + FW_OPT_UEVENT = 1, + FW_OPT_NOWAIT = 2, + FW_OPT_USERHELPER = 4, + FW_OPT_NO_WARN = 8, + FW_OPT_NOCACHE = 16, + FW_OPT_NOFALLBACK_SYSFS = 32, + FW_OPT_FALLBACK_PLATFORM = 64, + FW_OPT_PARTIAL = 128, +}; + +enum fw_status { + FW_STATUS_UNKNOWN = 0, + FW_STATUS_LOADING = 1, + FW_STATUS_DONE = 2, + FW_STATUS_ABORTED = 3, +}; + +struct fw_state { + struct completion completion; + enum fw_status status; +}; + +struct firmware_cache; + +struct fw_priv { + struct kref ref; + struct list_head list; + struct firmware_cache *fwc; + struct fw_state fw_st; + void *data; + size_t size; + size_t allocated_size; + size_t offset; + u32 opt_flags; + const char *fw_name; +}; + +struct firmware_cache { + spinlock_t lock; + struct list_head head; + int state; + spinlock_t name_lock; + struct list_head fw_names; + struct delayed_work work; + struct notifier_block pm_notify; +}; + +struct fw_cache_entry { + struct list_head list; + const char *name; +}; + +struct fw_name_devm { + long unsigned int magic; + const char *name; +}; + +struct firmware_work { + struct work_struct work; + struct module *module; + const char *name; + struct device *device; + void *context; + void (*cont)(const struct firmware *, void *); + u32 opt_flags; +}; + +enum nvme_pr_type { + NVME_PR_WRITE_EXCLUSIVE = 1, + NVME_PR_EXCLUSIVE_ACCESS = 2, + NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +enum nvme_eds { + NVME_EXTENDED_DATA_STRUCT = 1, +}; + +struct nvme_registered_ctrl { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 hostid; + __le64 rkey; +}; + +struct nvme_reservation_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[14]; + struct nvme_registered_ctrl regctl_ds[0]; +}; + +struct nvme_registered_ctrl_ext { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 rkey; + __u8 hostid[16]; + __u8 rsvd32[32]; +}; + +struct nvme_reservation_status_ext { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[14]; + __u8 rsvd24[40]; + struct nvme_registered_ctrl_ext regctl_eds[0]; +}; + +struct nvmet_pr_register_data { + __le64 crkey; + __le64 nrkey; +}; + +struct nvmet_pr_acquire_data { + __le64 crkey; + __le64 prkey; +}; + +struct nvmet_pr_release_data { + __le64 crkey; +}; + +enum nvme_pr_register_action { + NVME_PR_REGISTER_ACT_REG = 0, + NVME_PR_REGISTER_ACT_UNREG = 1, + 
NVME_PR_REGISTER_ACT_REPLACE = 2, +}; + +enum nvme_pr_acquire_action { + NVME_PR_ACQUIRE_ACT_ACQUIRE = 0, + NVME_PR_ACQUIRE_ACT_PREEMPT = 1, + NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 2, +}; + +enum nvme_pr_release_action { + NVME_PR_RELEASE_ACT_RELEASE = 0, + NVME_PR_RELEASE_ACT_CLEAR = 1, +}; + +enum nvme_pr_change_ptpl { + NVME_PR_CPTPL_NO_CHANGE = 0, + NVME_PR_CPTPL_RESV = 1073741824, + NVME_PR_CPTPL_CLEARED = -2147483648, + NVME_PR_CPTPL_PERSIST = -1073741824, +}; + +enum flow_cls_command { + FLOW_CLS_REPLACE = 0, + FLOW_CLS_DESTROY = 1, + FLOW_CLS_STATS = 2, + FLOW_CLS_TMPLT_CREATE = 3, + FLOW_CLS_TMPLT_DESTROY = 4, +}; + +struct flow_cls_common_offload { + u32 chain_index; + __be16 protocol; + u32 prio; + bool skip_sw; + struct netlink_ext_ack *extack; +}; + +struct flow_cls_offload { + struct flow_cls_common_offload common; + enum flow_cls_command command; + bool use_act_stats; + long unsigned int cookie; + struct flow_rule *rule; + long: 32; + struct flow_stats stats; + u32 classid; + long: 32; +}; + +struct tc_query_caps_base { + enum tc_setup_type type; + void *caps; +}; + +struct tc_cbs_qopt_offload { + u8 enable; + s32 queue; + s32 hicredit; + s32 locredit; + s32 idleslope; + s32 sendslope; +}; + +struct tc_etf_qopt_offload { + u8 enable; + s32 queue; +}; + +struct tc_taprio_caps { + bool supports_queue_max_sdu: 1; + bool gate_mask_per_txq: 1; + bool broken_mqprio: 1; +}; + +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +enum igb_tx_flags { + IGB_TX_FLAGS_VLAN = 1, + IGB_TX_FLAGS_TSO = 2, + IGB_TX_FLAGS_TSTAMP = 4, + IGB_TX_FLAGS_IPV4 = 16, + IGB_TX_FLAGS_CSUM = 32, +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_3K_BUFFER = 0, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED = 1, + IGB_RING_FLAG_RX_SCTP_CSUM = 2, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP = 3, + IGB_RING_FLAG_TX_CTX_IDX = 4, + IGB_RING_FLAG_TX_DETECT_HANG = 5, +}; + +enum igb_boards { + board_82575 = 0, +}; + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY = 0, + QUEUE_MODE_STREAM_RESERVATION = 1, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH = 0, + TX_QUEUE_PRIO_LOW = 1, +}; + +struct igb_reg_info { + u32 ofs; + char *name; +}; + +struct iwl_add_sta_resp { + u8 status; +}; + +struct iwl_rem_sta_resp { + u8 status; +}; + +struct iwl_rem_sta_cmd { + u8 num_sta; + u8 reserved[3]; + u8 addr[6]; + u8 reserved2[2]; +}; + +struct iwl_wep_cmd { + u8 num_keys; + u8 global_key_type; + u8 flags; + u8 reserved; + struct iwl_wep_key key[0]; +}; + +enum iwl_sec_key_flags { + IWL_SEC_KEY_FLAG_CIPHER_MASK = 7, + IWL_SEC_KEY_FLAG_CIPHER_WEP = 1, + IWL_SEC_KEY_FLAG_CIPHER_CCMP = 2, + IWL_SEC_KEY_FLAG_CIPHER_TKIP = 3, + IWL_SEC_KEY_FLAG_CIPHER_GCMP = 5, + IWL_SEC_KEY_FLAG_NO_TX = 8, + IWL_SEC_KEY_FLAG_KEY_SIZE = 16, + IWL_SEC_KEY_FLAG_MFP = 32, + IWL_SEC_KEY_FLAG_MCAST_KEY = 64, + IWL_SEC_KEY_FLAG_SPP_AMSDU = 128, +}; + +struct iwl_sec_key_cmd { + __le32 action; + union { + struct { + __le32 sta_mask; + __le32 key_id; + __le32 key_flags; + u8 key[32]; + u8 tkip_mic_rx_key[8]; + u8 tkip_mic_tx_key[8]; + __le64 rx_seq; + __le64 tx_seq; + } add; + struct { + __le32 old_sta_mask; + __le32 new_sta_mask; + __le32 key_id; + __le32 key_flags; + } modify; + struct { + __le32 sta_mask; + __le32 key_id; + __le32 key_flags; + } remove; + } u; +}; + +struct iwl_mvm_sta_key_update_data { + struct ieee80211_sta *sta; + u32 old_sta_mask; + u32 new_sta_mask; + int err; +}; + +enum ieee80211_min_mpdu_spacing { + IEEE80211_HT_MPDU_DENSITY_NONE = 0, + 
IEEE80211_HT_MPDU_DENSITY_0_25 = 1, + IEEE80211_HT_MPDU_DENSITY_0_5 = 2, + IEEE80211_HT_MPDU_DENSITY_1 = 3, + IEEE80211_HT_MPDU_DENSITY_2 = 4, + IEEE80211_HT_MPDU_DENSITY_4 = 5, + IEEE80211_HT_MPDU_DENSITY_8 = 6, + IEEE80211_HT_MPDU_DENSITY_16 = 7, +}; + +enum rtw_pwr_seq_cmd_delay_unit { + RTW_PWR_DELAY_US = 0, + RTW_PWR_DELAY_MS = 1, +}; + +enum rtw_intf_phy_cut { + RTW_INTF_PHY_CUT_A = 1, + RTW_INTF_PHY_CUT_B = 2, + RTW_INTF_PHY_CUT_C = 4, + RTW_INTF_PHY_CUT_D = 8, + RTW_INTF_PHY_CUT_E = 16, + RTW_INTF_PHY_CUT_F = 32, + RTW_INTF_PHY_CUT_G = 64, + RTW_INTF_PHY_CUT_ALL = 65535, +}; + +enum rtw_rfe_fem { + RTW_RFE_IFEM = 0, + RTW_RFE_EFEM = 1, + RTW_RFE_IFEM2G_EFEM5G = 2, + RTW_RFE_NUM = 3, +}; + +struct rtw8822bu_efuse { + u8 res4[4]; + u8 usb_optional_function; + u8 res5[30]; + u8 res6[2]; + u8 serial[11]; + u8 vid; + u8 res7; + u8 pid; + u8 res8[4]; + u8 mac_addr[6]; + u8 res9[2]; + u8 vendor_name[7]; + u8 res10[2]; + u8 device_name[20]; + u8 res11[207]; + u8 package_type; + u8 res12[4]; +}; + +struct rtw8822be_efuse { + u8 mac_addr[6]; + u8 vender_id[2]; + u8 device_id[2]; + u8 sub_vender_id[2]; + u8 sub_device_id[2]; + u8 pmc[2]; + u8 exp_device_cap[2]; + u8 msi_cap; + u8 ltr_cap; + u8 exp_link_control[2]; + u8 link_cap[4]; + u8 link_control[2]; + u8 serial_number[8]; + u8 res0: 2; + u8 ltr_en: 1; + u8 res1: 2; + u8 obff: 2; + char: 1; + u8 res2: 3; + u8 obff_cap: 2; + short: 3; + u8 res3: 4; + u8 res4[3]; + u8 class_code[3]; + u8 pci_pm_L1_2_supp: 1; + u8 pci_pm_L1_1_supp: 1; + u8 aspm_pm_L1_2_supp: 1; + u8 aspm_pm_L1_1_supp: 1; + u8 L1_pm_substates_supp: 1; + u8 res5: 3; + u8 port_common_mode_restore_time; + u8 port_t_power_on_scale: 2; + u8 res6: 1; + u8 port_t_power_on_value: 5; + u8 res7; +}; + +struct rtw8822bs_efuse { + u8 res4[74]; + u8 mac_addr[6]; +}; + +struct rtw8822b_efuse { + __le16 rtl_id; + u8 res0[4]; + u8 usb_mode; + u8 res1[9]; + struct rtw_txpwr_idx txpwr_idx_table[4]; + u8 channel_plan; + u8 xtal_k; + u8 thermal_meter; + u8 iqk_lck; + u8 pa_type; + u8 lna_type_2g[2]; + u8 lna_type_5g[2]; + u8 rf_board_option; + u8 rf_feature_option; + u8 rf_bt_setting; + u8 eeprom_version; + u8 eeprom_customer_id; + u8 tx_bb_swing_setting_2g; + u8 tx_bb_swing_setting_5g; + u8 tx_pwr_calibrate_rate; + u8 rf_antenna_option; + u8 rfe_option; + u8 country_code[2]; + u8 res[3]; + union { + struct rtw8822be_efuse e; + struct rtw8822bu_efuse u; + struct rtw8822bs_efuse s; + }; +}; + +enum { + CCUT_IDX_1R_2G = 0, + CCUT_IDX_2R_2G = 1, + CCUT_IDX_1R_5G = 2, + CCUT_IDX_2R_5G = 3, + CCUT_IDX_NR = 4, +}; + +struct cca_ccut { + u32 reg82c[4]; + u32 reg830[4]; + u32 reg838[4]; +}; + +struct rtw8822b_rfe_info { + const struct cca_ccut *cca_ccut_2g; + const struct cca_ccut *cca_ccut_5g; + enum rtw_rfe_fem fem; + bool ifem_ext; + void (*rtw_set_channel_rfe)(struct rtw_dev *, u8); +}; + +struct i2c_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT = 0, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1, +}; + +struct i2c_driver { + unsigned int class; + int (*probe)(struct i2c_client *); + void (*remove)(struct i2c_client *); + void (*shutdown)(struct i2c_client *); + void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned int); + int (*command)(struct i2c_client *, unsigned int, void *); + struct device_driver driver; + const struct i2c_device_id *id_table; + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const short unsigned int *address_list; + struct list_head clients; + u32 flags; +}; + +struct i2c_smbus_alert 
{ + struct work_struct alert; + struct i2c_client *ara; +}; + +struct alert_data { + short unsigned int addr; + enum i2c_alert_protocol type; + unsigned int data; +}; + +enum power_supply_notifier_events { + PSY_EVENT_PROP_CHANGED = 0, +}; + +struct power_supply_config { + struct device_node *of_node; + struct fwnode_handle *fwnode; + void *drv_data; + const struct attribute_group **attr_grp; + char **supplied_to; + size_t num_supplicants; + bool no_wakeup_source; +}; + +struct psy_am_i_supplied_data { + struct power_supply *psy; + unsigned int count; +}; + +struct psy_get_supplier_prop_data { + struct power_supply *psy; + enum power_supply_property psp; + union power_supply_propval *val; +}; + +struct simplefb_platform_data { + u32 width; + u32 height; + u32 stride; + const char *format; +}; + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; + __u32 frag_off; +}; + +struct mpls_shim_hdr { + __be32 label_stack_entry; +}; + +struct ts_state { + unsigned int offset; + char cb[48]; +}; + +struct ts_config; + +struct ts_ops { + const char *name; + struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); + unsigned int (*find)(struct ts_config *, struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +struct ts_config { + struct ts_ops *ops; + int flags; + unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *); + void (*finish)(struct ts_config *, struct ts_state *); +}; + +struct page_frag_1k { + void *va; + u16 offset; + bool pfmemalloc; +}; + +struct napi_alloc_cache { + local_lock_t bh_lock; + struct page_frag_cache page; + struct page_frag_1k page_small; + unsigned int skb_count; + void *skb_cache[64]; +}; + +struct skb_free_array { + unsigned int skb_count; + void *skb_array[16]; +}; + +typedef int (*sendmsg_func)(struct sock *, struct msghdr *); + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; + short unsigned int overhead; + short int cell_align; + short unsigned int mpu; + __u32 rate; +}; + +struct tc_prio_qopt { + int bands; + __u8 priomap[16]; +}; + +struct skb_array { + struct ptr_ring ring; +}; + +struct psched_ratecfg { + u64 rate_bytes_ps; + u32 mult; + u16 overhead; + u16 mpu; + u8 linklayer; + u8 shift; + long: 32; +}; + +struct psched_pktrate { + u64 rate_pkts_ps; + u32 mult; + u8 shift; +}; + +struct mini_Qdisc_pair { + struct mini_Qdisc miniq1; + struct mini_Qdisc miniq2; + struct mini_Qdisc **p_miniq; +}; + +struct pfifo_fast_priv { + struct skb_array q[3]; +}; + +enum ethtool_c33_pse_ext_state { + ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION = 1, + ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID = 2, + ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE = 3, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED = 4, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM = 5, + ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED = 6, + ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE = 7, + ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE = 8, + ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED = 9, +}; + +enum ethtool_c33_pse_ext_substate_error_condition { + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT = 3, + 
ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON = 4, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS = 5, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF = 6, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN = 7, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE = 8, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP = 9, +}; + +enum ethtool_c33_pse_ext_substate_mr_pse_enable { + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE = 1, +}; + +enum ethtool_c33_pse_ext_substate_option_detect_ted { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR = 2, +}; + +enum ethtool_c33_pse_ext_substate_option_vport_lim { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION = 3, +}; + +enum ethtool_c33_pse_ext_substate_ovld_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD = 1, +}; + +enum ethtool_c33_pse_ext_substate_power_not_available { + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT = 3, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT = 4, +}; + +enum ethtool_c33_pse_ext_substate_short_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION = 1, +}; + +enum ethtool_c33_pse_admin_state { + ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED = 2, + ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED = 3, +}; + +enum ethtool_c33_pse_pw_d_status { + ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED = 2, + ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING = 3, + ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING = 4, + ETHTOOL_C33_PSE_PW_D_STATUS_TEST = 5, + ETHTOOL_C33_PSE_PW_D_STATUS_FAULT = 6, + ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT = 7, +}; + +enum ethtool_podl_pse_admin_state { + ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED = 2, + ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED = 3, +}; + +enum ethtool_podl_pse_pw_d_status { + ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED = 2, + ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING = 3, + ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING = 4, + ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP = 5, + ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE = 6, + ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR = 7, +}; + +struct ethtool_c33_pse_ext_state_info { + enum ethtool_c33_pse_ext_state c33_pse_ext_state; + union { + enum ethtool_c33_pse_ext_substate_error_condition error_condition; + enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable; + enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted; + enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim; + enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected; + enum ethtool_c33_pse_ext_substate_power_not_available power_not_available; + enum ethtool_c33_pse_ext_substate_short_detected short_detected; + u32 __c33_pse_ext_substate; + }; +}; + +struct ethtool_c33_pse_pw_limit_range { + u32 min; + u32 max; +}; + +struct pse_control_config { + enum ethtool_podl_pse_admin_state podl_admin_control; + enum ethtool_c33_pse_admin_state c33_admin_control; +}; + 
+struct pse_control_status { + enum ethtool_podl_pse_admin_state podl_admin_state; + enum ethtool_podl_pse_pw_d_status podl_pw_status; + enum ethtool_c33_pse_admin_state c33_admin_state; + enum ethtool_c33_pse_pw_d_status c33_pw_status; + u32 c33_pw_class; + u32 c33_actual_pw; + struct ethtool_c33_pse_ext_state_info c33_ext_state_info; + u32 c33_avail_pw_limit; + struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges; + u32 c33_pw_limit_nb_ranges; +}; + +enum { + ETHTOOL_A_C33_PSE_PW_LIMIT_UNSPEC = 0, + ETHTOOL_A_C33_PSE_PW_LIMIT_MIN = 1, + ETHTOOL_A_C33_PSE_PW_LIMIT_MAX = 2, +}; + +struct pse_reply_data { + struct ethnl_reply_data base; + struct pse_control_status status; +}; + +struct nft_jumpstack { + const struct nft_rule_dp *rule; +}; + +struct nft_offload_ethertype { + __be16 value; + __be16 mask; +}; + +struct ping_iter_state { + struct seq_net_private p; + int bucket; + sa_family_t family; +}; + +struct pingfakehdr { + struct icmphdr icmph; + struct msghdr *msg; + sa_family_t family; + __wsum wcheck; +}; + +struct ping_table { + struct hlist_head hash[64]; + spinlock_t lock; +}; + +struct radiotap_align_size { + uint8_t align: 4; + uint8_t size: 4; +}; + +struct ieee80211_radiotap_namespace { + const struct radiotap_align_size *align_size; + int n_bits; + uint32_t oui; + uint8_t subns; +}; + +struct ieee80211_radiotap_vendor_namespaces { + const struct ieee80211_radiotap_namespace *ns; + int n_ns; +}; + +struct ieee80211_radiotap_iterator { + struct ieee80211_radiotap_header *_rtheader; + const struct ieee80211_radiotap_vendor_namespaces *_vns; + const struct ieee80211_radiotap_namespace *current_namespace; + unsigned char *_arg; + unsigned char *_next_ns_data; + __le32 *_next_bitmap; + unsigned char *this_arg; + int this_arg_index; + int this_arg_size; + int is_radiotap_ns; + int _max_length; + int _arg_index; + uint32_t _bitmap_shifter; + int _reset_on_ext; +}; + +enum nl80211_bss_cannot_use_reasons { + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 1, + NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 2, +}; + +struct ieee80211_tbtt_info_7_8_9 { + u8 tbtt_offset; + u8 bssid[6]; + u8 bss_params; + s8 psd_20; +}; + +enum cfg80211_bss_frame_type { + CFG80211_BSS_FTYPE_UNKNOWN = 0, + CFG80211_BSS_FTYPE_BEACON = 1, + CFG80211_BSS_FTYPE_PRESP = 2, + CFG80211_BSS_FTYPE_S1G_BEACON = 3, +}; + +struct cfg80211_colocated_ap { + struct list_head list; + u8 bssid[6]; + u8 ssid[32]; + size_t ssid_len; + u32 short_ssid; + u32 center_freq; + u8 unsolicited_probe: 1; + u8 oct_recommended: 1; + u8 same_ssid: 1; + u8 multi_bss: 1; + u8 transmitted_bssid: 1; + u8 colocated_ess: 1; + u8 short_ssid_valid: 1; + s8 psd_20; +}; + +struct colocated_ap_data { + const struct element *ssid_elem; + struct list_head ap_list; + u32 s_ssid_tmp; + int n_coloc; +}; + +enum bss_compare_mode { + BSS_CMP_REGULAR = 0, + BSS_CMP_HIDE_ZLEN = 1, + BSS_CMP_HIDE_NUL = 2, +}; + +struct cfg80211_inform_single_bss_data { + struct cfg80211_inform_bss *drv_data; + enum cfg80211_bss_frame_type ftype; + struct ieee80211_channel *channel; + u8 bssid[6]; + long: 32; + u64 tsf; + u16 capability; + u16 beacon_interval; + const u8 *ie; + size_t ielen; + enum bss_source_type bss_source; + struct cfg80211_bss *source_bss; + u8 max_bssid_indicator; + u8 bssid_index; + u8 use_for; + u64 cannot_use_reasons; +}; + +struct cfg80211_mle { + struct ieee80211_multi_link_elem *mle; + struct ieee80211_mle_per_sta_profile *sta_prof[15]; + ssize_t sta_prof_len[15]; + u8 data[0]; +}; + +struct tbtt_info_iter_data { + const struct ieee80211_neighbor_ap_info 
*ap_info; + u8 param_ch_count; + u32 use_for; + u8 mld_id; + u8 link_id; + bool non_tx; +}; + +struct arm_smccc_quirk { + int id; + union { + long unsigned int a6; + } state; +}; + +struct async_entry { + struct list_head domain_list; + struct list_head global_list; + struct work_struct work; + async_cookie_t cookie; + async_func_t func; + void *data; + struct async_domain *domain; + long: 32; +}; + +enum { + GP_IDLE = 0, + GP_ENTER = 1, + GP_PASSED = 2, + GP_EXIT = 3, + GP_REPLAY = 4, +}; + +struct rcu_gp_oldstate { + long unsigned int rgos_norm; + long unsigned int rgos_exp; +}; + +struct rcu_exp_work { + long unsigned int rew_s; + struct kthread_work rew_work; +}; + +struct rcu_node { + raw_spinlock_t lock; + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + long unsigned int completedqs; + long unsigned int qsmask; + long unsigned int rcu_gp_init_mask; + long unsigned int qsmaskinit; + long unsigned int qsmaskinitnext; + long unsigned int expmask; + long unsigned int expmaskinit; + long unsigned int expmaskinitnext; + struct kthread_worker *exp_kworker; + long unsigned int cbovldmask; + long unsigned int ffmask; + long unsigned int grpmask; + int grplo; + int grphi; + u8 grpnum; + u8 level; + bool wait_blkd_tasks; + struct rcu_node *parent; + struct list_head blkd_tasks; + struct list_head *gp_tasks; + struct list_head *exp_tasks; + struct list_head *boost_tasks; + struct rt_mutex boost_mtx; + long unsigned int boost_time; + struct mutex kthread_mutex; + struct task_struct *boost_kthread_task; + unsigned int boost_kthread_status; + long unsigned int n_boosts; + struct swait_queue_head nocb_gp_wq[2]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + raw_spinlock_t fqslock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t exp_lock; + long unsigned int exp_seq_rq; + wait_queue_head_t exp_wq[4]; + struct rcu_exp_work rew; + bool exp_need_flush; + raw_spinlock_t exp_poll_lock; + long unsigned int exp_seq_poll_rq; + struct work_struct exp_poll_wq; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +union rcu_noqs { + struct { + u8 norm; + u8 exp; + } b; + u16 s; +}; + +struct rcu_snap_record { + long unsigned int gp_seq; + long: 32; + u64 cputime_irq; + u64 cputime_softirq; + u64 cputime_system; + long unsigned int nr_hardirqs; + unsigned int nr_softirqs; + long long unsigned int nr_csw; + long unsigned int jiffies; + long: 32; +}; + +struct rcu_data { + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + union rcu_noqs cpu_no_qs; + bool core_needs_qs; + bool beenonline; + bool gpwrap; + bool cpu_started; + struct rcu_node *mynode; + long unsigned int grpmask; + long unsigned int ticks_this_gp; + struct irq_work defer_qs_iw; + bool defer_qs_iw_pending; + struct work_struct strict_work; + struct rcu_segcblist cblist; + long int qlen_last_fqs_check; + long unsigned int n_cbs_invoked; + long unsigned int n_force_qs_snap; + long int blimit; + int watching_snap; + bool rcu_need_heavy_qs; + bool rcu_urgent_qs; + bool rcu_forced_tick; + bool rcu_forced_tick_exp; + long unsigned int barrier_seq_snap; + struct callback_head barrier_head; + int exp_watching_snap; + struct swait_queue_head nocb_cb_wq; + struct swait_queue_head nocb_state_wq; + struct task_struct *nocb_gp_kthread; + raw_spinlock_t nocb_lock; + int nocb_defer_wakeup; + struct timer_list nocb_timer; + long unsigned int nocb_gp_adv_time; + struct 
mutex nocb_gp_kthread_mutex; + long: 32; + long: 32; + long: 32; + raw_spinlock_t nocb_bypass_lock; + struct rcu_cblist nocb_bypass; + long unsigned int nocb_bypass_first; + long unsigned int nocb_nobypass_last; + int nocb_nobypass_count; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + raw_spinlock_t nocb_gp_lock; + u8 nocb_gp_sleep; + u8 nocb_gp_bypass; + u8 nocb_gp_gp; + long unsigned int nocb_gp_seq; + long unsigned int nocb_gp_loops; + struct swait_queue_head nocb_gp_wq; + bool nocb_cb_sleep; + struct task_struct *nocb_cb_kthread; + struct list_head nocb_head_rdp; + struct list_head nocb_entry_rdp; + struct rcu_data *nocb_toggling_rdp; + long: 32; + long: 32; + struct rcu_data *nocb_gp_rdp; + struct task_struct *rcu_cpu_kthread_task; + unsigned int rcu_cpu_kthread_status; + char rcu_cpu_has_work; + long unsigned int rcuc_activity; + unsigned int softirq_snap; + struct irq_work rcu_iw; + bool rcu_iw_pending; + long unsigned int rcu_iw_gp_seq; + long unsigned int rcu_ofl_gp_seq; + short int rcu_ofl_gp_state; + long unsigned int rcu_onl_gp_seq; + short int rcu_onl_gp_state; + long unsigned int last_fqs_resched; + long unsigned int last_sched_clock; + struct rcu_snap_record snap_record; + long int lazy_len; + int cpu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sr_wait_node { + atomic_t inuse; + struct llist_node node; +}; + +struct rcu_state { + struct rcu_node node[3]; + struct rcu_node *level[3]; + int ncpus; + int n_online_cpus; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int gp_seq; + long unsigned int gp_max; + struct task_struct *gp_kthread; + struct swait_queue_head gp_wq; + short int gp_flags; + short int gp_state; + long unsigned int gp_wake_time; + long unsigned int gp_wake_seq; + long unsigned int gp_seq_polled; + long unsigned int gp_seq_polled_snap; + long unsigned int gp_seq_polled_exp_snap; + struct mutex barrier_mutex; + atomic_t barrier_cpu_count; + struct completion barrier_completion; + long unsigned int barrier_sequence; + raw_spinlock_t barrier_lock; + struct mutex exp_mutex; + struct mutex exp_wake_mutex; + long unsigned int expedited_sequence; + atomic_t expedited_need_qs; + struct swait_queue_head expedited_wq; + int ncpus_snap; + u8 cbovld; + u8 cbovldnext; + long unsigned int jiffies_force_qs; + long unsigned int jiffies_kick_kthreads; + long unsigned int n_force_qs; + long unsigned int gp_start; + long unsigned int gp_end; + long unsigned int gp_activity; + long unsigned int gp_req_activity; + long unsigned int jiffies_stall; + int nr_fqs_jiffies_stall; + long unsigned int jiffies_resched; + long unsigned int n_force_qs_gpstart; + const char *name; + char abbr; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + arch_spinlock_t ofl_lock; + struct llist_head srs_next; + struct llist_node *srs_wait_tail; + struct llist_node *srs_done_tail; + struct sr_wait_node srs_wait_nodes[5]; + struct work_struct srs_cleanup_work; + atomic_t srs_cleanups_pending; + struct mutex nocb_mutex; + int nocb_is_setup; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct kvfree_rcu_bulk_data { + struct list_head list; + struct rcu_gp_oldstate gp_snap; + long unsigned int 
nr_records; + void *records[0]; +}; + +struct kfree_rcu_cpu; + +struct kfree_rcu_cpu_work { + struct rcu_work rcu_work; + struct callback_head *head_free; + struct rcu_gp_oldstate head_free_gp_snap; + struct list_head bulk_head_free[2]; + struct kfree_rcu_cpu *krcp; +}; + +struct kfree_rcu_cpu { + struct callback_head *head; + long unsigned int head_gp_snap; + atomic_t head_count; + struct list_head bulk_head[2]; + atomic_t bulk_count[2]; + struct kfree_rcu_cpu_work krw_arr[2]; + raw_spinlock_t lock; + struct delayed_work monitor_work; + bool initialized; + struct delayed_work page_cache_work; + atomic_t backoff_page_cache_fill; + atomic_t work_in_progress; + long: 32; + struct hrtimer hrtimer; + struct llist_head bkvcache; + int nr_bkv_objs; +}; + +struct rcu_stall_chk_rdr { + int nesting; + union rcu_special rs; + bool on_blkd_list; +}; + +enum { + BPF_F_UPROBE_MULTI_RETURN = 1, +}; + +enum { + BPF_F_GET_BRANCH_RECORDS_SIZE = 1, +}; + +struct bpf_perf_event_value { + __u64 counter; + __u64 enabled; + __u64 running; +}; + +struct btf_ptr { + void *ptr; + __u32 type_id; + __u32 flags; +}; + +struct bpf_key { + struct key *key; + bool has_ref; +}; + +struct perf_event_query_bpf { + __u32 ids_len; + __u32 prog_cnt; + __u32 ids[0]; +}; + +typedef int perf_snapshot_branch_stack_t(struct perf_branch_entry *, unsigned int); + +struct trace_event_raw_bpf_trace_printk { + struct trace_entry ent; + u32 __data_loc_bpf_string; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trace_printk { + u32 bpf_string; + const void *bpf_string_ptr_; +}; + +typedef void (*btf_trace_bpf_trace_printk)(void *, const char *); + +struct bpf_trace_module { + struct module *module; + struct list_head list; +}; + +typedef u64 (*btf_bpf_override_return)(struct pt_regs *, long unsigned int); + +typedef u64 (*btf_bpf_probe_read_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_user_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_write_user)(void *, const void *, u32); + +typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_trace_vprintk)(char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf_btf)(struct seq_file *, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, struct bpf_perf_event_value *, u32); + +struct bpf_trace_sample_data { + struct perf_sample_data sds[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64); + +struct bpf_nested_pt_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_get_current_task)(); + +typedef u64 (*btf_bpf_get_current_task_btf)(); + +typedef u64 (*btf_bpf_task_pt_regs)(struct task_struct *); + +struct send_signal_irq_work { + struct irq_work irq_work; + struct task_struct *task; + u32 sig; + enum pid_type type; + bool has_siginfo; + struct kernel_siginfo info; +}; + +typedef u64 (*btf_bpf_send_signal)(u32); + +typedef u64 (*btf_bpf_send_signal_thread)(u32); + +typedef 
u64 (*btf_bpf_d_path)(struct path *, char *, u32); + +typedef u64 (*btf_bpf_snprintf_btf)(char *, u32, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_get_func_ip_tracing)(void *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_trace)(void *); + +typedef u64 (*btf_bpf_get_attach_cookie_pe)(struct bpf_perf_event_data_kern *); + +typedef u64 (*btf_bpf_get_attach_cookie_tracing)(void *); + +typedef u64 (*btf_bpf_get_branch_snapshot)(void *, u32, u64); + +typedef u64 (*btf_get_func_arg)(void *, u32, u64 *); + +typedef u64 (*btf_get_func_ret)(void *, u64 *); + +typedef u64 (*btf_get_func_arg_cnt)(void *); + +typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64); + +typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, struct bpf_perf_event_value *, u32); + +typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct bpf_raw_tp_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64); + +struct bpf_session_run_ctx { + struct bpf_run_ctx run_ctx; + bool is_return; + void *data; +}; + +struct bpf_uprobe_multi_link; + +struct bpf_uprobe { + struct bpf_uprobe_multi_link *link; + long: 32; + loff_t offset; + long unsigned int ref_ctr_offset; + long: 32; + u64 cookie; + struct uprobe *uprobe; + long: 32; + struct uprobe_consumer consumer; + bool session; + long: 32; +}; + +struct bpf_uprobe_multi_link { + struct path path; + struct bpf_link link; + u32 cnt; + u32 flags; + struct bpf_uprobe *uprobes; + struct task_struct *task; +}; + +struct bpf_uprobe_multi_run_ctx { + struct bpf_session_run_ctx session_ctx; + long unsigned int entry_ip; + struct bpf_uprobe *uprobe; +}; + +enum { + BPF_RB_NO_WAKEUP = 1, + BPF_RB_FORCE_WAKEUP = 2, +}; + +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + +enum { + BPF_RINGBUF_BUSY_BIT = 2147483648, + BPF_RINGBUF_DISCARD_BIT = 1073741824, + BPF_RINGBUF_HDR_SZ = 8, +}; + +struct bpf_ringbuf { + wait_queue_head_t waitq; + struct irq_work work; + long: 32; + u64 mask; + struct page **pages; + int nr_pages; + long: 32; + long: 32; + long: 32; + long: 32; + raw_spinlock_t spinlock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_t busy; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int consumer_pos; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int producer_pos; + long unsigned int pending_pos; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + char data[0]; +}; + +struct bpf_ringbuf_map { + struct bpf_map map; + struct bpf_ringbuf *rb; + long: 32; +}; + +struct bpf_ringbuf_hdr { + u32 len; + u32 pg_off; +}; + +typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_ringbuf_reserve_dynptr)(struct bpf_map *, u32, u64, struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_ringbuf_submit_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_user_ringbuf_drain)(struct bpf_map *, void *, void *, u64); + +enum mminit_level { + MMINIT_WARNING = 0, + MMINIT_VERIFY = 1, + MMINIT_TRACE = 2, +}; + +struct vma_prepare { + struct vm_area_struct *vma; + struct vm_area_struct *adj_next; + struct file *file; + struct address_space *mapping; + struct anon_vma *anon_vma; + struct vm_area_struct *insert; + struct vm_area_struct *remove; + struct vm_area_struct *remove2; +}; + +struct vma_munmap_struct { + struct vma_iterator *vmi; + struct vm_area_struct *vma; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct list_head *uf; + long unsigned int start; + long unsigned int end; + long unsigned int unmap_start; + long unsigned int unmap_end; + int vma_count; + bool unlock; + bool clear_ptes; + long unsigned int nr_pages; + long unsigned int locked_vm; + long unsigned int nr_accounted; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int data_vm; +}; + +struct mmap_state { + struct mm_struct *mm; + struct vma_iterator *vmi; + long unsigned int addr; + long unsigned int end; + long unsigned int pgoff; + long unsigned int pglen; + long unsigned int flags; + struct file *file; + long unsigned int charged; + bool retry_merge; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct vma_munmap_struct vms; + struct ma_state mas_detach; + struct maple_tree mt_detach; +}; + +struct inodes_stat_t { + long int nr_inodes; + long int nr_unused; + long int dummy[5]; +}; + +struct trace_event_raw_ctime { + struct trace_entry ent; + dev_t dev; + ino_t ino; + time64_t ctime_s; + u32 ctime_ns; + u32 gen; + char __data[0]; +}; + +struct trace_event_raw_ctime_ns_xchg { + struct trace_entry ent; + dev_t dev; + ino_t ino; + u32 gen; + u32 old; + u32 new; + u32 cur; + char __data[0]; +}; + +struct trace_event_raw_fill_mg_cmtime { + struct trace_entry ent; + dev_t dev; + ino_t ino; + time64_t ctime_s; + time64_t mtime_s; + u32 ctime_ns; + u32 mtime_ns; + u32 gen; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_ctime {}; + +struct trace_event_data_offsets_ctime_ns_xchg {}; + +struct 
trace_event_data_offsets_fill_mg_cmtime {}; + +typedef void (*btf_trace_inode_set_ctime_to_ts)(void *, struct inode *, struct timespec64 *); + +typedef void (*btf_trace_ctime_xchg_skip)(void *, struct inode *, struct timespec64 *); + +typedef void (*btf_trace_ctime_ns_xchg)(void *, struct inode *, u32, u32, u32); + +typedef void (*btf_trace_fill_mg_cmtime)(void *, struct inode *, struct timespec64 *, struct timespec64 *); + +typedef long int intptr_t; + +enum resctrl_conf_type { + CDP_NONE = 0, + CDP_CODE = 1, + CDP_DATA = 2, +}; + +enum proc_mem_force { + PROC_MEM_FORCE_ALWAYS = 0, + PROC_MEM_FORCE_PTRACE = 1, + PROC_MEM_FORCE_NEVER = 2, +}; + +struct pid_entry { + const char *name; + unsigned int len; + umode_t mode; + const struct inode_operations *iop; + const struct file_operations *fop; + union proc_op op; +}; + +struct limit_names { + const char *name; + const char *unit; +}; + +struct map_files_info { + long unsigned int start; + long unsigned int end; + fmode_t mode; +}; + +struct tgid_iter { + unsigned int tgid; + struct task_struct *task; +}; + +struct fat_cache { + struct list_head cache_list; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct fat_cache_id { + unsigned int id; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct btrfs_em_shrink_ctx { + long int nr_to_scan; + long int scanned; +}; + +struct fsverity_descriptor { + __u8 version; + __u8 hash_algorithm; + __u8 log_blocksize; + __u8 salt_size; + __le32 sig_size; + __le64 data_size; + __u8 root_hash[64]; + __u8 salt[32]; + __u8 __reserved[144]; + __u8 signature[0]; +}; + +struct btrfs_stream_header { + char magic[13]; + __le32 version; +} __attribute__((packed)); + +struct btrfs_cmd_header { + __le32 len; + __le16 cmd; + __le32 crc; +} __attribute__((packed)); + +struct btrfs_tlv_header { + __le16 tlv_type; + __le16 tlv_len; +}; + +enum btrfs_send_cmd { + BTRFS_SEND_C_UNSPEC = 0, + BTRFS_SEND_C_SUBVOL = 1, + BTRFS_SEND_C_SNAPSHOT = 2, + BTRFS_SEND_C_MKFILE = 3, + BTRFS_SEND_C_MKDIR = 4, + BTRFS_SEND_C_MKNOD = 5, + BTRFS_SEND_C_MKFIFO = 6, + BTRFS_SEND_C_MKSOCK = 7, + BTRFS_SEND_C_SYMLINK = 8, + BTRFS_SEND_C_RENAME = 9, + BTRFS_SEND_C_LINK = 10, + BTRFS_SEND_C_UNLINK = 11, + BTRFS_SEND_C_RMDIR = 12, + BTRFS_SEND_C_SET_XATTR = 13, + BTRFS_SEND_C_REMOVE_XATTR = 14, + BTRFS_SEND_C_WRITE = 15, + BTRFS_SEND_C_CLONE = 16, + BTRFS_SEND_C_TRUNCATE = 17, + BTRFS_SEND_C_CHMOD = 18, + BTRFS_SEND_C_CHOWN = 19, + BTRFS_SEND_C_UTIMES = 20, + BTRFS_SEND_C_END = 21, + BTRFS_SEND_C_UPDATE_EXTENT = 22, + BTRFS_SEND_C_MAX_V1 = 22, + BTRFS_SEND_C_FALLOCATE = 23, + BTRFS_SEND_C_FILEATTR = 24, + BTRFS_SEND_C_ENCODED_WRITE = 25, + BTRFS_SEND_C_MAX_V2 = 25, + BTRFS_SEND_C_ENABLE_VERITY = 26, + BTRFS_SEND_C_MAX_V3 = 26, + BTRFS_SEND_C_MAX = 26, +}; + +enum { + BTRFS_SEND_A_UNSPEC = 0, + BTRFS_SEND_A_UUID = 1, + BTRFS_SEND_A_CTRANSID = 2, + BTRFS_SEND_A_INO = 3, + BTRFS_SEND_A_SIZE = 4, + BTRFS_SEND_A_MODE = 5, + BTRFS_SEND_A_UID = 6, + BTRFS_SEND_A_GID = 7, + BTRFS_SEND_A_RDEV = 8, + BTRFS_SEND_A_CTIME = 9, + BTRFS_SEND_A_MTIME = 10, + BTRFS_SEND_A_ATIME = 11, + BTRFS_SEND_A_OTIME = 12, + BTRFS_SEND_A_XATTR_NAME = 13, + BTRFS_SEND_A_XATTR_DATA = 14, + BTRFS_SEND_A_PATH = 15, + BTRFS_SEND_A_PATH_TO = 16, + BTRFS_SEND_A_PATH_LINK = 17, + BTRFS_SEND_A_FILE_OFFSET = 18, + BTRFS_SEND_A_DATA = 19, + BTRFS_SEND_A_CLONE_UUID = 20, + BTRFS_SEND_A_CLONE_CTRANSID = 21, + BTRFS_SEND_A_CLONE_PATH = 22, + BTRFS_SEND_A_CLONE_OFFSET = 23, + BTRFS_SEND_A_CLONE_LEN = 24, + BTRFS_SEND_A_MAX_V1 = 24, + BTRFS_SEND_A_FALLOCATE_MODE = 25, + 
BTRFS_SEND_A_FILEATTR = 26, + BTRFS_SEND_A_UNENCODED_FILE_LEN = 27, + BTRFS_SEND_A_UNENCODED_LEN = 28, + BTRFS_SEND_A_UNENCODED_OFFSET = 29, + BTRFS_SEND_A_COMPRESSION = 30, + BTRFS_SEND_A_ENCRYPTION = 31, + BTRFS_SEND_A_MAX_V2 = 31, + BTRFS_SEND_A_VERITY_ALGORITHM = 32, + BTRFS_SEND_A_VERITY_BLOCK_SIZE = 33, + BTRFS_SEND_A_VERITY_SALT_DATA = 34, + BTRFS_SEND_A_VERITY_SIG_DATA = 35, + BTRFS_SEND_A_MAX_V3 = 35, + __BTRFS_SEND_A_MAX = 35, +}; + +struct fs_path { + union { + struct { + char *start; + char *end; + char *buf; + short unsigned int buf_len: 15; + short unsigned int reversed: 1; + char inline_buf[0]; + }; + char pad[256]; + }; +}; + +struct clone_root { + struct btrfs_root *root; + long: 32; + u64 ino; + u64 offset; + u64 num_bytes; + bool found_ref; + long: 32; +}; + +struct backref_cache_entry { + struct btrfs_lru_cache_entry entry; + u64 root_ids[17]; + int num_roots; + long: 32; +}; + +struct send_ctx { + struct file *send_filp; + long: 32; + loff_t send_off; + char *send_buf; + u32 send_size; + u32 send_max_size; + bool put_data; + struct page **send_buf_pages; + long: 32; + u64 flags; + u32 proto; + struct btrfs_root *send_root; + struct btrfs_root *parent_root; + struct clone_root *clone_roots; + int clone_roots_cnt; + struct btrfs_path *left_path; + struct btrfs_path *right_path; + struct btrfs_key *cmp_key; + u64 last_reloc_trans; + u64 cur_ino; + u64 cur_inode_gen; + u64 cur_inode_size; + u64 cur_inode_mode; + u64 cur_inode_rdev; + u64 cur_inode_last_extent; + u64 cur_inode_next_write_offset; + bool cur_inode_new; + bool cur_inode_new_gen; + bool cur_inode_deleted; + bool ignore_cur_inode; + bool cur_inode_needs_verity; + void *verity_descriptor; + long: 32; + u64 send_progress; + struct list_head new_refs; + struct list_head deleted_refs; + struct btrfs_lru_cache name_cache; + struct inode *cur_inode; + struct file_ra_state ra; + u64 page_cache_clear_start; + bool clean_page_cache; + struct rb_root pending_dir_moves; + struct rb_root waiting_dir_moves; + struct rb_root orphan_dirs; + struct rb_root rbtree_new_refs; + struct rb_root rbtree_deleted_refs; + struct btrfs_lru_cache backref_cache; + long: 32; + u64 backref_cache_last_reloc_trans; + struct btrfs_lru_cache dir_created_cache; + struct btrfs_lru_cache dir_utimes_cache; +}; + +struct pending_dir_move { + struct rb_node node; + struct list_head list; + long: 32; + u64 parent_ino; + u64 ino; + u64 gen; + struct list_head update_refs; +}; + +struct waiting_dir_move { + struct rb_node node; + long: 32; + u64 ino; + u64 rmdir_ino; + u64 rmdir_gen; + bool orphanized; + long: 32; +}; + +struct orphan_dir_info { + struct rb_node node; + long: 32; + u64 ino; + u64 gen; + u64 last_dir_index_offset; + u64 dir_high_seq_ino; +}; + +struct name_cache_entry { + struct btrfs_lru_cache_entry entry; + u64 parent_ino; + u64 parent_gen; + int ret; + int need_later_update; + int name_len; + char name[0]; + long: 32; +}; + +enum btrfs_compare_tree_result { + BTRFS_COMPARE_TREE_NEW = 0, + BTRFS_COMPARE_TREE_DELETED = 1, + BTRFS_COMPARE_TREE_CHANGED = 2, + BTRFS_COMPARE_TREE_SAME = 3, +}; + +struct btrfs_inode_info { + u64 size; + u64 gen; + u64 mode; + u64 uid; + u64 gid; + u64 rdev; + u64 fileattr; + u64 nlink; +}; + +typedef int (*iterate_inode_ref_t)(u64, struct fs_path *, void *); + +typedef int (*iterate_dir_item_t)(int, struct btrfs_key *, const char *, int, const char *, int, void *); + +struct backref_ctx { + struct send_ctx *sctx; + long: 32; + u64 found; + u64 cur_objectid; + u64 cur_offset; + u64 extent_len; + u64 bytenr; + 
u64 backref_owner; + u64 backref_offset; +}; + +enum inode_state { + inode_state_no_change = 0, + inode_state_will_create = 1, + inode_state_did_create = 2, + inode_state_will_delete = 3, + inode_state_did_delete = 4, +}; + +struct recorded_ref { + struct list_head list; + char *name; + struct fs_path *full_path; + u64 dir; + u64 dir_gen; + int name_len; + struct rb_node node; + struct rb_root *root; + long: 32; +}; + +struct find_xattr_ctx { + const char *name; + int name_len; + int found_idx; + char *found_data; + int found_data_len; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + unsigned int qlen; + unsigned int max_qlen; +}; + +struct pkcs7_parse_context { + struct pkcs7_message *msg; + struct pkcs7_signed_info *sinfo; + struct pkcs7_signed_info **ppsinfo; + struct x509_certificate *certs; + struct x509_certificate **ppcerts; + long unsigned int data; + enum OID last_oid; + unsigned int x509_index; + unsigned int sinfo_index; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_skid; + unsigned int raw_skid_size; + bool expect_skid; +}; + +enum { + ICQ_EXITED = 4, + ICQ_DESTROYED = 8, +}; + +struct io_poll_update { + struct file *file; + long: 32; + u64 old_user_data; + u64 new_user_data; + __poll_t events; + bool update_events; + bool update_user_data; +}; + +struct io_poll_table { + struct poll_table_struct pt; + struct io_kiocb *req; + int nr_entries; + int error; + bool owning; + __poll_t result_mask; +}; + +enum { + IOU_POLL_DONE = 0, + IOU_POLL_NO_ACTION = 1, + IOU_POLL_REMOVE_POLL_USE_RES = 2, + IOU_POLL_REISSUE = 3, + IOU_POLL_REQUEUE = 4, +}; + +union handle_parts { + depot_stack_handle_t handle; + struct { + u32 pool_index_plus_1: 17; + u32 offset: 10; + u32 extra: 5; + }; +}; + +struct stack_record { + struct list_head hash_list; + u32 hash; + u32 size; + union handle_parts handle; + refcount_t count; + union { + long unsigned int entries[64]; + struct { + struct list_head free_list; + long unsigned int rcu_state; + }; + }; +}; + +enum depot_counter_id { + DEPOT_COUNTER_REFD_ALLOCS = 0, + DEPOT_COUNTER_REFD_FREES = 1, + DEPOT_COUNTER_REFD_INUSE = 2, + DEPOT_COUNTER_FREELIST_SIZE = 3, + DEPOT_COUNTER_PERSIST_COUNT = 4, + DEPOT_COUNTER_PERSIST_BYTES = 5, + DEPOT_COUNTER_COUNT = 6, +}; + +struct clk_fixed_rate { + struct clk_hw hw; + long unsigned int fixed_rate; + long unsigned int fixed_accuracy; + long unsigned int flags; +}; + +struct pciserial_board { + unsigned int flags; + unsigned int num_ports; + unsigned int base_baud; + unsigned int uart_offset; + unsigned int reg_shift; + unsigned int first_offset; +}; + +struct serial_private; + +struct pci_serial_quirk { + u32 vendor; + u32 device; + u32 subvendor; + u32 subdevice; + int (*probe)(struct pci_dev *); + int (*init)(struct pci_dev *); + int (*setup)(struct serial_private *, const struct pciserial_board *, struct uart_8250_port *, int); + void (*exit)(struct pci_dev *); +}; + +struct serial_private { + struct pci_dev *dev; + unsigned int nr; + struct pci_serial_quirk *quirk; + const struct pciserial_board *board; + int line[0]; +}; + +struct f815xxa_data { + spinlock_t lock; + int idx; +}; + +struct timedia_struct { + int num; + const short unsigned int *ids; +}; + +enum { + MOXA_SUPP_RS232 = 1, + MOXA_SUPP_RS422 = 2, + MOXA_SUPP_RS485 = 4, +}; + +enum pci_board_num_t { + pbn_default = 0, + pbn_b0_1_115200 = 1, + pbn_b0_2_115200 = 2, + pbn_b0_4_115200 = 3, + pbn_b0_5_115200 = 4, + pbn_b0_8_115200 = 5, 
+ pbn_b0_1_921600 = 6, + pbn_b0_2_921600 = 7, + pbn_b0_4_921600 = 8, + pbn_b0_2_1130000 = 9, + pbn_b0_4_1152000 = 10, + pbn_b0_4_1250000 = 11, + pbn_b0_2_1843200 = 12, + pbn_b0_4_1843200 = 13, + pbn_b0_1_15625000 = 14, + pbn_b0_bt_1_115200 = 15, + pbn_b0_bt_2_115200 = 16, + pbn_b0_bt_4_115200 = 17, + pbn_b0_bt_8_115200 = 18, + pbn_b0_bt_1_460800 = 19, + pbn_b0_bt_2_460800 = 20, + pbn_b0_bt_4_460800 = 21, + pbn_b0_bt_1_921600 = 22, + pbn_b0_bt_2_921600 = 23, + pbn_b0_bt_4_921600 = 24, + pbn_b0_bt_8_921600 = 25, + pbn_b1_1_115200 = 26, + pbn_b1_2_115200 = 27, + pbn_b1_4_115200 = 28, + pbn_b1_8_115200 = 29, + pbn_b1_16_115200 = 30, + pbn_b1_1_921600 = 31, + pbn_b1_2_921600 = 32, + pbn_b1_4_921600 = 33, + pbn_b1_8_921600 = 34, + pbn_b1_2_1250000 = 35, + pbn_b1_bt_1_115200 = 36, + pbn_b1_bt_2_115200 = 37, + pbn_b1_bt_4_115200 = 38, + pbn_b1_bt_2_921600 = 39, + pbn_b1_1_1382400 = 40, + pbn_b1_2_1382400 = 41, + pbn_b1_4_1382400 = 42, + pbn_b1_8_1382400 = 43, + pbn_b2_1_115200 = 44, + pbn_b2_2_115200 = 45, + pbn_b2_4_115200 = 46, + pbn_b2_8_115200 = 47, + pbn_b2_1_460800 = 48, + pbn_b2_4_460800 = 49, + pbn_b2_8_460800 = 50, + pbn_b2_16_460800 = 51, + pbn_b2_1_921600 = 52, + pbn_b2_4_921600 = 53, + pbn_b2_8_921600 = 54, + pbn_b2_8_1152000 = 55, + pbn_b2_bt_1_115200 = 56, + pbn_b2_bt_2_115200 = 57, + pbn_b2_bt_4_115200 = 58, + pbn_b2_bt_2_921600 = 59, + pbn_b2_bt_4_921600 = 60, + pbn_b3_2_115200 = 61, + pbn_b3_4_115200 = 62, + pbn_b3_8_115200 = 63, + pbn_b4_bt_2_921600 = 64, + pbn_b4_bt_4_921600 = 65, + pbn_b4_bt_8_921600 = 66, + pbn_panacom = 67, + pbn_panacom2 = 68, + pbn_panacom4 = 69, + pbn_plx_romulus = 70, + pbn_oxsemi = 71, + pbn_oxsemi_1_15625000 = 72, + pbn_oxsemi_2_15625000 = 73, + pbn_oxsemi_4_15625000 = 74, + pbn_oxsemi_8_15625000 = 75, + pbn_intel_i960 = 76, + pbn_sgi_ioc3 = 77, + pbn_computone_4 = 78, + pbn_computone_6 = 79, + pbn_computone_8 = 80, + pbn_sbsxrsio = 81, + pbn_pasemi_1682M = 82, + pbn_ni8430_2 = 83, + pbn_ni8430_4 = 84, + pbn_ni8430_8 = 85, + pbn_ni8430_16 = 86, + pbn_ADDIDATA_PCIe_1_3906250 = 87, + pbn_ADDIDATA_PCIe_2_3906250 = 88, + pbn_ADDIDATA_PCIe_4_3906250 = 89, + pbn_ADDIDATA_PCIe_8_3906250 = 90, + pbn_ce4100_1_115200 = 91, + pbn_omegapci = 92, + pbn_NETMOS9900_2s_115200 = 93, + pbn_brcm_trumanage = 94, + pbn_fintek_4 = 95, + pbn_fintek_8 = 96, + pbn_fintek_12 = 97, + pbn_fintek_F81504A = 98, + pbn_fintek_F81508A = 99, + pbn_fintek_F81512A = 100, + pbn_wch382_2 = 101, + pbn_wch384_4 = 102, + pbn_wch384_8 = 103, + pbn_sunix_pci_1s = 104, + pbn_sunix_pci_2s = 105, + pbn_sunix_pci_4s = 106, + pbn_sunix_pci_8s = 107, + pbn_sunix_pci_16s = 108, + pbn_titan_1_4000000 = 109, + pbn_titan_2_4000000 = 110, + pbn_titan_4_4000000 = 111, + pbn_titan_8_4000000 = 112, + pbn_moxa_2 = 113, + pbn_moxa_4 = 114, + pbn_moxa_8 = 115, +}; + +typedef short unsigned int __kernel_old_dev_t; + +enum { + LO_FLAGS_READ_ONLY = 1, + LO_FLAGS_AUTOCLEAR = 4, + LO_FLAGS_PARTSCAN = 8, + LO_FLAGS_DIRECT_IO = 16, +}; + +struct loop_info { + int lo_number; + __kernel_old_dev_t lo_device; + long unsigned int lo_inode; + __kernel_old_dev_t lo_rdevice; + int lo_offset; + int lo_encrypt_type; + int lo_encrypt_key_size; + int lo_flags; + char lo_name[64]; + unsigned char lo_encrypt_key[32]; + long unsigned int lo_init[2]; + char reserved[4]; +}; + +struct loop_info64 { + __u64 lo_device; + __u64 lo_inode; + __u64 lo_rdevice; + __u64 lo_offset; + __u64 lo_sizelimit; + __u32 lo_number; + __u32 lo_encrypt_type; + __u32 lo_encrypt_key_size; + __u32 lo_flags; + __u8 lo_file_name[64]; + __u8 lo_crypt_name[64]; + 
__u8 lo_encrypt_key[32]; + __u64 lo_init[2]; +}; + +struct loop_config { + __u32 fd; + __u32 block_size; + struct loop_info64 info; + __u64 __reserved[8]; +}; + +enum { + Lo_unbound = 0, + Lo_bound = 1, + Lo_rundown = 2, + Lo_deleting = 3, +}; + +struct loop_device { + int lo_number; + long: 32; + loff_t lo_offset; + loff_t lo_sizelimit; + int lo_flags; + char lo_file_name[64]; + struct file *lo_backing_file; + struct block_device *lo_device; + gfp_t old_gfp_mask; + spinlock_t lo_lock; + int lo_state; + spinlock_t lo_work_lock; + struct workqueue_struct *workqueue; + struct work_struct rootcg_work; + struct list_head rootcg_cmd_list; + struct list_head idle_worker_list; + struct rb_root worker_tree; + struct timer_list timer; + bool use_dio; + bool sysfs_inited; + struct request_queue *lo_queue; + struct blk_mq_tag_set tag_set; + struct gendisk *lo_disk; + struct mutex lo_mutex; + bool idr_visible; +}; + +struct loop_cmd { + struct list_head list_entry; + bool use_aio; + atomic_t ref; + long int ret; + long: 32; + struct kiocb iocb; + struct bio_vec *bvec; + struct cgroup_subsys_state *blkcg_css; + struct cgroup_subsys_state *memcg_css; + long: 32; +}; + +struct loop_worker { + struct rb_node rb_node; + struct work_struct work; + struct list_head cmd_list; + struct list_head idle_list; + struct loop_device *lo; + struct cgroup_subsys_state *blkcg_css; + long unsigned int last_ran_at; +}; + +struct ahci_sg { + __le32 addr; + __le32 addr_hi; + __le32 reserved; + __le32 flags_size; +}; + +struct iwl_rx_transfer_desc { + __le16 rbid; + __le16 reserved[3]; + __le64 addr; +}; + +struct iwl_rx_completion_desc { + __le32 reserved1; + __le16 rbid; + u8 flags; + u8 reserved2[25]; +}; + +struct iwl_rx_completion_desc_bz { + __le16 rbid; + u8 flags; + u8 reserved[1]; +}; + +struct iwl_fw_dbg_trigger_stats { + __le32 stop_offset; + __le32 stop_threshold; + __le32 start_offset; + __le32 start_threshold; +}; + +struct iwl_rx_mpdu_res_start___2 { + __le16 byte_count; + __le16 assist; +}; + +struct iwl_ba_window_status_notif { + __le64 bitmap[16]; + __le16 ra_tid[16]; + __le32 start_seq_num[16]; + __le16 mpdu_rx_count[16]; +}; + +struct mvm_statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 wait_for_silence_timeout_cnt; + u8 reserved[12]; +}; + +struct mvm_statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 rssi_ant; + __le32 reserved2; +}; + +struct mvm_statistics_tx_non_phy_v3 { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; +}; + +struct mvm_statistics_tx_non_phy { + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; +}; + +struct mvm_statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; + __s8 
txpower[3]; + __s8 reserved; + __le32 reserved2; +}; + +struct mvm_statistics_tx_channel_width { + __le32 ext_cca_narrow_ch20[1]; + __le32 ext_cca_narrow_ch40[2]; + __le32 ext_cca_narrow_ch80[3]; + __le32 ext_cca_narrow_ch160[4]; + __le32 last_tx_ch_width_indx; + __le32 rx_detected_per_ch_width[4]; + __le32 success_per_ch_width[4]; + __le32 fail_per_ch_width[4]; +}; + +struct mvm_statistics_tx_v4 { + struct mvm_statistics_tx_non_phy_v3 general; + struct mvm_statistics_tx_non_phy_agg agg; + struct mvm_statistics_tx_channel_width channel_width; +}; + +struct mvm_statistics_tx { + struct mvm_statistics_tx_non_phy general; + struct mvm_statistics_tx_non_phy_agg agg; + struct mvm_statistics_tx_channel_width channel_width; +}; + +struct mvm_statistics_bt_activity { + __le32 hi_priority_tx_req_cnt; + __le32 hi_priority_tx_denied_cnt; + __le32 lo_priority_tx_req_cnt; + __le32 lo_priority_tx_denied_cnt; + __le32 hi_priority_rx_req_cnt; + __le32 hi_priority_rx_denied_cnt; + __le32 lo_priority_rx_req_cnt; + __le32 lo_priority_rx_denied_cnt; +}; + +struct mvm_statistics_general_common_v19 { + __le32 radio_temperature; + __le32 radio_voltage; + struct mvm_statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct mvm_statistics_div slow_div; + __le32 rx_enable_counter; + __le32 num_of_sos_states; + __le32 beacon_filtered; + __le32 missed_beacons; + u8 beacon_filter_average_energy; + u8 beacon_filter_reason; + u8 beacon_filter_current_energy; + u8 beacon_filter_reserved; + __le32 beacon_filter_delta_time; + struct mvm_statistics_bt_activity bt_activity; + __le64 rx_time; + __le64 on_time_rf; + __le64 on_time_scan; + __le64 tx_time; +}; + +struct mvm_statistics_general_common { + __le32 radio_temperature; + struct mvm_statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct mvm_statistics_div slow_div; + __le32 rx_enable_counter; + __le32 num_of_sos_states; + __le32 beacon_filtered; + __le32 missed_beacons; + u8 beacon_filter_average_energy; + u8 beacon_filter_reason; + u8 beacon_filter_current_energy; + u8 beacon_filter_reserved; + __le32 beacon_filter_delta_time; + struct mvm_statistics_bt_activity bt_activity; + __le64 rx_time; + __le64 on_time_rf; + __le64 on_time_scan; + __le64 tx_time; +}; + +struct mvm_statistics_general_v8 { + struct mvm_statistics_general_common_v19 common; + __le32 beacon_counter[5]; + u8 beacon_average_energy[5]; + u8 reserved[3]; +}; + +struct mvm_statistics_general { + struct mvm_statistics_general_common common; + __le32 beacon_counter[4]; + u8 beacon_average_energy[4]; + u8 reserved[4]; +}; + +struct mvm_statistics_load { + __le32 air_time[4]; + __le32 byte_count[4]; + __le32 pkt_count[4]; + u8 avg_energy[16]; +}; + +struct mvm_statistics_load_v1 { + __le32 air_time[5]; + __le32 byte_count[5]; + __le32 pkt_count[5]; + u8 avg_energy[16]; +}; + +struct iwl_notif_statistics_v11 { + __le32 flag; + struct mvm_statistics_rx_v3 rx; + struct mvm_statistics_tx_v4 tx; + struct mvm_statistics_general_v8 general; + struct mvm_statistics_load_v1 load_stats; +}; + +struct iwl_notif_statistics___2 { + __le32 flag; + struct mvm_statistics_rx rx; + struct mvm_statistics_tx tx; + struct mvm_statistics_general general; + struct mvm_statistics_load load_stats; +}; + +enum iwl_statistics_notif_flags { + IWL_STATISTICS_REPLY_FLG_CLEAR = 1, +}; + +enum iwl_fw_statistics_type { + FW_STATISTICS_OPERATIONAL = 0, + FW_STATISTICS_PHY = 1, + FW_STATISTICS_MAC = 2, + FW_STATISTICS_RX = 3, + 
FW_STATISTICS_TX = 4, + FW_STATISTICS_DURATION = 5, + FW_STATISTICS_HE = 6, +}; + +struct iwl_statistics_ntfy_hdr { + u8 type; + u8 version; + __le16 size; +}; + +struct iwl_stats_ntfy_per_link { + __le32 beacon_filter_average_energy; + __le32 air_time; + __le32 beacon_counter; + __le32 beacon_average_energy; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 rx_bytes; +}; + +struct iwl_stats_ntfy_part1_per_link { + __le64 rx_time; + __le64 tx_time; + __le32 rx_action; + __le32 tx_action; + __le32 cca_defers; + __le32 beacon_filtered; +}; + +struct iwl_stats_ntfy_per_mac { + __le32 beacon_filter_average_energy; + __le32 air_time; + __le32 beacon_counter; + __le32 beacon_average_energy; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 rx_bytes; +}; + +struct iwl_stats_ntfy_per_phy { + __le32 channel_load; + __le32 channel_load_by_us; + __le32 channel_load_not_by_us; + __le32 clt; + __le32 act; + __le32 elp; + __le32 rx_detected_per_ch_width[5]; + __le32 success_per_ch_width[5]; + __le32 fail_per_ch_width[5]; + __le32 last_tx_ch_width_indx; +}; + +struct iwl_stats_ntfy_per_sta { + __le32 average_energy; +}; + +struct iwl_system_statistics_notif_oper { + __le32 time_stamp; + struct iwl_stats_ntfy_per_link per_link[4]; + struct iwl_stats_ntfy_per_phy per_phy[3]; + struct iwl_stats_ntfy_per_sta per_sta[16]; +}; + +struct iwl_system_statistics_part1_notif_oper { + __le32 time_stamp; + struct iwl_stats_ntfy_part1_per_link per_link[4]; + __le32 per_phy_crc_error_stats[3]; +}; + +struct iwl_statistics_operational_ntfy { + struct iwl_statistics_ntfy_hdr hdr; + __le32 flags; + struct iwl_stats_ntfy_per_mac per_mac[4]; + struct iwl_stats_ntfy_per_phy per_phy[3]; + struct iwl_stats_ntfy_per_sta per_sta[16]; + __le64 rx_time; + __le64 tx_time; + __le64 on_time_rf; + __le64 on_time_scan; +}; + +struct iwl_statistics_operational_ntfy_ver_14 { + struct iwl_statistics_ntfy_hdr hdr; + __le32 flags; + __le32 mac_id; + __le32 beacon_filter_average_energy; + __le32 beacon_filter_reason; + __le32 radio_temperature; + __le32 air_time[4]; + __le32 beacon_counter[4]; + __le32 beacon_average_energy[4]; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 rx_bytes[4]; + __le64 rx_time; + __le64 tx_time; + __le64 on_time_rf; + __le64 on_time_scan; + __le32 average_energy[16]; + __le32 reserved; +}; + +struct iwl_mvm_stat_data { + struct iwl_mvm *mvm; + __le32 flags; + __le32 mac_id; + u8 beacon_filter_average_energy; + __le32 *beacon_counter; + u8 *beacon_average_energy; +}; + +struct iwl_mvm_stat_data_all_macs { + struct iwl_mvm *mvm; + __le32 flags; + struct iwl_stats_ntfy_per_mac *per_mac; +}; + +enum rt2x00usb_vendor_request { + USB_DEVICE_MODE = 1, + USB_SINGLE_WRITE = 2, + USB_SINGLE_READ = 3, + USB_MULTI_WRITE = 6, + USB_MULTI_READ = 7, + USB_EEPROM_WRITE = 8, + USB_EEPROM_READ = 9, + USB_LED_CONTROL = 10, + USB_RX_CONTROL = 12, +}; + +struct queue_entry_priv_usb { + struct urb *urb; +}; + +struct queue_entry_priv_usb_bcn { + struct urb *urb; + unsigned int guardian_data; + struct urb *guardian_urb; +}; + +struct rt2x00_async_read_data { + __le32 reg; + struct usb_ctrlrequest cr; + struct rt2x00_dev *rt2x00dev; + bool (*callback)(struct rt2x00_dev *, int, u32); +}; + +struct usb_class_driver { + char *name; + char * (*devnode)(const struct device *, umode_t *); + const struct file_operations *fops; + int minor_base; +}; + +struct trace_event_raw_rtc_time_alarm_class { + struct trace_entry ent; + time64_t secs; + int err; + char __data[0]; + long: 32; +}; + +struct 
trace_event_raw_rtc_irq_set_freq { + struct trace_entry ent; + int freq; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_irq_set_state { + struct trace_entry ent; + int enabled; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_alarm_irq_enable { + struct trace_entry ent; + unsigned int enabled; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_offset_class { + struct trace_entry ent; + long int offset; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_timer_class { + struct trace_entry ent; + struct rtc_timer *timer; + long: 32; + ktime_t expires; + ktime_t period; + char __data[0]; +}; + +struct trace_event_data_offsets_rtc_time_alarm_class {}; + +struct trace_event_data_offsets_rtc_irq_set_freq {}; + +struct trace_event_data_offsets_rtc_irq_set_state {}; + +struct trace_event_data_offsets_rtc_alarm_irq_enable {}; + +struct trace_event_data_offsets_rtc_offset_class {}; + +struct trace_event_data_offsets_rtc_timer_class {}; + +typedef void (*btf_trace_rtc_set_time)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_read_time)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_set_alarm)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_read_alarm)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_irq_set_freq)(void *, int, int); + +typedef void (*btf_trace_rtc_irq_set_state)(void *, int, int); + +typedef void (*btf_trace_rtc_alarm_irq_enable)(void *, unsigned int, int); + +typedef void (*btf_trace_rtc_set_offset)(void *, long int, int); + +typedef void (*btf_trace_rtc_read_offset)(void *, long int, int); + +typedef void (*btf_trace_rtc_timer_enqueue)(void *, struct rtc_timer *); + +typedef void (*btf_trace_rtc_timer_dequeue)(void *, struct rtc_timer *); + +typedef void (*btf_trace_rtc_timer_fired)(void *, struct rtc_timer *); + +enum { + none = 0, + day = 1, + month = 2, + year = 3, +}; + +struct dm_rq_target_io; + +struct dm_rq_clone_bio_info { + struct bio *orig; + struct dm_rq_target_io *tio; + struct bio clone; +}; + +struct dm_rq_target_io { + struct mapped_device *md; + struct dm_target *ti; + struct request *orig; + struct request *clone; + struct kthread_work work; + blk_status_t error; + union map_info info; + long: 32; + struct dm_stats_aux stats_aux; + long unsigned int duration_jiffies; + unsigned int n_sectors; + unsigned int completed; + long: 32; +}; + +enum { + SYSTAB = 0, + MMBASE = 1, + MMSIZE = 2, + DCSIZE = 3, + DCVERS = 4, + PARAMCOUNT = 5, +}; + +typedef int (*of_init_fn_1_ret)(struct device_node *); + +struct ifconf { + int ifc_len; + union { + char *ifcu_buf; + struct ifreq *ifcu_req; + } ifc_ifcu; +}; + +typedef u32 compat_ulong_t; + +typedef u32 compat_caddr_t; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_ifconf { + compat_int_t ifc_len; + compat_caddr_t ifcbuf; +}; + +enum hwtstamp_flags { + HWTSTAMP_FLAG_BONDED_PHC_INDEX = 1, + HWTSTAMP_FLAG_LAST = 1, + HWTSTAMP_FLAG_MASK = 1, +}; + +struct wol_reply_data { + struct ethnl_reply_data base; + struct ethtool_wolinfo wol; + bool show_sopass; +}; + +enum ip_conntrack_expect_events { + IPEXP_NEW = 0, + IPEXP_DESTROY = 1, +}; + +struct ct_expect_iter_state { + struct seq_net_private p; + unsigned int bucket; +}; + +enum { + IPT_TTL_EQ = 0, + IPT_TTL_NE = 1, + IPT_TTL_LT = 2, + IPT_TTL_GT = 3, +}; + +struct ipt_ttl_info { + __u8 mode; + __u8 ttl; +}; + +enum { + IP6T_HL_EQ = 0, + IP6T_HL_NE = 1, 
+ IP6T_HL_LT = 2, + IP6T_HL_GT = 3, +}; + +struct ip6t_hl_info { + __u8 mode; + __u8 hop_limit; +}; + +enum { + FDB_NOTIFY_BIT = 1, + FDB_NOTIFY_INACTIVE_BIT = 2, +}; + +enum { + NFEA_UNSPEC = 0, + NFEA_ACTIVITY_NOTIFY = 1, + NFEA_DONT_REFRESH = 2, + __NFEA_MAX = 3, +}; + +struct __fdb_entry { + __u8 mac_addr[6]; + __u8 port_no; + __u8 is_local; + __u32 ageing_timer_value; + __u8 port_hi; + __u8 pad0; + __u16 unused; +}; + +enum ieee80211_tx_power_intrpt_type { + IEEE80211_TPE_LOCAL_EIRP = 0, + IEEE80211_TPE_LOCAL_EIRP_PSD = 1, + IEEE80211_TPE_REG_CLIENT_EIRP = 2, + IEEE80211_TPE_REG_CLIENT_EIRP_PSD = 3, +}; + +enum ieee80211_tx_power_category_6ghz { + IEEE80211_TPE_CAT_6GHZ_DEFAULT = 0, + IEEE80211_TPE_CAT_6GHZ_SUBORDINATE = 1, +}; + +struct ieee80211_tx_pwr_env { + u8 info; + u8 variable[0]; +}; + +struct ieee80211_elems_parse { + struct ieee802_11_elems elems; + const struct element *ml_basic_elem; + const struct element *ml_reconf_elem; + size_t scratch_len; + u8 *scratch_pos; + u8 scratch[0]; +}; + +enum ocb_deferred_task_flags { + OCB_WORK_HOUSEKEEPING = 0, +}; + +struct elf32_rel { + Elf32_Addr r_offset; + Elf32_Word r_info; +}; + +typedef struct elf32_rel Elf32_Rel; + +struct efi_arm_entry_state { + u32 cpsr_before_ebs; + u32 sctlr_before_ebs; + u32 cpsr_after_ebs; + u32 sctlr_after_ebs; +}; + +struct firmware_ops { + int (*prepare_idle)(long unsigned int); + int (*do_idle)(long unsigned int); + int (*set_cpu_boot_addr)(int, long unsigned int); + int (*get_cpu_boot_addr)(int, long unsigned int *); + int (*cpu_boot)(int); + int (*l2x0_init)(); + int (*suspend)(); + int (*resume)(); +}; + +enum decode_reg_type { + REG_TYPE_NONE = 0, + REG_TYPE_ANY = 1, + REG_TYPE_SAMEAS16 = 2, + REG_TYPE_SP = 3, + REG_TYPE_PC = 4, + REG_TYPE_NOSP = 5, + REG_TYPE_NOSPPC = 6, + REG_TYPE_NOPC = 7, + REG_TYPE_NOPCWB = 8, + REG_TYPE_NOPCX = 9, + REG_TYPE_NOSPPCX = 10, + REG_TYPE_0 = 0, +}; + +struct decode_table { + struct decode_header header; + union decode_item table; +}; + +struct decode_custom { + struct decode_header header; + union decode_item decoder; +}; + +struct decode_simulate { + struct decode_header header; + union decode_item handler; +}; + +struct decode_emulate { + struct decode_header header; + union decode_item handler; +}; + +typedef enum probes_insn kprobe_decode_insn_t(probes_opcode_t, struct arch_probes_insn *, bool, const union decode_action *, const struct decode_checker **); + +struct warn_args { + const char *fmt; + va_list args; +}; + +enum { + MAX_IORES_LEVEL = 5, +}; + +struct region_devres { + struct resource *parent; + resource_size_t start; + resource_size_t n; +}; + +typedef struct { + void *lock; +} class_irq_t; + +typedef struct { + raw_spinlock_t *lock; +} class_raw_spinlock_irq_t; + +typedef struct { + void *lock; +} class_cpus_read_lock_t; + +struct trace_event_raw_sched_kthread_stop { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_stop_ret { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_queue_work { + struct trace_entry ent; + void *work; + void *function; + void *worker; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_wakeup_template { + struct trace_entry 
ent; + char comm[16]; + pid_t pid; + int prio; + int target_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_switch { + struct trace_entry ent; + char prev_comm[16]; + pid_t prev_pid; + int prev_prio; + long int prev_state; + char next_comm[16]; + pid_t next_pid; + int next_prio; + char __data[0]; +}; + +struct trace_event_raw_sched_migrate_task { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int orig_cpu; + int dest_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_process_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_wait { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_fork { + struct trace_entry ent; + char parent_comm[16]; + pid_t parent_pid; + char child_comm[16]; + pid_t child_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_process_exec { + struct trace_entry ent; + u32 __data_loc_filename; + pid_t pid; + pid_t old_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_prepare_exec { + struct trace_entry ent; + u32 __data_loc_interp; + u32 __data_loc_filename; + pid_t pid; + u32 __data_loc_comm; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_runtime { + struct trace_entry ent; + char comm[16]; + pid_t pid; + long: 32; + u64 runtime; + char __data[0]; +}; + +struct trace_event_raw_sched_pi_setprio { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int oldprio; + int newprio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_hang { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_move_numa { + struct trace_entry ent; + pid_t pid; + pid_t tgid; + pid_t ngid; + int src_cpu; + int src_nid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_numa_pair_template { + struct trace_entry ent; + pid_t src_pid; + pid_t src_tgid; + pid_t src_ngid; + int src_cpu; + int src_nid; + pid_t dst_pid; + pid_t dst_tgid; + pid_t dst_ngid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_wake_idle_without_ipi { + struct trace_entry ent; + int cpu; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_kthread_stop {}; + +struct trace_event_data_offsets_sched_kthread_stop_ret {}; + +struct trace_event_data_offsets_sched_kthread_work_queue_work {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_start {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_end {}; + +struct trace_event_data_offsets_sched_wakeup_template {}; + +struct trace_event_data_offsets_sched_switch {}; + +struct trace_event_data_offsets_sched_migrate_task {}; + +struct trace_event_data_offsets_sched_process_template {}; + +struct trace_event_data_offsets_sched_process_wait {}; + +struct trace_event_data_offsets_sched_process_fork {}; + +struct trace_event_data_offsets_sched_process_exec { + u32 filename; + const void *filename_ptr_; +}; + +struct trace_event_data_offsets_sched_prepare_exec { + u32 interp; + const void *interp_ptr_; + u32 filename; + const void *filename_ptr_; + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_sched_stat_runtime {}; + +struct trace_event_data_offsets_sched_pi_setprio {}; + +struct trace_event_data_offsets_sched_process_hang {}; + +struct trace_event_data_offsets_sched_move_numa {}; + +struct trace_event_data_offsets_sched_numa_pair_template {}; + +struct 
trace_event_data_offsets_sched_wake_idle_without_ipi {}; + +typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int); + +typedef void (*btf_trace_sched_kthread_work_queue_work)(void *, struct kthread_worker *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_start)(void *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_end)(void *, struct kthread_work *, kthread_work_func_t); + +typedef void (*btf_trace_sched_waking)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, struct task_struct *, unsigned int); + +typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int); + +typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_wait)(void *, struct pid *); + +typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, struct linux_binprm *); + +typedef void (*btf_trace_sched_prepare_exec)(void *, struct task_struct *, struct linux_binprm *); + +typedef void (*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_hang)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int); + +typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int); + +typedef void (*btf_trace_pelt_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_pelt_rt_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_dl_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_hw_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_irq_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_cpu_capacity_tp)(void *, struct rq *); + +typedef void (*btf_trace_sched_overutilized_tp)(void *, struct root_domain *, bool); + +typedef void (*btf_trace_sched_util_est_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_sched_util_est_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_update_nr_running_tp)(void *, struct rq *, int); + +typedef void (*btf_trace_sched_compute_energy_tp)(void *, struct task_struct *, int, long unsigned int, long unsigned int, long unsigned int); + +struct trace_event_raw_ipi_raise { + struct trace_entry ent; + u32 __data_loc_target_cpus; + const char *reason; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *callback; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpumask { + struct trace_entry ent; + u32 __data_loc_cpumask; + void *callsite; + void *callback; + 
char __data[0]; +}; + +struct trace_event_raw_ipi_handler { + struct trace_entry ent; + const char *reason; + char __data[0]; +}; + +struct trace_event_data_offsets_ipi_raise { + u32 target_cpus; + const void *target_cpus_ptr_; +}; + +struct trace_event_data_offsets_ipi_send_cpu {}; + +struct trace_event_data_offsets_ipi_send_cpumask { + u32 cpumask; + const void *cpumask_ptr_; +}; + +struct trace_event_data_offsets_ipi_handler {}; + +typedef void (*btf_trace_ipi_raise)(void *, const struct cpumask *, const char *); + +typedef void (*btf_trace_ipi_send_cpu)(void *, const unsigned int, long unsigned int, void *); + +typedef void (*btf_trace_ipi_send_cpumask)(void *, const struct cpumask *, long unsigned int, void *); + +typedef void (*btf_trace_ipi_entry)(void *, const char *); + +typedef void (*btf_trace_ipi_exit)(void *, const char *); + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_irq_t; + +struct set_affinity_pending; + +struct migration_arg { + struct task_struct *task; + int dest_cpu; + struct set_affinity_pending *pending; +}; + +struct set_affinity_pending { + refcount_t refs; + unsigned int stop_pending; + struct completion done; + struct cpu_stop_work stop_work; + struct migration_arg arg; +}; + +struct tick_work { + int cpu; + atomic_t state; + struct delayed_work work; +}; + +typedef struct { + int *lock; + long unsigned int flags; +} class_core_lock_t; + +struct cfs_schedulable_data { + struct task_group *tg; + long: 32; + u64 period; + u64 quota; +}; + +enum { + cpuset = 0, + possible = 1, + fail = 2, +}; + +union cpumask_rcuhead { + cpumask_t cpumask; + struct callback_head rcu; +}; + +struct kprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int ip; +}; + +struct kretprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int func; + long unsigned int ret_ip; +}; + +struct trace_kprobe { + struct dyn_event devent; + struct kretprobe rp; + long unsigned int *nhit; + const char *symbol; + struct trace_probe tp; +}; + +struct sym_count_ctx { + unsigned int count; + const char *name; +}; + +enum { + BPF_F_SYSCTL_BASE_NAME = 1, +}; + +struct bpf_sockopt_buf { + u8 data[32]; +}; + +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum bpf_attach_type type; +}; + +struct bpf_prog_list { + struct hlist_node node; + struct bpf_prog *prog; + struct bpf_cgroup_link *link; + struct bpf_cgroup_storage *storage[2]; +}; + +typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_retval)(); + +typedef u64 (*btf_bpf_set_retval)(int); + +typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64); + +typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t); + +typedef u64 (*btf_bpf_get_netns_cookie_sockopt)(struct bpf_sockopt_kern *); + +struct wb_stats { + long unsigned int nr_dirty; + long unsigned int nr_io; + long unsigned int nr_more_io; + long unsigned int nr_dirty_time; + long unsigned int nr_writeback; + long unsigned int nr_reclaimable; + long unsigned int nr_dirtied; + long unsigned int nr_written; + long unsigned int dirty_thresh; + long unsigned int wb_thresh; +}; + +typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); + +typedef struct fsnotify_mark_connector *fsnotify_connp_t; + +typedef 
void (*iomap_punch_t)(struct inode *, loff_t, loff_t, struct iomap *); + +struct iomap_ioend { + struct list_head io_list; + u16 io_type; + u16 io_flags; + struct inode *io_inode; + size_t io_size; + long: 32; + loff_t io_offset; + sector_t io_sector; + struct bio io_bio; +}; + +struct iomap_writepage_ctx; + +struct iomap_writeback_ops { + int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t, unsigned int); + int (*prepare_ioend)(struct iomap_ioend *, int); + void (*discard_folio)(struct folio *, loff_t); +}; + +struct iomap_writepage_ctx { + struct iomap iomap; + struct iomap_ioend *ioend; + const struct iomap_writeback_ops *ops; + u32 nr_folios; + long: 32; +}; + +struct iomap_folio_state { + spinlock_t state_lock; + unsigned int read_bytes_pending; + atomic_t write_bytes_pending; + long unsigned int state[0]; +}; + +struct iomap_readpage_ctx { + struct folio *cur_folio; + bool cur_folio_in_bio; + struct bio *bio; + struct readahead_control *rac; +}; + +struct ext4_system_zone { + struct rb_node node; + long: 32; + ext4_fsblk_t start_blk; + unsigned int count; + u32 ino; +}; + +struct cdrom_msf0 { + __u8 minute; + __u8 second; + __u8 frame; +}; + +union cdrom_addr { + struct cdrom_msf0 msf; + int lba; +}; + +struct cdrom_tocentry { + __u8 cdte_track; + __u8 cdte_adr: 4; + __u8 cdte_ctrl: 4; + __u8 cdte_format; + union cdrom_addr cdte_addr; + __u8 cdte_datamode; +}; + +struct cdrom_multisession { + union cdrom_addr addr; + __u8 xa_flag; + __u8 addr_format; +}; + +struct cdrom_mcn { + __u8 medium_catalog_number[14]; +}; + +struct packet_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct scsi_sense_hdr *sshdr; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +struct cdrom_device_ops; + +struct cdrom_device_info { + const struct cdrom_device_ops *ops; + struct list_head list; + struct gendisk *disk; + void *handle; + int mask; + int speed; + int capacity; + unsigned int options: 30; + unsigned int mc_flags: 2; + unsigned int vfs_events; + unsigned int ioctl_events; + int use_count; + char name[20]; + __u8 sanyo_slot: 2; + __u8 keeplocked: 1; + __u8 reserved: 5; + int cdda_method; + __u8 last_sense; + __u8 media_written; + short unsigned int mmc3_profile; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; + bool opened_for_data; + long: 32; + __s64 last_media_change_ms; +}; + +struct cdrom_device_ops { + int (*open)(struct cdrom_device_info *, int); + void (*release)(struct cdrom_device_info *); + int (*drive_status)(struct cdrom_device_info *, int); + unsigned int (*check_events)(struct cdrom_device_info *, unsigned int, int); + int (*tray_move)(struct cdrom_device_info *, int); + int (*lock_door)(struct cdrom_device_info *, int); + int (*select_speed)(struct cdrom_device_info *, long unsigned int); + int (*get_last_session)(struct cdrom_device_info *, struct cdrom_multisession *); + int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *); + int (*reset)(struct cdrom_device_info *); + int (*audio_ioctl)(struct cdrom_device_info *, unsigned int, void *); + int (*generic_packet)(struct cdrom_device_info *, struct packet_command *); + int (*read_cdda_bpc)(struct cdrom_device_info *, void *, u32, u32, u8 *); + const int capability; +}; + +struct iso_volume_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 data[2041]; +}; + +struct iso_primary_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 unused1[1]; + char system_id[32]; + char 
volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 unused3[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 opt_type_l_path_table[4]; + __u8 type_m_path_table[4]; + __u8 opt_type_m_path_table[4]; + __u8 root_directory_record[34]; + char volume_set_id[128]; + char publisher_id[128]; + char preparer_id[128]; + char application_id[128]; + char copyright_file_id[37]; + char abstract_file_id[37]; + char bibliographic_file_id[37]; + __u8 creation_date[17]; + __u8 modification_date[17]; + __u8 expiration_date[17]; + __u8 effective_date[17]; + __u8 file_structure_version[1]; + __u8 unused4[1]; + __u8 application_data[512]; + __u8 unused5[653]; +}; + +struct iso_supplementary_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 flags[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 escape[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 opt_type_l_path_table[4]; + __u8 type_m_path_table[4]; + __u8 opt_type_m_path_table[4]; + __u8 root_directory_record[34]; + char volume_set_id[128]; + char publisher_id[128]; + char preparer_id[128]; + char application_id[128]; + char copyright_file_id[37]; + char abstract_file_id[37]; + char bibliographic_file_id[37]; + __u8 creation_date[17]; + __u8 modification_date[17]; + __u8 expiration_date[17]; + __u8 effective_date[17]; + __u8 file_structure_version[1]; + __u8 unused4[1]; + __u8 application_data[512]; + __u8 unused5[653]; +}; + +struct hs_volume_descriptor { + __u8 foo[8]; + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 data[2033]; +}; + +struct hs_primary_descriptor { + __u8 foo[8]; + __u8 type[1]; + __u8 id[5]; + __u8 version[1]; + __u8 unused1[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 unused3[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 unused4[28]; + __u8 root_directory_record[34]; +}; + +struct isofs_options { + unsigned int rock: 1; + unsigned int joliet: 1; + unsigned int cruft: 1; + unsigned int hide: 1; + unsigned int showassoc: 1; + unsigned int nocompress: 1; + unsigned int overriderockperm: 1; + unsigned int uid_set: 1; + unsigned int gid_set: 1; + unsigned char map; + unsigned char check; + unsigned int blocksize; + umode_t fmode; + umode_t dmode; + kgid_t gid; + kuid_t uid; + char *iocharset; + s32 session; + s32 sbsector; +}; + +enum { + Opt_block = 0, + Opt_check___2 = 1, + Opt_cruft = 2, + Opt_gid___7 = 3, + Opt_ignore___2 = 4, + Opt_iocharset = 5, + Opt_map = 6, + Opt_mode___6 = 7, + Opt_nojoliet = 8, + Opt_norock = 9, + Opt_sb___2 = 10, + Opt_session = 11, + Opt_uid___7 = 12, + Opt_unhide = 13, + Opt_utf8___2 = 14, + Opt_err___5 = 15, + Opt_nocompress = 16, + Opt_hide = 17, + Opt_showassoc = 18, + Opt_dmode = 19, + Opt_overriderockperm = 20, +}; + +struct isofs_iget5_callback_data { + long unsigned int block; + long unsigned int offset; +}; + +struct ahash_alg { + int (*init)(struct ahash_request *); + int (*update)(struct ahash_request *); + int (*final)(struct ahash_request *); + int (*finup)(struct ahash_request *); + int (*digest)(struct ahash_request *); + int (*export)(struct ahash_request *, void *); + int (*import)(struct ahash_request *, const void 
*); + int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_ahash *); + void (*exit_tfm)(struct crypto_ahash *); + int (*clone_tfm)(struct crypto_ahash *, struct crypto_ahash *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct hash_alg_common halg; +}; + +struct crypto_hash_walk { + char *data; + unsigned int offset; + unsigned int flags; + struct page *pg; + unsigned int entrylen; + unsigned int total; + struct scatterlist *sg; +}; + +struct ahash_instance { + void (*free)(struct ahash_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct ahash_alg alg; + }; +}; + +struct show_busy_params { + struct seq_file *m; + struct blk_mq_hw_ctx *hctx; +}; + +typedef enum { + trustInput = 0, + checkMaxSymbolValue = 1, +} HIST_checkInput_e; + +enum dim_cq_period_mode { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 1, + DIM_CQ_PERIOD_NUM_MODES = 2, +}; + +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP = 1, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP = 2, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP = 3, +}; + +struct efifb_par { + u32 pseudo_palette[16]; + resource_size_t base; + resource_size_t size; +}; + +enum dpm_order { + DPM_ORDER_NONE = 0, + DPM_ORDER_DEV_AFTER_PARENT = 1, + DPM_ORDER_PARENT_BEFORE_DEV = 2, + DPM_ORDER_DEV_LAST = 3, +}; + +struct fwnode_link { + struct fwnode_handle *supplier; + struct list_head s_hook; + struct fwnode_handle *consumer; + struct list_head c_hook; + u8 flags; +}; + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +struct class_dir { + struct kobject kobj; + const struct class *class; +}; + +struct root_device { + struct device dev; + struct module *owner; + long: 32; +}; + +struct ata_timing { + short unsigned int mode; + short unsigned int setup; + short unsigned int act8b; + short unsigned int rec8b; + short unsigned int cyc8b; + short unsigned int active; + short unsigned int recover; + short unsigned int dmack_hold; + short unsigned int cycle; + short unsigned int udma; +}; + +struct virtio_net_config { + __u8 mac[6]; + __virtio16 status; + __virtio16 max_virtqueue_pairs; + __virtio16 mtu; + __le32 speed; + __u8 duplex; + __u8 rss_max_key_size; + __le16 rss_max_indirection_table_length; + __le32 supported_hash_types; +}; + +struct virtio_net_hdr_v1 { + __u8 flags; + __u8 gso_type; + __virtio16 hdr_len; + __virtio16 gso_size; + union { + struct { + __virtio16 csum_start; + __virtio16 csum_offset; + }; + struct { + __virtio16 start; + __virtio16 offset; + } csum; + struct { + __le16 segments; + __le16 dup_acks; + } rsc; + }; + __virtio16 num_buffers; +}; + +struct virtio_net_hdr_v1_hash { + struct virtio_net_hdr_v1 hdr; + __le32 hash_value; + __le16 hash_report; + __le16 padding; +}; + +struct virtio_net_ctrl_hdr { + __u8 class; + __u8 cmd; +}; + +typedef __u8 virtio_net_ctrl_ack; + +struct virtio_net_ctrl_mac { + __virtio32 entries; + __u8 macs[0]; +}; + +struct virtio_net_ctrl_mq { + __virtio16 virtqueue_pairs; +}; + +struct virtio_net_ctrl_coal_tx { + __le32 tx_max_packets; + __le32 tx_usecs; +}; + +struct virtio_net_ctrl_coal_rx { + __le32 rx_max_packets; + __le32 rx_usecs; +}; + 
+struct virtio_net_ctrl_coal { + __le32 max_packets; + __le32 max_usecs; +}; + +struct virtio_net_ctrl_coal_vq { + __le16 vqn; + __le16 reserved; + struct virtio_net_ctrl_coal coal; +}; + +struct virtio_net_stats_capabilities { + __le64 supported_stats_types[1]; +}; + +struct virtio_net_ctrl_queue_stats { + struct { + __le16 vq_index; + __le16 reserved[3]; + __le64 types_bitmap[1]; + } stats[1]; +}; + +struct virtio_net_stats_reply_hdr { + __u8 type; + __u8 reserved; + __le16 vq_index; + __le16 reserved1; + __le16 size; +}; + +struct rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *, char *); + ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); +}; + +struct ewma_pkt_len { + long unsigned int internal; +}; + +struct virtnet_stat_desc { + char desc[32]; + size_t offset; + size_t qstat_offset; +}; + +struct virtnet_sq_free_stats { + u64 packets; + u64 bytes; + u64 napi_packets; + u64 napi_bytes; + u64 xsk; +}; + +struct virtnet_sq_stats { + struct u64_stats_sync syncp; + long: 32; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t xdp_tx; + u64_stats_t xdp_tx_drops; + u64_stats_t kicks; + u64_stats_t tx_timeouts; + u64_stats_t stop; + u64_stats_t wake; +}; + +struct virtnet_rq_stats { + struct u64_stats_sync syncp; + long: 32; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t drops; + u64_stats_t xdp_packets; + u64_stats_t xdp_tx; + u64_stats_t xdp_redirects; + u64_stats_t xdp_drops; + u64_stats_t kicks; +}; + +struct virtnet_interrupt_coalesce { + u32 max_packets; + u32 max_usecs; +}; + +struct virtnet_rq_dma { + dma_addr_t addr; + u32 ref; + u16 len; + u16 need_sync; +}; + +struct send_queue { + struct virtqueue *vq; + struct scatterlist sg[19]; + char name[16]; + long: 32; + struct virtnet_sq_stats stats; + struct virtnet_interrupt_coalesce intr_coal; + struct napi_struct napi; + bool reset; + struct xsk_buff_pool *xsk_pool; + dma_addr_t xsk_hdr_dma_addr; + long: 32; +}; + +struct receive_queue { + struct virtqueue *vq; + long: 32; + struct napi_struct napi; + struct bpf_prog *xdp_prog; + long: 32; + struct virtnet_rq_stats stats; + u16 calls; + bool dim_enabled; + struct mutex dim_lock; + struct dim dim; + u32 packets_in_napi; + struct virtnet_interrupt_coalesce intr_coal; + struct page *pages; + struct ewma_pkt_len mrg_avg_pkt_len; + struct page_frag alloc_frag; + struct scatterlist sg[19]; + unsigned int min_buf_len; + char name[16]; + long: 32; + long: 32; + long: 32; + long: 32; + struct xdp_rxq_info xdp_rxq; + struct virtnet_rq_dma *last_dma; + struct xsk_buff_pool *xsk_pool; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xdp_rxq_info xsk_rxq_info; + struct xdp_buff **xsk_buffs; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct virtio_net_ctrl_rss { + u32 hash_types; + u16 indirection_table_mask; + u16 unclassified_queue; + u16 hash_cfg_reserved; + u16 max_tx_vq; + u8 hash_key_length; + u8 key[40]; + u16 *indirection_table; +}; + +struct control_buf { + struct virtio_net_ctrl_hdr hdr; + virtio_net_ctrl_ack status; +}; + +struct virtnet_info { + struct virtio_device *vdev; + struct virtqueue *cvq; + struct net_device *dev; + struct send_queue *sq; + struct receive_queue *rq; + unsigned int status; + u16 max_queue_pairs; + u16 curr_queue_pairs; + 
u16 xdp_queue_pairs; + bool xdp_enabled; + bool big_packets; + unsigned int big_packets_num_skbfrags; + bool mergeable_rx_bufs; + bool has_rss; + bool has_rss_hash_report; + u8 rss_key_size; + u16 rss_indir_table_size; + u32 rss_hash_types_supported; + u32 rss_hash_types_saved; + struct virtio_net_ctrl_rss rss; + bool has_cvq; + struct mutex cvq_lock; + bool any_header_sg; + u8 hdr_len; + struct delayed_work refill; + bool refill_enabled; + spinlock_t refill_lock; + struct work_struct config_work; + struct work_struct rx_mode_work; + bool rx_mode_work_enabled; + bool affinity_hint_set; + struct hlist_node node; + struct hlist_node node_dead; + struct control_buf *ctrl; + u8 duplex; + u32 speed; + bool rx_dim_enabled; + struct virtnet_interrupt_coalesce intr_coal_tx; + struct virtnet_interrupt_coalesce intr_coal_rx; + long unsigned int guest_offloads; + long unsigned int guest_offloads_capable; + struct failover *failover; + u64 device_stats_cap; +}; + +struct virtio_net_common_hdr { + union { + struct virtio_net_hdr hdr; + struct virtio_net_hdr_mrg_rxbuf mrg_hdr; + struct virtio_net_hdr_v1_hash hash_v1_hdr; + }; +}; + +enum virtnet_xmit_type { + VIRTNET_XMIT_TYPE_SKB = 0, + VIRTNET_XMIT_TYPE_SKB_ORPHAN = 1, + VIRTNET_XMIT_TYPE_XDP = 2, + VIRTNET_XMIT_TYPE_XSK = 3, +}; + +struct virtnet_stats_ctx { + bool to_qstat; + u32 desc_num[3]; + u64 bitmap[3]; + u32 size[3]; + u64 *data; +}; + +struct iwl_sensitivity_cmd { + __le16 control; + __le16 table[11]; +}; + +struct iwl_enhance_sensitivity_cmd { + __le16 control; + __le16 enhance_table[23]; +}; + +struct iwl_calib_chain_noise_gain_cmd { + struct iwl_calib_hdr hdr; + u8 delta_gain_1; + u8 delta_gain_2; + u8 pad[2]; +}; + +enum iwlagn_false_alarm_state { + IWL_FA_TOO_MANY = 0, + IWL_FA_TOO_FEW = 1, + IWL_FA_GOOD_RANGE = 2, +}; + +struct iwl_calib_result { + struct list_head list; + size_t cmd_len; + struct iwl_calib_cmd cmd; +}; + +struct statistics_general_data { + u32 beacon_silence_rssi_a; + u32 beacon_silence_rssi_b; + u32 beacon_silence_rssi_c; + u32 beacon_energy_a; + u32 beacon_energy_b; + u32 beacon_energy_c; +}; + +struct ieee80211_bss_load_elem { + __le16 sta_count; + u8 channel_util; + __le16 avail_admission_capa; +} __attribute__((packed)); + +enum iwl_link_ctx_protection_flags { + LINK_PROT_FLG_TGG_PROTECT = 1, + LINK_PROT_FLG_HT_PROT = 2, + LINK_PROT_FLG_FAT_PROT = 4, + LINK_PROT_FLG_SELF_CTS_EN = 8, +}; + +enum iwl_link_ctx_flags { + LINK_FLG_BSS_COLOR_DIS = 1, + LINK_FLG_MU_EDCA_CW = 2, + LINK_FLG_RU_2MHZ_BLOCK = 4, + LINK_FLG_NDP_FEEDBACK_ENABLED = 8, +}; + +struct iwl_link_config_cmd { + __le32 action; + __le32 link_id; + __le32 mac_id; + __le32 phy_id; + u8 local_link_addr[6]; + __le16 reserved_for_local_link_addr; + __le32 modify_mask; + __le32 active; + union { + __le32 listen_lmac; + struct { + u8 block_tx; + u8 modify_bandwidth; + u8 reserved1[2]; + }; + }; + __le32 cck_rates; + __le32 ofdm_rates; + __le32 cck_short_preamble; + __le32 short_slot; + __le32 protection_flags; + __le32 qos_flags; + struct iwl_ac_qos___2 ac[5]; + u8 htc_trig_based_pkt_ext; + u8 rand_alloc_ecwmin; + u8 rand_alloc_ecwmax; + u8 ndp_fdbk_buff_th_exp; + struct iwl_he_backoff_conf trig_based_txf[4]; + __le32 bi; + __le32 dtim_interval; + __le16 puncture_mask; + __le16 frame_time_rts_th; + __le32 flags; + __le32 flags_mask; + u8 ref_bssid_addr[6]; + __le16 reserved_for_ref_bssid_addr; + u8 bssid_index; + u8 bss_color; + u8 spec_link_id; + u8 ul_mu_data_disable; + u8 ibss_bssid_addr[6]; + __le16 reserved_for_ibss_bssid_addr; + __le32 reserved3[8]; +}; 
+ +struct iwl_mvm_link_sel_data { + u8 link_id; + const struct cfg80211_chan_def *chandef; + s32 signal; + u16 grade; +}; + +struct iwl_mvm_esr_iter_data { + struct ieee80211_vif *vif; + unsigned int link_id; + bool lift_block; +}; + +struct iwl_mvm_rssi_to_grade { + s8 rssi[2]; + u16 grade; +}; + +struct iwl_mvm_bw_to_rssi_threshs { + s8 low; + s8 high; +}; + +struct rtw_backup_info { + u8 len; + u32 reg; + u32 val; +}; + +struct rtw_phy_cck_pd_reg { + u32 reg_pd; + u32 mask_pd; + u32 reg_cs; + u32 mask_cs; +}; + +enum rtw_dm_cap { + RTW_DM_CAP_NA = 0, + RTW_DM_CAP_TXGAPK = 1, + RTW_DM_CAP_NUM = 2, +}; + +struct rtw8822cu_efuse { + u8 res0[48]; + u8 vid[2]; + u8 pid[2]; + u8 res1[3]; + u8 mac_addr[6]; + u8 res2[61]; +}; + +struct rtw8822cs_efuse { + u8 res0[74]; + u8 mac_addr[6]; +}; + +struct rtw8822ce_efuse { + u8 mac_addr[6]; + u8 vender_id[2]; + u8 device_id[2]; + u8 sub_vender_id[2]; + u8 sub_device_id[2]; + u8 pmc[2]; + u8 exp_device_cap[2]; + u8 msi_cap; + u8 ltr_cap; + u8 exp_link_control[2]; + u8 link_cap[4]; + u8 link_control[2]; + u8 serial_number[8]; + u8 res0: 2; + u8 ltr_en: 1; + u8 res1: 2; + u8 obff: 2; + char: 1; + u8 res2: 3; + u8 obff_cap: 2; + short: 3; + u8 res3: 4; + u8 class_code[3]; + u8 res4; + u8 pci_pm_L1_2_supp: 1; + u8 pci_pm_L1_1_supp: 1; + u8 aspm_pm_L1_2_supp: 1; + u8 aspm_pm_L1_1_supp: 1; + u8 L1_pm_substates_supp: 1; + u8 res5: 3; + u8 port_common_mode_restore_time; + u8 port_t_power_on_scale: 2; + u8 res6: 1; + u8 port_t_power_on_value: 5; + u8 res7; +}; + +struct rtw8822c_efuse { + __le16 rtl_id; + u8 res0[4]; + u8 usb_mode; + u8 res1[9]; + struct rtw_txpwr_idx txpwr_idx_table[4]; + u8 channel_plan; + u8 xtal_k; + u8 res2; + u8 iqk_lck; + u8 res3[5]; + u8 rf_board_option; + u8 rf_feature_option; + u8 rf_bt_setting; + u8 eeprom_version; + u8 eeprom_customer_id; + u8 tx_bb_swing_setting_2g; + u8 tx_bb_swing_setting_5g; + u8 tx_pwr_calibrate_rate; + u8 rf_antenna_option; + u8 rfe_option; + u8 country_code[2]; + u8 res4[3]; + u8 path_a_thermal; + u8 path_b_thermal; + u8 res5[2]; + u8 rx_gain_gap_2g_ofdm; + u8 res6; + u8 rx_gain_gap_2g_cck; + u8 res7; + u8 rx_gain_gap_5gl; + u8 res8; + u8 rx_gain_gap_5gm; + u8 res9; + u8 rx_gain_gap_5gh; + u8 res10; + u8 res11[66]; + union { + struct rtw8822ce_efuse e; + struct rtw8822cu_efuse u; + struct rtw8822cs_efuse s; + }; +}; + +enum rtw8822c_dpk_agc_phase { + RTW_DPK_GAIN_CHECK = 0, + RTW_DPK_GAIN_LARGE = 1, + RTW_DPK_GAIN_LESS = 2, + RTW_DPK_GL_LARGE = 3, + RTW_DPK_GL_LESS = 4, + RTW_DPK_LOSS_CHECK = 5, + RTW_DPK_AGC_OUT = 6, +}; + +enum rtw8822c_dpk_one_shot_action { + RTW_DPK_CAL_PWR = 0, + RTW_DPK_GAIN_LOSS = 1, + RTW_DPK_DO_DPK = 2, + RTW_DPK_DPK_ON = 3, + RTW_DPK_DAGC = 4, + RTW_DPK_ACTION_MAX = 5, +}; + +struct dpk_cfg_pair { + u32 addr; + u32 bitmask; + u32 data; +}; + +struct rtw8822c_dpk_data { + u8 txbb; + u8 pga; + u8 limited_pga; + u8 agc_cnt; + bool loss_only; + bool gain_only; + u8 path; +}; + +enum rc_filter_type { + RC_FILTER_NORMAL = 0, + RC_FILTER_WAKEUP = 1, + RC_FILTER_MAX = 2, +}; + +struct led_trigger {}; + +struct rc_filter_attribute { + struct device_attribute attr; + enum rc_filter_type type; + bool mask; +}; + +struct cs_policy_dbs_info { + struct policy_dbs_info policy_dbs; + unsigned int down_skip; + unsigned int requested_freq; +}; + +struct cs_dbs_tuners { + unsigned int down_threshold; + unsigned int freq_step; +}; + +struct of_intc_desc { + struct list_head list; + of_irq_init_cb_t irq_init_cb; + struct device_node *dev; + struct device_node *interrupt_parent; +}; + +enum { + 
TCA_STATS_UNSPEC = 0, + TCA_STATS_BASIC = 1, + TCA_STATS_RATE_EST = 2, + TCA_STATS_QUEUE = 3, + TCA_STATS_APP = 4, + TCA_STATS_RATE_EST64 = 5, + TCA_STATS_PAD = 6, + TCA_STATS_BASIC_HW = 7, + TCA_STATS_PKT64 = 8, + __TCA_STATS_MAX = 9, +}; + +struct gnet_stats_basic { + __u64 bytes; + __u32 packets; + long: 32; +}; + +struct gnet_stats_rate_est { + __u32 bps; + __u32 pps; +}; + +struct eee_reply_data { + struct ethnl_reply_data base; + struct ethtool_keee eee; +}; + +struct nf_ct_bridge_info { + struct nf_hook_ops *ops; + unsigned int ops_size; + struct module *me; +}; + +struct ipq { + struct inet_frag_queue q; + u8 ecn; + u16 max_df_size; + int iif; + unsigned int rid; + struct inet_peer *peer; +}; + +struct icmp_ext_hdr { + __u8 reserved1: 4; + __u8 version: 4; + __u8 reserved2; + __sum16 checksum; +}; + +struct icmp_extobj_hdr { + __be16 length; + __u8 class_num; + __u8 class_type; +}; + +struct icmp_ext_echo_ctype3_hdr { + __be16 afi; + __u8 addrlen; + __u8 reserved; +}; + +struct icmp_ext_echo_iio { + struct icmp_extobj_hdr extobj_hdr; + union { + char name[16]; + __be32 ifindex; + struct { + struct icmp_ext_echo_ctype3_hdr ctype3_hdr; + union { + __be32 ipv4_addr; + struct in6_addr ipv6_addr; + } ip_addr; + } addr; + } ident; +}; + +struct trace_event_raw_icmp_send { + struct trace_entry ent; + const void *skbaddr; + int type; + int code; + __u8 saddr[4]; + __u8 daddr[4]; + __u16 sport; + __u16 dport; + short unsigned int ulen; + char __data[0]; +}; + +struct trace_event_data_offsets_icmp_send {}; + +typedef void (*btf_trace_icmp_send)(void *, const struct sk_buff *, int, int); + +struct icmp_bxm { + struct sk_buff *skb; + int offset; + int data_len; + struct { + struct icmphdr icmph; + __be32 times[3]; + } data; + int head_len; + struct ip_options_data replyopts; +}; + +struct icmp_control { + enum skb_drop_reason (*handler)(struct sk_buff *); + short int error; +}; + +struct semaphore_waiter { + struct list_head list; + struct task_struct *task; + bool up; +}; + +enum rwsem_waiter_type { + RWSEM_WAITING_FOR_WRITE = 0, + RWSEM_WAITING_FOR_READ = 1, +}; + +struct rwsem_waiter { + struct list_head list; + struct task_struct *task; + enum rwsem_waiter_type type; + long unsigned int timeout; + bool handoff_set; +}; + +enum rwsem_wake_type { + RWSEM_WAKE_ANY = 0, + RWSEM_WAKE_READERS = 1, + RWSEM_WAKE_READ_OWNED = 2, +}; + +enum owner_state { + OWNER_NULL = 1, + OWNER_WRITER = 2, + OWNER_READER = 4, + OWNER_NONSPINNABLE = 8, +}; + +enum pm_qos_req_action { + PM_QOS_ADD_REQ = 0, + PM_QOS_UPDATE_REQ = 1, + PM_QOS_REMOVE_REQ = 2, +}; + +struct trace_event_raw_cpu { + struct trace_entry ent; + u32 state; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_idle_miss { + struct trace_entry ent; + u32 cpu_id; + u32 state; + bool below; + char __data[0]; +}; + +struct trace_event_raw_powernv_throttle { + struct trace_entry ent; + int chip_id; + u32 __data_loc_reason; + int pmax; + char __data[0]; +}; + +struct trace_event_raw_pstate_sample { + struct trace_entry ent; + u32 core_busy; + u32 scaled_busy; + u32 from; + u32 to; + u64 mperf; + u64 aperf; + u64 tsc; + u32 freq; + u32 io_boost; + char __data[0]; +}; + +struct trace_event_raw_cpu_frequency_limits { + struct trace_entry ent; + u32 min_freq; + u32 max_freq; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_start { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u32 __data_loc_parent; + u32 __data_loc_pm_ops; + int event; + char __data[0]; +}; + +struct 
trace_event_raw_device_pm_callback_end { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + int error; + char __data[0]; +}; + +struct trace_event_raw_suspend_resume { + struct trace_entry ent; + const char *action; + int val; + bool start; + char __data[0]; +}; + +struct trace_event_raw_wakeup_source { + struct trace_entry ent; + u32 __data_loc_name; + long: 32; + u64 state; + char __data[0]; +}; + +struct trace_event_raw_clock { + struct trace_entry ent; + u32 __data_loc_name; + long: 32; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_power_domain { + struct trace_entry ent; + u32 __data_loc_name; + long: 32; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_latency_qos_request { + struct trace_entry ent; + s32 value; + char __data[0]; +}; + +struct trace_event_raw_pm_qos_update { + struct trace_entry ent; + enum pm_qos_req_action action; + int prev_value; + int curr_value; + char __data[0]; +}; + +struct trace_event_raw_dev_pm_qos_request { + struct trace_entry ent; + u32 __data_loc_name; + enum dev_pm_qos_req_type type; + s32 new_value; + char __data[0]; +}; + +struct trace_event_raw_guest_halt_poll_ns { + struct trace_entry ent; + bool grow; + unsigned int new; + unsigned int old; + char __data[0]; +}; + +struct trace_event_data_offsets_cpu {}; + +struct trace_event_data_offsets_cpu_idle_miss {}; + +struct trace_event_data_offsets_powernv_throttle { + u32 reason; + const void *reason_ptr_; +}; + +struct trace_event_data_offsets_pstate_sample {}; + +struct trace_event_data_offsets_cpu_frequency_limits {}; + +struct trace_event_data_offsets_device_pm_callback_start { + u32 device; + const void *device_ptr_; + u32 driver; + const void *driver_ptr_; + u32 parent; + const void *parent_ptr_; + u32 pm_ops; + const void *pm_ops_ptr_; +}; + +struct trace_event_data_offsets_device_pm_callback_end { + u32 device; + const void *device_ptr_; + u32 driver; + const void *driver_ptr_; +}; + +struct trace_event_data_offsets_suspend_resume {}; + +struct trace_event_data_offsets_wakeup_source { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clock { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_power_domain { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cpu_latency_qos_request {}; + +struct trace_event_data_offsets_pm_qos_update {}; + +struct trace_event_data_offsets_dev_pm_qos_request { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_guest_halt_poll_ns {}; + +typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_idle_miss)(void *, unsigned int, unsigned int, bool); + +typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int); + +typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32); + +typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *); + +typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int); + +typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int); + +typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool); + +typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int); + +typedef void 
(*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_pm_qos_add_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_remove_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_guest_halt_poll_ns)(void *, bool, unsigned int, unsigned int); + +struct bpf_lpm_trie_key_hdr { + __u32 prefixlen; +}; + +struct bpf_lpm_trie_key_u8 { + union { + struct bpf_lpm_trie_key_hdr hdr; + __u32 prefixlen; + }; + __u8 data[0]; +}; + +struct lpm_trie_node { + struct callback_head rcu; + struct lpm_trie_node *child[2]; + u32 prefixlen; + u32 flags; + u8 data[0]; +}; + +struct lpm_trie { + struct bpf_map map; + struct lpm_trie_node *root; + size_t n_entries; + size_t max_prefixlen; + size_t data_size; + spinlock_t lock; + long: 32; +}; + +struct trace_event_raw_oom_score_adj_update { + struct trace_entry ent; + pid_t pid; + char comm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_reclaim_retry_zone { + struct trace_entry ent; + int node; + int zone_idx; + int order; + long unsigned int reclaimable; + long unsigned int available; + long unsigned int min_wmark; + int no_progress_loops; + bool wmark_check; + char __data[0]; +}; + +struct trace_event_raw_mark_victim { + struct trace_entry ent; + int pid; + u32 __data_loc_comm; + long unsigned int total_vm; + long unsigned int anon_rss; + long unsigned int file_rss; + long unsigned int shmem_rss; + uid_t uid; + long unsigned int pgtables; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_wake_reaper { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_start_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_finish_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_skip_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_compact_retry { + struct trace_entry ent; + int order; + int priority; + int result; + int retries; + int max_retries; + bool ret; + char __data[0]; +}; + +struct trace_event_data_offsets_oom_score_adj_update {}; + +struct trace_event_data_offsets_reclaim_retry_zone {}; + +struct trace_event_data_offsets_mark_victim { + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_wake_reaper {}; + +struct trace_event_data_offsets_start_task_reaping {}; + +struct trace_event_data_offsets_finish_task_reaping {}; + +struct trace_event_data_offsets_skip_task_reaping {}; + +struct trace_event_data_offsets_compact_retry {}; + +typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *); + 
+typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, long unsigned int, long unsigned int, long unsigned int, int, bool); + +typedef void (*btf_trace_mark_victim)(void *, struct task_struct *, uid_t); + +typedef void (*btf_trace_wake_reaper)(void *, int); + +typedef void (*btf_trace_start_task_reaping)(void *, int); + +typedef void (*btf_trace_finish_task_reaping)(void *, int); + +typedef void (*btf_trace_skip_task_reaping)(void *, int); + +typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, enum compact_result, int, int, bool); + +struct vmap_area { + long unsigned int va_start; + long unsigned int va_end; + struct rb_node rb_node; + struct list_head list; + union { + long unsigned int subtree_max_size; + struct vm_struct *vm; + }; + long unsigned int flags; +}; + +struct trace_event_raw_alloc_vmap_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int size; + long unsigned int align; + long unsigned int vstart; + long unsigned int vend; + int failed; + char __data[0]; +}; + +struct trace_event_raw_purge_vmap_area_lazy { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + unsigned int npurged; + char __data[0]; +}; + +struct trace_event_raw_free_vmap_area_noflush { + struct trace_entry ent; + long unsigned int va_start; + long unsigned int nr_lazy; + long unsigned int nr_lazy_max; + char __data[0]; +}; + +struct trace_event_data_offsets_alloc_vmap_area {}; + +struct trace_event_data_offsets_purge_vmap_area_lazy {}; + +struct trace_event_data_offsets_free_vmap_area_noflush {}; + +typedef void (*btf_trace_alloc_vmap_area)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_purge_vmap_area_lazy)(void *, long unsigned int, long unsigned int, unsigned int); + +typedef void (*btf_trace_free_vmap_area_noflush)(void *, long unsigned int, long unsigned int, long unsigned int); + +struct vfree_deferred { + struct llist_head list; + struct work_struct wq; +}; + +struct rb_list { + struct rb_root root; + struct list_head head; + spinlock_t lock; +}; + +struct vmap_pool { + struct list_head head; + long unsigned int len; +}; + +struct vmap_node { + struct vmap_pool pool[256]; + spinlock_t pool_lock; + bool skip_populate; + struct rb_list busy; + struct rb_list lazy; + struct list_head purge_list; + struct work_struct purge_work; + long unsigned int nr_purged; +}; + +enum fit_type { + NOTHING_FIT = 0, + FL_FIT_TYPE = 1, + LE_FIT_TYPE = 2, + RE_FIT_TYPE = 3, + NE_FIT_TYPE = 4, +}; + +struct vmap_block_queue { + spinlock_t lock; + struct list_head free; + struct xarray vmap_blocks; +}; + +struct vmap_block { + spinlock_t lock; + struct vmap_area *va; + long unsigned int free; + long unsigned int dirty; + long unsigned int used_map[2]; + long unsigned int dirty_min; + long unsigned int dirty_max; + struct list_head free_list; + struct callback_head callback_head; + struct list_head purge; + unsigned int cpu; +}; + +struct xattr_args { + __u64 value; + __u32 size; + __u32 flags; +}; + +struct xattr_name { + char name[256]; +}; + +struct kernel_xattr_ctx { + union { + const void *cvalue; + void *value; + }; + void *kvalue; + size_t size; + struct xattr_name *kname; + unsigned int flags; +}; + +struct proc_fs_context { + struct pid_namespace *pid_ns; + unsigned int mask; + enum proc_hidepid hidepid; + int gid; + enum proc_pidonly pidonly; +}; + +enum proc_param { + Opt_gid___8 = 0, + Opt_hidepid = 1, + Opt_subset = 2, +}; + 
+struct ext4_free_data { + struct list_head efd_list; + struct rb_node efd_node; + ext4_group_t efd_group; + ext4_grpblk_t efd_start_cluster; + ext4_grpblk_t efd_count; + tid_t efd_tid; +}; + +enum { + MB_INODE_PA = 0, + MB_GROUP_PA = 1, +}; + +struct ext4_buddy { + struct folio *bd_buddy_folio; + void *bd_buddy; + struct folio *bd_bitmap_folio; + void *bd_bitmap; + struct ext4_group_info *bd_info; + struct super_block *bd_sb; + __u16 bd_blkbits; + ext4_group_t bd_group; +}; + +struct sg { + struct ext4_group_info info; + ext4_grpblk_t counters[18]; +}; + +struct fuse_ioctl_in { + uint64_t fh; + uint32_t flags; + uint32_t cmd; + uint64_t arg; + uint32_t in_size; + uint32_t out_size; +}; + +struct fuse_ioctl_iovec { + uint64_t base; + uint64_t len; +}; + +struct fuse_ioctl_out { + int32_t result; + uint32_t flags; + uint32_t in_iovs; + uint32_t out_iovs; +}; + +struct fsverity_enable_arg { + __u32 version; + __u32 hash_algorithm; + __u32 block_size; + __u32 salt_size; + __u64 salt_ptr; + __u32 sig_size; + __u32 __reserved1; + __u64 sig_ptr; + __u64 __reserved2[11]; +}; + +struct fsverity_digest { + __u16 digest_algorithm; + __u16 digest_size; + __u8 digest[0]; +}; + +struct btrfs_dev_replace_item { + __le64 src_devid; + __le64 cursor_left; + __le64 cursor_right; + __le64 cont_reading_from_srcdev_mode; + __le64 replace_state; + __le64 time_started; + __le64 time_stopped; + __le64 num_write_errors; + __le64 num_uncorrectable_read_errors; +}; + +struct btrfs_failed_bio { + struct btrfs_bio *bbio; + int num_copies; + atomic_t repair_count; +}; + +struct async_submit_bio { + struct btrfs_bio *bbio; + struct btrfs_io_context *bioc; + struct btrfs_io_stripe smap; + int mirror_num; + struct btrfs_work work; + long: 32; +}; + +struct crypto_rfc3686_ctx { + struct crypto_skcipher *child; + u8 nonce[4]; +}; + +struct crypto_rfc3686_req_ctx { + u8 iv[16]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct skcipher_request subreq; +}; + +struct blk_ia_range_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_independent_access_range *, char *); +}; + +struct io_waitid_async { + struct io_kiocb *req; + struct wait_opts wo; +}; + +struct io_waitid { + struct file *file; + int which; + pid_t upid; + int options; + atomic_t refs; + struct wait_queue_head *head; + struct siginfo *infop; + struct waitid_info info; +}; + +enum hwparam_type { + hwparam_ioport = 0, + hwparam_iomem = 1, + hwparam_ioport_or_iomem = 2, + hwparam_irq = 3, + hwparam_dma = 4, + hwparam_dma_addr = 5, + hwparam_other = 6, +}; + +struct internal_container { + struct klist_node node; + struct attribute_container *cont; + long: 32; + struct device classdev; +}; + +enum scale_freq_source { + SCALE_FREQ_SOURCE_CPUFREQ = 0, + SCALE_FREQ_SOURCE_ARCH = 1, + SCALE_FREQ_SOURCE_CPPC = 2, + SCALE_FREQ_SOURCE_VIRT = 3, +}; + +struct scale_freq_data { + enum scale_freq_source source; + void (*set_freq_scale)(); +}; + +struct trace_event_raw_hw_pressure_update { + struct trace_entry ent; + long unsigned int hw_pressure; + int cpu; + char __data[0]; +}; + +struct trace_event_data_offsets_hw_pressure_update {}; + +typedef void (*btf_trace_hw_pressure_update)(void *, int, long unsigned int); + +struct iwl_error_event_table___2 { + u32 valid; + u32 error_id; + u32 trm_hw_status0; + u32 trm_hw_status1; + u32 blink2; + u32 ilink1; + u32 ilink2; + u32 data1; + u32 data2; + u32 data3; + u32 bcon_time; + u32 tsf_low; + u32 tsf_hi; + u32 gp1; + 
u32 gp2; + u32 fw_rev_type; + u32 major; + u32 minor; + u32 hw_ver; + u32 brd_ver; + u32 log_pc; + u32 frame_ptr; + u32 stack_ptr; + u32 hcmd; + u32 isr0; + u32 isr1; + u32 isr2; + u32 isr3; + u32 isr4; + u32 last_cmd_id; + u32 wait_event; + u32 l2p_control; + u32 l2p_duration; + u32 l2p_mhvalid; + u32 l2p_addr_match; + u32 lmpm_pmg_sel; + u32 u_timestamp; + u32 flow_handler; +}; + +struct iwl_umac_error_event_table { + u32 valid; + u32 error_id; + u32 blink1; + u32 blink2; + u32 ilink1; + u32 ilink2; + u32 data1; + u32 data2; + u32 data3; + u32 umac_major; + u32 umac_minor; + u32 frame_pointer; + u32 stack_pointer; + u32 cmd_header; + u32 nic_isr_pref; +}; + +struct iwl_tcm_error_event_table { + u32 valid; + u32 error_id; + u32 blink2; + u32 ilink1; + u32 ilink2; + u32 data1; + u32 data2; + u32 data3; + u32 logpc; + u32 frame_pointer; + u32 stack_pointer; + u32 msgid; + u32 isr; + u32 hw_status[5]; + u32 sw_status[1]; + u32 reserved[4]; +}; + +struct iwl_rcm_error_event_table { + u32 valid; + u32 error_id; + u32 blink2; + u32 ilink1; + u32 ilink2; + u32 data1; + u32 data2; + u32 data3; + u32 logpc; + u32 frame_pointer; + u32 stack_pointer; + u32 msgid; + u32 isr; + u32 frame_hw_status; + u32 mbx_lmac_to_rcm_req; + u32 mbx_rcm_to_lmac_req; + u32 mh_ctl; + u32 mh_addr1_lo; + u32 mh_info; + u32 mh_err; + u32 reserved[3]; +}; + +struct error_table_start___2 { + u32 valid; + __le32 err_id; +}; + +enum iwl_tlc_mng_cfg_flags { + IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = 1, + IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = 2, + IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = 4, + IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = 8, + IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = 16, + IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK = 64, +}; + +enum iwl_tlc_mng_cfg_cw { + IWL_TLC_MNG_CH_WIDTH_20MHZ = 0, + IWL_TLC_MNG_CH_WIDTH_40MHZ = 1, + IWL_TLC_MNG_CH_WIDTH_80MHZ = 2, + IWL_TLC_MNG_CH_WIDTH_160MHZ = 3, + IWL_TLC_MNG_CH_WIDTH_320MHZ = 4, +}; + +enum iwl_tlc_mng_cfg_chains { + IWL_TLC_MNG_CHAIN_A_MSK = 1, + IWL_TLC_MNG_CHAIN_B_MSK = 2, +}; + +enum iwl_tlc_mng_cfg_mode { + IWL_TLC_MNG_MODE_CCK = 0, + IWL_TLC_MNG_MODE_OFDM_NON_HT = 0, + IWL_TLC_MNG_MODE_NON_HT = 0, + IWL_TLC_MNG_MODE_HT = 1, + IWL_TLC_MNG_MODE_VHT = 2, + IWL_TLC_MNG_MODE_HE = 3, + IWL_TLC_MNG_MODE_EHT = 4, +}; + +enum iwl_tlc_mng_ht_rates { + IWL_TLC_MNG_HT_RATE_MCS0 = 0, + IWL_TLC_MNG_HT_RATE_MCS1 = 1, + IWL_TLC_MNG_HT_RATE_MCS2 = 2, + IWL_TLC_MNG_HT_RATE_MCS3 = 3, + IWL_TLC_MNG_HT_RATE_MCS4 = 4, + IWL_TLC_MNG_HT_RATE_MCS5 = 5, + IWL_TLC_MNG_HT_RATE_MCS6 = 6, + IWL_TLC_MNG_HT_RATE_MCS7 = 7, + IWL_TLC_MNG_HT_RATE_MCS8 = 8, + IWL_TLC_MNG_HT_RATE_MCS9 = 9, + IWL_TLC_MNG_HT_RATE_MCS10 = 10, + IWL_TLC_MNG_HT_RATE_MCS11 = 11, + IWL_TLC_MNG_HT_RATE_MAX = 11, +}; + +struct iwl_tlc_config_cmd_v3 { + u8 sta_id; + u8 reserved1[3]; + u8 max_ch_width; + u8 mode; + u8 chains; + u8 amsdu; + __le16 flags; + __le16 non_ht_rates; + __le16 ht_rates[4]; + __le16 max_mpdu_len; + u8 sgi_ch_width_supp; + u8 reserved2; + __le32 max_tx_op; +}; + +struct iwl_tlc_config_cmd_v4 { + u8 sta_id; + u8 reserved1[3]; + u8 max_ch_width; + u8 mode; + u8 chains; + u8 sgi_ch_width_supp; + __le16 flags; + __le16 non_ht_rates; + __le16 ht_rates[6]; + __le16 max_mpdu_len; + __le16 max_tx_op; +}; + +enum iwl_tlc_update_flags { + IWL_TLC_NOTIF_FLAG_RATE = 1, + IWL_TLC_NOTIF_FLAG_AMSDU = 2, +}; + +struct iwl_tlc_update_notif { + u8 sta_id; + u8 reserved[3]; + __le32 flags; + __le32 rate; + __le32 amsdu_size; + __le32 amsdu_enabled; +}; + +struct rtw_iter_bitrate_mask_data { + struct rtw_dev *rtwdev; + struct ieee80211_vif 
*vif; + const struct cfg80211_bitrate_mask *mask; +}; + +struct class_info { + int class; + char *class_name; +}; + +enum sharp_state { + STATE_INACTIVE___9 = 0, + STATE_BIT_PULSE___5 = 1, + STATE_BIT_SPACE___5 = 2, + STATE_TRAILER_PULSE___4 = 3, + STATE_ECHO_SPACE = 4, + STATE_TRAILER_SPACE___4 = 5, +}; + +struct dm_io_client { + mempool_t pool; + struct bio_set bios; +}; + +struct io { + long unsigned int error_bits; + atomic_t count; + struct dm_io_client *client; + io_notify_fn callback; + void *context; + void *vma_invalidate_address; + long unsigned int vma_invalidate_size; + long: 32; +}; + +struct dpages { + void (*get_page)(struct dpages *, struct page **, long unsigned int *, unsigned int *); + void (*next_page)(struct dpages *); + union { + unsigned int context_u; + struct bvec_iter context_bi; + }; + void *context_ptr; + void *vma_invalidate_address; + long unsigned int vma_invalidate_size; +}; + +struct sync_io { + long unsigned int error_bits; + struct completion wait; +}; + +typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool); + +struct nvmem_layout_driver { + struct device_driver driver; + int (*probe)(struct nvmem_layout *); + void (*remove)(struct nvmem_layout *); +}; + +struct fib_notifier_net { + struct list_head fib_notifier_ops; + struct atomic_notifier_head fib_chain; +}; + +struct rss_req_info { + struct ethnl_req_info base; + u32 rss_context; +}; + +struct rss_reply_data { + struct ethnl_reply_data base; + bool no_key_fields; + u32 indir_size; + u32 hkey_size; + u32 hfunc; + u32 input_xfrm; + u32 *indir_table; + u8 *hkey; +}; + +struct rss_nl_dump_ctx { + long unsigned int ifindex; + long unsigned int ctx_idx; + unsigned int match_ifindex; + unsigned int start_ctx; +}; + +enum nf_ct_ecache_state { + NFCT_ECACHE_DESTROY_FAIL = 0, + NFCT_ECACHE_DESTROY_SENT = 1, +}; + +struct nf_ct_timeout { + __u16 l3num; + const struct nf_conntrack_l4proto *l4proto; + char data[0]; +}; + +struct nf_conn_timeout { + struct nf_ct_timeout *timeout; +}; + +struct conntrack_gc_work { + struct delayed_work dwork; + u32 next_bucket; + u32 avg_timeout; + u32 count; + u32 start_time; + bool exiting; + bool early_drop; +}; + +struct xt_log_info { + unsigned char level; + unsigned char logflags; + char prefix[30]; +}; + +struct arpreq { + struct sockaddr arp_pa; + struct sockaddr arp_ha; + int arp_flags; + struct sockaddr arp_netmask; + char arp_dev[16]; +}; + +enum { + AX25_VALUES_IPDEFMODE = 0, + AX25_VALUES_AXDEFMODE = 1, + AX25_VALUES_BACKOFF = 2, + AX25_VALUES_CONMODE = 3, + AX25_VALUES_WINDOW = 4, + AX25_VALUES_EWINDOW = 5, + AX25_VALUES_T1 = 6, + AX25_VALUES_T2 = 7, + AX25_VALUES_T3 = 8, + AX25_VALUES_IDLE = 9, + AX25_VALUES_N2 = 10, + AX25_VALUES_PACLEN = 11, + AX25_VALUES_PROTOCOL = 12, + AX25_MAX_VALUES = 13, +}; + +struct xt_get_revision { + char name[29]; + __u8 revision; +}; + +struct ipt_getinfo { + char name[32]; + unsigned int valid_hooks; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int num_entries; + unsigned int size; +}; + +struct ipt_get_entries { + char name[32]; + unsigned int size; + long: 32; + struct ipt_entry entrytable[0]; +}; + +struct ipt_error { + struct ipt_entry entry; + struct xt_error_target target; +}; + +struct br_frame_type { + __be16 type; + int (*frame_handler)(struct net_bridge_port *, struct sk_buff *); + struct hlist_node list; +}; + +struct trace_event_raw_wiphy_work_event { + struct trace_entry ent; + char wiphy_name[32]; + void *instance; + void *func; + char __data[0]; +}; + +struct 
trace_event_raw_wiphy_delayed_work_queue { + struct trace_entry ent; + char wiphy_name[32]; + void *instance; + void *func; + long unsigned int delay; + char __data[0]; +}; + +struct trace_event_raw_wiphy_work_worker_start { + struct trace_entry ent; + char wiphy_name[32]; + char __data[0]; +}; + +struct trace_event_raw_rdev_suspend { + struct trace_entry ent; + char wiphy_name[32]; + bool any; + bool disconnect; + bool magic_pkt; + bool gtk_rekey_failure; + bool eap_identity_req; + bool four_way_handshake; + bool rfkill_release; + bool valid_wow; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_int { + struct trace_entry ent; + char wiphy_name[32]; + int ret; + char __data[0]; +}; + +struct trace_event_raw_rdev_scan { + struct trace_entry ent; + char wiphy_name[32]; + char __data[0]; +}; + +struct trace_event_raw_wiphy_only_evt { + struct trace_entry ent; + char wiphy_name[32]; + char __data[0]; +}; + +struct trace_event_raw_wiphy_enabled_evt { + struct trace_entry ent; + char wiphy_name[32]; + bool enabled; + char __data[0]; +}; + +struct trace_event_raw_rdev_add_virtual_intf { + struct trace_entry ent; + char wiphy_name[32]; + u32 __data_loc_vir_intf_name; + enum nl80211_iftype type; + char __data[0]; +}; + +struct trace_event_raw_wiphy_wdev_evt { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + char __data[0]; +}; + +struct trace_event_raw_wiphy_wdev_cookie_evt { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + long: 32; + u64 cookie; + char __data[0]; +}; + +struct trace_event_raw_rdev_change_virtual_intf { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + enum nl80211_iftype type; + char __data[0]; +}; + +struct trace_event_raw_key_handle { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 mac_addr[6]; + int link_id; + u8 key_index; + bool pairwise; + char __data[0]; +}; + +struct trace_event_raw_rdev_add_key { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 mac_addr[6]; + int link_id; + u8 key_index; + bool pairwise; + u8 mode; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_default_key { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + int link_id; + u8 key_index; + bool unicast; + bool multicast; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_default_mgmt_key { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + int link_id; + u8 key_index; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_default_beacon_key { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + int link_id; + u8 key_index; + char __data[0]; +}; + +struct trace_event_raw_rdev_start_ap { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + int beacon_interval; + int dtim_period; + char ssid[33]; + enum nl80211_hidden_ssid hidden_ssid; + u32 wpa_ver; + bool privacy; + enum nl80211_auth_type auth_type; + int inactivity_timeout; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_rdev_change_beacon { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + int link_id; + u32 __data_loc_head; + u32 __data_loc_tail; + u32 __data_loc_beacon_ies; + u32 __data_loc_proberesp_ies; + u32 __data_loc_assocresp_ies; + u32 __data_loc_probe_resp; 
+ char __data[0]; +}; + +struct trace_event_raw_rdev_stop_ap { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_wiphy_netdev_evt { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_rdev_end_cac { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_station_add_change { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 sta_mac[6]; + u32 sta_flags_mask; + u32 sta_flags_set; + u32 sta_modify_mask; + int listen_interval; + u16 capability; + u16 aid; + u8 plink_action; + u8 plink_state; + u8 uapsd_queues; + u8 max_sp; + u8 opmode_notif; + bool opmode_notif_used; + u8 ht_capa[26]; + u8 vht_capa[12]; + char vlan[16]; + u32 __data_loc_supported_rates; + u32 __data_loc_ext_capab; + u32 __data_loc_supported_channels; + u32 __data_loc_supported_oper_classes; + char __data[0]; +}; + +struct trace_event_raw_wiphy_netdev_mac_evt { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 sta_mac[6]; + char __data[0]; +}; + +struct trace_event_raw_station_del { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 sta_mac[6]; + u8 subtype; + u16 reason_code; + int link_id; + char __data[0]; +}; + +struct trace_event_raw_rdev_dump_station { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 sta_mac[6]; + int idx; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_int_station_info { + struct trace_entry ent; + char wiphy_name[32]; + int ret; + int generation; + u32 connected_time; + u32 inactive_time; + u32 rx_bytes; + u32 tx_bytes; + u32 rx_packets; + u32 tx_packets; + u32 tx_retries; + u32 tx_failed; + u32 rx_dropped_misc; + u32 beacon_loss_count; + u16 llid; + u16 plid; + u8 plink_state; + char __data[0]; +}; + +struct trace_event_raw_mpath_evt { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 dst[6]; + u8 next_hop[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_dump_mpath { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 dst[6]; + u8 next_hop[6]; + int idx; + char __data[0]; +}; + +struct trace_event_raw_rdev_get_mpp { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 dst[6]; + u8 mpp[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_dump_mpp { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 dst[6]; + u8 mpp[6]; + int idx; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_int_mpath_info { + struct trace_entry ent; + char wiphy_name[32]; + int ret; + int generation; + u32 filled; + u32 frame_qlen; + u32 sn; + u32 metric; + u32 exptime; + u32 discovery_timeout; + u8 discovery_retries; + u8 flags; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_int_mesh_config { + struct trace_entry ent; + char wiphy_name[32]; + u16 dot11MeshRetryTimeout; + u16 dot11MeshConfirmTimeout; + u16 dot11MeshHoldingTimeout; + u16 dot11MeshMaxPeerLinks; + u8 dot11MeshMaxRetries; + u8 dot11MeshTTL; + u8 element_ttl; + bool auto_open_plinks; + u32 dot11MeshNbrOffsetMaxNeighbor; + u8 dot11MeshHWMPmaxPREQretries; + u32 path_refresh_time; + u32 dot11MeshHWMPactivePathTimeout; + u16 min_discovery_timeout; + u16 
dot11MeshHWMPpreqMinInterval; + u16 dot11MeshHWMPperrMinInterval; + u16 dot11MeshHWMPnetDiameterTraversalTime; + u8 dot11MeshHWMPRootMode; + u16 dot11MeshHWMPRannInterval; + bool dot11MeshGateAnnouncementProtocol; + bool dot11MeshForwarding; + s32 rssi_threshold; + u16 ht_opmode; + u32 dot11MeshHWMPactivePathToRootTimeout; + u16 dot11MeshHWMProotInterval; + u16 dot11MeshHWMPconfirmationInterval; + bool dot11MeshNolearn; + int ret; + char __data[0]; +}; + +struct trace_event_raw_rdev_update_mesh_config { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u16 dot11MeshRetryTimeout; + u16 dot11MeshConfirmTimeout; + u16 dot11MeshHoldingTimeout; + u16 dot11MeshMaxPeerLinks; + u8 dot11MeshMaxRetries; + u8 dot11MeshTTL; + u8 element_ttl; + bool auto_open_plinks; + u32 dot11MeshNbrOffsetMaxNeighbor; + u8 dot11MeshHWMPmaxPREQretries; + u32 path_refresh_time; + u32 dot11MeshHWMPactivePathTimeout; + u16 min_discovery_timeout; + u16 dot11MeshHWMPpreqMinInterval; + u16 dot11MeshHWMPperrMinInterval; + u16 dot11MeshHWMPnetDiameterTraversalTime; + u8 dot11MeshHWMPRootMode; + u16 dot11MeshHWMPRannInterval; + bool dot11MeshGateAnnouncementProtocol; + bool dot11MeshForwarding; + s32 rssi_threshold; + u16 ht_opmode; + u32 dot11MeshHWMPactivePathToRootTimeout; + u16 dot11MeshHWMProotInterval; + u16 dot11MeshHWMPconfirmationInterval; + bool dot11MeshNolearn; + u32 mask; + char __data[0]; +}; + +struct trace_event_raw_rdev_join_mesh { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u16 dot11MeshRetryTimeout; + u16 dot11MeshConfirmTimeout; + u16 dot11MeshHoldingTimeout; + u16 dot11MeshMaxPeerLinks; + u8 dot11MeshMaxRetries; + u8 dot11MeshTTL; + u8 element_ttl; + bool auto_open_plinks; + u32 dot11MeshNbrOffsetMaxNeighbor; + u8 dot11MeshHWMPmaxPREQretries; + u32 path_refresh_time; + u32 dot11MeshHWMPactivePathTimeout; + u16 min_discovery_timeout; + u16 dot11MeshHWMPpreqMinInterval; + u16 dot11MeshHWMPperrMinInterval; + u16 dot11MeshHWMPnetDiameterTraversalTime; + u8 dot11MeshHWMPRootMode; + u16 dot11MeshHWMPRannInterval; + bool dot11MeshGateAnnouncementProtocol; + bool dot11MeshForwarding; + s32 rssi_threshold; + u16 ht_opmode; + u32 dot11MeshHWMPactivePathToRootTimeout; + u16 dot11MeshHWMProotInterval; + u16 dot11MeshHWMPconfirmationInterval; + bool dot11MeshNolearn; + char __data[0]; +}; + +struct trace_event_raw_rdev_change_bss { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + int use_cts_prot; + int use_short_preamble; + int use_short_slot_time; + int ap_isolate; + int ht_opmode; + char __data[0]; +}; + +struct trace_event_raw_rdev_inform_bss { + struct trace_entry ent; + char wiphy_name[32]; + u8 bssid[6]; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_txq_params { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + enum nl80211_ac ac; + u16 txop; + u16 cwmin; + u16 cwmax; + u8 aifs; + char __data[0]; +}; + +struct trace_event_raw_rdev_libertas_set_mesh_channel { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_monitor_channel { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 
punctured; + char __data[0]; +}; + +struct trace_event_raw_rdev_auth { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 bssid[6]; + enum nl80211_auth_type auth_type; + char __data[0]; +}; + +struct trace_event_raw_rdev_assoc { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 bssid[6]; + u8 prev_bssid[6]; + bool use_mfp; + u32 flags; + u32 __data_loc_elements; + u8 ht_capa[26]; + u8 ht_capa_mask[26]; + u8 vht_capa[12]; + u8 vht_capa_mask[12]; + u32 __data_loc_fils_kek; + u32 __data_loc_fils_nonces; + char __data[0]; +}; + +struct trace_event_raw_rdev_deauth { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 bssid[6]; + u16 reason_code; + bool local_state_change; + char __data[0]; +}; + +struct trace_event_raw_rdev_disassoc { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 bssid[6]; + u16 reason_code; + bool local_state_change; + char __data[0]; +}; + +struct trace_event_raw_rdev_mgmt_tx_cancel_wait { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + long: 32; + u64 cookie; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_power_mgmt { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + bool enabled; + int timeout; + char __data[0]; +}; + +struct trace_event_raw_rdev_connect { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 bssid[6]; + char ssid[33]; + enum nl80211_auth_type auth_type; + bool privacy; + u32 wpa_versions; + u32 flags; + u8 prev_bssid[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_update_connect_params { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u32 changed; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_cqm_rssi_config { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + s32 rssi_thold; + u32 rssi_hyst; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_cqm_rssi_range_config { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + s32 rssi_low; + s32 rssi_high; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_cqm_txe_config { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u32 rate; + u32 pkts; + u32 intvl; + char __data[0]; +}; + +struct trace_event_raw_rdev_disconnect { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u16 reason_code; + char __data[0]; +}; + +struct trace_event_raw_rdev_join_ibss { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 bssid[6]; + char ssid[33]; + char __data[0]; +}; + +struct trace_event_raw_rdev_join_ocb { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_wiphy_params { + struct trace_entry ent; + char wiphy_name[32]; + u32 changed; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_tx_power { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + enum nl80211_tx_power_setting type; + int mbm; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_int_int { + struct trace_entry ent; + char wiphy_name[32]; + int func_ret; + int func_fill; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_bitrate_mask { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + unsigned int link_id; + u8 peer[6]; + char __data[0]; +}; + +struct 
trace_event_raw_rdev_update_mgmt_frame_registrations { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + u16 global_stypes; + u16 interface_stypes; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_int_tx_rx { + struct trace_entry ent; + char wiphy_name[32]; + int ret; + u32 tx; + u32 rx; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_void_tx_rx { + struct trace_entry ent; + char wiphy_name[32]; + u32 tx; + u32 tx_max; + u32 rx; + u32 rx_max; + char __data[0]; +}; + +struct trace_event_raw_tx_rx_evt { + struct trace_entry ent; + char wiphy_name[32]; + u32 tx; + u32 rx; + char __data[0]; +}; + +struct trace_event_raw_wiphy_netdev_id_evt { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + long: 32; + u64 id; + char __data[0]; +}; + +struct trace_event_raw_rdev_tdls_mgmt { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + int link_id; + u8 action_code; + u8 dialog_token; + u16 status_code; + u32 peer_capability; + bool initiator; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_rdev_dump_survey { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + int idx; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_int_survey_info { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + int ret; + u64 time; + u64 time_busy; + u64 time_ext_busy; + u64 time_rx; + u64 time_tx; + u64 time_scan; + u32 filled; + s8 noise; + char __data[0]; +}; + +struct trace_event_raw_rdev_tdls_oper { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + enum nl80211_tdls_operation oper; + char __data[0]; +}; + +struct trace_event_raw_rdev_pmksa { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 bssid[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_probe_client { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_remain_on_channel { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + unsigned int duration; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_int_cookie { + struct trace_entry ent; + char wiphy_name[32]; + int ret; + long: 32; + u64 cookie; + char __data[0]; +}; + +struct trace_event_raw_rdev_cancel_remain_on_channel { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + long: 32; + u64 cookie; + char __data[0]; +}; + +struct trace_event_raw_rdev_mgmt_tx { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + bool offchan; + unsigned int wait; + bool no_cck; + bool dont_wait_for_ack; + char __data[0]; +}; + +struct trace_event_raw_rdev_tx_control_port { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 dest[6]; + __be16 proto; + bool unencrypted; + int link_id; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_noack_map { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u16 noack_map; + char __data[0]; +}; + +struct trace_event_raw_wiphy_wdev_link_evt { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_rdev_return_chandef { + struct trace_entry ent; + char wiphy_name[32]; + int ret; + 
enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + char __data[0]; +}; + +struct trace_event_raw_rdev_start_nan { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + u8 master_pref; + u8 bands; + char __data[0]; +}; + +struct trace_event_raw_rdev_nan_change_conf { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + u8 master_pref; + u8 bands; + u32 changes; + char __data[0]; +}; + +struct trace_event_raw_rdev_add_nan_func { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + u8 func_type; + u64 cookie; + char __data[0]; +}; + +struct trace_event_raw_rdev_del_nan_func { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + long: 32; + u64 cookie; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_mac_acl { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u32 acl_policy; + char __data[0]; +}; + +struct trace_event_raw_rdev_update_ft_ies { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u16 md; + u32 __data_loc_ie; + char __data[0]; +}; + +struct trace_event_raw_rdev_crit_proto_start { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + u16 proto; + u16 duration; + char __data[0]; +}; + +struct trace_event_raw_rdev_crit_proto_stop { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + char __data[0]; +}; + +struct trace_event_raw_rdev_channel_switch { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + bool radar_required; + bool block_tx; + u8 count; + u32 __data_loc_bcn_ofs; + u32 __data_loc_pres_ofs; + u8 link_id; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_qos_map { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 num_des; + u8 dscp_exception[42]; + u8 up[16]; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_ap_chanwidth { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_rdev_add_tx_ts { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + u8 tsid; + u8 user_prio; + u16 admitted_time; + char __data[0]; +}; + +struct trace_event_raw_rdev_del_tx_ts { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + u8 tsid; + char __data[0]; +}; + +struct trace_event_raw_rdev_tdls_channel_switch { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 addr[6]; + u8 oper_class; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + char __data[0]; +}; + +struct trace_event_raw_rdev_tdls_cancel_channel_switch { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 addr[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_pmk { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 aa[6]; + u8 pmk_len; + u8 pmk_r0_name_len; + u32 __data_loc_pmk; + u32 __data_loc_pmk_r0_name; + char __data[0]; +}; + 
+struct trace_event_raw_rdev_del_pmk { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 aa[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_external_auth { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 bssid[6]; + u8 ssid[33]; + u16 status; + u8 mld_addr[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_start_radar_detection { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + u32 cac_time_ms; + int link_id; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_mcast_rate { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + int mcast_rate[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_coalesce { + struct trace_entry ent; + char wiphy_name[32]; + int n_rules; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_multicast_to_unicast { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + bool enabled; + char __data[0]; +}; + +struct trace_event_raw_rdev_get_ftm_responder_stats { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + long: 32; + u64 timestamp; + u32 success_num; + u32 partial_num; + u32 failed_num; + u32 asap_num; + u32 non_asap_num; + long: 32; + u64 duration; + u32 unknown_triggers; + u32 reschedule; + u32 out_of_window; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_rdev_set_fils_aad { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 macaddr[6]; + u8 kek_len; + char __data[0]; +}; + +struct trace_event_raw_rdev_update_owe_info { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + u16 status; + u32 __data_loc_ie; + char __data[0]; +}; + +struct trace_event_raw_rdev_probe_mesh_link { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 dest[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_tid_config { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + char __data[0]; +}; + +struct trace_event_raw_rdev_reset_tid_config { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + u8 tids; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_sar_specs { + struct trace_entry ent; + char wiphy_name[32]; + u16 type; + u16 num; + char __data[0]; +}; + +struct trace_event_raw_rdev_color_change { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 count; + u16 bcn_ofs; + u16 pres_ofs; + u8 link_id; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_radar_background { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + char __data[0]; +}; + +struct trace_event_raw_rdev_del_link_station { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 mld_mac[6]; + u32 link_id; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_hw_timestamp { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 macaddr[6]; + bool enable; + char __data[0]; +}; + +struct trace_event_raw_rdev_set_ttlm { + struct trace_entry ent; + char 
wiphy_name[32]; + char name[16]; + int ifindex; + u8 dlink[16]; + u8 ulink[16]; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_return_bool { + struct trace_entry ent; + bool ret; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_netdev_mac_evt { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 macaddr[6]; + char __data[0]; +}; + +struct trace_event_raw_netdev_evt_only { + struct trace_entry ent; + char name[16]; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_send_rx_assoc { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 ap_addr[6]; + char __data[0]; +}; + +struct trace_event_raw_netdev_frame_event { + struct trace_entry ent; + char name[16]; + int ifindex; + u32 __data_loc_frame; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_tx_mlme_mgmt { + struct trace_entry ent; + char name[16]; + int ifindex; + u32 __data_loc_frame; + int reconnect; + char __data[0]; +}; + +struct trace_event_raw_netdev_mac_evt { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 mac[6]; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_send_assoc_failure { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 ap_addr[6]; + bool timeout; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_michael_mic_failure { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 addr[6]; + enum nl80211_key_type key_type; + int key_id; + u8 tsc[6]; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_ready_on_channel { + struct trace_entry ent; + u32 id; + long: 32; + u64 cookie; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + unsigned int duration; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_ready_on_channel_expired { + struct trace_entry ent; + u32 id; + long: 32; + u64 cookie; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cfg80211_tx_mgmt_expired { + struct trace_entry ent; + u32 id; + long: 32; + u64 cookie; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cfg80211_new_sta { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 mac_addr[6]; + int generation; + u32 connected_time; + u32 inactive_time; + u32 rx_bytes; + u32 tx_bytes; + u32 rx_packets; + u32 tx_packets; + u32 tx_retries; + u32 tx_failed; + u32 rx_dropped_misc; + u32 beacon_loss_count; + u16 llid; + u16 plid; + u8 plink_state; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_rx_mgmt { + struct trace_entry ent; + u32 id; + int freq; + int sig_dbm; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_mgmt_tx_status { + struct trace_entry ent; + u32 id; + long: 32; + u64 cookie; + bool ack; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cfg80211_control_port_tx_status { + struct trace_entry ent; + u32 id; + long: 32; + u64 cookie; + bool ack; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cfg80211_rx_control_port { + struct trace_entry ent; + char name[16]; + int ifindex; + int len; + u8 from[6]; + u16 proto; + bool unencrypted; + int link_id; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_cqm_rssi_notify { + struct trace_entry ent; + char name[16]; + int ifindex; + enum nl80211_cqm_rssi_threshold_event rssi_event; + s32 rssi_level; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_reg_can_beacon { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_band band; + u32 control_freq; + u32 
freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + enum nl80211_iftype iftype; + u32 prohibited_flags; + u32 permitting_flags; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_chandef_dfs_required { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_ch_switch_notify { + struct trace_entry ent; + char name[16]; + int ifindex; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_ch_switch_started_notify { + struct trace_entry ent; + char name[16]; + int ifindex; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_radar_event { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_band band; + u32 control_freq; + u32 freq_offset; + u32 width; + u32 center_freq1; + u32 freq1_offset; + u32 center_freq2; + u16 punctured; + bool offchan; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_cac_event { + struct trace_entry ent; + char name[16]; + int ifindex; + enum nl80211_radar_event evt; + unsigned int link_id; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_rx_evt { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 addr[6]; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_ibss_joined { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 bssid[6]; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_probe_status { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 addr[6]; + long: 32; + u64 cookie; + bool acked; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cfg80211_cqm_pktloss_notify { + struct trace_entry ent; + char name[16]; + int ifindex; + u8 peer[6]; + u32 num_packets; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_pmksa_candidate_notify { + struct trace_entry ent; + char name[16]; + int ifindex; + int index; + u8 bssid[6]; + bool preauth; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_report_obss_beacon { + struct trace_entry ent; + char wiphy_name[32]; + int freq; + int sig_dbm; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_tdls_oper_request { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + enum nl80211_tdls_operation oper; + u16 reason_code; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_scan_done { + struct trace_entry ent; + u32 n_channels; + u32 __data_loc_ie; + u32 rates[6]; + u32 wdev_id; + u8 wiphy_mac[6]; + bool no_cck; + bool aborted; + long: 32; + u64 scan_start_tsf; + u8 tsf_bssid[6]; + char __data[0]; +}; + +struct trace_event_raw_wiphy_id_evt { + struct trace_entry ent; + char wiphy_name[32]; + u64 id; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_get_bss { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + u8 bssid[6]; + u32 __data_loc_ssid; + enum ieee80211_bss_type bss_type; + enum ieee80211_privacy privacy; + char __data[0]; +}; + 
+struct trace_event_raw_cfg80211_inform_bss_frame { + struct trace_entry ent; + char wiphy_name[32]; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + u32 __data_loc_mgmt; + s32 signal; + long: 32; + u64 ts_boottime; + u64 parent_tsf; + u8 parent_bssid[6]; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_bss_evt { + struct trace_entry ent; + u8 bssid[6]; + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_return_uint { + struct trace_entry ent; + unsigned int ret; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_return_u32 { + struct trace_entry ent; + u32 ret; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_report_wowlan_wakeup { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + bool non_wireless; + bool disconnect; + bool magic_pkt; + bool gtk_rekey_failure; + bool eap_identity_req; + bool four_way_handshake; + bool rfkill_release; + s32 pattern_idx; + u32 packet_len; + u32 __data_loc_packet; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_ft_event { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u32 __data_loc_ies; + u8 target_ap[6]; + u32 __data_loc_ric_ies; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_stop_iface { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_pmsr_report { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + long: 32; + u64 cookie; + u8 addr[6]; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_pmsr_complete { + struct trace_entry ent; + char wiphy_name[32]; + u32 id; + long: 32; + u64 cookie; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_update_owe_info_event { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 peer[6]; + u32 __data_loc_ie; + int assoc_link_id; + u8 peer_mld_addr[6]; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_bss_color_notify { + struct trace_entry ent; + char name[16]; + int ifindex; + u32 cmd; + u8 count; + long: 32; + u64 color_bitmap; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_assoc_comeback { + struct trace_entry ent; + u32 id; + u8 ap_addr[6]; + u32 timeout; + char __data[0]; +}; + +struct trace_event_raw_link_station_add_mod { + struct trace_entry ent; + char wiphy_name[32]; + char name[16]; + int ifindex; + u8 mld_mac[6]; + u8 link_mac[6]; + u32 link_id; + u32 __data_loc_supported_rates; + u8 ht_capa[26]; + u8 vht_capa[12]; + u8 opmode_notif; + bool opmode_notif_used; + u32 __data_loc_he_capa; + u8 he_6ghz_capa[2]; + u32 __data_loc_eht_capa; + char __data[0]; +}; + +struct trace_event_raw_cfg80211_links_removed { + struct trace_entry ent; + char name[16]; + int ifindex; + u16 link_mask; + char __data[0]; +}; + +struct trace_event_data_offsets_wiphy_work_event {}; + +struct trace_event_data_offsets_wiphy_delayed_work_queue {}; + +struct trace_event_data_offsets_wiphy_work_worker_start {}; + +struct trace_event_data_offsets_rdev_suspend {}; + +struct trace_event_data_offsets_rdev_return_int {}; + +struct trace_event_data_offsets_rdev_scan {}; + +struct trace_event_data_offsets_wiphy_only_evt {}; + +struct trace_event_data_offsets_wiphy_enabled_evt {}; + +struct trace_event_data_offsets_rdev_add_virtual_intf { + u32 vir_intf_name; + const void *vir_intf_name_ptr_; +}; + +struct trace_event_data_offsets_wiphy_wdev_evt {}; + +struct trace_event_data_offsets_wiphy_wdev_cookie_evt {}; + +struct 
trace_event_data_offsets_rdev_change_virtual_intf {}; + +struct trace_event_data_offsets_key_handle {}; + +struct trace_event_data_offsets_rdev_add_key {}; + +struct trace_event_data_offsets_rdev_set_default_key {}; + +struct trace_event_data_offsets_rdev_set_default_mgmt_key {}; + +struct trace_event_data_offsets_rdev_set_default_beacon_key {}; + +struct trace_event_data_offsets_rdev_start_ap {}; + +struct trace_event_data_offsets_rdev_change_beacon { + u32 head; + const void *head_ptr_; + u32 tail; + const void *tail_ptr_; + u32 beacon_ies; + const void *beacon_ies_ptr_; + u32 proberesp_ies; + const void *proberesp_ies_ptr_; + u32 assocresp_ies; + const void *assocresp_ies_ptr_; + u32 probe_resp; + const void *probe_resp_ptr_; +}; + +struct trace_event_data_offsets_rdev_stop_ap {}; + +struct trace_event_data_offsets_wiphy_netdev_evt {}; + +struct trace_event_data_offsets_rdev_end_cac {}; + +struct trace_event_data_offsets_station_add_change { + u32 supported_rates; + const void *supported_rates_ptr_; + u32 ext_capab; + const void *ext_capab_ptr_; + u32 supported_channels; + const void *supported_channels_ptr_; + u32 supported_oper_classes; + const void *supported_oper_classes_ptr_; +}; + +struct trace_event_data_offsets_wiphy_netdev_mac_evt {}; + +struct trace_event_data_offsets_station_del {}; + +struct trace_event_data_offsets_rdev_dump_station {}; + +struct trace_event_data_offsets_rdev_return_int_station_info {}; + +struct trace_event_data_offsets_mpath_evt {}; + +struct trace_event_data_offsets_rdev_dump_mpath {}; + +struct trace_event_data_offsets_rdev_get_mpp {}; + +struct trace_event_data_offsets_rdev_dump_mpp {}; + +struct trace_event_data_offsets_rdev_return_int_mpath_info {}; + +struct trace_event_data_offsets_rdev_return_int_mesh_config {}; + +struct trace_event_data_offsets_rdev_update_mesh_config {}; + +struct trace_event_data_offsets_rdev_join_mesh {}; + +struct trace_event_data_offsets_rdev_change_bss {}; + +struct trace_event_data_offsets_rdev_inform_bss {}; + +struct trace_event_data_offsets_rdev_set_txq_params {}; + +struct trace_event_data_offsets_rdev_libertas_set_mesh_channel {}; + +struct trace_event_data_offsets_rdev_set_monitor_channel {}; + +struct trace_event_data_offsets_rdev_auth {}; + +struct trace_event_data_offsets_rdev_assoc { + u32 elements; + const void *elements_ptr_; + u32 fils_kek; + const void *fils_kek_ptr_; + u32 fils_nonces; + const void *fils_nonces_ptr_; +}; + +struct trace_event_data_offsets_rdev_deauth {}; + +struct trace_event_data_offsets_rdev_disassoc {}; + +struct trace_event_data_offsets_rdev_mgmt_tx_cancel_wait {}; + +struct trace_event_data_offsets_rdev_set_power_mgmt {}; + +struct trace_event_data_offsets_rdev_connect {}; + +struct trace_event_data_offsets_rdev_update_connect_params {}; + +struct trace_event_data_offsets_rdev_set_cqm_rssi_config {}; + +struct trace_event_data_offsets_rdev_set_cqm_rssi_range_config {}; + +struct trace_event_data_offsets_rdev_set_cqm_txe_config {}; + +struct trace_event_data_offsets_rdev_disconnect {}; + +struct trace_event_data_offsets_rdev_join_ibss {}; + +struct trace_event_data_offsets_rdev_join_ocb {}; + +struct trace_event_data_offsets_rdev_set_wiphy_params {}; + +struct trace_event_data_offsets_rdev_set_tx_power {}; + +struct trace_event_data_offsets_rdev_return_int_int {}; + +struct trace_event_data_offsets_rdev_set_bitrate_mask {}; + +struct trace_event_data_offsets_rdev_update_mgmt_frame_registrations {}; + +struct trace_event_data_offsets_rdev_return_int_tx_rx {}; + +struct 
trace_event_data_offsets_rdev_return_void_tx_rx {}; + +struct trace_event_data_offsets_tx_rx_evt {}; + +struct trace_event_data_offsets_wiphy_netdev_id_evt {}; + +struct trace_event_data_offsets_rdev_tdls_mgmt { + u32 buf; + const void *buf_ptr_; +}; + +struct trace_event_data_offsets_rdev_dump_survey {}; + +struct trace_event_data_offsets_rdev_return_int_survey_info {}; + +struct trace_event_data_offsets_rdev_tdls_oper {}; + +struct trace_event_data_offsets_rdev_pmksa {}; + +struct trace_event_data_offsets_rdev_probe_client {}; + +struct trace_event_data_offsets_rdev_remain_on_channel {}; + +struct trace_event_data_offsets_rdev_return_int_cookie {}; + +struct trace_event_data_offsets_rdev_cancel_remain_on_channel {}; + +struct trace_event_data_offsets_rdev_mgmt_tx {}; + +struct trace_event_data_offsets_rdev_tx_control_port {}; + +struct trace_event_data_offsets_rdev_set_noack_map {}; + +struct trace_event_data_offsets_wiphy_wdev_link_evt {}; + +struct trace_event_data_offsets_rdev_return_chandef {}; + +struct trace_event_data_offsets_rdev_start_nan {}; + +struct trace_event_data_offsets_rdev_nan_change_conf {}; + +struct trace_event_data_offsets_rdev_add_nan_func {}; + +struct trace_event_data_offsets_rdev_del_nan_func {}; + +struct trace_event_data_offsets_rdev_set_mac_acl {}; + +struct trace_event_data_offsets_rdev_update_ft_ies { + u32 ie; + const void *ie_ptr_; +}; + +struct trace_event_data_offsets_rdev_crit_proto_start {}; + +struct trace_event_data_offsets_rdev_crit_proto_stop {}; + +struct trace_event_data_offsets_rdev_channel_switch { + u32 bcn_ofs; + const void *bcn_ofs_ptr_; + u32 pres_ofs; + const void *pres_ofs_ptr_; +}; + +struct trace_event_data_offsets_rdev_set_qos_map {}; + +struct trace_event_data_offsets_rdev_set_ap_chanwidth {}; + +struct trace_event_data_offsets_rdev_add_tx_ts {}; + +struct trace_event_data_offsets_rdev_del_tx_ts {}; + +struct trace_event_data_offsets_rdev_tdls_channel_switch {}; + +struct trace_event_data_offsets_rdev_tdls_cancel_channel_switch {}; + +struct trace_event_data_offsets_rdev_set_pmk { + u32 pmk; + const void *pmk_ptr_; + u32 pmk_r0_name; + const void *pmk_r0_name_ptr_; +}; + +struct trace_event_data_offsets_rdev_del_pmk {}; + +struct trace_event_data_offsets_rdev_external_auth {}; + +struct trace_event_data_offsets_rdev_start_radar_detection {}; + +struct trace_event_data_offsets_rdev_set_mcast_rate {}; + +struct trace_event_data_offsets_rdev_set_coalesce {}; + +struct trace_event_data_offsets_rdev_set_multicast_to_unicast {}; + +struct trace_event_data_offsets_rdev_get_ftm_responder_stats {}; + +struct trace_event_data_offsets_rdev_set_fils_aad {}; + +struct trace_event_data_offsets_rdev_update_owe_info { + u32 ie; + const void *ie_ptr_; +}; + +struct trace_event_data_offsets_rdev_probe_mesh_link {}; + +struct trace_event_data_offsets_rdev_set_tid_config {}; + +struct trace_event_data_offsets_rdev_reset_tid_config {}; + +struct trace_event_data_offsets_rdev_set_sar_specs {}; + +struct trace_event_data_offsets_rdev_color_change {}; + +struct trace_event_data_offsets_rdev_set_radar_background {}; + +struct trace_event_data_offsets_rdev_del_link_station {}; + +struct trace_event_data_offsets_rdev_set_hw_timestamp {}; + +struct trace_event_data_offsets_rdev_set_ttlm {}; + +struct trace_event_data_offsets_cfg80211_return_bool {}; + +struct trace_event_data_offsets_cfg80211_netdev_mac_evt {}; + +struct trace_event_data_offsets_netdev_evt_only {}; + +struct trace_event_data_offsets_cfg80211_send_rx_assoc {}; + +struct 
trace_event_data_offsets_netdev_frame_event { + u32 frame; + const void *frame_ptr_; +}; + +struct trace_event_data_offsets_cfg80211_tx_mlme_mgmt { + u32 frame; + const void *frame_ptr_; +}; + +struct trace_event_data_offsets_netdev_mac_evt {}; + +struct trace_event_data_offsets_cfg80211_send_assoc_failure {}; + +struct trace_event_data_offsets_cfg80211_michael_mic_failure {}; + +struct trace_event_data_offsets_cfg80211_ready_on_channel {}; + +struct trace_event_data_offsets_cfg80211_ready_on_channel_expired {}; + +struct trace_event_data_offsets_cfg80211_tx_mgmt_expired {}; + +struct trace_event_data_offsets_cfg80211_new_sta {}; + +struct trace_event_data_offsets_cfg80211_rx_mgmt {}; + +struct trace_event_data_offsets_cfg80211_mgmt_tx_status {}; + +struct trace_event_data_offsets_cfg80211_control_port_tx_status {}; + +struct trace_event_data_offsets_cfg80211_rx_control_port {}; + +struct trace_event_data_offsets_cfg80211_cqm_rssi_notify {}; + +struct trace_event_data_offsets_cfg80211_reg_can_beacon {}; + +struct trace_event_data_offsets_cfg80211_chandef_dfs_required {}; + +struct trace_event_data_offsets_cfg80211_ch_switch_notify {}; + +struct trace_event_data_offsets_cfg80211_ch_switch_started_notify {}; + +struct trace_event_data_offsets_cfg80211_radar_event {}; + +struct trace_event_data_offsets_cfg80211_cac_event {}; + +struct trace_event_data_offsets_cfg80211_rx_evt {}; + +struct trace_event_data_offsets_cfg80211_ibss_joined {}; + +struct trace_event_data_offsets_cfg80211_probe_status {}; + +struct trace_event_data_offsets_cfg80211_cqm_pktloss_notify {}; + +struct trace_event_data_offsets_cfg80211_pmksa_candidate_notify {}; + +struct trace_event_data_offsets_cfg80211_report_obss_beacon {}; + +struct trace_event_data_offsets_cfg80211_tdls_oper_request {}; + +struct trace_event_data_offsets_cfg80211_scan_done { + u32 ie; + const void *ie_ptr_; +}; + +struct trace_event_data_offsets_wiphy_id_evt {}; + +struct trace_event_data_offsets_cfg80211_get_bss { + u32 ssid; + const void *ssid_ptr_; +}; + +struct trace_event_data_offsets_cfg80211_inform_bss_frame { + u32 mgmt; + const void *mgmt_ptr_; +}; + +struct trace_event_data_offsets_cfg80211_bss_evt {}; + +struct trace_event_data_offsets_cfg80211_return_uint {}; + +struct trace_event_data_offsets_cfg80211_return_u32 {}; + +struct trace_event_data_offsets_cfg80211_report_wowlan_wakeup { + u32 packet; + const void *packet_ptr_; +}; + +struct trace_event_data_offsets_cfg80211_ft_event { + u32 ies; + const void *ies_ptr_; + u32 ric_ies; + const void *ric_ies_ptr_; +}; + +struct trace_event_data_offsets_cfg80211_stop_iface {}; + +struct trace_event_data_offsets_cfg80211_pmsr_report {}; + +struct trace_event_data_offsets_cfg80211_pmsr_complete {}; + +struct trace_event_data_offsets_cfg80211_update_owe_info_event { + u32 ie; + const void *ie_ptr_; +}; + +struct trace_event_data_offsets_cfg80211_bss_color_notify {}; + +struct trace_event_data_offsets_cfg80211_assoc_comeback {}; + +struct trace_event_data_offsets_link_station_add_mod { + u32 supported_rates; + const void *supported_rates_ptr_; + u32 he_capa; + const void *he_capa_ptr_; + u32 eht_capa; + const void *eht_capa_ptr_; +}; + +struct trace_event_data_offsets_cfg80211_links_removed {}; + +typedef void (*btf_trace_wiphy_work_queue)(void *, struct wiphy *, struct wiphy_work *); + +typedef void (*btf_trace_wiphy_work_run)(void *, struct wiphy *, struct wiphy_work *); + +typedef void (*btf_trace_wiphy_work_cancel)(void *, struct wiphy *, struct wiphy_work *); + +typedef void 
(*btf_trace_wiphy_work_flush)(void *, struct wiphy *, struct wiphy_work *); + +typedef void (*btf_trace_wiphy_delayed_work_queue)(void *, struct wiphy *, struct wiphy_work *, long unsigned int); + +typedef void (*btf_trace_wiphy_work_worker_start)(void *, struct wiphy *); + +typedef void (*btf_trace_rdev_suspend)(void *, struct wiphy *, struct cfg80211_wowlan *); + +typedef void (*btf_trace_rdev_return_int)(void *, struct wiphy *, int); + +typedef void (*btf_trace_rdev_scan)(void *, struct wiphy *, struct cfg80211_scan_request *); + +typedef void (*btf_trace_rdev_resume)(void *, struct wiphy *); + +typedef void (*btf_trace_rdev_return_void)(void *, struct wiphy *); + +typedef void (*btf_trace_rdev_get_antenna)(void *, struct wiphy *); + +typedef void (*btf_trace_rdev_rfkill_poll)(void *, struct wiphy *); + +typedef void (*btf_trace_rdev_set_wakeup)(void *, struct wiphy *, bool); + +typedef void (*btf_trace_rdev_add_virtual_intf)(void *, struct wiphy *, char *, enum nl80211_iftype); + +typedef void (*btf_trace_rdev_return_wdev)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_rdev_del_virtual_intf)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_rdev_change_virtual_intf)(void *, struct wiphy *, struct net_device *, enum nl80211_iftype); + +typedef void (*btf_trace_rdev_get_key)(void *, struct wiphy *, struct net_device *, int, u8, bool, const u8 *); + +typedef void (*btf_trace_rdev_del_key)(void *, struct wiphy *, struct net_device *, int, u8, bool, const u8 *); + +typedef void (*btf_trace_rdev_add_key)(void *, struct wiphy *, struct net_device *, int, u8, bool, const u8 *, u8); + +typedef void (*btf_trace_rdev_set_default_key)(void *, struct wiphy *, struct net_device *, int, u8, bool, bool); + +typedef void (*btf_trace_rdev_set_default_mgmt_key)(void *, struct wiphy *, struct net_device *, int, u8); + +typedef void (*btf_trace_rdev_set_default_beacon_key)(void *, struct wiphy *, struct net_device *, int, u8); + +typedef void (*btf_trace_rdev_start_ap)(void *, struct wiphy *, struct net_device *, struct cfg80211_ap_settings *); + +typedef void (*btf_trace_rdev_change_beacon)(void *, struct wiphy *, struct net_device *, struct cfg80211_ap_update *); + +typedef void (*btf_trace_rdev_stop_ap)(void *, struct wiphy *, struct net_device *, unsigned int); + +typedef void (*btf_trace_rdev_set_rekey_data)(void *, struct wiphy *, struct net_device *); + +typedef void (*btf_trace_rdev_get_mesh_config)(void *, struct wiphy *, struct net_device *); + +typedef void (*btf_trace_rdev_leave_mesh)(void *, struct wiphy *, struct net_device *); + +typedef void (*btf_trace_rdev_leave_ibss)(void *, struct wiphy *, struct net_device *); + +typedef void (*btf_trace_rdev_leave_ocb)(void *, struct wiphy *, struct net_device *); + +typedef void (*btf_trace_rdev_flush_pmksa)(void *, struct wiphy *, struct net_device *); + +typedef void (*btf_trace_rdev_end_cac)(void *, struct wiphy *, struct net_device *, unsigned int); + +typedef void (*btf_trace_rdev_add_station)(void *, struct wiphy *, struct net_device *, u8 *, struct station_parameters *); + +typedef void (*btf_trace_rdev_change_station)(void *, struct wiphy *, struct net_device *, u8 *, struct station_parameters *); + +typedef void (*btf_trace_rdev_del_station)(void *, struct wiphy *, struct net_device *, struct station_del_parameters *); + +typedef void (*btf_trace_rdev_get_station)(void *, struct wiphy *, struct net_device *, const u8 *); + +typedef void (*btf_trace_rdev_del_mpath)(void *, struct wiphy *, 
struct net_device *, const u8 *); + +typedef void (*btf_trace_rdev_dump_station)(void *, struct wiphy *, struct net_device *, int, u8 *); + +typedef void (*btf_trace_rdev_return_int_station_info)(void *, struct wiphy *, int, struct station_info *); + +typedef void (*btf_trace_rdev_add_mpath)(void *, struct wiphy *, struct net_device *, u8 *, u8 *); + +typedef void (*btf_trace_rdev_change_mpath)(void *, struct wiphy *, struct net_device *, u8 *, u8 *); + +typedef void (*btf_trace_rdev_get_mpath)(void *, struct wiphy *, struct net_device *, u8 *, u8 *); + +typedef void (*btf_trace_rdev_dump_mpath)(void *, struct wiphy *, struct net_device *, int, u8 *, u8 *); + +typedef void (*btf_trace_rdev_get_mpp)(void *, struct wiphy *, struct net_device *, u8 *, u8 *); + +typedef void (*btf_trace_rdev_dump_mpp)(void *, struct wiphy *, struct net_device *, int, u8 *, u8 *); + +typedef void (*btf_trace_rdev_return_int_mpath_info)(void *, struct wiphy *, int, struct mpath_info *); + +typedef void (*btf_trace_rdev_return_int_mesh_config)(void *, struct wiphy *, int, struct mesh_config *); + +typedef void (*btf_trace_rdev_update_mesh_config)(void *, struct wiphy *, struct net_device *, u32, const struct mesh_config *); + +typedef void (*btf_trace_rdev_join_mesh)(void *, struct wiphy *, struct net_device *, const struct mesh_config *, const struct mesh_setup *); + +typedef void (*btf_trace_rdev_change_bss)(void *, struct wiphy *, struct net_device *, struct bss_parameters *); + +typedef void (*btf_trace_rdev_inform_bss)(void *, struct wiphy *, struct cfg80211_bss *); + +typedef void (*btf_trace_rdev_set_txq_params)(void *, struct wiphy *, struct net_device *, struct ieee80211_txq_params *); + +typedef void (*btf_trace_rdev_libertas_set_mesh_channel)(void *, struct wiphy *, struct net_device *, struct ieee80211_channel *); + +typedef void (*btf_trace_rdev_set_monitor_channel)(void *, struct wiphy *, struct net_device *, struct cfg80211_chan_def *); + +typedef void (*btf_trace_rdev_auth)(void *, struct wiphy *, struct net_device *, struct cfg80211_auth_request *); + +typedef void (*btf_trace_rdev_assoc)(void *, struct wiphy *, struct net_device *, struct cfg80211_assoc_request *); + +typedef void (*btf_trace_rdev_deauth)(void *, struct wiphy *, struct net_device *, struct cfg80211_deauth_request *); + +typedef void (*btf_trace_rdev_disassoc)(void *, struct wiphy *, struct net_device *, struct cfg80211_disassoc_request *); + +typedef void (*btf_trace_rdev_mgmt_tx_cancel_wait)(void *, struct wiphy *, struct wireless_dev *, u64); + +typedef void (*btf_trace_rdev_set_power_mgmt)(void *, struct wiphy *, struct net_device *, bool, int); + +typedef void (*btf_trace_rdev_connect)(void *, struct wiphy *, struct net_device *, struct cfg80211_connect_params *); + +typedef void (*btf_trace_rdev_update_connect_params)(void *, struct wiphy *, struct net_device *, struct cfg80211_connect_params *, u32); + +typedef void (*btf_trace_rdev_set_cqm_rssi_config)(void *, struct wiphy *, struct net_device *, s32, u32); + +typedef void (*btf_trace_rdev_set_cqm_rssi_range_config)(void *, struct wiphy *, struct net_device *, s32, s32); + +typedef void (*btf_trace_rdev_set_cqm_txe_config)(void *, struct wiphy *, struct net_device *, u32, u32, u32); + +typedef void (*btf_trace_rdev_disconnect)(void *, struct wiphy *, struct net_device *, u16); + +typedef void (*btf_trace_rdev_join_ibss)(void *, struct wiphy *, struct net_device *, struct cfg80211_ibss_params *); + +typedef void (*btf_trace_rdev_join_ocb)(void *, struct wiphy *, struct 
net_device *, const struct ocb_setup *); + +typedef void (*btf_trace_rdev_set_wiphy_params)(void *, struct wiphy *, u32); + +typedef void (*btf_trace_rdev_get_tx_power)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_rdev_set_tx_power)(void *, struct wiphy *, struct wireless_dev *, enum nl80211_tx_power_setting, int); + +typedef void (*btf_trace_rdev_return_int_int)(void *, struct wiphy *, int, int); + +typedef void (*btf_trace_rdev_set_bitrate_mask)(void *, struct wiphy *, struct net_device *, unsigned int, const u8 *, const struct cfg80211_bitrate_mask *); + +typedef void (*btf_trace_rdev_update_mgmt_frame_registrations)(void *, struct wiphy *, struct wireless_dev *, struct mgmt_frame_regs *); + +typedef void (*btf_trace_rdev_return_int_tx_rx)(void *, struct wiphy *, int, u32, u32); + +typedef void (*btf_trace_rdev_return_void_tx_rx)(void *, struct wiphy *, u32, u32, u32, u32); + +typedef void (*btf_trace_rdev_set_antenna)(void *, struct wiphy *, u32, u32); + +typedef void (*btf_trace_rdev_sched_scan_start)(void *, struct wiphy *, struct net_device *, u64); + +typedef void (*btf_trace_rdev_sched_scan_stop)(void *, struct wiphy *, struct net_device *, u64); + +typedef void (*btf_trace_rdev_tdls_mgmt)(void *, struct wiphy *, struct net_device *, u8 *, int, u8, u8, u16, u32, bool, const u8 *, size_t); + +typedef void (*btf_trace_rdev_dump_survey)(void *, struct wiphy *, struct net_device *, int); + +typedef void (*btf_trace_rdev_return_int_survey_info)(void *, struct wiphy *, int, struct survey_info *); + +typedef void (*btf_trace_rdev_tdls_oper)(void *, struct wiphy *, struct net_device *, u8 *, enum nl80211_tdls_operation); + +typedef void (*btf_trace_rdev_probe_client)(void *, struct wiphy *, struct net_device *, const u8 *); + +typedef void (*btf_trace_rdev_set_pmksa)(void *, struct wiphy *, struct net_device *, struct cfg80211_pmksa *); + +typedef void (*btf_trace_rdev_del_pmksa)(void *, struct wiphy *, struct net_device *, struct cfg80211_pmksa *); + +typedef void (*btf_trace_rdev_remain_on_channel)(void *, struct wiphy *, struct wireless_dev *, struct ieee80211_channel *, unsigned int); + +typedef void (*btf_trace_rdev_return_int_cookie)(void *, struct wiphy *, int, u64); + +typedef void (*btf_trace_rdev_cancel_remain_on_channel)(void *, struct wiphy *, struct wireless_dev *, u64); + +typedef void (*btf_trace_rdev_mgmt_tx)(void *, struct wiphy *, struct wireless_dev *, struct cfg80211_mgmt_tx_params *); + +typedef void (*btf_trace_rdev_tx_control_port)(void *, struct wiphy *, struct net_device *, const u8 *, size_t, const u8 *, __be16, bool, int); + +typedef void (*btf_trace_rdev_set_noack_map)(void *, struct wiphy *, struct net_device *, u16); + +typedef void (*btf_trace_rdev_get_channel)(void *, struct wiphy *, struct wireless_dev *, unsigned int); + +typedef void (*btf_trace_rdev_return_chandef)(void *, struct wiphy *, int, struct cfg80211_chan_def *); + +typedef void (*btf_trace_rdev_start_p2p_device)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_rdev_stop_p2p_device)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_rdev_start_nan)(void *, struct wiphy *, struct wireless_dev *, struct cfg80211_nan_conf *); + +typedef void (*btf_trace_rdev_nan_change_conf)(void *, struct wiphy *, struct wireless_dev *, struct cfg80211_nan_conf *, u32); + +typedef void (*btf_trace_rdev_stop_nan)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_rdev_add_nan_func)(void *, struct wiphy *, 
struct wireless_dev *, const struct cfg80211_nan_func *); + +typedef void (*btf_trace_rdev_del_nan_func)(void *, struct wiphy *, struct wireless_dev *, u64); + +typedef void (*btf_trace_rdev_set_mac_acl)(void *, struct wiphy *, struct net_device *, struct cfg80211_acl_data *); + +typedef void (*btf_trace_rdev_update_ft_ies)(void *, struct wiphy *, struct net_device *, struct cfg80211_update_ft_ies_params *); + +typedef void (*btf_trace_rdev_crit_proto_start)(void *, struct wiphy *, struct wireless_dev *, enum nl80211_crit_proto_id, u16); + +typedef void (*btf_trace_rdev_crit_proto_stop)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_rdev_channel_switch)(void *, struct wiphy *, struct net_device *, struct cfg80211_csa_settings *); + +typedef void (*btf_trace_rdev_set_qos_map)(void *, struct wiphy *, struct net_device *, struct cfg80211_qos_map *); + +typedef void (*btf_trace_rdev_set_ap_chanwidth)(void *, struct wiphy *, struct net_device *, unsigned int, struct cfg80211_chan_def *); + +typedef void (*btf_trace_rdev_add_tx_ts)(void *, struct wiphy *, struct net_device *, u8, const u8 *, u8, u16); + +typedef void (*btf_trace_rdev_del_tx_ts)(void *, struct wiphy *, struct net_device *, u8, const u8 *); + +typedef void (*btf_trace_rdev_tdls_channel_switch)(void *, struct wiphy *, struct net_device *, const u8 *, u8, struct cfg80211_chan_def *); + +typedef void (*btf_trace_rdev_tdls_cancel_channel_switch)(void *, struct wiphy *, struct net_device *, const u8 *); + +typedef void (*btf_trace_rdev_set_pmk)(void *, struct wiphy *, struct net_device *, struct cfg80211_pmk_conf *); + +typedef void (*btf_trace_rdev_del_pmk)(void *, struct wiphy *, struct net_device *, const u8 *); + +typedef void (*btf_trace_rdev_external_auth)(void *, struct wiphy *, struct net_device *, struct cfg80211_external_auth_params *); + +typedef void (*btf_trace_rdev_start_radar_detection)(void *, struct wiphy *, struct net_device *, struct cfg80211_chan_def *, u32, int); + +typedef void (*btf_trace_rdev_set_mcast_rate)(void *, struct wiphy *, struct net_device *, int *); + +typedef void (*btf_trace_rdev_set_coalesce)(void *, struct wiphy *, struct cfg80211_coalesce *); + +typedef void (*btf_trace_rdev_abort_scan)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_rdev_set_multicast_to_unicast)(void *, struct wiphy *, struct net_device *, const bool); + +typedef void (*btf_trace_rdev_get_txq_stats)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_rdev_get_ftm_responder_stats)(void *, struct wiphy *, struct net_device *, struct cfg80211_ftm_responder_stats *); + +typedef void (*btf_trace_rdev_start_pmsr)(void *, struct wiphy *, struct wireless_dev *, u64); + +typedef void (*btf_trace_rdev_abort_pmsr)(void *, struct wiphy *, struct wireless_dev *, u64); + +typedef void (*btf_trace_rdev_set_fils_aad)(void *, struct wiphy *, struct net_device *, struct cfg80211_fils_aad *); + +typedef void (*btf_trace_rdev_update_owe_info)(void *, struct wiphy *, struct net_device *, struct cfg80211_update_owe_info *); + +typedef void (*btf_trace_rdev_probe_mesh_link)(void *, struct wiphy *, struct net_device *, const u8 *, const u8 *, size_t); + +typedef void (*btf_trace_rdev_set_tid_config)(void *, struct wiphy *, struct net_device *, struct cfg80211_tid_config *); + +typedef void (*btf_trace_rdev_reset_tid_config)(void *, struct wiphy *, struct net_device *, const u8 *, u8); + +typedef void (*btf_trace_rdev_set_sar_specs)(void *, struct wiphy *, struct 
cfg80211_sar_specs *); + +typedef void (*btf_trace_rdev_color_change)(void *, struct wiphy *, struct net_device *, struct cfg80211_color_change_settings *); + +typedef void (*btf_trace_rdev_set_radar_background)(void *, struct wiphy *, struct cfg80211_chan_def *); + +typedef void (*btf_trace_rdev_add_intf_link)(void *, struct wiphy *, struct wireless_dev *, unsigned int); + +typedef void (*btf_trace_rdev_del_intf_link)(void *, struct wiphy *, struct wireless_dev *, unsigned int); + +typedef void (*btf_trace_rdev_del_link_station)(void *, struct wiphy *, struct net_device *, struct link_station_del_parameters *); + +typedef void (*btf_trace_rdev_set_hw_timestamp)(void *, struct wiphy *, struct net_device *, struct cfg80211_set_hw_timestamp *); + +typedef void (*btf_trace_rdev_set_ttlm)(void *, struct wiphy *, struct net_device *, struct cfg80211_ttlm_params *); + +typedef void (*btf_trace_cfg80211_return_bool)(void *, bool); + +typedef void (*btf_trace_cfg80211_notify_new_peer_candidate)(void *, struct net_device *, const u8 *); + +typedef void (*btf_trace_cfg80211_send_rx_auth)(void *, struct net_device *); + +typedef void (*btf_trace_cfg80211_send_rx_assoc)(void *, struct net_device *, const struct cfg80211_rx_assoc_resp_data *); + +typedef void (*btf_trace_cfg80211_rx_unprot_mlme_mgmt)(void *, struct net_device *, const u8 *, int); + +typedef void (*btf_trace_cfg80211_rx_mlme_mgmt)(void *, struct net_device *, const u8 *, int); + +typedef void (*btf_trace_cfg80211_tx_mlme_mgmt)(void *, struct net_device *, const u8 *, int, bool); + +typedef void (*btf_trace_cfg80211_send_auth_timeout)(void *, struct net_device *, const u8 *); + +typedef void (*btf_trace_cfg80211_send_assoc_failure)(void *, struct net_device *, struct cfg80211_assoc_failure *); + +typedef void (*btf_trace_cfg80211_michael_mic_failure)(void *, struct net_device *, const u8 *, enum nl80211_key_type, int, const u8 *); + +typedef void (*btf_trace_cfg80211_ready_on_channel)(void *, struct wireless_dev *, u64, struct ieee80211_channel *, unsigned int); + +typedef void (*btf_trace_cfg80211_ready_on_channel_expired)(void *, struct wireless_dev *, u64, struct ieee80211_channel *); + +typedef void (*btf_trace_cfg80211_tx_mgmt_expired)(void *, struct wireless_dev *, u64, struct ieee80211_channel *); + +typedef void (*btf_trace_cfg80211_new_sta)(void *, struct net_device *, const u8 *, struct station_info *); + +typedef void (*btf_trace_cfg80211_del_sta)(void *, struct net_device *, const u8 *); + +typedef void (*btf_trace_cfg80211_rx_mgmt)(void *, struct wireless_dev *, struct cfg80211_rx_info *); + +typedef void (*btf_trace_cfg80211_mgmt_tx_status)(void *, struct wireless_dev *, u64, bool); + +typedef void (*btf_trace_cfg80211_control_port_tx_status)(void *, struct wireless_dev *, u64, bool); + +typedef void (*btf_trace_cfg80211_rx_control_port)(void *, struct net_device *, struct sk_buff *, bool, int); + +typedef void (*btf_trace_cfg80211_cqm_rssi_notify)(void *, struct net_device *, enum nl80211_cqm_rssi_threshold_event, s32); + +typedef void (*btf_trace_cfg80211_reg_can_beacon)(void *, struct wiphy *, struct cfg80211_chan_def *, enum nl80211_iftype, u32, u32); + +typedef void (*btf_trace_cfg80211_chandef_dfs_required)(void *, struct wiphy *, struct cfg80211_chan_def *); + +typedef void (*btf_trace_cfg80211_ch_switch_notify)(void *, struct net_device *, struct cfg80211_chan_def *, unsigned int); + +typedef void (*btf_trace_cfg80211_ch_switch_started_notify)(void *, struct net_device *, struct cfg80211_chan_def *, unsigned int); + 
+typedef void (*btf_trace_cfg80211_radar_event)(void *, struct wiphy *, struct cfg80211_chan_def *, bool); + +typedef void (*btf_trace_cfg80211_cac_event)(void *, struct net_device *, enum nl80211_radar_event, unsigned int); + +typedef void (*btf_trace_cfg80211_rx_spurious_frame)(void *, struct net_device *, const u8 *); + +typedef void (*btf_trace_cfg80211_rx_unexpected_4addr_frame)(void *, struct net_device *, const u8 *); + +typedef void (*btf_trace_cfg80211_ibss_joined)(void *, struct net_device *, const u8 *, struct ieee80211_channel *); + +typedef void (*btf_trace_cfg80211_probe_status)(void *, struct net_device *, const u8 *, u64, bool); + +typedef void (*btf_trace_cfg80211_cqm_pktloss_notify)(void *, struct net_device *, const u8 *, u32); + +typedef void (*btf_trace_cfg80211_gtk_rekey_notify)(void *, struct net_device *, const u8 *); + +typedef void (*btf_trace_cfg80211_pmksa_candidate_notify)(void *, struct net_device *, int, const u8 *, bool); + +typedef void (*btf_trace_cfg80211_report_obss_beacon)(void *, struct wiphy *, const u8 *, size_t, int, int); + +typedef void (*btf_trace_cfg80211_tdls_oper_request)(void *, struct wiphy *, struct net_device *, const u8 *, enum nl80211_tdls_operation, u16); + +typedef void (*btf_trace_cfg80211_scan_done)(void *, struct cfg80211_scan_request *, struct cfg80211_scan_info *); + +typedef void (*btf_trace_cfg80211_sched_scan_stopped)(void *, struct wiphy *, u64); + +typedef void (*btf_trace_cfg80211_sched_scan_results)(void *, struct wiphy *, u64); + +typedef void (*btf_trace_cfg80211_get_bss)(void *, struct wiphy *, struct ieee80211_channel *, const u8 *, const u8 *, size_t, enum ieee80211_bss_type, enum ieee80211_privacy); + +typedef void (*btf_trace_cfg80211_inform_bss_frame)(void *, struct wiphy *, struct cfg80211_inform_bss *, struct ieee80211_mgmt *, size_t); + +typedef void (*btf_trace_cfg80211_return_bss)(void *, struct cfg80211_bss *); + +typedef void (*btf_trace_cfg80211_return_uint)(void *, unsigned int); + +typedef void (*btf_trace_cfg80211_return_u32)(void *, u32); + +typedef void (*btf_trace_cfg80211_report_wowlan_wakeup)(void *, struct wiphy *, struct wireless_dev *, struct cfg80211_wowlan_wakeup *); + +typedef void (*btf_trace_cfg80211_ft_event)(void *, struct wiphy *, struct net_device *, struct cfg80211_ft_event_params *); + +typedef void (*btf_trace_cfg80211_stop_iface)(void *, struct wiphy *, struct wireless_dev *); + +typedef void (*btf_trace_cfg80211_pmsr_report)(void *, struct wiphy *, struct wireless_dev *, u64, const u8 *); + +typedef void (*btf_trace_cfg80211_pmsr_complete)(void *, struct wiphy *, struct wireless_dev *, u64); + +typedef void (*btf_trace_cfg80211_update_owe_info_event)(void *, struct wiphy *, struct net_device *, struct cfg80211_update_owe_info *); + +typedef void (*btf_trace_cfg80211_bss_color_notify)(void *, struct net_device *, enum nl80211_commands, u8, u64); + +typedef void (*btf_trace_cfg80211_assoc_comeback)(void *, struct wireless_dev *, const u8 *, u32); + +typedef void (*btf_trace_rdev_add_link_station)(void *, struct wiphy *, struct net_device *, struct link_station_parameters *); + +typedef void (*btf_trace_rdev_mod_link_station)(void *, struct wiphy *, struct net_device *, struct link_station_parameters *); + +typedef void (*btf_trace_cfg80211_links_removed)(void *, struct net_device *, u16); + +struct pci_sys_data; + +struct hw_pci { + struct pci_ops *ops; + int nr_controllers; + void **private_data; + int (*setup)(int, struct pci_sys_data *); + int (*scan)(int, struct pci_host_bridge *); 
+ void (*preinit)(); + void (*postinit)(); + u8 (*swizzle)(struct pci_dev *, u8 *); + int (*map_irq)(const struct pci_dev *, u8, u8); +}; + +struct pci_sys_data { + struct list_head node; + int busnr; + long: 32; + u64 mem_offset; + long unsigned int io_offset; + struct pci_bus *bus; + struct list_head resources; + struct resource io_res; + char io_res_name[12]; + u8 (*swizzle)(struct pci_dev *, u8 *); + int (*map_irq)(const struct pci_dev *, u8, u8); + void *private_data; +}; + +struct trace_event_raw_rcu_utilization { + struct trace_entry ent; + const char *s; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_future_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + long int gp_seq_req; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period_init { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + u8 level; + int grplo; + int grphi; + long unsigned int qsmask; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gpseq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_funnel_lock { + struct trace_entry ent; + const char *rcuname; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_nocb_wake { + struct trace_entry ent; + const char *rcuname; + int cpu; + const char *reason; + char __data[0]; +}; + +struct trace_event_raw_rcu_preempt_task { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_unlock_preempted_task { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_quiescent_state_report { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + long unsigned int mask; + long unsigned int qsmask; + u8 level; + int grplo; + int grphi; + u8 gp_tasks; + char __data[0]; +}; + +struct trace_event_raw_rcu_fqs { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int cpu; + const char *qsevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_stall_warning { + struct trace_entry ent; + const char *rcuname; + const char *msg; + char __data[0]; +}; + +struct trace_event_raw_rcu_watching { + struct trace_entry ent; + const char *polarity; + long int oldnesting; + long int newnesting; + int counter; + char __data[0]; +}; + +struct trace_event_raw_rcu_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + long int qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_segcb_stats { + struct trace_entry ent; + const char *ctx; + long unsigned int gp_seq[4]; + long int seglen[4]; + char __data[0]; +}; + +struct trace_event_raw_rcu_kvfree_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + long unsigned int offset; + long int qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_start { + struct trace_entry ent; + const char *rcuname; + long int qlen; + long int blimit; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kvfree_callback { + struct 
trace_entry ent; + const char *rcuname; + void *rhp; + long unsigned int offset; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kfree_bulk_callback { + struct trace_entry ent; + const char *rcuname; + long unsigned int nr_records; + void **p; + char __data[0]; +}; + +struct trace_event_raw_rcu_sr_normal { + struct trace_entry ent; + const char *rcuname; + void *rhp; + const char *srevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_end { + struct trace_entry ent; + const char *rcuname; + int callbacks_invoked; + char cb; + char nr; + char iit; + char risk; + char __data[0]; +}; + +struct trace_event_raw_rcu_torture_read { + struct trace_entry ent; + char rcutorturename[8]; + struct callback_head *rhp; + long unsigned int secs; + long unsigned int c_old; + long unsigned int c; + char __data[0]; +}; + +struct trace_event_raw_rcu_barrier { + struct trace_entry ent; + const char *rcuname; + const char *s; + int cpu; + int cnt; + long unsigned int done; + char __data[0]; +}; + +struct trace_event_data_offsets_rcu_utilization {}; + +struct trace_event_data_offsets_rcu_grace_period {}; + +struct trace_event_data_offsets_rcu_future_grace_period {}; + +struct trace_event_data_offsets_rcu_grace_period_init {}; + +struct trace_event_data_offsets_rcu_exp_grace_period {}; + +struct trace_event_data_offsets_rcu_exp_funnel_lock {}; + +struct trace_event_data_offsets_rcu_nocb_wake {}; + +struct trace_event_data_offsets_rcu_preempt_task {}; + +struct trace_event_data_offsets_rcu_unlock_preempted_task {}; + +struct trace_event_data_offsets_rcu_quiescent_state_report {}; + +struct trace_event_data_offsets_rcu_fqs {}; + +struct trace_event_data_offsets_rcu_stall_warning {}; + +struct trace_event_data_offsets_rcu_watching {}; + +struct trace_event_data_offsets_rcu_callback {}; + +struct trace_event_data_offsets_rcu_segcb_stats {}; + +struct trace_event_data_offsets_rcu_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_batch_start {}; + +struct trace_event_data_offsets_rcu_invoke_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kfree_bulk_callback {}; + +struct trace_event_data_offsets_rcu_sr_normal {}; + +struct trace_event_data_offsets_rcu_batch_end {}; + +struct trace_event_data_offsets_rcu_torture_read {}; + +struct trace_event_data_offsets_rcu_barrier {}; + +typedef void (*btf_trace_rcu_utilization)(void *, const char *); + +typedef void (*btf_trace_rcu_grace_period)(void *, const char *, long unsigned int, const char *); + +typedef void (*btf_trace_rcu_future_grace_period)(void *, const char *, long unsigned int, long unsigned int, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_grace_period_init)(void *, const char *, long unsigned int, u8, int, int, long unsigned int); + +typedef void (*btf_trace_rcu_exp_grace_period)(void *, const char *, long unsigned int, const char *); + +typedef void (*btf_trace_rcu_exp_funnel_lock)(void *, const char *, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_nocb_wake)(void *, const char *, int, const char *); + +typedef void (*btf_trace_rcu_preempt_task)(void *, const char *, int, long unsigned int); + +typedef void (*btf_trace_rcu_unlock_preempted_task)(void *, const char *, long unsigned int, int); + +typedef void (*btf_trace_rcu_quiescent_state_report)(void *, const char *, long unsigned int, long unsigned int, long unsigned int, u8, int, int, int); + +typedef void (*btf_trace_rcu_fqs)(void *, const char *, long unsigned int, int, 
const char *); + +typedef void (*btf_trace_rcu_stall_warning)(void *, const char *, const char *); + +typedef void (*btf_trace_rcu_watching)(void *, const char *, long int, long int, int); + +typedef void (*btf_trace_rcu_callback)(void *, const char *, struct callback_head *, long int); + +typedef void (*btf_trace_rcu_segcb_stats)(void *, struct rcu_segcblist *, const char *); + +typedef void (*btf_trace_rcu_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int, long int); + +typedef void (*btf_trace_rcu_batch_start)(void *, const char *, long int, long int); + +typedef void (*btf_trace_rcu_invoke_callback)(void *, const char *, struct callback_head *); + +typedef void (*btf_trace_rcu_invoke_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int); + +typedef void (*btf_trace_rcu_invoke_kfree_bulk_callback)(void *, const char *, long unsigned int, void **); + +typedef void (*btf_trace_rcu_sr_normal)(void *, const char *, struct callback_head *, const char *); + +typedef void (*btf_trace_rcu_batch_end)(void *, const char *, int, char, char, char, char); + +typedef void (*btf_trace_rcu_torture_read)(void *, const char *, struct callback_head *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_rcu_barrier)(void *, const char *, const char *, int, int, long unsigned int); + +struct rcu_tasks; + +typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *); + +typedef void (*pregp_func_t)(struct list_head *); + +typedef void (*pertask_func_t)(struct task_struct *, struct list_head *); + +typedef void (*postscan_func_t)(struct list_head *); + +typedef void (*holdouts_func_t)(struct list_head *, bool, bool *); + +typedef void (*postgp_func_t)(struct rcu_tasks *); + +struct rcu_tasks_percpu; + +struct rcu_tasks { + struct rcuwait cbs_wait; + raw_spinlock_t cbs_gbl_lock; + struct mutex tasks_gp_mutex; + int gp_state; + int gp_sleep; + int init_fract; + long unsigned int gp_jiffies; + long unsigned int gp_start; + long unsigned int tasks_gp_seq; + long unsigned int n_ipis; + long unsigned int n_ipis_fails; + struct task_struct *kthread_ptr; + long unsigned int lazy_jiffies; + rcu_tasks_gp_func_t gp_func; + pregp_func_t pregp_func; + pertask_func_t pertask_func; + postscan_func_t postscan_func; + holdouts_func_t holdouts_func; + postgp_func_t postgp_func; + call_rcu_func_t call_func; + unsigned int wait_state; + struct rcu_tasks_percpu *rtpcpu; + struct rcu_tasks_percpu **rtpcp_array; + int percpu_enqueue_shift; + int percpu_enqueue_lim; + int percpu_dequeue_lim; + long unsigned int percpu_dequeue_gpseq; + struct mutex barrier_q_mutex; + atomic_t barrier_q_count; + struct completion barrier_q_completion; + long unsigned int barrier_q_seq; + long unsigned int barrier_q_start; + char *name; + char *kname; +}; + +struct rcu_tasks_percpu { + struct rcu_segcblist cblist; + raw_spinlock_t lock; + long unsigned int rtp_jiffies; + long unsigned int rtp_n_lock_retries; + struct timer_list lazy_timer; + unsigned int urgent_gp; + struct work_struct rtp_work; + struct irq_work rtp_irq_work; + struct callback_head barrier_q_head; + struct list_head rtp_blkd_tasks; + struct list_head rtp_exit_list; + int cpu; + int index; + struct rcu_tasks *rtpp; +}; + +struct trc_stall_chk_rdr { + int nesting; + int ipi_to_cpu; + u8 needqs; +}; + +struct tracer_stat { + const char *name; + void * (*stat_start)(struct tracer_stat *); + void * (*stat_next)(void *, int); + cmp_func_t stat_cmp; + int (*stat_show)(struct seq_file *, void *); + void 
(*stat_release)(void *); + int (*stat_headers)(struct seq_file *); +}; + +struct stat_node { + struct rb_node node; + void *stat; +}; + +struct stat_session { + struct list_head session_list; + struct tracer_stat *ts; + struct rb_root stat_root; + struct mutex stat_mutex; + struct dentry *file; +}; + +struct bpf_cgroup_storage_map { + struct bpf_map map; + spinlock_t lock; + struct rb_root root; + struct list_head list; +}; + +enum vm_stat_item { + NR_DIRTY_THRESHOLD = 0, + NR_DIRTY_BG_THRESHOLD = 1, + NR_MEMMAP_PAGES = 2, + NR_MEMMAP_BOOT_PAGES = 3, + NR_VM_STAT_ITEMS = 4, +}; + +struct contig_page_info { + long unsigned int free_pages; + long unsigned int free_blocks_total; + long unsigned int free_blocks_suitable; +}; + +struct lruvec_stats_percpu { + long int state[30]; + long int state_prev[30]; +}; + +struct lruvec_stats { + long int state[30]; + long int state_local[30]; + long int state_pending[30]; +}; + +struct memcg_vmstats { + long int state[37]; + long unsigned int events[20]; + long int state_local[37]; + long unsigned int events_local[20]; + long int state_pending[37]; + long unsigned int events_pending[20]; + long: 32; + atomic64_t stats_updates; +}; + +struct memcg_vmstats_percpu { + unsigned int stats_updates; + struct memcg_vmstats_percpu *parent; + struct memcg_vmstats *vmstats; + long int state[37]; + long unsigned int events[20]; + long int state_prev[37]; + long unsigned int events_prev[20]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct trace_event_raw_memcg_rstat_stats { + struct trace_entry ent; + u64 id; + int item; + int val; + char __data[0]; +}; + +struct trace_event_raw_memcg_rstat_events { + struct trace_entry ent; + u64 id; + int item; + long unsigned int val; + char __data[0]; +}; + +struct trace_event_raw_memcg_flush_stats { + struct trace_entry ent; + u64 id; + s64 stats_updates; + bool force; + bool needs_flush; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_memcg_rstat_stats {}; + +struct trace_event_data_offsets_memcg_rstat_events {}; + +struct trace_event_data_offsets_memcg_flush_stats {}; + +typedef void (*btf_trace_mod_memcg_state)(void *, struct mem_cgroup *, int, int); + +typedef void (*btf_trace_mod_memcg_lruvec_state)(void *, struct mem_cgroup *, int, int); + +typedef void (*btf_trace_count_memcg_events)(void *, struct mem_cgroup *, int, long unsigned int); + +typedef void (*btf_trace_memcg_flush_stats)(void *, struct mem_cgroup *, s64, bool, bool); + +struct memory_stat { + const char *name; + unsigned int idx; +}; + +struct memcg_stock_pcp { + local_lock_t stock_lock; + struct mem_cgroup *cached; + unsigned int nr_pages; + struct obj_cgroup *cached_objcg; + struct pglist_data *cached_pgdat; + unsigned int nr_bytes; + int nr_slab_reclaimable_b; + int nr_slab_unreclaimable_b; + struct work_struct work; + long unsigned int flags; +}; + +struct aggregate_control { + long int *aggregate; + long int *local; + long int *pending; + long int *ppending; + long int *cstat; + long int *cstat_prev; + int size; +}; + +enum { + MEMORY_RECLAIM_SWAPPINESS = 0, + MEMORY_RECLAIM_NULL = 1, +}; + +struct uncharge_gather { + struct mem_cgroup *memcg; + long unsigned int nr_memory; + long unsigned int pgpgout; + long unsigned int nr_kmem; + int nid; +}; + +struct iomap_dio { + struct kiocb *iocb; + const struct iomap_dio_ops *dops; + loff_t i_size; + loff_t size; + atomic_t ref; + unsigned int flags; + int error; + size_t done_before; + bool 
wait_for_completion; + union { + struct { + struct iov_iter *iter; + struct task_struct *waiter; + } submit; + struct { + struct work_struct work; + } aio; + }; + long: 32; +}; + +struct ext4_orphan_block_tail { + __le32 ob_magic; + __le32 ob_checksum; +}; + +enum { + AUTOFS_IOC_READY_CMD = 96, + AUTOFS_IOC_FAIL_CMD = 97, + AUTOFS_IOC_CATATONIC_CMD = 98, + AUTOFS_IOC_PROTOVER_CMD = 99, + AUTOFS_IOC_SETTIMEOUT_CMD = 100, + AUTOFS_IOC_EXPIRE_CMD = 101, +}; + +enum { + AUTOFS_IOC_EXPIRE_MULTI_CMD = 102, + AUTOFS_IOC_PROTOSUBVER_CMD = 103, + AUTOFS_IOC_ASKUMOUNT_CMD = 112, +}; + +struct btrfs_compr_pool { + struct shrinker *shrinker; + spinlock_t lock; + struct list_head list; + int count; + int thresh; +}; + +struct bucket_item { + u32 count; +}; + +struct heuristic_ws { + u8 *sample; + u32 sample_size; + struct bucket_item *bucket; + struct bucket_item *bucket_b; + struct list_head list; +}; + +struct btrfs_dio_data { + ssize_t submitted; + struct extent_changeset *data_reserved; + struct btrfs_ordered_extent *ordered; + bool data_space_reserved; + bool nocow_done; +}; + +struct btrfs_dio_private { + u64 file_offset; + u32 bytes; + long: 32; + struct btrfs_bio bbio; +}; + +struct hmac_ctx { + struct crypto_shash *hash; + u8 pads[0]; +}; + +struct essiv_instance_ctx { + union { + struct crypto_skcipher_spawn skcipher_spawn; + struct crypto_aead_spawn aead_spawn; + } u; + char essiv_cipher_name[128]; + char shash_driver_name[128]; +}; + +struct essiv_tfm_ctx { + union { + struct crypto_skcipher *skcipher; + struct crypto_aead *aead; + } u; + struct crypto_cipher *essiv_cipher; + struct crypto_shash *hash; + int ivoffset; +}; + +struct essiv_aead_request_ctx { + struct scatterlist sg[4]; + u8 *assoc; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct aead_request aead_req; +}; + +enum msdos_sys_ind { + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 133, + WIN98_EXTENDED_PARTITION = 15, + LINUX_DATA_PARTITION = 131, + LINUX_LVM_PARTITION = 142, + LINUX_RAID_PARTITION = 253, + SOLARIS_X86_PARTITION = 130, + NEW_SOLARIS_X86_PARTITION = 191, + DM6_AUX1PARTITION = 81, + DM6_AUX3PARTITION = 83, + DM6_PARTITION = 84, + EZD_PARTITION = 85, + FREEBSD_PARTITION = 165, + OPENBSD_PARTITION = 166, + NETBSD_PARTITION = 169, + BSDI_PARTITION = 183, + MINIX_PARTITION = 129, + UNIXWARE_PARTITION = 99, +}; + +struct io_xattr { + struct file *file; + struct kernel_xattr_ctx ctx; + struct filename *filename; +}; + +typedef s32 compat_ssize_t; + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +typedef struct { + U32 fastMode; + U32 tableLog; +} ZSTD_seqSymbol_header; + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; +} seq_t; + +typedef struct { + size_t state; + const ZSTD_seqSymbol *table; +} ZSTD_fseState; + +typedef struct { + BIT_DStream_t DStream; + ZSTD_fseState stateLL; + ZSTD_fseState stateOffb; + ZSTD_fseState stateML; + size_t prevOffset[3]; +} seqState_t; + +typedef enum { + ZSTD_lo_isRegularOffset = 0, + ZSTD_lo_isLongOffset = 1, +} ZSTD_longOffset_e; + +struct aperture_range { + struct device *dev; + resource_size_t base; + resource_size_t size; + struct list_head lh; + void (*detach)(struct device *); +}; + +struct u32_fract { + __u32 numerator; + __u32 denominator; +}; + +struct clk_fractional_divider { + struct clk_hw hw; + void *reg; + u8 mshift; + u8 mwidth; + u8 nshift; + u8 nwidth; + u8 
flags; + void (*approximation)(struct clk_hw *, long unsigned int, long unsigned int *, long unsigned int *, long unsigned int *); + spinlock_t *lock; +}; + +struct plat_serial8250_port { + long unsigned int iobase; + void *membase; + resource_size_t mapbase; + resource_size_t mapsize; + unsigned int uartclk; + unsigned int irq; + long unsigned int irqflags; + void *private_data; + unsigned char regshift; + unsigned char iotype; + unsigned char hub6; + unsigned char has_sysrq; + unsigned int type; + upf_t flags; + u16 bugs; + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + u32 (*dl_read)(struct uart_8250_port *); + void (*dl_write)(struct uart_8250_port *, u32); + void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); + long: 32; +}; + +enum { + PLAT8250_DEV_LEGACY = -1, + PLAT8250_DEV_PLATFORM = 0, + PLAT8250_DEV_PLATFORM1 = 1, + PLAT8250_DEV_PLATFORM2 = 2, + PLAT8250_DEV_FOURPORT = 3, + PLAT8250_DEV_ACCENT = 4, + PLAT8250_DEV_BOCA = 5, + PLAT8250_DEV_EXAR_ST16C554 = 6, + PLAT8250_DEV_HUB6 = 7, + PLAT8250_DEV_AU1X00 = 8, + PLAT8250_DEV_SM501 = 9, +}; + +struct old_serial_port { + unsigned int uart; + unsigned int baud_base; + unsigned int port; + unsigned int irq; + upf_t flags; + unsigned char io_type; + unsigned char *iomem_base; + short unsigned int iomem_reg_shift; + long: 32; +}; + +typedef void (*serial8250_isa_config_fn)(int, struct uart_port *, u32 *); + +struct spi_transport_attrs { + int period; + int min_period; + int offset; + int max_offset; + unsigned int width: 1; + unsigned int max_width: 1; + unsigned int iu: 1; + unsigned int max_iu: 1; + unsigned int dt: 1; + unsigned int qas: 1; + unsigned int max_qas: 1; + unsigned int wr_flow: 1; + unsigned int rd_strm: 1; + unsigned int rti: 1; + unsigned int pcomp_en: 1; + unsigned int hold_mcs: 1; + unsigned int initial_dv: 1; + long unsigned int flags; + unsigned int support_sync: 1; + unsigned int support_wide: 1; + unsigned int support_dt: 1; + unsigned int support_dt_only; + unsigned int support_ius; + unsigned int support_qas; + unsigned int dv_pending: 1; + unsigned int dv_in_progress: 1; + struct mutex dv_mutex; +}; + +enum spi_signal_type { + SPI_SIGNAL_UNKNOWN = 1, + SPI_SIGNAL_SE = 2, + SPI_SIGNAL_LVD = 3, + SPI_SIGNAL_HVD = 4, +}; + +struct spi_host_attrs { + enum spi_signal_type signalling; +}; + +struct spi_function_template { + void (*get_period)(struct scsi_target *); + void (*set_period)(struct scsi_target *, int); + void (*get_offset)(struct scsi_target *); + void (*set_offset)(struct scsi_target *, int); + void (*get_width)(struct scsi_target *); + void (*set_width)(struct scsi_target *, int); + void (*get_iu)(struct scsi_target *); + void (*set_iu)(struct scsi_target *, int); + void (*get_dt)(struct scsi_target *); + void (*set_dt)(struct scsi_target *, int); + void (*get_qas)(struct scsi_target *); + void (*set_qas)(struct scsi_target *, int); + void (*get_wr_flow)(struct scsi_target *); + void (*set_wr_flow)(struct scsi_target *, int); + void (*get_rd_strm)(struct scsi_target *); + void (*set_rd_strm)(struct scsi_target *, int); + void (*get_rti)(struct scsi_target *); + void (*set_rti)(struct scsi_target *, int); + void (*get_pcomp_en)(struct scsi_target *); + void 
(*set_pcomp_en)(struct scsi_target *, int); + void (*get_hold_mcs)(struct scsi_target *); + void (*set_hold_mcs)(struct scsi_target *, int); + void (*get_signalling)(struct Scsi_Host *); + void (*set_signalling)(struct Scsi_Host *, enum spi_signal_type); + int (*deny_binding)(struct scsi_target *); + long unsigned int show_period: 1; + long unsigned int show_offset: 1; + long unsigned int show_width: 1; + long unsigned int show_iu: 1; + long unsigned int show_dt: 1; + long unsigned int show_qas: 1; + long unsigned int show_wr_flow: 1; + long unsigned int show_rd_strm: 1; + long unsigned int show_rti: 1; + long unsigned int show_pcomp_en: 1; + long unsigned int show_hold_mcs: 1; +}; + +enum { + SPI_BLIST_NOIUS = 1, +}; + +struct spi_internal { + struct scsi_transport_template t; + struct spi_function_template *f; +}; + +enum spi_compare_returns { + SPI_COMPARE_SUCCESS = 0, + SPI_COMPARE_FAILURE = 1, + SPI_COMPARE_SKIP_TEST = 2, +}; + +struct work_queue_wrapper { + struct work_struct work; + struct scsi_device *sdev; +}; + +struct swmii_regs { + u16 bmsr; + u16 lpa; + u16 lpagb; + u16 estat; +}; + +enum { + SWMII_SPEED_10 = 0, + SWMII_SPEED_100 = 1, + SWMII_SPEED_1000 = 2, + SWMII_DUPLEX_HALF = 0, + SWMII_DUPLEX_FULL = 1, +}; + +struct fixed_mdio_bus { + struct mii_bus *mii_bus; + struct list_head phys; +}; + +struct fixed_phy { + int addr; + struct phy_device *phydev; + struct fixed_phy_status status; + bool no_carrier; + int (*link_update)(struct net_device *, struct fixed_phy_status *); + struct list_head node; + struct gpio_desc *link_gpiod; +}; + +struct trace_event_raw_iwlwifi_dev_ioread32 { + struct trace_entry ent; + u32 __data_loc_dev; + u32 offs; + u32 val; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_iowrite8 { + struct trace_entry ent; + u32 __data_loc_dev; + u32 offs; + u8 val; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_iowrite32 { + struct trace_entry ent; + u32 __data_loc_dev; + u32 offs; + u32 val; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_iowrite64 { + struct trace_entry ent; + u32 __data_loc_dev; + long: 32; + u64 offs; + u64 val; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_iowrite_prph32 { + struct trace_entry ent; + u32 __data_loc_dev; + u32 offs; + u32 val; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_iowrite_prph64 { + struct trace_entry ent; + u32 __data_loc_dev; + long: 32; + u64 offs; + u64 val; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_ioread_prph32 { + struct trace_entry ent; + u32 __data_loc_dev; + u32 offs; + u32 val; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_irq { + struct trace_entry ent; + u32 __data_loc_dev; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_irq_msix { + struct trace_entry ent; + u32 __data_loc_dev; + u32 entry; + u8 defirq; + u32 inta_fh; + u32 inta_hw; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_ict_read { + struct trace_entry ent; + u32 __data_loc_dev; + u32 index; + u32 value; + char __data[0]; +}; + +struct trace_event_data_offsets_iwlwifi_dev_ioread32 { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_iowrite8 { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_iowrite32 { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_iowrite64 { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_iowrite_prph32 { + u32 dev; + const void *dev_ptr_; +}; + +struct 
trace_event_data_offsets_iwlwifi_dev_iowrite_prph64 { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_ioread_prph32 { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_irq { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_irq_msix { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_ict_read { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_iwlwifi_dev_ioread32)(void *, const struct device *, u32, u32); + +typedef void (*btf_trace_iwlwifi_dev_iowrite8)(void *, const struct device *, u32, u8); + +typedef void (*btf_trace_iwlwifi_dev_iowrite32)(void *, const struct device *, u32, u32); + +typedef void (*btf_trace_iwlwifi_dev_iowrite64)(void *, const struct device *, u64, u64); + +typedef void (*btf_trace_iwlwifi_dev_iowrite_prph32)(void *, const struct device *, u32, u32); + +typedef void (*btf_trace_iwlwifi_dev_iowrite_prph64)(void *, const struct device *, u64, u64); + +typedef void (*btf_trace_iwlwifi_dev_ioread_prph32)(void *, const struct device *, u32, u32); + +typedef void (*btf_trace_iwlwifi_dev_irq)(void *, const struct device *); + +typedef void (*btf_trace_iwlwifi_dev_irq_msix)(void *, const struct device *, struct msix_entry *, bool, u32, u32); + +typedef void (*btf_trace_iwlwifi_dev_ict_read)(void *, const struct device *, u32, u32); + +struct trace_event_raw_iwlwifi_dev_ucode_cont_event { + struct trace_entry ent; + u32 __data_loc_dev; + u32 time; + u32 data; + u32 ev; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_ucode_wrap_event { + struct trace_entry ent; + u32 __data_loc_dev; + u32 wraps; + u32 n_entry; + u32 p_entry; + char __data[0]; +}; + +struct trace_event_data_offsets_iwlwifi_dev_ucode_cont_event { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_ucode_wrap_event { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_iwlwifi_dev_ucode_cont_event)(void *, const struct device *, u32, u32, u32); + +typedef void (*btf_trace_iwlwifi_dev_ucode_wrap_event)(void *, const struct device *, u32, u32, u32); + +struct trace_event_raw_iwlwifi_msg_event { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dbg { + struct trace_entry ent; + u32 level; + u32 __data_loc_function; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_iwlwifi_msg_event { + u32 msg; + const void *msg_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dbg { + u32 function; + const void *function_ptr_; + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_iwlwifi_err)(void *, struct va_format *); + +typedef void (*btf_trace_iwlwifi_warn)(void *, struct va_format *); + +typedef void (*btf_trace_iwlwifi_info)(void *, struct va_format *); + +typedef void (*btf_trace_iwlwifi_crit)(void *, struct va_format *); + +typedef void (*btf_trace_iwlwifi_dbg)(void *, u32, const char *, struct va_format *); + +struct trace_event_raw_iwlwifi_dev_tx_tb { + struct trace_entry ent; + u32 __data_loc_dev; + long: 32; + u64 phys; + u32 __data_loc_data; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iwlwifi_dev_rx_data { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_data; + char __data[0]; +}; + +struct trace_event_data_offsets_iwlwifi_dev_tx_tb { + u32 dev; + const void *dev_ptr_; + u32 data; + const void *data_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_rx_data { + 
u32 dev; + const void *dev_ptr_; + u32 data; + const void *data_ptr_; +}; + +typedef void (*btf_trace_iwlwifi_dev_tx_tb)(void *, const struct device *, struct sk_buff *, u8 *, dma_addr_t, size_t); + +typedef void (*btf_trace_iwlwifi_dev_rx_data)(void *, const struct device *, void *, size_t, size_t); + +struct trace_event_raw_iwlwifi_dev_hcmd { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_hcmd; + u32 flags; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_rx { + struct trace_entry ent; + u32 __data_loc_dev; + u16 cmd; + u8 hdr_offset; + u32 __data_loc_rxbuf; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_tx { + struct trace_entry ent; + u32 __data_loc_dev; + void *skbaddr; + size_t framelen; + u32 __data_loc_tfd; + u32 __data_loc_buf0; + u32 __data_loc_buf1; + char __data[0]; +}; + +struct trace_event_raw_iwlwifi_dev_ucode_event { + struct trace_entry ent; + u32 __data_loc_dev; + u32 time; + u32 data; + u32 ev; + char __data[0]; +}; + +struct trace_event_data_offsets_iwlwifi_dev_hcmd { + u32 dev; + const void *dev_ptr_; + u32 hcmd; + const void *hcmd_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_rx { + u32 dev; + const void *dev_ptr_; + u32 rxbuf; + const void *rxbuf_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_tx { + u32 dev; + const void *dev_ptr_; + u32 tfd; + const void *tfd_ptr_; + u32 buf0; + const void *buf0_ptr_; + u32 buf1; + const void *buf1_ptr_; +}; + +struct trace_event_data_offsets_iwlwifi_dev_ucode_event { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_iwlwifi_dev_hcmd)(void *, const struct device *, struct iwl_host_cmd *, u16, struct iwl_cmd_header_wide *); + +typedef void (*btf_trace_iwlwifi_dev_rx)(void *, const struct device *, struct iwl_rx_packet *, size_t, size_t, size_t); + +typedef void (*btf_trace_iwlwifi_dev_tx)(void *, const struct device *, struct sk_buff *, void *, size_t, void *, size_t, int); + +typedef void (*btf_trace_iwlwifi_dev_ucode_event)(void *, const struct device *, u32, u32, u32); + +enum iwl_tdls_channel_switch_type { + TDLS_SEND_CHAN_SW_REQ = 0, + TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH = 1, + TDLS_MOVE_CH = 2, +}; + +struct iwl_tdls_channel_switch_timing { + __le32 frame_timestamp; + __le32 max_offchan_duration; + __le32 switch_time; + __le32 switch_timeout; +}; + +struct iwl_tdls_channel_switch_frame { + __le32 switch_time_offset; + struct iwl_tx_cmd___2 tx_cmd; + u8 data[200]; +}; + +struct iwl_tdls_channel_switch_cmd_tail { + struct iwl_tdls_channel_switch_timing timing; + struct iwl_tdls_channel_switch_frame frame; +}; + +struct iwl_tdls_channel_switch_cmd { + u8 switch_type; + __le32 peer_sta_id; + struct iwl_fw_channel_info ci; + struct iwl_tdls_channel_switch_cmd_tail tail; +} __attribute__((packed)); + +struct iwl_tdls_channel_switch_notif { + __le32 status; + __le32 offchannel_duration; + __le32 sta_id; +}; + +struct iwl_tdls_sta_info { + u8 sta_id; + u8 tx_to_peer_tid; + __le16 tx_to_peer_ssn; + __le32 is_initiator; +}; + +struct iwl_tdls_config_cmd { + __le32 id_and_color; + u8 tdls_peer_count; + u8 tx_to_ap_tid; + __le16 tx_to_ap_ssn; + struct iwl_tdls_sta_info sta_info[4]; + __le32 pti_req_data_offset; + struct iwl_tx_cmd___2 pti_req_tx_cmd; + u8 pti_req_template[0]; +}; + +struct iwl_tdls_config_sta_info_res { + __le16 sta_id; + __le16 tx_to_peer_last_seq; +}; + +struct iwl_tdls_config_res { + __le32 tx_to_ap_last_seq; + struct iwl_tdls_config_sta_info_res sta_info[4]; +}; + +struct rtw_sdio_work_data { + struct work_struct work; + struct rtw_dev 
*rtwdev; +}; + +struct sdio_func; + +struct rtw_sdio { + struct sdio_func *sdio_func; + u32 irq_mask; + u8 rx_addr; + bool sdio3_bus_mode; + void *irq_thread; + struct workqueue_struct *txwq; + struct rtw_sdio_work_data *tx_handler_data; + struct sk_buff_head tx_queue[8]; +}; + +struct quirk_entry { + u16 vid; + u16 pid; + u32 flags; +}; + +struct i2c_device_identity { + u16 manufacturer_id; + u16 part_id; + u8 die_revision; +}; + +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; + u32 sda_hold_ns; + u32 digital_filter_width_ns; + u32 analog_filter_cutoff_freq_hz; +}; + +struct trace_event_raw_i2c_write { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_read { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + char __data[0]; +}; + +struct trace_event_raw_i2c_reply { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_result { + struct trace_entry ent; + int adapter_nr; + __u16 nr_msgs; + __s16 ret; + char __data[0]; +}; + +struct trace_event_data_offsets_i2c_write { + u32 buf; + const void *buf_ptr_; +}; + +struct trace_event_data_offsets_i2c_read {}; + +struct trace_event_data_offsets_i2c_reply { + u32 buf; + const void *buf_ptr_; +}; + +struct trace_event_data_offsets_i2c_result {}; + +typedef void (*btf_trace_i2c_write)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_read)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_reply)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_result)(void *, const struct i2c_adapter *, int, int); + +struct i2c_cmd_arg { + unsigned int cmd; + void *arg; +}; + +struct mmsghdr { + struct user_msghdr msg_hdr; + unsigned int msg_len; +}; + +struct compat_if_settings { + unsigned int type; + unsigned int size; + compat_uptr_t ifs_ifsu; +}; + +struct compat_ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct compat_mmsghdr { + struct compat_msghdr msg_hdr; + compat_uint_t msg_len; +}; + +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; +}; + +struct used_address { + struct __kernel_sockaddr_storage name; + unsigned int name_len; +}; + +struct llc_pdu_sn { + u8 dsap; + u8 ssap; + u8 ctrl_1; + u8 ctrl_2; +}; + +struct linkmodes_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; + bool peer_empty; +}; + +struct plca_reply_data { + struct ethnl_reply_data base; + struct phy_plca_cfg plca_cfg; + struct phy_plca_status plca_st; +}; + +enum nft_objref_attributes { + NFTA_OBJREF_UNSPEC = 0, + NFTA_OBJREF_IMM_TYPE = 1, + NFTA_OBJREF_IMM_NAME = 2, + NFTA_OBJREF_SET_SREG = 3, + NFTA_OBJREF_SET_NAME = 4, + NFTA_OBJREF_SET_ID = 
5, + __NFTA_OBJREF_MAX = 6, +}; + +struct nft_objref_map { + struct nft_set *set; + u8 sreg; + struct nft_set_binding binding; +}; + +enum ipt_reject_with { + IPT_ICMP_NET_UNREACHABLE = 0, + IPT_ICMP_HOST_UNREACHABLE = 1, + IPT_ICMP_PROT_UNREACHABLE = 2, + IPT_ICMP_PORT_UNREACHABLE = 3, + IPT_ICMP_ECHOREPLY = 4, + IPT_ICMP_NET_PROHIBITED = 5, + IPT_ICMP_HOST_PROHIBITED = 6, + IPT_TCP_RESET = 7, + IPT_ICMP_ADMIN_PROHIBITED = 8, +}; + +struct ipt_reject_info { + enum ipt_reject_with with; +}; + +enum ieee80211_s1g_chanwidth { + IEEE80211_S1G_CHANWIDTH_1MHZ = 0, + IEEE80211_S1G_CHANWIDTH_2MHZ = 1, + IEEE80211_S1G_CHANWIDTH_4MHZ = 3, + IEEE80211_S1G_CHANWIDTH_8MHZ = 7, + IEEE80211_S1G_CHANWIDTH_16MHZ = 15, +}; + +struct iface_combination_params { + int radio_idx; + int num_different_channels; + u8 radar_detect; + int iftype_num[13]; + u32 new_beacon_int; +}; + +struct ieee80211_noa_data { + u32 next_tsf; + bool has_next_tsf; + u8 absent; + u8 count[4]; + struct { + u32 start; + u32 duration; + u32 interval; + } desc[4]; +}; + +struct suspend_stats { + unsigned int step_failures[8]; + unsigned int success; + unsigned int fail; + int last_failed_dev; + char failed_devs[80]; + int last_failed_errno; + int errno[2]; + int last_failed_step; + long: 32; + u64 last_hw_sleep; + u64 total_hw_sleep; + u64 max_hw_sleep; + enum suspend_stat_step failed_steps[2]; +}; + +struct rchan_percpu_buf_dispatcher { + struct rchan_buf *buf; + struct dentry *dentry; +}; + +struct eprobe_trace_entry_head { + struct trace_entry ent; +}; + +struct trace_eprobe { + const char *event_system; + const char *event_name; + char *filter_str; + struct trace_event_call *event; + struct dyn_event devent; + struct trace_probe tp; +}; + +struct eprobe_data { + struct trace_event_file *file; + struct trace_eprobe *ep; +}; + +enum pgt_entry { + NORMAL_PMD = 0, + HPAGE_PMD = 1, + NORMAL_PUD = 2, + HPAGE_PUD = 3, +}; + +struct prepend_buffer { + char *buf; + int len; +}; + +struct ext4_fc_tl { + __le16 fc_tag; + __le16 fc_len; +}; + +struct ext4_fc_head { + __le32 fc_features; + __le32 fc_tid; +}; + +struct ext4_fc_add_range { + __le32 fc_ino; + __u8 fc_ex[12]; +}; + +struct ext4_fc_del_range { + __le32 fc_ino; + __le32 fc_lblk; + __le32 fc_len; +}; + +struct ext4_fc_dentry_info { + __le32 fc_parent_ino; + __le32 fc_ino; + __u8 fc_dname[0]; +}; + +struct ext4_fc_inode { + __le32 fc_ino; + __u8 fc_raw_inode[0]; +}; + +struct ext4_fc_tail { + __le32 fc_tid; + __le32 fc_crc; +}; + +enum { + EXT4_FC_STATUS_OK = 0, + EXT4_FC_STATUS_INELIGIBLE = 1, + EXT4_FC_STATUS_SKIPPED = 2, + EXT4_FC_STATUS_FAILED = 3, +}; + +struct ext4_fc_dentry_update { + int fcd_op; + int fcd_parent; + int fcd_ino; + long: 32; + struct qstr fcd_name; + unsigned char fcd_iname[36]; + struct list_head fcd_list; + struct list_head fcd_dilist; + long: 32; +}; + +struct __track_dentry_update_args { + struct dentry *dentry; + int op; +}; + +struct __track_range_args { + ext4_lblk_t start; + ext4_lblk_t end; +}; + +struct dentry_info_args { + int parent_ino; + int dname_len; + int ino; + int inode_len; + char *dname; +}; + +struct ext4_fc_tl_mem { + u16 fc_tag; + u16 fc_len; +}; + +struct fuse_kstatfs { + uint64_t blocks; + uint64_t bfree; + uint64_t bavail; + uint64_t files; + uint64_t ffree; + uint32_t bsize; + uint32_t namelen; + uint32_t frsize; + uint32_t padding; + uint32_t spare[6]; +}; + +struct fuse_statfs_out { + struct fuse_kstatfs st; +}; + +struct fuse_init_in { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint32_t flags2; + 
uint32_t unused[11]; +}; + +struct fuse_init_out { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint16_t max_background; + uint16_t congestion_threshold; + uint32_t max_write; + uint32_t time_gran; + uint16_t max_pages; + uint16_t map_alignment; + uint32_t flags2; + uint32_t max_stack_depth; + uint32_t unused[6]; +}; + +struct fuse_syncfs_in { + uint64_t padding; +}; + +enum fuse_dax_mode { + FUSE_DAX_INODE_DEFAULT = 0, + FUSE_DAX_ALWAYS = 1, + FUSE_DAX_NEVER = 2, + FUSE_DAX_INODE_USER = 3, +}; + +struct fuse_fs_context { + int fd; + struct file *file; + unsigned int rootmode; + kuid_t user_id; + kgid_t group_id; + bool is_bdev: 1; + bool fd_present: 1; + bool rootmode_present: 1; + bool user_id_present: 1; + bool group_id_present: 1; + bool default_permissions: 1; + bool allow_other: 1; + bool destroy: 1; + bool no_control: 1; + bool no_force_umount: 1; + bool legacy_opts_show: 1; + enum fuse_dax_mode dax_mode; + unsigned int max_read; + unsigned int blksize; + const char *subtype; + struct dax_device *dax_dev; + void **fudptr; +}; + +enum { + OPT_SOURCE = 0, + OPT_SUBTYPE = 1, + OPT_FD = 2, + OPT_ROOTMODE = 3, + OPT_USER_ID = 4, + OPT_GROUP_ID = 5, + OPT_DEFAULT_PERMISSIONS = 6, + OPT_ALLOW_OTHER = 7, + OPT_MAX_READ = 8, + OPT_BLKSIZE = 9, + OPT_ERR = 10, +}; + +struct fuse_inode_handle { + u64 nodeid; + u32 generation; + long: 32; +}; + +struct fuse_init_args { + struct fuse_args args; + struct fuse_init_in in; + struct fuse_init_out out; +}; + +struct btrfs_free_space_entry { + __le64 offset; + __le64 bytes; + __u8 type; +} __attribute__((packed)); + +struct btrfs_free_space_header { + struct btrfs_disk_key location; + __le64 generation; + __le64 num_entries; + __le64 num_bitmaps; +} __attribute__((packed)); + +struct btrfs_trim_range { + u64 start; + u64 bytes; + struct list_head list; +}; + +enum { + Opt_uid___8 = 0, + Opt_gid___9 = 1, +}; + +struct blk_queue_stats { + struct list_head callbacks; + spinlock_t lock; + int accounting; +}; + +struct trace_event_raw_kyber_latency { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char type[8]; + u8 percentile; + u8 numerator; + u8 denominator; + unsigned int samples; + char __data[0]; +}; + +struct trace_event_raw_kyber_adjust { + struct trace_entry ent; + dev_t dev; + char domain[16]; + unsigned int depth; + char __data[0]; +}; + +struct trace_event_raw_kyber_throttled { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_kyber_latency {}; + +struct trace_event_data_offsets_kyber_adjust {}; + +struct trace_event_data_offsets_kyber_throttled {}; + +typedef void (*btf_trace_kyber_latency)(void *, dev_t, const char *, const char *, unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_kyber_adjust)(void *, dev_t, const char *, unsigned int); + +typedef void (*btf_trace_kyber_throttled)(void *, dev_t, const char *); + +enum { + KYBER_READ = 0, + KYBER_WRITE = 1, + KYBER_DISCARD = 2, + KYBER_OTHER = 3, + KYBER_NUM_DOMAINS = 4, +}; + +enum { + KYBER_ASYNC_PERCENT = 75, +}; + +enum { + KYBER_LATENCY_SHIFT = 2, + KYBER_GOOD_BUCKETS = 4, + KYBER_LATENCY_BUCKETS = 8, +}; + +enum { + KYBER_TOTAL_LATENCY = 0, + KYBER_IO_LATENCY = 1, +}; + +struct kyber_cpu_latency { + atomic_t buckets[48]; +}; + +struct kyber_ctx_queue { + spinlock_t lock; + struct list_head rq_list[4]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct kyber_queue_data { + struct request_queue 
*q; + dev_t dev; + struct sbitmap_queue domain_tokens[4]; + unsigned int async_depth; + struct kyber_cpu_latency *cpu_latency; + struct timer_list timer; + unsigned int latency_buckets[48]; + long unsigned int latency_timeout[3]; + int domain_p99[3]; + long: 32; + u64 latency_targets[3]; +}; + +struct kyber_hctx_data { + spinlock_t lock; + struct list_head rqs[4]; + unsigned int cur_domain; + unsigned int batching; + struct kyber_ctx_queue *kcqs; + struct sbitmap kcq_map[4]; + struct sbq_wait domain_wait[4]; + struct sbq_wait_state *domain_ws[4]; + atomic_t wait_index[4]; +}; + +struct flush_kcq_data { + struct kyber_hctx_data *khd; + unsigned int sched_domain; + struct list_head *list; +}; + +enum { + IORING_MEM_REGION_REG_WAIT_ARG = 1, +}; + +struct io_uring_mem_region_reg { + __u64 region_uptr; + __u64 flags; + __u64 __resv[2]; +}; + +struct io_uring_probe_op { + __u8 op; + __u8 resv; + __u16 flags; + __u32 resv2; +}; + +struct io_uring_probe { + __u8 last_op; + __u8 ops_len; + __u16 resv; + __u32 resv2[3]; + struct io_uring_probe_op ops[0]; +}; + +struct io_uring_restriction { + __u16 opcode; + union { + __u8 register_op; + __u8 sqe_op; + __u8 sqe_flags; + }; + __u8 resv; + __u32 resv2[3]; +}; + +struct io_uring_clock_register { + __u32 clockid; + __u32 __resv[3]; +}; + +enum io_uring_register_restriction_op { + IORING_RESTRICTION_REGISTER_OP = 0, + IORING_RESTRICTION_SQE_OP = 1, + IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2, + IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3, + IORING_RESTRICTION_LAST = 4, +}; + +struct io_ring_ctx_rings { + short unsigned int n_ring_pages; + short unsigned int n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; + struct io_uring_sqe *sq_sqes; + struct io_rings *rings; +}; + +enum { + NVME_AEN_BIT_NS_ATTR = 8, + NVME_AEN_BIT_FW_ACT = 9, + NVME_AEN_BIT_ANA_CHANGE = 11, + NVME_AEN_BIT_DISC_CHANGE = 31, +}; + +enum { + SWITCHTEC_GAS_MRPC_OFFSET = 0, + SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, + SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, + SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, + SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, + SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, + SWITCHTEC_GAS_NTB_OFFSET = 65536, + SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, +}; + +enum { + SWITCHTEC_NTB_REG_INFO_OFFSET = 0, + SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, + SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, +}; + +struct nt_partition_info { + u32 xlink_enabled; + u32 target_part_low; + u32 target_part_high; + u32 reserved; +}; + +struct ntb_info_regs { + u8 partition_count; + u8 partition_id; + u16 reserved1; + u64 ep_map; + u16 requester_id; + u16 reserved2; + u32 reserved3[4]; + struct nt_partition_info ntp_info[48]; +}; + +struct ntb_ctrl_regs { + u32 partition_status; + u32 partition_op; + u32 partition_ctrl; + u32 bar_setup; + u32 bar_error; + u16 lut_table_entries; + u16 lut_table_offset; + u32 lut_error; + u16 req_id_table_size; + u16 req_id_table_offset; + u32 req_id_error; + u32 reserved1[7]; + struct { + u32 ctl; + u32 win_size; + u64 xlate_addr; + } bar_entry[6]; + struct { + u32 win_size; + u32 reserved[3]; + } bar_ext_entry[6]; + u32 reserved2[192]; + u32 req_id_table[512]; + u32 reserved3[256]; + u64 lut_entry[512]; +}; + +struct pci_dev_reset_methods { + u16 vendor; + u16 device; + int (*reset)(struct pci_dev *, bool); +}; + +struct pci_dev_acs_enabled { + u16 vendor; + u16 device; + int (*acs_enabled)(struct pci_dev *, u16); +}; + +struct pci_dev_acs_ops { + u16 vendor; + u16 device; + int (*enable_acs)(struct pci_dev *); + int (*disable_acs_redir)(struct pci_dev *); +}; + +struct 
drm_dmi_panel_orientation_data { + int width; + int height; + const char * const *bios_dates; + int orientation; +}; + +struct component_master_ops { + int (*bind)(struct device *); + void (*unbind)(struct device *); +}; + +struct component; + +struct component_match_array { + void *data; + int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + +struct aggregate_device; + +struct component { + struct list_head node; + struct aggregate_device *adev; + bool bound; + const struct component_ops *ops; + int subcomponent; + struct device *dev; +}; + +struct component_match { + size_t alloc; + size_t num; + struct component_match_array *compare; +}; + +struct aggregate_device { + struct list_head node; + bool bound; + const struct component_master_ops *ops; + struct device *parent; + struct component_match *match; +}; + +struct iwlagn_tx_power_dbm_cmd { + s8 global_lmt; + u8 flags; + s8 srv_chan_lmt; + u8 reserved; +}; + +struct iwl_txfifo_flush_cmd_v3 { + __le32 queue_control; + __le16 flush_control; + __le16 reserved; +}; + +struct iwl_txfifo_flush_cmd_v2 { + __le16 queue_control; + __le16 flush_control; +}; + +enum iwl_bt_kill_idx { + IWL_BT_KILL_DEFAULT = 0, + IWL_BT_KILL_OVERRIDE = 1, + IWL_BT_KILL_REDUCE = 2, +}; + +struct iwl_basic_bt_cmd { + u8 flags; + u8 ledtime; + u8 max_kill; + u8 bt3_timer_t7_value; + __le32 kill_ack_mask; + __le32 kill_cts_mask; + u8 bt3_prio_sample_time; + u8 bt3_timer_t2_value; + __le16 bt4_reaction_time; + __le32 bt3_lookup_table[12]; + u8 reduce_txpower; + u8 reserved; + __le16 valid; +}; + +struct iwl_bt_cmd_v1 { + struct iwl_basic_bt_cmd basic; + u8 prio_boost; + u8 tx_prio_boost; + __le16 rx_prio_boost; +}; + +struct iwl_bt_cmd_v2 { + struct iwl_basic_bt_cmd basic; + __le32 prio_boost; + u8 reserved; + u8 tx_prio_boost; + __le16 rx_prio_boost; +}; + +struct iwl_bt_uart_msg { + u8 header; + u8 frame1; + u8 frame2; + u8 frame3; + u8 frame4; + u8 frame5; + u8 frame6; + u8 frame7; +}; + +struct iwl_bt_coex_profile_notif___2 { + struct iwl_bt_uart_msg last_bt_uart_msg; + u8 bt_status; + u8 bt_traffic_load; + u8 bt_ci_compliance; + u8 reserved; +}; + +enum iwlagn_d3_wakeup_filters { + IWLAGN_D3_WAKEUP_RFKILL = 1, + IWLAGN_D3_WAKEUP_SYSASSERT = 2, +}; + +struct iwlagn_d3_config_cmd { + __le32 min_sleep_time; + __le32 wakeup_flags; +}; + +struct iwlagn_wowlan_pattern { + u8 mask[16]; + u8 pattern[128]; + u8 mask_size; + u8 pattern_size; + __le16 reserved; +}; + +struct iwlagn_wowlan_patterns_cmd { + __le32 n_patterns; + struct iwlagn_wowlan_pattern patterns[0]; +}; + +struct iwlagn_wowlan_wakeup_filter_cmd { + __le32 enabled; + __le16 non_qos_seq; + __le16 reserved; + __le16 qos_seq[8]; +}; + +struct iwlagn_wowlan_rsc_tsc_params_cmd { + union iwlagn_all_tsc_rsc all_tsc_rsc; +}; + +struct iwlagn_mic_keys { + u8 tx[8]; + u8 rx_unicast[8]; + u8 rx_mcast[8]; +}; + +struct iwlagn_p1k_cache { + __le16 p1k[5]; +}; + +struct iwlagn_wowlan_tkip_params_cmd { + struct iwlagn_mic_keys mic_keys; + struct iwlagn_p1k_cache tx; + struct iwlagn_p1k_cache rx_uni[2]; + struct iwlagn_p1k_cache rx_multi[2]; +}; + +struct iwlagn_wowlan_kek_kck_material_cmd { + u8 kck[32]; + u8 kek[32]; + __le16 kck_len; + __le16 kek_len; + __le64 replay_ctr; +}; + +struct wowlan_key_data { + struct iwl_rxon_context *ctx; + struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; + struct iwlagn_wowlan_tkip_params_cmd *tkip; + const u8 *bssid; + bool error; + bool use_rsc_tsc; 
+ bool use_tkip; +}; + +enum iwl_power_flags { + POWER_FLAGS_POWER_SAVE_ENA_MSK = 1, + POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = 2, + POWER_FLAGS_SKIP_OVER_DTIM_MSK = 4, + POWER_FLAGS_SNOOZE_ENA_MSK = 32, + POWER_FLAGS_BT_SCO_ENA = 256, + POWER_FLAGS_ADVANCE_PM_ENA_MSK = 512, + POWER_FLAGS_LPRX_ENA_MSK = 2048, + POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = 4096, +}; + +enum iwl_device_power_flags { + DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = 1, + DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = 2, + DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK = 128, + DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = 4096, +}; + +struct iwl_device_power_cmd { + __le16 flags; + __le16 reserved; +}; + +struct iwl_mac_power_cmd { + __le32 id_and_color; + __le16 flags; + __le16 keep_alive_seconds; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 rx_data_timeout_uapsd; + __le32 tx_data_timeout_uapsd; + u8 lprx_rssi_threshold; + u8 skip_dtim_periods; + __le16 snooze_interval; + __le16 snooze_window; + u8 snooze_step; + u8 qndp_tid; + u8 uapsd_ac_flags; + u8 uapsd_max_sp; + u8 heavy_tx_thld_packets; + u8 heavy_rx_thld_packets; + u8 heavy_tx_thld_percentage; + u8 heavy_rx_thld_percentage; + u8 limited_ps_threshold; + u8 reserved; +}; + +struct iwl_uapsd_misbehaving_ap_notif { + __le32 sta_id; + u8 mac_id; + u8 reserved[3]; +}; + +struct iwl_beacon_filter_cmd { + __le32 bf_energy_delta; + __le32 bf_roaming_energy_delta; + __le32 bf_roaming_state; + __le32 bf_temp_threshold; + __le32 bf_temp_fast_filter; + __le32 bf_temp_slow_filter; + __le32 bf_enable_beacon_filter; + __le32 bf_debug_flag; + __le32 bf_escape_timer; + __le32 ba_escape_timer; + __le32 ba_enable_beacon_abort; + __le32 bf_threshold_absolute_low[2]; + __le32 bf_threshold_absolute_high[2]; +}; + +struct iwl_allow_uapsd_iface_iterator_data { + struct ieee80211_vif *current_vif; + bool allow_uapsd; +}; + +struct iwl_power_vifs { + struct iwl_mvm *mvm; + struct ieee80211_vif *bss_vif; + struct ieee80211_vif *p2p_vif; + struct ieee80211_vif *ap_vif; + struct ieee80211_vif *monitor_vif; + bool p2p_active; + bool bss_active; + bool ap_active; + bool monitor_active; +}; + +struct rtw_stas_entry { + struct list_head list; + struct ieee80211_sta *sta; +}; + +struct rtw_iter_stas_data { + struct rtw_dev *rtwdev; + struct list_head list; +}; + +struct rtw_vifs_entry { + struct list_head list; + struct ieee80211_vif *vif; +}; + +struct rtw_iter_vifs_data { + struct rtw_dev *rtwdev; + struct list_head list; +}; + +struct dbc_regs { + __le32 capability; + __le32 doorbell; + __le32 ersts; + __le32 __reserved_0; + __le64 erstba; + __le64 erdp; + __le32 control; + __le32 status; + __le32 portsc; + __le32 __reserved_1; + __le64 dccp; + __le32 devinfo1; + __le32 devinfo2; +}; + +struct dbc_str_descs { + char string0[64]; + char manufacturer[64]; + char product[64]; + char serial[64]; +}; + +enum dbc_state { + DS_DISABLED = 0, + DS_INITIALIZED = 1, + DS_ENABLED = 2, + DS_CONNECTED = 3, + DS_CONFIGURED = 4, + DS_MAX = 5, +}; + +struct xhci_dbc; + +struct dbc_ep { + struct xhci_dbc *dbc; + struct list_head list_pending; + struct xhci_ring *ring; + unsigned int direction: 1; + unsigned int halted: 1; +}; + +struct dbc_driver; + +struct xhci_dbc { + spinlock_t lock; + struct device *dev; + struct xhci_hcd *xhci; + struct dbc_regs *regs; + struct xhci_ring *ring_evt; + struct xhci_ring *ring_in; + struct xhci_ring *ring_out; + struct xhci_erst erst; + struct xhci_container_ctx *ctx; + struct dbc_str_descs *string; + dma_addr_t string_dma; + size_t string_size; + u16 idVendor; + u16 idProduct; + 
u16 bcdDevice; + u8 bInterfaceProtocol; + enum dbc_state state; + struct delayed_work event_work; + unsigned int poll_interval; + unsigned int resume_required: 1; + struct dbc_ep eps[2]; + const struct dbc_driver *driver; + void *priv; +}; + +struct dbc_driver { + int (*configure)(struct xhci_dbc *); + void (*disconnect)(struct xhci_dbc *); +}; + +struct dbc_request { + void *buf; + unsigned int length; + dma_addr_t dma; + void (*complete)(struct xhci_dbc *, struct dbc_request *); + struct list_head list_pool; + int status; + unsigned int actual; + struct xhci_dbc *dbc; + struct list_head list_pending; + dma_addr_t trb_dma; + union xhci_trb *trb; + unsigned int direction: 1; +}; + +struct trace_event_raw_xhci_log_msg { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctx { + struct trace_entry ent; + int ctx_64; + unsigned int ctx_type; + dma_addr_t ctx_dma; + u8 *ctx_va; + unsigned int ctx_ep_num; + u32 __data_loc_ctx_data; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_trb { + struct trace_entry ent; + dma_addr_t dma; + u32 type; + u32 field0; + u32 field1; + u32 field2; + u32 field3; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_free_virt_dev { + struct trace_entry ent; + void *vdev; + long: 32; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + int slot_id; + u16 current_mel; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_virt_dev { + struct trace_entry ent; + void *vdev; + long: 32; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + int devnum; + int state; + int speed; + u8 portnum; + u8 level; + int slot_id; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_xhci_log_urb { + struct trace_entry ent; + u32 __data_loc_devname; + void *urb; + unsigned int pipe; + unsigned int stream; + int status; + unsigned int flags; + int num_mapped_sgs; + int num_sgs; + int length; + int actual; + int epnum; + int dir_in; + int type; + int slot_id; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_stream_ctx { + struct trace_entry ent; + unsigned int stream_id; + long: 32; + u64 stream_ring; + dma_addr_t ctx_array_dma; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_xhci_log_ep_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u64 deq; + u32 tx_info; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_xhci_log_slot_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u32 tt_info; + u32 state; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctrl_ctx { + struct trace_entry ent; + u32 drop; + u32 add; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ring { + struct trace_entry ent; + u32 type; + void *ring; + dma_addr_t enq; + dma_addr_t deq; + unsigned int num_segs; + unsigned int stream_id; + unsigned int cycle_state; + unsigned int bounce_buf_len; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_portsc { + struct trace_entry ent; + u32 busnum; + u32 portnum; + u32 portsc; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_doorbell { + struct trace_entry ent; + u32 slot; + u32 doorbell; + char __data[0]; +}; + +struct trace_event_raw_xhci_dbc_log_request { + struct trace_entry ent; + struct dbc_request *req; + bool dir; + unsigned int actual; + unsigned int length; + int status; + char __data[0]; +}; + +struct trace_event_data_offsets_xhci_log_msg { + u32 msg; + const void *msg_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_ctx { + u32 ctx_data; + const void *ctx_data_ptr_; +}; + +struct 
trace_event_data_offsets_xhci_log_trb {}; + +struct trace_event_data_offsets_xhci_log_free_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_urb { + u32 devname; + const void *devname_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_stream_ctx {}; + +struct trace_event_data_offsets_xhci_log_ep_ctx {}; + +struct trace_event_data_offsets_xhci_log_slot_ctx {}; + +struct trace_event_data_offsets_xhci_log_ctrl_ctx {}; + +struct trace_event_data_offsets_xhci_log_ring {}; + +struct trace_event_data_offsets_xhci_log_portsc {}; + +struct trace_event_data_offsets_xhci_log_doorbell {}; + +struct trace_event_data_offsets_xhci_dbc_log_request {}; + +typedef void (*btf_trace_xhci_dbg_address)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_context_change)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_quirks)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_reset_ep)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_cancel_urb)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_init)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_ring_expansion)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_address_ctx)(void *, struct xhci_hcd *, struct xhci_container_ctx *, unsigned int); + +typedef void (*btf_trace_xhci_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_handle_command)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_queue_trb)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_gadget_ep_queue)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_free_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_alloc_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_addressable_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_stop_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_urb_enqueue)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_giveback)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_dequeue)(void *, struct urb *); + +typedef void (*btf_trace_xhci_alloc_stream_info_ctx)(void *, struct xhci_stream_info *, unsigned int); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq_stream)(void *, struct xhci_stream_info *, unsigned int); + +typedef void (*btf_trace_xhci_handle_cmd_stop_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_config_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_add_endpoint)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_alloc_dev)(void *, struct xhci_slot_ctx *); + 
+typedef void (*btf_trace_xhci_free_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_disable_slot)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_discover_or_reset_device)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_setup_device_slot)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_addr_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_address_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_ring_alloc)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_free)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_expansion)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_inc_enq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_inc_deq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_handle_port_status)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_get_port_status)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_hub_status_data)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_ring_ep_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_ring_host_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_dbc_alloc_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_free_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_queue_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_giveback_request)(void *, struct dbc_request *); + +struct ppl_header_entry { + __le64 data_sector; + __le32 pp_size; + __le32 data_size; + __le32 parity_disk; + __le32 checksum; +}; + +struct ppl_header { + __u8 reserved[512]; + __le32 signature; + __le32 padding; + __le64 generation; + __le32 entries_count; + __le32 checksum; + struct ppl_header_entry entries[148]; +}; + +struct ppl_log; + +struct ppl_io_unit { + struct ppl_log *log; + struct page *header_page; + unsigned int entries_count; + unsigned int pp_size; + u64 seq; + struct list_head log_sibling; + struct list_head stripe_list; + atomic_t pending_stripes; + atomic_t pending_flushes; + bool submitted; + long: 32; + struct bio bio; + struct bio_vec biovec[32]; +}; + +struct ppl_conf { + struct mddev *mddev; + struct ppl_log *child_logs; + int count; + int block_size; + u32 signature; + long: 32; + atomic64_t seq; + struct kmem_cache *io_kc; + mempool_t io_pool; + struct bio_set bs; + struct bio_set flush_bs; + int recovered_entries; + int mismatch_count; + struct list_head no_mem_stripes; + spinlock_t no_mem_stripes_lock; + short unsigned int write_hint; + long: 32; +}; + +struct ppl_log { + struct ppl_conf *ppl_conf; + struct md_rdev *rdev; + struct mutex io_mutex; + struct ppl_io_unit *current_io; + spinlock_t io_list_lock; + struct list_head io_list; + long: 32; + sector_t next_io_sector; + unsigned int entry_space; + bool use_multippl; + bool wb_cache_on; + long unsigned int disk_flush_bitmap; + long: 32; +}; + +struct firmware_map_entry { + u64 start; + u64 end; + const char *type; + struct list_head list; + struct kobject 
kobj; +}; + +struct memmap_attribute { + struct attribute attr; + ssize_t (*show)(struct firmware_map_entry *, char *); +}; + +struct netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *, char *); + ssize_t (*store)(struct netdev_queue *, const char *, size_t); +}; + +enum ethtool_multicast_groups { + ETHNL_MCGRP_MONITOR = 0, +}; + +struct ethnl_dump_ctx { + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct ethnl_reply_data *reply_data; + long unsigned int pos_ifindex; +}; + +typedef void (*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *); + +struct nf_conntrack_nat_helper { + struct list_head list; + char mod_name[16]; + struct module *module; +}; + +enum nft_dynset_ops { + NFT_DYNSET_OP_ADD = 0, + NFT_DYNSET_OP_UPDATE = 1, + NFT_DYNSET_OP_DELETE = 2, +}; + +enum nft_dynset_flags { + NFT_DYNSET_F_INV = 1, + NFT_DYNSET_F_EXPR = 2, +}; + +enum nft_dynset_attributes { + NFTA_DYNSET_UNSPEC = 0, + NFTA_DYNSET_SET_NAME = 1, + NFTA_DYNSET_SET_ID = 2, + NFTA_DYNSET_OP = 3, + NFTA_DYNSET_SREG_KEY = 4, + NFTA_DYNSET_SREG_DATA = 5, + NFTA_DYNSET_TIMEOUT = 6, + NFTA_DYNSET_EXPR = 7, + NFTA_DYNSET_PAD = 8, + NFTA_DYNSET_FLAGS = 9, + NFTA_DYNSET_EXPRESSIONS = 10, + __NFTA_DYNSET_MAX = 11, +}; + +struct nft_dynset { + struct nft_set *set; + struct nft_set_ext_tmpl tmpl; + enum nft_dynset_ops op: 8; + u8 sreg_key; + u8 sreg_data; + bool invert; + bool expr; + u8 num_exprs; + long: 32; + u64 timeout; + struct nft_expr *expr_array[2]; + struct nft_set_binding binding; +}; + +struct ip_fraglist_iter { + struct sk_buff *frag; + struct iphdr *iph; + int offset; + unsigned int hlen; +}; + +struct ip_frag_state { + bool DF; + unsigned int hlen; + unsigned int ll_rs; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + __be16 not_last_frag; +}; + +struct frag_queue { + struct inet_frag_queue q; + int iif; + __u16 nhoffset; + u8 ecn; +}; + +struct ieee80211s_hdr { + u8 flags; + u8 ttl; + __le32 seqnum; + u8 eaddr1[6]; + u8 eaddr2[6]; +} __attribute__((packed)); + +struct iapp_layer2_update { + u8 da[6]; + u8 sa[6]; + __be16 len; + u8 dsap; + u8 ssap; + u8 control; + u8 xid_info[3]; +}; + +typedef u32 (*codel_skb_len_t)(const struct sk_buff *); + +typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *); + +typedef void (*codel_skb_drop_t)(struct sk_buff *, void *); + +typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *, void *); + +struct ieee80211_ema_beacons { + u8 cnt; + struct { + struct sk_buff *skb; + struct ieee80211_mutable_offsets offs; + } bcn[0]; +}; + +typedef struct sk_buff *fq_tin_dequeue_t(struct fq *, struct fq_tin *, struct fq_flow *); + +typedef void fq_skb_free_t(struct fq *, struct fq_tin *, struct fq_flow *, struct sk_buff *); + +typedef bool fq_skb_filter_t(struct fq *, struct fq_tin *, struct fq_flow *, struct sk_buff *, void *); + +enum mesh_path_flags { + MESH_PATH_ACTIVE = 1, + MESH_PATH_RESOLVING = 2, + MESH_PATH_SN_VALID = 4, + MESH_PATH_FIXED = 8, + MESH_PATH_RESOLVED = 16, + MESH_PATH_REQ_QUEUED = 32, + MESH_PATH_DELETED = 64, +}; + +struct mesh_path { + u8 dst[6]; + u8 mpp[6]; + struct rhash_head rhash; + struct hlist_node walk_list; + struct hlist_node gate_list; + struct ieee80211_sub_if_data *sdata; + struct sta_info *next_hop; + struct timer_list timer; + struct sk_buff_head frame_queue; + struct callback_head rcu; + u32 sn; + u32 metric; + u8 hop_count; + long unsigned int exp_time; + u32 discovery_timeout; + u8 discovery_retries; + enum 
mesh_path_flags flags; + spinlock_t state_lock; + u8 rann_snd_addr[6]; + u32 rann_metric; + long unsigned int last_preq_to_root; + long unsigned int fast_tx_check; + bool is_root; + bool is_gate; + u32 path_change_count; +}; + +enum ieee80211_encrypt { + ENCRYPT_NO = 0, + ENCRYPT_MGMT = 1, + ENCRYPT_DATA = 2, +}; + +typedef long unsigned int old_sigset_t; + +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + long unsigned int sa_flags; + __sigrestore_t sa_restorer; +}; + +enum { + TRACE_SIGNAL_DELIVERED = 0, + TRACE_SIGNAL_IGNORED = 1, + TRACE_SIGNAL_ALREADY_PENDING = 2, + TRACE_SIGNAL_OVERFLOW_FAIL = 3, + TRACE_SIGNAL_LOSE_INFO = 4, +}; + +struct trace_event_raw_signal_generate { + struct trace_entry ent; + int sig; + int errno; + int code; + char comm[16]; + pid_t pid; + int group; + int result; + char __data[0]; +}; + +struct trace_event_raw_signal_deliver { + struct trace_entry ent; + int sig; + int errno; + int code; + long unsigned int sa_handler; + long unsigned int sa_flags; + char __data[0]; +}; + +struct trace_event_data_offsets_signal_generate {}; + +struct trace_event_data_offsets_signal_deliver {}; + +typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, struct task_struct *, int, int); + +typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, struct k_sigaction *); + +enum sig_handler { + HANDLER_CURRENT = 0, + HANDLER_SIG_DFL = 1, + HANDLER_EXIT = 2, +}; + +struct pm_vt_switch { + struct list_head head; + struct device *dev; + bool required; +}; + +enum { + IRQ_STARTUP_NORMAL = 0, + IRQ_STARTUP_MANAGED = 1, + IRQ_STARTUP_ABORT = 2, +}; + +enum { + Q_REQUEUE_PI_NONE = 0, + Q_REQUEUE_PI_IGNORE = 1, + Q_REQUEUE_PI_IN_PROGRESS = 2, + Q_REQUEUE_PI_WAIT = 3, + Q_REQUEUE_PI_DONE = 4, + Q_REQUEUE_PI_LOCKED = 5, +}; + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE = 0, + RDMACG_RESOURCE_HCA_OBJECT = 1, + RDMACG_RESOURCE_MAX = 2, +}; + +struct rdma_cgroup { + struct cgroup_subsys_state css; + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +enum rdmacg_file_type { + RDMACG_RESOURCE_TYPE_MAX = 0, + RDMACG_RESOURCE_TYPE_STAT = 1, +}; + +struct rdmacg_resource { + int max; + int usage; +}; + +struct rdmacg_resource_pool { + struct rdmacg_device *device; + struct rdmacg_resource resources[2]; + struct list_head cg_node; + struct list_head dev_node; + long: 32; + u64 usage_sum; + int num_max_cnt; + long: 32; +}; + +struct tp_module { + struct list_head list; + struct module *mod; +}; + +enum tp_func_state { + TP_FUNC_0 = 0, + TP_FUNC_1 = 1, + TP_FUNC_2 = 2, + TP_FUNC_N = 3, +}; + +enum tp_transition_sync { + TP_TRANSITION_SYNC_1_0_1 = 0, + TP_TRANSITION_SYNC_N_2_1 = 1, + _NR_TP_TRANSITION_SYNC = 2, +}; + +struct tp_transition_snapshot { + long unsigned int rcu; + bool ongoing; +}; + +struct tp_probes { + struct callback_head rcu; + struct tracepoint_func probes[0]; +}; + +struct bpf_kfunc_desc { + struct btf_func_model func_model; + u32 func_id; + s32 imm; + u16 offset; + long unsigned int addr; +}; + +struct bpf_kfunc_desc_tab { + struct bpf_kfunc_desc descs[256]; + u32 nr_descs; +}; + +struct bpf_kfunc_btf { + struct btf *btf; + struct module *module; + u16 offset; +}; + +struct bpf_kfunc_btf_tab { + struct bpf_kfunc_btf descs[256]; + u32 nr_descs; +}; + +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + +struct bpf_verifier_stack_elem 
{ + struct bpf_verifier_state st; + int insn_idx; + int prev_insn_idx; + struct bpf_verifier_stack_elem *next; + u32 log_pos; +}; + +struct bpf_call_arg_meta { + struct bpf_map *map_ptr; + bool raw_mode; + bool pkt_access; + u8 release_regno; + int regno; + int access_size; + int mem_size; + long: 32; + u64 msize_max_value; + int ref_obj_id; + int dynptr_id; + int map_uid; + int func_id; + struct btf *btf; + u32 btf_id; + struct btf *ret_btf; + u32 ret_btf_id; + u32 subprogno; + struct btf_field *kptr_field; +}; + +struct bpf_kfunc_call_arg_meta { + struct btf *btf; + u32 func_id; + u32 kfunc_flags; + const struct btf_type *func_proto; + const char *func_name; + u32 ref_obj_id; + u8 release_regno; + bool r0_rdonly; + u32 ret_btf_id; + u64 r0_size; + u32 subprogno; + long: 32; + struct { + u64 value; + bool found; + long: 32; + } arg_constant; + struct btf *arg_btf; + u32 arg_btf_id; + bool arg_owning_ref; + struct { + struct btf_field *field; + } arg_list_head; + struct { + struct btf_field *field; + } arg_rbtree_root; + struct { + enum bpf_dynptr_type type; + u32 id; + u32 ref_obj_id; + } initialized_dynptr; + struct { + u8 spi; + u8 frameno; + } iter; + struct { + struct bpf_map *ptr; + int uid; + } map; + long: 32; + u64 mem_size; +}; + +enum reg_arg_type { + SRC_OP = 0, + DST_OP = 1, + DST_OP_NO_MARK = 2, +}; + +struct linked_reg { + u8 frameno; + union { + u8 spi; + u8 regno; + }; + bool is_reg; +}; + +struct linked_regs { + int cnt; + struct linked_reg entries[6]; +}; + +enum bpf_access_src { + ACCESS_DIRECT = 1, + ACCESS_HELPER = 2, +}; + +struct task_struct__safe_rcu { + const cpumask_t *cpus_ptr; + struct css_set *cgroups; + struct task_struct *real_parent; + struct task_struct *group_leader; +}; + +struct cgroup__safe_rcu { + struct kernfs_node *kn; +}; + +struct css_set__safe_rcu { + struct cgroup *dfl_cgrp; +}; + +struct mm_struct__safe_rcu_or_null { + struct file *exe_file; +}; + +struct sk_buff__safe_rcu_or_null { + struct sock *sk; +}; + +struct request_sock__safe_rcu_or_null { + struct sock *sk; +}; + +struct bpf_iter_meta__safe_trusted { + struct seq_file *seq; +}; + +struct bpf_iter__task__safe_trusted { + struct bpf_iter_meta *meta; + struct task_struct *task; +}; + +struct linux_binprm__safe_trusted { + struct file *file; +}; + +struct file__safe_trusted { + struct inode *f_inode; +}; + +struct dentry__safe_trusted { + struct inode *d_inode; +}; + +struct socket__safe_trusted_or_null { + struct sock *sk; +}; + +struct bpf_reg_types { + const enum bpf_reg_type types[10]; + u32 *btf_id; +}; + +enum { + AT_PKT_END = -1, + BEYOND_PKT_END = -2, +}; + +typedef int (*set_callee_state_fn)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *, int); + +enum { + KF_ARG_DYNPTR_ID = 0, + KF_ARG_LIST_HEAD_ID = 1, + KF_ARG_LIST_NODE_ID = 2, + KF_ARG_RB_ROOT_ID = 3, + KF_ARG_RB_NODE_ID = 4, + KF_ARG_WORKQUEUE_ID = 5, +}; + +enum kfunc_ptr_arg_type { + KF_ARG_PTR_TO_CTX = 0, + KF_ARG_PTR_TO_ALLOC_BTF_ID = 1, + KF_ARG_PTR_TO_REFCOUNTED_KPTR = 2, + KF_ARG_PTR_TO_DYNPTR = 3, + KF_ARG_PTR_TO_ITER = 4, + KF_ARG_PTR_TO_LIST_HEAD = 5, + KF_ARG_PTR_TO_LIST_NODE = 6, + KF_ARG_PTR_TO_BTF_ID = 7, + KF_ARG_PTR_TO_MEM = 8, + KF_ARG_PTR_TO_MEM_SIZE = 9, + KF_ARG_PTR_TO_CALLBACK = 10, + KF_ARG_PTR_TO_RB_ROOT = 11, + KF_ARG_PTR_TO_RB_NODE = 12, + KF_ARG_PTR_TO_NULL = 13, + KF_ARG_PTR_TO_CONST_STR = 14, + KF_ARG_PTR_TO_MAP = 15, + KF_ARG_PTR_TO_WORKQUEUE = 16, +}; + +enum special_kfunc_type { + KF_bpf_obj_new_impl = 0, + KF_bpf_obj_drop_impl = 1, + KF_bpf_refcount_acquire_impl = 
2, + KF_bpf_list_push_front_impl = 3, + KF_bpf_list_push_back_impl = 4, + KF_bpf_list_pop_front = 5, + KF_bpf_list_pop_back = 6, + KF_bpf_cast_to_kern_ctx = 7, + KF_bpf_rdonly_cast = 8, + KF_bpf_rcu_read_lock = 9, + KF_bpf_rcu_read_unlock = 10, + KF_bpf_rbtree_remove = 11, + KF_bpf_rbtree_add_impl = 12, + KF_bpf_rbtree_first = 13, + KF_bpf_dynptr_from_skb = 14, + KF_bpf_dynptr_from_xdp = 15, + KF_bpf_dynptr_slice = 16, + KF_bpf_dynptr_slice_rdwr = 17, + KF_bpf_dynptr_clone = 18, + KF_bpf_percpu_obj_new_impl = 19, + KF_bpf_percpu_obj_drop_impl = 20, + KF_bpf_throw = 21, + KF_bpf_wq_set_callback_impl = 22, + KF_bpf_preempt_disable = 23, + KF_bpf_preempt_enable = 24, + KF_bpf_iter_css_task_new = 25, + KF_bpf_session_cookie = 26, + KF_bpf_get_kmem_cache = 27, +}; + +enum { + REASON_BOUNDS = -1, + REASON_TYPE = -2, + REASON_PATHS = -3, + REASON_LIMIT = -4, + REASON_STACK = -5, +}; + +struct bpf_sanitize_info { + struct bpf_insn_aux_data aux; + bool mask_to_left; + long: 32; +}; + +enum { + DISCOVERED = 16, + EXPLORED = 32, + FALLTHROUGH = 1, + BRANCH = 2, +}; + +enum { + DONE_EXPLORING = 0, + KEEP_EXPLORING = 1, +}; + +enum exact_level { + NOT_EXACT = 0, + EXACT = 1, + RANGE_WITHIN = 2, +}; + +struct bpf_iter; + +struct uprobe { + struct rb_node rb_node; + refcount_t ref; + struct rw_semaphore register_rwsem; + struct rw_semaphore consumer_rwsem; + struct list_head pending_list; + struct list_head consumers; + struct inode *inode; + union { + struct callback_head rcu; + struct work_struct work; + }; + long: 32; + loff_t offset; + loff_t ref_ctr_offset; + long unsigned int flags; + struct arch_uprobe arch; + long: 32; +}; + +enum rp_check { + RP_CHECK_CALL = 0, + RP_CHECK_CHAIN_CALL = 1, + RP_CHECK_RET = 2, +}; + +struct xol_area { + wait_queue_head_t wq; + long unsigned int *bitmap; + struct page *page; + long unsigned int vaddr; +}; + +typedef struct { + struct srcu_struct *lock; + int idx; +} class_srcu_t; + +struct delayed_uprobe { + struct list_head list; + struct uprobe *uprobe; + struct mm_struct *mm; +}; + +struct __uprobe_key { + struct inode *inode; + long: 32; + loff_t offset; +}; + +struct map_info___2 { + struct map_info___2 *next; + struct mm_struct *mm; + long unsigned int vaddr; +}; + +typedef int folio_walk_flags_t; + +enum folio_walk_level { + FW_LEVEL_PTE = 0, + FW_LEVEL_PMD = 1, + FW_LEVEL_PUD = 2, +}; + +struct folio_walk { + struct page *page; + enum folio_walk_level level; + union { + pte_t *ptep; + pud_t *pudp; + pmd_t *pmdp; + }; + union { + pte_t pte; + pud_t pud; + pmd_t pmd; + }; + struct vm_area_struct *vma; + spinlock_t *ptl; +}; + +enum bio_post_read_step { + STEP_INITIAL = 0, + STEP_DECRYPT = 1, + STEP_VERITY = 2, + STEP_MAX = 3, +}; + +struct bio_post_read_ctx { + struct bio *bio; + struct work_struct work; + unsigned int cur_step; + unsigned int enabled_steps; +}; + +struct fuse_dirent { + uint64_t ino; + uint64_t off; + uint32_t namelen; + uint32_t type; + char name[0]; +}; + +struct fuse_direntplus { + struct fuse_entry_out entry_out; + struct fuse_dirent dirent; +}; + +enum fuse_parse_result { + FOUND_ERR = -1, + FOUND_NONE = 0, + FOUND_SOME = 1, + FOUND_ALL = 2, +}; + +struct falloc_range { + struct list_head list; + u64 start; + u64 len; +}; + +enum { + RANGE_BOUNDARY_WRITTEN_EXTENT = 0, + RANGE_BOUNDARY_PREALLOC_EXTENT = 1, + RANGE_BOUNDARY_HOLE = 2, +}; + +enum devcg_behavior { + DEVCG_DEFAULT_NONE = 0, + DEVCG_DEFAULT_ALLOW = 1, + DEVCG_DEFAULT_DENY = 2, +}; + +struct dev_exception_item { + u32 major; + u32 minor; + short int type; + short int access; + 
struct list_head list; + struct callback_head rcu; +}; + +struct dev_cgroup { + struct cgroup_subsys_state css; + struct list_head exceptions; + enum devcg_behavior behavior; + long: 32; +}; + +struct trace_event_raw_block_buffer { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + size_t size; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_block_rq_requeue { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + short unsigned int ioprio; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_block_rq_completion { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + int error; + short unsigned int ioprio; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + unsigned int bytes; + short unsigned int ioprio; + char rwbs[8]; + char comm[16]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_bio_complete { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_bio { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_block_plug { + struct trace_entry ent; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_unplug { + struct trace_entry ent; + int nr_rq; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_split { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + sector_t new_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_remap { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_rq_remap { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + unsigned int nr_bios; + char rwbs[8]; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_block_buffer {}; + +struct trace_event_data_offsets_block_rq_requeue { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_rq_completion { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_rq { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_bio_complete {}; + +struct trace_event_data_offsets_block_bio {}; + +struct trace_event_data_offsets_block_plug {}; + +struct trace_event_data_offsets_block_unplug {}; + +struct trace_event_data_offsets_block_split {}; + +struct trace_event_data_offsets_block_bio_remap {}; + +struct trace_event_data_offsets_block_rq_remap {}; + +typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_rq_requeue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_complete)(void *, struct request *, blk_status_t, unsigned int); + +typedef void (*btf_trace_block_rq_error)(void *, struct request *, blk_status_t, unsigned int); + +typedef void (*btf_trace_block_rq_insert)(void 
*, struct request *); + +typedef void (*btf_trace_block_rq_issue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_merge)(void *, struct request *); + +typedef void (*btf_trace_block_io_start)(void *, struct request *); + +typedef void (*btf_trace_block_io_done)(void *, struct request *); + +typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *); + +typedef void (*btf_trace_block_bio_bounce)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_backmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_frontmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_queue)(void *, struct bio *); + +typedef void (*btf_trace_block_getrq)(void *, struct bio *); + +typedef void (*btf_trace_block_plug)(void *, struct request_queue *); + +typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool); + +typedef void (*btf_trace_block_split)(void *, struct bio *, unsigned int); + +typedef void (*btf_trace_block_bio_remap)(void *, struct bio *, dev_t, sector_t); + +typedef void (*btf_trace_block_rq_remap)(void *, struct request *, dev_t, sector_t); + +struct io_nop { + struct file *file; + int result; + int fd; + int buffer; + unsigned int flags; +}; + +struct strarray { + char **array; + size_t n; +}; + +struct irq_glue { + struct irq_affinity_notify notify; + struct cpu_rmap *rmap; + u16 index; +}; + +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +struct clk_gated_fixed { + struct clk_gpio clk_gpio; + struct regulator *supply; + long unsigned int rate; +}; + +struct irq_info { + struct hlist_node node; + int irq; + spinlock_t lock; + struct list_head *head; +}; + +struct mdio_device_id { + __u32 phy_id; + __u32 phy_id_mask; +}; + +enum led_trigger_netdev_modes { + TRIGGER_NETDEV_LINK = 0, + TRIGGER_NETDEV_LINK_10 = 1, + TRIGGER_NETDEV_LINK_100 = 2, + TRIGGER_NETDEV_LINK_1000 = 3, + TRIGGER_NETDEV_LINK_2500 = 4, + TRIGGER_NETDEV_LINK_5000 = 5, + TRIGGER_NETDEV_LINK_10000 = 6, + TRIGGER_NETDEV_HALF_DUPLEX = 7, + TRIGGER_NETDEV_FULL_DUPLEX = 8, + TRIGGER_NETDEV_TX = 9, + TRIGGER_NETDEV_RX = 10, + TRIGGER_NETDEV_TX_ERR = 11, + TRIGGER_NETDEV_RX_ERR = 12, + __TRIGGER_NETDEV_MAX = 13, +}; + +struct rtl821x_priv { + u16 phycr1; + u16 phycr2; + bool has_phycr2; + struct clk *clk; +}; + +enum iwl_scd_cfg_actions { + SCD_CFG_DISABLE_QUEUE = 0, + SCD_CFG_ENABLE_QUEUE = 1, + SCD_CFG_UPDATE_QUEUE_TID = 2, +}; + +struct iwl_scd_txq_cfg_cmd { + u8 token; + u8 sta_id; + u8 tid; + u8 scd_queue; + u8 action; + u8 aggregate; + u8 tx_fifo; + u8 window; + __le16 ssn; + __le16 reserved; +}; + +struct iwl_cancel_channel_switch_cmd { + __le32 id; +}; + +enum iwl_sta_flags { + STA_FLG_REDUCED_TX_PWR_CTRL = 8, + STA_FLG_REDUCED_TX_PWR_DATA = 64, + STA_FLG_DISABLE_TX = 16, + STA_FLG_PS = 256, + STA_FLG_DRAIN_FLOW = 4096, + STA_FLG_PAN = 8192, + STA_FLG_CLASS_AUTH = 16384, + STA_FLG_CLASS_ASSOC = 32768, + STA_FLG_RTS_MIMO_PROT = 131072, + STA_FLG_MAX_AGG_SIZE_SHIFT = 19, + STA_FLG_MAX_AGG_SIZE_8K = 0, + STA_FLG_MAX_AGG_SIZE_16K = 524288, + STA_FLG_MAX_AGG_SIZE_32K = 1048576, + STA_FLG_MAX_AGG_SIZE_64K = 1572864, + STA_FLG_MAX_AGG_SIZE_128K = 2097152, + STA_FLG_MAX_AGG_SIZE_256K = 2621440, + STA_FLG_MAX_AGG_SIZE_512K = 3145728, + STA_FLG_MAX_AGG_SIZE_1024K = 3670016, + STA_FLG_MAX_AGG_SIZE_2M = 4194304, + STA_FLG_MAX_AGG_SIZE_4M = 4718592, + STA_FLG_MAX_AGG_SIZE_MSK = 7864320, + STA_FLG_AGG_MPDU_DENS_SHIFT = 23, + STA_FLG_AGG_MPDU_DENS_2US = 33554432, + STA_FLG_AGG_MPDU_DENS_4US = 41943040, + 
STA_FLG_AGG_MPDU_DENS_8US = 50331648, + STA_FLG_AGG_MPDU_DENS_16US = 58720256, + STA_FLG_AGG_MPDU_DENS_MSK = 58720256, + STA_FLG_FAT_EN_20MHZ = 0, + STA_FLG_FAT_EN_40MHZ = 67108864, + STA_FLG_FAT_EN_80MHZ = 134217728, + STA_FLG_FAT_EN_160MHZ = 201326592, + STA_FLG_FAT_EN_MSK = 201326592, + STA_FLG_MIMO_EN_SISO = 0, + STA_FLG_MIMO_EN_MIMO2 = 268435456, + STA_FLG_MIMO_EN_MIMO3 = 536870912, + STA_FLG_MIMO_EN_MSK = 805306368, +}; + +enum iwl_sta_modify_flag { + STA_MODIFY_QUEUE_REMOVAL = 1, + STA_MODIFY_TID_DISABLE_TX = 2, + STA_MODIFY_UAPSD_ACS = 4, + STA_MODIFY_ADD_BA_TID = 8, + STA_MODIFY_REMOVE_BA_TID = 16, + STA_MODIFY_SLEEPING_STA_TX_COUNT = 32, + STA_MODIFY_PROT_TH = 64, + STA_MODIFY_QUEUES = 128, +}; + +enum iwl_sta_mode { + STA_MODE_ADD = 0, + STA_MODE_MODIFY = 1, +}; + +enum iwl_sta_sleep_flag { + STA_SLEEP_STATE_AWAKE = 0, + STA_SLEEP_STATE_PS_POLL = 1, + STA_SLEEP_STATE_UAPSD = 2, + STA_SLEEP_STATE_MOREDATA = 4, +}; + +struct iwl_mvm_add_sta_cmd { + u8 add_modify; + u8 awake_acs; + __le16 tid_disable_tx; + __le32 mac_id_n_color; + u8 addr[6]; + __le16 reserved2; + u8 sta_id; + u8 modify_mask; + __le16 reserved3; + __le32 station_flags; + __le32 station_flags_msk; + u8 add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; + __le16 sleep_tx_count; + u8 sleep_state_flags; + u8 station_type; + __le16 assoc_id; + __le16 beamform_flags; + __le32 tfd_queue_msk; + __le16 rx_ba_window; + u8 sp_length; + u8 uapsd_acs; +}; + +struct iwl_mvm_add_sta_key_common { + u8 sta_id; + u8 key_offset; + __le16 key_flags; + u8 key[32]; + u8 rx_secur_seq_cnt[16]; +}; + +struct iwl_mvm_add_sta_key_cmd_v1 { + struct iwl_mvm_add_sta_key_common common; + u8 tkip_rx_tsc_byte2; + u8 reserved; + __le16 tkip_rx_ttak[5]; +}; + +struct iwl_mvm_add_sta_key_cmd { + struct iwl_mvm_add_sta_key_common common; + __le64 rx_mic_key; + __le64 tx_mic_key; + __le64 transmit_seq_cnt; +}; + +enum iwl_mvm_add_sta_rsp_status { + ADD_STA_SUCCESS = 1, + ADD_STA_STATIONS_OVERLOAD = 2, + ADD_STA_IMMEDIATE_BA_FAILURE = 4, + ADD_STA_MODIFY_NON_EXISTING_STA = 8, +}; + +struct iwl_mvm_rm_sta_cmd { + u8 sta_id; + u8 reserved[3]; +}; + +struct iwl_mvm_mgmt_mcast_key_cmd_v1 { + __le32 ctrl_flags; + u8 igtk[16]; + u8 k1[16]; + u8 k2[16]; + __le32 key_id; + __le32 sta_id; + __le64 receive_seq_cnt; +}; + +struct iwl_mvm_eosp_notification { + __le32 remain_frame_count; + __le32 sta_id; +}; + +enum rx_crypto { + RX_CRYPTO_SUCCESS = 0, + RX_CRYPTO_FAIL_ICV = 1, + RX_CRYPTO_FAIL_MIC = 2, + RX_CRYPTO_FAIL_KEY = 3, +}; + +enum rt2x00usb_mode_offset { + USB_MODE_RESET = 1, + USB_MODE_UNPLUG = 2, + USB_MODE_FUNCTION = 3, + USB_MODE_TEST = 4, + USB_MODE_SLEEP = 7, + USB_MODE_FIRMWARE = 8, + USB_MODE_WAKEUP = 9, + USB_MODE_AUTORUN = 17, +}; + +struct input_seq_state { + short unsigned int pos; + bool mutex_acquired; + int input_devices_state; +}; + +struct input_devres { + struct input_dev *input; +}; + +struct ptp_clock_caps { + int max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int pps; + int n_pins; + int cross_timestamping; + int adjust_phase; + int max_phase_adj; + int rsv[11]; +}; + +struct ptp_sys_offset { + unsigned int n_samples; + unsigned int rsv[3]; + struct ptp_clock_time ts[51]; +}; + +struct ptp_sys_offset_extended { + unsigned int n_samples; + __kernel_clockid_t clockid; + unsigned int rsv[2]; + struct ptp_clock_time ts[75]; +}; + +struct ptp_sys_offset_precise { + struct ptp_clock_time device; + struct ptp_clock_time sys_realtime; + struct ptp_clock_time sys_monoraw; + unsigned int rsv[4]; +}; + 
+struct userspace_policy { + unsigned int is_managed; + unsigned int setspeed; + struct mutex mutex; +}; + +typedef struct { + u32 version; + u32 num_entries; + u32 desc_size; + u32 flags; + efi_memory_desc_t entry[0]; +} efi_memory_attributes_table_t; + +struct dst_cache_pcpu { + long unsigned int refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; + +struct linkstate_reply_data { + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + struct ethtool_link_ext_stats link_stats; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; + long: 32; +}; + +struct nf_log_buf { + unsigned int count; + char buf[1020]; +}; + +struct xt_tcp { + __u16 spts[2]; + __u16 dpts[2]; + __u8 option; + __u8 flg_mask; + __u8 flg_cmp; + __u8 invflags; +}; + +struct xt_udp { + __u16 spts[2]; + __u16 dpts[2]; + __u8 invflags; +}; + +struct ipt_icmp { + __u8 type; + __u8 code[2]; + __u8 invflags; +}; + +struct ip6t_icmp { + __u8 type; + __u8 code[2]; + __u8 invflags; +}; + +struct icmp_filter { + __u32 data; +}; + +struct raw_sock { + struct inet_sock inet; + struct icmp_filter filter; + u32 ipmr_table; +}; + +struct raw_frag_vec { + struct msghdr *msg; + union { + struct icmphdr icmph; + char c[1]; + } hdr; + int hlen; +}; + +struct ifaddrlblmsg { + __u8 ifal_family; + __u8 __ifal_reserved; + __u8 ifal_prefixlen; + __u8 ifal_flags; + __u32 ifal_index; + __u32 ifal_seq; +}; + +enum { + IFAL_ADDRESS = 1, + IFAL_LABEL = 2, + __IFAL_MAX = 3, +}; + +struct ip6addrlbl_entry { + struct in6_addr prefix; + int prefixlen; + int ifindex; + int addrtype; + u32 label; + struct hlist_node list; + struct callback_head rcu; +}; + +struct ip6addrlbl_init_table { + const struct in6_addr *prefix; + int prefixlen; + u32 label; +}; + +struct nft_ct_frag6_pernet { + struct ctl_table_header *nf_frag_frags_hdr; + struct fqdir *fqdir; +}; + +enum rfkill_type { + RFKILL_TYPE_ALL = 0, + RFKILL_TYPE_WLAN = 1, + RFKILL_TYPE_BLUETOOTH = 2, + RFKILL_TYPE_UWB = 3, + RFKILL_TYPE_WIMAX = 4, + RFKILL_TYPE_WWAN = 5, + RFKILL_TYPE_GPS = 6, + RFKILL_TYPE_FM = 7, + RFKILL_TYPE_NFC = 8, + NUM_RFKILL_TYPES = 9, +}; + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute pop +#endif + +#endif /* __VMLINUX_H__ */ diff --git a/selftest/struct-ops/dep/include/arch/arm64/vmlinux-v6.13-rc1-g353b2a7de2a3.h b/selftest/struct-ops/dep/include/arch/arm64/vmlinux-v6.13-rc1-g353b2a7de2a3.h new file mode 100644 index 00000000..fda07658 --- /dev/null +++ b/selftest/struct-ops/dep/include/arch/arm64/vmlinux-v6.13-rc1-g353b2a7de2a3.h @@ -0,0 +1,102528 @@ +#ifndef __VMLINUX_H__ +#define __VMLINUX_H__ + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record) +#endif + +typedef unsigned char __u8; + +typedef short unsigned int __u16; + +typedef int __s32; + +typedef unsigned int __u32; + +typedef long long int __s64; + +typedef long long unsigned int __u64; + +typedef __u8 u8; + +typedef __u16 u16; + +typedef __s32 s32; + +typedef __u32 u32; + +typedef __s64 s64; + +typedef __u64 u64; + +enum { + false = 0, + true = 1, +}; + +typedef long int __kernel_long_t; + +typedef long unsigned int __kernel_ulong_t; + +typedef int __kernel_pid_t; + +typedef unsigned int __kernel_uid32_t; + +typedef __kernel_ulong_t __kernel_size_t; + +typedef long long int __kernel_time64_t; + +typedef __kernel_long_t __kernel_clock_t; + +typedef int __kernel_timer_t; + +typedef int 
__kernel_clockid_t; + +typedef __kernel_pid_t pid_t; + +typedef __kernel_clockid_t clockid_t; + +typedef _Bool bool; + +typedef __kernel_size_t size_t; + +typedef s64 ktime_t; + +typedef struct { + int counter; +} atomic_t; + +typedef struct { + s64 counter; +} atomic64_t; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct hlist_node; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +typedef atomic64_t atomic_long_t; + +struct __kernel_timespec { + __kernel_time64_t tv_sec; + long long int tv_nsec; +}; + +struct qspinlock { + union { + atomic_t val; + struct { + u16 tail; + u16 locked_pending; + }; + struct { + u8 reserved[2]; + u8 pending; + u8 locked; + }; + }; +}; + +typedef struct qspinlock arch_spinlock_t; + +struct raw_spinlock { + arch_spinlock_t raw_lock; +}; + +typedef struct raw_spinlock raw_spinlock_t; + +struct cpumask { + long unsigned int bits[128]; +}; + +typedef struct cpumask cpumask_t; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct pollfd; + +struct restart_block { + long unsigned int arch_data; + long int (*fn)(struct restart_block *); + union { + struct { + u32 *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 *uaddr2; + } futex; + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec *rmtp; + struct old_timespec32 *compat_rmtp; + }; + u64 expires; + } nanosleep; + struct { + struct pollfd *ufds; + int nfds; + int has_timeout; + long unsigned int tv_sec; + long unsigned int tv_nsec; + } poll; + }; +}; + +struct thread_info { + long unsigned int flags; + union { + u64 preempt_count; + struct { + u32 need_resched; + u32 count; + } preempt; + }; + u32 cpu; +}; + +struct refcount_struct { + atomic_t refs; +}; + +typedef struct refcount_struct refcount_t; + +struct llist_node { + struct llist_node *next; +}; + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; + u16 src; + u16 dst; +}; + +struct load_weight { + long unsigned int weight; + u32 inv_weight; +}; + +struct rb_node { + long unsigned int __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + long unsigned int load_avg; + long unsigned int runnable_avg; + long unsigned int util_avg; + unsigned int util_est; +}; + +struct cfs_rq; + +struct sched_entity { + struct load_weight load; + struct rb_node run_node; + u64 deadline; + u64 min_vruntime; + u64 min_slice; + struct list_head group_node; + unsigned char on_rq; + unsigned char sched_delayed; + unsigned char rel_deadline; + unsigned char custom_slice; + u64 exec_start; + u64 sum_exec_runtime; + u64 prev_sum_exec_runtime; + u64 vruntime; + s64 vlag; + u64 slice; + u64 nr_migrations; + int depth; + struct sched_entity *parent; + struct cfs_rq *cfs_rq; + struct cfs_rq *my_q; + long unsigned int runnable_weight; + long: 64; + struct sched_avg avg; +}; + +struct sched_rt_entity { + struct list_head run_list; + long unsigned int timeout; + long unsigned int 
watchdog_stamp; + unsigned int time_slice; + short unsigned int on_rq; + short unsigned int on_list; + struct sched_rt_entity *back; +}; + +struct timerqueue_node { + struct rb_node node; + ktime_t expires; +}; + +enum hrtimer_restart { + HRTIMER_NORESTART = 0, + HRTIMER_RESTART = 1, +}; + +struct hrtimer_clock_base; + +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; +}; + +struct sched_dl_entity; + +typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); + +struct task_struct; + +typedef struct task_struct * (*dl_server_pick_f)(struct sched_dl_entity *); + +struct rq; + +struct sched_dl_entity { + struct rb_node rb_node; + u64 dl_runtime; + u64 dl_deadline; + u64 dl_period; + u64 dl_bw; + u64 dl_density; + s64 runtime; + u64 deadline; + unsigned int flags; + unsigned int dl_throttled: 1; + unsigned int dl_yielded: 1; + unsigned int dl_non_contending: 1; + unsigned int dl_overrun: 1; + unsigned int dl_server: 1; + unsigned int dl_defer: 1; + unsigned int dl_defer_armed: 1; + unsigned int dl_defer_running: 1; + struct hrtimer dl_timer; + struct hrtimer inactive_timer; + struct rq *rq; + dl_server_has_tasks_f server_has_tasks; + dl_server_pick_f server_pick_task; + struct sched_dl_entity *pi_se; +}; + +struct scx_dsq_list_node { + struct list_head node; + u32 flags; + u32 priv; +}; + +struct scx_dispatch_q; + +struct cgroup; + +struct sched_ext_entity { + struct scx_dispatch_q *dsq; + struct scx_dsq_list_node dsq_list; + struct rb_node dsq_priq; + u32 dsq_seq; + u32 dsq_flags; + u32 flags; + u32 weight; + s32 sticky_cpu; + s32 holding_cpu; + u32 kf_mask; + struct task_struct *kf_tasks[2]; + atomic_long_t ops_state; + struct list_head runnable_node; + long unsigned int runnable_at; + u64 core_sched_at; + u64 ddsp_dsq_id; + u64 ddsp_enq_flags; + u64 slice; + u64 dsq_vtime; + bool disallow; + struct cgroup *cgrp_moving_from; + struct list_head tasks_node; +}; + +struct sched_statistics {}; + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; + u8 need_mb; + } b; + u32 s; +}; + +struct sched_info { + long unsigned int pcount; + long long unsigned int run_delay; + long long unsigned int last_arrival; + long long unsigned int last_queued; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +struct prev_cputime { + u64 utime; + u64 stime; + raw_spinlock_t lock; +}; + +struct rb_root { + struct rb_node *rb_node; +}; + +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +struct posix_cputimers { + struct posix_cputimer_base bases[3]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +struct optimistic_spin_queue { + atomic_t tail; +}; + +struct mutex { + atomic_long_t owner; + raw_spinlock_t wait_lock; + struct optimistic_spin_queue osq; + struct list_head wait_list; +}; + +struct posix_cputimers_work { + struct callback_head work; + struct mutex mutex; + unsigned int scheduled; +}; + +struct sem_undo_list; + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +struct sysv_shm { + struct list_head shm_clist; +}; + +typedef struct { + long unsigned int sig[1]; +} sigset_t; + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + 
+struct seccomp_filter; + +struct seccomp { + int mode; + atomic_t filter_count; + struct seccomp_filter *filter; +}; + +struct syscall_user_dispatch {}; + +struct spinlock { + union { + struct raw_spinlock rlock; + }; +}; + +typedef struct spinlock spinlock_t; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_io_accounting { + u64 rchar; + u64 wchar; + u64 syscr; + u64 syscw; + u64 read_bytes; + u64 write_bytes; + u64 cancelled_write_bytes; +}; + +typedef struct { + long unsigned int bits[1]; +} nodemask_t; + +struct seqcount { + unsigned int sequence; +}; + +typedef struct seqcount seqcount_t; + +struct seqcount_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_spinlock seqcount_spinlock_t; + +struct arch_tlbflush_unmap_batch {}; + +struct tlbflush_unmap_batch { + struct arch_tlbflush_unmap_batch arch; + bool flush_required; + bool writable; +}; + +struct page; + +struct page_frag { + struct page *page; + __u32 offset; + __u32 size; +}; + +struct kmap_ctrl {}; + +struct timer_list { + struct hlist_node entry; + long unsigned int expires; + void (*function)(struct timer_list *); + u32 flags; +}; + +struct llist_head { + struct llist_node *first; +}; + +struct cpu_context { + long unsigned int x19; + long unsigned int x20; + long unsigned int x21; + long unsigned int x22; + long unsigned int x23; + long unsigned int x24; + long unsigned int x25; + long unsigned int x26; + long unsigned int x27; + long unsigned int x28; + long unsigned int fp; + long unsigned int sp; + long unsigned int pc; +}; + +struct user_fpsimd_state { + __int128 unsigned vregs[32]; + __u32 fpsr; + __u32 fpcr; + __u32 __reserved[2]; +}; + +enum fp_type { + FP_STATE_CURRENT = 0, + FP_STATE_FPSIMD = 1, + FP_STATE_SVE = 2, +}; + +struct perf_event; + +struct debug_info { + int suspended_step; + int bps_disabled; + int wps_disabled; + struct perf_event *hbp_break[16]; + struct perf_event *hbp_watch[16]; +}; + +struct ptrauth_key { + long unsigned int lo; + long unsigned int hi; +}; + +struct ptrauth_keys_user { + struct ptrauth_key apia; + struct ptrauth_key apib; + struct ptrauth_key apda; + struct ptrauth_key apdb; + struct ptrauth_key apga; +}; + +struct ptrauth_keys_kernel { + struct ptrauth_key apia; +}; + +struct thread_struct { + struct cpu_context cpu_context; + long: 64; + struct { + long unsigned int tp_value; + long unsigned int tp2_value; + u64 fpmr; + long unsigned int pad; + struct user_fpsimd_state fpsimd_state; + } uw; + enum fp_type fp_type; + unsigned int fpsimd_cpu; + void *sve_state; + void *sme_state; + unsigned int vl[2]; + unsigned int vl_onexec[2]; + long unsigned int fault_address; + long unsigned int fault_code; + struct debug_info debug; + long: 64; + struct user_fpsimd_state kernel_fpsimd_state; + unsigned int kernel_fpsimd_cpu; + struct ptrauth_keys_user keys_user; + struct ptrauth_keys_kernel keys_kernel; + u64 mte_ctrl; + u64 sctlr_user; + u64 svcr; + u64 tpidr2_el0; + u64 por_el0; +}; + +struct sched_class; + +struct task_group; + +struct rcu_node; + +struct mm_struct; + +struct address_space; + +struct pid; + +struct completion; + +struct cred; + +struct key; + +struct nameidata; + +struct fs_struct; + +struct files_struct; + +struct io_uring_task; + +struct nsproxy; + +struct signal_struct; + +struct sighand_struct; + +struct rt_mutex_waiter; + +struct bio_list; + +struct blk_plug; + +struct reclaim_state; + +struct io_context; + +struct capture_control; + +struct kernel_siginfo; + +typedef struct kernel_siginfo kernel_siginfo_t; + +struct css_set; + 
+struct robust_list_head; + +struct futex_pi_state; + +struct perf_event_context; + +struct rseq; + +struct pipe_inode_info; + +struct task_delay_info; + +struct mem_cgroup; + +struct obj_cgroup; + +struct gendisk; + +struct uprobe_task; + +struct vm_struct; + +struct bpf_local_storage; + +struct bpf_run_ctx; + +struct bpf_net_context; + +struct task_struct { + struct thread_info thread_info; + unsigned int __state; + unsigned int saved_state; + void *stack; + refcount_t usage; + unsigned int flags; + unsigned int ptrace; + int on_cpu; + struct __call_single_node wake_entry; + unsigned int wakee_flips; + long unsigned int wakee_flip_decay_ts; + struct task_struct *last_wakee; + int recent_used_cpu; + int wake_cpu; + int on_rq; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + struct sched_entity se; + struct sched_rt_entity rt; + struct sched_dl_entity dl; + struct sched_dl_entity *dl_server; + struct sched_ext_entity scx; + const struct sched_class *sched_class; + struct rb_node core_node; + long unsigned int core_cookie; + unsigned int core_occupation; + struct task_group *sched_task_group; + long: 64; + struct sched_statistics stats; + struct hlist_head preempt_notifiers; + unsigned int btrace_seq; + unsigned int policy; + long unsigned int max_allowed_capacity; + int nr_cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t *user_cpus_ptr; + cpumask_t cpus_mask; + void *migration_pending; + short unsigned int migration_disabled; + short unsigned int migration_flags; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; + long unsigned int rcu_tasks_nvcsw; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; + int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_exit_cpu; + struct list_head rcu_tasks_exit_list; + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + struct list_head trc_holdout_list; + struct list_head trc_blkd_node; + int trc_blkd_cpu; + struct sched_info sched_info; + struct list_head tasks; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct address_space *faults_disabled_mapping; + int exit_state; + int exit_code; + int exit_signal; + int pdeath_signal; + long unsigned int jobctl; + unsigned int personality; + unsigned int sched_reset_on_fork: 1; + unsigned int sched_contributes_to_load: 1; + unsigned int sched_migrated: 1; + long: 29; + unsigned int sched_remote_wakeup: 1; + unsigned int sched_rt_mutex: 1; + unsigned int in_execve: 1; + unsigned int in_iowait: 1; + unsigned int no_cgroup_migration: 1; + unsigned int frozen: 1; + unsigned int use_memdelay: 1; + unsigned int in_memstall: 1; + unsigned int in_eventfd: 1; + unsigned int in_thrashing: 1; + long unsigned int atomic_flags; + struct restart_block restart_block; + pid_t pid; + pid_t tgid; + long unsigned int stack_canary; + struct task_struct *real_parent; + struct task_struct *parent; + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + struct list_head ptraced; + struct list_head ptrace_entry; + struct pid *thread_pid; + struct hlist_node pid_links[4]; + struct list_head thread_node; + struct completion *vfork_done; + int *set_child_tid; + int *clear_child_tid; + void *worker_private; + u64 utime; + u64 stime; + u64 gtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int 
nivcsw; + u64 start_time; + u64 start_boottime; + long unsigned int min_flt; + long unsigned int maj_flt; + struct posix_cputimers posix_cputimers; + struct posix_cputimers_work posix_cputimers_work; + const struct cred *ptracer_cred; + const struct cred *real_cred; + const struct cred *cred; + struct key *cached_requested_key; + char comm[16]; + struct nameidata *nameidata; + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; + long unsigned int last_switch_count; + long unsigned int last_switch_time; + struct fs_struct *fs; + struct files_struct *files; + struct io_uring_task *io_uring; + struct nsproxy *nsproxy; + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + sigset_t saved_sigmask; + struct sigpending pending; + long unsigned int sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + struct callback_head *task_works; + struct seccomp seccomp; + struct syscall_user_dispatch syscall_dispatch; + u64 parent_exec_id; + u64 self_exec_id; + spinlock_t alloc_lock; + raw_spinlock_t pi_lock; + struct wake_q_node wake_q; + struct rb_root_cached pi_waiters; + struct task_struct *pi_top_task; + struct rt_mutex_waiter *pi_blocked_on; + void *journal_info; + struct bio_list *bio_list; + struct blk_plug *plug; + struct reclaim_state *reclaim_state; + struct io_context *io_context; + struct capture_control *capture_control; + long unsigned int ptrace_message; + kernel_siginfo_t *last_siginfo; + struct task_io_accounting ioac; + unsigned int psi_flags; + u64 acct_rss_mem1; + u64 acct_vm_mem1; + u64 acct_timexpd; + nodemask_t mems_allowed; + seqcount_spinlock_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + struct css_set *cgroups; + struct list_head cg_list; + struct robust_list_head *robust_list; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; + u8 perf_recursion[4]; + struct perf_event_context *perf_event_ctxp; + struct mutex perf_event_mutex; + struct list_head perf_event_list; + struct rseq *rseq; + u32 rseq_len; + u32 rseq_sig; + long unsigned int rseq_event_mask; + int mm_cid; + int last_mm_cid; + int migrate_from_cpu; + int mm_cid_active; + struct callback_head cid_work; + struct tlbflush_unmap_batch tlb_ubc; + struct pipe_inode_info *splice_pipe; + struct page_frag task_frag; + struct task_delay_info *delays; + int nr_dirtied; + int nr_dirtied_pause; + long unsigned int dirty_paused_when; + u64 timer_slack_ns; + u64 default_timer_slack_ns; + unsigned int kasan_depth; + int curr_ret_stack; + int curr_ret_depth; + long unsigned int *ret_stack; + long long unsigned int ftrace_timestamp; + long long unsigned int ftrace_sleeptime; + atomic_t trace_overrun; + atomic_t tracing_graph_pause; + long unsigned int trace_recursion; + unsigned int memcg_nr_pages_over_high; + struct mem_cgroup *active_memcg; + struct obj_cgroup *objcg; + struct gendisk *throttle_disk; + struct uprobe_task *utask; + struct kmap_ctrl kmap_ctrl; + struct callback_head rcu; + refcount_t rcu_users; + int pagefault_disabled; + struct task_struct *oom_reaper_list; + struct timer_list oom_reaper_timer; + struct vm_struct *stack_vm_area; + refcount_t stack_refcount; + struct bpf_local_storage *bpf_storage; + struct bpf_run_ctx *bpf_ctx; + struct bpf_net_context *bpf_net_context; + struct llist_head kretprobe_instances; + long: 64; + struct thread_struct thread; + long: 64; + long: 64; +}; + +union sigval { + int sival_int; + void *sival_ptr; +}; + +typedef union sigval 
sigval_t; + +union __sifields { + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + } _kill; + struct { + __kernel_timer_t _tid; + int _overrun; + sigval_t _sigval; + int _sys_private; + } _timer; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + sigval_t _sigval; + } _rt; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + int _status; + __kernel_clock_t _utime; + __kernel_clock_t _stime; + } _sigchld; + struct { + void *_addr; + union { + int _trapno; + short int _addr_lsb; + struct { + char _dummy_bnd[8]; + void *_lower; + void *_upper; + } _addr_bnd; + struct { + char _dummy_pkey[8]; + __u32 _pkey; + } _addr_pkey; + struct { + long unsigned int _data; + __u32 _type; + __u32 _flags; + } _perf; + }; + } _sigfault; + struct { + long int _band; + int _fd; + } _sigpoll; + struct { + void *_call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; +}; + +struct kernel_siginfo { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; +}; + +struct rseq { + __u32 cpu_id_start; + __u32 cpu_id; + __u64 rseq_cs; + __u32 flags; + __u32 node_id; + __u32 mm_cid; + char end[0]; +}; + +struct rhash_head { + struct rhash_head *next; +}; + +struct scx_dispatch_q { + raw_spinlock_t lock; + struct list_head list; + struct rb_root priq; + u32 nr; + u32 seq; + u64 id; + struct rhash_head hash_node; + struct llist_node free_node; + struct callback_head rcu; +}; + +struct rq_flags; + +struct affinity_context; + +struct sched_class { + void (*enqueue_task)(struct rq *, struct task_struct *, int); + bool (*dequeue_task)(struct rq *, struct task_struct *, int); + void (*yield_task)(struct rq *); + bool (*yield_to_task)(struct rq *, struct task_struct *); + void (*wakeup_preempt)(struct rq *, struct task_struct *, int); + int (*balance)(struct rq *, struct task_struct *, struct rq_flags *); + struct task_struct * (*pick_task)(struct rq *); + struct task_struct * (*pick_next_task)(struct rq *, struct task_struct *); + void (*put_prev_task)(struct rq *, struct task_struct *, struct task_struct *); + void (*set_next_task)(struct rq *, struct task_struct *, bool); + int (*select_task_rq)(struct task_struct *, int, int); + void (*migrate_task_rq)(struct task_struct *, int); + void (*task_woken)(struct rq *, struct task_struct *); + void (*set_cpus_allowed)(struct task_struct *, struct affinity_context *); + void (*rq_online)(struct rq *); + void (*rq_offline)(struct rq *); + struct rq * (*find_lock_rq)(struct task_struct *, struct rq *); + void (*task_tick)(struct rq *, struct task_struct *, int); + void (*task_fork)(struct task_struct *); + void (*task_dead)(struct task_struct *); + void (*switching_to)(struct rq *, struct task_struct *); + void (*switched_from)(struct rq *, struct task_struct *); + void (*switched_to)(struct rq *, struct task_struct *); + void (*reweight_task)(struct rq *, struct task_struct *, const struct load_weight *); + void (*prio_changed)(struct rq *, struct task_struct *, int); + unsigned int (*get_rr_interval)(struct rq *, struct task_struct *); + void (*update_curr)(struct rq *); + void (*task_change_group)(struct task_struct *); + int (*task_is_throttled)(struct task_struct *, int); +}; + +typedef __kernel_uid32_t uid_t; + +typedef struct { + uid_t val; +} kuid_t; + +typedef unsigned int __kernel_gid32_t; + +typedef __kernel_gid32_t gid_t; + +typedef struct { + gid_t val; +} kgid_t; + +typedef struct { + u64 val; +} kernel_cap_t; + +struct user_struct; + +struct user_namespace; + +struct ucounts; + +struct group_info; + 
+struct cred { + atomic_long_t usage; + kuid_t uid; + kgid_t gid; + kuid_t suid; + kgid_t sgid; + kuid_t euid; + kgid_t egid; + kuid_t fsuid; + kgid_t fsgid; + unsigned int securebits; + kernel_cap_t cap_inheritable; + kernel_cap_t cap_permitted; + kernel_cap_t cap_effective; + kernel_cap_t cap_bset; + kernel_cap_t cap_ambient; + unsigned char jit_keyring; + struct key *session_keyring; + struct key *process_keyring; + struct key *thread_keyring; + struct key *request_key_auth; + struct user_struct *user; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct group_info *group_info; + union { + int non_rcu; + struct callback_head rcu; + }; +}; + +typedef signed char __s8; + +typedef __s8 s8; + +typedef __kernel_long_t __kernel_ssize_t; + +typedef long long int __kernel_loff_t; + +typedef __u32 __le32; + +typedef __u64 __le64; + +typedef unsigned int __poll_t; + +typedef u32 __kernel_dev_t; + +typedef __kernel_dev_t dev_t; + +typedef short unsigned int umode_t; + +typedef __kernel_loff_t loff_t; + +typedef __kernel_ssize_t ssize_t; + +typedef s32 int32_t; + +typedef u32 uint32_t; + +typedef u64 sector_t; + +typedef u64 blkcnt_t; + +typedef unsigned int gfp_t; + +typedef unsigned int fmode_t; + +typedef u64 phys_addr_t; + +typedef void (*ctor_fn_t)(); + +struct lock_class_key {}; + +struct fs_context; + +struct fs_parameter_spec; + +struct dentry; + +struct super_block; + +struct module; + +struct file_system_type { + const char *name; + int fs_flags; + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); + void (*kill_sb)(struct super_block *); + struct module *owner; + struct file_system_type *next; + struct hlist_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[3]; + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; + struct lock_class_key i_mutex_dir_key; +}; + +struct jump_entry { + s32 code; + s32 target; + long int key; +}; + +struct static_key_mod; + +struct static_key { + atomic_t enabled; + union { + long unsigned int type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; + +struct static_key_true { + struct static_key key; +}; + +struct static_key_false { + struct static_key key; +}; + +struct qrwlock { + union { + atomic_t cnts; + struct { + u8 __lstate[3]; + u8 wlocked; + }; + }; + arch_spinlock_t wait_lock; +}; + +typedef struct qrwlock arch_rwlock_t; + +struct lockdep_map {}; + +struct ratelimit_state { + raw_spinlock_t lock; + int interval; + int burst; + int printed; + int missed; + unsigned int flags; + long unsigned int begin; +}; + +struct _ddebug { + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno: 18; + unsigned int class_id: 6; + unsigned int flags: 8; + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; +}; + +enum class_map_type { + DD_CLASS_TYPE_DISJOINT_BITS = 0, + DD_CLASS_TYPE_LEVEL_NUM = 1, + DD_CLASS_TYPE_DISJOINT_NAMES = 2, + DD_CLASS_TYPE_LEVEL_NAMES = 3, +}; + +struct ddebug_class_map { + struct list_head link; + struct module *mod; + const char *mod_name; + const char **class_names; + const int length; + const int base; + enum class_map_type map_type; +}; + +enum module_state { + MODULE_STATE_LIVE = 0, + 
MODULE_STATE_COMING = 1, + MODULE_STATE_GOING = 2, + MODULE_STATE_UNFORMED = 3, +}; + +struct kref { + refcount_t refcount; +}; + +struct kset; + +struct kobj_type; + +struct kernfs_node; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + const struct kobj_type *ktype; + struct kernfs_node *sd; + struct kref kref; + unsigned int state_initialized: 1; + unsigned int state_in_sysfs: 1; + unsigned int state_add_uevent_sent: 1; + unsigned int state_remove_uevent_sent: 1; + unsigned int uevent_suppress: 1; +}; + +struct module_param_attrs; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +}; + +struct latch_tree_node { + struct rb_node node[2]; +}; + +struct mod_tree_node { + struct module *mod; + struct latch_tree_node node; +}; + +struct module_memory { + void *base; + void *rw_copy; + bool is_rox; + unsigned int size; + struct mod_tree_node mtn; +}; + +struct mod_plt_sec { + int plt_shndx; + int plt_num_entries; + int plt_max_entries; +}; + +struct plt_entry; + +struct mod_arch_specific { + struct mod_plt_sec core; + struct mod_plt_sec init; + struct plt_entry *ftrace_trampolines; +}; + +struct elf64_sym; + +typedef struct elf64_sym Elf64_Sym; + +struct mod_kallsyms { + Elf64_Sym *symtab; + unsigned int num_symtab; + char *strtab; + char *typetab; +}; + +typedef const int tracepoint_ptr_t; + +struct _ddebug_info { + struct _ddebug *descs; + struct ddebug_class_map *classes; + unsigned int num_descs; + unsigned int num_classes; +}; + +struct module_attribute; + +struct kernel_symbol; + +struct kernel_param; + +struct exception_table_entry; + +struct bug_entry; + +struct module_sect_attrs; + +struct module_notes_attrs; + +struct srcu_struct; + +struct bpf_raw_event_map; + +struct trace_event_call; + +struct trace_eval_map; + +struct error_injection_entry; + +struct module { + enum module_state state; + struct list_head list; + char name[56]; + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + const struct kernel_symbol *syms; + const s32 *crcs; + unsigned int num_syms; + struct mutex param_lock; + struct kernel_param *kp; + unsigned int num_kp; + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const s32 *gpl_crcs; + bool using_gplonly_symbols; + bool async_probe_requested; + unsigned int num_exentries; + struct exception_table_entry *extable; + int (*init)(); + struct module_memory mem[7]; + struct mod_arch_specific arch; + long unsigned int taints; + unsigned int num_bugs; + struct list_head bug_list; + struct bug_entry *bug_table; + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + struct module_sect_attrs *sect_attrs; + struct module_notes_attrs *notes_attrs; + char *args; + void *percpu; + unsigned int percpu_size; + void *noinstr_text_start; + unsigned int noinstr_text_size; + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; + unsigned int num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; + unsigned int btf_data_size; + unsigned int btf_base_data_size; + void *btf_data; + void *btf_base_data; + struct jump_entry *jump_entries; + unsigned int num_jump_entries; + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; + struct 
trace_event_call **trace_events; + unsigned int num_trace_events; + struct trace_eval_map **trace_evals; + unsigned int num_trace_evals; + unsigned int num_ftrace_callsites; + long unsigned int *ftrace_callsites; + void *kprobes_text_start; + unsigned int kprobes_text_size; + long unsigned int *kprobe_blacklist; + unsigned int num_kprobe_blacklist; + struct list_head source_list; + struct list_head target_list; + void (*exit)(); + atomic_t refcnt; + ctor_fn_t *ctors; + unsigned int num_ctors; + struct error_injection_entry *ei_funcs; + unsigned int num_ei_funcs; + struct _ddebug_info dyndbg_info; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct kernel_param_ops { + unsigned int flags; + int (*set)(const char *, const struct kernel_param *); + int (*get)(char *, const struct kernel_param *); + void (*free)(void *); +}; + +typedef unsigned int fop_flags_t; + +typedef void *fl_owner_t; + +struct file; + +struct kiocb; + +struct iov_iter; + +struct io_comp_batch; + +struct dir_context; + +struct poll_table_struct; + +struct vm_area_struct; + +struct inode; + +struct file_lock; + +struct file_lease; + +struct seq_file; + +struct io_uring_cmd; + +struct file_operations { + struct module *owner; + fop_flags_t fop_flags; + loff_t (*llseek)(struct file *, loff_t, int); + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *, struct io_comp_batch *, unsigned int); + int (*iterate_shared)(struct file *, struct dir_context *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); + long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int); + int (*mmap)(struct file *, struct vm_area_struct *); + int (*open)(struct inode *, struct file *); + int (*flush)(struct file *, fl_owner_t); + int (*release)(struct inode *, struct file *); + int (*fsync)(struct file *, loff_t, loff_t, int); + int (*fasync)(int, struct file *, int); + int (*lock)(struct file *, int, struct file_lock *); + long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*check_flags)(int); + int (*flock)(struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct file *); + int (*setlease)(struct file *, int, struct file_lease **, void **); + long int (*fallocate)(struct file *, int, loff_t, loff_t); + void (*show_fdinfo)(struct seq_file *, struct file *); + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); + loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int); + int (*fadvise)(struct file *, loff_t, loff_t, int); + int (*uring_cmd)(struct io_uring_cmd *, unsigned int); + int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int); +}; + +struct bug_entry { + int bug_addr_disp; + int file_disp; + short unsigned int line; + short unsigned int flags; +}; + +struct static_call_key { + void *func; +}; + +struct user_pt_regs { + __u64 regs[31]; + __u64 sp; + __u64 pc; + __u64 pstate; +}; + +struct frame_record { + u64 
fp; + u64 lr; +}; + +struct frame_record_meta { + struct frame_record record; + u64 type; +}; + +struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + u64 regs[31]; + u64 sp; + u64 pc; + u64 pstate; + }; + }; + u64 orig_x0; + s32 syscallno; + u32 pmr; + u64 sdei_ttbr1; + struct frame_record_meta stackframe; + u64 lockdep_hardirqs; + u64 exit_rcu; +}; + +typedef u64 pteval_t; + +typedef u64 pmdval_t; + +typedef u64 pudval_t; + +typedef u64 pgdval_t; + +typedef struct { + pteval_t pte; +} pte_t; + +typedef struct { + pmdval_t pmd; +} pmd_t; + +typedef struct { + pudval_t pud; +} pud_t; + +typedef struct { + pgdval_t pgd; +} pgd_t; + +typedef struct { + pteval_t pgprot; +} pgprot_t; + +enum pid_type { + PIDTYPE_PID = 0, + PIDTYPE_TGID = 1, + PIDTYPE_PGID = 2, + PIDTYPE_SID = 3, + PIDTYPE_MAX = 4, +}; + +struct xarray { + spinlock_t xa_lock; + gfp_t xa_flags; + void *xa_head; +}; + +struct idr { + struct xarray idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +struct proc_ns_operations; + +struct ns_common { + struct dentry *stashed; + const struct proc_ns_operations *ops; + unsigned int inum; + refcount_t count; +}; + +struct kmem_cache; + +struct fs_pin; + +struct pid_namespace { + struct idr idr; + struct callback_head rcu; + unsigned int pid_allocated; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; + struct fs_pin *bacct; + struct user_namespace *user_ns; + struct ucounts *ucounts; + int reboot; + struct ns_common ns; + int memfd_noexec_scope; +}; + +typedef struct page *pgtable_t; + +struct page_pool; + +struct dev_pagemap; + +struct page { + long unsigned int flags; + union { + struct { + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + struct list_head buddy_list; + struct list_head pcp_list; + }; + struct address_space *mapping; + union { + long unsigned int index; + long unsigned int share; + }; + long unsigned int private; + }; + struct { + long unsigned int pp_magic; + struct page_pool *pp; + long unsigned int _pp_mapping_pad; + long unsigned int dma_addr; + atomic_long_t pp_ref_count; + }; + struct { + long unsigned int compound_head; + }; + struct { + struct dev_pagemap *pgmap; + void *zone_device_data; + }; + struct callback_head callback_head; + }; + union { + unsigned int page_type; + atomic_t _mapcount; + }; + atomic_t _refcount; + long unsigned int memcg_data; +}; + +typedef struct { + arch_rwlock_t raw_lock; +} rwlock_t; + +struct seqcount_raw_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; + +struct hrtimer_cpu_base; + +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_raw_spinlock_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(); + ktime_t offset; +}; + +struct rlimit { + __kernel_ulong_t rlim_cur; + __kernel_ulong_t rlim_max; +}; + +typedef void __signalfn_t(int); + +typedef __signalfn_t *__sighandler_t; + +typedef void __restorefn_t(); + +typedef __restorefn_t *__sigrestore_t; + +struct sigaction { + __sighandler_t sa_handler; + long unsigned int sa_flags; + __sigrestore_t sa_restorer; + sigset_t sa_mask; +}; + +struct k_sigaction { + struct sigaction sa; +}; + +typedef struct { + seqcount_spinlock_t seqcount; + spinlock_t lock; +} seqlock_t; + +struct work_struct; + +typedef void (*work_func_t)(struct work_struct *); + +struct work_struct { + 
atomic_long_t data; + struct list_head entry; + work_func_t func; +}; + +typedef struct {} lockdep_map_p; + +struct maple_tree { + union { + spinlock_t ma_lock; + lockdep_map_p ma_external_lock; + }; + unsigned int ma_flags; + void *ma_root; +}; + +struct rw_semaphore { + atomic_long_t count; + atomic_long_t owner; + struct optimistic_spin_queue osq; + raw_spinlock_t wait_lock; + struct list_head wait_list; +}; + +struct percpu_counter { + raw_spinlock_t lock; + s64 count; + struct list_head list; + s32 *counters; +}; + +typedef struct { + atomic64_t id; + refcount_t pinned; + void *vdso; + long unsigned int flags; + u8 pkey_allocation_map; +} mm_context_t; + +struct xol_area; + +struct uprobes_state { + struct xol_area *xol_area; +}; + +struct mm_cid; + +struct linux_binfmt; + +struct kioctx_table; + +struct mmu_notifier_subscriptions; + +struct mm_struct { + struct { + struct { + atomic_t mm_count; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct maple_tree mm_mt; + long unsigned int mmap_base; + long unsigned int mmap_legacy_base; + long unsigned int task_size; + pgd_t *pgd; + atomic_t membarrier_state; + atomic_t mm_users; + struct mm_cid *pcpu_cid; + long unsigned int mm_cid_next_scan; + unsigned int nr_cpus_allowed; + atomic_t max_nr_cid; + raw_spinlock_t cpus_allowed_lock; + atomic_long_t pgtables_bytes; + int map_count; + spinlock_t page_table_lock; + struct rw_semaphore mmap_lock; + struct list_head mmlist; + int mm_lock_seq; + long unsigned int hiwater_rss; + long unsigned int hiwater_vm; + long unsigned int total_vm; + long unsigned int locked_vm; + atomic64_t pinned_vm; + long unsigned int data_vm; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int def_flags; + seqcount_t write_protect_seq; + spinlock_t arg_lock; + long unsigned int start_code; + long unsigned int end_code; + long unsigned int start_data; + long unsigned int end_data; + long unsigned int start_brk; + long unsigned int brk; + long unsigned int start_stack; + long unsigned int arg_start; + long unsigned int arg_end; + long unsigned int env_start; + long unsigned int env_end; + long unsigned int saved_auxv[50]; + struct percpu_counter rss_stat[4]; + struct linux_binfmt *binfmt; + mm_context_t context; + long unsigned int flags; + spinlock_t ioctx_lock; + struct kioctx_table *ioctx_table; + struct task_struct *owner; + struct user_namespace *user_ns; + struct file *exe_file; + struct mmu_notifier_subscriptions *notifier_subscriptions; + atomic_t tlb_flush_pending; + atomic_t tlb_flush_batched; + struct uprobes_state uprobes_state; + struct work_struct async_put_work; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + long unsigned int cpu_bitmap[0]; +}; + +typedef u32 errseq_t; + +struct address_space_operations; + +struct address_space { + struct inode *host; + struct xarray i_pages; + struct rw_semaphore invalidate_lock; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + struct rb_root_cached i_mmap; + long unsigned int nrpages; + long unsigned int writeback_index; + const struct address_space_operations *a_ops; + long unsigned int flags; + errseq_t wb_err; + spinlock_t i_private_lock; + struct list_head i_private_list; + struct rw_semaphore i_mmap_rwsem; + void *i_private_data; +}; + +struct wait_queue_head { + spinlock_t lock; + struct list_head head; +}; + +typedef struct wait_queue_head wait_queue_head_t; + +struct upid { + int nr; + struct pid_namespace *ns; +}; + +struct pid { + refcount_t count; + unsigned int level; 
+ spinlock_t lock; + struct dentry *stashed; + u64 ino; + struct hlist_head tasks[4]; + struct hlist_head inodes; + wait_queue_head_t wait_pidfd; + struct callback_head rcu; + struct upid numbers[0]; +}; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + +typedef int32_t key_serial_t; + +typedef __s64 time64_t; + +typedef uint32_t key_perm_t; + +struct key_type; + +struct key_tag; + +struct keyring_index_key { + long unsigned int hash; + union { + struct { + char desc[6]; + u16 desc_len; + }; + long unsigned int x; + }; + struct key_type *type; + struct key_tag *domain_tag; + const char *description; +}; + +union key_payload { + void *rcu_data0; + void *data[4]; +}; + +struct assoc_array_ptr; + +struct assoc_array { + struct assoc_array_ptr *root; + long unsigned int nr_leaves_on_tree; +}; + +struct key_user; + +struct key_restriction; + +struct key { + refcount_t usage; + key_serial_t serial; + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + struct rw_semaphore sem; + struct key_user *user; + void *security; + union { + time64_t expiry; + time64_t revoked_at; + }; + time64_t last_used_at; + kuid_t uid; + kgid_t gid; + key_perm_t perm; + short unsigned int quotalen; + short unsigned int datalen; + short int state; + long unsigned int flags; + union { + struct keyring_index_key index_key; + struct { + long unsigned int hash; + long unsigned int len_desc; + struct key_type *type; + struct key_tag *domain_tag; + char *description; + }; + }; + union { + union key_payload payload; + struct { + struct list_head name_link; + struct assoc_array keys; + }; + }; + struct key_restriction *restrict_link; +}; + +struct uts_namespace; + +struct ipc_namespace; + +struct mnt_namespace; + +struct net; + +struct time_namespace; + +struct cgroup_namespace; + +struct nsproxy { + refcount_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct time_namespace *time_ns; + struct time_namespace *time_ns_for_children; + struct cgroup_namespace *cgroup_ns; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; +}; + +struct pacct_struct { + int ac_flag; + long int ac_exitcode; + long unsigned int ac_mem; + u64 ac_utime; + u64 ac_stime; + long unsigned int ac_minflt; + long unsigned int ac_majflt; +}; + +struct core_state; + +struct tty_struct; + +struct taskstats; + +struct signal_struct { + refcount_t sigcnt; + atomic_t live; + int nr_threads; + int quick_threads; + struct list_head thread_head; + wait_queue_head_t wait_chldexit; + struct task_struct *curr_target; + struct sigpending shared_pending; + struct hlist_head multiprocess; + int group_exit_code; + int notify_count; + struct task_struct *group_exec_task; + int group_stop_count; + unsigned int flags; + struct core_state *core_state; + unsigned int is_child_subreaper: 1; + unsigned int has_child_subreaper: 1; + unsigned int next_posix_timer_id; + struct hlist_head posix_timers; + struct hlist_head ignored_posix_timers; + struct hrtimer real_timer; + ktime_t it_real_incr; + struct cpu_itimer it[2]; + struct thread_group_cputimer cputimer; + struct posix_cputimers posix_cputimers; + struct pid *pids[4]; + 
struct pid *tty_old_pgrp; + int leader; + struct tty_struct *tty; + seqlock_t stats_lock; + u64 utime; + u64 stime; + u64 cutime; + u64 cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + long unsigned int cnvcsw; + long unsigned int cnivcsw; + long unsigned int min_flt; + long unsigned int maj_flt; + long unsigned int cmin_flt; + long unsigned int cmaj_flt; + long unsigned int inblock; + long unsigned int oublock; + long unsigned int cinblock; + long unsigned int coublock; + long unsigned int maxrss; + long unsigned int cmaxrss; + struct task_io_accounting ioac; + long long unsigned int sum_sched_runtime; + struct rlimit rlim[16]; + struct pacct_struct pacct; + struct taskstats *stats; + bool oom_flag_origin; + short int oom_score_adj; + short int oom_score_adj_min; + struct mm_struct *oom_mm; + struct mutex cred_guard_mutex; + struct rw_semaphore exec_update_lock; +}; + +struct sighand_struct { + spinlock_t siglock; + refcount_t count; + wait_queue_head_t signalfd_wqh; + struct k_sigaction action[64]; +}; + +struct io_cq; + +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + short unsigned int ioprio; + spinlock_t lock; + struct xarray icq_tree; + struct io_cq *icq_hint; + struct hlist_head icq_list; + struct work_struct release_work; +}; + +enum uprobe_task_state { + UTASK_RUNNING = 0, + UTASK_SSTEP = 1, + UTASK_SSTEP_ACK = 2, + UTASK_SSTEP_TRAPPED = 3, +}; + +struct arch_uprobe_task {}; + +struct return_instance; + +struct uprobe; + +struct arch_uprobe; + +struct uprobe_task { + enum uprobe_task_state state; + unsigned int depth; + struct return_instance *return_instances; + union { + struct { + struct arch_uprobe_task autask; + long unsigned int vaddr; + }; + struct { + struct callback_head dup_xol_work; + long unsigned int dup_xol_addr; + }; + }; + struct uprobe *active_uprobe; + struct timer_list ri_timer; + long unsigned int xol_vaddr; + struct arch_uprobe *auprobe; +}; + +struct timespec64 { + time64_t tv_sec; + long int tv_nsec; +}; + +typedef void probes_handler_t(u32, long int, struct pt_regs *); + +struct arch_probe_insn { + probes_handler_t *handler; +}; + +struct arch_uprobe { + union { + __le32 insn; + __le32 ixol; + }; + struct arch_probe_insn api; + bool simulate; +}; + +enum hprobe_state { + HPROBE_LEASED = 0, + HPROBE_STABLE = 1, + HPROBE_GONE = 2, + HPROBE_CONSUMED = 3, +}; + +struct hprobe { + enum hprobe_state state; + int srcu_idx; + struct uprobe *uprobe; +}; + +struct return_consumer { + __u64 cookie; + __u64 id; +}; + +struct return_instance { + struct hprobe hprobe; + long unsigned int func; + long unsigned int stack; + long unsigned int orig_ret_vaddr; + bool chained; + int consumers_cnt; + struct return_instance *next; + struct callback_head rcu; + struct return_consumer consumers[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct workqueue_struct; + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + struct workqueue_struct *wq; + int cpu; +}; + +struct vmem_altmap { + long unsigned int base_pfn; + const long unsigned int end_pfn; + const long unsigned int reserve; + long unsigned int free; + long unsigned int align; + long unsigned int alloc; + bool inaccessible; +}; + +struct percpu_ref_data; + +struct percpu_ref { + long unsigned int percpu_count_ptr; + struct percpu_ref_data *data; +}; + +enum memory_type { + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_COHERENT = 2, + 
MEMORY_DEVICE_FS_DAX = 3, + MEMORY_DEVICE_GENERIC = 4, + MEMORY_DEVICE_PCI_P2PDMA = 5, +}; + +struct range { + u64 start; + u64 end; +}; + +struct dev_pagemap_ops; + +struct dev_pagemap { + struct vmem_altmap altmap; + struct percpu_ref ref; + struct completion done; + enum memory_type type; + unsigned int flags; + long unsigned int vmemmap_shift; + const struct dev_pagemap_ops *ops; + void *owner; + int nr_range; + union { + struct range range; + struct { + struct {} __empty_ranges; + struct range ranges[0]; + }; + }; +}; + +typedef struct { + long unsigned int val; +} swp_entry_t; + +struct folio { + union { + struct { + long unsigned int flags; + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + }; + struct address_space *mapping; + long unsigned int index; + union { + void *private; + swp_entry_t swap; + }; + atomic_t _mapcount; + atomic_t _refcount; + long unsigned int memcg_data; + }; + struct page page; + }; + union { + struct { + long unsigned int _flags_1; + long unsigned int _head_1; + atomic_t _large_mapcount; + atomic_t _entire_mapcount; + atomic_t _nr_pages_mapped; + atomic_t _pincount; + unsigned int _folio_nr_pages; + }; + struct page __page_1; + }; + union { + struct { + long unsigned int _flags_2; + long unsigned int _head_2; + void *_hugetlb_subpool; + void *_hugetlb_cgroup; + void *_hugetlb_cgroup_rsvd; + void *_hugetlb_hwpoison; + }; + struct { + long unsigned int _flags_2a; + long unsigned int _head_2a; + struct list_head _deferred_list; + }; + struct page __page_2; + }; +}; + +typedef long unsigned int vm_flags_t; + +typedef struct { + atomic64_t refcnt; +} file_ref_t; + +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +}; + +struct file_ra_state { + long unsigned int start; + unsigned int size; + unsigned int async_size; + unsigned int ra_pages; + unsigned int mmap_miss; + loff_t prev_pos; +}; + +typedef struct { + long unsigned int v; +} freeptr_t; + +struct fown_struct; + +struct file { + file_ref_t f_ref; + spinlock_t f_lock; + fmode_t f_mode; + const struct file_operations *f_op; + struct address_space *f_mapping; + void *private_data; + struct inode *f_inode; + unsigned int f_flags; + unsigned int f_iocb_flags; + const struct cred *f_cred; + struct path f_path; + union { + struct mutex f_pos_lock; + u64 f_pipe; + }; + loff_t f_pos; + struct fown_struct *f_owner; + errseq_t f_wb_err; + errseq_t f_sb_err; + struct hlist_head *f_ep; + union { + struct callback_head f_task_work; + struct llist_node f_llist; + struct file_ra_state f_ra; + freeptr_t f_freeptr; + }; +}; + +struct vm_userfaultfd_ctx {}; + +struct vma_lock { + struct rw_semaphore lock; +}; + +struct anon_vma; + +struct vm_operations_struct; + +struct vm_area_struct { + union { + struct { + long unsigned int vm_start; + long unsigned int vm_end; + }; + struct callback_head vm_rcu; + }; + struct mm_struct *vm_mm; + pgprot_t vm_page_prot; + union { + const vm_flags_t vm_flags; + vm_flags_t __vm_flags; + }; + bool detached; + int vm_lock_seq; + struct vma_lock *vm_lock; + struct { + struct rb_node rb; + long unsigned int rb_subtree_last; + } shared; + struct list_head anon_vma_chain; + struct anon_vma *anon_vma; + const struct vm_operations_struct *vm_ops; + long unsigned int vm_pgoff; + struct file *vm_file; + void *vm_private_data; + atomic_long_t swap_readahead_info; + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +}; + +typedef unsigned int vm_fault_t; + +struct vm_fault; + +struct vm_operations_struct { + void 
(*open)(struct vm_area_struct *); + void (*close)(struct vm_area_struct *); + int (*may_split)(struct vm_area_struct *, long unsigned int); + int (*mremap)(struct vm_area_struct *); + int (*mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int, long unsigned int); + vm_fault_t (*fault)(struct vm_fault *); + vm_fault_t (*huge_fault)(struct vm_fault *, unsigned int); + vm_fault_t (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int); + long unsigned int (*pagesize)(struct vm_area_struct *); + vm_fault_t (*page_mkwrite)(struct vm_fault *); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *); + int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int); + const char * (*name)(struct vm_area_struct *); + struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int); +}; + +struct mm_cid { + u64 time; + int cid; + int recent_cid; +}; + +enum fault_flag { + FAULT_FLAG_WRITE = 1, + FAULT_FLAG_MKWRITE = 2, + FAULT_FLAG_ALLOW_RETRY = 4, + FAULT_FLAG_RETRY_NOWAIT = 8, + FAULT_FLAG_KILLABLE = 16, + FAULT_FLAG_TRIED = 32, + FAULT_FLAG_USER = 64, + FAULT_FLAG_REMOTE = 128, + FAULT_FLAG_INSTRUCTION = 256, + FAULT_FLAG_INTERRUPTIBLE = 512, + FAULT_FLAG_UNSHARE = 1024, + FAULT_FLAG_ORIG_PTE_VALID = 2048, + FAULT_FLAG_VMA_LOCK = 4096, +}; + +struct vm_fault { + const struct { + struct vm_area_struct *vma; + gfp_t gfp_mask; + long unsigned int pgoff; + long unsigned int address; + long unsigned int real_address; + }; + enum fault_flag flags; + pmd_t *pmd; + pud_t *pud; + union { + pte_t orig_pte; + pmd_t orig_pmd; + }; + struct page *cow_page; + struct page *page; + pte_t *pte; + spinlock_t *ptl; + pgtable_t prealloc_pte; +}; + +struct rcu_segcblist { + struct callback_head *head; + struct callback_head **tails[4]; + long unsigned int gp_seq[4]; + long int len; + long int seglen[4]; + u8 flags; +}; + +struct srcu_node; + +struct srcu_data { + atomic_long_t srcu_lock_count[2]; + atomic_long_t srcu_unlock_count[2]; + int srcu_reader_flavor; + long: 64; + long: 64; + long: 64; + spinlock_t lock; + struct rcu_segcblist srcu_cblist; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + bool srcu_cblist_invoking; + struct timer_list delay_work; + struct work_struct work; + struct callback_head srcu_barrier_head; + struct srcu_node *mynode; + long unsigned int grpmask; + int cpu; + struct srcu_struct *ssp; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct srcu_node { + spinlock_t lock; + long unsigned int srcu_have_cbs[4]; + long unsigned int srcu_data_have_cbs[4]; + long unsigned int srcu_gp_seq_needed_exp; + struct srcu_node *srcu_parent; + int grplo; + int grphi; +}; + +struct srcu_usage; + +struct srcu_struct { + unsigned int srcu_idx; + struct srcu_data *sda; + struct lockdep_map dep_map; + struct srcu_usage *srcu_sup; +}; + +struct srcu_usage { + struct srcu_node *node; + struct srcu_node *level[3]; + int srcu_size_state; + struct mutex srcu_cb_mutex; + spinlock_t lock; + struct mutex srcu_gp_mutex; + long unsigned int srcu_gp_seq; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + long unsigned int srcu_gp_start; + long unsigned int srcu_last_gp_end; + long unsigned int srcu_size_jiffies; + long unsigned int srcu_n_lock_retries; + long unsigned int srcu_n_exp_nodelay; + bool sda_is_static; + long unsigned int srcu_barrier_seq; + struct mutex srcu_barrier_mutex; + struct completion srcu_barrier_completion; + atomic_t srcu_barrier_cpu_cnt; + long 
unsigned int reschedule_jiffies; + long unsigned int reschedule_count; + struct delayed_work work; + struct srcu_struct *srcu_ssp; +}; + +struct list_lru_node; + +struct list_lru { + struct list_lru_node *node; + struct list_head list; + int shrinker_id; + bool memcg_aware; + struct xarray xa; +}; + +struct kernfs_root; + +struct kernfs_elem_dir { + long unsigned int subdirs; + struct rb_root children; + struct kernfs_root *root; + long unsigned int rev; +}; + +struct kernfs_elem_symlink { + struct kernfs_node *target_kn; +}; + +struct kernfs_ops; + +struct kernfs_open_node; + +struct kernfs_elem_attr { + const struct kernfs_ops *ops; + struct kernfs_open_node *open; + loff_t size; + struct kernfs_node *notify_next; +}; + +struct kernfs_iattrs; + +struct kernfs_node { + atomic_t count; + atomic_t active; + struct kernfs_node *parent; + const char *name; + struct rb_node rb; + const void *ns; + unsigned int hash; + short unsigned int flags; + umode_t mode; + union { + struct kernfs_elem_dir dir; + struct kernfs_elem_symlink symlink; + struct kernfs_elem_attr attr; + }; + u64 id; + void *priv; + struct kernfs_iattrs *iattr; + struct callback_head rcu; +}; + +struct kernfs_open_file; + +struct kernfs_ops { + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); + size_t atomic_write_len; + bool prealloc; + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); + loff_t (*llseek)(struct kernfs_open_file *, loff_t, int); +}; + +struct kernfs_open_file { + struct kernfs_node *kn; + struct file *file; + struct seq_file *seq_file; + void *priv; + struct mutex mutex; + struct mutex prealloc_mutex; + int event; + struct list_head list; + char *prealloc_buf; + size_t atomic_write_len; + bool mmapped: 1; + bool released: 1; + const struct vm_operations_struct *vm_ops; +}; + +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET = 1, + KOBJ_NS_TYPES = 2, +}; + +struct sock; + +struct kobj_ns_type_operations { + enum kobj_ns_type type; + bool (*current_may_mount)(); + void * (*grab_current_ns)(); + const void * (*netlink_ns)(struct sock *); + const void * (*initial_ns)(); + void (*drop_ns)(void *); +}; + +struct kstat { + u32 result_mask; + umode_t mode; + unsigned int nlink; + uint32_t blksize; + u64 attributes; + u64 attributes_mask; + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; + u64 blocks; + u64 mnt_id; + u32 dio_mem_align; + u32 dio_offset_align; + u64 change_cookie; + u64 subvol; + u32 atomic_write_unit_min; + u32 atomic_write_unit_max; + u32 atomic_write_segments_max; +}; + +struct attribute { + const char *name; + umode_t mode; +}; + +struct bin_attribute { + struct attribute attr; + size_t size; + void *private; + struct address_space * (*f_mapping)(); + ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write)(struct file 
*, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, loff_t, int); + int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *, struct vm_area_struct *); +}; + +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, struct attribute *, int); + umode_t (*is_bin_visible)(struct kobject *, const struct bin_attribute *, int); + size_t (*bin_size)(struct kobject *, const struct bin_attribute *, int); + struct attribute **attrs; + union { + struct bin_attribute **bin_attrs; + const struct bin_attribute * const *bin_attrs_new; + }; +}; + +struct sysfs_ops { + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); +}; + +struct kset_uevent_ops; + +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +}; + +struct kobj_type { + void (*release)(struct kobject *); + const struct sysfs_ops *sysfs_ops; + const struct attribute_group **default_groups; + const struct kobj_ns_type_operations * (*child_ns_type)(const struct kobject *); + const void * (*namespace)(const struct kobject *); + void (*get_ownership)(const struct kobject *, kuid_t *, kgid_t *); +}; + +struct kobj_uevent_env { + char *argv[3]; + char *envp[64]; + int envp_idx; + char buf[2048]; + int buflen; +}; + +struct kset_uevent_ops { + int (* const filter)(const struct kobject *); + const char * (* const name)(const struct kobject *); + int (* const uevent)(const struct kobject *, struct kobj_uevent_env *); +}; + +typedef struct { + __u8 b[16]; +} uuid_t; + +enum dev_dma_attr { + DEV_DMA_NOT_SUPPORTED = 0, + DEV_DMA_NON_COHERENT = 1, + DEV_DMA_COHERENT = 2, +}; + +struct fwnode_operations; + +struct device; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; + struct device *dev; + struct list_head suppliers; + struct list_head consumers; + u8 flags; +}; + +struct fwnode_reference_args; + +struct fwnode_endpoint; + +struct fwnode_operations { + struct fwnode_handle * (*get)(struct fwnode_handle *); + void (*put)(struct fwnode_handle *); + bool (*device_is_available)(const struct fwnode_handle *); + const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); + bool (*device_dma_supported)(const struct fwnode_handle *); + enum dev_dma_attr (*device_get_dma_attr)(const struct fwnode_handle *); + bool (*property_present)(const struct fwnode_handle *, const char *); + int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); + int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); + const char * (*get_name)(const struct fwnode_handle *); + const char * (*get_name_prefix)(const struct fwnode_handle *); + struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); + struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); + int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); + struct fwnode_handle * 
(*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *); + struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); + int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); + void * (*iomap)(struct fwnode_handle *, int); + int (*irq_get)(const struct fwnode_handle *, unsigned int); + int (*add_links)(struct fwnode_handle *); +}; + +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING = 1, + DL_DEV_DRIVER_BOUND = 2, + DL_DEV_UNBINDING = 3, +}; + +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + struct list_head defer_sync; + enum dl_dev_state status; +}; + +struct pm_message { + int event; +}; + +typedef struct pm_message pm_message_t; + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE = 1, + RPM_REQ_SUSPEND = 2, + RPM_REQ_AUTOSUSPEND = 3, + RPM_REQ_RESUME = 4, +}; + +enum rpm_status { + RPM_INVALID = -1, + RPM_ACTIVE = 0, + RPM_RESUMING = 1, + RPM_SUSPENDED = 2, + RPM_SUSPENDING = 3, +}; + +struct wakeup_source; + +struct wake_irq; + +struct pm_subsys_data; + +struct dev_pm_qos; + +struct dev_pm_info { + pm_message_t power_state; + bool can_wakeup: 1; + bool async_suspend: 1; + bool in_dpm_list: 1; + bool is_prepared: 1; + bool is_suspended: 1; + bool is_noirq_suspended: 1; + bool is_late_suspended: 1; + bool no_pm: 1; + bool early_init: 1; + bool direct_complete: 1; + u32 driver_flags; + spinlock_t lock; + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path: 1; + bool syscore: 1; + bool no_pm_callbacks: 1; + bool async_in_progress: 1; + bool must_resume: 1; + bool may_skip_resume: 1; + struct hrtimer suspend_timer; + u64 timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth: 3; + bool idle_notification: 1; + bool request_pending: 1; + bool deferred_resume: 1; + bool needs_force_resume: 1; + bool runtime_auto: 1; + bool ignore_children: 1; + bool no_callbacks: 1; + bool irq_safe: 1; + bool use_autosuspend: 1; + bool timer_autosuspends: 1; + bool memalloc_noio: 1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + enum rpm_status last_status; + int runtime_error; + int autosuspend_delay; + u64 last_busy; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; + struct pm_subsys_data *subsys_data; + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; +}; + +struct irq_domain; + +struct msi_device_data; + +struct dev_msi_info { + struct irq_domain *domain; + struct msi_device_data *data; +}; + +struct dev_archdata {}; + +enum device_removable { + DEVICE_REMOVABLE_NOT_SUPPORTED = 0, + DEVICE_REMOVABLE_UNKNOWN = 1, + DEVICE_FIXED = 2, + DEVICE_REMOVABLE = 3, +}; + +struct device_private; + +struct device_type; + +struct bus_type; + +struct device_driver; + +struct dev_pm_domain; + +struct bus_dma_region; + +struct device_dma_parameters; + +struct dma_coherent_mem; + +struct io_tlb_mem; + +struct device_node; + +struct class; + +struct iommu_group; + +struct dev_iommu; + +struct device_physical_location; + +struct device { + struct kobject kobj; + struct device *parent; + struct device_private *p; + const char *init_name; + const struct device_type *type; + const struct bus_type *bus; + struct device_driver *driver; + void 
*platform_data; + void *driver_data; + struct mutex mutex; + struct dev_links_info links; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + struct dev_msi_info msi; + u64 *dma_mask; + u64 coherent_dma_mask; + u64 bus_dma_limit; + const struct bus_dma_region *dma_range_map; + struct device_dma_parameters *dma_parms; + struct list_head dma_pools; + struct dma_coherent_mem *dma_mem; + struct io_tlb_mem *dma_io_tlb_mem; + struct dev_archdata archdata; + struct device_node *of_node; + struct fwnode_handle *fwnode; + dev_t devt; + u32 id; + spinlock_t devres_lock; + struct list_head devres_head; + const struct class *class; + const struct attribute_group **groups; + void (*release)(struct device *); + struct iommu_group *iommu_group; + struct dev_iommu *iommu; + struct device_physical_location *physical_location; + enum device_removable removable; + bool offline_disabled: 1; + bool offline: 1; + bool of_node_reused: 1; + bool state_synced: 1; + bool can_match: 1; + bool dma_coherent: 1; + bool dma_skip_sync: 1; + bool dma_iommu: 1; +}; + +struct fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct fwnode_handle *local_fwnode; +}; + +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[8]; +}; + +typedef u32 phandle; + +struct property { + char *name; + int length; + void *value; + struct property *next; + struct bin_attribute attr; +}; + +struct device_node { + const char *name; + phandle phandle; + const char *full_name; + struct fwnode_handle fwnode; + struct property *properties; + struct property *deadprops; + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; + struct kobject kobj; + long unsigned int _flags; + void *data; +}; + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct tracepoint_ext { + int (*regfunc)(); + void (*unregfunc)(); + unsigned int faultable: 1; +}; + +struct tracepoint { + const char *name; + struct static_key_false key; + struct static_call_key *static_call_key; + void *static_call_tramp; + void *iterator; + void *probestub; + struct tracepoint_func *funcs; + struct tracepoint_ext *ext; +}; + +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; + u32 writable_size; + long: 64; +}; + +typedef void percpu_ref_func_t(struct percpu_ref *); + +struct percpu_ref_data { + atomic_long_t count; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic: 1; + bool allow_reinit: 1; + struct callback_head rcu; + struct percpu_ref *ref; +}; + +struct shrink_control { + gfp_t gfp_mask; + int nid; + long unsigned int nr_to_scan; + long unsigned int nr_scanned; + struct mem_cgroup *memcg; +}; + +struct shrinker { + long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *); + long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *); + long int batch; + int seeks; + unsigned int flags; + refcount_t refcount; + struct completion done; + struct callback_head rcu; + void *private_data; + struct list_head list; + int id; + atomic_long_t *nr_deferred; +}; + +struct dev_pagemap_ops { + void (*page_free)(struct page *); + vm_fault_t (*migrate_to_ram)(struct vm_fault *); + int (*memory_failure)(struct dev_pagemap *, long unsigned int, long unsigned int, int); +}; + +struct hlist_bl_node; + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next; + struct hlist_bl_node **pprev; +}; + +struct 
lockref { + union { + __u64 lock_count; + struct { + spinlock_t lock; + int count; + }; + }; +}; + +struct qstr { + union { + struct { + u32 len; + u32 hash; + }; + u64 hash_len; + }; + const unsigned char *name; +}; + +struct dentry_operations; + +struct dentry { + unsigned int d_flags; + seqcount_spinlock_t d_seq; + struct hlist_bl_node d_hash; + struct dentry *d_parent; + struct qstr d_name; + struct inode *d_inode; + unsigned char d_iname[40]; + const struct dentry_operations *d_op; + struct super_block *d_sb; + long unsigned int d_time; + void *d_fsdata; + struct lockref d_lockref; + union { + struct list_head d_lru; + wait_queue_head_t *d_wait; + }; + struct hlist_node d_sib; + struct hlist_head d_children; + union { + struct hlist_node d_alias; + struct hlist_bl_node d_in_lookup_hash; + struct callback_head d_rcu; + } d_u; +}; + +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = 1, + WRITE_LIFE_SHORT = 2, + WRITE_LIFE_MEDIUM = 3, + WRITE_LIFE_LONG = 4, + WRITE_LIFE_EXTREME = 5, +} __attribute__((mode(byte))); + +struct posix_acl; + +struct inode_operations; + +struct bdi_writeback; + +struct file_lock_context; + +struct cdev; + +struct fsnotify_mark_connector; + +struct inode { + umode_t i_mode; + short unsigned int i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + long unsigned int i_ino; + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + time64_t i_atime_sec; + time64_t i_mtime_sec; + time64_t i_ctime_sec; + u32 i_atime_nsec; + u32 i_mtime_nsec; + u32 i_ctime_nsec; + u32 i_generation; + spinlock_t i_lock; + short unsigned int i_bytes; + u8 i_blkbits; + enum rw_hint i_write_hint; + blkcnt_t i_blocks; + u32 i_state; + struct rw_semaphore i_rwsem; + long unsigned int dirtied_when; + long unsigned int dirtied_time_when; + struct hlist_node i_hash; + struct list_head i_io_list; + struct bdi_writeback *i_wb; + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + struct list_head i_lru; + struct list_head i_sb_list; + struct list_head i_wb_list; + union { + struct hlist_head i_dentry; + struct callback_head i_rcu; + }; + atomic64_t i_version; + atomic64_t i_sequence; + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; + atomic_t i_readcount; + union { + const struct file_operations *i_fop; + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct cdev *i_cdev; + char *i_link; + unsigned int i_dir_seq; + }; + __u32 i_fsnotify_mask; + struct fsnotify_mark_connector *i_fsnotify_marks; + void *i_private; +}; + +enum d_real_type { + D_REAL_DATA = 0, + D_REAL_METADATA = 1, +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, struct qstr *); + int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char * (*d_dname)(struct dentry *, char *, int); + struct vfsmount * (*d_automount)(struct path *); + int 
(*d_manage)(const struct path *, bool); + struct dentry * (*d_real)(struct dentry *, enum d_real_type); + long: 64; + long: 64; + long: 64; +}; + +struct mtd_info; + +typedef long long int qsize_t; + +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; + struct list_head dqi_dirty_list; + long unsigned int dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; + void *dqi_priv; +}; + +struct quota_format_ops; + +struct quota_info { + unsigned int flags; + struct rw_semaphore dqio_sem; + struct inode *files[3]; + struct mem_dqinfo info[3]; + const struct quota_format_ops *ops[3]; +}; + +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + struct callback_head cb_head; +}; + +struct rcuwait { + struct task_struct *task; +}; + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int *read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +}; + +struct sb_writers { + short unsigned int frozen; + int freeze_kcount; + int freeze_ucount; + struct percpu_rw_semaphore rw_sem[3]; +}; + +struct super_operations; + +struct dquot_operations; + +struct quotactl_ops; + +struct export_operations; + +struct xattr_handler; + +struct block_device; + +struct backing_dev_info; + +struct fsnotify_sb_info; + +struct super_block { + struct list_head s_list; + dev_t s_dev; + unsigned char s_blocksize_bits; + long unsigned int s_blocksize; + loff_t s_maxbytes; + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + long unsigned int s_flags; + long unsigned int s_iflags; + long unsigned int s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; + const struct xattr_handler * const *s_xattr; + struct hlist_bl_head s_roots; + struct list_head s_mounts; + struct block_device *s_bdev; + struct file *s_bdev_file; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; + struct quota_info s_dquot; + struct sb_writers s_writers; + void *s_fs_info; + u32 s_time_gran; + time64_t s_time_min; + time64_t s_time_max; + u32 s_fsnotify_mask; + struct fsnotify_sb_info *s_fsnotify_info; + char s_id[32]; + uuid_t s_uuid; + u8 s_uuid_len; + char s_sysfs_name[37]; + unsigned int s_max_links; + struct mutex s_vfs_rename_mutex; + const char *s_subtype; + const struct dentry_operations *s_d_op; + struct shrinker *s_shrink; + atomic_long_t s_remove_count; + int s_readonly_remount; + errseq_t s_wb_err; + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + struct user_namespace *s_user_ns; + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; + struct callback_head rcu; + struct work_struct destroy_work; + struct mutex s_sync_lock; + int s_stack_depth; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t s_inode_list_lock; + struct list_head s_inodes; + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; + long: 64; + long: 64; +}; + +struct mnt_idmap; + +struct vfsmount { + struct dentry *mnt_root; + struct super_block *mnt_sb; + int mnt_flags; + struct mnt_idmap *mnt_idmap; +}; + +struct list_lru_one { + struct list_head list; + long int nr_items; + spinlock_t lock; +}; + +struct list_lru_node { + struct list_lru_one 
lru; + atomic_long_t nr_items; + long: 64; + long: 64; + long: 64; +}; + +enum migrate_mode { + MIGRATE_ASYNC = 0, + MIGRATE_SYNC_LIGHT = 1, + MIGRATE_SYNC = 2, +}; + +struct exception_table_entry { + int insn; + int fixup; + short int type; + short int data; +}; + +struct key_tag { + struct callback_head rcu; + refcount_t usage; + bool removed; +}; + +typedef int (*request_key_actor_t)(struct key *, void *); + +struct key_preparsed_payload; + +struct key_match_data; + +struct kernel_pkey_params; + +struct kernel_pkey_query; + +struct key_type { + const char *name; + size_t def_datalen; + unsigned int flags; + int (*vet_description)(const char *); + int (*preparse)(struct key_preparsed_payload *); + void (*free_preparse)(struct key_preparsed_payload *); + int (*instantiate)(struct key *, struct key_preparsed_payload *); + int (*update)(struct key *, struct key_preparsed_payload *); + int (*match_preparse)(struct key_match_data *); + void (*match_free)(struct key_match_data *); + void (*revoke)(struct key *); + void (*destroy)(struct key *); + void (*describe)(const struct key *, struct seq_file *); + long int (*read)(const struct key *, char *, size_t); + request_key_actor_t request_key; + struct key_restriction * (*lookup_restriction)(const char *); + int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); + struct list_head link; + struct lock_class_key lock_class; +}; + +typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +struct user_struct { + refcount_t __count; + struct percpu_counter epoll_watches; + long unsigned int unix_inflight; + atomic_long_t pipe_bufs; + struct hlist_node uidhash_node; + kuid_t uid; + atomic_long_t locked_vm; + struct ratelimit_state ratelimit; +}; + +struct group_info { + refcount_t usage; + int ngroups; + kgid_t gid[0]; +}; + +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active: 1; + unsigned int in_hrtirq: 1; + unsigned int hang_detected: 1; + unsigned int softirq_activated: 1; + unsigned int online: 1; + unsigned int nr_events; + short unsigned int nr_retries; + short unsigned int nr_hangs; + unsigned int max_hang_time; + ktime_t expires_next; + struct hrtimer *next_timer; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + struct hrtimer_clock_base clock_base[8]; +}; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +struct request_queue; + +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct callback_head __rcu_head; + }; + unsigned int flags; +}; + +typedef struct { + uid_t val; +} vfsuid_t; + +typedef struct { + gid_t val; +} vfsgid_t; + +struct wait_page_queue; + +struct kiocb { + struct file *ki_filp; + loff_t ki_pos; + void (*ki_complete)(struct kiocb *, long int); + void *private; + int ki_flags; + u16 
ki_ioprio; + union { + struct wait_page_queue *ki_waitq; + ssize_t (*dio_complete)(void *); + }; +}; + +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + union { + kuid_t ia_uid; + vfsuid_t ia_vfsuid; + }; + union { + kgid_t ia_gid; + vfsgid_t ia_vfsgid; + }; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct file *ia_file; +}; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +enum quota_type { + USRQUOTA = 0, + GRPQUOTA = 1, + PRJQUOTA = 2, +}; + +struct kqid { + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; +}; + +struct mem_dqblk { + qsize_t dqb_bhardlimit; + qsize_t dqb_bsoftlimit; + qsize_t dqb_curspace; + qsize_t dqb_rsvspace; + qsize_t dqb_ihardlimit; + qsize_t dqb_isoftlimit; + qsize_t dqb_curinodes; + time64_t dqb_btime; + time64_t dqb_itime; +}; + +struct dquot { + struct hlist_node dq_hash; + struct list_head dq_inuse; + struct list_head dq_free; + struct list_head dq_dirty; + struct mutex dq_lock; + spinlock_t dq_dqb_lock; + atomic_t dq_count; + struct super_block *dq_sb; + struct kqid dq_id; + loff_t dq_off; + long unsigned int dq_flags; + struct mem_dqblk dq_dqb; +}; + +struct quota_format_type { + int qf_fmt_id; + const struct quota_format_ops *qf_ops; + struct module *qf_owner; + struct quota_format_type *qf_next; +}; + +struct quota_format_ops { + int (*check_quota_file)(struct super_block *, int); + int (*read_file_info)(struct super_block *, int); + int (*write_file_info)(struct super_block *, int); + int (*free_file_info)(struct super_block *, int); + int (*read_dqblk)(struct dquot *); + int (*commit_dqblk)(struct dquot *); + int (*release_dqblk)(struct dquot *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct dquot_operations { + int (*write_dquot)(struct dquot *); + struct dquot * (*alloc_dquot)(struct super_block *, int); + void (*destroy_dquot)(struct dquot *); + int (*acquire_dquot)(struct dquot *); + int (*release_dquot)(struct dquot *); + int (*mark_dirty)(struct dquot *); + int (*write_info)(struct super_block *, int); + qsize_t * (*get_reserved_space)(struct inode *); + int (*get_projid)(struct inode *, kprojid_t *); + int (*get_inode_usage)(struct inode *, qsize_t *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct qc_dqblk { + int d_fieldmask; + u64 d_spc_hardlimit; + u64 d_spc_softlimit; + u64 d_ino_hardlimit; + u64 d_ino_softlimit; + u64 d_space; + u64 d_ino_count; + s64 d_ino_timer; + s64 d_spc_timer; + int d_ino_warns; + int d_spc_warns; + u64 d_rt_spc_hardlimit; + u64 d_rt_spc_softlimit; + u64 d_rt_space; + s64 d_rt_spc_timer; + int d_rt_spc_warns; +}; + +struct qc_type_state { + unsigned int flags; + unsigned int spc_timelimit; + unsigned int ino_timelimit; + unsigned int rt_spc_timelimit; + unsigned int spc_warnlimit; + unsigned int ino_warnlimit; + unsigned int rt_spc_warnlimit; + long long unsigned int ino; + blkcnt_t blocks; + blkcnt_t nextents; +}; + +struct qc_state { + unsigned int s_incoredqs; + struct qc_type_state s_state[3]; +}; + +struct qc_info { + int i_fieldmask; + unsigned int i_flags; + unsigned int i_spc_timelimit; + unsigned int i_ino_timelimit; + unsigned int i_rt_spc_timelimit; + unsigned int i_spc_warnlimit; + unsigned int i_ino_warnlimit; + unsigned int i_rt_spc_warnlimit; +}; + +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int 
(*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int (*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct writeback_control; + +struct readahead_control; + +struct swap_info_struct; + +struct address_space_operations { + int (*writepage)(struct page *, struct writeback_control *); + int (*read_folio)(struct file *, struct folio *); + int (*writepages)(struct address_space *, struct writeback_control *); + bool (*dirty_folio)(struct address_space *, struct folio *); + void (*readahead)(struct readahead_control *); + int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, struct folio **, void **); + int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct folio *, void *); + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidate_folio)(struct folio *, size_t, size_t); + bool (*release_folio)(struct folio *, gfp_t); + void (*free_folio)(struct folio *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); + int (*migrate_folio)(struct address_space *, struct folio *, struct folio *, enum migrate_mode); + int (*launder_folio)(struct folio *); + bool (*is_partially_uptodate)(struct folio *, size_t, size_t); + void (*is_dirty_writeback)(struct folio *, bool *, bool *); + int (*error_remove_folio)(struct address_space *, struct folio *); + int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); + void (*swap_deactivate)(struct file *); + int (*swap_rw)(struct kiocb *, struct iov_iter *); +}; + +struct iovec { + void *iov_base; + __kernel_size_t iov_len; +}; + +struct kvec; + +struct bio_vec; + +struct folio_queue; + +struct iov_iter { + u8 iter_type; + bool nofault; + bool data_source; + size_t iov_offset; + union { + struct iovec __ubuf_iovec; + struct { + union { + const struct iovec *__iov; + const struct kvec *kvec; + const struct bio_vec *bvec; + const struct folio_queue *folioq; + struct xarray *xarray; + void *ubuf; + }; + size_t count; + }; + }; + union { + long unsigned int nr_segs; + u8 folioq_slot; + loff_t xarray_start; + }; +}; + +struct fiemap_extent_info; + +struct fileattr; + +struct offset_ctx; + +struct inode_operations { + struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); + const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); + int (*permission)(struct mnt_idmap *, struct inode *, int); + struct posix_acl * (*get_inode_acl)(struct inode *, int, bool); + int (*readlink)(struct dentry *, char *, int); + int (*create)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, bool); + int (*link)(struct dentry *, struct inode *, struct dentry *); + int (*unlink)(struct inode *, struct dentry *); + int (*symlink)(struct mnt_idmap *, struct inode *, struct dentry *, const char *); + int (*mkdir)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t); + int (*rmdir)(struct inode *, struct dentry *); + int (*mknod)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, dev_t); + int (*rename)(struct mnt_idmap *, struct inode *, struct 
dentry *, struct inode *, struct dentry *, unsigned int); + int (*setattr)(struct mnt_idmap *, struct dentry *, struct iattr *); + int (*getattr)(struct mnt_idmap *, const struct path *, struct kstat *, u32, unsigned int); + ssize_t (*listxattr)(struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); + int (*update_time)(struct inode *, int); + int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); + int (*tmpfile)(struct mnt_idmap *, struct inode *, struct file *, umode_t); + struct posix_acl * (*get_acl)(struct mnt_idmap *, struct dentry *, int); + int (*set_acl)(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); + int (*fileattr_set)(struct mnt_idmap *, struct dentry *, struct fileattr *); + int (*fileattr_get)(struct dentry *, struct fileattr *); + struct offset_ctx * (*get_offset_ctx)(struct inode *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct fown_struct { + struct file *file; + rwlock_t lock; + struct pid *pid; + enum pid_type pid_type; + kuid_t uid; + kuid_t euid; + int signum; +}; + +enum freeze_holder { + FREEZE_HOLDER_KERNEL = 1, + FREEZE_HOLDER_USERSPACE = 2, + FREEZE_MAY_NEST = 4, +}; + +struct kstatfs; + +struct super_operations { + struct inode * (*alloc_inode)(struct super_block *); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + void (*dirty_inode)(struct inode *, int); + int (*write_inode)(struct inode *, struct writeback_control *); + int (*drop_inode)(struct inode *); + void (*evict_inode)(struct inode *); + void (*put_super)(struct super_block *); + int (*sync_fs)(struct super_block *, int); + int (*freeze_super)(struct super_block *, enum freeze_holder); + int (*freeze_fs)(struct super_block *); + int (*thaw_super)(struct super_block *, enum freeze_holder); + int (*unfreeze_fs)(struct super_block *); + int (*statfs)(struct dentry *, struct kstatfs *); + int (*remount_fs)(struct super_block *, int *, char *); + void (*umount_begin)(struct super_block *); + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); + long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); + long int (*free_cached_objects)(struct super_block *, struct shrink_control *); + void (*shutdown)(struct super_block *); +}; + +struct fid; + +struct iomap; + +struct export_operations { + int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); + struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); + struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); + int (*get_name)(struct dentry *, char *, struct dentry *); + struct dentry * (*get_parent)(struct dentry *); + int (*commit_metadata)(struct inode *); + int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); + int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); + int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); + long unsigned int flags; +}; + +struct xattr_handler { + const char *name; + const char *prefix; + int flags; + bool (*list)(struct dentry *); + int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); + int (*set)(const struct xattr_handler *, struct mnt_idmap *, struct dentry *, struct inode *, const char 
*, const void *, size_t, int); +}; + +typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); + +struct dir_context { + filldir_t actor; + loff_t pos; +}; + +struct offset_ctx { + struct maple_tree mt; + long unsigned int next_offset; +}; + +struct p_log; + +struct fs_parameter; + +struct fs_parse_result; + +typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *); + +struct fs_parameter_spec { + const char *name; + fs_param_type *type; + u8 opt; + short unsigned int flags; + const void *data; +}; + +enum { + HI_SOFTIRQ = 0, + TIMER_SOFTIRQ = 1, + NET_TX_SOFTIRQ = 2, + NET_RX_SOFTIRQ = 3, + BLOCK_SOFTIRQ = 4, + IRQ_POLL_SOFTIRQ = 5, + TASKLET_SOFTIRQ = 6, + SCHED_SOFTIRQ = 7, + HRTIMER_SOFTIRQ = 8, + RCU_SOFTIRQ = 9, + NR_SOFTIRQS = 10, +}; + +typedef __u64 Elf64_Addr; + +typedef __u16 Elf64_Half; + +typedef __u32 Elf64_Word; + +typedef __u64 Elf64_Xword; + +struct elf64_sym { + Elf64_Word st_name; + unsigned char st_info; + unsigned char st_other; + Elf64_Half st_shndx; + Elf64_Addr st_value; + Elf64_Xword st_size; +}; + +struct kparam_string; + +struct kparam_array; + +struct kernel_param { + const char *name; + struct module *mod; + const struct kernel_param_ops *ops; + const u16 perm; + s8 level; + u8 flags; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +struct kparam_array { + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +struct error_injection_entry { + long unsigned int addr; + int etype; +}; + +struct plt_entry { + __le32 adrp; + __le32 add; + __le32 br; +}; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +enum mod_mem_type { + MOD_TEXT = 0, + MOD_DATA = 1, + MOD_RODATA = 2, + MOD_RO_AFTER_INIT = 3, + MOD_INIT_TEXT = 4, + MOD_INIT_DATA = 5, + MOD_INIT_RODATA = 6, + MOD_MEM_NUM_TYPES = 7, + MOD_INVALID = -1, +}; + +struct kernel_symbol { + int value_offset; + int name_offset; + int namespace_offset; +}; + +struct nsset; + +struct proc_ns_operations { + const char *name; + const char *real_ns_name; + int type; + struct ns_common * (*get)(struct task_struct *); + void (*put)(struct ns_common *); + int (*install)(struct nsset *, struct ns_common *); + struct user_namespace * (*owner)(struct ns_common *); + struct ns_common * (*get_parent)(struct ns_common *); +}; + +struct kvec { + void *iov_base; + size_t iov_len; +}; + +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct folio_batch { + unsigned char nr; + unsigned char i; + bool percpu_pvec_drained; + struct folio *folios[31]; +}; + +struct folio_queue { + struct folio_batch vec; + u8 orders[31]; + struct folio_queue *next; + struct folio_queue *prev; + long unsigned int marks; + long unsigned int marks2; + long unsigned int marks3; +}; + +struct cpu_operations { + const char *name; + int (*cpu_init)(unsigned int); + int (*cpu_prepare)(unsigned int); + int (*cpu_boot)(unsigned int); + void (*cpu_postboot)(); + bool (*cpu_can_disable)(unsigned int); + int (*cpu_disable)(unsigned int); + 
void (*cpu_die)(unsigned int); + int (*cpu_kill)(unsigned int); +}; + +typedef long unsigned int uintptr_t; + +enum { + ___GFP_DMA_BIT = 0, + ___GFP_HIGHMEM_BIT = 1, + ___GFP_DMA32_BIT = 2, + ___GFP_MOVABLE_BIT = 3, + ___GFP_RECLAIMABLE_BIT = 4, + ___GFP_HIGH_BIT = 5, + ___GFP_IO_BIT = 6, + ___GFP_FS_BIT = 7, + ___GFP_ZERO_BIT = 8, + ___GFP_UNUSED_BIT = 9, + ___GFP_DIRECT_RECLAIM_BIT = 10, + ___GFP_KSWAPD_RECLAIM_BIT = 11, + ___GFP_WRITE_BIT = 12, + ___GFP_NOWARN_BIT = 13, + ___GFP_RETRY_MAYFAIL_BIT = 14, + ___GFP_NOFAIL_BIT = 15, + ___GFP_NORETRY_BIT = 16, + ___GFP_MEMALLOC_BIT = 17, + ___GFP_COMP_BIT = 18, + ___GFP_NOMEMALLOC_BIT = 19, + ___GFP_HARDWALL_BIT = 20, + ___GFP_THISNODE_BIT = 21, + ___GFP_ACCOUNT_BIT = 22, + ___GFP_ZEROTAGS_BIT = 23, + ___GFP_NO_OBJ_EXT_BIT = 24, + ___GFP_LAST_BIT = 25, +}; + +typedef struct cpumask cpumask_var_t[1]; + +struct plist_head { + struct list_head node_list; +}; + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *); + +struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block *next; + int priority; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block *head; +}; + +enum scale_freq_source { + SCALE_FREQ_SOURCE_CPUFREQ = 0, + SCALE_FREQ_SOURCE_ARCH = 1, + SCALE_FREQ_SOURCE_CPPC = 2, + SCALE_FREQ_SOURCE_VIRT = 3, +}; + +struct scale_freq_data { + enum scale_freq_source source; + void (*set_freq_scale)(); +}; + +enum pm_qos_type { + PM_QOS_UNITIALIZED = 0, + PM_QOS_MAX = 1, + PM_QOS_MIN = 2, +}; + +struct pm_qos_constraints { + struct plist_head list; + s32 target_value; + s32 default_value; + s32 no_constraint_value; + enum pm_qos_type type; + struct blocking_notifier_head *notifiers; +}; + +enum freq_qos_req_type { + FREQ_QOS_MIN = 1, + FREQ_QOS_MAX = 2, +}; + +struct freq_constraints { + struct pm_qos_constraints min_freq; + struct blocking_notifier_head min_freq_notifiers; + struct pm_qos_constraints max_freq; + struct blocking_notifier_head max_freq_notifiers; +}; + +struct freq_qos_request { + enum freq_qos_req_type type; + struct plist_node pnode; + struct freq_constraints *qos; +}; + +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED = 0, + CPUFREQ_TABLE_SORTED_ASCENDING = 1, + CPUFREQ_TABLE_SORTED_DESCENDING = 2, +}; + +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + unsigned int transition_latency; +}; + +struct cpufreq_stats; + +struct clk; + +struct cpufreq_governor; + +struct cpufreq_frequency_table; + +struct thermal_cooling_device; + +struct cpufreq_policy { + cpumask_var_t cpus; + cpumask_var_t related_cpus; + cpumask_var_t real_cpus; + unsigned int shared_type; + unsigned int cpu; + struct clk *clk; + struct cpufreq_cpuinfo cpuinfo; + unsigned int min; + unsigned int max; + unsigned int cur; + unsigned int suspend_freq; + unsigned int policy; + unsigned int last_policy; + struct cpufreq_governor *governor; + void *governor_data; + char last_governor[16]; + struct work_struct update; + struct freq_constraints constraints; + struct freq_qos_request *min_freq_req; + struct freq_qos_request *max_freq_req; + struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; + struct list_head policy_list; + struct kobject kobj; + struct completion kobj_unregister; + struct rw_semaphore rwsem; + bool fast_switch_possible; + bool fast_switch_enabled; + bool strict_target; + bool efficiencies_available; + unsigned int transition_delay_us; + bool dvfs_possible_from_any_cpu; + 
bool boost_enabled; + unsigned int cached_target_freq; + unsigned int cached_resolved_idx; + bool transition_ongoing; + spinlock_t transition_lock; + wait_queue_head_t transition_wait; + struct task_struct *transition_task; + struct cpufreq_stats *stats; + void *driver_data; + struct thermal_cooling_device *cdev; + struct notifier_block nb_min; + struct notifier_block nb_max; +}; + +struct cpufreq_governor { + char name[16]; + int (*init)(struct cpufreq_policy *); + void (*exit)(struct cpufreq_policy *); + int (*start)(struct cpufreq_policy *); + void (*stop)(struct cpufreq_policy *); + void (*limits)(struct cpufreq_policy *); + ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); + int (*store_setspeed)(struct cpufreq_policy *, unsigned int); + struct list_head governor_list; + struct module *owner; + u8 flags; +}; + +struct cpufreq_frequency_table { + unsigned int flags; + unsigned int driver_data; + unsigned int frequency; +}; + +typedef __u32 __be32; + +typedef u8 uint8_t; + +typedef __be32 fdt32_t; + +struct fdt_header { + fdt32_t magic; + fdt32_t totalsize; + fdt32_t off_dt_struct; + fdt32_t off_dt_strings; + fdt32_t off_mem_rsvmap; + fdt32_t version; + fdt32_t last_comp_version; + fdt32_t boot_cpuid_phys; + fdt32_t size_dt_strings; + fdt32_t size_dt_struct; +}; + +typedef u64 p4dval_t; + +typedef struct { + p4dval_t p4d; +} p4d_t; + +enum fixed_addresses { + FIX_HOLE = 0, + FIX_FDT_END = 1, + FIX_FDT = 514, + FIX_EARLYCON_MEM_BASE = 515, + FIX_TEXT_POKE0 = 516, + FIX_ENTRY_TRAMP_TEXT4 = 517, + FIX_ENTRY_TRAMP_TEXT3 = 518, + FIX_ENTRY_TRAMP_TEXT2 = 519, + FIX_ENTRY_TRAMP_TEXT1 = 520, + __end_of_permanent_fixed_addresses = 521, + FIX_BTMAP_END = 521, + FIX_BTMAP_BEGIN = 968, + FIX_PTE = 969, + FIX_PMD = 970, + FIX_PUD = 971, + FIX_P4D = 972, + FIX_PGD = 973, + __end_of_fixed_addresses = 974, +}; + +typedef short int __s16; + +typedef u64 uint64_t; + +typedef long unsigned int irq_hw_number_t; + +struct preempt_notifier; + +struct preempt_ops { + void (*sched_in)(struct preempt_notifier *, int); + void (*sched_out)(struct preempt_notifier *, struct task_struct *); +}; + +struct preempt_notifier { + struct hlist_node link; + struct preempt_ops *ops; +}; + +struct cacheline_padding { + char x[0]; +}; + +struct arch_hw_breakpoint_ctrl { + u32 __reserved: 19; + u32 len: 8; + u32 type: 2; + u32 privilege: 2; + u32 enabled: 1; +}; + +struct arch_hw_breakpoint { + u64 address; + u64 trigger; + struct arch_hw_breakpoint_ctrl ctrl; +}; + +enum perf_event_state { + PERF_EVENT_STATE_DEAD = -4, + PERF_EVENT_STATE_EXIT = -3, + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +typedef struct { + atomic_long_t a; +} local_t; + +typedef struct { + local_t a; +} local64_t; + +struct perf_event_attr { + __u32 type; + __u32 size; + __u64 config; + union { + __u64 sample_period; + __u64 sample_freq; + }; + __u64 sample_type; + __u64 read_format; + __u64 disabled: 1; + __u64 inherit: 1; + __u64 pinned: 1; + __u64 exclusive: 1; + __u64 exclude_user: 1; + __u64 exclude_kernel: 1; + __u64 exclude_hv: 1; + __u64 exclude_idle: 1; + __u64 mmap: 1; + __u64 comm: 1; + __u64 freq: 1; + __u64 inherit_stat: 1; + __u64 enable_on_exec: 1; + __u64 task: 1; + __u64 watermark: 1; + __u64 precise_ip: 2; + __u64 mmap_data: 1; + __u64 sample_id_all: 1; + __u64 exclude_host: 1; + __u64 exclude_guest: 1; + __u64 exclude_callchain_kernel: 1; + __u64 exclude_callchain_user: 1; + __u64 mmap2: 1; + __u64 comm_exec: 1; + __u64 use_clockid: 1; + 
__u64 context_switch: 1; + __u64 write_backward: 1; + __u64 namespaces: 1; + __u64 ksymbol: 1; + __u64 bpf_event: 1; + __u64 aux_output: 1; + __u64 cgroup: 1; + __u64 text_poke: 1; + __u64 build_id: 1; + __u64 inherit_thread: 1; + __u64 remove_on_exec: 1; + __u64 sigtrap: 1; + __u64 __reserved_1: 26; + union { + __u32 wakeup_events; + __u32 wakeup_watermark; + }; + __u32 bp_type; + union { + __u64 bp_addr; + __u64 kprobe_func; + __u64 uprobe_path; + __u64 config1; + }; + union { + __u64 bp_len; + __u64 kprobe_addr; + __u64 probe_offset; + __u64 config2; + }; + __u64 branch_sample_type; + __u64 sample_regs_user; + __u32 sample_stack_user; + __s32 clockid; + __u64 sample_regs_intr; + __u32 aux_watermark; + __u16 sample_max_stack; + __u16 __reserved_2; + __u32 aux_sample_size; + union { + __u32 aux_action; + struct { + __u32 aux_start_paused: 1; + __u32 aux_pause: 1; + __u32 aux_resume: 1; + __u32 __reserved_3: 29; + }; + }; + __u64 sig_data; + __u64 config3; +}; + +struct hw_perf_event_extra { + u64 config; + unsigned int reg; + int alloc; + int idx; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head *next; +}; + +struct hw_perf_event { + union { + struct { + u64 config; + u64 last_tag; + long unsigned int config_base; + long unsigned int event_base; + int event_base_rdpmc; + int idx; + int last_cpu; + int flags; + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { + u64 aux_config; + unsigned int aux_paused; + }; + struct { + struct hrtimer hrtimer; + }; + struct { + struct list_head tp_list; + }; + struct { + u64 pwr_acc; + u64 ptsc; + }; + struct { + struct arch_hw_breakpoint info; + struct rhlist_head bp_list; + }; + struct { + u8 iommu_bank; + u8 iommu_cntr; + u16 padding; + u64 conf; + u64 conf1; + }; + }; + struct task_struct *target; + void *addr_filters; + long unsigned int addr_filters_gen; + int state; + local64_t prev_count; + u64 sample_period; + union { + struct { + u64 last_period; + local64_t period_left; + }; + struct { + u64 saved_metric; + u64 saved_slots; + }; + }; + u64 interrupts_seq; + u64 interrupts; + u64 freq_time_stamp; + u64 freq_count_stamp; +}; + +struct irq_work { + struct __call_single_node node; + void (*func)(struct irq_work *); + struct rcuwait irqwait; +}; + +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; +}; + +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); + +struct ftrace_ops; + +struct ftrace_regs; + +typedef void (*ftrace_func_t)(long unsigned int, long unsigned int, struct ftrace_ops *, struct ftrace_regs *); + +struct ftrace_hash; + +struct ftrace_ops_hash { + struct ftrace_hash *notrace_hash; + struct ftrace_hash *filter_hash; + struct mutex regex_lock; +}; + +enum ftrace_ops_cmd { + FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF = 0, + FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER = 1, + FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER = 2, +}; + +typedef int (*ftrace_ops_func_t)(struct ftrace_ops *, enum ftrace_ops_cmd); + +struct ftrace_ops { + ftrace_func_t func; + struct ftrace_ops *next; + long unsigned int flags; + void *private; + ftrace_func_t saved_func; + struct ftrace_ops_hash local_hash; + struct ftrace_ops_hash *func_hash; + struct ftrace_ops_hash old_hash; + long unsigned int trampoline; + long unsigned int trampoline_size; + struct list_head list; + struct list_head subop_list; + ftrace_ops_func_t ops_func; + struct ftrace_ops 
*managed; + long unsigned int direct_call; +}; + +struct pmu; + +struct perf_event_pmu_context; + +struct perf_buffer; + +struct fasync_struct; + +struct perf_addr_filter_range; + +struct bpf_prog; + +struct event_filter; + +struct perf_cgroup; + +struct perf_event { + struct list_head event_entry; + struct list_head sibling_list; + struct list_head active_list; + struct rb_node group_node; + u64 group_index; + struct list_head migrate_entry; + struct hlist_node hlist_entry; + struct list_head active_entry; + int nr_siblings; + int event_caps; + int group_caps; + unsigned int group_generation; + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; + enum perf_event_state state; + unsigned int attach_state; + local64_t count; + atomic64_t child_count; + u64 total_time_enabled; + u64 total_time_running; + u64 tstamp; + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + struct perf_event_context *ctx; + struct perf_event_pmu_context *pmu_ctx; + atomic_long_t refcount; + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + int oncpu; + int cpu; + struct list_head owner_entry; + struct task_struct *owner; + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_buffer *rb; + struct list_head rb_entry; + long unsigned int rcu_batches; + int rcu_pending; + wait_queue_head_t waitq; + struct fasync_struct *fasync; + unsigned int pending_wakeup; + unsigned int pending_kill; + unsigned int pending_disable; + long unsigned int pending_addr; + struct irq_work pending_irq; + struct irq_work pending_disable_irq; + struct callback_head pending_task; + unsigned int pending_work; + struct rcuwait pending_work_wait; + atomic_t event_limit; + struct perf_addr_filters_head addr_filters; + struct perf_addr_filter_range *addr_filter_ranges; + long unsigned int addr_filters_gen; + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); + struct callback_head callback_head; + struct pid_namespace *ns; + u64 id; + atomic64_t lost_samples; + u64 (*clock)(); + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; + struct bpf_prog *prog; + u64 bpf_cookie; + struct trace_event_call *tp_event; + struct event_filter *filter; + struct ftrace_ops ftrace_ops; + struct perf_cgroup *cgrp; + struct list_head sb_list; + __u32 orig_type; +}; + +typedef u64 gpa_t; + +typedef u64 gfn_t; + +struct interval_tree_node { + struct rb_node rb; + long unsigned int start; + long unsigned int last; + long unsigned int __subtree_last; +}; + +struct kvm_arch_memory_slot {}; + +struct kvm_memory_slot { + struct hlist_node id_node[2]; + struct interval_tree_node hva_node[2]; + struct rb_node gfn_node[2]; + gfn_t base_gfn; + long unsigned int npages; + long unsigned int *dirty_bitmap; + struct kvm_arch_memory_slot arch; + long unsigned int userspace_addr; + u32 flags; + short int id; + u16 as_id; +}; + +struct kvm_memslots { + u64 generation; + atomic_long_t last_used_slot; + struct rb_root_cached hva_tree; + struct rb_root gfn_tree; + struct hlist_head id_hash[128]; + int node_idx; +}; + +struct kvm_vm_stat_generic { + u64 remote_tlb_flush; + u64 remote_tlb_flush_requests; +}; + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; +}; + +struct kvm_vmid { + atomic64_t id; +}; + +struct kvm_mmu_memory_cache { + gfp_t gfp_zero; + gfp_t gfp_custom; + u64 init_value; + struct kmem_cache *kmem_cache; + 
int capacity; + int nobjs; + void **objects; +}; + +struct kvm_pgtable; + +struct kvm_arch; + +struct kvm_s2_mmu { + struct kvm_vmid vmid; + phys_addr_t pgd_phys; + struct kvm_pgtable *pgt; + u64 vtcr; + int *last_vcpu_ran; + struct kvm_mmu_memory_cache split_page_cache; + uint64_t split_page_chunk_size; + struct kvm_arch *arch; + u64 tlb_vttbr; + u64 tlb_vtcr; + bool nested_stage2_enabled; + bool pending_unmap; + atomic_t refcnt; +}; + +enum iodev_type { + IODEV_CPUIF = 0, + IODEV_DIST = 1, + IODEV_REDIST = 2, + IODEV_ITS = 3, +}; + +struct kvm_io_device_ops; + +struct kvm_io_device { + const struct kvm_io_device_ops *ops; +}; + +struct kvm_vcpu; + +struct vgic_its; + +struct vgic_register_region; + +struct vgic_io_device { + gpa_t base_addr; + union { + struct kvm_vcpu *redist_vcpu; + struct vgic_its *its; + }; + const struct vgic_register_region *regions; + enum iodev_type iodev_type; + int nr_regions; + struct kvm_io_device dev; +}; + +struct its_vpe; + +struct its_vm { + struct fwnode_handle *fwnode; + struct irq_domain *domain; + struct page *vprop_page; + struct its_vpe **vpes; + int nr_vpes; + irq_hw_number_t db_lpi_base; + long unsigned int *db_bitmap; + int nr_db_lpis; + raw_spinlock_t vmapp_lock; + u32 vlpi_count[16]; +}; + +struct vgic_irq; + +struct vgic_state_iter; + +struct vgic_dist { + bool in_kernel; + bool ready; + bool initialized; + u32 vgic_model; + u32 implementation_rev; + bool v2_groups_user_writable; + bool msis_require_devid; + int nr_spis; + gpa_t vgic_dist_base; + union { + gpa_t vgic_cpu_base; + struct list_head rd_regions; + }; + bool enabled; + bool nassgireq; + struct vgic_irq *spis; + struct vgic_io_device dist_iodev; + bool has_its; + bool table_write_in_progress; + u64 propbaser; + struct xarray lpi_xa; + struct vgic_state_iter *iter; + struct its_vm its_vm; +}; + +struct arch_timer_vm_data { + u64 voffset; + u64 poffset; + u8 ppi[4]; +}; + +struct kvm_smccc_features { + long unsigned int std_bmap; + long unsigned int std_hyp_bmap; + long unsigned int vendor_hyp_bmap; +}; + +typedef unsigned int pkvm_handle_t; + +struct kvm_hyp_memcache { + phys_addr_t head; + long unsigned int nr_pages; +}; + +struct kvm_protected_vm { + pkvm_handle_t handle; + struct kvm_hyp_memcache teardown_mc; + bool enabled; +}; + +struct kvm_mpidr_data; + +struct arm_pmu; + +struct kvm_sysreg_masks; + +struct kvm_arch { + struct kvm_s2_mmu mmu; + u64 fgu[5]; + struct kvm_s2_mmu *nested_mmus; + size_t nested_mmus_size; + int nested_mmus_next; + struct vgic_dist vgic; + struct arch_timer_vm_data timer_data; + u32 psci_version; + struct mutex config_lock; + long unsigned int flags; + long unsigned int vcpu_features[1]; + struct kvm_mpidr_data *mpidr_data; + long unsigned int *pmu_filter; + struct arm_pmu *arm_pmu; + cpumask_var_t supported_cpus; + u8 pmcr_n; + u8 idreg_debugfs_iter; + struct kvm_smccc_features smccc_feat; + struct maple_tree smccc_filter; + u64 id_regs[56]; + u64 ctr_el0; + struct kvm_sysreg_masks *sysreg_masks; + struct kvm_protected_vm pkvm; +}; + +struct mmu_notifier_ops; + +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; + struct mm_struct *mm; + struct callback_head rcu; + unsigned int users; +}; + +struct kvm_io_bus; + +struct kvm_coalesced_mmio_ring; + +struct kvm_irq_routing_table; + +struct kvm_stat_data; + +struct kvm { + rwlock_t mmu_lock; + struct mutex slots_lock; + struct mutex slots_arch_lock; + struct mm_struct *mm; + long unsigned int nr_memslot_pages; + struct kvm_memslots __memslots[2]; + struct kvm_memslots 
*memslots[1]; + struct xarray vcpu_array; + atomic_t nr_memslots_dirty_logging; + spinlock_t mn_invalidate_lock; + long unsigned int mn_active_invalidate_count; + struct rcuwait mn_memslots_update_rcuwait; + spinlock_t gpc_lock; + struct list_head gpc_list; + atomic_t online_vcpus; + int max_vcpus; + int created_vcpus; + int last_boosted_vcpu; + struct list_head vm_list; + struct mutex lock; + struct kvm_io_bus *buses[5]; + struct { + spinlock_t lock; + struct list_head items; + struct list_head resampler_list; + struct mutex resampler_lock; + } irqfds; + struct list_head ioeventfds; + struct kvm_vm_stat stat; + struct kvm_arch arch; + refcount_t users_count; + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + spinlock_t ring_lock; + struct list_head coalesced_zones; + struct mutex irq_lock; + struct kvm_irq_routing_table *irq_routing; + struct hlist_head irq_ack_notifier_list; + struct mmu_notifier mmu_notifier; + long unsigned int mmu_invalidate_seq; + long int mmu_invalidate_in_progress; + gfn_t mmu_invalidate_range_start; + gfn_t mmu_invalidate_range_end; + struct list_head devices; + u64 manual_dirty_log_protect; + struct dentry *debugfs_dentry; + struct kvm_stat_data **debugfs_stat_data; + struct srcu_struct srcu; + struct srcu_struct irq_srcu; + pid_t userspace_pid; + bool override_halt_poll_ns; + unsigned int max_halt_poll_ns; + u32 dirty_ring_size; + bool dirty_ring_with_bitmap; + bool vm_bugged; + bool vm_dead; + char stats_id[48]; +}; + +struct kvm_vcpu_stat_generic { + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_poll_invalid; + u64 halt_wakeup; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 halt_wait_ns; + u64 halt_poll_success_hist[32]; + u64 halt_poll_fail_hist[32]; + u64 halt_wait_hist[32]; + u64 blocking; +}; + +struct kvm_io_device_ops { + int (*read)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, void *); + int (*write)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, const void *); + void (*destructor)(struct kvm_io_device *); +}; + +struct kvm_mmio_fragment { + gpa_t gpa; + void *data; + unsigned int len; +}; + +struct kvm_cpu_context { + struct user_pt_regs regs; + u64 spsr_abt; + u64 spsr_und; + u64 spsr_irq; + u64 spsr_fiq; + struct user_fpsimd_state fp_regs; + u64 sys_regs[286]; + struct kvm_vcpu *__hyp_running_vcpu; + u64 *vncr_array; +}; + +struct kvm_vcpu_fault_info { + u64 esr_el2; + u64 far_el2; + u64 hpfar_el2; + u64 disr_el1; +}; + +struct kvm_guest_debug_arch { + __u64 dbg_bcr[16]; + __u64 dbg_bvr[16]; + __u64 dbg_wcr[16]; + __u64 dbg_wvr[16]; +}; + +struct vgic_v2_cpu_if { + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_apr; + u32 vgic_lr[64]; + unsigned int used_lrs; +}; + +struct its_vpe { + struct page *vpt_page; + struct its_vm *its_vm; + atomic_t vlpi_count; + int irq; + irq_hw_number_t vpe_db_lpi; + bool resident; + bool ready; + union { + struct { + int vpe_proxy_event; + bool idai; + }; + struct { + struct fwnode_handle *fwnode; + struct irq_domain *sgi_domain; + struct { + u8 priority; + bool enabled; + bool group; + } sgi_config[16]; + }; + }; + atomic_t vmapp_count; + raw_spinlock_t vpe_lock; + u16 col_idx; + u16 vpe_id; + bool pending_last; +}; + +struct vgic_v3_cpu_if { + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_sre; + u32 vgic_ap0r[4]; + u32 vgic_ap1r[4]; + u64 vgic_lr[16]; + struct its_vpe its_vpe; + unsigned int used_lrs; +}; + +struct vgic_redist_region; + +struct vgic_cpu { + union { + struct vgic_v2_cpu_if vgic_v2; + struct vgic_v3_cpu_if vgic_v3; + }; + struct vgic_irq *private_irqs; + 
raw_spinlock_t ap_list_lock; + struct list_head ap_list_head; + struct vgic_io_device rd_iodev; + struct vgic_redist_region *rdreg; + u32 rdreg_index; + atomic_t syncr_busy; + u64 pendbaser; + atomic_t ctlr; + u32 num_pri_bits; + u32 num_id_bits; +}; + +struct arch_timer_offset { + u64 *vm_offset; + u64 *vcpu_offset; +}; + +struct arch_timer_context { + struct kvm_vcpu *vcpu; + struct hrtimer hrtimer; + u64 ns_frac; + struct arch_timer_offset offset; + bool loaded; + struct { + bool level; + } irq; + u32 host_timer_irq; +}; + +struct arch_timer_cpu { + struct arch_timer_context timers[4]; + struct hrtimer bg_timer; + bool enabled; +}; + +struct kvm_pmu_events { + u64 events_host; + u64 events_guest; +}; + +struct kvm_pmc { + u8 idx; + struct perf_event *perf_event; +}; + +struct kvm_pmu { + struct irq_work overflow_work; + struct kvm_pmu_events events; + struct kvm_pmc pmc[32]; + int irq_num; + bool created; + bool irq_level; +}; + +struct kvm_mp_state { + __u32 mp_state; +}; + +struct vcpu_reset_state { + long unsigned int pc; + long unsigned int r0; + bool be; + bool reset; +}; + +struct kvm_vcpu_arch { + struct kvm_cpu_context ctxt; + void *sve_state; + enum fp_type fp_type; + unsigned int sve_max_vl; + struct kvm_s2_mmu *hw_mmu; + u64 hcr_el2; + u64 hcrx_el2; + u64 mdcr_el2; + u64 cptr_el2; + struct kvm_vcpu_fault_info fault; + u8 cflags; + u8 iflags; + u8 sflags; + bool pause; + struct kvm_guest_debug_arch *debug_ptr; + struct kvm_guest_debug_arch vcpu_debug_state; + struct kvm_guest_debug_arch external_debug_state; + struct vgic_cpu vgic_cpu; + struct arch_timer_cpu timer_cpu; + struct kvm_pmu pmu; + struct { + u32 mdscr_el1; + bool pstate_ss; + } guest_debug_preserved; + struct kvm_mp_state mp_state; + spinlock_t mp_state_lock; + struct kvm_mmu_memory_cache mmu_page_cache; + u64 vsesr_el2; + struct vcpu_reset_state reset_state; + struct { + u64 last_steal; + gpa_t base; + } steal; + u32 *ccsidr; + long: 64; +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 hvc_exit_stat; + u64 wfe_exit_stat; + u64 wfi_exit_stat; + u64 mmio_exit_user; + u64 mmio_exit_kernel; + u64 signal_exits; + u64 exits; +}; + +struct kvm_dirty_gfn; + +struct kvm_dirty_ring { + u32 dirty_index; + u32 reset_index; + u32 size; + u32 soft_limit; + struct kvm_dirty_gfn *dirty_gfns; + int index; +}; + +struct kvm_run; + +struct kvm_vcpu { + struct kvm *kvm; + struct preempt_notifier preempt_notifier; + int cpu; + int vcpu_id; + int vcpu_idx; + int ____srcu_idx; + int mode; + u64 requests; + long unsigned int guest_debug; + struct mutex mutex; + struct kvm_run *run; + struct rcuwait wait; + struct pid *pid; + rwlock_t pid_lock; + int sigset_active; + sigset_t sigset; + unsigned int halt_poll_ns; + bool valid_wakeup; + int mmio_needed; + int mmio_read_completed; + int mmio_is_write; + int mmio_cur_fragment; + int mmio_nr_fragments; + struct kvm_mmio_fragment mmio_fragments[2]; + struct { + bool in_spin_loop; + bool dy_eligible; + } spin_loop; + bool wants_to_run; + bool preempted; + bool ready; + bool scheduled_out; + struct kvm_vcpu_arch arch; + struct kvm_vcpu_stat stat; + char stats_id[48]; + struct kvm_dirty_ring dirty_ring; + struct kvm_memory_slot *last_used_slot; + u64 last_used_slot_gen; + long: 64; +}; + +struct codetag { + unsigned int flags; + unsigned int lineno; + const char *modname; + const char *function; + const char *filename; +}; + +struct alloc_tag_counters { + u64 bytes; + u64 calls; +}; + +struct alloc_tag { + struct codetag ct; + struct alloc_tag_counters *counters; +}; + 
+struct task_cputime { + u64 stime; + u64 utime; + long long unsigned int sum_exec_runtime; +}; + +struct ucounts { + struct hlist_node node; + struct user_namespace *ns; + kuid_t uid; + atomic_t count; + atomic_long_t ucount[10]; + atomic_long_t rlimit[4]; +}; + +struct rcu_work { + struct work_struct work; + struct callback_head rcu; + struct workqueue_struct *wq; +}; + +struct cgroup_subsys; + +struct cgroup_subsys_state { + struct cgroup *cgroup; + struct cgroup_subsys *ss; + struct percpu_ref refcnt; + struct list_head sibling; + struct list_head children; + struct list_head rstat_css_node; + int id; + unsigned int flags; + u64 serial_nr; + atomic_t online_cnt; + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + struct cgroup_subsys_state *parent; + int nr_descendants; +}; + +struct cgroup_file { + struct kernfs_node *kn; + long unsigned int notified_at; + struct timer_list notify_timer; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; + u64 forceidle_sum; + u64 ntime; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + struct bpf_prog_array *effective[28]; + struct hlist_head progs[28]; + u8 flags[28]; + struct list_head storages; + struct bpf_prog_array *inactive; + struct percpu_ref refcnt; + struct work_struct release_work; +}; + +struct cgroup_freezer_state { + bool freeze; + bool e_freeze; + int nr_frozen_descendants; + int nr_frozen_tasks; +}; + +struct cgroup_root; + +struct cgroup_rstat_cpu; + +struct psi_group; + +struct cgroup { + struct cgroup_subsys_state self; + long unsigned int flags; + int level; + int max_depth; + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + int nr_populated_csets; + int nr_populated_domain_children; + int nr_populated_threaded_children; + int nr_threaded_children; + struct kernfs_node *kn; + struct cgroup_file procs_file; + struct cgroup_file events_file; + struct cgroup_file psi_files[3]; + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + struct cgroup_subsys_state *subsys[13]; + int nr_dying_subsys[13]; + struct cgroup_root *root; + struct list_head cset_links; + struct list_head e_csets[13]; + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; + struct cgroup_rstat_cpu *rstat_cpu; + struct list_head rstat_css_list; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad_; + struct cgroup *rstat_flush_next; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; + struct list_head pidlists; + struct mutex pidlist_mutex; + wait_queue_head_t offline_waitq; + struct work_struct release_agent_work; + struct psi_group *psi; + struct cgroup_bpf bpf; + struct cgroup_freezer_state freezer; + struct bpf_local_storage *bpf_cgrp_storage; + struct cgroup *ancestors[0]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bio; + +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +struct css_set { + struct cgroup_subsys_state *subsys[13]; + refcount_t refcount; + struct css_set *dom_cset; + struct cgroup *dfl_cgrp; + int nr_tasks; + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + struct list_head task_iters; + struct list_head e_cset_node[13]; + struct list_head threaded_csets; + struct list_head threaded_csets_node; + struct hlist_node hlist; + struct list_head cgrp_links; + struct list_head mg_src_preload_node; + struct list_head mg_dst_preload_node; + struct list_head mg_node; + struct cgroup 
*mg_src_cgrp; + struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + bool dead; + struct callback_head callback_head; +}; + +struct perf_event_groups { + struct rb_root tree; + u64 index; +}; + +struct perf_event_context { + raw_spinlock_t lock; + struct mutex mutex; + struct list_head pmu_ctx_list; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; + struct list_head event_list; + int nr_events; + int nr_user; + int is_active; + int nr_task_data; + int nr_stat; + int nr_freq; + int rotate_disable; + refcount_t refcount; + struct task_struct *task; + u64 time; + u64 timestamp; + u64 timeoffset; + struct perf_event_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; + int nr_cgroups; + struct callback_head callback_head; + local_t nr_no_switch_fast; +}; + +struct mem_cgroup_id { + int id; + refcount_t ref; +}; + +struct page_counter { + atomic_long_t usage; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad1_; + long unsigned int emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + long unsigned int elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + long unsigned int watermark; + long unsigned int local_watermark; + long unsigned int failcnt; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad2_; + bool protection_support; + long unsigned int min; + long unsigned int low; + long unsigned int high; + long unsigned int max; + struct page_counter *parent; + long: 64; + long: 64; +}; + +struct vmpressure { + long unsigned int scanned; + long unsigned int reclaimed; + long unsigned int tree_scanned; + long unsigned int tree_reclaimed; + spinlock_t sr_lock; + struct list_head events; + struct mutex events_lock; + struct work_struct work; +}; + +struct fprop_global { + struct percpu_counter events; + unsigned int period; + seqcount_t sequence; +}; + +struct wb_domain { + spinlock_t lock; + struct fprop_global completions; + struct timer_list period_timer; + long unsigned int period_time; + long unsigned int dirty_limit_tstamp; + long unsigned int dirty_limit; +}; + +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; + +struct memcg_cgwb_frn { + u64 bdi_id; + int memcg_id; + u64 at; + struct wb_completion done; +}; + +struct memcg_vmstats; + +struct memcg_vmstats_percpu; + +struct mem_cgroup_per_node; + +struct mem_cgroup { + struct cgroup_subsys_state css; + struct mem_cgroup_id id; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct page_counter memory; + union { + struct page_counter swap; + struct page_counter memsw; + }; + struct list_head memory_peaks; + struct list_head swap_peaks; + spinlock_t peaks_lock; + struct work_struct high_work; + long unsigned int zswap_max; + bool zswap_writeback; + struct vmpressure vmpressure; + bool oom_group; + int swappiness; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct cgroup_file swap_events_file; + struct memcg_vmstats *vmstats; + atomic_long_t memory_events[9]; + atomic_long_t memory_events_local[9]; + long unsigned int socket_pressure; + int kmemcg_id; + struct obj_cgroup *objcg; + struct obj_cgroup *orig_objcg; + struct list_head objcg_list; + struct memcg_vmstats_percpu *vmstats_percpu; + struct list_head cgwb_list; + struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[4]; + struct mem_cgroup_per_node *nodeinfo[0]; + long: 64; +}; + +struct 
obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct callback_head rcu; + }; +}; + +struct vm_struct { + struct vm_struct *next; + void *addr; + long unsigned int size; + long unsigned int flags; + struct page **pages; + unsigned int page_order; + unsigned int nr_pages; + phys_addr_t phys_addr; + const void *caller; +}; + +struct bpf_run_ctx {}; + +enum node_states { + N_POSSIBLE = 0, + N_ONLINE = 1, + N_NORMAL_MEMORY = 2, + N_HIGH_MEMORY = 2, + N_MEMORY = 3, + N_CPU = 4, + N_GENERIC_INITIATOR = 5, + NR_NODE_STATES = 6, +}; + +struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; +}; + +struct uid_gid_map { + union { + struct { + struct uid_gid_extent extent[5]; + u32 nr_extents; + }; + struct { + struct uid_gid_extent *forward; + struct uid_gid_extent *reverse; + }; + }; +}; + +struct ctl_table; + +struct ctl_table_root; + +struct ctl_table_set; + +struct ctl_dir; + +struct ctl_node; + +struct ctl_table_header { + union { + struct { + const struct ctl_table *ctl_table; + int ctl_table_size; + int used; + int count; + int nreg; + }; + struct callback_head rcu; + }; + struct completion *unregistering; + const struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; + struct hlist_head inodes; + enum { + SYSCTL_TABLE_TYPE_DEFAULT = 0, + SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY = 1, + } type; +}; + +struct ctl_dir { + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct binfmt_misc; + +struct user_namespace { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + struct uid_gid_map projid_map; + struct user_namespace *parent; + int level; + kuid_t owner; + kgid_t group; + struct ns_common ns; + long unsigned int flags; + bool parent_could_setfcap; + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; + struct work_struct work; + struct ctl_table_set set; + struct ctl_table_header *sysctls; + struct ucounts *ucounts; + long int ucount_max[10]; + long int rlimit_max[4]; + struct binfmt_misc *binfmt_misc; +}; + +struct zswap_lruvec_state { + atomic_long_t nr_disk_swapins; +}; + +struct free_area { + struct list_head free_list[6]; + long unsigned int nr_free; +}; + +struct pglist_data; + +struct lruvec { + struct list_head lists[5]; + spinlock_t lru_lock; + long unsigned int anon_cost; + long unsigned int file_cost; + atomic_long_t nonresident_age; + long unsigned int refaults[2]; + long unsigned int flags; + struct pglist_data *pgdat; + struct zswap_lruvec_state zswap_lruvec_state; +}; + +struct per_cpu_pages; + +struct per_cpu_zonestat; + +struct zone { + long unsigned int _watermark[4]; + long unsigned int watermark_boost; + long unsigned int nr_reserved_highatomic; + long unsigned int nr_free_highatomic; + long int lowmem_reserve[4]; + struct pglist_data *zone_pgdat; + struct per_cpu_pages *per_cpu_pageset; + struct per_cpu_zonestat *per_cpu_zonestats; + int pageset_high_min; + int pageset_high_max; + int pageset_batch; + long unsigned int zone_start_pfn; + atomic_long_t managed_pages; + long unsigned int spanned_pages; + long unsigned int present_pages; + long unsigned int cma_pages; + const char *name; + long unsigned int nr_isolate_pageblock; + int initialized; + long: 0; + struct cacheline_padding _pad1_; + struct free_area 
free_area[11]; + long unsigned int flags; + spinlock_t lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad2_; + long unsigned int percpu_drift_mark; + long unsigned int compact_cached_free_pfn; + long unsigned int compact_cached_migrate_pfn[2]; + long unsigned int compact_init_migrate_pfn; + long unsigned int compact_init_free_pfn; + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; + bool compact_blockskip_flush; + bool contiguous; + long: 0; + struct cacheline_padding _pad3_; + atomic_long_t vm_stat[10]; + atomic_long_t vm_numa_event[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct zoneref { + struct zone *zone; + int zone_idx; +}; + +struct zonelist { + struct zoneref _zonerefs[5]; +}; + +enum zone_type { + ZONE_DMA = 0, + ZONE_DMA32 = 1, + ZONE_NORMAL = 2, + ZONE_MOVABLE = 3, + __MAX_NR_ZONES = 4, +}; + +struct per_cpu_nodestat; + +struct pglist_data { + struct zone node_zones[4]; + struct zonelist node_zonelists[1]; + int nr_zones; + long unsigned int node_start_pfn; + long unsigned int node_present_pages; + long unsigned int node_spanned_pages; + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + wait_queue_head_t reclaim_wait[4]; + atomic_t nr_writeback_throttled; + long unsigned int nr_reclaim_start; + struct task_struct *kswapd; + int kswapd_order; + enum zone_type kswapd_highest_zoneidx; + int kswapd_failures; + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; + bool proactive_compact_trigger; + long unsigned int totalreserve_pages; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad1_; + struct lruvec __lruvec; + long unsigned int flags; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad2_; + struct per_cpu_nodestat *per_cpu_nodestats; + atomic_long_t vm_stat[45]; + long: 64; + long: 64; +}; + +struct per_cpu_pages { + spinlock_t lock; + int count; + int high; + int high_min; + int high_max; + int batch; + u8 flags; + u8 alloc_factor; + short int free_count; + struct list_head lists[12]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct per_cpu_zonestat { + s8 vm_stat_diff[10]; + s8 stat_threshold; +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[45]; +}; + +enum mmu_notifier_event { + MMU_NOTIFY_UNMAP = 0, + MMU_NOTIFY_CLEAR = 1, + MMU_NOTIFY_PROTECTION_VMA = 2, + MMU_NOTIFY_PROTECTION_PAGE = 3, + MMU_NOTIFY_SOFT_DIRTY = 4, + MMU_NOTIFY_RELEASE = 5, + MMU_NOTIFY_MIGRATE = 6, + MMU_NOTIFY_EXCLUSIVE = 7, +}; + +struct mmu_notifier_range; + +struct mmu_notifier_ops { + void (*release)(struct mmu_notifier *, struct mm_struct *); + int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); + int (*clear_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); + int (*test_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int); + int (*invalidate_range_start)(struct mmu_notifier *, const struct mmu_notifier_range *); + void (*invalidate_range_end)(struct mmu_notifier *, const struct mmu_notifier_range *); + void (*arch_invalidate_secondary_tlbs)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); + struct mmu_notifier * (*alloc_notifier)(struct mm_struct *); + void (*free_notifier)(struct mmu_notifier 
*); +}; + +struct mmu_notifier_range { + struct mm_struct *mm; + long unsigned int start; + long unsigned int end; + unsigned int flags; + enum mmu_notifier_event event; + void *owner; +}; + +enum kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_RANDOM_START = 0, + KMALLOC_RANDOM_END = 0, + KMALLOC_RECLAIM = 1, + KMALLOC_DMA = 2, + KMALLOC_CGROUP = 3, + NR_KMALLOC_TYPES = 4, +}; + +struct seq_operations; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + loff_t index; + loff_t read_pos; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; +}; + +struct shrinker_info_unit { + atomic_long_t nr_deferred[64]; + long unsigned int map[1]; +}; + +struct shrinker_info { + struct callback_head rcu; + int map_nr_max; + struct shrinker_info_unit *unit[0]; +}; + +typedef int proc_handler(const struct ctl_table *, int, void *, size_t *, loff_t *); + +struct ctl_table_poll; + +struct ctl_table { + const char *procname; + void *data; + int maxlen; + umode_t mode; + proc_handler *proc_handler; + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +}; + +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set * (*lookup)(struct ctl_table_root *); + void (*set_ownership)(struct ctl_table_header *, kuid_t *, kgid_t *); + int (*permissions)(struct ctl_table_header *, const struct ctl_table *); +}; + +struct taskstats { + __u16 version; + __u32 ac_exitcode; + __u8 ac_flag; + __u8 ac_nice; + __u64 cpu_count; + __u64 cpu_delay_total; + __u64 blkio_count; + __u64 blkio_delay_total; + __u64 swapin_count; + __u64 swapin_delay_total; + __u64 cpu_run_real_total; + __u64 cpu_run_virtual_total; + char ac_comm[32]; + __u8 ac_sched; + __u8 ac_pad[3]; + long: 0; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + __u64 ac_etime; + __u64 ac_utime; + __u64 ac_stime; + __u64 ac_minflt; + __u64 ac_majflt; + __u64 coremem; + __u64 virtmem; + __u64 hiwater_rss; + __u64 hiwater_vm; + __u64 read_char; + __u64 write_char; + __u64 read_syscalls; + __u64 write_syscalls; + __u64 read_bytes; + __u64 write_bytes; + __u64 cancelled_write_bytes; + __u64 nvcsw; + __u64 nivcsw; + __u64 ac_utimescaled; + __u64 ac_stimescaled; + __u64 cpu_scaled_run_real_total; + __u64 freepages_count; + __u64 freepages_delay_total; + __u64 thrashing_count; + __u64 thrashing_delay_total; + __u64 ac_btime64; + __u64 compact_count; + __u64 compact_delay_total; + __u32 ac_tgid; + __u64 ac_tgetime; + __u64 ac_exe_dev; + __u64 ac_exe_inode; + __u64 wpcopy_count; + __u64 wpcopy_delay_total; + __u64 irq_count; + __u64 irq_delay_total; +}; + +enum writeback_sync_modes { + WB_SYNC_NONE = 0, + WB_SYNC_ALL = 1, +}; + +struct swap_iocb; + +struct writeback_control { + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + enum writeback_sync_modes sync_mode; + unsigned int for_kupdate: 1; + unsigned int for_background: 1; + unsigned int tagged_writepages: 1; + unsigned int for_reclaim: 1; + unsigned int range_cyclic: 1; + unsigned int for_sync: 1; + unsigned int unpinned_netfs_wb: 1; + unsigned int no_cgroup_owner: 1; + struct swap_iocb **swap_plug; + struct list_head *list; + struct folio_batch fbatch; + long unsigned int index; + int saved_err; + struct bdi_writeback *wb; + struct inode 
*inode; + int wb_id; + int wb_lcand_id; + int wb_tcand_id; + size_t wb_bytes; + size_t wb_lcand_bytes; + size_t wb_tcand_bytes; +}; + +struct fprop_local_percpu { + struct percpu_counter events; + unsigned int period; + raw_spinlock_t lock; +}; + +enum wb_reason { + WB_REASON_BACKGROUND = 0, + WB_REASON_VMSCAN = 1, + WB_REASON_SYNC = 2, + WB_REASON_PERIODIC = 3, + WB_REASON_LAPTOP_TIMER = 4, + WB_REASON_FS_FREE_SPACE = 5, + WB_REASON_FORKER_THREAD = 6, + WB_REASON_FOREIGN_FLUSH = 7, + WB_REASON_MAX = 8, +}; + +struct bdi_writeback { + struct backing_dev_info *bdi; + long unsigned int state; + long unsigned int last_old_flush; + struct list_head b_dirty; + struct list_head b_io; + struct list_head b_more_io; + struct list_head b_dirty_time; + spinlock_t list_lock; + atomic_t writeback_inodes; + struct percpu_counter stat[4]; + long unsigned int bw_time_stamp; + long unsigned int dirtied_stamp; + long unsigned int written_stamp; + long unsigned int write_bandwidth; + long unsigned int avg_write_bandwidth; + long unsigned int dirty_ratelimit; + long unsigned int balanced_dirty_ratelimit; + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + spinlock_t work_lock; + struct list_head work_list; + struct delayed_work dwork; + struct delayed_work bw_dwork; + struct list_head bdi_node; + struct percpu_ref refcnt; + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; + struct list_head memcg_node; + struct list_head blkcg_node; + struct list_head b_attached; + struct list_head offline_node; + union { + struct work_struct release_work; + struct callback_head rcu; + }; +}; + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; + struct file *fa_file; + struct callback_head fa_rcu; +}; + +struct disk_stats; + +struct blk_holder_ops; + +struct partition_meta_info; + +struct block_device { + sector_t bd_start_sect; + sector_t bd_nr_sectors; + struct gendisk *bd_disk; + struct request_queue *bd_queue; + struct disk_stats *bd_stats; + long unsigned int bd_stamp; + atomic_t __bd_flags; + dev_t bd_dev; + struct address_space *bd_mapping; + atomic_t bd_openers; + spinlock_t bd_size_lock; + void *bd_claiming; + void *bd_holder; + const struct blk_holder_ops *bd_holder_ops; + struct mutex bd_holder_lock; + int bd_holders; + struct kobject *bd_holder_dir; + atomic_t bd_fsfreeze_count; + struct mutex bd_fsfreeze_mutex; + struct partition_meta_info *bd_meta_info; + int bd_writers; + struct device bd_device; +}; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; + struct list_head bdi_list; + long unsigned int ra_pages; + long unsigned int io_pages; + struct kref refcnt; + unsigned int capabilities; + unsigned int min_ratio; + unsigned int max_ratio; + unsigned int max_prop_frac; + atomic_long_t tot_write_bandwidth; + long unsigned int last_bdp_sleep; + struct bdi_writeback wb; + struct list_head wb_list; + struct xarray cgwb_tree; + struct mutex cgwb_release_mutex; + struct rw_semaphore wb_switch_rwsem; + wait_queue_head_t wb_waitq; + struct device *dev; + char dev_name[64]; + struct device *owner; + struct timer_list laptop_mode_wb_timer; + struct dentry *debug_dir; +}; + +typedef __u32 blk_opf_t; + +typedef u8 blk_status_t; + +struct bvec_iter { + sector_t bi_sector; + unsigned int bi_size; + unsigned int bi_idx; + unsigned int bi_bvec_done; +} __attribute__((packed)); + +typedef unsigned int blk_qc_t; + +typedef void 
bio_end_io_t(struct bio *); + +struct bio_issue { + u64 value; +}; + +struct blkcg_gq; + +struct bio_set; + +struct bio { + struct bio *bi_next; + struct block_device *bi_bdev; + blk_opf_t bi_opf; + short unsigned int bi_flags; + short unsigned int bi_ioprio; + enum rw_hint bi_write_hint; + blk_status_t bi_status; + atomic_t __bi_remaining; + struct bvec_iter bi_iter; + union { + blk_qc_t bi_cookie; + unsigned int __bi_nr_segments; + }; + bio_end_io_t *bi_end_io; + void *bi_private; + struct blkcg_gq *bi_blkg; + struct bio_issue bi_issue; + u64 bi_iocost_cost; + short unsigned int bi_vcnt; + short unsigned int bi_max_vecs; + atomic_t __bi_cnt; + struct bio_vec *bi_io_vec; + struct bio_set *bi_pool; + struct bio_vec bi_inline_vecs[0]; +}; + +struct cgroup_namespace { + struct ns_common ns; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct css_set *root_cset; +}; + +struct ftrace_regs {}; + +struct kvm_debug_exit_arch { + __u32 hsr; + __u32 hsr_high; + __u64 far; +}; + +struct kvm_sync_regs { + __u64 device_irq_level; +}; + +struct kvm_hyperv_exit { + __u32 type; + __u32 pad1; + union { + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; + } synic; + struct { + __u64 input; + __u64 result; + __u64 params[2]; + } hcall; + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 status; + __u64 send_page; + __u64 recv_page; + __u64 pending_page; + } syndbg; + } u; +}; + +struct kvm_xen_exit { + __u32 type; + union { + struct { + __u32 longmode; + __u32 cpl; + __u64 input; + __u64 result; + __u64 params[6]; + } hcall; + } u; +}; + +struct kvm_run { + __u8 request_interrupt_window; + __u8 immediate_exit__unsafe; + __u8 padding1[6]; + __u32 exit_reason; + __u8 ready_for_interrupt_injection; + __u8 if_flag; + __u16 flags; + __u64 cr8; + __u64 apic_base; + union { + struct { + __u64 hardware_exit_reason; + } hw; + struct { + __u64 hardware_entry_failure_reason; + __u32 cpu; + } fail_entry; + struct { + __u32 exception; + __u32 error_code; + } ex; + struct { + __u8 direction; + __u8 size; + __u16 port; + __u32 count; + __u64 data_offset; + } io; + struct { + struct kvm_debug_exit_arch arch; + } debug; + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } mmio; + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; + struct { + __u64 nr; + __u64 args[6]; + __u64 ret; + union { + __u64 flags; + }; + } hypercall; + struct { + __u64 rip; + __u32 is_write; + __u32 pad; + } tpr_access; + struct { + __u8 icptcode; + __u16 ipa; + __u32 ipb; + } s390_sieic; + __u64 s390_reset_flags; + struct { + __u64 trans_exc_code; + __u32 pgm_code; + } s390_ucontrol; + struct { + __u32 dcrn; + __u32 data; + __u8 is_write; + } dcr; + struct { + __u32 suberror; + __u32 ndata; + __u64 data[16]; + } internal; + struct { + __u32 suberror; + __u32 ndata; + __u64 flags; + union { + struct { + __u8 insn_size; + __u8 insn_bytes[15]; + }; + }; + } emulation_failure; + struct { + __u64 gprs[32]; + } osi; + struct { + __u64 nr; + __u64 ret; + __u64 args[9]; + } papr_hcall; + struct { + __u16 subchannel_id; + __u16 subchannel_nr; + __u32 io_int_parm; + __u32 io_int_word; + __u32 ipb; + __u8 dequeued; + } s390_tsch; + struct { + __u32 epr; + } epr; + struct { + __u32 type; + __u32 ndata; + union { + __u64 data[16]; + }; + } system_event; + struct { + __u64 addr; + __u8 ar; + __u8 reserved; + __u8 fc; + __u8 sel1; + __u16 sel2; + } s390_stsi; + struct { + __u8 vector; + } eoi; + struct kvm_hyperv_exit hyperv; + 
struct { + __u64 esr_iss; + __u64 fault_ipa; + } arm_nisv; + struct { + __u8 error; + __u8 pad[7]; + __u32 reason; + __u32 index; + __u64 data; + } msr; + struct kvm_xen_exit xen; + struct { + long unsigned int extension_id; + long unsigned int function_id; + long unsigned int args[6]; + long unsigned int ret[2]; + } riscv_sbi; + struct { + long unsigned int csr_num; + long unsigned int new_value; + long unsigned int write_mask; + long unsigned int ret_value; + } riscv_csr; + struct { + __u32 flags; + } notify; + struct { + __u64 flags; + __u64 gpa; + __u64 size; + } memory_fault; + char padding[256]; + }; + __u64 kvm_valid_regs; + __u64 kvm_dirty_regs; + union { + struct kvm_sync_regs regs; + char padding[2048]; + } s; +}; + +struct kvm_coalesced_mmio_zone { + __u64 addr; + __u32 size; + union { + __u32 pad; + __u32 pio; + }; +}; + +struct kvm_coalesced_mmio { + __u64 phys_addr; + __u32 len; + union { + __u32 pad; + __u32 pio; + }; + __u8 data[8]; +}; + +struct kvm_coalesced_mmio_ring { + __u32 first; + __u32 last; + struct kvm_coalesced_mmio coalesced_mmio[0]; +}; + +struct kvm_device_attr { + __u32 flags; + __u32 group; + __u64 attr; + __u64 addr; +}; + +struct kvm_dirty_gfn { + __u32 flags; + __u32 slot; + __u64 offset; +}; + +struct kvm_stats_desc { + __u32 flags; + __s16 exponent; + __u16 size; + __u32 offset; + __u32 bucket_size; + char name[0]; +}; + +enum vgic_irq_config { + VGIC_CONFIG_EDGE = 0, + VGIC_CONFIG_LEVEL = 1, +}; + +struct irq_ops { + long unsigned int flags; + bool (*get_input_level)(int); +}; + +struct vgic_irq { + raw_spinlock_t irq_lock; + struct callback_head rcu; + struct list_head ap_list; + struct kvm_vcpu *vcpu; + struct kvm_vcpu *target_vcpu; + u32 intid; + bool line_level; + bool pending_latch; + bool active; + bool enabled; + bool hw; + struct kref refcount; + u32 hwintid; + unsigned int host_irq; + union { + u8 targets; + u32 mpidr; + }; + u8 source; + u8 active_source; + u8 priority; + u8 group; + enum vgic_irq_config config; + struct irq_ops *ops; + void *owner; +}; + +struct kvm_device; + +struct vgic_its { + gpa_t vgic_its_base; + bool enabled; + struct vgic_io_device iodev; + struct kvm_device *dev; + u64 baser_device_table; + u64 baser_coll_table; + struct mutex cmd_lock; + u64 cbaser; + u32 creadr; + u32 cwriter; + u32 abi_rev; + struct mutex its_lock; + struct list_head device_list; + struct list_head collection_list; + struct xarray translation_cache; +}; + +struct vgic_register_region { + unsigned int reg_offset; + unsigned int len; + unsigned int bits_per_irq; + unsigned int access_flags; + union { + long unsigned int (*read)(struct kvm_vcpu *, gpa_t, unsigned int); + long unsigned int (*its_read)(struct kvm *, struct vgic_its *, gpa_t, unsigned int); + }; + union { + void (*write)(struct kvm_vcpu *, gpa_t, unsigned int, long unsigned int); + void (*its_write)(struct kvm *, struct vgic_its *, gpa_t, unsigned int, long unsigned int); + }; + long unsigned int (*uaccess_read)(struct kvm_vcpu *, gpa_t, unsigned int); + union { + int (*uaccess_write)(struct kvm_vcpu *, gpa_t, unsigned int, long unsigned int); + int (*uaccess_its_write)(struct kvm *, struct vgic_its *, gpa_t, unsigned int, long unsigned int); + }; +}; + +struct kvm_device_ops; + +struct kvm_device { + const struct kvm_device_ops *ops; + struct kvm *kvm; + void *private; + struct list_head vm_node; +}; + +struct vgic_redist_region { + u32 index; + gpa_t base; + u32 count; + u32 free_index; + struct list_head list; +}; + +typedef long unsigned int kernel_ulong_t; + +struct 
acpi_device_id { + __u8 id[16]; + kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; +}; + +struct of_device_id { + char name[32]; + char type[32]; + char compatible[128]; + const void *data; +}; + +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, + PERF_COUNT_SW_MAX = 12, +}; + +union perf_mem_data_src { + __u64 val; + struct { + __u64 mem_rsvd: 18; + __u64 mem_hops: 3; + __u64 mem_blk: 3; + __u64 mem_snoopx: 2; + __u64 mem_remote: 1; + __u64 mem_lvl_num: 4; + __u64 mem_dtlb: 7; + __u64 mem_lock: 2; + __u64 mem_snoop: 5; + __u64 mem_lvl: 14; + __u64 mem_op: 5; + }; +}; + +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 mispred: 1; + __u64 predicted: 1; + __u64 in_tx: 1; + __u64 abort: 1; + __u64 cycles: 16; + __u64 type: 4; + __u64 spec: 2; + __u64 new_type: 4; + __u64 priv: 3; + __u64 reserved: 31; +}; + +union perf_sample_weight { + __u64 full; + struct { + __u16 var3_w; + __u16 var2_w; + __u32 var1_dw; + }; +}; + +struct dev_pm_ops { + int (*prepare)(struct device *); + void (*complete)(struct device *); + int (*suspend)(struct device *); + int (*resume)(struct device *); + int (*freeze)(struct device *); + int (*thaw)(struct device *); + int (*poweroff)(struct device *); + int (*restore)(struct device *); + int (*suspend_late)(struct device *); + int (*resume_early)(struct device *); + int (*freeze_late)(struct device *); + int (*thaw_early)(struct device *); + int (*poweroff_late)(struct device *); + int (*restore_early)(struct device *); + int (*suspend_noirq)(struct device *); + int (*resume_noirq)(struct device *); + int (*freeze_noirq)(struct device *); + int (*thaw_noirq)(struct device *); + int (*poweroff_noirq)(struct device *); + int (*restore_noirq)(struct device *); + int (*runtime_suspend)(struct device *); + int (*runtime_resume)(struct device *); + int (*runtime_idle)(struct device *); +}; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; + unsigned int clock_op_might_sleep; + struct mutex clock_mutex; + struct list_head clock_list; +}; + +struct wakeup_source { + const char *name; + int id; + struct list_head entry; + spinlock_t lock; + struct wake_irq *wakeirq; + struct timer_list timer; + long unsigned int timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + long unsigned int event_count; + long unsigned int active_count; + long unsigned int relax_count; + long unsigned int expire_count; + long unsigned int wakeup_count; + struct device *dev; + bool active: 1; + bool autosleep_enabled: 1; +}; + +struct dev_pm_domain { + struct dev_pm_ops ops; + int (*start)(struct device *); + void (*detach)(struct device *, bool); + int (*activate)(struct device *); + void (*sync)(struct device *); + void (*dismiss)(struct device *); + int (*set_performance_state)(struct device *, unsigned int); +}; + +struct bus_type { + const char *name; + const char *dev_name; + const struct attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + int (*match)(struct device *, const struct device_driver *); + int 
(*uevent)(const struct device *, struct kobj_uevent_env *); + int (*probe)(struct device *); + void (*sync_state)(struct device *); + void (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*online)(struct device *); + int (*offline)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + int (*num_vf)(struct device *); + int (*dma_configure)(struct device *); + void (*dma_cleanup)(struct device *); + const struct dev_pm_ops *pm; + bool need_parent_lock; +}; + +enum probe_type { + PROBE_DEFAULT_STRATEGY = 0, + PROBE_PREFER_ASYNCHRONOUS = 1, + PROBE_FORCE_SYNCHRONOUS = 2, +}; + +struct driver_private; + +struct device_driver { + const char *name; + const struct bus_type *bus; + struct module *owner; + const char *mod_name; + bool suppress_bind_attrs; + enum probe_type probe_type; + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + const struct dev_pm_ops *pm; + void (*coredump)(struct device *); + struct driver_private *p; +}; + +struct class { + const char *name; + const struct attribute_group **class_groups; + const struct attribute_group **dev_groups; + int (*dev_uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *); + void (*class_release)(const struct class *); + void (*dev_release)(struct device *); + int (*shutdown_pre)(struct device *); + const struct kobj_ns_type_operations *ns_type; + const void * (*namespace)(const struct device *); + void (*get_ownership)(const struct device *, kuid_t *, kgid_t *); + const struct dev_pm_ops *pm; +}; + +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *, kuid_t *, kgid_t *); + void (*release)(struct device *); + const struct dev_pm_ops *pm; +}; + +struct device_dma_parameters { + unsigned int max_segment_size; + unsigned int min_align_mask; + long unsigned int segment_boundary_mask; +}; + +enum device_physical_location_panel { + DEVICE_PANEL_TOP = 0, + DEVICE_PANEL_BOTTOM = 1, + DEVICE_PANEL_LEFT = 2, + DEVICE_PANEL_RIGHT = 3, + DEVICE_PANEL_FRONT = 4, + DEVICE_PANEL_BACK = 5, + DEVICE_PANEL_UNKNOWN = 6, +}; + +enum device_physical_location_vertical_position { + DEVICE_VERT_POS_UPPER = 0, + DEVICE_VERT_POS_CENTER = 1, + DEVICE_VERT_POS_LOWER = 2, +}; + +enum device_physical_location_horizontal_position { + DEVICE_HORI_POS_LEFT = 0, + DEVICE_HORI_POS_CENTER = 1, + DEVICE_HORI_POS_RIGHT = 2, +}; + +struct device_physical_location { + enum device_physical_location_panel panel; + enum device_physical_location_vertical_position vertical_position; + enum device_physical_location_horizontal_position horizontal_position; + bool dock; + bool lid; +}; + +typedef u64 dma_addr_t; + +struct bus_dma_region { + phys_addr_t cpu_start; + dma_addr_t dma_start; + u64 size; +}; + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; +}; + +struct seq_operations { + void * (*start)(struct seq_file *, loff_t *); + void (*stop)(struct seq_file *, void *); + void * (*next)(struct seq_file *, void *, loff_t *); + int (*show)(struct seq_file *, void 
*); +}; + +struct u64_stats_sync {}; + +struct bpf_cgroup_storage; + +struct bpf_prog_array_item { + struct bpf_prog *prog; + union { + struct bpf_cgroup_storage *cgroup_storage[2]; + u64 bpf_cookie; + }; +}; + +struct bpf_prog_array { + struct callback_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct psi_group_cpu { + seqcount_t seq; + unsigned int tasks[4]; + u32 state_mask; + u32 times[7]; + u64 state_start; + u32 times_prev[14]; + long: 64; +}; + +struct psi_group { + struct psi_group *parent; + bool enabled; + struct mutex avgs_lock; + struct psi_group_cpu *pcpu; + u64 avg_total[6]; + u64 avg_last_update; + u64 avg_next_update; + struct delayed_work avgs_work; + struct list_head avg_triggers; + u32 avg_nr_triggers[6]; + u64 total[12]; + long unsigned int avg[18]; + struct task_struct *rtpoll_task; + struct timer_list rtpoll_timer; + wait_queue_head_t rtpoll_wait; + atomic_t rtpoll_wakeup; + atomic_t rtpoll_scheduled; + struct mutex rtpoll_trigger_lock; + struct list_head rtpoll_triggers; + u32 rtpoll_nr_triggers[6]; + u32 rtpoll_states; + u64 rtpoll_min_period; + u64 rtpoll_total[6]; + u64 rtpoll_next_update; + u64 rtpoll_until; +}; + +struct cgroup_taskset; + +struct cftype; + +struct cgroup_subsys { + struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); + int (*css_online)(struct cgroup_subsys_state *); + void (*css_offline)(struct cgroup_subsys_state *); + void (*css_released)(struct cgroup_subsys_state *); + void (*css_free)(struct cgroup_subsys_state *); + void (*css_reset)(struct cgroup_subsys_state *); + void (*css_rstat_flush)(struct cgroup_subsys_state *, int); + int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*css_local_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*can_attach)(struct cgroup_taskset *); + void (*cancel_attach)(struct cgroup_taskset *); + void (*attach)(struct cgroup_taskset *); + void (*post_attach)(); + int (*can_fork)(struct task_struct *, struct css_set *); + void (*cancel_fork)(struct task_struct *, struct css_set *); + void (*fork)(struct task_struct *); + void (*exit)(struct task_struct *); + void (*release)(struct task_struct *); + void (*bind)(struct cgroup_subsys_state *); + bool early_init: 1; + bool implicit_on_dfl: 1; + bool threaded: 1; + int id; + const char *name; + const char *legacy_name; + struct cgroup_root *root; + struct idr css_idr; + struct list_head cfts; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + unsigned int depends_on; +}; + +struct cgroup_rstat_cpu { + struct u64_stats_sync bsync; + struct cgroup_base_stat bstat; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat subtree_bstat; + struct cgroup_base_stat last_subtree_bstat; + struct cgroup *updated_children; + struct cgroup *updated_next; +}; + +struct cgroup_root { + struct kernfs_root *kf_root; + unsigned int subsys_mask; + int hierarchy_id; + struct list_head root_list; + struct callback_head rcu; + long: 64; + long: 64; + struct cgroup cgrp; + struct cgroup *cgrp_ancestor_storage; + atomic_t nr_cgrps; + unsigned int flags; + char release_agent_path[4096]; + char name[64]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct cftype { + char name[64]; + long unsigned int private; + size_t max_write_len; + unsigned int flags; + unsigned int file_offset; + struct cgroup_subsys *ss; + struct list_head node; + struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + 
u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); + s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); + int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + struct lock_class_key lockdep_key; +}; + +struct bpf_insn { + __u8 code; + __u8 dst_reg: 4; + __u8 src_reg: 4; + __s16 off; + __s32 imm; +}; + +enum bpf_cgroup_iter_order { + BPF_CGROUP_ITER_ORDER_UNSPEC = 0, + BPF_CGROUP_ITER_SELF_ONLY = 1, + BPF_CGROUP_ITER_DESCENDANTS_PRE = 2, + BPF_CGROUP_ITER_DESCENDANTS_POST = 3, + BPF_CGROUP_ITER_ANCESTORS_UP = 4, +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, + BPF_MAP_TYPE_BLOOM_FILTER = 30, + BPF_MAP_TYPE_USER_RINGBUF = 31, + BPF_MAP_TYPE_CGRP_STORAGE = 32, + BPF_MAP_TYPE_ARENA = 33, + __MAX_BPF_MAP_TYPE = 34, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, + BPF_PROG_TYPE_NETFILTER = 32, + __MAX_BPF_PROG_TYPE = 33, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + 
BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + BPF_PERF_EVENT = 41, + BPF_TRACE_KPROBE_MULTI = 42, + BPF_LSM_CGROUP = 43, + BPF_STRUCT_OPS = 44, + BPF_NETFILTER = 45, + BPF_TCX_INGRESS = 46, + BPF_TCX_EGRESS = 47, + BPF_TRACE_UPROBE_MULTI = 48, + BPF_CGROUP_UNIX_CONNECT = 49, + BPF_CGROUP_UNIX_SENDMSG = 50, + BPF_CGROUP_UNIX_RECVMSG = 51, + BPF_CGROUP_UNIX_GETPEERNAME = 52, + BPF_CGROUP_UNIX_GETSOCKNAME = 53, + BPF_NETKIT_PRIMARY = 54, + BPF_NETKIT_PEER = 55, + BPF_TRACE_KPROBE_SESSION = 56, + BPF_TRACE_UPROBE_SESSION = 57, + __MAX_BPF_ATTACH_TYPE = 58, +}; + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 inner_map_fd; + __u32 numa_node; + char map_name[16]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + __u64 map_extra; + __s32 value_type_btf_obj_fd; + __s32 map_token_fd; + }; + struct { + __u32 map_fd; + __u64 key; + union { + __u64 value; + __u64 next_key; + }; + __u64 flags; + }; + struct { + __u64 in_batch; + __u64 out_batch; + __u64 keys; + __u64 values; + __u32 count; + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 insns; + __u64 license; + __u32 log_level; + __u32 log_size; + __u64 log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[16]; + __u32 prog_ifindex; + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + union { + __u32 attach_prog_fd; + __u32 attach_btf_obj_fd; + }; + __u32 core_relo_cnt; + __u64 fd_array; + __u64 core_relos; + __u32 core_relo_rec_size; + __u32 log_true_size; + __s32 prog_token_fd; + }; + struct { + __u64 pathname; + __u32 bpf_fd; + __u32 file_flags; + __s32 path_fd; + }; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + }; + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __u64 data_in; + __u64 data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + __u64 ctx_in; + __u64 ctx_out; + __u32 flags; + __u32 cpu; + __u32 batch_size; + } test; + struct { + union { + __u32 start_id; + __u32 prog_id; + 
__u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 info; + } info; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 prog_ids; + union { + __u32 prog_cnt; + __u32 count; + }; + __u64 prog_attach_flags; + __u64 link_ids; + __u64 link_attach_flags; + __u64 revision; + } query; + struct { + __u64 name; + __u32 prog_fd; + __u64 cookie; + } raw_tracepoint; + struct { + __u64 btf; + __u64 btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + __u32 btf_log_true_size; + __u32 btf_flags; + __s32 btf_token_fd; + }; + struct { + __u32 pid; + __u32 fd; + __u32 flags; + __u32 buf_len; + __u64 buf; + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + struct { + union { + __u32 prog_fd; + __u32 map_fd; + }; + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 flags; + union { + __u32 target_btf_id; + struct { + __u64 iter_info; + __u32 iter_info_len; + }; + struct { + __u64 bpf_cookie; + } perf_event; + struct { + __u32 flags; + __u32 cnt; + __u64 syms; + __u64 addrs; + __u64 cookies; + } kprobe_multi; + struct { + __u32 target_btf_id; + __u64 cookie; + } tracing; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } tcx; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 cnt; + __u32 flags; + __u32 pid; + } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } netkit; + }; + } link_create; + struct { + __u32 link_fd; + union { + __u32 new_prog_fd; + __u32 new_map_fd; + }; + __u32 flags; + union { + __u32 old_prog_fd; + __u32 old_map_fd; + }; + } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { + __u32 type; + } enable_stats; + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + struct { + __u32 prog_fd; + __u32 map_fd; + __u32 flags; + } prog_bind_map; + struct { + __u32 flags; + __u32 bpffs_fd; + } token_create; +}; + +struct bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + +struct sock_filter { + __u16 code; + __u8 jt; + __u8 jf; + __u32 k; +}; + +struct btf_type { + __u32 name_off; + __u32 info; + union { + __u32 size; + __u32 type; + }; +}; + +struct bpf_prog_stats; + +struct bpf_prog_aux; + +struct sock_fprog_kern; + +struct bpf_prog { + u16 pages; + u16 jited: 1; + u16 jit_requested: 1; + u16 gpl_compatible: 1; + u16 cb_access: 1; + u16 dst_needed: 1; + u16 blinding_requested: 1; + u16 blinded: 1; + u16 is_func: 1; + u16 kprobe_override: 1; + u16 has_callchain_buf: 1; + u16 enforce_expected_attach_type: 1; + u16 call_get_stack: 1; + u16 call_get_func_ip: 1; + u16 tstamp_type_access: 1; + u16 sleepable: 1; + enum bpf_prog_type type; + enum bpf_attach_type expected_attach_type; + u32 len; + u32 jited_len; + u8 tag[8]; + struct bpf_prog_stats *stats; + int *active; + unsigned int (*bpf_func)(const void *, const struct bpf_insn *); + struct bpf_prog_aux *aux; + struct sock_fprog_kern *orig_prog; + union { + struct { + struct {} __empty_insns; + struct sock_filter insns[0]; + }; + struct { + struct {} __empty_insnsi; + struct bpf_insn insnsi[0]; + }; 
+ }; +}; + +enum btf_field_type { + BPF_SPIN_LOCK = 1, + BPF_TIMER = 2, + BPF_KPTR_UNREF = 4, + BPF_KPTR_REF = 8, + BPF_KPTR_PERCPU = 16, + BPF_KPTR = 28, + BPF_LIST_HEAD = 32, + BPF_LIST_NODE = 64, + BPF_RB_ROOT = 128, + BPF_RB_NODE = 256, + BPF_GRAPH_NODE = 320, + BPF_GRAPH_ROOT = 160, + BPF_REFCOUNT = 512, + BPF_WORKQUEUE = 1024, + BPF_UPTR = 2048, +}; + +typedef void (*btf_dtor_kfunc_t)(void *); + +struct btf; + +struct btf_field_kptr { + struct btf *btf; + struct module *module; + btf_dtor_kfunc_t dtor; + u32 btf_id; +}; + +struct btf_record; + +struct btf_field_graph_root { + struct btf *btf; + u32 value_btf_id; + u32 node_offset; + struct btf_record *value_rec; +}; + +struct btf_field { + u32 offset; + u32 size; + enum btf_field_type type; + union { + struct btf_field_kptr kptr; + struct btf_field_graph_root graph_root; + }; +}; + +struct btf_record { + u32 cnt; + u32 field_mask; + int spin_lock_off; + int timer_off; + int wq_off; + int refcount_off; + struct btf_field fields[0]; +}; + +struct blk_holder_ops { + void (*mark_dead)(struct block_device *, bool); + void (*sync)(struct block_device *); + int (*freeze)(struct block_device *); + int (*thaw)(struct block_device *); +}; + +typedef void *mempool_alloc_t(gfp_t, void *); + +typedef void mempool_free_t(void *, void *); + +struct mempool_s { + spinlock_t lock; + int min_nr; + int curr_nr; + void **elements; + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +typedef struct mempool_s mempool_t; + +struct bio_alloc_cache; + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + struct bio_alloc_cache *cache; + mempool_t bio_pool; + mempool_t bvec_pool; + unsigned int back_pad; + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; + struct hlist_node cpuhp_dead; +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + atomic_t generation; +}; + +struct lruvec_stats_percpu; + +struct lruvec_stats; + +struct mem_cgroup_per_node { + struct mem_cgroup *memcg; + struct lruvec_stats_percpu *lruvec_stats_percpu; + struct lruvec_stats *lruvec_stats; + struct shrinker_info *shrinker_info; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad1_; + struct lruvec lruvec; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad2_; + long unsigned int lru_zone_size[20]; + struct mem_cgroup_reclaim_iter iter; + long: 64; + long: 64; +}; + +typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); + +struct bpf_iter_aux_info; + +typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); + +enum bpf_iter_task_type { + BPF_TASK_ITER_ALL = 0, + BPF_TASK_ITER_TID = 1, + BPF_TASK_ITER_TGID = 2, +}; + +struct bpf_map; + +struct bpf_iter_aux_info { + struct bpf_map *map; + struct { + struct cgroup *start; + enum bpf_cgroup_iter_order order; + } cgroup; + struct { + enum bpf_iter_task_type type; + u32 pid; + } task; +}; + +typedef void (*bpf_iter_fini_seq_priv_t)(void *); + +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + +struct bpf_local_storage_map; + +struct bpf_verifier_env; + +struct bpf_func_state; + +struct bpf_map_ops { + int (*map_alloc_check)(union bpf_attr *); + struct bpf_map * (*map_alloc)(union bpf_attr *); + void (*map_release)(struct bpf_map *, struct file *); + 
void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *, void *, void *); + void (*map_release_uref)(struct bpf_map *); + void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *); + int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_lookup_and_delete_elem)(struct bpf_map *, void *, void *, u64); + int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_update_batch)(struct bpf_map *, struct file *, const union bpf_attr *, union bpf_attr *); + int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + void * (*map_lookup_elem)(struct bpf_map *, void *); + long int (*map_update_elem)(struct bpf_map *, void *, void *, u64); + long int (*map_delete_elem)(struct bpf_map *, void *); + long int (*map_push_elem)(struct bpf_map *, void *, u64); + long int (*map_pop_elem)(struct bpf_map *, void *); + long int (*map_peek_elem)(struct bpf_map *, void *); + void * (*map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); + void (*map_fd_put_ptr)(struct bpf_map *, void *, bool); + int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); + u32 (*map_fd_sys_lookup_elem)(void *); + void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); + int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *); + int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); + int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32); + int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); + int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); + __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); + long unsigned int (*map_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); + void (*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); + struct bpf_local_storage ** (*map_owner_storage_ptr)(void *); + long int (*map_redirect)(struct bpf_map *, u64, u64); + bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); + int (*map_set_for_each_callback_args)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *); + long int (*map_for_each_callback)(struct bpf_map *, bpf_callback_t, void *, u64); + u64 (*map_mem_usage)(const struct bpf_map *); + int *map_btf_id; + const struct bpf_iter_seq_info *iter_seq_info; +}; + +struct bpf_map { + const struct bpf_map_ops *ops; + struct bpf_map *inner_map_meta; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u64 map_extra; + u32 map_flags; + u32 id; + struct btf_record *record; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + u32 btf_vmlinux_value_type_id; + struct btf *btf; + struct obj_cgroup *objcg; + char name[16]; + struct mutex freeze_mutex; + atomic64_t refcnt; + atomic64_t usercnt; + union { + struct work_struct work; + struct callback_head rcu; + }; + atomic64_t writecnt; + struct { + const struct btf_type *attach_func_proto; + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + } owner; + bool bypass_spec_v1; 
+ bool frozen; + bool free_after_mult_rcu_gp; + bool free_after_rcu_gp; + atomic64_t sleepable_refcnt; + s64 *elem_count; +}; + +struct btf_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + __u32 type_off; + __u32 type_len; + __u32 str_off; + __u32 str_len; +}; + +struct btf_kfunc_set_tab; + +struct btf_id_dtor_kfunc_tab; + +struct btf_struct_metas; + +struct btf_struct_ops_tab; + +struct btf { + void *data; + struct btf_type **types; + u32 *resolved_ids; + u32 *resolved_sizes; + const char *strings; + void *nohdr_data; + struct btf_header hdr; + u32 nr_types; + u32 types_size; + u32 data_size; + refcount_t refcnt; + u32 id; + struct callback_head rcu; + struct btf_kfunc_set_tab *kfunc_set_tab; + struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; + struct btf_struct_metas *struct_meta_tab; + struct btf_struct_ops_tab *struct_ops_tab; + struct btf *base_btf; + u32 start_id; + u32 start_str_off; + char name[56]; + bool kernel_btf; + __u32 *base_id_map; +}; + +struct bpf_ksym { + long unsigned int start; + long unsigned int end; + char name[512]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + +struct bpf_ctx_arg_aux; + +struct bpf_trampoline; + +struct bpf_arena; + +struct bpf_jit_poke_descriptor; + +struct bpf_kfunc_desc_tab; + +struct bpf_kfunc_btf_tab; + +struct bpf_prog_ops; + +struct btf_mod_pair; + +struct bpf_token; + +struct bpf_prog_offload; + +struct bpf_func_info_aux; + +struct bpf_prog_aux { + atomic64_t refcnt; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; + u32 real_func_cnt; + u32 func_idx; + u32 attach_btf_id; + u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; + struct btf *attach_btf; + const struct bpf_ctx_arg_aux *ctx_arg_info; + void *priv_stack_ptr; + struct mutex dst_mutex; + struct bpf_prog *dst_prog; + struct bpf_trampoline *dst_trampoline; + enum bpf_prog_type saved_dst_prog_type; + enum bpf_attach_type saved_dst_attach_type; + bool verifier_zext; + bool dev_bound; + bool offload_requested; + bool attach_btf_trace; + bool attach_tracing_prog; + bool func_proto_unreliable; + bool tail_call_reachable; + bool xdp_has_frags; + bool exception_cb; + bool exception_boundary; + bool is_extended; + bool jits_use_priv_stack; + bool priv_stack_requested; + u64 prog_array_member_cnt; + struct mutex ext_mutex; + struct bpf_arena *arena; + void (*recursion_detected)(struct bpf_prog *); + const struct btf_type *attach_func_proto; + const char *attach_func_name; + struct bpf_prog **func; + void *jit_data; + struct bpf_jit_poke_descriptor *poke_tab; + struct bpf_kfunc_desc_tab *kfunc_tab; + struct bpf_kfunc_btf_tab *kfunc_btf_tab; + u32 size_poke_tab; + struct bpf_ksym ksym; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct mutex used_maps_mutex; + struct btf_mod_pair *used_btfs; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; + u32 verified_insns; + int cgroup_atype; + struct bpf_map *cgroup_storage[2]; + char name[16]; + u64 (*bpf_exception_cb)(u64, u64, u64, u64, u64); + struct bpf_token *token; + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; + struct bpf_line_info *linfo; + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + u32 linfo_idx; + struct module *mod; + u32 num_exentries; + struct exception_table_entry *extable; + union { + struct work_struct work; + struct 
callback_head rcu; + }; +}; + +enum bpf_reg_type { + NOT_INIT = 0, + SCALAR_VALUE = 1, + PTR_TO_CTX = 2, + CONST_PTR_TO_MAP = 3, + PTR_TO_MAP_VALUE = 4, + PTR_TO_MAP_KEY = 5, + PTR_TO_STACK = 6, + PTR_TO_PACKET_META = 7, + PTR_TO_PACKET = 8, + PTR_TO_PACKET_END = 9, + PTR_TO_FLOW_KEYS = 10, + PTR_TO_SOCKET = 11, + PTR_TO_SOCK_COMMON = 12, + PTR_TO_TCP_SOCK = 13, + PTR_TO_TP_BUFFER = 14, + PTR_TO_XDP_SOCK = 15, + PTR_TO_BTF_ID = 16, + PTR_TO_MEM = 17, + PTR_TO_ARENA = 18, + PTR_TO_BUF = 19, + PTR_TO_FUNC = 20, + CONST_PTR_TO_DYNPTR = 21, + __BPF_REG_TYPE_MAX = 22, + PTR_TO_MAP_VALUE_OR_NULL = 260, + PTR_TO_SOCKET_OR_NULL = 267, + PTR_TO_SOCK_COMMON_OR_NULL = 268, + PTR_TO_TCP_SOCK_OR_NULL = 269, + PTR_TO_BTF_ID_OR_NULL = 272, + __BPF_REG_TYPE_LIMIT = 134217727, +}; + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *); +}; + +struct net_device; + +struct bpf_offload_dev; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +struct btf_func_model { + u8 ret_size; + u8 ret_flags; + u8 nr_args; + u8 arg_size[12]; + u8 arg_flags[12]; +}; + +struct bpf_tramp_image { + void *image; + int size; + struct bpf_ksym ksym; + struct percpu_ref pcref; + void *ip_after_call; + void *ip_epilogue; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct bpf_trampoline { + struct hlist_node hlist; + struct ftrace_ops *fops; + struct mutex mutex; + refcount_t refcnt; + u32 flags; + u64 key; + struct { + struct btf_func_model model; + void *addr; + bool ftrace_managed; + } func; + struct bpf_prog *extension_prog; + struct hlist_head progs_hlist[3]; + int progs_cnt[3]; + struct bpf_tramp_image *cur_image; +}; + +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; + bool called: 1; + bool verified: 1; +}; + +struct bpf_jit_poke_descriptor { + void *tailcall_target; + void *tailcall_bypass; + void *bypass_addr; + void *aux; + union { + struct { + struct bpf_map *map; + u32 key; + } tail_call; + }; + bool tailcall_target_stable; + u8 adj_off; + u16 reason; + u32 insn_idx; +}; + +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + struct btf *btf; + u32 btf_id; +}; + +struct btf_mod_pair { + struct btf *btf; + struct module *module; +}; + +struct bpf_token { + struct work_struct work; + atomic64_t refcnt; + struct user_namespace *userns; + u64 allowed_cmds; + u64 allowed_maps; + u64 allowed_progs; + u64 allowed_attachs; +}; + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[0]; +}; + +typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + long unsigned int pad; + }; + perf_copy_f copy; + void *data; + u32 size; +} __attribute__((packed)); + +struct perf_raw_record { + struct perf_raw_frag frag; + u32 size; +}; + +struct perf_branch_stack { + __u64 nr; + __u64 hw_idx; + struct perf_branch_entry entries[0]; +}; + +struct perf_cpu_pmu_context; + +struct perf_output_handle; + +struct pmu { + struct list_head entry; + struct module *module; + struct device *dev; + struct device *parent; + const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; + const char *name; + int type; + int capabilities; + unsigned int scope; + int *pmu_disable_count; + struct perf_cpu_pmu_context 
*cpu_pmu_context; + atomic_t exclusive_cnt; + int task_ctx_nr; + int hrtimer_interval_ms; + unsigned int nr_addr_filters; + void (*pmu_enable)(struct pmu *); + void (*pmu_disable)(struct pmu *); + int (*event_init)(struct perf_event *); + void (*event_mapped)(struct perf_event *, struct mm_struct *); + void (*event_unmapped)(struct perf_event *, struct mm_struct *); + int (*add)(struct perf_event *, int); + void (*del)(struct perf_event *, int); + void (*start)(struct perf_event *, int); + void (*stop)(struct perf_event *, int); + void (*read)(struct perf_event *); + void (*start_txn)(struct pmu *, unsigned int); + int (*commit_txn)(struct pmu *); + void (*cancel_txn)(struct pmu *); + int (*event_idx)(struct perf_event *); + void (*sched_task)(struct perf_event_pmu_context *, bool); + struct kmem_cache *task_ctx_cache; + void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); + void * (*setup_aux)(struct perf_event *, void **, int, bool); + void (*free_aux)(void *); + long int (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, long unsigned int); + int (*addr_filters_validate)(struct list_head *); + void (*addr_filters_sync)(struct perf_event *); + int (*aux_output_match)(struct perf_event *); + bool (*filter)(struct pmu *, int); + int (*check_period)(struct perf_event *, u64); +}; + +struct perf_event_pmu_context { + struct pmu *pmu; + struct perf_event_context *ctx; + struct list_head pmu_ctx_entry; + struct list_head pinned_active; + struct list_head flexible_active; + unsigned int embedded: 1; + unsigned int nr_events; + unsigned int nr_cgroups; + unsigned int nr_freq; + atomic_t refcount; + struct callback_head callback_head; + void *task_ctx_data; + int rotate_necessary; +}; + +struct perf_cpu_pmu_context { + struct perf_event_pmu_context epc; + struct perf_event_pmu_context *task_epc; + struct list_head sched_cb_entry; + int sched_cb_usage; + int active_oncpu; + int exclusive; + raw_spinlock_t hrtimer_lock; + struct hrtimer hrtimer; + ktime_t hrtimer_interval; + unsigned int hrtimer_active; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_buffer *rb; + long unsigned int wakeup; + long unsigned int size; + u64 aux_flags; + union { + void *addr; + long unsigned int head; + }; + int page; +}; + +struct perf_addr_filter_range { + long unsigned int start; + long unsigned int size; +}; + +struct perf_sample_data { + u64 sample_flags; + u64 period; + u64 dyn_size; + u64 type; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + u64 ip; + struct perf_callchain_entry *callchain; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; + union perf_sample_weight weight; + union perf_mem_data_src data_src; + u64 txn; + struct perf_regs regs_user; + struct perf_regs regs_intr; + u64 stack_user_size; + u64 stream_id; + u64 cgroup; + u64 addr; + u64 phys_addr; + u64 data_page_size; + u64 code_page_size; + u64 aux_size; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct perf_cgroup_info; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; +}; + +struct perf_cgroup_info { + u64 time; + u64 timestamp; + u64 timeoffset; + int active; +}; + +struct kvm_mpidr_data { + u64 mpidr_mask; + struct { + struct {} __empty_cmpidr_to_idx; + u16 cmpidr_to_idx[0]; + }; +}; + +enum fgt_group_id { + __NO_FGT_GROUP__ = 0, + HFGxTR_GROUP = 1, + HDFGRTR_GROUP = 2, + HDFGWTR_GROUP = 2, + 
HFGITR_GROUP = 3, + HAFGRTR_GROUP = 4, + __NR_FGT_GROUP_IDS__ = 5, +}; + +struct kvm_sysreg_masks { + struct { + u64 res0; + u64 res1; + } mask[155]; +}; + +enum vcpu_sysreg { + __INVALID_SYSREG__ = 0, + MPIDR_EL1 = 1, + CLIDR_EL1 = 2, + CSSELR_EL1 = 3, + TPIDR_EL0 = 4, + TPIDRRO_EL0 = 5, + TPIDR_EL1 = 6, + CNTKCTL_EL1 = 7, + PAR_EL1 = 8, + MDCCINT_EL1 = 9, + OSLSR_EL1 = 10, + DISR_EL1 = 11, + PMCR_EL0 = 12, + PMSELR_EL0 = 13, + PMEVCNTR0_EL0 = 14, + PMEVCNTR30_EL0 = 44, + PMCCNTR_EL0 = 45, + PMEVTYPER0_EL0 = 46, + PMEVTYPER30_EL0 = 76, + PMCCFILTR_EL0 = 77, + PMCNTENSET_EL0 = 78, + PMINTENSET_EL1 = 79, + PMOVSSET_EL0 = 80, + PMUSERENR_EL0 = 81, + APIAKEYLO_EL1 = 82, + APIAKEYHI_EL1 = 83, + APIBKEYLO_EL1 = 84, + APIBKEYHI_EL1 = 85, + APDAKEYLO_EL1 = 86, + APDAKEYHI_EL1 = 87, + APDBKEYLO_EL1 = 88, + APDBKEYHI_EL1 = 89, + APGAKEYLO_EL1 = 90, + APGAKEYHI_EL1 = 91, + RGSR_EL1 = 92, + GCR_EL1 = 93, + TFSRE0_EL1 = 94, + POR_EL0 = 95, + SVCR = 96, + FPMR = 97, + DACR32_EL2 = 98, + IFSR32_EL2 = 99, + FPEXC32_EL2 = 100, + DBGVCR32_EL2 = 101, + SCTLR_EL2 = 102, + ACTLR_EL2 = 103, + CPTR_EL2 = 104, + HACR_EL2 = 105, + ZCR_EL2 = 106, + TTBR0_EL2 = 107, + TTBR1_EL2 = 108, + TCR_EL2 = 109, + PIRE0_EL2 = 110, + PIR_EL2 = 111, + POR_EL2 = 112, + SPSR_EL2 = 113, + ELR_EL2 = 114, + AFSR0_EL2 = 115, + AFSR1_EL2 = 116, + ESR_EL2 = 117, + FAR_EL2 = 118, + HPFAR_EL2 = 119, + MAIR_EL2 = 120, + AMAIR_EL2 = 121, + VBAR_EL2 = 122, + RVBAR_EL2 = 123, + CONTEXTIDR_EL2 = 124, + CNTHCTL_EL2 = 125, + SP_EL2 = 126, + CNTHP_CTL_EL2 = 127, + CNTHP_CVAL_EL2 = 128, + CNTHV_CTL_EL2 = 129, + CNTHV_CVAL_EL2 = 130, + __SANITISED_REG_START__ = 131, + __after___SANITISED_REG_START__ = 130, + TCR2_EL2 = 131, + MDCR_EL2 = 132, + __VNCR_START__ = 133, + __after___VNCR_START__ = 132, + __before_SCTLR_EL1 = 133, + SCTLR_EL1 = 167, + __after_SCTLR_EL1 = 167, + __before_ACTLR_EL1 = 168, + ACTLR_EL1 = 168, + __after_ACTLR_EL1 = 168, + __before_CPACR_EL1 = 169, + CPACR_EL1 = 165, + __after_CPACR_EL1 = 168, + __before_ZCR_EL1 = 169, + ZCR_EL1 = 193, + __after_ZCR_EL1 = 193, + __before_TTBR0_EL1 = 194, + TTBR0_EL1 = 197, + __after_TTBR0_EL1 = 197, + __before_TTBR1_EL1 = 198, + TTBR1_EL1 = 199, + __after_TTBR1_EL1 = 199, + __before_TCR_EL1 = 200, + TCR_EL1 = 169, + __after_TCR_EL1 = 199, + __before_TCR2_EL1 = 200, + TCR2_EL1 = 211, + __after_TCR2_EL1 = 211, + __before_ESR_EL1 = 212, + ESR_EL1 = 172, + __after_ESR_EL1 = 211, + __before_AFSR0_EL1 = 212, + AFSR0_EL1 = 170, + __after_AFSR0_EL1 = 211, + __before_AFSR1_EL1 = 212, + AFSR1_EL1 = 171, + __after_AFSR1_EL1 = 211, + __before_FAR_EL1 = 212, + FAR_EL1 = 201, + __after_FAR_EL1 = 211, + __before_MAIR_EL1 = 212, + MAIR_EL1 = 173, + __after_MAIR_EL1 = 211, + __before_VBAR_EL1 = 212, + VBAR_EL1 = 207, + __after_VBAR_EL1 = 211, + __before_CONTEXTIDR_EL1 = 212, + CONTEXTIDR_EL1 = 166, + __after_CONTEXTIDR_EL1 = 211, + __before_AMAIR_EL1 = 212, + AMAIR_EL1 = 174, + __after_AMAIR_EL1 = 211, + __before_MDSCR_EL1 = 212, + MDSCR_EL1 = 176, + __after_MDSCR_EL1 = 211, + __before_ELR_EL1 = 212, + ELR_EL1 = 203, + __after_ELR_EL1 = 211, + __before_SP_EL1 = 212, + SP_EL1 = 205, + __after_SP_EL1 = 211, + __before_SPSR_EL1 = 212, + SPSR_EL1 = 177, + __after_SPSR_EL1 = 211, + __before_TFSR_EL1 = 212, + TFSR_EL1 = 183, + __after_TFSR_EL1 = 211, + __before_VPIDR_EL2 = 212, + VPIDR_EL2 = 150, + __after_VPIDR_EL2 = 211, + __before_VMPIDR_EL2 = 212, + VMPIDR_EL2 = 143, + __after_VMPIDR_EL2 = 211, + __before_HCR_EL2 = 212, + HCR_EL2 = 148, + __after_HCR_EL2 = 211, + __before_HSTR_EL2 = 212, + HSTR_EL2 = 149, + 
__after_HSTR_EL2 = 211, + __before_VTTBR_EL2 = 212, + VTTBR_EL2 = 137, + __after_VTTBR_EL2 = 211, + __before_VTCR_EL2 = 212, + VTCR_EL2 = 141, + __after_VTCR_EL2 = 211, + __before_TPIDR_EL2 = 212, + TPIDR_EL2 = 151, + __after_TPIDR_EL2 = 211, + __before_HCRX_EL2 = 212, + HCRX_EL2 = 153, + __after_HCRX_EL2 = 211, + __before_PIR_EL1 = 212, + PIR_EL1 = 217, + __after_PIR_EL1 = 217, + __before_PIRE0_EL1 = 218, + PIRE0_EL1 = 215, + __after_PIRE0_EL1 = 217, + __before_POR_EL1 = 218, + POR_EL1 = 218, + __after_POR_EL1 = 218, + __before_HFGRTR_EL2 = 219, + HFGRTR_EL2 = 188, + __after_HFGRTR_EL2 = 218, + __before_HFGWTR_EL2 = 219, + HFGWTR_EL2 = 189, + __after_HFGWTR_EL2 = 218, + __before_HFGITR_EL2 = 219, + HFGITR_EL2 = 190, + __after_HFGITR_EL2 = 218, + __before_HDFGRTR_EL2 = 219, + HDFGRTR_EL2 = 191, + __after_HDFGRTR_EL2 = 218, + __before_HDFGWTR_EL2 = 219, + HDFGWTR_EL2 = 192, + __after_HDFGWTR_EL2 = 218, + __before_HAFGRTR_EL2 = 219, + HAFGRTR_EL2 = 194, + __after_HAFGRTR_EL2 = 218, + __before_CNTVOFF_EL2 = 219, + CNTVOFF_EL2 = 145, + __after_CNTVOFF_EL2 = 218, + __before_CNTV_CVAL_EL0 = 219, + CNTV_CVAL_EL0 = 178, + __after_CNTV_CVAL_EL0 = 218, + __before_CNTV_CTL_EL0 = 219, + CNTV_CTL_EL0 = 179, + __after_CNTV_CTL_EL0 = 218, + __before_CNTP_CVAL_EL0 = 219, + CNTP_CVAL_EL0 = 180, + __after_CNTP_CVAL_EL0 = 218, + __before_CNTP_CTL_EL0 = 219, + CNTP_CTL_EL0 = 181, + __after_CNTP_CTL_EL0 = 218, + __before_ICH_HCR_EL2 = 219, + ICH_HCR_EL2 = 285, + __after_ICH_HCR_EL2 = 285, + NR_SYS_REGS = 286, +}; + +struct kvm_io_range { + gpa_t addr; + int len; + struct kvm_io_device *dev; +}; + +struct kvm_io_bus { + int dev_count; + int ioeventfd_count; + struct kvm_io_range range[0]; +}; + +enum kvm_bus { + KVM_MMIO_BUS = 0, + KVM_PIO_BUS = 1, + KVM_VIRTIO_CCW_NOTIFY_BUS = 2, + KVM_FAST_MMIO_BUS = 3, + KVM_IOCSR_BUS = 4, + KVM_NR_BUSES = 5, +}; + +struct kvm_irq_routing_table { + int chip[988]; + u32 nr_rt_entries; + struct hlist_head map[0]; +}; + +enum kvm_stat_kind { + KVM_STAT_VM = 0, + KVM_STAT_VCPU = 1, +}; + +struct _kvm_stats_desc; + +struct kvm_stat_data { + struct kvm *kvm; + const struct _kvm_stats_desc *desc; + enum kvm_stat_kind kind; +}; + +struct _kvm_stats_desc { + struct kvm_stats_desc desc; + char name[48]; +}; + +struct kvm_device_ops { + const char *name; + int (*create)(struct kvm_device *, u32); + void (*init)(struct kvm_device *); + void (*destroy)(struct kvm_device *); + void (*release)(struct kvm_device *); + int (*set_attr)(struct kvm_device *, struct kvm_device_attr *); + int (*get_attr)(struct kvm_device *, struct kvm_device_attr *); + int (*has_attr)(struct kvm_device *, struct kvm_device_attr *); + long int (*ioctl)(struct kvm_device *, unsigned int, long unsigned int); + int (*mmap)(struct kvm_device *, struct vm_area_struct *); +}; + +struct kvm_coalesced_mmio_dev { + struct list_head list; + struct kvm_io_device dev; + struct kvm *kvm; + struct kvm_coalesced_mmio_zone zone; +}; + +typedef int (*cmp_func_t)(const void *, const void *); + +struct __va_list { + void *__stack; + void *__gr_top; + void *__vr_top; + int __gr_offs; + int __vr_offs; +}; + +typedef struct __va_list va_list; + +struct va_format { + const char *fmt; + va_list *va; +}; + +typedef struct { + void *lock; +} class_preempt_notrace_t; + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = 1, + CACHE_TYPE_DATA = 2, + CACHE_TYPE_SEPARATE = 3, + CACHE_TYPE_UNIFIED = 4, +}; + +enum ftr_type { + FTR_EXACT = 0, + FTR_LOWER_SAFE = 1, + FTR_HIGHER_SAFE = 2, + FTR_HIGHER_OR_ZERO_SAFE = 3, +}; + +struct 
arm64_ftr_bits { + bool sign; + bool visible; + bool strict; + enum ftr_type type; + u8 shift; + u8 width; + s64 safe_val; +}; + +struct arm64_ftr_override { + u64 val; + u64 mask; +}; + +struct arm64_ftr_reg { + const char *name; + u64 strict_mask; + u64 user_mask; + u64 sys_val; + u64 user_val; + struct arm64_ftr_override *override; + const struct arm64_ftr_bits *ftr_bits; +}; + +enum mitigation_state { + SPECTRE_UNAFFECTED = 0, + SPECTRE_MITIGATED = 1, + SPECTRE_VULNERABLE = 2, +}; + +typedef struct mutex *class_mutex_t; + +struct trace_eval_map { + const char *system; + const char *eval_string; + long unsigned int eval_value; +}; + +struct reg_mask_range { + __u64 addr; + __u32 range; + __u32 reserved[13]; +}; + +struct kvm_one_reg { + __u64 id; + __u64 addr; +}; + +enum kvm_device_type { + KVM_DEV_TYPE_FSL_MPIC_20 = 1, + KVM_DEV_TYPE_FSL_MPIC_42 = 2, + KVM_DEV_TYPE_XICS = 3, + KVM_DEV_TYPE_VFIO = 4, + KVM_DEV_TYPE_ARM_VGIC_V2 = 5, + KVM_DEV_TYPE_FLIC = 6, + KVM_DEV_TYPE_ARM_VGIC_V3 = 7, + KVM_DEV_TYPE_ARM_VGIC_ITS = 8, + KVM_DEV_TYPE_XIVE = 9, + KVM_DEV_TYPE_ARM_PV_TIME = 10, + KVM_DEV_TYPE_RISCV_AIA = 11, + KVM_DEV_TYPE_LOONGARCH_IPI = 12, + KVM_DEV_TYPE_LOONGARCH_EIOINTC = 13, + KVM_DEV_TYPE_LOONGARCH_PCHPIC = 14, + KVM_DEV_TYPE_MAX = 15, +}; + +enum vgic_type { + VGIC_V2 = 0, + VGIC_V3 = 1, +}; + +struct vgic_global { + enum vgic_type type; + phys_addr_t vcpu_base; + void *vcpu_base_va; + void *vcpu_hyp_va; + void *vctrl_base; + void *vctrl_hyp; + int nr_lr; + unsigned int maint_irq; + int max_gic_vcpus; + bool can_emulate_gicv2; + bool has_gicv4; + bool has_gicv4_1; + bool no_hw_deactivation; + struct static_key_false gicv3_cpuif; + u32 ich_vtr_el2; +}; + +enum kvm_arch_timers { + TIMER_PTIMER = 0, + TIMER_VTIMER = 1, + NR_KVM_EL0_TIMERS = 2, + TIMER_HVTIMER = 2, + TIMER_HPTIMER = 3, + NR_KVM_TIMERS = 4, +}; + +enum kvm_arch_timer_regs { + TIMER_REG_CNT = 0, + TIMER_REG_CVAL = 1, + TIMER_REG_TVAL = 2, + TIMER_REG_CTL = 3, + TIMER_REG_VOFF = 4, +}; + +typedef u64 kvm_pte_t; + +typedef kvm_pte_t *kvm_pteref_t; + +enum kvm_pgtable_stage2_flags { + KVM_PGTABLE_S2_NOFWB = 1, + KVM_PGTABLE_S2_IDMAP = 2, +}; + +enum kvm_pgtable_prot { + KVM_PGTABLE_PROT_X = 1ULL, + KVM_PGTABLE_PROT_W = 2ULL, + KVM_PGTABLE_PROT_R = 4ULL, + KVM_PGTABLE_PROT_DEVICE = 8ULL, + KVM_PGTABLE_PROT_NORMAL_NC = 16ULL, + KVM_PGTABLE_PROT_SW0 = 36028797018963968ULL, + KVM_PGTABLE_PROT_SW1 = 72057594037927936ULL, + KVM_PGTABLE_PROT_SW2 = 144115188075855872ULL, + KVM_PGTABLE_PROT_SW3 = 288230376151711744ULL, +}; + +typedef bool (*kvm_pgtable_force_pte_cb_t)(u64, u64, enum kvm_pgtable_prot); + +struct kvm_pgtable_mm_ops; + +struct kvm_pgtable { + u32 ia_bits; + s8 start_level; + kvm_pteref_t pgd; + struct kvm_pgtable_mm_ops *mm_ops; + struct kvm_s2_mmu *mmu; + enum kvm_pgtable_stage2_flags flags; + kvm_pgtable_force_pte_cb_t force_pte_cb; +}; + +struct kvm_pgtable_mm_ops { + void * (*zalloc_page)(void *); + void * (*zalloc_pages_exact)(size_t); + void (*free_pages_exact)(void *, size_t); + void (*free_unlinked_table)(void *, s8); + void (*get_page)(void *); + void (*put_page)(void *); + int (*page_count)(void *); + void * (*phys_to_virt)(phys_addr_t); + phys_addr_t (*virt_to_phys)(void *); + void (*dcache_clean_inval_poc)(void *, size_t); + void (*icache_inval_pou)(void *, size_t); +}; + +struct sys_reg_params { + u8 Op0; + u8 Op1; + u8 CRn; + u8 CRm; + u8 Op2; + u64 regval; + bool is_write; +}; + +struct sys_reg_desc { + const char *name; + enum { + AA32_DIRECT = 0, + AA32_LO = 1, + AA32_HI = 2, + } aarch32_map; + u8 
Op0; + u8 Op1; + u8 CRn; + u8 CRm; + u8 Op2; + bool (*access)(struct kvm_vcpu *, struct sys_reg_params *, const struct sys_reg_desc *); + u64 (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *); + int reg; + u64 val; + int (*__get_user)(struct kvm_vcpu *, const struct sys_reg_desc *, u64 *); + int (*set_user)(struct kvm_vcpu *, const struct sys_reg_desc *, u64); + unsigned int (*visibility)(const struct kvm_vcpu *, const struct sys_reg_desc *); +}; + +union tlbi_info { + struct { + u64 start; + u64 size; + } range; + struct { + u64 addr; + } ipa; + struct { + u64 addr; + u32 encoding; + } va; +}; + +typedef __s16 s16; + +typedef __u16 __be16; + +typedef __u32 __wsum; + +typedef __kernel_clock_t clock_t; + +typedef unsigned int slab_flags_t; + +typedef struct { + atomic_t refcnt; +} rcuref_t; + +enum lockdep_ok { + LOCKDEP_STILL_OK = 0, + LOCKDEP_NOW_UNRELIABLE = 1, +}; + +struct pollfd { + int fd; + short int events; + short int revents; +}; + +struct rhashtable; + +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *, u32, u32); + +typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); + +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); + +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +struct bucket_table; + +struct rhashtable { + struct bucket_table *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +struct reclaim_state { + long unsigned int reclaimed; +}; + +struct in6_addr { + union { + __u8 u6_addr8[16]; + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + } in6_u; +}; + +struct bpf_nh_params { + u32 nh_family; + union { + u32 ipv4_nh; + struct in6_addr ipv6_nh; + }; +}; + +struct bpf_redirect_info { + u64 tgt_index; + void *tgt_value; + struct bpf_map *map; + u32 flags; + u32 map_id; + enum bpf_map_type map_type; + struct bpf_nh_params nh; + u32 kern_flags; +}; + +struct bpf_net_context { + struct bpf_redirect_info ri; + struct list_head cpu_map_flush_list; + struct list_head dev_map_flush_list; + struct list_head xskmap_map_flush_list; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next; + struct hlist_nulls_node **pprev; +}; + +struct wait_queue_entry; + +typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); + +struct wait_queue_entry { + unsigned int flags; + void *private; + wait_queue_func_t func; + struct list_head entry; +}; + +typedef struct wait_queue_entry wait_queue_entry_t; + +struct linux_binprm; + +struct coredump_params; + +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *); + long unsigned int min_coredump; +}; + +struct raw_notifier_head { + struct notifier_block *head; +}; + +struct wait_page_queue { + struct folio *folio; + int bit_nr; + wait_queue_entry_t wait; +}; + +struct readahead_control { + struct file *file; + struct address_space *mapping; + struct file_ra_state *ra; + long unsigned int _index; + unsigned int _nr_pages; + unsigned int _batch_count; + bool _workingset; + long unsigned int _pflags; +}; + +struct 
swap_cluster_info; + +struct percpu_cluster; + +struct swap_info_struct { + struct percpu_ref users; + long unsigned int flags; + short int prio; + struct plist_node list; + signed char type; + unsigned int max; + unsigned char *swap_map; + long unsigned int *zeromap; + struct swap_cluster_info *cluster_info; + struct list_head free_clusters; + struct list_head full_clusters; + struct list_head nonfull_clusters[1]; + struct list_head frag_clusters[1]; + unsigned int frag_cluster_nr[1]; + unsigned int lowest_bit; + unsigned int highest_bit; + unsigned int pages; + unsigned int inuse_pages; + unsigned int cluster_next; + unsigned int cluster_nr; + unsigned int *cluster_next_cpu; + struct percpu_cluster *percpu_cluster; + struct rb_root swap_extent_root; + struct block_device *bdev; + struct file *swap_file; + struct completion comp; + spinlock_t lock; + spinlock_t cont_lock; + struct work_struct discard_work; + struct work_struct reclaim_work; + struct list_head discard_clusters; + struct plist_node avail_lists[0]; +}; + +struct posix_acl_entry { + short int e_tag; + short unsigned int e_perm; + union { + kuid_t e_uid; + kgid_t e_gid; + }; +}; + +struct posix_acl { + refcount_t a_refcount; + unsigned int a_count; + struct callback_head a_rcu; + struct posix_acl_entry a_entries[0]; +}; + +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +}; + +typedef __u64 __addrpair; + +typedef __u32 __portpair; + +typedef struct { + struct net *net; +} possible_net_t; + +struct proto; + +struct inet_timewait_death_row; + +struct sock_common { + union { + __addrpair skc_addrpair; + struct { + __be32 skc_daddr; + __be32 skc_rcv_saddr; + }; + }; + union { + unsigned int skc_hash; + __u16 skc_u16hashes[2]; + }; + union { + __portpair skc_portpair; + struct { + __be16 skc_dport; + __u16 skc_num; + }; + }; + short unsigned int skc_family; + volatile unsigned char skc_state; + unsigned char skc_reuse: 4; + unsigned char skc_reuseport: 1; + unsigned char skc_ipv6only: 1; + unsigned char skc_net_refcnt: 1; + int skc_bound_dev_if; + union { + struct hlist_node skc_bind_node; + struct hlist_node skc_portaddr_node; + }; + struct proto *skc_prot; + possible_net_t skc_net; + struct in6_addr skc_v6_daddr; + struct in6_addr skc_v6_rcv_saddr; + atomic64_t skc_cookie; + union { + long unsigned int skc_flags; + struct sock *skc_listener; + struct inet_timewait_death_row *skc_tw_dr; + }; + int skc_dontcopy_begin[0]; + union { + struct hlist_node skc_node; + struct hlist_nulls_node skc_nulls_node; + }; + short unsigned int skc_tx_queue_mapping; + short unsigned int skc_rx_queue_mapping; + union { + int skc_incoming_cpu; + u32 skc_rcv_wnd; + u32 skc_tw_rcv_nxt; + }; + refcount_t skc_refcnt; + int skc_dontcopy_end[0]; + union { + u32 skc_rxhash; + u32 skc_window_clamp; + u32 skc_tw_snd_nxt; + }; +}; + +struct sk_buff; + +struct sk_buff_list { + struct sk_buff *next; + struct sk_buff *prev; +}; + +struct sk_buff_head { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + }; + struct sk_buff_list list; + }; + __u32 qlen; + spinlock_t lock; +}; + +typedef struct { + spinlock_t slock; + int owned; + wait_queue_head_t wq; +} socket_lock_t; + +typedef u64 netdev_features_t; + +struct sock_cgroup_data { + struct cgroup *cgroup; + u32 classid; +}; + +typedef struct {} netns_tracker; + +struct dst_entry; + +struct sk_filter; + +struct socket_wq; + +struct socket; + +struct sock_reuseport; + +struct 
sock { + struct sock_common __sk_common; + __u8 __cacheline_group_begin__sock_write_rx[0]; + atomic_t sk_drops; + __s32 sk_peek_off; + struct sk_buff_head sk_error_queue; + struct sk_buff_head sk_receive_queue; + struct { + atomic_t rmem_alloc; + int len; + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; + __u8 __cacheline_group_end__sock_write_rx[0]; + __u8 __cacheline_group_begin__sock_read_rx[0]; + struct dst_entry *sk_rx_dst; + int sk_rx_dst_ifindex; + u32 sk_rx_dst_cookie; + unsigned int sk_ll_usec; + unsigned int sk_napi_id; + u16 sk_busy_poll_budget; + u8 sk_prefer_busy_poll; + u8 sk_userlocks; + int sk_rcvbuf; + struct sk_filter *sk_filter; + union { + struct socket_wq *sk_wq; + struct socket_wq *sk_wq_raw; + }; + void (*sk_data_ready)(struct sock *); + long int sk_rcvtimeo; + int sk_rcvlowat; + __u8 __cacheline_group_end__sock_read_rx[0]; + __u8 __cacheline_group_begin__sock_read_rxtx[0]; + int sk_err; + struct socket *sk_socket; + struct mem_cgroup *sk_memcg; + __u8 __cacheline_group_end__sock_read_rxtx[0]; + __u8 __cacheline_group_begin__sock_write_rxtx[0]; + socket_lock_t sk_lock; + u32 sk_reserved_mem; + int sk_forward_alloc; + u32 sk_tsflags; + __u8 __cacheline_group_end__sock_write_rxtx[0]; + __u8 __cacheline_group_begin__sock_write_tx[0]; + int sk_write_pending; + atomic_t sk_omem_alloc; + int sk_sndbuf; + int sk_wmem_queued; + refcount_t sk_wmem_alloc; + long unsigned int sk_tsq_flags; + union { + struct sk_buff *sk_send_head; + struct rb_root tcp_rtx_queue; + }; + struct sk_buff_head sk_write_queue; + u32 sk_dst_pending_confirm; + u32 sk_pacing_status; + struct page_frag sk_frag; + struct timer_list sk_timer; + long unsigned int sk_pacing_rate; + atomic_t sk_zckey; + atomic_t sk_tskey; + __u8 __cacheline_group_end__sock_write_tx[0]; + __u8 __cacheline_group_begin__sock_read_tx[0]; + long unsigned int sk_max_pacing_rate; + long int sk_sndtimeo; + u32 sk_priority; + u32 sk_mark; + struct dst_entry *sk_dst_cache; + netdev_features_t sk_route_caps; + u16 sk_gso_type; + u16 sk_gso_max_segs; + unsigned int sk_gso_max_size; + gfp_t sk_allocation; + u32 sk_txhash; + u8 sk_pacing_shift; + bool sk_use_task_frag; + __u8 __cacheline_group_end__sock_read_tx[0]; + u8 sk_gso_disabled: 1; + u8 sk_kern_sock: 1; + u8 sk_no_check_tx: 1; + u8 sk_no_check_rx: 1; + u8 sk_shutdown; + u16 sk_type; + u16 sk_protocol; + long unsigned int sk_lingertime; + struct proto *sk_prot_creator; + rwlock_t sk_callback_lock; + int sk_err_soft; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; + kuid_t sk_uid; + spinlock_t sk_peer_lock; + int sk_bind_phc; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; + ktime_t sk_stamp; + int sk_disconnects; + u8 sk_txrehash; + u8 sk_clockid; + u8 sk_txtime_deadline_mode: 1; + u8 sk_txtime_report_errors: 1; + u8 sk_txtime_unused: 6; + void *sk_user_data; + struct sock_cgroup_data sk_cgrp_data; + void (*sk_state_change)(struct sock *); + void (*sk_write_space)(struct sock *); + void (*sk_error_report)(struct sock *); + int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); + void (*sk_destruct)(struct sock *); + struct sock_reuseport *sk_reuseport_cb; + struct bpf_local_storage *sk_bpf_storage; + struct callback_head sk_rcu; + netns_tracker ns_tracker; + struct xarray sk_user_frags; +}; + +struct trace_event_functions; + +struct trace_event { + struct hlist_node node; + int type; + struct trace_event_functions *funcs; +}; + +struct trace_event_class; + +struct trace_event_call { + struct list_head list; + struct trace_event_class *class; + union { + 
char *name; + struct tracepoint *tp; + }; + struct trace_event event; + char *print_fmt; + union { + void *module; + atomic_t refcnt; + }; + void *data; + int flags; + int perf_refcount; + struct hlist_head *perf_events; + struct bpf_prog_array *prog_array; + int (*perf_perm)(struct trace_event_call *, struct perf_event *); +}; + +struct new_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; + char domainname[65]; +}; + +struct uts_namespace { + struct new_utsname name; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; +}; + +struct ref_tracker_dir {}; + +struct prot_inuse; + +struct netns_core { + struct ctl_table_header *sysctl_hdr; + int sysctl_somaxconn; + int sysctl_optmem_max; + u8 sysctl_txrehash; + u8 sysctl_tstamp_allow_data; + struct prot_inuse *prot_inuse; + struct cpumask *rps_default_mask; +}; + +struct ipstats_mib; + +struct tcp_mib; + +struct linux_mib; + +struct udp_mib; + +struct icmp_mib; + +struct icmpmsg_mib; + +struct icmpv6_mib; + +struct icmpv6msg_mib; + +struct proc_dir_entry; + +struct netns_mib { + struct ipstats_mib *ip_statistics; + struct ipstats_mib *ipv6_statistics; + struct tcp_mib *tcp_statistics; + struct linux_mib *net_statistics; + struct udp_mib *udp_statistics; + struct udp_mib *udp_stats_in6; + struct udp_mib *udplite_statistics; + struct udp_mib *udplite_stats_in6; + struct icmp_mib *icmp_statistics; + struct icmpmsg_mib *icmpmsg_statistics; + struct icmpv6_mib *icmpv6_statistics; + struct icmpv6msg_mib *icmpv6msg_statistics; + struct proc_dir_entry *proc_net_devsnmp6; +}; + +struct netns_packet { + struct mutex sklist_lock; + struct hlist_head sklist; +}; + +struct unix_table { + spinlock_t *locks; + struct hlist_head *buckets; +}; + +struct netns_unix { + struct unix_table table; + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; + +struct netns_nexthop { + struct rb_root rb_root; + struct hlist_head *devhash; + unsigned int seq; + u32 last_id_allocated; + struct blocking_notifier_head notifier_chain; +}; + +struct inet_hashinfo; + +struct inet_timewait_death_row { + refcount_t tw_refcount; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct inet_hashinfo *hashinfo; + int sysctl_max_tw_buckets; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct local_ports { + u32 range; + bool warned; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + +typedef struct { + u64 key[2]; +} siphash_key_t; + +struct udp_table; + +struct ipv4_devconf; + +struct ip_ra_chain; + +struct inet_peer_base; + +struct fqdir; + +struct tcp_congestion_ops; + +struct tcp_fastopen_context; + +struct fib_notifier_ops; + +struct netns_ipv4 { + __u8 __cacheline_group_begin__netns_ipv4_read_tx[0]; + u8 sysctl_tcp_early_retrans; + u8 sysctl_tcp_tso_win_divisor; + u8 sysctl_tcp_tso_rtt_log; + u8 sysctl_tcp_autocorking; + int sysctl_tcp_min_snd_mss; + unsigned int sysctl_tcp_notsent_lowat; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_min_rtt_wlen; + int sysctl_tcp_wmem[3]; + u8 sysctl_ip_fwd_use_pmtu; + __u8 __cacheline_group_end__netns_ipv4_read_tx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_txrx[0]; + u8 sysctl_tcp_moderate_rcvbuf; + __u8 __cacheline_group_end__netns_ipv4_read_txrx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_rx[0]; + u8 sysctl_ip_early_demux; + u8 sysctl_tcp_early_demux; + u8 sysctl_tcp_l3mdev_accept; + int sysctl_tcp_reordering; + int 
sysctl_tcp_rmem[3]; + __u8 __cacheline_group_end__netns_ipv4_read_rx[0]; + long: 64; + struct inet_timewait_death_row tcp_death_row; + struct udp_table *udp_table; + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; + struct ip_ra_chain *ra_chain; + struct mutex ra_mutex; + bool fib_has_custom_local_routes; + bool fib_offload_disabled; + u8 sysctl_tcp_shrink_window; + struct hlist_head *fib_table_hash; + struct sock *fibnl; + struct sock *mc_autojoin_sk; + struct inet_peer_base *peers; + struct fqdir *fqdir; + u8 sysctl_icmp_echo_ignore_all; + u8 sysctl_icmp_echo_enable_probe; + u8 sysctl_icmp_echo_ignore_broadcasts; + u8 sysctl_icmp_ignore_bogus_error_responses; + u8 sysctl_icmp_errors_use_inbound_ifaddr; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + int sysctl_icmp_msgs_per_sec; + int sysctl_icmp_msgs_burst; + atomic_t icmp_global_credit; + u32 icmp_global_stamp; + u32 ip_rt_min_pmtu; + int ip_rt_mtu_expires; + int ip_rt_min_advmss; + struct local_ports ip_local_ports; + u8 sysctl_tcp_ecn; + u8 sysctl_tcp_ecn_fallback; + u8 sysctl_ip_default_ttl; + u8 sysctl_ip_no_pmtu_disc; + u8 sysctl_ip_fwd_update_priority; + u8 sysctl_ip_nonlocal_bind; + u8 sysctl_ip_autobind_reuse; + u8 sysctl_ip_dynaddr; + u8 sysctl_udp_early_demux; + u8 sysctl_nexthop_compat_mode; + u8 sysctl_fwmark_reflect; + u8 sysctl_tcp_fwmark_accept; + u8 sysctl_tcp_mtu_probing; + int sysctl_tcp_mtu_probe_floor; + int sysctl_tcp_base_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; + int sysctl_tcp_keepalive_time; + int sysctl_tcp_keepalive_intvl; + u8 sysctl_tcp_keepalive_probes; + u8 sysctl_tcp_syn_retries; + u8 sysctl_tcp_synack_retries; + u8 sysctl_tcp_syncookies; + u8 sysctl_tcp_migrate_req; + u8 sysctl_tcp_comp_sack_nr; + u8 sysctl_tcp_backlog_ack_defer; + u8 sysctl_tcp_pingpong_thresh; + u8 sysctl_tcp_retries1; + u8 sysctl_tcp_retries2; + u8 sysctl_tcp_orphan_retries; + u8 sysctl_tcp_tw_reuse; + int sysctl_tcp_fin_timeout; + u8 sysctl_tcp_sack; + u8 sysctl_tcp_window_scaling; + u8 sysctl_tcp_timestamps; + int sysctl_tcp_rto_min_us; + u8 sysctl_tcp_recovery; + u8 sysctl_tcp_thin_linear_timeouts; + u8 sysctl_tcp_slow_start_after_idle; + u8 sysctl_tcp_retrans_collapse; + u8 sysctl_tcp_stdurg; + u8 sysctl_tcp_rfc1337; + u8 sysctl_tcp_abort_on_overflow; + u8 sysctl_tcp_fack; + int sysctl_tcp_max_reordering; + int sysctl_tcp_adv_win_scale; + u8 sysctl_tcp_dsack; + u8 sysctl_tcp_app_win; + u8 sysctl_tcp_frto; + u8 sysctl_tcp_nometrics_save; + u8 sysctl_tcp_no_ssthresh_metrics_save; + u8 sysctl_tcp_workaround_signed_windows; + int sysctl_tcp_challenge_ack_limit; + u8 sysctl_tcp_min_tso_segs; + u8 sysctl_tcp_reflect_tos; + int sysctl_tcp_invalid_ratelimit; + int sysctl_tcp_pacing_ss_ratio; + int sysctl_tcp_pacing_ca_ratio; + unsigned int sysctl_tcp_child_ehash_entries; + long unsigned int sysctl_tcp_comp_sack_delay_ns; + long unsigned int sysctl_tcp_comp_sack_slack_ns; + int sysctl_max_syn_backlog; + int sysctl_tcp_fastopen; + const struct tcp_congestion_ops *tcp_congestion_control; + struct tcp_fastopen_context *tcp_fastopen_ctx; + unsigned int sysctl_tcp_fastopen_blackhole_timeout; + atomic_t tfo_active_disable_times; + long unsigned int tfo_active_disable_stamp; + u32 tcp_challenge_timestamp; + u32 tcp_challenge_count; + u8 sysctl_tcp_plb_enabled; + u8 
sysctl_tcp_plb_idle_rehash_rounds; + u8 sysctl_tcp_plb_rehash_rounds; + u8 sysctl_tcp_plb_suspend_rto_sec; + int sysctl_tcp_plb_cong_thresh; + int sysctl_udp_wmem_min; + int sysctl_udp_rmem_min; + u8 sysctl_fib_notify_on_flag_change; + u8 sysctl_tcp_syn_linear_timeouts; + u8 sysctl_igmp_llm_reports; + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; + atomic_t dev_addr_genid; + unsigned int sysctl_udp_child_hash_entries; + long unsigned int *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; + struct fib_notifier_ops *notifier_ops; + unsigned int fib_seq; + struct fib_notifier_ops *ipmr_notifier_ops; + unsigned int ipmr_seq; + atomic_t rt_genid; + siphash_key_t ip_id_key; + struct hlist_head *inet_addr_lst; + struct delayed_work addr_chk_work; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct neighbour; + +struct dst_ops { + short unsigned int family; + unsigned int gc_thresh; + void (*gc)(struct dst_ops *); + struct dst_entry * (*check)(struct dst_entry *, __u32); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); + void (*destroy)(struct dst_entry *); + void (*ifdown)(struct dst_entry *, struct net_device *); + void (*negative_advice)(struct sock *, struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); + void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); + int (*local_out)(struct net *, struct sock *, struct sk_buff *); + struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); + void (*confirm_neigh)(const struct dst_entry *, const void *); + struct kmem_cache *kmem_cachep; + struct percpu_counter pcpuc_entries; + long: 64; + long: 64; + long: 64; +}; + +struct netns_sysctl_ipv6 { + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; + int flush_delay; + int ip6_rt_max_size; + int ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int ip6_rt_min_advmss; + u32 multipath_hash_fields; + u8 multipath_hash_policy; + u8 bindv6only; + u8 flowlabel_consistency; + u8 auto_flowlabels; + int icmpv6_time; + u8 icmpv6_echo_ignore_all; + u8 icmpv6_echo_ignore_multicast; + u8 icmpv6_echo_ignore_anycast; + long unsigned int icmpv6_ratemask[4]; + long unsigned int *icmpv6_ratemask_ptr; + u8 anycast_src_echo_reply; + u8 ip_nonlocal_bind; + u8 fwmark_reflect; + u8 flowlabel_state_ranges; + int idgen_retries; + int idgen_delay; + int flowlabel_reflect; + int max_dst_opts_cnt; + int max_hbh_opts_cnt; + int max_dst_opts_len; + int max_hbh_opts_len; + int seg6_flowlabel; + u32 ioam6_id; + u64 ioam6_id_wide; + u8 skip_notify_on_dev_down; + u8 fib_notify_on_flag_change; + u8 icmpv6_error_anycast_as_unicast; +}; + +struct ipv6_devconf; + +struct fib6_info; + +struct rt6_info; + +struct rt6_statistics; + +struct fib6_table; + +struct seg6_pernet_data; + +struct ioam6_pernet_data; + +struct netns_ipv6 { + struct dst_ops ip6_dst_ops; + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; + struct fqdir *fqdir; + struct fib6_info *fib6_null_entry; + 
struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; + atomic_t ip6_rt_gc_expire; + long unsigned int ip6_rt_last_gc; + unsigned char flowlabel_has_excl; + struct sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; + struct sock *mc_autojoin_sk; + struct hlist_head *inet6_addr_lst; + spinlock_t addrconf_hash_lock; + struct delayed_work addr_chk_work; + atomic_t dev_addr_genid; + atomic_t fib6_sernum; + struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; + struct fib_notifier_ops *ip6mr_notifier_ops; + unsigned int ipmr_seq; + struct { + struct hlist_head head; + spinlock_t lock; + u32 seq; + } ip6addrlbl_table; + struct ioam6_pernet_data *ioam6_data; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct nf_logger; + +struct nf_hook_entries; + +struct netns_nf { + struct proc_dir_entry *proc_netfilter; + const struct nf_logger *nf_loggers[11]; + struct ctl_table_header *nf_log_dir_header; + struct nf_hook_entries *hooks_ipv4[5]; + struct nf_hook_entries *hooks_ipv6[5]; + struct nf_hook_entries *hooks_bridge[5]; + unsigned int defrag_ipv4_users; + unsigned int defrag_ipv6_users; +}; + +struct nf_ct_event_notifier; + +struct nf_generic_net { + unsigned int timeout; +}; + +struct nf_tcp_net { + unsigned int timeouts[14]; + u8 tcp_loose; + u8 tcp_be_liberal; + u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; +}; + +struct nf_udp_net { + unsigned int timeouts[2]; +}; + +struct nf_icmp_net { + unsigned int timeout; +}; + +struct nf_ip_net { + struct nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; +}; + +struct ip_conntrack_stat; + +struct netns_ct { + u8 sysctl_log_invalid; + u8 sysctl_events; + u8 sysctl_acct; + u8 sysctl_tstamp; + u8 sysctl_checksum; + struct ip_conntrack_stat *stat; + struct nf_ct_event_notifier *nf_conntrack_event_cb; + struct nf_ip_net nf_ct_proto; +}; + +struct netns_nftables { + u8 gencursor; +}; + +struct netns_bpf { + struct bpf_prog_array *run_array[2]; + struct bpf_prog *progs[2]; + struct list_head links[2]; +}; + +struct uevent_sock; + +struct net_generic; + +struct net { + refcount_t passive; + spinlock_t rules_mod_lock; + unsigned int dev_base_seq; + u32 ifindex; + spinlock_t nsid_lock; + atomic_t fnhe_genid; + struct list_head list; + struct list_head exit_list; + struct llist_node cleanup_list; + struct key_tag *key_domain; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct idr netns_ids; + struct ns_common ns; + struct ref_tracker_dir refcnt_tracker; + struct ref_tracker_dir notrefcnt_tracker; + struct list_head dev_base_head; + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct ctl_table_set sysctls; + struct sock *rtnl; + struct sock *genl_sock; + struct uevent_sock *uevent_sock; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; + struct xarray dev_by_index; + struct raw_notifier_head netdev_chain; + u32 hash_mix; + struct net_device *loopback_dev; + struct list_head rules_ops; + struct netns_core core; + struct netns_mib mib; + struct netns_packet packet; + struct netns_unix unx; + struct netns_nexthop nexthop; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct netns_ipv4 ipv4; + struct netns_ipv6 ipv6; + struct netns_nf nf; 
+ struct netns_ct ct; + struct netns_nftables nft; + struct net_generic *gen; + struct netns_bpf bpf; + u64 net_cookie; + struct sock *diag_nlsk; + long: 64; + long: 64; +}; + +struct binfmt_misc { + struct list_head entries; + rwlock_t entries_lock; + bool enabled; +}; + +typedef struct { + local64_t v; +} u64_stats_t; + +typedef short unsigned int __kernel_sa_family_t; + +typedef __kernel_sa_family_t sa_family_t; + +struct sockaddr { + sa_family_t sa_family; + union { + char sa_data_min[14]; + struct { + struct {} __empty_sa_data; + char sa_data[0]; + }; + }; +}; + +struct ubuf_info; + +struct msghdr { + void *msg_name; + int msg_namelen; + int msg_inq; + struct iov_iter msg_iter; + union { + void *msg_control; + void *msg_control_user; + }; + bool msg_control_is_user: 1; + bool msg_get_inq: 1; + unsigned int msg_flags; + __kernel_size_t msg_controllen; + struct kiocb *msg_iocb; + struct ubuf_info *msg_ubuf; + int (*sg_from_iter)(struct sk_buff *, struct iov_iter *, size_t); +}; + +struct ubuf_info_ops; + +struct ubuf_info { + const struct ubuf_info_ops *ops; + refcount_t refcnt; + u8 flags; +}; + +typedef unsigned int sk_buff_data_t; + +struct skb_ext; + +struct sk_buff { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + union { + struct net_device *dev; + long unsigned int dev_scratch; + }; + }; + struct rb_node rbnode; + struct list_head list; + struct llist_node ll_node; + }; + struct sock *sk; + union { + ktime_t tstamp; + u64 skb_mstamp_ns; + }; + char cb[48]; + union { + struct { + long unsigned int _skb_refdst; + void (*destructor)(struct sk_buff *); + }; + struct list_head tcp_tsorted_anchor; + long unsigned int _sk_redir; + }; + long unsigned int _nfct; + unsigned int len; + unsigned int data_len; + __u16 mac_len; + __u16 hdr_len; + __u16 queue_mapping; + __u8 __cloned_offset[0]; + __u8 cloned: 1; + __u8 nohdr: 1; + __u8 fclone: 2; + __u8 peeked: 1; + __u8 head_frag: 1; + __u8 pfmemalloc: 1; + __u8 pp_recycle: 1; + __u8 active_extensions; + union { + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + }; + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 
remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + } headers; + }; + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head; + unsigned char *data; + unsigned int truesize; + refcount_t users; + struct skb_ext *extensions; +}; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; +} sync_serial_settings; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; + unsigned int slot_map; +} te1_settings; + +typedef struct { + short unsigned int encoding; + short unsigned int parity; +} raw_hdlc_proto; + +typedef struct { + unsigned int t391; + unsigned int t392; + unsigned int n391; + unsigned int n392; + unsigned int n393; + short unsigned int lmi; + short unsigned int dce; +} fr_proto; + +typedef struct { + unsigned int dlci; +} fr_proto_pvc; + +typedef struct { + unsigned int dlci; + char master[16]; +} fr_proto_pvc_info; + +typedef struct { + unsigned int interval; + unsigned int timeout; +} cisco_proto; + +typedef struct { + short unsigned int dce; + unsigned int modulo; + unsigned int window; + unsigned int t1; + unsigned int t2; + unsigned int n2; +} x25_hdlc_proto; + +struct ifmap { + long unsigned int mem_start; + long unsigned int mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct if_settings { + unsigned int type; + unsigned int size; + union { + raw_hdlc_proto *raw_hdlc; + cisco_proto *cisco; + fr_proto *fr; + fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; + x25_hdlc_proto *x25; + sync_serial_settings *sync; + te1_settings *te1; + } ifs_ifsu; +}; + +struct ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + void *ifru_data; + struct if_settings ifru_settings; + } ifr_ifru; +}; + +struct swap_cluster_info { + spinlock_t lock; + u16 count; + u8 flags; + u8 order; + struct list_head list; +}; + +struct percpu_cluster { + unsigned int next[1]; +}; + +typedef struct { + union { + void *kernel; + void *user; + }; + bool is_kernel: 1; +} sockptr_t; + +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); + int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); + int 
(*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); + int (*map_delete_elem)(struct bpf_offloaded_map *, void *); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; +}; + +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + +enum rx_handler_result { + RX_HANDLER_CONSUMED = 0, + RX_HANDLER_ANOTHER = 1, + RX_HANDLER_EXACT = 2, + RX_HANDLER_PASS = 3, +}; + +typedef enum rx_handler_result rx_handler_result_t; + +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); + +typedef u32 xdp_features_t; + +struct net_device_stats { + union { + long unsigned int rx_packets; + atomic_long_t __rx_packets; + }; + union { + long unsigned int tx_packets; + atomic_long_t __tx_packets; + }; + union { + long unsigned int rx_bytes; + atomic_long_t __rx_bytes; + }; + union { + long unsigned int tx_bytes; + atomic_long_t __tx_bytes; + }; + union { + long unsigned int rx_errors; + atomic_long_t __rx_errors; + }; + union { + long unsigned int tx_errors; + atomic_long_t __tx_errors; + }; + union { + long unsigned int rx_dropped; + atomic_long_t __rx_dropped; + }; + union { + long unsigned int tx_dropped; + atomic_long_t __tx_dropped; + }; + union { + long unsigned int multicast; + atomic_long_t __multicast; + }; + union { + long unsigned int collisions; + atomic_long_t __collisions; + }; + union { + long unsigned int rx_length_errors; + atomic_long_t __rx_length_errors; + }; + union { + long unsigned int rx_over_errors; + atomic_long_t __rx_over_errors; + }; + union { + long unsigned int rx_crc_errors; + atomic_long_t __rx_crc_errors; + }; + union { + long unsigned int rx_frame_errors; + atomic_long_t __rx_frame_errors; + }; + union { + long unsigned int rx_fifo_errors; + atomic_long_t __rx_fifo_errors; + }; + union { + long unsigned int rx_missed_errors; + atomic_long_t __rx_missed_errors; + }; + union { + long unsigned int tx_aborted_errors; + atomic_long_t __tx_aborted_errors; + }; + union { + long unsigned int tx_carrier_errors; + atomic_long_t __tx_carrier_errors; + }; + union { + long unsigned int tx_fifo_errors; + atomic_long_t __tx_fifo_errors; + }; + union { + long unsigned int tx_heartbeat_errors; + atomic_long_t __tx_heartbeat_errors; + }; + union { + long unsigned int tx_window_errors; + atomic_long_t __tx_window_errors; + }; + union { + long unsigned int rx_compressed; + atomic_long_t __rx_compressed; + }; + union { + long unsigned int tx_compressed; + atomic_long_t __tx_compressed; + }; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; + struct rb_root tree; +}; + +enum netdev_ml_priv_type { + ML_PRIV_NONE = 0, + ML_PRIV_CAN = 1, +}; + +enum netdev_stat_type { + NETDEV_PCPU_STAT_NONE = 0, + NETDEV_PCPU_STAT_LSTATS = 1, + NETDEV_PCPU_STAT_TSTATS = 2, + NETDEV_PCPU_STAT_DSTATS = 3, +}; + +struct sfp_bus; + +struct udp_tunnel_nic; + +struct bpf_xdp_link; + +struct bpf_xdp_entity { + struct bpf_prog *prog; + struct bpf_xdp_link *link; +}; + +typedef struct {} netdevice_tracker; + +struct net_device_ops; + +struct header_ops; + +struct netdev_queue; + +struct xps_dev_maps; + +struct bpf_mprog_entry; + +struct pcpu_lstats; + +struct pcpu_sw_netstats; + +struct pcpu_dstats; + +struct inet6_dev; + +struct netdev_rx_queue; + +struct netdev_name_node; + +struct dev_ifalias; + +struct xdp_metadata_ops; + +struct xsk_tx_metadata_ops; + +struct net_device_core_stats; + +struct ethtool_ops; + +struct ndisc_ops; + +struct in_device; + 
+struct cpu_rmap; + +struct Qdisc; + +struct xdp_dev_bulk_queue; + +struct rtnl_link_ops; + +struct netdev_stat_ops; + +struct netdev_queue_mgmt_ops; + +struct phy_link_topology; + +struct phy_device; + +struct udp_tunnel_nic_info; + +struct ethtool_netdev_state; + +struct rtnl_hw_stats64; + +struct devlink_port; + +struct dim_irq_moder; + +struct napi_config; + +struct net_device { + __u8 __cacheline_group_begin__net_device_read_tx[0]; + union { + struct { + long unsigned int priv_flags: 32; + long unsigned int lltx: 1; + }; + struct { + long unsigned int priv_flags: 32; + long unsigned int lltx: 1; + } priv_flags_fast; + }; + const struct net_device_ops *netdev_ops; + const struct header_ops *header_ops; + struct netdev_queue *_tx; + netdev_features_t gso_partial_features; + unsigned int real_num_tx_queues; + unsigned int gso_max_size; + unsigned int gso_ipv4_max_size; + u16 gso_max_segs; + s16 num_tc; + unsigned int mtu; + short unsigned int needed_headroom; + struct netdev_tc_txq tc_to_txq[16]; + struct xps_dev_maps *xps_maps[2]; + struct nf_hook_entries *nf_hooks_egress; + struct bpf_mprog_entry *tcx_egress; + __u8 __cacheline_group_end__net_device_read_tx[0]; + __u8 __cacheline_group_begin__net_device_read_txrx[0]; + union { + struct pcpu_lstats *lstats; + struct pcpu_sw_netstats *tstats; + struct pcpu_dstats *dstats; + }; + long unsigned int state; + unsigned int flags; + short unsigned int hard_header_len; + netdev_features_t features; + struct inet6_dev *ip6_ptr; + __u8 __cacheline_group_end__net_device_read_txrx[0]; + __u8 __cacheline_group_begin__net_device_read_rx[0]; + struct bpf_prog *xdp_prog; + struct list_head ptype_specific; + int ifindex; + unsigned int real_num_rx_queues; + struct netdev_rx_queue *_rx; + unsigned int gro_max_size; + unsigned int gro_ipv4_max_size; + rx_handler_func_t *rx_handler; + void *rx_handler_data; + possible_net_t nd_net; + struct bpf_mprog_entry *tcx_ingress; + __u8 __cacheline_group_end__net_device_read_rx[0]; + char name[16]; + struct netdev_name_node *name_node; + struct dev_ifalias *ifalias; + long unsigned int mem_end; + long unsigned int mem_start; + long unsigned int base_addr; + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + struct list_head close_list; + struct list_head ptype_all; + struct { + struct list_head upper; + struct list_head lower; + } adj_list; + xdp_features_t xdp_features; + const struct xdp_metadata_ops *xdp_metadata_ops; + const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; + short unsigned int gflags; + short unsigned int needed_tailroom; + netdev_features_t hw_features; + netdev_features_t wanted_features; + netdev_features_t vlan_features; + netdev_features_t hw_enc_features; + netdev_features_t mpls_features; + unsigned int min_mtu; + unsigned int max_mtu; + short unsigned int type; + unsigned char min_header_len; + unsigned char name_assign_type; + int group; + struct net_device_stats stats; + struct net_device_core_stats *core_stats; + atomic_t carrier_up_count; + atomic_t carrier_down_count; + const struct ethtool_ops *ethtool_ops; + const struct ndisc_ops *ndisc_ops; + unsigned int operstate; + unsigned char link_mode; + unsigned char if_port; + unsigned char dma; + unsigned char perm_addr[32]; + unsigned char addr_assign_type; + unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; + short unsigned int neigh_priv_len; + short unsigned int dev_id; + short unsigned int dev_port; + int irq; + u32 priv_len; + spinlock_t addr_list_lock; + 
struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + struct kset *queues_kset; + unsigned int promiscuity; + unsigned int allmulti; + bool uc_promisc; + struct in_device *ip_ptr; + struct hlist_head fib_nh_head; + const unsigned char *dev_addr; + unsigned int num_rx_queues; + unsigned int xdp_zc_max_segs; + struct netdev_queue *ingress_queue; + struct nf_hook_entries *nf_hooks_ingress; + unsigned char broadcast[32]; + struct cpu_rmap *rx_cpu_rmap; + struct hlist_node index_hlist; + unsigned int num_tx_queues; + struct Qdisc *qdisc; + unsigned int tx_queue_len; + spinlock_t tx_global_lock; + struct xdp_dev_bulk_queue *xdp_bulkq; + struct timer_list watchdog_timer; + int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; + int *pcpu_refcnt; + struct ref_tracker_dir refcnt_tracker; + struct list_head link_watch_list; + u8 reg_state; + bool dismantle; + enum { + RTNL_LINK_INITIALIZED = 0, + RTNL_LINK_INITIALIZING = 1, + } rtnl_link_state: 16; + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *); + void *ml_priv; + enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type: 8; + struct device dev; + const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_rx_queue_group; + const struct rtnl_link_ops *rtnl_link_ops; + const struct netdev_stat_ops *stat_ops; + const struct netdev_queue_mgmt_ops *queue_mgmt_ops; + unsigned int tso_max_size; + u16 tso_max_segs; + u8 prio_tc_map[16]; + struct phy_link_topology *link_topo; + struct phy_device *phydev; + struct sfp_bus *sfp_bus; + struct lock_class_key *qdisc_tx_busylock; + bool proto_down; + bool threaded; + long unsigned int see_all_hwtstamp_requests: 1; + long unsigned int change_proto_down: 1; + long unsigned int netns_local: 1; + long unsigned int fcoe_mtu: 1; + struct list_head net_notifier_list; + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; + struct ethtool_netdev_state *ethtool; + struct bpf_xdp_entity xdp_state[3]; + u8 dev_addr_shadow[32]; + netdevice_tracker linkwatch_dev_tracker; + netdevice_tracker watchdog_dev_tracker; + netdevice_tracker dev_registered_tracker; + struct rtnl_hw_stats64 *offload_xstats_l3; + struct devlink_port *devlink_port; + struct hlist_head page_pools; + struct dim_irq_moder *irq_moder; + u64 max_pacing_offload_horizon; + struct napi_config *napi_config; + long unsigned int gro_flush_timeout; + u32 napi_defer_hard_irqs; + struct mutex lock; + struct hlist_head neighbours[2]; + long: 64; + long: 64; + long: 64; + u8 priv[0]; +}; + +struct bpf_prog_stats { + u64_stats_t cnt; + u64_stats_t nsecs; + u64_stats_t misses; + struct u64_stats_sync syncp; + long: 64; +}; + +struct sock_fprog_kern { + u16 len; + struct sock_filter *filter; +}; + +struct tc_stats { + __u64 bytes; + __u32 packets; + __u32 drops; + __u32 overlimits; + __u32 bps; + __u32 pps; + __u32 qlen; + __u32 backlog; +}; + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short int cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int insert; + unsigned int insert_failed; + unsigned int clash_resolve; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; + 
unsigned int chaintoolong; +}; + +struct skb_shared_hwtstamps { + union { + ktime_t hwtstamp; + void *netdev_data; + }; +}; + +struct ubuf_info_ops { + void (*complete)(struct sk_buff *, struct ubuf_info *, bool); + int (*link_skb)(struct sk_buff *, struct ubuf_info *); +}; + +struct skb_ext { + refcount_t refcnt; + u8 offset[1]; + u8 chunks; + long: 0; + char data[0]; +}; + +struct dql { + unsigned int num_queued; + unsigned int adj_limit; + unsigned int last_obj_cnt; + short unsigned int stall_thrs; + long unsigned int history_head; + long unsigned int history[4]; + long: 64; + unsigned int limit; + unsigned int num_completed; + unsigned int prev_ovlimit; + unsigned int prev_num_queued; + unsigned int prev_last_obj_cnt; + unsigned int lowest_slack; + long unsigned int slack_start_time; + unsigned int max_limit; + unsigned int min_limit; + unsigned int slack_hold_time; + short unsigned int stall_max; + long unsigned int last_reap; + long unsigned int stall_cnt; +}; + +struct prot_inuse { + int all; + int val[64]; +}; + +struct ipstats_mib { + u64 mibs[38]; + struct u64_stats_sync syncp; +}; + +struct icmp_mib { + long unsigned int mibs[30]; +}; + +struct icmpmsg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6_mib { + long unsigned int mibs[7]; +}; + +struct icmpv6_mib_device { + atomic_long_t mibs[7]; +}; + +struct icmpv6msg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6msg_mib_device { + atomic_long_t mibs[512]; +}; + +struct tcp_mib { + long unsigned int mibs[16]; +}; + +struct udp_mib { + long unsigned int mibs[10]; +}; + +struct linux_mib { + long unsigned int mibs[132]; +}; + +struct inet_frags; + +struct fqdir { + long int high_thresh; + long int low_thresh; + int timeout; + int max_dist; + struct inet_frags *f; + struct net *net; + bool dead; + long: 64; + long: 64; + struct rhashtable rhashtable; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + atomic_long_t mem; + struct work_struct destroy_work; + struct llist_node free_list; + long: 64; + long: 64; +}; + +struct inet_frag_queue; + +struct inet_frags { + unsigned int qsize; + void (*constructor)(struct inet_frag_queue *, const void *); + void (*destructor)(struct inet_frag_queue *); + void (*frag_expire)(struct timer_list *); + struct kmem_cache *frags_cachep; + const char *frags_cache_name; + struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; +}; + +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; + +struct inet_frag_queue { + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; + struct timer_list timer; + spinlock_t lock; + refcount_t refcnt; + struct rb_root rb_fragments; + struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; + ktime_t stamp; + int len; + int meat; + u8 tstamp_type; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; + struct callback_head rcu; +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, +}; + +struct ack_sample; + +struct rate_sample; + +union tcp_cc_info; + +struct tcp_congestion_ops { + u32 (*ssthresh)(struct sock *); + void (*cong_avoid)(struct sock *, u32, u32); + void (*set_state)(struct sock *, u8); + void 
(*cwnd_event)(struct sock *, enum tcp_ca_event); + void (*in_ack_event)(struct sock *, u32); + void (*pkts_acked)(struct sock *, const struct ack_sample *); + u32 (*min_tso_segs)(struct sock *); + void (*cong_control)(struct sock *, u32, int, const struct rate_sample *); + u32 (*undo_cwnd)(struct sock *); + u32 (*sndbuf_expand)(struct sock *); + size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); + char name[16]; + struct module *owner; + struct list_head list; + u32 key; + u32 flags; + void (*init)(struct sock *); + void (*release)(struct sock *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct netlink_ext_ack; + +struct fib_notifier_ops { + int family; + struct list_head list; + unsigned int (*fib_seq_read)(const struct net *); + int (*fib_dump)(struct net *, struct notifier_block *, struct netlink_ext_ack *); + struct module *owner; + struct callback_head rcu; +}; + +struct uncached_list; + +struct lwtunnel_state; + +struct dst_entry { + struct net_device *dev; + struct dst_ops *ops; + long unsigned int _metrics; + long unsigned int expires; + void *__pad1; + int (*input)(struct sk_buff *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + short unsigned int flags; + short int obsolete; + short unsigned int header_len; + short unsigned int trailer_len; + rcuref_t __rcuref; + int __use; + long unsigned int lastuse; + struct callback_head callback_head; + short int error; + short int __pad; + __u32 tclassid; + netdevice_tracker dev_tracker; + struct list_head rt_uncached; + struct uncached_list *rt_uncached_list; + struct lwtunnel_state *lwtstate; +}; + +struct hh_cache { + unsigned int hh_len; + seqlock_t hh_lock; + long unsigned int hh_data[12]; +}; + +struct neigh_table; + +struct neigh_parms; + +struct neigh_ops; + +struct neighbour { + struct hlist_node hash; + struct hlist_node dev_list; + struct neigh_table *tbl; + struct neigh_parms *parms; + long unsigned int confirmed; + long unsigned int updated; + rwlock_t lock; + refcount_t refcnt; + unsigned int arp_queue_len_bytes; + struct sk_buff_head arp_queue; + struct timer_list timer; + long unsigned int used; + atomic_t probes; + u8 nud_state; + u8 type; + u8 dead; + u8 protocol; + u32 flags; + seqlock_t ha_lock; + long: 0; + unsigned char ha[32]; + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); + const struct neigh_ops *ops; + struct list_head gc_list; + struct list_head managed_list; + struct callback_head rcu; + struct net_device *dev; + netdevice_tracker dev_tracker; + u8 primary_key[0]; +}; + +struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; +}; + +struct ipv6_devconf { + __u8 __cacheline_group_begin__ipv6_devconf_read_txrx[0]; + __s32 disable_ipv6; + __s32 hop_limit; + __s32 mtu6; + __s32 forwarding; + __s32 disable_policy; + __s32 proxy_ndp; + __u8 __cacheline_group_end__ipv6_devconf_read_txrx[0]; + __s32 accept_ra; + __s32 accept_redirects; + __s32 autoconf; + __s32 dad_transmits; + __s32 rtr_solicits; + __s32 rtr_solicit_interval; + __s32 rtr_solicit_max_interval; + __s32 rtr_solicit_delay; + __s32 force_mld_version; + __s32 mldv1_unsolicited_report_interval; + __s32 mldv2_unsolicited_report_interval; + __s32 use_tempaddr; + __s32 temp_valid_lft; + __s32 temp_prefered_lft; + __s32 regen_min_advance; + __s32 regen_max_retry; + __s32 max_desync_factor; + __s32 max_addresses; + __s32 accept_ra_defrtr; + __u32 ra_defrtr_metric; + __s32 accept_ra_min_hop_limit; + __s32 accept_ra_min_lft; + __s32 accept_ra_pinfo; + __s32 
ignore_routes_with_linkdown; + __s32 accept_source_route; + __s32 accept_ra_from_local; + __s32 drop_unicast_in_l2_multicast; + __s32 accept_dad; + __s32 force_tllao; + __s32 ndisc_notify; + __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; + __s32 drop_unsolicited_na; + __s32 accept_untracked_na; + struct ipv6_stable_secret stable_secret; + __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; + __s32 seg6_enabled; + __u32 enhanced_dad; + __u32 addr_gen_mode; + __s32 ndisc_tclass; + __s32 rpl_seg_enabled; + __u32 ioam6_id; + __u32 ioam6_id_wide; + __u8 ioam6_enabled; + __u8 ndisc_evict_nocarrier; + __u8 ra_honor_pio_life; + __u8 ra_honor_pio_pflag; + struct ctl_table_header *sysctl_header; +}; + +enum { + IPPROTO_IP = 0, + IPPROTO_ICMP = 1, + IPPROTO_IGMP = 2, + IPPROTO_IPIP = 4, + IPPROTO_TCP = 6, + IPPROTO_EGP = 8, + IPPROTO_PUP = 12, + IPPROTO_UDP = 17, + IPPROTO_IDP = 22, + IPPROTO_TP = 29, + IPPROTO_DCCP = 33, + IPPROTO_IPV6 = 41, + IPPROTO_RSVP = 46, + IPPROTO_GRE = 47, + IPPROTO_ESP = 50, + IPPROTO_AH = 51, + IPPROTO_MTP = 92, + IPPROTO_BEETPH = 94, + IPPROTO_ENCAP = 98, + IPPROTO_PIM = 103, + IPPROTO_COMP = 108, + IPPROTO_L2TP = 115, + IPPROTO_SCTP = 132, + IPPROTO_UDPLITE = 136, + IPPROTO_MPLS = 137, + IPPROTO_ETHERNET = 143, + IPPROTO_RAW = 255, + IPPROTO_SMC = 256, + IPPROTO_MPTCP = 262, + IPPROTO_MAX = 263, +}; + +enum nf_log_type { + NF_LOG_TYPE_LOG = 0, + NF_LOG_TYPE_ULOG = 1, + NF_LOG_TYPE_MAX = 2, +}; + +typedef u8 u_int8_t; + +struct nf_loginfo; + +typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); + +struct nf_logger { + char *name; + enum nf_log_type type; + nf_logfn *logfn; + struct module *me; +}; + +typedef enum { + SS_FREE = 0, + SS_UNCONNECTED = 1, + SS_CONNECTING = 2, + SS_CONNECTED = 3, + SS_DISCONNECTING = 4, +} socket_state; + +struct socket_wq { + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + long unsigned int flags; + struct callback_head rcu; + long: 64; +}; + +struct proto_ops; + +struct socket { + socket_state state; + short int type; + long unsigned int flags; + struct file *file; + struct sock *sk; + const struct proto_ops *ops; + long: 64; + long: 64; + long: 64; + struct socket_wq wq; +}; + +typedef struct { + size_t written; + size_t count; + union { + char *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); + +typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *); + +struct proto_accept_arg; + +struct proto_ops { + int family; + struct module *owner; + int (*release)(struct socket *); + int (*bind)(struct socket *, struct sockaddr *, int); + int (*connect)(struct socket *, struct sockaddr *, int, int); + int (*socketpair)(struct socket *, struct socket *); + int (*accept)(struct socket *, struct socket *, struct proto_accept_arg *); + int (*getname)(struct socket *, struct sockaddr *, int); + __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); + int (*ioctl)(struct socket *, unsigned int, long unsigned int); + int (*gettstamp)(struct socket *, void *, bool, bool); + int (*listen)(struct socket *, int); + int (*shutdown)(struct socket *, int); + int (*setsockopt)(struct socket *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct socket *, int, int, char *, int *); + void (*show_fdinfo)(struct seq_file *, struct socket *); + int (*sendmsg)(struct socket 
*, struct msghdr *, size_t); + int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); + int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); + ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct socket *); + int (*set_peek_off)(struct sock *, int); + int (*peek_len)(struct socket *); + int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); + int (*read_skb)(struct sock *, skb_read_actor_t); + int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t); + int (*set_rcvlowat)(struct sock *, int); +}; + +struct proto_accept_arg { + int flags; + int err; + int is_empty; + bool kern; +}; + +struct nlmsghdr { + __u32 nlmsg_len; + __u16 nlmsg_type; + __u16 nlmsg_flags; + __u32 nlmsg_seq; + __u32 nlmsg_pid; +}; + +struct nlattr { + __u16 nla_len; + __u16 nla_type; +}; + +struct nla_policy; + +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + const struct nla_policy *policy; + const struct nlattr *miss_nest; + u16 miss_type; + u8 cookie[20]; + u8 cookie_len; + char _msg_buf[80]; +}; + +struct netlink_range_validation; + +struct netlink_range_validation_signed; + +struct nla_policy { + u8 type; + u8 validation_type; + u16 len; + union { + u16 strict_start_type; + const u32 bitfield32_valid; + const u32 mask; + const char *reject_message; + const struct nla_policy *nested_policy; + const struct netlink_range_validation *range; + const struct netlink_range_validation_signed *range_signed; + struct { + s16 min; + s16 max; + }; + int (*validate)(const struct nlattr *, struct netlink_ext_ack *); + }; +}; + +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + struct netlink_ext_ack *extack; + u16 family; + u16 answer_flags; + u32 min_dump_alloc; + unsigned int prev_seq; + unsigned int seq; + int flags; + bool strict_check; + union { + u8 ctx[48]; + long int args[6]; + }; +}; + +struct ndmsg { + __u8 ndm_family; + __u8 ndm_pad1; + __u16 ndm_pad2; + __s32 ndm_ifindex; + __u16 ndm_state; + __u8 ndm_flags; + __u8 ndm_type; +}; + +struct rtnl_link_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; + __u64 collisions; + __u64 rx_length_errors; + __u64 rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; + __u64 tx_aborted_errors; + __u64 tx_carrier_errors; + __u64 tx_fifo_errors; + __u64 tx_heartbeat_errors; + __u64 tx_window_errors; + __u64 rx_compressed; + __u64 tx_compressed; + __u64 rx_nohandler; + __u64 rx_otherhost_dropped; +}; + +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; +}; + +struct ifla_vf_guid { + __u32 vf; + __u64 guid; +}; + +struct ifla_vf_stats { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 broadcast; + __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; +}; + +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 spoofchk; + __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; + __u32 rss_query_en; + __u32 trusted; + __be16 vlan_proto; +}; + +enum netdev_tx { + 
__NETDEV_TX_MIN = -2147483648, + NETDEV_TX_OK = 0, + NETDEV_TX_BUSY = 16, +}; + +typedef enum netdev_tx netdev_tx_t; + +struct net_device_core_stats { + long unsigned int rx_dropped; + long unsigned int tx_dropped; + long unsigned int rx_nohandler; + long unsigned int rx_otherhost_dropped; +}; + +struct header_ops { + int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int); + int (*parse)(const struct sk_buff *, unsigned char *); + int (*cache)(const struct neighbour *, struct hh_cache *, __be16); + void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); + bool (*validate)(const char *, unsigned int); + __be16 (*parse_protocol)(const struct sk_buff *); +}; + +struct gro_list { + struct list_head list; + int count; +}; + +struct napi_config { + u64 gro_flush_timeout; + u64 irq_suspend_timeout; + u32 defer_hard_irqs; + unsigned int napi_id; +}; + +struct napi_struct { + struct list_head poll_list; + long unsigned int state; + int weight; + u32 defer_hard_irqs_count; + long unsigned int gro_bitmask; + int (*poll)(struct napi_struct *, int); + int list_owner; + struct net_device *dev; + struct gro_list gro_hash[8]; + struct sk_buff *skb; + struct list_head rx_list; + int rx_count; + unsigned int napi_id; + struct hrtimer timer; + struct task_struct *thread; + long unsigned int gro_flush_timeout; + long unsigned int irq_suspend_timeout; + u32 defer_hard_irqs; + struct list_head dev_list; + struct hlist_node napi_hash_node; + int irq; + int index; + struct napi_config *config; +}; + +struct netdev_queue { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct Qdisc *qdisc; + struct Qdisc *qdisc_sleeping; + struct kobject kobj; + long unsigned int tx_maxrate; + atomic_long_t trans_timeout; + struct net_device *sb_dev; + long: 64; + long: 64; + struct dql dql; + spinlock_t _xmit_lock; + int xmit_lock_owner; + long unsigned int trans_start; + long unsigned int state; + struct napi_struct *napi; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct qdisc_skb_head { + struct sk_buff *head; + struct sk_buff *tail; + __u32 qlen; + spinlock_t lock; +}; + +struct gnet_stats_basic_sync { + u64_stats_t bytes; + u64_stats_t packets; + struct u64_stats_sync syncp; +}; + +struct gnet_stats_queue { + __u32 qlen; + __u32 backlog; + __u32 drops; + __u32 requeues; + __u32 overlimits; +}; + +struct Qdisc_ops; + +struct qdisc_size_table; + +struct net_rate_estimator; + +struct Qdisc { + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + unsigned int flags; + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table *stab; + struct hlist_node hash; + u32 handle; + u32 parent; + struct netdev_queue *dev_queue; + struct net_rate_estimator *rate_est; + struct gnet_stats_basic_sync *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + int pad; + refcount_t refcnt; + long: 64; + long: 64; + long: 64; + struct sk_buff_head gso_skb; + struct qdisc_skb_head q; + struct gnet_stats_basic_sync bstats; + struct gnet_stats_queue qstats; + int owner; + long unsigned int state; + long unsigned int state2; + struct Qdisc *next_sched; + struct sk_buff_head skb_bad_txq; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t busylock; + spinlock_t seqlock; + struct callback_head rcu; + netdevice_tracker dev_tracker; + struct lock_class_key root_lock_key; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long int privdata[0]; +}; + +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct callback_head rcu; + u16 queues[0]; +}; + +struct xps_dev_maps { + struct callback_head rcu; + unsigned int nr_ids; + s16 num_tc; + struct xps_map *attr_map[0]; +}; + +struct netdev_phys_item_id { + unsigned char id[32]; + unsigned char id_len; +}; + +enum net_device_path_type { + DEV_PATH_ETHERNET = 0, + DEV_PATH_VLAN = 1, + DEV_PATH_BRIDGE = 2, + DEV_PATH_PPPOE = 3, + DEV_PATH_DSA = 4, + DEV_PATH_MTK_WDMA = 5, +}; + +struct net_device_path { + enum net_device_path_type type; + const struct net_device *dev; + union { + struct { + u16 id; + __be16 proto; + u8 h_dest[6]; + } encap; + struct { + enum { + DEV_PATH_BR_VLAN_KEEP = 0, + DEV_PATH_BR_VLAN_TAG = 1, + DEV_PATH_BR_VLAN_UNTAG = 2, + DEV_PATH_BR_VLAN_UNTAG_HW = 3, + } vlan_mode; + u16 vlan_id; + __be16 vlan_proto; + } bridge; + struct { + int port; + u16 proto; + } dsa; + struct { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; + u8 amsdu; + } mtk_wdma; + }; +}; + +struct net_device_path_ctx { + const struct net_device *dev; + u8 daddr[6]; + int num_vlans; + struct { + u16 id; + __be16 proto; + } vlan[2]; +}; + +enum tc_setup_type { + TC_QUERY_CAPS = 0, + TC_SETUP_QDISC_MQPRIO = 1, + TC_SETUP_CLSU32 = 2, + TC_SETUP_CLSFLOWER = 3, + TC_SETUP_CLSMATCHALL = 4, + TC_SETUP_CLSBPF = 5, + TC_SETUP_BLOCK = 6, + TC_SETUP_QDISC_CBS = 7, + TC_SETUP_QDISC_RED = 8, + TC_SETUP_QDISC_PRIO = 9, + TC_SETUP_QDISC_MQ = 10, + TC_SETUP_QDISC_ETF = 11, + TC_SETUP_ROOT_QDISC = 12, + TC_SETUP_QDISC_GRED = 13, + TC_SETUP_QDISC_TAPRIO = 14, + TC_SETUP_FT = 15, + TC_SETUP_QDISC_ETS = 16, + TC_SETUP_QDISC_TBF = 17, + TC_SETUP_QDISC_FIFO = 18, + TC_SETUP_QDISC_HTB = 19, + TC_SETUP_ACT = 20, +}; + +enum bpf_netdev_command { + XDP_SETUP_PROG = 0, + XDP_SETUP_PROG_HW = 1, + BPF_OFFLOAD_MAP_ALLOC = 2, + BPF_OFFLOAD_MAP_FREE = 3, + XDP_SETUP_XSK_POOL = 4, +}; + +struct xsk_buff_pool; + +struct netdev_bpf { + enum bpf_netdev_command command; + union { + struct { + u32 flags; + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; + struct { + struct bpf_offloaded_map *offmap; + }; + struct { + struct xsk_buff_pool *pool; + u16 queue_id; + } xsk; + }; +}; + +struct dev_ifalias { + struct callback_head rcuhead; + char ifalias[0]; +}; + +struct xdp_frame; + +struct xdp_buff; + +struct ip_tunnel_parm_kern; + +struct kernel_hwtstamp_config; + +struct net_device_ops { + int (*ndo_init)(struct net_device *); + void (*ndo_uninit)(struct net_device *); + int (*ndo_open)(struct net_device *); + int (*ndo_stop)(struct net_device *); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); + netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t); + u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *); + void (*ndo_change_rx_flags)(struct net_device *, int); + void (*ndo_set_rx_mode)(struct net_device *); + int (*ndo_set_mac_address)(struct net_device *, void *); + int (*ndo_validate_addr)(struct net_device *); + int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_eth_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_siocbond)(struct net_device *, struct ifreq *, int); + int (*ndo_siocwandev)(struct net_device *, struct if_settings *); + int (*ndo_siocdevprivate)(struct net_device *, struct ifreq *, void *, int); + int (*ndo_set_config)(struct net_device *, struct ifmap *); + int (*ndo_change_mtu)(struct net_device *, int); + int 
(*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); + void (*ndo_tx_timeout)(struct net_device *, unsigned int); + void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); + bool (*ndo_has_offload_stats)(const struct net_device *, int); + int (*ndo_get_offload_stats)(int, const struct net_device *, void *); + struct net_device_stats * (*ndo_get_stats)(struct net_device *); + int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); + int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); + int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); + int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); + int (*ndo_set_vf_rate)(struct net_device *, int, int, int); + int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); + int (*ndo_set_vf_trust)(struct net_device *, int, bool); + int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); + int (*ndo_set_vf_link_state)(struct net_device *, int, int); + int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); + int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); + int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); + int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, struct ifla_vf_guid *); + int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); + int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); + int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); + int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); + int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *); + int (*ndo_del_slave)(struct net_device *, struct net_device *); + struct net_device * (*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool); + struct net_device * (*ndo_sk_get_lower_dev)(struct net_device *, struct sock *); + netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); + int (*ndo_set_features)(struct net_device *, netdev_features_t); + int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); + void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); + int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16, bool *, struct netlink_ext_ack *); + int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, bool *, struct netlink_ext_ack *); + int (*ndo_fdb_del_bulk)(struct nlmsghdr *, struct net_device *, struct netlink_ext_ack *); + int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); + int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, const unsigned char *, u16, u32, u32, struct netlink_ext_ack *); + int (*ndo_mdb_add)(struct net_device *, struct nlattr **, u16, struct netlink_ext_ack *); + int (*ndo_mdb_del)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); + int (*ndo_mdb_del_bulk)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); + int (*ndo_mdb_dump)(struct net_device *, struct sk_buff *, struct netlink_callback *); + int (*ndo_mdb_get)(struct net_device *, struct nlattr **, u32, u32, struct netlink_ext_ack *); + int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, struct netlink_ext_ack *); + int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); + int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); 
+ int (*ndo_change_carrier)(struct net_device *, bool); + int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); + void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); + void (*ndo_dfwd_del_station)(struct net_device *, void *); + int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); + int (*ndo_get_iflink)(const struct net_device *); + int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); + void (*ndo_set_rx_headroom)(struct net_device *, int); + int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); + int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); + struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *, struct xdp_buff *); + int (*ndo_xsk_wakeup)(struct net_device *, u32, u32); + int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm_kern *, int); + struct net_device * (*ndo_get_peer_dev)(struct net_device *); + int (*ndo_fill_forward_path)(struct net_device_path_ctx *, struct net_device_path *); + ktime_t (*ndo_get_tstamp)(struct net_device *, const struct skb_shared_hwtstamps *, bool); + int (*ndo_hwtstamp_get)(struct net_device *, struct kernel_hwtstamp_config *); + int (*ndo_hwtstamp_set)(struct net_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); +}; + +struct neigh_parms { + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + int (*neigh_setup)(struct neighbour *); + struct neigh_table *tbl; + void *sysctl_table; + int dead; + refcount_t refcnt; + struct callback_head callback_head; + int reachable_time; + u32 qlen; + int data[14]; + long unsigned int data_state[1]; +}; + +enum hwtstamp_source { + HWTSTAMP_SOURCE_UNSPEC = 0, + HWTSTAMP_SOURCE_NETDEV = 1, + HWTSTAMP_SOURCE_PHYLIB = 2, +}; + +struct kernel_hwtstamp_config { + int flags; + int tx_type; + int rx_filter; + struct ifreq *ifr; + bool copied_to_user; + enum hwtstamp_source source; +}; + +struct pcpu_lstats { + u64_stats_t packets; + u64_stats_t bytes; + struct u64_stats_sync syncp; +}; + +struct pcpu_sw_netstats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + struct u64_stats_sync syncp; +}; + +struct pcpu_dstats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_drops; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_drops; + struct u64_stats_sync syncp; + long: 64; + long: 64; +}; + +struct ipv6_devstat { + struct proc_dir_entry *proc_dir_entry; + struct ipstats_mib *ipv6; + struct icmpv6_mib_device *icmpv6dev; + struct icmpv6msg_mib_device *icmpv6msgdev; +}; + +struct ifmcaddr6; + +struct ifacaddr6; + +struct inet6_dev { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head addr_list; + struct ifmcaddr6 *mc_list; + struct ifmcaddr6 *mc_tomb; + unsigned char mc_qrv; + unsigned char mc_gq_running; + unsigned char mc_ifc_count; + unsigned char mc_dad_count; + long unsigned int mc_v1_seen; + long unsigned int mc_qi; + long unsigned int mc_qri; + long unsigned int mc_maxdelay; + struct delayed_work mc_gq_work; + struct delayed_work mc_ifc_work; + struct delayed_work mc_dad_work; + struct delayed_work mc_query_work; + struct delayed_work mc_report_work; + struct sk_buff_head mc_query_queue; + struct sk_buff_head mc_report_queue; + spinlock_t mc_query_lock; + spinlock_t 
mc_report_lock; + struct mutex mc_lock; + struct ifacaddr6 *ac_list; + rwlock_t lock; + refcount_t refcnt; + __u32 if_flags; + int dead; + u32 desync_factor; + struct list_head tempaddr_list; + struct in6_addr token; + struct neigh_parms *nd_parms; + struct ipv6_devconf cnf; + struct ipv6_devstat stats; + struct timer_list rs_timer; + __s32 rs_interval; + __u8 rs_probes; + long unsigned int tstamp; + struct callback_head rcu; + unsigned int ra_mtu; +}; + +enum xdp_rss_hash_type { + XDP_RSS_L3_IPV4 = 1, + XDP_RSS_L3_IPV6 = 2, + XDP_RSS_L3_DYNHDR = 4, + XDP_RSS_L4 = 8, + XDP_RSS_L4_TCP = 16, + XDP_RSS_L4_UDP = 32, + XDP_RSS_L4_SCTP = 64, + XDP_RSS_L4_IPSEC = 128, + XDP_RSS_L4_ICMP = 256, + XDP_RSS_TYPE_NONE = 0, + XDP_RSS_TYPE_L2 = 0, + XDP_RSS_TYPE_L3_IPV4 = 1, + XDP_RSS_TYPE_L3_IPV6 = 2, + XDP_RSS_TYPE_L3_IPV4_OPT = 5, + XDP_RSS_TYPE_L3_IPV6_EX = 6, + XDP_RSS_TYPE_L4_ANY = 8, + XDP_RSS_TYPE_L4_IPV4_TCP = 25, + XDP_RSS_TYPE_L4_IPV4_UDP = 41, + XDP_RSS_TYPE_L4_IPV4_SCTP = 73, + XDP_RSS_TYPE_L4_IPV4_IPSEC = 137, + XDP_RSS_TYPE_L4_IPV4_ICMP = 265, + XDP_RSS_TYPE_L4_IPV6_TCP = 26, + XDP_RSS_TYPE_L4_IPV6_UDP = 42, + XDP_RSS_TYPE_L4_IPV6_SCTP = 74, + XDP_RSS_TYPE_L4_IPV6_IPSEC = 138, + XDP_RSS_TYPE_L4_IPV6_ICMP = 266, + XDP_RSS_TYPE_L4_IPV6_TCP_EX = 30, + XDP_RSS_TYPE_L4_IPV6_UDP_EX = 46, + XDP_RSS_TYPE_L4_IPV6_SCTP_EX = 78, +}; + +struct xdp_md; + +struct xdp_metadata_ops { + int (*xmo_rx_timestamp)(const struct xdp_md *, u64 *); + int (*xmo_rx_hash)(const struct xdp_md *, u32 *, enum xdp_rss_hash_type *); + int (*xmo_rx_vlan_tag)(const struct xdp_md *, __be16 *, u16 *); +}; + +struct xsk_tx_metadata_ops { + void (*tmo_request_timestamp)(void *); + u64 (*tmo_fill_timestamp)(void *); + void (*tmo_request_checksum)(u16, u16, void *); +}; + +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE = 0, + ETHTOOL_ID_ACTIVE = 1, + ETHTOOL_ID_ON = 2, + ETHTOOL_ID_OFF = 3, +}; + +struct ethtool_drvinfo; + +struct ethtool_regs; + +struct ethtool_wolinfo; + +struct ethtool_link_ext_state_info; + +struct ethtool_link_ext_stats; + +struct ethtool_eeprom; + +struct ethtool_coalesce; + +struct kernel_ethtool_coalesce; + +struct ethtool_ringparam; + +struct kernel_ethtool_ringparam; + +struct ethtool_pause_stats; + +struct ethtool_pauseparam; + +struct ethtool_test; + +struct ethtool_stats; + +struct ethtool_rxnfc; + +struct ethtool_flash; + +struct ethtool_rxfh_param; + +struct ethtool_rxfh_context; + +struct ethtool_channels; + +struct ethtool_dump; + +struct kernel_ethtool_ts_info; + +struct ethtool_ts_stats; + +struct ethtool_modinfo; + +struct ethtool_keee; + +struct ethtool_tunable; + +struct ethtool_link_ksettings; + +struct ethtool_fec_stats; + +struct ethtool_fecparam; + +struct ethtool_module_eeprom; + +struct ethtool_eth_phy_stats; + +struct ethtool_eth_mac_stats; + +struct ethtool_eth_ctrl_stats; + +struct ethtool_rmon_stats; + +struct ethtool_rmon_hist_range; + +struct ethtool_module_power_mode_params; + +struct ethtool_mm_state; + +struct ethtool_mm_cfg; + +struct ethtool_mm_stats; + +struct ethtool_ops { + u32 cap_link_lanes_supported: 1; + u32 cap_rss_ctx_supported: 1; + u32 cap_rss_sym_xor_supported: 1; + u32 rxfh_per_ctx_key: 1; + u32 cap_rss_rxnfc_adds: 1; + u32 rxfh_indir_space; + u16 rxfh_key_space; + u16 rxfh_priv_size; + u32 rxfh_max_num_contexts; + u32 supported_coalesce_params; + u32 supported_ring_params; + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + 
void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, struct ethtool_link_ext_state_info *); + void (*get_link_ext_stats)(struct net_device *, struct ethtool_link_ext_stats *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + void (*get_pause_stats)(struct net_device *, struct ethtool_pause_stats *); + void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct net_device *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *); + int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*create_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*modify_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*remove_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, u32, struct netlink_ext_ack *); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *); + void (*get_ts_stats)(struct net_device *, struct ethtool_ts_stats *); + int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_eee)(struct net_device *, struct ethtool_keee 
*); + int (*set_eee)(struct net_device *, struct ethtool_keee *); + int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); + void (*get_fec_stats)(struct net_device *, struct ethtool_fec_stats *); + int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); + void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*get_phy_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); + int (*set_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); + void (*get_eth_phy_stats)(struct net_device *, struct ethtool_eth_phy_stats *); + void (*get_eth_mac_stats)(struct net_device *, struct ethtool_eth_mac_stats *); + void (*get_eth_ctrl_stats)(struct net_device *, struct ethtool_eth_ctrl_stats *); + void (*get_rmon_stats)(struct net_device *, struct ethtool_rmon_stats *, const struct ethtool_rmon_hist_range **); + int (*get_module_power_mode)(struct net_device *, struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); + int (*set_module_power_mode)(struct net_device *, const struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); + int (*get_mm)(struct net_device *, struct ethtool_mm_state *); + int (*set_mm)(struct net_device *, struct ethtool_mm_cfg *, struct netlink_ext_ack *); + void (*get_mm_stats)(struct net_device *, struct ethtool_mm_stats *); +}; + +struct nd_opt_hdr; + +struct ndisc_options; + +struct prefix_info; + +struct ndisc_ops { + int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); + void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *); + int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); + void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); + void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool); +}; + +struct rtnl_link_ops { + struct list_head list; + struct srcu_struct srcu; + const char *kind; + size_t priv_size; + struct net_device * (*alloc)(struct nlattr **, const char *, unsigned char, unsigned int, unsigned int); + void (*setup)(struct net_device *); + bool netns_refund; + const u16 peer_type; + unsigned int maxtype; + const struct nla_policy *policy; + int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + void (*dellink)(struct net_device *, struct list_head *); + size_t (*get_size)(const 
struct net_device *); + int (*fill_info)(struct sk_buff *, const struct net_device *); + size_t (*get_xstats_size)(const struct net_device *); + int (*fill_xstats)(struct sk_buff *, const struct net_device *); + unsigned int (*get_num_tx_queues)(); + unsigned int (*get_num_rx_queues)(); + unsigned int slave_maxtype; + const struct nla_policy *slave_policy; + int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + size_t (*get_slave_size)(const struct net_device *, const struct net_device *); + int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); + struct net * (*get_link_net)(const struct net_device *); + size_t (*get_linkxstats_size)(const struct net_device *, int); + int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); +}; + +struct netdev_queue_stats_rx; + +struct netdev_queue_stats_tx; + +struct netdev_stat_ops { + void (*get_queue_stats_rx)(struct net_device *, int, struct netdev_queue_stats_rx *); + void (*get_queue_stats_tx)(struct net_device *, int, struct netdev_queue_stats_tx *); + void (*get_base_stats)(struct net_device *, struct netdev_queue_stats_rx *, struct netdev_queue_stats_tx *); +}; + +struct netdev_queue_mgmt_ops { + size_t ndo_queue_mem_size; + int (*ndo_queue_mem_alloc)(struct net_device *, void *, int); + void (*ndo_queue_mem_free)(struct net_device *, void *); + int (*ndo_queue_start)(struct net_device *, void *, int); + int (*ndo_queue_stop)(struct net_device *, void *, int); +}; + +struct udp_tunnel_nic_table_info { + unsigned int n_entries; + unsigned int tunnel_types; +}; + +struct udp_tunnel_info; + +struct udp_tunnel_nic_shared; + +struct udp_tunnel_nic_info { + int (*set_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*unset_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*sync_table)(struct net_device *, unsigned int); + struct udp_tunnel_nic_shared *shared; + unsigned int flags; + struct udp_tunnel_nic_table_info tables[4]; +}; + +enum { + RTAX_UNSPEC = 0, + RTAX_LOCK = 1, + RTAX_MTU = 2, + RTAX_WINDOW = 3, + RTAX_RTT = 4, + RTAX_RTTVAR = 5, + RTAX_SSTHRESH = 6, + RTAX_CWND = 7, + RTAX_ADVMSS = 8, + RTAX_REORDERING = 9, + RTAX_HOPLIMIT = 10, + RTAX_INITCWND = 11, + RTAX_FEATURES = 12, + RTAX_RTO_MIN = 13, + RTAX_INITRWND = 14, + RTAX_QUICKACK = 15, + RTAX_CC_ALGO = 16, + RTAX_FASTOPEN_NO_COOKIE = 17, + __RTAX_MAX = 18, +}; + +struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + short unsigned int tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + __u32 tcm_info; +}; + +struct gnet_dump { + spinlock_t *lock; + struct sk_buff *skb; + struct nlattr *tail; + int compat_tc_stats; + int compat_xstats; + int padattr; + void *xstats; + int xstats_len; + struct tc_stats tc_stats; +}; + +struct netlink_range_validation { + u64 min; + u64 max; +}; + +struct netlink_range_validation_signed { + s64 min; + s64 max; +}; + +enum flow_action_hw_stats_bit { + FLOW_ACTION_HW_STATS_IMMEDIATE_BIT = 0, + FLOW_ACTION_HW_STATS_DELAYED_BIT = 1, + FLOW_ACTION_HW_STATS_DISABLED_BIT = 2, + FLOW_ACTION_HW_STATS_NUM_BITS = 3, +}; + +struct flow_block { + struct list_head cb_list; +}; + +typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *); + +struct qdisc_size_table { + struct callback_head rcu; + struct list_head list; + struct tc_sizespec szopts; + int refcnt; + u16 data[0]; +}; + +struct Qdisc_class_ops; + 
+struct Qdisc_ops { + struct Qdisc_ops *next; + const struct Qdisc_class_ops *cl_ops; + char id[16]; + int priv_size; + unsigned int static_flags; + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + struct sk_buff * (*peek)(struct Qdisc *); + int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*reset)(struct Qdisc *); + void (*destroy)(struct Qdisc *); + int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*attach)(struct Qdisc *); + int (*change_tx_queue_len)(struct Qdisc *, unsigned int); + void (*change_real_num_tx)(struct Qdisc *, unsigned int); + int (*dump)(struct Qdisc *, struct sk_buff *); + int (*dump_stats)(struct Qdisc *, struct gnet_dump *); + void (*ingress_block_set)(struct Qdisc *, u32); + void (*egress_block_set)(struct Qdisc *, u32); + u32 (*ingress_block_get)(struct Qdisc *); + u32 (*egress_block_get)(struct Qdisc *); + struct module *owner; +}; + +struct qdisc_walker; + +struct tcf_block; + +struct Qdisc_class_ops { + unsigned int flags; + struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); + int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *); + struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int); + void (*qlen_notify)(struct Qdisc *, long unsigned int); + long unsigned int (*find)(struct Qdisc *, u32); + int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *); + int (*delete)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + void (*walk)(struct Qdisc *, struct qdisc_walker *); + struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32); + void (*unbind_tcf)(struct Qdisc *, long unsigned int); + int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *); + int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *); +}; + +struct tcf_chain; + +struct tcf_block { + struct xarray ports; + struct mutex lock; + struct list_head chain_list; + u32 index; + u32 classid; + refcount_t refcnt; + struct net *net; + struct Qdisc *q; + struct rw_semaphore cb_lock; + struct flow_block flow_block; + struct list_head owner_list; + bool keep_dst; + bool bypass_wanted; + atomic_t filtercnt; + atomic_t skipswcnt; + atomic_t offloadcnt; + unsigned int nooffloaddevcnt; + unsigned int lockeddevcnt; + struct { + struct tcf_chain *chain; + struct list_head filter_chain_list; + } chain0; + struct callback_head rcu; + struct hlist_head proto_destroy_ht[128]; + struct mutex proto_destroy_lock; +}; + +struct tcf_result; + +struct tcf_proto_ops; + +struct tcf_proto { + struct tcf_proto *next; + void *root; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + __be16 protocol; + u32 prio; + void *data; + const struct tcf_proto_ops *ops; + struct tcf_chain *chain; + spinlock_t lock; + bool deleting; + bool counted; + refcount_t refcnt; + struct callback_head rcu; + struct hlist_node destroy_ht_node; +}; + +struct tcf_result { + union { + struct { + long unsigned int class; + u32 classid; + }; + const struct tcf_proto *goto_tp; + }; +}; + +struct tcf_walker; + +struct tcf_exts; + +struct tcf_proto_ops { + struct list_head head; + char kind[16]; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + int (*init)(struct tcf_proto *); + 
void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *); + void * (*get)(struct tcf_proto *, u32); + void (*put)(struct tcf_proto *, void *); + int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, u32, struct netlink_ext_ack *); + int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *); + void (*walk)(struct tcf_proto *, struct tcf_walker *, bool); + int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, struct netlink_ext_ack *); + void (*hw_add)(struct tcf_proto *, void *); + void (*hw_del)(struct tcf_proto *, void *); + void (*bind_class)(void *, u32, long unsigned int, void *, long unsigned int); + void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *); + void (*tmplt_destroy)(void *); + void (*tmplt_reoffload)(struct tcf_chain *, bool, flow_setup_cb_t *, void *); + struct tcf_exts * (*get_exts)(const struct tcf_proto *, u32); + int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*tmplt_dump)(struct sk_buff *, struct net *, void *); + struct module *owner; + int flags; +}; + +struct tcf_chain { + struct mutex filter_chain_lock; + struct tcf_proto *filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; + unsigned int refcnt; + unsigned int action_refcnt; + bool explicitly_created; + bool flushing; + const struct tcf_proto_ops *tmplt_ops; + void *tmplt_priv; + struct callback_head rcu; +}; + +struct sk_filter { + refcount_t refcnt; + struct callback_head rcu; + struct bpf_prog *prog; +}; + +struct seq_buf { + char *buffer; + size_t size; + size_t len; +}; + +struct trace_seq { + char buffer[8156]; + struct seq_buf seq; + size_t readpos; + int full; +}; + +enum perf_branch_sample_type_shift { + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, + PERF_SAMPLE_BRANCH_COND_SHIFT = 10, + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, + PERF_SAMPLE_BRANCH_MAX_SHIFT = 20, +}; + +struct trace_entry { + short unsigned int type; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; + +struct trace_array; + +struct tracer; + +struct array_buffer; + +struct ring_buffer_iter; + +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + struct array_buffer *array_buffer; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter **buffer_iter; + long unsigned int iter_flags; + void *temp; + unsigned int temp_size; + char *fmt; + unsigned int fmt_size; + atomic_t wait_index; + struct trace_seq tmp_seq; + cpumask_var_t started; + bool closed; + bool snapshot; + struct trace_seq seq; + struct trace_entry *ent; + long 
unsigned int lost_events; + int leftover; + int ent_size; + int cpu; + u64 ts; + loff_t pos; + long int idx; +}; + +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, + TRACE_TYPE_NO_CONSUME = 3, +}; + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +enum trace_reg { + TRACE_REG_REGISTER = 0, + TRACE_REG_UNREGISTER = 1, + TRACE_REG_PERF_REGISTER = 2, + TRACE_REG_PERF_UNREGISTER = 3, + TRACE_REG_PERF_OPEN = 4, + TRACE_REG_PERF_CLOSE = 5, + TRACE_REG_PERF_ADD = 6, + TRACE_REG_PERF_DEL = 7, +}; + +struct trace_event_fields { + const char *type; + union { + struct { + const char *name; + const int size; + const int align; + const int is_signed; + const int filter_type; + const int len; + }; + int (*define_fields)(struct trace_event_call *); + }; +}; + +struct trace_event_class { + const char *system; + void *probe; + void *perf_probe; + int (*reg)(struct trace_event_call *, enum trace_reg, void *); + struct trace_event_fields *fields_array; + struct list_head * (*get_fields)(struct trace_event_call *); + struct list_head fields; + int (*raw_init)(struct trace_event_call *); +}; + +enum { + TRACE_EVENT_FL_CAP_ANY_BIT = 0, + TRACE_EVENT_FL_NO_SET_FILTER_BIT = 1, + TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 2, + TRACE_EVENT_FL_TRACEPOINT_BIT = 3, + TRACE_EVENT_FL_DYNAMIC_BIT = 4, + TRACE_EVENT_FL_KPROBE_BIT = 5, + TRACE_EVENT_FL_UPROBE_BIT = 6, + TRACE_EVENT_FL_EPROBE_BIT = 7, + TRACE_EVENT_FL_FPROBE_BIT = 8, + TRACE_EVENT_FL_CUSTOM_BIT = 9, +}; + +enum { + EVENT_FILE_FL_ENABLED_BIT = 0, + EVENT_FILE_FL_RECORDED_CMD_BIT = 1, + EVENT_FILE_FL_RECORDED_TGID_BIT = 2, + EVENT_FILE_FL_FILTERED_BIT = 3, + EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, + EVENT_FILE_FL_SOFT_MODE_BIT = 5, + EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, + EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, + EVENT_FILE_FL_TRIGGER_COND_BIT = 8, + EVENT_FILE_FL_PID_FILTER_BIT = 9, + EVENT_FILE_FL_WAS_ENABLED_BIT = 10, + EVENT_FILE_FL_FREED_BIT = 11, +}; + +enum rpc_display_format_t { + RPC_DISPLAY_ADDR = 0, + RPC_DISPLAY_PORT = 1, + RPC_DISPLAY_PROTO = 2, + RPC_DISPLAY_HEX_ADDR = 3, + RPC_DISPLAY_HEX_PORT = 4, + RPC_DISPLAY_NETID = 5, + RPC_DISPLAY_MAX = 6, +}; + +enum { + NEIGH_VAR_MCAST_PROBES = 0, + NEIGH_VAR_UCAST_PROBES = 1, + NEIGH_VAR_APP_PROBES = 2, + NEIGH_VAR_MCAST_REPROBES = 3, + NEIGH_VAR_RETRANS_TIME = 4, + NEIGH_VAR_BASE_REACHABLE_TIME = 5, + NEIGH_VAR_DELAY_PROBE_TIME = 6, + NEIGH_VAR_INTERVAL_PROBE_TIME_MS = 7, + NEIGH_VAR_GC_STALETIME = 8, + NEIGH_VAR_QUEUE_LEN_BYTES = 9, + NEIGH_VAR_PROXY_QLEN = 10, + NEIGH_VAR_ANYCAST_DELAY = 11, + NEIGH_VAR_PROXY_DELAY = 12, + NEIGH_VAR_LOCKTIME = 13, + NEIGH_VAR_QUEUE_LEN = 14, + NEIGH_VAR_RETRANS_TIME_MS = 15, + NEIGH_VAR_BASE_REACHABLE_TIME_MS = 16, + NEIGH_VAR_GC_INTERVAL = 17, + NEIGH_VAR_GC_THRESH1 = 18, + NEIGH_VAR_GC_THRESH2 = 19, + NEIGH_VAR_GC_THRESH3 = 20, + NEIGH_VAR_MAX = 21, +}; + +struct pneigh_entry; + +struct neigh_statistics; + +struct neigh_hash_table; + +struct neigh_table { + int family; + unsigned int entry_size; + unsigned int key_len; + __be16 protocol; + __u32 (*hash)(const void *, const struct net_device *, __u32 *); + bool (*key_eq)(const struct neighbour *, const void *); + int (*constructor)(struct neighbour *); + int (*pconstructor)(struct pneigh_entry *); + void (*pdestructor)(struct pneigh_entry *); + void (*proxy_redo)(struct sk_buff *); + 
int (*is_multicast)(const void *); + bool (*allow_add)(const struct net_device *, struct netlink_ext_ack *); + char *id; + struct neigh_parms parms; + struct list_head parms_list; + int gc_interval; + int gc_thresh1; + int gc_thresh2; + int gc_thresh3; + long unsigned int last_flush; + struct delayed_work gc_work; + struct delayed_work managed_work; + struct timer_list proxy_timer; + struct sk_buff_head proxy_queue; + atomic_t entries; + atomic_t gc_entries; + struct list_head gc_list; + struct list_head managed_list; + rwlock_t lock; + long unsigned int last_rand; + struct neigh_statistics *stats; + struct neigh_hash_table *nht; + struct pneigh_entry **phash_buckets; +}; + +struct neigh_statistics { + long unsigned int allocs; + long unsigned int destroys; + long unsigned int hash_grows; + long unsigned int res_failed; + long unsigned int lookups; + long unsigned int hits; + long unsigned int rcv_probes_mcast; + long unsigned int rcv_probes_ucast; + long unsigned int periodic_gc_runs; + long unsigned int forced_gc_runs; + long unsigned int unres_discards; + long unsigned int table_fulls; +}; + +struct neigh_ops { + int family; + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +}; + +struct pneigh_entry { + struct pneigh_entry *next; + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; + u8 protocol; + u32 key[0]; +}; + +struct neigh_hash_table { + struct hlist_head *hash_heads; + unsigned int hash_shift; + __u32 hash_rnd[4]; + struct callback_head rcu; +}; + +enum { + TCP_ESTABLISHED = 1, + TCP_SYN_SENT = 2, + TCP_SYN_RECV = 3, + TCP_FIN_WAIT1 = 4, + TCP_FIN_WAIT2 = 5, + TCP_TIME_WAIT = 6, + TCP_CLOSE = 7, + TCP_CLOSE_WAIT = 8, + TCP_LAST_ACK = 9, + TCP_LISTEN = 10, + TCP_CLOSING = 11, + TCP_NEW_SYN_RECV = 12, + TCP_BOUND_INACTIVE = 13, + TCP_MAX_STATES = 14, +}; + +struct smc_hashinfo; + +struct sk_psock; + +struct request_sock_ops; + +struct timewait_sock_ops; + +struct raw_hashinfo; + +struct proto { + void (*close)(struct sock *, long int); + int (*pre_connect)(struct sock *, struct sockaddr *, int); + int (*connect)(struct sock *, struct sockaddr *, int); + int (*disconnect)(struct sock *, int); + struct sock * (*accept)(struct sock *, struct proto_accept_arg *); + int (*ioctl)(struct sock *, int, int *); + int (*init)(struct sock *); + void (*destroy)(struct sock *); + void (*shutdown)(struct sock *, int); + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*keepalive)(struct sock *, int); + int (*sendmsg)(struct sock *, struct msghdr *, size_t); + int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int *); + void (*splice_eof)(struct socket *); + int (*bind)(struct sock *, struct sockaddr *, int); + int (*bind_add)(struct sock *, struct sockaddr *, int); + int (*backlog_rcv)(struct sock *, struct sk_buff *); + bool (*bpf_bypass_getsockopt)(int, int); + void (*release_cb)(struct sock *); + int (*hash)(struct sock *); + void (*unhash)(struct sock *); + void (*rehash)(struct sock *); + int (*get_port)(struct sock *, short unsigned int); + void (*put_port)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + unsigned int inuse_idx; + bool (*stream_memory_free)(const struct sock *, int); + bool (*sock_is_readable)(struct sock *); + 
void (*enter_memory_pressure)(struct sock *); + void (*leave_memory_pressure)(struct sock *); + atomic_long_t *memory_allocated; + int *per_cpu_fw_alloc; + struct percpu_counter *sockets_allocated; + long unsigned int *memory_pressure; + long int *sysctl_mem; + int *sysctl_wmem; + int *sysctl_rmem; + u32 sysctl_wmem_offset; + u32 sysctl_rmem_offset; + int max_header; + bool no_autobind; + struct kmem_cache *slab; + unsigned int obj_size; + unsigned int ipv6_pinfo_offset; + slab_flags_t slab_flags; + unsigned int useroffset; + unsigned int usersize; + unsigned int *orphan_count; + struct request_sock_ops *rsk_prot; + struct timewait_sock_ops *twsk_prot; + union { + struct inet_hashinfo *hashinfo; + struct udp_table *udp_table; + struct raw_hashinfo *raw_hash; + struct smc_hashinfo *smc_hash; + } h; + struct module *owner; + char name[32]; + struct list_head node; + int (*diag_destroy)(struct sock *, int); +}; + +enum sk_rst_reason { + SK_RST_REASON_NOT_SPECIFIED = 0, + SK_RST_REASON_NO_SOCKET = 1, + SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE = 2, + SK_RST_REASON_TCP_RFC7323_PAWS = 3, + SK_RST_REASON_TCP_TOO_OLD_ACK = 4, + SK_RST_REASON_TCP_ACK_UNSENT_DATA = 5, + SK_RST_REASON_TCP_FLAGS = 6, + SK_RST_REASON_TCP_OLD_ACK = 7, + SK_RST_REASON_TCP_ABORT_ON_DATA = 8, + SK_RST_REASON_TCP_TIMEWAIT_SOCKET = 9, + SK_RST_REASON_INVALID_SYN = 10, + SK_RST_REASON_TCP_ABORT_ON_CLOSE = 11, + SK_RST_REASON_TCP_ABORT_ON_LINGER = 12, + SK_RST_REASON_TCP_ABORT_ON_MEMORY = 13, + SK_RST_REASON_TCP_STATE = 14, + SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT = 15, + SK_RST_REASON_TCP_DISCONNECT_WITH_DATA = 16, + SK_RST_REASON_MPTCP_RST_EUNSPEC = 17, + SK_RST_REASON_MPTCP_RST_EMPTCP = 18, + SK_RST_REASON_MPTCP_RST_ERESOURCE = 19, + SK_RST_REASON_MPTCP_RST_EPROHIBIT = 20, + SK_RST_REASON_MPTCP_RST_EWQ2BIG = 21, + SK_RST_REASON_MPTCP_RST_EBADPERF = 22, + SK_RST_REASON_MPTCP_RST_EMIDDLEBOX = 23, + SK_RST_REASON_ERROR = 24, + SK_RST_REASON_MAX = 25, +}; + +struct request_sock; + +struct request_sock_ops { + int family; + unsigned int obj_size; + struct kmem_cache *slab; + char *slab_name; + int (*rtx_syn_ack)(const struct sock *, struct request_sock *); + void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); + void (*send_reset)(const struct sock *, struct sk_buff *, enum sk_rst_reason); + void (*destructor)(struct request_sock *); + void (*syn_ack_timeout)(const struct request_sock *); +}; + +struct timewait_sock_ops { + struct kmem_cache *twsk_slab; + char *twsk_slab_name; + unsigned int twsk_obj_size; + void (*twsk_destructor)(struct sock *); +}; + +struct saved_syn; + +struct request_sock { + struct sock_common __req_common; + struct request_sock *dl_next; + u16 mss; + u8 num_retrans; + u8 syncookie: 1; + u8 num_timeout: 7; + u32 ts_recent; + struct timer_list rsk_timer; + const struct request_sock_ops *rsk_ops; + struct sock *sk; + struct saved_syn *saved_syn; + u32 secid; + u32 peer_secid; + u32 timeout; +}; + +struct saved_syn { + u32 mac_hdrlen; + u32 network_hdrlen; + u32 tcp_hdrlen; + u8 data[0]; +}; + +enum tsq_enum { + TSQ_THROTTLED = 0, + TSQ_QUEUED = 1, + TCP_TSQ_DEFERRED = 2, + TCP_WRITE_TIMER_DEFERRED = 3, + TCP_DELACK_TIMER_DEFERRED = 4, + TCP_MTU_REDUCED_DEFERRED = 5, + TCP_ACK_DEFERRED = 6, +}; + +struct ip6_sf_list { + struct ip6_sf_list *sf_next; + struct in6_addr sf_addr; + long unsigned int sf_count[2]; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; + struct callback_head rcu; +}; + +struct ifmcaddr6 { + struct in6_addr mca_addr; + struct inet6_dev 
*idev; + struct ifmcaddr6 *next; + struct ip6_sf_list *mca_sources; + struct ip6_sf_list *mca_tomb; + unsigned int mca_sfmode; + unsigned char mca_crcount; + long unsigned int mca_sfcount[2]; + struct delayed_work mca_work; + unsigned int mca_flags; + int mca_users; + refcount_t mca_refcnt; + long unsigned int mca_cstamp; + long unsigned int mca_tstamp; + struct callback_head rcu; +}; + +struct ifacaddr6 { + struct in6_addr aca_addr; + struct fib6_info *aca_rt; + struct ifacaddr6 *aca_next; + struct hlist_node aca_addr_lst; + int aca_users; + refcount_t aca_refcnt; + long unsigned int aca_cstamp; + long unsigned int aca_tstamp; + struct callback_head rcu; +}; + +enum nfs_opnum4 { + OP_ACCESS = 3, + OP_CLOSE = 4, + OP_COMMIT = 5, + OP_CREATE = 6, + OP_DELEGPURGE = 7, + OP_DELEGRETURN = 8, + OP_GETATTR = 9, + OP_GETFH = 10, + OP_LINK = 11, + OP_LOCK = 12, + OP_LOCKT = 13, + OP_LOCKU = 14, + OP_LOOKUP = 15, + OP_LOOKUPP = 16, + OP_NVERIFY = 17, + OP_OPEN = 18, + OP_OPENATTR = 19, + OP_OPEN_CONFIRM = 20, + OP_OPEN_DOWNGRADE = 21, + OP_PUTFH = 22, + OP_PUTPUBFH = 23, + OP_PUTROOTFH = 24, + OP_READ = 25, + OP_READDIR = 26, + OP_READLINK = 27, + OP_REMOVE = 28, + OP_RENAME = 29, + OP_RENEW = 30, + OP_RESTOREFH = 31, + OP_SAVEFH = 32, + OP_SECINFO = 33, + OP_SETATTR = 34, + OP_SETCLIENTID = 35, + OP_SETCLIENTID_CONFIRM = 36, + OP_VERIFY = 37, + OP_WRITE = 38, + OP_RELEASE_LOCKOWNER = 39, + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + OP_ALLOCATE = 59, + OP_COPY = 60, + OP_COPY_NOTIFY = 61, + OP_DEALLOCATE = 62, + OP_IO_ADVISE = 63, + OP_LAYOUTERROR = 64, + OP_LAYOUTSTATS = 65, + OP_OFFLOAD_CANCEL = 66, + OP_OFFLOAD_STATUS = 67, + OP_READ_PLUS = 68, + OP_SEEK = 69, + OP_WRITE_SAME = 70, + OP_CLONE = 71, + OP_GETXATTR = 72, + OP_SETXATTR = 73, + OP_LISTXATTRS = 74, + OP_REMOVEXATTR = 75, + OP_ILLEGAL = 10044, +}; + +struct linux_binprm { + struct vm_area_struct *vma; + long unsigned int vma_pages; + long unsigned int argmin; + struct mm_struct *mm; + long unsigned int p; + unsigned int have_execfd: 1; + unsigned int execfd_creds: 1; + unsigned int secureexec: 1; + unsigned int point_of_no_return: 1; + struct file *executable; + struct file *interpreter; + struct file *file; + struct cred *cred; + int unsafe; + unsigned int per_clear; + int argc; + int envc; + const char *filename; + const char *interp; + const char *fdpath; + unsigned int interp_flags; + int execfd; + long unsigned int loader; + long unsigned int exec; + struct rlimit rlim_stack; + char buf[256]; +}; + +enum sysctl_writes_mode { + SYSCTL_WRITES_LEGACY = -1, + SYSCTL_WRITES_WARN = 0, + SYSCTL_WRITES_STRICT = 1, +}; + +struct do_proc_dointvec_minmax_conv_param { + int *min; + int *max; +}; + +struct do_proc_douintvec_minmax_conv_param { + unsigned int *min; + unsigned int *max; +}; + +struct irq_desc; + +typedef void (*irq_flow_handler_t)(struct irq_desc *); + +struct msi_desc; + +struct irq_common_data { + unsigned int state_use_accessors; + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; + cpumask_var_t effective_affinity; + unsigned int ipi_offset; +}; + +struct irq_chip; + 
+struct irq_data { + u32 mask; + unsigned int irq; + irq_hw_number_t hwirq; + struct irq_common_data *common; + struct irq_chip *chip; + struct irq_domain *domain; + struct irq_data *parent_data; + void *chip_data; +}; + +struct irqstat; + +struct irqaction; + +struct irq_affinity_notify; + +struct irq_desc { + struct irq_common_data irq_common_data; + struct irq_data irq_data; + struct irqstat *kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; + unsigned int wake_depth; + unsigned int tot_count; + unsigned int irq_count; + long unsigned int last_unhandled; + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; + const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; + long unsigned int threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; + unsigned int nr_actions; + unsigned int no_suspend_depth; + unsigned int cond_suspend_depth; + unsigned int force_resume_depth; + struct proc_dir_entry *dir; + struct callback_head rcu; + struct kobject kobj; + struct mutex request_mutex; + int parent_irq; + struct module *owner; + const char *name; + struct hlist_node resend_node; + long: 64; + long: 64; + long: 64; +}; + +enum irqreturn { + IRQ_NONE = 0, + IRQ_HANDLED = 1, + IRQ_WAKE_THREAD = 2, +}; + +typedef enum irqreturn irqreturn_t; + +enum { + IRQ_TYPE_NONE = 0, + IRQ_TYPE_EDGE_RISING = 1, + IRQ_TYPE_EDGE_FALLING = 2, + IRQ_TYPE_EDGE_BOTH = 3, + IRQ_TYPE_LEVEL_HIGH = 4, + IRQ_TYPE_LEVEL_LOW = 8, + IRQ_TYPE_LEVEL_MASK = 12, + IRQ_TYPE_SENSE_MASK = 15, + IRQ_TYPE_DEFAULT = 15, + IRQ_TYPE_PROBE = 16, + IRQ_LEVEL = 256, + IRQ_PER_CPU = 512, + IRQ_NOPROBE = 1024, + IRQ_NOREQUEST = 2048, + IRQ_NOAUTOEN = 4096, + IRQ_NO_BALANCING = 8192, + IRQ_MOVE_PCNTXT = 16384, + IRQ_NESTED_THREAD = 32768, + IRQ_NOTHREAD = 65536, + IRQ_PER_CPU_DEVID = 131072, + IRQ_IS_POLLED = 262144, + IRQ_DISABLE_UNLAZY = 524288, + IRQ_HIDDEN = 1048576, + IRQ_NO_DEBUG = 2097152, +}; + +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING = 0, + IRQCHIP_STATE_ACTIVE = 1, + IRQCHIP_STATE_MASKED = 2, + IRQCHIP_STATE_LINE_LEVEL = 3, +}; + +struct msi_msg; + +struct irq_chip { + const char *name; + unsigned int (*irq_startup)(struct irq_data *); + void (*irq_shutdown)(struct irq_data *); + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_ack)(struct irq_data *); + void (*irq_mask)(struct irq_data *); + void (*irq_mask_ack)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_eoi)(struct irq_data *); + int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); + int (*irq_retrigger)(struct irq_data *); + int (*irq_set_type)(struct irq_data *, unsigned int); + int (*irq_set_wake)(struct irq_data *, unsigned int); + void (*irq_bus_lock)(struct irq_data *); + void (*irq_bus_sync_unlock)(struct irq_data *); + void (*irq_suspend)(struct irq_data *); + void (*irq_resume)(struct irq_data *); + void (*irq_pm_shutdown)(struct irq_data *); + void (*irq_calc_mask)(struct irq_data *); + void (*irq_print_chip)(struct irq_data *, struct seq_file *); + int (*irq_request_resources)(struct irq_data *); + void (*irq_release_resources)(struct irq_data *); + void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); + void (*irq_write_msi_msg)(struct 
irq_data *, struct msi_msg *); + int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); + int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); + int (*irq_set_vcpu_affinity)(struct irq_data *, void *); + void (*ipi_send_single)(struct irq_data *, unsigned int); + void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); + int (*irq_nmi_setup)(struct irq_data *); + void (*irq_nmi_teardown)(struct irq_data *); + long unsigned int flags; +}; + +struct irqstat { + unsigned int cnt; +}; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +struct irqaction { + irq_handler_t handler; + void *dev_id; + void *percpu_dev_id; + struct irqaction *next; + irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + long unsigned int thread_flags; + long unsigned int thread_mask; + const char *name; + struct proc_dir_entry *dir; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *); + void (*release)(struct kref *); +}; + +enum cpu_usage_stat { + CPUTIME_USER = 0, + CPUTIME_NICE = 1, + CPUTIME_SYSTEM = 2, + CPUTIME_SOFTIRQ = 3, + CPUTIME_IRQ = 4, + CPUTIME_IDLE = 5, + CPUTIME_IOWAIT = 6, + CPUTIME_STEAL = 7, + CPUTIME_GUEST = 8, + CPUTIME_GUEST_NICE = 9, + CPUTIME_FORCEIDLE = 10, + NR_STATS = 11, +}; + +enum { + IRQS_AUTODETECT = 1, + IRQS_SPURIOUS_DISABLED = 2, + IRQS_POLL_INPROGRESS = 8, + IRQS_ONESHOT = 32, + IRQS_REPLAY = 64, + IRQS_WAITING = 128, + IRQS_PENDING = 512, + IRQS_SUSPENDED = 2048, + IRQS_TIMINGS = 4096, + IRQS_NMI = 8192, + IRQS_SYSFS = 16384, +}; + +enum { + _IRQ_DEFAULT_INIT_FLAGS = 0, + _IRQ_PER_CPU = 512, + _IRQ_LEVEL = 256, + _IRQ_NOPROBE = 1024, + _IRQ_NOREQUEST = 2048, + _IRQ_NOTHREAD = 65536, + _IRQ_NOAUTOEN = 4096, + _IRQ_MOVE_PCNTXT = 16384, + _IRQ_NO_BALANCING = 8192, + _IRQ_NESTED_THREAD = 32768, + _IRQ_PER_CPU_DEVID = 131072, + _IRQ_IS_POLLED = 262144, + _IRQ_DISABLE_UNLAZY = 524288, + _IRQ_HIDDEN = 1048576, + _IRQ_NO_DEBUG = 2097152, + _IRQF_MODIFY_MASK = 2096911, +}; + +typedef __kernel_timer_t timer_t; + +typedef struct { + spinlock_t *lock; +} class_spinlock_t; + +typedef struct { + spinlock_t *lock; + long unsigned int flags; +} class_spinlock_irqsave_t; + +enum refcount_saturation_type { + REFCOUNT_ADD_NOT_ZERO_OVF = 0, + REFCOUNT_ADD_OVF = 1, + REFCOUNT_ADD_UAF = 2, + REFCOUNT_SUB_UAF = 3, + REFCOUNT_DEC_LEAK = 4, +}; + +struct __kernel_itimerspec { + struct __kernel_timespec it_interval; + struct __kernel_timespec it_value; +}; + +struct timezone { + int tz_minuteswest; + int tz_dsttime; +}; + +struct itimerspec64 { + struct timespec64 it_interval; + struct timespec64 it_value; +}; + +struct __kernel_timex_timeval { + __kernel_time64_t tv_sec; + long long int tv_usec; +}; + +struct __kernel_timex { + unsigned int modes; + long long int offset; + long long int freq; + long long int maxerror; + long long int esterror; + int status; + long long int constant; + long long int precision; + long long int tolerance; + struct __kernel_timex_timeval time; + long long int tick; + long long int ppsfreq; + long long int jitter; + int shift; + long long int stabil; + long long int jitcnt; + long long int calcnt; + long long int errcnt; + long long int stbcnt; + int tai; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct sigevent { + sigval_t sigev_value; + int 
sigev_signo; + int sigev_notify; + union { + int _pad[12]; + int _tid; + struct { + void (*_function)(sigval_t); + void *_attribute; + } _sigev_thread; + } _sigev_un; +}; + +typedef struct sigevent sigevent_t; + +struct sigqueue { + struct list_head list; + int flags; + kernel_siginfo_t info; + struct ucounts *ucounts; +}; + +enum tk_offsets { + TK_OFFS_REAL = 0, + TK_OFFS_BOOT = 1, + TK_OFFS_TAI = 2, + TK_OFFS_MAX = 3, +}; + +enum _slab_flag_bits { + _SLAB_CONSISTENCY_CHECKS = 0, + _SLAB_RED_ZONE = 1, + _SLAB_POISON = 2, + _SLAB_KMALLOC = 3, + _SLAB_HWCACHE_ALIGN = 4, + _SLAB_CACHE_DMA = 5, + _SLAB_CACHE_DMA32 = 6, + _SLAB_STORE_USER = 7, + _SLAB_PANIC = 8, + _SLAB_TYPESAFE_BY_RCU = 9, + _SLAB_TRACE = 10, + _SLAB_NOLEAKTRACE = 11, + _SLAB_NO_MERGE = 12, + _SLAB_ACCOUNT = 13, + _SLAB_KASAN = 14, + _SLAB_NO_USER_FLAGS = 15, + _SLAB_RECLAIM_ACCOUNT = 16, + _SLAB_OBJECT_POISON = 17, + _SLAB_CMPXCHG_DOUBLE = 18, + _SLAB_NO_OBJ_EXT = 19, + _SLAB_FLAGS_LAST_BIT = 20, +}; + +struct kmem_cache_args { + unsigned int align; + unsigned int useroffset; + unsigned int usersize; + unsigned int freeptr_offset; + bool use_freeptr_offset; + void (*ctor)(void *); +}; + +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0, + HRTIMER_MODE_REL = 1, + HRTIMER_MODE_PINNED = 2, + HRTIMER_MODE_SOFT = 4, + HRTIMER_MODE_HARD = 8, + HRTIMER_MODE_ABS_PINNED = 2, + HRTIMER_MODE_REL_PINNED = 3, + HRTIMER_MODE_ABS_SOFT = 4, + HRTIMER_MODE_REL_SOFT = 5, + HRTIMER_MODE_ABS_PINNED_SOFT = 6, + HRTIMER_MODE_REL_PINNED_SOFT = 7, + HRTIMER_MODE_ABS_HARD = 8, + HRTIMER_MODE_REL_HARD = 9, + HRTIMER_MODE_ABS_PINNED_HARD = 10, + HRTIMER_MODE_REL_PINNED_HARD = 11, +}; + +enum alarmtimer_type { + ALARM_REALTIME = 0, + ALARM_BOOTTIME = 1, + ALARM_NUMTYPE = 2, + ALARM_REALTIME_FREEZER = 3, + ALARM_BOOTTIME_FREEZER = 4, +}; + +struct alarm { + struct timerqueue_node node; + struct hrtimer timer; + void (*function)(struct alarm *, ktime_t); + enum alarmtimer_type type; + int state; + void *data; +}; + +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct pid *pid; + struct list_head elist; + bool firing; + bool nanosleep; + struct task_struct *handling; +}; + +struct k_clock; + +struct k_itimer { + struct hlist_node list; + struct hlist_node ignored_list; + struct hlist_node t_hash; + spinlock_t it_lock; + const struct k_clock *kclock; + clockid_t it_clock; + timer_t it_id; + int it_status; + bool it_sig_periodic; + s64 it_overrun; + s64 it_overrun_last; + unsigned int it_signal_seq; + unsigned int it_sigqueue_seq; + int it_sigev_notify; + enum pid_type it_pid_type; + ktime_t it_interval; + struct signal_struct *it_signal; + union { + struct pid *it_pid; + struct task_struct *it_process; + }; + struct sigqueue sigq; + rcuref_t rcuref; + union { + struct { + struct hrtimer timer; + } real; + struct cpu_timer cpu; + struct { + struct alarm alarmtimer; + } alarm; + } it; + struct callback_head rcu; +}; + +struct k_clock { + int (*clock_getres)(const clockid_t, struct timespec64 *); + int (*clock_set)(const clockid_t, const struct timespec64 *); + int (*clock_get_timespec)(const clockid_t, struct timespec64 *); + ktime_t (*clock_get_ktime)(const clockid_t); + int (*clock_adj)(const clockid_t, struct __kernel_timex *); + int (*timer_create)(struct k_itimer *); + int (*nsleep)(const clockid_t, int, const struct timespec64 *); + int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); + int (*timer_del)(struct k_itimer *); + void (*timer_get)(struct k_itimer *, struct itimerspec64 *); 
+ void (*timer_rearm)(struct k_itimer *); + s64 (*timer_forward)(struct k_itimer *, ktime_t); + ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); + int (*timer_try_to_cancel)(struct k_itimer *); + void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); + void (*timer_wait_running)(struct k_itimer *); +}; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +}; + +enum { + EI_ETYPE_NULL = 0, + EI_ETYPE_ERRNO = 1, + EI_ETYPE_ERRNO_NULL = 2, + EI_ETYPE_TRUE = 3, +}; + +struct timens_offsets { + struct timespec64 monotonic; + struct timespec64 boottime; +}; + +struct time_namespace { + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; + struct timens_offsets offsets; + struct page *vvar_page; + bool frozen_offsets; +}; + +enum rlimit_type { + UCOUNT_RLIMIT_NPROC = 0, + UCOUNT_RLIMIT_MSGQUEUE = 1, + UCOUNT_RLIMIT_SIGPENDING = 2, + UCOUNT_RLIMIT_MEMLOCK = 3, + UCOUNT_RLIMIT_COUNTS = 4, +}; + +enum { + TRACE_EVENT_FL_CAP_ANY = 1, + TRACE_EVENT_FL_NO_SET_FILTER = 2, + TRACE_EVENT_FL_IGNORE_ENABLE = 4, + TRACE_EVENT_FL_TRACEPOINT = 8, + TRACE_EVENT_FL_DYNAMIC = 16, + TRACE_EVENT_FL_KPROBE = 32, + TRACE_EVENT_FL_UPROBE = 64, + TRACE_EVENT_FL_EPROBE = 128, + TRACE_EVENT_FL_FPROBE = 256, + TRACE_EVENT_FL_CUSTOM = 512, +}; + +struct syscall_metadata { + const char *name; + int syscall_nr; + int nb_args; + const char **types; + const char **args; + struct list_head enter_fields; + struct trace_event_call *enter_event; + struct trace_event_call *exit_event; +}; + +enum posix_timer_state { + POSIX_TIMER_DISARMED = 0, + POSIX_TIMER_ARMED = 1, + POSIX_TIMER_REQUEUE_PENDING = 2, +}; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC = 0, + HRTIMER_BASE_REALTIME = 1, + HRTIMER_BASE_BOOTTIME = 2, + HRTIMER_BASE_TAI = 3, + HRTIMER_BASE_MONOTONIC_SOFT = 4, + HRTIMER_BASE_REALTIME_SOFT = 5, + HRTIMER_BASE_BOOTTIME_SOFT = 6, + HRTIMER_BASE_TAI_SOFT = 7, + HRTIMER_MAX_CLOCK_BASES = 8, +}; + +enum utf8_normalization { + UTF8_NFDI = 0, + UTF8_NFDICF = 1, + UTF8_NMAX = 2, +}; + +enum { + DQF_ROOT_SQUASH_B = 0, + DQF_SYS_FILE_B = 16, + DQF_PRIVATE = 17, +}; + +enum { + DQST_LOOKUPS = 0, + DQST_DROPS = 1, + DQST_READS = 2, + DQST_WRITES = 3, + DQST_CACHE_HITS = 4, + DQST_ALLOC_DQUOTS = 5, + DQST_FREE_DQUOTS = 6, + DQST_SYNCS = 7, + _DQST_DQSTAT_LAST = 8, +}; + +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_PAGEFAULT = 2, + SB_FREEZE_FS = 3, + SB_FREEZE_COMPLETE = 4, +}; + +enum { + BTF_KIND_UNKN = 0, + BTF_KIND_INT = 1, + BTF_KIND_PTR = 2, + BTF_KIND_ARRAY = 3, + BTF_KIND_STRUCT = 4, + BTF_KIND_UNION = 5, + BTF_KIND_ENUM = 6, + BTF_KIND_FWD = 7, + BTF_KIND_TYPEDEF = 8, + BTF_KIND_VOLATILE = 9, + BTF_KIND_CONST = 10, + BTF_KIND_RESTRICT = 11, + BTF_KIND_FUNC = 12, + BTF_KIND_FUNC_PROTO = 13, + BTF_KIND_VAR = 14, + BTF_KIND_DATASEC = 15, + BTF_KIND_FLOAT = 16, + BTF_KIND_DECL_TAG = 17, + BTF_KIND_TYPE_TAG = 18, + BTF_KIND_ENUM64 = 19, + NR_BTF_KINDS = 20, + BTF_KIND_MAX = 19, +}; + +struct btf_member { + __u32 name_off; + __u32 type; + __u32 offset; +}; + +struct btf_param { + __u32 name_off; + __u32 type; +}; + +struct btf_anon_stack { + u32 tid; + u32 offset; +}; + +struct ring_buffer_event { + u32 type_len: 5; + u32 time_delta: 27; + u32 array[0]; +}; + +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + 
BPF_LINK_TYPE_XDP = 6, + BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, + BPF_LINK_TYPE_STRUCT_OPS = 9, + BPF_LINK_TYPE_NETFILTER = 10, + BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SOCKMAP = 14, + __MAX_BPF_LINK_TYPE = 15, +}; + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + union { + struct { + __u64 tp_name; + __u32 tp_name_len; + } raw_tracepoint; + struct { + __u32 attach_type; + __u32 target_obj_id; + __u32 target_btf_id; + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + } cgroup; + struct { + __u64 target_name; + __u32 target_name_len; + union { + struct { + __u32 map_id; + } map; + }; + union { + struct { + __u64 cgroup_id; + __u32 order; + } cgroup; + struct { + __u32 tid; + __u32 pid; + } task; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + struct { + __u32 map_id; + } struct_ops; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + __u64 addrs; + __u32 count; + __u32 flags; + __u64 missed; + __u64 cookies; + } kprobe_multi; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 path_size; + __u32 count; + __u32 flags; + __u32 pid; + } uprobe_multi; + struct { + __u32 type; + union { + struct { + __u64 file_name; + __u32 name_len; + __u32 offset; + __u64 cookie; + } uprobe; + struct { + __u64 func_name; + __u32 name_len; + __u32 offset; + __u64 addr; + __u64 missed; + __u64 cookie; + } kprobe; + struct { + __u64 tp_name; + __u32 name_len; + __u64 cookie; + } tracepoint; + struct { + __u64 config; + __u32 type; + __u64 cookie; + } event; + }; + } perf_event; + struct { + __u32 ifindex; + __u32 attach_type; + } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } sockmap; + }; +}; + +struct bpf_link_ops; + +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + bool sleepable; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *); + void (*dealloc)(struct bpf_link *); + void (*dealloc_deferred)(struct bpf_link *); + int (*detach)(struct bpf_link *); + int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); + void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); + int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); + int (*update_map)(struct bpf_link *, struct bpf_map *, struct bpf_map *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); +}; + +struct bpf_raw_tp_link { + struct bpf_link link; + struct bpf_raw_event_map *btp; + u64 cookie; +}; + +struct trace_buffer; + +struct trace_event_file; + +struct trace_event_buffer { + struct trace_buffer *buffer; + struct ring_buffer_event *event; + struct trace_event_file *trace_file; + void *entry; + unsigned int trace_ctx; + struct pt_regs *regs; +}; + +struct eventfs_inode; + +struct trace_subsystem_dir; + +struct trace_event_file { + struct list_head list; + struct trace_event_call *event_call; + struct event_filter *filter; + struct eventfs_inode *ei; + struct trace_array *tr; + struct trace_subsystem_dir *system; + struct list_head triggers; + long unsigned int flags; + refcount_t ref; + atomic_t sm_ref; + atomic_t tm_ref; +}; + +enum { + EVENT_FILE_FL_ENABLED = 1, + 
EVENT_FILE_FL_RECORDED_CMD = 2, + EVENT_FILE_FL_RECORDED_TGID = 4, + EVENT_FILE_FL_FILTERED = 8, + EVENT_FILE_FL_NO_SET_FILTER = 16, + EVENT_FILE_FL_SOFT_MODE = 32, + EVENT_FILE_FL_SOFT_DISABLED = 64, + EVENT_FILE_FL_TRIGGER_MODE = 128, + EVENT_FILE_FL_TRIGGER_COND = 256, + EVENT_FILE_FL_PID_FILTER = 512, + EVENT_FILE_FL_WAS_ENABLED = 1024, + EVENT_FILE_FL_FREED = 2048, +}; + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING = 1, + FILTER_DYN_STRING = 2, + FILTER_RDYN_STRING = 3, + FILTER_PTR_STRING = 4, + FILTER_TRACE_FN = 5, + FILTER_CPUMASK = 6, + FILTER_COMM = 7, + FILTER_CPU = 8, + FILTER_STACKTRACE = 9, +}; + +struct trace_event_raw_benchmark_event { + struct trace_entry ent; + char str[128]; + u64 delta; + char __data[0]; +}; + +struct trace_event_data_offsets_benchmark_event {}; + +typedef void (*btf_trace_benchmark_event)(void *, const char *, u64); + +enum { + BPF_F_NO_PREALLOC = 1, + BPF_F_NO_COMMON_LRU = 2, + BPF_F_NUMA_NODE = 4, + BPF_F_RDONLY = 8, + BPF_F_WRONLY = 16, + BPF_F_STACK_BUILD_ID = 32, + BPF_F_ZERO_SEED = 64, + BPF_F_RDONLY_PROG = 128, + BPF_F_WRONLY_PROG = 256, + BPF_F_CLONE = 512, + BPF_F_MMAPABLE = 1024, + BPF_F_PRESERVE_ELEMS = 2048, + BPF_F_INNER_MAP = 4096, + BPF_F_LINK = 8192, + BPF_F_PATH_FD = 16384, + BPF_F_VTYPE_BTF_OBJ_FD = 32768, + BPF_F_TOKEN_FD = 65536, + BPF_F_SEGV_ON_FAULT = 131072, + BPF_F_NO_USER_CONV = 262144, +}; + +enum vm_fault_reason { + VM_FAULT_OOM = 1, + VM_FAULT_SIGBUS = 2, + VM_FAULT_MAJOR = 4, + VM_FAULT_HWPOISON = 16, + VM_FAULT_HWPOISON_LARGE = 32, + VM_FAULT_SIGSEGV = 64, + VM_FAULT_NOPAGE = 256, + VM_FAULT_LOCKED = 512, + VM_FAULT_RETRY = 1024, + VM_FAULT_FALLBACK = 2048, + VM_FAULT_DONE_COW = 4096, + VM_FAULT_NEEDDSYNC = 8192, + VM_FAULT_COMPLETED = 16384, + VM_FAULT_HINDEX_MASK = 983040, +}; + +typedef unsigned int zap_flags_t; + +enum pageflags { + PG_locked = 0, + PG_writeback = 1, + PG_referenced = 2, + PG_uptodate = 3, + PG_dirty = 4, + PG_lru = 5, + PG_head = 6, + PG_waiters = 7, + PG_active = 8, + PG_workingset = 9, + PG_owner_priv_1 = 10, + PG_owner_2 = 11, + PG_arch_1 = 12, + PG_reserved = 13, + PG_private = 14, + PG_private_2 = 15, + PG_reclaim = 16, + PG_swapbacked = 17, + PG_unevictable = 18, + PG_mlocked = 19, + PG_arch_2 = 20, + PG_arch_3 = 21, + __NR_PAGEFLAGS = 22, + PG_readahead = 16, + PG_swapcache = 10, + PG_checked = 10, + PG_anon_exclusive = 11, + PG_mappedtodisk = 11, + PG_fscache = 15, + PG_pinned = 10, + PG_savepinned = 4, + PG_foreign = 10, + PG_xen_remapped = 10, + PG_isolated = 16, + PG_reported = 3, + PG_has_hwpoisoned = 8, + PG_large_rmappable = 9, + PG_partially_mapped = 16, +}; + +enum pagetype { + PGTY_buddy = 240, + PGTY_offline = 241, + PGTY_table = 242, + PGTY_guard = 243, + PGTY_hugetlb = 244, + PGTY_slab = 245, + PGTY_zsmalloc = 246, + PGTY_unaccepted = 247, + PGTY_mapcount_underflow = 255, +}; + +struct zap_details { + struct folio *single_folio; + bool even_cows; + zap_flags_t zap_flags; +}; + +typedef int (*pte_fn_t)(pte_t *, long unsigned int, void *); + +struct btf_id_set8 { + u32 cnt; + u32 flags; + struct { + u32 id; + u32 flags; + } pairs[0]; +}; + +typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *, u32); + +struct btf_kfunc_id_set { + struct module *owner; + struct btf_id_set8 *set; + btf_kfunc_filter_t filter; +}; + +struct range_tree { + struct rb_root_cached it_root; + struct rb_root_cached range_size_root; +}; + +struct bpf_arena { + struct bpf_map map; + u64 user_vm_start; + u64 user_vm_end; + struct vm_struct *kern_vm; + struct range_tree rt; + struct list_head vma_list; 
+ struct mutex lock; +}; + +struct vma_list { + struct vm_area_struct *vma; + struct list_head head; + atomic_t mmap_count; +}; + +struct ksignal { + struct k_sigaction ka; + kernel_siginfo_t info; + int sig; +}; + +enum rseq_cpu_id_state { + RSEQ_CPU_ID_UNINITIALIZED = -1, + RSEQ_CPU_ID_REGISTRATION_FAILED = -2, +}; + +enum rseq_flags { + RSEQ_FLAG_UNREGISTER = 1, +}; + +enum rseq_cs_flags_bit { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +enum rseq_cs_flags { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4, +}; + +struct rseq_cs { + __u32 version; + __u32 flags; + __u64 start_ip; + __u64 post_commit_offset; + __u64 abort_ip; +}; + +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = 0, + RSEQ_EVENT_SIGNAL_BIT = 1, + RSEQ_EVENT_MIGRATE_BIT = 2, +}; + +struct trace_event_raw_rseq_update { + struct trace_entry ent; + s32 cpu_id; + s32 node_id; + s32 mm_cid; + char __data[0]; +}; + +struct trace_event_raw_rseq_ip_fixup { + struct trace_entry ent; + long unsigned int regs_ip; + long unsigned int start_ip; + long unsigned int post_commit_offset; + long unsigned int abort_ip; + char __data[0]; +}; + +struct trace_event_data_offsets_rseq_update {}; + +struct trace_event_data_offsets_rseq_ip_fixup {}; + +typedef void (*btf_trace_rseq_update)(void *, struct task_struct *); + +typedef void (*btf_trace_rseq_ip_fixup)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef phys_addr_t resource_size_t; + +typedef struct { + u64 val; +} pfn_t; + +enum { + MM_FILEPAGES = 0, + MM_ANONPAGES = 1, + MM_SWAPENTS = 2, + MM_SHMEMPAGES = 3, + NR_MM_COUNTERS = 4, +}; + +struct compact_control; + +struct capture_control { + struct compact_control *cc; + struct page *page; +}; + +struct task_delay_info { + raw_spinlock_t lock; + u64 blkio_start; + u64 blkio_delay; + u64 swapin_start; + u64 swapin_delay; + u32 blkio_count; + u32 swapin_count; + u64 freepages_start; + u64 freepages_delay; + u64 thrashing_start; + u64 thrashing_delay; + u64 compact_start; + u64 compact_delay; + u64 wpcopy_start; + u64 wpcopy_delay; + u64 irq_delay; + u32 freepages_count; + u32 thrashing_count; + u32 compact_count; + u32 wpcopy_count; + u32 irq_count; +}; + +struct maple_alloc { + long unsigned int total; + unsigned char node_count; + unsigned int request_count; + struct maple_alloc *slot[30]; +}; + +struct maple_enode; + +enum store_type { + wr_invalid = 0, + wr_new_root = 1, + wr_store_root = 2, + wr_exact_fit = 3, + wr_spanning_store = 4, + wr_split_store = 5, + wr_rebalance = 6, + wr_append = 7, + wr_node_store = 8, + wr_slot_store = 9, +}; + +enum maple_status { + ma_active = 0, + ma_start = 1, + ma_root = 2, + ma_none = 3, + ma_pause = 4, + ma_overflow = 5, + ma_underflow = 6, + ma_error = 7, +}; + +struct ma_state { + struct maple_tree *tree; + long unsigned int index; + long unsigned int last; + struct maple_enode *node; + long unsigned int min; + long unsigned int max; + struct maple_alloc *alloc; + enum maple_status status; + unsigned char depth; + unsigned char offset; + unsigned char mas_flags; + unsigned char end; + enum store_type store_type; +}; + +struct ptdesc { + long unsigned int __page_flags; + union { + struct callback_head pt_rcu_head; + struct list_head pt_list; + struct { + long unsigned int _pt_pad_1; + pgtable_t pmd_huge_pte; + }; + }; + long unsigned int __page_mapping; + union { + long 
unsigned int pt_index; + struct mm_struct *pt_mm; + atomic_t pt_frag_refcount; + }; + union { + long unsigned int _pt_pad_2; + spinlock_t ptl; + }; + unsigned int __page_type; + atomic_t __page_refcount; + long unsigned int pt_memcg_data; +}; + +struct anon_vma { + struct anon_vma *root; + struct rw_semaphore rwsem; + atomic_t refcount; + long unsigned int num_children; + long unsigned int num_active_vmas; + struct anon_vma *parent; + struct rb_root_cached rb_root; +}; + +enum { + FOLL_WRITE = 1, + FOLL_GET = 2, + FOLL_DUMP = 4, + FOLL_FORCE = 8, + FOLL_NOWAIT = 16, + FOLL_NOFAULT = 32, + FOLL_HWPOISON = 64, + FOLL_ANON = 128, + FOLL_LONGTERM = 256, + FOLL_SPLIT_PMD = 512, + FOLL_PCI_P2PDMA = 1024, + FOLL_INTERRUPTIBLE = 2048, + FOLL_HONOR_NUMA_FAULT = 4096, +}; + +enum node_stat_item { + NR_LRU_BASE = 0, + NR_INACTIVE_ANON = 0, + NR_ACTIVE_ANON = 1, + NR_INACTIVE_FILE = 2, + NR_ACTIVE_FILE = 3, + NR_UNEVICTABLE = 4, + NR_SLAB_RECLAIMABLE_B = 5, + NR_SLAB_UNRECLAIMABLE_B = 6, + NR_ISOLATED_ANON = 7, + NR_ISOLATED_FILE = 8, + WORKINGSET_NODES = 9, + WORKINGSET_REFAULT_BASE = 10, + WORKINGSET_REFAULT_ANON = 10, + WORKINGSET_REFAULT_FILE = 11, + WORKINGSET_ACTIVATE_BASE = 12, + WORKINGSET_ACTIVATE_ANON = 12, + WORKINGSET_ACTIVATE_FILE = 13, + WORKINGSET_RESTORE_BASE = 14, + WORKINGSET_RESTORE_ANON = 14, + WORKINGSET_RESTORE_FILE = 15, + WORKINGSET_NODERECLAIM = 16, + NR_ANON_MAPPED = 17, + NR_FILE_MAPPED = 18, + NR_FILE_PAGES = 19, + NR_FILE_DIRTY = 20, + NR_WRITEBACK = 21, + NR_WRITEBACK_TEMP = 22, + NR_SHMEM = 23, + NR_SHMEM_THPS = 24, + NR_SHMEM_PMDMAPPED = 25, + NR_FILE_THPS = 26, + NR_FILE_PMDMAPPED = 27, + NR_ANON_THPS = 28, + NR_VMSCAN_WRITE = 29, + NR_VMSCAN_IMMEDIATE = 30, + NR_DIRTIED = 31, + NR_WRITTEN = 32, + NR_THROTTLED_WRITTEN = 33, + NR_KERNEL_MISC_RECLAIMABLE = 34, + NR_FOLL_PIN_ACQUIRED = 35, + NR_FOLL_PIN_RELEASED = 36, + NR_KERNEL_STACK_KB = 37, + NR_PAGETABLE = 38, + NR_SECONDARY_PAGETABLE = 39, + NR_IOMMU_PAGES = 40, + NR_SWAPCACHE = 41, + PGDEMOTE_KSWAPD = 42, + PGDEMOTE_DIRECT = 43, + PGDEMOTE_KHUGEPAGED = 44, + NR_VM_NODE_STAT_ITEMS = 45, +}; + +struct mem_section_usage { + struct callback_head rcu; + long unsigned int subsection_map[1]; + long unsigned int pageblock_flags[0]; +}; + +struct mem_section { + long unsigned int section_mem_map; + struct mem_section_usage *usage; +}; + +enum { + SECTION_MARKED_PRESENT_BIT = 0, + SECTION_HAS_MEM_MAP_BIT = 1, + SECTION_IS_ONLINE_BIT = 2, + SECTION_IS_EARLY_BIT = 3, + SECTION_MAP_LAST_BIT = 4, +}; + +typedef unsigned int pgtbl_mod_mask; + +enum { + __PERCPU_REF_ATOMIC = 1, + __PERCPU_REF_DEAD = 2, + __PERCPU_REF_ATOMIC_DEAD = 3, + __PERCPU_REF_FLAG_BITS = 2, +}; + +enum migrate_reason { + MR_COMPACTION = 0, + MR_MEMORY_FAILURE = 1, + MR_MEMORY_HOTPLUG = 2, + MR_SYSCALL = 3, + MR_MEMPOLICY_MBIND = 4, + MR_NUMA_MISPLACED = 5, + MR_CONTIG_RANGE = 6, + MR_LONGTERM_PIN = 7, + MR_DEMOTION = 8, + MR_DAMON = 9, + MR_TYPES = 10, +}; + +struct fc_log; + +struct p_log { + const char *prefix; + struct fc_log *log; +}; + +enum fs_context_purpose { + FS_CONTEXT_FOR_MOUNT = 0, + FS_CONTEXT_FOR_SUBMOUNT = 1, + FS_CONTEXT_FOR_RECONFIGURE = 2, +}; + +enum fs_context_phase { + FS_CONTEXT_CREATE_PARAMS = 0, + FS_CONTEXT_CREATING = 1, + FS_CONTEXT_AWAITING_MOUNT = 2, + FS_CONTEXT_AWAITING_RECONF = 3, + FS_CONTEXT_RECONF_PARAMS = 4, + FS_CONTEXT_RECONFIGURING = 5, + FS_CONTEXT_FAILED = 6, +}; + +struct fs_context_operations; + +struct fs_context { + const struct fs_context_operations *ops; + struct mutex uapi_mutex; + struct file_system_type 
*fs_type; + void *fs_private; + void *sget_key; + struct dentry *root; + struct user_namespace *user_ns; + struct net *net_ns; + const struct cred *cred; + struct p_log log; + const char *source; + void *security; + void *s_fs_info; + unsigned int sb_flags; + unsigned int sb_flags_mask; + unsigned int s_iflags; + enum fs_context_purpose purpose: 8; + enum fs_context_phase phase: 8; + bool need_free: 1; + bool global: 1; + bool oldapi: 1; + bool exclusive: 1; +}; + +struct audit_names; + +struct filename { + const char *name; + const char *uptr; + atomic_t refcnt; + struct audit_names *aname; + const char iname[0]; +}; + +enum mthp_stat_item { + MTHP_STAT_ANON_FAULT_ALLOC = 0, + MTHP_STAT_ANON_FAULT_FALLBACK = 1, + MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE = 2, + MTHP_STAT_ZSWPOUT = 3, + MTHP_STAT_SWPIN = 4, + MTHP_STAT_SWPOUT = 5, + MTHP_STAT_SWPOUT_FALLBACK = 6, + MTHP_STAT_SHMEM_ALLOC = 7, + MTHP_STAT_SHMEM_FALLBACK = 8, + MTHP_STAT_SHMEM_FALLBACK_CHARGE = 9, + MTHP_STAT_SPLIT = 10, + MTHP_STAT_SPLIT_FAILED = 11, + MTHP_STAT_SPLIT_DEFERRED = 12, + MTHP_STAT_NR_ANON = 13, + MTHP_STAT_NR_ANON_PARTIALLY_MAPPED = 14, + __MTHP_STAT_COUNT = 15, +}; + +struct encoded_page; + +enum vm_event_item { + PGPGIN = 0, + PGPGOUT = 1, + PSWPIN = 2, + PSWPOUT = 3, + PGALLOC_DMA = 4, + PGALLOC_DMA32 = 5, + PGALLOC_NORMAL = 6, + PGALLOC_MOVABLE = 7, + ALLOCSTALL_DMA = 8, + ALLOCSTALL_DMA32 = 9, + ALLOCSTALL_NORMAL = 10, + ALLOCSTALL_MOVABLE = 11, + PGSCAN_SKIP_DMA = 12, + PGSCAN_SKIP_DMA32 = 13, + PGSCAN_SKIP_NORMAL = 14, + PGSCAN_SKIP_MOVABLE = 15, + PGFREE = 16, + PGACTIVATE = 17, + PGDEACTIVATE = 18, + PGLAZYFREE = 19, + PGFAULT = 20, + PGMAJFAULT = 21, + PGLAZYFREED = 22, + PGREFILL = 23, + PGREUSE = 24, + PGSTEAL_KSWAPD = 25, + PGSTEAL_DIRECT = 26, + PGSTEAL_KHUGEPAGED = 27, + PGSCAN_KSWAPD = 28, + PGSCAN_DIRECT = 29, + PGSCAN_KHUGEPAGED = 30, + PGSCAN_DIRECT_THROTTLE = 31, + PGSCAN_ANON = 32, + PGSCAN_FILE = 33, + PGSTEAL_ANON = 34, + PGSTEAL_FILE = 35, + PGINODESTEAL = 36, + SLABS_SCANNED = 37, + KSWAPD_INODESTEAL = 38, + KSWAPD_LOW_WMARK_HIT_QUICKLY = 39, + KSWAPD_HIGH_WMARK_HIT_QUICKLY = 40, + PAGEOUTRUN = 41, + PGROTATED = 42, + DROP_PAGECACHE = 43, + DROP_SLAB = 44, + OOM_KILL = 45, + PGMIGRATE_SUCCESS = 46, + PGMIGRATE_FAIL = 47, + THP_MIGRATION_SUCCESS = 48, + THP_MIGRATION_FAIL = 49, + THP_MIGRATION_SPLIT = 50, + COMPACTMIGRATE_SCANNED = 51, + COMPACTFREE_SCANNED = 52, + COMPACTISOLATED = 53, + COMPACTSTALL = 54, + COMPACTFAIL = 55, + COMPACTSUCCESS = 56, + KCOMPACTD_WAKE = 57, + KCOMPACTD_MIGRATE_SCANNED = 58, + KCOMPACTD_FREE_SCANNED = 59, + CMA_ALLOC_SUCCESS = 60, + CMA_ALLOC_FAIL = 61, + UNEVICTABLE_PGCULLED = 62, + UNEVICTABLE_PGSCANNED = 63, + UNEVICTABLE_PGRESCUED = 64, + UNEVICTABLE_PGMLOCKED = 65, + UNEVICTABLE_PGMUNLOCKED = 66, + UNEVICTABLE_PGCLEARED = 67, + UNEVICTABLE_PGSTRANDED = 68, + SWAP_RA = 69, + SWAP_RA_HIT = 70, + SWPIN_ZERO = 71, + SWPOUT_ZERO = 72, + ZSWPIN = 73, + ZSWPOUT = 74, + ZSWPWB = 75, + NR_VM_EVENT_ITEMS = 76, +}; + +struct vm_event_state { + long unsigned int event[76]; +}; + +struct follow_pfnmap_args { + struct vm_area_struct *vma; + long unsigned int address; + spinlock_t *lock; + pte_t *ptep; + long unsigned int pfn; + pgprot_t pgprot; + bool writable; + bool special; +}; + +enum { + SWP_USED = 1, + SWP_WRITEOK = 2, + SWP_DISCARDABLE = 4, + SWP_DISCARDING = 8, + SWP_SOLIDSTATE = 16, + SWP_CONTINUED = 32, + SWP_BLKDEV = 64, + SWP_ACTIVATED = 128, + SWP_FS_OPS = 256, + SWP_AREA_DISCARD = 512, + SWP_PAGE_DISCARD = 1024, + SWP_STABLE_WRITES = 2048, + 
SWP_SYNCHRONOUS_IO = 4096, + SWP_SCANNING = 16384, +}; + +typedef long unsigned int pte_marker; + +enum fs_value_type { + fs_value_is_undefined = 0, + fs_value_is_flag = 1, + fs_value_is_string = 2, + fs_value_is_blob = 3, + fs_value_is_filename = 4, + fs_value_is_file = 5, +}; + +struct fs_parameter { + const char *key; + enum fs_value_type type: 8; + union { + char *string; + void *blob; + struct filename *name; + struct file *file; + }; + size_t size; + int dirfd; +}; + +struct fc_log { + refcount_t usage; + u8 head; + u8 tail; + u8 need_free; + struct module *owner; + char *buffer[8]; +}; + +struct fs_context_operations { + void (*free)(struct fs_context *); + int (*dup)(struct fs_context *, struct fs_context *); + int (*parse_param)(struct fs_context *, struct fs_parameter *); + int (*parse_monolithic)(struct fs_context *, void *); + int (*get_tree)(struct fs_context *); + int (*reconfigure)(struct fs_context *); +}; + +struct fs_parse_result { + bool negated; + union { + bool boolean; + int int_32; + unsigned int uint_32; + u64 uint_64; + kuid_t uid; + kgid_t gid; + }; +}; + +typedef int rmap_t; + +enum rmap_level { + RMAP_LEVEL_PTE = 0, + RMAP_LEVEL_PMD = 1, +}; + +struct mmu_table_batch { + struct callback_head rcu; + unsigned int nr; + void *tables[0]; +}; + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct encoded_page *encoded_pages[0]; +}; + +struct mmu_gather { + struct mm_struct *mm; + struct mmu_table_batch *batch; + long unsigned int start; + long unsigned int end; + unsigned int fullmm: 1; + unsigned int need_flush_all: 1; + unsigned int freed_tables: 1; + unsigned int delayed_rmap: 1; + unsigned int cleared_ptes: 1; + unsigned int cleared_pmds: 1; + unsigned int cleared_puds: 1; + unsigned int cleared_p4ds: 1; + unsigned int vma_exec: 1; + unsigned int vma_huge: 1; + unsigned int vma_pfn: 1; + unsigned int batch_count; + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[8]; +}; + +struct unlink_vma_file_batch { + int count; + struct vm_area_struct *vmas[8]; +}; + +typedef int fpb_t; + +struct compact_control { + struct list_head freepages[11]; + struct list_head migratepages; + unsigned int nr_freepages; + unsigned int nr_migratepages; + long unsigned int free_pfn; + long unsigned int migrate_pfn; + long unsigned int fast_start_pfn; + struct zone *zone; + long unsigned int total_migrate_scanned; + long unsigned int total_free_scanned; + short unsigned int fast_search_fail; + short int search_order; + const gfp_t gfp_mask; + int order; + int migratetype; + const unsigned int alloc_flags; + const int highest_zoneidx; + enum migrate_mode mode; + bool ignore_skip_hint; + bool no_set_skip_hint; + bool ignore_block_suitable; + bool direct_compaction; + bool proactive_compaction; + bool whole_zone; + bool contended; + bool finish_pageblock; + bool alloc_contig; +}; + +struct file_lock_context { + spinlock_t flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; + +struct fsnotify_mark_connector { + spinlock_t lock; + unsigned char type; + unsigned char prio; + short unsigned int flags; + union { + void *obj; + struct fsnotify_mark_connector *destroy_next; + }; + struct hlist_head list; +}; + +struct fsnotify_sb_info { + struct fsnotify_mark_connector *sb_marks; + atomic_long_t watched_objects[3]; +}; + +struct file_lock_core { + struct file_lock_core *flc_blocker; + struct list_head flc_list; + struct hlist_node flc_link; + struct 
list_head flc_blocked_requests; + struct list_head flc_blocked_member; + fl_owner_t flc_owner; + unsigned int flc_flags; + unsigned char flc_type; + pid_t flc_pid; + int flc_link_cpu; + wait_queue_head_t flc_wait; + struct file *flc_file; +}; + +struct nlm_lockowner; + +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; + +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + +struct file_lock_operations; + +struct lock_manager_operations; + +struct file_lock { + struct file_lock_core c; + loff_t fl_start; + loff_t fl_end; + const struct file_lock_operations *fl_ops; + const struct lock_manager_operations *fl_lmops; + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; + int state; + unsigned int debug_id; + } afs; + struct { + struct inode *inode; + } ceph; + } fl_u; +}; + +struct lease_manager_operations; + +struct file_lease { + struct file_lock_core c; + struct fasync_struct *fl_fasync; + long unsigned int fl_break_time; + long unsigned int fl_downgrade_time; + const struct lease_manager_operations *fl_lmops; +}; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct lock_manager_operations { + void *lm_mod_owner; + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); + void (*lm_notify)(struct file_lock *); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_lock_expirable)(struct file_lock *); + void (*lm_expire_lock)(); +}; + +struct lease_manager_operations { + bool (*lm_break)(struct file_lease *); + int (*lm_change)(struct file_lease *, int, struct list_head *); + void (*lm_setup)(struct file_lease *, void **); + bool (*lm_breaker_owns_lease)(struct file_lease *); +}; + +struct fd { + long unsigned int word; +}; + +typedef struct fd class_fd_t; + +struct xattr_args { + __u64 value; + __u32 size; + __u32 flags; +}; + +struct simple_xattrs { + struct rb_root rb_root; + rwlock_t lock; +}; + +struct simple_xattr { + struct rb_node rb_node; + char *name; + size_t size; + char value[0]; +}; + +enum fsnotify_group_prio { + FSNOTIFY_PRIO_NORMAL = 0, + FSNOTIFY_PRIO_CONTENT = 1, + FSNOTIFY_PRIO_PRE_CONTENT = 2, + __FSNOTIFY_PRIO_NUM = 3, +}; + +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE = 0, + FSNOTIFY_EVENT_PATH = 1, + FSNOTIFY_EVENT_INODE = 2, + FSNOTIFY_EVENT_DENTRY = 3, + FSNOTIFY_EVENT_ERROR = 4, +}; + +enum fsnotify_iter_type { + FSNOTIFY_ITER_TYPE_INODE = 0, + FSNOTIFY_ITER_TYPE_VFSMOUNT = 1, + FSNOTIFY_ITER_TYPE_SB = 2, + FSNOTIFY_ITER_TYPE_PARENT = 3, + FSNOTIFY_ITER_TYPE_INODE2 = 4, + FSNOTIFY_ITER_TYPE_COUNT = 5, +}; + +struct xattr_name { + char name[256]; +}; + +struct kernel_xattr_ctx { + union { + const void *cvalue; + void *value; + }; + void *kvalue; + size_t size; + struct xattr_name *kname; + unsigned int flags; +}; + +enum zone_stat_item { + NR_FREE_PAGES = 0, + NR_ZONE_LRU_BASE = 1, + NR_ZONE_INACTIVE_ANON = 1, + NR_ZONE_ACTIVE_ANON = 2, + NR_ZONE_INACTIVE_FILE = 3, + NR_ZONE_ACTIVE_FILE = 4, + NR_ZONE_UNEVICTABLE = 5, + NR_ZONE_WRITE_PENDING = 6, + NR_MLOCK = 7, + NR_BOUNCE = 8, + NR_FREE_CMA_PAGES = 9, + NR_VM_ZONE_STAT_ITEMS = 10, +}; + +struct mount; + +struct mnt_namespace { + struct ns_common ns; + struct mount *root; + struct rb_root mounts; + struct user_namespace *user_ns; + struct ucounts *ucounts; + u64 seq; + wait_queue_head_t poll; + u64 event; + unsigned int nr_mounts; + unsigned int 
pending_mounts; + struct rb_node mnt_ns_tree_node; + refcount_t passive; +}; + +enum ucount_type { + UCOUNT_USER_NAMESPACES = 0, + UCOUNT_PID_NAMESPACES = 1, + UCOUNT_UTS_NAMESPACES = 2, + UCOUNT_IPC_NAMESPACES = 3, + UCOUNT_NET_NAMESPACES = 4, + UCOUNT_MNT_NAMESPACES = 5, + UCOUNT_CGROUP_NAMESPACES = 6, + UCOUNT_TIME_NAMESPACES = 7, + UCOUNT_INOTIFY_INSTANCES = 8, + UCOUNT_INOTIFY_WATCHES = 9, + UCOUNT_COUNTS = 10, +}; + +struct fsnotify_group; + +struct fsnotify_iter_info; + +struct fsnotify_mark; + +struct fsnotify_event; + +struct fsnotify_ops { + int (*handle_event)(struct fsnotify_group *, u32, const void *, int, struct inode *, const struct qstr *, u32, struct fsnotify_iter_info *); + int (*handle_inode_event)(struct fsnotify_mark *, u32, struct inode *, struct inode *, const struct qstr *, u32); + void (*free_group_priv)(struct fsnotify_group *); + void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); + void (*free_event)(struct fsnotify_group *, struct fsnotify_event *); + void (*free_mark)(struct fsnotify_mark *); +}; + +struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + struct ucounts *ucounts; +}; + +struct fsnotify_group { + const struct fsnotify_ops *ops; + refcount_t refcnt; + spinlock_t notification_lock; + struct list_head notification_list; + wait_queue_head_t notification_waitq; + unsigned int q_len; + unsigned int max_events; + enum fsnotify_group_prio priority; + bool shutdown; + int flags; + unsigned int owner_flags; + struct mutex mark_mutex; + atomic_t user_waits; + struct list_head marks_list; + struct fasync_struct *fsn_fa; + struct fsnotify_event *overflow_event; + struct mem_cgroup *memcg; + union { + void *private; + struct inotify_group_private_data inotify_data; + }; +}; + +struct fsnotify_iter_info { + struct fsnotify_mark *marks[5]; + struct fsnotify_group *current_group; + unsigned int report_mask; + int srcu_idx; +}; + +struct fsnotify_mark { + __u32 mask; + refcount_t refcnt; + struct fsnotify_group *group; + struct list_head g_list; + spinlock_t lock; + struct hlist_node obj_list; + struct fsnotify_mark_connector *connector; + __u32 ignore_mask; + unsigned int flags; +}; + +struct fsnotify_event { + struct list_head list; +}; + +struct fs_pin { + wait_queue_head_t wait; + int done; + struct hlist_node s_list; + struct hlist_node m_list; + void (*kill)(struct fs_pin *); +}; + +struct mnt_pcp; + +struct mountpoint; + +struct mount { + struct hlist_node mnt_hash; + struct mount *mnt_parent; + struct dentry *mnt_mountpoint; + struct vfsmount mnt; + union { + struct callback_head mnt_rcu; + struct llist_node mnt_llist; + }; + struct mnt_pcp *mnt_pcp; + struct list_head mnt_mounts; + struct list_head mnt_child; + struct list_head mnt_instance; + const char *mnt_devname; + union { + struct rb_node mnt_node; + struct list_head mnt_list; + }; + struct list_head mnt_expire; + struct list_head mnt_share; + struct list_head mnt_slave_list; + struct list_head mnt_slave; + struct mount *mnt_master; + struct mnt_namespace *mnt_ns; + struct mountpoint *mnt_mp; + union { + struct hlist_node mnt_mp_list; + struct hlist_node mnt_umount; + }; + struct list_head mnt_umounting; + struct fsnotify_mark_connector *mnt_fsnotify_marks; + __u32 mnt_fsnotify_mask; + int mnt_id; + u64 mnt_id_unique; + int mnt_group_id; + int mnt_expiry_mark; + struct hlist_head mnt_pins; + struct hlist_head mnt_stuck_children; +}; + +struct mnt_pcp { + int mnt_count; + int mnt_writers; +}; + +struct mountpoint { + struct hlist_node m_hash; + struct dentry 
*m_dentry; + struct hlist_head m_list; + int m_count; +}; + +enum lru_list { + LRU_INACTIVE_ANON = 0, + LRU_ACTIVE_ANON = 1, + LRU_INACTIVE_FILE = 2, + LRU_ACTIVE_FILE = 3, + LRU_UNEVICTABLE = 4, + NR_LRU_LISTS = 5, +}; + +enum { + TASKSTATS_CMD_UNSPEC = 0, + TASKSTATS_CMD_GET = 1, + TASKSTATS_CMD_NEW = 2, + __TASKSTATS_CMD_MAX = 3, +}; + +enum cgroup_bpf_attach_type { + CGROUP_BPF_ATTACH_TYPE_INVALID = -1, + CGROUP_INET_INGRESS = 0, + CGROUP_INET_EGRESS = 1, + CGROUP_INET_SOCK_CREATE = 2, + CGROUP_SOCK_OPS = 3, + CGROUP_DEVICE = 4, + CGROUP_INET4_BIND = 5, + CGROUP_INET6_BIND = 6, + CGROUP_INET4_CONNECT = 7, + CGROUP_INET6_CONNECT = 8, + CGROUP_UNIX_CONNECT = 9, + CGROUP_INET4_POST_BIND = 10, + CGROUP_INET6_POST_BIND = 11, + CGROUP_UDP4_SENDMSG = 12, + CGROUP_UDP6_SENDMSG = 13, + CGROUP_UNIX_SENDMSG = 14, + CGROUP_SYSCTL = 15, + CGROUP_UDP4_RECVMSG = 16, + CGROUP_UDP6_RECVMSG = 17, + CGROUP_UNIX_RECVMSG = 18, + CGROUP_GETSOCKOPT = 19, + CGROUP_SETSOCKOPT = 20, + CGROUP_INET4_GETPEERNAME = 21, + CGROUP_INET6_GETPEERNAME = 22, + CGROUP_UNIX_GETPEERNAME = 23, + CGROUP_INET4_GETSOCKNAME = 24, + CGROUP_INET6_GETSOCKNAME = 25, + CGROUP_UNIX_GETSOCKNAME = 26, + CGROUP_INET_SOCK_RELEASE = 27, + CGROUP_LSM_START = 28, + CGROUP_LSM_END = 27, + MAX_CGROUP_BPF_ATTACH_TYPE = 28, +}; + +enum psi_task_count { + NR_IOWAIT = 0, + NR_MEMSTALL = 1, + NR_RUNNING = 2, + NR_MEMSTALL_RUNNING = 3, + NR_PSI_TASK_COUNTS = 4, +}; + +enum psi_res { + PSI_IO = 0, + PSI_MEM = 1, + PSI_CPU = 2, + NR_PSI_RESOURCES = 3, +}; + +enum psi_states { + PSI_IO_SOME = 0, + PSI_IO_FULL = 1, + PSI_MEM_SOME = 2, + PSI_MEM_FULL = 3, + PSI_CPU_SOME = 4, + PSI_CPU_FULL = 5, + PSI_NONIDLE = 6, + NR_PSI_STATES = 7, +}; + +enum psi_aggregators { + PSI_AVGS = 0, + PSI_POLL = 1, + NR_PSI_AGGREGATORS = 2, +}; + +enum cgroup_subsys_id { + cpuset_cgrp_id = 0, + cpu_cgrp_id = 1, + cpuacct_cgrp_id = 2, + io_cgrp_id = 3, + memory_cgrp_id = 4, + devices_cgrp_id = 5, + freezer_cgrp_id = 6, + net_cls_cgrp_id = 7, + perf_event_cgrp_id = 8, + pids_cgrp_id = 9, + rdma_cgrp_id = 10, + misc_cgrp_id = 11, + debug_cgrp_id = 12, + CGROUP_SUBSYS_COUNT = 13, +}; + +enum memcg_memory_event { + MEMCG_LOW = 0, + MEMCG_HIGH = 1, + MEMCG_MAX = 2, + MEMCG_OOM = 3, + MEMCG_OOM_KILL = 4, + MEMCG_OOM_GROUP_KILL = 5, + MEMCG_SWAP_HIGH = 6, + MEMCG_SWAP_MAX = 7, + MEMCG_SWAP_FAIL = 8, + MEMCG_NR_MEMORY_EVENTS = 9, +}; + +enum page_memcg_data_flags { + MEMCG_DATA_OBJEXTS = 1, + MEMCG_DATA_KMEM = 2, + __NR_MEMCG_DATA_FLAGS = 4, +}; + +struct kernel_cpustat { + u64 cpustat[11]; +}; + +struct kernel_stat { + long unsigned int irqs_sum; + unsigned int softirqs[10]; +}; + +enum { + PROC_ENTRY_PERMANENT = 1, +}; + +struct proc_ops { + unsigned int proc_flags; + int (*proc_open)(struct inode *, struct file *); + ssize_t (*proc_read)(struct file *, char *, size_t, loff_t *); + ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*proc_write)(struct file *, const char *, size_t, loff_t *); + loff_t (*proc_lseek)(struct file *, loff_t, int); + int (*proc_release)(struct inode *, struct file *); + __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); + long int (*proc_ioctl)(struct file *, unsigned int, long unsigned int); + int (*proc_mmap)(struct file *, struct vm_area_struct *); + long unsigned int (*proc_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); +}; + +enum kernfs_node_type { + KERNFS_DIR = 1, + KERNFS_FILE = 2, + KERNFS_LINK = 4, +}; + +enum kernfs_node_flag { + 
KERNFS_ACTIVATED = 16, + KERNFS_NS = 32, + KERNFS_HAS_SEQ_SHOW = 64, + KERNFS_HAS_MMAP = 128, + KERNFS_LOCKDEP = 256, + KERNFS_HIDDEN = 512, + KERNFS_SUICIDAL = 1024, + KERNFS_SUICIDED = 2048, + KERNFS_EMPTY_DIR = 4096, + KERNFS_HAS_RELEASE = 8192, + KERNFS_REMOVING = 16384, +}; + +struct kernfs_syscall_ops; + +struct kernfs_root { + struct kernfs_node *kn; + unsigned int flags; + struct idr ino_idr; + u32 last_id_lowbits; + u32 id_highbits; + struct kernfs_syscall_ops *syscall_ops; + struct list_head supers; + wait_queue_head_t deactivate_waitq; + struct rw_semaphore kernfs_rwsem; + struct rw_semaphore kernfs_iattr_rwsem; + struct rw_semaphore kernfs_supers_rwsem; + struct callback_head rcu; +}; + +struct kernfs_iattrs { + kuid_t ia_uid; + kgid_t ia_gid; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct simple_xattrs xattrs; + atomic_t nr_user_xattrs; + atomic_t user_xattr_size; +}; + +struct kernfs_syscall_ops { + int (*show_options)(struct seq_file *, struct kernfs_root *); + int (*mkdir)(struct kernfs_node *, const char *, umode_t); + int (*rmdir)(struct kernfs_node *); + int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); + int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); +}; + +typedef __u16 __le16; + +typedef void (*rcu_callback_t)(struct callback_head *); + +struct request; + +struct rq_list { + struct request *head; + struct request *tail; +}; + +struct blk_plug { + struct rq_list mq_list; + struct rq_list cached_rqs; + u64 cur_ktime; + short unsigned int nr_ios; + short unsigned int rq_count; + bool multiple_queues; + bool has_elevator; + struct list_head cb_list; +}; + +typedef unsigned int blk_mode_t; + +struct block_device_operations; + +struct timer_rand_state; + +struct disk_events; + +struct badblocks; + +struct blk_independent_access_ranges; + +struct gendisk { + int major; + int first_minor; + int minors; + char disk_name[32]; + short unsigned int events; + short unsigned int event_flags; + struct xarray part_tbl; + struct block_device *part0; + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + struct bio_set bio_split; + int flags; + long unsigned int state; + struct mutex open_mutex; + unsigned int open_partitions; + struct backing_dev_info *bdi; + struct kobject queue_kobj; + struct kobject *slave_dir; + struct list_head slave_bdevs; + struct timer_rand_state *random; + atomic_t sync_io; + struct disk_events *ev; + int node_id; + struct badblocks *bb; + struct lockdep_map lockdep_map; + u64 diskseq; + blk_mode_t open_mode; + struct blk_independent_access_ranges *ia_ranges; +}; + +typedef unsigned int blk_features_t; + +typedef unsigned int blk_flags_t; + +enum blk_integrity_checksum { + BLK_INTEGRITY_CSUM_NONE = 0, + BLK_INTEGRITY_CSUM_IP = 1, + BLK_INTEGRITY_CSUM_CRC = 2, + BLK_INTEGRITY_CSUM_CRC64 = 3, +} __attribute__((mode(byte))); + +struct blk_integrity { + unsigned char flags; + enum blk_integrity_checksum csum_type; + unsigned char tuple_size; + unsigned char pi_offset; + unsigned char interval_exp; + unsigned char tag_size; +}; + +struct queue_limits { + blk_features_t features; + blk_flags_t flags; + long unsigned int seg_boundary_mask; + long unsigned int virt_boundary_mask; + unsigned int max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_user_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int 
logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_user_discard_sectors; + unsigned int max_secure_erase_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int max_hw_zone_append_sectors; + unsigned int max_zone_append_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + unsigned int zone_write_granularity; + unsigned int atomic_write_hw_max; + unsigned int atomic_write_max_sectors; + unsigned int atomic_write_hw_boundary; + unsigned int atomic_write_boundary_sectors; + unsigned int atomic_write_hw_unit_min; + unsigned int atomic_write_unit_min; + unsigned int atomic_write_hw_unit_max; + unsigned int atomic_write_unit_max; + short unsigned int max_segments; + short unsigned int max_integrity_segments; + short unsigned int max_discard_segments; + unsigned int max_open_zones; + unsigned int max_active_zones; + unsigned int dma_alignment; + unsigned int dma_pad_mask; + struct blk_integrity integrity; +}; + +struct elevator_queue; + +struct blk_mq_ops; + +struct blk_mq_ctx; + +struct blk_queue_stats; + +struct rq_qos; + +struct blk_mq_tags; + +struct blk_trace; + +struct blk_flush_queue; + +struct throtl_data; + +struct blk_mq_tag_set; + +struct request_queue { + void *queuedata; + struct elevator_queue *elevator; + const struct blk_mq_ops *mq_ops; + struct blk_mq_ctx *queue_ctx; + long unsigned int queue_flags; + unsigned int rq_timeout; + unsigned int queue_depth; + refcount_t refs; + unsigned int nr_hw_queues; + struct xarray hctx_table; + struct percpu_ref q_usage_counter; + struct lock_class_key io_lock_cls_key; + struct lockdep_map io_lockdep_map; + struct lock_class_key q_lock_cls_key; + struct lockdep_map q_lockdep_map; + struct request *last_merge; + spinlock_t queue_lock; + int quiesce_depth; + struct gendisk *disk; + struct kobject *mq_kobj; + struct queue_limits limits; + struct device *dev; + enum rpm_status rpm_status; + atomic_t pm_only; + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + struct mutex rq_qos_mutex; + int id; + long unsigned int nr_requests; + struct timer_list timeout; + struct work_struct timeout_work; + atomic_t nr_active_requests_shared_tags; + struct blk_mq_tags *sched_shared_tags; + struct list_head icq_list; + long unsigned int blkcg_pols[1]; + struct blkcg_gq *root_blkg; + struct list_head blkg_list; + struct mutex blkcg_mutex; + int node; + spinlock_t requeue_lock; + struct list_head requeue_list; + struct delayed_work requeue_work; + struct blk_trace *blk_trace; + struct blk_flush_queue *fq; + struct list_head flush_list; + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + struct mutex limits_lock; + struct list_head unused_hctx_list; + spinlock_t unused_hctx_lock; + int mq_freeze_depth; + struct throtl_data *td; + struct callback_head callback_head; + wait_queue_head_t mq_freeze_wq; + struct mutex mq_freeze_lock; + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct dentry *rqos_debugfs_dir; + struct mutex debugfs_mutex; + bool mq_sysfs_init_done; +}; + +struct buffer_head; + +typedef void bh_end_io_t(struct buffer_head *, int); + +struct buffer_head { + long unsigned int b_state; + struct buffer_head *b_this_page; + union { + struct page *b_page; + struct folio *b_folio; + }; + sector_t b_blocknr; + size_t b_size; + char *b_data; + struct block_device *b_bdev; + 
bh_end_io_t *b_end_io; + void *b_private; + struct list_head b_assoc_buffers; + struct address_space *b_assoc_map; + atomic_t b_count; + spinlock_t b_uptodate_lock; +}; + +struct io_comp_batch { + struct rq_list req_list; + bool need_ts; + void (*complete)(struct io_comp_batch *); +}; + +struct fiemap_extent; + +struct fiemap_extent_info { + unsigned int fi_flags; + unsigned int fi_extents_mapped; + unsigned int fi_extents_max; + struct fiemap_extent *fi_extents_start; +}; + +struct partition_meta_info { + char uuid[37]; + u8 volname[64]; +}; + +enum bh_state_bits { + BH_Uptodate = 0, + BH_Dirty = 1, + BH_Lock = 2, + BH_Req = 3, + BH_Mapped = 4, + BH_New = 5, + BH_Async_Read = 6, + BH_Async_Write = 7, + BH_Delay = 8, + BH_Boundary = 9, + BH_Write_EIO = 10, + BH_Unwritten = 11, + BH_Quiet = 12, + BH_Meta = 13, + BH_Prio = 14, + BH_Defer_Completion = 15, + BH_PrivateStart = 16, +}; + +typedef unsigned int tid_t; + +struct transaction_chp_stats_s { + long unsigned int cs_chp_time; + __u32 cs_forced_to_close; + __u32 cs_written; + __u32 cs_dropped; +}; + +struct journal_s; + +typedef struct journal_s journal_t; + +struct journal_head; + +struct transaction_s; + +typedef struct transaction_s transaction_t; + +struct transaction_s { + journal_t *t_journal; + tid_t t_tid; + enum { + T_RUNNING = 0, + T_LOCKED = 1, + T_SWITCH = 2, + T_FLUSH = 3, + T_COMMIT = 4, + T_COMMIT_DFLUSH = 5, + T_COMMIT_JFLUSH = 6, + T_COMMIT_CALLBACK = 7, + T_FINISHED = 8, + } t_state; + long unsigned int t_log_start; + int t_nr_buffers; + struct journal_head *t_reserved_list; + struct journal_head *t_buffers; + struct journal_head *t_forget; + struct journal_head *t_checkpoint_list; + struct journal_head *t_shadow_list; + struct list_head t_inode_list; + long unsigned int t_max_wait; + long unsigned int t_start; + long unsigned int t_requested; + struct transaction_chp_stats_s t_chp_stats; + atomic_t t_updates; + atomic_t t_outstanding_credits; + atomic_t t_outstanding_revokes; + atomic_t t_handle_count; + transaction_t *t_cpnext; + transaction_t *t_cpprev; + long unsigned int t_expires; + ktime_t t_start_time; + unsigned int t_synchronous_commit: 1; + int t_need_data_flush; + struct list_head t_private_list; +}; + +struct jbd2_buffer_trigger_type; + +struct journal_head { + struct buffer_head *b_bh; + spinlock_t b_state_lock; + int b_jcount; + unsigned int b_jlist; + unsigned int b_modified; + char *b_frozen_data; + char *b_committed_data; + transaction_t *b_transaction; + transaction_t *b_next_transaction; + struct journal_head *b_tnext; + struct journal_head *b_tprev; + transaction_t *b_cp_transaction; + struct journal_head *b_cpnext; + struct journal_head *b_cpprev; + struct jbd2_buffer_trigger_type *b_triggers; + struct jbd2_buffer_trigger_type *b_frozen_triggers; +}; + +struct jbd2_buffer_trigger_type { + void (*t_frozen)(struct jbd2_buffer_trigger_type *, struct buffer_head *, void *, size_t); + void (*t_abort)(struct jbd2_buffer_trigger_type *, struct buffer_head *); +}; + +struct blk_zone { + __u64 start; + __u64 len; + __u64 wp; + __u8 type; + __u8 cond; + __u8 non_seq; + __u8 reset; + __u8 resv[4]; + __u64 capacity; + __u8 reserved[24]; +}; + +typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *); + +enum blk_unique_id { + BLK_UID_T10 = 1, + BLK_UID_EUI64 = 2, + BLK_UID_NAA = 3, +}; + +struct hd_geometry; + +struct pr_ops; + +struct block_device_operations { + void (*submit_bio)(struct bio *); + int (*poll_bio)(struct bio *, struct io_comp_batch *, unsigned int); + int (*open)(struct gendisk 
*, blk_mode_t); + void (*release)(struct gendisk *); + int (*ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); + int (*compat_ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); + unsigned int (*check_events)(struct gendisk *, unsigned int); + void (*unlock_native_capacity)(struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + int (*set_read_only)(struct block_device *, bool); + void (*free_disk)(struct gendisk *); + void (*swap_slot_free_notify)(struct block_device *, long unsigned int); + int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *); + char * (*devnode)(struct gendisk *, umode_t *); + int (*get_unique_id)(struct gendisk *, u8 *, enum blk_unique_id); + struct module *owner; + const struct pr_ops *pr_ops; + int (*alternative_gpt_sector)(struct gendisk *, sector_t *); +}; + +struct blk_independent_access_range { + struct kobject kobj; + sector_t sector; + sector_t nr_sectors; +}; + +struct blk_independent_access_ranges { + struct kobject kobj; + bool sysfs_registered; + unsigned int nr_ia_ranges; + struct blk_independent_access_range ia_range[0]; +}; + +enum blk_eh_timer_return { + BLK_EH_DONE = 0, + BLK_EH_RESET_TIMER = 1, +}; + +struct blk_mq_hw_ctx; + +struct blk_mq_queue_data; + +struct blk_mq_ops { + blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); + void (*commit_rqs)(struct blk_mq_hw_ctx *); + void (*queue_rqs)(struct rq_list *); + int (*get_budget)(struct request_queue *); + void (*put_budget)(struct request_queue *, int); + void (*set_rq_budget_token)(struct request *, int); + int (*get_rq_budget_token)(struct request *); + enum blk_eh_timer_return (*timeout)(struct request *); + int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); + void (*complete)(struct request *); + int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + int (*init_request)(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int); + void (*exit_request)(struct blk_mq_tag_set *, struct request *, unsigned int); + void (*cleanup_rq)(struct request *); + bool (*busy)(struct request_queue *); + void (*map_queues)(struct blk_mq_tag_set *); + void (*show_rq)(struct seq_file *, struct request *); +}; + +enum pr_type { + PR_WRITE_EXCLUSIVE = 1, + PR_EXCLUSIVE_ACCESS = 2, + PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +struct pr_keys; + +struct pr_held_reservation; + +struct pr_ops { + int (*pr_register)(struct block_device *, u64, u64, u32); + int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); + int (*pr_release)(struct block_device *, u64, enum pr_type); + int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); + int (*pr_clear)(struct block_device *, u64); + int (*pr_read_keys)(struct block_device *, struct pr_keys *); + int (*pr_read_reservation)(struct block_device *, struct pr_held_reservation *); +}; + +struct crypto_alg; + +struct crypto_tfm { + refcount_t refcnt; + u32 crt_flags; + int node; + void (*exit)(struct crypto_tfm *); + struct crypto_alg *__crt_alg; + void *__crt_ctx[0]; +}; + +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); + void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); + void (*cia_decrypt)(struct crypto_tfm 
*, u8 *, const u8 *); +}; + +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); + int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); +}; + +struct crypto_type; + +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + int cra_priority; + refcount_t cra_refcnt; + char cra_name[128]; + char cra_driver_name[128]; + const struct crypto_type *cra_type; + union { + struct cipher_alg cipher; + struct compress_alg compress; + } cra_u; + int (*cra_init)(struct crypto_tfm *); + void (*cra_exit)(struct crypto_tfm *); + void (*cra_destroy)(struct crypto_alg *); + struct module *cra_module; +}; + +struct crypto_instance; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); + unsigned int (*extsize)(struct crypto_alg *); + int (*init_tfm)(struct crypto_tfm *); + void (*show)(struct seq_file *, struct crypto_alg *); + int (*report)(struct sk_buff *, struct crypto_alg *); + void (*free)(struct crypto_instance *); + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct crypto_shash { + unsigned int descsize; + struct crypto_tfm base; +}; + +struct jbd2_journal_handle; + +typedef struct jbd2_journal_handle handle_t; + +struct jbd2_journal_handle { + union { + transaction_t *h_transaction; + journal_t *h_journal; + }; + handle_t *h_rsv_handle; + int h_total_credits; + int h_revoke_credits; + int h_revoke_credits_requested; + int h_ref; + int h_err; + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; + long unsigned int h_start_jiffies; + unsigned int h_requested_credits; + unsigned int saved_alloc_context; +}; + +struct transaction_run_stats_s { + long unsigned int rs_wait; + long unsigned int rs_request_delay; + long unsigned int rs_running; + long unsigned int rs_locked; + long unsigned int rs_flushing; + long unsigned int rs_logging; + __u32 rs_handle_count; + __u32 rs_blocks; + __u32 rs_blocks_logged; +}; + +struct transaction_stats_s { + long unsigned int ts_tid; + long unsigned int ts_requested; + struct transaction_run_stats_s run; +}; + +enum passtype { + PASS_SCAN = 0, + PASS_REVOKE = 1, + PASS_REPLAY = 2, +}; + +struct journal_superblock_s; + +typedef struct journal_superblock_s journal_superblock_t; + +struct jbd2_revoke_table_s; + +struct jbd2_inode; + +struct journal_s { + long unsigned int j_flags; + int j_errno; + struct mutex j_abort_mutex; + struct buffer_head *j_sb_buffer; + journal_superblock_t *j_superblock; + rwlock_t j_state_lock; + int j_barrier_count; + struct mutex j_barrier; + transaction_t *j_running_transaction; + transaction_t *j_committing_transaction; + transaction_t *j_checkpoint_transactions; + wait_queue_head_t j_wait_transaction_locked; + wait_queue_head_t j_wait_done_commit; + wait_queue_head_t j_wait_commit; + wait_queue_head_t j_wait_updates; + wait_queue_head_t j_wait_reserved; + wait_queue_head_t j_fc_wait; + struct mutex j_checkpoint_mutex; + struct buffer_head *j_chkpt_bhs[64]; + struct shrinker *j_shrinker; + struct percpu_counter j_checkpoint_jh_count; + transaction_t *j_shrink_transaction; + long unsigned int j_head; + long unsigned int j_tail; + long unsigned int j_free; + long unsigned int j_first; + long unsigned int j_last; + long 
unsigned int j_fc_first; + long unsigned int j_fc_off; + long unsigned int j_fc_last; + struct block_device *j_dev; + int j_blocksize; + long long unsigned int j_blk_offset; + char j_devname[56]; + struct block_device *j_fs_dev; + errseq_t j_fs_dev_wb_err; + unsigned int j_total_len; + atomic_t j_reserved_credits; + spinlock_t j_list_lock; + struct inode *j_inode; + tid_t j_tail_sequence; + tid_t j_transaction_sequence; + tid_t j_commit_sequence; + tid_t j_commit_request; + __u8 j_uuid[16]; + struct task_struct *j_task; + int j_max_transaction_buffers; + int j_revoke_records_per_block; + int j_transaction_overhead_buffers; + long unsigned int j_commit_interval; + struct timer_list j_commit_timer; + spinlock_t j_revoke_lock; + struct jbd2_revoke_table_s *j_revoke; + struct jbd2_revoke_table_s *j_revoke_table[2]; + struct buffer_head **j_wbuf; + struct buffer_head **j_fc_wbuf; + int j_wbufsize; + int j_fc_wbufsize; + pid_t j_last_sync_writer; + u64 j_average_commit_time; + u32 j_min_batch_time; + u32 j_max_batch_time; + void (*j_commit_callback)(journal_t *, transaction_t *); + int (*j_submit_inode_data_buffers)(struct jbd2_inode *); + int (*j_finish_inode_data_buffers)(struct jbd2_inode *); + spinlock_t j_history_lock; + struct proc_dir_entry *j_proc_entry; + struct transaction_stats_s j_stats; + unsigned int j_failed_commit; + void *j_private; + struct crypto_shash *j_chksum_driver; + __u32 j_csum_seed; + void (*j_fc_cleanup_callback)(struct journal_s *, int, tid_t); + int (*j_fc_replay_callback)(struct journal_s *, struct buffer_head *, enum passtype, int, tid_t); + int (*j_bmap)(struct journal_s *, sector_t *); +}; + +struct journal_header_s { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; +}; + +typedef struct journal_header_s journal_header_t; + +struct journal_superblock_s { + journal_header_t s_header; + __be32 s_blocksize; + __be32 s_maxlen; + __be32 s_first; + __be32 s_sequence; + __be32 s_start; + __be32 s_errno; + __be32 s_feature_compat; + __be32 s_feature_incompat; + __be32 s_feature_ro_compat; + __u8 s_uuid[16]; + __be32 s_nr_users; + __be32 s_dynsuper; + __be32 s_max_transaction; + __be32 s_max_trans_data; + __u8 s_checksum_type; + __u8 s_padding2[3]; + __be32 s_num_fc_blks; + __be32 s_head; + __u32 s_padding[40]; + __be32 s_checksum; + __u8 s_users[768]; +}; + +struct jbd2_inode { + transaction_t *i_transaction; + transaction_t *i_next_transaction; + struct list_head i_list; + struct inode *i_vfs_inode; + long unsigned int i_flags; + loff_t i_dirty_start; + loff_t i_dirty_end; +}; + +struct bgl_lock { + spinlock_t lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct blockgroup_lock { + struct bgl_lock locks[128]; +}; + +struct fiemap_extent { + __u64 fe_logical; + __u64 fe_physical; + __u64 fe_length; + __u64 fe_reserved64[2]; + __u32 fe_flags; + __u32 fe_reserved[3]; +}; + +struct fscrypt_dummy_policy {}; + +typedef int ext4_grpblk_t; + +typedef long long unsigned int ext4_fsblk_t; + +typedef __u32 ext4_lblk_t; + +typedef unsigned int ext4_group_t; + +struct ext4_system_blocks { + struct rb_root root; + struct callback_head rcu; +}; + +struct ext4_group_desc { + __le32 bg_block_bitmap_lo; + __le32 bg_inode_bitmap_lo; + __le32 bg_inode_table_lo; + __le16 bg_free_blocks_count_lo; + __le16 bg_free_inodes_count_lo; + __le16 bg_used_dirs_count_lo; + __le16 bg_flags; + __le32 bg_exclude_bitmap_lo; + __le16 bg_block_bitmap_csum_lo; + __le16 bg_inode_bitmap_csum_lo; + __le16 bg_itable_unused_lo; + __le16 bg_checksum; 
+ __le32 bg_block_bitmap_hi; + __le32 bg_inode_bitmap_hi; + __le32 bg_inode_table_hi; + __le16 bg_free_blocks_count_hi; + __le16 bg_free_inodes_count_hi; + __le16 bg_used_dirs_count_hi; + __le16 bg_itable_unused_hi; + __le32 bg_exclude_bitmap_hi; + __le16 bg_block_bitmap_csum_hi; + __le16 bg_inode_bitmap_csum_hi; + __u32 bg_reserved; +}; + +struct flex_groups { + atomic64_t free_clusters; + atomic_t free_inodes; + atomic_t used_dirs; +}; + +struct ext4_new_group_data { + __u32 group; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 mdata_blocks; + __u32 free_clusters_count; +}; + +enum { + BLOCK_BITMAP = 0, + INODE_BITMAP = 1, + INODE_TABLE = 2, + GROUP_TABLE_COUNT = 3, +}; + +struct extent_status { + struct rb_node rb_node; + ext4_lblk_t es_lblk; + ext4_lblk_t es_len; + ext4_fsblk_t es_pblk; +}; + +struct ext4_es_tree { + struct rb_root root; + struct extent_status *cache_es; +}; + +struct ext4_es_stats { + long unsigned int es_stats_shrunk; + struct percpu_counter es_stats_cache_hits; + struct percpu_counter es_stats_cache_misses; + u64 es_stats_scan_time; + u64 es_stats_max_scan_time; + struct percpu_counter es_stats_all_cnt; + struct percpu_counter es_stats_shk_cnt; +}; + +struct ext4_pending_tree { + struct rb_root root; +}; + +struct ext4_fc_stats { + unsigned int fc_ineligible_reason_count[10]; + long unsigned int fc_num_commits; + long unsigned int fc_ineligible_commits; + long unsigned int fc_failed_commits; + long unsigned int fc_skipped_commits; + long unsigned int fc_numblks; + u64 s_fc_avg_commit_time; +}; + +struct ext4_fc_alloc_region { + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + int ino; + int len; +}; + +struct ext4_fc_replay_state { + int fc_replay_num_tags; + int fc_replay_expected_off; + int fc_current_pass; + int fc_cur_tag; + int fc_crc; + struct ext4_fc_alloc_region *fc_regions; + int fc_regions_size; + int fc_regions_used; + int fc_regions_valid; + int *fc_modified_inodes; + int fc_modified_inodes_used; + int fc_modified_inodes_size; +}; + +struct ext4_inode_info { + __le32 i_data[15]; + __u32 i_dtime; + ext4_fsblk_t i_file_acl; + ext4_group_t i_block_group; + ext4_lblk_t i_dir_start_lookup; + long unsigned int i_flags; + struct rw_semaphore xattr_sem; + union { + struct list_head i_orphan; + unsigned int i_orphan_idx; + }; + struct list_head i_fc_dilist; + struct list_head i_fc_list; + ext4_lblk_t i_fc_lblk_start; + ext4_lblk_t i_fc_lblk_len; + atomic_t i_fc_updates; + atomic_t i_unwritten; + wait_queue_head_t i_fc_wait; + struct mutex i_fc_lock; + loff_t i_disksize; + struct rw_semaphore i_data_sem; + struct inode vfs_inode; + struct jbd2_inode *jinode; + spinlock_t i_raw_lock; + struct timespec64 i_crtime; + atomic_t i_prealloc_active; + unsigned int i_reserved_data_blocks; + struct rb_root i_prealloc_node; + rwlock_t i_prealloc_lock; + struct ext4_es_tree i_es_tree; + rwlock_t i_es_lock; + struct list_head i_es_list; + unsigned int i_es_all_nr; + unsigned int i_es_shk_nr; + ext4_lblk_t i_es_shrink_lblk; + ext4_group_t i_last_alloc_group; + struct ext4_pending_tree i_pending_tree; + __u16 i_extra_isize; + u16 i_inline_off; + u16 i_inline_size; + spinlock_t i_completed_io_lock; + struct list_head i_rsv_conversion_list; + struct work_struct i_rsv_conversion_work; + spinlock_t i_block_reservation_lock; + tid_t i_sync_tid; + tid_t i_datasync_tid; + __u32 i_csum_seed; + kprojid_t i_projid; +}; + +struct ext4_super_block { + __le32 s_inodes_count; + __le32 s_blocks_count_lo; + __le32 
s_r_blocks_count_lo; + __le32 s_free_blocks_count_lo; + __le32 s_free_inodes_count; + __le32 s_first_data_block; + __le32 s_log_block_size; + __le32 s_log_cluster_size; + __le32 s_blocks_per_group; + __le32 s_clusters_per_group; + __le32 s_inodes_per_group; + __le32 s_mtime; + __le32 s_wtime; + __le16 s_mnt_count; + __le16 s_max_mnt_count; + __le16 s_magic; + __le16 s_state; + __le16 s_errors; + __le16 s_minor_rev_level; + __le32 s_lastcheck; + __le32 s_checkinterval; + __le32 s_creator_os; + __le32 s_rev_level; + __le16 s_def_resuid; + __le16 s_def_resgid; + __le32 s_first_ino; + __le16 s_inode_size; + __le16 s_block_group_nr; + __le32 s_feature_compat; + __le32 s_feature_incompat; + __le32 s_feature_ro_compat; + __u8 s_uuid[16]; + char s_volume_name[16]; + char s_last_mounted[64]; + __le32 s_algorithm_usage_bitmap; + __u8 s_prealloc_blocks; + __u8 s_prealloc_dir_blocks; + __le16 s_reserved_gdt_blocks; + __u8 s_journal_uuid[16]; + __le32 s_journal_inum; + __le32 s_journal_dev; + __le32 s_last_orphan; + __le32 s_hash_seed[4]; + __u8 s_def_hash_version; + __u8 s_jnl_backup_type; + __le16 s_desc_size; + __le32 s_default_mount_opts; + __le32 s_first_meta_bg; + __le32 s_mkfs_time; + __le32 s_jnl_blocks[17]; + __le32 s_blocks_count_hi; + __le32 s_r_blocks_count_hi; + __le32 s_free_blocks_count_hi; + __le16 s_min_extra_isize; + __le16 s_want_extra_isize; + __le32 s_flags; + __le16 s_raid_stride; + __le16 s_mmp_update_interval; + __le64 s_mmp_block; + __le32 s_raid_stripe_width; + __u8 s_log_groups_per_flex; + __u8 s_checksum_type; + __u8 s_encryption_level; + __u8 s_reserved_pad; + __le64 s_kbytes_written; + __le32 s_snapshot_inum; + __le32 s_snapshot_id; + __le64 s_snapshot_r_blocks_count; + __le32 s_snapshot_list; + __le32 s_error_count; + __le32 s_first_error_time; + __le32 s_first_error_ino; + __le64 s_first_error_block; + __u8 s_first_error_func[32]; + __le32 s_first_error_line; + __le32 s_last_error_time; + __le32 s_last_error_ino; + __le32 s_last_error_line; + __le64 s_last_error_block; + __u8 s_last_error_func[32]; + __u8 s_mount_opts[64]; + __le32 s_usr_quota_inum; + __le32 s_grp_quota_inum; + __le32 s_overhead_clusters; + __le32 s_backup_bgs[2]; + __u8 s_encrypt_algos[4]; + __u8 s_encrypt_pw_salt[16]; + __le32 s_lpf_ino; + __le32 s_prj_quota_inum; + __le32 s_checksum_seed; + __u8 s_wtime_hi; + __u8 s_mtime_hi; + __u8 s_mkfs_time_hi; + __u8 s_lastcheck_hi; + __u8 s_first_error_time_hi; + __u8 s_last_error_time_hi; + __u8 s_first_error_errcode; + __u8 s_last_error_errcode; + __le16 s_encoding; + __le16 s_encoding_flags; + __le32 s_orphan_file_inum; + __le32 s_reserved[94]; + __le32 s_checksum; +}; + +enum ext4_journal_trigger_type { + EXT4_JTR_ORPHAN_FILE = 0, + EXT4_JTR_NONE = 1, +}; + +struct ext4_journal_trigger { + struct jbd2_buffer_trigger_type tr_triggers; + struct super_block *sb; +}; + +struct ext4_orphan_block { + atomic_t ob_free_entries; + struct buffer_head *ob_bh; +}; + +struct ext4_orphan_info { + int of_blocks; + __u32 of_csum_seed; + struct ext4_orphan_block *of_binfo; +}; + +struct dax_device; + +struct ext4_group_info; + +struct ext4_locality_group; + +struct ext4_li_request; + +struct mb_cache; + +struct ext4_sb_info { + long unsigned int s_desc_size; + long unsigned int s_inodes_per_block; + long unsigned int s_blocks_per_group; + long unsigned int s_clusters_per_group; + long unsigned int s_inodes_per_group; + long unsigned int s_itb_per_group; + long unsigned int s_gdb_count; + long unsigned int s_desc_per_block; + ext4_group_t s_groups_count; + ext4_group_t 
s_blockfile_groups; + long unsigned int s_overhead; + unsigned int s_cluster_ratio; + unsigned int s_cluster_bits; + loff_t s_bitmap_maxbytes; + struct buffer_head *s_sbh; + struct ext4_super_block *s_es; + struct buffer_head **s_group_desc; + unsigned int s_mount_opt; + unsigned int s_mount_opt2; + long unsigned int s_mount_flags; + unsigned int s_def_mount_opt; + unsigned int s_def_mount_opt2; + ext4_fsblk_t s_sb_block; + atomic64_t s_resv_clusters; + kuid_t s_resuid; + kgid_t s_resgid; + short unsigned int s_mount_state; + short unsigned int s_pad; + int s_addr_per_block_bits; + int s_desc_per_block_bits; + int s_inode_size; + int s_first_ino; + unsigned int s_inode_readahead_blks; + unsigned int s_inode_goal; + u32 s_hash_seed[4]; + int s_def_hash_version; + int s_hash_unsigned; + struct percpu_counter s_freeclusters_counter; + struct percpu_counter s_freeinodes_counter; + struct percpu_counter s_dirs_counter; + struct percpu_counter s_dirtyclusters_counter; + struct percpu_counter s_sra_exceeded_retry_limit; + struct blockgroup_lock *s_blockgroup_lock; + struct proc_dir_entry *s_proc; + struct kobject s_kobj; + struct completion s_kobj_unregister; + struct super_block *s_sb; + struct buffer_head *s_mmp_bh; + struct journal_s *s_journal; + long unsigned int s_ext4_flags; + struct mutex s_orphan_lock; + struct list_head s_orphan; + struct ext4_orphan_info s_orphan_info; + long unsigned int s_commit_interval; + u32 s_max_batch_time; + u32 s_min_batch_time; + struct file *s_journal_bdev_file; + unsigned int s_want_extra_isize; + struct ext4_system_blocks *s_system_blks; + struct ext4_group_info ***s_group_info; + struct inode *s_buddy_cache; + spinlock_t s_md_lock; + short unsigned int *s_mb_offsets; + unsigned int *s_mb_maxs; + unsigned int s_group_info_size; + unsigned int s_mb_free_pending; + struct list_head s_freed_data_list[2]; + struct list_head s_discard_list; + struct work_struct s_discard_work; + atomic_t s_retry_alloc_pending; + struct list_head *s_mb_avg_fragment_size; + rwlock_t *s_mb_avg_fragment_size_locks; + struct list_head *s_mb_largest_free_orders; + rwlock_t *s_mb_largest_free_orders_locks; + long unsigned int s_stripe; + unsigned int s_mb_max_linear_groups; + unsigned int s_mb_stream_request; + unsigned int s_mb_max_to_scan; + unsigned int s_mb_min_to_scan; + unsigned int s_mb_stats; + unsigned int s_mb_order2_reqs; + unsigned int s_mb_group_prealloc; + unsigned int s_max_dir_size_kb; + long unsigned int s_mb_last_group; + long unsigned int s_mb_last_start; + unsigned int s_mb_prefetch; + unsigned int s_mb_prefetch_limit; + unsigned int s_mb_best_avail_max_trim_order; + atomic_t s_bal_reqs; + atomic_t s_bal_success; + atomic_t s_bal_allocated; + atomic_t s_bal_ex_scanned; + atomic_t s_bal_cX_ex_scanned[5]; + atomic_t s_bal_groups_scanned; + atomic_t s_bal_goals; + atomic_t s_bal_len_goals; + atomic_t s_bal_breaks; + atomic_t s_bal_2orders; + atomic_t s_bal_p2_aligned_bad_suggestions; + atomic_t s_bal_goal_fast_bad_suggestions; + atomic_t s_bal_best_avail_bad_suggestions; + atomic64_t s_bal_cX_groups_considered[5]; + atomic64_t s_bal_cX_hits[5]; + atomic64_t s_bal_cX_failed[5]; + atomic_t s_mb_buddies_generated; + atomic64_t s_mb_generation_time; + atomic_t s_mb_lost_chunks; + atomic_t s_mb_preallocated; + atomic_t s_mb_discarded; + atomic_t s_lock_busy; + struct ext4_locality_group *s_locality_groups; + long unsigned int s_sectors_written_start; + u64 s_kbytes_written; + unsigned int s_extent_max_zeroout_kb; + unsigned int s_log_groups_per_flex; + struct flex_groups 
**s_flex_groups; + ext4_group_t s_flex_groups_allocated; + struct workqueue_struct *rsv_conversion_wq; + struct timer_list s_err_report; + struct ext4_li_request *s_li_request; + unsigned int s_li_wait_mult; + struct task_struct *s_mmp_tsk; + long unsigned int s_last_trim_minblks; + struct crypto_shash *s_chksum_driver; + __u32 s_csum_seed; + struct shrinker *s_es_shrinker; + struct list_head s_es_list; + long int s_es_nr_inode; + struct ext4_es_stats s_es_stats; + struct mb_cache *s_ea_block_cache; + struct mb_cache *s_ea_inode_cache; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t s_es_lock; + struct ext4_journal_trigger s_journal_triggers[1]; + struct ratelimit_state s_err_ratelimit_state; + struct ratelimit_state s_warning_ratelimit_state; + struct ratelimit_state s_msg_ratelimit_state; + atomic_t s_warning_count; + atomic_t s_msg_count; + struct fscrypt_dummy_policy s_dummy_enc_policy; + struct percpu_rw_semaphore s_writepages_rwsem; + struct dax_device *s_daxdev; + u64 s_dax_part_off; + errseq_t s_bdev_wb_err; + spinlock_t s_bdev_wb_lock; + spinlock_t s_error_lock; + int s_add_error_count; + int s_first_error_code; + __u32 s_first_error_line; + __u32 s_first_error_ino; + __u64 s_first_error_block; + const char *s_first_error_func; + time64_t s_first_error_time; + int s_last_error_code; + __u32 s_last_error_line; + __u32 s_last_error_ino; + __u64 s_last_error_block; + const char *s_last_error_func; + time64_t s_last_error_time; + struct work_struct s_sb_upd_work; + unsigned int s_awu_min; + unsigned int s_awu_max; + atomic_t s_fc_subtid; + struct list_head s_fc_q[2]; + struct list_head s_fc_dentry_q[2]; + unsigned int s_fc_bytes; + spinlock_t s_fc_lock; + struct buffer_head *s_fc_bh; + struct ext4_fc_stats s_fc_stats; + tid_t s_fc_ineligible_tid; + struct ext4_fc_replay_state s_fc_replay_state; + long: 64; +}; + +struct ext4_group_info { + long unsigned int bb_state; + struct rb_root bb_free_root; + ext4_grpblk_t bb_first_free; + ext4_grpblk_t bb_free; + ext4_grpblk_t bb_fragments; + int bb_avg_fragment_size_order; + ext4_grpblk_t bb_largest_free_order; + ext4_group_t bb_group; + struct list_head bb_prealloc_list; + struct rw_semaphore alloc_sem; + struct list_head bb_avg_fragment_size_node; + struct list_head bb_largest_free_order_node; + ext4_grpblk_t bb_counters[0]; +}; + +enum ext4_li_mode { + EXT4_LI_MODE_PREFETCH_BBITMAP = 0, + EXT4_LI_MODE_ITABLE = 1, +}; + +struct ext4_li_request { + struct super_block *lr_super; + enum ext4_li_mode lr_mode; + ext4_group_t lr_first_not_zeroed; + ext4_group_t lr_next_group; + struct list_head lr_request; + long unsigned int lr_next_sched; + long unsigned int lr_timeout; +}; + +struct ext4_iloc { + struct buffer_head *bh; + long unsigned int offset; + ext4_group_t block_group; +}; + +typedef enum { + EXT4_IGET_NORMAL = 0, + EXT4_IGET_SPECIAL = 1, + EXT4_IGET_HANDLE = 2, + EXT4_IGET_BAD = 4, + EXT4_IGET_EA_INODE = 8, +} ext4_iget_flags; + +struct ext4_rcu_ptr { + struct callback_head rcu; + void *ptr; +}; + +struct ext4_new_flex_group_data { + struct ext4_new_group_data *groups; + __u16 *bg_flags; + ext4_group_t resize_bg; + ext4_group_t count; +}; + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + S_VERSION = 8, +}; + +typedef u16 wchar_t; + +struct nls_table { + const char *charset; + const char *alias; + int (*uni2char)(wchar_t, unsigned char *, int); + int (*char2uni)(const unsigned char *, int, wchar_t *); + const unsigned char 
*charset2lower; + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +}; + +struct fat_mount_options { + kuid_t fs_uid; + kgid_t fs_gid; + short unsigned int fs_fmask; + short unsigned int fs_dmask; + short unsigned int codepage; + int time_offset; + char *iocharset; + short unsigned int shortname; + unsigned char name_check; + unsigned char errors; + unsigned char nfs; + short unsigned int allow_utime; + unsigned int quiet: 1; + unsigned int showexec: 1; + unsigned int sys_immutable: 1; + unsigned int dotsOK: 1; + unsigned int isvfat: 1; + unsigned int utf8: 1; + unsigned int unicode_xlate: 1; + unsigned int numtail: 1; + unsigned int flush: 1; + unsigned int nocase: 1; + unsigned int usefree: 1; + unsigned int tz_set: 1; + unsigned int rodir: 1; + unsigned int discard: 1; + unsigned int dos1xfloppy: 1; + unsigned int debug: 1; +}; + +struct fatent_operations; + +struct msdos_sb_info { + short unsigned int sec_per_clus; + short unsigned int cluster_bits; + unsigned int cluster_size; + unsigned char fats; + unsigned char fat_bits; + short unsigned int fat_start; + long unsigned int fat_length; + long unsigned int dir_start; + short unsigned int dir_entries; + long unsigned int data_start; + long unsigned int max_cluster; + long unsigned int root_cluster; + long unsigned int fsinfo_sector; + struct mutex fat_lock; + struct mutex nfs_build_inode_lock; + struct mutex s_lock; + unsigned int prev_free; + unsigned int free_clusters; + unsigned int free_clus_valid; + struct fat_mount_options options; + struct nls_table *nls_disk; + struct nls_table *nls_io; + const void *dir_ops; + int dir_per_block; + int dir_per_block_bits; + unsigned int vol_id; + int fatent_shift; + const struct fatent_operations *fatent_ops; + struct inode *fat_inode; + struct inode *fsinfo_inode; + struct ratelimit_state ratelimit; + spinlock_t inode_hash_lock; + struct hlist_head inode_hashtable[256]; + spinlock_t dir_hash_lock; + struct hlist_head dir_hashtable[256]; + unsigned int dirty; + struct callback_head rcu; +}; + +struct fat_entry; + +struct fatent_operations { + void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); + void (*ent_set_ptr)(struct fat_entry *, int); + int (*ent_bread)(struct super_block *, struct fat_entry *, int, sector_t); + int (*ent_get)(struct fat_entry *); + void (*ent_put)(struct fat_entry *, int); + int (*ent_next)(struct fat_entry *); +}; + +struct msdos_inode_info { + spinlock_t cache_lru_lock; + struct list_head cache_lru; + int nr_caches; + unsigned int cache_valid_id; + loff_t mmu_private; + int i_start; + int i_logstart; + int i_attrs; + loff_t i_pos; + struct hlist_node i_fat_hash; + struct hlist_node i_dir_hash; + struct rw_semaphore truncate_lock; + struct timespec64 i_crtime; + struct inode vfs_inode; +}; + +struct fat_entry { + int entry; + union { + u8 *ent12_p[2]; + __le16 *ent16_p; + __le32 *ent32_p; + } u; + int nr_bhs; + struct buffer_head *bhs[2]; + struct inode *fat_inode; +}; + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +typedef void (*swap_func_t)(void *, void *, int); + +typedef void (*exitcall_t)(); + +struct wait_bit_key { + long unsigned int *flags; + int bit_nr; + long unsigned int timeout; +}; + +typedef int wait_bit_action_f(struct wait_bit_key *, int); + +struct semaphore { + raw_spinlock_t lock; + unsigned int count; + struct list_head wait_list; +}; + +struct trace_print_flags { + long unsigned int mask; + const char *name; +}; + +struct kstatfs { + long int f_type; + long int f_bsize; + u64 f_blocks; + 
u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + __kernel_fsid_t f_fsid; + long int f_namelen; + long int f_frsize; + long int f_flags; + long int f_spare[4]; +}; + +enum req_op { + REQ_OP_READ = 0, + REQ_OP_WRITE = 1, + REQ_OP_FLUSH = 2, + REQ_OP_DISCARD = 3, + REQ_OP_SECURE_ERASE = 5, + REQ_OP_ZONE_APPEND = 7, + REQ_OP_WRITE_ZEROES = 9, + REQ_OP_ZONE_OPEN = 10, + REQ_OP_ZONE_CLOSE = 11, + REQ_OP_ZONE_FINISH = 12, + REQ_OP_ZONE_RESET = 13, + REQ_OP_ZONE_RESET_ALL = 15, + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + REQ_OP_LAST = 36, +}; + +struct miscdevice { + int minor; + const char *name; + const struct file_operations *fops; + struct list_head list; + struct device *parent; + struct device *this_device; + const struct attribute_group **groups; + const char *nodename; + umode_t mode; +}; + +struct btrfs_ioctl_vol_args { + __s64 fd; + char name[4088]; +}; + +struct btrfs_qgroup_limit { + __u64 flags; + __u64 max_rfer; + __u64 max_excl; + __u64 rsv_rfer; + __u64 rsv_excl; +}; + +struct btrfs_qgroup_inherit { + __u64 flags; + __u64 num_qgroups; + __u64 num_ref_copies; + __u64 num_excl_copies; + struct btrfs_qgroup_limit lim; + __u64 qgroups[0]; +}; + +struct btrfs_scrub_progress { + __u64 data_extents_scrubbed; + __u64 tree_extents_scrubbed; + __u64 data_bytes_scrubbed; + __u64 tree_bytes_scrubbed; + __u64 read_errors; + __u64 csum_errors; + __u64 verify_errors; + __u64 no_csum; + __u64 csum_discards; + __u64 super_errors; + __u64 malloc_errors; + __u64 uncorrectable_errors; + __u64 corrected_errors; + __u64 last_physical; + __u64 unverified_errors; +}; + +struct btrfs_balance_args { + __u64 profiles; + union { + __u64 usage; + struct { + __u32 usage_min; + __u32 usage_max; + }; + }; + __u64 devid; + __u64 pstart; + __u64 pend; + __u64 vstart; + __u64 vend; + __u64 target; + __u64 flags; + union { + __u64 limit; + struct { + __u32 limit_min; + __u32 limit_max; + }; + }; + __u32 stripes_min; + __u32 stripes_max; + __u64 unused[6]; +}; + +struct btrfs_balance_progress { + __u64 expected; + __u64 considered; + __u64 completed; +}; + +enum btrfs_dev_stat_values { + BTRFS_DEV_STAT_WRITE_ERRS = 0, + BTRFS_DEV_STAT_READ_ERRS = 1, + BTRFS_DEV_STAT_FLUSH_ERRS = 2, + BTRFS_DEV_STAT_CORRUPTION_ERRS = 3, + BTRFS_DEV_STAT_GENERATION_ERRS = 4, + BTRFS_DEV_STAT_VALUES_MAX = 5, +}; + +struct constant_table { + const char *name; + int value; +}; + +struct btrfs_disk_key { + __le64 objectid; + __u8 type; + __le64 offset; +} __attribute__((packed)); + +struct btrfs_key { + __u64 objectid; + __u8 type; + __u64 offset; +} __attribute__((packed)); + +struct btrfs_header { + __u8 csum[32]; + __u8 fsid[16]; + __le64 bytenr; + __le64 flags; + __u8 chunk_tree_uuid[16]; + __le64 generation; + __le64 owner; + __le32 nritems; + __u8 level; +} __attribute__((packed)); + +struct btrfs_root_backup { + __le64 tree_root; + __le64 tree_root_gen; + __le64 chunk_root; + __le64 chunk_root_gen; + __le64 extent_root; + __le64 extent_root_gen; + __le64 fs_root; + __le64 fs_root_gen; + __le64 dev_root; + __le64 dev_root_gen; + __le64 csum_root; + __le64 csum_root_gen; + __le64 total_bytes; + __le64 bytes_used; + __le64 num_devices; + __le64 unused_64[4]; + __u8 tree_root_level; + __u8 chunk_root_level; + __u8 extent_root_level; + __u8 fs_root_level; + __u8 dev_root_level; + __u8 csum_root_level; + __u8 unused_8[10]; +}; + +struct btrfs_item { + struct btrfs_disk_key key; + __le32 offset; + __le32 size; +} __attribute__((packed)); + +struct btrfs_dev_item { + __le64 devid; + __le64 total_bytes; + __le64 bytes_used; + 
__le32 io_align; + __le32 io_width; + __le32 sector_size; + __le64 type; + __le64 generation; + __le64 start_offset; + __le32 dev_group; + __u8 seek_speed; + __u8 bandwidth; + __u8 uuid[16]; + __u8 fsid[16]; +} __attribute__((packed)); + +struct btrfs_super_block { + __u8 csum[32]; + __u8 fsid[16]; + __le64 bytenr; + __le64 flags; + __le64 magic; + __le64 generation; + __le64 root; + __le64 chunk_root; + __le64 log_root; + __le64 __unused_log_root_transid; + __le64 total_bytes; + __le64 bytes_used; + __le64 root_dir_objectid; + __le64 num_devices; + __le32 sectorsize; + __le32 nodesize; + __le32 __unused_leafsize; + __le32 stripesize; + __le32 sys_chunk_array_size; + __le64 chunk_root_generation; + __le64 compat_flags; + __le64 compat_ro_flags; + __le64 incompat_flags; + __le16 csum_type; + __u8 root_level; + __u8 chunk_root_level; + __u8 log_root_level; + struct btrfs_dev_item dev_item; + char label[256]; + __le64 cache_generation; + __le64 uuid_tree_generation; + __u8 metadata_uuid[16]; + __u64 nr_global_roots; + __le64 reserved[27]; + __u8 sys_chunk_array[2048]; + struct btrfs_root_backup super_roots[4]; + __u8 padding[565]; +} __attribute__((packed)); + +struct btrfs_inode_ref { + __le64 index; + __le16 name_len; +} __attribute__((packed)); + +struct btrfs_timespec { + __le64 sec; + __le32 nsec; +} __attribute__((packed)); + +struct btrfs_inode_item { + __le64 generation; + __le64 transid; + __le64 size; + __le64 nbytes; + __le64 block_group; + __le32 nlink; + __le32 uid; + __le32 gid; + __le32 mode; + __le64 rdev; + __le64 flags; + __le64 sequence; + __le64 reserved[4]; + struct btrfs_timespec atime; + struct btrfs_timespec ctime; + struct btrfs_timespec mtime; + struct btrfs_timespec otime; +}; + +struct btrfs_dir_item { + struct btrfs_disk_key location; + __le64 transid; + __le16 data_len; + __le16 name_len; + __u8 type; +} __attribute__((packed)); + +struct btrfs_root_item { + struct btrfs_inode_item inode; + __le64 generation; + __le64 root_dirid; + __le64 bytenr; + __le64 byte_limit; + __le64 bytes_used; + __le64 last_snapshot; + __le64 flags; + __le32 refs; + struct btrfs_disk_key drop_progress; + __u8 drop_level; + __u8 level; + __le64 generation_v2; + __u8 uuid[16]; + __u8 parent_uuid[16]; + __u8 received_uuid[16]; + __le64 ctransid; + __le64 otransid; + __le64 stransid; + __le64 rtransid; + struct btrfs_timespec ctime; + struct btrfs_timespec otime; + struct btrfs_timespec stime; + struct btrfs_timespec rtime; + __le64 reserved[8]; +} __attribute__((packed)); + +struct btrfs_root_ref { + __le64 dirid; + __le64 sequence; + __le16 name_len; +} __attribute__((packed)); + +enum { + BTRFS_FILE_EXTENT_INLINE = 0, + BTRFS_FILE_EXTENT_REG = 1, + BTRFS_FILE_EXTENT_PREALLOC = 2, + BTRFS_NR_FILE_EXTENT_TYPES = 3, +}; + +struct btrfs_file_extent_item { + __le64 generation; + __le64 ram_bytes; + __u8 compression; + __u8 encryption; + __le16 other_encoding; + __u8 type; + __le64 disk_bytenr; + __le64 disk_num_bytes; + __le64 offset; + __le64 num_bytes; +} __attribute__((packed)); + +struct btrfs_work; + +typedef void (*btrfs_func_t)(struct btrfs_work *); + +typedef void (*btrfs_ordered_func_t)(struct btrfs_work *, bool); + +struct btrfs_workqueue; + +struct btrfs_work { + btrfs_func_t func; + btrfs_ordered_func_t ordered_func; + struct work_struct normal_work; + struct list_head ordered_list; + struct btrfs_workqueue *wq; + long unsigned int flags; +}; + +struct btrfs_inode; + +struct btrfs_ordered_extent { + u64 file_offset; + u64 num_bytes; + u64 ram_bytes; + u64 disk_bytenr; + u64 
disk_num_bytes; + u64 offset; + u64 bytes_left; + u64 truncated_len; + long unsigned int flags; + int compress_type; + int qgroup_rsv; + refcount_t refs; + struct btrfs_inode *inode; + struct list_head list; + struct list_head log_list; + wait_queue_head_t wait; + struct rb_node rb_node; + struct list_head root_extent_list; + struct btrfs_work work; + struct completion completion; + struct btrfs_work flush_work; + struct list_head work_list; + struct list_head bioc_list; +}; + +struct extent_map_tree { + struct rb_root root; + struct list_head modified_extents; + rwlock_t lock; +}; + +struct btrfs_fs_info; + +struct extent_io_tree { + struct rb_root state; + union { + struct btrfs_fs_info *fs_info; + struct btrfs_inode *inode; + }; + u8 owner; + spinlock_t lock; +}; + +enum btrfs_rsv_type { + BTRFS_BLOCK_RSV_GLOBAL = 0, + BTRFS_BLOCK_RSV_DELALLOC = 1, + BTRFS_BLOCK_RSV_TRANS = 2, + BTRFS_BLOCK_RSV_CHUNK = 3, + BTRFS_BLOCK_RSV_DELOPS = 4, + BTRFS_BLOCK_RSV_DELREFS = 5, + BTRFS_BLOCK_RSV_EMPTY = 6, + BTRFS_BLOCK_RSV_TEMP = 7, +}; + +struct btrfs_space_info; + +struct btrfs_block_rsv { + u64 size; + u64 reserved; + struct btrfs_space_info *space_info; + spinlock_t lock; + bool full; + bool failfast; + enum btrfs_rsv_type type: 8; + u64 qgroup_rsv_size; + u64 qgroup_rsv_reserved; +}; + +struct btrfs_root; + +struct btrfs_delayed_node; + +struct btrfs_inode { + struct btrfs_root *root; + u8 prop_compress; + u8 defrag_compress; + spinlock_t lock; + struct extent_map_tree extent_tree; + struct extent_io_tree io_tree; + struct extent_io_tree *file_extent_tree; + struct mutex log_mutex; + unsigned int outstanding_extents; + spinlock_t ordered_tree_lock; + struct rb_root ordered_tree; + struct rb_node *ordered_tree_last; + struct list_head delalloc_inodes; + long unsigned int runtime_flags; + u64 generation; + u64 last_trans; + u64 logged_trans; + int last_sub_trans; + int last_log_commit; + union { + u64 delalloc_bytes; + u64 first_dir_index_to_log; + }; + union { + u64 new_delalloc_bytes; + u64 last_dir_index_offset; + }; + union { + u64 defrag_bytes; + u64 reloc_block_group_start; + }; + u64 disk_i_size; + union { + u64 index_cnt; + u64 csum_bytes; + }; + u64 dir_index; + u64 last_unlink_trans; + union { + u64 last_reflink_trans; + u64 ref_root_id; + }; + u32 flags; + u32 ro_flags; + struct btrfs_block_rsv block_rsv; + struct btrfs_delayed_node *delayed_node; + u64 i_otime_sec; + u32 i_otime_nsec; + struct list_head delayed_iput; + struct rw_semaphore i_mmap_lock; + struct inode vfs_inode; +}; + +struct btrfs_block_group; + +struct btrfs_free_cluster { + spinlock_t lock; + spinlock_t refill_lock; + struct rb_root root; + u64 max_size; + u64 window_start; + bool fragmented; + struct btrfs_block_group *block_group; + struct list_head block_group_list; +}; + +struct btrfs_discard_ctl { + struct workqueue_struct *discard_workers; + struct delayed_work work; + spinlock_t lock; + struct btrfs_block_group *block_group; + struct list_head discard_list[3]; + u64 prev_discard; + u64 prev_discard_time; + atomic_t discardable_extents; + atomic64_t discardable_bytes; + u64 max_discard_size; + u64 delay_ms; + u32 iops_limit; + u32 kbps_limit; + u64 discard_extent_bytes; + u64 discard_bitmap_bytes; + atomic64_t discard_bytes_saved; +}; + +struct btrfs_device; + +struct btrfs_dev_replace { + u64 replace_state; + time64_t time_started; + time64_t time_stopped; + atomic64_t num_write_errors; + atomic64_t num_uncorrectable_read_errors; + u64 cursor_left; + u64 committed_cursor_left; + u64 
cursor_left_last_write_of_item; + u64 cursor_right; + u64 cont_reading_from_srcdev_mode; + int is_valid; + int item_needs_writeback; + struct btrfs_device *srcdev; + struct btrfs_device *tgtdev; + struct mutex lock_finishing_cancel_unmount; + struct rw_semaphore rwsem; + struct btrfs_scrub_progress scrub_progress; + struct percpu_counter bio_counter; + wait_queue_head_t replace_wait; + struct task_struct *replace_task; +}; + +enum btrfs_exclusive_operation { + BTRFS_EXCLOP_NONE = 0, + BTRFS_EXCLOP_BALANCE_PAUSED = 1, + BTRFS_EXCLOP_BALANCE = 2, + BTRFS_EXCLOP_DEV_ADD = 3, + BTRFS_EXCLOP_DEV_REMOVE = 4, + BTRFS_EXCLOP_DEV_REPLACE = 5, + BTRFS_EXCLOP_RESIZE = 6, + BTRFS_EXCLOP_SWAP_ACTIVATE = 7, +}; + +struct btrfs_commit_stats { + u64 commit_count; + u64 max_commit_dur; + u64 last_commit_dur; + u64 total_commit_dur; +}; + +struct btrfs_transaction; + +struct btrfs_stripe_hash_table; + +struct btrfs_fs_devices; + +struct reloc_control; + +struct btrfs_balance_control; + +struct ulist; + +struct btrfs_delayed_root; + +struct btrfs_fs_info { + u8 chunk_tree_uuid[16]; + long unsigned int flags; + struct btrfs_root *tree_root; + struct btrfs_root *chunk_root; + struct btrfs_root *dev_root; + struct btrfs_root *fs_root; + struct btrfs_root *quota_root; + struct btrfs_root *uuid_root; + struct btrfs_root *data_reloc_root; + struct btrfs_root *block_group_root; + struct btrfs_root *stripe_root; + struct btrfs_root *log_root_tree; + rwlock_t global_root_lock; + struct rb_root global_root_tree; + spinlock_t fs_roots_radix_lock; + struct xarray fs_roots_radix; + rwlock_t block_group_cache_lock; + struct rb_root_cached block_group_cache_tree; + atomic64_t free_chunk_space; + struct extent_io_tree excluded_extents; + struct rb_root_cached mapping_tree; + rwlock_t mapping_tree_lock; + struct btrfs_block_rsv global_block_rsv; + struct btrfs_block_rsv trans_block_rsv; + struct btrfs_block_rsv chunk_block_rsv; + struct btrfs_block_rsv delayed_block_rsv; + struct btrfs_block_rsv delayed_refs_rsv; + struct btrfs_block_rsv empty_block_rsv; + u64 generation; + u64 last_trans_committed; + u64 last_reloc_trans; + u64 last_trans_log_full_commit; + long long unsigned int mount_opt; + long unsigned int compress_type: 4; + unsigned int compress_level; + u32 commit_interval; + u64 max_inline; + struct btrfs_transaction *running_transaction; + wait_queue_head_t transaction_throttle; + wait_queue_head_t transaction_wait; + wait_queue_head_t transaction_blocked_wait; + wait_queue_head_t async_submit_wait; + spinlock_t super_lock; + struct btrfs_super_block *super_copy; + struct btrfs_super_block *super_for_commit; + struct super_block *sb; + struct inode *btree_inode; + struct mutex tree_log_mutex; + struct mutex transaction_kthread_mutex; + struct mutex cleaner_mutex; + struct mutex chunk_mutex; + struct mutex ro_block_group_mutex; + struct btrfs_stripe_hash_table *stripe_hash_table; + struct mutex ordered_operations_mutex; + struct rw_semaphore commit_root_sem; + struct rw_semaphore cleanup_work_sem; + struct rw_semaphore subvol_sem; + spinlock_t trans_lock; + struct mutex reloc_mutex; + struct list_head trans_list; + struct list_head dead_roots; + struct list_head caching_block_groups; + spinlock_t delayed_iput_lock; + struct list_head delayed_iputs; + atomic_t nr_delayed_iputs; + wait_queue_head_t delayed_iputs_wait; + atomic64_t tree_mod_seq; + rwlock_t tree_mod_log_lock; + struct rb_root tree_mod_log; + struct list_head tree_mod_seq_list; + atomic_t async_delalloc_pages; + spinlock_t ordered_root_lock; + struct 
list_head ordered_roots; + struct mutex delalloc_root_mutex; + spinlock_t delalloc_root_lock; + struct list_head delalloc_roots; + struct btrfs_workqueue *workers; + struct btrfs_workqueue *delalloc_workers; + struct btrfs_workqueue *flush_workers; + struct workqueue_struct *endio_workers; + struct workqueue_struct *endio_meta_workers; + struct workqueue_struct *rmw_workers; + struct workqueue_struct *compressed_write_workers; + struct btrfs_workqueue *endio_write_workers; + struct btrfs_workqueue *endio_freespace_worker; + struct btrfs_workqueue *caching_workers; + struct btrfs_workqueue *fixup_workers; + struct btrfs_workqueue *delayed_workers; + struct task_struct *transaction_kthread; + struct task_struct *cleaner_kthread; + u32 thread_pool_size; + struct kobject *space_info_kobj; + struct kobject *qgroups_kobj; + struct kobject *discard_kobj; + struct percpu_counter dirty_metadata_bytes; + struct percpu_counter delalloc_bytes; + struct percpu_counter ordered_bytes; + s32 dirty_metadata_batch; + s32 delalloc_batch; + struct percpu_counter evictable_extent_maps; + u64 em_shrinker_last_root; + u64 em_shrinker_last_ino; + atomic64_t em_shrinker_nr_to_scan; + struct work_struct em_shrinker_work; + struct list_head dirty_cowonly_roots; + struct btrfs_fs_devices *fs_devices; + struct list_head space_info; + struct btrfs_space_info *data_sinfo; + struct reloc_control *reloc_ctl; + struct btrfs_free_cluster data_alloc_cluster; + struct btrfs_free_cluster meta_alloc_cluster; + spinlock_t defrag_inodes_lock; + struct rb_root defrag_inodes; + atomic_t defrag_running; + seqlock_t profiles_lock; + u64 avail_data_alloc_bits; + u64 avail_metadata_alloc_bits; + u64 avail_system_alloc_bits; + spinlock_t balance_lock; + struct mutex balance_mutex; + atomic_t balance_pause_req; + atomic_t balance_cancel_req; + struct btrfs_balance_control *balance_ctl; + wait_queue_head_t balance_wait_q; + atomic_t reloc_cancel_req; + u32 data_chunk_allocations; + u32 metadata_ratio; + void *bdev_holder; + struct mutex scrub_lock; + atomic_t scrubs_running; + atomic_t scrub_pause_req; + atomic_t scrubs_paused; + atomic_t scrub_cancel_req; + wait_queue_head_t scrub_pause_wait; + refcount_t scrub_workers_refcnt; + u32 sectors_per_page; + struct workqueue_struct *scrub_workers; + struct btrfs_discard_ctl discard_ctl; + u64 qgroup_flags; + struct rb_root qgroup_tree; + spinlock_t qgroup_lock; + struct ulist *qgroup_ulist; + struct mutex qgroup_ioctl_lock; + struct list_head dirty_qgroups; + u64 qgroup_seq; + struct mutex qgroup_rescan_lock; + struct btrfs_key qgroup_rescan_progress; + struct btrfs_workqueue *qgroup_rescan_workers; + struct completion qgroup_rescan_completion; + struct btrfs_work qgroup_rescan_work; + bool qgroup_rescan_running; + u8 qgroup_drop_subtree_thres; + u64 qgroup_enable_gen; + int fs_error; + long unsigned int fs_state; + struct btrfs_delayed_root *delayed_root; + spinlock_t buffer_lock; + struct xarray buffer_radix; + int backup_root_index; + struct btrfs_dev_replace dev_replace; + struct semaphore uuid_tree_rescan_sem; + struct work_struct async_reclaim_work; + struct work_struct async_data_reclaim_work; + struct work_struct preempt_reclaim_work; + struct work_struct reclaim_bgs_work; + struct list_head reclaim_bgs; + int bg_reclaim_threshold; + spinlock_t unused_bgs_lock; + struct list_head unused_bgs; + struct mutex unused_bg_unpin_mutex; + struct mutex reclaim_bgs_lock; + u32 nodesize; + u32 sectorsize; + u32 sectorsize_bits; + u32 csum_size; + u32 csums_per_leaf; + u32 stripesize; + u64 
max_extent_size; + spinlock_t swapfile_pins_lock; + struct rb_root swapfile_pins; + struct crypto_shash *csum_shash; + enum btrfs_exclusive_operation exclusive_operation; + u64 zone_size; + struct queue_limits limits; + u64 max_zone_append_size; + struct mutex zoned_meta_io_lock; + spinlock_t treelog_bg_lock; + u64 treelog_bg; + spinlock_t relocation_bg_lock; + u64 data_reloc_bg; + struct mutex zoned_data_reloc_io_lock; + struct btrfs_block_group *active_meta_bg; + struct btrfs_block_group *active_system_bg; + u64 nr_global_roots; + spinlock_t zone_active_bgs_lock; + struct list_head zone_active_bgs; + struct btrfs_commit_stats commit_stats; + u64 last_root_drop_gen; + struct lockdep_map btrfs_trans_num_writers_map; + struct lockdep_map btrfs_trans_num_extwriters_map; + struct lockdep_map btrfs_state_change_map[4]; + struct lockdep_map btrfs_trans_pending_ordered_map; + struct lockdep_map btrfs_ordered_extent_map; +}; + +enum btrfs_compression_type { + BTRFS_COMPRESS_NONE = 0, + BTRFS_COMPRESS_ZLIB = 1, + BTRFS_COMPRESS_LZO = 2, + BTRFS_COMPRESS_ZSTD = 3, + BTRFS_NR_COMPRESS_TYPES = 4, +}; + +struct ulist_node { + u64 val; + u64 aux; + struct list_head list; + struct rb_node rb_node; +}; + +struct ulist { + long unsigned int nnodes; + struct list_head nodes; + struct rb_root root; + struct ulist_node *prealloc; +}; + +struct extent_buffer { + u64 start; + u32 len; + u32 folio_size; + long unsigned int bflags; + struct btrfs_fs_info *fs_info; + void *addr; + spinlock_t refs_lock; + atomic_t refs; + int read_mirror; + s8 log_index; + u8 folio_shift; + struct callback_head callback_head; + struct rw_semaphore lock; + struct folio *folios[16]; +}; + +enum btrfs_discard_state { + BTRFS_DISCARD_EXTENTS = 0, + BTRFS_DISCARD_BITMAPS = 1, + BTRFS_DISCARD_RESET_CURSOR = 2, +}; + +struct btrfs_io_ctl { + void *cur; + void *orig; + struct page *page; + struct page **pages; + struct btrfs_fs_info *fs_info; + struct inode *inode; + long unsigned int size; + int index; + int num_pages; + int entries; + int bitmaps; +}; + +enum btrfs_block_group_size_class { + BTRFS_BG_SZ_NONE = 0, + BTRFS_BG_SZ_SMALL = 1, + BTRFS_BG_SZ_MEDIUM = 2, + BTRFS_BG_SZ_LARGE = 3, +}; + +struct btrfs_caching_control; + +struct btrfs_free_space_ctl; + +struct btrfs_chunk_map; + +struct btrfs_block_group { + struct btrfs_fs_info *fs_info; + struct btrfs_inode *inode; + spinlock_t lock; + u64 start; + u64 length; + u64 pinned; + u64 reserved; + u64 used; + u64 delalloc_bytes; + u64 bytes_super; + u64 flags; + u64 cache_generation; + u64 global_root_id; + u64 commit_used; + u32 bitmap_high_thresh; + u32 bitmap_low_thresh; + struct rw_semaphore data_rwsem; + long unsigned int full_stripe_len; + long unsigned int runtime_flags; + unsigned int ro; + int disk_cache_state; + int cached; + struct btrfs_caching_control *caching_ctl; + struct btrfs_space_info *space_info; + struct btrfs_free_space_ctl *free_space_ctl; + struct rb_node cache_node; + struct list_head list; + refcount_t refs; + struct list_head cluster_list; + struct list_head bg_list; + struct list_head ro_list; + atomic_t frozen; + struct list_head discard_list; + int discard_index; + u64 discard_eligible_time; + u64 discard_cursor; + enum btrfs_discard_state discard_state; + struct list_head dirty_list; + struct list_head io_list; + struct btrfs_io_ctl io_ctl; + atomic_t reservations; + atomic_t nocow_writers; + struct mutex free_space_lock; + int swap_extents; + u64 alloc_offset; + u64 zone_unusable; + u64 zone_capacity; + u64 meta_write_pointer; + struct btrfs_chunk_map 
*physical_map; + struct list_head active_bg_list; + struct work_struct zone_finish_work; + struct extent_buffer *last_eb; + enum btrfs_block_group_size_class size_class; + u64 reclaim_mark; +}; + +enum btrfs_lock_nesting { + BTRFS_NESTING_NORMAL = 0, + BTRFS_NESTING_COW = 1, + BTRFS_NESTING_LEFT = 2, + BTRFS_NESTING_RIGHT = 3, + BTRFS_NESTING_LEFT_COW = 4, + BTRFS_NESTING_RIGHT_COW = 5, + BTRFS_NESTING_SPLIT = 6, + BTRFS_NESTING_NEW_ROOT = 7, + BTRFS_NESTING_MAX = 8, +}; + +struct btrfs_drew_lock { + atomic_t readers; + atomic_t writers; + wait_queue_head_t pending_writers; + wait_queue_head_t pending_readers; +}; + +enum { + __EXTENT_DIRTY_BIT = 0, + EXTENT_DIRTY = 1, + __EXTENT_DIRTY_SEQ = 0, + __EXTENT_UPTODATE_BIT = 1, + EXTENT_UPTODATE = 2, + __EXTENT_UPTODATE_SEQ = 1, + __EXTENT_LOCKED_BIT = 2, + EXTENT_LOCKED = 4, + __EXTENT_LOCKED_SEQ = 2, + __EXTENT_DIO_LOCKED_BIT = 3, + EXTENT_DIO_LOCKED = 8, + __EXTENT_DIO_LOCKED_SEQ = 3, + __EXTENT_NEW_BIT = 4, + EXTENT_NEW = 16, + __EXTENT_NEW_SEQ = 4, + __EXTENT_DELALLOC_BIT = 5, + EXTENT_DELALLOC = 32, + __EXTENT_DELALLOC_SEQ = 5, + __EXTENT_DEFRAG_BIT = 6, + EXTENT_DEFRAG = 64, + __EXTENT_DEFRAG_SEQ = 6, + __EXTENT_BOUNDARY_BIT = 7, + EXTENT_BOUNDARY = 128, + __EXTENT_BOUNDARY_SEQ = 7, + __EXTENT_NODATASUM_BIT = 8, + EXTENT_NODATASUM = 256, + __EXTENT_NODATASUM_SEQ = 8, + __EXTENT_CLEAR_META_RESV_BIT = 9, + EXTENT_CLEAR_META_RESV = 512, + __EXTENT_CLEAR_META_RESV_SEQ = 9, + __EXTENT_NEED_WAIT_BIT = 10, + EXTENT_NEED_WAIT = 1024, + __EXTENT_NEED_WAIT_SEQ = 10, + __EXTENT_NORESERVE_BIT = 11, + EXTENT_NORESERVE = 2048, + __EXTENT_NORESERVE_SEQ = 11, + __EXTENT_QGROUP_RESERVED_BIT = 12, + EXTENT_QGROUP_RESERVED = 4096, + __EXTENT_QGROUP_RESERVED_SEQ = 12, + __EXTENT_CLEAR_DATA_RESV_BIT = 13, + EXTENT_CLEAR_DATA_RESV = 8192, + __EXTENT_CLEAR_DATA_RESV_SEQ = 13, + __EXTENT_DELALLOC_NEW_BIT = 14, + EXTENT_DELALLOC_NEW = 16384, + __EXTENT_DELALLOC_NEW_SEQ = 14, + __EXTENT_ADD_INODE_BYTES_BIT = 15, + EXTENT_ADD_INODE_BYTES = 32768, + __EXTENT_ADD_INODE_BYTES_SEQ = 15, + __EXTENT_CLEAR_ALL_BITS_BIT = 16, + EXTENT_CLEAR_ALL_BITS = 65536, + __EXTENT_CLEAR_ALL_BITS_SEQ = 16, + __EXTENT_NOWAIT_BIT = 17, + EXTENT_NOWAIT = 131072, + __EXTENT_NOWAIT_SEQ = 17, +}; + +enum { + IO_TREE_FS_PINNED_EXTENTS = 0, + IO_TREE_FS_EXCLUDED_EXTENTS = 1, + IO_TREE_BTREE_INODE_IO = 2, + IO_TREE_INODE_IO = 3, + IO_TREE_RELOC_BLOCKS = 4, + IO_TREE_TRANS_DIRTY_PAGES = 5, + IO_TREE_ROOT_DIRTY_LOG_PAGES = 6, + IO_TREE_INODE_FILE_EXTENT = 7, + IO_TREE_LOG_CSUM_RANGE = 8, + IO_TREE_SELFTEST = 9, + IO_TREE_DEVICE_ALLOC_STATE = 10, +}; + +struct extent_state { + u64 start; + u64 end; + struct rb_node rb_node; + wait_queue_head_t wq; + refcount_t refs; + u32 state; +}; + +struct btrfs_workqueue { + struct workqueue_struct *normal_wq; + struct btrfs_fs_info *fs_info; + struct list_head ordered_list; + spinlock_t list_lock; + atomic_t pending; + int limit_active; + int current_active; + int thresh; + unsigned int count; + spinlock_t thres_lock; +}; + +struct btrfs_space_info { + struct btrfs_fs_info *fs_info; + spinlock_t lock; + u64 total_bytes; + u64 bytes_used; + u64 bytes_pinned; + u64 bytes_reserved; + u64 bytes_may_use; + u64 bytes_readonly; + u64 bytes_zone_unusable; + u64 max_extent_size; + u64 chunk_size; + int bg_reclaim_threshold; + int clamp; + unsigned int full: 1; + unsigned int chunk_alloc: 1; + unsigned int flush: 1; + unsigned int force_alloc; + u64 disk_used; + u64 disk_total; + u64 flags; + struct list_head list; + struct list_head ro_bgs; + struct list_head 
priority_tickets; + struct list_head tickets; + u64 reclaim_size; + u64 tickets_id; + struct rw_semaphore groups_sem; + struct list_head block_groups[9]; + struct kobject kobj; + struct kobject *block_group_kobjs[9]; + u64 reclaim_count; + u64 reclaim_bytes; + u64 reclaim_errors; + bool dynamic_reclaim; + bool periodic_reclaim; + bool periodic_reclaim_ready; + s64 reclaimable_bytes; +}; + +enum { + BTRFS_FS_STATE_REMOUNTING = 0, + BTRFS_FS_STATE_RO = 1, + BTRFS_FS_STATE_TRANS_ABORTED = 2, + BTRFS_FS_STATE_DEV_REPLACING = 3, + BTRFS_FS_STATE_DUMMY_FS_INFO = 4, + BTRFS_FS_STATE_NO_DATA_CSUMS = 5, + BTRFS_FS_STATE_SKIP_META_CSUMS = 6, + BTRFS_FS_STATE_LOG_CLEANUP_ERROR = 7, + BTRFS_FS_STATE_COUNT = 8, +}; + +enum { + BTRFS_FS_CLOSING_START = 0, + BTRFS_FS_CLOSING_DONE = 1, + BTRFS_FS_LOG_RECOVERING = 2, + BTRFS_FS_OPEN = 3, + BTRFS_FS_QUOTA_ENABLED = 4, + BTRFS_FS_UPDATE_UUID_TREE_GEN = 5, + BTRFS_FS_CREATING_FREE_SPACE_TREE = 6, + BTRFS_FS_BTREE_ERR = 7, + BTRFS_FS_LOG1_ERR = 8, + BTRFS_FS_LOG2_ERR = 9, + BTRFS_FS_QUOTA_OVERRIDE = 10, + BTRFS_FS_FROZEN = 11, + BTRFS_FS_BALANCE_RUNNING = 12, + BTRFS_FS_RELOC_RUNNING = 13, + BTRFS_FS_CLEANER_RUNNING = 14, + BTRFS_FS_CSUM_IMPL_FAST = 15, + BTRFS_FS_DISCARD_RUNNING = 16, + BTRFS_FS_CLEANUP_SPACE_CACHE_V1 = 17, + BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED = 18, + BTRFS_FS_TREE_MOD_LOG_USERS = 19, + BTRFS_FS_COMMIT_TRANS = 20, + BTRFS_FS_UNFINISHED_DROPS = 21, + BTRFS_FS_NEED_ZONE_FINISH = 22, + BTRFS_FS_NEED_TRANS_COMMIT = 23, + BTRFS_FS_ACTIVE_ZONE_TRACKING = 24, + BTRFS_FS_FEATURE_CHANGED = 25, + BTRFS_FS_UNALIGNED_TREE_BLOCK = 26, +}; + +enum { + BTRFS_MOUNT_NODATASUM = 1ULL, + BTRFS_MOUNT_NODATACOW = 2ULL, + BTRFS_MOUNT_NOBARRIER = 4ULL, + BTRFS_MOUNT_SSD = 8ULL, + BTRFS_MOUNT_DEGRADED = 16ULL, + BTRFS_MOUNT_COMPRESS = 32ULL, + BTRFS_MOUNT_NOTREELOG = 64ULL, + BTRFS_MOUNT_FLUSHONCOMMIT = 128ULL, + BTRFS_MOUNT_SSD_SPREAD = 256ULL, + BTRFS_MOUNT_NOSSD = 512ULL, + BTRFS_MOUNT_DISCARD_SYNC = 1024ULL, + BTRFS_MOUNT_FORCE_COMPRESS = 2048ULL, + BTRFS_MOUNT_SPACE_CACHE = 4096ULL, + BTRFS_MOUNT_CLEAR_CACHE = 8192ULL, + BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = 16384ULL, + BTRFS_MOUNT_ENOSPC_DEBUG = 32768ULL, + BTRFS_MOUNT_AUTO_DEFRAG = 65536ULL, + BTRFS_MOUNT_USEBACKUPROOT = 131072ULL, + BTRFS_MOUNT_SKIP_BALANCE = 262144ULL, + BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = 524288ULL, + BTRFS_MOUNT_RESCAN_UUID_TREE = 1048576ULL, + BTRFS_MOUNT_FRAGMENT_DATA = 2097152ULL, + BTRFS_MOUNT_FRAGMENT_METADATA = 4194304ULL, + BTRFS_MOUNT_FREE_SPACE_TREE = 8388608ULL, + BTRFS_MOUNT_NOLOGREPLAY = 16777216ULL, + BTRFS_MOUNT_REF_VERIFY = 33554432ULL, + BTRFS_MOUNT_DISCARD_ASYNC = 67108864ULL, + BTRFS_MOUNT_IGNOREBADROOTS = 134217728ULL, + BTRFS_MOUNT_IGNOREDATACSUMS = 268435456ULL, + BTRFS_MOUNT_NODISCARD = 536870912ULL, + BTRFS_MOUNT_NOSPACECACHE = 1073741824ULL, + BTRFS_MOUNT_IGNOREMETACSUMS = 2147483648ULL, + BTRFS_MOUNT_IGNORESUPERFLAGS = 4294967296ULL, +}; + +struct rcu_string; + +struct btrfs_zoned_device_info; + +struct scrub_ctx; + +struct btrfs_device { + struct list_head dev_list; + struct list_head dev_alloc_list; + struct list_head post_commit_list; + struct btrfs_fs_devices *fs_devices; + struct btrfs_fs_info *fs_info; + struct rcu_string *name; + u64 generation; + struct file *bdev_file; + struct block_device *bdev; + struct btrfs_zoned_device_info *zone_info; + dev_t devt; + long unsigned int dev_state; + blk_status_t last_flush_error; + u64 devid; + u64 total_bytes; + u64 disk_total_bytes; + u64 bytes_used; + u32 io_align; + u32 io_width; + u64 type; + atomic_t 
sb_write_errors; + u32 sector_size; + u8 uuid[16]; + u64 commit_total_bytes; + u64 commit_bytes_used; + struct bio flush_bio; + struct completion flush_wait; + struct scrub_ctx *scrub_ctx; + int dev_stats_valid; + atomic_t dev_stats_ccnt; + atomic_t dev_stat_values[5]; + struct extent_io_tree alloc_state; + struct completion kobj_unregister; + struct kobject devid_kobj; + u64 scrub_speed_max; +}; + +struct btrfs_qgroup_swapped_blocks { + spinlock_t lock; + bool swapped; + struct rb_root blocks[8]; +}; + +struct btrfs_root { + struct rb_node rb_node; + struct extent_buffer *node; + struct extent_buffer *commit_root; + struct btrfs_root *log_root; + struct btrfs_root *reloc_root; + long unsigned int state; + struct btrfs_root_item root_item; + struct btrfs_key root_key; + struct btrfs_fs_info *fs_info; + struct extent_io_tree dirty_log_pages; + struct mutex objectid_mutex; + spinlock_t accounting_lock; + struct btrfs_block_rsv *block_rsv; + struct mutex log_mutex; + wait_queue_head_t log_writer_wait; + wait_queue_head_t log_commit_wait[2]; + struct list_head log_ctxs[2]; + atomic_t log_writers; + atomic_t log_commit[2]; + atomic_t log_batch; + int log_transid; + int log_transid_committed; + int last_log_commit; + pid_t log_start_pid; + u64 last_trans; + u64 free_objectid; + struct btrfs_key defrag_progress; + struct btrfs_key defrag_max; + struct list_head dirty_list; + struct list_head root_list; + struct xarray inodes; + struct xarray delayed_nodes; + dev_t anon_dev; + spinlock_t root_item_lock; + refcount_t refs; + struct mutex delalloc_mutex; + spinlock_t delalloc_lock; + struct list_head delalloc_inodes; + struct list_head delalloc_root; + u64 nr_delalloc_inodes; + struct mutex ordered_extent_mutex; + spinlock_t ordered_extent_lock; + struct list_head ordered_extents; + struct list_head ordered_root; + u64 nr_ordered_extents; + struct list_head reloc_dirty_list; + int send_in_progress; + int dedupe_in_progress; + struct btrfs_drew_lock snapshot_lock; + atomic_t snapshot_force_cow; + spinlock_t qgroup_meta_rsv_lock; + u64 qgroup_meta_rsv_pertrans; + u64 qgroup_meta_rsv_prealloc; + wait_queue_head_t qgroup_flush_wait; + atomic_t nr_swapfiles; + struct btrfs_qgroup_swapped_blocks swapped_blocks; + struct extent_io_tree log_csum_range; + u64 relocation_src_root; +}; + +enum btrfs_trans_state { + TRANS_STATE_RUNNING = 0, + TRANS_STATE_COMMIT_PREP = 1, + TRANS_STATE_COMMIT_START = 2, + TRANS_STATE_COMMIT_DOING = 3, + TRANS_STATE_UNBLOCKED = 4, + TRANS_STATE_SUPER_COMMITTED = 5, + TRANS_STATE_COMPLETED = 6, + TRANS_STATE_MAX = 7, +}; + +struct btrfs_delayed_ref_root { + struct xarray head_refs; + struct xarray dirty_extents; + spinlock_t lock; + long unsigned int num_heads; + long unsigned int num_heads_ready; + u64 pending_csums; + long unsigned int flags; + u64 run_delayed_start; + u64 qgroup_to_skip; +}; + +struct btrfs_transaction { + u64 transid; + atomic_t num_extwriters; + atomic_t num_writers; + refcount_t use_count; + long unsigned int flags; + enum btrfs_trans_state state; + int aborted; + struct list_head list; + struct extent_io_tree dirty_pages; + time64_t start_time; + wait_queue_head_t writer_wait; + wait_queue_head_t commit_wait; + struct list_head pending_snapshots; + struct list_head dev_update_list; + struct list_head switch_commits; + struct list_head dirty_bgs; + struct list_head io_bgs; + struct list_head dropped_roots; + struct extent_io_tree pinned_extents; + struct mutex cache_write_mutex; + spinlock_t dirty_bgs_lock; + struct list_head deleted_bgs; + spinlock_t 
dropped_roots_lock; + struct btrfs_delayed_ref_root delayed_refs; + struct btrfs_fs_info *fs_info; + atomic_t pending_ordered; + wait_queue_head_t pending_wait; +}; + +enum btrfs_chunk_allocation_policy { + BTRFS_CHUNK_ALLOC_REGULAR = 0, + BTRFS_CHUNK_ALLOC_ZONED = 1, +}; + +enum btrfs_read_policy { + BTRFS_READ_POLICY_PID = 0, + BTRFS_NR_READ_POLICY = 1, +}; + +struct btrfs_fs_devices { + u8 fsid[16]; + u8 metadata_uuid[16]; + struct list_head fs_list; + u64 num_devices; + u64 open_devices; + u64 rw_devices; + u64 missing_devices; + u64 total_rw_bytes; + u64 total_devices; + u64 latest_generation; + struct btrfs_device *latest_dev; + struct mutex device_list_mutex; + struct list_head devices; + struct list_head alloc_list; + struct list_head seed_list; + int opened; + bool rotating; + bool discardable; + bool seeding; + bool temp_fsid; + struct btrfs_fs_info *fs_info; + struct kobject fsid_kobj; + struct kobject *devices_kobj; + struct kobject *devinfo_kobj; + struct completion kobj_unregister; + enum btrfs_chunk_allocation_policy chunk_alloc_policy; + enum btrfs_read_policy read_policy; +}; + +struct btrfs_balance_control { + struct btrfs_balance_args data; + struct btrfs_balance_args meta; + struct btrfs_balance_args sys; + u64 flags; + struct btrfs_balance_progress stat; +}; + +struct btrfs_delayed_root { + spinlock_t lock; + struct list_head node_list; + struct list_head prepare_list; + atomic_t items; + atomic_t items_seq; + int nodes; + wait_queue_head_t wait; +}; + +struct btrfs_path { + struct extent_buffer *nodes[8]; + int slots[8]; + u8 locks[8]; + u8 reada; + u8 lowest_level; + unsigned int search_for_split: 1; + unsigned int keep_locks: 1; + unsigned int skip_locking: 1; + unsigned int search_commit_root: 1; + unsigned int need_commit_sem: 1; + unsigned int skip_release_on_error: 1; + unsigned int search_for_extension: 1; + unsigned int nowait: 1; +}; + +struct btrfs_delayed_node { + u64 inode_id; + u64 bytes_reserved; + struct btrfs_root *root; + struct list_head n_list; + struct list_head p_list; + struct rb_root_cached ins_root; + struct rb_root_cached del_root; + struct mutex mutex; + struct btrfs_inode_item inode_item; + refcount_t refs; + int count; + u64 index_cnt; + long unsigned int flags; + u32 curr_index_batch_size; + u32 index_item_leaves; +}; + +struct fscrypt_str { + unsigned char *name; + u32 len; +}; + +enum compact_priority { + COMPACT_PRIO_SYNC_FULL = 0, + MIN_COMPACT_PRIORITY = 0, + COMPACT_PRIO_SYNC_LIGHT = 1, + MIN_COMPACT_COSTLY_PRIORITY = 1, + DEF_COMPACT_PRIORITY = 1, + COMPACT_PRIO_ASYNC = 2, + INIT_COMPACT_PRIORITY = 2, +}; + +enum compact_result { + COMPACT_NOT_SUITABLE_ZONE = 0, + COMPACT_SKIPPED = 1, + COMPACT_DEFERRED = 2, + COMPACT_NO_SUITABLE_PAGE = 3, + COMPACT_CONTINUE = 4, + COMPACT_COMPLETE = 5, + COMPACT_PARTIAL_SKIPPED = 6, + COMPACT_CONTENDED = 7, + COMPACT_SUCCESS = 8, +}; + +enum { + __EXTENT_FLAG_PINNED_BIT = 0, + EXTENT_FLAG_PINNED = 1, + __EXTENT_FLAG_PINNED_SEQ = 0, + __EXTENT_FLAG_COMPRESS_ZLIB_BIT = 1, + EXTENT_FLAG_COMPRESS_ZLIB = 2, + __EXTENT_FLAG_COMPRESS_ZLIB_SEQ = 1, + __EXTENT_FLAG_COMPRESS_LZO_BIT = 2, + EXTENT_FLAG_COMPRESS_LZO = 4, + __EXTENT_FLAG_COMPRESS_LZO_SEQ = 2, + __EXTENT_FLAG_COMPRESS_ZSTD_BIT = 3, + EXTENT_FLAG_COMPRESS_ZSTD = 8, + __EXTENT_FLAG_COMPRESS_ZSTD_SEQ = 3, + __EXTENT_FLAG_PREALLOC_BIT = 4, + EXTENT_FLAG_PREALLOC = 16, + __EXTENT_FLAG_PREALLOC_SEQ = 4, + __EXTENT_FLAG_LOGGING_BIT = 5, + EXTENT_FLAG_LOGGING = 32, + __EXTENT_FLAG_LOGGING_SEQ = 5, + __EXTENT_FLAG_MERGED_BIT = 6, + EXTENT_FLAG_MERGED = 
64, + __EXTENT_FLAG_MERGED_SEQ = 6, +}; + +struct extent_map { + struct rb_node rb_node; + u64 start; + u64 len; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 offset; + u64 ram_bytes; + u64 generation; + u32 flags; + refcount_t refs; + struct list_head list; +}; + +enum { + BTRFS_ORDERED_REGULAR = 0, + BTRFS_ORDERED_NOCOW = 1, + BTRFS_ORDERED_PREALLOC = 2, + BTRFS_ORDERED_COMPRESSED = 3, + BTRFS_ORDERED_DIRECT = 4, + BTRFS_ORDERED_IO_DONE = 5, + BTRFS_ORDERED_COMPLETE = 6, + BTRFS_ORDERED_IOERR = 7, + BTRFS_ORDERED_TRUNCATED = 8, + BTRFS_ORDERED_LOGGED = 9, + BTRFS_ORDERED_LOGGED_CSUM = 10, + BTRFS_ORDERED_PENDING = 11, + BTRFS_ORDERED_ENCODED = 12, +}; + +enum btrfs_delayed_ref_action { + BTRFS_ADD_DELAYED_REF = 1, + BTRFS_DROP_DELAYED_REF = 2, + BTRFS_ADD_DELAYED_EXTENT = 3, + BTRFS_UPDATE_DELAYED_HEAD = 4, +} __attribute__((mode(byte))); + +struct btrfs_data_ref { + u64 objectid; + u64 offset; +}; + +struct btrfs_tree_ref { + int level; +}; + +struct btrfs_delayed_ref_node { + struct rb_node ref_node; + struct list_head add_list; + u64 bytenr; + u64 num_bytes; + u64 seq; + u64 ref_root; + u64 parent; + refcount_t refs; + int ref_mod; + unsigned int action: 8; + unsigned int type: 8; + union { + struct btrfs_tree_ref tree_ref; + struct btrfs_data_ref data_ref; + }; +}; + +struct btrfs_delayed_extent_op { + struct btrfs_disk_key key; + bool update_key; + bool update_flags; + u64 flags_to_set; +}; + +struct btrfs_delayed_ref_head { + u64 bytenr; + u64 num_bytes; + struct mutex mutex; + refcount_t refs; + spinlock_t lock; + struct rb_root_cached ref_tree; + struct list_head ref_add_list; + struct btrfs_delayed_extent_op *extent_op; + int total_ref_mod; + int ref_mod; + u64 owning_root; + u64 reserved_bytes; + u8 level; + bool must_insert_reserved; + bool is_data; + bool is_system; + bool processing; + bool tracked; +}; + +struct btrfs_pending_snapshot; + +struct btrfs_trans_handle { + u64 transid; + u64 bytes_reserved; + u64 delayed_refs_bytes_reserved; + u64 chunk_bytes_reserved; + long unsigned int delayed_ref_updates; + long unsigned int delayed_ref_csum_deletions; + struct btrfs_transaction *transaction; + struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *orig_rsv; + struct btrfs_pending_snapshot *pending_snapshot; + refcount_t use_count; + unsigned int type; + short int aborted; + bool adding_csums; + bool allocating_chunk; + bool removing_chunk; + bool reloc_reserved; + bool in_fsync; + struct btrfs_fs_info *fs_info; + struct list_head new_bgs; + struct btrfs_block_rsv delayed_rsv; +}; + +struct btrfs_pending_snapshot { + struct dentry *dentry; + struct btrfs_inode *dir; + struct btrfs_root *root; + struct btrfs_root_item *root_item; + struct btrfs_root *snap; + struct btrfs_qgroup_inherit *inherit; + struct btrfs_path *path; + struct btrfs_block_rsv block_rsv; + int error; + dev_t anon_dev; + bool readonly; + struct list_head list; +}; + +struct fid { + union { + struct { + u32 ino; + u32 gen; + u32 parent_ino; + u32 parent_gen; + } i32; + struct { + u64 ino; + u32 gen; + } __attribute__((packed)) i64; + struct { + u32 block; + u16 partref; + u16 parent_partref; + u32 generation; + u32 parent_block; + u32 parent_generation; + } udf; + struct { + struct {} __empty_raw; + __u32 raw[0]; + }; + }; +}; + +enum btrfs_trim_state { + BTRFS_TRIM_STATE_UNTRIMMED = 0, + BTRFS_TRIM_STATE_TRIMMED = 1, + BTRFS_TRIM_STATE_TRIMMING = 2, +}; + +struct btrfs_free_space { + struct rb_node offset_index; + struct rb_node bytes_index; + u64 offset; + u64 bytes; + u64 max_extent_size; + long 
unsigned int *bitmap; + struct list_head list; + enum btrfs_trim_state trim_state; + s32 bitmap_extents; +}; + +enum { + BTRFS_STAT_CURR = 0, + BTRFS_STAT_PREV = 1, + BTRFS_STAT_NR_ENTRIES = 2, +}; + +struct btrfs_free_space_op; + +struct btrfs_free_space_ctl { + spinlock_t tree_lock; + struct rb_root free_space_offset; + struct rb_root_cached free_space_bytes; + u64 free_space; + int extents_thresh; + int free_extents; + int total_bitmaps; + int unit; + u64 start; + s32 discardable_extents[2]; + s64 discardable_bytes[2]; + const struct btrfs_free_space_op *op; + struct btrfs_block_group *block_group; + struct mutex cache_writeout_mutex; + struct list_head trimming_ranges; +}; + +struct btrfs_free_space_op { + bool (*use_bitmap)(struct btrfs_free_space_ctl *, struct btrfs_free_space *); +}; + +struct extent_inode_elem; + +struct prelim_ref { + struct rb_node rbnode; + u64 root_id; + struct btrfs_key key_for_search; + u8 level; + int count; + struct extent_inode_elem *inode_list; + u64 parent; + u64 wanted_disk_byte; +}; + +struct rcu_string { + struct callback_head rcu; + char str[0]; +}; + +enum btrfs_raid_types { + BTRFS_RAID_SINGLE = 0, + BTRFS_RAID_RAID0 = 1, + BTRFS_RAID_RAID1 = 2, + BTRFS_RAID_DUP = 3, + BTRFS_RAID_RAID10 = 4, + BTRFS_RAID_RAID5 = 5, + BTRFS_RAID_RAID6 = 6, + BTRFS_RAID_RAID1C3 = 7, + BTRFS_RAID_RAID1C4 = 8, + BTRFS_NR_RAID_TYPES = 9, +}; + +struct btrfs_zoned_device_info { + u64 zone_size; + u8 zone_size_shift; + u32 nr_zones; + unsigned int max_active_zones; + int reserved_active_zones; + atomic_t active_zones_left; + long unsigned int *seq_zones; + long unsigned int *empty_zones; + long unsigned int *active_zones; + struct blk_zone *zone_cache; + struct blk_zone sb_zones[6]; +}; + +struct btrfs_io_context; + +struct btrfs_io_stripe { + struct btrfs_device *dev; + u64 physical; + u64 length; + bool rst_search_commit_root; + struct btrfs_io_context *bioc; +}; + +struct btrfs_io_context { + refcount_t refs; + struct btrfs_fs_info *fs_info; + u64 map_type; + struct bio *orig_bio; + atomic_t error; + u16 max_errors; + u64 logical; + u64 size; + struct list_head rst_ordered_entry; + u16 num_stripes; + u16 mirror_num; + u16 replace_nr_stripes; + s16 replace_stripe_src; + u64 full_stripe_logical; + struct btrfs_io_stripe stripes[0]; +}; + +struct btrfs_device_info { + struct btrfs_device *dev; + u64 dev_offset; + u64 max_avail; + u64 total_avail; +}; + +struct btrfs_raid_attr { + u8 sub_stripes; + u8 dev_stripes; + u8 devs_max; + u8 devs_min; + u8 tolerated_failures; + u8 devs_increment; + u8 ncopies; + u8 nparity; + u8 mindev_error; + const char raid_name[8]; + u64 bg_flag; +}; + +struct btrfs_chunk_map { + struct rb_node rb_node; + int verified_stripes; + refcount_t refs; + u64 start; + u64 chunk_len; + u64 stripe_size; + u64 type; + int io_align; + int io_width; + int num_stripes; + int sub_stripes; + struct btrfs_io_stripe stripes[0]; +}; + +enum btrfs_reserve_flush_enum { + BTRFS_RESERVE_NO_FLUSH = 0, + BTRFS_RESERVE_FLUSH_LIMIT = 1, + BTRFS_RESERVE_FLUSH_EVICT = 2, + BTRFS_RESERVE_FLUSH_DATA = 3, + BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE = 4, + BTRFS_RESERVE_FLUSH_ALL = 5, + BTRFS_RESERVE_FLUSH_ALL_STEAL = 6, + BTRFS_RESERVE_FLUSH_EMERGENCY = 7, +}; + +enum btrfs_flush_state { + FLUSH_DELAYED_ITEMS_NR = 1, + FLUSH_DELAYED_ITEMS = 2, + FLUSH_DELAYED_REFS_NR = 3, + FLUSH_DELAYED_REFS = 4, + FLUSH_DELALLOC = 5, + FLUSH_DELALLOC_WAIT = 6, + FLUSH_DELALLOC_FULL = 7, + ALLOC_CHUNK = 8, + ALLOC_CHUNK_FORCE = 9, + RUN_DELAYED_IPUTS = 10, + COMMIT_TRANS = 11, +}; + +struct 
btrfs_caching_control { + struct list_head list; + struct mutex mutex; + wait_queue_head_t wait; + struct btrfs_work work; + struct btrfs_block_group *block_group; + atomic_t progress; + refcount_t count; +}; + +struct btrfs_qgroup_extent_record { + u64 num_bytes; + u32 data_rsv; + u64 data_rsv_refroot; + struct ulist *old_roots; +}; + +enum btrfs_qgroup_rsv_type { + BTRFS_QGROUP_RSV_DATA = 0, + BTRFS_QGROUP_RSV_META_PERTRANS = 1, + BTRFS_QGROUP_RSV_META_PREALLOC = 2, + BTRFS_QGROUP_RSV_LAST = 3, +}; + +struct btrfs_qgroup_rsv { + u64 values[3]; +}; + +struct btrfs_qgroup { + u64 qgroupid; + u64 rfer; + u64 rfer_cmpr; + u64 excl; + u64 excl_cmpr; + u64 lim_flags; + u64 max_rfer; + u64 max_excl; + u64 rsv_rfer; + u64 rsv_excl; + struct btrfs_qgroup_rsv rsv; + struct list_head groups; + struct list_head members; + struct list_head dirty; + struct list_head iterator; + struct list_head nested_iterator; + struct rb_node node; + u64 old_refcnt; + u64 new_refcnt; + struct kobject kobj; +}; + +enum { + __QGROUP_RESERVE_BIT = 0, + QGROUP_RESERVE = 1, + __QGROUP_RESERVE_SEQ = 0, + __QGROUP_RELEASE_BIT = 1, + QGROUP_RELEASE = 2, + __QGROUP_RELEASE_SEQ = 1, + __QGROUP_FREE_BIT = 2, + QGROUP_FREE = 4, + __QGROUP_FREE_SEQ = 2, +}; + +enum btrfs_rbio_ops { + BTRFS_RBIO_WRITE = 0, + BTRFS_RBIO_READ_REBUILD = 1, + BTRFS_RBIO_PARITY_SCRUB = 2, +}; + +struct sector_ptr; + +struct btrfs_raid_bio { + struct btrfs_io_context *bioc; + struct list_head hash_list; + struct list_head stripe_cache; + struct work_struct work; + struct bio_list bio_list; + spinlock_t bio_list_lock; + struct list_head plug_list; + long unsigned int flags; + enum btrfs_rbio_ops operation; + u16 nr_pages; + u16 nr_sectors; + u8 nr_data; + u8 real_stripes; + u8 stripe_npages; + u8 stripe_nsectors; + u8 scrubp; + int bio_list_bytes; + refcount_t refs; + atomic_t stripes_pending; + wait_queue_head_t io_wait; + long unsigned int dbitmap; + long unsigned int finish_pbitmap; + struct page **stripe_pages; + struct sector_ptr *bio_sectors; + struct sector_ptr *stripe_sectors; + void **finish_pointers; + long unsigned int *error_bitmap; + u8 *csum_buf; + long unsigned int *csum_bitmap; +}; + +struct raid56_bio_trace_info { + u64 devid; + u32 offset; + u8 stripe_nr; +}; + +enum btrfs_extent_allocation_policy { + BTRFS_EXTENT_ALLOC_CLUSTERED = 0, + BTRFS_EXTENT_ALLOC_ZONED = 1, +}; + +struct find_free_extent_ctl { + u64 ram_bytes; + u64 num_bytes; + u64 min_alloc_size; + u64 empty_size; + u64 flags; + int delalloc; + u64 search_start; + u64 empty_cluster; + struct btrfs_free_cluster *last_ptr; + bool use_cluster; + bool have_caching_bg; + bool orig_have_caching_bg; + bool for_treelog; + bool for_data_reloc; + int index; + int loop; + bool retry_uncached; + int cached; + u64 max_extent_size; + u64 total_free_space; + u64 found_offset; + u64 hint_byte; + enum btrfs_extent_allocation_policy policy; + bool hinted; + enum btrfs_block_group_size_class size_class; +}; + +struct trace_event_raw_btrfs_transaction_commit { + struct trace_entry ent; + u8 fsid[16]; + u64 generation; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs__inode { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 blocks; + u64 disk_i_size; + u64 generation; + u64 last_trans; + u64 logged_trans; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_get_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 ino; + u64 start; + u64 len; + u32 flags; + int refs; + char __data[0]; +}; + +struct 
trace_event_raw_btrfs_handle_em_exist { + struct trace_entry ent; + u8 fsid[16]; + u64 e_start; + u64 e_len; + u64 map_start; + u64 map_len; + u64 start; + u64 len; + char __data[0]; +}; + +struct trace_event_raw_btrfs__file_extent_item_regular { + struct trace_entry ent; + u8 fsid[16]; + u64 root_obj; + u64 ino; + loff_t isize; + u64 disk_isize; + u64 num_bytes; + u64 ram_bytes; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 extent_offset; + u8 extent_type; + u8 compression; + u64 extent_start; + u64 extent_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs__file_extent_item_inline { + struct trace_entry ent; + u8 fsid[16]; + u64 root_obj; + u64 ino; + loff_t isize; + u64 disk_isize; + u8 extent_type; + u8 compression; + u64 extent_start; + u64 extent_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs__ordered_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 file_offset; + u64 start; + u64 len; + u64 disk_len; + u64 bytes_left; + long unsigned int flags; + int compress_type; + int refs; + u64 root_objectid; + u64 truncated_len; + char __data[0]; +}; + +struct trace_event_raw_btrfs_finish_ordered_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 start; + u64 len; + bool uptodate; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs__writepage { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + long unsigned int index; + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + char for_kupdate; + char for_reclaim; + char range_cyclic; + long unsigned int writeback_index; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_writepage_end_io_hook { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 start; + u64 end; + int uptodate; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sync_file { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 parent; + int datasync; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sync_fs { + struct trace_entry ent; + u8 fsid[16]; + int wait; + char __data[0]; +}; + +struct trace_event_raw_btrfs_add_block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 offset; + u64 size; + u64 flags; + u64 bytes_used; + u64 bytes_super; + int create; + char __data[0]; +}; + +struct trace_event_raw_btrfs_delayed_tree_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + u64 parent; + u64 ref_root; + int level; + int type; + u64 seq; + char __data[0]; +}; + +struct trace_event_raw_btrfs_delayed_data_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + u64 parent; + u64 ref_root; + u64 owner; + u64 offset; + int type; + u64 seq; + char __data[0]; +}; + +struct trace_event_raw_btrfs_delayed_ref_head { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + int is_data; + char __data[0]; +}; + +struct trace_event_raw_btrfs__chunk { + struct trace_entry ent; + u8 fsid[16]; + int num_stripes; + u64 type; + int sub_stripes; + u64 offset; + u64 size; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_cow_block { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 buf_start; + int refs; + u64 cow_start; + int buf_level; + int cow_level; + char __data[0]; +}; + +struct trace_event_raw_btrfs_space_reservation { + struct trace_entry ent; + u8 fsid[16]; + u32 __data_loc_type; + u64 val; + u64 bytes; + int reserve; + char __data[0]; +}; 
+ +struct trace_event_raw_btrfs_trigger_flush { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 bytes; + int flush; + u32 __data_loc_reason; + char __data[0]; +}; + +struct trace_event_raw_btrfs_flush_space { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 num_bytes; + int state; + int ret; + bool for_preempt; + char __data[0]; +}; + +struct trace_event_raw_btrfs__reserved_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 start; + u64 len; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent_search_loop { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + u64 loop; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent_have_block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + u64 loop; + bool hinted; + u64 bg_start; + u64 bg_flags; + char __data[0]; +}; + +struct trace_event_raw_btrfs__reserve_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + int bg_size_class; + u64 start; + u64 len; + u64 loop; + bool hinted; + int size_class; + char __data[0]; +}; + +struct trace_event_raw_btrfs_find_cluster { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + u64 start; + u64 bytes; + u64 empty_size; + u64 min_bytes; + char __data[0]; +}; + +struct trace_event_raw_btrfs_failed_cluster_setup { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_setup_cluster { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + u64 start; + u64 max_size; + u64 size; + int bitmap; + char __data[0]; +}; + +struct trace_event_raw_alloc_extent_state { + struct trace_entry ent; + const struct extent_state *state; + long unsigned int mask; + const void *ip; + char __data[0]; +}; + +struct trace_event_raw_free_extent_state { + struct trace_entry ent; + const struct extent_state *state; + const void *ip; + char __data[0]; +}; + +struct trace_event_raw_btrfs__work { + struct trace_entry ent; + u8 fsid[16]; + const void *work; + const void *wq; + const void *func; + const void *ordered_func; + const void *normal_work; + char __data[0]; +}; + +struct trace_event_raw_btrfs__work__done { + struct trace_entry ent; + u8 fsid[16]; + const void *wtag; + char __data[0]; +}; + +struct trace_event_raw_btrfs_workqueue { + struct trace_entry ent; + u8 fsid[16]; + const void *wq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_btrfs_workqueue_done { + struct trace_entry ent; + u8 fsid[16]; + const void *wq; + char __data[0]; +}; + +struct trace_event_raw_btrfs__qgroup_rsv_data { + struct trace_entry ent; + u8 fsid[16]; + u64 rootid; + u64 ino; + u64 start; + u64 len; + u64 reserved; + int op; + char __data[0]; +}; + +struct trace_event_raw_btrfs_qgroup_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + char __data[0]; +}; + +struct trace_event_raw_qgroup_num_dirty_extents { + struct trace_entry ent; + u8 fsid[16]; + u64 transid; + u64 num_dirty_extents; + char __data[0]; +}; + +struct trace_event_raw_btrfs_qgroup_account_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 transid; + u64 bytenr; + u64 num_bytes; + u64 nr_old_roots; + u64 nr_new_roots; + char __data[0]; +}; + +struct 
trace_event_raw_qgroup_update_counters { + struct trace_entry ent; + u8 fsid[16]; + u64 qgid; + u64 old_rfer; + u64 old_excl; + u64 cur_old_count; + u64 cur_new_count; + char __data[0]; +}; + +struct trace_event_raw_qgroup_update_reserve { + struct trace_entry ent; + u8 fsid[16]; + u64 qgid; + u64 cur_reserved; + s64 diff; + int type; + char __data[0]; +}; + +struct trace_event_raw_qgroup_meta_reserve { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + int type; + char __data[0]; +}; + +struct trace_event_raw_qgroup_meta_convert { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + char __data[0]; +}; + +struct trace_event_raw_qgroup_meta_free_all_pertrans { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + int type; + char __data[0]; +}; + +struct trace_event_raw_btrfs__prelim_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 root_id; + u64 objectid; + u8 type; + u64 offset; + int level; + int old_count; + u64 parent; + u64 bytenr; + int mod_count; + u64 tree_size; + char __data[0]; +}; + +struct trace_event_raw_btrfs_inode_mod_outstanding_extents { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 ino; + int mod; + unsigned int outstanding; + char __data[0]; +}; + +struct trace_event_raw_btrfs__block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 len; + u64 used; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_btrfs_set_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int set_bits; + char __data[0]; +}; + +struct trace_event_raw_btrfs_clear_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int clear_bits; + char __data[0]; +}; + +struct trace_event_raw_btrfs_convert_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int set_bits; + unsigned int clear_bits; + char __data[0]; +}; + +struct trace_event_raw_btrfs_dump_space_info { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 total_bytes; + u64 bytes_used; + u64 bytes_pinned; + u64 bytes_reserved; + u64 bytes_may_use; + u64 bytes_readonly; + u64 reclaim_size; + int clamp; + u64 global_reserved; + u64 trans_reserved; + u64 delayed_refs_reserved; + u64 delayed_reserved; + u64 free_chunk_space; + u64 delalloc_bytes; + u64 ordered_bytes; + char __data[0]; +}; + +struct trace_event_raw_btrfs_reserve_ticket { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 bytes; + u64 start_ns; + int flush; + int error; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sleep_tree_lock { + struct trace_entry ent; + u8 fsid[16]; + u64 block; + u64 generation; + u64 start_ns; + u64 end_ns; + u64 diff_ns; + u64 owner; + int is_log_tree; + char __data[0]; +}; + +struct trace_event_raw_btrfs_locking_events { + struct trace_entry ent; + u8 fsid[16]; + u64 block; + u64 generation; + u64 owner; + int is_log_tree; + char __data[0]; +}; + +struct trace_event_raw_btrfs__space_info_update { + struct trace_entry ent; + u8 fsid[16]; + u64 type; + u64 old; + s64 diff; + char __data[0]; +}; + +struct trace_event_raw_btrfs_raid56_bio { + struct trace_entry ent; + u8 fsid[16]; + u64 full_stripe; + u64 physical; + u64 devid; + u32 offset; + u32 len; + u8 opf; + u8 total_stripes; + u8 real_stripes; + u8 nr_data; + u8 stripe_nr; + char __data[0]; +}; + +struct 
trace_event_raw_btrfs_insert_one_raid_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 logical; + u64 length; + int num_stripes; + char __data[0]; +}; + +struct trace_event_raw_btrfs_raid_extent_delete { + struct trace_entry ent; + u8 fsid[16]; + u64 start; + u64 end; + u64 found_start; + u64 found_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs_get_raid_extent_offset { + struct trace_entry ent; + u8 fsid[16]; + u64 logical; + u64 length; + u64 physical; + u64 devid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_count { + struct trace_entry ent; + u8 fsid[16]; + long int nr; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_scan_enter { + struct trace_entry ent; + u8 fsid[16]; + long int nr_to_scan; + long int nr; + u64 last_root_id; + u64 last_ino; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_scan_exit { + struct trace_entry ent; + u8 fsid[16]; + long int nr_dropped; + long int nr; + u64 last_root_id; + u64 last_ino; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_remove_em { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 root_id; + u64 start; + u64 len; + u32 flags; + char __data[0]; +}; + +struct trace_event_data_offsets_btrfs_transaction_commit {}; + +struct trace_event_data_offsets_btrfs__inode {}; + +struct trace_event_data_offsets_btrfs_get_extent {}; + +struct trace_event_data_offsets_btrfs_handle_em_exist {}; + +struct trace_event_data_offsets_btrfs__file_extent_item_regular {}; + +struct trace_event_data_offsets_btrfs__file_extent_item_inline {}; + +struct trace_event_data_offsets_btrfs__ordered_extent {}; + +struct trace_event_data_offsets_btrfs_finish_ordered_extent {}; + +struct trace_event_data_offsets_btrfs__writepage {}; + +struct trace_event_data_offsets_btrfs_writepage_end_io_hook {}; + +struct trace_event_data_offsets_btrfs_sync_file {}; + +struct trace_event_data_offsets_btrfs_sync_fs {}; + +struct trace_event_data_offsets_btrfs_add_block_group {}; + +struct trace_event_data_offsets_btrfs_delayed_tree_ref {}; + +struct trace_event_data_offsets_btrfs_delayed_data_ref {}; + +struct trace_event_data_offsets_btrfs_delayed_ref_head {}; + +struct trace_event_data_offsets_btrfs__chunk {}; + +struct trace_event_data_offsets_btrfs_cow_block {}; + +struct trace_event_data_offsets_btrfs_space_reservation { + u32 type; + const void *type_ptr_; +}; + +struct trace_event_data_offsets_btrfs_trigger_flush { + u32 reason; + const void *reason_ptr_; +}; + +struct trace_event_data_offsets_btrfs_flush_space {}; + +struct trace_event_data_offsets_btrfs__reserved_extent {}; + +struct trace_event_data_offsets_find_free_extent {}; + +struct trace_event_data_offsets_find_free_extent_search_loop {}; + +struct trace_event_data_offsets_find_free_extent_have_block_group {}; + +struct trace_event_data_offsets_btrfs__reserve_extent {}; + +struct trace_event_data_offsets_btrfs_find_cluster {}; + +struct trace_event_data_offsets_btrfs_failed_cluster_setup {}; + +struct trace_event_data_offsets_btrfs_setup_cluster {}; + +struct trace_event_data_offsets_alloc_extent_state {}; + +struct trace_event_data_offsets_free_extent_state {}; + +struct trace_event_data_offsets_btrfs__work {}; + +struct trace_event_data_offsets_btrfs__work__done {}; + +struct trace_event_data_offsets_btrfs_workqueue { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_btrfs_workqueue_done {}; + +struct trace_event_data_offsets_btrfs__qgroup_rsv_data {}; + +struct 
trace_event_data_offsets_btrfs_qgroup_extent {}; + +struct trace_event_data_offsets_qgroup_num_dirty_extents {}; + +struct trace_event_data_offsets_btrfs_qgroup_account_extent {}; + +struct trace_event_data_offsets_qgroup_update_counters {}; + +struct trace_event_data_offsets_qgroup_update_reserve {}; + +struct trace_event_data_offsets_qgroup_meta_reserve {}; + +struct trace_event_data_offsets_qgroup_meta_convert {}; + +struct trace_event_data_offsets_qgroup_meta_free_all_pertrans {}; + +struct trace_event_data_offsets_btrfs__prelim_ref {}; + +struct trace_event_data_offsets_btrfs_inode_mod_outstanding_extents {}; + +struct trace_event_data_offsets_btrfs__block_group {}; + +struct trace_event_data_offsets_btrfs_set_extent_bit {}; + +struct trace_event_data_offsets_btrfs_clear_extent_bit {}; + +struct trace_event_data_offsets_btrfs_convert_extent_bit {}; + +struct trace_event_data_offsets_btrfs_dump_space_info {}; + +struct trace_event_data_offsets_btrfs_reserve_ticket {}; + +struct trace_event_data_offsets_btrfs_sleep_tree_lock {}; + +struct trace_event_data_offsets_btrfs_locking_events {}; + +struct trace_event_data_offsets_btrfs__space_info_update {}; + +struct trace_event_data_offsets_btrfs_raid56_bio {}; + +struct trace_event_data_offsets_btrfs_insert_one_raid_extent {}; + +struct trace_event_data_offsets_btrfs_raid_extent_delete {}; + +struct trace_event_data_offsets_btrfs_get_raid_extent_offset {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_count {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_scan_enter {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_scan_exit {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_remove_em {}; + +typedef void (*btf_trace_btrfs_transaction_commit)(void *, const struct btrfs_fs_info *); + +typedef void (*btf_trace_btrfs_inode_new)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_inode_request)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_inode_evict)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_get_extent)(void *, const struct btrfs_root *, const struct btrfs_inode *, const struct extent_map *); + +typedef void (*btf_trace_btrfs_handle_em_exist)(void *, const struct btrfs_fs_info *, const struct extent_map *, const struct extent_map *, u64, u64); + +typedef void (*btf_trace_btrfs_get_extent_show_fi_regular)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, u64); + +typedef void (*btf_trace_btrfs_truncate_show_fi_regular)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, u64); + +typedef void (*btf_trace_btrfs_get_extent_show_fi_inline)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, int, u64); + +typedef void (*btf_trace_btrfs_truncate_show_fi_inline)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, int, u64); + +typedef void (*btf_trace_btrfs_ordered_extent_add)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_remove)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_start)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_put)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent 
*); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_range)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_first_range)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_for_logging)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_first)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_split)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_dec_test_pending)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_mark_finished)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_finish_ordered_extent)(void *, const struct btrfs_inode *, u64, u64, bool); + +typedef void (*btf_trace_extent_writepage)(void *, const struct folio *, const struct inode *, const struct writeback_control *); + +typedef void (*btf_trace_btrfs_writepage_end_io_hook)(void *, const struct btrfs_inode *, u64, u64, int); + +typedef void (*btf_trace_btrfs_sync_file)(void *, const struct file *, int); + +typedef void (*btf_trace_btrfs_sync_fs)(void *, const struct btrfs_fs_info *, int); + +typedef void (*btf_trace_btrfs_add_block_group)(void *, const struct btrfs_fs_info *, const struct btrfs_block_group *, int); + +typedef void (*btf_trace_add_delayed_tree_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_run_delayed_tree_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_add_delayed_data_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_run_delayed_data_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_add_delayed_ref_head)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_head *, int); + +typedef void (*btf_trace_run_delayed_ref_head)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_head *, int); + +typedef void (*btf_trace_btrfs_chunk_alloc)(void *, const struct btrfs_fs_info *, const struct btrfs_chunk_map *, u64, u64); + +typedef void (*btf_trace_btrfs_chunk_free)(void *, const struct btrfs_fs_info *, const struct btrfs_chunk_map *, u64, u64); + +typedef void (*btf_trace_btrfs_cow_block)(void *, const struct btrfs_root *, const struct extent_buffer *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_space_reservation)(void *, const struct btrfs_fs_info *, const char *, u64, u64, int); + +typedef void (*btf_trace_btrfs_trigger_flush)(void *, const struct btrfs_fs_info *, u64, u64, int, const char *); + +typedef void (*btf_trace_btrfs_flush_space)(void *, const struct btrfs_fs_info *, u64, u64, int, int, bool); + +typedef void (*btf_trace_btrfs_reserved_extent_alloc)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_btrfs_reserved_extent_free)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_find_free_extent)(void *, const struct 
btrfs_root *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_find_free_extent_search_loop)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_find_free_extent_have_block_group)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_reserve_extent)(void *, const struct btrfs_block_group *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_btrfs_reserve_extent_cluster)(void *, const struct btrfs_block_group *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_btrfs_find_cluster)(void *, const struct btrfs_block_group *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_failed_cluster_setup)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_setup_cluster)(void *, const struct btrfs_block_group *, const struct btrfs_free_cluster *, u64, int); + +typedef void (*btf_trace_alloc_extent_state)(void *, const struct extent_state *, gfp_t, long unsigned int); + +typedef void (*btf_trace_free_extent_state)(void *, const struct extent_state *, long unsigned int); + +typedef void (*btf_trace_btrfs_work_queued)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_work_sched)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_all_work_done)(void *, const struct btrfs_fs_info *, const void *); + +typedef void (*btf_trace_btrfs_ordered_sched)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_workqueue_alloc)(void *, const struct btrfs_workqueue *, const char *); + +typedef void (*btf_trace_btrfs_workqueue_destroy)(void *, const struct btrfs_workqueue *); + +typedef void (*btf_trace_btrfs_qgroup_reserve_data)(void *, const struct inode *, u64, u64, u64, int); + +typedef void (*btf_trace_btrfs_qgroup_release_data)(void *, const struct inode *, u64, u64, u64, int); + +typedef void (*btf_trace_btrfs_qgroup_account_extents)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup_extent_record *, u64); + +typedef void (*btf_trace_btrfs_qgroup_trace_extent)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup_extent_record *, u64); + +typedef void (*btf_trace_qgroup_num_dirty_extents)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_btrfs_qgroup_account_extent)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64, u64); + +typedef void (*btf_trace_qgroup_update_counters)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup *, u64, u64); + +typedef void (*btf_trace_qgroup_update_reserve)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup *, s64, int); + +typedef void (*btf_trace_qgroup_meta_reserve)(void *, const struct btrfs_root *, s64, int); + +typedef void (*btf_trace_qgroup_meta_convert)(void *, const struct btrfs_root *, s64); + +typedef void (*btf_trace_qgroup_meta_free_all_pertrans)(void *, struct btrfs_root *); + +typedef void (*btf_trace_btrfs_prelim_ref_merge)(void *, const struct btrfs_fs_info *, const struct prelim_ref *, const struct prelim_ref *, u64); + +typedef void (*btf_trace_btrfs_prelim_ref_insert)(void *, const struct btrfs_fs_info *, const struct prelim_ref *, const struct prelim_ref *, u64); + +typedef void (*btf_trace_btrfs_inode_mod_outstanding_extents)(void *, const struct btrfs_root *, u64, int, unsigned int); + +typedef void (*btf_trace_btrfs_remove_block_group)(void *, const struct btrfs_block_group *); + +typedef void 
(*btf_trace_btrfs_add_unused_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_add_reclaim_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_reclaim_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_skip_unused_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_set_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int); + +typedef void (*btf_trace_btrfs_clear_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int); + +typedef void (*btf_trace_btrfs_convert_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int, unsigned int); + +typedef void (*btf_trace_btrfs_done_preemptive_reclaim)(void *, struct btrfs_fs_info *, const struct btrfs_space_info *); + +typedef void (*btf_trace_btrfs_fail_all_tickets)(void *, struct btrfs_fs_info *, const struct btrfs_space_info *); + +typedef void (*btf_trace_btrfs_reserve_ticket)(void *, const struct btrfs_fs_info *, u64, u64, u64, int, int); + +typedef void (*btf_trace_btrfs_tree_read_lock)(void *, const struct extent_buffer *, u64); + +typedef void (*btf_trace_btrfs_tree_lock)(void *, const struct extent_buffer *, u64); + +typedef void (*btf_trace_btrfs_tree_unlock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_unlock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_unlock_blocking)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_set_lock_blocking_read)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_set_lock_blocking_write)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_try_tree_read_lock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_lock_atomic)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_update_bytes_may_use)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_update_bytes_pinned)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_update_bytes_zone_unusable)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_raid56_read)(void *, const struct btrfs_raid_bio *, const struct bio *, const struct raid56_bio_trace_info *); + +typedef void (*btf_trace_raid56_write)(void *, const struct btrfs_raid_bio *, const struct bio *, const struct raid56_bio_trace_info *); + +typedef void (*btf_trace_btrfs_insert_one_raid_extent)(void *, const struct btrfs_fs_info *, u64, u64, int); + +typedef void (*btf_trace_btrfs_raid_extent_delete)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_get_raid_extent_offset)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_count)(void *, const struct btrfs_fs_info *, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_scan_enter)(void *, const struct btrfs_fs_info *, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_scan_exit)(void *, const struct btrfs_fs_info *, long int, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_remove_em)(void *, const struct btrfs_inode *, const struct extent_map *); + +struct btrfs_fs_context { + char *subvol_name; + u64 subvol_objectid; + u64 max_inline; + u32 
commit_interval; + u32 metadata_ratio; + u32 thread_pool_size; + long long unsigned int mount_opt; + long unsigned int compress_type: 4; + unsigned int compress_level; + refcount_t refs; +}; + +enum { + Opt_acl = 0, + Opt_clear_cache = 1, + Opt_commit_interval = 2, + Opt_compress = 3, + Opt_compress_force = 4, + Opt_compress_force_type = 5, + Opt_compress_type = 6, + Opt_degraded = 7, + Opt_device = 8, + Opt_fatal_errors = 9, + Opt_flushoncommit = 10, + Opt_max_inline = 11, + Opt_barrier = 12, + Opt_datacow = 13, + Opt_datasum = 14, + Opt_defrag = 15, + Opt_discard = 16, + Opt_discard_mode = 17, + Opt_ratio = 18, + Opt_rescan_uuid_tree = 19, + Opt_skip_balance = 20, + Opt_space_cache = 21, + Opt_space_cache_version = 22, + Opt_ssd = 23, + Opt_ssd_spread = 24, + Opt_subvol = 25, + Opt_subvol_empty = 26, + Opt_subvolid = 27, + Opt_thread_pool = 28, + Opt_treelog = 29, + Opt_user_subvol_rm_allowed = 30, + Opt_norecovery = 31, + Opt_rescue = 32, + Opt_usebackuproot = 33, + Opt_nologreplay = 34, + Opt_enospc_debug = 35, + Opt_err = 36, +}; + +enum { + Opt_fatal_errors_panic = 0, + Opt_fatal_errors_bug = 1, +}; + +enum { + Opt_discard_sync = 0, + Opt_discard_async = 1, +}; + +enum { + Opt_space_cache_v1 = 0, + Opt_space_cache_v2 = 1, +}; + +enum { + Opt_rescue_usebackuproot = 0, + Opt_rescue_nologreplay = 1, + Opt_rescue_ignorebadroots = 2, + Opt_rescue_ignoredatacsums = 3, + Opt_rescue_ignoremetacsums = 4, + Opt_rescue_ignoresuperflags = 5, + Opt_rescue_parameter_all = 6, +}; + +struct init_sequence { + int (*init_func)(); + void (*exit_func)(); +}; + +typedef void (*crypto_completion_t)(void *, int); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + u32 flags; +}; + +struct crypto_template; + +struct crypto_spawn; + +struct crypto_instance { + struct crypto_alg alg; + struct crypto_template *tmpl; + union { + struct hlist_node list; + struct crypto_spawn *spawns; + }; + struct work_struct free_work; + void *__ctx[0]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + union { + struct crypto_instance *inst; + struct crypto_spawn *next; + }; + const struct crypto_type *frontend; + u32 mask; + bool dead; + bool registered; +}; + +struct rtattr; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + int (*create)(struct crypto_template *, struct rtattr **); + char name[128]; +}; + +struct scatterlist; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_request { + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + struct crypto_async_request base; + void *__ctx[0]; +}; + +struct crypto_skcipher { + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct skcipher_alg_common { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int statesize; + struct crypto_alg base; +}; + +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); + int (*encrypt)(struct skcipher_request *); + int (*decrypt)(struct skcipher_request *); + int (*export)(struct skcipher_request *, void *); + int (*import)(struct skcipher_request *, const void *); + int (*init)(struct crypto_skcipher *); + void (*exit)(struct crypto_skcipher *); + unsigned int walksize; 
+ union { + struct { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int statesize; + struct crypto_alg base; + }; + struct skcipher_alg_common co; + }; +}; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *); + union { + struct { + char head[88]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } src; + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } dst; + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + unsigned int ivsize; + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; +}; + +struct crypto_rfc3686_ctx { + struct crypto_skcipher *child; + u8 nonce[4]; +}; + +struct crypto_rfc3686_req_ctx { + u8 iv[16]; + struct skcipher_request subreq; +}; + +struct disk_stats { + u64 nsecs[4]; + long unsigned int sectors[4]; + long unsigned int ios[4]; + long unsigned int merges[4]; + long unsigned int io_ticks; + local_t in_flight[2]; +}; + +enum stat_group { + STAT_READ = 0, + STAT_WRITE = 1, + STAT_DISCARD = 2, + STAT_FLUSH = 3, + NR_STAT_GROUPS = 4, +}; + +struct sbitmap_word { + long unsigned int word; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int cleared; + raw_spinlock_t swap_lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct sbitmap { + unsigned int depth; + unsigned int shift; + unsigned int map_nr; + bool round_robin; + struct sbitmap_word *map; + unsigned int *alloc_hint; +}; + +struct sbq_wait_state { + wait_queue_head_t wait; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct sbitmap_queue { + struct sbitmap sb; + unsigned int wake_batch; + atomic_t wake_index; + struct sbq_wait_state *ws; + atomic_t ws_active; + unsigned int min_shallow_depth; + atomic_t completion_cnt; + atomic_t wakeup_cnt; +}; + +enum { + DISK_EVENT_MEDIA_CHANGE = 1, + DISK_EVENT_EJECT_REQUEST = 2, +}; + +enum { + DISK_EVENT_FLAG_POLL = 1, + DISK_EVENT_FLAG_UEVENT = 2, + DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 4, +}; + +typedef __u32 req_flags_t; + +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +enum rq_end_io_ret { + RQ_END_IO_NONE = 0, + RQ_END_IO_FREE = 1, +}; + +typedef enum rq_end_io_ret rq_end_io_fn(struct request *, blk_status_t); + +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + int tag; + int internal_tag; + unsigned int timeout; + unsigned int __data_len; + sector_t __sector; + struct bio *bio; + struct bio *biotail; + union { + struct list_head queuelist; + struct request *rq_next; + }; + struct block_device *part; + u64 alloc_time_ns; + u64 start_time_ns; + u64 io_start_time_ns; + short unsigned int wbt_flags; + short unsigned int stats_sectors; + short unsigned int nr_phys_segments; + short unsigned int nr_integrity_segments; + enum mq_rq_state state; + atomic_t ref; + long unsigned int deadline; + 
union { + struct hlist_node hash; + struct llist_node ipi_list; + }; + union { + struct rb_node rb_node; + struct bio_vec special_vec; + }; + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + struct { + unsigned int seq; + rq_end_io_fn *saved_end_io; + } flush; + u64 fifo_time; + rq_end_io_fn *end_io; + void *end_io_data; +}; + +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + unsigned int active_queues; + struct sbitmap_queue bitmap_tags; + struct sbitmap_queue breserved_tags; + struct request **rqs; + struct request **static_rqs; + struct list_head page_list; + spinlock_t lock; +}; + +struct blk_flush_queue { + spinlock_t mq_flush_lock; + unsigned int flush_pending_idx: 1; + unsigned int flush_running_idx: 1; + blk_status_t rq_status; + long unsigned int flush_pending_since; + struct list_head flush_queue[2]; + long unsigned int flush_data_in_flight; + struct request *flush_rq; +}; + +struct blk_mq_queue_map { + unsigned int *mq_map; + unsigned int nr_queues; + unsigned int queue_offset; +}; + +struct blk_mq_tag_set { + const struct blk_mq_ops *ops; + struct blk_mq_queue_map map[3]; + unsigned int nr_maps; + unsigned int nr_hw_queues; + unsigned int queue_depth; + unsigned int reserved_tags; + unsigned int cmd_size; + int numa_node; + unsigned int timeout; + unsigned int flags; + void *driver_data; + struct blk_mq_tags **tags; + struct blk_mq_tags *shared_tags; + struct mutex tag_list_lock; + struct list_head tag_list; + struct srcu_struct *srcu; +}; + +struct blk_mq_hw_ctx { + struct { + spinlock_t lock; + struct list_head dispatch; + long unsigned int state; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct delayed_work run_work; + cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; + long unsigned int flags; + void *sched_data; + struct request_queue *queue; + struct blk_flush_queue *fq; + void *driver_data; + struct sbitmap ctx_map; + struct blk_mq_ctx *dispatch_from; + unsigned int dispatch_busy; + short unsigned int type; + short unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + spinlock_t dispatch_wait_lock; + wait_queue_entry_t dispatch_wait; + atomic_t wait_index; + struct blk_mq_tags *tags; + struct blk_mq_tags *sched_tags; + unsigned int numa_node; + unsigned int queue_num; + atomic_t nr_active; + struct hlist_node cpuhp_online; + struct hlist_node cpuhp_dead; + struct kobject kobj; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct list_head hctx_list; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct blk_mq_queue_data { + struct request *rq; + bool last; +}; + +enum mapping_flags { + AS_EIO = 0, + AS_ENOSPC = 1, + AS_MM_ALL_LOCKS = 2, + AS_UNEVICTABLE = 3, + AS_EXITING = 4, + AS_NO_WRITEBACK_TAGS = 5, + AS_RELEASE_ALWAYS = 6, + AS_STABLE_WRITES = 7, + AS_INACCESSIBLE = 8, + AS_FOLIO_ORDER_BITS = 5, + AS_FOLIO_ORDER_MIN = 16, + AS_FOLIO_ORDER_MAX = 21, +}; + +struct pseudo_fs_context { + const struct super_operations *ops; + const struct xattr_handler * const *xattr; + const struct dentry_operations *dops; + long unsigned int magic; +}; + +struct bdev_inode { + struct block_device bdev; + struct inode vfs_inode; +}; + +enum { + IOPRIO_CLASS_NONE = 0, + IOPRIO_CLASS_RT = 1, + IOPRIO_CLASS_BE = 2, + IOPRIO_CLASS_IDLE = 3, + IOPRIO_CLASS_INVALID = 7, +}; + +struct blkg_iostat { + u64 bytes[3]; + u64 ios[3]; +}; + +struct blkg_iostat_set { + struct u64_stats_sync sync; + struct blkcg_gq *blkg; + struct llist_node lnode; + int lqueued; + struct blkg_iostat cur; + struct blkg_iostat 
last; +}; + +struct blkcg; + +struct blkg_policy_data; + +struct blkcg_gq { + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + struct blkcg_gq *parent; + struct percpu_ref refcnt; + bool online; + struct blkg_iostat_set *iostat_cpu; + struct blkg_iostat_set iostat; + struct blkg_policy_data *pd[6]; + spinlock_t async_bio_lock; + struct bio_list async_bios; + union { + struct work_struct async_bio_work; + struct work_struct free_work; + }; + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; + struct callback_head callback_head; +}; + +typedef __u32 blk_mq_req_flags_t; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = 8, + __REQ_FAILFAST_TRANSPORT = 9, + __REQ_FAILFAST_DRIVER = 10, + __REQ_SYNC = 11, + __REQ_META = 12, + __REQ_PRIO = 13, + __REQ_NOMERGE = 14, + __REQ_IDLE = 15, + __REQ_INTEGRITY = 16, + __REQ_FUA = 17, + __REQ_PREFLUSH = 18, + __REQ_RAHEAD = 19, + __REQ_BACKGROUND = 20, + __REQ_NOWAIT = 21, + __REQ_POLLED = 22, + __REQ_ALLOC_CACHE = 23, + __REQ_SWAP = 24, + __REQ_DRV = 25, + __REQ_FS_PRIVATE = 26, + __REQ_ATOMIC = 27, + __REQ_NOUNMAP = 28, + __REQ_NR_BITS = 29, +}; + +struct elevator_type; + +struct elevator_queue { + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + long unsigned int flags; + struct hlist_head hash[64]; +}; + +struct blk_mq_ctxs; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[3]; + long: 64; + }; + unsigned int cpu; + short unsigned int index_hw[3]; + struct blk_mq_hw_ctx *hctxs[3]; + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; + long: 64; +}; + +struct rchan; + +struct blk_trace { + int trace_state; + struct rchan *rchan; + long unsigned int *sequence; + unsigned char *msg_data; + u16 act_mask; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct list_head running_list; + atomic_t dropped; +}; + +enum { + QUEUE_FLAG_DYING = 0, + QUEUE_FLAG_NOMERGES = 1, + QUEUE_FLAG_SAME_COMP = 2, + QUEUE_FLAG_FAIL_IO = 3, + QUEUE_FLAG_NOXMERGES = 4, + QUEUE_FLAG_SAME_FORCE = 5, + QUEUE_FLAG_INIT_DONE = 6, + QUEUE_FLAG_STATS = 7, + QUEUE_FLAG_REGISTERED = 8, + QUEUE_FLAG_QUIESCED = 9, + QUEUE_FLAG_RQ_ALLOC_TIME = 10, + QUEUE_FLAG_HCTX_ACTIVE = 11, + QUEUE_FLAG_SQ_SCHED = 12, + QUEUE_FLAG_MAX = 13, +}; + +enum { + __RQF_STARTED = 0, + __RQF_FLUSH_SEQ = 1, + __RQF_MIXED_MERGE = 2, + __RQF_DONTPREP = 3, + __RQF_SCHED_TAGS = 4, + __RQF_USE_SCHED = 5, + __RQF_FAILED = 6, + __RQF_QUIET = 7, + __RQF_IO_STAT = 8, + __RQF_PM = 9, + __RQF_HASHED = 10, + __RQF_STATS = 11, + __RQF_SPECIAL_PAYLOAD = 12, + __RQF_ZONE_WRITE_PLUGGING = 13, + __RQF_TIMED_OUT = 14, + __RQF_RESV = 15, + __RQF_BITS = 16, +}; + +struct rchan_buf { + void *start; + void *data; + size_t offset; + size_t subbufs_produced; + size_t subbufs_consumed; + struct rchan *chan; + wait_queue_head_t read_wait; + struct irq_work wakeup_work; + struct dentry *dentry; + struct kref kref; + struct page **page_array; + unsigned int page_count; + unsigned int finalized; + size_t *padding; + size_t prev_padding; + size_t bytes_consumed; + size_t early_bytes; + unsigned int cpu; + long: 64; + long: 64; +}; + +struct rchan_callbacks; + +struct rchan { + u32 version; + size_t subbuf_size; + size_t n_subbufs; + size_t alloc_size; + const struct rchan_callbacks *cb; + struct kref kref; + void *private_data; + size_t last_toobig; + struct rchan_buf **buf; + int is_global; + 
struct list_head list; + struct dentry *parent; + int has_base_filename; + char base_filename[255]; +}; + +struct rchan_callbacks { + int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t); + struct dentry * (*create_buf_file)(const char *, struct dentry *, umode_t, struct rchan_buf *, int *); + int (*remove_buf_file)(struct dentry *); +}; + +enum blktrace_cat { + BLK_TC_READ = 1, + BLK_TC_WRITE = 2, + BLK_TC_FLUSH = 4, + BLK_TC_SYNC = 8, + BLK_TC_SYNCIO = 8, + BLK_TC_QUEUE = 16, + BLK_TC_REQUEUE = 32, + BLK_TC_ISSUE = 64, + BLK_TC_COMPLETE = 128, + BLK_TC_FS = 256, + BLK_TC_PC = 512, + BLK_TC_NOTIFY = 1024, + BLK_TC_AHEAD = 2048, + BLK_TC_META = 4096, + BLK_TC_DISCARD = 8192, + BLK_TC_DRV_DATA = 16384, + BLK_TC_FUA = 32768, + BLK_TC_END = 32768, +}; + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx *queue_ctx; +}; + +typedef unsigned int blk_insert_t; + +struct blk_mq_alloc_data { + struct request_queue *q; + blk_mq_req_flags_t flags; + unsigned int shallow_depth; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + unsigned int nr_tags; + struct rq_list *cached_rqs; + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; +}; + +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE = 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); + int (*request_merge)(struct request_queue *, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *); + void (*prepare_request)(struct request *); + void (*finish_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, blk_insert_t); + struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct request *, u64); + void (*requeue_request)(struct request *); + struct request * (*former_request)(struct request_queue *, struct request *); + struct request * (*next_request)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq *); + void (*exit_icq)(struct io_cq *); +}; + +struct elv_fs_entry; + +struct blk_mq_debugfs_attr; + +struct elevator_type { + struct kmem_cache *icq_cache; + struct elevator_mq_ops ops; + size_t icq_size; + size_t icq_align; + struct elv_fs_entry *elevator_attrs; + const char *elevator_name; + const char *elevator_alias; + struct module *elevator_owner; + const struct blk_mq_debugfs_attr *queue_debugfs_attrs; + const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; + char icq_cache_name[22]; + struct list_head list; +}; + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + +struct blk_mq_debugfs_attr { + const char *name; + umode_t mode; + int (*show)(void *, struct seq_file *); + ssize_t (*write)(void *, const char *, size_t, loff_t *); + const struct seq_operations *seq_ops; 
+}; + +struct blkcg_policy_data; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + refcount_t online_pin; + atomic_t congestion_count; + struct xarray blkg_tree; + struct blkcg_gq *blkg_hint; + struct hlist_head blkg_list; + struct blkcg_policy_data *cpd[6]; + struct list_head all_blkcgs_node; + struct llist_head *lhead; + struct list_head cgwb_list; +}; + +struct blkg_policy_data { + struct blkcg_gq *blkg; + int plid; + bool online; +}; + +struct blkcg_policy_data { + struct blkcg *blkcg; + int plid; +}; + +typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); + +typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); + +typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(struct gendisk *, struct blkcg *, gfp_t); + +typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_stat_pd_fn(struct blkg_policy_data *, struct seq_file *); + +struct blkcg_policy { + int plid; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +struct blkg_rwstat { + struct percpu_counter cpu_cnt[5]; + atomic64_t aux_cnt[5]; +}; + +struct bfq_entity; + +struct bfq_service_tree { + struct rb_root active; + struct rb_root idle; + struct bfq_entity *first_idle; + struct bfq_entity *last_idle; + u64 vtime; + long unsigned int wsum; +}; + +struct bfq_sched_data; + +struct bfq_queue; + +struct bfq_entity { + struct rb_node rb_node; + bool on_st_or_in_serv; + u64 start; + u64 finish; + struct rb_root *tree; + u64 min_start; + int service; + int budget; + int allocated; + int dev_weight; + int weight; + int new_weight; + int orig_weight; + struct bfq_entity *parent; + struct bfq_sched_data *my_sched_data; + struct bfq_sched_data *sched_data; + int prio_changed; + bool in_groups_with_pending_reqs; + struct bfq_queue *last_bfqq_created; +}; + +struct bfq_sched_data { + struct bfq_entity *in_service_entity; + struct bfq_entity *next_in_service; + struct bfq_service_tree service_tree[3]; + long unsigned int bfq_class_idle_last_service; +}; + +struct bfq_weight_counter { + unsigned int weight; + unsigned int num_active; + struct rb_node weights_node; +}; + +struct bfq_ttime { + u64 last_end_request; + u64 ttime_total; + long unsigned int ttime_samples; + u64 ttime_mean; +}; + +struct bfq_data; + +struct bfq_io_cq; + +struct bfq_queue { + int ref; + int stable_ref; + struct bfq_data *bfqd; + short unsigned int ioprio; + short unsigned int ioprio_class; + short unsigned int new_ioprio; + short unsigned int new_ioprio_class; + u64 last_serv_time_ns; + unsigned int inject_limit; + long unsigned int decrease_time_jif; + struct bfq_queue *new_bfqq; + struct rb_node pos_node; + struct rb_root *pos_root; + struct rb_root sort_list; + struct request *next_rq; + int queued[2]; + int meta_pending; + struct list_head fifo; + struct bfq_entity entity; + struct bfq_weight_counter *weight_counter; + int max_budget; + long unsigned int 
budget_timeout; + int dispatched; + long unsigned int flags; + struct list_head bfqq_list; + struct bfq_ttime ttime; + u64 io_start_time; + u64 tot_idle_time; + u32 seek_history; + struct hlist_node burst_list_node; + sector_t last_request_pos; + unsigned int requests_within_timer; + pid_t pid; + struct bfq_io_cq *bic; + long unsigned int wr_cur_max_time; + long unsigned int soft_rt_next_start; + long unsigned int last_wr_start_finish; + unsigned int wr_coeff; + long unsigned int last_idle_bklogged; + long unsigned int service_from_backlogged; + long unsigned int service_from_wr; + long unsigned int wr_start_at_switch_to_srt; + long unsigned int split_time; + long unsigned int first_IO_time; + long unsigned int creation_time; + struct bfq_queue *waker_bfqq; + struct bfq_queue *tentative_waker_bfqq; + unsigned int num_waker_detections; + u64 waker_detection_started; + struct hlist_node woken_list_node; + struct hlist_head woken_list; + unsigned int actuator_idx; +}; + +struct bfq_group; + +struct bfq_data { + struct request_queue *queue; + struct list_head dispatch; + struct bfq_group *root_group; + struct rb_root_cached queue_weights_tree; + unsigned int num_groups_with_pending_reqs; + unsigned int busy_queues[3]; + int wr_busy_queues; + int queued; + int tot_rq_in_driver; + int rq_in_driver[8]; + bool nonrot_with_queueing; + int max_rq_in_driver; + int hw_tag_samples; + int hw_tag; + int budgets_assigned; + struct hrtimer idle_slice_timer; + struct bfq_queue *in_service_queue; + sector_t last_position; + sector_t in_serv_last_pos; + u64 last_completion; + struct bfq_queue *last_completed_rq_bfqq; + struct bfq_queue *last_bfqq_created; + u64 last_empty_occupied_ns; + bool wait_dispatch; + struct request *waited_rq; + bool rqs_injected; + u64 first_dispatch; + u64 last_dispatch; + ktime_t last_budget_start; + ktime_t last_idling_start; + long unsigned int last_idling_start_jiffies; + int peak_rate_samples; + u32 sequential_samples; + u64 tot_sectors_dispatched; + u32 last_rq_max_size; + u64 delta_from_first; + u32 peak_rate; + int bfq_max_budget; + struct list_head active_list[8]; + struct list_head idle_list; + u64 bfq_fifo_expire[2]; + unsigned int bfq_back_penalty; + unsigned int bfq_back_max; + u32 bfq_slice_idle; + int bfq_user_max_budget; + unsigned int bfq_timeout; + bool strict_guarantees; + long unsigned int last_ins_in_burst; + long unsigned int bfq_burst_interval; + int burst_size; + struct bfq_entity *burst_parent_entity; + long unsigned int bfq_large_burst_thresh; + bool large_burst; + struct hlist_head burst_list; + bool low_latency; + unsigned int bfq_wr_coeff; + unsigned int bfq_wr_rt_max_time; + unsigned int bfq_wr_min_idle_time; + long unsigned int bfq_wr_min_inter_arr_async; + unsigned int bfq_wr_max_softrt_rate; + u64 rate_dur_prod; + struct bfq_queue oom_bfqq; + spinlock_t lock; + struct bfq_io_cq *bio_bic; + struct bfq_queue *bio_bfqq; + unsigned int word_depths[4]; + unsigned int full_depth_shift; + unsigned int num_actuators; + sector_t sector[8]; + sector_t nr_sectors[8]; + struct blk_independent_access_range ia_ranges[8]; + unsigned int actuator_load_threshold; +}; + +struct bfq_iocq_bfqq_data { + bool saved_has_short_ttime; + bool saved_IO_bound; + u64 saved_io_start_time; + u64 saved_tot_idle_time; + bool saved_in_large_burst; + bool was_in_burst_list; + unsigned int saved_weight; + long unsigned int saved_wr_coeff; + long unsigned int saved_last_wr_start_finish; + long unsigned int saved_service_from_wr; + long unsigned int saved_wr_start_at_switch_to_srt; + 
unsigned int saved_wr_cur_max_time; + struct bfq_ttime saved_ttime; + u64 saved_last_serv_time_ns; + unsigned int saved_inject_limit; + long unsigned int saved_decrease_time_jif; + struct bfq_queue *stable_merge_bfqq; + bool stably_merged; +}; + +struct bfq_io_cq { + struct io_cq icq; + struct bfq_queue *bfqq[16]; + int ioprio; + uint64_t blkcg_serial_nr; + struct bfq_iocq_bfqq_data bfqq_data[8]; + unsigned int requests; +}; + +struct bfqg_stats { + struct blkg_rwstat bytes; + struct blkg_rwstat ios; +}; + +struct bfq_group { + struct blkg_policy_data pd; + refcount_t ref; + struct bfq_entity entity; + struct bfq_sched_data sched_data; + struct bfq_data *bfqd; + struct bfq_queue *async_bfqq[128]; + struct bfq_queue *async_idle_bfqq[8]; + struct bfq_entity *my_entity; + int active_entities; + int num_queues_with_pending_reqs; + struct rb_root rq_pos_tree; + struct bfqg_stats stats; +}; + +enum bfqq_state_flags { + BFQQF_just_created = 0, + BFQQF_busy = 1, + BFQQF_wait_request = 2, + BFQQF_non_blocking_wait_rq = 3, + BFQQF_fifo_expire = 4, + BFQQF_has_short_ttime = 5, + BFQQF_sync = 6, + BFQQF_IO_bound = 7, + BFQQF_in_large_burst = 8, + BFQQF_softrt_update = 9, + BFQQF_coop = 10, + BFQQF_split_coop = 11, +}; + +enum bfqq_expiration { + BFQQE_TOO_IDLE = 0, + BFQQE_BUDGET_TIMEOUT = 1, + BFQQE_BUDGET_EXHAUSTED = 2, + BFQQE_NO_MORE_REQUESTS = 3, + BFQQE_PREEMPTED = 4, +}; + +enum pcpu_fc { + PCPU_FC_AUTO = 0, + PCPU_FC_EMBED = 1, + PCPU_FC_PAGE = 2, + PCPU_FC_NR = 3, +}; + +enum work_bits { + WORK_STRUCT_PENDING_BIT = 0, + WORK_STRUCT_INACTIVE_BIT = 1, + WORK_STRUCT_PWQ_BIT = 2, + WORK_STRUCT_LINKED_BIT = 3, + WORK_STRUCT_FLAG_BITS = 4, + WORK_STRUCT_COLOR_SHIFT = 4, + WORK_STRUCT_COLOR_BITS = 4, + WORK_STRUCT_PWQ_SHIFT = 8, + WORK_OFFQ_FLAG_SHIFT = 4, + WORK_OFFQ_BH_BIT = 4, + WORK_OFFQ_FLAG_END = 5, + WORK_OFFQ_FLAG_BITS = 1, + WORK_OFFQ_DISABLE_SHIFT = 5, + WORK_OFFQ_DISABLE_BITS = 16, + WORK_OFFQ_POOL_SHIFT = 21, + WORK_OFFQ_LEFT = 43, + WORK_OFFQ_POOL_BITS = 31, +}; + +typedef long unsigned int mpi_limb_t; + +typedef mpi_limb_t *mpi_ptr_t; + +typedef int mpi_size_t; + +enum { + UNAME26 = 131072, + ADDR_NO_RANDOMIZE = 262144, + FDPIC_FUNCPTRS = 524288, + MMAP_PAGE_ZERO = 1048576, + ADDR_COMPAT_LAYOUT = 2097152, + READ_IMPLIES_EXEC = 4194304, + ADDR_LIMIT_32BIT = 8388608, + SHORT_INODE = 16777216, + WHOLE_SECONDS = 33554432, + STICKY_TIMEOUTS = 67108864, + ADDR_LIMIT_3GB = 134217728, +}; + +typedef __kernel_long_t __kernel_ptrdiff_t; + +typedef __kernel_ptrdiff_t ptrdiff_t; + +typedef s16 int16_t; + +typedef u16 uint16_t; + +typedef uint8_t BYTE; + +typedef uint8_t U8; + +typedef uint16_t U16; + +typedef int16_t S16; + +typedef uint32_t U32; + +typedef uint64_t U64; + +typedef unsigned int FSE_CTable; + +typedef enum { + FSE_repeat_none = 0, + FSE_repeat_check = 1, + FSE_repeat_valid = 2, +} FSE_repeat; + +typedef size_t HUF_CElt; + +typedef enum { + HUF_repeat_none = 0, + HUF_repeat_check = 1, + HUF_repeat_valid = 2, +} HUF_repeat; + +typedef struct { + U32 f1c; + U32 f1d; + U32 f7b; + U32 f7c; +} ZSTD_cpuid_t; + +typedef enum { + ZSTDcs_created = 0, + ZSTDcs_init = 1, + ZSTDcs_ongoing = 2, + ZSTDcs_ending = 3, +} ZSTD_compressionStage_e; + +typedef enum { + ZSTD_f_zstd1 = 0, + ZSTD_f_zstd1_magicless = 1, +} ZSTD_format_e; + +typedef enum { + ZSTD_fast = 1, + ZSTD_dfast = 2, + ZSTD_greedy = 3, + ZSTD_lazy = 4, + ZSTD_lazy2 = 5, + ZSTD_btlazy2 = 6, + ZSTD_btopt = 7, + ZSTD_btultra = 8, + ZSTD_btultra2 = 9, +} ZSTD_strategy; + +typedef struct { + unsigned int windowLog; + unsigned int 
chainLog; + unsigned int hashLog; + unsigned int searchLog; + unsigned int minMatch; + unsigned int targetLength; + ZSTD_strategy strategy; +} ZSTD_compressionParameters; + +typedef struct { + int contentSizeFlag; + int checksumFlag; + int noDictIDFlag; +} ZSTD_frameParameters; + +typedef enum { + ZSTD_dictDefaultAttach = 0, + ZSTD_dictForceAttach = 1, + ZSTD_dictForceCopy = 2, + ZSTD_dictForceLoad = 3, +} ZSTD_dictAttachPref_e; + +typedef enum { + ZSTD_ps_auto = 0, + ZSTD_ps_enable = 1, + ZSTD_ps_disable = 2, +} ZSTD_paramSwitch_e; + +typedef struct { + ZSTD_paramSwitch_e enableLdm; + U32 hashLog; + U32 bucketSizeLog; + U32 minMatchLength; + U32 hashRateLog; + U32 windowLog; +} ldmParams_t; + +typedef enum { + ZSTD_bm_buffered = 0, + ZSTD_bm_stable = 1, +} ZSTD_bufferMode_e; + +typedef enum { + ZSTD_sf_noBlockDelimiters = 0, + ZSTD_sf_explicitBlockDelimiters = 1, +} ZSTD_sequenceFormat_e; + +typedef void * (*ZSTD_allocFunction)(void *, size_t); + +typedef void (*ZSTD_freeFunction)(void *, void *); + +typedef struct { + ZSTD_allocFunction customAlloc; + ZSTD_freeFunction customFree; + void *opaque; +} ZSTD_customMem; + +struct ZSTD_CCtx_params_s { + ZSTD_format_e format; + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; + int compressionLevel; + int forceWindow; + size_t targetCBlockSize; + int srcSizeHint; + ZSTD_dictAttachPref_e attachDictPref; + ZSTD_paramSwitch_e literalCompressionMode; + int nbWorkers; + size_t jobSize; + int overlapLog; + int rsyncable; + ldmParams_t ldmParams; + int enableDedicatedDictSearch; + ZSTD_bufferMode_e inBufferMode; + ZSTD_bufferMode_e outBufferMode; + ZSTD_sequenceFormat_e blockDelimiters; + int validateSequences; + ZSTD_paramSwitch_e useBlockSplitter; + ZSTD_paramSwitch_e useRowMatchFinder; + int deterministicRefPrefix; + ZSTD_customMem customMem; +}; + +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params; + +typedef enum { + ZSTD_cwksp_alloc_objects = 0, + ZSTD_cwksp_alloc_buffers = 1, + ZSTD_cwksp_alloc_aligned = 2, +} ZSTD_cwksp_alloc_phase_e; + +typedef enum { + ZSTD_cwksp_dynamic_alloc = 0, + ZSTD_cwksp_static_alloc = 1, +} ZSTD_cwksp_static_alloc_e; + +typedef struct { + void *workspace; + void *workspaceEnd; + void *objectEnd; + void *tableEnd; + void *tableValidEnd; + void *allocStart; + BYTE allocFailed; + int workspaceOversizedDuration; + ZSTD_cwksp_alloc_phase_e phase; + ZSTD_cwksp_static_alloc_e isStatic; +} ZSTD_cwksp; + +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; +}; + +struct POOL_ctx_s; + +typedef struct POOL_ctx_s ZSTD_threadPool; + +typedef struct { + unsigned int offset; + unsigned int litLength; + unsigned int matchLength; + unsigned int rep; +} ZSTD_Sequence; + +typedef struct { + int collectSequences; + ZSTD_Sequence *seqStart; + size_t seqIndex; + size_t maxSequences; +} SeqCollector; + +typedef enum { + ZSTD_llt_none = 0, + ZSTD_llt_literalLength = 1, + ZSTD_llt_matchLength = 2, +} ZSTD_longLengthType_e; + +struct seqDef_s; + +typedef struct seqDef_s seqDef; + +typedef struct { + seqDef *sequencesStart; + seqDef *sequences; + BYTE *litStart; + BYTE *lit; + BYTE *llCode; + BYTE *mlCode; + BYTE *ofCode; + size_t maxNbSeq; + size_t maxNbLit; + ZSTD_longLengthType_e longLengthType; + U32 longLengthPos; +} seqStore_t; + +typedef struct { + const BYTE *nextSrc; + const BYTE *base; + const BYTE *dictBase; + U32 dictLimit; + U32 lowLimit; + U32 nbOverflowCorrections; +} ZSTD_window_t; + +typedef struct { + U32 offset; + 
U32 checksum; +} ldmEntry_t; + +typedef struct { + const BYTE *split; + U32 hash; + U32 checksum; + ldmEntry_t *bucket; +} ldmMatchCandidate_t; + +typedef struct { + ZSTD_window_t window; + ldmEntry_t *hashTable; + U32 loadedDictEnd; + BYTE *bucketOffsets; + size_t splitIndices[64]; + ldmMatchCandidate_t matchCandidates[64]; +} ldmState_t; + +typedef struct { + U32 offset; + U32 litLength; + U32 matchLength; +} rawSeq; + +typedef struct { + rawSeq *seq; + size_t pos; + size_t posInSequence; + size_t size; + size_t capacity; +} rawSeqStore_t; + +typedef struct { + HUF_CElt CTable[257]; + HUF_repeat repeatMode; +} ZSTD_hufCTables_t; + +typedef struct { + FSE_CTable offcodeCTable[193]; + FSE_CTable matchlengthCTable[363]; + FSE_CTable litlengthCTable[329]; + FSE_repeat offcode_repeatMode; + FSE_repeat matchlength_repeatMode; + FSE_repeat litlength_repeatMode; +} ZSTD_fseCTables_t; + +typedef struct { + ZSTD_hufCTables_t huf; + ZSTD_fseCTables_t fse; +} ZSTD_entropyCTables_t; + +typedef struct { + ZSTD_entropyCTables_t entropy; + U32 rep[3]; +} ZSTD_compressedBlockState_t; + +typedef struct { + U32 off; + U32 len; +} ZSTD_match_t; + +typedef struct { + int price; + U32 off; + U32 mlen; + U32 litlen; + U32 rep[3]; +} ZSTD_optimal_t; + +typedef enum { + zop_dynamic = 0, + zop_predef = 1, +} ZSTD_OptPrice_e; + +typedef struct { + unsigned int *litFreq; + unsigned int *litLengthFreq; + unsigned int *matchLengthFreq; + unsigned int *offCodeFreq; + ZSTD_match_t *matchTable; + ZSTD_optimal_t *priceTable; + U32 litSum; + U32 litLengthSum; + U32 matchLengthSum; + U32 offCodeSum; + U32 litSumBasePrice; + U32 litLengthSumBasePrice; + U32 matchLengthSumBasePrice; + U32 offCodeSumBasePrice; + ZSTD_OptPrice_e priceType; + const ZSTD_entropyCTables_t *symbolCosts; + ZSTD_paramSwitch_e literalCompressionMode; +} optState_t; + +struct ZSTD_matchState_t; + +typedef struct ZSTD_matchState_t ZSTD_matchState_t; + +struct ZSTD_matchState_t { + ZSTD_window_t window; + U32 loadedDictEnd; + U32 nextToUpdate; + U32 hashLog3; + U32 rowHashLog; + U16 *tagTable; + U32 hashCache[8]; + U32 *hashTable; + U32 *hashTable3; + U32 *chainTable; + U32 forceNonContiguous; + int dedicatedDictSearch; + optState_t opt; + const ZSTD_matchState_t *dictMatchState; + ZSTD_compressionParameters cParams; + const rawSeqStore_t *ldmSeqStore; +}; + +typedef struct { + ZSTD_compressedBlockState_t *prevCBlock; + ZSTD_compressedBlockState_t *nextCBlock; + ZSTD_matchState_t matchState; +} ZSTD_blockState_t; + +typedef enum { + ZSTDb_not_buffered = 0, + ZSTDb_buffered = 1, +} ZSTD_buffered_policy_e; + +typedef enum { + zcss_init = 0, + zcss_load = 1, + zcss_flush = 2, +} ZSTD_cStreamStage; + +struct ZSTD_inBuffer_s { + const void *src; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_inBuffer_s ZSTD_inBuffer; + +typedef enum { + ZSTD_dct_auto = 0, + ZSTD_dct_rawContent = 1, + ZSTD_dct_fullDict = 2, +} ZSTD_dictContentType_e; + +struct ZSTD_CDict_s; + +typedef struct ZSTD_CDict_s ZSTD_CDict; + +typedef struct { + void *dictBuffer; + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; + ZSTD_CDict *cdict; +} ZSTD_localDict; + +struct ZSTD_prefixDict_s { + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; +}; + +typedef struct ZSTD_prefixDict_s ZSTD_prefixDict; + +typedef enum { + set_basic = 0, + set_rle = 1, + set_compressed = 2, + set_repeat = 3, +} symbolEncodingType_e; + +typedef struct { + symbolEncodingType_e hType; + BYTE hufDesBuffer[128]; + size_t hufDesSize; +} 
ZSTD_hufCTablesMetadata_t; + +typedef struct { + symbolEncodingType_e llType; + symbolEncodingType_e ofType; + symbolEncodingType_e mlType; + BYTE fseTablesBuffer[133]; + size_t fseTablesSize; + size_t lastCountSize; +} ZSTD_fseCTablesMetadata_t; + +typedef struct { + ZSTD_hufCTablesMetadata_t hufMetadata; + ZSTD_fseCTablesMetadata_t fseMetadata; +} ZSTD_entropyCTablesMetadata_t; + +typedef struct { + seqStore_t fullSeqStoreChunk; + seqStore_t firstHalfSeqStore; + seqStore_t secondHalfSeqStore; + seqStore_t currSeqStore; + seqStore_t nextSeqStore; + U32 partitions[196]; + ZSTD_entropyCTablesMetadata_t entropyMetadata; +} ZSTD_blockSplitCtx; + +struct ZSTD_CCtx_s { + ZSTD_compressionStage_e stage; + int cParamsChanged; + int bmi2; + ZSTD_CCtx_params requestedParams; + ZSTD_CCtx_params appliedParams; + ZSTD_CCtx_params simpleApiParams; + U32 dictID; + size_t dictContentSize; + ZSTD_cwksp workspace; + size_t blockSize; + long long unsigned int pledgedSrcSizePlusOne; + long long unsigned int consumedSrcSize; + long long unsigned int producedCSize; + struct xxh64_state xxhState; + ZSTD_customMem customMem; + ZSTD_threadPool *pool; + size_t staticSize; + SeqCollector seqCollector; + int isFirstBlock; + int initialized; + seqStore_t seqStore; + ldmState_t ldmState; + rawSeq *ldmSequences; + size_t maxNbLdmSequences; + rawSeqStore_t externSeqStore; + ZSTD_blockState_t blockState; + U32 *entropyWorkspace; + ZSTD_buffered_policy_e bufferedPolicy; + char *inBuff; + size_t inBuffSize; + size_t inToCompress; + size_t inBuffPos; + size_t inBuffTarget; + char *outBuff; + size_t outBuffSize; + size_t outBuffContentSize; + size_t outBuffFlushedSize; + ZSTD_cStreamStage streamStage; + U32 frameEnded; + ZSTD_inBuffer expectedInBuffer; + size_t expectedOutBufferSize; + ZSTD_localDict localDict; + const ZSTD_CDict *cdict; + ZSTD_prefixDict prefixDict; + ZSTD_blockSplitCtx blockSplitCtx; +}; + +typedef struct ZSTD_CCtx_s ZSTD_CCtx; + +typedef enum { + ZSTD_c_compressionLevel = 100, + ZSTD_c_windowLog = 101, + ZSTD_c_hashLog = 102, + ZSTD_c_chainLog = 103, + ZSTD_c_searchLog = 104, + ZSTD_c_minMatch = 105, + ZSTD_c_targetLength = 106, + ZSTD_c_strategy = 107, + ZSTD_c_enableLongDistanceMatching = 160, + ZSTD_c_ldmHashLog = 161, + ZSTD_c_ldmMinMatch = 162, + ZSTD_c_ldmBucketSizeLog = 163, + ZSTD_c_ldmHashRateLog = 164, + ZSTD_c_contentSizeFlag = 200, + ZSTD_c_checksumFlag = 201, + ZSTD_c_dictIDFlag = 202, + ZSTD_c_nbWorkers = 400, + ZSTD_c_jobSize = 401, + ZSTD_c_overlapLog = 402, + ZSTD_c_experimentalParam1 = 500, + ZSTD_c_experimentalParam2 = 10, + ZSTD_c_experimentalParam3 = 1000, + ZSTD_c_experimentalParam4 = 1001, + ZSTD_c_experimentalParam5 = 1002, + ZSTD_c_experimentalParam6 = 1003, + ZSTD_c_experimentalParam7 = 1004, + ZSTD_c_experimentalParam8 = 1005, + ZSTD_c_experimentalParam9 = 1006, + ZSTD_c_experimentalParam10 = 1007, + ZSTD_c_experimentalParam11 = 1008, + ZSTD_c_experimentalParam12 = 1009, + ZSTD_c_experimentalParam13 = 1010, + ZSTD_c_experimentalParam14 = 1011, + ZSTD_c_experimentalParam15 = 1012, +} ZSTD_cParameter; + +typedef struct { + size_t error; + int lowerBound; + int upperBound; +} ZSTD_bounds; + +typedef enum { + ZSTD_reset_session_only = 1, + ZSTD_reset_parameters = 2, + ZSTD_reset_session_and_parameters = 3, +} ZSTD_ResetDirective; + +struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_outBuffer_s ZSTD_outBuffer; + +typedef ZSTD_CCtx ZSTD_CStream; + +typedef enum { + ZSTD_e_continue = 0, + ZSTD_e_flush = 1, + ZSTD_e_end = 2, +} 
ZSTD_EndDirective; + +struct ZSTD_CDict_s { + const void *dictContent; + size_t dictContentSize; + ZSTD_dictContentType_e dictContentType; + U32 *entropyWorkspace; + ZSTD_cwksp workspace; + ZSTD_matchState_t matchState; + ZSTD_compressedBlockState_t cBlockState; + ZSTD_customMem customMem; + U32 dictID; + int compressionLevel; + ZSTD_paramSwitch_e useRowMatchFinder; +}; + +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +typedef enum { + ZSTD_dlm_byCopy = 0, + ZSTD_dlm_byRef = 1, +} ZSTD_dictLoadMethod_e; + +typedef struct { + long long unsigned int ingested; + long long unsigned int consumed; + long long unsigned int produced; + long long unsigned int flushed; + unsigned int currentJobID; + unsigned int nbActiveWorkers; +} ZSTD_frameProgression; + +typedef enum { + ZSTD_no_overlap = 0, + ZSTD_overlap_src_before_dst = 1, +} ZSTD_overlap_e; + +struct seqDef_s { + U32 offBase; + U16 litLength; + U16 mlBase; +}; + +typedef enum { + ZSTD_dtlm_fast = 0, + ZSTD_dtlm_full = 1, +} ZSTD_dictTableLoadMethod_e; + +typedef enum { + ZSTD_noDict = 0, + ZSTD_extDict = 1, + ZSTD_dictMatchState = 2, + ZSTD_dedicatedDictSearch = 3, +} ZSTD_dictMode_e; + +typedef enum { + ZSTD_cpm_noAttachDict = 0, + ZSTD_cpm_attachDict = 1, + ZSTD_cpm_createCDict = 2, + ZSTD_cpm_unknown = 3, +} ZSTD_cParamMode_e; + +typedef size_t (*ZSTD_blockCompressor)(ZSTD_matchState_t *, seqStore_t *, U32 *, const void *, size_t); + +struct repcodes_s { + U32 rep[3]; +}; + +typedef struct repcodes_s repcodes_t; + +typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1, +} ZSTD_defaultPolicy_e; + +typedef enum { + ZSTDcrp_makeClean = 0, + ZSTDcrp_leaveDirty = 1, +} ZSTD_compResetPolicy_e; + +typedef enum { + ZSTDirp_continue = 0, + ZSTDirp_reset = 1, +} ZSTD_indexResetPolicy_e; + +typedef enum { + ZSTD_resetTarget_CDict = 0, + ZSTD_resetTarget_CCtx = 1, +} ZSTD_resetTarget_e; + +typedef struct { + U32 LLtype; + U32 Offtype; + U32 MLtype; + size_t size; + size_t lastCountSize; +} ZSTD_symbolEncodingTypeStats_t; + +enum { + ZSTDbss_compress = 0, + ZSTDbss_noCompress = 1, +}; + +typedef struct { + U32 *splitLocations; + size_t idx; +} seqStoreSplits; + +typedef struct { + U32 idx; + U32 posInSequence; + size_t posInSrc; +} ZSTD_sequencePosition; + +typedef size_t (*ZSTD_sequenceCopier)(ZSTD_CCtx *, ZSTD_sequencePosition *, const ZSTD_Sequence * const, size_t, const void *, size_t); + +struct pci_device_id { + __u32 vendor; + __u32 device; + __u32 subvendor; + __u32 subdevice; + __u32 class; + __u32 class_mask; + kernel_ulong_t driver_data; + __u32 override_only; +}; + +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + long unsigned int flags; + long unsigned int desc; + struct resource *parent; + struct resource *sibling; + struct resource *child; +}; + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *, char *); + ssize_t (*store)(struct device_driver *, const char *, size_t); +}; + +struct iommu_fault_param; + +struct iommu_fwspec; + +struct iommu_device; + +struct dev_iommu { + struct mutex lock; + struct iommu_fault_param *fault_param; + struct iommu_fwspec *fwspec; + struct iommu_device *iommu_dev; + void *priv; + u32 max_pasids; + u32 attach_deferred: 1; + u32 pci_32bit_workaround: 1; + u32 require_direct: 1; + u32 shadow_on_flush: 1; +}; + +struct pci_bus; + +struct hotplug_slot; + +struct pci_slot { + struct pci_bus *bus; + struct list_head list; + struct hotplug_slot *hotplug; 
+ unsigned char number; + struct kobject kobj; +}; + +typedef short unsigned int pci_bus_flags_t; + +struct pci_dev; + +struct pci_ops; + +struct pci_bus { + struct list_head node; + struct pci_bus *parent; + struct list_head children; + struct list_head devices; + struct pci_dev *self; + struct list_head slots; + struct resource *resource[4]; + struct list_head resources; + struct resource busn_res; + struct pci_ops *ops; + void *sysdata; + struct proc_dir_entry *procdir; + unsigned char number; + unsigned char primary; + unsigned char max_bus_speed; + unsigned char cur_bus_speed; + int domain_nr; + char name[48]; + short unsigned int bridge_ctl; + pci_bus_flags_t bus_flags; + struct device *bridge; + struct device dev; + struct bin_attribute *legacy_io; + struct bin_attribute *legacy_mem; + unsigned int is_added: 1; + unsigned int unsafe_warn: 1; +}; + +enum { + PCI_STD_RESOURCES = 0, + PCI_STD_RESOURCE_END = 5, + PCI_ROM_RESOURCE = 6, + PCI_BRIDGE_RESOURCES = 7, + PCI_BRIDGE_RESOURCE_END = 10, + PCI_NUM_RESOURCES = 11, + DEVICE_COUNT_RESOURCE = 11, +}; + +typedef int pci_power_t; + +typedef unsigned int pci_channel_state_t; + +enum { + pci_channel_io_normal = 1, + pci_channel_io_frozen = 2, + pci_channel_io_perm_failure = 3, +}; + +typedef short unsigned int pci_dev_flags_t; + +struct pci_vpd { + struct mutex lock; + unsigned int len; + u8 cap; +}; + +struct pcie_bwctrl_data; + +struct pci_driver; + +struct pcie_link_state; + +struct pci_dev { + struct list_head bus_list; + struct pci_bus *bus; + struct pci_bus *subordinate; + void *sysdata; + struct proc_dir_entry *procent; + struct pci_slot *slot; + unsigned int devfn; + short unsigned int vendor; + short unsigned int device; + short unsigned int subsystem_vendor; + short unsigned int subsystem_device; + unsigned int class; + u8 revision; + u8 hdr_type; + u32 devcap; + u8 pcie_cap; + u8 msi_cap; + u8 msix_cap; + u8 pcie_mpss: 3; + u8 rom_base_reg; + u8 pin; + u16 pcie_flags_reg; + long unsigned int *dma_alias_mask; + struct pci_driver *driver; + u64 dma_mask; + struct device_dma_parameters dma_parms; + pci_power_t current_state; + u8 pm_cap; + unsigned int pme_support: 5; + unsigned int pme_poll: 1; + unsigned int pinned: 1; + unsigned int config_rrs_sv: 1; + unsigned int imm_ready: 1; + unsigned int d1_support: 1; + unsigned int d2_support: 1; + unsigned int no_d1d2: 1; + unsigned int no_d3cold: 1; + unsigned int bridge_d3: 1; + unsigned int d3cold_allowed: 1; + unsigned int mmio_always_on: 1; + unsigned int wakeup_prepared: 1; + unsigned int skip_bus_pm: 1; + unsigned int ignore_hotplug: 1; + unsigned int hotplug_user_indicators: 1; + unsigned int clear_retrain_link: 1; + unsigned int d3hot_delay; + unsigned int d3cold_delay; + u16 l1ss; + struct pcie_link_state *link_state; + unsigned int ltr_path: 1; + unsigned int pasid_no_tlp: 1; + unsigned int eetlp_prefix_path: 1; + pci_channel_state_t error_state; + struct device dev; + int cfg_size; + unsigned int irq; + struct resource resource[11]; + struct resource driver_exclusive_resource; + bool match_driver; + unsigned int transparent: 1; + unsigned int io_window: 1; + unsigned int pref_window: 1; + unsigned int pref_64_window: 1; + unsigned int multifunction: 1; + unsigned int is_busmaster: 1; + unsigned int no_msi: 1; + unsigned int no_64bit_msi: 1; + unsigned int block_cfg_access: 1; + unsigned int broken_parity_status: 1; + unsigned int irq_reroute_variant: 2; + unsigned int msi_enabled: 1; + unsigned int msix_enabled: 1; + unsigned int ari_enabled: 1; + unsigned int ats_enabled: 
1; + unsigned int pasid_enabled: 1; + unsigned int pri_enabled: 1; + unsigned int tph_enabled: 1; + unsigned int is_managed: 1; + unsigned int is_msi_managed: 1; + unsigned int needs_freset: 1; + unsigned int state_saved: 1; + unsigned int is_physfn: 1; + unsigned int is_virtfn: 1; + unsigned int is_hotplug_bridge: 1; + unsigned int shpc_managed: 1; + unsigned int is_thunderbolt: 1; + unsigned int untrusted: 1; + unsigned int external_facing: 1; + unsigned int broken_intx_masking: 1; + unsigned int io_window_1k: 1; + unsigned int irq_managed: 1; + unsigned int non_compliant_bars: 1; + unsigned int is_probed: 1; + unsigned int link_active_reporting: 1; + unsigned int no_vf_scan: 1; + unsigned int no_command_memory: 1; + unsigned int rom_bar_overlap: 1; + unsigned int rom_attr_enabled: 1; + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; + spinlock_t pcie_cap_lock; + u32 saved_config_space[16]; + struct hlist_head saved_cap_space; + struct bin_attribute *res_attr[11]; + struct bin_attribute *res_attr_wc[11]; + void *msix_base; + raw_spinlock_t msi_lock; + struct pci_vpd vpd; + struct pcie_bwctrl_data *link_bwctrl; + u16 acs_cap; + u8 supported_speeds; + phys_addr_t rom; + size_t romlen; + const char *driver_override; + long unsigned int priv_flags; + u8 reset_methods[8]; +}; + +struct pci_dynids { + spinlock_t lock; + struct list_head list; +}; + +struct pci_error_handlers; + +struct pci_driver { + const char *name; + const struct pci_device_id *id_table; + int (*probe)(struct pci_dev *, const struct pci_device_id *); + void (*remove)(struct pci_dev *); + int (*suspend)(struct pci_dev *, pm_message_t); + int (*resume)(struct pci_dev *); + void (*shutdown)(struct pci_dev *); + int (*sriov_configure)(struct pci_dev *, int); + int (*sriov_set_msix_vec_count)(struct pci_dev *, int); + u32 (*sriov_get_vf_total_msix)(struct pci_dev *); + const struct pci_error_handlers *err_handler; + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + struct device_driver driver; + struct pci_dynids dynids; + bool driver_managed_dma; +}; + +struct pci_ops { + int (*add_bus)(struct pci_bus *); + void (*remove_bus)(struct pci_bus *); + void * (*map_bus)(struct pci_bus *, unsigned int, int); + int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); + int (*write)(struct pci_bus *, unsigned int, int, int, u32); +}; + +typedef unsigned int pci_ers_result_t; + +struct pci_error_handlers { + pci_ers_result_t (*error_detected)(struct pci_dev *, pci_channel_state_t); + pci_ers_result_t (*mmio_enabled)(struct pci_dev *); + pci_ers_result_t (*slot_reset)(struct pci_dev *); + void (*reset_prepare)(struct pci_dev *); + void (*reset_done)(struct pci_dev *); + void (*resume)(struct pci_dev *); + void (*cor_error_detected)(struct pci_dev *); +}; + +enum pci_fixup_pass { + pci_fixup_early = 0, + pci_fixup_header = 1, + pci_fixup_final = 2, + pci_fixup_enable = 3, + pci_fixup_resume = 4, + pci_fixup_suspend = 5, + pci_fixup_resume_early = 6, + pci_fixup_suspend_late = 7, +}; + +struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[16]; +}; + +enum hk_type { + HK_TYPE_TIMER = 0, + HK_TYPE_RCU = 1, + HK_TYPE_MISC = 2, + HK_TYPE_SCHED = 3, + HK_TYPE_TICK = 4, + HK_TYPE_DOMAIN = 5, + HK_TYPE_WQ = 6, + HK_TYPE_MANAGED_IRQ = 7, + HK_TYPE_KTHREAD = 8, + HK_TYPE_MAX = 9, +}; + +enum wb_stat_item { + WB_RECLAIMABLE = 0, + WB_WRITEBACK = 1, + WB_DIRTIED = 2, + WB_WRITTEN = 3, + NR_WB_STAT_ITEMS = 4, +}; + +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE = 0, + 
VERIFYING_FIRMWARE_SIGNATURE = 1, + VERIFYING_KEXEC_PE_SIGNATURE = 2, + VERIFYING_KEY_SIGNATURE = 3, + VERIFYING_KEY_SELF_SIGNATURE = 4, + VERIFYING_UNSPECIFIED_SIGNATURE = 5, + NR__KEY_BEING_USED_FOR = 6, +}; + +struct iova_bitmap; + +struct iommu_fault_page_request { + u32 flags; + u32 pasid; + u32 grpid; + u32 perm; + u64 addr; + u64 private_data[2]; +}; + +struct iommu_fault { + u32 type; + struct iommu_fault_page_request prm; +}; + +struct iommu_page_response { + u32 pasid; + u32 grpid; + u32 code; +}; + +struct iopf_fault { + struct iommu_fault fault; + struct list_head list; +}; + +struct iommu_attach_handle; + +struct iopf_group { + struct iopf_fault last_fault; + struct list_head faults; + size_t fault_count; + struct list_head pending_node; + struct work_struct work; + struct iommu_attach_handle *attach_handle; + struct iommu_fault_param *fault_param; + struct list_head node; + u32 cookie; +}; + +struct iommu_domain; + +struct iommu_attach_handle { + struct iommu_domain *domain; +}; + +struct iopf_queue; + +struct iommu_fault_param { + struct mutex lock; + refcount_t users; + struct callback_head rcu; + struct device *dev; + struct iopf_queue *queue; + struct list_head queue_list; + struct list_head partial; + struct list_head faults; +}; + +struct iopf_queue { + struct workqueue_struct *wq; + struct list_head devices; + struct mutex lock; +}; + +typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, long unsigned int, int, void *); + +struct iommu_domain_geometry { + dma_addr_t aperture_start; + dma_addr_t aperture_end; + bool force_aperture; +}; + +struct iommu_domain_ops; + +struct iommu_dirty_ops; + +struct iommu_ops; + +struct iommu_dma_cookie; + +struct iommu_domain { + unsigned int type; + const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + const struct iommu_ops *owner; + long unsigned int pgsize_bitmap; + struct iommu_domain_geometry geometry; + struct iommu_dma_cookie *iova_cookie; + int (*iopf_handler)(struct iopf_group *); + void *fault_data; + union { + struct { + iommu_fault_handler_t handler; + void *handler_token; + }; + struct { + struct mm_struct *mm; + int users; + struct list_head next; + }; + }; +}; + +typedef unsigned int ioasid_t; + +struct iommu_iotlb_gather; + +struct iommu_user_data_array; + +struct iommu_domain_ops { + int (*attach_dev)(struct iommu_domain *, struct device *); + int (*set_dev_pasid)(struct iommu_domain *, struct device *, ioasid_t, struct iommu_domain *); + int (*map_pages)(struct iommu_domain *, long unsigned int, phys_addr_t, size_t, size_t, int, gfp_t, size_t *); + size_t (*unmap_pages)(struct iommu_domain *, long unsigned int, size_t, size_t, struct iommu_iotlb_gather *); + void (*flush_iotlb_all)(struct iommu_domain *); + int (*iotlb_sync_map)(struct iommu_domain *, long unsigned int, size_t); + void (*iotlb_sync)(struct iommu_domain *, struct iommu_iotlb_gather *); + int (*cache_invalidate_user)(struct iommu_domain *, struct iommu_user_data_array *); + phys_addr_t (*iova_to_phys)(struct iommu_domain *, dma_addr_t); + bool (*enforce_cache_coherency)(struct iommu_domain *); + int (*set_pgtable_quirks)(struct iommu_domain *, long unsigned int); + void (*free)(struct iommu_domain *); +}; + +struct iommu_dirty_bitmap; + +struct iommu_dirty_ops { + int (*set_dirty_tracking)(struct iommu_domain *, bool); + int (*read_and_clear_dirty)(struct iommu_domain *, long unsigned int, size_t, long unsigned int, struct iommu_dirty_bitmap *); +}; + +enum iommu_cap { + IOMMU_CAP_CACHE_COHERENCY = 0, 
+ IOMMU_CAP_NOEXEC = 1, + IOMMU_CAP_PRE_BOOT_PROTECTION = 2, + IOMMU_CAP_ENFORCE_CACHE_COHERENCY = 3, + IOMMU_CAP_DEFERRED_FLUSH = 4, + IOMMU_CAP_DIRTY_TRACKING = 5, +}; + +enum iommu_dev_features { + IOMMU_DEV_FEAT_SVA = 0, + IOMMU_DEV_FEAT_IOPF = 1, +}; + +struct iommufd_viommu; + +struct iommufd_ctx; + +struct iommu_user_data; + +struct iommu_ops { + bool (*capable)(struct device *, enum iommu_cap); + void * (*hw_info)(struct device *, u32 *, u32 *); + struct iommu_domain * (*domain_alloc)(unsigned int); + struct iommu_domain * (*domain_alloc_paging_flags)(struct device *, u32, const struct iommu_user_data *); + struct iommu_domain * (*domain_alloc_paging)(struct device *); + struct iommu_domain * (*domain_alloc_sva)(struct device *, struct mm_struct *); + struct iommu_domain * (*domain_alloc_nested)(struct device *, struct iommu_domain *, u32, const struct iommu_user_data *); + struct iommu_device * (*probe_device)(struct device *); + void (*release_device)(struct device *); + void (*probe_finalize)(struct device *); + struct iommu_group * (*device_group)(struct device *); + void (*get_resv_regions)(struct device *, struct list_head *); + int (*of_xlate)(struct device *, const struct of_phandle_args *); + bool (*is_attach_deferred)(struct device *); + int (*dev_enable_feat)(struct device *, enum iommu_dev_features); + int (*dev_disable_feat)(struct device *, enum iommu_dev_features); + void (*page_response)(struct device *, struct iopf_fault *, struct iommu_page_response *); + int (*def_domain_type)(struct device *); + void (*remove_dev_pasid)(struct device *, ioasid_t, struct iommu_domain *); + struct iommufd_viommu * (*viommu_alloc)(struct device *, struct iommu_domain *, struct iommufd_ctx *, unsigned int); + const struct iommu_domain_ops *default_domain_ops; + long unsigned int pgsize_bitmap; + struct module *owner; + struct iommu_domain *identity_domain; + struct iommu_domain *blocked_domain; + struct iommu_domain *release_domain; + struct iommu_domain *default_domain; + u8 user_pasid_table: 1; +}; + +struct iommu_iotlb_gather { + long unsigned int start; + long unsigned int end; + size_t pgsize; + struct list_head freelist; + bool queued; +}; + +struct iommu_dirty_bitmap { + struct iova_bitmap *bitmap; + struct iommu_iotlb_gather *gather; +}; + +struct iommu_user_data { + unsigned int type; + void *uptr; + size_t len; +}; + +struct iommu_user_data_array { + unsigned int type; + void *uptr; + size_t entry_len; + u32 entry_num; +}; + +struct iommu_device { + struct list_head list; + const struct iommu_ops *ops; + struct fwnode_handle *fwnode; + struct device *dev; + struct iommu_group *singleton_group; + u32 max_pasids; +}; + +struct iommu_fwspec { + struct fwnode_handle *iommu_fwnode; + u32 flags; + unsigned int num_ids; + u32 ids[0]; +}; + +struct pci_dynid { + struct list_head node; + struct pci_device_id id; +}; + +struct drv_dev_and_id { + struct pci_driver *drv; + struct pci_dev *dev; + const struct pci_device_id *id; +}; + +struct acpi_device; + +struct clk_bulk_data { + const char *id; + struct clk *clk; +}; + +typedef void (*dr_release_t)(struct device *, void *); + +typedef int (*dr_match_t)(struct device *, void *, void *); + +struct devm_clk_state { + struct clk *clk; + void (*exit)(struct clk *); +}; + +struct clk_bulk_devres { + struct clk_bulk_data *clks; + int num_clks; +}; + +struct clk_core; + +struct clk { + struct clk_core *core; + struct device *dev; + const char *dev_id; + const char *con_id; + long unsigned int min_rate; + long unsigned int max_rate; + 
unsigned int exclusive_count; + struct hlist_node clks_node; +}; + +typedef short unsigned int ushort; + +struct ld_semaphore { + atomic_long_t count; + raw_spinlock_t wait_lock; + unsigned int wait_readers; + struct list_head read_wait; + struct list_head write_wait; +}; + +typedef unsigned int tcflag_t; + +typedef unsigned char cc_t; + +typedef unsigned int speed_t; + +struct ktermios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct winsize { + short unsigned int ws_row; + short unsigned int ws_col; + short unsigned int ws_xpixel; + short unsigned int ws_ypixel; +}; + +struct tty_driver; + +struct tty_port; + +struct tty_operations; + +struct tty_ldisc; + +struct tty_struct { + struct kref kref; + int index; + struct device *dev; + struct tty_driver *driver; + struct tty_port *port; + const struct tty_operations *ops; + struct tty_ldisc *ldisc; + struct ld_semaphore ldisc_sem; + struct mutex atomic_write_lock; + struct mutex legacy_mutex; + struct mutex throttle_mutex; + struct rw_semaphore termios_rwsem; + struct mutex winsize_mutex; + struct ktermios termios; + struct ktermios termios_locked; + char name[64]; + long unsigned int flags; + int count; + unsigned int receive_room; + struct winsize winsize; + struct { + spinlock_t lock; + bool stopped; + bool tco_stopped; + } flow; + struct { + struct pid *pgrp; + struct pid *session; + spinlock_t lock; + unsigned char pktstatus; + bool packet; + } ctrl; + bool hw_stopped; + bool closing; + int flow_change; + struct tty_struct *link; + struct fasync_struct *fasync; + wait_queue_head_t write_wait; + wait_queue_head_t read_wait; + struct work_struct hangup_work; + void *disc_data; + void *driver_data; + spinlock_t files_lock; + int write_cnt; + u8 *write_buf; + struct list_head tty_files; + struct work_struct SAK_work; +}; + +struct serial_icounter_struct; + +struct serial_struct; + +struct tty_operations { + struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int); + int (*install)(struct tty_driver *, struct tty_struct *); + void (*remove)(struct tty_driver *, struct tty_struct *); + int (*open)(struct tty_struct *, struct file *); + void (*close)(struct tty_struct *, struct file *); + void (*shutdown)(struct tty_struct *); + void (*cleanup)(struct tty_struct *); + ssize_t (*write)(struct tty_struct *, const u8 *, size_t); + int (*put_char)(struct tty_struct *, u8); + void (*flush_chars)(struct tty_struct *); + unsigned int (*write_room)(struct tty_struct *); + unsigned int (*chars_in_buffer)(struct tty_struct *); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + void (*throttle)(struct tty_struct *); + void (*unthrottle)(struct tty_struct *); + void (*stop)(struct tty_struct *); + void (*start)(struct tty_struct *); + void (*hangup)(struct tty_struct *); + int (*break_ctl)(struct tty_struct *, int); + void (*flush_buffer)(struct tty_struct *); + int (*ldisc_ok)(struct tty_struct *, int); + void (*set_ldisc)(struct tty_struct *); + void (*wait_until_sent)(struct tty_struct *, int); + void (*send_xchar)(struct tty_struct *, u8); + int (*tiocmget)(struct tty_struct *); + int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); + int (*resize)(struct tty_struct *, struct winsize *); + int (*get_icount)(struct tty_struct *, struct 
serial_icounter_struct *); + int (*get_serial)(struct tty_struct *, struct serial_struct *); + int (*set_serial)(struct tty_struct *, struct serial_struct *); + void (*show_fdinfo)(struct tty_struct *, struct seq_file *); + int (*proc_show)(struct seq_file *, void *); +}; + +struct tty_driver { + struct kref kref; + struct cdev **cdevs; + struct module *owner; + const char *driver_name; + const char *name; + int name_base; + int major; + int minor_start; + unsigned int num; + short int type; + short int subtype; + struct ktermios init_termios; + long unsigned int flags; + struct proc_dir_entry *proc_entry; + struct tty_driver *other; + struct tty_struct **ttys; + struct tty_port **ports; + struct ktermios **termios; + void *driver_state; + const struct tty_operations *ops; + struct list_head tty_drivers; +}; + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + unsigned int used; + unsigned int size; + unsigned int commit; + unsigned int lookahead; + unsigned int read; + bool flags; + long: 0; + u8 data[0]; +}; + +struct tty_bufhead { + struct tty_buffer *head; + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; + atomic_t mem_used; + int mem_limit; + struct tty_buffer *tail; +}; + +struct __kfifo { + unsigned int in; + unsigned int out; + unsigned int mask; + unsigned int esize; + void *data; +}; + +struct tty_port_operations; + +struct tty_port_client_operations; + +struct tty_port { + struct tty_bufhead buf; + struct tty_struct *tty; + struct tty_struct *itty; + const struct tty_port_operations *ops; + const struct tty_port_client_operations *client_ops; + spinlock_t lock; + int blocked_open; + int count; + wait_queue_head_t open_wait; + wait_queue_head_t delta_msr_wait; + long unsigned int flags; + long unsigned int iflags; + unsigned char console: 1; + struct mutex mutex; + struct mutex buf_mutex; + u8 *xmit_buf; + struct { + union { + struct __kfifo kfifo; + u8 *type; + const u8 *const_type; + char (*rectype)[0]; + u8 *ptr; + const u8 *ptr_const; + }; + u8 buf[0]; + } xmit_fifo; + unsigned int close_delay; + unsigned int closing_wait; + int drain_delay; + struct kref kref; + void *client_data; +}; + +struct tty_ldisc_ops { + char *name; + int num; + int (*open)(struct tty_struct *); + void (*close)(struct tty_struct *); + void (*flush_buffer)(struct tty_struct *); + ssize_t (*read)(struct tty_struct *, struct file *, u8 *, size_t, void **, long unsigned int); + ssize_t (*write)(struct tty_struct *, struct file *, const u8 *, size_t); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); + void (*hangup)(struct tty_struct *); + void (*receive_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_struct *); + void (*dcd_change)(struct tty_struct *, bool); + size_t (*receive_buf2)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + struct module *owner; +}; + +struct tty_ldisc { + struct tty_ldisc_ops *ops; + struct tty_struct *tty; +}; + +struct tty_port_operations { + bool (*carrier_raised)(struct tty_port *); + void (*dtr_rts)(struct tty_port *, bool); + void (*shutdown)(struct tty_port *); + int 
(*activate)(struct tty_port *, struct tty_struct *); + void (*destruct)(struct tty_port *); +}; + +struct tty_port_client_operations { + size_t (*receive_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_port *); +}; + +struct unipair { + short unsigned int unicode; + short unsigned int fontpos; +}; + +struct unimapdesc { + short unsigned int entry_ct; + struct unipair *entries; +}; + +struct kbentry { + unsigned char kb_table; + unsigned char kb_index; + short unsigned int kb_value; +}; + +struct kbsentry { + unsigned char kb_func; + unsigned char kb_string[512]; +}; + +struct kbkeycode { + unsigned int scancode; + unsigned int keycode; +}; + +struct kbd_repeat { + int delay; + int period; +}; + +struct console_font_op { + unsigned int op; + unsigned int flags; + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct console_font { + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct vt_mode { + char mode; + char waitv; + short int relsig; + short int acqsig; + short int frsig; +}; + +struct vt_stat { + short unsigned int v_active; + short unsigned int v_signal; + short unsigned int v_state; +}; + +struct vt_sizes { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_scrollsize; +}; + +struct vt_consize { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_vlin; + short unsigned int v_clin; + short unsigned int v_vcol; + short unsigned int v_ccol; +}; + +struct vt_event { + unsigned int event; + unsigned int oldev; + unsigned int newev; + unsigned int pad[4]; +}; + +struct vt_setactivate { + unsigned int console; + struct vt_mode mode; +}; + +enum vesa_blank_mode { + VESA_NO_BLANKING = 0, + VESA_VSYNC_SUSPEND = 1, + VESA_HSYNC_SUSPEND = 2, + VESA_POWERDOWN = 3, + VESA_BLANK_MAX = 3, +}; + +enum con_scroll { + SM_UP = 0, + SM_DOWN = 1, +}; + +enum vc_intensity { + VCI_HALF_BRIGHT = 0, + VCI_NORMAL = 1, + VCI_BOLD = 2, + VCI_MASK = 3, +}; + +struct vc_data; + +struct consw { + struct module *owner; + const char * (*con_startup)(); + void (*con_init)(struct vc_data *, bool); + void (*con_deinit)(struct vc_data *); + void (*con_clear)(struct vc_data *, unsigned int, unsigned int, unsigned int); + void (*con_putc)(struct vc_data *, u16, unsigned int, unsigned int); + void (*con_putcs)(struct vc_data *, const u16 *, unsigned int, unsigned int, unsigned int); + void (*con_cursor)(struct vc_data *, bool); + bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int); + bool (*con_switch)(struct vc_data *); + bool (*con_blank)(struct vc_data *, enum vesa_blank_mode, bool); + int (*con_font_set)(struct vc_data *, const struct console_font *, unsigned int, unsigned int); + int (*con_font_get)(struct vc_data *, struct console_font *, unsigned int); + int (*con_font_default)(struct vc_data *, struct console_font *, const char *); + int (*con_resize)(struct vc_data *, unsigned int, unsigned int, bool); + void (*con_set_palette)(struct vc_data *, const unsigned char *); + void (*con_scrolldelta)(struct vc_data *, int); + bool (*con_set_origin)(struct vc_data *); + void (*con_save_screen)(struct vc_data *); + u8 (*con_build_attr)(struct vc_data *, u8, enum vc_intensity, bool, bool, bool, bool); + void (*con_invert_region)(struct vc_data *, u16 *, int); + void (*con_debug_enter)(struct vc_data 
*); + void (*con_debug_leave)(struct vc_data *); +}; + +struct vc_state { + unsigned int x; + unsigned int y; + unsigned char color; + unsigned char Gx_charset[2]; + unsigned int charset: 1; + enum vc_intensity intensity; + bool italic; + bool underline; + bool blink; + bool reverse; +}; + +struct uni_pagedict; + +struct vc_data { + struct tty_port port; + struct vc_state state; + struct vc_state saved_state; + short unsigned int vc_num; + unsigned int vc_cols; + unsigned int vc_rows; + unsigned int vc_size_row; + unsigned int vc_scan_lines; + unsigned int vc_cell_height; + long unsigned int vc_origin; + long unsigned int vc_scr_end; + long unsigned int vc_visible_origin; + unsigned int vc_top; + unsigned int vc_bottom; + const struct consw *vc_sw; + short unsigned int *vc_screenbuf; + unsigned int vc_screenbuf_size; + unsigned char vc_mode; + unsigned char vc_attr; + unsigned char vc_def_color; + unsigned char vc_ulcolor; + unsigned char vc_itcolor; + unsigned char vc_halfcolor; + unsigned int vc_cursor_type; + short unsigned int vc_complement_mask; + short unsigned int vc_s_complement_mask; + long unsigned int vc_pos; + short unsigned int vc_hi_font_mask; + struct console_font vc_font; + short unsigned int vc_video_erase_char; + unsigned int vc_state; + unsigned int vc_npar; + unsigned int vc_par[16]; + struct vt_mode vt_mode; + struct pid *vt_pid; + int vt_newvt; + wait_queue_head_t paste_wait; + unsigned int vc_disp_ctrl: 1; + unsigned int vc_toggle_meta: 1; + unsigned int vc_decscnm: 1; + unsigned int vc_decom: 1; + unsigned int vc_decawm: 1; + unsigned int vc_deccm: 1; + unsigned int vc_decim: 1; + unsigned int vc_priv: 3; + unsigned int vc_need_wrap: 1; + unsigned int vc_can_do_color: 1; + unsigned int vc_report_mouse: 2; + unsigned char vc_utf: 1; + unsigned char vc_utf_count; + int vc_utf_char; + long unsigned int vc_tab_stop[4]; + unsigned char vc_palette[48]; + short unsigned int *vc_translate; + unsigned int vc_bell_pitch; + unsigned int vc_bell_duration; + short unsigned int vc_cur_blink_ms; + struct vc_data **vc_display_fg; + struct uni_pagedict *uni_pagedict; + struct uni_pagedict **uni_pagedict_loc; + u32 **vc_uni_lines; +}; + +struct vc { + struct vc_data *d; + struct work_struct SAK_work; +}; + +struct vt_spawn_console { + spinlock_t lock; + struct pid *pid; + int sig; +}; + +struct vt_event_wait { + struct list_head list; + struct vt_event event; + int done; +}; + +struct wake_irq { + struct device *dev; + unsigned int status; + int irq; + const char *name; +}; + +struct pm_qos_flags { + struct list_head list; + s32 effective_flags; +}; + +struct dev_pm_qos_request; + +struct dev_pm_qos { + struct pm_qos_constraints resume_latency; + struct pm_qos_constraints latency_tolerance; + struct freq_constraints freq; + struct pm_qos_flags flags; + struct dev_pm_qos_request *resume_latency_req; + struct dev_pm_qos_request *latency_tolerance_req; + struct dev_pm_qos_request *flags_req; +}; + +enum device_link_state { + DL_STATE_NONE = -1, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE = 1, + DL_STATE_CONSUMER_PROBE = 2, + DL_STATE_ACTIVE = 3, + DL_STATE_SUPPLIER_UNBIND = 4, +}; + +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + struct device link_dev; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; + struct work_struct rm_work; + bool supplier_preactivated; +}; + +struct dev_pm_domain_attach_data { + const char * const *pd_names; + const u32 num_pd_names; + 
const u32 pd_flags; +}; + +struct dev_pm_domain_list { + struct device **pd_devs; + struct device_link **pd_links; + u32 *opp_tokens; + u32 num_pds; +}; + +struct opp_table; + +struct dev_pm_opp; + +struct regulator; + +typedef int (*config_regulators_t)(struct device *, struct dev_pm_opp *, struct dev_pm_opp *, struct regulator **, unsigned int); + +typedef int (*config_clks_t)(struct device *, struct opp_table *, struct dev_pm_opp *, void *, bool); + +struct dev_pm_opp_config { + const char * const *clk_names; + config_clks_t config_clks; + const char *prop_name; + config_regulators_t config_regulators; + const unsigned int *supported_hw; + unsigned int supported_hw_count; + const char * const *regulator_names; + struct device *required_dev; + unsigned int required_dev_index; +}; + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; +}; + +enum dev_pm_qos_req_type { + DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE = 2, + DEV_PM_QOS_MIN_FREQUENCY = 3, + DEV_PM_QOS_MAX_FREQUENCY = 4, + DEV_PM_QOS_FLAGS = 5, +}; + +struct dev_pm_qos_request { + enum dev_pm_qos_req_type type; + union { + struct plist_node pnode; + struct pm_qos_flags_request flr; + struct freq_qos_request freq; + } data; + struct device *dev; +}; + +typedef unsigned int uint; + +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +enum sam_status { + SAM_STAT_GOOD = 0, + SAM_STAT_CHECK_CONDITION = 2, + SAM_STAT_CONDITION_MET = 4, + SAM_STAT_BUSY = 8, + SAM_STAT_INTERMEDIATE = 16, + SAM_STAT_INTERMEDIATE_CONDITION_MET = 20, + SAM_STAT_RESERVATION_CONFLICT = 24, + SAM_STAT_COMMAND_TERMINATED = 34, + SAM_STAT_TASK_SET_FULL = 40, + SAM_STAT_ACA_ACTIVE = 48, + SAM_STAT_TASK_ABORTED = 64, +}; + +struct scsi_sense_hdr { + u8 response_code; + u8 sense_key; + u8 asc; + u8 ascq; + u8 byte4; + u8 byte5; + u8 byte6; + u8 additional_length; +}; + +enum scsi_host_status { + DID_OK = 0, + DID_NO_CONNECT = 1, + DID_BUS_BUSY = 2, + DID_TIME_OUT = 3, + DID_BAD_TARGET = 4, + DID_ABORT = 5, + DID_PARITY = 6, + DID_ERROR = 7, + DID_RESET = 8, + DID_BAD_INTR = 9, + DID_PASSTHROUGH = 10, + DID_SOFT_ERROR = 11, + DID_IMM_RETRY = 12, + DID_REQUEUE = 13, + DID_TRANSPORT_DISRUPTED = 14, + DID_TRANSPORT_FAILFAST = 15, + DID_TRANSPORT_MARGINAL = 20, +}; + +struct scatterlist { + long unsigned int page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; + unsigned int dma_length; + unsigned int dma_flags; +}; + +struct sg_table { + struct scatterlist *sgl; + unsigned int nents; + unsigned int orig_nents; +}; + +typedef __u64 blist_flags_t; + +struct scsi_mode_data { + __u32 length; + __u16 block_descriptor_length; + __u8 medium_type; + __u8 device_specific; + __u8 header_length; + __u8 longlba: 1; +}; + +enum scsi_device_state { + SDEV_CREATED = 1, + SDEV_RUNNING = 2, + SDEV_CANCEL = 3, + SDEV_DEL = 4, + SDEV_QUIESCE = 5, + SDEV_OFFLINE = 6, + SDEV_TRANSPORT_OFFLINE = 7, + SDEV_BLOCK = 8, + SDEV_CREATED_BLOCK = 9, +}; + +struct scsi_vpd { + struct callback_head rcu; + int len; + unsigned char data[0]; +}; + +struct Scsi_Host; + +struct scsi_target; + +struct scsi_device_handler; + +struct bsg_device; + +struct scsi_device { + struct Scsi_Host *host; + struct request_queue *request_queue; + struct list_head siblings; + struct list_head same_target_siblings; + struct sbitmap budget_map; + atomic_t device_blocked; + atomic_t restarts; + spinlock_t list_lock; + struct list_head starved_entry; + short unsigned int queue_depth; + short 
unsigned int max_queue_depth; + short unsigned int last_queue_full_depth; + short unsigned int last_queue_full_count; + long unsigned int last_queue_full_time; + long unsigned int queue_ramp_up_period; + long unsigned int last_queue_ramp_up; + unsigned int id; + unsigned int channel; + u64 lun; + unsigned int manufacturer; + unsigned int sector_size; + void *hostdata; + unsigned char type; + char scsi_level; + char inq_periph_qual; + struct mutex inquiry_mutex; + unsigned char inquiry_len; + unsigned char *inquiry; + const char *vendor; + const char *model; + const char *rev; + struct scsi_vpd *vpd_pg0; + struct scsi_vpd *vpd_pg83; + struct scsi_vpd *vpd_pg80; + struct scsi_vpd *vpd_pg89; + struct scsi_vpd *vpd_pgb0; + struct scsi_vpd *vpd_pgb1; + struct scsi_vpd *vpd_pgb2; + struct scsi_vpd *vpd_pgb7; + struct scsi_target *sdev_target; + blist_flags_t sdev_bflags; + unsigned int eh_timeout; + unsigned int manage_system_start_stop: 1; + unsigned int manage_runtime_start_stop: 1; + unsigned int manage_shutdown: 1; + unsigned int force_runtime_start_on_system_start: 1; + unsigned int removable: 1; + unsigned int changed: 1; + unsigned int busy: 1; + unsigned int lockable: 1; + unsigned int locked: 1; + unsigned int borken: 1; + unsigned int disconnect: 1; + unsigned int soft_reset: 1; + unsigned int sdtr: 1; + unsigned int wdtr: 1; + unsigned int ppr: 1; + unsigned int tagged_supported: 1; + unsigned int simple_tags: 1; + unsigned int was_reset: 1; + unsigned int expecting_cc_ua: 1; + unsigned int use_10_for_rw: 1; + unsigned int use_10_for_ms: 1; + unsigned int set_dbd_for_ms: 1; + unsigned int read_before_ms: 1; + unsigned int no_report_opcodes: 1; + unsigned int no_write_same: 1; + unsigned int use_16_for_rw: 1; + unsigned int use_16_for_sync: 1; + unsigned int skip_ms_page_8: 1; + unsigned int skip_ms_page_3f: 1; + unsigned int skip_vpd_pages: 1; + unsigned int try_vpd_pages: 1; + unsigned int use_192_bytes_for_3f: 1; + unsigned int no_start_on_add: 1; + unsigned int allow_restart: 1; + unsigned int start_stop_pwr_cond: 1; + unsigned int no_uld_attach: 1; + unsigned int select_no_atn: 1; + unsigned int fix_capacity: 1; + unsigned int guess_capacity: 1; + unsigned int retry_hwerror: 1; + unsigned int last_sector_bug: 1; + unsigned int no_read_disc_info: 1; + unsigned int no_read_capacity_16: 1; + unsigned int try_rc_10_first: 1; + unsigned int security_supported: 1; + unsigned int is_visible: 1; + unsigned int wce_default_on: 1; + unsigned int no_dif: 1; + unsigned int broken_fua: 1; + unsigned int lun_in_cdb: 1; + unsigned int unmap_limit_for_ws: 1; + unsigned int rpm_autosuspend: 1; + unsigned int ignore_media_change: 1; + unsigned int silence_suspend: 1; + unsigned int no_vpd_size: 1; + unsigned int cdl_supported: 1; + unsigned int cdl_enable: 1; + unsigned int queue_stopped; + bool offline_already; + atomic_t disk_events_disable_depth; + long unsigned int supported_events[1]; + long unsigned int pending_events[1]; + struct list_head event_list; + struct work_struct event_work; + unsigned int max_device_blocked; + atomic_t iorequest_cnt; + atomic_t iodone_cnt; + atomic_t ioerr_cnt; + atomic_t iotmo_cnt; + struct device sdev_gendev; + struct device sdev_dev; + struct work_struct requeue_work; + struct scsi_device_handler *handler; + void *handler_data; + size_t dma_drain_len; + void *dma_drain_buf; + unsigned int sg_timeout; + unsigned int sg_reserved_size; + struct bsg_device *bsg_dev; + unsigned char access_state; + struct mutex state_mutex; + enum scsi_device_state sdev_state; + 
struct task_struct *quiesced_by; + long unsigned int sdev_data[0]; +}; + +enum scsi_host_state { + SHOST_CREATED = 1, + SHOST_RUNNING = 2, + SHOST_CANCEL = 3, + SHOST_DEL = 4, + SHOST_RECOVERY = 5, + SHOST_CANCEL_RECOVERY = 6, + SHOST_DEL_RECOVERY = 7, +}; + +struct scsi_host_template; + +struct scsi_transport_template; + +struct Scsi_Host { + struct list_head __devices; + struct list_head __targets; + struct list_head starved_list; + spinlock_t default_lock; + spinlock_t *host_lock; + struct mutex scan_mutex; + struct list_head eh_abort_list; + struct list_head eh_cmd_q; + struct task_struct *ehandler; + struct completion *eh_action; + wait_queue_head_t host_wait; + const struct scsi_host_template *hostt; + struct scsi_transport_template *transportt; + struct kref tagset_refcnt; + struct completion tagset_freed; + struct blk_mq_tag_set tag_set; + atomic_t host_blocked; + unsigned int host_failed; + unsigned int host_eh_scheduled; + unsigned int host_no; + int eh_deadline; + long unsigned int last_reset; + unsigned int max_channel; + unsigned int max_id; + u64 max_lun; + unsigned int unique_id; + short unsigned int max_cmd_len; + int this_id; + int can_queue; + short int cmd_per_lun; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int opt_sectors; + unsigned int max_segment_size; + unsigned int dma_alignment; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + unsigned int nr_hw_queues; + unsigned int nr_maps; + unsigned int active_mode: 2; + unsigned int host_self_blocked: 1; + unsigned int reverse_ordering: 1; + unsigned int tmf_in_progress: 1; + unsigned int async_scan: 1; + unsigned int eh_noresume: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int queuecommand_may_block: 1; + unsigned int short_inquiry: 1; + unsigned int no_scsi2_lun_in_cdb: 1; + unsigned int no_highmem: 1; + struct workqueue_struct *work_q; + struct workqueue_struct *tmf_work_q; + unsigned int max_host_blocked; + unsigned int prot_capabilities; + unsigned char prot_guard_type; + long unsigned int base; + long unsigned int io_port; + unsigned char n_io_port; + unsigned char dma_channel; + unsigned int irq; + enum scsi_host_state shost_state; + struct device shost_gendev; + struct device shost_dev; + void *shost_data; + struct device *dma_dev; + int rpm_autosuspend_delay; + long unsigned int hostdata[0]; +}; + +enum scsi_target_state { + STARGET_CREATED = 1, + STARGET_RUNNING = 2, + STARGET_REMOVE = 3, + STARGET_CREATED_REMOVE = 4, + STARGET_DEL = 5, +}; + +struct scsi_target { + struct scsi_device *starget_sdev_user; + struct list_head siblings; + struct list_head devices; + struct device dev; + struct kref reap_ref; + unsigned int channel; + unsigned int id; + unsigned int create: 1; + unsigned int single_lun: 1; + unsigned int pdt_1f_for_no_lun: 1; + unsigned int no_report_luns: 1; + unsigned int expecting_lun_change: 1; + atomic_t target_busy; + atomic_t target_blocked; + unsigned int can_queue; + unsigned int max_target_blocked; + char scsi_level; + enum scsi_target_state state; + void *hostdata; + long unsigned int starget_data[0]; +}; + +struct scsi_failure { + int result; + u8 sense; + u8 asc; + u8 ascq; + s8 allowed; + s8 retries; +}; + +struct scsi_failures { + int total_allowed; + int total_retries; + struct scsi_failure *failure_definitions; +}; + +struct scsi_exec_args { + unsigned char *sense; + unsigned int sense_len; + struct scsi_sense_hdr *sshdr; + blk_mq_req_flags_t req_flags; + 
int scmd_flags; + int *resid; + struct scsi_failures *failures; +}; + +struct scsi_data_buffer { + struct sg_table table; + unsigned int length; +}; + +enum scsi_cmnd_submitter { + SUBMITTED_BY_BLOCK_LAYER = 0, + SUBMITTED_BY_SCSI_ERROR_HANDLER = 1, + SUBMITTED_BY_SCSI_RESET_IOCTL = 2, +} __attribute__((mode(byte))); + +struct scsi_cmnd { + struct scsi_device *device; + struct list_head eh_entry; + struct delayed_work abort_work; + struct callback_head rcu; + int eh_eflags; + int budget_token; + long unsigned int jiffies_at_alloc; + int retries; + int allowed; + unsigned char prot_op; + unsigned char prot_type; + unsigned char prot_flags; + enum scsi_cmnd_submitter submitter; + short unsigned int cmd_len; + enum dma_data_direction sc_data_direction; + unsigned char cmnd[32]; + struct scsi_data_buffer sdb; + struct scsi_data_buffer *prot_sdb; + unsigned int underflow; + unsigned int transfersize; + unsigned int resid_len; + unsigned int sense_len; + unsigned char *sense_buffer; + int flags; + long unsigned int state; + unsigned int extra_len; + unsigned char *host_scribble; + int result; +}; + +enum scsi_prot_operations { + SCSI_PROT_NORMAL = 0, + SCSI_PROT_READ_INSERT = 1, + SCSI_PROT_WRITE_STRIP = 2, + SCSI_PROT_READ_STRIP = 3, + SCSI_PROT_WRITE_INSERT = 4, + SCSI_PROT_READ_PASS = 5, + SCSI_PROT_WRITE_PASS = 6, +}; + +struct scsi_driver { + struct device_driver gendrv; + int (*resume)(struct device *); + void (*rescan)(struct device *); + blk_status_t (*init_command)(struct scsi_cmnd *); + void (*uninit_command)(struct scsi_cmnd *); + int (*done)(struct scsi_cmnd *); + int (*eh_action)(struct scsi_cmnd *, int); + void (*eh_reset)(struct scsi_cmnd *); +}; + +enum scsi_timeout_action { + SCSI_EH_DONE = 0, + SCSI_EH_RESET_TIMER = 1, + SCSI_EH_NOT_HANDLED = 2, +}; + +struct scsi_host_template { + unsigned int cmd_size; + int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); + void (*commit_rqs)(struct Scsi_Host *, u16); + struct module *module; + const char *name; + const char * (*info)(struct Scsi_Host *); + int (*ioctl)(struct scsi_device *, unsigned int, void *); + int (*init_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*exit_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*eh_abort_handler)(struct scsi_cmnd *); + int (*eh_device_reset_handler)(struct scsi_cmnd *); + int (*eh_target_reset_handler)(struct scsi_cmnd *); + int (*eh_bus_reset_handler)(struct scsi_cmnd *); + int (*eh_host_reset_handler)(struct scsi_cmnd *); + int (*slave_alloc)(struct scsi_device *); + int (*device_configure)(struct scsi_device *, struct queue_limits *); + int (*slave_configure)(struct scsi_device *); + void (*slave_destroy)(struct scsi_device *); + int (*target_alloc)(struct scsi_target *); + void (*target_destroy)(struct scsi_target *); + int (*scan_finished)(struct Scsi_Host *, long unsigned int); + void (*scan_start)(struct Scsi_Host *); + int (*change_queue_depth)(struct scsi_device *, int); + void (*map_queues)(struct Scsi_Host *); + int (*mq_poll)(struct Scsi_Host *, unsigned int); + bool (*dma_need_drain)(struct request *); + int (*bios_param)(struct scsi_device *, struct block_device *, sector_t, int *); + void (*unlock_native_capacity)(struct scsi_device *); + int (*show_info)(struct seq_file *, struct Scsi_Host *); + int (*write_info)(struct Scsi_Host *, char *, int); + enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *); + bool (*eh_should_retry_cmd)(struct scsi_cmnd *); + int (*host_reset)(struct Scsi_Host *, int); + const char *proc_name; + int can_queue; + 
int this_id; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int dma_alignment; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + short int cmd_per_lun; + int tag_alloc_policy; + unsigned int track_queue_depth: 1; + unsigned int supported_mode: 2; + unsigned int emulated: 1; + unsigned int skip_settle_delay: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int queuecommand_may_block: 1; + unsigned int max_host_blocked; + const struct attribute_group **shost_groups; + const struct attribute_group **sdev_groups; + u64 vendor_id; +}; + +struct trace_event_raw_scsi_dispatch_cmd_start { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_dispatch_cmd_error { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + int rtn; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_cmd_done_timeout_template { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + int result; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + u8 sense_key; + u8 asc; + u8 ascq; + char __data[0]; +}; + +struct trace_event_raw_scsi_eh_wakeup { + struct trace_entry ent; + unsigned int host_no; + char __data[0]; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_start { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_error { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_cmd_done_timeout_template { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_eh_wakeup {}; + +typedef void (*btf_trace_scsi_dispatch_cmd_start)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_error)(void *, struct scsi_cmnd *, int); + +typedef void (*btf_trace_scsi_dispatch_cmd_done)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_timeout)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_eh_wakeup)(void *, struct Scsi_Host *); + +enum scsi_vpd_parameters { + SCSI_VPD_HEADER_SIZE = 4, + SCSI_VPD_LIST_SIZE = 36, +}; + +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[6]; +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; + +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +struct ethtool_stats { + __u32 cmd; + __u32 n_stats; + __u64 data[0]; +}; + +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 
3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, + ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + 
ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, + ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, + ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, + ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93, + ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94, + ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95, + ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, + ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, + ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, + ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, + ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, + ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, + ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102, + __ETHTOOL_LINK_MODE_MASK_NBITS = 103, +}; + +enum hwtstamp_tx_types { + HWTSTAMP_TX_OFF = 0, + HWTSTAMP_TX_ON = 1, + HWTSTAMP_TX_ONESTEP_SYNC = 2, + HWTSTAMP_TX_ONESTEP_P2P = 3, + __HWTSTAMP_TX_CNT = 4, +}; + +enum hwtstamp_rx_filters { + HWTSTAMP_FILTER_NONE = 0, + HWTSTAMP_FILTER_ALL = 1, + HWTSTAMP_FILTER_SOME = 2, + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, + HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, + HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, + HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, + HWTSTAMP_FILTER_PTP_V2_EVENT = 12, + HWTSTAMP_FILTER_PTP_V2_SYNC = 13, + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, + HWTSTAMP_FILTER_NTP_ALL = 15, + __HWTSTAMP_FILTER_CNT = 16, +}; + +struct ethtool_keee { + long unsigned int supported[2]; + long unsigned int advertised[2]; + long unsigned int lp_advertised[2]; + u32 tx_lpi_timer; + bool tx_lpi_enabled; + bool eee_active; + bool eee_enabled; +}; + +struct kernel_ethtool_ts_info { + u32 cmd; + u32 so_timestamping; + int phc_index; + enum hwtstamp_tx_types tx_types; + enum hwtstamp_rx_filters rx_filters; +}; + +struct gpio_desc; + +struct reset_control; + +struct mii_bus; + +struct mdio_device { + struct device dev; + struct mii_bus *bus; + char modalias[32]; + int (*bus_match)(struct device *, const struct device_driver *); + void (*device_free)(struct mdio_device *); + void (*device_remove)(struct mdio_device *); + int addr; + int flags; + int reset_state; + struct gpio_desc *reset_gpio; + struct reset_control *reset_ctrl; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; +}; + +struct phy_c45_device_ids { + u32 devices_in_package; + u32 mmds_present; + u32 device_ids[32]; +}; + +enum phy_state { + PHY_DOWN = 0, + PHY_READY = 1, + PHY_HALTED = 2, + PHY_ERROR = 3, + PHY_UP = 4, + PHY_RUNNING = 5, + PHY_NOLINK = 6, + PHY_CABLETEST = 7, +}; + +typedef enum { + PHY_INTERFACE_MODE_NA = 0, + PHY_INTERFACE_MODE_INTERNAL = 1, + PHY_INTERFACE_MODE_MII = 2, + PHY_INTERFACE_MODE_GMII = 3, + PHY_INTERFACE_MODE_SGMII = 4, + PHY_INTERFACE_MODE_TBI = 5, + PHY_INTERFACE_MODE_REVMII = 6, + PHY_INTERFACE_MODE_RMII = 7, + PHY_INTERFACE_MODE_REVRMII = 8, + PHY_INTERFACE_MODE_RGMII = 9, + PHY_INTERFACE_MODE_RGMII_ID = 10, + PHY_INTERFACE_MODE_RGMII_RXID = 11, + 
PHY_INTERFACE_MODE_RGMII_TXID = 12, + PHY_INTERFACE_MODE_RTBI = 13, + PHY_INTERFACE_MODE_SMII = 14, + PHY_INTERFACE_MODE_XGMII = 15, + PHY_INTERFACE_MODE_XLGMII = 16, + PHY_INTERFACE_MODE_MOCA = 17, + PHY_INTERFACE_MODE_PSGMII = 18, + PHY_INTERFACE_MODE_QSGMII = 19, + PHY_INTERFACE_MODE_TRGMII = 20, + PHY_INTERFACE_MODE_100BASEX = 21, + PHY_INTERFACE_MODE_1000BASEX = 22, + PHY_INTERFACE_MODE_2500BASEX = 23, + PHY_INTERFACE_MODE_5GBASER = 24, + PHY_INTERFACE_MODE_RXAUI = 25, + PHY_INTERFACE_MODE_XAUI = 26, + PHY_INTERFACE_MODE_10GBASER = 27, + PHY_INTERFACE_MODE_25GBASER = 28, + PHY_INTERFACE_MODE_USXGMII = 29, + PHY_INTERFACE_MODE_10GKR = 30, + PHY_INTERFACE_MODE_QUSGMII = 31, + PHY_INTERFACE_MODE_1000BASEKX = 32, + PHY_INTERFACE_MODE_10G_QXGMII = 33, + PHY_INTERFACE_MODE_MAX = 34, +} phy_interface_t; + +struct eee_config { + u32 tx_lpi_timer; + bool tx_lpi_enabled; + bool eee_enabled; +}; + +struct phylink; + +struct pse_control; + +struct phy_driver; + +struct phy_package_shared; + +struct mii_timestamper; + +struct phy_device { + struct mdio_device mdio; + const struct phy_driver *drv; + struct device_link *devlink; + u32 phyindex; + u32 phy_id; + struct phy_c45_device_ids c45_ids; + unsigned int is_c45: 1; + unsigned int is_internal: 1; + unsigned int is_pseudo_fixed_link: 1; + unsigned int is_gigabit_capable: 1; + unsigned int has_fixups: 1; + unsigned int suspended: 1; + unsigned int suspended_by_mdio_bus: 1; + unsigned int sysfs_links: 1; + unsigned int loopback_enabled: 1; + unsigned int downshifted_rate: 1; + unsigned int is_on_sfp_module: 1; + unsigned int mac_managed_pm: 1; + unsigned int wol_enabled: 1; + unsigned int autoneg: 1; + unsigned int link: 1; + unsigned int autoneg_complete: 1; + unsigned int interrupts: 1; + unsigned int irq_suspended: 1; + unsigned int irq_rerun: 1; + unsigned int default_timestamp: 1; + int rate_matching; + enum phy_state state; + u32 dev_flags; + phy_interface_t interface; + long unsigned int possible_interfaces[1]; + int speed; + int duplex; + int port; + int pause; + int asym_pause; + u8 master_slave_get; + u8 master_slave_set; + u8 master_slave_state; + long unsigned int supported[2]; + long unsigned int advertising[2]; + long unsigned int lp_advertising[2]; + long unsigned int adv_old[2]; + long unsigned int supported_eee[2]; + long unsigned int advertising_eee[2]; + long unsigned int eee_broken_modes[2]; + bool enable_tx_lpi; + bool eee_active; + struct eee_config eee_cfg; + long unsigned int host_interfaces[1]; + struct list_head leds; + int irq; + void *priv; + struct phy_package_shared *shared; + struct sk_buff *skb; + void *ehdr; + struct nlattr *nest; + struct delayed_work state_queue; + struct mutex lock; + bool sfp_bus_attached; + struct sfp_bus *sfp_bus; + struct phylink *phylink; + struct net_device *attached_dev; + struct mii_timestamper *mii_ts; + struct pse_control *psec; + u8 mdix; + u8 mdix_ctrl; + int pma_extable; + unsigned int link_down_events; + void (*phy_link_change)(struct phy_device *, bool); + void (*adjust_link)(struct net_device *); +}; + +struct phy_plca_cfg { + int version; + int enabled; + int node_id; + int node_cnt; + int to_tmr; + int burst_cnt; + int burst_tmr; +}; + +struct phy_plca_status { + bool pst; +}; + +struct phy_tdr_config { + u32 first; + u32 last; + u32 step; + s8 pair; +}; + +struct mdio_bus_stats { + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t writes; + u64_stats_t reads; + struct u64_stats_sync syncp; +}; + +struct mii_bus { + struct module *owner; + const char *name; + char 
id[61]; + void *priv; + int (*read)(struct mii_bus *, int, int); + int (*write)(struct mii_bus *, int, int, u16); + int (*read_c45)(struct mii_bus *, int, int, int); + int (*write_c45)(struct mii_bus *, int, int, int, u16); + int (*reset)(struct mii_bus *); + struct mdio_bus_stats stats[32]; + struct mutex mdio_lock; + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED = 2, + MDIOBUS_UNREGISTERED = 3, + MDIOBUS_RELEASED = 4, + } state; + struct device dev; + struct mdio_device *mdio_map[32]; + u32 phy_mask; + u32 phy_ignore_ta_mask; + int irq[32]; + int reset_delay_us; + int reset_post_delay_us; + struct gpio_desc *reset_gpiod; + struct mutex shared_lock; + struct phy_package_shared *shared[32]; +}; + +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; + +enum led_brightness { + LED_OFF = 0, + LED_ON = 1, + LED_HALF = 127, + LED_FULL = 255, +}; + +struct mii_timestamper { + bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int); + void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int); + int (*hwtstamp)(struct mii_timestamper *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); + void (*link_state)(struct mii_timestamper *, struct phy_device *); + int (*ts_info)(struct mii_timestamper *, struct kernel_ethtool_ts_info *); + struct device *device; +}; + +struct phy_package_shared { + u8 base_addr; + struct device_node *np; + refcount_t refcnt; + long unsigned int flags; + size_t priv_size; + void *priv; +}; + +struct phy_driver { + struct mdio_driver_common mdiodrv; + u32 phy_id; + char *name; + u32 phy_id_mask; + const long unsigned int * const features; + u32 flags; + const void *driver_data; + int (*soft_reset)(struct phy_device *); + int (*config_init)(struct phy_device *); + int (*probe)(struct phy_device *); + int (*get_features)(struct phy_device *); + int (*get_rate_matching)(struct phy_device *, phy_interface_t); + int (*suspend)(struct phy_device *); + int (*resume)(struct phy_device *); + int (*config_aneg)(struct phy_device *); + int (*aneg_done)(struct phy_device *); + int (*read_status)(struct phy_device *); + int (*config_intr)(struct phy_device *); + irqreturn_t (*handle_interrupt)(struct phy_device *); + void (*remove)(struct phy_device *); + int (*match_phy_device)(struct phy_device *); + int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*link_change_notify)(struct phy_device *); + int (*read_mmd)(struct phy_device *, int, u16); + int (*write_mmd)(struct phy_device *, int, u16, u16); + int (*read_page)(struct phy_device *); + int (*write_page)(struct phy_device *, int); + int (*module_info)(struct phy_device *, struct ethtool_modinfo *); + int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); + int (*cable_test_start)(struct phy_device *); + int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *); + int (*cable_test_get_status)(struct phy_device *, bool *); + int (*get_sset_count)(struct phy_device *); + void (*get_strings)(struct phy_device *, u8 *); + void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); + int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); + int (*set_loopback)(struct phy_device *, bool); + int (*get_sqi)(struct phy_device *); + int (*get_sqi_max)(struct phy_device *); + int (*get_plca_cfg)(struct phy_device *, struct 
phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *); + int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); + int (*led_brightness_set)(struct phy_device *, u8, enum led_brightness); + int (*led_blink_set)(struct phy_device *, u8, long unsigned int *, long unsigned int *); + int (*led_hw_is_supported)(struct phy_device *, u8, long unsigned int); + int (*led_hw_control_set)(struct phy_device *, u8, long unsigned int); + int (*led_hw_control_get)(struct phy_device *, u8, long unsigned int *); + int (*led_polarity_set)(struct phy_device *, int, long unsigned int); +}; + +struct cyclecounter { + u64 (*read)(const struct cyclecounter *); + u64 mask; + u32 mult; + u32 shift; +}; + +struct timecounter { + const struct cyclecounter *cc; + u64 cycle_last; + u64 nsec; + u64 mask; + u64 frac; +}; + +struct system_device_crosststamp { + ktime_t device; + ktime_t sys_realtime; + ktime_t sys_monoraw; +}; + +enum wq_misc_consts { + WORK_NR_COLORS = 16, + WORK_CPU_UNBOUND = 32, + WORK_BUSY_PENDING = 1, + WORK_BUSY_RUNNING = 2, + WORKER_DESC_LEN = 32, +}; + +struct ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_proto; +}; + +struct ethtool_netdev_state { + struct xarray rss_ctx; + struct mutex rss_lock; + unsigned int wol_enabled: 1; + unsigned int module_fw_flash_in_progress: 1; +}; + +struct msix_entry { + u32 vector; + u16 entry; +}; + +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + +struct ptp_clock_time { + __s64 sec; + __u32 nsec; + __u32 reserved; +}; + +struct ptp_extts_request { + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct ptp_perout_request { + union { + struct ptp_clock_time start; + struct ptp_clock_time phase; + }; + struct ptp_clock_time period; + unsigned int index; + unsigned int flags; + union { + struct ptp_clock_time on; + unsigned int rsv[4]; + }; +}; + +enum ptp_pin_function { + PTP_PF_NONE = 0, + PTP_PF_EXTTS = 1, + PTP_PF_PEROUT = 2, + PTP_PF_PHYSYNC = 3, +}; + +struct ptp_pin_desc { + char name[64]; + unsigned int index; + unsigned int func; + unsigned int chan; + unsigned int rsv[5]; +}; + +struct ptp_clock_request { + enum { + PTP_CLK_REQ_EXTTS = 0, + PTP_CLK_REQ_PEROUT = 1, + PTP_CLK_REQ_PPS = 2, + } type; + union { + struct ptp_extts_request extts; + struct ptp_perout_request perout; + }; +}; + +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; + clockid_t clockid; +}; + +struct ptp_clock_info { + struct module *owner; + char name[32]; + s32 max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int n_pins; + int pps; + struct ptp_pin_desc *pin_config; + int (*adjfine)(struct ptp_clock_info *, long int); + int (*adjphase)(struct ptp_clock_info *, s32); + s32 (*getmaxphase)(struct ptp_clock_info *); + int (*adjtime)(struct ptp_clock_info *, s64); + int (*gettime64)(struct ptp_clock_info *, struct timespec64 *); + int (*gettimex64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*settime64)(struct ptp_clock_info *, const struct timespec64 *); + int (*getcycles64)(struct ptp_clock_info *, struct timespec64 *); + int (*getcyclesx64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosscycles)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*enable)(struct ptp_clock_info *, struct ptp_clock_request *, int); + int 
(*verify)(struct ptp_clock_info *, unsigned int, enum ptp_pin_function, unsigned int); + long int (*do_aux_work)(struct ptp_clock_info *); +}; + +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char erom_version[32]; + char reserved2[12]; + __u32 n_priv_flags; + __u32 n_stats; + __u32 testinfo_len; + __u32 eedump_len; + __u32 regdump_len; +}; + +struct ethtool_regs { + __u32 cmd; + __u32 version; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; + +struct ethtool_ringparam { + __u32 cmd; + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +struct ethtool_pauseparam { + __u32 cmd; + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +enum ethtool_link_ext_state { + ETHTOOL_LINK_EXT_STATE_AUTONEG = 0, + ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE = 1, + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH = 2, + ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY = 3, + ETHTOOL_LINK_EXT_STATE_NO_CABLE = 4, + ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE = 5, + ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE = 6, + ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE = 7, + ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED = 8, + ETHTOOL_LINK_EXT_STATE_OVERHEAT = 9, + ETHTOOL_LINK_EXT_STATE_MODULE = 10, +}; + +enum ethtool_link_ext_substate_autoneg { + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED = 2, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED = 3, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE = 4, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE = 5, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD = 6, +}; + +enum ethtool_link_ext_substate_link_training { + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT = 4, +}; + +enum ethtool_link_ext_substate_link_logical_mismatch { + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED = 4, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED = 5, +}; + +enum ethtool_link_ext_substate_bad_signal_integrity { + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, + 
ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 2, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST = 3, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS = 4, +}; + +enum ethtool_link_ext_substate_cable_issue { + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 2, +}; + +enum ethtool_link_ext_substate_module { + ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1, +}; + +enum ethtool_mac_stats_src { + ETHTOOL_MAC_STATS_SRC_AGGREGATE = 0, + ETHTOOL_MAC_STATS_SRC_EMAC = 1, + ETHTOOL_MAC_STATS_SRC_PMAC = 2, +}; + +enum ethtool_module_power_mode_policy { + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1, + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO = 2, +}; + +enum ethtool_module_power_mode { + ETHTOOL_MODULE_POWER_MODE_LOW = 1, + ETHTOOL_MODULE_POWER_MODE_HIGH = 2, +}; + +enum ethtool_mm_verify_status { + ETHTOOL_MM_VERIFY_STATUS_UNKNOWN = 0, + ETHTOOL_MM_VERIFY_STATUS_INITIAL = 1, + ETHTOOL_MM_VERIFY_STATUS_VERIFYING = 2, + ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED = 3, + ETHTOOL_MM_VERIFY_STATUS_FAILED = 4, + ETHTOOL_MM_VERIFY_STATUS_DISABLED = 5, +}; + +struct ethtool_test { + __u32 cmd; + __u32 flags; + __u32 reserved; + __u32 len; + __u64 data[0]; +}; + +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; + +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; + +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; + +struct ethtool_flow_ext { + __u8 padding[2]; + unsigned char h_dest[6]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; + +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; +}; + +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[128]; +}; + +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_fecparam { + __u32 cmd; + __u32 active_fec; + __u32 fec; + __u32 reserved; +}; + +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 
link_mode_masks_nwords; + __u8 transceiver; + __u8 master_slave_cfg; + __u8 master_slave_state; + __u8 rate_matching; + __u32 reserved[7]; +}; + +struct kernel_ethtool_ringparam { + u32 rx_buf_len; + u8 tcp_data_split; + u8 tx_push; + u8 rx_push; + u32 cqe_size; + u32 tx_push_buf_len; + u32 tx_push_buf_max_len; +}; + +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + enum ethtool_link_ext_substate_module module; + u32 __link_ext_substate; + }; +}; + +struct ethtool_link_ext_stats { + u64 link_down_events; +}; + +struct ethtool_rxfh_context { + u32 indir_size; + u32 key_size; + u16 priv_size; + u8 hfunc; + u8 input_xfrm; + u8 indir_configured: 1; + u8 key_configured: 1; + u32 key_off; + long: 0; + u8 data[0]; +}; + +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + long unsigned int supported[2]; + long unsigned int advertising[2]; + long unsigned int lp_advertising[2]; + } link_modes; + u32 lanes; +}; + +struct kernel_ethtool_coalesce { + u8 use_cqe_mode_tx; + u8 use_cqe_mode_rx; + u32 tx_aggr_max_bytes; + u32 tx_aggr_max_frames; + u32 tx_aggr_time_usecs; +}; + +struct ethtool_eth_mac_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + }; + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + } stats; + }; +}; + +struct ethtool_eth_phy_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 SymbolErrorDuringCarrier; + }; + struct { + u64 SymbolErrorDuringCarrier; + } stats; + }; +}; + +struct ethtool_eth_ctrl_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + }; + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + } stats; + }; +}; + +struct ethtool_pause_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 tx_pause_frames; + u64 
rx_pause_frames; + }; + struct { + u64 tx_pause_frames; + u64 rx_pause_frames; + } stats; + }; +}; + +struct ethtool_fec_stat { + u64 total; + u64 lanes[8]; +}; + +struct ethtool_fec_stats { + struct ethtool_fec_stat corrected_blocks; + struct ethtool_fec_stat uncorrectable_blocks; + struct ethtool_fec_stat corrected_bits; +}; + +struct ethtool_rmon_hist_range { + u16 low; + u16 high; +}; + +struct ethtool_rmon_stats { + enum ethtool_mac_stats_src src; + union { + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + }; + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + } stats; + }; +}; + +struct ethtool_ts_stats { + union { + struct { + u64 pkts; + u64 lost; + u64 err; + }; + struct { + u64 pkts; + u64 lost; + u64 err; + } tx_stats; + }; +}; + +struct ethtool_module_eeprom { + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; + u8 *data; +}; + +struct ethtool_module_power_mode_params { + enum ethtool_module_power_mode_policy policy; + enum ethtool_module_power_mode mode; +}; + +struct ethtool_mm_state { + u32 verify_time; + u32 max_verify_time; + enum ethtool_mm_verify_status verify_status; + bool tx_enabled; + bool tx_active; + bool pmac_enabled; + bool verify_enabled; + u32 tx_min_frag_size; + u32 rx_min_frag_size; +}; + +struct ethtool_mm_cfg { + u32 verify_time; + bool verify_enabled; + bool tx_enabled; + bool pmac_enabled; + u32 tx_min_frag_size; +}; + +struct ethtool_mm_stats { + u64 MACMergeFrameAssErrorCount; + u64 MACMergeFrameSmdErrorCount; + u64 MACMergeFrameAssOkCount; + u64 MACMergeFragCountRx; + u64 MACMergeFragCountTx; + u64 MACMergeHoldCount; +}; + +struct ethtool_rxfh_param { + u8 hfunc; + u32 indir_size; + u32 *indir; + u32 key_size; + u8 *key; + u32 rss_context; + u8 rss_delete; + u8 input_xfrm; +}; + +struct pm_qos_request { + struct plist_node node; + struct pm_qos_constraints *qos; +}; + +enum e1000_mac_type { + e1000_82571 = 0, + e1000_82572 = 1, + e1000_82573 = 2, + e1000_82574 = 3, + e1000_82583 = 4, + e1000_80003es2lan = 5, + e1000_ich8lan = 6, + e1000_ich9lan = 7, + e1000_ich10lan = 8, + e1000_pchlan = 9, + e1000_pch2lan = 10, + e1000_pch_lpt = 11, + e1000_pch_spt = 12, + e1000_pch_cnp = 13, + e1000_pch_tgp = 14, + e1000_pch_adp = 15, + e1000_pch_mtp = 16, + e1000_pch_lnp = 17, + e1000_pch_ptp = 18, + e1000_pch_nvp = 19, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types = 4, +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none = 1, + e1000_nvm_eeprom_spi = 2, + e1000_nvm_flash_hw = 3, + e1000_nvm_flash_sw = 4, +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small = 1, + e1000_nvm_override_spi_large = 2, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none = 1, + e1000_phy_m88 = 2, + e1000_phy_igp = 3, + e1000_phy_igp_2 = 4, + e1000_phy_gg82563 = 5, + e1000_phy_igp_3 = 6, + e1000_phy_ife = 7, + e1000_phy_bm = 8, + e1000_phy_82578 = 9, + e1000_phy_82577 = 10, + e1000_phy_82579 = 11, + e1000_phy_i217 = 12, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1 = 1, + e1000_bus_width_pcie_x2 = 2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32 = 9, + e1000_bus_width_64 = 10, + e1000_bus_width_reserved = 11, +}; + +enum 
e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok = 1, + e1000_1000t_rx_status_undefined = 255, +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed = 1, + e1000_rev_polarity_undefined = 255, +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause = 1, + e1000_fc_tx_pause = 2, + e1000_fc_full = 3, + e1000_fc_default = 255, +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master = 1, + e1000_ms_force_slave = 2, + e1000_ms_auto = 3, +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on = 1, + e1000_smart_speed_off = 2, +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress = 1, + e1000_serdes_link_autoneg_complete = 2, + e1000_serdes_link_forced_up = 3, +}; + +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +struct e1000_hw; + +struct e1000_mac_operations { + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + int (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + u32 (*rar_get_count)(struct e1000_hw *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + enum e1000_mac_type type; + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u32 mta_shadow[128]; + u16 rar_entry_count; + u8 forced_speed_duplex; + bool adaptive_ifs; + bool has_fwsm; + bool 
arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; + enum e1000_serdes_link_state serdes_link_state; +}; + +struct e1000_fc_info { + u32 high_water; + u32 low_water; + u16 pause_time; + u16 refresh_time; + bool send_xon; + bool strict_ieee; + enum e1000_fc_mode current_mode; + enum e1000_fc_mode requested_mode; +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*cfg_on_link_up)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + enum e1000_phy_type type; + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + u32 addr; + u32 id; + u32 reset_delay_us; + u32 revision; + u32 retry_count; + enum e1000_media_type media_type; + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u8 mdix; + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; + bool retry_enabled; +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + u32 flash_bank_size; + u32 flash_base_addr; + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_width width; + u16 func; +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +enum e1000_ulp_state { + e1000_ulp_state_unknown = 0, + e1000_ulp_state_off = 1, + e1000_ulp_state_on = 2, +}; + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[2048]; + bool nvm_k1_enabled; + bool eee_disable; + u16 eee_lp_ability; + enum e1000_ulp_state ulp_state; +}; + +struct e1000_adapter; + +struct e1000_hw { + struct e1000_adapter *adapter; + void *hw_addr; + void 
*flash_address; + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_80003es2lan e80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; +}; + +struct e1000_phy_regs { + u16 bmcr; + u16 bmsr; + u16 advertise; + u16 lpa; + u16 expansion; + u16 ctrl1000; + u16 stat1000; + u16 estatus; +}; + +struct e1000_buffer; + +struct e1000_ring { + struct e1000_adapter *adapter; + void *desc; + dma_addr_t dma; + unsigned int size; + unsigned int count; + u16 next_to_use; + u16 next_to_clean; + void *head; + void *tail; + struct e1000_buffer *buffer_info; + char name[21]; + u32 ims_val; + u32 itr_val; + void *itr_register; + int set_itr; + struct sk_buff *rx_skb_top; +}; + +struct e1000_info; + +struct ptp_clock; + +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + struct work_struct reset_task; + struct work_struct watchdog_task; + const struct e1000_info *ei; + long unsigned int active_vlans[64]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + long unsigned int state; + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + long: 64; + long: 64; + long: 64; + struct e1000_ring *tx_ring; + u32 tx_fifo_limit; + struct napi_struct napi; + unsigned int uncorr_errors; + unsigned int corr_errors; + unsigned int restart_queue; + u32 txd_cmd; + bool detect_tx_hung; + bool tx_hang_recheck; + u8 tx_timeout_factor; + u32 tx_int_delay; + u32 tx_abs_int_delay; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + u64 tpt_old; + u64 colc_old; + u32 gotc; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + long: 64; + long: 64; + bool (*clean_rx)(struct e1000_ring *, int *, int); + void (*alloc_rx_buf)(struct e1000_ring *, int, gfp_t); + struct e1000_ring *rx_ring; + u32 rx_int_delay; + u32 rx_abs_int_delay; + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 rx_hwtstamp_cleared; + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + struct net_device *netdev; + struct pci_dev *pdev; + struct e1000_hw hw; + spinlock_t stats64_lock; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + struct e1000_phy_regs phy_regs; + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + bool fc_autoneg; + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + int phy_hang_count; + u16 tx_ring_count; + u16 rx_ring_count; + struct hwtstamp_config hwtstamp_config; + struct delayed_work systim_overflow_work; + struct sk_buff *tx_hwtstamp_skb; + long unsigned int tx_hwtstamp_start; + struct work_struct tx_hwtstamp_work; + spinlock_t systim_lock; + struct cyclecounter cc; + struct timecounter 
tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct pm_qos_request pm_qos_req; + long int ptp_delta; + u16 eee_advert; + long: 64; + long: 64; +}; + +struct e1000_ps_page { + struct page *page; + u64 dma; +}; + +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + struct { + long unsigned int time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + struct { + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_info { + enum e1000_mac_type mac; + unsigned int flags; + unsigned int flags2; + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); + const struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + const struct e1000_nvm_operations *nvm_ops; +}; + +struct gen_pool; + +typedef long unsigned int (*genpool_algo_t)(long unsigned int *, long unsigned int, long unsigned int, unsigned int, void *, struct gen_pool *, long unsigned int); + +struct gen_pool { + spinlock_t lock; + struct list_head chunks; + int min_alloc_order; + genpool_algo_t algo; + void *data; + const char *name; +}; + +struct usb_device_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __le16 idVendor; + __le16 idProduct; + __le16 bcdDevice; + __u8 iManufacturer; + __u8 iProduct; + __u8 iSerialNumber; + __u8 bNumConfigurations; +}; + +struct usb_config_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumInterfaces; + __u8 bConfigurationValue; + __u8 iConfiguration; + __u8 bmAttributes; + __u8 bMaxPower; +} __attribute__((packed)); + +struct usb_interface_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bInterfaceNumber; + __u8 bAlternateSetting; + __u8 bNumEndpoints; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 iInterface; +}; + +struct usb_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bEndpointAddress; + __u8 bmAttributes; + __le16 wMaxPacketSize; + __u8 bInterval; + __u8 bRefresh; + __u8 bSynchAddress; +} __attribute__((packed)); + +struct usb_ssp_isoc_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wReseved; + __le32 dwBytesPerInterval; +}; + +struct usb_ss_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bMaxBurst; + __u8 bmAttributes; + __le16 wBytesPerInterval; +}; + +struct usb_interface_assoc_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bFirstInterface; + __u8 bInterfaceCount; + __u8 bFunctionClass; + __u8 bFunctionSubClass; + __u8 bFunctionProtocol; + __u8 iFunction; +}; + +struct usb_bos_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumDeviceCaps; +} __attribute__((packed)); + +struct usb_ext_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __le32 bmAttributes; +} __attribute__((packed)); + +struct usb_ss_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bmAttributes; + __le16 wSpeedSupported; + __u8 bFunctionalitySupport; + __u8 bU1devExitLat; + __le16 bU2DevExitLat; +}; + +struct usb_ss_container_id_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __u8 ContainerID[16]; +}; + +struct usb_ssp_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 
bDevCapabilityType; + __u8 bReserved; + __le32 bmAttributes; + __le16 wFunctionalitySupport; + __le16 wReserved; + union { + __le32 legacy_padding; + struct { + struct {} __empty_bmSublinkSpeedAttr; + __le32 bmSublinkSpeedAttr[0]; + }; + }; +}; + +struct usb_ptm_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +enum usb_device_speed { + USB_SPEED_UNKNOWN = 0, + USB_SPEED_LOW = 1, + USB_SPEED_FULL = 2, + USB_SPEED_HIGH = 3, + USB_SPEED_WIRELESS = 4, + USB_SPEED_SUPER = 5, + USB_SPEED_SUPER_PLUS = 6, +}; + +enum usb_device_state { + USB_STATE_NOTATTACHED = 0, + USB_STATE_ATTACHED = 1, + USB_STATE_POWERED = 2, + USB_STATE_RECONNECTING = 3, + USB_STATE_UNAUTHENTICATED = 4, + USB_STATE_DEFAULT = 5, + USB_STATE_ADDRESS = 6, + USB_STATE_CONFIGURED = 7, + USB_STATE_SUSPENDED = 8, +}; + +enum usb3_link_state { + USB3_LPM_U0 = 0, + USB3_LPM_U1 = 1, + USB3_LPM_U2 = 2, + USB3_LPM_U3 = 3, +}; + +enum usb_ssp_rate { + USB_SSP_GEN_UNKNOWN = 0, + USB_SSP_GEN_2x1 = 1, + USB_SSP_GEN_1x2 = 2, + USB_SSP_GEN_2x2 = 3, +}; + +struct ep_device; + +struct usb_host_endpoint { + struct usb_endpoint_descriptor desc; + struct usb_ss_ep_comp_descriptor ss_ep_comp; + struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; + long: 0; + struct list_head urb_list; + void *hcpriv; + struct ep_device *ep_dev; + unsigned char *extra; + int extralen; + int enabled; + int streams; + long: 0; +} __attribute__((packed)); + +struct usb_host_interface { + struct usb_interface_descriptor desc; + int extralen; + unsigned char *extra; + struct usb_host_endpoint *endpoint; + char *string; +}; + +enum usb_interface_condition { + USB_INTERFACE_UNBOUND = 0, + USB_INTERFACE_BINDING = 1, + USB_INTERFACE_BOUND = 2, + USB_INTERFACE_UNBINDING = 3, +}; + +enum usb_wireless_status { + USB_WIRELESS_STATUS_NA = 0, + USB_WIRELESS_STATUS_DISCONNECTED = 1, + USB_WIRELESS_STATUS_CONNECTED = 2, +}; + +struct usb_interface { + struct usb_host_interface *altsetting; + struct usb_host_interface *cur_altsetting; + unsigned int num_altsetting; + struct usb_interface_assoc_descriptor *intf_assoc; + int minor; + enum usb_interface_condition condition; + unsigned int sysfs_files_created: 1; + unsigned int ep_devs_created: 1; + unsigned int unregistering: 1; + unsigned int needs_remote_wakeup: 1; + unsigned int needs_altsetting0: 1; + unsigned int needs_binding: 1; + unsigned int resetting_device: 1; + unsigned int authorized: 1; + enum usb_wireless_status wireless_status; + struct work_struct wireless_status_work; + struct device dev; + struct device *usb_dev; + struct work_struct reset_ws; +}; + +struct usb_interface_cache { + unsigned int num_altsetting; + struct kref ref; + struct usb_host_interface altsetting[0]; +}; + +struct usb_host_config { + struct usb_config_descriptor desc; + char *string; + struct usb_interface_assoc_descriptor *intf_assoc[16]; + struct usb_interface *interface[32]; + struct usb_interface_cache *intf_cache[32]; + unsigned char *extra; + int extralen; +}; + +struct usb_host_bos { + struct usb_bos_descriptor *desc; + struct usb_ext_cap_descriptor *ext_cap; + struct usb_ss_cap_descriptor *ss_cap; + struct usb_ssp_cap_descriptor *ssp_cap; + struct usb_ss_container_id_descriptor *ss_id; + struct usb_ptm_cap_descriptor *ptm_cap; +}; + +struct usb_device; + +struct usb_bus { + struct device *controller; + struct device *sysdev; + int busnum; + const char *bus_name; + u8 uses_pio_for_control; + u8 otg_port; + unsigned int is_b_host: 1; + unsigned int b_hnp_enable: 1; + unsigned int 
no_stop_on_short: 1; + unsigned int no_sg_constraint: 1; + unsigned int sg_tablesize; + int devnum_next; + struct mutex devnum_next_mutex; + long unsigned int devmap[2]; + struct usb_device *root_hub; + struct usb_bus *hs_companion; + int bandwidth_allocated; + int bandwidth_int_reqs; + int bandwidth_isoc_reqs; + unsigned int resuming_ports; +}; + +enum usb_link_tunnel_mode { + USB_LINK_UNKNOWN = 0, + USB_LINK_NATIVE = 1, + USB_LINK_TUNNELED = 2, +}; + +struct usb2_lpm_parameters { + unsigned int besl; + int timeout; +}; + +struct usb3_lpm_parameters { + unsigned int mel; + unsigned int pel; + unsigned int sel; + int timeout; +}; + +struct usb_tt; + +struct usb_device { + int devnum; + char devpath[16]; + u32 route; + enum usb_device_state state; + enum usb_device_speed speed; + unsigned int rx_lanes; + unsigned int tx_lanes; + enum usb_ssp_rate ssp_rate; + struct usb_tt *tt; + int ttport; + unsigned int toggle[2]; + struct usb_device *parent; + struct usb_bus *bus; + struct usb_host_endpoint ep0; + struct device dev; + struct usb_device_descriptor descriptor; + struct usb_host_bos *bos; + struct usb_host_config *config; + struct usb_host_config *actconfig; + struct usb_host_endpoint *ep_in[16]; + struct usb_host_endpoint *ep_out[16]; + char **rawdescriptors; + short unsigned int bus_mA; + u8 portnum; + u8 level; + u8 devaddr; + unsigned int can_submit: 1; + unsigned int persist_enabled: 1; + unsigned int reset_in_progress: 1; + unsigned int have_langid: 1; + unsigned int authorized: 1; + unsigned int authenticated: 1; + unsigned int lpm_capable: 1; + unsigned int lpm_devinit_allow: 1; + unsigned int usb2_hw_lpm_capable: 1; + unsigned int usb2_hw_lpm_besl_capable: 1; + unsigned int usb2_hw_lpm_enabled: 1; + unsigned int usb2_hw_lpm_allowed: 1; + unsigned int usb3_lpm_u1_enabled: 1; + unsigned int usb3_lpm_u2_enabled: 1; + int string_langid; + char *product; + char *manufacturer; + char *serial; + struct list_head filelist; + int maxchild; + u32 quirks; + atomic_t urbnum; + long unsigned int active_duration; + long unsigned int connect_time; + unsigned int do_remote_wakeup: 1; + unsigned int reset_resume: 1; + unsigned int port_is_suspended: 1; + enum usb_link_tunnel_mode tunnel_mode; + int slot_id; + struct usb2_lpm_parameters l1_params; + struct usb3_lpm_parameters u1_params; + struct usb3_lpm_parameters u2_params; + unsigned int lpm_disable_count; + u16 hub_delay; + unsigned int use_generic_driver: 1; +}; + +struct usb_tt { + struct usb_device *hub; + int multi; + unsigned int think_time; + void *hcpriv; + spinlock_t lock; + struct list_head clear_list; + struct work_struct clear_work; +}; + +struct usb_iso_packet_descriptor { + unsigned int offset; + unsigned int length; + unsigned int actual_length; + int status; +}; + +struct usb_anchor { + struct list_head urb_list; + wait_queue_head_t wait; + spinlock_t lock; + atomic_t suspend_wakeups; + unsigned int poisoned: 1; +}; + +struct urb; + +typedef void (*usb_complete_t)(struct urb *); + +struct urb { + struct kref kref; + int unlinked; + void *hcpriv; + atomic_t use_count; + atomic_t reject; + struct list_head urb_list; + struct list_head anchor_list; + struct usb_anchor *anchor; + struct usb_device *dev; + struct usb_host_endpoint *ep; + unsigned int pipe; + unsigned int stream_id; + int status; + unsigned int transfer_flags; + void *transfer_buffer; + dma_addr_t transfer_dma; + struct scatterlist *sg; + int num_mapped_sgs; + int num_sgs; + u32 transfer_buffer_length; + u32 actual_length; + unsigned char *setup_packet; + dma_addr_t 
setup_dma; + int start_frame; + int number_of_packets; + int interval; + int error_count; + void *context; + usb_complete_t complete; + struct usb_iso_packet_descriptor iso_frame_desc[0]; +}; + +struct giveback_urb_bh { + bool running; + bool high_prio; + spinlock_t lock; + struct list_head head; + struct work_struct bh; + struct usb_host_endpoint *completing_ep; +}; + +enum usb_dev_authorize_policy { + USB_DEVICE_AUTHORIZE_NONE = 0, + USB_DEVICE_AUTHORIZE_ALL = 1, + USB_DEVICE_AUTHORIZE_INTERNAL = 2, +}; + +struct hc_driver; + +struct usb_phy; + +struct usb_phy_roothub; + +struct dma_pool; + +struct usb_hcd { + struct usb_bus self; + struct kref kref; + const char *product_desc; + int speed; + char irq_descr[24]; + struct timer_list rh_timer; + struct urb *status_urb; + struct work_struct wakeup_work; + struct work_struct died_work; + const struct hc_driver *driver; + struct usb_phy *usb_phy; + struct usb_phy_roothub *phy_roothub; + long unsigned int flags; + enum usb_dev_authorize_policy dev_policy; + unsigned int rh_registered: 1; + unsigned int rh_pollable: 1; + unsigned int msix_enabled: 1; + unsigned int msi_enabled: 1; + unsigned int skip_phy_initialization: 1; + unsigned int uses_new_polling: 1; + unsigned int has_tt: 1; + unsigned int amd_resume_bug: 1; + unsigned int can_do_streams: 1; + unsigned int tpl_support: 1; + unsigned int cant_recv_wakeups: 1; + unsigned int irq; + void *regs; + resource_size_t rsrc_start; + resource_size_t rsrc_len; + unsigned int power_budget; + struct giveback_urb_bh high_prio_bh; + struct giveback_urb_bh low_prio_bh; + struct mutex *address0_mutex; + struct mutex *bandwidth_mutex; + struct usb_hcd *shared_hcd; + struct usb_hcd *primary_hcd; + struct dma_pool *pool[4]; + int state; + struct gen_pool *localmem_pool; + long unsigned int hcd_priv[0]; +}; + +struct hc_driver { + const char *description; + const char *product_desc; + size_t hcd_priv_size; + irqreturn_t (*irq)(struct usb_hcd *); + int flags; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*pci_suspend)(struct usb_hcd *, bool); + int (*pci_resume)(struct usb_hcd *, pm_message_t); + int (*pci_poweroff_late)(struct usb_hcd *, bool); + void (*stop)(struct usb_hcd *); + void (*shutdown)(struct usb_hcd *); + int (*get_frame_number)(struct usb_hcd *); + int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t); + int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); + int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t); + void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); + void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); + void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); + int (*hub_status_data)(struct usb_hcd *, char *); + int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); + int (*bus_suspend)(struct usb_hcd *); + int (*bus_resume)(struct usb_hcd *); + int (*start_port_reset)(struct usb_hcd *, unsigned int); + long unsigned int (*get_resuming_ports)(struct usb_hcd *); + void (*relinquish_port)(struct usb_hcd *, int); + int (*port_handed_over)(struct usb_hcd *, int); + void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); + int (*alloc_dev)(struct usb_hcd *, struct usb_device *); + void (*free_dev)(struct usb_hcd *, struct usb_device *); + int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t); + int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, 
gfp_t); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*address_device)(struct usb_hcd *, struct usb_device *, unsigned int); + int (*enable_device)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int (*reset_device)(struct usb_hcd *, struct usb_device *); + int (*update_device)(struct usb_hcd *, struct usb_device *); + int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); + int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*find_raw_port_number)(struct usb_hcd *, int); + int (*port_power)(struct usb_hcd *, int, bool); + int (*submit_single_step_set_feature)(struct usb_hcd *, struct urb *, int); +}; + +typedef __kernel_long_t __kernel_off_t; + +typedef __kernel_off_t off_t; + +struct usb_ctrlrequest { + __u8 bRequestType; + __u8 bRequest; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +}; + +struct xhci_cap_regs { + __le32 hc_capbase; + __le32 hcs_params1; + __le32 hcs_params2; + __le32 hcs_params3; + __le32 hcc_params; + __le32 db_off; + __le32 run_regs_off; + __le32 hcc_params2; +}; + +struct xhci_op_regs { + __le32 command; + __le32 status; + __le32 page_size; + __le32 reserved1; + __le32 reserved2; + __le32 dev_notification; + __le64 cmd_ring; + __le32 reserved3[4]; + __le64 dcbaa_ptr; + __le32 config_reg; + __le32 reserved4[241]; + __le32 port_status_base; + __le32 port_power_base; + __le32 port_link_base; + __le32 reserved5; + __le32 reserved6[1016]; +}; + +struct xhci_intr_reg { + __le32 irq_pending; + __le32 irq_control; + __le32 erst_size; + __le32 rsvd; + __le64 erst_base; + __le64 erst_dequeue; +}; + +struct xhci_run_regs { + __le32 microframe_index; + __le32 rsvd[7]; + struct xhci_intr_reg ir_set[128]; +}; + +struct xhci_doorbell_array { + __le32 doorbell[256]; +}; + +struct xhci_container_ctx { + unsigned int type; + int size; + u8 *bytes; + dma_addr_t dma; +}; + +struct xhci_slot_ctx { + __le32 dev_info; + __le32 dev_info2; + __le32 tt_info; + __le32 dev_state; + __le32 reserved[4]; +}; + +struct xhci_ep_ctx { + __le32 ep_info; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + __le32 reserved[3]; +}; + +struct xhci_input_control_ctx { + __le32 drop_flags; + __le32 add_flags; + __le32 rsvd2[6]; +}; + +union xhci_trb; + +struct xhci_command { + struct xhci_container_ctx *in_ctx; + u32 status; + int slot_id; + struct completion *completion; + union xhci_trb *command_trb; + struct list_head cmd_list; + unsigned int timeout_ms; +}; + +struct xhci_link_trb { + __le64 segment_ptr; + __le32 intr_target; + __le32 control; +}; + +struct xhci_transfer_event { + __le64 buffer; + __le32 transfer_len; + __le32 flags; +}; + +struct xhci_event_cmd { + __le64 cmd_trb; + __le32 status; + __le32 flags; +}; + +struct xhci_generic_trb { + __le32 field[4]; +}; + +union xhci_trb { + struct xhci_link_trb link; + struct xhci_transfer_event trans_event; + struct xhci_event_cmd event_cmd; + struct xhci_generic_trb generic; +}; + +struct xhci_stream_ctx { + __le64 stream_ring; + __le32 reserved[2]; +}; + +struct xhci_ring; + +struct xhci_stream_info { + struct xhci_ring **stream_rings; + 
unsigned int num_streams; + struct xhci_stream_ctx *stream_ctx_array; + unsigned int num_stream_ctxs; + dma_addr_t ctx_array_dma; + struct xarray trb_address_map; + struct xhci_command *free_streams_command; +}; + +enum xhci_ring_type { + TYPE_CTRL = 0, + TYPE_ISOC = 1, + TYPE_BULK = 2, + TYPE_INTR = 3, + TYPE_STREAM = 4, + TYPE_COMMAND = 5, + TYPE_EVENT = 6, +}; + +struct xhci_segment; + +struct xhci_ring { + struct xhci_segment *first_seg; + struct xhci_segment *last_seg; + union xhci_trb *enqueue; + struct xhci_segment *enq_seg; + union xhci_trb *dequeue; + struct xhci_segment *deq_seg; + struct list_head td_list; + u32 cycle_state; + unsigned int stream_id; + unsigned int num_segs; + unsigned int num_trbs_free; + unsigned int bounce_buf_len; + enum xhci_ring_type type; + bool last_td_was_short; + struct xarray *trb_address_map; +}; + +struct xhci_bw_info { + unsigned int ep_interval; + unsigned int mult; + unsigned int num_packets; + unsigned int max_packet_size; + unsigned int max_esit_payload; + unsigned int type; +}; + +struct xhci_virt_device; + +struct xhci_hcd; + +struct xhci_virt_ep { + struct xhci_virt_device *vdev; + unsigned int ep_index; + struct xhci_ring *ring; + struct xhci_stream_info *stream_info; + struct xhci_ring *new_ring; + unsigned int err_count; + unsigned int ep_state; + struct list_head cancelled_td_list; + struct xhci_hcd *xhci; + struct xhci_segment *queued_deq_seg; + union xhci_trb *queued_deq_ptr; + bool skip; + struct xhci_bw_info bw_info; + struct list_head bw_endpoint_list; + long unsigned int stop_time; + int next_frame_id; + bool use_extended_tbc; +}; + +struct xhci_port; + +struct xhci_interval_bw_table; + +struct xhci_tt_bw_info; + +struct xhci_virt_device { + int slot_id; + struct usb_device *udev; + struct xhci_container_ctx *out_ctx; + struct xhci_container_ctx *in_ctx; + struct xhci_virt_ep eps[31]; + struct xhci_port *rhub_port; + struct xhci_interval_bw_table *bw_table; + struct xhci_tt_bw_info *tt_info; + long unsigned int flags; + u16 current_mel; + void *debugfs_private; +}; + +struct s3_save { + u32 command; + u32 dev_nt; + u64 dcbaa_ptr; + u32 config_reg; +}; + +struct xhci_bus_state { + long unsigned int bus_suspended; + long unsigned int next_statechange; + u32 port_c_suspend; + u32 suspended_ports; + u32 port_remote_wakeup; + long unsigned int resuming_ports; +}; + +struct xhci_hub { + struct xhci_port **ports; + unsigned int num_ports; + struct usb_hcd *hcd; + struct xhci_bus_state bus_state; + u8 maj_rev; + u8 min_rev; +}; + +struct xhci_device_context_array; + +struct xhci_interrupter; + +struct xhci_scratchpad; + +struct xhci_root_port_bw_info; + +struct xhci_port_cap; + +struct xhci_hcd { + struct usb_hcd *main_hcd; + struct usb_hcd *shared_hcd; + struct xhci_cap_regs *cap_regs; + struct xhci_op_regs *op_regs; + struct xhci_run_regs *run_regs; + struct xhci_doorbell_array *dba; + __u32 hcs_params1; + __u32 hcs_params2; + __u32 hcs_params3; + __u32 hcc_params; + __u32 hcc_params2; + spinlock_t lock; + u16 hci_version; + u16 max_interrupters; + u32 imod_interval; + int page_size; + int page_shift; + int nvecs; + struct clk *clk; + struct clk *reg_clk; + struct reset_control *reset; + struct xhci_device_context_array *dcbaa; + struct xhci_interrupter **interrupters; + struct xhci_ring *cmd_ring; + unsigned int cmd_ring_state; + struct list_head cmd_list; + unsigned int cmd_ring_reserved_trbs; + struct delayed_work cmd_timer; + struct completion cmd_ring_stop_completion; + struct xhci_command *current_cmd; + struct xhci_scratchpad 
*scratchpad; + struct mutex mutex; + struct xhci_virt_device *devs[256]; + struct xhci_root_port_bw_info *rh_bw; + struct dma_pool *device_pool; + struct dma_pool *segment_pool; + struct dma_pool *small_streams_pool; + struct dma_pool *medium_streams_pool; + unsigned int xhc_state; + long unsigned int run_graceperiod; + struct s3_save s3; + long long unsigned int quirks; + unsigned int num_active_eps; + unsigned int limit_active_eps; + struct xhci_port *hw_ports; + struct xhci_hub usb2_rhub; + struct xhci_hub usb3_rhub; + unsigned int hw_lpm_support: 1; + unsigned int broken_suspend: 1; + unsigned int allow_single_roothub: 1; + struct xhci_port_cap *port_caps; + unsigned int num_port_caps; + struct timer_list comp_mode_recovery_timer; + u32 port_status_u0; + u16 test_mode; + struct dentry *debugfs_root; + struct dentry *debugfs_slots; + struct list_head regset_list; + void *dbc; + long unsigned int priv[0]; +}; + +struct xhci_segment { + union xhci_trb *trbs; + struct xhci_segment *next; + unsigned int num; + dma_addr_t dma; + dma_addr_t bounce_dma; + void *bounce_buf; + unsigned int bounce_offs; + unsigned int bounce_len; +}; + +struct xhci_interval_bw { + unsigned int num_packets; + struct list_head endpoints; + unsigned int overhead[3]; +}; + +struct xhci_interval_bw_table { + unsigned int interval0_esit_payload; + struct xhci_interval_bw interval_bw[16]; + unsigned int bw_used; + unsigned int ss_bw_in; + unsigned int ss_bw_out; +}; + +struct xhci_port { + __le32 *addr; + int hw_portnum; + int hcd_portnum; + struct xhci_hub *rhub; + struct xhci_port_cap *port_cap; + unsigned int lpm_incapable: 1; + long unsigned int resume_timestamp; + bool rexit_active; + int slot_id; + struct completion rexit_done; + struct completion u3exit_done; +}; + +struct xhci_tt_bw_info { + struct list_head tt_list; + int slot_id; + int ttport; + struct xhci_interval_bw_table bw_table; + int active_eps; +}; + +struct xhci_root_port_bw_info { + struct list_head tts; + unsigned int num_active_tts; + struct xhci_interval_bw_table bw_table; +}; + +struct xhci_device_context_array { + __le64 dev_context_ptrs[256]; + dma_addr_t dma; +}; + +enum xhci_ep_reset_type { + EP_HARD_RESET = 0, + EP_SOFT_RESET = 1, +}; + +enum xhci_setup_dev { + SETUP_CONTEXT_ONLY = 0, + SETUP_CONTEXT_ADDRESS = 1, +}; + +enum xhci_cancelled_td_status { + TD_DIRTY = 0, + TD_HALTED = 1, + TD_CLEARING_CACHE = 2, + TD_CLEARING_CACHE_DEFERRED = 3, + TD_CLEARED = 4, +}; + +struct xhci_td { + struct list_head td_list; + struct list_head cancelled_td_list; + int status; + enum xhci_cancelled_td_status cancel_status; + struct urb *urb; + struct xhci_segment *start_seg; + union xhci_trb *start_trb; + struct xhci_segment *end_seg; + union xhci_trb *end_trb; + struct xhci_segment *bounce_seg; + bool urb_length_set; + bool error_mid_td; +}; + +struct xhci_erst_entry { + __le64 seg_addr; + __le32 seg_size; + __le32 rsvd; +}; + +struct xhci_erst { + struct xhci_erst_entry *entries; + unsigned int num_entries; + dma_addr_t erst_dma_addr; +}; + +struct xhci_scratchpad { + u64 *sp_array; + dma_addr_t sp_dma; + void **sp_buffers; +}; + +struct urb_priv { + int num_tds; + int num_tds_done; + struct xhci_td td[0]; +}; + +struct xhci_interrupter { + struct xhci_ring *event_ring; + struct xhci_erst erst; + struct xhci_intr_reg *ir_set; + unsigned int intr_num; + bool ip_autoclear; + u32 isoc_bei_interval; + u32 s3_irq_pending; + u32 s3_irq_control; + u32 s3_erst_size; + u64 s3_erst_base; + u64 s3_erst_dequeue; +}; + +struct xhci_port_cap { + u32 *psi; + u8 
psi_count; + u8 psi_uid_count; + u8 maj_rev; + u8 min_rev; + u32 protocol_caps; +}; + +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO = 1, + INPUT_CLK_BOOT = 2, + INPUT_CLK_MAX = 3, +}; + +enum rc_proto { + RC_PROTO_UNKNOWN = 0, + RC_PROTO_OTHER = 1, + RC_PROTO_RC5 = 2, + RC_PROTO_RC5X_20 = 3, + RC_PROTO_RC5_SZ = 4, + RC_PROTO_JVC = 5, + RC_PROTO_SONY12 = 6, + RC_PROTO_SONY15 = 7, + RC_PROTO_SONY20 = 8, + RC_PROTO_NEC = 9, + RC_PROTO_NECX = 10, + RC_PROTO_NEC32 = 11, + RC_PROTO_SANYO = 12, + RC_PROTO_MCIR2_KBD = 13, + RC_PROTO_MCIR2_MSE = 14, + RC_PROTO_RC6_0 = 15, + RC_PROTO_RC6_6A_20 = 16, + RC_PROTO_RC6_6A_24 = 17, + RC_PROTO_RC6_6A_32 = 18, + RC_PROTO_RC6_MCE = 19, + RC_PROTO_SHARP = 20, + RC_PROTO_XMP = 21, + RC_PROTO_CEC = 22, + RC_PROTO_IMON = 23, + RC_PROTO_RCMM12 = 24, + RC_PROTO_RCMM24 = 25, + RC_PROTO_RCMM32 = 26, + RC_PROTO_XBOX_DVD = 27, + RC_PROTO_MAX = 27, +}; + +struct rc_map_table { + u64 scancode; + u32 keycode; +}; + +struct rc_map { + struct rc_map_table *scan; + unsigned int size; + unsigned int len; + unsigned int alloc; + enum rc_proto rc_proto; + const char *name; + spinlock_t lock; +}; + +struct rc_map_list { + struct list_head list; + struct rc_map map; +}; + +enum kobject_action { + KOBJ_ADD = 0, + KOBJ_REMOVE = 1, + KOBJ_CHANGE = 2, + KOBJ_MOVE = 3, + KOBJ_ONLINE = 4, + KOBJ_OFFLINE = 5, + KOBJ_BIND = 6, + KOBJ_UNBIND = 7, +}; + +typedef int (*device_match_t)(struct device *, const void *); + +enum { + POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, + POWER_SUPPLY_TECHNOLOGY_NiMH = 1, + POWER_SUPPLY_TECHNOLOGY_LION = 2, + POWER_SUPPLY_TECHNOLOGY_LIPO = 3, + POWER_SUPPLY_TECHNOLOGY_LiFe = 4, + POWER_SUPPLY_TECHNOLOGY_NiCd = 5, + POWER_SUPPLY_TECHNOLOGY_LiMn = 6, +}; + +enum { + POWER_SUPPLY_SCOPE_UNKNOWN = 0, + POWER_SUPPLY_SCOPE_SYSTEM = 1, + POWER_SUPPLY_SCOPE_DEVICE = 2, +}; + +enum power_supply_property { + POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE = 1, + POWER_SUPPLY_PROP_HEALTH = 2, + POWER_SUPPLY_PROP_PRESENT = 3, + POWER_SUPPLY_PROP_ONLINE = 4, + POWER_SUPPLY_PROP_AUTHENTIC = 5, + POWER_SUPPLY_PROP_TECHNOLOGY = 6, + POWER_SUPPLY_PROP_CYCLE_COUNT = 7, + POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, + POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, + POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, + POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, + POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, + POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, + POWER_SUPPLY_PROP_CURRENT_MAX = 16, + POWER_SUPPLY_PROP_CURRENT_NOW = 17, + POWER_SUPPLY_PROP_CURRENT_AVG = 18, + POWER_SUPPLY_PROP_CURRENT_BOOT = 19, + POWER_SUPPLY_PROP_POWER_NOW = 20, + POWER_SUPPLY_PROP_POWER_AVG = 21, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, + POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, + POWER_SUPPLY_PROP_CHARGE_FULL = 24, + POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, + POWER_SUPPLY_PROP_CHARGE_NOW = 26, + POWER_SUPPLY_PROP_CHARGE_AVG = 27, + POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34, + POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD = 35, + POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD = 36, + POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR = 37, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 38, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT = 39, + 
POWER_SUPPLY_PROP_INPUT_POWER_LIMIT = 40, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 41, + POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 42, + POWER_SUPPLY_PROP_ENERGY_FULL = 43, + POWER_SUPPLY_PROP_ENERGY_EMPTY = 44, + POWER_SUPPLY_PROP_ENERGY_NOW = 45, + POWER_SUPPLY_PROP_ENERGY_AVG = 46, + POWER_SUPPLY_PROP_CAPACITY = 47, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 48, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 49, + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN = 50, + POWER_SUPPLY_PROP_CAPACITY_LEVEL = 51, + POWER_SUPPLY_PROP_TEMP = 52, + POWER_SUPPLY_PROP_TEMP_MAX = 53, + POWER_SUPPLY_PROP_TEMP_MIN = 54, + POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 55, + POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 56, + POWER_SUPPLY_PROP_TEMP_AMBIENT = 57, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 58, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 59, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 60, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 61, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 62, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 63, + POWER_SUPPLY_PROP_TYPE = 64, + POWER_SUPPLY_PROP_USB_TYPE = 65, + POWER_SUPPLY_PROP_SCOPE = 66, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 67, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 68, + POWER_SUPPLY_PROP_CALIBRATE = 69, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR = 70, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH = 71, + POWER_SUPPLY_PROP_MANUFACTURE_DAY = 72, + POWER_SUPPLY_PROP_MODEL_NAME = 73, + POWER_SUPPLY_PROP_MANUFACTURER = 74, + POWER_SUPPLY_PROP_SERIAL_NUMBER = 75, +}; + +enum power_supply_type { + POWER_SUPPLY_TYPE_UNKNOWN = 0, + POWER_SUPPLY_TYPE_BATTERY = 1, + POWER_SUPPLY_TYPE_UPS = 2, + POWER_SUPPLY_TYPE_MAINS = 3, + POWER_SUPPLY_TYPE_USB = 4, + POWER_SUPPLY_TYPE_USB_DCP = 5, + POWER_SUPPLY_TYPE_USB_CDP = 6, + POWER_SUPPLY_TYPE_USB_ACA = 7, + POWER_SUPPLY_TYPE_USB_TYPE_C = 8, + POWER_SUPPLY_TYPE_USB_PD = 9, + POWER_SUPPLY_TYPE_USB_PD_DRP = 10, + POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, + POWER_SUPPLY_TYPE_WIRELESS = 12, +}; + +enum power_supply_notifier_events { + PSY_EVENT_PROP_CHANGED = 0, +}; + +union power_supply_propval { + int intval; + const char *strval; +}; + +struct power_supply_config { + struct device_node *of_node; + struct fwnode_handle *fwnode; + void *drv_data; + const struct attribute_group **attr_grp; + char **supplied_to; + size_t num_supplicants; + bool no_wakeup_source; +}; + +struct power_supply; + +struct power_supply_desc { + const char *name; + enum power_supply_type type; + u8 charge_behaviours; + u32 usb_types; + const enum power_supply_property *properties; + size_t num_properties; + int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *); + int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *); + int (*property_is_writeable)(struct power_supply *, enum power_supply_property); + void (*external_power_changed)(struct power_supply *); + void (*set_charged)(struct power_supply *); + bool no_thermal; + int use_for_apm; +}; + +struct power_supply_battery_info; + +struct thermal_zone_device; + +struct power_supply { + const struct power_supply_desc *desc; + char **supplied_to; + size_t num_supplicants; + char **supplied_from; + size_t num_supplies; + struct device_node *of_node; + void *drv_data; + struct device dev; + struct work_struct changed_work; + struct delayed_work deferred_register_work; + spinlock_t changed_lock; + bool changed; + bool initialized; + bool removing; + atomic_t use_cnt; + struct power_supply_battery_info *battery_info; + struct thermal_zone_device *tzd; + struct 
thermal_cooling_device *tcd; +}; + +struct power_supply_maintenance_charge_table; + +struct power_supply_battery_ocv_table; + +struct power_supply_resistance_temp_table; + +struct power_supply_vbat_ri_table; + +struct power_supply_battery_info { + unsigned int technology; + int energy_full_design_uwh; + int charge_full_design_uah; + int voltage_min_design_uv; + int voltage_max_design_uv; + int tricklecharge_current_ua; + int precharge_current_ua; + int precharge_voltage_max_uv; + int charge_term_current_ua; + int charge_restart_voltage_uv; + int overvoltage_limit_uv; + int constant_charge_current_max_ua; + int constant_charge_voltage_max_uv; + const struct power_supply_maintenance_charge_table *maintenance_charge; + int maintenance_charge_size; + int alert_low_temp_charge_current_ua; + int alert_low_temp_charge_voltage_uv; + int alert_high_temp_charge_current_ua; + int alert_high_temp_charge_voltage_uv; + int factory_internal_resistance_uohm; + int factory_internal_resistance_charging_uohm; + int ocv_temp[20]; + int temp_ambient_alert_min; + int temp_ambient_alert_max; + int temp_alert_min; + int temp_alert_max; + int temp_min; + int temp_max; + const struct power_supply_battery_ocv_table *ocv_table[20]; + int ocv_table_size[20]; + const struct power_supply_resistance_temp_table *resist_table; + int resist_table_size; + const struct power_supply_vbat_ri_table *vbat2ri_discharging; + int vbat2ri_discharging_size; + const struct power_supply_vbat_ri_table *vbat2ri_charging; + int vbat2ri_charging_size; + int bti_resistance_ohm; + int bti_resistance_tolerance; +}; + +struct thermal_cooling_device_ops; + +struct thermal_cooling_device { + int id; + const char *type; + long unsigned int max_state; + struct device device; + struct device_node *np; + void *devdata; + void *stats; + const struct thermal_cooling_device_ops *ops; + bool updated; + struct mutex lock; + struct list_head thermal_instances; + struct list_head node; +}; + +struct power_supply_battery_ocv_table { + int ocv; + int capacity; +}; + +struct power_supply_resistance_temp_table { + int temp; + int resistance; +}; + +struct power_supply_vbat_ri_table { + int vbat_uv; + int ri_uohm; +}; + +struct power_supply_maintenance_charge_table { + int charge_current_max_ua; + int charge_voltage_max_uv; + int charge_safety_timer_minutes; +}; + +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED = 1, +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE = 1, + THERMAL_TRIP_HOT = 2, + THERMAL_TRIP_CRITICAL = 3, +}; + +enum thermal_trend { + THERMAL_TREND_STABLE = 0, + THERMAL_TREND_RAISING = 1, + THERMAL_TREND_DROPPING = 2, +}; + +struct thermal_trip { + int temperature; + int hysteresis; + enum thermal_trip_type type; + u8 flags; + void *priv; +}; + +struct cooling_spec { + long unsigned int upper; + long unsigned int lower; + unsigned int weight; +}; + +struct thermal_zone_device_ops { + bool (*should_bind)(struct thermal_zone_device *, const struct thermal_trip *, struct thermal_cooling_device *, struct cooling_spec *); + int (*get_temp)(struct thermal_zone_device *, int *); + int (*set_trips)(struct thermal_zone_device *, int, int); + int (*change_mode)(struct thermal_zone_device *, enum thermal_device_mode); + int (*set_trip_temp)(struct thermal_zone_device *, const struct thermal_trip *, int); + int (*get_crit_temp)(struct thermal_zone_device *, int *); + int (*set_emul_temp)(struct thermal_zone_device *, int); + int (*get_trend)(struct thermal_zone_device *, const struct 
thermal_trip *, enum thermal_trend *); + void (*hot)(struct thermal_zone_device *); + void (*critical)(struct thermal_zone_device *); +}; + +struct thermal_cooling_device_ops { + int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *); + int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *); + int (*set_cur_state)(struct thermal_cooling_device *, long unsigned int); + int (*get_requested_power)(struct thermal_cooling_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, long unsigned int, u32 *); + int (*power2state)(struct thermal_cooling_device *, u32, long unsigned int *); +}; + +struct thermal_zone_params { + const char *governor_name; + bool no_hwmon; + u32 sustainable_power; + s32 k_po; + s32 k_pu; + s32 k_i; + s32 k_d; + s32 integral_cutoff; + int slope; + int offset; +}; + +struct psy_am_i_supplied_data { + struct power_supply *psy; + unsigned int count; +}; + +struct psy_get_supplier_prop_data { + struct power_supply *psy; + enum power_supply_property psp; + union power_supply_propval *val; +}; + +struct input_id { + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; +}; + +struct input_absinfo { + __s32 value; + __s32 minimum; + __s32 maximum; + __s32 fuzz; + __s32 flat; + __s32 resolution; +}; + +struct input_keymap_entry { + __u8 flags; + __u8 len; + __u16 index; + __u32 keycode; + __u8 scancode[32]; +}; + +struct ff_replay { + __u16 length; + __u16 delay; +}; + +struct ff_trigger { + __u16 button; + __u16 interval; +}; + +struct ff_envelope { + __u16 attack_length; + __u16 attack_level; + __u16 fade_length; + __u16 fade_level; +}; + +struct ff_constant_effect { + __s16 level; + struct ff_envelope envelope; +}; + +struct ff_ramp_effect { + __s16 start_level; + __s16 end_level; + struct ff_envelope envelope; +}; + +struct ff_condition_effect { + __u16 right_saturation; + __u16 left_saturation; + __s16 right_coeff; + __s16 left_coeff; + __u16 deadband; + __s16 center; +}; + +struct ff_periodic_effect { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 custom_len; + __s16 *custom_data; +}; + +struct ff_rumble_effect { + __u16 strong_magnitude; + __u16 weak_magnitude; +}; + +struct ff_effect { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +struct usb_device_id { + __u16 match_flags; + __u16 idVendor; + __u16 idProduct; + __u16 bcdDevice_lo; + __u16 bcdDevice_hi; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 bInterfaceNumber; + kernel_ulong_t driver_info; +}; + +struct hid_device_id { + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + kernel_ulong_t driver_data; +}; + +struct input_device_id { + kernel_ulong_t flags; + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; + kernel_ulong_t evbit[1]; + kernel_ulong_t keybit[12]; + kernel_ulong_t relbit[1]; + kernel_ulong_t absbit[1]; + kernel_ulong_t mscbit[1]; + kernel_ulong_t ledbit[1]; + kernel_ulong_t sndbit[1]; + kernel_ulong_t ffbit[2]; + kernel_ulong_t swbit[1]; + kernel_ulong_t propbit[1]; + kernel_ulong_t driver_info; +}; + +struct input_value { + __u16 type; + __u16 code; + __s32 value; +}; + 
+struct ff_device; + +struct input_dev_poller; + +struct input_mt; + +struct input_handle; + +struct input_dev { + const char *name; + const char *phys; + const char *uniq; + struct input_id id; + long unsigned int propbit[1]; + long unsigned int evbit[1]; + long unsigned int keybit[12]; + long unsigned int relbit[1]; + long unsigned int absbit[1]; + long unsigned int mscbit[1]; + long unsigned int ledbit[1]; + long unsigned int sndbit[1]; + long unsigned int ffbit[2]; + long unsigned int swbit[1]; + unsigned int hint_events_per_packet; + unsigned int keycodemax; + unsigned int keycodesize; + void *keycode; + int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *); + int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); + struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int repeat_key; + struct timer_list timer; + int rep[2]; + struct input_mt *mt; + struct input_absinfo *absinfo; + long unsigned int key[12]; + long unsigned int led[1]; + long unsigned int snd[1]; + long unsigned int sw[1]; + int (*open)(struct input_dev *); + void (*close)(struct input_dev *); + int (*flush)(struct input_dev *, struct file *); + int (*event)(struct input_dev *, unsigned int, unsigned int, int); + struct input_handle *grab; + spinlock_t event_lock; + struct mutex mutex; + unsigned int users; + bool going_away; + struct device dev; + struct list_head h_list; + struct list_head node; + unsigned int num_vals; + unsigned int max_vals; + struct input_value *vals; + bool devres_managed; + ktime_t timestamp[3]; + bool inhibited; +}; + +struct ff_device { + int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); + int (*erase)(struct input_dev *, int); + int (*playback)(struct input_dev *, int, int); + void (*set_gain)(struct input_dev *, u16); + void (*set_autocenter)(struct input_dev *, u16); + void (*destroy)(struct ff_device *); + void *private; + long unsigned int ffbit[2]; + struct mutex mutex; + int max_effects; + struct ff_effect *effects; + struct file *effect_owners[0]; +}; + +struct input_handler; + +struct input_handle { + void *private; + int open; + const char *name; + struct input_dev *dev; + struct input_handler *handler; + unsigned int (*handle_events)(struct input_handle *, struct input_value *, unsigned int); + struct list_head d_node; + struct list_head h_node; +}; + +struct input_handler { + void *private; + void (*event)(struct input_handle *, unsigned int, unsigned int, int); + unsigned int (*events)(struct input_handle *, struct input_value *, unsigned int); + bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); + bool (*match)(struct input_handler *, struct input_dev *); + int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *); + void (*disconnect)(struct input_handle *); + void (*start)(struct input_handle *); + bool passive_observer; + bool legacy_minors; + int minor; + const char *name; + const struct input_device_id *id_table; + struct list_head h_list; + struct list_head node; +}; + +struct usb_dynids { + struct list_head list; +}; + +struct usb_driver { + const char *name; + int (*probe)(struct usb_interface *, const struct usb_device_id *); + void (*disconnect)(struct usb_interface *); + int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *); + int (*suspend)(struct usb_interface *, pm_message_t); + int (*resume)(struct usb_interface *); + int (*reset_resume)(struct usb_interface *); + int (*pre_reset)(struct usb_interface *); + int 
(*post_reset)(struct usb_interface *); + void (*shutdown)(struct usb_interface *); + const struct usb_device_id *id_table; + const struct attribute_group **dev_groups; + struct usb_dynids dynids; + struct device_driver driver; + unsigned int no_dynamic_id: 1; + unsigned int supports_autosuspend: 1; + unsigned int disable_hub_initiated_lpm: 1; + unsigned int soft_unbind: 1; +}; + +enum hid_report_type { + HID_INPUT_REPORT = 0, + HID_OUTPUT_REPORT = 1, + HID_FEATURE_REPORT = 2, + HID_REPORT_TYPES = 3, +}; + +enum hid_class_request { + HID_REQ_GET_REPORT = 1, + HID_REQ_GET_IDLE = 2, + HID_REQ_GET_PROTOCOL = 3, + HID_REQ_SET_REPORT = 9, + HID_REQ_SET_IDLE = 10, + HID_REQ_SET_PROTOCOL = 11, +}; + +enum bpf_type_flag { + PTR_MAYBE_NULL = 256, + MEM_RDONLY = 512, + MEM_RINGBUF = 1024, + MEM_USER = 2048, + MEM_PERCPU = 4096, + OBJ_RELEASE = 8192, + PTR_UNTRUSTED = 16384, + MEM_UNINIT = 32768, + DYNPTR_TYPE_LOCAL = 65536, + DYNPTR_TYPE_RINGBUF = 131072, + MEM_FIXED_SIZE = 262144, + MEM_ALLOC = 524288, + PTR_TRUSTED = 1048576, + MEM_RCU = 2097152, + NON_OWN_REF = 4194304, + DYNPTR_TYPE_SKB = 8388608, + DYNPTR_TYPE_XDP = 16777216, + MEM_ALIGNED = 33554432, + MEM_WRITE = 67108864, + __BPF_TYPE_FLAG_MAX = 67108865, + __BPF_TYPE_LAST_FLAG = 67108864, +}; + +enum bpf_arg_type { + ARG_DONTCARE = 0, + ARG_CONST_MAP_PTR = 1, + ARG_PTR_TO_MAP_KEY = 2, + ARG_PTR_TO_MAP_VALUE = 3, + ARG_PTR_TO_MEM = 4, + ARG_PTR_TO_ARENA = 5, + ARG_CONST_SIZE = 6, + ARG_CONST_SIZE_OR_ZERO = 7, + ARG_PTR_TO_CTX = 8, + ARG_ANYTHING = 9, + ARG_PTR_TO_SPIN_LOCK = 10, + ARG_PTR_TO_SOCK_COMMON = 11, + ARG_PTR_TO_SOCKET = 12, + ARG_PTR_TO_BTF_ID = 13, + ARG_PTR_TO_RINGBUF_MEM = 14, + ARG_CONST_ALLOC_SIZE_OR_ZERO = 15, + ARG_PTR_TO_BTF_ID_SOCK_COMMON = 16, + ARG_PTR_TO_PERCPU_BTF_ID = 17, + ARG_PTR_TO_FUNC = 18, + ARG_PTR_TO_STACK = 19, + ARG_PTR_TO_CONST_STR = 20, + ARG_PTR_TO_TIMER = 21, + ARG_KPTR_XCHG_DEST = 22, + ARG_PTR_TO_DYNPTR = 23, + __BPF_ARG_TYPE_MAX = 24, + ARG_PTR_TO_MAP_VALUE_OR_NULL = 259, + ARG_PTR_TO_MEM_OR_NULL = 260, + ARG_PTR_TO_CTX_OR_NULL = 264, + ARG_PTR_TO_SOCKET_OR_NULL = 268, + ARG_PTR_TO_STACK_OR_NULL = 275, + ARG_PTR_TO_BTF_ID_OR_NULL = 269, + ARG_PTR_TO_UNINIT_MEM = 67141636, + ARG_PTR_TO_FIXED_SIZE_MEM = 262148, + __BPF_ARG_TYPE_LIMIT = 134217727, +}; + +enum bpf_return_type { + RET_INTEGER = 0, + RET_VOID = 1, + RET_PTR_TO_MAP_VALUE = 2, + RET_PTR_TO_SOCKET = 3, + RET_PTR_TO_TCP_SOCK = 4, + RET_PTR_TO_SOCK_COMMON = 5, + RET_PTR_TO_MEM = 6, + RET_PTR_TO_MEM_OR_BTF_ID = 7, + RET_PTR_TO_BTF_ID = 8, + __BPF_RET_TYPE_MAX = 9, + RET_PTR_TO_MAP_VALUE_OR_NULL = 258, + RET_PTR_TO_SOCKET_OR_NULL = 259, + RET_PTR_TO_TCP_SOCK_OR_NULL = 260, + RET_PTR_TO_SOCK_COMMON_OR_NULL = 261, + RET_PTR_TO_RINGBUF_MEM_OR_NULL = 1286, + RET_PTR_TO_DYNPTR_MEM_OR_NULL = 262, + RET_PTR_TO_BTF_ID_OR_NULL = 264, + RET_PTR_TO_BTF_ID_TRUSTED = 1048584, + __BPF_RET_TYPE_LIMIT = 134217727, +}; + +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED = 0, + BPF_CGROUP_STORAGE_PERCPU = 1, + __BPF_CGROUP_STORAGE_MAX = 2, +}; + +enum { + BPF_MAX_TRAMP_LINKS = 38, +}; + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY = 0, + BPF_TRAMP_FEXIT = 1, + BPF_TRAMP_MODIFY_RETURN = 2, + BPF_TRAMP_MAX = 3, + BPF_TRAMP_REPLACE = 4, +}; + +enum hid_type { + HID_TYPE_OTHER = 0, + HID_TYPE_USBMOUSE = 1, + HID_TYPE_USBNONE = 2, +}; + +struct hid_report; + +struct hid_report_enum { + unsigned int numbered; + struct list_head report_list; + struct hid_report *report_id_hash[256]; +}; + +struct hid_collection; + +struct hid_driver; + +struct hid_ll_driver; + 
+struct hid_field; + +struct hid_usage; + +struct hid_device { + const __u8 *dev_rdesc; + const __u8 *bpf_rdesc; + const __u8 *rdesc; + unsigned int dev_rsize; + unsigned int bpf_rsize; + unsigned int rsize; + unsigned int collection_size; + struct hid_collection *collection; + unsigned int maxcollection; + unsigned int maxapplication; + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + __u32 version; + enum hid_type type; + unsigned int country; + struct hid_report_enum report_enum[3]; + struct work_struct led_work; + struct semaphore driver_input_lock; + struct device dev; + struct hid_driver *driver; + void *devres_group_id; + const struct hid_ll_driver *ll_driver; + struct mutex ll_open_lock; + unsigned int ll_open_count; + long unsigned int status; + unsigned int claimed; + unsigned int quirks; + unsigned int initial_quirks; + bool io_started; + struct list_head inputs; + void *hiddev; + void *hidraw; + char name[128]; + char phys[64]; + char uniq[64]; + void *driver_data; + int (*ff_init)(struct hid_device *); + int (*hiddev_connect)(struct hid_device *, unsigned int); + void (*hiddev_disconnect)(struct hid_device *); + void (*hiddev_hid_event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); + void (*hiddev_report_event)(struct hid_device *, struct hid_report *); + short unsigned int debug; + struct dentry *debug_dir; + struct dentry *debug_rdesc; + struct dentry *debug_events; + struct list_head debug_list; + spinlock_t debug_list_lock; + wait_queue_head_t debug_wait; + struct kref ref; + unsigned int id; +}; + +struct hid_field_entry; + +struct hid_report { + struct list_head list; + struct list_head hidinput_list; + struct list_head field_entry_list; + unsigned int id; + enum hid_report_type type; + unsigned int application; + struct hid_field *field[256]; + struct hid_field_entry *field_entries; + unsigned int maxfield; + unsigned int size; + struct hid_device *device; + bool tool_active; + unsigned int tool; +}; + +struct hid_collection { + int parent_idx; + unsigned int type; + unsigned int usage; + unsigned int level; +}; + +struct hid_usage { + unsigned int hid; + unsigned int collection_index; + unsigned int usage_index; + __s8 resolution_multiplier; + __s8 wheel_factor; + __u16 code; + __u8 type; + __s16 hat_min; + __s16 hat_max; + __s16 hat_dir; + __s16 wheel_accumulated; +}; + +struct hid_input; + +struct hid_field { + unsigned int physical; + unsigned int logical; + unsigned int application; + struct hid_usage *usage; + unsigned int maxusage; + unsigned int flags; + unsigned int report_offset; + unsigned int report_size; + unsigned int report_count; + unsigned int report_type; + __s32 *value; + __s32 *new_value; + __s32 *usages_priorities; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned int unit; + bool ignored; + struct hid_report *report; + unsigned int index; + struct hid_input *hidinput; + __u16 dpad; + unsigned int slot_idx; +}; + +struct hid_input { + struct list_head list; + struct hid_report *report; + struct input_dev *input; + const char *name; + struct list_head reports; + unsigned int application; + bool registered; +}; + +struct hid_field_entry { + struct list_head list; + struct hid_field *field; + unsigned int index; + __s32 priority; +}; + +struct hid_control_fifo { + unsigned char dir; + struct hid_report *report; + char *raw_report; +}; + +struct hid_output_fifo { + struct hid_report *report; + char *raw_report; +}; + +struct 
hid_report_id; + +struct hid_usage_id; + +struct hid_driver { + char *name; + const struct hid_device_id *id_table; + struct list_head dyn_list; + spinlock_t dyn_lock; + bool (*match)(struct hid_device *, bool); + int (*probe)(struct hid_device *, const struct hid_device_id *); + void (*remove)(struct hid_device *); + const struct hid_report_id *report_table; + int (*raw_event)(struct hid_device *, struct hid_report *, u8 *, int); + const struct hid_usage_id *usage_table; + int (*event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); + void (*report)(struct hid_device *, struct hid_report *); + const __u8 * (*report_fixup)(struct hid_device *, __u8 *, unsigned int *); + int (*input_mapping)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); + int (*input_mapped)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); + int (*input_configured)(struct hid_device *, struct hid_input *); + void (*feature_mapping)(struct hid_device *, struct hid_field *, struct hid_usage *); + int (*suspend)(struct hid_device *, pm_message_t); + int (*resume)(struct hid_device *); + int (*reset_resume)(struct hid_device *); + struct device_driver driver; +}; + +struct hid_ll_driver { + int (*start)(struct hid_device *); + void (*stop)(struct hid_device *); + int (*open)(struct hid_device *); + void (*close)(struct hid_device *); + int (*power)(struct hid_device *, int); + int (*parse)(struct hid_device *); + void (*request)(struct hid_device *, struct hid_report *, int); + int (*wait)(struct hid_device *); + int (*raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, unsigned char, int); + int (*output_report)(struct hid_device *, __u8 *, size_t); + int (*idle)(struct hid_device *, int, int, int); + bool (*may_wakeup)(struct hid_device *); + unsigned int max_buffer_size; +}; + +struct hid_class_descriptor { + __u8 bDescriptorType; + __le16 wDescriptorLength; +} __attribute__((packed)); + +struct hid_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdHID; + __u8 bCountryCode; + __u8 bNumDescriptors; + struct hid_class_descriptor desc[1]; +} __attribute__((packed)); + +struct hid_report_id { + __u32 report_type; +}; + +struct hid_usage_id { + __u32 usage_hid; + __u32 usage_type; + __u32 usage_code; +}; + +struct usbhid_device { + struct hid_device *hid; + struct usb_interface *intf; + int ifnum; + unsigned int bufsize; + struct urb *urbin; + char *inbuf; + dma_addr_t inbuf_dma; + struct urb *urbctrl; + struct usb_ctrlrequest *cr; + struct hid_control_fifo ctrl[256]; + unsigned char ctrlhead; + unsigned char ctrltail; + char *ctrlbuf; + dma_addr_t ctrlbuf_dma; + long unsigned int last_ctrl; + struct urb *urbout; + struct hid_output_fifo out[256]; + unsigned char outhead; + unsigned char outtail; + char *outbuf; + dma_addr_t outbuf_dma; + long unsigned int last_out; + struct mutex mutex; + spinlock_t lock; + long unsigned int iofl; + struct timer_list io_retry; + long unsigned int stop_retry; + unsigned int retry_delay; + struct work_struct reset_work; + wait_queue_head_t wait; +}; + +enum work_flags { + WORK_STRUCT_PENDING = 1, + WORK_STRUCT_INACTIVE = 2, + WORK_STRUCT_PWQ = 4, + WORK_STRUCT_LINKED = 8, + WORK_STRUCT_STATIC = 0, +}; + +enum net_device_flags { + IFF_UP = 1, + IFF_BROADCAST = 2, + IFF_DEBUG = 4, + IFF_LOOPBACK = 8, + IFF_POINTOPOINT = 16, + IFF_NOTRAILERS = 32, + IFF_RUNNING = 64, + IFF_NOARP = 128, + IFF_PROMISC = 256, + IFF_ALLMULTI = 512, 
+ IFF_MASTER = 1024, + IFF_SLAVE = 2048, + IFF_MULTICAST = 4096, + IFF_PORTSEL = 8192, + IFF_AUTOMEDIA = 16384, + IFF_DYNAMIC = 32768, + IFF_LOWER_UP = 65536, + IFF_DORMANT = 131072, + IFF_ECHO = 262144, +}; + +enum { + IF_OPER_UNKNOWN = 0, + IF_OPER_NOTPRESENT = 1, + IF_OPER_DOWN = 2, + IF_OPER_LOWERLAYERDOWN = 3, + IF_OPER_TESTING = 4, + IF_OPER_DORMANT = 5, + IF_OPER_UP = 6, +}; + +enum { + IF_LINK_MODE_DEFAULT = 0, + IF_LINK_MODE_DORMANT = 1, + IF_LINK_MODE_TESTING = 2, +}; + +enum netdev_state_t { + __LINK_STATE_START = 0, + __LINK_STATE_PRESENT = 1, + __LINK_STATE_NOCARRIER = 2, + __LINK_STATE_LINKWATCH_PENDING = 3, + __LINK_STATE_DORMANT = 4, + __LINK_STATE_TESTING = 5, +}; + +enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1, + IFF_EBRIDGE = 2, + IFF_BONDING = 4, + IFF_ISATAP = 8, + IFF_WAN_HDLC = 16, + IFF_XMIT_DST_RELEASE = 32, + IFF_DONT_BRIDGE = 64, + IFF_DISABLE_NETPOLL = 128, + IFF_MACVLAN_PORT = 256, + IFF_BRIDGE_PORT = 512, + IFF_OVS_DATAPATH = 1024, + IFF_TX_SKB_SHARING = 2048, + IFF_UNICAST_FLT = 4096, + IFF_TEAM_PORT = 8192, + IFF_SUPP_NOFCS = 16384, + IFF_LIVE_ADDR_CHANGE = 32768, + IFF_MACVLAN = 65536, + IFF_XMIT_DST_RELEASE_PERM = 131072, + IFF_L3MDEV_MASTER = 262144, + IFF_NO_QUEUE = 524288, + IFF_OPENVSWITCH = 1048576, + IFF_L3MDEV_SLAVE = 2097152, + IFF_TEAM = 4194304, + IFF_RXFH_CONFIGURED = 8388608, + IFF_PHONY_HEADROOM = 16777216, + IFF_MACSEC = 33554432, + IFF_NO_RX_HANDLER = 67108864, + IFF_FAILOVER = 134217728, + IFF_FAILOVER_SLAVE = 268435456, + IFF_L3MDEV_RX_HANDLER = 536870912, + IFF_NO_ADDRCONF = 1073741824, + IFF_TX_SKB_NO_LINEAR = 2147483648, +}; + +struct netdev_name_node { + struct hlist_node hlist; + struct list_head list; + struct net_device *dev; + const char *name; + struct callback_head rcu; +}; + +enum { + TCA_UNSPEC = 0, + TCA_KIND = 1, + TCA_OPTIONS = 2, + TCA_STATS = 3, + TCA_XSTATS = 4, + TCA_RATE = 5, + TCA_FCNT = 6, + TCA_STATS2 = 7, + TCA_STAB = 8, + TCA_PAD = 9, + TCA_DUMP_INVISIBLE = 10, + TCA_CHAIN = 11, + TCA_HW_OFFLOAD = 12, + TCA_INGRESS_BLOCK = 13, + TCA_EGRESS_BLOCK = 14, + TCA_DUMP_FLAGS = 15, + TCA_EXT_WARN_MSG = 16, + __TCA_MAX = 17, +}; + +struct qdisc_walker { + int stop; + int skip; + int count; + int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *); +}; + +enum lw_bits { + LW_URGENT = 0, +}; + +struct fdtable { + unsigned int max_fds; + struct file **fd; + long unsigned int *close_on_exec; + long unsigned int *open_fds; + long unsigned int *full_fds_bits; + struct callback_head rcu; +}; + +struct files_struct { + atomic_t count; + bool resize_in_progress; + wait_queue_head_t resize_wait; + struct fdtable *fdt; + struct fdtable fdtab; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t file_lock; + unsigned int next_fd; + long unsigned int close_on_exec_init[1]; + long unsigned int open_fds_init[1]; + long unsigned int full_fds_bits_init[1]; + struct file *fd_array[64]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct css_task_iter { + struct cgroup_subsys *ss; + unsigned int flags; + struct list_head *cset_pos; + struct list_head *cset_head; + struct list_head *tcset_pos; + struct list_head *tcset_head; + struct list_head *task_pos; + struct list_head *cur_tasks_head; + struct css_set *cur_cset; + struct css_set *cur_dcset; + struct task_struct *cur_task; + struct list_head iters_node; +}; + +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_NR_TABLES = 2, + NEIGH_LINK_TABLE = 2, +}; + +enum { + NAPI_STATE_SCHED = 0, + NAPI_STATE_MISSED = 1, + NAPI_STATE_DISABLE = 2, + 
NAPI_STATE_NPSVC = 3, + NAPI_STATE_LISTED = 4, + NAPI_STATE_NO_BUSY_POLL = 5, + NAPI_STATE_IN_BUSY_POLL = 6, + NAPI_STATE_PREFER_BUSY_POLL = 7, + NAPI_STATE_THREADED = 8, + NAPI_STATE_SCHED_THREADED = 9, +}; + +enum xps_map_type { + XPS_CPUS = 0, + XPS_RXQS = 1, + XPS_MAPS_MAX = 2, +}; + +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE = 3, +}; + +enum { + NETIF_MSG_DRV_BIT = 0, + NETIF_MSG_PROBE_BIT = 1, + NETIF_MSG_LINK_BIT = 2, + NETIF_MSG_TIMER_BIT = 3, + NETIF_MSG_IFDOWN_BIT = 4, + NETIF_MSG_IFUP_BIT = 5, + NETIF_MSG_RX_ERR_BIT = 6, + NETIF_MSG_TX_ERR_BIT = 7, + NETIF_MSG_TX_QUEUED_BIT = 8, + NETIF_MSG_INTR_BIT = 9, + NETIF_MSG_TX_DONE_BIT = 10, + NETIF_MSG_RX_STATUS_BIT = 11, + NETIF_MSG_PKTDATA_BIT = 12, + NETIF_MSG_HW_BIT = 13, + NETIF_MSG_WOL_BIT = 14, + NETIF_MSG_CLASS_COUNT = 15, +}; + +struct cgroup_cls_state { + struct cgroup_subsys_state css; + u32 classid; +}; + +struct update_classid_context { + u32 classid; + unsigned int batch; +}; + +typedef unsigned int xa_mark_t; + +enum skb_drop_reason { + SKB_NOT_DROPPED_YET = 0, + SKB_CONSUMED = 1, + SKB_DROP_REASON_NOT_SPECIFIED = 2, + SKB_DROP_REASON_NO_SOCKET = 3, + SKB_DROP_REASON_PKT_TOO_SMALL = 4, + SKB_DROP_REASON_TCP_CSUM = 5, + SKB_DROP_REASON_SOCKET_FILTER = 6, + SKB_DROP_REASON_UDP_CSUM = 7, + SKB_DROP_REASON_NETFILTER_DROP = 8, + SKB_DROP_REASON_OTHERHOST = 9, + SKB_DROP_REASON_IP_CSUM = 10, + SKB_DROP_REASON_IP_INHDR = 11, + SKB_DROP_REASON_IP_RPFILTER = 12, + SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST = 13, + SKB_DROP_REASON_XFRM_POLICY = 14, + SKB_DROP_REASON_IP_NOPROTO = 15, + SKB_DROP_REASON_SOCKET_RCVBUFF = 16, + SKB_DROP_REASON_PROTO_MEM = 17, + SKB_DROP_REASON_TCP_AUTH_HDR = 18, + SKB_DROP_REASON_TCP_MD5NOTFOUND = 19, + SKB_DROP_REASON_TCP_MD5UNEXPECTED = 20, + SKB_DROP_REASON_TCP_MD5FAILURE = 21, + SKB_DROP_REASON_TCP_AONOTFOUND = 22, + SKB_DROP_REASON_TCP_AOUNEXPECTED = 23, + SKB_DROP_REASON_TCP_AOKEYNOTFOUND = 24, + SKB_DROP_REASON_TCP_AOFAILURE = 25, + SKB_DROP_REASON_SOCKET_BACKLOG = 26, + SKB_DROP_REASON_TCP_FLAGS = 27, + SKB_DROP_REASON_TCP_ABORT_ON_DATA = 28, + SKB_DROP_REASON_TCP_ZEROWINDOW = 29, + SKB_DROP_REASON_TCP_OLD_DATA = 30, + SKB_DROP_REASON_TCP_OVERWINDOW = 31, + SKB_DROP_REASON_TCP_OFOMERGE = 32, + SKB_DROP_REASON_TCP_RFC7323_PAWS = 33, + SKB_DROP_REASON_TCP_OLD_SEQUENCE = 34, + SKB_DROP_REASON_TCP_INVALID_SEQUENCE = 35, + SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE = 36, + SKB_DROP_REASON_TCP_RESET = 37, + SKB_DROP_REASON_TCP_INVALID_SYN = 38, + SKB_DROP_REASON_TCP_CLOSE = 39, + SKB_DROP_REASON_TCP_FASTOPEN = 40, + SKB_DROP_REASON_TCP_OLD_ACK = 41, + SKB_DROP_REASON_TCP_TOO_OLD_ACK = 42, + SKB_DROP_REASON_TCP_ACK_UNSENT_DATA = 43, + SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE = 44, + SKB_DROP_REASON_TCP_OFO_DROP = 45, + SKB_DROP_REASON_IP_OUTNOROUTES = 46, + SKB_DROP_REASON_BPF_CGROUP_EGRESS = 47, + SKB_DROP_REASON_IPV6DISABLED = 48, + SKB_DROP_REASON_NEIGH_CREATEFAIL = 49, + SKB_DROP_REASON_NEIGH_FAILED = 50, + SKB_DROP_REASON_NEIGH_QUEUEFULL = 51, + SKB_DROP_REASON_NEIGH_DEAD = 52, + SKB_DROP_REASON_TC_EGRESS = 53, + SKB_DROP_REASON_SECURITY_HOOK = 54, + SKB_DROP_REASON_QDISC_DROP = 55, + SKB_DROP_REASON_CPU_BACKLOG = 56, + SKB_DROP_REASON_XDP = 57, + SKB_DROP_REASON_TC_INGRESS = 58, + SKB_DROP_REASON_UNHANDLED_PROTO = 59, + SKB_DROP_REASON_SKB_CSUM = 60, + SKB_DROP_REASON_SKB_GSO_SEG = 61, + SKB_DROP_REASON_SKB_UCOPY_FAULT = 62, + SKB_DROP_REASON_DEV_HDR = 63, + SKB_DROP_REASON_DEV_READY = 64, + SKB_DROP_REASON_FULL_RING = 65, + SKB_DROP_REASON_NOMEM = 66, 
+ SKB_DROP_REASON_HDR_TRUNC = 67, + SKB_DROP_REASON_TAP_FILTER = 68, + SKB_DROP_REASON_TAP_TXFILTER = 69, + SKB_DROP_REASON_ICMP_CSUM = 70, + SKB_DROP_REASON_INVALID_PROTO = 71, + SKB_DROP_REASON_IP_INADDRERRORS = 72, + SKB_DROP_REASON_IP_INNOROUTES = 73, + SKB_DROP_REASON_IP_LOCAL_SOURCE = 74, + SKB_DROP_REASON_IP_INVALID_SOURCE = 75, + SKB_DROP_REASON_IP_LOCALNET = 76, + SKB_DROP_REASON_IP_INVALID_DEST = 77, + SKB_DROP_REASON_PKT_TOO_BIG = 78, + SKB_DROP_REASON_DUP_FRAG = 79, + SKB_DROP_REASON_FRAG_REASM_TIMEOUT = 80, + SKB_DROP_REASON_FRAG_TOO_FAR = 81, + SKB_DROP_REASON_TCP_MINTTL = 82, + SKB_DROP_REASON_IPV6_BAD_EXTHDR = 83, + SKB_DROP_REASON_IPV6_NDISC_FRAG = 84, + SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT = 85, + SKB_DROP_REASON_IPV6_NDISC_BAD_CODE = 86, + SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS = 87, + SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST = 88, + SKB_DROP_REASON_QUEUE_PURGE = 89, + SKB_DROP_REASON_TC_COOKIE_ERROR = 90, + SKB_DROP_REASON_PACKET_SOCK_ERROR = 91, + SKB_DROP_REASON_TC_CHAIN_NOTFOUND = 92, + SKB_DROP_REASON_TC_RECLASSIFY_LOOP = 93, + SKB_DROP_REASON_VXLAN_INVALID_HDR = 94, + SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND = 95, + SKB_DROP_REASON_MAC_INVALID_SOURCE = 96, + SKB_DROP_REASON_VXLAN_ENTRY_EXISTS = 97, + SKB_DROP_REASON_VXLAN_NO_REMOTE = 98, + SKB_DROP_REASON_IP_TUNNEL_ECN = 99, + SKB_DROP_REASON_TUNNEL_TXINFO = 100, + SKB_DROP_REASON_LOCAL_MAC = 101, + SKB_DROP_REASON_ARP_PVLAN_DISABLE = 102, + SKB_DROP_REASON_MAX = 103, + SKB_DROP_REASON_SUBSYS_MASK = 4294901760, +}; + +struct pernet_operations { + struct list_head list; + int (*init)(struct net *); + void (*pre_exit)(struct net *); + void (*exit)(struct net *); + void (*exit_batch)(struct list_head *); + void (*exit_batch_rtnl)(struct list_head *, struct list_head *); + unsigned int * const id; + const size_t size; +}; + +struct scm_creds { + u32 pid; + kuid_t uid; + kgid_t gid; +}; + +struct netlink_skb_parms { + struct scm_creds creds; + __u32 portid; + __u32 dst_group; + __u32 flags; + struct sock *sk; + bool nsid_is_set; + int nsid; +}; + +struct netlink_kernel_cfg { + unsigned int groups; + unsigned int flags; + void (*input)(struct sk_buff *); + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, long unsigned int *); +}; + +typedef int (*netlink_filter_fn)(struct sock *, struct sk_buff *, void *); + +struct netlink_dump_control { + int (*start)(struct netlink_callback *); + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + struct netlink_ext_ack *extack; + void *data; + struct module *module; + u32 min_dump_alloc; + int flags; +}; + +enum { + NLA_UNSPEC = 0, + NLA_U8 = 1, + NLA_U16 = 2, + NLA_U32 = 3, + NLA_U64 = 4, + NLA_STRING = 5, + NLA_FLAG = 6, + NLA_MSECS = 7, + NLA_NESTED = 8, + NLA_NESTED_ARRAY = 9, + NLA_NUL_STRING = 10, + NLA_BINARY = 11, + NLA_S8 = 12, + NLA_S16 = 13, + NLA_S32 = 14, + NLA_S64 = 15, + NLA_BITFIELD32 = 16, + NLA_REJECT = 17, + NLA_BE16 = 18, + NLA_BE32 = 19, + NLA_SINT = 20, + NLA_UINT = 21, + __NLA_TYPE_MAX = 22, +}; + +enum netlink_validation { + NL_VALIDATE_LIBERAL = 0, + NL_VALIDATE_TRAILING = 1, + NL_VALIDATE_MAXTYPE = 2, + NL_VALIDATE_UNSPEC = 4, + NL_VALIDATE_STRICT_ATTRS = 8, + NL_VALIDATE_NESTED = 16, +}; + +struct genlmsghdr { + __u8 cmd; + __u8 version; + __u16 reserved; +}; + +enum { + CTRL_CMD_UNSPEC = 0, + CTRL_CMD_NEWFAMILY = 1, + CTRL_CMD_DELFAMILY = 2, + CTRL_CMD_GETFAMILY = 3, + CTRL_CMD_NEWOPS = 4, + CTRL_CMD_DELOPS = 5, + CTRL_CMD_GETOPS = 6, + CTRL_CMD_NEWMCAST_GRP = 
7, + CTRL_CMD_DELMCAST_GRP = 8, + CTRL_CMD_GETMCAST_GRP = 9, + CTRL_CMD_GETPOLICY = 10, + __CTRL_CMD_MAX = 11, +}; + +enum { + CTRL_ATTR_UNSPEC = 0, + CTRL_ATTR_FAMILY_ID = 1, + CTRL_ATTR_FAMILY_NAME = 2, + CTRL_ATTR_VERSION = 3, + CTRL_ATTR_HDRSIZE = 4, + CTRL_ATTR_MAXATTR = 5, + CTRL_ATTR_OPS = 6, + CTRL_ATTR_MCAST_GROUPS = 7, + CTRL_ATTR_POLICY = 8, + CTRL_ATTR_OP_POLICY = 9, + CTRL_ATTR_OP = 10, + __CTRL_ATTR_MAX = 11, +}; + +enum { + CTRL_ATTR_OP_UNSPEC = 0, + CTRL_ATTR_OP_ID = 1, + CTRL_ATTR_OP_FLAGS = 2, + __CTRL_ATTR_OP_MAX = 3, +}; + +enum { + CTRL_ATTR_MCAST_GRP_UNSPEC = 0, + CTRL_ATTR_MCAST_GRP_NAME = 1, + CTRL_ATTR_MCAST_GRP_ID = 2, + __CTRL_ATTR_MCAST_GRP_MAX = 3, +}; + +enum { + CTRL_ATTR_POLICY_UNSPEC = 0, + CTRL_ATTR_POLICY_DO = 1, + CTRL_ATTR_POLICY_DUMP = 2, + __CTRL_ATTR_POLICY_DUMP_MAX = 3, + CTRL_ATTR_POLICY_DUMP_MAX = 2, +}; + +struct genl_multicast_group { + char name[16]; + u8 flags; +}; + +struct genl_split_ops; + +struct genl_info; + +struct genl_ops; + +struct genl_small_ops; + +struct genl_family { + unsigned int hdrsize; + char name[16]; + unsigned int version; + unsigned int maxattr; + u8 netnsok: 1; + u8 parallel_ops: 1; + u8 n_ops; + u8 n_small_ops; + u8 n_split_ops; + u8 n_mcgrps; + u8 resv_start_op; + const struct nla_policy *policy; + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + int (*bind)(int); + void (*unbind)(int); + const struct genl_ops *ops; + const struct genl_small_ops *small_ops; + const struct genl_split_ops *split_ops; + const struct genl_multicast_group *mcgrps; + struct module *module; + size_t sock_priv_size; + void (*sock_priv_init)(void *); + void (*sock_priv_destroy)(void *); + int id; + unsigned int mcgrp_offset; + struct xarray *sock_privs; +}; + +struct genl_split_ops { + union { + struct { + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + int (*doit)(struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + }; + struct { + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + }; + }; + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_info { + u32 snd_seq; + u32 snd_portid; + const struct genl_family *family; + const struct nlmsghdr *nlhdr; + struct genlmsghdr *genlhdr; + struct nlattr **attrs; + possible_net_t _net; + union { + u8 ctx[48]; + void *user_ptr[2]; + }; + struct netlink_ext_ack *extack; +}; + +struct genl_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_small_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +enum genl_validate_flags { + GENL_DONT_VALIDATE_STRICT = 1, + GENL_DONT_VALIDATE_DUMP = 2, + GENL_DONT_VALIDATE_DUMP_STRICT = 4, +}; + +struct genl_dumpit_info { + struct genl_split_ops op; + struct genl_info info; +}; + +struct genl_op_iter { + const struct genl_family *family; + 
struct genl_split_ops doit; + struct genl_split_ops dumpit; + int cmd_idx; + int entry_idx; + u32 cmd; + u8 flags; +}; + +struct genl_start_context { + const struct genl_family *family; + struct nlmsghdr *nlh; + struct netlink_ext_ack *extack; + const struct genl_split_ops *ops; + int hdrlen; +}; + +struct netlink_policy_dump_state; + +struct ctrl_dump_policy_ctx { + struct netlink_policy_dump_state *state; + const struct genl_family *rt; + struct genl_op_iter *op_iter; + u32 op; + u16 fam_id; + u8 dump_map: 1; + u8 single_op: 1; +}; + +typedef __u16 __sum16; + +typedef u16 u_int16_t; + +typedef u32 u_int32_t; + +typedef u64 u_int64_t; + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +enum ip_conntrack_info { + IP_CT_ESTABLISHED = 0, + IP_CT_RELATED = 1, + IP_CT_NEW = 2, + IP_CT_IS_REPLY = 3, + IP_CT_ESTABLISHED_REPLY = 3, + IP_CT_RELATED_REPLY = 4, + IP_CT_NUMBER = 5, + IP_CT_UNTRACKED = 7, +}; + +enum ip_conntrack_status { + IPS_EXPECTED_BIT = 0, + IPS_EXPECTED = 1, + IPS_SEEN_REPLY_BIT = 1, + IPS_SEEN_REPLY = 2, + IPS_ASSURED_BIT = 2, + IPS_ASSURED = 4, + IPS_CONFIRMED_BIT = 3, + IPS_CONFIRMED = 8, + IPS_SRC_NAT_BIT = 4, + IPS_SRC_NAT = 16, + IPS_DST_NAT_BIT = 5, + IPS_DST_NAT = 32, + IPS_NAT_MASK = 48, + IPS_SEQ_ADJUST_BIT = 6, + IPS_SEQ_ADJUST = 64, + IPS_SRC_NAT_DONE_BIT = 7, + IPS_SRC_NAT_DONE = 128, + IPS_DST_NAT_DONE_BIT = 8, + IPS_DST_NAT_DONE = 256, + IPS_NAT_DONE_MASK = 384, + IPS_DYING_BIT = 9, + IPS_DYING = 512, + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = 1024, + IPS_TEMPLATE_BIT = 11, + IPS_TEMPLATE = 2048, + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = 4096, + IPS_NAT_CLASH_BIT = 12, + IPS_NAT_CLASH = 4096, + IPS_HELPER_BIT = 13, + IPS_HELPER = 8192, + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = 16384, + IPS_HW_OFFLOAD_BIT = 15, + IPS_HW_OFFLOAD = 32768, + IPS_UNCHANGEABLE_MASK = 56313, + __IPS_MAX_BIT = 16, +}; + +struct nf_conntrack { + refcount_t use; +}; + +struct in_addr { + __be32 s_addr; +}; + +enum nf_inet_hooks { + NF_INET_PRE_ROUTING = 0, + NF_INET_LOCAL_IN = 1, + NF_INET_FORWARD = 2, + NF_INET_LOCAL_OUT = 3, + NF_INET_POST_ROUTING = 4, + NF_INET_NUMHOOKS = 5, + NF_INET_INGRESS = 5, +}; + +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_INET = 1, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_NETDEV = 5, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_NUMPROTO = 11, +}; + +union nf_inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +struct inet_peer_base { + struct rb_root rb_root; + seqlock_t lock; + int total; +}; + +struct rt6key { + struct in6_addr addr; + int plen; +}; + +struct rtable; + +struct fnhe_hash_bucket; + +struct fib_nh_common { + struct net_device *nhc_dev; + netdevice_tracker nhc_dev_tracker; + int nhc_oif; + unsigned char nhc_scope; + u8 nhc_family; + u8 nhc_gw_family; + unsigned char nhc_flags; + struct lwtunnel_state *nhc_lwtstate; + union { + __be32 ipv4; + struct in6_addr ipv6; + } nhc_gw; + int nhc_weight; + atomic_t nhc_upper_bound; + struct rtable **nhc_pcpu_rth_output; + struct rtable *nhc_rth_input; + struct fnhe_hash_bucket *nhc_exceptions; +}; + +struct rt6_exception_bucket; + +struct fib6_nh { + struct fib_nh_common nh_common; + struct rt6_info **rt6i_pcpu; + struct rt6_exception_bucket *rt6i_exception_bucket; +}; + +struct fib6_node; + +struct dst_metrics; + +struct nexthop; + +struct fib6_info { + struct fib6_table *fib6_table; + struct fib6_info *fib6_next; + struct fib6_node *fib6_node; + union { + struct list_head fib6_siblings; + struct list_head nh_list; 
+ }; + unsigned int fib6_nsiblings; + refcount_t fib6_ref; + long unsigned int expires; + struct hlist_node gc_link; + struct dst_metrics *fib6_metrics; + struct rt6key fib6_dst; + u32 fib6_flags; + struct rt6key fib6_src; + struct rt6key fib6_prefsrc; + u32 fib6_metric; + u8 fib6_protocol; + u8 fib6_type; + u8 offload; + u8 trap; + u8 offload_failed; + u8 should_flush: 1; + u8 dst_nocount: 1; + u8 dst_nopolicy: 1; + u8 fib6_destroying: 1; + u8 unused: 4; + struct callback_head rcu; + struct nexthop *nh; + struct fib6_nh fib6_nh[0]; +}; + +struct rt6_info { + struct dst_entry dst; + struct fib6_info *from; + int sernum; + struct rt6key rt6i_dst; + struct rt6key rt6i_src; + struct in6_addr rt6i_gateway; + struct inet6_dev *rt6i_idev; + u32 rt6i_flags; + short unsigned int rt6i_nfheader_len; +}; + +struct rt6_statistics { + __u32 fib_nodes; + __u32 fib_route_nodes; + __u32 fib_rt_entries; + __u32 fib_rt_cache; + __u32 fib_discarded_routes; + atomic_t fib_rt_alloc; +}; + +struct fib6_node { + struct fib6_node *parent; + struct fib6_node *left; + struct fib6_node *right; + struct fib6_info *leaf; + __u16 fn_bit; + __u16 fn_flags; + int fn_sernum; + struct fib6_info *rr_ptr; + struct callback_head rcu; +}; + +struct fib6_table { + struct hlist_node tb6_hlist; + u32 tb6_id; + spinlock_t tb6_lock; + struct fib6_node tb6_root; + struct inet_peer_base tb6_peers; + unsigned int flags; + unsigned int fib_seq; + struct hlist_head tb6_gc_hlist; +}; + +struct nf_hook_state; + +typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); + +struct nf_hook_entry { + nf_hookfn *hook; + void *priv; +}; + +struct nf_hook_entries { + u16 num_hook_entries; + struct nf_hook_entry hooks[0]; +}; + +enum tcp_conntrack { + TCP_CONNTRACK_NONE = 0, + TCP_CONNTRACK_SYN_SENT = 1, + TCP_CONNTRACK_SYN_RECV = 2, + TCP_CONNTRACK_ESTABLISHED = 3, + TCP_CONNTRACK_FIN_WAIT = 4, + TCP_CONNTRACK_CLOSE_WAIT = 5, + TCP_CONNTRACK_LAST_ACK = 6, + TCP_CONNTRACK_TIME_WAIT = 7, + TCP_CONNTRACK_CLOSE = 8, + TCP_CONNTRACK_LISTEN = 9, + TCP_CONNTRACK_MAX = 10, + TCP_CONNTRACK_IGNORE = 11, + TCP_CONNTRACK_RETRANS = 12, + TCP_CONNTRACK_UNACK = 13, + TCP_CONNTRACK_TIMEOUT_MAX = 14, +}; + +struct ip_ct_tcp_state { + u_int32_t td_end; + u_int32_t td_maxend; + u_int32_t td_maxwin; + u_int32_t td_maxack; + u_int8_t td_scale; + u_int8_t flags; +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; + u_int8_t state; + u_int8_t last_dir; + u_int8_t retrans; + u_int8_t last_index; + u_int32_t last_seq; + u_int32_t last_ack; + u_int32_t last_end; + u_int16_t last_win; + u_int8_t last_wscale; + u_int8_t last_flags; +}; + +enum udp_conntrack { + UDP_CT_UNREPLIED = 0, + UDP_CT_REPLIED = 1, + UDP_CT_MAX = 2, +}; + +struct net_generic { + union { + struct { + unsigned int len; + struct callback_head rcu; + } s; + struct { + struct {} __empty_ptr; + void *ptr[0]; + }; + }; +}; + +struct seq_net_private { + struct net *net; + netns_tracker ns_tracker; +}; + +struct iphdr { + __u8 version: 4; + __u8 ihl: 4; + __u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + __u8 ttl; + __u8 protocol; + __sum16 check; + union { + struct { + __be32 saddr; + __be32 daddr; + }; + struct { + __be32 saddr; + __be32 daddr; + } addrs; + }; +}; + +struct ip_tunnel_parm_kern { + char name[16]; + long unsigned int i_flags[1]; + long unsigned int o_flags[1]; + __be32 i_key; + __be32 o_key; + int link; + struct iphdr iph; +}; + +struct nf_hook_state { + u8 hook; + u8 pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + 
struct net *net; + int (*okfn)(struct net *, struct sock *, struct sk_buff *); +}; + +union nf_conntrack_man_proto { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; +}; + +struct nf_conntrack_man { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + u_int16_t l3num; +}; + +struct nf_conntrack_tuple { + struct nf_conntrack_man src; + struct { + union nf_inet_addr u3; + union { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + u_int8_t type; + u_int8_t code; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; + } u; + u_int8_t protonum; + struct {} __nfct_hash_offsetend; + u_int8_t dir; + } dst; +}; + +struct nf_conntrack_tuple_hash { + struct hlist_nulls_node hnnode; + struct nf_conntrack_tuple tuple; +}; + +struct nf_ct_dccp { + u_int8_t role[2]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; + +enum sctp_conntrack { + SCTP_CONNTRACK_NONE = 0, + SCTP_CONNTRACK_CLOSED = 1, + SCTP_CONNTRACK_COOKIE_WAIT = 2, + SCTP_CONNTRACK_COOKIE_ECHOED = 3, + SCTP_CONNTRACK_ESTABLISHED = 4, + SCTP_CONNTRACK_SHUTDOWN_SENT = 5, + SCTP_CONNTRACK_SHUTDOWN_RECD = 6, + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, + SCTP_CONNTRACK_HEARTBEAT_SENT = 8, + SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, + SCTP_CONNTRACK_MAX = 10, +}; + +struct ip_ct_sctp { + enum sctp_conntrack state; + __be32 vtag[2]; + u8 init[2]; + u8 last_dir; + u8 flags; +}; + +struct nf_ct_udp { + long unsigned int stream_ts; +}; + +struct nf_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +union nf_conntrack_proto { + struct nf_ct_dccp dccp; + struct ip_ct_sctp sctp; + struct ip_ct_tcp tcp; + struct nf_ct_udp udp; + struct nf_ct_gre gre; + unsigned int tmpl_padto; +}; + +struct nf_ct_ext; + +struct nf_conn { + struct nf_conntrack ct_general; + spinlock_t lock; + u32 timeout; + struct nf_conntrack_tuple_hash tuplehash[2]; + long unsigned int status; + possible_net_t ct_net; + struct hlist_node nat_bysource; + struct {} __nfct_init_offset; + struct nf_conn *master; + u_int32_t mark; + struct nf_ct_ext *ext; + union nf_conntrack_proto proto; +}; + +enum ip_conntrack_dir { + IP_CT_DIR_ORIGINAL = 0, + IP_CT_DIR_REPLY = 1, + IP_CT_DIR_MAX = 2, +}; + +enum { + IP_TUNNEL_CSUM_BIT = 0, + IP_TUNNEL_ROUTING_BIT = 1, + IP_TUNNEL_KEY_BIT = 2, + IP_TUNNEL_SEQ_BIT = 3, + IP_TUNNEL_STRICT_BIT = 4, + IP_TUNNEL_REC_BIT = 5, + IP_TUNNEL_VERSION_BIT = 6, + IP_TUNNEL_NO_KEY_BIT = 7, + IP_TUNNEL_DONT_FRAGMENT_BIT = 8, + IP_TUNNEL_OAM_BIT = 9, + IP_TUNNEL_CRIT_OPT_BIT = 10, + IP_TUNNEL_GENEVE_OPT_BIT = 11, + IP_TUNNEL_VXLAN_OPT_BIT = 12, + IP_TUNNEL_NOCACHE_BIT = 13, + IP_TUNNEL_ERSPAN_OPT_BIT = 14, + IP_TUNNEL_GTP_OPT_BIT = 15, + IP_TUNNEL_VTI_BIT = 16, + IP_TUNNEL_SIT_ISATAP_BIT = 16, + IP_TUNNEL_PFCP_OPT_BIT = 17, + __IP_TUNNEL_FLAG_NUM = 18, +}; + +struct lwtunnel_state { + __u16 type; + __u16 flags; + __u16 headroom; + atomic_t refcnt; + int (*orig_output)(struct net *, struct sock *, struct sk_buff *); + int (*orig_input)(struct sk_buff *); + struct callback_head rcu; + __u8 data[0]; +}; + +struct dst_metrics { + u32 metrics[17]; + refcount_t refcnt; +}; + +struct fib_nh_exception { + struct fib_nh_exception *fnhe_next; + int fnhe_genid; + __be32 fnhe_daddr; + u32 fnhe_pmtu; + bool fnhe_mtu_locked; + __be32 fnhe_gw; + long 
unsigned int fnhe_expires; + struct rtable *fnhe_rth_input; + struct rtable *fnhe_rth_output; + long unsigned int fnhe_stamp; + struct callback_head rcu; +}; + +struct rtable { + struct dst_entry dst; + int rt_genid; + unsigned int rt_flags; + __u16 rt_type; + __u8 rt_is_input; + __u8 rt_uses_gateway; + int rt_iif; + u8 rt_gw_family; + union { + __be32 rt_gw4; + struct in6_addr rt_gw6; + }; + u32 rt_mtu_locked: 1; + u32 rt_pmtu: 31; +}; + +struct fnhe_hash_bucket { + struct fib_nh_exception *chain; +}; + +struct fib_info; + +struct fib_nh { + struct fib_nh_common nh_common; + struct hlist_node nh_hash; + struct fib_info *nh_parent; + __be32 nh_saddr; + int nh_saddr_genid; +}; + +struct fib_info { + struct hlist_node fib_hash; + struct hlist_node fib_lhash; + struct list_head nh_list; + struct net *fib_net; + refcount_t fib_treeref; + refcount_t fib_clntref; + unsigned int fib_flags; + unsigned char fib_dead; + unsigned char fib_protocol; + unsigned char fib_scope; + unsigned char fib_type; + __be32 fib_prefsrc; + u32 fib_tb_id; + u32 fib_priority; + struct dst_metrics *fib_metrics; + int fib_nhs; + bool fib_nh_is_v6; + bool nh_updated; + bool pfsrc_removed; + struct nexthop *nh; + struct callback_head rcu; + struct fib_nh fib_nh[0]; +}; + +struct nh_info; + +struct nh_group; + +struct nexthop { + struct rb_node rb_node; + struct list_head fi_list; + struct list_head f6i_list; + struct list_head fdb_list; + struct list_head grp_list; + struct net *net; + u32 id; + u8 protocol; + u8 nh_flags; + bool is_group; + refcount_t refcnt; + struct callback_head rcu; + union { + struct nh_info *nh_info; + struct nh_group *nh_grp; + }; +}; + +enum { + __ND_OPT_PREFIX_INFO_END = 0, + ND_OPT_SOURCE_LL_ADDR = 1, + ND_OPT_TARGET_LL_ADDR = 2, + ND_OPT_PREFIX_INFO = 3, + ND_OPT_REDIRECT_HDR = 4, + ND_OPT_MTU = 5, + ND_OPT_NONCE = 14, + __ND_OPT_ARRAY_MAX = 15, + ND_OPT_ROUTE_INFO = 24, + ND_OPT_RDNSS = 25, + ND_OPT_DNSSL = 31, + ND_OPT_6CO = 34, + ND_OPT_CAPTIVE_PORTAL = 37, + ND_OPT_PREF64 = 38, + __ND_OPT_MAX = 39, +}; + +struct nd_opt_hdr { + __u8 nd_opt_type; + __u8 nd_opt_len; +}; + +struct ndisc_options { + struct nd_opt_hdr *nd_opt_array[15]; + struct nd_opt_hdr *nd_useropts; + struct nd_opt_hdr *nd_useropts_end; +}; + +struct prefix_info { + __u8 type; + __u8 length; + __u8 prefix_len; + union { + __u8 flags; + struct { + __u8 onlink: 1; + __u8 autoconf: 1; + __u8 routeraddr: 1; + __u8 preferpd: 1; + __u8 reserved: 4; + }; + }; + __be32 valid; + __be32 prefered; + __be32 reserved2; + struct in6_addr prefix; +}; + +struct rt6_exception_bucket { + struct hlist_head chain; + int depth; +}; + +struct nh_info { + struct hlist_node dev_hash; + struct nexthop *nh_parent; + u8 family; + bool reject_nh; + bool fdb_nh; + union { + struct fib_nh_common fib_nhc; + struct fib_nh fib_nh; + struct fib6_nh fib6_nh; + }; +}; + +struct nh_grp_entry; + +struct nh_res_bucket { + struct nh_grp_entry *nh_entry; + atomic_long_t used_time; + long unsigned int migrated_time; + bool occupied; + u8 nh_flags; +}; + +struct nh_grp_entry_stats; + +struct nh_grp_entry { + struct nexthop *nh; + struct nh_grp_entry_stats *stats; + u16 weight; + union { + struct { + atomic_t upper_bound; + } hthr; + struct { + struct list_head uw_nh_entry; + u16 count_buckets; + u16 wants_buckets; + } res; + }; + struct list_head nh_list; + struct nexthop *nh_parent; + u64 packets_hw; +}; + +struct nh_res_table { + struct net *net; + u32 nhg_id; + struct delayed_work upkeep_dw; + struct list_head uw_nh_entries; + long unsigned int unbalanced_since; + 
u32 idle_timer; + u32 unbalanced_timer; + u16 num_nh_buckets; + struct nh_res_bucket nh_buckets[0]; +}; + +struct nh_grp_entry_stats { + u64_stats_t packets; + struct u64_stats_sync syncp; +}; + +struct nh_group { + struct nh_group *spare; + u16 num_nh; + bool is_multipath; + bool hash_threshold; + bool resilient; + bool fdb_nh; + bool has_v4; + bool hw_stats; + struct nh_res_table *res_table; + struct nh_grp_entry nh_entries[0]; +}; + +struct nf_conntrack_net { + atomic_t count; + unsigned int expect_count; + unsigned int users4; + unsigned int users6; + unsigned int users_bridge; + struct ctl_table_header *sysctl_header; +}; + +struct nf_conntrack_l4proto { + u_int8_t l4proto; + bool allow_clash; + u16 nlattr_size; + bool (*can_early_drop)(const struct nf_conn *); + int (*to_nlattr)(struct sk_buff *, struct nlattr *, struct nf_conn *, bool); + int (*from_nlattr)(struct nlattr **, struct nf_conn *); + int (*tuple_to_nlattr)(struct sk_buff *, const struct nf_conntrack_tuple *); + unsigned int (*nlattr_tuple_size)(); + int (*nlattr_to_tuple)(struct nlattr **, struct nf_conntrack_tuple *, u_int32_t); + const struct nla_policy *nla_policy; + struct { + int (*nlattr_to_obj)(struct nlattr **, struct net *, void *); + int (*obj_to_nlattr)(struct sk_buff *, const void *); + u16 obj_size; + u16 nlattr_max; + const struct nla_policy *nla_policy; + } ctnl_timeout; + void (*print_conntrack)(struct seq_file *, struct nf_conn *); +}; + +struct nf_ct_ext { + u8 offset[4]; + u8 len; + unsigned int gen_id; + long: 0; + char data[0]; +}; + +enum nf_ct_ext_id { + NF_CT_EXT_HELPER = 0, + NF_CT_EXT_NAT = 1, + NF_CT_EXT_SEQADJ = 2, + NF_CT_EXT_ACCT = 3, + NF_CT_EXT_NUM = 4, +}; + +struct nf_conn_counter { + atomic64_t packets; + atomic64_t bytes; +}; + +struct nf_conn_acct { + struct nf_conn_counter counter[2]; +}; + +struct ct_iter_state { + struct seq_net_private p; + struct hlist_nulls_head *hash; + unsigned int htable_size; + unsigned int bucket; + u_int64_t time_now; +}; + +enum nf_ct_sysctl_index { + NF_SYSCTL_CT_MAX = 0, + NF_SYSCTL_CT_COUNT = 1, + NF_SYSCTL_CT_BUCKETS = 2, + NF_SYSCTL_CT_CHECKSUM = 3, + NF_SYSCTL_CT_LOG_INVALID = 4, + NF_SYSCTL_CT_EXPECT_MAX = 5, + NF_SYSCTL_CT_ACCT = 6, + NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC = 7, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT = 8, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV = 9, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED = 10, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT = 11, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT = 12, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK = 13, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT = 14, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE = 15, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS = 16, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK = 17, + NF_SYSCTL_CT_PROTO_TCP_LOOSE = 18, + NF_SYSCTL_CT_PROTO_TCP_LIBERAL = 19, + NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST = 20, + NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS = 21, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP = 22, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM = 23, + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP = 24, + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6 = 25, + NF_SYSCTL_CT_LAST_SYSCTL = 26, +}; + +struct rhash_lock_head; + +struct bucket_table { + unsigned int size; + unsigned int nest; + u32 hash_rnd; + struct list_head walkers; + struct callback_head rcu; + struct bucket_table *future_tbl; + struct lockdep_map dep_map; + long: 64; + struct rhash_lock_head *buckets[0]; +}; + +struct rhltable { + struct rhashtable ht; +}; + +enum { + IPSTATS_MIB_NUM = 0, + IPSTATS_MIB_INPKTS = 1, + IPSTATS_MIB_INOCTETS = 2, + IPSTATS_MIB_INDELIVERS = 
3, + IPSTATS_MIB_OUTFORWDATAGRAMS = 4, + IPSTATS_MIB_OUTREQUESTS = 5, + IPSTATS_MIB_OUTOCTETS = 6, + IPSTATS_MIB_INHDRERRORS = 7, + IPSTATS_MIB_INTOOBIGERRORS = 8, + IPSTATS_MIB_INNOROUTES = 9, + IPSTATS_MIB_INADDRERRORS = 10, + IPSTATS_MIB_INUNKNOWNPROTOS = 11, + IPSTATS_MIB_INTRUNCATEDPKTS = 12, + IPSTATS_MIB_INDISCARDS = 13, + IPSTATS_MIB_OUTDISCARDS = 14, + IPSTATS_MIB_OUTNOROUTES = 15, + IPSTATS_MIB_REASMTIMEOUT = 16, + IPSTATS_MIB_REASMREQDS = 17, + IPSTATS_MIB_REASMOKS = 18, + IPSTATS_MIB_REASMFAILS = 19, + IPSTATS_MIB_FRAGOKS = 20, + IPSTATS_MIB_FRAGFAILS = 21, + IPSTATS_MIB_FRAGCREATES = 22, + IPSTATS_MIB_INMCASTPKTS = 23, + IPSTATS_MIB_OUTMCASTPKTS = 24, + IPSTATS_MIB_INBCASTPKTS = 25, + IPSTATS_MIB_OUTBCASTPKTS = 26, + IPSTATS_MIB_INMCASTOCTETS = 27, + IPSTATS_MIB_OUTMCASTOCTETS = 28, + IPSTATS_MIB_INBCASTOCTETS = 29, + IPSTATS_MIB_OUTBCASTOCTETS = 30, + IPSTATS_MIB_CSUMERRORS = 31, + IPSTATS_MIB_NOECTPKTS = 32, + IPSTATS_MIB_ECT1PKTS = 33, + IPSTATS_MIB_ECT0PKTS = 34, + IPSTATS_MIB_CEPKTS = 35, + IPSTATS_MIB_REASM_OVERLAPS = 36, + IPSTATS_MIB_OUTPKTS = 37, + __IPSTATS_MIB_MAX = 38, +}; + +enum { + ICMP_MIB_NUM = 0, + ICMP_MIB_INMSGS = 1, + ICMP_MIB_INERRORS = 2, + ICMP_MIB_INDESTUNREACHS = 3, + ICMP_MIB_INTIMEEXCDS = 4, + ICMP_MIB_INPARMPROBS = 5, + ICMP_MIB_INSRCQUENCHS = 6, + ICMP_MIB_INREDIRECTS = 7, + ICMP_MIB_INECHOS = 8, + ICMP_MIB_INECHOREPS = 9, + ICMP_MIB_INTIMESTAMPS = 10, + ICMP_MIB_INTIMESTAMPREPS = 11, + ICMP_MIB_INADDRMASKS = 12, + ICMP_MIB_INADDRMASKREPS = 13, + ICMP_MIB_OUTMSGS = 14, + ICMP_MIB_OUTERRORS = 15, + ICMP_MIB_OUTDESTUNREACHS = 16, + ICMP_MIB_OUTTIMEEXCDS = 17, + ICMP_MIB_OUTPARMPROBS = 18, + ICMP_MIB_OUTSRCQUENCHS = 19, + ICMP_MIB_OUTREDIRECTS = 20, + ICMP_MIB_OUTECHOS = 21, + ICMP_MIB_OUTECHOREPS = 22, + ICMP_MIB_OUTTIMESTAMPS = 23, + ICMP_MIB_OUTTIMESTAMPREPS = 24, + ICMP_MIB_OUTADDRMASKS = 25, + ICMP_MIB_OUTADDRMASKREPS = 26, + ICMP_MIB_CSUMERRORS = 27, + ICMP_MIB_RATELIMITGLOBAL = 28, + ICMP_MIB_RATELIMITHOST = 29, + __ICMP_MIB_MAX = 30, +}; + +enum { + ICMP6_MIB_NUM = 0, + ICMP6_MIB_INMSGS = 1, + ICMP6_MIB_INERRORS = 2, + ICMP6_MIB_OUTMSGS = 3, + ICMP6_MIB_OUTERRORS = 4, + ICMP6_MIB_CSUMERRORS = 5, + ICMP6_MIB_RATELIMITHOST = 6, + __ICMP6_MIB_MAX = 7, +}; + +enum { + TCP_MIB_NUM = 0, + TCP_MIB_RTOALGORITHM = 1, + TCP_MIB_RTOMIN = 2, + TCP_MIB_RTOMAX = 3, + TCP_MIB_MAXCONN = 4, + TCP_MIB_ACTIVEOPENS = 5, + TCP_MIB_PASSIVEOPENS = 6, + TCP_MIB_ATTEMPTFAILS = 7, + TCP_MIB_ESTABRESETS = 8, + TCP_MIB_CURRESTAB = 9, + TCP_MIB_INSEGS = 10, + TCP_MIB_OUTSEGS = 11, + TCP_MIB_RETRANSSEGS = 12, + TCP_MIB_INERRS = 13, + TCP_MIB_OUTRSTS = 14, + TCP_MIB_CSUMERRORS = 15, + __TCP_MIB_MAX = 16, +}; + +enum { + UDP_MIB_NUM = 0, + UDP_MIB_INDATAGRAMS = 1, + UDP_MIB_NOPORTS = 2, + UDP_MIB_INERRORS = 3, + UDP_MIB_OUTDATAGRAMS = 4, + UDP_MIB_RCVBUFERRORS = 5, + UDP_MIB_SNDBUFERRORS = 6, + UDP_MIB_CSUMERRORS = 7, + UDP_MIB_IGNOREDMULTI = 8, + UDP_MIB_MEMERRORS = 9, + __UDP_MIB_MAX = 10, +}; + +enum { + LINUX_MIB_NUM = 0, + LINUX_MIB_SYNCOOKIESSENT = 1, + LINUX_MIB_SYNCOOKIESRECV = 2, + LINUX_MIB_SYNCOOKIESFAILED = 3, + LINUX_MIB_EMBRYONICRSTS = 4, + LINUX_MIB_PRUNECALLED = 5, + LINUX_MIB_RCVPRUNED = 6, + LINUX_MIB_OFOPRUNED = 7, + LINUX_MIB_OUTOFWINDOWICMPS = 8, + LINUX_MIB_LOCKDROPPEDICMPS = 9, + LINUX_MIB_ARPFILTER = 10, + LINUX_MIB_TIMEWAITED = 11, + LINUX_MIB_TIMEWAITRECYCLED = 12, + LINUX_MIB_TIMEWAITKILLED = 13, + LINUX_MIB_PAWSACTIVEREJECTED = 14, + LINUX_MIB_PAWSESTABREJECTED = 15, + LINUX_MIB_DELAYEDACKS = 16, + LINUX_MIB_DELAYEDACKLOCKED = 17, + 
LINUX_MIB_DELAYEDACKLOST = 18, + LINUX_MIB_LISTENOVERFLOWS = 19, + LINUX_MIB_LISTENDROPS = 20, + LINUX_MIB_TCPHPHITS = 21, + LINUX_MIB_TCPPUREACKS = 22, + LINUX_MIB_TCPHPACKS = 23, + LINUX_MIB_TCPRENORECOVERY = 24, + LINUX_MIB_TCPSACKRECOVERY = 25, + LINUX_MIB_TCPSACKRENEGING = 26, + LINUX_MIB_TCPSACKREORDER = 27, + LINUX_MIB_TCPRENOREORDER = 28, + LINUX_MIB_TCPTSREORDER = 29, + LINUX_MIB_TCPFULLUNDO = 30, + LINUX_MIB_TCPPARTIALUNDO = 31, + LINUX_MIB_TCPDSACKUNDO = 32, + LINUX_MIB_TCPLOSSUNDO = 33, + LINUX_MIB_TCPLOSTRETRANSMIT = 34, + LINUX_MIB_TCPRENOFAILURES = 35, + LINUX_MIB_TCPSACKFAILURES = 36, + LINUX_MIB_TCPLOSSFAILURES = 37, + LINUX_MIB_TCPFASTRETRANS = 38, + LINUX_MIB_TCPSLOWSTARTRETRANS = 39, + LINUX_MIB_TCPTIMEOUTS = 40, + LINUX_MIB_TCPLOSSPROBES = 41, + LINUX_MIB_TCPLOSSPROBERECOVERY = 42, + LINUX_MIB_TCPRENORECOVERYFAIL = 43, + LINUX_MIB_TCPSACKRECOVERYFAIL = 44, + LINUX_MIB_TCPRCVCOLLAPSED = 45, + LINUX_MIB_TCPDSACKOLDSENT = 46, + LINUX_MIB_TCPDSACKOFOSENT = 47, + LINUX_MIB_TCPDSACKRECV = 48, + LINUX_MIB_TCPDSACKOFORECV = 49, + LINUX_MIB_TCPABORTONDATA = 50, + LINUX_MIB_TCPABORTONCLOSE = 51, + LINUX_MIB_TCPABORTONMEMORY = 52, + LINUX_MIB_TCPABORTONTIMEOUT = 53, + LINUX_MIB_TCPABORTONLINGER = 54, + LINUX_MIB_TCPABORTFAILED = 55, + LINUX_MIB_TCPMEMORYPRESSURES = 56, + LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, + LINUX_MIB_TCPSACKDISCARD = 58, + LINUX_MIB_TCPDSACKIGNOREDOLD = 59, + LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, + LINUX_MIB_TCPSPURIOUSRTOS = 61, + LINUX_MIB_TCPMD5NOTFOUND = 62, + LINUX_MIB_TCPMD5UNEXPECTED = 63, + LINUX_MIB_TCPMD5FAILURE = 64, + LINUX_MIB_SACKSHIFTED = 65, + LINUX_MIB_SACKMERGED = 66, + LINUX_MIB_SACKSHIFTFALLBACK = 67, + LINUX_MIB_TCPBACKLOGDROP = 68, + LINUX_MIB_PFMEMALLOCDROP = 69, + LINUX_MIB_TCPMINTTLDROP = 70, + LINUX_MIB_TCPDEFERACCEPTDROP = 71, + LINUX_MIB_IPRPFILTER = 72, + LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, + LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, + LINUX_MIB_TCPREQQFULLDROP = 75, + LINUX_MIB_TCPRETRANSFAIL = 76, + LINUX_MIB_TCPRCVCOALESCE = 77, + LINUX_MIB_TCPBACKLOGCOALESCE = 78, + LINUX_MIB_TCPOFOQUEUE = 79, + LINUX_MIB_TCPOFODROP = 80, + LINUX_MIB_TCPOFOMERGE = 81, + LINUX_MIB_TCPCHALLENGEACK = 82, + LINUX_MIB_TCPSYNCHALLENGE = 83, + LINUX_MIB_TCPFASTOPENACTIVE = 84, + LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, + LINUX_MIB_TCPFASTOPENPASSIVE = 86, + LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, + LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, + LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, + LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, + LINUX_MIB_BUSYPOLLRXPACKETS = 92, + LINUX_MIB_TCPAUTOCORKING = 93, + LINUX_MIB_TCPFROMZEROWINDOWADV = 94, + LINUX_MIB_TCPTOZEROWINDOWADV = 95, + LINUX_MIB_TCPWANTZEROWINDOWADV = 96, + LINUX_MIB_TCPSYNRETRANS = 97, + LINUX_MIB_TCPORIGDATASENT = 98, + LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, + LINUX_MIB_TCPHYSTARTTRAINCWND = 100, + LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, + LINUX_MIB_TCPHYSTARTDELAYCWND = 102, + LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, + LINUX_MIB_TCPACKSKIPPEDPAWS = 104, + LINUX_MIB_TCPACKSKIPPEDSEQ = 105, + LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, + LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, + LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, + LINUX_MIB_TCPWINPROBE = 109, + LINUX_MIB_TCPKEEPALIVE = 110, + LINUX_MIB_TCPMTUPFAIL = 111, + LINUX_MIB_TCPMTUPSUCCESS = 112, + LINUX_MIB_TCPDELIVERED = 113, + LINUX_MIB_TCPDELIVEREDCE = 114, + LINUX_MIB_TCPACKCOMPRESSED = 115, + LINUX_MIB_TCPZEROWINDOWDROP = 116, + LINUX_MIB_TCPRCVQDROP = 117, + LINUX_MIB_TCPWQUEUETOOBIG = 118, + LINUX_MIB_TCPFASTOPENPASSIVEALTKEY 
= 119, + LINUX_MIB_TCPTIMEOUTREHASH = 120, + LINUX_MIB_TCPDUPLICATEDATAREHASH = 121, + LINUX_MIB_TCPDSACKRECVSEGS = 122, + LINUX_MIB_TCPDSACKIGNOREDDUBIOUS = 123, + LINUX_MIB_TCPMIGRATEREQSUCCESS = 124, + LINUX_MIB_TCPMIGRATEREQFAILURE = 125, + LINUX_MIB_TCPPLBREHASH = 126, + LINUX_MIB_TCPAOREQUIRED = 127, + LINUX_MIB_TCPAOBAD = 128, + LINUX_MIB_TCPAOKEYNOTFOUND = 129, + LINUX_MIB_TCPAOGOOD = 130, + LINUX_MIB_TCPAODROPPEDICMPS = 131, + __LINUX_MIB_MAX = 132, +}; + +enum { + LINUX_MIB_XFRMNUM = 0, + LINUX_MIB_XFRMINERROR = 1, + LINUX_MIB_XFRMINBUFFERERROR = 2, + LINUX_MIB_XFRMINHDRERROR = 3, + LINUX_MIB_XFRMINNOSTATES = 4, + LINUX_MIB_XFRMINSTATEPROTOERROR = 5, + LINUX_MIB_XFRMINSTATEMODEERROR = 6, + LINUX_MIB_XFRMINSTATESEQERROR = 7, + LINUX_MIB_XFRMINSTATEEXPIRED = 8, + LINUX_MIB_XFRMINSTATEMISMATCH = 9, + LINUX_MIB_XFRMINSTATEINVALID = 10, + LINUX_MIB_XFRMINTMPLMISMATCH = 11, + LINUX_MIB_XFRMINNOPOLS = 12, + LINUX_MIB_XFRMINPOLBLOCK = 13, + LINUX_MIB_XFRMINPOLERROR = 14, + LINUX_MIB_XFRMOUTERROR = 15, + LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16, + LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17, + LINUX_MIB_XFRMOUTNOSTATES = 18, + LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19, + LINUX_MIB_XFRMOUTSTATEMODEERROR = 20, + LINUX_MIB_XFRMOUTSTATESEQERROR = 21, + LINUX_MIB_XFRMOUTSTATEEXPIRED = 22, + LINUX_MIB_XFRMOUTPOLBLOCK = 23, + LINUX_MIB_XFRMOUTPOLDEAD = 24, + LINUX_MIB_XFRMOUTPOLERROR = 25, + LINUX_MIB_XFRMFWDHDRERROR = 26, + LINUX_MIB_XFRMOUTSTATEINVALID = 27, + LINUX_MIB_XFRMACQUIREERROR = 28, + LINUX_MIB_XFRMOUTSTATEDIRERROR = 29, + LINUX_MIB_XFRMINSTATEDIRERROR = 30, + __LINUX_MIB_XFRMMAX = 31, +}; + +enum { + LINUX_MIB_TLSNUM = 0, + LINUX_MIB_TLSCURRTXSW = 1, + LINUX_MIB_TLSCURRRXSW = 2, + LINUX_MIB_TLSCURRTXDEVICE = 3, + LINUX_MIB_TLSCURRRXDEVICE = 4, + LINUX_MIB_TLSTXSW = 5, + LINUX_MIB_TLSRXSW = 6, + LINUX_MIB_TLSTXDEVICE = 7, + LINUX_MIB_TLSRXDEVICE = 8, + LINUX_MIB_TLSDECRYPTERROR = 9, + LINUX_MIB_TLSRXDEVICERESYNC = 10, + LINUX_MIB_TLSDECRYPTRETRY = 11, + LINUX_MIB_TLSRXNOPADVIOL = 12, + __LINUX_MIB_TLSMAX = 13, +}; + +enum { + XFRM_POLICY_IN = 0, + XFRM_POLICY_OUT = 1, + XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, + XFRM_POLICY_MAX = 3, +}; + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = -1, + NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP = 1, + MAX_NETNS_BPF_ATTACH_TYPE = 2, +}; + +enum nft_registers { + NFT_REG_VERDICT = 0, + NFT_REG_1 = 1, + NFT_REG_2 = 2, + NFT_REG_3 = 3, + NFT_REG_4 = 4, + __NFT_REG_MAX = 5, + NFT_REG32_00 = 8, + NFT_REG32_01 = 9, + NFT_REG32_02 = 10, + NFT_REG32_03 = 11, + NFT_REG32_04 = 12, + NFT_REG32_05 = 13, + NFT_REG32_06 = 14, + NFT_REG32_07 = 15, + NFT_REG32_08 = 16, + NFT_REG32_09 = 17, + NFT_REG32_10 = 18, + NFT_REG32_11 = 19, + NFT_REG32_12 = 20, + NFT_REG32_13 = 21, + NFT_REG32_14 = 22, + NFT_REG32_15 = 23, +}; + +enum nft_verdicts { + NFT_CONTINUE = -1, + NFT_BREAK = -2, + NFT_JUMP = -3, + NFT_GOTO = -4, + NFT_RETURN = -5, +}; + +enum nft_set_flags { + NFT_SET_ANONYMOUS = 1, + NFT_SET_CONSTANT = 2, + NFT_SET_INTERVAL = 4, + NFT_SET_MAP = 8, + NFT_SET_TIMEOUT = 16, + NFT_SET_EVAL = 32, + NFT_SET_OBJECT = 64, + NFT_SET_CONCAT = 128, + NFT_SET_EXPR = 256, +}; + +enum nft_data_types { + NFT_DATA_VALUE = 0, + NFT_DATA_VERDICT = 4294967040, +}; + +enum nft_lookup_flags { + NFT_LOOKUP_F_INV = 1, +}; + +enum nft_lookup_attributes { + NFTA_LOOKUP_UNSPEC = 0, + NFTA_LOOKUP_SET = 1, + NFTA_LOOKUP_SREG = 2, + NFTA_LOOKUP_DREG = 3, + NFTA_LOOKUP_SET_ID = 4, + NFTA_LOOKUP_FLAGS = 5, + __NFTA_LOOKUP_MAX = 6, +}; + +enum nla_policy_validation { + NLA_VALIDATE_NONE = 
0, + NLA_VALIDATE_RANGE = 1, + NLA_VALIDATE_RANGE_WARN_TOO_LONG = 2, + NLA_VALIDATE_MIN = 3, + NLA_VALIDATE_MAX = 4, + NLA_VALIDATE_MASK = 5, + NLA_VALIDATE_RANGE_PTR = 6, + NLA_VALIDATE_FUNCTION = 7, +}; + +struct rhash_lock_head {}; + +enum flow_action_hw_stats { + FLOW_ACTION_HW_STATS_IMMEDIATE = 1, + FLOW_ACTION_HW_STATS_DELAYED = 2, + FLOW_ACTION_HW_STATS_ANY = 3, + FLOW_ACTION_HW_STATS_DISABLED = 4, + FLOW_ACTION_HW_STATS_DONT_CARE = 7, +}; + +struct flow_stats { + u64 pkts; + u64 bytes; + u64 drops; + u64 lastused; + enum flow_action_hw_stats used_hw_stats; + bool used_hw_stats_valid; +}; + +enum l2tp_debug_flags { + L2TP_MSG_DEBUG = 1, + L2TP_MSG_CONTROL = 2, + L2TP_MSG_SEQ = 4, + L2TP_MSG_DATA = 8, +}; + +struct nft_pktinfo { + struct sk_buff *skb; + const struct nf_hook_state *state; + u8 flags; + u8 tprot; + u16 fragoff; + u16 thoff; + u16 inneroff; +}; + +struct nft_chain; + +struct nft_verdict { + u32 code; + struct nft_chain *chain; +}; + +struct nft_rule_blob; + +struct nft_table; + +struct nft_chain { + struct nft_rule_blob *blob_gen_0; + struct nft_rule_blob *blob_gen_1; + struct list_head rules; + struct list_head list; + struct rhlist_head rhlhead; + struct nft_table *table; + u64 handle; + u32 use; + u8 flags: 5; + u8 bound: 1; + u8 genmask: 2; + char *name; + u16 udlen; + u8 *udata; + struct callback_head callback_head; + struct nft_rule_blob *blob_next; +}; + +struct nft_data { + union { + u32 data[4]; + struct nft_verdict verdict; + }; +}; + +struct nft_regs { + union { + u32 data[20]; + struct nft_verdict verdict; + }; +}; + +struct nft_expr_ops; + +struct nft_expr { + const struct nft_expr_ops *ops; + unsigned char data[0]; +}; + +struct nft_regs_track { + struct { + const struct nft_expr *selector; + const struct nft_expr *bitwise; + u8 num_reg; + } regs[20]; + const struct nft_expr *cur; + const struct nft_expr *last; +}; + +struct nft_ctx { + struct net *net; + struct nft_table *table; + struct nft_chain *chain; + const struct nlattr * const *nla; + u32 portid; + u32 seq; + u16 flags; + u8 family; + u8 level; + bool report; + long unsigned int reg_inited[1]; +}; + +struct nft_table { + struct list_head list; + struct rhltable chains_ht; + struct list_head chains; + struct list_head sets; + struct list_head objects; + struct list_head flowtables; + possible_net_t net; + u64 hgenerator; + u64 handle; + u32 use; + u16 family: 6; + u16 flags: 8; + u16 genmask: 2; + u32 nlpid; + char *name; + u16 udlen; + u8 *udata; + u8 validate_state; +}; + +struct nft_elem_priv {}; + +struct nft_set_elem { + union { + u32 buf[16]; + struct nft_data val; + } key; + union { + u32 buf[16]; + struct nft_data val; + } key_end; + union { + u32 buf[16]; + struct nft_data val; + } data; + struct nft_elem_priv *priv; +}; + +enum nft_iter_type { + NFT_ITER_UNSPEC = 0, + NFT_ITER_READ = 1, + NFT_ITER_UPDATE = 2, +}; + +struct nft_set; + +struct nft_set_iter { + u8 genmask; + enum nft_iter_type type: 8; + unsigned int count; + unsigned int skip; + int err; + int (*fn)(const struct nft_ctx *, struct nft_set *, const struct nft_set_iter *, struct nft_elem_priv *); +}; + +struct nft_set_ops; + +struct nft_set { + struct list_head list; + struct list_head bindings; + refcount_t refs; + struct nft_table *table; + possible_net_t net; + char *name; + u64 handle; + u32 ktype; + u32 dtype; + u32 objtype; + u32 size; + u8 field_len[16]; + u8 field_count; + u32 use; + atomic_t nelems; + u32 ndeact; + u64 timeout; + u32 gc_int; + u16 policy; + u16 udlen; + unsigned char *udata; + struct list_head 
pending_update; + long: 64; + long: 64; + long: 64; + long: 64; + const struct nft_set_ops *ops; + u16 flags: 13; + u16 dead: 1; + u16 genmask: 2; + u8 klen; + u8 dlen; + u8 num_exprs; + struct nft_expr *exprs[2]; + struct list_head catchall_list; + unsigned char data[0]; + long: 64; + long: 64; +}; + +struct nft_set_desc { + u32 ktype; + unsigned int klen; + u32 dtype; + unsigned int dlen; + u32 objtype; + unsigned int size; + u32 policy; + u32 gc_int; + u64 timeout; + u8 field_len[16]; + u8 field_count; + bool expr; +}; + +enum nft_set_class { + NFT_SET_CLASS_O_1 = 0, + NFT_SET_CLASS_O_LOG_N = 1, + NFT_SET_CLASS_O_N = 2, +}; + +struct nft_set_estimate { + u64 size; + enum nft_set_class lookup; + enum nft_set_class space; +}; + +enum nft_trans_phase { + NFT_TRANS_PREPARE = 0, + NFT_TRANS_PREPARE_ERROR = 1, + NFT_TRANS_ABORT = 2, + NFT_TRANS_COMMIT = 3, + NFT_TRANS_RELEASE = 4, +}; + +struct nft_offload_ctx; + +struct nft_flow_rule; + +struct nft_expr_type; + +struct nft_expr_ops { + void (*eval)(const struct nft_expr *, struct nft_regs *, const struct nft_pktinfo *); + int (*clone)(struct nft_expr *, const struct nft_expr *, gfp_t); + unsigned int size; + int (*init)(const struct nft_ctx *, const struct nft_expr *, const struct nlattr * const *); + void (*activate)(const struct nft_ctx *, const struct nft_expr *); + void (*deactivate)(const struct nft_ctx *, const struct nft_expr *, enum nft_trans_phase); + void (*destroy)(const struct nft_ctx *, const struct nft_expr *); + void (*destroy_clone)(const struct nft_ctx *, const struct nft_expr *); + int (*dump)(struct sk_buff *, const struct nft_expr *, bool); + int (*validate)(const struct nft_ctx *, const struct nft_expr *); + bool (*reduce)(struct nft_regs_track *, const struct nft_expr *); + bool (*gc)(struct net *, const struct nft_expr *); + int (*offload)(struct nft_offload_ctx *, struct nft_flow_rule *, const struct nft_expr *); + bool (*offload_action)(const struct nft_expr *); + void (*offload_stats)(struct nft_expr *, const struct flow_stats *); + const struct nft_expr_type *type; + void *data; +}; + +struct nft_set_ext; + +struct nft_set_ops { + bool (*lookup)(const struct net *, const struct nft_set *, const u32 *, const struct nft_set_ext **); + bool (*update)(struct nft_set *, const u32 *, struct nft_elem_priv * (*)(struct nft_set *, const struct nft_expr *, struct nft_regs *), const struct nft_expr *, struct nft_regs *, const struct nft_set_ext **); + bool (*delete)(const struct nft_set *, const u32 *); + int (*insert)(const struct net *, const struct nft_set *, const struct nft_set_elem *, struct nft_elem_priv **); + void (*activate)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + struct nft_elem_priv * (*deactivate)(const struct net *, const struct nft_set *, const struct nft_set_elem *); + void (*flush)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + void (*remove)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + void (*walk)(const struct nft_ctx *, struct nft_set *, struct nft_set_iter *); + struct nft_elem_priv * (*get)(const struct net *, const struct nft_set *, const struct nft_set_elem *, unsigned int); + void (*commit)(struct nft_set *); + void (*abort)(const struct nft_set *); + u64 (*privsize)(const struct nlattr * const *, const struct nft_set_desc *); + bool (*estimate)(const struct nft_set_desc *, u32, struct nft_set_estimate *); + int (*init)(const struct nft_set *, const struct nft_set_desc *, const struct nlattr * const *); + void 
(*destroy)(const struct nft_ctx *, const struct nft_set *); + void (*gc_init)(const struct nft_set *); + unsigned int elemsize; +}; + +struct nft_set_ext { + u8 genmask; + u8 offset[8]; + char data[0]; +}; + +struct nft_set_elem_expr { + u8 size; + long: 0; + unsigned char data[0]; +}; + +struct nft_set_binding { + struct list_head list; + const struct nft_chain *chain; + u32 flags; +}; + +enum nft_set_extensions { + NFT_SET_EXT_KEY = 0, + NFT_SET_EXT_KEY_END = 1, + NFT_SET_EXT_DATA = 2, + NFT_SET_EXT_FLAGS = 3, + NFT_SET_EXT_TIMEOUT = 4, + NFT_SET_EXT_USERDATA = 5, + NFT_SET_EXT_EXPRESSIONS = 6, + NFT_SET_EXT_OBJREF = 7, + NFT_SET_EXT_NUM = 8, +}; + +struct nft_expr_type { + const struct nft_expr_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); + void (*release_ops)(const struct nft_expr_ops *); + const struct nft_expr_ops *ops; + const struct nft_expr_ops *inner_ops; + struct list_head list; + const char *name; + struct module *owner; + const struct nla_policy *policy; + unsigned int maxattr; + u8 family; + u8 flags; +}; + +struct nft_rule_blob { + long unsigned int size; + unsigned char data[0]; +}; + +struct nft_lookup { + struct nft_set *set; + u8 sreg; + u8 dreg; + bool dreg_set; + bool invert; + struct nft_set_binding binding; +}; + +struct ipv6hdr { + __u8 version: 4; + __u8 priority: 4; + __u8 flow_lbl[3]; + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; + union { + struct { + struct in6_addr saddr; + struct in6_addr daddr; + }; + struct { + struct in6_addr saddr; + struct in6_addr daddr; + } addrs; + }; +}; + +struct xt_action_param; + +struct xt_mtchk_param; + +struct xt_mtdtor_param; + +struct xt_match { + struct list_head list; + const char name[29]; + u_int8_t revision; + bool (*match)(const struct sk_buff *, struct xt_action_param *); + int (*checkentry)(const struct xt_mtchk_param *); + void (*destroy)(const struct xt_mtdtor_param *); + struct module *me; + const char *table; + unsigned int matchsize; + unsigned int usersize; + unsigned int hooks; + short unsigned int proto; + short unsigned int family; +}; + +struct xt_tgchk_param; + +struct xt_tgdtor_param; + +struct xt_target { + struct list_head list; + const char name[29]; + u_int8_t revision; + unsigned int (*target)(struct sk_buff *, const struct xt_action_param *); + int (*checkentry)(const struct xt_tgchk_param *); + void (*destroy)(const struct xt_tgdtor_param *); + struct module *me; + const char *table; + unsigned int targetsize; + unsigned int usersize; + unsigned int hooks; + short unsigned int proto; + short unsigned int family; +}; + +struct xt_action_param { + union { + const struct xt_match *match; + const struct xt_target *target; + }; + union { + const void *matchinfo; + const void *targinfo; + }; + const struct nf_hook_state *state; + unsigned int thoff; + u16 fragoff; + bool hotdrop; +}; + +struct xt_mtchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_match *match; + void *matchinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +struct xt_mtdtor_param { + struct net *net; + const struct xt_match *match; + void *matchinfo; + u_int8_t family; +}; + +struct xt_tgchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_target *target; + void *targinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +struct xt_tgdtor_param { + struct net *net; + const struct xt_target *target; + void *targinfo; + u_int8_t family; +}; + +enum { + IPT_TTL_EQ = 0, 
+ IPT_TTL_NE = 1, + IPT_TTL_LT = 2, + IPT_TTL_GT = 3, +}; + +struct ipt_ttl_info { + __u8 mode; + __u8 ttl; +}; + +enum { + IP6T_HL_EQ = 0, + IP6T_HL_NE = 1, + IP6T_HL_LT = 2, + IP6T_HL_GT = 3, +}; + +struct ip6t_hl_info { + __u8 mode; + __u8 hop_limit; +}; + +typedef __u64 __be64; + +struct flowi_tunnel { + __be64 tun_id; +}; + +struct flowi_common { + int flowic_oif; + int flowic_iif; + int flowic_l3mdev; + __u32 flowic_mark; + __u8 flowic_tos; + __u8 flowic_scope; + __u8 flowic_proto; + __u8 flowic_flags; + __u32 flowic_secid; + kuid_t flowic_uid; + __u32 flowic_multipath_hash; + struct flowi_tunnel flowic_tun_key; +}; + +union flowi_uli { + struct { + __be16 dport; + __be16 sport; + } ports; + struct { + __u8 type; + __u8 code; + } icmpt; + __be32 gre_key; + struct { + __u8 type; + } mht; +}; + +struct flowi4 { + struct flowi_common __fl_common; + __be32 saddr; + __be32 daddr; + union flowi_uli uli; +}; + +struct flowi6 { + struct flowi_common __fl_common; + struct in6_addr daddr; + struct in6_addr saddr; + __be32 flowlabel; + union flowi_uli uli; + __u32 mp_hash; +}; + +struct flowi { + union { + struct flowi_common __fl_common; + struct flowi4 ip4; + struct flowi6 ip6; + } u; +}; + +struct minmax_sample { + u32 t; + u32 v; +}; + +struct minmax { + struct minmax_sample s[3]; +}; + +struct inet_ehash_bucket; + +struct inet_bind_hashbucket; + +struct inet_listen_hashbucket; + +struct inet_hashinfo { + struct inet_ehash_bucket *ehash; + spinlock_t *ehash_locks; + unsigned int ehash_mask; + unsigned int ehash_locks_mask; + struct kmem_cache *bind_bucket_cachep; + struct inet_bind_hashbucket *bhash; + struct kmem_cache *bind2_bucket_cachep; + struct inet_bind_hashbucket *bhash2; + unsigned int bhash_size; + unsigned int lhash2_mask; + struct inet_listen_hashbucket *lhash2; + bool pernet; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct ip_ra_chain { + struct ip_ra_chain *next; + struct sock *sk; + union { + void (*destructor)(struct sock *); + struct sock *saved_sk; + }; + struct callback_head rcu; +}; + +struct tcp_fastopen_context { + siphash_key_t key[2]; + int num; + struct callback_head rcu; +}; + +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; + __u32 attach_type; +}; + +struct bpf_storage_buffer; + +struct bpf_cgroup_storage_map; + +struct bpf_cgroup_storage { + union { + struct bpf_storage_buffer *buf; + void *percpu_buf; + }; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list_map; + struct list_head list_cg; + struct rb_node node; + struct callback_head rcu; +}; + +struct sock_reuseport { + struct callback_head rcu; + u16 max_socks; + u16 num_socks; + u16 num_closed_socks; + u16 incoming_cpu; + unsigned int synq_overflow_ts; + unsigned int reuseport_id; + unsigned int bind_inany: 1; + unsigned int has_conns: 1; + struct bpf_prog *prog; + struct sock *socks[0]; +}; + +struct fastopen_queue { + struct request_sock *rskq_rst_head; + struct request_sock *rskq_rst_tail; + spinlock_t lock; + int qlen; + int max_qlen; + struct tcp_fastopen_context *ctx; +}; + +struct request_sock_queue { + spinlock_t rskq_lock; + u8 rskq_defer_accept; + u32 synflood_warned; + atomic_t qlen; + atomic_t young; + struct request_sock *rskq_accept_head; + struct request_sock *rskq_accept_tail; + struct fastopen_queue fastopenq; +}; + +struct ip_options { + __be32 faddr; + __be32 nexthop; + unsigned char optlen; + unsigned char srr; + unsigned char rr; + unsigned char ts; + unsigned char is_strictroute: 1; + unsigned 
char srr_is_hit: 1; + unsigned char is_changed: 1; + unsigned char rr_needaddr: 1; + unsigned char ts_needtime: 1; + unsigned char ts_needaddr: 1; + unsigned char router_alert; + unsigned char cipso; + unsigned char __pad2; + unsigned char __data[0]; +}; + +struct ip_options_rcu { + struct callback_head rcu; + struct ip_options opt; +}; + +struct ipv6_opt_hdr; + +struct ipv6_rt_hdr; + +struct ipv6_txoptions { + refcount_t refcnt; + int tot_len; + __u16 opt_flen; + __u16 opt_nflen; + struct ipv6_opt_hdr *hopopt; + struct ipv6_opt_hdr *dst0opt; + struct ipv6_rt_hdr *srcrt; + struct ipv6_opt_hdr *dst1opt; + struct callback_head rcu; +}; + +struct inet_cork { + unsigned int flags; + __be32 addr; + struct ip_options *opt; + unsigned int fragsize; + int length; + struct dst_entry *dst; + u8 tx_flags; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + u32 ts_opt_id; + u64 transmit_time; + u32 mark; +}; + +struct inet_cork_full { + struct inet_cork base; + struct flowi fl; +}; + +struct ipv6_pinfo; + +struct ip_mc_socklist; + +struct inet_sock { + struct sock sk; + struct ipv6_pinfo *pinet6; + long unsigned int inet_flags; + __be32 inet_saddr; + __s16 uc_ttl; + __be16 inet_sport; + struct ip_options_rcu *inet_opt; + atomic_t inet_id; + __u8 tos; + __u8 min_ttl; + __u8 mc_ttl; + __u8 pmtudisc; + __u8 rcv_tos; + __u8 convert_csum; + int uc_index; + int mc_index; + __be32 mc_addr; + u32 local_port_range; + struct ip_mc_socklist *mc_list; + struct inet_cork_full cork; +}; + +struct in6_pktinfo { + struct in6_addr ipi6_addr; + int ipi6_ifindex; +}; + +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + +struct ipv6_mc_socklist; + +struct ipv6_ac_socklist; + +struct ipv6_fl_socklist; + +struct ipv6_pinfo { + struct in6_addr saddr; + struct in6_pktinfo sticky_pktinfo; + const struct in6_addr *daddr_cache; + __be32 flow_label; + __u32 frag_size; + s16 hop_limit; + u8 mcast_hops; + int ucast_oif; + int mcast_oif; + union { + struct { + __u16 srcrt: 1; + __u16 osrcrt: 1; + __u16 rxinfo: 1; + __u16 rxoinfo: 1; + __u16 rxhlim: 1; + __u16 rxohlim: 1; + __u16 hopopts: 1; + __u16 ohopopts: 1; + __u16 dstopts: 1; + __u16 odstopts: 1; + __u16 rxflow: 1; + __u16 rxtclass: 1; + __u16 rxpmtu: 1; + __u16 rxorigdstaddr: 1; + __u16 recvfragsize: 1; + } bits; + __u16 all; + } rxopt; + __u8 srcprefs; + __u8 pmtudisc; + __u8 min_hopcount; + __u8 tclass; + __be32 rcv_flowinfo; + __u32 dst_cookie; + struct ipv6_mc_socklist *ipv6_mc_list; + struct ipv6_ac_socklist *ipv6_ac_list; + struct ipv6_fl_socklist *ipv6_fl_list; + struct ipv6_txoptions *opt; + struct sk_buff *pktoptions; + struct sk_buff *rxpmtu; + struct inet6_cork cork; +}; + +struct inet_connection_sock_af_ops { + int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); + void (*send_check)(struct sock *, struct sk_buff *); + int (*rebuild_header)(struct sock *); + void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); + int (*conn_request)(struct sock *, struct sk_buff *); + struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *); + u16 net_header_len; + u16 sockaddr_len; + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*addr2sockaddr)(struct sock *, struct sockaddr *); + void (*mtu_reduced)(struct sock *); +}; + +struct inet_bind_bucket; + +struct inet_bind2_bucket; + +struct tcp_ulp_ops; + +struct inet_connection_sock { + 
struct inet_sock icsk_inet; + struct request_sock_queue icsk_accept_queue; + struct inet_bind_bucket *icsk_bind_hash; + struct inet_bind2_bucket *icsk_bind2_hash; + long unsigned int icsk_timeout; + struct timer_list icsk_retransmit_timer; + struct timer_list icsk_delack_timer; + __u32 icsk_rto; + __u32 icsk_rto_min; + __u32 icsk_delack_max; + __u32 icsk_pmtu_cookie; + const struct tcp_congestion_ops *icsk_ca_ops; + const struct inet_connection_sock_af_ops *icsk_af_ops; + const struct tcp_ulp_ops *icsk_ulp_ops; + void *icsk_ulp_data; + void (*icsk_clean_acked)(struct sock *, u32); + unsigned int (*icsk_sync_mss)(struct sock *, u32); + __u8 icsk_ca_state: 5; + __u8 icsk_ca_initialized: 1; + __u8 icsk_ca_setsockopt: 1; + __u8 icsk_ca_dst_locked: 1; + __u8 icsk_retransmits; + __u8 icsk_pending; + __u8 icsk_backoff; + __u8 icsk_syn_retries; + __u8 icsk_probes_out; + __u16 icsk_ext_hdr_len; + struct { + __u8 pending; + __u8 quick; + __u8 pingpong; + __u8 retry; + __u32 ato: 8; + __u32 lrcv_flowlabel: 20; + __u32 unused: 4; + long unsigned int timeout; + __u32 lrcvtime; + __u16 last_seg_size; + __u16 rcv_mss; + } icsk_ack; + struct { + int search_high; + int search_low; + u32 probe_size: 31; + u32 enabled: 1; + u32 probe_timestamp; + } icsk_mtup; + u32 icsk_probes_tstamp; + u32 icsk_user_timeout; + u64 icsk_ca_priv[13]; +}; + +struct inet_bind_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + struct in6_addr fast_v6_rcv_saddr; + __be32 fast_rcv_saddr; + short unsigned int fast_sk_family; + bool fast_ipv6_only; + struct hlist_node node; + struct hlist_head bhash2; +}; + +struct inet_bind2_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + short unsigned int addr_type; + struct in6_addr v6_rcv_saddr; + struct hlist_node node; + struct hlist_node bhash_node; + struct hlist_head owners; +}; + +struct tcp_ulp_ops { + struct list_head list; + int (*init)(struct sock *); + void (*update)(struct sock *, struct proto *, void (*)(struct sock *)); + void (*release)(struct sock *); + int (*get_info)(struct sock *, struct sk_buff *); + size_t (*get_info_size)(const struct sock *); + void (*clone)(const struct request_sock *, struct sock *, const gfp_t); + char name[16]; + struct module *owner; +}; + +struct tcp_fastopen_cookie { + __le64 val[2]; + s8 len; + bool exp; +}; + +struct tcp_sack_block { + u32 start_seq; + u32 end_seq; +}; + +struct tcp_options_received { + int ts_recent_stamp; + u32 ts_recent; + u32 rcv_tsval; + u32 rcv_tsecr; + u16 saw_tstamp: 1; + u16 tstamp_ok: 1; + u16 dsack: 1; + u16 wscale_ok: 1; + u16 sack_ok: 3; + u16 smc_ok: 1; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u8 saw_unknown: 1; + u8 unused: 7; + u8 num_sacks; + u16 user_mss; + u16 mss_clamp; +}; + +struct tcp_rack { + u64 mstamp; + u32 rtt_us; + u32 end_seq; + u32 last_delivered; + u8 reo_wnd_steps; + u8 reo_wnd_persist: 5; + u8 dsack_seen: 1; + u8 advanced: 1; +}; + +struct tcp_fastopen_request; + +struct tcp_sock { + struct inet_connection_sock inet_conn; + __u8 __cacheline_group_begin__tcp_sock_read_tx[0]; + u32 max_window; + u32 rcv_ssthresh; + u32 reordering; + u32 notsent_lowat; + u16 gso_segs; + struct sk_buff *lost_skb_hint; + struct sk_buff *retransmit_skb_hint; + __u8 __cacheline_group_end__tcp_sock_read_tx[0]; + __u8 __cacheline_group_begin__tcp_sock_read_txrx[0]; + u32 tsoffset; + u32 snd_wnd; + u32 mss_cache; + u32 snd_cwnd; + u32 prr_out; + u32 lost_out; + u32 sacked_out; + u16 
tcp_header_len; + u8 scaling_ratio; + u8 chrono_type: 2; + u8 repair: 1; + u8 tcp_usec_ts: 1; + u8 is_sack_reneg: 1; + u8 is_cwnd_limited: 1; + __u8 __cacheline_group_end__tcp_sock_read_txrx[0]; + __u8 __cacheline_group_begin__tcp_sock_read_rx[0]; + u32 copied_seq; + u32 rcv_tstamp; + u32 snd_wl1; + u32 tlp_high_seq; + u32 rttvar_us; + u32 retrans_out; + u16 advmss; + u16 urg_data; + u32 lost; + struct minmax rtt_min; + struct rb_root out_of_order_queue; + u32 snd_ssthresh; + u8 recvmsg_inq: 1; + __u8 __cacheline_group_end__tcp_sock_read_rx[0]; + long: 64; + long: 64; + __u8 __cacheline_group_begin__tcp_sock_write_tx[0]; + u32 segs_out; + u32 data_segs_out; + u64 bytes_sent; + u32 snd_sml; + u32 chrono_start; + u32 chrono_stat[3]; + u32 write_seq; + u32 pushed_seq; + u32 lsndtime; + u32 mdev_us; + u32 rtt_seq; + u64 tcp_wstamp_ns; + struct list_head tsorted_sent_queue; + struct sk_buff *highest_sack; + u8 ecn_flags; + __u8 __cacheline_group_end__tcp_sock_write_tx[0]; + __u8 __cacheline_group_begin__tcp_sock_write_txrx[0]; + __be32 pred_flags; + u64 tcp_clock_cache; + u64 tcp_mstamp; + u32 rcv_nxt; + u32 snd_nxt; + u32 snd_una; + u32 window_clamp; + u32 srtt_us; + u32 packets_out; + u32 snd_up; + u32 delivered; + u32 delivered_ce; + u32 app_limited; + u32 rcv_wnd; + struct tcp_options_received rx_opt; + u8 nonagle: 4; + u8 rate_app_limited: 1; + __u8 __cacheline_group_end__tcp_sock_write_txrx[0]; + long: 0; + __u8 __cacheline_group_begin__tcp_sock_write_rx[0]; + u64 bytes_received; + u32 segs_in; + u32 data_segs_in; + u32 rcv_wup; + u32 max_packets_out; + u32 cwnd_usage_seq; + u32 rate_delivered; + u32 rate_interval_us; + u32 rcv_rtt_last_tsecr; + u64 first_tx_mstamp; + u64 delivered_mstamp; + u64 bytes_acked; + struct { + u32 rtt_us; + u32 seq; + u64 time; + } rcv_rtt_est; + struct { + u32 space; + u32 seq; + u64 time; + } rcvq_space; + __u8 __cacheline_group_end__tcp_sock_write_rx[0]; + u32 dsack_dups; + u32 compressed_ack_rcv_nxt; + struct list_head tsq_node; + struct tcp_rack rack; + u8 compressed_ack; + u8 dup_ack_counter: 2; + u8 tlp_retrans: 1; + u8 unused: 5; + u8 thin_lto: 1; + u8 fastopen_connect: 1; + u8 fastopen_no_cookie: 1; + u8 fastopen_client_fail: 2; + u8 frto: 1; + u8 repair_queue; + u8 save_syn: 2; + u8 syn_data: 1; + u8 syn_fastopen: 1; + u8 syn_fastopen_exp: 1; + u8 syn_fastopen_ch: 1; + u8 syn_data_acked: 1; + u8 keepalive_probes; + u32 tcp_tx_delay; + u32 mdev_max_us; + u32 reord_seen; + u32 snd_cwnd_cnt; + u32 snd_cwnd_clamp; + u32 snd_cwnd_used; + u32 snd_cwnd_stamp; + u32 prior_cwnd; + u32 prr_delivered; + u32 last_oow_ack_time; + struct hrtimer pacing_timer; + struct hrtimer compressed_ack_timer; + struct sk_buff *ooo_last_skb; + struct tcp_sack_block duplicate_sack[1]; + struct tcp_sack_block selective_acks[4]; + struct tcp_sack_block recv_sack_cache[4]; + int lost_cnt_hint; + u32 prior_ssthresh; + u32 high_seq; + u32 retrans_stamp; + u32 undo_marker; + int undo_retrans; + u64 bytes_retrans; + u32 total_retrans; + u32 rto_stamp; + u16 total_rto; + u16 total_rto_recoveries; + u32 total_rto_time; + u32 urg_seq; + unsigned int keepalive_time; + unsigned int keepalive_intvl; + int linger2; + u8 bpf_sock_ops_cb_flags; + u8 bpf_chg_cc_inprogress: 1; + u16 timeout_rehash; + u32 rcv_ooopack; + struct { + u32 probe_seq_start; + u32 probe_seq_end; + } mtu_probe; + u32 plb_rehash; + u32 mtu_info; + struct tcp_fastopen_request *fastopen_req; + struct request_sock *fastopen_rsk; + struct saved_syn *saved_syn; + long: 64; +}; + +struct tcp_fastopen_request { + struct 
tcp_fastopen_cookie cookie; + struct msghdr *data; + size_t size; + int copied; + struct ubuf_info *uarg; +}; + +struct ipv6_rt_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; +}; + +struct ipv6_opt_hdr { + __u8 nexthdr; + __u8 hdrlen; +}; + +struct ip6_sf_socklist; + +struct ipv6_mc_socklist { + struct in6_addr addr; + int ifindex; + unsigned int sfmode; + struct ipv6_mc_socklist *next; + struct ip6_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ipv6_ac_socklist { + struct in6_addr acl_addr; + int acl_ifindex; + struct ipv6_ac_socklist *acl_next; +}; + +struct ip6_flowlabel; + +struct ipv6_fl_socklist { + struct ipv6_fl_socklist *next; + struct ip6_flowlabel *fl; + struct callback_head rcu; +}; + +struct ip6_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + struct in6_addr sl_addr[0]; +}; + +struct ip6_flowlabel { + struct ip6_flowlabel *next; + __be32 label; + atomic_t users; + struct in6_addr dst; + struct ipv6_txoptions *opt; + long unsigned int linger; + struct callback_head rcu; + u8 share; + union { + struct pid *pid; + kuid_t uid; + } owner; + long unsigned int lastuse; + long unsigned int expires; + struct net *fl_net; +}; + +struct inet_ehash_bucket { + struct hlist_nulls_head chain; +}; + +struct inet_bind_hashbucket { + spinlock_t lock; + struct hlist_head chain; +}; + +struct inet_listen_hashbucket { + spinlock_t lock; + struct hlist_nulls_head nulls_head; +}; + +struct bpf_storage_buffer { + struct callback_head rcu; + char data[0]; +}; + +struct ack_sample { + u32 pkts_acked; + s32 rtt_us; + u32 in_flight; +}; + +struct rate_sample { + u64 prior_mstamp; + u32 prior_delivered; + u32 prior_delivered_ce; + s32 delivered; + s32 delivered_ce; + long int interval_us; + u32 snd_interval_us; + u32 rcv_interval_us; + long int rtt_us; + int losses; + u32 acked_sacked; + u32 prior_in_flight; + u32 last_end_seq; + bool is_app_limited; + bool is_retrans; + bool is_ack_delayed; +}; + +struct tcp_plb_state { + u8 consec_cong_rounds: 5; + u8 unused: 3; + u32 pause_until; +}; + +struct nf_bridge_info { + enum { + BRNF_PROTO_UNCHANGED = 0, + BRNF_PROTO_8021Q = 1, + BRNF_PROTO_PPPOE = 2, + } orig_proto: 8; + u8 pkt_otherhost: 1; + u8 in_prerouting: 1; + u8 bridged_dnat: 1; + u8 sabotage_in_done: 1; + __u16 frag_max_size; + int physinif; + struct net_device *physoutdev; + union { + __be32 ipv4_daddr; + struct in6_addr ipv6_daddr; + char neigh_header[8]; + }; +}; + +enum skb_ext_id { + SKB_EXT_BRIDGE_NF = 0, + SKB_EXT_NUM = 1, +}; + +enum nf_hook_ops_type { + NF_HOOK_OP_UNDEFINED = 0, + NF_HOOK_OP_NF_TABLES = 1, + NF_HOOK_OP_BPF = 2, +}; + +struct nf_hook_ops { + nf_hookfn *hook; + struct net_device *dev; + void *priv; + u8 pf; + enum nf_hook_ops_type hook_ops_type: 8; + unsigned int hooknum; + int priority; +}; + +struct nf_conntrack_zone { + u16 id; + u8 flags; + u8 dir; +}; + +struct nf_defrag_hook { + struct module *owner; + int (*enable)(struct net *); + void (*disable)(struct net *); +}; + +enum { + TCPF_ESTABLISHED = 2, + TCPF_SYN_SENT = 4, + TCPF_SYN_RECV = 8, + TCPF_FIN_WAIT1 = 16, + TCPF_FIN_WAIT2 = 32, + TCPF_TIME_WAIT = 64, + TCPF_CLOSE = 128, + TCPF_CLOSE_WAIT = 256, + TCPF_LAST_ACK = 512, + TCPF_LISTEN = 1024, + TCPF_CLOSING = 2048, + TCPF_NEW_SYN_RECV = 4096, + TCPF_BOUND_INACTIVE = 8192, +}; + +enum { + INET_FLAGS_PKTINFO = 0, + INET_FLAGS_TTL = 1, + INET_FLAGS_TOS = 2, + INET_FLAGS_RECVOPTS = 3, + INET_FLAGS_RETOPTS = 4, + INET_FLAGS_PASSSEC = 5, + INET_FLAGS_ORIGDSTADDR = 6, + INET_FLAGS_CHECKSUM 
= 7, + INET_FLAGS_RECVFRAGSIZE = 8, + INET_FLAGS_RECVERR = 9, + INET_FLAGS_RECVERR_RFC4884 = 10, + INET_FLAGS_FREEBIND = 11, + INET_FLAGS_HDRINCL = 12, + INET_FLAGS_MC_LOOP = 13, + INET_FLAGS_MC_ALL = 14, + INET_FLAGS_TRANSPARENT = 15, + INET_FLAGS_IS_ICSK = 16, + INET_FLAGS_NODEFRAG = 17, + INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, + INET_FLAGS_DEFER_CONNECT = 19, + INET_FLAGS_MC6_LOOP = 20, + INET_FLAGS_RECVERR6_RFC4884 = 21, + INET_FLAGS_MC6_ALL = 22, + INET_FLAGS_AUTOFLOWLABEL_SET = 23, + INET_FLAGS_AUTOFLOWLABEL = 24, + INET_FLAGS_DONTFRAG = 25, + INET_FLAGS_RECVERR6 = 26, + INET_FLAGS_REPFLOW = 27, + INET_FLAGS_RTALERT_ISOLATE = 28, + INET_FLAGS_SNDFLOW = 29, + INET_FLAGS_RTALERT = 30, +}; + +enum ip_defrag_users { + IP_DEFRAG_LOCAL_DELIVER = 0, + IP_DEFRAG_CALL_RA_CHAIN = 1, + IP_DEFRAG_CONNTRACK_IN = 2, + __IP_DEFRAG_CONNTRACK_IN_END = 65537, + IP_DEFRAG_CONNTRACK_OUT = 65538, + __IP_DEFRAG_CONNTRACK_OUT_END = 131073, + IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, + __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, + IP_DEFRAG_VS_IN = 196610, + IP_DEFRAG_VS_OUT = 196611, + IP_DEFRAG_VS_FWD = 196612, + IP_DEFRAG_AF_PACKET = 196613, + IP_DEFRAG_MACVLAN = 196614, +}; + +enum nf_ip_hook_priorities { + NF_IP_PRI_FIRST = -2147483648, + NF_IP_PRI_RAW_BEFORE_DEFRAG = -450, + NF_IP_PRI_CONNTRACK_DEFRAG = -400, + NF_IP_PRI_RAW = -300, + NF_IP_PRI_SELINUX_FIRST = -225, + NF_IP_PRI_CONNTRACK = -200, + NF_IP_PRI_MANGLE = -150, + NF_IP_PRI_NAT_DST = -100, + NF_IP_PRI_FILTER = 0, + NF_IP_PRI_SECURITY = 50, + NF_IP_PRI_NAT_SRC = 100, + NF_IP_PRI_SELINUX_LAST = 225, + NF_IP_PRI_CONNTRACK_HELPER = 300, + NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, + NF_IP_PRI_LAST = 2147483647, +}; + +enum wq_flags { + WQ_BH = 1, + WQ_UNBOUND = 2, + WQ_FREEZABLE = 4, + WQ_MEM_RECLAIM = 8, + WQ_HIGHPRI = 16, + WQ_CPU_INTENSIVE = 32, + WQ_SYSFS = 64, + WQ_POWER_EFFICIENT = 128, + __WQ_DESTROYING = 32768, + __WQ_DRAINING = 65536, + __WQ_ORDERED = 131072, + __WQ_LEGACY = 262144, + __WQ_BH_ALLOWS = 17, +}; + +struct __kernel_sockaddr_storage { + union { + struct { + __kernel_sa_family_t ss_family; + char __data[126]; + }; + void *__align; + }; +}; + +enum sock_type { + SOCK_STREAM = 1, + SOCK_DGRAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +struct group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct group_filter { + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + }; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + }; + }; +}; + +struct sockaddr_in6 { + short unsigned int sin6_family; + __be16 sin6_port; + __be32 sin6_flowinfo; + struct in6_addr sin6_addr; + __u32 sin6_scope_id; +}; + +struct icmpv6_echo { + __be16 identifier; + __be16 sequence; +}; + +struct icmpv6_nd_advt { + __u32 router: 1; + __u32 solicited: 1; + __u32 override: 1; + __u32 reserved: 29; +}; + +struct icmpv6_nd_ra { + __u8 hop_limit; + __u8 managed: 1; + __u8 other: 1; + __u8 home_agent: 1; + __u8 router_pref: 2; + __u8 reserved: 3; + __be16 rt_lifetime; +}; + +struct icmp6hdr { + __u8 icmp6_type; + __u8 icmp6_code; + __sum16 icmp6_cksum; + union { + __be32 un_data32[1]; + __be16 un_data16[2]; + __u8 un_data8[4]; + struct icmpv6_echo u_echo; + struct 
icmpv6_nd_advt u_nd_advt; + struct icmpv6_nd_ra u_nd_ra; + } icmp6_dataun; +}; + +enum netdev_cmd { + NETDEV_UP = 1, + NETDEV_DOWN = 2, + NETDEV_REBOOT = 3, + NETDEV_CHANGE = 4, + NETDEV_REGISTER = 5, + NETDEV_UNREGISTER = 6, + NETDEV_CHANGEMTU = 7, + NETDEV_CHANGEADDR = 8, + NETDEV_PRE_CHANGEADDR = 9, + NETDEV_GOING_DOWN = 10, + NETDEV_CHANGENAME = 11, + NETDEV_FEAT_CHANGE = 12, + NETDEV_BONDING_FAILOVER = 13, + NETDEV_PRE_UP = 14, + NETDEV_PRE_TYPE_CHANGE = 15, + NETDEV_POST_TYPE_CHANGE = 16, + NETDEV_POST_INIT = 17, + NETDEV_PRE_UNINIT = 18, + NETDEV_RELEASE = 19, + NETDEV_NOTIFY_PEERS = 20, + NETDEV_JOIN = 21, + NETDEV_CHANGEUPPER = 22, + NETDEV_RESEND_IGMP = 23, + NETDEV_PRECHANGEMTU = 24, + NETDEV_CHANGEINFODATA = 25, + NETDEV_BONDING_INFO = 26, + NETDEV_PRECHANGEUPPER = 27, + NETDEV_CHANGELOWERSTATE = 28, + NETDEV_UDP_TUNNEL_PUSH_INFO = 29, + NETDEV_UDP_TUNNEL_DROP_INFO = 30, + NETDEV_CHANGE_TX_QUEUE_LEN = 31, + NETDEV_CVLAN_FILTER_PUSH_INFO = 32, + NETDEV_CVLAN_FILTER_DROP_INFO = 33, + NETDEV_SVLAN_FILTER_PUSH_INFO = 34, + NETDEV_SVLAN_FILTER_DROP_INFO = 35, + NETDEV_OFFLOAD_XSTATS_ENABLE = 36, + NETDEV_OFFLOAD_XSTATS_DISABLE = 37, + NETDEV_OFFLOAD_XSTATS_REPORT_USED = 38, + NETDEV_OFFLOAD_XSTATS_REPORT_DELTA = 39, + NETDEV_XDP_FEAT_CHANGE = 40, +}; + +struct netdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +struct inet6_skb_parm { + int iif; + __be16 ra; + __u16 dst0; + __u16 srcrt; + __u16 dst1; + __u16 lastopt; + __u16 nhoff; + __u16 flags; + __u16 frag_max_size; + __u16 srhoff; +}; + +struct mld_msg { + struct icmp6hdr mld_hdr; + struct in6_addr mld_mca; +}; + +struct mld2_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + struct in6_addr grec_mca; + struct in6_addr grec_src[0]; +}; + +struct mld2_report { + struct icmp6hdr mld2r_hdr; + struct mld2_grec mld2r_grec[0]; +}; + +struct mld2_query { + struct icmp6hdr mld2q_hdr; + struct in6_addr mld2q_mca; + __u8 mld2q_resv2: 4; + __u8 mld2q_suppress: 1; + __u8 mld2q_qrv: 3; + __u8 mld2q_qqic; + __be16 mld2q_nsrcs; + struct in6_addr mld2q_srcs[0]; +}; + +struct igmp6_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + +struct igmp6_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; + struct ifmcaddr6 *im; +}; + +enum { + NETIF_F_SG_BIT = 0, + NETIF_F_IP_CSUM_BIT = 1, + __UNUSED_NETIF_F_1 = 2, + NETIF_F_HW_CSUM_BIT = 3, + NETIF_F_IPV6_CSUM_BIT = 4, + NETIF_F_HIGHDMA_BIT = 5, + NETIF_F_FRAGLIST_BIT = 6, + NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, + NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, + NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, + NETIF_F_VLAN_CHALLENGED_BIT = 10, + NETIF_F_GSO_BIT = 11, + __UNUSED_NETIF_F_12 = 12, + __UNUSED_NETIF_F_13 = 13, + NETIF_F_GRO_BIT = 14, + NETIF_F_LRO_BIT = 15, + NETIF_F_GSO_SHIFT = 16, + NETIF_F_TSO_BIT = 16, + NETIF_F_GSO_ROBUST_BIT = 17, + NETIF_F_TSO_ECN_BIT = 18, + NETIF_F_TSO_MANGLEID_BIT = 19, + NETIF_F_TSO6_BIT = 20, + NETIF_F_FSO_BIT = 21, + NETIF_F_GSO_GRE_BIT = 22, + NETIF_F_GSO_GRE_CSUM_BIT = 23, + NETIF_F_GSO_IPXIP4_BIT = 24, + NETIF_F_GSO_IPXIP6_BIT = 25, + NETIF_F_GSO_UDP_TUNNEL_BIT = 26, + NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, + NETIF_F_GSO_PARTIAL_BIT = 28, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, + NETIF_F_GSO_SCTP_BIT = 30, + NETIF_F_GSO_ESP_BIT = 31, + NETIF_F_GSO_UDP_BIT = 32, + NETIF_F_GSO_UDP_L4_BIT = 33, + NETIF_F_GSO_FRAGLIST_BIT = 34, + NETIF_F_GSO_LAST = 34, + NETIF_F_FCOE_CRC_BIT = 35, + NETIF_F_SCTP_CRC_BIT = 36, + __UNUSED_NETIF_F_37 = 37, + 
NETIF_F_NTUPLE_BIT = 38, + NETIF_F_RXHASH_BIT = 39, + NETIF_F_RXCSUM_BIT = 40, + NETIF_F_NOCACHE_COPY_BIT = 41, + NETIF_F_LOOPBACK_BIT = 42, + NETIF_F_RXFCS_BIT = 43, + NETIF_F_RXALL_BIT = 44, + NETIF_F_HW_VLAN_STAG_TX_BIT = 45, + NETIF_F_HW_VLAN_STAG_RX_BIT = 46, + NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47, + NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48, + NETIF_F_HW_TC_BIT = 49, + NETIF_F_HW_ESP_BIT = 50, + NETIF_F_HW_ESP_TX_CSUM_BIT = 51, + NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52, + NETIF_F_HW_TLS_TX_BIT = 53, + NETIF_F_HW_TLS_RX_BIT = 54, + NETIF_F_GRO_HW_BIT = 55, + NETIF_F_HW_TLS_RECORD_BIT = 56, + NETIF_F_GRO_FRAGLIST_BIT = 57, + NETIF_F_HW_MACSEC_BIT = 58, + NETIF_F_GRO_UDP_FWD_BIT = 59, + NETIF_F_HW_HSR_TAG_INS_BIT = 60, + NETIF_F_HW_HSR_TAG_RM_BIT = 61, + NETIF_F_HW_HSR_FWD_BIT = 62, + NETIF_F_HW_HSR_DUP_BIT = 63, + NETDEV_FEATURE_COUNT = 64, +}; + +struct devlink; + +enum devlink_port_type { + DEVLINK_PORT_TYPE_NOTSET = 0, + DEVLINK_PORT_TYPE_AUTO = 1, + DEVLINK_PORT_TYPE_ETH = 2, + DEVLINK_PORT_TYPE_IB = 3, +}; + +struct ib_device; + +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0, + DEVLINK_PORT_FLAVOUR_CPU = 1, + DEVLINK_PORT_FLAVOUR_DSA = 2, + DEVLINK_PORT_FLAVOUR_PCI_PF = 3, + DEVLINK_PORT_FLAVOUR_PCI_VF = 4, + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5, + DEVLINK_PORT_FLAVOUR_UNUSED = 6, + DEVLINK_PORT_FLAVOUR_PCI_SF = 7, +}; + +struct devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct devlink_port_pci_pf_attrs { + u32 controller; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_pci_vf_attrs { + u32 controller; + u16 pf; + u16 vf; + u8 external: 1; +}; + +struct devlink_port_pci_sf_attrs { + u32 controller; + u32 sf; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_attrs { + u8 split: 1; + u8 splittable: 1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct devlink_port_phys_attrs phys; + struct devlink_port_pci_pf_attrs pci_pf; + struct devlink_port_pci_vf_attrs pci_vf; + struct devlink_port_pci_sf_attrs pci_sf; + }; +}; + +struct devlink_linecard; + +struct devlink_port_ops; + +struct devlink_rate; + +struct devlink_port { + struct list_head list; + struct list_head region_list; + struct devlink *devlink; + const struct devlink_port_ops *ops; + unsigned int index; + spinlock_t type_lock; + enum devlink_port_type type; + enum devlink_port_type desired_type; + union { + struct { + struct net_device *netdev; + int ifindex; + char ifname[16]; + } type_eth; + struct { + struct ib_device *ibdev; + } type_ib; + }; + struct devlink_port_attrs attrs; + u8 attrs_set: 1; + u8 switch_port: 1; + u8 registered: 1; + u8 initialized: 1; + struct delayed_work type_warn_dw; + struct list_head reporter_list; + struct devlink_rate *devlink_rate; + struct devlink_linecard *linecard; + u32 rel_index; +}; + +enum { + RTM_BASE = 16, + RTM_NEWLINK = 16, + RTM_DELLINK = 17, + RTM_GETLINK = 18, + RTM_SETLINK = 19, + RTM_NEWADDR = 20, + RTM_DELADDR = 21, + RTM_GETADDR = 22, + RTM_NEWROUTE = 24, + RTM_DELROUTE = 25, + RTM_GETROUTE = 26, + RTM_NEWNEIGH = 28, + RTM_DELNEIGH = 29, + RTM_GETNEIGH = 30, + RTM_NEWRULE = 32, + RTM_DELRULE = 33, + RTM_GETRULE = 34, + RTM_NEWQDISC = 36, + RTM_DELQDISC = 37, + RTM_GETQDISC = 38, + RTM_NEWTCLASS = 40, + RTM_DELTCLASS = 41, + RTM_GETTCLASS = 42, + RTM_NEWTFILTER = 44, + RTM_DELTFILTER = 45, + RTM_GETTFILTER = 46, + RTM_NEWACTION = 48, + RTM_DELACTION = 49, + RTM_GETACTION = 50, + RTM_NEWPREFIX = 52, + RTM_GETMULTICAST = 58, + RTM_GETANYCAST = 62, + RTM_NEWNEIGHTBL = 64, 
+ RTM_GETNEIGHTBL = 66, + RTM_SETNEIGHTBL = 67, + RTM_NEWNDUSEROPT = 68, + RTM_NEWADDRLABEL = 72, + RTM_DELADDRLABEL = 73, + RTM_GETADDRLABEL = 74, + RTM_GETDCB = 78, + RTM_SETDCB = 79, + RTM_NEWNETCONF = 80, + RTM_DELNETCONF = 81, + RTM_GETNETCONF = 82, + RTM_NEWMDB = 84, + RTM_DELMDB = 85, + RTM_GETMDB = 86, + RTM_NEWNSID = 88, + RTM_DELNSID = 89, + RTM_GETNSID = 90, + RTM_NEWSTATS = 92, + RTM_GETSTATS = 94, + RTM_SETSTATS = 95, + RTM_NEWCACHEREPORT = 96, + RTM_NEWCHAIN = 100, + RTM_DELCHAIN = 101, + RTM_GETCHAIN = 102, + RTM_NEWNEXTHOP = 104, + RTM_DELNEXTHOP = 105, + RTM_GETNEXTHOP = 106, + RTM_NEWLINKPROP = 108, + RTM_DELLINKPROP = 109, + RTM_GETLINKPROP = 110, + RTM_NEWVLAN = 112, + RTM_DELVLAN = 113, + RTM_GETVLAN = 114, + RTM_NEWNEXTHOPBUCKET = 116, + RTM_DELNEXTHOPBUCKET = 117, + RTM_GETNEXTHOPBUCKET = 118, + RTM_NEWTUNNEL = 120, + RTM_DELTUNNEL = 121, + RTM_GETTUNNEL = 122, + __RTM_MAX = 123, +}; + +enum devlink_rate_type { + DEVLINK_RATE_TYPE_LEAF = 0, + DEVLINK_RATE_TYPE_NODE = 1, +}; + +enum devlink_port_fn_state { + DEVLINK_PORT_FN_STATE_INACTIVE = 0, + DEVLINK_PORT_FN_STATE_ACTIVE = 1, +}; + +enum devlink_port_fn_opstate { + DEVLINK_PORT_FN_OPSTATE_DETACHED = 0, + DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1, +}; + +struct devlink_rate { + struct list_head list; + enum devlink_rate_type type; + struct devlink *devlink; + void *priv; + u64 tx_share; + u64 tx_max; + struct devlink_rate *parent; + union { + struct devlink_port *devlink_port; + struct { + char *name; + refcount_t refcnt; + }; + }; + u32 tx_priority; + u32 tx_weight; +}; + +struct devlink_port_ops { + int (*port_split)(struct devlink *, struct devlink_port *, unsigned int, struct netlink_ext_ack *); + int (*port_unsplit)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_type_set)(struct devlink_port *, enum devlink_port_type); + int (*port_del)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_fn_hw_addr_get)(struct devlink_port *, u8 *, int *, struct netlink_ext_ack *); + int (*port_fn_hw_addr_set)(struct devlink_port *, const u8 *, int, struct netlink_ext_ack *); + int (*port_fn_roce_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_roce_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_migratable_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_migratable_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_state_get)(struct devlink_port *, enum devlink_port_fn_state *, enum devlink_port_fn_opstate *, struct netlink_ext_ack *); + int (*port_fn_state_set)(struct devlink_port *, enum devlink_port_fn_state, struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_max_io_eqs_get)(struct devlink_port *, u32 *, struct netlink_ext_ack *); + int (*port_fn_max_io_eqs_set)(struct devlink_port *, u32, struct netlink_ext_ack *); +}; + +struct bridge_stp_xstats { + __u64 transition_blk; + __u64 transition_fwd; + __u64 rx_bpdu; + __u64 tx_bpdu; + __u64 rx_tcn; + __u64 tx_tcn; +}; + +enum { + BR_MCAST_DIR_RX = 0, + BR_MCAST_DIR_TX = 1, + BR_MCAST_DIR_SIZE = 2, +}; + +struct br_mcast_stats { + __u64 igmp_v1queries[2]; + 
__u64 igmp_v2queries[2]; + __u64 igmp_v3queries[2]; + __u64 igmp_leaves[2]; + __u64 igmp_v1reports[2]; + __u64 igmp_v2reports[2]; + __u64 igmp_v3reports[2]; + __u64 igmp_parse_errors; + __u64 mld_v1queries[2]; + __u64 mld_v2queries[2]; + __u64 mld_leaves[2]; + __u64 mld_v1reports[2]; + __u64 mld_v2reports[2]; + __u64 mld_parse_errors; + __u64 mcast_bytes[2]; + __u64 mcast_packets[2]; +}; + +struct br_ip { + union { + __be32 ip4; + struct in6_addr ip6; + } src; + union { + __be32 ip4; + struct in6_addr ip6; + unsigned char mac_addr[6]; + } dst; + __be16 proto; + __u16 vid; +}; + +struct tcf_walker { + int stop; + int skip; + int count; + bool nonempty; + long unsigned int cookie; + int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); +}; + +struct tcf_exts { + int action; + int police; +}; + +struct bridge_id { + unsigned char prio[2]; + unsigned char addr[6]; +}; + +typedef struct bridge_id bridge_id; + +typedef __u16 port_id; + +struct bridge_mcast_own_query { + struct timer_list timer; + u32 startup_sent; +}; + +struct bridge_mcast_other_query { + struct timer_list timer; + struct timer_list delay_timer; +}; + +struct bridge_mcast_querier { + struct br_ip addr; + int port_ifidx; + seqcount_spinlock_t seq; +}; + +struct bridge_mcast_stats { + struct br_mcast_stats mstats; + struct u64_stats_sync syncp; +}; + +struct net_bridge; + +struct net_bridge_vlan; + +struct net_bridge_mcast { + struct net_bridge *br; + struct net_bridge_vlan *vlan; + u32 multicast_last_member_count; + u32 multicast_startup_query_count; + u8 multicast_querier; + u8 multicast_igmp_version; + u8 multicast_router; + u8 multicast_mld_version; + long unsigned int multicast_last_member_interval; + long unsigned int multicast_membership_interval; + long unsigned int multicast_querier_interval; + long unsigned int multicast_query_interval; + long unsigned int multicast_query_response_interval; + long unsigned int multicast_startup_query_interval; + struct hlist_head ip4_mc_router_list; + struct timer_list ip4_mc_router_timer; + struct bridge_mcast_other_query ip4_other_query; + struct bridge_mcast_own_query ip4_own_query; + struct bridge_mcast_querier ip4_querier; + struct hlist_head ip6_mc_router_list; + struct timer_list ip6_mc_router_timer; + struct bridge_mcast_other_query ip6_other_query; + struct bridge_mcast_own_query ip6_own_query; + struct bridge_mcast_querier ip6_querier; +}; + +struct net_bridge { + spinlock_t lock; + spinlock_t hash_lock; + struct hlist_head frame_type_list; + struct net_device *dev; + long unsigned int options; + struct rhashtable fdb_hash_tbl; + struct list_head port_list; + union { + struct rtable fake_rtable; + struct rt6_info fake_rt6_info; + }; + u16 group_fwd_mask; + u16 group_fwd_mask_required; + bridge_id designated_root; + bridge_id bridge_id; + unsigned char topology_change; + unsigned char topology_change_detected; + u16 root_port; + long unsigned int max_age; + long unsigned int hello_time; + long unsigned int forward_delay; + long unsigned int ageing_time; + long unsigned int bridge_max_age; + long unsigned int bridge_hello_time; + long unsigned int bridge_forward_delay; + long unsigned int bridge_ageing_time; + u32 root_path_cost; + u8 group_addr[6]; + enum { + BR_NO_STP = 0, + BR_KERNEL_STP = 1, + BR_USER_STP = 2, + } stp_enabled; + struct net_bridge_mcast multicast_ctx; + struct bridge_mcast_stats *mcast_stats; + u32 hash_max; + spinlock_t multicast_lock; + struct rhashtable mdb_hash_tbl; + struct rhashtable sg_port_tbl; + struct hlist_head mcast_gc_list; + struct 
hlist_head mdb_list; + struct work_struct mcast_gc_work; + struct timer_list hello_timer; + struct timer_list tcn_timer; + struct timer_list topology_change_timer; + struct delayed_work gc_work; + struct kobject *ifobj; + u32 auto_cnt; + atomic_t fdb_n_learned; + u32 fdb_max_learned; + struct hlist_head fdb_list; +}; + +struct net_bridge_port; + +struct net_bridge_mcast_port { + struct net_bridge_port *port; + struct net_bridge_vlan *vlan; + struct bridge_mcast_own_query ip4_own_query; + struct timer_list ip4_mc_router_timer; + struct hlist_node ip4_rlist; + struct bridge_mcast_own_query ip6_own_query; + struct timer_list ip6_mc_router_timer; + struct hlist_node ip6_rlist; + unsigned char multicast_router; + u32 mdb_n_entries; + u32 mdb_max_entries; +}; + +struct net_bridge_port { + struct net_bridge *br; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + long unsigned int flags; + struct net_bridge_port *backup_port; + u32 backup_nhid; + u8 priority; + u8 state; + u16 port_no; + unsigned char topology_change_ack; + unsigned char config_pending; + port_id port_id; + port_id designated_port; + bridge_id designated_root; + bridge_id designated_bridge; + u32 path_cost; + u32 designated_cost; + long unsigned int designated_age; + struct timer_list forward_delay_timer; + struct timer_list hold_timer; + struct timer_list message_age_timer; + struct kobject kobj; + struct callback_head rcu; + struct net_bridge_mcast_port multicast_ctx; + struct bridge_mcast_stats *mcast_stats; + u32 multicast_eht_hosts_limit; + u32 multicast_eht_hosts_cnt; + struct hlist_head mglist; + char sysfs_name[16]; + u16 group_fwd_mask; + u16 backup_redirected_cnt; + struct bridge_stp_xstats stp_xstats; +}; + +struct metadata_dst; + +struct br_tunnel_info { + __be64 tunnel_id; + struct metadata_dst *tunnel_dst; +}; + +struct net_bridge_vlan { + struct rhash_head vnode; + struct rhash_head tnode; + u16 vid; + u16 flags; + u16 priv_flags; + u8 state; + struct pcpu_sw_netstats *stats; + union { + struct net_bridge *br; + struct net_bridge_port *port; + }; + union { + refcount_t refcnt; + struct net_bridge_vlan *brvlan; + }; + struct br_tunnel_info tinfo; + union { + struct net_bridge_mcast br_mcast_ctx; + struct net_bridge_mcast_port port_mcast_ctx; + }; + u16 msti; + struct list_head vlist; + struct callback_head rcu; +}; + +enum net_bridge_opts { + BROPT_VLAN_ENABLED = 0, + BROPT_VLAN_STATS_ENABLED = 1, + BROPT_NF_CALL_IPTABLES = 2, + BROPT_NF_CALL_IP6TABLES = 3, + BROPT_NF_CALL_ARPTABLES = 4, + BROPT_GROUP_ADDR_SET = 5, + BROPT_MULTICAST_ENABLED = 6, + BROPT_MULTICAST_QUERY_USE_IFADDR = 7, + BROPT_MULTICAST_STATS_ENABLED = 8, + BROPT_HAS_IPV6_ADDR = 9, + BROPT_NEIGH_SUPPRESS_ENABLED = 10, + BROPT_MTU_SET_BY_USER = 11, + BROPT_VLAN_STATS_PER_PORT = 12, + BROPT_NO_LL_LEARN = 13, + BROPT_VLAN_BRIDGE_BINDING = 14, + BROPT_MCAST_VLAN_SNOOPING_ENABLED = 15, + BROPT_MST_ENABLED = 16, +}; + +typedef struct { + long unsigned int key[2]; +} hsiphash_key_t; + +struct io_tlb_area; + +struct io_tlb_slot; + +struct io_tlb_pool { + phys_addr_t start; + phys_addr_t end; + void *vaddr; + long unsigned int nslabs; + bool late_alloc; + unsigned int nareas; + unsigned int area_nslabs; + struct io_tlb_area *areas; + struct io_tlb_slot *slots; +}; + +struct io_tlb_mem { + struct io_tlb_pool defpool; + long unsigned int nslabs; + struct dentry *debugfs; + bool force_bounce; + bool for_alloc; + atomic_long_t total_used; + atomic_long_t used_hiwater; + atomic_long_t transient_nslabs; +}; + +struct xa_node { + 
unsigned char shift; + unsigned char offset; + unsigned char count; + unsigned char nr_values; + struct xa_node *parent; + struct xarray *array; + union { + struct list_head private_list; + struct callback_head callback_head; + }; + void *slots[64]; + union { + long unsigned int tags[3]; + long unsigned int marks[3]; + }; +}; + +typedef void (*xa_update_node_t)(struct xa_node *); + +struct xa_state { + struct xarray *xa; + long unsigned int xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; + struct list_lru *xa_lru; +}; + +struct arm_smccc_res { + long unsigned int a0; + long unsigned int a1; + long unsigned int a2; + long unsigned int a3; +}; + +typedef long unsigned int hva_t; + +typedef u64 hfn_t; + +typedef hfn_t kvm_pfn_t; + +enum __kvm_host_smccc_func { + __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = 1, + __KVM_HOST_SMCCC_FUNC___pkvm_init = 2, + __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping = 3, + __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector = 4, + __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs = 5, + __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs = 6, + __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config = 7, + __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize = 8, + __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp = 9, + __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp = 10, + __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc = 11, + __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run = 12, + __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context = 13, + __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa = 14, + __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh = 15, + __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid = 16, + __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range = 17, + __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context = 18, + __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff = 19, + __KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs = 20, + __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs = 21, + __KVM_HOST_SMCCC_FUNC___pkvm_init_vm = 22, + __KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu = 23, + __KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm = 24, +}; + +union kvm_mmu_notifier_arg { + long unsigned int attributes; +}; + +struct kvm_gfn_range { + struct kvm_memory_slot *slot; + gfn_t start; + gfn_t end; + union kvm_mmu_notifier_arg arg; + bool may_block; +}; + +enum kvm_mr_change { + KVM_MR_CREATE = 0, + KVM_MR_DELETE = 1, + KVM_MR_MOVE = 2, + KVM_MR_FLAGS_ONLY = 3, +}; + +struct hstate {}; + +enum kvm_pgtable_walk_flags { + KVM_PGTABLE_WALK_LEAF = 1, + KVM_PGTABLE_WALK_TABLE_PRE = 2, + KVM_PGTABLE_WALK_TABLE_POST = 4, + KVM_PGTABLE_WALK_SHARED = 8, + KVM_PGTABLE_WALK_HANDLE_FAULT = 16, + KVM_PGTABLE_WALK_SKIP_BBM_TLBI = 32, + KVM_PGTABLE_WALK_SKIP_CMO = 64, +}; + +struct kvm_s2_trans { + phys_addr_t output; + long unsigned int block_size; + bool writable; + bool readable; + int level; + u32 esr; + u64 desc; +}; + +struct hyp_shared_pfn { + u64 pfn; + int count; + struct rb_node node; +}; + +struct sysrq_key_op { + void (* const handler)(u8); + const char * const help_msg; + const char * const action_msg; + const int enable_mask; +}; + +enum cons_flags { + CON_PRINTBUFFER = 1, + CON_CONSDEV = 2, + CON_ENABLED = 4, + CON_BOOT = 8, + CON_ANYTIME = 16, + CON_BRL = 32, + CON_EXTENDED = 64, + CON_SUSPENDED = 128, + CON_NBCON = 256, +}; + +struct nbcon_state { + union { + unsigned int atom; + struct { + unsigned int prio: 2; + unsigned int req_prio: 2; + unsigned int unsafe: 1; + unsigned int unsafe_takeover: 1; + unsigned int cpu: 
24; + }; + }; +}; + +enum nbcon_prio { + NBCON_PRIO_NONE = 0, + NBCON_PRIO_NORMAL = 1, + NBCON_PRIO_EMERGENCY = 2, + NBCON_PRIO_PANIC = 3, + NBCON_PRIO_MAX = 4, +}; + +struct console; + +struct printk_buffers; + +struct nbcon_context { + struct console *console; + unsigned int spinwait_max_us; + enum nbcon_prio prio; + unsigned int allow_unsafe_takeover: 1; + unsigned int backlog: 1; + struct printk_buffers *pbufs; + u64 seq; +}; + +struct nbcon_write_context; + +struct console { + char name[16]; + void (*write)(struct console *, const char *, unsigned int); + int (*read)(struct console *, char *, unsigned int); + struct tty_driver * (*device)(struct console *, int *); + void (*unblank)(); + int (*setup)(struct console *, char *); + int (*exit)(struct console *); + int (*match)(struct console *, char *, int, char *); + short int flags; + short int index; + int cflag; + uint ispeed; + uint ospeed; + u64 seq; + long unsigned int dropped; + void *data; + struct hlist_node node; + void (*write_atomic)(struct console *, struct nbcon_write_context *); + void (*write_thread)(struct console *, struct nbcon_write_context *); + void (*device_lock)(struct console *, long unsigned int *); + void (*device_unlock)(struct console *, long unsigned int); + atomic_t nbcon_state; + atomic_long_t nbcon_seq; + struct nbcon_context nbcon_device_ctxt; + atomic_long_t nbcon_prev_seq; + struct printk_buffers *pbufs; + struct task_struct *kthread; + struct rcuwait rcuwait; + struct irq_work irq_work; +}; + +struct printk_buffers { + char outbuf[2048]; + char scratchbuf[1024]; +}; + +struct nbcon_write_context { + struct nbcon_context ctxt; + char *outbuf; + unsigned int len; + bool unsafe_takeover; +}; + +struct prb_desc; + +struct printk_info; + +struct prb_desc_ring { + unsigned int count_bits; + struct prb_desc *descs; + struct printk_info *infos; + atomic_long_t head_id; + atomic_long_t tail_id; + atomic_long_t last_finalized_seq; +}; + +struct prb_data_ring { + unsigned int size_bits; + char *data; + atomic_long_t head_lpos; + atomic_long_t tail_lpos; +}; + +struct printk_ringbuffer { + struct prb_desc_ring desc_ring; + struct prb_data_ring text_data_ring; + atomic_long_t fail; +}; + +struct console_flush_type { + bool nbcon_atomic; + bool nbcon_offload; + bool legacy_direct; + bool legacy_offload; +}; + +struct printk_message { + struct printk_buffers *pbufs; + unsigned int outbuf_len; + u64 seq; + long unsigned int dropped; +}; + +struct dev_printk_info { + char subsystem[16]; + char device[48]; +}; + +struct printk_info { + u64 seq; + u64 ts_nsec; + u16 text_len; + u8 facility; + u8 flags: 5; + u8 level: 3; + u32 caller_id; + struct dev_printk_info dev_info; +}; + +struct printk_record { + struct printk_info *info; + char *text_buf; + unsigned int text_buf_size; +}; + +struct prb_data_blk_lpos { + long unsigned int begin; + long unsigned int next; +}; + +struct prb_desc { + atomic_long_t state_var; + struct prb_data_blk_lpos text_blk_lpos; +}; + +enum { + IRQD_TRIGGER_MASK = 15, + IRQD_SETAFFINITY_PENDING = 256, + IRQD_ACTIVATED = 512, + IRQD_NO_BALANCING = 1024, + IRQD_PER_CPU = 2048, + IRQD_AFFINITY_SET = 4096, + IRQD_LEVEL = 8192, + IRQD_WAKEUP_STATE = 16384, + IRQD_MOVE_PCNTXT = 32768, + IRQD_IRQ_DISABLED = 65536, + IRQD_IRQ_MASKED = 131072, + IRQD_IRQ_INPROGRESS = 262144, + IRQD_WAKEUP_ARMED = 524288, + IRQD_FORWARDED_TO_VCPU = 1048576, + IRQD_AFFINITY_MANAGED = 2097152, + IRQD_IRQ_STARTED = 4194304, + IRQD_MANAGED_SHUTDOWN = 8388608, + IRQD_SINGLE_TARGET = 16777216, + IRQD_DEFAULT_TRIGGER_SET = 
33554432, + IRQD_CAN_RESERVE = 67108864, + IRQD_HANDLE_ENFORCE_IRQCTX = 134217728, + IRQD_AFFINITY_ON_ACTIVATE = 268435456, + IRQD_IRQ_ENABLED_ON_SUSPEND = 536870912, + IRQD_RESEND_WHEN_IN_PROGRESS = 1073741824, +}; + +typedef bool (*stack_trace_consume_fn)(void *, long unsigned int); + +struct stacktrace_cookie { + long unsigned int *store; + unsigned int size; + unsigned int skip; + unsigned int len; +}; + +enum { + CSS_NO_REF = 1, + CSS_ONLINE = 2, + CSS_RELEASED = 4, + CSS_VISIBLE = 8, + CSS_DYING = 16, +}; + +enum { + CFTYPE_ONLY_ON_ROOT = 1, + CFTYPE_NOT_ON_ROOT = 2, + CFTYPE_NS_DELEGATABLE = 4, + CFTYPE_NO_PREFIX = 8, + CFTYPE_WORLD_WRITABLE = 16, + CFTYPE_DEBUG = 32, + __CFTYPE_ONLY_ON_DFL = 65536, + __CFTYPE_NOT_ON_DFL = 131072, + __CFTYPE_ADDED = 262144, +}; + +typedef struct { + char *from; + char *to; +} substring_t; + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE = 0, + RDMACG_RESOURCE_HCA_OBJECT = 1, + RDMACG_RESOURCE_MAX = 2, +}; + +struct rdma_cgroup { + struct cgroup_subsys_state css; + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +enum rdmacg_file_type { + RDMACG_RESOURCE_TYPE_MAX = 0, + RDMACG_RESOURCE_TYPE_STAT = 1, +}; + +struct rdmacg_resource { + int max; + int usage; +}; + +struct rdmacg_resource_pool { + struct rdmacg_device *device; + struct rdmacg_resource resources[2]; + struct list_head cg_node; + struct list_head dev_node; + u64 usage_sum; + int num_max_cnt; +}; + +enum uts_proc { + UTS_PROC_ARCH = 0, + UTS_PROC_OSTYPE = 1, + UTS_PROC_OSRELEASE = 2, + UTS_PROC_VERSION = 3, + UTS_PROC_HOSTNAME = 4, + UTS_PROC_DOMAINNAME = 5, +}; + +enum { + TASK_COMM_LEN = 16, +}; + +enum ftrace_dump_mode { + DUMP_NONE = 0, + DUMP_ALL = 1, + DUMP_ORIG = 2, + DUMP_PARAM = 3, +}; + +enum { + TRACE_FTRACE_BIT = 0, + TRACE_FTRACE_NMI_BIT = 1, + TRACE_FTRACE_IRQ_BIT = 2, + TRACE_FTRACE_SIRQ_BIT = 3, + TRACE_FTRACE_TRANSITION_BIT = 4, + TRACE_INTERNAL_BIT = 5, + TRACE_INTERNAL_NMI_BIT = 6, + TRACE_INTERNAL_IRQ_BIT = 7, + TRACE_INTERNAL_SIRQ_BIT = 8, + TRACE_INTERNAL_TRANSITION_BIT = 9, + TRACE_BRANCH_BIT = 10, + TRACE_IRQ_BIT = 11, + TRACE_RECORD_RECURSION_BIT = 12, +}; + +enum { + TRACE_CTX_NMI = 0, + TRACE_CTX_IRQ = 1, + TRACE_CTX_SOFTIRQ = 2, + TRACE_CTX_NORMAL = 3, + TRACE_CTX_TRANSITION = 4, +}; + +struct __arch_ftrace_regs { + long unsigned int regs[9]; + long unsigned int direct_tramp; + long unsigned int fp; + long unsigned int lr; + long unsigned int sp; + long unsigned int pc; +}; + +enum { + FTRACE_OPS_FL_ENABLED = 1, + FTRACE_OPS_FL_DYNAMIC = 2, + FTRACE_OPS_FL_SAVE_REGS = 4, + FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 8, + FTRACE_OPS_FL_RECURSION = 16, + FTRACE_OPS_FL_STUB = 32, + FTRACE_OPS_FL_INITIALIZED = 64, + FTRACE_OPS_FL_DELETED = 128, + FTRACE_OPS_FL_ADDING = 256, + FTRACE_OPS_FL_REMOVING = 512, + FTRACE_OPS_FL_MODIFYING = 1024, + FTRACE_OPS_FL_ALLOC_TRAMP = 2048, + FTRACE_OPS_FL_IPMODIFY = 4096, + FTRACE_OPS_FL_PID = 8192, + FTRACE_OPS_FL_RCU = 16384, + FTRACE_OPS_FL_TRACE_ARRAY = 32768, + FTRACE_OPS_FL_PERMANENT = 65536, + FTRACE_OPS_FL_DIRECT = 131072, + FTRACE_OPS_FL_SUBOP = 262144, +}; + +struct ftrace_hash { + long unsigned int size_bits; + struct hlist_head *buckets; + long unsigned int count; + long unsigned int flags; + struct callback_head rcu; +}; + +struct ftrace_graph_ent { + long unsigned int func; + int depth; +} __attribute__((packed)); + +struct ftrace_graph_ret { + long unsigned int func; + int depth; + unsigned int overrun; + long long unsigned int 
calltime; + long long unsigned int rettime; +}; + +struct fgraph_ops; + +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *, struct fgraph_ops *); + +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *, struct fgraph_ops *); + +struct fgraph_ops { + trace_func_graph_ent_t entryfunc; + trace_func_graph_ret_t retfunc; + struct ftrace_ops ops; + void *private; + trace_func_graph_ent_t saved_func; + int idx; +}; + +struct prog_entry; + +struct event_filter { + struct prog_entry *prog; + char *filter_string; +}; + +struct trace_array_cpu; + +struct array_buffer { + struct trace_array *tr; + struct trace_buffer *buffer; + struct trace_array_cpu *data; + u64 time_start; + int cpu; +}; + +struct trace_pid_list; + +struct trace_options; + +struct trace_func_repeats; + +struct trace_array { + struct list_head list; + char *name; + struct array_buffer array_buffer; + unsigned int mapped; + long unsigned int range_addr_start; + long unsigned int range_addr_size; + long int text_delta; + long int data_delta; + struct trace_pid_list *filtered_pids; + struct trace_pid_list *filtered_no_pids; + arch_spinlock_t max_lock; + int buffer_disabled; + int sys_refcount_enter; + int sys_refcount_exit; + struct trace_event_file *enter_syscall_files[467]; + struct trace_event_file *exit_syscall_files[467]; + int stop_count; + int clock_id; + int nr_topts; + bool clear_trace; + int buffer_percent; + unsigned int n_err_log_entries; + struct tracer *current_trace; + unsigned int trace_flags; + unsigned char trace_flags_index[32]; + unsigned int flags; + raw_spinlock_t start_lock; + const char *system_names; + struct list_head err_log; + struct dentry *dir; + struct dentry *options; + struct dentry *percpu_dir; + struct eventfs_inode *event_dir; + struct trace_options *topts; + struct list_head systems; + struct list_head events; + struct trace_event_file *trace_marker_file; + cpumask_var_t tracing_cpumask; + cpumask_var_t pipe_cpumask; + int ref; + int trace_ref; + struct ftrace_ops *ops; + struct trace_pid_list *function_pids; + struct trace_pid_list *function_no_pids; + struct fgraph_ops *gops; + struct list_head func_probes; + struct list_head mod_trace; + struct list_head mod_notrace; + int function_enabled; + int no_filter_buffering_ref; + struct list_head hist_vars; + struct trace_func_repeats *last_func_repeats; + bool ring_buffer_expanded; +}; + +struct tracer_flags; + +struct tracer { + const char *name; + int (*init)(struct trace_array *); + void (*reset)(struct trace_array *); + void (*start)(struct trace_array *); + void (*stop)(struct trace_array *); + int (*update_thresh)(struct trace_array *); + void (*open)(struct trace_iterator *); + void (*pipe_open)(struct trace_iterator *); + void (*close)(struct trace_iterator *); + void (*pipe_close)(struct trace_iterator *); + ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *); + ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*print_header)(struct seq_file *); + enum print_line_t (*print_line)(struct trace_iterator *); + int (*set_flag)(struct trace_array *, u32, u32, int); + int (*flag_changed)(struct trace_array *, u32, int); + struct tracer *next; + struct tracer_flags *flags; + int enabled; + bool print_max; + bool allow_instances; + bool noboot; +}; + +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 1, + TRACE_FLAG_NEED_RESCHED_LAZY = 2, + TRACE_FLAG_NEED_RESCHED = 4, + TRACE_FLAG_HARDIRQ = 8, + TRACE_FLAG_SOFTIRQ = 16, + 
TRACE_FLAG_PREEMPT_RESCHED = 32, + TRACE_FLAG_NMI = 64, + TRACE_FLAG_BH_OFF = 128, +}; + +struct event_subsystem; + +struct trace_subsystem_dir { + struct list_head list; + struct event_subsystem *subsystem; + struct trace_array *tr; + struct eventfs_inode *ei; + int ref_count; + int nr_events; +}; + +union lower_chunk { + union lower_chunk *next; + long unsigned int data[256]; +}; + +union upper_chunk { + union upper_chunk *next; + union lower_chunk *data[256]; +}; + +struct trace_pid_list { + raw_spinlock_t lock; + struct irq_work refill_irqwork; + union upper_chunk *upper[256]; + union upper_chunk *upper_list; + union lower_chunk *lower_list; + int free_upper_chunks; + int free_lower_chunks; +}; + +struct trace_array_cpu { + atomic_t disabled; + void *buffer_page; + long unsigned int entries; + long unsigned int saved_latency; + long unsigned int critical_start; + long unsigned int critical_end; + long unsigned int critical_sequence; + long unsigned int nice; + long unsigned int policy; + long unsigned int rt_priority; + long unsigned int skipped_entries; + u64 preempt_timestamp; + pid_t pid; + kuid_t uid; + char comm[16]; + int ftrace_ignore_pid; + bool ignore_pid; +}; + +struct trace_option_dentry; + +struct trace_options { + struct tracer *tracer; + struct trace_option_dentry *topts; +}; + +struct tracer_opt; + +struct trace_option_dentry { + struct tracer_opt *opt; + struct tracer_flags *flags; + struct trace_array *tr; + struct dentry *entry; +}; + +struct trace_func_repeats { + long unsigned int ip; + long unsigned int parent_ip; + long unsigned int count; + u64 ts_last_call; +}; + +enum { + TRACE_ARRAY_FL_GLOBAL = 1, + TRACE_ARRAY_FL_BOOT = 2, +}; + +struct tracer_opt { + const char *name; + u32 bit; +}; + +struct tracer_flags { + u32 val; + struct tracer_opt *opts; + struct tracer *trace; +}; + +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(struct trace_array *, struct ftrace_hash *, char *, char *, char *, int); +}; + +struct ftrace_probe_ops { + void (*func)(long unsigned int, long unsigned int, struct trace_array *, struct ftrace_probe_ops *, void *); + int (*init)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *, void **); + void (*free)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *); + int (*print)(struct seq_file *, long unsigned int, struct ftrace_probe_ops *, void *); +}; + +typedef int (*ftrace_mapper_func)(void *); + +enum trace_iterator_bits { + TRACE_ITER_PRINT_PARENT_BIT = 0, + TRACE_ITER_SYM_OFFSET_BIT = 1, + TRACE_ITER_SYM_ADDR_BIT = 2, + TRACE_ITER_VERBOSE_BIT = 3, + TRACE_ITER_RAW_BIT = 4, + TRACE_ITER_HEX_BIT = 5, + TRACE_ITER_BIN_BIT = 6, + TRACE_ITER_BLOCK_BIT = 7, + TRACE_ITER_FIELDS_BIT = 8, + TRACE_ITER_PRINTK_BIT = 9, + TRACE_ITER_ANNOTATE_BIT = 10, + TRACE_ITER_USERSTACKTRACE_BIT = 11, + TRACE_ITER_SYM_USEROBJ_BIT = 12, + TRACE_ITER_PRINTK_MSGONLY_BIT = 13, + TRACE_ITER_CONTEXT_INFO_BIT = 14, + TRACE_ITER_LATENCY_FMT_BIT = 15, + TRACE_ITER_RECORD_CMD_BIT = 16, + TRACE_ITER_RECORD_TGID_BIT = 17, + TRACE_ITER_OVERWRITE_BIT = 18, + TRACE_ITER_STOP_ON_FREE_BIT = 19, + TRACE_ITER_IRQ_INFO_BIT = 20, + TRACE_ITER_MARKERS_BIT = 21, + TRACE_ITER_EVENT_FORK_BIT = 22, + TRACE_ITER_TRACE_PRINTK_BIT = 23, + TRACE_ITER_PAUSE_ON_TRACE_BIT = 24, + TRACE_ITER_HASH_PTR_BIT = 25, + TRACE_ITER_FUNCTION_BIT = 26, + TRACE_ITER_FUNC_FORK_BIT = 27, + TRACE_ITER_DISPLAY_GRAPH_BIT = 28, + TRACE_ITER_STACKTRACE_BIT = 29, + TRACE_ITER_LAST_BIT = 30, +}; + +struct event_subsystem { + struct 
list_head list; + const char *name; + struct event_filter *filter; + int ref_count; +}; + +enum { + TRACE_FUNC_NO_OPTS = 0, + TRACE_FUNC_OPT_STACK = 1, + TRACE_FUNC_OPT_NO_REPEATS = 2, + TRACE_FUNC_OPT_HIGHEST_BIT = 4, +}; + +enum cpu_pm_event { + CPU_PM_ENTER = 0, + CPU_PM_ENTER_FAILED = 1, + CPU_PM_EXIT = 2, + CPU_CLUSTER_PM_ENTER = 3, + CPU_CLUSTER_PM_ENTER_FAILED = 4, + CPU_CLUSTER_PM_EXIT = 5, +}; + +struct syscore_ops { + struct list_head node; + int (*suspend)(); + void (*resume)(); + void (*shutdown)(); +}; + +enum { + BPF_REG_0 = 0, + BPF_REG_1 = 1, + BPF_REG_2 = 2, + BPF_REG_3 = 3, + BPF_REG_4 = 4, + BPF_REG_5 = 5, + BPF_REG_6 = 6, + BPF_REG_7 = 7, + BPF_REG_8 = 8, + BPF_REG_9 = 9, + BPF_REG_10 = 10, + __MAX_BPF_REG = 11, +}; + +enum bpf_func_id { + BPF_FUNC_unspec = 0, + BPF_FUNC_map_lookup_elem = 1, + BPF_FUNC_map_update_elem = 2, + BPF_FUNC_map_delete_elem = 3, + BPF_FUNC_probe_read = 4, + BPF_FUNC_ktime_get_ns = 5, + BPF_FUNC_trace_printk = 6, + BPF_FUNC_get_prandom_u32 = 7, + BPF_FUNC_get_smp_processor_id = 8, + BPF_FUNC_skb_store_bytes = 9, + BPF_FUNC_l3_csum_replace = 10, + BPF_FUNC_l4_csum_replace = 11, + BPF_FUNC_tail_call = 12, + BPF_FUNC_clone_redirect = 13, + BPF_FUNC_get_current_pid_tgid = 14, + BPF_FUNC_get_current_uid_gid = 15, + BPF_FUNC_get_current_comm = 16, + BPF_FUNC_get_cgroup_classid = 17, + BPF_FUNC_skb_vlan_push = 18, + BPF_FUNC_skb_vlan_pop = 19, + BPF_FUNC_skb_get_tunnel_key = 20, + BPF_FUNC_skb_set_tunnel_key = 21, + BPF_FUNC_perf_event_read = 22, + BPF_FUNC_redirect = 23, + BPF_FUNC_get_route_realm = 24, + BPF_FUNC_perf_event_output = 25, + BPF_FUNC_skb_load_bytes = 26, + BPF_FUNC_get_stackid = 27, + BPF_FUNC_csum_diff = 28, + BPF_FUNC_skb_get_tunnel_opt = 29, + BPF_FUNC_skb_set_tunnel_opt = 30, + BPF_FUNC_skb_change_proto = 31, + BPF_FUNC_skb_change_type = 32, + BPF_FUNC_skb_under_cgroup = 33, + BPF_FUNC_get_hash_recalc = 34, + BPF_FUNC_get_current_task = 35, + BPF_FUNC_probe_write_user = 36, + BPF_FUNC_current_task_under_cgroup = 37, + BPF_FUNC_skb_change_tail = 38, + BPF_FUNC_skb_pull_data = 39, + BPF_FUNC_csum_update = 40, + BPF_FUNC_set_hash_invalid = 41, + BPF_FUNC_get_numa_node_id = 42, + BPF_FUNC_skb_change_head = 43, + BPF_FUNC_xdp_adjust_head = 44, + BPF_FUNC_probe_read_str = 45, + BPF_FUNC_get_socket_cookie = 46, + BPF_FUNC_get_socket_uid = 47, + BPF_FUNC_set_hash = 48, + BPF_FUNC_setsockopt = 49, + BPF_FUNC_skb_adjust_room = 50, + BPF_FUNC_redirect_map = 51, + BPF_FUNC_sk_redirect_map = 52, + BPF_FUNC_sock_map_update = 53, + BPF_FUNC_xdp_adjust_meta = 54, + BPF_FUNC_perf_event_read_value = 55, + BPF_FUNC_perf_prog_read_value = 56, + BPF_FUNC_getsockopt = 57, + BPF_FUNC_override_return = 58, + BPF_FUNC_sock_ops_cb_flags_set = 59, + BPF_FUNC_msg_redirect_map = 60, + BPF_FUNC_msg_apply_bytes = 61, + BPF_FUNC_msg_cork_bytes = 62, + BPF_FUNC_msg_pull_data = 63, + BPF_FUNC_bind = 64, + BPF_FUNC_xdp_adjust_tail = 65, + BPF_FUNC_skb_get_xfrm_state = 66, + BPF_FUNC_get_stack = 67, + BPF_FUNC_skb_load_bytes_relative = 68, + BPF_FUNC_fib_lookup = 69, + BPF_FUNC_sock_hash_update = 70, + BPF_FUNC_msg_redirect_hash = 71, + BPF_FUNC_sk_redirect_hash = 72, + BPF_FUNC_lwt_push_encap = 73, + BPF_FUNC_lwt_seg6_store_bytes = 74, + BPF_FUNC_lwt_seg6_adjust_srh = 75, + BPF_FUNC_lwt_seg6_action = 76, + BPF_FUNC_rc_repeat = 77, + BPF_FUNC_rc_keydown = 78, + BPF_FUNC_skb_cgroup_id = 79, + BPF_FUNC_get_current_cgroup_id = 80, + BPF_FUNC_get_local_storage = 81, + BPF_FUNC_sk_select_reuseport = 82, + BPF_FUNC_skb_ancestor_cgroup_id = 83, + BPF_FUNC_sk_lookup_tcp = 84, 
+ BPF_FUNC_sk_lookup_udp = 85, + BPF_FUNC_sk_release = 86, + BPF_FUNC_map_push_elem = 87, + BPF_FUNC_map_pop_elem = 88, + BPF_FUNC_map_peek_elem = 89, + BPF_FUNC_msg_push_data = 90, + BPF_FUNC_msg_pop_data = 91, + BPF_FUNC_rc_pointer_rel = 92, + BPF_FUNC_spin_lock = 93, + BPF_FUNC_spin_unlock = 94, + BPF_FUNC_sk_fullsock = 95, + BPF_FUNC_tcp_sock = 96, + BPF_FUNC_skb_ecn_set_ce = 97, + BPF_FUNC_get_listener_sock = 98, + BPF_FUNC_skc_lookup_tcp = 99, + BPF_FUNC_tcp_check_syncookie = 100, + BPF_FUNC_sysctl_get_name = 101, + BPF_FUNC_sysctl_get_current_value = 102, + BPF_FUNC_sysctl_get_new_value = 103, + BPF_FUNC_sysctl_set_new_value = 104, + BPF_FUNC_strtol = 105, + BPF_FUNC_strtoul = 106, + BPF_FUNC_sk_storage_get = 107, + BPF_FUNC_sk_storage_delete = 108, + BPF_FUNC_send_signal = 109, + BPF_FUNC_tcp_gen_syncookie = 110, + BPF_FUNC_skb_output = 111, + BPF_FUNC_probe_read_user = 112, + BPF_FUNC_probe_read_kernel = 113, + BPF_FUNC_probe_read_user_str = 114, + BPF_FUNC_probe_read_kernel_str = 115, + BPF_FUNC_tcp_send_ack = 116, + BPF_FUNC_send_signal_thread = 117, + BPF_FUNC_jiffies64 = 118, + BPF_FUNC_read_branch_records = 119, + BPF_FUNC_get_ns_current_pid_tgid = 120, + BPF_FUNC_xdp_output = 121, + BPF_FUNC_get_netns_cookie = 122, + BPF_FUNC_get_current_ancestor_cgroup_id = 123, + BPF_FUNC_sk_assign = 124, + BPF_FUNC_ktime_get_boot_ns = 125, + BPF_FUNC_seq_printf = 126, + BPF_FUNC_seq_write = 127, + BPF_FUNC_sk_cgroup_id = 128, + BPF_FUNC_sk_ancestor_cgroup_id = 129, + BPF_FUNC_ringbuf_output = 130, + BPF_FUNC_ringbuf_reserve = 131, + BPF_FUNC_ringbuf_submit = 132, + BPF_FUNC_ringbuf_discard = 133, + BPF_FUNC_ringbuf_query = 134, + BPF_FUNC_csum_level = 135, + BPF_FUNC_skc_to_tcp6_sock = 136, + BPF_FUNC_skc_to_tcp_sock = 137, + BPF_FUNC_skc_to_tcp_timewait_sock = 138, + BPF_FUNC_skc_to_tcp_request_sock = 139, + BPF_FUNC_skc_to_udp6_sock = 140, + BPF_FUNC_get_task_stack = 141, + BPF_FUNC_load_hdr_opt = 142, + BPF_FUNC_store_hdr_opt = 143, + BPF_FUNC_reserve_hdr_opt = 144, + BPF_FUNC_inode_storage_get = 145, + BPF_FUNC_inode_storage_delete = 146, + BPF_FUNC_d_path = 147, + BPF_FUNC_copy_from_user = 148, + BPF_FUNC_snprintf_btf = 149, + BPF_FUNC_seq_printf_btf = 150, + BPF_FUNC_skb_cgroup_classid = 151, + BPF_FUNC_redirect_neigh = 152, + BPF_FUNC_per_cpu_ptr = 153, + BPF_FUNC_this_cpu_ptr = 154, + BPF_FUNC_redirect_peer = 155, + BPF_FUNC_task_storage_get = 156, + BPF_FUNC_task_storage_delete = 157, + BPF_FUNC_get_current_task_btf = 158, + BPF_FUNC_bprm_opts_set = 159, + BPF_FUNC_ktime_get_coarse_ns = 160, + BPF_FUNC_ima_inode_hash = 161, + BPF_FUNC_sock_from_file = 162, + BPF_FUNC_check_mtu = 163, + BPF_FUNC_for_each_map_elem = 164, + BPF_FUNC_snprintf = 165, + BPF_FUNC_sys_bpf = 166, + BPF_FUNC_btf_find_by_name_kind = 167, + BPF_FUNC_sys_close = 168, + BPF_FUNC_timer_init = 169, + BPF_FUNC_timer_set_callback = 170, + BPF_FUNC_timer_start = 171, + BPF_FUNC_timer_cancel = 172, + BPF_FUNC_get_func_ip = 173, + BPF_FUNC_get_attach_cookie = 174, + BPF_FUNC_task_pt_regs = 175, + BPF_FUNC_get_branch_snapshot = 176, + BPF_FUNC_trace_vprintk = 177, + BPF_FUNC_skc_to_unix_sock = 178, + BPF_FUNC_kallsyms_lookup_name = 179, + BPF_FUNC_find_vma = 180, + BPF_FUNC_loop = 181, + BPF_FUNC_strncmp = 182, + BPF_FUNC_get_func_arg = 183, + BPF_FUNC_get_func_ret = 184, + BPF_FUNC_get_func_arg_cnt = 185, + BPF_FUNC_get_retval = 186, + BPF_FUNC_set_retval = 187, + BPF_FUNC_xdp_get_buff_len = 188, + BPF_FUNC_xdp_load_bytes = 189, + BPF_FUNC_xdp_store_bytes = 190, + BPF_FUNC_copy_from_user_task = 191, + 
BPF_FUNC_skb_set_tstamp = 192, + BPF_FUNC_ima_file_hash = 193, + BPF_FUNC_kptr_xchg = 194, + BPF_FUNC_map_lookup_percpu_elem = 195, + BPF_FUNC_skc_to_mptcp_sock = 196, + BPF_FUNC_dynptr_from_mem = 197, + BPF_FUNC_ringbuf_reserve_dynptr = 198, + BPF_FUNC_ringbuf_submit_dynptr = 199, + BPF_FUNC_ringbuf_discard_dynptr = 200, + BPF_FUNC_dynptr_read = 201, + BPF_FUNC_dynptr_write = 202, + BPF_FUNC_dynptr_data = 203, + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 = 204, + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 = 205, + BPF_FUNC_tcp_raw_check_syncookie_ipv4 = 206, + BPF_FUNC_tcp_raw_check_syncookie_ipv6 = 207, + BPF_FUNC_ktime_get_tai_ns = 208, + BPF_FUNC_user_ringbuf_drain = 209, + BPF_FUNC_cgrp_storage_get = 210, + BPF_FUNC_cgrp_storage_delete = 211, + __BPF_FUNC_MAX_ID = 212, +}; + +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP = 1, + XDP_PASS = 2, + XDP_TX = 3, + XDP_REDIRECT = 4, +}; + +struct xdp_md { + __u32 data; + __u32 data_end; + __u32 data_meta; + __u32 ingress_ifindex; + __u32 rx_queue_index; + __u32 egress_ifindex; +}; + +typedef struct { + seqcount_t seqcount; +} seqcount_latch_t; + +struct latch_tree_root { + seqcount_latch_t seq; + struct rb_root tree[2]; +}; + +struct latch_tree_ops { + bool (*less)(struct latch_tree_node *, struct latch_tree_node *); + int (*comp)(void *, struct latch_tree_node *); +}; + +typedef sockptr_t bpfptr_t; + +struct btf_struct_meta { + u32 btf_id; + struct btf_record *record; +}; + +struct bpf_mem_caches; + +struct bpf_mem_cache; + +struct bpf_mem_alloc { + struct bpf_mem_caches *caches; + struct bpf_mem_cache *cache; + struct obj_cgroup *objcg; + bool percpu; + struct work_struct work; +}; + +struct bpf_verifier_log { + u64 start_pos; + u64 end_pos; + char *ubuf; + u32 level; + u32 len_total; + u32 len_max; + char kbuf[1024]; +}; + +enum priv_stack_mode { + PRIV_STACK_UNKNOWN = 0, + NO_PRIV_STACK = 1, + PRIV_STACK_ADAPTIVE = 2, +}; + +struct bpf_subprog_arg_info { + enum bpf_arg_type arg_type; + union { + u32 mem_size; + u32 btf_id; + }; +}; + +struct bpf_subprog_info { + u32 start; + u32 linfo_idx; + u16 stack_depth; + u16 stack_extra; + s16 fastcall_stack_off; + bool has_tail_call: 1; + bool tail_call_reachable: 1; + bool has_ld_abs: 1; + bool is_cb: 1; + bool is_async_cb: 1; + bool is_exception_cb: 1; + bool args_cached: 1; + bool keep_fastcall_stack: 1; + enum priv_stack_mode priv_stack_mode; + u8 arg_cnt; + struct bpf_subprog_arg_info args[5]; +}; + +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +struct bpf_idmap { + u32 tmp_id_gen; + struct bpf_id_pair map[600]; +}; + +struct bpf_idset { + u32 count; + u32 ids[600]; +}; + +struct backtrack_state { + struct bpf_verifier_env *env; + u32 frame; + u32 reg_masks[8]; + u64 stack_masks[8]; +}; + +enum bpf_dynptr_type { + BPF_DYNPTR_TYPE_INVALID = 0, + BPF_DYNPTR_TYPE_LOCAL = 1, + BPF_DYNPTR_TYPE_RINGBUF = 2, + BPF_DYNPTR_TYPE_SKB = 3, + BPF_DYNPTR_TYPE_XDP = 4, +}; + +enum bpf_iter_state { + BPF_ITER_STATE_INVALID = 0, + BPF_ITER_STATE_ACTIVE = 1, + BPF_ITER_STATE_DRAINED = 2, +}; + +struct tnum { + u64 value; + u64 mask; +}; + +enum bpf_reg_liveness { + REG_LIVE_NONE = 0, + REG_LIVE_READ32 = 1, + REG_LIVE_READ64 = 2, + REG_LIVE_READ = 3, + REG_LIVE_WRITTEN = 4, + REG_LIVE_DONE = 8, +}; + +struct bpf_reg_state { + enum bpf_reg_type type; + s32 off; + union { + int range; + struct { + struct bpf_map *map_ptr; + u32 map_uid; + }; + struct { + struct btf *btf; + u32 btf_id; + }; + struct { + u32 mem_size; + u32 dynptr_id; + }; + struct { + enum bpf_dynptr_type type; + bool first_slot; + } dynptr; + struct { + 
struct btf *btf; + u32 btf_id; + enum bpf_iter_state state: 2; + int depth: 30; + } iter; + struct { + long unsigned int raw1; + long unsigned int raw2; + } raw; + u32 subprogno; + }; + struct tnum var_off; + s64 smin_value; + s64 smax_value; + u64 umin_value; + u64 umax_value; + s32 s32_min_value; + s32 s32_max_value; + u32 u32_min_value; + u32 u32_max_value; + u32 id; + u32 ref_obj_id; + struct bpf_reg_state *parent; + u32 frameno; + s32 subreg_def; + enum bpf_reg_liveness live; + bool precise; +}; + +struct bpf_verifier_ops; + +struct bpf_verifier_stack_elem; + +struct bpf_verifier_state; + +struct bpf_verifier_state_list; + +struct bpf_insn_aux_data; + +struct bpf_insn_hist_entry; + +struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; + struct bpf_prog *prog; + const struct bpf_verifier_ops *ops; + struct module *attach_btf_mod; + struct bpf_verifier_stack_elem *head; + int stack_size; + bool strict_alignment; + bool test_state_freq; + bool test_reg_invariants; + struct bpf_verifier_state *cur_state; + struct bpf_verifier_state_list **explored_states; + struct bpf_verifier_state_list *free_list; + struct bpf_map *used_maps[64]; + struct btf_mod_pair used_btfs[64]; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 id_gen; + u32 hidden_subprog_cnt; + int exception_callback_subprog; + bool explore_alu_limits; + bool allow_ptr_leaks; + bool allow_uninit_stack; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; + bool seen_direct_write; + bool seen_exception; + struct bpf_insn_aux_data *insn_aux_data; + const struct bpf_line_info *prev_linfo; + struct bpf_verifier_log log; + struct bpf_subprog_info subprog_info[258]; + union { + struct bpf_idmap idmap_scratch; + struct bpf_idset idset_scratch; + }; + struct { + int *insn_state; + int *insn_stack; + int cur_stack; + } cfg; + struct backtrack_state bt; + struct bpf_insn_hist_entry *insn_hist; + struct bpf_insn_hist_entry *cur_hist_ent; + u32 insn_hist_cap; + u32 pass_cnt; + u32 subprog_cnt; + u32 prev_insn_processed; + u32 insn_processed; + u32 prev_jmps_processed; + u32 jmps_processed; + u64 verification_time; + u32 max_states_per_insn; + u32 total_states; + u32 peak_states; + u32 longest_mark_read_walk; + bpfptr_t fd_array; + u32 scratched_regs; + u64 scratched_stack_slots; + u64 prev_log_pos; + u64 prev_insn_print_pos; + struct bpf_reg_state fake_reg[2]; + char tmp_str_buf[320]; + struct bpf_insn insn_buf[32]; + struct bpf_insn epilogue_buf[32]; +}; + +struct bpf_retval_range { + s32 minval; + s32 maxval; +}; + +struct bpf_reference_state; + +struct bpf_stack_state; + +struct bpf_func_state { + struct bpf_reg_state regs[11]; + int callsite; + u32 frameno; + u32 subprogno; + u32 async_entry_cnt; + struct bpf_retval_range callback_ret_range; + bool in_callback_fn; + bool in_async_callback_fn; + bool in_exception_callback_fn; + u32 callback_depth; + int acquired_refs; + int active_locks; + struct bpf_reference_state *refs; + struct bpf_stack_state *stack; + int allocated_stack; +}; + +struct bpf_func_proto { + u64 (*func)(u64, u64, u64, u64, u64); + bool gpl_only; + bool pkt_access; + bool might_sleep; + bool allow_fastcall; + enum bpf_return_type ret_type; + union { + struct { + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; + }; + enum bpf_arg_type arg_type[5]; + }; + union { + struct { + u32 *arg1_btf_id; + u32 *arg2_btf_id; + u32 *arg3_btf_id; + u32 *arg4_btf_id; + u32 *arg5_btf_id; + }; + u32 *arg_btf_id[5]; + 
struct { + size_t arg1_size; + size_t arg2_size; + size_t arg3_size; + size_t arg4_size; + size_t arg5_size; + }; + size_t arg_size[5]; + }; + int *ret_btf_id; + bool (*allowed)(const struct bpf_prog *); +}; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2, +}; + +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + bool is_ldsx; + union { + int ctx_field_size; + struct { + struct btf *btf; + u32 btf_id; + }; + }; + struct bpf_verifier_log *log; + bool is_retval; +}; + +struct bpf_verifier_ops { + const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *); + bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *); + int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); + int (*gen_epilogue)(struct bpf_insn *, const struct bpf_prog *, s16); + int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); + u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + int (*btf_struct_access)(struct bpf_verifier_log *, const struct bpf_reg_state *, int, int); +}; + +enum bpf_jit_poke_reason { + BPF_POKE_REASON_TAIL_CALL = 0, +}; + +struct bpf_array_aux { + struct list_head poke_progs; + struct bpf_map *map; + struct mutex poke_mutex; + struct work_struct work; +}; + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + struct bpf_array_aux *aux; + union { + struct { + struct {} __empty_value; + char value[0]; + }; + struct { + struct {} __empty_ptrs; + void *ptrs[0]; + }; + struct { + struct {} __empty_pptrs; + void *pptrs[0]; + }; + }; +}; + +typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int); + +struct bpf_empty_prog_array { + struct bpf_prog_array hdr; + struct bpf_prog *null_prog; +}; + +enum bpf_text_poke_type { + BPF_MOD_CALL = 0, + BPF_MOD_JUMP = 1, +}; + +struct xdp_mem_info { + u32 type; + u32 id; +}; + +struct xdp_frame { + void *data; + u16 len; + u16 headroom; + u32 metasize; + struct xdp_mem_info mem; + struct net_device *dev_rx; + u32 frame_sz; + u32 flags; +}; + +struct xdp_rxq_info; + +struct xdp_txq_info; + +struct xdp_buff { + void *data; + void *data_end; + void *data_meta; + void *data_hard_start; + struct xdp_rxq_info *rxq; + struct xdp_txq_info *txq; + u32 frame_sz; + u32 flags; +}; + +struct bpf_binary_header { + u32 size; + long: 0; + u8 image[0]; +}; + +typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); + +struct rnd_state { + __u32 s1; + __u32 s2; + __u32 s3; + __u32 s4; +}; + +struct bpf_stack_state { + struct bpf_reg_state spilled_ptr; + u8 slot_type[8]; +}; + +enum ref_state_type { + REF_TYPE_PTR = 0, + REF_TYPE_LOCK = 1, +}; + +struct bpf_reference_state { + enum ref_state_type type; + int id; + int insn_idx; + void *ptr; +}; + +enum { + INSN_F_FRAMENO_MASK = 7, + INSN_F_SPI_MASK = 63, + INSN_F_SPI_SHIFT = 3, + INSN_F_STACK_ACCESS = 512, +}; + +struct bpf_insn_hist_entry { + u32 idx; + u32 prev_idx: 22; + u32 flags: 10; + u64 linked_regs; +}; + +struct bpf_verifier_state { + struct bpf_func_state *frame[8]; + struct bpf_verifier_state *parent; + u32 branches; + u32 insn_idx; + u32 curframe; + bool speculative; + bool active_rcu_lock; + u32 active_preempt_lock; + bool used_as_loop_entry; + bool in_sleepable; + u32 first_insn_idx; + u32 last_insn_idx; + struct bpf_verifier_state *loop_entry; + u32 insn_hist_start; + u32 insn_hist_end; + u32 dfs_depth; + u32 callback_unroll_depth; + u32 may_goto_depth; 
+}; + +struct bpf_verifier_state_list { + struct bpf_verifier_state state; + struct bpf_verifier_state_list *next; + int miss_cnt; + int hit_cnt; +}; + +struct bpf_loop_inline_state { + unsigned int initialized: 1; + unsigned int fit_for_inline: 1; + u32 callback_subprogno; +}; + +struct bpf_map_ptr_state { + struct bpf_map *map_ptr; + bool poison; + bool unpriv; +}; + +struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; + struct bpf_map_ptr_state map_ptr_state; + s32 call_imm; + u32 alu_limit; + struct { + u32 map_index; + u32 map_off; + }; + struct { + enum bpf_reg_type reg_type; + union { + struct { + struct btf *btf; + u32 btf_id; + }; + u32 mem_size; + }; + } btf_var; + struct bpf_loop_inline_state loop_inline_state; + }; + union { + u64 obj_new_size; + u64 insert_off; + }; + struct btf_struct_meta *kptr_struct_meta; + u64 map_key_state; + int ctx_field_size; + u32 seen; + bool sanitize_stack_spill; + bool zext_dst; + bool needs_zext; + bool storage_get_func_atomic; + bool is_iter_next; + bool call_with_percpu_alloc_ptr; + u8 alu_state; + u8 fastcall_pattern: 1; + u8 fastcall_spills_num: 3; + unsigned int orig_idx; + bool jmp_point; + bool prune_point; + bool force_checkpoint; + bool calls_callback; +}; + +enum execmem_type { + EXECMEM_DEFAULT = 0, + EXECMEM_MODULE_TEXT = 0, + EXECMEM_KPROBES = 1, + EXECMEM_FTRACE = 2, + EXECMEM_BPF = 3, + EXECMEM_MODULE_DATA = 4, + EXECMEM_TYPE_MAX = 5, +}; + +enum page_size_enum { + __PAGE_SIZE = 4096, +}; + +struct bpf_prog_pack { + struct list_head list; + void *ptr; + long unsigned int bitmap[0]; +}; + +struct bpf_prog_dummy { + struct bpf_prog prog; +}; + +typedef u64 (*btf_bpf_user_rnd_u32)(); + +typedef u64 (*btf_bpf_get_raw_cpu_id)(); + +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, + MEM_TYPE_PAGE_ORDER0 = 1, + MEM_TYPE_PAGE_POOL = 2, + MEM_TYPE_XSK_BUFF_POOL = 3, + MEM_TYPE_MAX = 4, +}; + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; + struct xdp_mem_info mem; + unsigned int napi_id; + u32 frag_size; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xdp_txq_info { + struct net_device *dev; +}; + +struct xdp_cpumap_stats { + unsigned int redirect; + unsigned int pass; + unsigned int drop; +}; + +struct _bpf_dtab_netdev { + struct net_device *dev; +}; + +struct xdp_mem_allocator { + struct xdp_mem_info mem; + union { + void *allocator; + struct page_pool *page_pool; + }; + struct rhash_head node; + struct callback_head rcu; +}; + +struct trace_event_raw_xdp_exception { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_xdp_bulk_tx { + struct trace_entry ent; + int ifindex; + u32 act; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_xdp_redirect_template { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + int err; + int to_ifindex; + u32 map_id; + int map_index; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_kthread { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int sched; + unsigned int xdp_pass; + unsigned int xdp_drop; + unsigned int xdp_redirect; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_enqueue { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int to_cpu; + char __data[0]; +}; + +struct trace_event_raw_xdp_devmap_xmit { + struct trace_entry ent; + int from_ifindex; + u32 act; + int to_ifindex; + int 
drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_mem_disconnect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + char __data[0]; +}; + +struct trace_event_raw_mem_connect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + const struct xdp_rxq_info *rxq; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_mem_return_failed { + struct trace_entry ent; + const struct page *page; + u32 mem_id; + u32 mem_type; + char __data[0]; +}; + +struct trace_event_raw_bpf_xdp_link_attach_failed { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_xdp_exception {}; + +struct trace_event_data_offsets_xdp_bulk_tx {}; + +struct trace_event_data_offsets_xdp_redirect_template {}; + +struct trace_event_data_offsets_xdp_cpumap_kthread {}; + +struct trace_event_data_offsets_xdp_cpumap_enqueue {}; + +struct trace_event_data_offsets_xdp_devmap_xmit {}; + +struct trace_event_data_offsets_mem_disconnect {}; + +struct trace_event_data_offsets_mem_connect {}; + +struct trace_event_data_offsets_mem_return_failed {}; + +struct trace_event_data_offsets_bpf_xdp_link_attach_failed { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, const struct bpf_prog *, u32); + +typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_cpumap_kthread)(void *, int, unsigned int, unsigned int, int, struct xdp_cpumap_stats *); + +typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_mem_disconnect)(void *, const struct xdp_mem_allocator *); + +typedef void (*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, const struct xdp_rxq_info *); + +typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, const struct page *); + +typedef void (*btf_trace_bpf_xdp_link_attach_failed)(void *, const char *); + +enum page_walk_lock { + PGWALK_RDLOCK = 0, + PGWALK_WRLOCK = 1, + PGWALK_WRLOCK_VERIFY = 2, +}; + +struct mm_walk; + +struct mm_walk_ops { + int (*pgd_entry)(pgd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*p4d_entry)(p4d_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pud_entry)(pud_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_hole)(long unsigned int, long unsigned int, int, struct mm_walk *); + int 
(*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *); + int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *); + int (*pre_vma)(long unsigned int, long unsigned int, struct mm_walk *); + void (*post_vma)(struct mm_walk *); + int (*install_pte)(long unsigned int, long unsigned int, pte_t *, struct mm_walk *); + enum page_walk_lock walk_lock; +}; + +enum page_walk_action { + ACTION_SUBTREE = 0, + ACTION_CONTINUE = 1, + ACTION_AGAIN = 2, +}; + +struct mm_walk { + const struct mm_walk_ops *ops; + struct mm_struct *mm; + pgd_t *pgd; + struct vm_area_struct *vma; + enum page_walk_action action; + bool no_vma; + void *private; +}; + +struct swap_slots_cache { + bool lock_initialized; + struct mutex alloc_lock; + swp_entry_t *slots; + int nr; + int cur; + spinlock_t free_lock; + swp_entry_t *slots_ret; + int n_ret; +}; + +enum cpuhp_state { + CPUHP_INVALID = -1, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS = 1, + CPUHP_PERF_PREPARE = 2, + CPUHP_PERF_X86_PREPARE = 3, + CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, + CPUHP_PERF_POWER = 5, + CPUHP_PERF_SUPERH = 6, + CPUHP_X86_HPET_DEAD = 7, + CPUHP_X86_MCE_DEAD = 8, + CPUHP_VIRT_NET_DEAD = 9, + CPUHP_IBMVNIC_DEAD = 10, + CPUHP_SLUB_DEAD = 11, + CPUHP_DEBUG_OBJ_DEAD = 12, + CPUHP_MM_WRITEBACK_DEAD = 13, + CPUHP_MM_VMSTAT_DEAD = 14, + CPUHP_SOFTIRQ_DEAD = 15, + CPUHP_NET_MVNETA_DEAD = 16, + CPUHP_CPUIDLE_DEAD = 17, + CPUHP_ARM64_FPSIMD_DEAD = 18, + CPUHP_ARM_OMAP_WAKE_DEAD = 19, + CPUHP_IRQ_POLL_DEAD = 20, + CPUHP_BLOCK_SOFTIRQ_DEAD = 21, + CPUHP_BIO_DEAD = 22, + CPUHP_ACPI_CPUDRV_DEAD = 23, + CPUHP_S390_PFAULT_DEAD = 24, + CPUHP_BLK_MQ_DEAD = 25, + CPUHP_FS_BUFF_DEAD = 26, + CPUHP_PRINTK_DEAD = 27, + CPUHP_MM_MEMCQ_DEAD = 28, + CPUHP_PERCPU_CNT_DEAD = 29, + CPUHP_RADIX_DEAD = 30, + CPUHP_PAGE_ALLOC = 31, + CPUHP_NET_DEV_DEAD = 32, + CPUHP_PCI_XGENE_DEAD = 33, + CPUHP_IOMMU_IOVA_DEAD = 34, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 35, + CPUHP_PADATA_DEAD = 36, + CPUHP_AP_DTPM_CPU_DEAD = 37, + CPUHP_RANDOM_PREPARE = 38, + CPUHP_WORKQUEUE_PREP = 39, + CPUHP_POWER_NUMA_PREPARE = 40, + CPUHP_HRTIMERS_PREPARE = 41, + CPUHP_X2APIC_PREPARE = 42, + CPUHP_SMPCFD_PREPARE = 43, + CPUHP_RELAY_PREPARE = 44, + CPUHP_MD_RAID5_PREPARE = 45, + CPUHP_RCUTREE_PREP = 46, + CPUHP_CPUIDLE_COUPLED_PREPARE = 47, + CPUHP_POWERPC_PMAC_PREPARE = 48, + CPUHP_POWERPC_MMU_CTX_PREPARE = 49, + CPUHP_XEN_PREPARE = 50, + CPUHP_XEN_EVTCHN_PREPARE = 51, + CPUHP_ARM_SHMOBILE_SCU_PREPARE = 52, + CPUHP_SH_SH3X_PREPARE = 53, + CPUHP_TOPOLOGY_PREPARE = 54, + CPUHP_NET_IUCV_PREPARE = 55, + CPUHP_ARM_BL_PREPARE = 56, + CPUHP_TRACE_RB_PREPARE = 57, + CPUHP_MM_ZS_PREPARE = 58, + CPUHP_MM_ZSWP_POOL_PREPARE = 59, + CPUHP_KVM_PPC_BOOK3S_PREPARE = 60, + CPUHP_ZCOMP_PREPARE = 61, + CPUHP_TIMERS_PREPARE = 62, + CPUHP_TMIGR_PREPARE = 63, + CPUHP_MIPS_SOC_PREPARE = 64, + CPUHP_BP_PREPARE_DYN = 65, + CPUHP_BP_PREPARE_DYN_END = 85, + CPUHP_BP_KICK_AP = 86, + CPUHP_BRINGUP_CPU = 87, + CPUHP_AP_IDLE_DEAD = 88, + CPUHP_AP_OFFLINE = 89, + CPUHP_AP_CACHECTRL_STARTING = 90, + CPUHP_AP_SCHED_STARTING = 91, + CPUHP_AP_RCUTREE_DYING = 92, + CPUHP_AP_CPU_PM_STARTING = 93, + CPUHP_AP_IRQ_GIC_STARTING = 94, + CPUHP_AP_IRQ_HIP04_STARTING = 95, + CPUHP_AP_IRQ_APPLE_AIC_STARTING = 96, + CPUHP_AP_IRQ_ARMADA_XP_STARTING = 97, + CPUHP_AP_IRQ_BCM2836_STARTING = 98, + CPUHP_AP_IRQ_MIPS_GIC_STARTING = 99, + CPUHP_AP_IRQ_EIOINTC_STARTING = 100, + CPUHP_AP_IRQ_AVECINTC_STARTING = 101, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 102, + CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING = 103, + 
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING = 104, + CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING = 105, + CPUHP_AP_ARM_MVEBU_COHERENCY = 106, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 107, + CPUHP_AP_PERF_X86_STARTING = 108, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 109, + CPUHP_AP_PERF_XTENSA_STARTING = 110, + CPUHP_AP_ARM_VFP_STARTING = 111, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 112, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 113, + CPUHP_AP_PERF_ARM_ACPI_STARTING = 114, + CPUHP_AP_PERF_ARM_STARTING = 115, + CPUHP_AP_PERF_RISCV_STARTING = 116, + CPUHP_AP_ARM_L2X0_STARTING = 117, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 118, + CPUHP_AP_ARM_ARCH_TIMER_STARTING = 119, + CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING = 120, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 121, + CPUHP_AP_JCORE_TIMER_STARTING = 122, + CPUHP_AP_ARM_TWD_STARTING = 123, + CPUHP_AP_QCOM_TIMER_STARTING = 124, + CPUHP_AP_TEGRA_TIMER_STARTING = 125, + CPUHP_AP_ARMADA_TIMER_STARTING = 126, + CPUHP_AP_MIPS_GIC_TIMER_STARTING = 127, + CPUHP_AP_ARC_TIMER_STARTING = 128, + CPUHP_AP_REALTEK_TIMER_STARTING = 129, + CPUHP_AP_RISCV_TIMER_STARTING = 130, + CPUHP_AP_CLINT_TIMER_STARTING = 131, + CPUHP_AP_CSKY_TIMER_STARTING = 132, + CPUHP_AP_TI_GP_TIMER_STARTING = 133, + CPUHP_AP_HYPERV_TIMER_STARTING = 134, + CPUHP_AP_DUMMY_TIMER_STARTING = 135, + CPUHP_AP_ARM_XEN_STARTING = 136, + CPUHP_AP_ARM_XEN_RUNSTATE_STARTING = 137, + CPUHP_AP_ARM_CORESIGHT_STARTING = 138, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 139, + CPUHP_AP_ARM64_ISNDEP_STARTING = 140, + CPUHP_AP_SMPCFD_DYING = 141, + CPUHP_AP_HRTIMERS_DYING = 142, + CPUHP_AP_TICK_DYING = 143, + CPUHP_AP_X86_TBOOT_DYING = 144, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 145, + CPUHP_AP_ONLINE = 146, + CPUHP_TEARDOWN_CPU = 147, + CPUHP_AP_ONLINE_IDLE = 148, + CPUHP_AP_HYPERV_ONLINE = 149, + CPUHP_AP_KVM_ONLINE = 150, + CPUHP_AP_SCHED_WAIT_EMPTY = 151, + CPUHP_AP_SMPBOOT_THREADS = 152, + CPUHP_AP_IRQ_AFFINITY_ONLINE = 153, + CPUHP_AP_BLK_MQ_ONLINE = 154, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 155, + CPUHP_AP_X86_INTEL_EPB_ONLINE = 156, + CPUHP_AP_PERF_ONLINE = 157, + CPUHP_AP_PERF_X86_ONLINE = 158, + CPUHP_AP_PERF_X86_UNCORE_ONLINE = 159, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 160, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 161, + CPUHP_AP_PERF_S390_CF_ONLINE = 162, + CPUHP_AP_PERF_S390_SF_ONLINE = 163, + CPUHP_AP_PERF_ARM_CCI_ONLINE = 164, + CPUHP_AP_PERF_ARM_CCN_ONLINE = 165, + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE = 166, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 167, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 168, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 169, + CPUHP_AP_PERF_ARM_HISI_PA_ONLINE = 170, + CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE = 171, + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE = 172, + CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE = 173, + CPUHP_AP_PERF_ARM_L2X0_ONLINE = 174, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 175, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 176, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 177, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 178, + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE = 179, + CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE = 180, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 181, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 182, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 183, + CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 184, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 185, + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 186, + CPUHP_AP_PERF_CSKY_ONLINE = 187, + CPUHP_AP_TMIGR_ONLINE = 188, + CPUHP_AP_WATCHDOG_ONLINE = 189, + CPUHP_AP_WORKQUEUE_ONLINE = 190, + CPUHP_AP_RANDOM_ONLINE = 191, + CPUHP_AP_RCUTREE_ONLINE = 192, + 
CPUHP_AP_BASE_CACHEINFO_ONLINE = 193, + CPUHP_AP_ONLINE_DYN = 194, + CPUHP_AP_ONLINE_DYN_END = 234, + CPUHP_AP_X86_HPET_ONLINE = 235, + CPUHP_AP_X86_KVM_CLK_ONLINE = 236, + CPUHP_AP_ACTIVE = 237, + CPUHP_ONLINE = 238, +}; + +struct wait_bit_queue_entry { + struct wait_bit_key key; + struct wait_queue_entry wq_entry; +}; + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; + +struct ida { + struct xarray xa; +}; + +typedef __kernel_ulong_t ino_t; + +typedef int class_get_unused_fd_t; + +typedef struct { + void *lock; +} class_rcu_t; + +struct nsset { + unsigned int flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +typedef struct ns_common *ns_get_path_helper_t(void *); + +struct mnt_ns_info { + __u32 size; + __u32 nr_mounts; + __u64 mnt_ns_id; +}; + +struct stashed_operations { + void (*put_data)(void *); + int (*init_inode)(struct inode *, void *); +}; + +struct ns_get_path_task_args { + const struct proc_ns_operations *ns_ops; + struct task_struct *task; +}; + +typedef unsigned int __kernel_uid_t; + +typedef unsigned int __kernel_gid_t; + +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_long_t tv_usec; +}; + +struct siginfo { + union { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; + int _si_pad[32]; + }; +}; + +typedef struct siginfo siginfo_t; + +typedef __u64 Elf64_Off; + +struct elf64_hdr { + unsigned char e_ident[16]; + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; + Elf64_Off e_phoff; + Elf64_Off e_shoff; + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +}; + +struct elf64_phdr { + Elf64_Word p_type; + Elf64_Word p_flags; + Elf64_Off p_offset; + Elf64_Addr p_vaddr; + Elf64_Addr p_paddr; + Elf64_Xword p_filesz; + Elf64_Xword p_memsz; + Elf64_Xword p_align; +}; + +struct elf64_shdr { + Elf64_Word sh_name; + Elf64_Word sh_type; + Elf64_Xword sh_flags; + Elf64_Addr sh_addr; + Elf64_Off sh_offset; + Elf64_Xword sh_size; + Elf64_Word sh_link; + Elf64_Word sh_info; + Elf64_Xword sh_addralign; + Elf64_Xword sh_entsize; +}; + +struct elf64_note { + Elf64_Word n_namesz; + Elf64_Word n_descsz; + Elf64_Word n_type; +}; + +typedef long unsigned int elf_greg_t; + +typedef elf_greg_t elf_gregset_t[34]; + +struct arch_elf_state { + int flags; +}; + +struct gnu_property { + u32 pr_type; + u32 pr_datasz; +}; + +struct core_vma_metadata; + +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct file *file; + long unsigned int limit; + long unsigned int mm_flags; + int cpu; + loff_t written; + loff_t pos; + loff_t to_skip; + int vma_count; + size_t vma_data_size; + struct core_vma_metadata *vma_meta; +}; + +struct elf_siginfo { + int si_signo; + int si_code; + int si_errno; +}; + +struct elf_prstatus_common { + struct elf_siginfo pr_info; + short int pr_cursig; + long unsigned int pr_sigpend; + long unsigned int pr_sighold; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; + struct __kernel_old_timeval pr_stime; + struct __kernel_old_timeval pr_cutime; + struct __kernel_old_timeval pr_cstime; +}; + +struct elf_prstatus { + struct elf_prstatus_common common; + elf_gregset_t pr_reg; + int pr_fpvalid; +}; + +struct elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + long unsigned int pr_flag; + __kernel_uid_t pr_uid; + __kernel_gid_t 
pr_gid; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +struct core_vma_metadata { + long unsigned int start; + long unsigned int end; + long unsigned int flags; + long unsigned int dump_size; + long unsigned int pgoff; + struct file *file; +}; + +struct memelfnote { + const char *name; + int type; + unsigned int datasz; + void *data; +}; + +struct membuf { + void *p; + size_t left; +}; + +struct user_regset; + +typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); + +typedef int user_regset_get2_fn(struct task_struct *, const struct user_regset *, struct membuf); + +typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *); + +typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); + +struct user_regset { + user_regset_get2_fn *regset_get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + +struct elf_thread_core_info { + struct elf_thread_core_info *next; + struct task_struct *task; + struct elf_prstatus prstatus; + struct memelfnote notes[0]; +}; + +struct elf_note_info { + struct elf_thread_core_info *thread; + struct memelfnote psinfo; + struct memelfnote signote; + struct memelfnote auxv; + struct memelfnote files; + siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +typedef unsigned int fgf_t; + +enum { + EXT4_INODE_SECRM = 0, + EXT4_INODE_UNRM = 1, + EXT4_INODE_COMPR = 2, + EXT4_INODE_SYNC = 3, + EXT4_INODE_IMMUTABLE = 4, + EXT4_INODE_APPEND = 5, + EXT4_INODE_NODUMP = 6, + EXT4_INODE_NOATIME = 7, + EXT4_INODE_DIRTY = 8, + EXT4_INODE_COMPRBLK = 9, + EXT4_INODE_NOCOMPR = 10, + EXT4_INODE_ENCRYPT = 11, + EXT4_INODE_INDEX = 12, + EXT4_INODE_IMAGIC = 13, + EXT4_INODE_JOURNAL_DATA = 14, + EXT4_INODE_NOTAIL = 15, + EXT4_INODE_DIRSYNC = 16, + EXT4_INODE_TOPDIR = 17, + EXT4_INODE_HUGE_FILE = 18, + EXT4_INODE_EXTENTS = 19, + EXT4_INODE_VERITY = 20, + EXT4_INODE_EA_INODE = 21, + EXT4_INODE_DAX = 25, + EXT4_INODE_INLINE_DATA = 28, + EXT4_INODE_PROJINHERIT = 29, + EXT4_INODE_CASEFOLD = 30, + EXT4_INODE_RESERVED = 31, +}; + +struct ext4_extent { + __le32 ee_block; + __le16 ee_len; + __le16 ee_start_hi; + __le32 ee_start_lo; +}; + +struct ext4_extent_idx { + __le32 ei_block; + __le32 ei_leaf_lo; + __le16 ei_leaf_hi; + __u16 ei_unused; +}; + +struct ext4_extent_header { + __le16 eh_magic; + __le16 eh_entries; + __le16 eh_max; + __le16 eh_depth; + __le32 eh_generation; +}; + +struct ext4_ext_path { + ext4_fsblk_t p_block; + __u16 p_depth; + __u16 p_maxdepth; + struct ext4_extent *p_ext; + struct ext4_extent_idx *p_idx; + struct ext4_extent_header *p_hdr; + struct buffer_head *p_bh; +}; + +enum utf16_endian { + UTF16_HOST_ENDIAN = 0, + UTF16_LITTLE_ENDIAN = 1, + UTF16_BIG_ENDIAN = 2, +}; + +struct __fat_dirent { + long int d_ino; + __kernel_off_t d_off; + short unsigned int d_reclen; + char d_name[256]; +}; + +struct msdos_dir_entry { + __u8 name[11]; + __u8 attr; + __u8 lcase; + __u8 ctime_cs; + __le16 ctime; + __le16 cdate; + __le16 adate; + __le16 starthi; + __le16 time; + __le16 date; + __le16 start; + __le32 size; +}; + +struct msdos_dir_slot { + __u8 id; + 
__u8 name0_4[10]; + __u8 attr; + __u8 reserved; + __u8 alias_checksum; + __u8 name5_10[12]; + __le16 start; + __u8 name11_12[4]; +}; + +struct fat_slot_info { + loff_t i_pos; + loff_t slot_off; + int nr_slots; + struct msdos_dir_entry *de; + struct buffer_head *bh; +}; + +typedef long long unsigned int llu; + +enum { + PARSE_INVALID = 1, + PARSE_NOT_LONGNAME = 2, + PARSE_EOF = 3, +}; + +struct fat_ioctl_filldir_callback { + struct dir_context ctx; + void *dirent; + int result; + const char *longname; + int long_len; + const char *shortname; + int short_len; +}; + +enum fuse_opcode { + FUSE_LOOKUP = 1, + FUSE_FORGET = 2, + FUSE_GETATTR = 3, + FUSE_SETATTR = 4, + FUSE_READLINK = 5, + FUSE_SYMLINK = 6, + FUSE_MKNOD = 8, + FUSE_MKDIR = 9, + FUSE_UNLINK = 10, + FUSE_RMDIR = 11, + FUSE_RENAME = 12, + FUSE_LINK = 13, + FUSE_OPEN = 14, + FUSE_READ = 15, + FUSE_WRITE = 16, + FUSE_STATFS = 17, + FUSE_RELEASE = 18, + FUSE_FSYNC = 20, + FUSE_SETXATTR = 21, + FUSE_GETXATTR = 22, + FUSE_LISTXATTR = 23, + FUSE_REMOVEXATTR = 24, + FUSE_FLUSH = 25, + FUSE_INIT = 26, + FUSE_OPENDIR = 27, + FUSE_READDIR = 28, + FUSE_RELEASEDIR = 29, + FUSE_FSYNCDIR = 30, + FUSE_GETLK = 31, + FUSE_SETLK = 32, + FUSE_SETLKW = 33, + FUSE_ACCESS = 34, + FUSE_CREATE = 35, + FUSE_INTERRUPT = 36, + FUSE_BMAP = 37, + FUSE_DESTROY = 38, + FUSE_IOCTL = 39, + FUSE_POLL = 40, + FUSE_NOTIFY_REPLY = 41, + FUSE_BATCH_FORGET = 42, + FUSE_FALLOCATE = 43, + FUSE_READDIRPLUS = 44, + FUSE_RENAME2 = 45, + FUSE_LSEEK = 46, + FUSE_COPY_FILE_RANGE = 47, + FUSE_SETUPMAPPING = 48, + FUSE_REMOVEMAPPING = 49, + FUSE_SYNCFS = 50, + FUSE_TMPFILE = 51, + FUSE_STATX = 52, + CUSE_INIT = 4096, + CUSE_INIT_BSWAP_RESERVED = 1048576, + FUSE_INIT_BSWAP_RESERVED = 436207616, +}; + +struct fuse_forget_one { + uint64_t nodeid; + uint64_t nlookup; +}; + +struct fuse_setxattr_in { + uint32_t size; + uint32_t flags; + uint32_t setxattr_flags; + uint32_t padding; +}; + +struct fuse_getxattr_in { + uint32_t size; + uint32_t padding; +}; + +struct fuse_getxattr_out { + uint32_t size; + uint32_t padding; +}; + +struct fuse_in_header { + uint32_t len; + uint32_t opcode; + uint64_t unique; + uint64_t nodeid; + uint32_t uid; + uint32_t gid; + uint32_t pid; + uint16_t total_extlen; + uint16_t padding; +}; + +struct fuse_out_header { + uint32_t len; + int32_t error; + uint64_t unique; +}; + +struct fuse_forget_link { + struct fuse_forget_one forget_one; + struct fuse_forget_link *next; +}; + +struct fuse_submount_lookup { + refcount_t count; + u64 nodeid; + struct fuse_forget_link *forget; +}; + +struct fuse_backing { + struct file *file; + struct cred *cred; + refcount_t count; + struct callback_head rcu; +}; + +struct fuse_inode { + struct inode inode; + u64 nodeid; + u64 nlookup; + struct fuse_forget_link *forget; + u64 i_time; + u32 inval_mask; + umode_t orig_i_mode; + struct timespec64 i_btime; + u64 orig_ino; + u64 attr_version; + union { + struct { + struct list_head write_files; + struct list_head queued_writes; + int writectr; + int iocachectr; + wait_queue_head_t page_waitq; + wait_queue_head_t direct_io_waitq; + struct rb_root writepages; + }; + struct { + bool cached; + loff_t size; + loff_t pos; + u64 version; + struct timespec64 mtime; + u64 iversion; + spinlock_t lock; + } rdc; + }; + long unsigned int state; + struct mutex mutex; + spinlock_t lock; + struct fuse_submount_lookup *submount_lookup; + struct fuse_backing *fb; +}; + +enum { + FUSE_I_ADVISE_RDPLUS = 0, + FUSE_I_INIT_RDPLUS = 1, + FUSE_I_SIZE_UNSTABLE = 2, + FUSE_I_BAD = 3, + FUSE_I_BTIME = 4, + 
FUSE_I_CACHE_IO_MODE = 5, +}; + +struct fuse_conn; + +struct fuse_mount { + struct fuse_conn *fc; + struct super_block *sb; + struct list_head fc_entry; + struct callback_head rcu; +}; + +struct fuse_in_arg { + unsigned int size; + const void *value; +}; + +struct fuse_arg { + unsigned int size; + void *value; +}; + +struct fuse_args { + uint64_t nodeid; + uint32_t opcode; + uint8_t in_numargs; + uint8_t out_numargs; + uint8_t ext_idx; + bool force: 1; + bool noreply: 1; + bool nocreds: 1; + bool in_pages: 1; + bool out_pages: 1; + bool user_pages: 1; + bool out_argvar: 1; + bool page_zeroing: 1; + bool page_replace: 1; + bool may_block: 1; + bool is_ext: 1; + bool is_pinned: 1; + bool invalidate_vmap: 1; + struct fuse_in_arg in_args[3]; + struct fuse_arg out_args[2]; + void (*end)(struct fuse_mount *, struct fuse_args *, int); + void *vmap_base; +}; + +struct fuse_req { + struct list_head list; + struct list_head intr_entry; + struct fuse_args *args; + refcount_t count; + long unsigned int flags; + struct { + struct fuse_in_header h; + } in; + struct { + struct fuse_out_header h; + } out; + wait_queue_head_t waitq; + struct fuse_mount *fm; +}; + +struct fuse_iqueue; + +struct fuse_iqueue_ops { + void (*send_forget)(struct fuse_iqueue *, struct fuse_forget_link *); + void (*send_interrupt)(struct fuse_iqueue *, struct fuse_req *); + void (*send_req)(struct fuse_iqueue *, struct fuse_req *); + void (*release)(struct fuse_iqueue *); +}; + +struct fuse_iqueue { + unsigned int connected; + spinlock_t lock; + wait_queue_head_t waitq; + u64 reqctr; + struct list_head pending; + struct list_head interrupts; + struct fuse_forget_link forget_list_head; + struct fuse_forget_link *forget_list_tail; + int forget_batch; + struct fasync_struct *fasync; + const struct fuse_iqueue_ops *ops; + void *priv; +}; + +struct fuse_sync_bucket; + +struct fuse_conn { + spinlock_t lock; + refcount_t count; + atomic_t dev_count; + struct callback_head rcu; + kuid_t user_id; + kgid_t group_id; + struct pid_namespace *pid_ns; + struct user_namespace *user_ns; + unsigned int max_read; + unsigned int max_write; + unsigned int max_pages; + unsigned int max_pages_limit; + struct fuse_iqueue iq; + atomic64_t khctr; + struct rb_root polled_files; + unsigned int max_background; + unsigned int congestion_threshold; + unsigned int num_background; + unsigned int active_background; + struct list_head bg_queue; + spinlock_t bg_lock; + int initialized; + int blocked; + wait_queue_head_t blocked_waitq; + unsigned int connected; + bool aborted; + unsigned int conn_error: 1; + unsigned int conn_init: 1; + unsigned int async_read: 1; + unsigned int abort_err: 1; + unsigned int atomic_o_trunc: 1; + unsigned int export_support: 1; + unsigned int writeback_cache: 1; + unsigned int parallel_dirops: 1; + unsigned int handle_killpriv: 1; + unsigned int cache_symlinks: 1; + unsigned int legacy_opts_show: 1; + unsigned int handle_killpriv_v2: 1; + unsigned int no_open: 1; + unsigned int no_opendir: 1; + unsigned int no_fsync: 1; + unsigned int no_fsyncdir: 1; + unsigned int no_flush: 1; + unsigned int no_setxattr: 1; + unsigned int setxattr_ext: 1; + unsigned int no_getxattr: 1; + unsigned int no_listxattr: 1; + unsigned int no_removexattr: 1; + unsigned int no_lock: 1; + unsigned int no_access: 1; + unsigned int no_create: 1; + unsigned int no_interrupt: 1; + unsigned int no_bmap: 1; + unsigned int no_poll: 1; + unsigned int big_writes: 1; + unsigned int dont_mask: 1; + unsigned int no_flock: 1; + unsigned int no_fallocate: 1; + unsigned int 
no_rename2: 1; + unsigned int auto_inval_data: 1; + unsigned int explicit_inval_data: 1; + unsigned int do_readdirplus: 1; + unsigned int readdirplus_auto: 1; + unsigned int async_dio: 1; + unsigned int no_lseek: 1; + unsigned int posix_acl: 1; + unsigned int default_permissions: 1; + unsigned int allow_other: 1; + unsigned int no_copy_file_range: 1; + unsigned int destroy: 1; + unsigned int delete_stale: 1; + unsigned int no_control: 1; + unsigned int no_force_umount: 1; + unsigned int auto_submounts: 1; + unsigned int sync_fs: 1; + unsigned int init_security: 1; + unsigned int create_supp_group: 1; + unsigned int inode_dax: 1; + unsigned int no_tmpfile: 1; + unsigned int direct_io_allow_mmap: 1; + unsigned int no_statx: 1; + unsigned int passthrough: 1; + unsigned int use_pages_for_kvec_io: 1; + int max_stack_depth; + atomic_t num_waiting; + unsigned int minor; + struct list_head entry; + dev_t dev; + struct dentry *ctl_dentry[5]; + int ctl_ndents; + u32 scramble_key[4]; + atomic64_t attr_version; + atomic64_t evict_ctr; + void (*release)(struct fuse_conn *); + struct rw_semaphore killsb; + struct list_head devices; + struct list_head mounts; + struct fuse_sync_bucket *curr_bucket; + struct idr backing_files_map; +}; + +struct fuse_sync_bucket { + atomic_t count; + wait_queue_head_t waitq; + struct callback_head rcu; +}; + +struct xattr { + const char *name; + void *value; + size_t value_len; +}; + +typedef int (*initxattrs)(struct inode *, const struct xattr *, void *); + +enum { + READA_NONE = 0, + READA_BACK = 1, + READA_FORWARD = 2, + READA_FORWARD_ALWAYS = 3, +}; + +enum { + BTRFS_INODE_FLUSH_ON_CLOSE = 0, + BTRFS_INODE_DUMMY = 1, + BTRFS_INODE_IN_DEFRAG = 2, + BTRFS_INODE_HAS_ASYNC_EXTENT = 3, + BTRFS_INODE_NEEDS_FULL_SYNC = 4, + BTRFS_INODE_COPY_EVERYTHING = 5, + BTRFS_INODE_HAS_PROPS = 6, + BTRFS_INODE_SNAPSHOT_FLUSH = 7, + BTRFS_INODE_NO_XATTRS = 8, + BTRFS_INODE_NO_DELALLOC_FLUSH = 9, + BTRFS_INODE_VERITY_IN_PROGRESS = 10, + BTRFS_INODE_FREE_SPACE_INODE = 11, + BTRFS_INODE_NO_CAP_XATTR = 12, + BTRFS_INODE_COW_WRITE_ERROR = 13, + BTRFS_INODE_ROOT_STUB = 14, +}; + +struct btrfs_free_space_info { + __le32 extent_count; + __le32 flags; +}; + +struct btrfs_item_batch { + const struct btrfs_key *keys; + const u32 *data_sizes; + u32 total_data_size; + int nr; +}; + +enum btrfs_block_group_flags { + BLOCK_GROUP_FLAG_IREF = 0, + BLOCK_GROUP_FLAG_REMOVED = 1, + BLOCK_GROUP_FLAG_TO_COPY = 2, + BLOCK_GROUP_FLAG_RELOCATING_REPAIR = 3, + BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED = 4, + BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE = 5, + BLOCK_GROUP_FLAG_ZONED_DATA_RELOC = 6, + BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE = 7, + BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE = 8, + BLOCK_GROUP_FLAG_NEW = 9, +}; + +typedef int __kernel_mqd_t; + +typedef __kernel_mqd_t mqd_t; + +enum inode_i_mutex_lock_class { + I_MUTEX_NORMAL = 0, + I_MUTEX_PARENT = 1, + I_MUTEX_CHILD = 2, + I_MUTEX_XATTR = 3, + I_MUTEX_NONDIR2 = 4, + I_MUTEX_PARENT2 = 5, +}; + +struct ipc_ids { + int in_use; + short unsigned int seq; + struct rw_semaphore rwsem; + struct idr ipcs_idr; + int max_idx; + int last_idx; + struct rhashtable key_ht; +}; + +struct ipc_namespace { + struct ipc_ids ids[3]; + int sem_ctls[4]; + int used_sems; + unsigned int msg_ctlmax; + unsigned int msg_ctlmnb; + unsigned int msg_ctlmni; + struct percpu_counter percpu_msg_bytes; + struct percpu_counter percpu_msg_hdrs; + size_t shm_ctlmax; + size_t shm_ctlall; + long unsigned int shm_tot; + int shm_ctlmni; + int shm_rmid_forced; + struct notifier_block ipcns_nb; + struct vfsmount *mq_mnt; + 
unsigned int mq_queues_count; + unsigned int mq_queues_max; + unsigned int mq_msg_max; + unsigned int mq_msgsize_max; + unsigned int mq_msg_default; + unsigned int mq_msgsize_default; + struct ctl_table_set mq_set; + struct ctl_table_header *mq_sysctls; + struct ctl_table_set ipc_set; + struct ctl_table_header *ipc_sysctls; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct llist_node mnt_llist; + struct ns_common ns; +}; + +typedef struct poll_table_struct poll_table; + +struct mq_attr { + __kernel_long_t mq_flags; + __kernel_long_t mq_maxmsg; + __kernel_long_t mq_msgsize; + __kernel_long_t mq_curmsgs; + __kernel_long_t __reserved[4]; +}; + +struct msg_msgseg; + +struct msg_msg { + struct list_head m_list; + long int m_type; + size_t m_ts; + struct msg_msgseg *next; + void *security; +}; + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +struct mqueue_fs_context { + struct ipc_namespace *ipc_ns; + bool newns; +}; + +struct posix_msg_tree_node { + struct rb_node rb_node; + struct list_head msg_list; + int priority; +}; + +struct ext_wait_queue { + struct task_struct *task; + struct list_head list; + struct msg_msg *msg; + int state; +}; + +struct mqueue_inode_info { + spinlock_t lock; + struct inode vfs_inode; + wait_queue_head_t wait_q; + struct rb_root msg_tree; + struct rb_node *msg_tree_rightmost; + struct posix_msg_tree_node *node_cache; + struct mq_attr attr; + struct sigevent notify; + struct pid *notify_owner; + u32 notify_self_exec_id; + struct user_namespace *notify_user_ns; + struct ucounts *ucounts; + struct sock *notify_sock; + struct sk_buff *notify_cookie; + struct ext_wait_queue e_wait_q[2]; + long unsigned int qsize; +}; + +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + struct crypto_alg base; +}; + +struct shash_desc { + struct crypto_shash *tfm; + void *__ctx[0]; +}; + +struct shash_alg { + int (*init)(struct shash_desc *); + int (*update)(struct shash_desc *, const u8 *, unsigned int); + int (*final)(struct shash_desc *, u8 *); + int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*export)(struct shash_desc *, void *); + int (*import)(struct shash_desc *, const void *); + int (*setkey)(struct crypto_shash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_shash *); + void (*exit_tfm)(struct crypto_shash *); + int (*clone_tfm)(struct crypto_shash *, struct crypto_shash *); + unsigned int descsize; + union { + struct { + unsigned int digestsize; + unsigned int statesize; + struct crypto_alg base; + }; + struct hash_alg_common halg; + }; +}; + +struct xxhash64_tfm_ctx { + u64 seed; +}; + +struct xxhash64_desc_ctx { + struct xxh64_state xxhstate; +}; + +typedef struct {} local_lock_t; + +struct radix_tree_preload { + local_lock_t lock; + unsigned int nr; + struct xa_node *nodes; +}; + +enum { + ICQ_EXITED = 4, + ICQ_DESTROYED = 8, +}; + +enum { + IOPRIO_HINT_NONE = 0, + IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1, + IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2, + IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3, + IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4, + IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5, + IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6, + IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7, +}; + +enum hctx_type { + HCTX_TYPE_DEFAULT = 0, + HCTX_TYPE_READ = 1, + HCTX_TYPE_POLL = 2, + HCTX_MAX_TYPES = 3, +}; + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + u64 batch; +}; + +enum rq_qos_id { + RQ_QOS_WBT 
= 0, + RQ_QOS_LATENCY = 1, + RQ_QOS_COST = 2, +}; + +struct rq_qos_ops; + +struct rq_qos { + const struct rq_qos_ops *ops; + struct gendisk *disk; + enum rq_qos_id id; + struct rq_qos *next; + struct dentry *debugfs_dir; +}; + +struct blk_stat_callback { + struct list_head list; + struct timer_list timer; + struct blk_rq_stat *cpu_stat; + int (*bucket_fn)(const struct request *); + unsigned int buckets; + struct blk_rq_stat *stat; + void (*timer_fn)(struct blk_stat_callback *); + void *data; + struct callback_head rcu; +}; + +struct rq_wait { + wait_queue_head_t wait; + atomic_t inflight; +}; + +struct rq_qos_ops { + void (*throttle)(struct rq_qos *, struct bio *); + void (*track)(struct rq_qos *, struct request *, struct bio *); + void (*merge)(struct rq_qos *, struct request *, struct bio *); + void (*issue)(struct rq_qos *, struct request *); + void (*requeue)(struct rq_qos *, struct request *); + void (*done)(struct rq_qos *, struct request *); + void (*done_bio)(struct rq_qos *, struct bio *); + void (*cleanup)(struct rq_qos *, struct bio *); + void (*queue_depth_changed)(struct rq_qos *); + void (*exit)(struct rq_qos *); + const struct blk_mq_debugfs_attr *debugfs_attrs; +}; + +struct rq_depth { + unsigned int max_depth; + int scale_step; + bool scaled_max; + unsigned int queue_depth; + unsigned int default_depth; +}; + +typedef bool acquire_inflight_cb_t(struct rq_wait *, void *); + +typedef void cleanup_cb_t(struct rq_wait *, void *); + +struct trace_event_raw_wbt_stat { + struct trace_entry ent; + char name[32]; + s64 rmean; + u64 rmin; + u64 rmax; + s64 rnr_samples; + s64 rtime; + s64 wmean; + u64 wmin; + u64 wmax; + s64 wnr_samples; + s64 wtime; + char __data[0]; +}; + +struct trace_event_raw_wbt_lat { + struct trace_entry ent; + char name[32]; + long unsigned int lat; + char __data[0]; +}; + +struct trace_event_raw_wbt_step { + struct trace_entry ent; + char name[32]; + const char *msg; + int step; + long unsigned int window; + unsigned int bg; + unsigned int normal; + unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_wbt_timer { + struct trace_entry ent; + char name[32]; + unsigned int status; + int step; + unsigned int inflight; + char __data[0]; +}; + +struct trace_event_data_offsets_wbt_stat {}; + +struct trace_event_data_offsets_wbt_lat {}; + +struct trace_event_data_offsets_wbt_step {}; + +struct trace_event_data_offsets_wbt_timer {}; + +typedef void (*btf_trace_wbt_stat)(void *, struct backing_dev_info *, struct blk_rq_stat *); + +typedef void (*btf_trace_wbt_lat)(void *, struct backing_dev_info *, long unsigned int); + +typedef void (*btf_trace_wbt_step)(void *, struct backing_dev_info *, const char *, int, long unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_wbt_timer)(void *, struct backing_dev_info *, unsigned int, int, unsigned int); + +enum wbt_flags { + WBT_TRACKED = 1, + WBT_READ = 2, + WBT_SWAP = 4, + WBT_DISCARD = 8, + WBT_NR_BITS = 4, +}; + +enum { + WBT_RWQ_BG = 0, + WBT_RWQ_SWAP = 1, + WBT_RWQ_DISCARD = 2, + WBT_NUM_RWQ = 3, +}; + +enum { + WBT_STATE_ON_DEFAULT = 1, + WBT_STATE_ON_MANUAL = 2, + WBT_STATE_OFF_DEFAULT = 3, + WBT_STATE_OFF_MANUAL = 4, +}; + +struct rq_wb { + unsigned int wb_background; + unsigned int wb_normal; + short int enable_state; + unsigned int unknown_cnt; + u64 win_nsec; + u64 cur_win_nsec; + struct blk_stat_callback *cb; + u64 sync_issue; + void *sync_cookie; + long unsigned int last_issue; + long unsigned int last_comp; + long unsigned int min_lat_nsec; + struct rq_qos rqos; + 
struct rq_wait rq_wait[3]; + struct rq_depth rq_depth; +}; + +enum { + RWB_DEF_DEPTH = 16, + RWB_WINDOW_NSEC = 100000000, + RWB_MIN_WRITE_SAMPLES = 3, + RWB_UNKNOWN_BUMP = 5, +}; + +enum { + LAT_OK = 1, + LAT_UNKNOWN = 2, + LAT_UNKNOWN_WRITES = 3, + LAT_EXCEEDED = 4, +}; + +struct wbt_wait_data { + struct rq_wb *rwb; + enum wbt_flags wb_acct; + blk_opf_t opf; +}; + +enum { + DUMP_PREFIX_NONE = 0, + DUMP_PREFIX_ADDRESS = 1, + DUMP_PREFIX_OFFSET = 2, +}; + +typedef mpi_limb_t UWtype; + +typedef unsigned int UHWtype; + +struct gcry_mpi { + int alloced; + int nlimbs; + int nbits; + int sign; + unsigned int flags; + mpi_limb_t *d; +}; + +typedef struct gcry_mpi *MPI; + +typedef struct { + unsigned char op; + unsigned char bits; + short unsigned int val; +} code; + +typedef enum { + CODES = 0, + LENS = 1, + DISTS = 2, +} codetype; + +typedef U64 ZSTD_VecMask; + +typedef enum { + search_hashChain = 0, + search_binaryTree = 1, + search_rowHash = 2, +} searchMethod_e; + +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *, struct device_attribute *, char *); + ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); +}; + +struct virtio_device_id { + __u32 device; + __u32 vendor; +}; + +struct virtio_device; + +struct virtqueue { + struct list_head list; + void (*callback)(struct virtqueue *); + const char *name; + struct virtio_device *vdev; + unsigned int index; + unsigned int num_free; + unsigned int num_max; + bool reset; + void *priv; +}; + +struct vringh_config_ops; + +struct virtio_config_ops; + +struct virtio_device { + int index; + bool failed; + bool config_core_enabled; + bool config_driver_disabled; + bool config_change_pending; + spinlock_t config_lock; + spinlock_t vqs_list_lock; + struct device dev; + struct virtio_device_id id; + const struct virtio_config_ops *config; + const struct vringh_config_ops *vringh_config; + struct list_head vqs; + u64 features; + void *priv; +}; + +struct virtqueue_info; + +struct irq_affinity; + +struct virtio_shm_region; + +struct virtio_config_ops { + void (*get)(struct virtio_device *, unsigned int, void *, unsigned int); + void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int); + u32 (*generation)(struct virtio_device *); + u8 (*get_status)(struct virtio_device *); + void (*set_status)(struct virtio_device *, u8); + void (*reset)(struct virtio_device *); + int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, struct virtqueue_info *, struct irq_affinity *); + void (*del_vqs)(struct virtio_device *); + void (*synchronize_cbs)(struct virtio_device *); + u64 (*get_features)(struct virtio_device *); + int (*finalize_features)(struct virtio_device *); + const char * (*bus_name)(struct virtio_device *); + int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *); + const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int); + bool (*get_shm_region)(struct virtio_device *, struct virtio_shm_region *, u8); + int (*disable_vq_and_reset)(struct virtqueue *); + int (*enable_vq_after_reset)(struct virtqueue *); +}; + +struct virtio_driver { + struct device_driver driver; + const struct virtio_device_id *id_table; + const unsigned int *feature_table; + unsigned int feature_table_size; + const unsigned int *feature_table_legacy; + unsigned int feature_table_size_legacy; + int (*validate)(struct virtio_device *); + int (*probe)(struct virtio_device *); + void (*scan)(struct virtio_device *); + void (*remove)(struct virtio_device *); + void 
(*config_changed)(struct virtio_device *); + int (*freeze)(struct virtio_device *); + int (*restore)(struct virtio_device *); +}; + +struct virtio_shm_region { + u64 addr; + u64 len; +}; + +typedef void vq_callback_t(struct virtqueue *); + +struct virtqueue_info { + const char *name; + vq_callback_t *callback; + bool ctx; +}; + +struct trace_event_raw_iommu_group_event { + struct trace_entry ent; + int gid; + u32 __data_loc_device; + char __data[0]; +}; + +struct trace_event_raw_iommu_device_event { + struct trace_entry ent; + u32 __data_loc_device; + char __data[0]; +}; + +struct trace_event_raw_map { + struct trace_entry ent; + u64 iova; + u64 paddr; + size_t size; + char __data[0]; +}; + +struct trace_event_raw_unmap { + struct trace_entry ent; + u64 iova; + size_t size; + size_t unmapped_size; + char __data[0]; +}; + +struct trace_event_raw_iommu_error { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u64 iova; + int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_iommu_group_event { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_iommu_device_event { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_map {}; + +struct trace_event_data_offsets_unmap {}; + +struct trace_event_data_offsets_iommu_error { + u32 device; + const void *device_ptr_; + u32 driver; + const void *driver_ptr_; +}; + +typedef void (*btf_trace_add_device_to_group)(void *, int, struct device *); + +typedef void (*btf_trace_remove_device_from_group)(void *, int, struct device *); + +typedef void (*btf_trace_attach_device_to_domain)(void *, struct device *); + +typedef void (*btf_trace_map)(void *, long unsigned int, phys_addr_t, size_t); + +typedef void (*btf_trace_unmap)(void *, long unsigned int, size_t, size_t); + +typedef void (*btf_trace_io_page_fault)(void *, struct device *, long unsigned int, int); + +struct dma_fence_ops; + +struct dma_fence { + spinlock_t *lock; + const struct dma_fence_ops *ops; + union { + struct list_head cb_list; + ktime_t timestamp; + struct callback_head rcu; + }; + u64 context; + u64 seqno; + long unsigned int flags; + struct kref refcount; + int error; +}; + +struct dma_fence_ops { + bool use_64bit_seqno; + const char * (*get_driver_name)(struct dma_fence *); + const char * (*get_timeline_name)(struct dma_fence *); + bool (*enable_signaling)(struct dma_fence *); + bool (*signaled)(struct dma_fence *); + long int (*wait)(struct dma_fence *, bool, long int); + void (*release)(struct dma_fence *); + void (*fence_value_str)(struct dma_fence *, char *, int); + void (*timeline_value_str)(struct dma_fence *, char *, int); + void (*set_deadline)(struct dma_fence *, ktime_t); +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT = 0, + DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, + DMA_FENCE_FLAG_USER_BITS = 3, +}; + +struct dma_fence_cb; + +typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *); + +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +struct trace_event_raw_dma_fence { + struct trace_entry ent; + u32 __data_loc_driver; + u32 __data_loc_timeline; + unsigned int context; + unsigned int seqno; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_fence { + u32 driver; + const void *driver_ptr_; + u32 timeline; + const void *timeline_ptr_; +}; + +typedef void (*btf_trace_dma_fence_emit)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_init)(void *, struct dma_fence 
*); + +typedef void (*btf_trace_dma_fence_destroy)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_enable_signal)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_signaled)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_start)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_end)(void *, struct dma_fence *); + +struct default_wait_cb { + struct dma_fence_cb base; + struct task_struct *task; +}; + +enum { + ATA_MAX_DEVICES = 2, + ATA_MAX_PRD = 256, + ATA_SECT_SIZE = 512, + ATA_MAX_SECTORS_128 = 128, + ATA_MAX_SECTORS = 256, + ATA_MAX_SECTORS_1024 = 1024, + ATA_MAX_SECTORS_LBA48 = 65535, + ATA_MAX_SECTORS_TAPE = 65535, + ATA_MAX_TRIM_RNUM = 64, + ATA_ID_WORDS = 256, + ATA_ID_CONFIG = 0, + ATA_ID_CYLS = 1, + ATA_ID_HEADS = 3, + ATA_ID_SECTORS = 6, + ATA_ID_SERNO = 10, + ATA_ID_BUF_SIZE = 21, + ATA_ID_FW_REV = 23, + ATA_ID_PROD = 27, + ATA_ID_MAX_MULTSECT = 47, + ATA_ID_DWORD_IO = 48, + ATA_ID_TRUSTED = 48, + ATA_ID_CAPABILITY = 49, + ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_OLD_DMA_MODES = 52, + ATA_ID_FIELD_VALID = 53, + ATA_ID_CUR_CYLS = 54, + ATA_ID_CUR_HEADS = 55, + ATA_ID_CUR_SECTORS = 56, + ATA_ID_MULTSECT = 59, + ATA_ID_LBA_CAPACITY = 60, + ATA_ID_SWDMA_MODES = 62, + ATA_ID_MWDMA_MODES = 63, + ATA_ID_PIO_MODES = 64, + ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_DMA_TIME = 66, + ATA_ID_EIDE_PIO = 67, + ATA_ID_EIDE_PIO_IORDY = 68, + ATA_ID_ADDITIONAL_SUPP = 69, + ATA_ID_QUEUE_DEPTH = 75, + ATA_ID_SATA_CAPABILITY = 76, + ATA_ID_SATA_CAPABILITY_2 = 77, + ATA_ID_FEATURE_SUPP = 78, + ATA_ID_MAJOR_VER = 80, + ATA_ID_COMMAND_SET_1 = 82, + ATA_ID_COMMAND_SET_2 = 83, + ATA_ID_CFSSE = 84, + ATA_ID_CFS_ENABLE_1 = 85, + ATA_ID_CFS_ENABLE_2 = 86, + ATA_ID_CSF_DEFAULT = 87, + ATA_ID_UDMA_MODES = 88, + ATA_ID_HW_CONFIG = 93, + ATA_ID_SPG = 98, + ATA_ID_LBA_CAPACITY_2 = 100, + ATA_ID_SECTOR_SIZE = 106, + ATA_ID_WWN = 108, + ATA_ID_LOGICAL_SECTOR_SIZE = 117, + ATA_ID_COMMAND_SET_3 = 119, + ATA_ID_COMMAND_SET_4 = 120, + ATA_ID_LAST_LUN = 126, + ATA_ID_DLF = 128, + ATA_ID_CSFO = 129, + ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, + ATA_ID_DATA_SET_MGMT = 169, + ATA_ID_SCT_CMD_XPORT = 206, + ATA_ID_ROT_SPEED = 217, + ATA_ID_PIO4 = 2, + ATA_ID_SERNO_LEN = 20, + ATA_ID_FW_REV_LEN = 8, + ATA_ID_PROD_LEN = 40, + ATA_ID_WWN_LEN = 8, + ATA_PCI_CTL_OFS = 2, + ATA_PIO0 = 1, + ATA_PIO1 = 3, + ATA_PIO2 = 7, + ATA_PIO3 = 15, + ATA_PIO4 = 31, + ATA_PIO5 = 63, + ATA_PIO6 = 127, + ATA_PIO4_ONLY = 16, + ATA_SWDMA0 = 1, + ATA_SWDMA1 = 3, + ATA_SWDMA2 = 7, + ATA_SWDMA2_ONLY = 4, + ATA_MWDMA0 = 1, + ATA_MWDMA1 = 3, + ATA_MWDMA2 = 7, + ATA_MWDMA3 = 15, + ATA_MWDMA4 = 31, + ATA_MWDMA12_ONLY = 6, + ATA_MWDMA2_ONLY = 4, + ATA_UDMA0 = 1, + ATA_UDMA1 = 3, + ATA_UDMA2 = 7, + ATA_UDMA3 = 15, + ATA_UDMA4 = 31, + ATA_UDMA5 = 63, + ATA_UDMA6 = 127, + ATA_UDMA7 = 255, + ATA_UDMA24_ONLY = 20, + ATA_UDMA_MASK_40C = 7, + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = 2048, + ATA_PRD_EOT = -2147483648, + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = 8, + ATA_DMA_START = 1, + ATA_DMA_INTR = 4, + ATA_DMA_ERR = 2, + ATA_DMA_ACTIVE = 1, + ATA_HOB = 128, + ATA_NIEN = 2, + ATA_LBA = 64, + ATA_DEV1 = 16, + ATA_DEVICE_OBS = 160, + ATA_DEVCTL_OBS = 8, + ATA_BUSY = 128, + ATA_DRDY = 64, + ATA_DF = 32, + ATA_DSC = 16, + ATA_DRQ = 8, + ATA_CORR = 4, + ATA_SENSE = 2, + ATA_ERR = 1, + ATA_SRST = 4, + ATA_ICRC = 128, + ATA_BBK = 128, + ATA_UNC = 64, + ATA_MC = 32, + ATA_IDNF = 16, + ATA_MCR = 8, + ATA_ABORTED = 4, + 
ATA_TRK0NF = 2, + ATA_AMNF = 1, + ATAPI_LFS = 240, + ATAPI_EOM = 2, + ATAPI_ILI = 1, + ATAPI_IO = 2, + ATAPI_COD = 1, + ATA_REG_DATA = 0, + ATA_REG_ERR = 1, + ATA_REG_NSECT = 2, + ATA_REG_LBAL = 3, + ATA_REG_LBAM = 4, + ATA_REG_LBAH = 5, + ATA_REG_DEVICE = 6, + ATA_REG_STATUS = 7, + ATA_REG_FEATURE = 1, + ATA_REG_CMD = 7, + ATA_REG_BYTEL = 4, + ATA_REG_BYTEH = 5, + ATA_REG_DEVSEL = 6, + ATA_REG_IRQ = 2, + ATA_CMD_DEV_RESET = 8, + ATA_CMD_CHK_POWER = 229, + ATA_CMD_STANDBY = 226, + ATA_CMD_IDLE = 227, + ATA_CMD_EDD = 144, + ATA_CMD_DOWNLOAD_MICRO = 146, + ATA_CMD_DOWNLOAD_MICRO_DMA = 147, + ATA_CMD_NOP = 0, + ATA_CMD_FLUSH = 231, + ATA_CMD_FLUSH_EXT = 234, + ATA_CMD_ID_ATA = 236, + ATA_CMD_ID_ATAPI = 161, + ATA_CMD_SERVICE = 162, + ATA_CMD_READ = 200, + ATA_CMD_READ_EXT = 37, + ATA_CMD_READ_QUEUED = 38, + ATA_CMD_READ_STREAM_EXT = 43, + ATA_CMD_READ_STREAM_DMA_EXT = 42, + ATA_CMD_WRITE = 202, + ATA_CMD_WRITE_EXT = 53, + ATA_CMD_WRITE_QUEUED = 54, + ATA_CMD_WRITE_STREAM_EXT = 59, + ATA_CMD_WRITE_STREAM_DMA_EXT = 58, + ATA_CMD_WRITE_FUA_EXT = 61, + ATA_CMD_WRITE_QUEUED_FUA_EXT = 62, + ATA_CMD_FPDMA_READ = 96, + ATA_CMD_FPDMA_WRITE = 97, + ATA_CMD_NCQ_NON_DATA = 99, + ATA_CMD_FPDMA_SEND = 100, + ATA_CMD_FPDMA_RECV = 101, + ATA_CMD_PIO_READ = 32, + ATA_CMD_PIO_READ_EXT = 36, + ATA_CMD_PIO_WRITE = 48, + ATA_CMD_PIO_WRITE_EXT = 52, + ATA_CMD_READ_MULTI = 196, + ATA_CMD_READ_MULTI_EXT = 41, + ATA_CMD_WRITE_MULTI = 197, + ATA_CMD_WRITE_MULTI_EXT = 57, + ATA_CMD_WRITE_MULTI_FUA_EXT = 206, + ATA_CMD_SET_FEATURES = 239, + ATA_CMD_SET_MULTI = 198, + ATA_CMD_PACKET = 160, + ATA_CMD_VERIFY = 64, + ATA_CMD_VERIFY_EXT = 66, + ATA_CMD_WRITE_UNCORR_EXT = 69, + ATA_CMD_STANDBYNOW1 = 224, + ATA_CMD_IDLEIMMEDIATE = 225, + ATA_CMD_SLEEP = 230, + ATA_CMD_INIT_DEV_PARAMS = 145, + ATA_CMD_READ_NATIVE_MAX = 248, + ATA_CMD_READ_NATIVE_MAX_EXT = 39, + ATA_CMD_SET_MAX = 249, + ATA_CMD_SET_MAX_EXT = 55, + ATA_CMD_READ_LOG_EXT = 47, + ATA_CMD_WRITE_LOG_EXT = 63, + ATA_CMD_READ_LOG_DMA_EXT = 71, + ATA_CMD_WRITE_LOG_DMA_EXT = 87, + ATA_CMD_TRUSTED_NONDATA = 91, + ATA_CMD_TRUSTED_RCV = 92, + ATA_CMD_TRUSTED_RCV_DMA = 93, + ATA_CMD_TRUSTED_SND = 94, + ATA_CMD_TRUSTED_SND_DMA = 95, + ATA_CMD_PMP_READ = 228, + ATA_CMD_PMP_READ_DMA = 233, + ATA_CMD_PMP_WRITE = 232, + ATA_CMD_PMP_WRITE_DMA = 235, + ATA_CMD_CONF_OVERLAY = 177, + ATA_CMD_SEC_SET_PASS = 241, + ATA_CMD_SEC_UNLOCK = 242, + ATA_CMD_SEC_ERASE_PREP = 243, + ATA_CMD_SEC_ERASE_UNIT = 244, + ATA_CMD_SEC_FREEZE_LOCK = 245, + ATA_CMD_SEC_DISABLE_PASS = 246, + ATA_CMD_CONFIG_STREAM = 81, + ATA_CMD_SMART = 176, + ATA_CMD_MEDIA_LOCK = 222, + ATA_CMD_MEDIA_UNLOCK = 223, + ATA_CMD_DSM = 6, + ATA_CMD_CHK_MED_CRD_TYP = 209, + ATA_CMD_CFA_REQ_EXT_ERR = 3, + ATA_CMD_CFA_WRITE_NE = 56, + ATA_CMD_CFA_TRANS_SECT = 135, + ATA_CMD_CFA_ERASE = 192, + ATA_CMD_CFA_WRITE_MULT_NE = 205, + ATA_CMD_REQ_SENSE_DATA = 11, + ATA_CMD_SANITIZE_DEVICE = 180, + ATA_CMD_ZAC_MGMT_IN = 74, + ATA_CMD_ZAC_MGMT_OUT = 159, + ATA_CMD_RESTORE = 16, + ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 1, + ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 2, + ATA_SUBCMD_FPDMA_SEND_DSM = 0, + ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 2, + ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0, + ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 5, + ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 6, + ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 7, + ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0, + ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 1, + ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 2, + ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 3, + ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 4, + ATA_LOG_DIRECTORY = 
0, + ATA_LOG_SATA_NCQ = 16, + ATA_LOG_NCQ_NON_DATA = 18, + ATA_LOG_NCQ_SEND_RECV = 19, + ATA_LOG_CDL = 24, + ATA_LOG_CDL_SIZE = 512, + ATA_LOG_IDENTIFY_DEVICE = 48, + ATA_LOG_SENSE_NCQ = 15, + ATA_LOG_SENSE_NCQ_SIZE = 1024, + ATA_LOG_CONCURRENT_POSITIONING_RANGES = 71, + ATA_LOG_SUPPORTED_CAPABILITIES = 3, + ATA_LOG_CURRENT_SETTINGS = 4, + ATA_LOG_SECURITY = 6, + ATA_LOG_SATA_SETTINGS = 8, + ATA_LOG_ZONED_INFORMATION = 9, + ATA_LOG_DEVSLP_OFFSET = 48, + ATA_LOG_DEVSLP_SIZE = 8, + ATA_LOG_DEVSLP_MDAT = 0, + ATA_LOG_DEVSLP_MDAT_MASK = 31, + ATA_LOG_DEVSLP_DETO = 1, + ATA_LOG_DEVSLP_VALID = 7, + ATA_LOG_DEVSLP_VALID_MASK = 128, + ATA_LOG_NCQ_PRIO_OFFSET = 9, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = 1, + ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 4, + ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = 1, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 8, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 12, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 16, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = 2, + ATA_LOG_NCQ_SEND_RECV_SIZE = 20, + ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = 1, + ATA_LOG_NCQ_NON_DATA_ABORT_ALL = 2, + ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = 4, + ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = 8, + ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = 16, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 28, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = 1, + ATA_LOG_NCQ_NON_DATA_SIZE = 64, + ATA_CMD_READ_LONG = 34, + ATA_CMD_READ_LONG_ONCE = 35, + ATA_CMD_WRITE_LONG = 50, + ATA_CMD_WRITE_LONG_ONCE = 51, + SETFEATURES_XFER = 3, + XFER_UDMA_7 = 71, + XFER_UDMA_6 = 70, + XFER_UDMA_5 = 69, + XFER_UDMA_4 = 68, + XFER_UDMA_3 = 67, + XFER_UDMA_2 = 66, + XFER_UDMA_1 = 65, + XFER_UDMA_0 = 64, + XFER_MW_DMA_4 = 36, + XFER_MW_DMA_3 = 35, + XFER_MW_DMA_2 = 34, + XFER_MW_DMA_1 = 33, + XFER_MW_DMA_0 = 32, + XFER_SW_DMA_2 = 18, + XFER_SW_DMA_1 = 17, + XFER_SW_DMA_0 = 16, + XFER_PIO_6 = 14, + XFER_PIO_5 = 13, + XFER_PIO_4 = 12, + XFER_PIO_3 = 11, + XFER_PIO_2 = 10, + XFER_PIO_1 = 9, + XFER_PIO_0 = 8, + XFER_PIO_SLOW = 0, + SETFEATURES_WC_ON = 2, + SETFEATURES_WC_OFF = 130, + SETFEATURES_RA_ON = 170, + SETFEATURES_RA_OFF = 85, + SETFEATURES_AAM_ON = 66, + SETFEATURES_AAM_OFF = 194, + SETFEATURES_SPINUP = 7, + SETFEATURES_SPINUP_TIMEOUT = 30000, + SETFEATURES_SATA_ENABLE = 16, + SETFEATURES_SATA_DISABLE = 144, + SETFEATURES_CDL = 13, + SATA_FPDMA_OFFSET = 1, + SATA_FPDMA_AA = 2, + SATA_DIPM = 3, + SATA_FPDMA_IN_ORDER = 4, + SATA_AN = 5, + SATA_SSP = 6, + SATA_DEVSLP = 9, + SETFEATURE_SENSE_DATA = 195, + SETFEATURE_SENSE_DATA_SUCC_NCQ = 196, + ATA_SET_MAX_ADDR = 0, + ATA_SET_MAX_PASSWD = 1, + ATA_SET_MAX_LOCK = 2, + ATA_SET_MAX_UNLOCK = 3, + ATA_SET_MAX_FREEZE_LOCK = 4, + ATA_SET_MAX_PASSWD_DMA = 5, + ATA_SET_MAX_UNLOCK_DMA = 6, + ATA_DCO_RESTORE = 192, + ATA_DCO_FREEZE_LOCK = 193, + ATA_DCO_IDENTIFY = 194, + ATA_DCO_SET = 195, + ATA_SMART_ENABLE = 216, + ATA_SMART_READ_VALUES = 208, + ATA_SMART_READ_THRESHOLDS = 209, + ATA_DSM_TRIM = 1, + ATA_SMART_LBAM_PASS = 79, + ATA_SMART_LBAH_PASS = 194, + ATAPI_PKT_DMA = 1, + ATAPI_DMADIR = 4, + ATAPI_CDB_LEN = 16, + SATA_PMP_MAX_PORTS = 15, + SATA_PMP_CTRL_PORT = 15, + SATA_PMP_GSCR_DWORDS = 128, + SATA_PMP_GSCR_PROD_ID = 0, + SATA_PMP_GSCR_REV = 1, + SATA_PMP_GSCR_PORT_INFO = 2, + SATA_PMP_GSCR_ERROR = 32, + SATA_PMP_GSCR_ERROR_EN = 33, + SATA_PMP_GSCR_FEAT = 64, + 
SATA_PMP_GSCR_FEAT_EN = 96, + SATA_PMP_PSCR_STATUS = 0, + SATA_PMP_PSCR_ERROR = 1, + SATA_PMP_PSCR_CONTROL = 2, + SATA_PMP_FEAT_BIST = 1, + SATA_PMP_FEAT_PMREQ = 2, + SATA_PMP_FEAT_DYNSSC = 4, + SATA_PMP_FEAT_NOTIFY = 8, + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA40_SHORT = 3, + ATA_CBL_PATA_UNK = 4, + ATA_CBL_PATA_IGN = 5, + ATA_CBL_SATA = 6, + SCR_STATUS = 0, + SCR_ERROR = 1, + SCR_CONTROL = 2, + SCR_ACTIVE = 3, + SCR_NOTIFICATION = 4, + SERR_DATA_RECOVERED = 1, + SERR_COMM_RECOVERED = 2, + SERR_DATA = 256, + SERR_PERSISTENT = 512, + SERR_PROTOCOL = 1024, + SERR_INTERNAL = 2048, + SERR_PHYRDY_CHG = 65536, + SERR_PHY_INT_ERR = 131072, + SERR_COMM_WAKE = 262144, + SERR_10B_8B_ERR = 524288, + SERR_DISPARITY = 1048576, + SERR_CRC = 2097152, + SERR_HANDSHAKE = 4194304, + SERR_LINK_SEQ_ERR = 8388608, + SERR_TRANS_ST_ERROR = 16777216, + SERR_UNRECOG_FIS = 33554432, + SERR_DEV_XCHG = 67108864, +}; + +enum ata_prot_flags { + ATA_PROT_FLAG_PIO = 1, + ATA_PROT_FLAG_DMA = 2, + ATA_PROT_FLAG_NCQ = 4, + ATA_PROT_FLAG_ATAPI = 8, + ATA_PROT_UNKNOWN = 255, + ATA_PROT_NODATA = 0, + ATA_PROT_PIO = 1, + ATA_PROT_DMA = 2, + ATA_PROT_NCQ_NODATA = 4, + ATA_PROT_NCQ = 6, + ATAPI_PROT_NODATA = 8, + ATAPI_PROT_PIO = 9, + ATAPI_PROT_DMA = 10, +}; + +struct ata_bmdma_prd { + __le32 addr; + __le32 flags_len; +}; + +typedef u64 async_cookie_t; + +enum { + LIBATA_MAX_PRD = 128, + LIBATA_DUMB_MAX_PRD = 64, + ATA_DEF_QUEUE = 1, + ATA_MAX_QUEUE = 32, + ATA_TAG_INTERNAL = 32, + ATA_SHORT_PAUSE = 16, + ATAPI_MAX_DRAIN = 16384, + ATA_ALL_DEVICES = 3, + ATA_SHT_EMULATED = 1, + ATA_SHT_THIS_ID = -1, + ATA_TFLAG_LBA48 = 1, + ATA_TFLAG_ISADDR = 2, + ATA_TFLAG_DEVICE = 4, + ATA_TFLAG_WRITE = 8, + ATA_TFLAG_LBA = 16, + ATA_TFLAG_FUA = 32, + ATA_TFLAG_POLLING = 64, + ATA_DFLAG_LBA = 1, + ATA_DFLAG_LBA48 = 2, + ATA_DFLAG_CDB_INTR = 4, + ATA_DFLAG_NCQ = 8, + ATA_DFLAG_FLUSH_EXT = 16, + ATA_DFLAG_ACPI_PENDING = 32, + ATA_DFLAG_ACPI_FAILED = 64, + ATA_DFLAG_AN = 128, + ATA_DFLAG_TRUSTED = 256, + ATA_DFLAG_FUA = 512, + ATA_DFLAG_DMADIR = 1024, + ATA_DFLAG_NCQ_SEND_RECV = 2048, + ATA_DFLAG_NCQ_PRIO = 4096, + ATA_DFLAG_CDL = 8192, + ATA_DFLAG_CFG_MASK = 16383, + ATA_DFLAG_PIO = 16384, + ATA_DFLAG_NCQ_OFF = 32768, + ATA_DFLAG_SLEEPING = 65536, + ATA_DFLAG_DUBIOUS_XFER = 131072, + ATA_DFLAG_NO_UNLOAD = 262144, + ATA_DFLAG_UNLOCK_HPA = 524288, + ATA_DFLAG_INIT_MASK = 1048575, + ATA_DFLAG_NCQ_PRIO_ENABLED = 1048576, + ATA_DFLAG_CDL_ENABLED = 2097152, + ATA_DFLAG_RESUMING = 4194304, + ATA_DFLAG_DETACH = 16777216, + ATA_DFLAG_DETACHED = 33554432, + ATA_DFLAG_DA = 67108864, + ATA_DFLAG_DEVSLP = 134217728, + ATA_DFLAG_ACPI_DISABLED = 268435456, + ATA_DFLAG_D_SENSE = 536870912, + ATA_DFLAG_ZAC = 1073741824, + ATA_DFLAG_FEATURES_MASK = 201341696, + ATA_DEV_UNKNOWN = 0, + ATA_DEV_ATA = 1, + ATA_DEV_ATA_UNSUP = 2, + ATA_DEV_ATAPI = 3, + ATA_DEV_ATAPI_UNSUP = 4, + ATA_DEV_PMP = 5, + ATA_DEV_PMP_UNSUP = 6, + ATA_DEV_SEMB = 7, + ATA_DEV_SEMB_UNSUP = 8, + ATA_DEV_ZAC = 9, + ATA_DEV_ZAC_UNSUP = 10, + ATA_DEV_NONE = 11, + ATA_LFLAG_NO_HRST = 2, + ATA_LFLAG_NO_SRST = 4, + ATA_LFLAG_ASSUME_ATA = 8, + ATA_LFLAG_ASSUME_SEMB = 16, + ATA_LFLAG_ASSUME_CLASS = 24, + ATA_LFLAG_NO_RETRY = 32, + ATA_LFLAG_DISABLED = 64, + ATA_LFLAG_SW_ACTIVITY = 128, + ATA_LFLAG_NO_LPM = 256, + ATA_LFLAG_RST_ONCE = 512, + ATA_LFLAG_CHANGED = 1024, + ATA_LFLAG_NO_DEBOUNCE_DELAY = 2048, + ATA_FLAG_SLAVE_POSS = 1, + ATA_FLAG_SATA = 2, + ATA_FLAG_NO_LPM = 4, + ATA_FLAG_NO_LOG_PAGE = 32, + ATA_FLAG_NO_ATAPI = 64, + ATA_FLAG_PIO_DMA = 128, + 
ATA_FLAG_PIO_LBA48 = 256, + ATA_FLAG_PIO_POLLING = 512, + ATA_FLAG_NCQ = 1024, + ATA_FLAG_NO_POWEROFF_SPINDOWN = 2048, + ATA_FLAG_NO_HIBERNATE_SPINDOWN = 4096, + ATA_FLAG_DEBUGMSG = 8192, + ATA_FLAG_FPDMA_AA = 16384, + ATA_FLAG_IGN_SIMPLEX = 32768, + ATA_FLAG_NO_IORDY = 65536, + ATA_FLAG_ACPI_SATA = 131072, + ATA_FLAG_AN = 262144, + ATA_FLAG_PMP = 524288, + ATA_FLAG_FPDMA_AUX = 1048576, + ATA_FLAG_EM = 2097152, + ATA_FLAG_SW_ACTIVITY = 4194304, + ATA_FLAG_NO_DIPM = 8388608, + ATA_FLAG_SAS_HOST = 16777216, + ATA_PFLAG_EH_PENDING = 1, + ATA_PFLAG_EH_IN_PROGRESS = 2, + ATA_PFLAG_FROZEN = 4, + ATA_PFLAG_RECOVERED = 8, + ATA_PFLAG_LOADING = 16, + ATA_PFLAG_SCSI_HOTPLUG = 64, + ATA_PFLAG_INITIALIZING = 128, + ATA_PFLAG_RESETTING = 256, + ATA_PFLAG_UNLOADING = 512, + ATA_PFLAG_UNLOADED = 1024, + ATA_PFLAG_RESUMING = 65536, + ATA_PFLAG_SUSPENDED = 131072, + ATA_PFLAG_PM_PENDING = 262144, + ATA_PFLAG_INIT_GTM_VALID = 524288, + ATA_PFLAG_PIO32 = 1048576, + ATA_PFLAG_PIO32CHANGE = 2097152, + ATA_PFLAG_EXTERNAL = 4194304, + ATA_QCFLAG_ACTIVE = 1, + ATA_QCFLAG_DMAMAP = 2, + ATA_QCFLAG_RTF_FILLED = 4, + ATA_QCFLAG_IO = 8, + ATA_QCFLAG_RESULT_TF = 16, + ATA_QCFLAG_CLEAR_EXCL = 32, + ATA_QCFLAG_QUIET = 64, + ATA_QCFLAG_RETRY = 128, + ATA_QCFLAG_HAS_CDL = 256, + ATA_QCFLAG_EH = 65536, + ATA_QCFLAG_SENSE_VALID = 131072, + ATA_QCFLAG_EH_SCHEDULED = 262144, + ATA_QCFLAG_EH_SUCCESS_CMD = 524288, + ATA_HOST_SIMPLEX = 1, + ATA_HOST_STARTED = 2, + ATA_HOST_PARALLEL_SCAN = 4, + ATA_HOST_IGNORE_ATA = 8, + ATA_HOST_NO_PART = 16, + ATA_HOST_NO_SSC = 32, + ATA_HOST_NO_DEVSLP = 64, + ATA_TMOUT_INTERNAL_QUICK = 5000, + ATA_TMOUT_MAX_PARK = 30000, + ATA_TMOUT_FF_WAIT_LONG = 2000, + ATA_TMOUT_FF_WAIT = 800, + ATA_WAIT_AFTER_RESET = 150, + ATA_TMOUT_PMP_SRST_WAIT = 10000, + ATA_TMOUT_SPURIOUS_PHY = 10000, + BUS_UNKNOWN = 0, + BUS_DMA = 1, + BUS_IDLE = 2, + BUS_NOINTR = 3, + BUS_NODATA = 4, + BUS_TIMER = 5, + BUS_PIO = 6, + BUS_EDD = 7, + BUS_IDENTIFY = 8, + BUS_PACKET = 9, + PORT_UNKNOWN = 0, + PORT_ENABLED = 1, + PORT_DISABLED = 2, + ATA_NR_PIO_MODES = 7, + ATA_NR_MWDMA_MODES = 5, + ATA_NR_UDMA_MODES = 8, + ATA_SHIFT_PIO = 0, + ATA_SHIFT_MWDMA = 7, + ATA_SHIFT_UDMA = 12, + ATA_SHIFT_PRIO = 6, + ATA_PRIO_HIGH = 2, + ATA_DMA_PAD_SZ = 4, + ATA_ERING_SIZE = 32, + ATA_DEFER_LINK = 1, + ATA_DEFER_PORT = 2, + ATA_EH_DESC_LEN = 80, + ATA_EH_REVALIDATE = 1, + ATA_EH_SOFTRESET = 2, + ATA_EH_HARDRESET = 4, + ATA_EH_RESET = 6, + ATA_EH_ENABLE_LINK = 8, + ATA_EH_PARK = 32, + ATA_EH_GET_SUCCESS_SENSE = 64, + ATA_EH_SET_ACTIVE = 128, + ATA_EH_PERDEV_MASK = 225, + ATA_EH_ALL_ACTIONS = 15, + ATA_EHI_HOTPLUGGED = 1, + ATA_EHI_NO_AUTOPSY = 4, + ATA_EHI_QUIET = 8, + ATA_EHI_NO_RECOVERY = 16, + ATA_EHI_DID_SOFTRESET = 65536, + ATA_EHI_DID_HARDRESET = 131072, + ATA_EHI_PRINTINFO = 262144, + ATA_EHI_SETMODE = 524288, + ATA_EHI_POST_SETMODE = 1048576, + ATA_EHI_DID_PRINT_QUIRKS = 2097152, + ATA_EHI_DID_RESET = 196608, + ATA_EHI_TO_SLAVE_MASK = 12, + ATA_EH_MAX_TRIES = 5, + ATA_LINK_RESUME_TRIES = 5, + ATA_EH_DEV_TRIES = 3, + ATA_EH_PMP_TRIES = 5, + ATA_EH_PMP_LINK_TRIES = 3, + SATA_PMP_RW_TIMEOUT = 3000, + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, + ATA_QUIRK_DIAGNOSTIC = 1, + ATA_QUIRK_NODMA = 2, + ATA_QUIRK_NONCQ = 4, + ATA_QUIRK_MAX_SEC_128 = 8, + ATA_QUIRK_BROKEN_HPA = 16, + ATA_QUIRK_DISABLE = 32, + ATA_QUIRK_HPA_SIZE = 64, + ATA_QUIRK_IVB = 128, + ATA_QUIRK_STUCK_ERR = 256, + ATA_QUIRK_BRIDGE_OK = 512, + ATA_QUIRK_ATAPI_MOD16_DMA = 1024, + ATA_QUIRK_FIRMWARE_WARN = 2048, + ATA_QUIRK_1_5_GBPS = 4096, + ATA_QUIRK_NOSETXFER = 8192, + 
ATA_QUIRK_BROKEN_FPDMA_AA = 16384, + ATA_QUIRK_DUMP_ID = 32768, + ATA_QUIRK_MAX_SEC_LBA48 = 65536, + ATA_QUIRK_ATAPI_DMADIR = 131072, + ATA_QUIRK_NO_NCQ_TRIM = 262144, + ATA_QUIRK_NOLPM = 524288, + ATA_QUIRK_WD_BROKEN_LPM = 1048576, + ATA_QUIRK_ZERO_AFTER_TRIM = 2097152, + ATA_QUIRK_NO_DMA_LOG = 4194304, + ATA_QUIRK_NOTRIM = 8388608, + ATA_QUIRK_MAX_SEC_1024 = 16777216, + ATA_QUIRK_MAX_TRIM_128M = 33554432, + ATA_QUIRK_NO_NCQ_ON_ATI = 67108864, + ATA_QUIRK_NO_ID_DEV_LOG = 134217728, + ATA_QUIRK_NO_LOG_DIR = 268435456, + ATA_QUIRK_NO_FUA = 536870912, + ATA_DMA_MASK_ATA = 1, + ATA_DMA_MASK_ATAPI = 2, + ATA_DMA_MASK_CFA = 4, + ATAPI_READ = 0, + ATAPI_WRITE = 1, + ATAPI_READ_CD = 2, + ATAPI_PASS_THRU = 3, + ATAPI_MISC = 4, + ATA_TIMING_SETUP = 1, + ATA_TIMING_ACT8B = 2, + ATA_TIMING_REC8B = 4, + ATA_TIMING_CYC8B = 8, + ATA_TIMING_8BIT = 14, + ATA_TIMING_ACTIVE = 16, + ATA_TIMING_RECOVER = 32, + ATA_TIMING_DMACK_HOLD = 64, + ATA_TIMING_CYCLE = 128, + ATA_TIMING_UDMA = 256, + ATA_TIMING_ALL = 511, + ATA_ACPI_FILTER_SETXFER = 1, + ATA_ACPI_FILTER_LOCK = 2, + ATA_ACPI_FILTER_DIPM = 4, + ATA_ACPI_FILTER_FPDMA_OFFSET = 8, + ATA_ACPI_FILTER_FPDMA_AA = 16, + ATA_ACPI_FILTER_DEFAULT = 7, +}; + +enum ata_completion_errors { + AC_ERR_OK = 0, + AC_ERR_DEV = 1, + AC_ERR_HSM = 2, + AC_ERR_TIMEOUT = 4, + AC_ERR_MEDIA = 8, + AC_ERR_ATA_BUS = 16, + AC_ERR_HOST_BUS = 32, + AC_ERR_SYSTEM = 64, + AC_ERR_INVALID = 128, + AC_ERR_OTHER = 256, + AC_ERR_NODEV_HINT = 512, + AC_ERR_NCQ = 1024, +}; + +enum ata_lpm_policy { + ATA_LPM_UNKNOWN = 0, + ATA_LPM_MAX_POWER = 1, + ATA_LPM_MED_POWER = 2, + ATA_LPM_MED_POWER_WITH_DIPM = 3, + ATA_LPM_MIN_POWER_WITH_PARTIAL = 4, + ATA_LPM_MIN_POWER = 5, +}; + +enum ata_lpm_hints { + ATA_LPM_EMPTY = 1, + ATA_LPM_HIPM = 2, + ATA_LPM_WAKE_ONLY = 4, +}; + +struct ata_queued_cmd; + +typedef void (*ata_qc_cb_t)(struct ata_queued_cmd *); + +struct ata_taskfile { + long unsigned int flags; + u8 protocol; + u8 ctl; + u8 hob_feature; + u8 hob_nsect; + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + union { + u8 error; + u8 feature; + }; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + union { + u8 status; + u8 command; + }; + u32 auxiliary; +}; + +struct ata_port; + +struct ata_device; + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + struct scsi_cmnd *scsicmd; + void (*scsidone)(struct scsi_cmnd *); + struct ata_taskfile tf; + u8 cdb[16]; + long unsigned int flags; + unsigned int tag; + unsigned int hw_tag; + unsigned int n_elem; + unsigned int orig_n_elem; + int dma_dir; + unsigned int sect_size; + unsigned int nbytes; + unsigned int extrabytes; + unsigned int curbytes; + struct scatterlist sgent; + struct scatterlist *sg; + struct scatterlist *cursg; + unsigned int cursg_ofs; + unsigned int err_mask; + struct ata_taskfile result_tf; + ata_qc_cb_t complete_fn; + void *private_data; + void *lldd_task; +}; + +struct ata_link; + +typedef int (*ata_prereset_fn_t)(struct ata_link *, long unsigned int); + +struct ata_eh_info { + struct ata_device *dev; + u32 serror; + unsigned int err_mask; + unsigned int action; + unsigned int dev_action[2]; + unsigned int flags; + unsigned int probe_mask; + char desc[80]; + int desc_len; +}; + +struct ata_eh_context { + struct ata_eh_info i; + int tries[2]; + int cmd_timeout_idx[16]; + unsigned int classes[2]; + unsigned int did_probe_mask; + unsigned int unloaded_mask; + unsigned int saved_ncq_enabled; + u8 saved_xfer_mode[2]; + long unsigned int last_reset; +}; + +struct ata_ering_entry { + unsigned int eflags; + unsigned 
int err_mask; + u64 timestamp; +}; + +struct ata_ering { + int cursor; + struct ata_ering_entry ring[32]; +}; + +struct ata_cpr_log; + +struct ata_cdl; + +struct ata_device { + struct ata_link *link; + unsigned int devno; + unsigned int quirks; + long unsigned int flags; + struct scsi_device *sdev; + void *private_data; + struct device tdev; + u64 n_sectors; + u64 n_native_sectors; + unsigned int class; + long unsigned int unpark_deadline; + u8 pio_mode; + u8 dma_mode; + u8 xfer_mode; + unsigned int xfer_shift; + unsigned int multi_count; + unsigned int max_sectors; + unsigned int cdb_len; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + u16 cylinders; + u16 heads; + u16 sectors; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union { + u16 id[256]; + u32 gscr[128]; + }; + u8 devslp_timing[8]; + u8 ncq_send_recv_cmds[20]; + u8 ncq_non_data_cmds[64]; + u32 zac_zoned_cap; + u32 zac_zones_optimal_open; + u32 zac_zones_optimal_nonseq; + u32 zac_zones_max_open; + struct ata_cpr_log *cpr_log; + struct ata_cdl *cdl; + int spdn_cnt; + struct ata_ering ering; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u8 sector_buf[512]; +}; + +struct ata_link { + struct ata_port *ap; + int pmp; + struct device tdev; + unsigned int active_tag; + u32 sactive; + unsigned int flags; + u32 saved_scontrol; + unsigned int hw_sata_spd_limit; + unsigned int sata_spd_limit; + unsigned int sata_spd; + enum ata_lpm_policy lpm_policy; + struct ata_eh_info eh_info; + struct ata_eh_context eh_context; + long: 64; + long: 64; + long: 64; + long: 64; + struct ata_device device[2]; + long unsigned int last_lpm_change; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef int (*ata_reset_fn_t)(struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*ata_postreset_fn_t)(struct ata_link *, unsigned int *); + +enum sw_activity { + OFF = 0, + BLINK_ON = 1, + BLINK_OFF = 2, +}; + +struct ata_ioports { + void *cmd_addr; + void *data_addr; + void *error_addr; + void *feature_addr; + void *nsect_addr; + void *lbal_addr; + void *lbam_addr; + void *lbah_addr; + void *device_addr; + void *status_addr; + void *command_addr; + void *altstatus_addr; + void *ctl_addr; + void *bmdma_addr; + void *scr_addr; +}; + +struct ata_port_operations; + +struct ata_host { + spinlock_t lock; + struct device *dev; + void * const *iomap; + unsigned int n_ports; + unsigned int n_tags; + void *private_data; + struct ata_port_operations *ops; + long unsigned int flags; + struct kref kref; + struct mutex eh_mutex; + struct task_struct *eh_owner; + struct ata_port *simplex_claimed; + struct ata_port *ports[0]; +}; + +struct ata_port_operations { + int (*qc_defer)(struct ata_queued_cmd *); + int (*check_atapi_dma)(struct ata_queued_cmd *); + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *); + unsigned int (*qc_issue)(struct ata_queued_cmd *); + void (*qc_fill_rtf)(struct ata_queued_cmd *); + void (*qc_ncq_fill_rtf)(struct ata_port *, u64); + int (*cable_detect)(struct ata_port *); + unsigned int (*mode_filter)(struct ata_device *, unsigned int); + void (*set_piomode)(struct ata_port *, struct ata_device *); + void (*set_dmamode)(struct ata_port *, struct ata_device *); + int (*set_mode)(struct ata_link *, struct ata_device **); + unsigned int (*read_id)(struct ata_device *, struct ata_taskfile *, __le16 *); + void (*dev_config)(struct ata_device *); + void (*freeze)(struct ata_port *); + void 
(*thaw)(struct ata_port *); + ata_prereset_fn_t prereset; + ata_reset_fn_t softreset; + ata_reset_fn_t hardreset; + ata_postreset_fn_t postreset; + ata_prereset_fn_t pmp_prereset; + ata_reset_fn_t pmp_softreset; + ata_reset_fn_t pmp_hardreset; + ata_postreset_fn_t pmp_postreset; + void (*error_handler)(struct ata_port *); + void (*lost_interrupt)(struct ata_port *); + void (*post_internal_cmd)(struct ata_queued_cmd *); + void (*sched_eh)(struct ata_port *); + void (*end_eh)(struct ata_port *); + int (*scr_read)(struct ata_link *, unsigned int, u32 *); + int (*scr_write)(struct ata_link *, unsigned int, u32); + void (*pmp_attach)(struct ata_port *); + void (*pmp_detach)(struct ata_port *); + int (*set_lpm)(struct ata_link *, enum ata_lpm_policy, unsigned int); + int (*port_suspend)(struct ata_port *, pm_message_t); + int (*port_resume)(struct ata_port *); + int (*port_start)(struct ata_port *); + void (*port_stop)(struct ata_port *); + void (*host_stop)(struct ata_host *); + void (*sff_dev_select)(struct ata_port *, unsigned int); + void (*sff_set_devctl)(struct ata_port *, u8); + u8 (*sff_check_status)(struct ata_port *); + u8 (*sff_check_altstatus)(struct ata_port *); + void (*sff_tf_load)(struct ata_port *, const struct ata_taskfile *); + void (*sff_tf_read)(struct ata_port *, struct ata_taskfile *); + void (*sff_exec_command)(struct ata_port *, const struct ata_taskfile *); + unsigned int (*sff_data_xfer)(struct ata_queued_cmd *, unsigned char *, unsigned int, int); + void (*sff_irq_on)(struct ata_port *); + bool (*sff_irq_check)(struct ata_port *); + void (*sff_irq_clear)(struct ata_port *); + void (*sff_drain_fifo)(struct ata_queued_cmd *); + void (*bmdma_setup)(struct ata_queued_cmd *); + void (*bmdma_start)(struct ata_queued_cmd *); + void (*bmdma_stop)(struct ata_queued_cmd *); + u8 (*bmdma_status)(struct ata_port *); + ssize_t (*em_show)(struct ata_port *, char *); + ssize_t (*em_store)(struct ata_port *, const char *, size_t); + ssize_t (*sw_activity_show)(struct ata_device *, char *); + ssize_t (*sw_activity_store)(struct ata_device *, enum sw_activity); + ssize_t (*transmit_led_message)(struct ata_port *, u32, ssize_t); + const struct ata_port_operations *inherits; +}; + +struct ata_port_stats { + long unsigned int unhandled_irq; + long unsigned int idle_irq; + long unsigned int rw_reqbuf; +}; + +struct ata_port { + struct Scsi_Host *scsi_host; + struct ata_port_operations *ops; + spinlock_t *lock; + long unsigned int flags; + unsigned int pflags; + unsigned int print_id; + unsigned int port_no; + struct ata_ioports ioaddr; + u8 ctl; + u8 last_ctl; + struct ata_link *sff_pio_task_link; + struct delayed_work sff_pio_task; + struct ata_bmdma_prd *bmdma_prd; + dma_addr_t bmdma_prd_dma; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + unsigned int cbl; + struct ata_queued_cmd qcmd[33]; + u64 qc_active; + int nr_active_links; + long: 64; + long: 64; + struct ata_link link; + struct ata_link *slave_link; + int nr_pmp_links; + struct ata_link *pmp_link; + struct ata_link *excl_link; + struct ata_port_stats stats; + struct ata_host *host; + struct device *dev; + struct device tdev; + struct mutex scsi_scan_mutex; + struct delayed_work hotplug_task; + struct delayed_work scsi_rescan_task; + unsigned int hsm_task_state; + struct list_head eh_done_q; + wait_queue_head_t eh_wait_q; + int eh_tries; + struct completion park_req_pending; + pm_message_t pm_mesg; + enum ata_lpm_policy target_lpm_policy; + struct timer_list fastdrain_timer; + unsigned int 
fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; + void *private_data; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct ata_cpr { + u8 num; + u8 num_storage_elements; + u64 start_lba; + u64 num_lbas; +}; + +struct ata_cpr_log { + u8 nr_cpr; + struct ata_cpr cpr[0]; +}; + +struct ata_cdl { + u8 desc_log_buf[512]; + u8 ncq_sense_log_buf[1024]; +}; + +struct ata_port_info { + long unsigned int flags; + long unsigned int link_flags; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + struct ata_port_operations *port_ops; + void *private_data; +}; + +enum ata_link_iter_mode { + ATA_LITER_EDGE = 0, + ATA_LITER_HOST_FIRST = 1, + ATA_LITER_PMP_FIRST = 2, +}; + +struct phy_configure_opts_dp { + unsigned int link_rate; + unsigned int lanes; + unsigned int voltage[4]; + unsigned int pre[4]; + u8 ssc: 1; + u8 set_rate: 1; + u8 set_lanes: 1; + u8 set_voltages: 1; +}; + +struct phy_configure_opts_lvds { + unsigned int bits_per_lane_and_dclk_cycle; + long unsigned int differential_clk_rate; + unsigned int lanes; + bool is_slave; +}; + +struct phy_configure_opts_mipi_dphy { + unsigned int clk_miss; + unsigned int clk_post; + unsigned int clk_pre; + unsigned int clk_prepare; + unsigned int clk_settle; + unsigned int clk_term_en; + unsigned int clk_trail; + unsigned int clk_zero; + unsigned int d_term_en; + unsigned int eot; + unsigned int hs_exit; + unsigned int hs_prepare; + unsigned int hs_settle; + unsigned int hs_skip; + unsigned int hs_trail; + unsigned int hs_zero; + unsigned int init; + unsigned int lpx; + unsigned int ta_get; + unsigned int ta_go; + unsigned int ta_sure; + unsigned int wakeup; + long unsigned int hs_clk_rate; + long unsigned int lp_clk_rate; + unsigned char lanes; +}; + +enum phy_mode { + PHY_MODE_INVALID = 0, + PHY_MODE_USB_HOST = 1, + PHY_MODE_USB_HOST_LS = 2, + PHY_MODE_USB_HOST_FS = 3, + PHY_MODE_USB_HOST_HS = 4, + PHY_MODE_USB_HOST_SS = 5, + PHY_MODE_USB_DEVICE = 6, + PHY_MODE_USB_DEVICE_LS = 7, + PHY_MODE_USB_DEVICE_FS = 8, + PHY_MODE_USB_DEVICE_HS = 9, + PHY_MODE_USB_DEVICE_SS = 10, + PHY_MODE_USB_OTG = 11, + PHY_MODE_UFS_HS_A = 12, + PHY_MODE_UFS_HS_B = 13, + PHY_MODE_PCIE = 14, + PHY_MODE_ETHERNET = 15, + PHY_MODE_MIPI_DPHY = 16, + PHY_MODE_SATA = 17, + PHY_MODE_LVDS = 18, + PHY_MODE_DP = 19, +}; + +enum phy_media { + PHY_MEDIA_DEFAULT = 0, + PHY_MEDIA_SR = 1, + PHY_MEDIA_DAC = 2, +}; + +union phy_configure_opts { + struct phy_configure_opts_mipi_dphy mipi_dphy; + struct phy_configure_opts_dp dp; + struct phy_configure_opts_lvds lvds; +}; + +struct phy; + +struct phy_ops { + int (*init)(struct phy *); + int (*exit)(struct phy *); + int (*power_on)(struct phy *); + int (*power_off)(struct phy *); + int (*set_mode)(struct phy *, enum phy_mode, int); + int (*set_media)(struct phy *, enum phy_media); + int (*set_speed)(struct phy *, int); + int (*configure)(struct phy *, union phy_configure_opts *); + int (*validate)(struct phy *, enum phy_mode, int, union phy_configure_opts *); + int (*reset)(struct phy *); + int (*calibrate)(struct phy *); + int (*connect)(struct phy *, int); + int (*disconnect)(struct phy *, int); + void (*release)(struct phy *); + struct module *owner; +}; + +struct phy_attrs { + u32 bus_width; + u32 max_link_rate; + enum phy_mode mode; +}; + +struct phy { + struct device dev; + int id; + const struct phy_ops *ops; + struct mutex mutex; + int init_count; + int power_count; + struct phy_attrs attrs; + struct regulator *pwr; + struct dentry *debugfs; +}; + +enum { + AHCI_MAX_PORTS = 32, + 
AHCI_MAX_SG = 168, + AHCI_DMA_BOUNDARY = 4294967295, + AHCI_MAX_CMDS = 32, + AHCI_CMD_SZ = 32, + AHCI_CMD_SLOT_SZ = 1024, + AHCI_RX_FIS_SZ = 256, + AHCI_CMD_TBL_CDB = 64, + AHCI_CMD_TBL_HDR_SZ = 128, + AHCI_CMD_TBL_SZ = 2816, + AHCI_CMD_TBL_AR_SZ = 90112, + AHCI_PORT_PRIV_DMA_SZ = 91392, + AHCI_PORT_PRIV_FBS_DMA_SZ = 95232, + AHCI_IRQ_ON_SG = 2147483648, + AHCI_CMD_ATAPI = 32, + AHCI_CMD_WRITE = 64, + AHCI_CMD_PREFETCH = 128, + AHCI_CMD_RESET = 256, + AHCI_CMD_CLR_BUSY = 1024, + RX_FIS_PIO_SETUP = 32, + RX_FIS_D2H_REG = 64, + RX_FIS_SDB = 88, + RX_FIS_UNK = 96, + HOST_CAP = 0, + HOST_CTL = 4, + HOST_IRQ_STAT = 8, + HOST_PORTS_IMPL = 12, + HOST_VERSION = 16, + HOST_EM_LOC = 28, + HOST_EM_CTL = 32, + HOST_CAP2 = 36, + HOST_RESET = 1, + HOST_IRQ_EN = 2, + HOST_MRSM = 4, + HOST_AHCI_EN = 2147483648, + HOST_CAP_SXS = 32, + HOST_CAP_EMS = 64, + HOST_CAP_CCC = 128, + HOST_CAP_PART = 8192, + HOST_CAP_SSC = 16384, + HOST_CAP_PIO_MULTI = 32768, + HOST_CAP_FBS = 65536, + HOST_CAP_PMP = 131072, + HOST_CAP_ONLY = 262144, + HOST_CAP_CLO = 16777216, + HOST_CAP_LED = 33554432, + HOST_CAP_ALPM = 67108864, + HOST_CAP_SSS = 134217728, + HOST_CAP_MPS = 268435456, + HOST_CAP_SNTF = 536870912, + HOST_CAP_NCQ = 1073741824, + HOST_CAP_64 = 2147483648, + HOST_CAP2_BOH = 1, + HOST_CAP2_NVMHCI = 2, + HOST_CAP2_APST = 4, + HOST_CAP2_SDS = 8, + HOST_CAP2_SADM = 16, + HOST_CAP2_DESO = 32, + PORT_LST_ADDR = 0, + PORT_LST_ADDR_HI = 4, + PORT_FIS_ADDR = 8, + PORT_FIS_ADDR_HI = 12, + PORT_IRQ_STAT = 16, + PORT_IRQ_MASK = 20, + PORT_CMD = 24, + PORT_TFDATA = 32, + PORT_SIG = 36, + PORT_CMD_ISSUE = 56, + PORT_SCR_STAT = 40, + PORT_SCR_CTL = 44, + PORT_SCR_ERR = 48, + PORT_SCR_ACT = 52, + PORT_SCR_NTF = 60, + PORT_FBS = 64, + PORT_DEVSLP = 68, + PORT_IRQ_COLD_PRES = 2147483648, + PORT_IRQ_TF_ERR = 1073741824, + PORT_IRQ_HBUS_ERR = 536870912, + PORT_IRQ_HBUS_DATA_ERR = 268435456, + PORT_IRQ_IF_ERR = 134217728, + PORT_IRQ_IF_NONFATAL = 67108864, + PORT_IRQ_OVERFLOW = 16777216, + PORT_IRQ_BAD_PMP = 8388608, + PORT_IRQ_PHYRDY = 4194304, + PORT_IRQ_DMPS = 128, + PORT_IRQ_CONNECT = 64, + PORT_IRQ_SG_DONE = 32, + PORT_IRQ_UNK_FIS = 16, + PORT_IRQ_SDB_FIS = 8, + PORT_IRQ_DMAS_FIS = 4, + PORT_IRQ_PIOS_FIS = 2, + PORT_IRQ_D2H_REG_FIS = 1, + PORT_IRQ_FREEZE = 683671632, + PORT_IRQ_ERROR = 2025848912, + DEF_PORT_IRQ = 2025848959, + PORT_CMD_ASP = 134217728, + PORT_CMD_ALPE = 67108864, + PORT_CMD_ATAPI = 16777216, + PORT_CMD_FBSCP = 4194304, + PORT_CMD_ESP = 2097152, + PORT_CMD_CPD = 1048576, + PORT_CMD_MPSP = 524288, + PORT_CMD_HPCP = 262144, + PORT_CMD_PMP = 131072, + PORT_CMD_LIST_ON = 32768, + PORT_CMD_FIS_ON = 16384, + PORT_CMD_FIS_RX = 16, + PORT_CMD_CLO = 8, + PORT_CMD_POWER_ON = 4, + PORT_CMD_SPIN_UP = 2, + PORT_CMD_START = 1, + PORT_CMD_ICC_MASK = 4026531840, + PORT_CMD_ICC_ACTIVE = 268435456, + PORT_CMD_ICC_PARTIAL = 536870912, + PORT_CMD_ICC_SLUMBER = 1610612736, + PORT_CMD_CAP = 8126464, + PORT_FBS_DWE_OFFSET = 16, + PORT_FBS_ADO_OFFSET = 12, + PORT_FBS_DEV_OFFSET = 8, + PORT_FBS_DEV_MASK = 3840, + PORT_FBS_SDE = 4, + PORT_FBS_DEC = 2, + PORT_FBS_EN = 1, + PORT_DEVSLP_DM_OFFSET = 25, + PORT_DEVSLP_DM_MASK = 503316480, + PORT_DEVSLP_DITO_OFFSET = 15, + PORT_DEVSLP_MDAT_OFFSET = 10, + PORT_DEVSLP_DETO_OFFSET = 2, + PORT_DEVSLP_DSP = 2, + PORT_DEVSLP_ADSE = 1, + AHCI_HFLAG_NO_NCQ = 1, + AHCI_HFLAG_IGN_IRQ_IF_ERR = 2, + AHCI_HFLAG_IGN_SERR_INTERNAL = 4, + AHCI_HFLAG_32BIT_ONLY = 8, + AHCI_HFLAG_MV_PATA = 16, + AHCI_HFLAG_NO_MSI = 32, + AHCI_HFLAG_NO_PMP = 64, + AHCI_HFLAG_SECT255 = 256, + AHCI_HFLAG_YES_NCQ = 512, + 
AHCI_HFLAG_NO_SUSPEND = 1024, + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = 2048, + AHCI_HFLAG_NO_SNTF = 4096, + AHCI_HFLAG_NO_FPDMA_AA = 8192, + AHCI_HFLAG_YES_FBS = 16384, + AHCI_HFLAG_DELAY_ENGINE = 32768, + AHCI_HFLAG_NO_DEVSLP = 131072, + AHCI_HFLAG_NO_FBS = 262144, + AHCI_HFLAG_MULTI_MSI = 1048576, + AHCI_HFLAG_WAKE_BEFORE_STOP = 4194304, + AHCI_HFLAG_YES_ALPM = 8388608, + AHCI_HFLAG_NO_WRITE_TO_RO = 16777216, + AHCI_HFLAG_SUSPEND_PHYS = 33554432, + AHCI_HFLAG_NO_SXS = 67108864, + AHCI_HFLAG_43BIT_ONLY = 134217728, + AHCI_HFLAG_INTEL_PCS_QUIRK = 268435456, + AHCI_FLAG_COMMON = 393346, + ICH_MAP = 144, + PCS_6 = 146, + PCS_7 = 148, + EM_MAX_SLOTS = 15, + EM_MAX_RETRY = 5, + EM_CTL_RST = 512, + EM_CTL_TM = 256, + EM_CTL_MR = 1, + EM_CTL_ALHD = 67108864, + EM_CTL_XMT = 33554432, + EM_CTL_SMB = 16777216, + EM_CTL_SGPIO = 524288, + EM_CTL_SES = 262144, + EM_CTL_SAFTE = 131072, + EM_CTL_LED = 65536, + EM_MSG_TYPE_LED = 1, + EM_MSG_TYPE_SAFTE = 2, + EM_MSG_TYPE_SES2 = 4, + EM_MSG_TYPE_SGPIO = 8, +}; + +struct ahci_cmd_hdr { + __le32 opts; + __le32 status; + __le32 tbl_addr; + __le32 tbl_addr_hi; + __le32 reserved[4]; +}; + +struct ahci_sg { + __le32 addr; + __le32 addr_hi; + __le32 reserved; + __le32 flags_size; +}; + +struct ahci_em_priv { + enum sw_activity blink_policy; + struct timer_list timer; + long unsigned int saved_activity; + long unsigned int activity; + long unsigned int led_state; + struct ata_link *link; +}; + +struct ahci_port_priv { + struct ata_link *active_link; + struct ahci_cmd_hdr *cmd_slot; + dma_addr_t cmd_slot_dma; + void *cmd_tbl; + dma_addr_t cmd_tbl_dma; + void *rx_fis; + dma_addr_t rx_fis_dma; + unsigned int ncq_saw_d2h: 1; + unsigned int ncq_saw_dmas: 1; + unsigned int ncq_saw_sdb: 1; + spinlock_t lock; + u32 intr_mask; + bool fbs_supported; + bool fbs_enabled; + int fbs_last_dev; + struct ahci_em_priv em_priv[15]; + char *irq_desc; +}; + +struct ahci_host_priv { + unsigned int flags; + u32 mask_port_map; + void *mmio; + u32 cap; + u32 cap2; + u32 version; + u32 port_map; + u32 saved_cap; + u32 saved_cap2; + u32 saved_port_map; + u32 saved_port_cap[32]; + u32 em_loc; + u32 em_buf_sz; + u32 em_msg_type; + u32 remapped_nvme; + bool got_runtime_pm; + unsigned int n_clks; + struct clk_bulk_data *clks; + unsigned int f_rsts; + struct reset_control *rsts; + struct regulator **target_pwrs; + struct regulator *ahci_regulator; + struct regulator *phy_regulator; + struct phy **phys; + unsigned int nports; + void *plat_data; + unsigned int irq; + void (*start_engine)(struct ata_port *); + int (*stop_engine)(struct ata_port *); + irqreturn_t (*irq_handler)(int, void *); + int (*get_irq_vector)(struct ata_host *, int); +}; + +enum e1000_mac_type___2 { + e1000_undefined = 0, + e1000_82575 = 1, + e1000_82576 = 2, + e1000_82580 = 3, + e1000_i350 = 4, + e1000_i354 = 5, + e1000_i210 = 6, + e1000_i211 = 7, + e1000_num_macs = 8, +}; + +enum e1000_nvm_type___2 { + e1000_nvm_unknown___2 = 0, + e1000_nvm_none___2 = 1, + e1000_nvm_eeprom_spi___2 = 2, + e1000_nvm_flash_hw___2 = 3, + e1000_nvm_invm = 4, + e1000_nvm_flash_sw___2 = 5, +}; + +enum e1000_phy_type___2 { + e1000_phy_unknown___2 = 0, + e1000_phy_none___2 = 1, + e1000_phy_m88___2 = 2, + e1000_phy_igp___2 = 3, + e1000_phy_igp_2___2 = 4, + e1000_phy_gg82563___2 = 5, + e1000_phy_igp_3___2 = 6, + e1000_phy_ife___2 = 7, + e1000_phy_82580 = 8, + e1000_phy_i210 = 9, + e1000_phy_bcm54616 = 10, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci = 1, + e1000_bus_type_pcix = 2, + e1000_bus_type_pci_express = 3, + 
e1000_bus_type_reserved = 4, +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33 = 1, + e1000_bus_speed_66 = 2, + e1000_bus_speed_100 = 3, + e1000_bus_speed_120 = 4, + e1000_bus_speed_133 = 5, + e1000_bus_speed_2500 = 6, + e1000_bus_speed_5000 = 7, + e1000_bus_speed_reserved = 8, +}; + +struct e1000_sfp_flags { + u8 e1000_base_sx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_t: 1; + u8 e100_base_lx: 1; + u8 e100_base_fx: 1; + u8 e10_base_bx10: 1; + u8 e10_base_px: 1; +}; + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + +struct e1000_hw___2; + +struct e1000_mac_operations___2 { + s32 (*check_for_link)(struct e1000_hw___2 *); + s32 (*reset_hw)(struct e1000_hw___2 *); + s32 (*init_hw)(struct e1000_hw___2 *); + bool (*check_mng_mode)(struct e1000_hw___2 *); + s32 (*setup_physical_interface)(struct e1000_hw___2 *); + void (*rar_set)(struct e1000_hw___2 *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw___2 *); + s32 (*get_speed_and_duplex)(struct e1000_hw___2 *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw___2 *, u16); + void (*release_swfw_sync)(struct e1000_hw___2 *, u16); + s32 (*get_thermal_sensor_data)(struct e1000_hw___2 *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw___2 *); + void (*write_vfta)(struct e1000_hw___2 *, u32, u32); +}; + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[3]; +}; + +struct e1000_mac_info___2 { + struct e1000_mac_operations___2 ops; + u8 addr[6]; + u8 perm_addr[6]; + enum e1000_mac_type___2 type; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + u16 mta_reg_count; + u16 uta_reg_count; + u32 mta_shadow[128]; + u16 rar_entry_count; + u8 forced_speed_duplex; + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_fc_info___2 { + u32 high_water; + u32 low_water; + u16 pause_time; + bool send_xon; + bool strict_ieee; + enum e1000_fc_mode current_mode; + enum e1000_fc_mode requested_mode; +}; + +struct e1000_phy_operations___2 { + s32 (*acquire)(struct e1000_hw___2 *); + s32 (*check_polarity)(struct e1000_hw___2 *); + s32 (*check_reset_block)(struct e1000_hw___2 *); + s32 (*force_speed_duplex)(struct e1000_hw___2 *); + s32 (*get_cfg_done)(struct e1000_hw___2 *); + s32 (*get_cable_length)(struct e1000_hw___2 *); + s32 (*get_phy_info)(struct e1000_hw___2 *); + s32 (*read_reg)(struct e1000_hw___2 *, u32, u16 *); + void (*release)(struct e1000_hw___2 *); + s32 (*reset)(struct e1000_hw___2 *); + s32 (*set_d0_lplu_state)(struct e1000_hw___2 *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw___2 *, bool); + s32 (*write_reg)(struct e1000_hw___2 *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw___2 *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw___2 *, u8, u8, u8); +}; + +struct e1000_phy_info___2 { + struct e1000_phy_operations___2 ops; + enum e1000_phy_type___2 type; + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status 
remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + u32 addr; + u32 id; + u32 reset_delay_us; + u32 revision; + enum e1000_media_type media_type; + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + u8 mdix; + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_operations___2 { + s32 (*acquire)(struct e1000_hw___2 *); + s32 (*read)(struct e1000_hw___2 *, u16, u16, u16 *); + void (*release)(struct e1000_hw___2 *); + s32 (*write)(struct e1000_hw___2 *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw___2 *); + s32 (*validate)(struct e1000_hw___2 *); + s32 (*valid_led_default)(struct e1000_hw___2 *, u16 *); +}; + +struct e1000_nvm_info___2 { + struct e1000_nvm_operations___2 ops; + enum e1000_nvm_type___2 type; + enum e1000_nvm_override override; + u32 flash_bank_size; + u32 flash_base_addr; + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info___2 { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + u32 snoop; + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw___2 *); + s32 (*read)(struct e1000_hw___2 *, u32 *, u16, u16, bool); + s32 (*write)(struct e1000_hw___2 *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw___2 *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw___2 *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw___2 *, u16); + s32 (*check_for_ack)(struct e1000_hw___2 *, u16); + s32 (*check_for_rst)(struct e1000_hw___2 *, u16); + s32 (*unlock)(struct e1000_hw___2 *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw___2 { + void *back; + u8 *hw_addr; + u8 *flash_address; + long unsigned int io_base; + struct e1000_mac_info___2 mac; + struct e1000_fc_info___2 fc; + struct e1000_phy_info___2 phy; + struct e1000_nvm_info___2 nvm; + struct e1000_bus_info___2 bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + u8 revision_id; +}; + +struct debugfs_reg32 { + char *name; + long unsigned int offset; +}; + +struct debugfs_regset32 { + const struct debugfs_reg32 *regs; + int nregs; + void *base; + struct device *dev; +}; + +struct xhci_regset { + char name[32]; + struct debugfs_regset32 regset; + size_t nregs; + struct list_head list; +}; + +struct xhci_file_map { + const char *name; + int (*show)(struct seq_file *, void *); +}; + +struct xhci_ep_priv { + char name[32]; + struct dentry *root; + struct xhci_stream_info *stream_info; + struct xhci_ring *show_ring; + unsigned int stream_id; +}; + +struct xhci_slot_priv { + char name[32]; + struct dentry 
*root; + struct xhci_ep_priv *eps[31]; + struct xhci_virt_device *dev; +}; + +enum rc_driver_type { + RC_DRIVER_SCANCODE = 0, + RC_DRIVER_IR_RAW = 1, + RC_DRIVER_IR_RAW_TX = 2, +}; + +struct rc_scancode_filter { + u32 data; + u32 mask; +}; + +struct ir_raw_event_ctrl; + +struct rc_dev { + struct device dev; + bool managed_alloc; + const struct attribute_group *sysfs_groups[5]; + const char *device_name; + const char *input_phys; + struct input_id input_id; + const char *driver_name; + const char *map_name; + struct rc_map rc_map; + struct mutex lock; + unsigned int minor; + struct ir_raw_event_ctrl *raw; + struct input_dev *input_dev; + enum rc_driver_type driver_type; + bool idle; + bool encode_wakeup; + u64 allowed_protocols; + u64 enabled_protocols; + u64 allowed_wakeup_protocols; + enum rc_proto wakeup_protocol; + struct rc_scancode_filter scancode_filter; + struct rc_scancode_filter scancode_wakeup_filter; + u32 scancode_mask; + u32 users; + void *priv; + spinlock_t keylock; + bool keypressed; + long unsigned int keyup_jiffies; + struct timer_list timer_keyup; + struct timer_list timer_repeat; + u32 last_keycode; + enum rc_proto last_protocol; + u64 last_scancode; + u8 last_toggle; + u32 timeout; + u32 min_timeout; + u32 max_timeout; + u32 rx_resolution; + bool registered; + int (*change_protocol)(struct rc_dev *, u64 *); + int (*open)(struct rc_dev *); + void (*close)(struct rc_dev *); + int (*s_tx_mask)(struct rc_dev *, u32); + int (*s_tx_carrier)(struct rc_dev *, u32); + int (*s_tx_duty_cycle)(struct rc_dev *, u32); + int (*s_rx_carrier_range)(struct rc_dev *, u32, u32); + int (*tx_ir)(struct rc_dev *, unsigned int *, unsigned int); + void (*s_idle)(struct rc_dev *, bool); + int (*s_wideband_receiver)(struct rc_dev *, int); + int (*s_carrier_report)(struct rc_dev *, int); + int (*s_filter)(struct rc_dev *, struct rc_scancode_filter *); + int (*s_wakeup_filter)(struct rc_dev *, struct rc_scancode_filter *); + int (*s_timeout)(struct rc_dev *, unsigned int); +}; + +struct ir_raw_event { + union { + u32 duration; + u32 carrier; + }; + u8 duty_cycle; + unsigned int pulse: 1; + unsigned int overflow: 1; + unsigned int timeout: 1; + unsigned int carrier_report: 1; +}; + +struct nec_dec { + int state; + unsigned int count; + u32 bits; + bool is_nec_x; + bool necx_repeat; +}; + +struct rc5_dec { + int state; + u32 bits; + unsigned int count; + bool is_rc5x; +}; + +struct rc6_dec { + int state; + u8 header; + u32 body; + bool toggle; + unsigned int count; + unsigned int wanted_bits; +}; + +struct sony_dec { + int state; + u32 bits; + unsigned int count; +}; + +struct jvc_dec { + int state; + u16 bits; + u16 old_bits; + unsigned int count; + bool first; + bool toggle; +}; + +struct sanyo_dec { + int state; + unsigned int count; + u64 bits; +}; + +struct sharp_dec { + int state; + unsigned int count; + u32 bits; + unsigned int pulse_len; +}; + +struct mce_kbd_dec { + spinlock_t keylock; + struct timer_list rx_timeout; + int state; + u8 header; + u32 body; + unsigned int count; + unsigned int wanted_bits; +}; + +struct xmp_dec { + int state; + unsigned int count; + u32 durations[16]; +}; + +struct ir_raw_event_ctrl { + struct list_head list; + struct task_struct *thread; + struct { + union { + struct __kfifo kfifo; + struct ir_raw_event *type; + const struct ir_raw_event *const_type; + char (*rectype)[0]; + struct ir_raw_event *ptr; + const struct ir_raw_event *ptr_const; + }; + struct ir_raw_event buf[512]; + } kfifo; + ktime_t last_event; + struct rc_dev *dev; + spinlock_t edge_spinlock; + 
struct timer_list edge_handle; + struct ir_raw_event prev_ev; + struct ir_raw_event this_ev; + struct nec_dec nec; + struct rc5_dec rc5; + struct rc6_dec rc6; + struct sony_dec sony; + struct jvc_dec jvc; + struct sanyo_dec sanyo; + struct sharp_dec sharp; + struct mce_kbd_dec mce_kbd; + struct xmp_dec xmp; +}; + +struct ir_raw_handler { + struct list_head list; + u64 protocols; + int (*decode)(struct rc_dev *, struct ir_raw_event); + int (*encode)(enum rc_proto, u32, struct ir_raw_event *, unsigned int); + u32 carrier; + u32 min_timeout; + int (*raw_register)(struct rc_dev *); + int (*raw_unregister)(struct rc_dev *); +}; + +struct ir_raw_timings_manchester { + unsigned int leader_pulse; + unsigned int leader_space; + unsigned int clock; + unsigned int invert: 1; + unsigned int trailer_space; +}; + +enum rc5_state { + STATE_INACTIVE = 0, + STATE_BIT_START = 1, + STATE_BIT_END = 2, + STATE_CHECK_RC5X = 3, + STATE_FINISHED = 4, +}; + +enum { + BIO_PAGE_PINNED = 0, + BIO_CLONED = 1, + BIO_BOUNCED = 2, + BIO_QUIET = 3, + BIO_CHAIN = 4, + BIO_REFFED = 5, + BIO_BPS_THROTTLED = 6, + BIO_TRACE_COMPLETION = 7, + BIO_CGROUP_ACCT = 8, + BIO_QOS_THROTTLED = 9, + BIO_QOS_MERGED = 10, + BIO_REMAPPED = 11, + BIO_ZONE_WRITE_PLUGGING = 12, + BIO_EMULATES_ZONE_APPEND = 13, + BIO_FLAG_LAST = 14, +}; + +struct badblocks { + struct device *dev; + int count; + int unacked_exist; + int shift; + u64 *page; + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; +}; + +enum sync_action { + ACTION_RESYNC = 0, + ACTION_RECOVER = 1, + ACTION_CHECK = 2, + ACTION_REPAIR = 3, + ACTION_RESHAPE = 4, + ACTION_FROZEN = 5, + ACTION_IDLE = 6, + NR_SYNC_ACTIONS = 7, +}; + +struct md_cluster_info; + +struct md_personality; + +struct md_thread; + +struct bitmap_operations; + +struct md_rdev; + +struct mddev { + void *private; + struct md_personality *pers; + dev_t unit; + int md_minor; + struct list_head disks; + long unsigned int flags; + long unsigned int sb_flags; + int suspended; + struct mutex suspend_mutex; + struct percpu_ref active_io; + int ro; + int sysfs_active; + struct gendisk *gendisk; + struct kobject kobj; + int hold_active; + int major_version; + int minor_version; + int patch_version; + int persistent; + int external; + char metadata_type[17]; + int chunk_sectors; + time64_t ctime; + time64_t utime; + int level; + int layout; + char clevel[16]; + int raid_disks; + int max_disks; + sector_t dev_sectors; + sector_t array_sectors; + int external_size; + __u64 events; + int can_decrease_events; + char uuid[16]; + sector_t reshape_position; + int delta_disks; + int new_level; + int new_layout; + int new_chunk_sectors; + int reshape_backwards; + struct md_thread *thread; + struct md_thread *sync_thread; + enum sync_action last_sync_action; + sector_t curr_resync; + sector_t curr_resync_completed; + long unsigned int resync_mark; + sector_t resync_mark_cnt; + sector_t curr_mark_cnt; + sector_t resync_max_sectors; + atomic64_t resync_mismatches; + sector_t suspend_lo; + sector_t suspend_hi; + int sync_speed_min; + int sync_speed_max; + int parallel_resync; + int ok_start_degraded; + long unsigned int recovery; + int recovery_disabled; + int in_sync; + struct mutex open_mutex; + struct mutex reconfig_mutex; + atomic_t active; + atomic_t openers; + int changed; + int degraded; + atomic_t recovery_active; + wait_queue_head_t recovery_wait; + sector_t recovery_cp; + sector_t resync_min; + sector_t resync_max; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_action; + struct kernfs_node 
*sysfs_completed; + struct kernfs_node *sysfs_degraded; + struct kernfs_node *sysfs_level; + struct work_struct del_work; + struct work_struct sync_work; + spinlock_t lock; + wait_queue_head_t sb_wait; + atomic_t pending_writes; + unsigned int safemode; + unsigned int safemode_delay; + struct timer_list safemode_timer; + struct percpu_ref writes_pending; + int sync_checkers; + void *bitmap; + struct bitmap_operations *bitmap_ops; + struct { + struct file *file; + loff_t offset; + long unsigned int space; + loff_t default_offset; + long unsigned int default_space; + struct mutex mutex; + long unsigned int chunksize; + long unsigned int daemon_sleep; + long unsigned int max_write_behind; + int external; + int nodes; + char cluster_name[64]; + } bitmap_info; + atomic_t max_corr_read_errors; + struct list_head all_mddevs; + const struct attribute_group *to_remove; + struct bio_set bio_set; + struct bio_set sync_set; + struct bio_set io_clone_set; + struct work_struct event_work; + mempool_t *serial_info_pool; + void (*sync_super)(struct mddev *, struct md_rdev *); + struct md_cluster_info *cluster_info; + unsigned int good_device_nr; + unsigned int noio_flag; + struct list_head deleting; + atomic_t sync_seq; + bool has_superblocks: 1; + bool fail_last_dev: 1; + bool serialize_policy: 1; +}; + +struct serial_in_rdev; + +struct md_rdev { + struct list_head same_set; + sector_t sectors; + struct mddev *mddev; + int last_events; + struct block_device *meta_bdev; + struct block_device *bdev; + struct file *bdev_file; + struct page *sb_page; + struct page *bb_page; + int sb_loaded; + __u64 sb_events; + sector_t data_offset; + sector_t new_data_offset; + sector_t sb_start; + int sb_size; + int preferred_minor; + struct kobject kobj; + long unsigned int flags; + wait_queue_head_t blocked_wait; + int desc_nr; + int raid_disk; + int new_raid_disk; + int saved_raid_disk; + union { + sector_t recovery_offset; + sector_t journal_tail; + }; + atomic_t nr_pending; + atomic_t read_errors; + time64_t last_read_error; + atomic_t corrected_errors; + struct serial_in_rdev *serial; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_unack_badblocks; + struct kernfs_node *sysfs_badblocks; + struct badblocks badblocks; + struct { + short int offset; + unsigned int size; + sector_t sector; + } ppl; +}; + +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + +enum mddev_flags { + MD_ARRAY_FIRST_USE = 0, + MD_CLOSING = 1, + MD_JOURNAL_CLEAN = 2, + MD_HAS_JOURNAL = 3, + MD_CLUSTER_RESYNC_LOCKED = 4, + MD_FAILFAST_SUPPORTED = 5, + MD_HAS_PPL = 6, + MD_HAS_MULTIPLE_PPLS = 7, + MD_NOT_READY = 8, + MD_BROKEN = 9, + MD_DELETED = 10, +}; + +struct md_personality { + char *name; + int level; + struct list_head list; + struct module *owner; + bool (*make_request)(struct mddev *, struct bio *); + int (*run)(struct mddev *); + int (*start)(struct mddev *); + void (*free)(struct mddev *, void *); + void (*status)(struct seq_file *, struct mddev *); + void (*error_handler)(struct mddev *, struct md_rdev *); + int (*hot_add_disk)(struct mddev *, struct md_rdev *); + int (*hot_remove_disk)(struct mddev *, struct md_rdev *); + int (*spare_active)(struct mddev *); + sector_t (*sync_request)(struct mddev *, sector_t, sector_t, int *); + int (*resize)(struct mddev *, sector_t); + sector_t (*size)(struct mddev *, sector_t, int); + int (*check_reshape)(struct mddev *); + int (*start_reshape)(struct mddev *); + void (*finish_reshape)(struct mddev *); + void 
(*update_reshape_pos)(struct mddev *); + void (*prepare_suspend)(struct mddev *); + void (*quiesce)(struct mddev *, int); + void * (*takeover)(struct mddev *); + int (*change_consistency_policy)(struct mddev *, const char *); +}; + +struct md_thread { + void (*run)(struct md_thread *); + struct mddev *mddev; + wait_queue_head_t wqueue; + long unsigned int flags; + struct task_struct *tsk; + long unsigned int timeout; + void *private; +}; + +struct strip_zone { + sector_t zone_end; + sector_t dev_start; + int nb_dev; + int disk_shift; +}; + +enum r0layout { + RAID0_ORIG_LAYOUT = 1, + RAID0_ALT_MULTIZONE_LAYOUT = 2, +}; + +struct r0conf { + struct strip_zone *strip_zone; + struct md_rdev **devlist; + int nr_strip_zones; + enum r0layout layout; +}; + +enum dma_transaction_type { + DMA_MEMCPY = 0, + DMA_XOR = 1, + DMA_PQ = 2, + DMA_XOR_VAL = 3, + DMA_PQ_VAL = 4, + DMA_MEMSET = 5, + DMA_MEMSET_SG = 6, + DMA_INTERRUPT = 7, + DMA_PRIVATE = 8, + DMA_ASYNC_TX = 9, + DMA_SLAVE = 10, + DMA_CYCLIC = 11, + DMA_INTERLEAVE = 12, + DMA_COMPLETION_NO_ORDER = 13, + DMA_REPEAT = 14, + DMA_LOAD_EOT = 15, + DMA_TX_TYPE_END = 16, +}; + +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +enum { + TCA_STATS_UNSPEC = 0, + TCA_STATS_BASIC = 1, + TCA_STATS_RATE_EST = 2, + TCA_STATS_QUEUE = 3, + TCA_STATS_APP = 4, + TCA_STATS_RATE_EST64 = 5, + TCA_STATS_PAD = 6, + TCA_STATS_BASIC_HW = 7, + TCA_STATS_PKT64 = 8, + __TCA_STATS_MAX = 9, +}; + +struct gnet_stats_basic { + __u64 bytes; + __u32 packets; +}; + +struct gnet_stats_rate_est { + __u32 bps; + __u32 pps; +}; + +struct gnet_stats_rate_est64 { + __u64 bps; + __u64 pps; +}; + +enum { + NETDEV_A_DEV_IFINDEX = 1, + NETDEV_A_DEV_PAD = 2, + NETDEV_A_DEV_XDP_FEATURES = 3, + NETDEV_A_DEV_XDP_ZC_MAX_SEGS = 4, + NETDEV_A_DEV_XDP_RX_METADATA_FEATURES = 5, + NETDEV_A_DEV_XSK_FEATURES = 6, + __NETDEV_A_DEV_MAX = 7, + NETDEV_A_DEV_MAX = 6, +}; + +enum { + NETDEV_A_PAGE_POOL_ID = 1, + NETDEV_A_PAGE_POOL_IFINDEX = 2, + NETDEV_A_PAGE_POOL_NAPI_ID = 3, + NETDEV_A_PAGE_POOL_INFLIGHT = 4, + NETDEV_A_PAGE_POOL_INFLIGHT_MEM = 5, + NETDEV_A_PAGE_POOL_DETACH_TIME = 6, + NETDEV_A_PAGE_POOL_DMABUF = 7, + __NETDEV_A_PAGE_POOL_MAX = 8, + NETDEV_A_PAGE_POOL_MAX = 7, +}; + +enum { + NETDEV_A_PAGE_POOL_STATS_INFO = 1, + NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW = 9, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER = 10, + NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY = 11, + NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL = 12, + NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE = 13, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED = 14, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL = 15, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING = 16, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL = 17, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT = 18, + __NETDEV_A_PAGE_POOL_STATS_MAX = 19, + NETDEV_A_PAGE_POOL_STATS_MAX = 18, +}; + +enum { + NETDEV_A_NAPI_IFINDEX = 1, + NETDEV_A_NAPI_ID = 2, + NETDEV_A_NAPI_IRQ = 3, + NETDEV_A_NAPI_PID = 4, + NETDEV_A_NAPI_DEFER_HARD_IRQS = 5, + NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT = 6, + NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT = 7, + __NETDEV_A_NAPI_MAX = 8, + NETDEV_A_NAPI_MAX = 7, +}; + +enum { + NETDEV_A_QUEUE_ID = 1, + NETDEV_A_QUEUE_IFINDEX = 2, + NETDEV_A_QUEUE_TYPE = 3, + NETDEV_A_QUEUE_NAPI_ID = 4, + NETDEV_A_QUEUE_DMABUF = 5, + __NETDEV_A_QUEUE_MAX = 6, + NETDEV_A_QUEUE_MAX = 5, +}; + +enum { + NETDEV_A_QSTATS_IFINDEX = 1, + NETDEV_A_QSTATS_QUEUE_TYPE = 2, + NETDEV_A_QSTATS_QUEUE_ID = 3, + NETDEV_A_QSTATS_SCOPE = 4, + NETDEV_A_QSTATS_RX_PACKETS = 8, + 
NETDEV_A_QSTATS_RX_BYTES = 9, + NETDEV_A_QSTATS_TX_PACKETS = 10, + NETDEV_A_QSTATS_TX_BYTES = 11, + NETDEV_A_QSTATS_RX_ALLOC_FAIL = 12, + NETDEV_A_QSTATS_RX_HW_DROPS = 13, + NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS = 14, + NETDEV_A_QSTATS_RX_CSUM_COMPLETE = 15, + NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY = 16, + NETDEV_A_QSTATS_RX_CSUM_NONE = 17, + NETDEV_A_QSTATS_RX_CSUM_BAD = 18, + NETDEV_A_QSTATS_RX_HW_GRO_PACKETS = 19, + NETDEV_A_QSTATS_RX_HW_GRO_BYTES = 20, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS = 21, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES = 22, + NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS = 23, + NETDEV_A_QSTATS_TX_HW_DROPS = 24, + NETDEV_A_QSTATS_TX_HW_DROP_ERRORS = 25, + NETDEV_A_QSTATS_TX_CSUM_NONE = 26, + NETDEV_A_QSTATS_TX_NEEDS_CSUM = 27, + NETDEV_A_QSTATS_TX_HW_GSO_PACKETS = 28, + NETDEV_A_QSTATS_TX_HW_GSO_BYTES = 29, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS = 30, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES = 31, + NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS = 32, + NETDEV_A_QSTATS_TX_STOP = 33, + NETDEV_A_QSTATS_TX_WAKE = 34, + __NETDEV_A_QSTATS_MAX = 35, + NETDEV_A_QSTATS_MAX = 34, +}; + +enum { + NETDEV_A_DMABUF_IFINDEX = 1, + NETDEV_A_DMABUF_QUEUES = 2, + NETDEV_A_DMABUF_FD = 3, + NETDEV_A_DMABUF_ID = 4, + __NETDEV_A_DMABUF_MAX = 5, + NETDEV_A_DMABUF_MAX = 4, +}; + +enum { + NETDEV_CMD_DEV_GET = 1, + NETDEV_CMD_DEV_ADD_NTF = 2, + NETDEV_CMD_DEV_DEL_NTF = 3, + NETDEV_CMD_DEV_CHANGE_NTF = 4, + NETDEV_CMD_PAGE_POOL_GET = 5, + NETDEV_CMD_PAGE_POOL_ADD_NTF = 6, + NETDEV_CMD_PAGE_POOL_DEL_NTF = 7, + NETDEV_CMD_PAGE_POOL_CHANGE_NTF = 8, + NETDEV_CMD_PAGE_POOL_STATS_GET = 9, + NETDEV_CMD_QUEUE_GET = 10, + NETDEV_CMD_NAPI_GET = 11, + NETDEV_CMD_QSTATS_GET = 12, + NETDEV_CMD_BIND_RX = 13, + NETDEV_CMD_NAPI_SET = 14, + __NETDEV_CMD_MAX = 15, + NETDEV_CMD_MAX = 14, +}; + +enum { + NETDEV_NLGRP_MGMT = 0, + NETDEV_NLGRP_PAGE_POOL = 1, +}; + +struct packet_type { + __be16 type; + bool ignore_outgoing; + struct net_device *dev; + netdevice_tracker dev_tracker; + int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); + bool (*id_match)(struct packet_type *, struct sock *); + struct net *af_packet_net; + void *af_packet_priv; + struct list_head list; +}; + +struct llc_addr { + unsigned char lsap; + unsigned char mac[6]; +}; + +struct llc_sap { + unsigned char state; + unsigned char p_bit; + unsigned char f_bit; + refcount_t refcnt; + int (*rcv_func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + struct llc_addr laddr; + struct list_head node; + spinlock_t sk_lock; + int sk_count; + struct hlist_nulls_head sk_laddr_hash[64]; + struct hlist_head sk_dev_hash[64]; + struct callback_head rcu; +}; + +struct llc_pdu_un { + u8 dsap; + u8 ssap; + u8 ctrl_1; +}; + +enum tunable_id { + ETHTOOL_ID_UNSPEC = 0, + ETHTOOL_RX_COPYBREAK = 1, + ETHTOOL_TX_COPYBREAK = 2, + ETHTOOL_PFC_PREVENTION_TOUT = 3, + ETHTOOL_TX_COPYBREAK_BUF_SIZE = 4, + __ETHTOOL_TUNABLE_COUNT = 5, +}; + +enum phy_tunable_id { + ETHTOOL_PHY_ID_UNSPEC = 0, + ETHTOOL_PHY_DOWNSHIFT = 1, + ETHTOOL_PHY_FAST_LINK_DOWN = 2, + ETHTOOL_PHY_EDPD = 3, + __ETHTOOL_PHY_TUNABLE_COUNT = 4, +}; + +enum { + ETH_RSS_HASH_TOP_BIT = 0, + ETH_RSS_HASH_XOR_BIT = 1, + ETH_RSS_HASH_CRC32_BIT = 2, + ETH_RSS_HASH_FUNCS_COUNT = 3, +}; + +enum { + ETHTOOL_MSG_USER_NONE = 0, + ETHTOOL_MSG_STRSET_GET = 1, + ETHTOOL_MSG_LINKINFO_GET = 2, + ETHTOOL_MSG_LINKINFO_SET = 3, + ETHTOOL_MSG_LINKMODES_GET = 4, + ETHTOOL_MSG_LINKMODES_SET = 
5, + ETHTOOL_MSG_LINKSTATE_GET = 6, + ETHTOOL_MSG_DEBUG_GET = 7, + ETHTOOL_MSG_DEBUG_SET = 8, + ETHTOOL_MSG_WOL_GET = 9, + ETHTOOL_MSG_WOL_SET = 10, + ETHTOOL_MSG_FEATURES_GET = 11, + ETHTOOL_MSG_FEATURES_SET = 12, + ETHTOOL_MSG_PRIVFLAGS_GET = 13, + ETHTOOL_MSG_PRIVFLAGS_SET = 14, + ETHTOOL_MSG_RINGS_GET = 15, + ETHTOOL_MSG_RINGS_SET = 16, + ETHTOOL_MSG_CHANNELS_GET = 17, + ETHTOOL_MSG_CHANNELS_SET = 18, + ETHTOOL_MSG_COALESCE_GET = 19, + ETHTOOL_MSG_COALESCE_SET = 20, + ETHTOOL_MSG_PAUSE_GET = 21, + ETHTOOL_MSG_PAUSE_SET = 22, + ETHTOOL_MSG_EEE_GET = 23, + ETHTOOL_MSG_EEE_SET = 24, + ETHTOOL_MSG_TSINFO_GET = 25, + ETHTOOL_MSG_CABLE_TEST_ACT = 26, + ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 27, + ETHTOOL_MSG_TUNNEL_INFO_GET = 28, + ETHTOOL_MSG_FEC_GET = 29, + ETHTOOL_MSG_FEC_SET = 30, + ETHTOOL_MSG_MODULE_EEPROM_GET = 31, + ETHTOOL_MSG_STATS_GET = 32, + ETHTOOL_MSG_PHC_VCLOCKS_GET = 33, + ETHTOOL_MSG_MODULE_GET = 34, + ETHTOOL_MSG_MODULE_SET = 35, + ETHTOOL_MSG_PSE_GET = 36, + ETHTOOL_MSG_PSE_SET = 37, + ETHTOOL_MSG_RSS_GET = 38, + ETHTOOL_MSG_PLCA_GET_CFG = 39, + ETHTOOL_MSG_PLCA_SET_CFG = 40, + ETHTOOL_MSG_PLCA_GET_STATUS = 41, + ETHTOOL_MSG_MM_GET = 42, + ETHTOOL_MSG_MM_SET = 43, + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 44, + ETHTOOL_MSG_PHY_GET = 45, + __ETHTOOL_MSG_USER_CNT = 46, + ETHTOOL_MSG_USER_MAX = 45, +}; + +enum { + ETHTOOL_MSG_KERNEL_NONE = 0, + ETHTOOL_MSG_STRSET_GET_REPLY = 1, + ETHTOOL_MSG_LINKINFO_GET_REPLY = 2, + ETHTOOL_MSG_LINKINFO_NTF = 3, + ETHTOOL_MSG_LINKMODES_GET_REPLY = 4, + ETHTOOL_MSG_LINKMODES_NTF = 5, + ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6, + ETHTOOL_MSG_DEBUG_GET_REPLY = 7, + ETHTOOL_MSG_DEBUG_NTF = 8, + ETHTOOL_MSG_WOL_GET_REPLY = 9, + ETHTOOL_MSG_WOL_NTF = 10, + ETHTOOL_MSG_FEATURES_GET_REPLY = 11, + ETHTOOL_MSG_FEATURES_SET_REPLY = 12, + ETHTOOL_MSG_FEATURES_NTF = 13, + ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14, + ETHTOOL_MSG_PRIVFLAGS_NTF = 15, + ETHTOOL_MSG_RINGS_GET_REPLY = 16, + ETHTOOL_MSG_RINGS_NTF = 17, + ETHTOOL_MSG_CHANNELS_GET_REPLY = 18, + ETHTOOL_MSG_CHANNELS_NTF = 19, + ETHTOOL_MSG_COALESCE_GET_REPLY = 20, + ETHTOOL_MSG_COALESCE_NTF = 21, + ETHTOOL_MSG_PAUSE_GET_REPLY = 22, + ETHTOOL_MSG_PAUSE_NTF = 23, + ETHTOOL_MSG_EEE_GET_REPLY = 24, + ETHTOOL_MSG_EEE_NTF = 25, + ETHTOOL_MSG_TSINFO_GET_REPLY = 26, + ETHTOOL_MSG_CABLE_TEST_NTF = 27, + ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 29, + ETHTOOL_MSG_FEC_GET_REPLY = 30, + ETHTOOL_MSG_FEC_NTF = 31, + ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 32, + ETHTOOL_MSG_STATS_GET_REPLY = 33, + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 34, + ETHTOOL_MSG_MODULE_GET_REPLY = 35, + ETHTOOL_MSG_MODULE_NTF = 36, + ETHTOOL_MSG_PSE_GET_REPLY = 37, + ETHTOOL_MSG_RSS_GET_REPLY = 38, + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 39, + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 40, + ETHTOOL_MSG_PLCA_NTF = 41, + ETHTOOL_MSG_MM_GET_REPLY = 42, + ETHTOOL_MSG_MM_NTF = 43, + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 44, + ETHTOOL_MSG_PHY_GET_REPLY = 45, + ETHTOOL_MSG_PHY_NTF = 46, + __ETHTOOL_MSG_KERNEL_CNT = 47, + ETHTOOL_MSG_KERNEL_MAX = 46, +}; + +enum { + ETHTOOL_A_HEADER_UNSPEC = 0, + ETHTOOL_A_HEADER_DEV_INDEX = 1, + ETHTOOL_A_HEADER_DEV_NAME = 2, + ETHTOOL_A_HEADER_FLAGS = 3, + ETHTOOL_A_HEADER_PHY_INDEX = 4, + __ETHTOOL_A_HEADER_CNT = 5, + ETHTOOL_A_HEADER_MAX = 4, +}; + +enum { + ETHTOOL_A_STRSET_UNSPEC = 0, + ETHTOOL_A_STRSET_HEADER = 1, + ETHTOOL_A_STRSET_STRINGSETS = 2, + ETHTOOL_A_STRSET_COUNTS_ONLY = 3, + __ETHTOOL_A_STRSET_CNT = 4, + ETHTOOL_A_STRSET_MAX = 3, +}; + +enum { + ETHTOOL_A_LINKINFO_UNSPEC = 0, + ETHTOOL_A_LINKINFO_HEADER 
= 1, + ETHTOOL_A_LINKINFO_PORT = 2, + ETHTOOL_A_LINKINFO_PHYADDR = 3, + ETHTOOL_A_LINKINFO_TP_MDIX = 4, + ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5, + ETHTOOL_A_LINKINFO_TRANSCEIVER = 6, + __ETHTOOL_A_LINKINFO_CNT = 7, + ETHTOOL_A_LINKINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_LINKMODES_UNSPEC = 0, + ETHTOOL_A_LINKMODES_HEADER = 1, + ETHTOOL_A_LINKMODES_AUTONEG = 2, + ETHTOOL_A_LINKMODES_OURS = 3, + ETHTOOL_A_LINKMODES_PEER = 4, + ETHTOOL_A_LINKMODES_SPEED = 5, + ETHTOOL_A_LINKMODES_DUPLEX = 6, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8, + ETHTOOL_A_LINKMODES_LANES = 9, + ETHTOOL_A_LINKMODES_RATE_MATCHING = 10, + __ETHTOOL_A_LINKMODES_CNT = 11, + ETHTOOL_A_LINKMODES_MAX = 10, +}; + +enum { + ETHTOOL_A_LINKSTATE_UNSPEC = 0, + ETHTOOL_A_LINKSTATE_HEADER = 1, + ETHTOOL_A_LINKSTATE_LINK = 2, + ETHTOOL_A_LINKSTATE_SQI = 3, + ETHTOOL_A_LINKSTATE_SQI_MAX = 4, + ETHTOOL_A_LINKSTATE_EXT_STATE = 5, + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 6, + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 7, + __ETHTOOL_A_LINKSTATE_CNT = 8, + ETHTOOL_A_LINKSTATE_MAX = 7, +}; + +enum { + ETHTOOL_A_DEBUG_UNSPEC = 0, + ETHTOOL_A_DEBUG_HEADER = 1, + ETHTOOL_A_DEBUG_MSGMASK = 2, + __ETHTOOL_A_DEBUG_CNT = 3, + ETHTOOL_A_DEBUG_MAX = 2, +}; + +enum { + ETHTOOL_A_WOL_UNSPEC = 0, + ETHTOOL_A_WOL_HEADER = 1, + ETHTOOL_A_WOL_MODES = 2, + ETHTOOL_A_WOL_SOPASS = 3, + __ETHTOOL_A_WOL_CNT = 4, + ETHTOOL_A_WOL_MAX = 3, +}; + +enum { + ETHTOOL_A_FEATURES_UNSPEC = 0, + ETHTOOL_A_FEATURES_HEADER = 1, + ETHTOOL_A_FEATURES_HW = 2, + ETHTOOL_A_FEATURES_WANTED = 3, + ETHTOOL_A_FEATURES_ACTIVE = 4, + ETHTOOL_A_FEATURES_NOCHANGE = 5, + __ETHTOOL_A_FEATURES_CNT = 6, + ETHTOOL_A_FEATURES_MAX = 5, +}; + +enum { + ETHTOOL_A_PRIVFLAGS_UNSPEC = 0, + ETHTOOL_A_PRIVFLAGS_HEADER = 1, + ETHTOOL_A_PRIVFLAGS_FLAGS = 2, + __ETHTOOL_A_PRIVFLAGS_CNT = 3, + ETHTOOL_A_PRIVFLAGS_MAX = 2, +}; + +enum { + ETHTOOL_A_RINGS_UNSPEC = 0, + ETHTOOL_A_RINGS_HEADER = 1, + ETHTOOL_A_RINGS_RX_MAX = 2, + ETHTOOL_A_RINGS_RX_MINI_MAX = 3, + ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4, + ETHTOOL_A_RINGS_TX_MAX = 5, + ETHTOOL_A_RINGS_RX = 6, + ETHTOOL_A_RINGS_RX_MINI = 7, + ETHTOOL_A_RINGS_RX_JUMBO = 8, + ETHTOOL_A_RINGS_TX = 9, + ETHTOOL_A_RINGS_RX_BUF_LEN = 10, + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 11, + ETHTOOL_A_RINGS_CQE_SIZE = 12, + ETHTOOL_A_RINGS_TX_PUSH = 13, + ETHTOOL_A_RINGS_RX_PUSH = 14, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 15, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 16, + __ETHTOOL_A_RINGS_CNT = 17, + ETHTOOL_A_RINGS_MAX = 16, +}; + +enum { + ETHTOOL_A_CHANNELS_UNSPEC = 0, + ETHTOOL_A_CHANNELS_HEADER = 1, + ETHTOOL_A_CHANNELS_RX_MAX = 2, + ETHTOOL_A_CHANNELS_TX_MAX = 3, + ETHTOOL_A_CHANNELS_OTHER_MAX = 4, + ETHTOOL_A_CHANNELS_COMBINED_MAX = 5, + ETHTOOL_A_CHANNELS_RX_COUNT = 6, + ETHTOOL_A_CHANNELS_TX_COUNT = 7, + ETHTOOL_A_CHANNELS_OTHER_COUNT = 8, + ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9, + __ETHTOOL_A_CHANNELS_CNT = 10, + ETHTOOL_A_CHANNELS_MAX = 9, +}; + +enum { + ETHTOOL_A_COALESCE_UNSPEC = 0, + ETHTOOL_A_COALESCE_HEADER = 1, + ETHTOOL_A_COALESCE_RX_USECS = 2, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3, + ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5, + ETHTOOL_A_COALESCE_TX_USECS = 6, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7, + ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9, + ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12, + ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13, + ETHTOOL_A_COALESCE_RX_USECS_LOW = 14, + 
ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15, + ETHTOOL_A_COALESCE_TX_USECS_LOW = 16, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17, + ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18, + ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20, + ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22, + ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23, + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 24, + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 25, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES = 26, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES = 27, + ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS = 28, + ETHTOOL_A_COALESCE_RX_PROFILE = 29, + ETHTOOL_A_COALESCE_TX_PROFILE = 30, + __ETHTOOL_A_COALESCE_CNT = 31, + ETHTOOL_A_COALESCE_MAX = 30, +}; + +enum { + ETHTOOL_A_PAUSE_UNSPEC = 0, + ETHTOOL_A_PAUSE_HEADER = 1, + ETHTOOL_A_PAUSE_AUTONEG = 2, + ETHTOOL_A_PAUSE_RX = 3, + ETHTOOL_A_PAUSE_TX = 4, + ETHTOOL_A_PAUSE_STATS = 5, + ETHTOOL_A_PAUSE_STATS_SRC = 6, + __ETHTOOL_A_PAUSE_CNT = 7, + ETHTOOL_A_PAUSE_MAX = 6, +}; + +enum { + ETHTOOL_A_EEE_UNSPEC = 0, + ETHTOOL_A_EEE_HEADER = 1, + ETHTOOL_A_EEE_MODES_OURS = 2, + ETHTOOL_A_EEE_MODES_PEER = 3, + ETHTOOL_A_EEE_ACTIVE = 4, + ETHTOOL_A_EEE_ENABLED = 5, + ETHTOOL_A_EEE_TX_LPI_ENABLED = 6, + ETHTOOL_A_EEE_TX_LPI_TIMER = 7, + __ETHTOOL_A_EEE_CNT = 8, + ETHTOOL_A_EEE_MAX = 7, +}; + +enum { + ETHTOOL_A_TSINFO_UNSPEC = 0, + ETHTOOL_A_TSINFO_HEADER = 1, + ETHTOOL_A_TSINFO_TIMESTAMPING = 2, + ETHTOOL_A_TSINFO_TX_TYPES = 3, + ETHTOOL_A_TSINFO_RX_FILTERS = 4, + ETHTOOL_A_TSINFO_PHC_INDEX = 5, + ETHTOOL_A_TSINFO_STATS = 6, + __ETHTOOL_A_TSINFO_CNT = 7, + ETHTOOL_A_TSINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC = 0, + ETHTOOL_A_PHC_VCLOCKS_HEADER = 1, + ETHTOOL_A_PHC_VCLOCKS_NUM = 2, + ETHTOOL_A_PHC_VCLOCKS_INDEX = 3, + __ETHTOOL_A_PHC_VCLOCKS_CNT = 4, + ETHTOOL_A_PHC_VCLOCKS_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_HEADER = 1, + __ETHTOOL_A_CABLE_TEST_CNT = 2, + ETHTOOL_A_CABLE_TEST_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG = 2, + __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3, + ETHTOOL_A_CABLE_TEST_TDR_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC = 0, + ETHTOOL_A_TUNNEL_INFO_HEADER = 1, + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 2, + __ETHTOOL_A_TUNNEL_INFO_CNT = 3, + ETHTOOL_A_TUNNEL_INFO_MAX = 2, +}; + +enum { + ETHTOOL_A_FEC_UNSPEC = 0, + ETHTOOL_A_FEC_HEADER = 1, + ETHTOOL_A_FEC_MODES = 2, + ETHTOOL_A_FEC_AUTO = 3, + ETHTOOL_A_FEC_ACTIVE = 4, + ETHTOOL_A_FEC_STATS = 5, + __ETHTOOL_A_FEC_CNT = 6, + ETHTOOL_A_FEC_MAX = 5, +}; + +enum { + ETHTOOL_A_MODULE_EEPROM_UNSPEC = 0, + ETHTOOL_A_MODULE_EEPROM_HEADER = 1, + ETHTOOL_A_MODULE_EEPROM_OFFSET = 2, + ETHTOOL_A_MODULE_EEPROM_LENGTH = 3, + ETHTOOL_A_MODULE_EEPROM_PAGE = 4, + ETHTOOL_A_MODULE_EEPROM_BANK = 5, + ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS = 6, + ETHTOOL_A_MODULE_EEPROM_DATA = 7, + __ETHTOOL_A_MODULE_EEPROM_CNT = 8, + ETHTOOL_A_MODULE_EEPROM_MAX = 7, +}; + +enum { + ETHTOOL_A_STATS_UNSPEC = 0, + ETHTOOL_A_STATS_PAD = 1, + ETHTOOL_A_STATS_HEADER = 2, + ETHTOOL_A_STATS_GROUPS = 3, + ETHTOOL_A_STATS_GRP = 4, + ETHTOOL_A_STATS_SRC = 5, + __ETHTOOL_A_STATS_CNT = 6, + ETHTOOL_A_STATS_MAX = 5, +}; + +enum { + ETHTOOL_STATS_ETH_PHY = 0, + ETHTOOL_STATS_ETH_MAC = 1, + ETHTOOL_STATS_ETH_CTRL = 2, + ETHTOOL_STATS_RMON = 3, + __ETHTOOL_STATS_CNT = 4, +}; + +enum { + ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR = 0, + __ETHTOOL_A_STATS_ETH_PHY_CNT = 1, + ETHTOOL_A_STATS_ETH_PHY_MAX = 0, 
+}; + +enum { + ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT = 0, + ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL = 1, + ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL = 2, + ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT = 3, + ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR = 4, + ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR = 5, + ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES = 6, + ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER = 7, + ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL = 8, + ETHTOOL_A_STATS_ETH_MAC_11_XS_COL = 9, + ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR = 10, + ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR = 11, + ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES = 12, + ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR = 13, + ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST = 14, + ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST = 15, + ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER = 16, + ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST = 17, + ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST = 18, + ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR = 19, + ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN = 20, + ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR = 21, + __ETHTOOL_A_STATS_ETH_MAC_CNT = 22, + ETHTOOL_A_STATS_ETH_MAC_MAX = 21, +}; + +enum { + ETHTOOL_A_STATS_ETH_CTRL_3_TX = 0, + ETHTOOL_A_STATS_ETH_CTRL_4_RX = 1, + ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP = 2, + __ETHTOOL_A_STATS_ETH_CTRL_CNT = 3, + ETHTOOL_A_STATS_ETH_CTRL_MAX = 2, +}; + +enum { + ETHTOOL_A_STATS_RMON_UNDERSIZE = 0, + ETHTOOL_A_STATS_RMON_OVERSIZE = 1, + ETHTOOL_A_STATS_RMON_FRAG = 2, + ETHTOOL_A_STATS_RMON_JABBER = 3, + __ETHTOOL_A_STATS_RMON_CNT = 4, + ETHTOOL_A_STATS_RMON_MAX = 3, +}; + +enum { + ETHTOOL_A_MODULE_UNSPEC = 0, + ETHTOOL_A_MODULE_HEADER = 1, + ETHTOOL_A_MODULE_POWER_MODE_POLICY = 2, + ETHTOOL_A_MODULE_POWER_MODE = 3, + __ETHTOOL_A_MODULE_CNT = 4, + ETHTOOL_A_MODULE_MAX = 3, +}; + +enum { + ETHTOOL_A_PSE_UNSPEC = 0, + ETHTOOL_A_PSE_HEADER = 1, + ETHTOOL_A_PODL_PSE_ADMIN_STATE = 2, + ETHTOOL_A_PODL_PSE_ADMIN_CONTROL = 3, + ETHTOOL_A_PODL_PSE_PW_D_STATUS = 4, + ETHTOOL_A_C33_PSE_ADMIN_STATE = 5, + ETHTOOL_A_C33_PSE_ADMIN_CONTROL = 6, + ETHTOOL_A_C33_PSE_PW_D_STATUS = 7, + ETHTOOL_A_C33_PSE_PW_CLASS = 8, + ETHTOOL_A_C33_PSE_ACTUAL_PW = 9, + ETHTOOL_A_C33_PSE_EXT_STATE = 10, + ETHTOOL_A_C33_PSE_EXT_SUBSTATE = 11, + ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT = 12, + ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES = 13, + __ETHTOOL_A_PSE_CNT = 14, + ETHTOOL_A_PSE_MAX = 13, +}; + +enum { + ETHTOOL_A_RSS_UNSPEC = 0, + ETHTOOL_A_RSS_HEADER = 1, + ETHTOOL_A_RSS_CONTEXT = 2, + ETHTOOL_A_RSS_HFUNC = 3, + ETHTOOL_A_RSS_INDIR = 4, + ETHTOOL_A_RSS_HKEY = 5, + ETHTOOL_A_RSS_INPUT_XFRM = 6, + ETHTOOL_A_RSS_START_CONTEXT = 7, + __ETHTOOL_A_RSS_CNT = 8, + ETHTOOL_A_RSS_MAX = 7, +}; + +enum { + ETHTOOL_A_PLCA_UNSPEC = 0, + ETHTOOL_A_PLCA_HEADER = 1, + ETHTOOL_A_PLCA_VERSION = 2, + ETHTOOL_A_PLCA_ENABLED = 3, + ETHTOOL_A_PLCA_STATUS = 4, + ETHTOOL_A_PLCA_NODE_CNT = 5, + ETHTOOL_A_PLCA_NODE_ID = 6, + ETHTOOL_A_PLCA_TO_TMR = 7, + ETHTOOL_A_PLCA_BURST_CNT = 8, + ETHTOOL_A_PLCA_BURST_TMR = 9, + __ETHTOOL_A_PLCA_CNT = 10, + ETHTOOL_A_PLCA_MAX = 9, +}; + +enum { + ETHTOOL_A_MM_UNSPEC = 0, + ETHTOOL_A_MM_HEADER = 1, + ETHTOOL_A_MM_PMAC_ENABLED = 2, + ETHTOOL_A_MM_TX_ENABLED = 3, + ETHTOOL_A_MM_TX_ACTIVE = 4, + ETHTOOL_A_MM_TX_MIN_FRAG_SIZE = 5, + ETHTOOL_A_MM_RX_MIN_FRAG_SIZE = 6, + ETHTOOL_A_MM_VERIFY_ENABLED = 7, + ETHTOOL_A_MM_VERIFY_STATUS = 8, + ETHTOOL_A_MM_VERIFY_TIME = 9, + ETHTOOL_A_MM_MAX_VERIFY_TIME = 10, + ETHTOOL_A_MM_STATS = 11, + __ETHTOOL_A_MM_CNT = 12, + ETHTOOL_A_MM_MAX = 11, +}; + +enum { + ETHTOOL_A_MODULE_FW_FLASH_UNSPEC = 0, + ETHTOOL_A_MODULE_FW_FLASH_HEADER = 1, + ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME = 2, + 
ETHTOOL_A_MODULE_FW_FLASH_PASSWORD = 3, + ETHTOOL_A_MODULE_FW_FLASH_STATUS = 4, + ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG = 5, + ETHTOOL_A_MODULE_FW_FLASH_DONE = 6, + ETHTOOL_A_MODULE_FW_FLASH_TOTAL = 7, + __ETHTOOL_A_MODULE_FW_FLASH_CNT = 8, + ETHTOOL_A_MODULE_FW_FLASH_MAX = 7, +}; + +enum { + ETHTOOL_A_PHY_UNSPEC = 0, + ETHTOOL_A_PHY_HEADER = 1, + ETHTOOL_A_PHY_INDEX = 2, + ETHTOOL_A_PHY_DRVNAME = 3, + ETHTOOL_A_PHY_NAME = 4, + ETHTOOL_A_PHY_UPSTREAM_TYPE = 5, + ETHTOOL_A_PHY_UPSTREAM_INDEX = 6, + ETHTOOL_A_PHY_UPSTREAM_SFP_NAME = 7, + ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME = 8, + __ETHTOOL_A_PHY_CNT = 9, + ETHTOOL_A_PHY_MAX = 8, +}; + +struct ethnl_req_info { + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; + u32 phy_index; +}; + +struct ethnl_reply_data { + struct net_device *dev; +}; + +struct ethnl_request_ops { + u8 request_cmd; + u8 reply_cmd; + u16 hdr_attr; + unsigned int req_info_size; + unsigned int reply_data_size; + bool allow_nodev_do; + u8 set_ntf_cmd; + int (*parse_request)(struct ethnl_req_info *, struct nlattr **, struct netlink_ext_ack *); + int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, const struct genl_info *); + int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *); + int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, const struct ethnl_reply_data *); + void (*cleanup_data)(struct ethnl_reply_data *); + int (*set_validate)(struct ethnl_req_info *, struct genl_info *); + int (*set)(struct ethnl_req_info *, struct genl_info *); +}; + +struct rss_req_info { + struct ethnl_req_info base; + u32 rss_context; +}; + +struct rss_reply_data { + struct ethnl_reply_data base; + bool no_key_fields; + u32 indir_size; + u32 hkey_size; + u32 hfunc; + u32 input_xfrm; + u32 *indir_table; + u8 *hkey; +}; + +struct rss_nl_dump_ctx { + long unsigned int ifindex; + long unsigned int ctx_idx; + unsigned int match_ifindex; + unsigned int start_ctx; +}; + +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + +enum sock_flags { + SOCK_DEAD = 0, + SOCK_DONE = 1, + SOCK_URGINLINE = 2, + SOCK_KEEPOPEN = 3, + SOCK_LINGER = 4, + SOCK_DESTROY = 5, + SOCK_BROADCAST = 6, + SOCK_TIMESTAMP = 7, + SOCK_ZAPPED = 8, + SOCK_USE_WRITE_QUEUE = 9, + SOCK_DBG = 10, + SOCK_RCVTSTAMP = 11, + SOCK_RCVTSTAMPNS = 12, + SOCK_LOCALROUTE = 13, + SOCK_MEMALLOC = 14, + SOCK_TIMESTAMPING_RX_SOFTWARE = 15, + SOCK_FASYNC = 16, + SOCK_RXQ_OVFL = 17, + SOCK_ZEROCOPY = 18, + SOCK_WIFI_STATUS = 19, + SOCK_NOFCS = 20, + SOCK_FILTER_LOCKED = 21, + SOCK_SELECT_ERR_QUEUE = 22, + SOCK_RCU_FREE = 23, + SOCK_TXTIME = 24, + SOCK_XDP = 25, + SOCK_TSTAMP_NEW = 26, + SOCK_RCVMARK = 27, +}; + +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_queue_entry { + struct list_head list; + struct sk_buff *skb; + unsigned int id; + unsigned int hook_index; + struct net_device *physin; + struct net_device *physout; + struct nf_hook_state state; + u16 size; +}; + +struct nf_queue_handler { + int (*outfn)(struct nf_queue_entry *, unsigned int); + void (*nf_hook_drop)(struct net *); +}; + +struct ipv4_devconf { + void *sysctl; + int data[33]; + long unsigned int state[1]; +}; + +struct in_ifaddr; + +struct ip_mc_list; + +struct in_device { + struct net_device *dev; + netdevice_tracker dev_tracker; + refcount_t refcnt; + int dead; + struct in_ifaddr *ifa_list; + struct ip_mc_list *mc_list; + struct ip_mc_list **mc_hash; + int mc_count; + spinlock_t mc_tomb_lock; + 
struct ip_mc_list *mc_tomb; + long unsigned int mr_v1_seen; + long unsigned int mr_v2_seen; + long unsigned int mr_maxdelay; + long unsigned int mr_qi; + long unsigned int mr_qri; + unsigned char mr_qrv; + unsigned char mr_gq_running; + u32 mr_ifc_count; + struct timer_list mr_gq_timer; + struct timer_list mr_ifc_timer; + struct neigh_parms *arp_parms; + struct ipv4_devconf cnf; + struct callback_head callback_head; +}; + +struct in_ifaddr { + struct hlist_node addr_lst; + struct in_ifaddr *ifa_next; + struct in_device *ifa_dev; + struct callback_head callback_head; + __be32 ifa_local; + __be32 ifa_address; + __be32 ifa_mask; + __u32 ifa_rt_priority; + __be32 ifa_broadcast; + unsigned char ifa_scope; + unsigned char ifa_prefixlen; + unsigned char ifa_proto; + __u32 ifa_flags; + char ifa_label[16]; + __u32 ifa_valid_lft; + __u32 ifa_preferred_lft; + long unsigned int ifa_cstamp; + long unsigned int ifa_tstamp; +}; + +enum nf_nat_manip_type { + NF_NAT_MANIP_SRC = 0, + NF_NAT_MANIP_DST = 1, +}; + +struct inet6_ifaddr { + struct in6_addr addr; + __u32 prefix_len; + __u32 rt_priority; + __u32 valid_lft; + __u32 prefered_lft; + refcount_t refcnt; + spinlock_t lock; + int state; + __u32 flags; + __u8 dad_probes; + __u8 stable_privacy_retry; + __u16 scope; + __u64 dad_nonce; + long unsigned int cstamp; + long unsigned int tstamp; + struct delayed_work dad_work; + struct inet6_dev *idev; + struct fib6_info *rt; + struct hlist_node addr_lst; + struct list_head if_list; + struct list_head if_list_aux; + struct list_head tmp_list; + struct inet6_ifaddr *ifpub; + int regen_count; + bool tokenized; + u8 ifa_proto; + struct callback_head rcu; + struct in6_addr peer_addr; +}; + +struct nf_nat_range2 { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; + union nf_conntrack_man_proto base_proto; +}; + +struct tcphdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __u16 doff: 4; + __u16 res1: 4; + __u16 cwr: 1; + __u16 ece: 1; + __u16 urg: 1; + __u16 ack: 1; + __u16 psh: 1; + __u16 rst: 1; + __u16 syn: 1; + __u16 fin: 1; + __be16 window; + __sum16 check; + __be16 urg_ptr; +}; + +struct xt_ecn_info { + __u8 operation; + __u8 invert; + __u8 ip_ect; + union { + struct { + __u8 ect; + } tcp; + } proto; +}; + +struct ipt_ip { + struct in_addr src; + struct in_addr dst; + struct in_addr smsk; + struct in_addr dmsk; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u16 proto; + __u8 flags; + __u8 invflags; +}; + +struct ip6t_ip6 { + struct in6_addr src; + struct in6_addr dst; + struct in6_addr smsk; + struct in6_addr dmsk; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u16 proto; + __u8 tos; + __u8 flags; + __u8 invflags; +}; + +typedef long unsigned int netmem_ref; + +struct skb_frag { + netmem_ref netmem; + unsigned int len; + unsigned int offset; +}; + +typedef struct skb_frag skb_frag_t; + +enum { + SKBTX_HW_TSTAMP = 1, + SKBTX_SW_TSTAMP = 2, + SKBTX_IN_PROGRESS = 4, + SKBTX_HW_TSTAMP_USE_CYCLES = 8, + SKBTX_WIFI_STATUS = 16, + SKBTX_HW_TSTAMP_NETDEV = 32, + SKBTX_SCHED_TSTAMP = 64, +}; + +struct xsk_tx_metadata_compl { + __u64 *tx_timestamp; +}; + +struct skb_shared_info { + __u8 flags; + __u8 meta_len; + __u8 nr_frags; + __u8 tx_flags; + short unsigned int gso_size; + short unsigned int gso_segs; + struct sk_buff *frag_list; + union { + struct 
skb_shared_hwtstamps hwtstamps; + struct xsk_tx_metadata_compl xsk_meta; + }; + unsigned int gso_type; + u32 tskey; + atomic_t dataref; + unsigned int xdp_frags_size; + void *destructor_arg; + skb_frag_t frags[17]; +}; + +enum { + SKB_GSO_TCPV4 = 1, + SKB_GSO_DODGY = 2, + SKB_GSO_TCP_ECN = 4, + SKB_GSO_TCP_FIXEDID = 8, + SKB_GSO_TCPV6 = 16, + SKB_GSO_FCOE = 32, + SKB_GSO_GRE = 64, + SKB_GSO_GRE_CSUM = 128, + SKB_GSO_IPXIP4 = 256, + SKB_GSO_IPXIP6 = 512, + SKB_GSO_UDP_TUNNEL = 1024, + SKB_GSO_UDP_TUNNEL_CSUM = 2048, + SKB_GSO_PARTIAL = 4096, + SKB_GSO_TUNNEL_REMCSUM = 8192, + SKB_GSO_SCTP = 16384, + SKB_GSO_ESP = 32768, + SKB_GSO_UDP = 65536, + SKB_GSO_UDP_L4 = 131072, + SKB_GSO_FRAGLIST = 262144, +}; + +struct udp_hslot; + +struct udp_hslot_main; + +struct udp_table { + struct udp_hslot *hash; + struct udp_hslot_main *hash2; + struct udp_hslot *hash4; + unsigned int mask; + unsigned int log; +}; + +struct offload_callbacks { + struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t); + struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sk_buff *, int); +}; + +struct packet_offload { + __be16 type; + u16 priority; + struct offload_callbacks callbacks; + struct list_head list; +}; + +struct udphdr { + __be16 source; + __be16 dest; + __be16 len; + __sum16 check; +}; + +enum { + UDP_FLAGS_CORK = 0, + UDP_FLAGS_NO_CHECK6_TX = 1, + UDP_FLAGS_NO_CHECK6_RX = 2, + UDP_FLAGS_GRO_ENABLED = 3, + UDP_FLAGS_ACCEPT_FRAGLIST = 4, + UDP_FLAGS_ACCEPT_L4 = 5, + UDP_FLAGS_ENCAP_ENABLED = 6, + UDP_FLAGS_UDPLITE_SEND_CC = 7, + UDP_FLAGS_UDPLITE_RECV_CC = 8, +}; + +struct udp_sock { + struct inet_sock inet; + long unsigned int udp_flags; + int pending; + __u8 encap_type; + __u16 udp_lrpa_hash; + struct hlist_nulls_node udp_lrpa_node; + __u16 len; + __u16 gso_size; + __u16 pcslen; + __u16 pcrlen; + int (*encap_rcv)(struct sock *, struct sk_buff *); + void (*encap_err_rcv)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*encap_err_lookup)(struct sock *, struct sk_buff *); + void (*encap_destroy)(struct sock *); + struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sock *, struct sk_buff *, int); + long: 64; + long: 64; + long: 64; + long: 64; + struct sk_buff_head reader_queue; + int forward_deficit; + int forward_threshold; + bool peeking_with_offset; + long: 64; + long: 64; + long: 64; +}; + +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; + +struct udp_hslot { + union { + struct hlist_head head; + struct hlist_nulls_head nulls_head; + }; + int count; + spinlock_t lock; +}; + +struct udp_hslot_main { + struct udp_hslot hslot; + u32 hash4_cnt; + long: 64; +}; + +typedef struct sock * (*udp_lookup_t)(const struct sk_buff *, __be16, __be16); + +struct net_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, u32); + unsigned int no_policy: 1; + unsigned int icmp_strict_tag_validation: 1; + u32 secret; +}; + +struct inet6_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); + unsigned int flags; + u32 secret; +}; + +struct net_offload { + struct offload_callbacks callbacks; + unsigned int flags; + u32 secret; +}; + +struct rps_sock_flow_table; + +struct net_hotdata { + struct packet_offload ip_packet_offload; + struct net_offload tcpv4_offload; + struct net_protocol tcp_protocol; + 
struct net_offload udpv4_offload; + struct net_protocol udp_protocol; + struct packet_offload ipv6_packet_offload; + struct net_offload tcpv6_offload; + struct inet6_protocol tcpv6_protocol; + struct inet6_protocol udpv6_protocol; + struct net_offload udpv6_offload; + struct list_head offload_base; + struct list_head ptype_all; + struct kmem_cache *skbuff_cache; + struct kmem_cache *skbuff_fclone_cache; + struct kmem_cache *skb_small_head_cache; + struct rps_sock_flow_table *rps_sock_flow_table; + u32 rps_cpu_mask; + int gro_normal_batch; + int netdev_budget; + int netdev_budget_usecs; + int tstamp_prequeue; + int max_backlog; + int dev_tx_weight; + int dev_rx_weight; + int sysctl_max_skb_frags; + int sysctl_skb_defer_max; + int sysctl_mem_pcpu_rsv; +}; + +struct napi_gro_cb { + union { + struct { + void *frag0; + unsigned int frag0_len; + }; + struct { + struct sk_buff *last; + long unsigned int age; + }; + }; + int data_offset; + u16 flush; + u16 count; + u16 proto; + u16 pad; + union { + struct { + u16 gro_remcsum_start; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 ip_fixedid: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + }; + struct { + u16 gro_remcsum_start; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 ip_fixedid: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + } zeroed; + }; + __wsum csum; + union { + struct { + u16 network_offset; + u16 inner_network_offset; + }; + u16 network_offsets[2]; + }; +}; + +typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *); + +typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); + +struct xt_counters { + __u64 pcnt; + __u64 bcnt; +}; + +struct xt_table_info; + +struct xt_table { + struct list_head list; + unsigned int valid_hooks; + struct xt_table_info *private; + struct nf_hook_ops *ops; + struct module *me; + u_int8_t af; + int priority; + const char name[32]; +}; + +struct xt_table_info { + unsigned int size; + unsigned int number; + unsigned int initial_entries; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int stacksize; + void ***jumpstack; + unsigned char entries[0]; +}; + +struct ipt_entry { + struct ipt_ip ip; + unsigned int nfcache; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct ipt_replace { + char name[32]; + unsigned int valid_hooks; + unsigned int num_entries; + unsigned int size; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int num_counters; + struct xt_counters *counters; + struct ipt_entry entries[0]; +}; + +struct snmp_mib { + const char *name; + int entry; +}; + +enum switchdev_attr_id { + SWITCHDEV_ATTR_ID_UNDEFINED = 0, + SWITCHDEV_ATTR_ID_PORT_STP_STATE = 1, + SWITCHDEV_ATTR_ID_PORT_MST_STATE = 2, + SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 3, + SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS = 4, + SWITCHDEV_ATTR_ID_PORT_MROUTER = 5, + SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 6, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 7, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL = 8, + SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 9, + SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 10, + SWITCHDEV_ATTR_ID_BRIDGE_MST = 11, + SWITCHDEV_ATTR_ID_MRP_PORT_ROLE = 12, + SWITCHDEV_ATTR_ID_VLAN_MSTI = 13, +}; + +struct switchdev_mst_state { + u16 msti; + u8 state; +}; + +struct switchdev_brport_flags { 
+ long unsigned int val; + long unsigned int mask; +}; + +struct switchdev_vlan_msti { + u16 vid; + u16 msti; +}; + +struct switchdev_attr { + struct net_device *orig_dev; + enum switchdev_attr_id id; + u32 flags; + void *complete_priv; + void (*complete)(struct net_device *, int, void *); + union { + u8 stp_state; + struct switchdev_mst_state mst_state; + struct switchdev_brport_flags brport_flags; + bool mrouter; + clock_t ageing_time; + bool vlan_filtering; + u16 vlan_protocol; + bool mst; + bool mc_disabled; + u8 mrp_port_role; + struct switchdev_vlan_msti vlan_msti; + } u; +}; + +struct br_config_bpdu { + unsigned int topology_change: 1; + unsigned int topology_change_ack: 1; + bridge_id root; + int root_path_cost; + bridge_id bridge_id; + port_id port_id; + int message_age; + int max_age; + int hello_time; + int forward_delay; +}; + +struct fdt_property { + fdt32_t tag; + fdt32_t len; + fdt32_t nameoff; + char data[0]; +}; + +struct radix_tree_iter { + long unsigned int index; + long unsigned int next_index; + long unsigned int tags; + struct xa_node *node; +}; + +enum { + RADIX_TREE_ITER_TAG_MASK = 15, + RADIX_TREE_ITER_TAGGED = 16, + RADIX_TREE_ITER_CONTIG = 32, +}; + +struct ida_bitmap { + long unsigned int bitmap[16]; +}; + +struct uf_node { + struct uf_node *parent; + unsigned int rank; +}; + +struct xa_limit { + u32 max; + u32 min; +}; + +typedef int (*initcall_t)(); + +typedef int initcall_entry_t; + +enum system_states { + SYSTEM_BOOTING = 0, + SYSTEM_SCHEDULING = 1, + SYSTEM_FREEING_INITMEM = 2, + SYSTEM_RUNNING = 3, + SYSTEM_HALT = 4, + SYSTEM_POWER_OFF = 5, + SYSTEM_RESTART = 6, + SYSTEM_SUSPEND = 7, +}; + +typedef int (*parse_unknown_fn)(char *, char *, const char *, void *); + +enum { + TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = 1, + TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = 2, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM = 4, + TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT = 8, + TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM = 16, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT = 32, + __TCA_FLOWER_KEY_FLAGS_MAX = 33, +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL = 0, + FLOW_DISSECTOR_KEY_BASIC = 1, + FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, + FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, + FLOW_DISSECTOR_KEY_PORTS = 4, + FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, + FLOW_DISSECTOR_KEY_ICMP = 6, + FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, + FLOW_DISSECTOR_KEY_TIPC = 8, + FLOW_DISSECTOR_KEY_ARP = 9, + FLOW_DISSECTOR_KEY_VLAN = 10, + FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, + FLOW_DISSECTOR_KEY_GRE_KEYID = 12, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, + FLOW_DISSECTOR_KEY_ENC_KEYID = 14, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, + FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, + FLOW_DISSECTOR_KEY_ENC_PORTS = 18, + FLOW_DISSECTOR_KEY_MPLS = 19, + FLOW_DISSECTOR_KEY_TCP = 20, + FLOW_DISSECTOR_KEY_IP = 21, + FLOW_DISSECTOR_KEY_CVLAN = 22, + FLOW_DISSECTOR_KEY_ENC_IP = 23, + FLOW_DISSECTOR_KEY_ENC_OPTS = 24, + FLOW_DISSECTOR_KEY_META = 25, + FLOW_DISSECTOR_KEY_CT = 26, + FLOW_DISSECTOR_KEY_HASH = 27, + FLOW_DISSECTOR_KEY_NUM_OF_VLANS = 28, + FLOW_DISSECTOR_KEY_PPPOE = 29, + FLOW_DISSECTOR_KEY_L2TPV3 = 30, + FLOW_DISSECTOR_KEY_CFM = 31, + FLOW_DISSECTOR_KEY_IPSEC = 32, + FLOW_DISSECTOR_KEY_MAX = 33, +}; + +struct trace_event_raw_initcall_level { + struct trace_entry ent; + u32 __data_loc_level; + char __data[0]; +}; + +struct trace_event_raw_initcall_start { + struct trace_entry ent; + initcall_t func; + char __data[0]; +}; + +struct trace_event_raw_initcall_finish { + struct trace_entry ent; + initcall_t func; + 
int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_initcall_level { + u32 level; + const void *level_ptr_; +}; + +struct trace_event_data_offsets_initcall_start {}; + +struct trace_event_data_offsets_initcall_finish {}; + +typedef void (*btf_trace_initcall_level)(void *, const char *); + +typedef void (*btf_trace_initcall_start)(void *, initcall_t); + +typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int); + +struct blacklist_entry { + struct list_head next; + char *buf; +}; + +struct kvm_msi { + __u32 address_lo; + __u32 address_hi; + __u32 data; + __u32 flags; + __u32 devid; + __u8 pad[12]; +}; + +struct its_vlpi_map { + struct its_vm *vm; + struct its_vpe *vpe; + u32 vintid; + u8 properties; + bool db_enabled; +}; + +typedef int (*list_cmp_func_t)(void *, const struct list_head *, const struct list_head *); + +struct its_device { + struct list_head dev_list; + struct list_head itt_head; + u32 num_eventid_bits; + gpa_t itt_addr; + u32 device_id; +}; + +struct its_collection { + struct list_head coll_list; + u32 collection_id; + u32 target_addr; +}; + +struct its_ite { + struct list_head ite_list; + struct vgic_irq *irq; + struct its_collection *collection; + u32 event_id; +}; + +struct vgic_its_abi { + int cte_esz; + int dte_esz; + int ite_esz; + int (*save_tables)(struct vgic_its *); + int (*restore_tables)(struct vgic_its *); + int (*commit)(struct vgic_its *); +}; + +typedef int (*entry_fn_t)(struct vgic_its *, u32, void *, void *); + +struct hyp_page { + short unsigned int refcount; + short unsigned int order; +}; + +union hyp_spinlock { + u32 __val; + struct { + u16 next; + u16 owner; + }; +}; + +typedef union hyp_spinlock hyp_spinlock_t; + +struct hyp_pool { + hyp_spinlock_t lock; + struct list_head free_area[11]; + phys_addr_t range_start; + phys_addr_t range_end; + short unsigned int max_order; +}; + +struct cpu_sve_state { + __u64 zcr_el1; + __u32 fpsr; + __u32 fpcr; + __u8 sve_regs[0]; +}; + +struct kvm_host_data { + struct kvm_cpu_context host_ctxt; + union { + struct user_fpsimd_state *fpsimd_state; + struct cpu_sve_state *sve_state; + }; + union { + u64 *fpmr_ptr; + u64 fpmr; + }; + enum { + FP_STATE_FREE = 0, + FP_STATE_HOST_OWNED = 1, + FP_STATE_GUEST_OWNED = 2, + } fp_owner; + struct { + struct kvm_guest_debug_arch regs; + u64 pmscr_el1; + u64 trfcr_el1; + u64 mdcr_el2; + } host_debug_state; +}; + +struct pkvm_hyp_vcpu { + struct kvm_vcpu vcpu; + struct kvm_vcpu *host_vcpu; + long: 64; +}; + +struct pkvm_hyp_vm { + struct kvm kvm; + struct kvm *host_kvm; + struct kvm_pgtable pgt; + struct kvm_pgtable_mm_ops mm_ops; + struct hyp_pool pool; + hyp_spinlock_t lock; + unsigned int nr_vcpus; + struct pkvm_hyp_vcpu *vcpus[0]; +}; + +struct host_mmu { + struct kvm_arch arch; + struct kvm_pgtable pgt; + struct kvm_pgtable_mm_ops mm_ops; + hyp_spinlock_t lock; +}; + +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY = 1, + IRQ_SET_MASK_OK_DONE = 2, +}; + +enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_WIRED = 1, + DOMAIN_BUS_GENERIC_MSI = 2, + DOMAIN_BUS_PCI_MSI = 3, + DOMAIN_BUS_PLATFORM_MSI = 4, + DOMAIN_BUS_NEXUS = 5, + DOMAIN_BUS_IPI = 6, + DOMAIN_BUS_FSL_MC_MSI = 7, + DOMAIN_BUS_TI_SCI_INTA_MSI = 8, + DOMAIN_BUS_WAKEUP = 9, + DOMAIN_BUS_VMD_MSI = 10, + DOMAIN_BUS_PCI_DEVICE_MSI = 11, + DOMAIN_BUS_PCI_DEVICE_MSIX = 12, + DOMAIN_BUS_DMAR = 13, + DOMAIN_BUS_AMDVI = 14, + DOMAIN_BUS_DEVICE_MSI = 15, + DOMAIN_BUS_WIRED_TO_MSI = 16, +}; + +struct irq_domain_ops; + +struct irq_domain_chip_generic; + +struct msi_parent_ops; + +struct 
irq_domain { + struct list_head link; + const char *name; + const struct irq_domain_ops *ops; + void *host_data; + unsigned int flags; + unsigned int mapcount; + struct mutex mutex; + struct irq_domain *root; + struct fwnode_handle *fwnode; + enum irq_domain_bus_token bus_token; + struct irq_domain_chip_generic *gc; + struct device *dev; + struct device *pm_dev; + struct irq_domain *parent; + const struct msi_parent_ops *msi_parent_ops; + void (*exit)(struct irq_domain *); + irq_hw_number_t hwirq_max; + unsigned int revmap_size; + struct xarray revmap_tree; + struct irq_data *revmap[0]; +}; + +enum { + IRQCHIP_SET_TYPE_MASKED = 1, + IRQCHIP_EOI_IF_HANDLED = 2, + IRQCHIP_MASK_ON_SUSPEND = 4, + IRQCHIP_ONOFFLINE_ENABLED = 8, + IRQCHIP_SKIP_SET_WAKE = 16, + IRQCHIP_ONESHOT_SAFE = 32, + IRQCHIP_EOI_THREADED = 64, + IRQCHIP_SUPPORTS_LEVEL_MSI = 128, + IRQCHIP_SUPPORTS_NMI = 256, + IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = 512, + IRQCHIP_AFFINITY_PRE_STARTUP = 1024, + IRQCHIP_IMMUTABLE = 2048, +}; + +struct irq_chip_regs { + long unsigned int enable; + long unsigned int disable; + long unsigned int mask; + long unsigned int ack; + long unsigned int eoi; + long unsigned int type; +}; + +struct irq_chip_type { + struct irq_chip chip; + struct irq_chip_regs regs; + irq_flow_handler_t handler; + u32 type; + u32 mask_cache_priv; + u32 *mask_cache; +}; + +struct irq_chip_generic { + raw_spinlock_t lock; + void *reg_base; + u32 (*reg_readl)(void *); + void (*reg_writel)(u32, void *); + void (*suspend)(struct irq_chip_generic *); + void (*resume)(struct irq_chip_generic *); + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; + u32 wake_enabled; + u32 wake_active; + unsigned int num_ct; + void *private; + long unsigned int installed; + long unsigned int unused; + struct irq_domain *domain; + struct list_head list; + struct irq_chip_type chip_types[0]; +}; + +enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1, + IRQ_GC_INIT_NESTED_LOCK = 2, + IRQ_GC_MASK_CACHE_PER_TYPE = 4, + IRQ_GC_NO_MASK = 8, + IRQ_GC_BE_IO = 16, +}; + +struct irq_domain_chip_generic { + unsigned int irqs_per_chip; + unsigned int num_chips; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + void (*exit)(struct irq_chip_generic *); + struct irq_chip_generic *gc[0]; +}; + +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED = 1, +}; + +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed: 1; +}; + +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[16]; +}; + +struct irq_domain_ops { + int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); + int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); + int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t); + void (*unmap)(struct irq_domain *, unsigned int); + int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *); + int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); + void (*free)(struct irq_domain *, unsigned int, unsigned int); + int (*activate)(struct irq_domain *, struct irq_data *, bool); + void (*deactivate)(struct irq_domain *, struct irq_data *); + int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *); +}; + +struct msi_domain_info; + +struct msi_parent_ops { + u32 supported_flags; + u32 required_flags; + u32 bus_select_token; + u32 bus_select_mask; + const char *prefix; + bool 
(*init_dev_msi_info)(struct device *, struct irq_domain *, struct irq_domain *, struct msi_domain_info *); +}; + +typedef void (*task_work_func_t)(struct callback_head *); + +enum task_work_notify_mode { + TWA_NONE = 0, + TWA_RESUME = 1, + TWA_SIGNAL = 2, + TWA_SIGNAL_NO_IPI = 3, + TWA_NMI_CURRENT = 4, + TWA_FLAGS = 65280, + TWAF_NO_ALLOC = 256, +}; + +enum { + IRQTF_RUNTHREAD = 0, + IRQTF_WARNED = 1, + IRQTF_AFFINITY = 2, + IRQTF_FORCED_THREAD = 3, + IRQTF_READY = 4, +}; + +typedef struct { + raw_spinlock_t *lock; + long unsigned int flags; +} class_raw_spinlock_irqsave_t; + +enum clocksource_ids { + CSID_GENERIC = 0, + CSID_ARM_ARCH_COUNTER = 1, + CSID_S390_TOD = 2, + CSID_X86_TSC_EARLY = 3, + CSID_X86_TSC = 4, + CSID_X86_KVM_CLK = 5, + CSID_X86_ART = 6, + CSID_MAX = 7, +}; + +struct ktime_timestamps { + u64 mono; + u64 boot; + u64 real; +}; + +struct system_time_snapshot { + u64 cycles; + ktime_t real; + ktime_t boot; + ktime_t raw; + enum clocksource_ids cs_id; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; +}; + +struct system_counterval_t { + u64 cycles; + enum clocksource_ids cs_id; + bool use_nsecs; +}; + +enum vdso_clock_mode { + VDSO_CLOCKMODE_NONE = 0, + VDSO_CLOCKMODE_ARCHTIMER = 1, + VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT = 2, + VDSO_CLOCKMODE_MAX = 3, + VDSO_CLOCKMODE_TIMENS = 2147483647, +}; + +struct clocksource_base; + +struct clocksource { + u64 (*read)(struct clocksource *); + u64 mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; + u32 uncertainty_margin; + u64 max_cycles; + const char *name; + struct list_head list; + u32 freq_khz; + int rating; + enum clocksource_ids id; + enum vdso_clock_mode vdso_clock_mode; + long unsigned int flags; + struct clocksource_base *base; + int (*enable)(struct clocksource *); + void (*disable)(struct clocksource *); + void (*suspend)(struct clocksource *); + void (*resume)(struct clocksource *); + void (*mark_unstable)(struct clocksource *); + void (*tick_stable)(struct clocksource *); + struct module *owner; +}; + +struct clocksource_base { + enum clocksource_ids id; + u32 freq_khz; + u64 offset; + u32 numerator; + u32 denominator; +}; + +struct tk_read_base { + struct clocksource *clock; + u64 mask; + u64 cycle_last; + u32 mult; + u32 shift; + u64 xtime_nsec; + ktime_t base; + u64 base_real; +}; + +struct timekeeper { + struct tk_read_base tkr_mono; + u64 xtime_sec; + long unsigned int ktime_sec; + struct timespec64 wall_to_monotonic; + ktime_t offs_real; + ktime_t offs_boot; + ktime_t offs_tai; + s32 tai_offset; + struct tk_read_base tkr_raw; + u64 raw_sec; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + struct timespec64 monotonic_to_boot; + u64 cycle_interval; + u64 xtime_interval; + s64 xtime_remainder; + u64 raw_interval; + ktime_t next_leap_ktime; + u64 ntp_tick; + s64 ntp_error; + u32 ntp_error_shift; + u32 ntp_err_mult; + u32 skip_second_overflow; +}; + +typedef int (*cpu_stop_fn_t)(void *); + +struct audit_ntp_data {}; + +enum timekeeping_adv_mode { + TK_ADV_TICK = 0, + TK_ADV_FREQ = 1, +}; + +struct tk_data { + seqcount_raw_spinlock_t seq; + struct timekeeper timekeeper; + struct timekeeper shadow_timekeeper; + raw_spinlock_t lock; +}; + +struct tk_fast { + seqcount_latch_t seq; + struct tk_read_base base[2]; +}; + +struct vma_iterator { + struct ma_state mas; +}; + +typedef __u16 comp_t; + +typedef __u32 comp2_t; + +struct acct { + char ac_flag; + char ac_version; + __u16 ac_uid16; + __u16 ac_gid16; + __u16 ac_tty; + __u32 ac_btime; + comp_t ac_utime; + comp_t ac_stime; + comp_t ac_etime; + 
comp_t ac_mem; + comp_t ac_io; + comp_t ac_rw; + comp_t ac_minflt; + comp_t ac_majflt; + comp_t ac_swaps; + __u16 ac_ahz; + __u32 ac_exitcode; + char ac_comm[17]; + __u8 ac_etime_hi; + __u16 ac_etime_lo; + __u32 ac_uid; + __u32 ac_gid; +}; + +typedef struct acct acct_t; + +struct bsd_acct_struct { + struct fs_pin pin; + atomic_long_t count; + struct callback_head rcu; + struct mutex lock; + int active; + long unsigned int needcheck; + struct file *file; + struct pid_namespace *ns; + struct work_struct work; + struct completion done; +}; + +enum lockdown_reason { + LOCKDOWN_NONE = 0, + LOCKDOWN_MODULE_SIGNATURE = 1, + LOCKDOWN_DEV_MEM = 2, + LOCKDOWN_EFI_TEST = 3, + LOCKDOWN_KEXEC = 4, + LOCKDOWN_HIBERNATION = 5, + LOCKDOWN_PCI_ACCESS = 6, + LOCKDOWN_IOPORT = 7, + LOCKDOWN_MSR = 8, + LOCKDOWN_ACPI_TABLES = 9, + LOCKDOWN_DEVICE_TREE = 10, + LOCKDOWN_PCMCIA_CIS = 11, + LOCKDOWN_TIOCSSERIAL = 12, + LOCKDOWN_MODULE_PARAMETERS = 13, + LOCKDOWN_MMIOTRACE = 14, + LOCKDOWN_DEBUGFS = 15, + LOCKDOWN_XMON_WR = 16, + LOCKDOWN_BPF_WRITE_USER = 17, + LOCKDOWN_DBG_WRITE_KERNEL = 18, + LOCKDOWN_RTAS_ERROR_INJECTION = 19, + LOCKDOWN_INTEGRITY_MAX = 20, + LOCKDOWN_KCORE = 21, + LOCKDOWN_KPROBES = 22, + LOCKDOWN_BPF_READ_KERNEL = 23, + LOCKDOWN_DBG_READ_KERNEL = 24, + LOCKDOWN_PERF = 25, + LOCKDOWN_TRACEFS = 26, + LOCKDOWN_XMON_RW = 27, + LOCKDOWN_XFRM_SECRET = 28, + LOCKDOWN_CONFIDENTIALITY_MAX = 29, +}; + +struct trace_dynamic_info { + u16 len; + u16 offset; +}; + +enum dynevent_type { + DYNEVENT_TYPE_SYNTH = 1, + DYNEVENT_TYPE_KPROBE = 2, + DYNEVENT_TYPE_NONE = 3, +}; + +struct dynevent_cmd; + +typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *); + +struct dynevent_cmd { + struct seq_buf seq; + const char *event_name; + unsigned int n_fields; + enum dynevent_type type; + dynevent_create_fn_t run_command; + void *private_data; +}; + +struct synth_field_desc { + const char *type; + const char *name; +}; + +struct synth_trace_event; + +struct synth_event; + +struct synth_event_trace_state { + struct trace_event_buffer fbuffer; + struct synth_trace_event *entry; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int cur_field; + unsigned int n_u64; + bool disabled; + bool add_next; + bool add_name; +}; + +union trace_synth_field { + u8 as_u8; + u16 as_u16; + u32 as_u32; + u64 as_u64; + struct trace_dynamic_info as_dynamic; +}; + +struct synth_trace_event { + struct trace_entry ent; + union trace_synth_field fields[0]; +}; + +struct dyn_event_operations; + +struct dyn_event { + struct list_head list; + struct dyn_event_operations *ops; +}; + +struct synth_field; + +struct synth_event { + struct dyn_event devent; + int ref; + char *name; + struct synth_field **fields; + unsigned int n_fields; + struct synth_field **dynamic_fields; + unsigned int n_dynamic_fields; + unsigned int n_u64; + struct trace_event_class class; + struct trace_event_call call; + struct tracepoint *tp; + struct module *mod; +}; + +enum trace_iterator_flags { + TRACE_ITER_PRINT_PARENT = 1, + TRACE_ITER_SYM_OFFSET = 2, + TRACE_ITER_SYM_ADDR = 4, + TRACE_ITER_VERBOSE = 8, + TRACE_ITER_RAW = 16, + TRACE_ITER_HEX = 32, + TRACE_ITER_BIN = 64, + TRACE_ITER_BLOCK = 128, + TRACE_ITER_FIELDS = 256, + TRACE_ITER_PRINTK = 512, + TRACE_ITER_ANNOTATE = 1024, + TRACE_ITER_USERSTACKTRACE = 2048, + TRACE_ITER_SYM_USEROBJ = 4096, + TRACE_ITER_PRINTK_MSGONLY = 8192, + TRACE_ITER_CONTEXT_INFO = 16384, + TRACE_ITER_LATENCY_FMT = 32768, + TRACE_ITER_RECORD_CMD = 65536, + TRACE_ITER_RECORD_TGID = 131072, + TRACE_ITER_OVERWRITE = 262144, 
+ TRACE_ITER_STOP_ON_FREE = 524288, + TRACE_ITER_IRQ_INFO = 1048576, + TRACE_ITER_MARKERS = 2097152, + TRACE_ITER_EVENT_FORK = 4194304, + TRACE_ITER_TRACE_PRINTK = 8388608, + TRACE_ITER_PAUSE_ON_TRACE = 16777216, + TRACE_ITER_HASH_PTR = 33554432, + TRACE_ITER_FUNCTION = 67108864, + TRACE_ITER_FUNC_FORK = 134217728, + TRACE_ITER_DISPLAY_GRAPH = 268435456, + TRACE_ITER_STACKTRACE = 536870912, +}; + +struct dyn_event_operations { + struct list_head list; + int (*create)(const char *); + int (*show)(struct seq_file *, struct dyn_event *); + bool (*is_busy)(struct dyn_event *); + int (*free)(struct dyn_event *); + bool (*match)(const char *, const char *, int, const char **, struct dyn_event *); +}; + +typedef int (*dynevent_check_arg_fn_t)(void *); + +struct dynevent_arg { + const char *str; + char separator; +}; + +struct dynevent_arg_pair { + const char *lhs; + const char *rhs; + char operator; + char separator; +}; + +struct synth_field { + char *type; + char *name; + size_t size; + unsigned int offset; + unsigned int field_pos; + bool is_signed; + bool is_string; + bool is_dynamic; + bool is_stack; +}; + +enum { + SYNTH_ERR_BAD_NAME = 0, + SYNTH_ERR_INVALID_CMD = 1, + SYNTH_ERR_INVALID_DYN_CMD = 2, + SYNTH_ERR_EVENT_EXISTS = 3, + SYNTH_ERR_TOO_MANY_FIELDS = 4, + SYNTH_ERR_INCOMPLETE_TYPE = 5, + SYNTH_ERR_INVALID_TYPE = 6, + SYNTH_ERR_INVALID_FIELD = 7, + SYNTH_ERR_INVALID_ARRAY_SPEC = 8, +}; + +enum { + BPF_ANY = 0, + BPF_NOEXIST = 1, + BPF_EXIST = 2, + BPF_F_LOCK = 4, +}; + +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct callback_head rcu; +}; + +struct bpf_iter_meta { + union { + struct seq_file *seq; + }; + u64 session_id; + u64 seq_num; +}; + +struct bpf_iter__bpf_map_elem { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + void *value; + }; +}; + +struct bpf_iter_seq_array_map_info { + struct bpf_map *map; + void *percpu_value_buf; + u32 index; +}; + +struct prog_poke_elem { + struct list_head list; + struct bpf_prog_aux *aux; +}; + +typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[16]; + __u32 ifindex; + __u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_id; + __u64 map_extra; +}; + +struct bpf_tramp_link; + +struct bpf_tramp_links { + struct bpf_tramp_link *links[38]; + int nr_links; +}; + +struct bpf_tramp_link { + struct bpf_link link; + struct hlist_node tramp_hlist; + u64 cookie; +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + +struct bpf_struct_ops { + const struct bpf_verifier_ops *verifier_ops; + int (*init)(struct btf *); + int (*check_member)(const struct btf_type *, const struct btf_member *, const struct bpf_prog *); + int (*init_member)(const struct btf_type *, const struct btf_member *, void *, const void *); + int (*reg)(void *, struct bpf_link *); + void (*unreg)(void *, struct bpf_link *); + int (*update)(void *, void *, struct bpf_link *); + int (*validate)(void *); + void *cfi_stubs; + struct module *owner; + const char *name; + struct btf_func_model func_models[64]; +}; + +struct bpf_struct_ops_arg_info { + struct bpf_ctx_arg_aux *info; + u32 cnt; +}; + +struct bpf_struct_ops_desc { + struct 
bpf_struct_ops *st_ops; + const struct btf_type *type; + const struct btf_type *value_type; + u32 type_id; + u32 value_id; + struct bpf_struct_ops_arg_info *arg_info; +}; + +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT = 0, + BPF_STRUCT_OPS_STATE_INUSE = 1, + BPF_STRUCT_OPS_STATE_TOBEFREE = 2, + BPF_STRUCT_OPS_STATE_READY = 3, +}; + +struct bpf_struct_ops_common_value { + refcount_t refcnt; + enum bpf_struct_ops_state state; +}; + +struct rcu_synchronize { + struct callback_head head; + struct completion completion; +}; + +struct bpf_struct_ops_value { + struct bpf_struct_ops_common_value common; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + char data[0]; +}; + +struct bpf_struct_ops_map { + struct bpf_map map; + const struct bpf_struct_ops_desc *st_ops_desc; + struct mutex lock; + struct bpf_link **links; + struct bpf_ksym **ksyms; + u32 funcs_cnt; + u32 image_pages_cnt; + void *image_pages[8]; + struct btf *btf; + struct bpf_struct_ops_value *uvalue; + struct bpf_struct_ops_value kvalue; +}; + +struct bpf_struct_ops_link { + struct bpf_link link; + struct bpf_map *map; + wait_queue_head_t wait_hup; +}; + +enum { + IDX_MODULE_ID = 0, + IDX_ST_OPS_COMMON_VALUE_ID = 1, +}; + +typedef struct pglist_data pg_data_t; + +enum objext_flags { + OBJEXTS_ALLOC_FAIL = 4, + __NR_OBJEXTS_FLAGS = 8, +}; + +struct mlock_fbatch { + local_lock_t lock; + struct folio_batch fbatch; +}; + +typedef unsigned int kasan_vmalloc_flags_t; + +struct fs_struct { + int users; + spinlock_t lock; + seqcount_spinlock_t seq; + int umask; + int in_exec; + struct path root; + struct path pwd; +}; + +struct prepend_buffer { + char *buf; + int len; +}; + +struct file_handle { + __u32 handle_bytes; + int handle_type; + unsigned char f_handle[0]; +}; + +enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_ANY = -1, + FSNOTIFY_OBJ_TYPE_INODE = 0, + FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1, + FSNOTIFY_OBJ_TYPE_SB = 2, + FSNOTIFY_OBJ_TYPE_COUNT = 3, + FSNOTIFY_OBJ_TYPE_DETACHED = 3, +}; + +enum fid_type { + FILEID_ROOT = 0, + FILEID_INO32_GEN = 1, + FILEID_INO32_GEN_PARENT = 2, + FILEID_BTRFS_WITHOUT_PARENT = 77, + FILEID_BTRFS_WITH_PARENT = 78, + FILEID_BTRFS_WITH_PARENT_ROOT = 79, + FILEID_UDF_WITHOUT_PARENT = 81, + FILEID_UDF_WITH_PARENT = 82, + FILEID_NILFS_WITHOUT_PARENT = 97, + FILEID_NILFS_WITH_PARENT = 98, + FILEID_FAT_WITHOUT_PARENT = 113, + FILEID_FAT_WITH_PARENT = 114, + FILEID_INO64_GEN = 129, + FILEID_INO64_GEN_PARENT = 130, + FILEID_LUSTRE = 151, + FILEID_BCACHEFS_WITHOUT_PARENT = 177, + FILEID_BCACHEFS_WITH_PARENT = 178, + FILEID_KERNFS = 254, + FILEID_INVALID = 255, +}; + +struct inotify_inode_mark { + struct fsnotify_mark fsn_mark; + int wd; +}; + +enum handle_to_path_flags { + HANDLE_CHECK_PERMS = 1, + HANDLE_CHECK_SUBTREE = 2, +}; + +struct handle_to_path_ctx { + struct path root; + enum handle_to_path_flags flags; + unsigned int fh_flags; +}; + +enum kernfs_root_flag { + KERNFS_ROOT_CREATE_DEACTIVATED = 1, + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, + KERNFS_ROOT_SUPPORT_EXPORTOP = 4, + KERNFS_ROOT_SUPPORT_USER_XATTR = 8, +}; + +enum { + MBE_REFERENCED_B = 0, + MBE_REUSABLE_B = 1, +}; + +struct mb_cache_entry { + struct list_head e_list; + struct hlist_bl_node e_hash_list; + atomic_t e_refcnt; + u32 e_key; + long unsigned int e_flags; + u64 e_value; +}; + +enum jbd_state_bits { + BH_JBD = 16, + BH_JWrite = 17, + BH_Freed = 18, + BH_Revoked = 19, + BH_RevokeValid = 20, + BH_JBDDirty = 21, + BH_JournalHead = 22, + BH_Shadow = 23, + BH_Verified = 24, + BH_JBDPrivateStart = 25, +}; + 
+struct ext4_map_blocks { + ext4_fsblk_t m_pblk; + ext4_lblk_t m_lblk; + unsigned int m_len; + unsigned int m_flags; +}; + +struct ext4_inode { + __le16 i_mode; + __le16 i_uid; + __le32 i_size_lo; + __le32 i_atime; + __le32 i_ctime; + __le32 i_mtime; + __le32 i_dtime; + __le16 i_gid; + __le16 i_links_count; + __le32 i_blocks_lo; + __le32 i_flags; + union { + struct { + __le32 l_i_version; + } linux1; + struct { + __u32 h_i_translator; + } hurd1; + struct { + __u32 m_i_reserved1; + } masix1; + } osd1; + __le32 i_block[15]; + __le32 i_generation; + __le32 i_file_acl_lo; + __le32 i_size_high; + __le32 i_obso_faddr; + union { + struct { + __le16 l_i_blocks_high; + __le16 l_i_file_acl_high; + __le16 l_i_uid_high; + __le16 l_i_gid_high; + __le16 l_i_checksum_lo; + __le16 l_i_reserved; + } linux2; + struct { + __le16 h_i_reserved1; + __u16 h_i_mode_high; + __u16 h_i_uid_high; + __u16 h_i_gid_high; + __u32 h_i_author; + } hurd2; + struct { + __le16 h_i_reserved1; + __le16 m_i_file_acl_high; + __u32 m_i_reserved2[2]; + } masix2; + } osd2; + __le16 i_extra_isize; + __le16 i_checksum_hi; + __le32 i_ctime_extra; + __le32 i_mtime_extra; + __le32 i_atime_extra; + __le32 i_crtime; + __le32 i_crtime_extra; + __le32 i_version_hi; + __le32 i_projid; +}; + +enum { + EXT4_FC_REASON_XATTR = 0, + EXT4_FC_REASON_CROSS_RENAME = 1, + EXT4_FC_REASON_JOURNAL_FLAG_CHANGE = 2, + EXT4_FC_REASON_NOMEM = 3, + EXT4_FC_REASON_SWAP_BOOT = 4, + EXT4_FC_REASON_RESIZE = 5, + EXT4_FC_REASON_RENAME_DIR = 6, + EXT4_FC_REASON_FALLOC_RANGE = 7, + EXT4_FC_REASON_INODE_JOURNAL_DATA = 8, + EXT4_FC_REASON_ENCRYPTED_FILENAME = 9, + EXT4_FC_REASON_MAX = 10, +}; + +enum { + EXT4_STATE_NEW = 0, + EXT4_STATE_XATTR = 1, + EXT4_STATE_NO_EXPAND = 2, + EXT4_STATE_DA_ALLOC_CLOSE = 3, + EXT4_STATE_EXT_MIGRATE = 4, + EXT4_STATE_NEWENTRY = 5, + EXT4_STATE_MAY_INLINE_DATA = 6, + EXT4_STATE_EXT_PRECACHED = 7, + EXT4_STATE_LUSTRE_EA_INODE = 8, + EXT4_STATE_VERITY_IN_PROGRESS = 9, + EXT4_STATE_FC_COMMITTING = 10, + EXT4_STATE_ORPHAN_FILE = 11, +}; + +struct ext4_xattr_header { + __le32 h_magic; + __le32 h_refcount; + __le32 h_blocks; + __le32 h_hash; + __le32 h_checksum; + __u32 h_reserved[3]; +}; + +struct ext4_xattr_ibody_header { + __le32 h_magic; +}; + +struct ext4_xattr_entry { + __u8 e_name_len; + __u8 e_name_index; + __le16 e_value_offs; + __le32 e_value_inum; + __le32 e_value_size; + __le32 e_hash; + char e_name[0]; +}; + +struct ext4_xattr_info { + const char *name; + const void *value; + size_t value_len; + int name_index; + int in_inode; +}; + +struct ext4_xattr_search { + struct ext4_xattr_entry *first; + void *base; + void *end; + struct ext4_xattr_entry *here; + int not_found; +}; + +struct ext4_xattr_ibody_find { + struct ext4_xattr_search s; + struct ext4_iloc iloc; +}; + +struct ext4_xattr_inode_array { + unsigned int count; + struct inode *inodes[0]; +}; + +struct ext4_xattr_block_find { + struct ext4_xattr_search s; + struct buffer_head *bh; +}; + +typedef int __kernel_rwf_t; + +struct btrfs_data_container { + __u32 bytes_left; + __u32 bytes_missing; + __u32 elem_cnt; + __u32 elem_missed; + __u64 val[0]; +}; + +struct btrfs_ioctl_encoded_io_args { + const struct iovec *iov; + long unsigned int iovcnt; + __s64 offset; + __u64 flags; + __u64 len; + __u64 unencoded_len; + __u64 unencoded_offset; + __u32 compression; + __u32 encryption; + __u8 reserved[64]; +}; + +struct btrfs_extent_item { + __le64 refs; + __le64 generation; + __le64 flags; +}; + +struct btrfs_inode_extref { + __le64 parent_objectid; + __le64 index; + __le16 name_len; + 
__u8 name[0]; +} __attribute__((packed)); + +struct btrfs_tree_parent_check { + u64 owner_root; + u64 transid; + struct btrfs_key first_key; + bool has_first_key; + u8 level; +}; + +struct btrfs_bio; + +typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *); + +struct btrfs_ordered_sum; + +struct btrfs_bio { + struct btrfs_inode *inode; + u64 file_offset; + union { + struct { + u8 *csum; + u8 csum_inline[64]; + struct bvec_iter saved_iter; + }; + struct { + struct btrfs_ordered_extent *ordered; + struct btrfs_ordered_sum *sums; + u64 orig_physical; + }; + struct btrfs_tree_parent_check parent_check; + }; + btrfs_bio_end_io_t end_io; + void *private; + unsigned int mirror_num; + atomic_t pending_ios; + struct work_struct end_io_work; + struct btrfs_fs_info *fs_info; + blk_status_t status; + struct bio bio; +}; + +struct btrfs_ordered_sum { + u64 logical; + u32 len; + struct list_head list; + u8 sums[0]; +}; + +enum { + __PAGE_UNLOCK_BIT = 0, + PAGE_UNLOCK = 1, + __PAGE_UNLOCK_SEQ = 0, + __PAGE_START_WRITEBACK_BIT = 1, + PAGE_START_WRITEBACK = 2, + __PAGE_START_WRITEBACK_SEQ = 1, + __PAGE_END_WRITEBACK_BIT = 2, + PAGE_END_WRITEBACK = 4, + __PAGE_END_WRITEBACK_SEQ = 2, + __PAGE_SET_ORDERED_BIT = 3, + PAGE_SET_ORDERED = 8, + __PAGE_SET_ORDERED_SEQ = 3, +}; + +struct extent_changeset { + u64 bytes_changed; + struct ulist range_changed; +}; + +struct btrfs_map_token { + struct extent_buffer *eb; + char *kaddr; + long unsigned int offset; +}; + +enum { + BTRFS_ROOT_IN_TRANS_SETUP = 0, + BTRFS_ROOT_SHAREABLE = 1, + BTRFS_ROOT_TRACK_DIRTY = 2, + BTRFS_ROOT_IN_RADIX = 3, + BTRFS_ROOT_ORPHAN_ITEM_INSERTED = 4, + BTRFS_ROOT_DEFRAG_RUNNING = 5, + BTRFS_ROOT_FORCE_COW = 6, + BTRFS_ROOT_MULTI_LOG_TASKS = 7, + BTRFS_ROOT_DIRTY = 8, + BTRFS_ROOT_DELETING = 9, + BTRFS_ROOT_DEAD_RELOC_TREE = 10, + BTRFS_ROOT_DEAD_TREE = 11, + BTRFS_ROOT_HAS_LOG_TREE = 12, + BTRFS_ROOT_QGROUP_FLUSHING = 13, + BTRFS_ROOT_ORPHAN_CLEANUP = 14, + BTRFS_ROOT_UNFINISHED_DROP = 15, + BTRFS_ROOT_RESET_LOCKDEP_CLASS = 16, +}; + +struct btrfs_replace_extent_info { + u64 disk_offset; + u64 disk_len; + u64 data_offset; + u64 data_len; + u64 file_offset; + char *extent_buf; + bool is_new_extent; + bool update_times; + int qgroup_reserved; + int insertions; +}; + +struct btrfs_drop_extents_args { + struct btrfs_path *path; + u64 start; + u64 end; + bool drop_cache; + bool replace_extent; + u32 extent_item_size; + u64 drop_end; + u64 bytes_found; + bool extent_inserted; +}; + +struct btrfs_file_private { + void *filldir_buf; + u64 last_index; + struct extent_state *llseek_cached_state; + struct task_struct *owner_task; +}; + +struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; + bool is_nokey_name; +}; + +struct btrfs_file_extent { + u64 disk_bytenr; + u64 disk_num_bytes; + u64 num_bytes; + u64 ram_bytes; + u64 offset; + u8 compression; +}; + +struct btrfs_new_inode_args { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool orphan; + bool subvol; + struct posix_acl *default_acl; + struct posix_acl *acl; + struct fscrypt_name fname; +}; + +enum btrfs_ilock_type { + __BTRFS_ILOCK_SHARED_BIT = 0, + BTRFS_ILOCK_SHARED = 1, + __BTRFS_ILOCK_SHARED_SEQ = 0, + __BTRFS_ILOCK_TRY_BIT = 1, + BTRFS_ILOCK_TRY = 2, + __BTRFS_ILOCK_TRY_SEQ = 1, + __BTRFS_ILOCK_MMAP_BIT = 2, + BTRFS_ILOCK_MMAP = 4, + __BTRFS_ILOCK_MMAP_SEQ = 2, +}; + +struct btrfs_swapfile_pin { + struct rb_node node; + void *ptr; + struct inode *inode; + bool 
is_block_group; + int bg_extent_count; +}; + +struct btrfs_subpage { + spinlock_t lock; + union { + atomic_t eb_refs; + atomic_t nr_locked; + }; + long unsigned int bitmaps[0]; +}; + +struct btrfs_truncate_control { + struct btrfs_inode *inode; + u64 new_size; + u64 extents_found; + u64 last_size; + u64 sub_bytes; + u64 ino; + u32 min_type; + bool skip_ref_updates; + bool clear_extent_range; +}; + +typedef int iterate_extent_inodes_t(u64, u64, u64, u64, void *); + +struct btrfs_backref_walk_ctx { + u64 bytenr; + u64 extent_item_pos; + bool ignore_extent_item_pos; + bool skip_inode_ref_list; + struct btrfs_trans_handle *trans; + struct btrfs_fs_info *fs_info; + u64 time_seq; + struct ulist *refs; + struct ulist *roots; + bool (*cache_lookup)(u64, void *, const u64 **, int *); + void (*cache_store)(u64, const struct ulist *, void *); + iterate_extent_inodes_t *indirect_ref_iterator; + int (*check_extent_item)(u64, const struct btrfs_extent_item *, const struct extent_buffer *, void *); + bool (*skip_data_ref)(u64, u64, u64, void *); + void *user_ctx; +}; + +struct inode_fs_paths { + struct btrfs_path *btrfs_path; + struct btrfs_root *fs_root; + struct btrfs_data_container *fspath; +}; + +struct btrfs_iget_args { + u64 ino; + struct btrfs_root *root; +}; + +struct btrfs_rename_ctx { + u64 index; +}; + +struct data_reloc_warn { + struct btrfs_path path; + struct btrfs_fs_info *fs_info; + u64 extent_item_size; + u64 logical; + int mirror_num; +}; + +struct async_extent { + u64 start; + u64 ram_size; + u64 compressed_size; + struct folio **folios; + long unsigned int nr_folios; + int compress_type; + struct list_head list; +}; + +struct async_cow; + +struct async_chunk { + struct btrfs_inode *inode; + struct folio *locked_folio; + u64 start; + u64 end; + blk_opf_t write_flags; + struct list_head extents; + struct cgroup_subsys_state *blkcg_css; + struct btrfs_work work; + struct async_cow *async_cow; +}; + +struct async_cow { + atomic_t num_chunks; + struct async_chunk chunks[0]; +}; + +struct can_nocow_file_extent_args { + u64 start; + u64 end; + bool writeback_path; + bool strict; + bool free_path; + struct btrfs_file_extent file_extent; +}; + +struct btrfs_writepage_fixup { + struct folio *folio; + struct btrfs_inode *inode; + struct btrfs_work work; +}; + +struct dir_entry { + u64 ino; + u64 offset; + unsigned int type; + int name_len; +}; + +struct btrfs_delalloc_work { + struct inode *inode; + struct completion completion; + struct list_head list; + struct btrfs_work work; +}; + +struct btrfs_encoded_read_private { + wait_queue_head_t wait; + void *uring_ctx; + atomic_t pending; + blk_status_t status; +}; + +struct btrfs_swap_info { + u64 start; + u64 block_start; + u64 block_len; + u64 lowest_ppage; + u64 highest_ppage; + long unsigned int nr_pages; + int nr_extents; +}; + +typedef int __kernel_key_t; + +typedef unsigned int __kernel_mode_t; + +typedef int __kernel_ipc_pid_t; + +typedef __kernel_long_t __kernel_old_time_t; + +typedef __kernel_key_t key_t; + +typedef long unsigned int ulong; + +enum { + HUGETLB_SHMFS_INODE = 1, + HUGETLB_ANONHUGE_INODE = 2, +}; + +struct ipc_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + short unsigned int seq; +}; + +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned char __pad1[0]; + short unsigned int seq; + short unsigned int __pad2; 
+ __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; +}; + +struct kern_ipc_perm { + spinlock_t lock; + bool deleted; + int id; + key_t key; + kuid_t uid; + kgid_t gid; + kuid_t cuid; + kgid_t cgid; + umode_t mode; + long unsigned int seq; + void *security; + struct rhash_head khtnode; + struct callback_head rcu; + refcount_t refcount; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct shmid_ds { + struct ipc_perm shm_perm; + int shm_segsz; + __kernel_old_time_t shm_atime; + __kernel_old_time_t shm_dtime; + __kernel_old_time_t shm_ctime; + __kernel_ipc_pid_t shm_cpid; + __kernel_ipc_pid_t shm_lpid; + short unsigned int shm_nattch; + short unsigned int shm_unused; + void *shm_unused2; + void *shm_unused3; +}; + +struct shmid64_ds { + struct ipc64_perm shm_perm; + __kernel_size_t shm_segsz; + long int shm_atime; + long int shm_dtime; + long int shm_ctime; + __kernel_pid_t shm_cpid; + __kernel_pid_t shm_lpid; + long unsigned int shm_nattch; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct shminfo64 { + long unsigned int shmmax; + long unsigned int shmmin; + long unsigned int shmmni; + long unsigned int shmseg; + long unsigned int shmall; + long unsigned int __unused1; + long unsigned int __unused2; + long unsigned int __unused3; + long unsigned int __unused4; +}; + +struct shminfo { + int shmmax; + int shmmin; + int shmmni; + int shmseg; + int shmall; +}; + +struct shm_info { + int used_ids; + __kernel_ulong_t shm_tot; + __kernel_ulong_t shm_rss; + __kernel_ulong_t shm_swp; + __kernel_ulong_t swap_attempts; + __kernel_ulong_t swap_successes; +}; + +struct shared_policy {}; + +struct shmem_inode_info { + spinlock_t lock; + unsigned int seals; + long unsigned int flags; + long unsigned int alloced; + long unsigned int swapped; + union { + struct offset_ctx dir_offsets; + struct { + struct list_head shrinklist; + struct list_head swaplist; + }; + }; + struct timespec64 i_crtime; + struct shared_policy policy; + struct simple_xattrs xattrs; + long unsigned int fallocend; + unsigned int fsflags; + atomic_t stop_eviction; + struct inode vfs_inode; +}; + +struct ipc_params { + key_t key; + int flg; + union { + size_t size; + int nsems; + } u; +}; + +struct ipc_ops { + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); +}; + +struct shmid_kernel { + struct kern_ipc_perm shm_perm; + struct file *shm_file; + long unsigned int shm_nattch; + long unsigned int shm_segsz; + time64_t shm_atim; + time64_t shm_dtim; + time64_t shm_ctim; + struct pid *shm_cprid; + struct pid *shm_lprid; + struct ucounts *mlock_ucounts; + struct task_struct *shm_creator; + struct list_head shm_clist; + struct ipc_namespace *ns; + long: 64; + long: 64; + long: 64; +}; + +struct shm_file_data { + int id; + struct ipc_namespace *ns; + struct file *file; + const struct vm_operations_struct *vm_ops; +}; + +struct sha512_state { + u64 state[8]; + u64 count[2]; + u8 buf[128]; +}; + +typedef void sha512_block_fn(struct sha512_state *, const u8 *, int); + +struct __key_reference_with_attributes; + +typedef struct __key_reference_with_attributes *key_ref_t; + +struct key_preparsed_payload { + const char *orig_description; + char *description; + union key_payload payload; + const void *data; + size_t datalen; + size_t quotalen; + time64_t expiry; +}; + +struct key_match_data { + bool (*cmp)(const struct key *, const struct key_match_data *); + const void 
*raw_data; + void *preparsed; + unsigned int lookup_type; +}; + +enum kernel_pkey_operation { + kernel_pkey_encrypt = 0, + kernel_pkey_decrypt = 1, + kernel_pkey_sign = 2, + kernel_pkey_verify = 3, +}; + +struct kernel_pkey_params { + struct key *key; + const char *encoding; + const char *hash_algo; + char *info; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + enum kernel_pkey_operation op: 8; +}; + +struct kernel_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; +}; + +struct asymmetric_key_id { + short unsigned int len; + unsigned char data[0]; +}; + +enum OID { + OID_id_dsa_with_sha1 = 0, + OID_id_dsa = 1, + OID_id_ecPublicKey = 2, + OID_id_prime192v1 = 3, + OID_id_prime256v1 = 4, + OID_id_ecdsa_with_sha1 = 5, + OID_id_ecdsa_with_sha224 = 6, + OID_id_ecdsa_with_sha256 = 7, + OID_id_ecdsa_with_sha384 = 8, + OID_id_ecdsa_with_sha512 = 9, + OID_rsaEncryption = 10, + OID_sha1WithRSAEncryption = 11, + OID_sha256WithRSAEncryption = 12, + OID_sha384WithRSAEncryption = 13, + OID_sha512WithRSAEncryption = 14, + OID_sha224WithRSAEncryption = 15, + OID_data = 16, + OID_signed_data = 17, + OID_email_address = 18, + OID_contentType = 19, + OID_messageDigest = 20, + OID_signingTime = 21, + OID_smimeCapabilites = 22, + OID_smimeAuthenticatedAttrs = 23, + OID_mskrb5 = 24, + OID_krb5 = 25, + OID_krb5u2u = 26, + OID_msIndirectData = 27, + OID_msStatementType = 28, + OID_msSpOpusInfo = 29, + OID_msPeImageDataObjId = 30, + OID_msIndividualSPKeyPurpose = 31, + OID_msOutlookExpress = 32, + OID_ntlmssp = 33, + OID_negoex = 34, + OID_spnego = 35, + OID_IAKerb = 36, + OID_PKU2U = 37, + OID_Scram = 38, + OID_certAuthInfoAccess = 39, + OID_sha1 = 40, + OID_id_ansip384r1 = 41, + OID_id_ansip521r1 = 42, + OID_sha256 = 43, + OID_sha384 = 44, + OID_sha512 = 45, + OID_sha224 = 46, + OID_commonName = 47, + OID_surname = 48, + OID_countryName = 49, + OID_locality = 50, + OID_stateOrProvinceName = 51, + OID_organizationName = 52, + OID_organizationUnitName = 53, + OID_title = 54, + OID_description = 55, + OID_name = 56, + OID_givenName = 57, + OID_initials = 58, + OID_generationalQualifier = 59, + OID_subjectKeyIdentifier = 60, + OID_keyUsage = 61, + OID_subjectAltName = 62, + OID_issuerAltName = 63, + OID_basicConstraints = 64, + OID_crlDistributionPoints = 65, + OID_certPolicies = 66, + OID_authorityKeyIdentifier = 67, + OID_extKeyUsage = 68, + OID_NetlogonMechanism = 69, + OID_appleLocalKdcSupported = 70, + OID_gostCPSignA = 71, + OID_gostCPSignB = 72, + OID_gostCPSignC = 73, + OID_gost2012PKey256 = 74, + OID_gost2012PKey512 = 75, + OID_gost2012Digest256 = 76, + OID_gost2012Digest512 = 77, + OID_gost2012Signature256 = 78, + OID_gost2012Signature512 = 79, + OID_gostTC26Sign256A = 80, + OID_gostTC26Sign256B = 81, + OID_gostTC26Sign256C = 82, + OID_gostTC26Sign256D = 83, + OID_gostTC26Sign512A = 84, + OID_gostTC26Sign512B = 85, + OID_gostTC26Sign512C = 86, + OID_sm2 = 87, + OID_sm3 = 88, + OID_SM2_with_SM3 = 89, + OID_sm3WithRSAEncryption = 90, + OID_TPMLoadableKey = 91, + OID_TPMImportableKey = 92, + OID_TPMSealedData = 93, + OID_sha3_256 = 94, + OID_sha3_384 = 95, + OID_sha3_512 = 96, + OID_id_ecdsa_with_sha3_256 = 97, + OID_id_ecdsa_with_sha3_384 = 98, + OID_id_ecdsa_with_sha3_512 = 99, + OID_id_rsassa_pkcs1_v1_5_with_sha3_256 = 100, + OID_id_rsassa_pkcs1_v1_5_with_sha3_384 = 101, + OID_id_rsassa_pkcs1_v1_5_with_sha3_512 = 102, + OID__NR = 103, +}; + +struct public_key { + void *key; + u32 keylen; + enum OID 
algo; + void *params; + u32 paramlen; + bool key_is_private; + const char *id_type; + const char *pkey_algo; + long unsigned int key_eflags; +}; + +struct public_key_signature { + struct asymmetric_key_id *auth_ids[3]; + u8 *s; + u8 *digest; + u32 s_size; + u32 digest_size; + const char *pkey_algo; + const char *hash_algo; + const char *encoding; +}; + +struct x509_certificate { + struct x509_certificate *next; + struct x509_certificate *signer; + struct public_key *pub; + struct public_key_signature *sig; + char *issuer; + char *subject; + struct asymmetric_key_id *id; + struct asymmetric_key_id *skid; + time64_t valid_from; + time64_t valid_to; + const void *tbs; + unsigned int tbs_size; + unsigned int raw_sig_size; + const void *raw_sig; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_subject; + unsigned int raw_subject_size; + unsigned int raw_skid_size; + const void *raw_skid; + unsigned int index; + bool seen; + bool verified; + bool self_signed; + bool unsupported_sig; + bool blacklisted; +}; + +struct pkcs7_signed_info { + struct pkcs7_signed_info *next; + struct x509_certificate *signer; + unsigned int index; + bool unsupported_crypto; + bool blacklisted; + const void *msgdigest; + unsigned int msgdigest_len; + unsigned int authattrs_len; + const void *authattrs; + long unsigned int aa_set; + time64_t signing_time; + struct public_key_signature *sig; +}; + +struct pkcs7_message { + struct x509_certificate *certs; + struct x509_certificate *crl; + struct pkcs7_signed_info *signed_infos; + u8 version; + bool have_authattrs; + enum OID data_type; + size_t data_len; + size_t data_hdrlen; + const void *data; +}; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +struct bio_integrity_payload { + struct bio *bip_bio; + struct bvec_iter bip_iter; + short unsigned int bip_vcnt; + short unsigned int bip_max_vcnt; + short unsigned int bip_flags; + int: 0; + struct bvec_iter bio_iter; + struct work_struct bip_work; + struct bio_vec *bip_vec; + struct bio_vec bip_inline_vecs[0]; +}; + +enum xen_domain_type { + XEN_NATIVE = 0, + XEN_PV_DOMAIN = 1, + XEN_HVM_DOMAIN = 2, +}; + +enum bio_merge_status { + BIO_MERGE_OK = 0, + BIO_MERGE_NONE = 1, + BIO_MERGE_FAILED = 2, +}; + +enum blkg_iostat_type { + BLKG_IOSTAT_READ = 0, + BLKG_IOSTAT_WRITE = 1, + BLKG_IOSTAT_DISCARD = 2, + BLKG_IOSTAT_NR = 3, +}; + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ = 0, + BLKG_RWSTAT_WRITE = 1, + BLKG_RWSTAT_SYNC = 2, + BLKG_RWSTAT_ASYNC = 3, + BLKG_RWSTAT_DISCARD = 4, + BLKG_RWSTAT_NR = 5, + BLKG_RWSTAT_TOTAL = 5, +}; + +enum { + BLK_TAG_ALLOC_FIFO = 0, + BLK_TAG_ALLOC_RR = 1, + BLK_TAG_ALLOC_MAX = 2, +}; + +typedef bool busy_tag_iter_fn(struct request *, void *); + +enum { + BLK_MQ_F_SHOULD_MERGE = 1, + BLK_MQ_F_TAG_QUEUE_SHARED = 2, + BLK_MQ_F_STACKING = 4, + BLK_MQ_F_TAG_HCTX_SHARED = 8, + BLK_MQ_F_BLOCKING = 16, + BLK_MQ_F_NO_SCHED = 32, + BLK_MQ_F_NO_SCHED_BY_DEFAULT = 64, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 7, + BLK_MQ_F_ALLOC_POLICY_BITS = 1, +}; + +enum { + BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_INACTIVE = 3, + BLK_MQ_S_MAX = 4, +}; + +struct show_busy_params { + struct seq_file *m; + struct blk_mq_hw_ctx *hctx; +}; + +struct io_ring_ctx; + +struct io_wq; + +struct io_uring_task { + int cached_refs; + const struct io_ring_ctx *last; + struct task_struct *task; + struct io_wq *io_wq; + struct file *registered_rings[16]; + struct xarray xa; + struct 
wait_queue_head wait; + atomic_t in_cancel; + atomic_t inflight_tracked; + struct percpu_counter inflight; + long: 64; + struct { + struct llist_head task_list; + struct callback_head task_work; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; +}; + +struct io_uring_sqe { + __u8 opcode; + __u8 flags; + __u16 ioprio; + __s32 fd; + union { + __u64 off; + __u64 addr2; + struct { + __u32 cmd_op; + __u32 __pad1; + }; + }; + union { + __u64 addr; + __u64 splice_off_in; + struct { + __u32 level; + __u32 optname; + }; + }; + __u32 len; + union { + __kernel_rwf_t rw_flags; + __u32 fsync_flags; + __u16 poll_events; + __u32 poll32_events; + __u32 sync_range_flags; + __u32 msg_flags; + __u32 timeout_flags; + __u32 accept_flags; + __u32 cancel_flags; + __u32 open_flags; + __u32 statx_flags; + __u32 fadvise_advice; + __u32 splice_flags; + __u32 rename_flags; + __u32 unlink_flags; + __u32 hardlink_flags; + __u32 xattr_flags; + __u32 msg_ring_flags; + __u32 uring_cmd_flags; + __u32 waitid_flags; + __u32 futex_flags; + __u32 install_fd_flags; + __u32 nop_flags; + }; + __u64 user_data; + union { + __u16 buf_index; + __u16 buf_group; + }; + __u16 personality; + union { + __s32 splice_fd_in; + __u32 file_index; + __u32 optlen; + struct { + __u16 addr_len; + __u16 __pad3[1]; + }; + }; + union { + struct { + __u64 addr3; + __u64 __pad2[1]; + }; + __u64 optval; + __u8 cmd[0]; + }; +}; + +struct io_uring_cqe { + __u64 user_data; + __s32 res; + __u32 flags; + __u64 big_cqe[0]; +}; + +enum io_uring_cmd_flags { + IO_URING_F_COMPLETE_DEFER = 1, + IO_URING_F_UNLOCKED = 2, + IO_URING_F_MULTISHOT = 4, + IO_URING_F_IOWQ = 8, + IO_URING_F_NONBLOCK = -2147483648, + IO_URING_F_SQE128 = 256, + IO_URING_F_CQE32 = 512, + IO_URING_F_IOPOLL = 1024, + IO_URING_F_CANCEL = 2048, + IO_URING_F_COMPAT = 4096, + IO_URING_F_TASK_DEAD = 8192, +}; + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +struct io_wq_work { + struct io_wq_work_node list; + atomic_t flags; + int cancel_seq; +}; + +struct io_rsrc_node; + +struct io_rsrc_data { + unsigned int nr; + struct io_rsrc_node **nodes; +}; + +struct io_mapped_ubuf; + +struct io_rsrc_node { + unsigned char type; + int refs; + u64 tag; + union { + long unsigned int file_ptr; + struct io_mapped_ubuf *buf; + }; +}; + +struct io_file_table { + struct io_rsrc_data data; + long unsigned int *bitmap; + unsigned int alloc_hint; +}; + +struct io_hash_bucket { + struct hlist_head list; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct io_hash_table { + struct io_hash_bucket *hbs; + unsigned int hash_bits; +}; + +struct io_mapped_region { + struct page **pages; + void *vmap_ptr; + size_t nr_pages; +}; + +struct io_kiocb; + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + struct io_wq_work_node free_list; + struct io_wq_work_list compl_reqs; + struct io_submit_link link; + bool plug_started; + bool need_plug; + bool cq_flush; + short unsigned int submit_nr; + struct blk_plug plug; +}; + +struct io_alloc_cache { + void **entries; + unsigned int nr_cached; + unsigned int max_cached; + size_t elem_size; +}; + +struct io_restriction { + long unsigned int register_op[1]; + long unsigned int sqe_op[1]; + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_rings; + +struct io_ev_fd; + +struct io_sq_data; + +struct io_wq_hash; + +struct io_ring_ctx 
{ + struct { + unsigned int flags; + unsigned int drain_next: 1; + unsigned int restricted: 1; + unsigned int off_timeout_used: 1; + unsigned int drain_active: 1; + unsigned int has_evfd: 1; + unsigned int task_complete: 1; + unsigned int lockless_cq: 1; + unsigned int syscall_iopoll: 1; + unsigned int poll_activated: 1; + unsigned int drain_disabled: 1; + unsigned int compat: 1; + unsigned int iowq_limits_set: 1; + struct task_struct *submitter_task; + struct io_rings *rings; + struct percpu_ref refs; + clockid_t clockid; + enum tk_offsets clock_offset; + enum task_work_notify_mode notify_method; + unsigned int sq_thread_idle; + long: 64; + }; + struct { + struct mutex uring_lock; + u32 *sq_array; + struct io_uring_sqe *sq_sqes; + unsigned int cached_sq_head; + unsigned int sq_entries; + atomic_t cancel_seq; + bool poll_multi_queue; + struct io_wq_work_list iopoll_list; + struct io_file_table file_table; + struct io_rsrc_data buf_table; + struct io_submit_state submit_state; + struct xarray io_bl_xa; + struct io_hash_table cancel_table; + struct io_alloc_cache apoll_cache; + struct io_alloc_cache netmsg_cache; + struct io_alloc_cache rw_cache; + struct io_alloc_cache uring_cache; + struct hlist_head cancelable_uring_cmd; + u64 hybrid_poll_time; + }; + struct { + struct io_uring_cqe *cqe_cached; + struct io_uring_cqe *cqe_sentinel; + unsigned int cached_cq_tail; + unsigned int cq_entries; + struct io_ev_fd *io_ev_fd; + unsigned int cq_extra; + void *cq_wait_arg; + size_t cq_wait_size; + long: 64; + }; + struct { + struct llist_head work_llist; + struct llist_head retry_llist; + long unsigned int check_cq; + atomic_t cq_wait_nr; + atomic_t cq_timeouts; + struct wait_queue_head cq_wait; + long: 64; + }; + struct { + spinlock_t timeout_lock; + struct list_head timeout_list; + struct list_head ltimeout_list; + unsigned int cq_last_tm_flush; + long: 64; + long: 64; + }; + spinlock_t completion_lock; + struct list_head io_buffers_comp; + struct list_head cq_overflow_list; + struct hlist_head waitid_list; + struct hlist_head futex_list; + struct io_alloc_cache futex_cache; + const struct cred *sq_creds; + struct io_sq_data *sq_data; + struct wait_queue_head sqo_sq_wait; + struct list_head sqd_list; + unsigned int file_alloc_start; + unsigned int file_alloc_end; + struct list_head io_buffers_cache; + struct wait_queue_head poll_wq; + struct io_restriction restrictions; + u32 pers_next; + struct xarray personalities; + struct io_wq_hash *hash_map; + struct user_struct *user; + struct mm_struct *mm_account; + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + u32 iowq_limits[2]; + struct callback_head poll_wq_task_work; + struct list_head defer_list; + struct io_alloc_cache msg_cache; + spinlock_t msg_lock; + struct list_head napi_list; + spinlock_t napi_lock; + ktime_t napi_busy_poll_dt; + bool napi_prefer_busy_poll; + u8 napi_track_mode; + struct hlist_head napi_ht[16]; + unsigned int evfd_last_cq_tail; + struct mutex resize_lock; + short unsigned int n_ring_pages; + short unsigned int n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; + struct io_mapped_region param_region; + long: 64; +}; + +struct io_uring { + u32 head; + u32 tail; +}; + +struct io_rings { + struct io_uring sq; + struct io_uring cq; + u32 sq_ring_mask; + u32 cq_ring_mask; + u32 sq_ring_entries; + u32 cq_ring_entries; + u32 sq_dropped; + atomic_t sq_flags; + u32 cq_flags; + u32 cq_overflow; + long: 
64; + long: 64; + struct io_uring_cqe cqes[0]; +}; + +struct io_cmd_data { + struct file *file; + __u8 data[56]; +}; + +typedef u64 io_req_flags_t; + +struct io_cqe { + __u64 user_data; + __s32 res; + union { + __u32 flags; + int fd; + }; +}; + +struct io_tw_state; + +typedef void (*io_req_tw_func_t)(struct io_kiocb *, struct io_tw_state *); + +struct io_task_work { + struct llist_node node; + io_req_tw_func_t func; +}; + +struct io_buffer; + +struct io_buffer_list; + +struct async_poll; + +struct io_kiocb { + union { + struct file *file; + struct io_cmd_data cmd; + }; + u8 opcode; + u8 iopoll_completed; + u16 buf_index; + unsigned int nr_tw; + io_req_flags_t flags; + struct io_cqe cqe; + struct io_ring_ctx *ctx; + struct io_uring_task *tctx; + union { + struct io_buffer *kbuf; + struct io_buffer_list *buf_list; + struct io_rsrc_node *buf_node; + }; + union { + struct io_wq_work_node comp_list; + __poll_t apoll_events; + }; + struct io_rsrc_node *file_node; + atomic_t refs; + bool cancel_seq_set; + struct io_task_work io_task_work; + union { + struct hlist_node hash_node; + u64 iopoll_start; + }; + struct async_poll *apoll; + void *async_data; + atomic_t poll_refs; + struct io_kiocb *link; + const struct cred *creds; + struct io_wq_work work; + struct { + u64 extra1; + u64 extra2; + } big_cqe; +}; + +struct io_wq_hash { + refcount_t refs; + long unsigned int map; + struct wait_queue_head wait; +}; + +struct io_tw_state {}; + +enum { + REQ_F_FIXED_FILE = 1ULL, + REQ_F_IO_DRAIN = 2ULL, + REQ_F_LINK = 4ULL, + REQ_F_HARDLINK = 8ULL, + REQ_F_FORCE_ASYNC = 16ULL, + REQ_F_BUFFER_SELECT = 32ULL, + REQ_F_CQE_SKIP = 64ULL, + REQ_F_FAIL = 256ULL, + REQ_F_INFLIGHT = 512ULL, + REQ_F_CUR_POS = 1024ULL, + REQ_F_NOWAIT = 2048ULL, + REQ_F_LINK_TIMEOUT = 4096ULL, + REQ_F_NEED_CLEANUP = 8192ULL, + REQ_F_POLLED = 16384ULL, + REQ_F_IOPOLL_STATE = 32768ULL, + REQ_F_BUFFER_SELECTED = 65536ULL, + REQ_F_BUFFER_RING = 131072ULL, + REQ_F_REISSUE = 262144ULL, + REQ_F_SUPPORT_NOWAIT = 268435456ULL, + REQ_F_ISREG = 536870912ULL, + REQ_F_CREDS = 524288ULL, + REQ_F_REFCOUNT = 1048576ULL, + REQ_F_ARM_LTIMEOUT = 2097152ULL, + REQ_F_ASYNC_DATA = 4194304ULL, + REQ_F_SKIP_LINK_CQES = 8388608ULL, + REQ_F_SINGLE_POLL = 16777216ULL, + REQ_F_DOUBLE_POLL = 33554432ULL, + REQ_F_APOLL_MULTISHOT = 67108864ULL, + REQ_F_CLEAR_POLLIN = 134217728ULL, + REQ_F_POLL_NO_LAZY = 1073741824ULL, + REQ_F_CAN_POLL = 2147483648ULL, + REQ_F_BL_EMPTY = 4294967296ULL, + REQ_F_BL_NO_RECYCLE = 8589934592ULL, + REQ_F_BUFFERS_COMMIT = 17179869184ULL, + REQ_F_BUF_NODE = 34359738368ULL, +}; + +struct io_mapped_ubuf { + u64 ubuf; + unsigned int len; + unsigned int nr_bvecs; + unsigned int folio_shift; + refcount_t refs; + long unsigned int acct_pages; + struct bio_vec bvec[0]; +}; + +enum { + IOU_OK = 0, + IOU_ISSUE_SKIP_COMPLETE = -529, + IOU_REQUEUE = -3072, + IOU_STOP_MULTISHOT = -125, +}; + +struct io_ftrunc { + struct file *file; + loff_t len; +}; + +typedef unsigned char Byte; + +typedef long unsigned int uLong; + +struct internal_state; + +struct z_stream_s { + const Byte *next_in; + uLong avail_in; + uLong total_in; + Byte *next_out; + uLong avail_out; + uLong total_out; + char *msg; + struct internal_state *state; + void *workspace; + int data_type; + uLong adler; + uLong reserved; +}; + +struct internal_state { + int dummy; +}; + +typedef struct z_stream_s z_stream; + +typedef z_stream *z_streamp; + +typedef enum { + ZSTD_error_no_error = 0, + ZSTD_error_GENERIC = 1, + ZSTD_error_prefix_unknown = 10, + ZSTD_error_version_unsupported = 12, + 
ZSTD_error_frameParameter_unsupported = 14, + ZSTD_error_frameParameter_windowTooLarge = 16, + ZSTD_error_corruption_detected = 20, + ZSTD_error_checksum_wrong = 22, + ZSTD_error_dictionary_corrupted = 30, + ZSTD_error_dictionary_wrong = 32, + ZSTD_error_dictionaryCreation_failed = 34, + ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_outOfBound = 42, + ZSTD_error_tableLog_tooLarge = 44, + ZSTD_error_maxSymbolValue_tooLarge = 46, + ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_stage_wrong = 60, + ZSTD_error_init_missing = 62, + ZSTD_error_memory_allocation = 64, + ZSTD_error_workSpace_tooSmall = 66, + ZSTD_error_dstSize_tooSmall = 70, + ZSTD_error_srcSize_wrong = 72, + ZSTD_error_dstBuffer_null = 74, + ZSTD_error_frameIndex_tooLarge = 100, + ZSTD_error_seekableIO = 102, + ZSTD_error_dstBuffer_wrong = 104, + ZSTD_error_srcBuffer_wrong = 105, + ZSTD_error_maxCode = 120, +} ZSTD_ErrorCode; + +typedef struct { + U16 nextState; + BYTE nbAdditionalBits; + BYTE nbBits; + U32 baseValue; +} ZSTD_seqSymbol; + +typedef U32 HUF_DTable; + +typedef struct { + ZSTD_seqSymbol LLTable[513]; + ZSTD_seqSymbol OFTable[257]; + ZSTD_seqSymbol MLTable[513]; + HUF_DTable hufTable[4097]; + U32 rep[3]; + U32 workspace[157]; +} ZSTD_entropyDTables_t; + +typedef enum { + ZSTD_frame = 0, + ZSTD_skippableFrame = 1, +} ZSTD_frameType_e; + +typedef struct { + long long unsigned int frameContentSize; + long long unsigned int windowSize; + unsigned int blockSizeMax; + ZSTD_frameType_e frameType; + unsigned int headerSize; + unsigned int dictID; + unsigned int checksumFlag; +} ZSTD_frameHeader; + +typedef enum { + bt_raw = 0, + bt_rle = 1, + bt_compressed = 2, + bt_reserved = 3, +} blockType_e; + +typedef enum { + ZSTDds_getFrameHeaderSize = 0, + ZSTDds_decodeFrameHeader = 1, + ZSTDds_decodeBlockHeader = 2, + ZSTDds_decompressBlock = 3, + ZSTDds_decompressLastBlock = 4, + ZSTDds_checkChecksum = 5, + ZSTDds_decodeSkippableHeader = 6, + ZSTDds_skipFrame = 7, +} ZSTD_dStage; + +typedef enum { + ZSTD_d_validateChecksum = 0, + ZSTD_d_ignoreChecksum = 1, +} ZSTD_forceIgnoreChecksum_e; + +typedef enum { + ZSTD_use_indefinitely = -1, + ZSTD_dont_use = 0, + ZSTD_use_once = 1, +} ZSTD_dictUses_e; + +struct ZSTD_DDict_s; + +typedef struct ZSTD_DDict_s ZSTD_DDict; + +typedef struct { + const ZSTD_DDict **ddictPtrTable; + size_t ddictPtrTableSize; + size_t ddictPtrCount; +} ZSTD_DDictHashSet; + +typedef enum { + ZSTD_rmd_refSingleDDict = 0, + ZSTD_rmd_refMultipleDDicts = 1, +} ZSTD_refMultipleDDicts_e; + +typedef enum { + zdss_init = 0, + zdss_loadHeader = 1, + zdss_read = 2, + zdss_load = 3, + zdss_flush = 4, +} ZSTD_dStreamStage; + +typedef enum { + ZSTD_not_in_dst = 0, + ZSTD_in_dst = 1, + ZSTD_split = 2, +} ZSTD_litLocation_e; + +struct ZSTD_DCtx_s { + const ZSTD_seqSymbol *LLTptr; + const ZSTD_seqSymbol *MLTptr; + const ZSTD_seqSymbol *OFTptr; + const HUF_DTable *HUFptr; + ZSTD_entropyDTables_t entropy; + U32 workspace[640]; + const void *previousDstEnd; + const void *prefixStart; + const void *virtualStart; + const void *dictEnd; + size_t expected; + ZSTD_frameHeader fParams; + U64 processedCSize; + U64 decodedSize; + blockType_e bType; + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + struct xxh64_state xxhState; + size_t headerSize; + ZSTD_format_e format; + ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; + U32 validateChecksum; + const BYTE *litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + size_t staticSize; + ZSTD_DDict *ddictLocal; + const ZSTD_DDict *ddict; + U32 
dictID; + int ddictIsCold; + ZSTD_dictUses_e dictUses; + ZSTD_DDictHashSet *ddictSet; + ZSTD_refMultipleDDicts_e refMultipleDDicts; + ZSTD_dStreamStage streamStage; + char *inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char *outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t lhSize; + U32 hostageByte; + int noForwardProgress; + ZSTD_bufferMode_e outBufferMode; + ZSTD_outBuffer expectedOutBuffer; + BYTE *litBuffer; + const BYTE *litBufferEnd; + ZSTD_litLocation_e litBufferLocation; + BYTE litExtraBuffer[65568]; + BYTE headerBuffer[18]; + size_t oversizedDuration; +}; + +typedef struct ZSTD_DCtx_s ZSTD_DCtx; + +typedef ZSTD_DCtx ZSTD_DStream; + +struct ZSTD_DDict_s { + void *dictBuffer; + const void *dictContent; + size_t dictSize; + ZSTD_entropyDTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; + +typedef ZSTD_ErrorCode zstd_error_code; + +typedef ZSTD_customMem zstd_custom_mem; + +typedef ZSTD_DCtx zstd_dctx; + +typedef ZSTD_DDict zstd_ddict; + +typedef ZSTD_inBuffer zstd_in_buffer; + +typedef ZSTD_outBuffer zstd_out_buffer; + +typedef ZSTD_DStream zstd_dstream; + +typedef ZSTD_frameHeader zstd_frame_header; + +struct nla_bitfield32 { + __u32 value; + __u32 selector; +}; + +enum { + IRQ_DOMAIN_FLAG_HIERARCHY = 1, + IRQ_DOMAIN_NAME_ALLOCATED = 2, + IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, + IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, + IRQ_DOMAIN_FLAG_MSI = 16, + IRQ_DOMAIN_FLAG_ISOLATED_MSI = 32, + IRQ_DOMAIN_FLAG_NO_MAP = 64, + IRQ_DOMAIN_FLAG_MSI_PARENT = 256, + IRQ_DOMAIN_FLAG_MSI_DEVICE = 512, + IRQ_DOMAIN_FLAG_DESTROY_GC = 1024, + IRQ_DOMAIN_FLAG_NONCORE = 65536, +}; + +struct arch_msi_msg_addr_lo { + u32 address_lo; +}; + +typedef struct arch_msi_msg_addr_lo arch_msi_msg_addr_lo_t; + +struct arch_msi_msg_addr_hi { + u32 address_hi; +}; + +typedef struct arch_msi_msg_addr_hi arch_msi_msg_addr_hi_t; + +struct arch_msi_msg_data { + u32 data; +}; + +typedef struct arch_msi_msg_data arch_msi_msg_data_t; + +struct msi_msg { + union { + u32 address_lo; + arch_msi_msg_addr_lo_t arch_addr_lo; + }; + union { + u32 address_hi; + arch_msi_msg_addr_hi_t arch_addr_hi; + }; + union { + u32 data; + arch_msi_msg_data_t arch_data; + }; +}; + +struct pci_msi_desc { + union { + u32 msi_mask; + u32 msix_ctrl; + }; + struct { + u8 is_msix: 1; + u8 multiple: 3; + u8 multi_cap: 3; + u8 can_mask: 1; + u8 is_64: 1; + u8 is_virtual: 1; + unsigned int default_irq; + } msi_attrib; + union { + u8 mask_pos; + void *mask_base; + }; +}; + +union msi_domain_cookie { + u64 value; + void *ptr; + void *iobase; +}; + +union msi_instance_cookie { + u64 value; + void *ptr; +}; + +struct msi_desc_data { + union msi_domain_cookie dcookie; + union msi_instance_cookie icookie; +}; + +struct msi_desc { + unsigned int irq; + unsigned int nvec_used; + struct device *dev; + struct msi_msg msg; + struct irq_affinity_desc *affinity; + const void *iommu_cookie; + struct device_attribute *sysfs_attrs; + void (*write_msi_msg)(struct msi_desc *, void *); + void *write_msi_msg_data; + u16 msi_index; + union { + struct pci_msi_desc pci; + struct msi_desc_data data; + }; +}; + +enum msi_domain_ids { + MSI_DEFAULT_DOMAIN = 0, + MSI_MAX_DEVICE_IRQDOMAINS = 1, +}; + +struct msi_alloc_info { + struct msi_desc *desc; + irq_hw_number_t hwirq; + long unsigned int flags; + union { + long unsigned int ul; + void *ptr; + } scratchpad[2]; +}; + +typedef struct msi_alloc_info msi_alloc_info_t; + +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, 
msi_alloc_info_t *); + int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *); + void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); + int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); + void (*prepare_desc)(struct irq_domain *, msi_alloc_info_t *, struct msi_desc *); + void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); + int (*domain_alloc_irqs)(struct irq_domain *, struct device *, int); + void (*domain_free_irqs)(struct irq_domain *, struct device *); + void (*msi_post_free)(struct irq_domain *, struct device *); + int (*msi_translate)(struct irq_domain *, struct irq_fwspec *, irq_hw_number_t *, unsigned int *); +}; + +struct msi_domain_info { + u32 flags; + enum irq_domain_bus_token bus_token; + unsigned int hwsize; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +struct msi_domain_template { + char name[48]; + struct irq_chip chip; + struct msi_domain_ops ops; + struct msi_domain_info info; +}; + +enum { + MSI_FLAG_USE_DEF_DOM_OPS = 1, + MSI_FLAG_USE_DEF_CHIP_OPS = 2, + MSI_FLAG_ACTIVATE_EARLY = 4, + MSI_FLAG_MUST_REACTIVATE = 8, + MSI_FLAG_DEV_SYSFS = 16, + MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = 32, + MSI_FLAG_FREE_MSI_DESCS = 64, + MSI_FLAG_USE_DEV_FWNODE = 128, + MSI_FLAG_PARENT_PM_DEV = 256, + MSI_FLAG_PCI_MSI_MASK_PARENT = 512, + MSI_GENERIC_FLAGS_MASK = 65535, + MSI_DOMAIN_FLAGS_MASK = 4294901760, + MSI_FLAG_MULTI_PCI_MSI = 65536, + MSI_FLAG_PCI_MSIX = 131072, + MSI_FLAG_LEVEL_CAPABLE = 262144, + MSI_FLAG_MSIX_CONTIGUOUS = 524288, + MSI_FLAG_PCI_MSIX_ALLOC_DYN = 1048576, + MSI_FLAG_NO_AFFINITY = 2097152, +}; + +enum support_mode { + ALLOW_LEGACY = 0, + DENY_LEGACY = 1, +}; + +struct srcu_notifier_head { + struct mutex mutex; + struct srcu_usage srcuu; + struct srcu_struct srcu; + struct notifier_block *head; +}; + +struct clk_notifier { + struct clk *clk; + struct srcu_notifier_head notifier_head; + struct list_head node; +}; + +struct clk_notifier_data { + struct clk *clk; + long unsigned int old_rate; + long unsigned int new_rate; +}; + +struct clk_hw; + +struct clk_rate_request { + struct clk_core *core; + long unsigned int rate; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int best_parent_rate; + struct clk_hw *best_parent_hw; +}; + +struct clk_duty { + unsigned int num; + unsigned int den; +}; + +struct clk_ops; + +struct clk_parent_map; + +struct clk_core { + const char *name; + const struct clk_ops *ops; + struct clk_hw *hw; + struct module *owner; + struct device *dev; + struct hlist_node rpm_node; + struct device_node *of_node; + struct clk_core *parent; + struct clk_parent_map *parents; + u8 num_parents; + u8 new_parent_index; + long unsigned int rate; + long unsigned int req_rate; + long unsigned int new_rate; + struct clk_core *new_parent; + struct clk_core *new_child; + long unsigned int flags; + bool orphan; + bool rpm_enabled; + unsigned int enable_count; + unsigned int prepare_count; + unsigned int protect_count; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int accuracy; + int phase; + struct clk_duty duty; + struct hlist_head children; + struct hlist_node child_node; + struct hlist_head clks; + unsigned int notifier_count; + struct dentry *dentry; + struct hlist_node debug_node; + struct kref ref; +}; + +struct clk_init_data; + +struct clk_hw { + struct 
clk_core *core; + struct clk *clk; + const struct clk_init_data *init; +}; + +struct clk_ops { + int (*prepare)(struct clk_hw *); + void (*unprepare)(struct clk_hw *); + int (*is_prepared)(struct clk_hw *); + void (*unprepare_unused)(struct clk_hw *); + int (*enable)(struct clk_hw *); + void (*disable)(struct clk_hw *); + int (*is_enabled)(struct clk_hw *); + void (*disable_unused)(struct clk_hw *); + int (*save_context)(struct clk_hw *); + void (*restore_context)(struct clk_hw *); + long unsigned int (*recalc_rate)(struct clk_hw *, long unsigned int); + long int (*round_rate)(struct clk_hw *, long unsigned int, long unsigned int *); + int (*determine_rate)(struct clk_hw *, struct clk_rate_request *); + int (*set_parent)(struct clk_hw *, u8); + u8 (*get_parent)(struct clk_hw *); + int (*set_rate)(struct clk_hw *, long unsigned int, long unsigned int); + int (*set_rate_and_parent)(struct clk_hw *, long unsigned int, long unsigned int, u8); + long unsigned int (*recalc_accuracy)(struct clk_hw *, long unsigned int); + int (*get_phase)(struct clk_hw *); + int (*set_phase)(struct clk_hw *, int); + int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*init)(struct clk_hw *); + void (*terminate)(struct clk_hw *); + void (*debug_init)(struct clk_hw *, struct dentry *); +}; + +struct clk_parent_data { + const struct clk_hw *hw; + const char *fw_name; + const char *name; + int index; +}; + +struct clk_init_data { + const char *name; + const struct clk_ops *ops; + const char * const *parent_names; + const struct clk_parent_data *parent_data; + const struct clk_hw **parent_hws; + u8 num_parents; + long unsigned int flags; +}; + +struct clk_onecell_data { + struct clk **clks; + unsigned int clk_num; +}; + +struct clk_hw_onecell_data { + unsigned int num; + struct clk_hw *hws[0]; +}; + +struct clk_parent_map { + const struct clk_hw *hw; + struct clk_core *core; + const char *fw_name; + const char *name; + int index; +}; + +struct trace_event_raw_clk { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_clk_rate { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int rate; + char __data[0]; +}; + +struct trace_event_raw_clk_rate_range { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int min; + long unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_clk_parent { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + char __data[0]; +}; + +struct trace_event_raw_clk_phase { + struct trace_entry ent; + u32 __data_loc_name; + int phase; + char __data[0]; +}; + +struct trace_event_raw_clk_duty_cycle { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int num; + unsigned int den; + char __data[0]; +}; + +struct trace_event_raw_clk_rate_request { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + long unsigned int min; + long unsigned int max; + long unsigned int prate; + char __data[0]; +}; + +struct trace_event_data_offsets_clk { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate_range { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_parent { + u32 name; + const void *name_ptr_; + u32 pname; + const void *pname_ptr_; +}; + +struct trace_event_data_offsets_clk_phase { + u32 name; + const void *name_ptr_; +}; + +struct 
trace_event_data_offsets_clk_duty_cycle { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate_request { + u32 name; + const void *name_ptr_; + u32 pname; + const void *pname_ptr_; +}; + +typedef void (*btf_trace_clk_enable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_enable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_set_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_complete)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_min_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_max_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_range)(void *, struct clk_core *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_clk_set_parent)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_parent_complete)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_phase)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_phase_complete)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_duty_cycle)(void *, struct clk_core *, struct clk_duty *); + +typedef void (*btf_trace_clk_set_duty_cycle_complete)(void *, struct clk_core *, struct clk_duty *); + +typedef void (*btf_trace_clk_rate_request_start)(void *, struct clk_rate_request *); + +typedef void (*btf_trace_clk_rate_request_done)(void *, struct clk_rate_request *); + +struct clk_notifier_devres { + struct clk *clk; + struct notifier_block *nb; +}; + +struct of_clk_provider { + struct list_head link; + struct device_node *node; + struct clk * (*get)(struct of_phandle_args *, void *); + struct clk_hw * (*get_hw)(struct of_phandle_args *, void *); + void *data; +}; + +struct clock_provider { + void (*clk_init_cb)(struct device_node *); + struct device_node *np; + struct list_head node; +}; + +typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); + +struct scsi_lun { + __u8 scsi_lun[8]; +}; + +enum scsi_pr_type { + SCSI_PR_WRITE_EXCLUSIVE = 1, + SCSI_PR_EXCLUSIVE_ACCESS = 3, + SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 5, + SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 6, + SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 7, + SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 8, +}; + +enum { + NVME_AEN_BIT_NS_ATTR = 8, + NVME_AEN_BIT_FW_ACT = 9, + NVME_AEN_BIT_ANA_CHANGE = 11, + NVME_AEN_BIT_DISC_CHANGE = 31, +}; + +enum nvme_opcode { + nvme_cmd_flush = 0, + nvme_cmd_write = 1, + nvme_cmd_read = 2, + nvme_cmd_write_uncor = 4, + nvme_cmd_compare = 5, + nvme_cmd_write_zeroes = 8, + nvme_cmd_dsm = 9, + nvme_cmd_verify = 12, + nvme_cmd_resv_register = 13, + nvme_cmd_resv_report = 14, + nvme_cmd_resv_acquire = 17, + nvme_cmd_resv_release = 21, + nvme_cmd_zone_mgmt_send = 121, + nvme_cmd_zone_mgmt_recv = 122, + nvme_cmd_zone_append = 125, + nvme_cmd_vendor_start = 128, +}; + +enum nvme_admin_opcode { + nvme_admin_delete_sq = 0, + nvme_admin_create_sq = 1, + nvme_admin_get_log_page = 2, + 
nvme_admin_delete_cq = 4, + nvme_admin_create_cq = 5, + nvme_admin_identify = 6, + nvme_admin_abort_cmd = 8, + nvme_admin_set_features = 9, + nvme_admin_get_features = 10, + nvme_admin_async_event = 12, + nvme_admin_ns_mgmt = 13, + nvme_admin_activate_fw = 16, + nvme_admin_download_fw = 17, + nvme_admin_dev_self_test = 20, + nvme_admin_ns_attach = 21, + nvme_admin_keep_alive = 24, + nvme_admin_directive_send = 25, + nvme_admin_directive_recv = 26, + nvme_admin_virtual_mgmt = 28, + nvme_admin_nvme_mi_send = 29, + nvme_admin_nvme_mi_recv = 30, + nvme_admin_dbbuf = 124, + nvme_admin_format_nvm = 128, + nvme_admin_security_send = 129, + nvme_admin_security_recv = 130, + nvme_admin_sanitize_nvm = 132, + nvme_admin_get_lba_status = 134, + nvme_admin_vendor_start = 192, +}; + +enum nvmf_capsule_command { + nvme_fabrics_type_property_set = 0, + nvme_fabrics_type_connect = 1, + nvme_fabrics_type_property_get = 4, + nvme_fabrics_type_auth_send = 5, + nvme_fabrics_type_auth_receive = 6, +}; + +enum blktrace_act { + __BLK_TA_QUEUE = 1, + __BLK_TA_BACKMERGE = 2, + __BLK_TA_FRONTMERGE = 3, + __BLK_TA_GETRQ = 4, + __BLK_TA_SLEEPRQ = 5, + __BLK_TA_REQUEUE = 6, + __BLK_TA_ISSUE = 7, + __BLK_TA_COMPLETE = 8, + __BLK_TA_PLUG = 9, + __BLK_TA_UNPLUG_IO = 10, + __BLK_TA_UNPLUG_TIMER = 11, + __BLK_TA_INSERT = 12, + __BLK_TA_SPLIT = 13, + __BLK_TA_BOUNCE = 14, + __BLK_TA_REMAP = 15, + __BLK_TA_ABORT = 16, + __BLK_TA_DRV_DATA = 17, + __BLK_TA_CGROUP = 256, +}; + +typedef __u32 nvme_submit_flags_t; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +enum gpiod_flags { + GPIOD_ASIS = 0, + GPIOD_IN = 1, + GPIOD_OUT_LOW = 3, + GPIOD_OUT_HIGH = 7, + GPIOD_OUT_LOW_OPEN_DRAIN = 11, + GPIOD_OUT_HIGH_OPEN_DRAIN = 15, +}; + +struct mdio_driver { + struct mdio_driver_common mdiodrv; + int (*probe)(struct mdio_device *); + void (*remove)(struct mdio_device *); + void (*shutdown)(struct mdio_device *); +}; + +struct mdio_board_info { + const char *bus_id; + char modalias[32]; + int mdio_addr; + const void *platform_data; +}; + +enum reset_control_flags { + RESET_CONTROL_EXCLUSIVE = 4, + RESET_CONTROL_EXCLUSIVE_DEASSERTED = 12, + RESET_CONTROL_EXCLUSIVE_RELEASED = 0, + RESET_CONTROL_SHARED = 1, + RESET_CONTROL_SHARED_DEASSERTED = 9, + RESET_CONTROL_OPTIONAL_EXCLUSIVE = 6, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED = 14, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED = 2, + RESET_CONTROL_OPTIONAL_SHARED = 3, + RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED = 11, +}; + +struct trace_event_raw_mdio_access { + struct trace_entry ent; + char busid[61]; + char read; + u8 addr; + u16 val; + unsigned int regnum; + char __data[0]; +}; + +struct trace_event_data_offsets_mdio_access {}; + +typedef void (*btf_trace_mdio_access)(void *, struct mii_bus *, char, u8, unsigned int, u16, int); + +struct mdio_bus_stat_attr { + int addr; + unsigned int field_offset; +}; + +struct platform_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct pdev_archdata {}; + +struct mfd_cell; + +struct platform_device { + const char *name; + int id; + bool id_auto; + struct device dev; + u64 platform_dma_mask; + struct device_dma_parameters dma_parms; + u32 num_resources; + struct resource *resource; + const struct platform_device_id *id_entry; + const char *driver_override; + struct mfd_cell *mfd_cell; + struct pdev_archdata archdata; +}; + +enum usb_otg_state { + OTG_STATE_UNDEFINED = 0, + OTG_STATE_B_IDLE = 1, + OTG_STATE_B_SRP_INIT = 2, + OTG_STATE_B_PERIPHERAL = 3, + OTG_STATE_B_WAIT_ACON = 4, + OTG_STATE_B_HOST = 
5, + OTG_STATE_A_IDLE = 6, + OTG_STATE_A_WAIT_VRISE = 7, + OTG_STATE_A_WAIT_BCON = 8, + OTG_STATE_A_HOST = 9, + OTG_STATE_A_SUSPEND = 10, + OTG_STATE_A_PERIPHERAL = 11, + OTG_STATE_A_WAIT_VFALL = 12, + OTG_STATE_A_VBUS_ERR = 13, +}; + +struct usb_otg_caps { + u16 otg_rev; + bool hnp_support; + bool srp_support; + bool adp_support; +}; + +enum usb_dr_mode { + USB_DR_MODE_UNKNOWN = 0, + USB_DR_MODE_HOST = 1, + USB_DR_MODE_PERIPHERAL = 2, + USB_DR_MODE_OTG = 3, +}; + +struct usb_hub_descriptor { + __u8 bDescLength; + __u8 bDescriptorType; + __u8 bNbrPorts; + __le16 wHubCharacteristics; + __u8 bPwrOn2PwrGood; + __u8 bHubContrCurrent; + union { + struct { + __u8 DeviceRemovable[4]; + __u8 PortPwrCtrlMask[4]; + } hs; + struct { + __u8 bHubHdrDecLat; + __le16 wHubDelay; + __le16 DeviceRemovable; + } __attribute__((packed)) ss; + } u; +} __attribute__((packed)); + +enum hwmon_sensor_types { + hwmon_chip = 0, + hwmon_temp = 1, + hwmon_in = 2, + hwmon_curr = 3, + hwmon_power = 4, + hwmon_energy = 5, + hwmon_humidity = 6, + hwmon_fan = 7, + hwmon_pwm = 8, + hwmon_intrusion = 9, + hwmon_max = 10, +}; + +enum hwmon_chip_attributes { + hwmon_chip_temp_reset_history = 0, + hwmon_chip_in_reset_history = 1, + hwmon_chip_curr_reset_history = 2, + hwmon_chip_power_reset_history = 3, + hwmon_chip_register_tz = 4, + hwmon_chip_update_interval = 5, + hwmon_chip_alarms = 6, + hwmon_chip_samples = 7, + hwmon_chip_curr_samples = 8, + hwmon_chip_in_samples = 9, + hwmon_chip_power_samples = 10, + hwmon_chip_temp_samples = 11, + hwmon_chip_beep_enable = 12, + hwmon_chip_pec = 13, +}; + +enum hwmon_temp_attributes { + hwmon_temp_enable = 0, + hwmon_temp_input = 1, + hwmon_temp_type = 2, + hwmon_temp_lcrit = 3, + hwmon_temp_lcrit_hyst = 4, + hwmon_temp_min = 5, + hwmon_temp_min_hyst = 6, + hwmon_temp_max = 7, + hwmon_temp_max_hyst = 8, + hwmon_temp_crit = 9, + hwmon_temp_crit_hyst = 10, + hwmon_temp_emergency = 11, + hwmon_temp_emergency_hyst = 12, + hwmon_temp_alarm = 13, + hwmon_temp_lcrit_alarm = 14, + hwmon_temp_min_alarm = 15, + hwmon_temp_max_alarm = 16, + hwmon_temp_crit_alarm = 17, + hwmon_temp_emergency_alarm = 18, + hwmon_temp_fault = 19, + hwmon_temp_offset = 20, + hwmon_temp_label = 21, + hwmon_temp_lowest = 22, + hwmon_temp_highest = 23, + hwmon_temp_reset_history = 24, + hwmon_temp_rated_min = 25, + hwmon_temp_rated_max = 26, + hwmon_temp_beep = 27, +}; + +enum hwmon_in_attributes { + hwmon_in_enable = 0, + hwmon_in_input = 1, + hwmon_in_min = 2, + hwmon_in_max = 3, + hwmon_in_lcrit = 4, + hwmon_in_crit = 5, + hwmon_in_average = 6, + hwmon_in_lowest = 7, + hwmon_in_highest = 8, + hwmon_in_reset_history = 9, + hwmon_in_label = 10, + hwmon_in_alarm = 11, + hwmon_in_min_alarm = 12, + hwmon_in_max_alarm = 13, + hwmon_in_lcrit_alarm = 14, + hwmon_in_crit_alarm = 15, + hwmon_in_rated_min = 16, + hwmon_in_rated_max = 17, + hwmon_in_beep = 18, + hwmon_in_fault = 19, +}; + +enum hwmon_curr_attributes { + hwmon_curr_enable = 0, + hwmon_curr_input = 1, + hwmon_curr_min = 2, + hwmon_curr_max = 3, + hwmon_curr_lcrit = 4, + hwmon_curr_crit = 5, + hwmon_curr_average = 6, + hwmon_curr_lowest = 7, + hwmon_curr_highest = 8, + hwmon_curr_reset_history = 9, + hwmon_curr_label = 10, + hwmon_curr_alarm = 11, + hwmon_curr_min_alarm = 12, + hwmon_curr_max_alarm = 13, + hwmon_curr_lcrit_alarm = 14, + hwmon_curr_crit_alarm = 15, + hwmon_curr_rated_min = 16, + hwmon_curr_rated_max = 17, + hwmon_curr_beep = 18, +}; + +enum hwmon_power_attributes { + hwmon_power_enable = 0, + hwmon_power_average = 1, + hwmon_power_average_interval = 
2, + hwmon_power_average_interval_max = 3, + hwmon_power_average_interval_min = 4, + hwmon_power_average_highest = 5, + hwmon_power_average_lowest = 6, + hwmon_power_average_max = 7, + hwmon_power_average_min = 8, + hwmon_power_input = 9, + hwmon_power_input_highest = 10, + hwmon_power_input_lowest = 11, + hwmon_power_reset_history = 12, + hwmon_power_accuracy = 13, + hwmon_power_cap = 14, + hwmon_power_cap_hyst = 15, + hwmon_power_cap_max = 16, + hwmon_power_cap_min = 17, + hwmon_power_min = 18, + hwmon_power_max = 19, + hwmon_power_crit = 20, + hwmon_power_lcrit = 21, + hwmon_power_label = 22, + hwmon_power_alarm = 23, + hwmon_power_cap_alarm = 24, + hwmon_power_min_alarm = 25, + hwmon_power_max_alarm = 26, + hwmon_power_lcrit_alarm = 27, + hwmon_power_crit_alarm = 28, + hwmon_power_rated_min = 29, + hwmon_power_rated_max = 30, +}; + +enum hwmon_energy_attributes { + hwmon_energy_enable = 0, + hwmon_energy_input = 1, + hwmon_energy_label = 2, +}; + +enum hwmon_humidity_attributes { + hwmon_humidity_enable = 0, + hwmon_humidity_input = 1, + hwmon_humidity_label = 2, + hwmon_humidity_min = 3, + hwmon_humidity_min_hyst = 4, + hwmon_humidity_max = 5, + hwmon_humidity_max_hyst = 6, + hwmon_humidity_alarm = 7, + hwmon_humidity_fault = 8, + hwmon_humidity_rated_min = 9, + hwmon_humidity_rated_max = 10, + hwmon_humidity_min_alarm = 11, + hwmon_humidity_max_alarm = 12, +}; + +enum hwmon_fan_attributes { + hwmon_fan_enable = 0, + hwmon_fan_input = 1, + hwmon_fan_label = 2, + hwmon_fan_min = 3, + hwmon_fan_max = 4, + hwmon_fan_div = 5, + hwmon_fan_pulses = 6, + hwmon_fan_target = 7, + hwmon_fan_alarm = 8, + hwmon_fan_min_alarm = 9, + hwmon_fan_max_alarm = 10, + hwmon_fan_fault = 11, + hwmon_fan_beep = 12, +}; + +enum hwmon_pwm_attributes { + hwmon_pwm_input = 0, + hwmon_pwm_enable = 1, + hwmon_pwm_mode = 2, + hwmon_pwm_freq = 3, + hwmon_pwm_auto_channels_temp = 4, +}; + +enum hwmon_intrusion_attributes { + hwmon_intrusion_alarm = 0, + hwmon_intrusion_beep = 1, +}; + +struct hwmon_ops { + umode_t visible; + umode_t (*is_visible)(const void *, enum hwmon_sensor_types, u32, int); + int (*read)(struct device *, enum hwmon_sensor_types, u32, int, long int *); + int (*read_string)(struct device *, enum hwmon_sensor_types, u32, int, const char **); + int (*write)(struct device *, enum hwmon_sensor_types, u32, int, long int); +}; + +struct hwmon_channel_info { + enum hwmon_sensor_types type; + const u32 *config; +}; + +struct hwmon_chip_info { + const struct hwmon_ops *ops; + const struct hwmon_channel_info * const *info; +}; + +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +struct rt_mutex { + struct rt_mutex_base rtmutex; +}; + +struct i2c_msg { + __u16 addr; + __u16 flags; + __u16 len; + __u8 *buf; +}; + +union i2c_smbus_data { + __u8 byte; + __u16 word; + __u8 block[34]; +}; + +struct i2c_adapter; + +struct i2c_client { + short unsigned int flags; + short unsigned int addr; + char name[20]; + struct i2c_adapter *adapter; + struct device dev; + int init_irq; + int irq; + struct list_head detected; + void *devres_group_id; +}; + +struct i2c_algorithm; + +struct i2c_lock_operations; + +struct i2c_bus_recovery_info; + +struct i2c_adapter_quirks; + +struct i2c_adapter { + struct module *owner; + unsigned int class; + const struct i2c_algorithm *algo; + void *algo_data; + const struct i2c_lock_operations *lock_ops; + struct rt_mutex bus_lock; + struct rt_mutex mux_lock; + int timeout; + int retries; + struct device dev; + long unsigned 
int locked_flags; + int nr; + char name[48]; + struct completion dev_released; + struct mutex userspace_clients_lock; + struct list_head userspace_clients; + struct i2c_bus_recovery_info *bus_recovery_info; + const struct i2c_adapter_quirks *quirks; + struct irq_domain *host_notify_domain; + struct regulator *bus_regulator; + struct dentry *debugfs; + long unsigned int addrs_in_instantiation[2]; +}; + +struct i2c_algorithm { + union { + int (*xfer)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); + }; + union { + int (*xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + }; + int (*smbus_xfer)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + int (*smbus_xfer_atomic)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + u32 (*functionality)(struct i2c_adapter *); +}; + +struct i2c_lock_operations { + void (*lock_bus)(struct i2c_adapter *, unsigned int); + int (*trylock_bus)(struct i2c_adapter *, unsigned int); + void (*unlock_bus)(struct i2c_adapter *, unsigned int); +}; + +struct pinctrl; + +struct pinctrl_state; + +struct i2c_bus_recovery_info { + int (*recover_bus)(struct i2c_adapter *); + int (*get_scl)(struct i2c_adapter *); + void (*set_scl)(struct i2c_adapter *, int); + int (*get_sda)(struct i2c_adapter *); + void (*set_sda)(struct i2c_adapter *, int); + int (*get_bus_free)(struct i2c_adapter *); + void (*prepare_recovery)(struct i2c_adapter *); + void (*unprepare_recovery)(struct i2c_adapter *); + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; +}; + +struct i2c_adapter_quirks { + u64 flags; + int max_num_msgs; + u16 max_write_len; + u16 max_read_len; + u16 max_comb_1st_msg_len; + u16 max_comb_2nd_msg_len; +}; + +enum thermal_notify_event { + THERMAL_EVENT_UNSPECIFIED = 0, + THERMAL_EVENT_TEMP_SAMPLE = 1, + THERMAL_TRIP_VIOLATED = 2, + THERMAL_TRIP_CHANGED = 3, + THERMAL_DEVICE_DOWN = 4, + THERMAL_DEVICE_UP = 5, + THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, + THERMAL_TABLE_CHANGED = 7, + THERMAL_EVENT_KEEP_ALIVE = 8, + THERMAL_TZ_BIND_CDEV = 9, + THERMAL_TZ_UNBIND_CDEV = 10, + THERMAL_INSTANCE_WEIGHT_CHANGED = 11, + THERMAL_TZ_RESUME = 12, + THERMAL_TZ_ADD_THRESHOLD = 13, + THERMAL_TZ_DEL_THRESHOLD = 14, + THERMAL_TZ_FLUSH_THRESHOLDS = 15, +}; + +struct trace_event_raw_hwmon_attr_class { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + long int val; + char __data[0]; +}; + +struct trace_event_raw_hwmon_attr_show_string { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + u32 __data_loc_label; + char __data[0]; +}; + +struct trace_event_data_offsets_hwmon_attr_class { + u32 attr_name; + const void *attr_name_ptr_; +}; + +struct trace_event_data_offsets_hwmon_attr_show_string { + u32 attr_name; + const void *attr_name_ptr_; + u32 label; + const void *label_ptr_; +}; + +typedef void (*btf_trace_hwmon_attr_show)(void *, int, const char *, long int); + +typedef void (*btf_trace_hwmon_attr_store)(void *, int, const char *, long int); + +typedef void (*btf_trace_hwmon_attr_show_string)(void *, int, const char *, const char *); + +struct hwmon_device { + const char *name; + const char *label; + struct device dev; + const struct hwmon_chip_info *chip; + struct list_head tzdata; + struct attribute_group group; + const struct 
attribute_group **groups; +}; + +struct hwmon_device_attribute { + struct device_attribute dev_attr; + const struct hwmon_ops *ops; + enum hwmon_sensor_types type; + u32 attr; + int index; + char name[32]; +}; + +struct hwmon_thermal_data { + struct list_head node; + struct device *dev; + int index; + struct thermal_zone_device *tzd; +}; + +struct rtgenmsg { + unsigned char rtgen_family; +}; + +enum rtnetlink_groups { + RTNLGRP_NONE = 0, + RTNLGRP_LINK = 1, + RTNLGRP_NOTIFY = 2, + RTNLGRP_NEIGH = 3, + RTNLGRP_TC = 4, + RTNLGRP_IPV4_IFADDR = 5, + RTNLGRP_IPV4_MROUTE = 6, + RTNLGRP_IPV4_ROUTE = 7, + RTNLGRP_IPV4_RULE = 8, + RTNLGRP_IPV6_IFADDR = 9, + RTNLGRP_IPV6_MROUTE = 10, + RTNLGRP_IPV6_ROUTE = 11, + RTNLGRP_IPV6_IFINFO = 12, + RTNLGRP_DECnet_IFADDR = 13, + RTNLGRP_NOP2 = 14, + RTNLGRP_DECnet_ROUTE = 15, + RTNLGRP_DECnet_RULE = 16, + RTNLGRP_NOP4 = 17, + RTNLGRP_IPV6_PREFIX = 18, + RTNLGRP_IPV6_RULE = 19, + RTNLGRP_ND_USEROPT = 20, + RTNLGRP_PHONET_IFADDR = 21, + RTNLGRP_PHONET_ROUTE = 22, + RTNLGRP_DCB = 23, + RTNLGRP_IPV4_NETCONF = 24, + RTNLGRP_IPV6_NETCONF = 25, + RTNLGRP_MDB = 26, + RTNLGRP_MPLS_ROUTE = 27, + RTNLGRP_NSID = 28, + RTNLGRP_MPLS_NETCONF = 29, + RTNLGRP_IPV4_MROUTE_R = 30, + RTNLGRP_IPV6_MROUTE_R = 31, + RTNLGRP_NEXTHOP = 32, + RTNLGRP_BRVLAN = 33, + RTNLGRP_MCTP_IFADDR = 34, + RTNLGRP_TUNNEL = 35, + RTNLGRP_STATS = 36, + __RTNLGRP_MAX = 37, +}; + +enum { + NETNSA_NONE = 0, + NETNSA_NSID = 1, + NETNSA_PID = 2, + NETNSA_FD = 3, + NETNSA_TARGET_NSID = 4, + NETNSA_CURRENT_NSID = 5, + __NETNSA_MAX = 6, +}; + +struct pcpu_gen_cookie { + local_t nesting; + u64 last; +}; + +struct gen_cookie { + struct pcpu_gen_cookie *local; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + atomic64_t forward_last; + atomic64_t reverse_last; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); + +typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); + +enum rtnl_link_flags { + RTNL_FLAG_DOIT_UNLOCKED = 1, + RTNL_FLAG_BULK_DEL_SUPPORTED = 2, + RTNL_FLAG_DUMP_UNLOCKED = 4, + RTNL_FLAG_DUMP_SPLIT_NLM_DONE = 8, +}; + +struct rtnl_msg_handler { + struct module *owner; + int protocol; + int msgtype; + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + int flags; +}; + +struct net_fill_args { + u32 portid; + u32 seq; + int flags; + int cmd; + int nsid; + bool add_ref; + int ref_nsid; +}; + +struct rtnl_net_dump_cb { + struct net *tgt_net; + struct net *ref_net; + struct sk_buff *skb; + struct net_fill_args fillargs; + int idx; + int s_idx; +}; + +typedef void (*smp_call_func_t)(void *); + +struct __call_single_data { + struct __call_single_node node; + smp_call_func_t func; + void *info; +}; + +typedef struct __call_single_data call_single_data_t; + +struct netdev_xmit { + u16 recursion; + u8 more; + u8 skip_txqueue; +}; + +struct netdev_hw_addr { + struct list_head list; + struct rb_node node; + unsigned char addr[32]; + unsigned char type; + bool global_use; + int sync_cnt; + int refcount; + int synced; + struct callback_head callback_head; +}; + +struct sd_flow_limit; + +struct softnet_data { + struct list_head poll_list; + struct sk_buff_head process_queue; + local_lock_t process_queue_bh_lock; + unsigned int processed; + unsigned int time_squeeze; + struct softnet_data *rps_ipi_list; + unsigned int received_rps; + bool in_net_rx_action; + bool in_napi_threaded_poll; + struct sd_flow_limit *flow_limit; + struct Qdisc 
*output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; + struct netdev_xmit xmit; + long: 64; + long: 64; + long: 64; + unsigned int input_queue_head; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + call_single_data_t csd; + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_tail; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; + long: 64; + atomic_t dropped; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t defer_lock; + int defer_count; + int defer_ipi_scheduled; + struct sk_buff *defer_list; + long: 64; + call_single_data_t defer_csd; +}; + +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[128]; + u8 buckets[0]; +}; + +struct flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct flow_dissector_key_vlan { + union { + struct { + u16 vlan_id: 12; + u16 vlan_dei: 1; + u16 vlan_priority: 3; + }; + __be16 vlan_tci; + }; + __be16 vlan_tpid; + __be16 vlan_eth_type; + u16 padding; +}; + +struct flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +struct flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +struct flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +struct flow_dissector_key_eth_addrs { + unsigned char dst[6]; + unsigned char src[6]; +}; + +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + +struct flow_dissector { + long long unsigned int used_keys; + short unsigned int offset[33]; +}; + +struct xsk_queue; + +struct xdp_umem; + +struct xdp_buff_xsk; + +struct xdp_desc; + +struct xsk_buff_pool { + struct device *dev; + struct net_device *netdev; + struct list_head xsk_tx_list; + spinlock_t xsk_tx_list_lock; + refcount_t users; + struct xdp_umem *umem; + struct work_struct work; + struct list_head free_list; + struct list_head xskb_list; + u32 heads_cnt; + u16 queue_id; + long: 64; + struct xsk_queue *fq; + struct xsk_queue *cq; + dma_addr_t *dma_pages; + struct xdp_buff_xsk *heads; + struct xdp_desc *tx_descs; + u64 chunk_mask; + u64 addrs_cnt; + u32 free_list_cnt; + u32 dma_pages_cnt; + u32 free_heads_cnt; + u32 headroom; + u32 chunk_size; + u32 chunk_shift; + u32 frame_len; + u32 xdp_zc_max_segs; + u8 tx_metadata_len; + u8 cached_need_wakeup; + bool uses_need_wakeup; + bool unaligned; + bool tx_sw_csum; + void *addrs; + spinlock_t cq_lock; + struct xdp_buff_xsk *free_heads[0]; + long: 64; + long: 64; +}; + +struct ethtool_cmd { + __u32 cmd; + __u32 supported; + __u32 advertising; + __u16 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 transceiver; + __u8 autoneg; + __u8 mdio_support; + __u32 maxtxpkt; + __u32 maxrxpkt; + __u16 speed_hi; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __u32 lp_advertising; + __u32 reserved[2]; +}; + +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC = 0, + ETHTOOL_TUNABLE_U8 = 1, + ETHTOOL_TUNABLE_U16 = 2, + ETHTOOL_TUNABLE_U32 = 3, + ETHTOOL_TUNABLE_U64 = 4, + ETHTOOL_TUNABLE_STRING = 5, + ETHTOOL_TUNABLE_S8 = 6, + ETHTOOL_TUNABLE_S16 = 7, + ETHTOOL_TUNABLE_S32 = 8, + ETHTOOL_TUNABLE_S64 = 9, +}; + +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +enum 
ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS = 1, + ETH_SS_PRIV_FLAGS = 2, + ETH_SS_NTUPLE_FILTERS = 3, + ETH_SS_FEATURES = 4, + ETH_SS_RSS_HASH_FUNCS = 5, + ETH_SS_TUNABLES = 6, + ETH_SS_PHY_STATS = 7, + ETH_SS_PHY_TUNABLES = 8, + ETH_SS_LINK_MODES = 9, + ETH_SS_MSG_CLASSES = 10, + ETH_SS_WOL_MODES = 11, + ETH_SS_SOF_TIMESTAMPING = 12, + ETH_SS_TS_TX_TYPES = 13, + ETH_SS_TS_RX_FILTERS = 14, + ETH_SS_UDP_TUNNEL_TYPES = 15, + ETH_SS_STATS_STD = 16, + ETH_SS_STATS_ETH_PHY = 17, + ETH_SS_STATS_ETH_MAC = 18, + ETH_SS_STATS_ETH_CTRL = 19, + ETH_SS_STATS_RMON = 20, + ETH_SS_COUNT = 21, +}; + +struct ethtool_gstrings { + __u32 cmd; + __u32 string_set; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; + __u32 reserved; + __u64 sset_mask; + __u32 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; + __u32 size; + __u8 data[0]; +}; + +enum ethtool_flags { + ETH_FLAG_TXVLAN = 128, + ETH_FLAG_RXVLAN = 256, + ETH_FLAG_LRO = 32768, + ETH_FLAG_NTUPLE = 134217728, + ETH_FLAG_RXHASH = 268435456, +}; + +struct ethtool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u8 hfunc; + __u8 input_xfrm; + __u8 rsvd8[2]; + __u32 rsvd32; + __u32 rss_config[0]; +}; + +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT = 0, + ETHTOOL_F_WISH__BIT = 1, + ETHTOOL_F_COMPAT__BIT = 2, +}; + +struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[128]; + char data[0]; +}; + +enum ethtool_fec_config_bits { + ETHTOOL_FEC_NONE_BIT = 0, + ETHTOOL_FEC_AUTO_BIT = 1, + ETHTOOL_FEC_OFF_BIT = 2, + ETHTOOL_FEC_RS_BIT = 3, + ETHTOOL_FEC_BASER_BIT = 4, + ETHTOOL_FEC_LLRS_BIT = 5, +}; + +enum { + SOF_TIMESTAMPING_TX_HARDWARE = 1, + SOF_TIMESTAMPING_TX_SOFTWARE = 2, + SOF_TIMESTAMPING_RX_HARDWARE = 4, + SOF_TIMESTAMPING_RX_SOFTWARE = 8, + SOF_TIMESTAMPING_SOFTWARE = 16, + SOF_TIMESTAMPING_SYS_HARDWARE = 32, + SOF_TIMESTAMPING_RAW_HARDWARE = 64, + SOF_TIMESTAMPING_OPT_ID = 128, + SOF_TIMESTAMPING_TX_SCHED = 256, + SOF_TIMESTAMPING_TX_ACK = 512, + SOF_TIMESTAMPING_OPT_CMSG = 1024, + SOF_TIMESTAMPING_OPT_TSONLY = 2048, + SOF_TIMESTAMPING_OPT_STATS = 4096, + SOF_TIMESTAMPING_OPT_PKTINFO = 8192, + SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, + SOF_TIMESTAMPING_BIND_PHC = 32768, + SOF_TIMESTAMPING_OPT_ID_TCP = 65536, + SOF_TIMESTAMPING_OPT_RX_FILTER = 131072, + SOF_TIMESTAMPING_LAST = 131072, + SOF_TIMESTAMPING_MASK = 262143, +}; + +struct flow_rule; + +struct ethtool_rx_flow_rule { + struct flow_rule *rule; + long unsigned int priv[0]; +}; + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP = 1, + FLOW_ACTION_TRAP = 2, + FLOW_ACTION_GOTO = 3, + FLOW_ACTION_REDIRECT = 4, + FLOW_ACTION_MIRRED = 5, + FLOW_ACTION_REDIRECT_INGRESS = 6, + FLOW_ACTION_MIRRED_INGRESS = 7, + FLOW_ACTION_VLAN_PUSH = 8, + FLOW_ACTION_VLAN_POP = 9, + FLOW_ACTION_VLAN_MANGLE = 10, 
+ FLOW_ACTION_TUNNEL_ENCAP = 11, + FLOW_ACTION_TUNNEL_DECAP = 12, + FLOW_ACTION_MANGLE = 13, + FLOW_ACTION_ADD = 14, + FLOW_ACTION_CSUM = 15, + FLOW_ACTION_MARK = 16, + FLOW_ACTION_PTYPE = 17, + FLOW_ACTION_PRIORITY = 18, + FLOW_ACTION_RX_QUEUE_MAPPING = 19, + FLOW_ACTION_WAKE = 20, + FLOW_ACTION_QUEUE = 21, + FLOW_ACTION_SAMPLE = 22, + FLOW_ACTION_POLICE = 23, + FLOW_ACTION_CT = 24, + FLOW_ACTION_CT_METADATA = 25, + FLOW_ACTION_MPLS_PUSH = 26, + FLOW_ACTION_MPLS_POP = 27, + FLOW_ACTION_MPLS_MANGLE = 28, + FLOW_ACTION_GATE = 29, + FLOW_ACTION_PPPOE_PUSH = 30, + FLOW_ACTION_JUMP = 31, + FLOW_ACTION_PIPE = 32, + FLOW_ACTION_VLAN_PUSH_ETH = 33, + FLOW_ACTION_VLAN_POP_ETH = 34, + FLOW_ACTION_CONTINUE = 35, + NUM_FLOW_ACTIONS = 36, +}; + +typedef void (*action_destr)(void *); + +enum flow_action_mangle_base { + FLOW_ACT_MANGLE_UNSPEC = 0, + FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1, + FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2, + FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3, + FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4, + FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5, +}; + +struct psample_group; + +struct action_gate_entry; + +struct ip_tunnel_info; + +struct nf_flowtable; + +struct flow_action_cookie; + +struct flow_action_entry { + enum flow_action_id id; + u32 hw_index; + long unsigned int cookie; + u64 miss_cookie; + enum flow_action_hw_stats hw_stats; + action_destr destructor; + void *destructor_priv; + union { + u32 chain_index; + struct net_device *dev; + struct { + u16 vid; + __be16 proto; + u8 prio; + } vlan; + struct { + unsigned char dst[6]; + unsigned char src[6]; + } vlan_push_eth; + struct { + enum flow_action_mangle_base htype; + u32 offset; + u32 mask; + u32 val; + } mangle; + struct ip_tunnel_info *tunnel; + u32 csum_flags; + u32 mark; + u16 ptype; + u16 rx_queue; + u32 priority; + struct { + u32 ctx; + u32 index; + u8 vf; + } queue; + struct { + struct psample_group *psample_group; + u32 rate; + u32 trunc_size; + bool truncate; + } sample; + struct { + u32 burst; + u64 rate_bytes_ps; + u64 peakrate_bytes_ps; + u32 avrate; + u16 overhead; + u64 burst_pkt; + u64 rate_pkt_ps; + u32 mtu; + struct { + enum flow_action_id act_id; + u32 extval; + } exceed; + struct { + enum flow_action_id act_id; + u32 extval; + } notexceed; + } police; + struct { + int action; + u16 zone; + struct nf_flowtable *flow_table; + } ct; + struct { + long unsigned int cookie; + u32 mark; + u32 labels[4]; + bool orig_dir; + } ct_metadata; + struct { + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { + __be16 proto; + } mpls_pop; + struct { + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; + struct { + s32 prio; + u64 basetime; + u64 cycletime; + u64 cycletimeext; + u32 num_entries; + struct action_gate_entry *entries; + } gate; + struct { + u16 sid; + } pppoe; + }; + struct flow_action_cookie *user_cookie; +}; + +struct flow_action { + unsigned int num_entries; + struct flow_action_entry entries[0]; +}; + +struct flow_rule { + struct flow_match match; + struct flow_action action; +}; + +struct ethtool_rx_flow_spec_input { + const struct ethtool_rx_flow_spec *fs; + u32 rss_ctx; +}; + +struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *); + int (*get_strings)(struct phy_device *, u8 *); + int (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *, struct netlink_ext_ack *); + int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); + int 
(*start_cable_test)(struct phy_device *, struct netlink_ext_ack *); + int (*start_cable_test_tdr)(struct phy_device *, struct netlink_ext_ack *, const struct phy_tdr_config *); +}; + +struct flow_action_cookie { + u32 cookie_len; + u8 cookie[0]; +}; + +struct xdp_desc { + __u64 addr; + __u32 len; + __u32 options; +}; + +struct xdp_umem { + void *addrs; + u64 size; + u32 headroom; + u32 chunk_size; + u32 chunks; + u32 npgs; + struct user_struct *user; + refcount_t users; + u8 flags; + u8 tx_metadata_len; + bool zc; + struct page **pgs; + int id; + struct list_head xsk_dma_list; + struct work_struct work; +}; + +struct xdp_buff_xsk { + struct xdp_buff xdp; + u8 cb[24]; + dma_addr_t dma; + dma_addr_t frame_dma; + struct xsk_buff_pool *pool; + struct list_head list_node; +}; + +struct ethtool_devlink_compat { + struct devlink *devlink; + union { + struct ethtool_flash efl; + struct ethtool_drvinfo info; + }; +}; + +struct ethtool_link_usettings { + struct ethtool_link_settings base; + struct { + __u32 supported[4]; + __u32 advertising[4]; + __u32 lp_advertising[4]; + } link_modes; +}; + +struct ethtool_rx_flow_key { + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_eth_addrs eth_addrs; +}; + +struct ethtool_rx_flow_match { + struct flow_dissector dissector; + struct ethtool_rx_flow_key key; + struct ethtool_rx_flow_key mask; +}; + +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; + u32 flags; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; +}; + +struct flow_dissector_key_tcp { + __be16 flags; +}; + +struct flow_dissector_key_meta { + int ingress_ifindex; + u16 ingress_iftype; + u8 l2_miss; +}; + +enum nft_counter_attributes { + NFTA_COUNTER_UNSPEC = 0, + NFTA_COUNTER_BYTES = 1, + NFTA_COUNTER_PACKETS = 2, + NFTA_COUNTER_PAD = 3, + __NFTA_COUNTER_MAX = 4, +}; + +struct nf_flowtable_type; + +struct nf_flowtable { + unsigned int flags; + int priority; + struct rhashtable rhashtable; + struct list_head list; + const struct nf_flowtable_type *type; + struct delayed_work gc_work; + struct flow_block flow_block; + struct rw_semaphore flow_block_lock; + possible_net_t net; +}; + +enum flow_block_command { + FLOW_BLOCK_BIND = 0, + FLOW_BLOCK_UNBIND = 1, +}; + +struct nf_flow_key { + struct flow_dissector_key_meta meta; + struct flow_dissector_key_control control; + struct flow_dissector_key_control enc_control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_keyid enc_key_id; + union { + struct flow_dissector_key_ipv4_addrs enc_ipv4; + struct flow_dissector_key_ipv6_addrs enc_ipv6; + }; + struct flow_dissector_key_tcp tcp; + struct flow_dissector_key_ports tp; +}; + +struct nf_flow_match { + struct flow_dissector dissector; + struct nf_flow_key key; + struct nf_flow_key mask; +}; + +struct nf_flow_rule { + struct nf_flow_match match; + struct flow_rule *rule; +}; + +enum flow_offload_tuple_dir { + FLOW_OFFLOAD_DIR_ORIGINAL = 0, + FLOW_OFFLOAD_DIR_REPLY = 1, +}; + +struct flow_offload; + +struct nf_flowtable_type { + struct list_head list; + int family; + int (*init)(struct nf_flowtable *); + bool (*gc)(const struct flow_offload *); + int 
(*setup)(struct nf_flowtable *, struct net_device *, enum flow_block_command); + int (*action)(struct net *, struct flow_offload *, enum flow_offload_tuple_dir, struct nf_flow_rule *); + void (*free)(struct nf_flowtable *); + void (*get)(struct nf_flowtable *); + void (*put)(struct nf_flowtable *); + nf_hookfn *hook; + struct module *owner; +}; + +struct flow_offload_tuple { + union { + struct in_addr src_v4; + struct in6_addr src_v6; + }; + union { + struct in_addr dst_v4; + struct in6_addr dst_v6; + }; + struct { + __be16 src_port; + __be16 dst_port; + }; + int iifidx; + u8 l3proto; + u8 l4proto; + struct { + u16 id; + __be16 proto; + } encap[2]; + struct {} __hash; + u8 dir: 2; + u8 xmit_type: 3; + u8 encap_num: 2; + char: 1; + u8 in_vlan_ingress: 2; + u16 mtu; + union { + struct { + struct dst_entry *dst_cache; + u32 dst_cookie; + }; + struct { + u32 ifidx; + u32 hw_ifidx; + u8 h_source[6]; + u8 h_dest[6]; + } out; + struct { + u32 iifidx; + } tc; + }; +}; + +struct flow_offload_tuple_rhash { + struct rhash_head node; + struct flow_offload_tuple tuple; +}; + +struct flow_offload { + struct flow_offload_tuple_rhash tuplehash[2]; + struct nf_conn *ct; + long unsigned int flags; + u16 type; + u32 timeout; + struct callback_head callback_head; +}; + +enum nft_offload_dep_type { + NFT_OFFLOAD_DEP_UNSPEC = 0, + NFT_OFFLOAD_DEP_NETWORK = 1, + NFT_OFFLOAD_DEP_TRANSPORT = 2, +}; + +struct nft_offload_reg { + u32 key; + u32 len; + u32 base_offset; + u32 offset; + u32 flags; + struct nft_data data; + struct nft_data mask; +}; + +struct nft_offload_ctx { + struct { + enum nft_offload_dep_type type; + __be16 l3num; + u8 protonum; + } dep; + unsigned int num_actions; + struct net *net; + struct nft_offload_reg regs[24]; +}; + +struct nft_flow_key { + struct flow_dissector_key_basic basic; + struct flow_dissector_key_control control; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_eth_addrs eth_addrs; + struct flow_dissector_key_meta meta; +}; + +struct nft_flow_match { + struct flow_dissector dissector; + struct nft_flow_key key; + struct nft_flow_key mask; +}; + +struct nft_flow_rule { + __be16 proto; + struct nft_flow_match match; + struct flow_rule *rule; +}; + +struct nft_object_hash_key { + const char *name; + const struct nft_table *table; +}; + +struct nft_object_ops; + +struct nft_object { + struct list_head list; + struct rhlist_head rhlhead; + struct nft_object_hash_key key; + u32 genmask: 2; + u32 use; + u64 handle; + u16 udlen; + u8 *udata; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + const struct nft_object_ops *ops; + unsigned char data[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct nft_object_type; + +struct nft_object_ops { + void (*eval)(struct nft_object *, struct nft_regs *, const struct nft_pktinfo *); + unsigned int size; + int (*init)(const struct nft_ctx *, const struct nlattr * const *, struct nft_object *); + void (*destroy)(const struct nft_ctx *, struct nft_object *); + int (*dump)(struct sk_buff *, struct nft_object *, bool); + void (*update)(struct nft_object *, struct nft_object *); + const struct nft_object_type *type; +}; + +struct nft_object_type { + const struct nft_object_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); + 
const struct nft_object_ops *ops; + struct list_head list; + u32 type; + unsigned int maxattr; + u8 family; + struct module *owner; + const struct nla_policy *policy; +}; + +struct nft_counter { + u64_stats_t bytes; + u64_stats_t packets; +}; + +struct nft_counter_tot { + s64 bytes; + s64 packets; +}; + +struct nft_counter_percpu_priv { + struct nft_counter *counter; +}; + +struct __kernel_old_timespec { + __kernel_old_time_t tv_sec; + long int tv_nsec; +}; + +struct __kernel_sock_timeval { + __s64 tv_sec; + __s64 tv_usec; +}; + +struct pipe_buffer; + +struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t rd_wait; + wait_queue_head_t wr_wait; + unsigned int head; + unsigned int tail; + unsigned int max_usage; + unsigned int ring_size; + unsigned int nr_accounted; + unsigned int readers; + unsigned int writers; + unsigned int files; + unsigned int r_counter; + unsigned int w_counter; + bool poll_usage; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct pipe_buffer *bufs; + struct user_struct *user; +}; + +struct dmabuf_cmsg { + __u64 frag_offset; + __u32 frag_size; + __u32 frag_token; + __u32 dmabuf_id; + __u32 flags; +}; + +struct scm_timestamping_internal { + struct timespec64 ts[3]; +}; + +struct dmabuf_genpool_chunk_owner; + +struct net_iov { + long unsigned int __unused_padding; + long unsigned int pp_magic; + struct page_pool *pp; + struct dmabuf_genpool_chunk_owner *owner; + long unsigned int dma_addr; + atomic_long_t pp_ref_count; +}; + +struct net_devmem_dmabuf_binding; + +struct dmabuf_genpool_chunk_owner { + long unsigned int base_virtual; + dma_addr_t base_dma_addr; + struct net_iov *niovs; + size_t num_niovs; + struct net_devmem_dmabuf_binding *binding; +}; + +enum { + SKBFL_ZEROCOPY_ENABLE = 1, + SKBFL_SHARED_FRAG = 2, + SKBFL_PURE_ZEROCOPY = 4, + SKBFL_DONT_ORPHAN = 8, + SKBFL_MANAGED_FRAG_REFS = 16, +}; + +struct mmpin { + struct user_struct *user; + unsigned int num_pg; +}; + +struct ubuf_info_msgzc { + struct ubuf_info ubuf; + union { + struct { + long unsigned int desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy: 1; + u32 bytelen; + }; + }; + struct mmpin mmp; +}; + +enum { + BPF_SOCK_OPS_RTO_CB_FLAG = 1, + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2, + BPF_SOCK_OPS_STATE_CB_FLAG = 4, + BPF_SOCK_OPS_RTT_CB_FLAG = 8, + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16, + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64, + BPF_SOCK_OPS_ALL_CB_FLAGS = 127, +}; + +enum { + BPF_SOCK_OPS_VOID = 0, + BPF_SOCK_OPS_TIMEOUT_INIT = 1, + BPF_SOCK_OPS_RWND_INIT = 2, + BPF_SOCK_OPS_TCP_CONNECT_CB = 3, + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, + BPF_SOCK_OPS_NEEDS_ECN = 6, + BPF_SOCK_OPS_BASE_RTT = 7, + BPF_SOCK_OPS_RTO_CB = 8, + BPF_SOCK_OPS_RETRANS_CB = 9, + BPF_SOCK_OPS_STATE_CB = 10, + BPF_SOCK_OPS_TCP_LISTEN_CB = 11, + BPF_SOCK_OPS_RTT_CB = 12, + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13, + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15, +}; + +enum { + BPF_TCP_ESTABLISHED = 1, + BPF_TCP_SYN_SENT = 2, + BPF_TCP_SYN_RECV = 3, + BPF_TCP_FIN_WAIT1 = 4, + BPF_TCP_FIN_WAIT2 = 5, + BPF_TCP_TIME_WAIT = 6, + BPF_TCP_CLOSE = 7, + BPF_TCP_CLOSE_WAIT = 8, + BPF_TCP_LAST_ACK = 9, + BPF_TCP_LISTEN = 10, + BPF_TCP_CLOSING = 11, + BPF_TCP_NEW_SYN_RECV = 12, + BPF_TCP_BOUND_INACTIVE = 13, + BPF_TCP_MAX_STATES = 14, +}; + +struct tcpvegas_info { + __u32 tcpv_enabled; + __u32 tcpv_rttcnt; + __u32 tcpv_rtt; + __u32 
tcpv_minrtt; +}; + +struct tcp_dctcp_info { + __u16 dctcp_enabled; + __u16 dctcp_ce_state; + __u32 dctcp_alpha; + __u32 dctcp_ab_ecn; + __u32 dctcp_ab_tot; +}; + +struct tcp_bbr_info { + __u32 bbr_bw_lo; + __u32 bbr_bw_hi; + __u32 bbr_min_rtt; + __u32 bbr_pacing_gain; + __u32 bbr_cwnd_gain; +}; + +union tcp_cc_info { + struct tcpvegas_info vegas; + struct tcp_dctcp_info dctcp; + struct tcp_bbr_info bbr; +}; + +struct pipe_buf_operations; + +struct pipe_buffer { + struct page *page; + unsigned int offset; + unsigned int len; + const struct pipe_buf_operations *ops; + unsigned int flags; + long unsigned int private; +}; + +struct pipe_buf_operations { + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +enum { + XFRM_POLICY_TYPE_MAIN = 0, + XFRM_POLICY_TYPE_SUB = 1, + XFRM_POLICY_TYPE_MAX = 2, + XFRM_POLICY_TYPE_ANY = 255, +}; + +enum { + XFRM_MSG_BASE = 16, + XFRM_MSG_NEWSA = 16, + XFRM_MSG_DELSA = 17, + XFRM_MSG_GETSA = 18, + XFRM_MSG_NEWPOLICY = 19, + XFRM_MSG_DELPOLICY = 20, + XFRM_MSG_GETPOLICY = 21, + XFRM_MSG_ALLOCSPI = 22, + XFRM_MSG_ACQUIRE = 23, + XFRM_MSG_EXPIRE = 24, + XFRM_MSG_UPDPOLICY = 25, + XFRM_MSG_UPDSA = 26, + XFRM_MSG_POLEXPIRE = 27, + XFRM_MSG_FLUSHSA = 28, + XFRM_MSG_FLUSHPOLICY = 29, + XFRM_MSG_NEWAE = 30, + XFRM_MSG_GETAE = 31, + XFRM_MSG_REPORT = 32, + XFRM_MSG_MIGRATE = 33, + XFRM_MSG_NEWSADINFO = 34, + XFRM_MSG_GETSADINFO = 35, + XFRM_MSG_NEWSPDINFO = 36, + XFRM_MSG_GETSPDINFO = 37, + XFRM_MSG_MAPPING = 38, + XFRM_MSG_SETDEFAULT = 39, + XFRM_MSG_GETDEFAULT = 40, + __XFRM_MSG_MAX = 41, +}; + +enum xfrm_attr_type_t { + XFRMA_UNSPEC = 0, + XFRMA_ALG_AUTH = 1, + XFRMA_ALG_CRYPT = 2, + XFRMA_ALG_COMP = 3, + XFRMA_ENCAP = 4, + XFRMA_TMPL = 5, + XFRMA_SA = 6, + XFRMA_POLICY = 7, + XFRMA_SEC_CTX = 8, + XFRMA_LTIME_VAL = 9, + XFRMA_REPLAY_VAL = 10, + XFRMA_REPLAY_THRESH = 11, + XFRMA_ETIMER_THRESH = 12, + XFRMA_SRCADDR = 13, + XFRMA_COADDR = 14, + XFRMA_LASTUSED = 15, + XFRMA_POLICY_TYPE = 16, + XFRMA_MIGRATE = 17, + XFRMA_ALG_AEAD = 18, + XFRMA_KMADDRESS = 19, + XFRMA_ALG_AUTH_TRUNC = 20, + XFRMA_MARK = 21, + XFRMA_TFCPAD = 22, + XFRMA_REPLAY_ESN_VAL = 23, + XFRMA_SA_EXTRA_FLAGS = 24, + XFRMA_PROTO = 25, + XFRMA_ADDRESS_FILTER = 26, + XFRMA_PAD = 27, + XFRMA_OFFLOAD_DEV = 28, + XFRMA_SET_MARK = 29, + XFRMA_SET_MARK_MASK = 30, + XFRMA_IF_ID = 31, + XFRMA_MTIMER_THRESH = 32, + XFRMA_SA_DIR = 33, + XFRMA_NAT_KEEPALIVE_INTERVAL = 34, + XFRMA_SA_PCPU = 35, + __XFRMA_MAX = 36, +}; + +struct sockcm_cookie { + u64 transmit_time; + u32 mark; + u32 tsflags; + u32 ts_opt_id; +}; + +enum inet_csk_ack_state_t { + ICSK_ACK_SCHED = 1, + ICSK_ACK_TIMER = 2, + ICSK_ACK_PUSHED = 4, + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW = 16, + ICSK_ACK_NOMEM = 32, +}; + +struct inet_timewait_sock { + struct sock_common __tw_common; + __u32 tw_mark; + unsigned char tw_substate; + unsigned char tw_rcv_wscale; + __be16 tw_sport; + unsigned int tw_transparent: 1; + unsigned int tw_flowlabel: 20; + unsigned int tw_usec_ts: 1; + unsigned int tw_pad: 2; + unsigned int tw_tos: 8; + u32 tw_txhash; + u32 tw_priority; + struct timer_list tw_timer; + struct inet_bind_bucket *tw_tb; + struct inet_bind2_bucket *tw_tb2; +}; + +enum { + TCP_FLAG_CWR = 8388608, + TCP_FLAG_ECE = 4194304, + TCP_FLAG_URG = 2097152, + TCP_FLAG_ACK = 1048576, + TCP_FLAG_PSH = 524288, + TCP_FLAG_RST = 262144, + TCP_FLAG_SYN = 
131072, + TCP_FLAG_FIN = 65536, + TCP_RESERVED_BITS = 251658240, + TCP_DATA_OFFSET = 4026531840, +}; + +struct tcp_repair_opt { + __u32 opt_code; + __u32 opt_val; +}; + +struct tcp_repair_window { + __u32 snd_wl1; + __u32 snd_wnd; + __u32 max_window; + __u32 rcv_wnd; + __u32 rcv_wup; +}; + +enum { + TCP_NO_QUEUE = 0, + TCP_RECV_QUEUE = 1, + TCP_SEND_QUEUE = 2, + TCP_QUEUES_NR = 3, +}; + +enum tcp_ca_state { + TCP_CA_Open = 0, + TCP_CA_Disorder = 1, + TCP_CA_CWR = 2, + TCP_CA_Recovery = 3, + TCP_CA_Loss = 4, +}; + +struct tcp_info { + __u8 tcpi_state; + __u8 tcpi_ca_state; + __u8 tcpi_retransmits; + __u8 tcpi_probes; + __u8 tcpi_backoff; + __u8 tcpi_options; + __u8 tcpi_snd_wscale: 4; + __u8 tcpi_rcv_wscale: 4; + __u8 tcpi_delivery_rate_app_limited: 1; + __u8 tcpi_fastopen_client_fail: 2; + __u32 tcpi_rto; + __u32 tcpi_ato; + __u32 tcpi_snd_mss; + __u32 tcpi_rcv_mss; + __u32 tcpi_unacked; + __u32 tcpi_sacked; + __u32 tcpi_lost; + __u32 tcpi_retrans; + __u32 tcpi_fackets; + __u32 tcpi_last_data_sent; + __u32 tcpi_last_ack_sent; + __u32 tcpi_last_data_recv; + __u32 tcpi_last_ack_recv; + __u32 tcpi_pmtu; + __u32 tcpi_rcv_ssthresh; + __u32 tcpi_rtt; + __u32 tcpi_rttvar; + __u32 tcpi_snd_ssthresh; + __u32 tcpi_snd_cwnd; + __u32 tcpi_advmss; + __u32 tcpi_reordering; + __u32 tcpi_rcv_rtt; + __u32 tcpi_rcv_space; + __u32 tcpi_total_retrans; + __u64 tcpi_pacing_rate; + __u64 tcpi_max_pacing_rate; + __u64 tcpi_bytes_acked; + __u64 tcpi_bytes_received; + __u32 tcpi_segs_out; + __u32 tcpi_segs_in; + __u32 tcpi_notsent_bytes; + __u32 tcpi_min_rtt; + __u32 tcpi_data_segs_in; + __u32 tcpi_data_segs_out; + __u64 tcpi_delivery_rate; + __u64 tcpi_busy_time; + __u64 tcpi_rwnd_limited; + __u64 tcpi_sndbuf_limited; + __u32 tcpi_delivered; + __u32 tcpi_delivered_ce; + __u64 tcpi_bytes_sent; + __u64 tcpi_bytes_retrans; + __u32 tcpi_dsack_dups; + __u32 tcpi_reord_seen; + __u32 tcpi_rcv_ooopack; + __u32 tcpi_snd_wnd; + __u32 tcpi_rcv_wnd; + __u32 tcpi_rehash; + __u16 tcpi_total_rto; + __u16 tcpi_total_rto_recoveries; + __u32 tcpi_total_rto_time; +}; + +enum { + TCP_NLA_PAD = 0, + TCP_NLA_BUSY = 1, + TCP_NLA_RWND_LIMITED = 2, + TCP_NLA_SNDBUF_LIMITED = 3, + TCP_NLA_DATA_SEGS_OUT = 4, + TCP_NLA_TOTAL_RETRANS = 5, + TCP_NLA_PACING_RATE = 6, + TCP_NLA_DELIVERY_RATE = 7, + TCP_NLA_SND_CWND = 8, + TCP_NLA_REORDERING = 9, + TCP_NLA_MIN_RTT = 10, + TCP_NLA_RECUR_RETRANS = 11, + TCP_NLA_DELIVERY_RATE_APP_LMT = 12, + TCP_NLA_SNDQ_SIZE = 13, + TCP_NLA_CA_STATE = 14, + TCP_NLA_SND_SSTHRESH = 15, + TCP_NLA_DELIVERED = 16, + TCP_NLA_DELIVERED_CE = 17, + TCP_NLA_BYTES_SENT = 18, + TCP_NLA_BYTES_RETRANS = 19, + TCP_NLA_DSACK_DUPS = 20, + TCP_NLA_REORD_SEEN = 21, + TCP_NLA_SRTT = 22, + TCP_NLA_TIMEOUT_REHASH = 23, + TCP_NLA_BYTES_NOTSENT = 24, + TCP_NLA_EDT = 25, + TCP_NLA_TTL = 26, + TCP_NLA_REHASH = 27, +}; + +struct tcp_zerocopy_receive { + __u64 address; + __u32 length; + __u32 recv_skip_hint; + __u32 inq; + __s32 err; + __u64 copybuf_address; + __s32 copybuf_len; + __u32 flags; + __u64 msg_control; + __u64 msg_controllen; + __u32 msg_flags; + __u32 reserved; +}; + +struct inet_skb_parm { + int iif; + struct ip_options opt; + u16 flags; + u16 frag_max_size; +}; + +struct bpf_sock_ops_kern { + struct sock *sk; + union { + u32 args[4]; + u32 reply; + u32 replylong[4]; + }; + struct sk_buff *syn_skb; + struct sk_buff *skb; + void *skb_data_end; + u8 op; + u8 is_fullsock; + u8 remaining_opt_len; + u64 temp; +}; + +union tcp_ao_addr { + struct in_addr a4; + struct in6_addr a6; +}; + +struct tcp_ao_hdr { + u8 kind; + u8 length; + u8 
keyid; + u8 rnext_keyid; +}; + +enum tcp_skb_cb_sacked_flags { + TCPCB_SACKED_ACKED = 1, + TCPCB_SACKED_RETRANS = 2, + TCPCB_LOST = 4, + TCPCB_TAGBITS = 7, + TCPCB_REPAIRED = 16, + TCPCB_EVER_RETRANS = 128, + TCPCB_RETRANS = 146, +}; + +struct tcp_skb_cb { + __u32 seq; + __u32 end_seq; + union { + struct { + u16 tcp_gso_segs; + u16 tcp_gso_size; + }; + }; + __u8 tcp_flags; + __u8 sacked; + __u8 ip_dsfield; + __u8 txstamp_ack: 1; + __u8 eor: 1; + __u8 has_rxtstamp: 1; + __u8 unused: 5; + __u32 ack_seq; + union { + struct { + __u32 is_app_limited: 1; + __u32 delivered_ce: 20; + __u32 unused: 11; + __u32 delivered; + u64 first_tx_mstamp; + u64 delivered_mstamp; + } tx; + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + }; +}; + +struct tcp_md5sig_key { + struct hlist_node node; + u8 keylen; + u8 family; + u8 prefixlen; + u8 flags; + union tcp_ao_addr addr; + int l3index; + u8 key[80]; + struct callback_head rcu; +}; + +enum tcp_chrono { + TCP_CHRONO_UNSPEC = 0, + TCP_CHRONO_BUSY = 1, + TCP_CHRONO_RWND_LIMITED = 2, + TCP_CHRONO_SNDBUF_LIMITED = 3, + __TCP_CHRONO_MAX = 4, +}; + +struct rps_sock_flow_table { + u32 mask; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 ents[0]; +}; + +struct dma_buf; + +struct dma_buf_attachment; + +struct net_devmem_dmabuf_binding { + struct dma_buf *dmabuf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + struct net_device *dev; + struct gen_pool *chunk_pool; + refcount_t ref; + struct list_head list; + struct xarray bound_rxqs; + u32 id; +}; + +enum { + TCP_CMSG_INQ = 1, + TCP_CMSG_TS = 2, +}; + +struct tcp_splice_state { + struct pipe_inode_info *pipe; + size_t len; + unsigned int flags; +}; + +struct tcp_xa_pool { + u8 max; + u8 idx; + __u32 tokens[17]; + netmem_ref netmems[17]; +}; + +enum { + RTN_UNSPEC = 0, + RTN_UNICAST = 1, + RTN_LOCAL = 2, + RTN_BROADCAST = 3, + RTN_ANYCAST = 4, + RTN_MULTICAST = 5, + RTN_BLACKHOLE = 6, + RTN_UNREACHABLE = 7, + RTN_PROHIBIT = 8, + RTN_THROW = 9, + RTN_NAT = 10, + RTN_XRESOLVE = 11, + __RTN_MAX = 12, +}; + +enum { + INET_FRAG_FIRST_IN = 1, + INET_FRAG_LAST_IN = 2, + INET_FRAG_COMPLETE = 4, + INET_FRAG_HASH_DEAD = 8, + INET_FRAG_DROP = 16, +}; + +struct raw_hashinfo { + spinlock_t lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct hlist_head ht[256]; +}; + +struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; +}; + +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER = 0, + IP6_DEFRAG_CONNTRACK_IN = 1, + __IP6_DEFRAG_CONNTRACK_IN = 65536, + IP6_DEFRAG_CONNTRACK_OUT = 65537, + __IP6_DEFRAG_CONNTRACK_OUT = 131072, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608, +}; + +struct frag_queue { + struct inet_frag_queue q; + int iif; + __u16 nhoffset; + u8 ecn; +}; + +enum { + INET_ECN_NOT_ECT = 0, + INET_ECN_ECT_1 = 1, + INET_ECN_ECT_0 = 2, + INET_ECN_CE = 3, + INET_ECN_MASK = 3, +}; + +struct udp_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + __u16 cscov; + __u8 partial_cov; +}; + +enum br_boolopt_id { + BR_BOOLOPT_NO_LL_LEARN = 0, + BR_BOOLOPT_MCAST_VLAN_SNOOPING = 1, + BR_BOOLOPT_MST_ENABLE = 2, + BR_BOOLOPT_MAX = 3, +}; + +enum { + BR_GROUPFWD_STP = 1, + BR_GROUPFWD_MACPAUSE = 2, + BR_GROUPFWD_LACP = 4, +}; + +enum { + BR_FDB_LOCAL = 0, + BR_FDB_STATIC = 1, + BR_FDB_STICKY = 2, + BR_FDB_ADDED_BY_USER = 3, + BR_FDB_ADDED_BY_EXT_LEARN = 4, + BR_FDB_OFFLOADED = 5, + 
BR_FDB_NOTIFY = 6, + BR_FDB_NOTIFY_INACTIVE = 7, + BR_FDB_LOCKED = 8, + BR_FDB_DYNAMIC_LEARNED = 9, +}; + +struct net_bridge_fdb_flush_desc { + long unsigned int flags; + long unsigned int flags_mask; + int port_ifindex; + u16 vlan_id; +}; + +enum { + Root_NFS = 255, + Root_CIFS = 254, + Root_Generic = 253, + Root_RAM0 = 1048576, +}; + +enum aarch64_insn_hint_cr_op { + AARCH64_INSN_HINT_NOP = 0, + AARCH64_INSN_HINT_YIELD = 32, + AARCH64_INSN_HINT_WFE = 64, + AARCH64_INSN_HINT_WFI = 96, + AARCH64_INSN_HINT_SEV = 128, + AARCH64_INSN_HINT_SEVL = 160, + AARCH64_INSN_HINT_XPACLRI = 224, + AARCH64_INSN_HINT_PACIA_1716 = 256, + AARCH64_INSN_HINT_PACIB_1716 = 320, + AARCH64_INSN_HINT_AUTIA_1716 = 384, + AARCH64_INSN_HINT_AUTIB_1716 = 448, + AARCH64_INSN_HINT_PACIAZ = 768, + AARCH64_INSN_HINT_PACIASP = 800, + AARCH64_INSN_HINT_PACIBZ = 832, + AARCH64_INSN_HINT_PACIBSP = 864, + AARCH64_INSN_HINT_AUTIAZ = 896, + AARCH64_INSN_HINT_AUTIASP = 928, + AARCH64_INSN_HINT_AUTIBZ = 960, + AARCH64_INSN_HINT_AUTIBSP = 992, + AARCH64_INSN_HINT_ESB = 512, + AARCH64_INSN_HINT_PSB = 544, + AARCH64_INSN_HINT_TSB = 576, + AARCH64_INSN_HINT_CSDB = 640, + AARCH64_INSN_HINT_CLEARBHB = 704, + AARCH64_INSN_HINT_BTI = 1024, + AARCH64_INSN_HINT_BTIC = 1088, + AARCH64_INSN_HINT_BTIJ = 1152, + AARCH64_INSN_HINT_BTIJC = 1216, +}; + +enum aarch64_insn_register_type { + AARCH64_INSN_REGTYPE_RT = 0, + AARCH64_INSN_REGTYPE_RN = 1, + AARCH64_INSN_REGTYPE_RT2 = 2, + AARCH64_INSN_REGTYPE_RM = 3, + AARCH64_INSN_REGTYPE_RD = 4, + AARCH64_INSN_REGTYPE_RA = 5, + AARCH64_INSN_REGTYPE_RS = 6, +}; + +enum aarch64_insn_register { + AARCH64_INSN_REG_0 = 0, + AARCH64_INSN_REG_1 = 1, + AARCH64_INSN_REG_2 = 2, + AARCH64_INSN_REG_3 = 3, + AARCH64_INSN_REG_4 = 4, + AARCH64_INSN_REG_5 = 5, + AARCH64_INSN_REG_6 = 6, + AARCH64_INSN_REG_7 = 7, + AARCH64_INSN_REG_8 = 8, + AARCH64_INSN_REG_9 = 9, + AARCH64_INSN_REG_10 = 10, + AARCH64_INSN_REG_11 = 11, + AARCH64_INSN_REG_12 = 12, + AARCH64_INSN_REG_13 = 13, + AARCH64_INSN_REG_14 = 14, + AARCH64_INSN_REG_15 = 15, + AARCH64_INSN_REG_16 = 16, + AARCH64_INSN_REG_17 = 17, + AARCH64_INSN_REG_18 = 18, + AARCH64_INSN_REG_19 = 19, + AARCH64_INSN_REG_20 = 20, + AARCH64_INSN_REG_21 = 21, + AARCH64_INSN_REG_22 = 22, + AARCH64_INSN_REG_23 = 23, + AARCH64_INSN_REG_24 = 24, + AARCH64_INSN_REG_25 = 25, + AARCH64_INSN_REG_26 = 26, + AARCH64_INSN_REG_27 = 27, + AARCH64_INSN_REG_28 = 28, + AARCH64_INSN_REG_29 = 29, + AARCH64_INSN_REG_FP = 29, + AARCH64_INSN_REG_30 = 30, + AARCH64_INSN_REG_LR = 30, + AARCH64_INSN_REG_ZR = 31, + AARCH64_INSN_REG_SP = 31, +}; + +enum aarch64_insn_variant { + AARCH64_INSN_VARIANT_32BIT = 0, + AARCH64_INSN_VARIANT_64BIT = 1, +}; + +enum aarch64_insn_branch_type { + AARCH64_INSN_BRANCH_NOLINK = 0, + AARCH64_INSN_BRANCH_LINK = 1, + AARCH64_INSN_BRANCH_RETURN = 2, + AARCH64_INSN_BRANCH_COMP_ZERO = 3, + AARCH64_INSN_BRANCH_COMP_NONZERO = 4, +}; + +enum aarch64_insn_adsb_type { + AARCH64_INSN_ADSB_ADD = 0, + AARCH64_INSN_ADSB_SUB = 1, + AARCH64_INSN_ADSB_ADD_SETFLAGS = 2, + AARCH64_INSN_ADSB_SUB_SETFLAGS = 3, +}; + +enum aarch64_insn_movewide_type { + AARCH64_INSN_MOVEWIDE_ZERO = 0, + AARCH64_INSN_MOVEWIDE_KEEP = 1, + AARCH64_INSN_MOVEWIDE_INVERSE = 2, +}; + +enum aarch64_insn_logic_type { + AARCH64_INSN_LOGIC_AND = 0, + AARCH64_INSN_LOGIC_BIC = 1, + AARCH64_INSN_LOGIC_ORR = 2, + AARCH64_INSN_LOGIC_ORN = 3, + AARCH64_INSN_LOGIC_EOR = 4, + AARCH64_INSN_LOGIC_EON = 5, + AARCH64_INSN_LOGIC_AND_SETFLAGS = 6, + AARCH64_INSN_LOGIC_BIC_SETFLAGS = 7, +}; + +struct alt_instr { + s32 orig_offset; + s32 
alt_offset; + u16 cpucap; + u8 orig_len; + u8 alt_len; +}; + +struct trace_event_raw_kvm_wfx_arm64 { + struct trace_entry ent; + long unsigned int vcpu_pc; + bool is_wfe; + char __data[0]; +}; + +struct trace_event_raw_kvm_hvc_arm64 { + struct trace_entry ent; + long unsigned int vcpu_pc; + long unsigned int r0; + long unsigned int imm; + char __data[0]; +}; + +struct trace_event_raw_kvm_arm_setup_debug { + struct trace_entry ent; + struct kvm_vcpu *vcpu; + __u32 guest_debug; + char __data[0]; +}; + +struct trace_event_raw_kvm_arm_clear_debug { + struct trace_entry ent; + __u32 guest_debug; + char __data[0]; +}; + +struct trace_event_raw_kvm_arm_set_dreg32 { + struct trace_entry ent; + const char *name; + __u64 value; + char __data[0]; +}; + +struct trace_event_raw_kvm_arm_set_regset { + struct trace_entry ent; + const char *name; + int len; + u64 ctrls[16]; + u64 values[16]; + char __data[0]; +}; + +struct trace_event_raw_trap_reg { + struct trace_entry ent; + const char *fn; + int reg; + bool is_write; + u64 write_value; + char __data[0]; +}; + +struct trace_event_raw_kvm_handle_sys_reg { + struct trace_entry ent; + long unsigned int hsr; + char __data[0]; +}; + +struct trace_event_raw_kvm_sys_access { + struct trace_entry ent; + long unsigned int vcpu_pc; + bool is_write; + const char *name; + u8 Op0; + u8 Op1; + u8 CRn; + u8 CRm; + u8 Op2; + char __data[0]; +}; + +struct trace_event_raw_kvm_set_guest_debug { + struct trace_entry ent; + struct kvm_vcpu *vcpu; + __u32 guest_debug; + char __data[0]; +}; + +struct trace_event_data_offsets_kvm_wfx_arm64 {}; + +struct trace_event_data_offsets_kvm_hvc_arm64 {}; + +struct trace_event_data_offsets_kvm_arm_setup_debug {}; + +struct trace_event_data_offsets_kvm_arm_clear_debug {}; + +struct trace_event_data_offsets_kvm_arm_set_dreg32 {}; + +struct trace_event_data_offsets_kvm_arm_set_regset {}; + +struct trace_event_data_offsets_trap_reg {}; + +struct trace_event_data_offsets_kvm_handle_sys_reg {}; + +struct trace_event_data_offsets_kvm_sys_access {}; + +struct trace_event_data_offsets_kvm_set_guest_debug {}; + +typedef void (*btf_trace_kvm_wfx_arm64)(void *, long unsigned int, bool); + +typedef void (*btf_trace_kvm_hvc_arm64)(void *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_arm_setup_debug)(void *, struct kvm_vcpu *, __u32); + +typedef void (*btf_trace_kvm_arm_clear_debug)(void *, __u32); + +typedef void (*btf_trace_kvm_arm_set_dreg32)(void *, const char *, __u64); + +typedef void (*btf_trace_kvm_arm_set_regset)(void *, const char *, int, __u64 *, __u64 *); + +typedef void (*btf_trace_trap_reg)(void *, const char *, int, bool, u64); + +typedef void (*btf_trace_kvm_handle_sys_reg)(void *, long unsigned int); + +typedef void (*btf_trace_kvm_sys_access)(void *, long unsigned int, struct sys_reg_params *, const struct sys_reg_desc *); + +typedef void (*btf_trace_kvm_set_guest_debug)(void *, struct kvm_vcpu *, __u32); + +typedef int (*exit_handle_fn)(struct kvm_vcpu *); + +struct __user_cap_header_struct { + __u32 version; + int pid; +}; + +typedef struct __user_cap_header_struct *cap_user_header_t; + +struct __user_cap_data_struct { + __u32 effective; + __u32 permitted; + __u32 inheritable; +}; + +typedef struct __user_cap_data_struct *cap_user_data_t; + +struct kobj_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); + ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t); +}; + +struct kimage_arch { + void *dtb; + 
phys_addr_t dtb_mem; + phys_addr_t kern_reloc; + phys_addr_t el2_vectors; + phys_addr_t ttbr0; + phys_addr_t ttbr1; + phys_addr_t zero_page; + long unsigned int phys_offset; + long unsigned int t0sz; +}; + +typedef long unsigned int kimage_entry_t; + +struct kexec_segment { + union { + void *buf; + void *kbuf; + }; + size_t bufsz; + long unsigned int mem; + size_t memsz; +}; + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + long unsigned int start; + struct page *control_code_page; + struct page *swap_page; + void *vmcoreinfo_data_copy; + long unsigned int nr_segments; + struct kexec_segment segment[16]; + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unusable_pages; + long unsigned int control_page; + unsigned int type: 1; + unsigned int preserve_context: 1; + unsigned int file_mode: 1; + struct kimage_arch arch; + void *elf_headers; + long unsigned int elf_headers_sz; + long unsigned int elf_load_addr; +}; + +enum { + IRQ_STARTUP_NORMAL = 0, + IRQ_STARTUP_MANAGED = 1, + IRQ_STARTUP_ABORT = 2, +}; + +typedef struct elf64_hdr Elf64_Ehdr; + +typedef struct elf64_shdr Elf64_Shdr; + +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source; + struct module *target; +}; + +struct module_sect_attr { + struct bin_attribute battr; + long unsigned int address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; + struct module_sect_attr attrs[0]; +}; + +struct module_notes_attrs { + struct kobject *dir; + unsigned int notes; + struct bin_attribute attrs[0]; +}; + +struct load_info { + const char *name; + struct module *mod; + Elf64_Ehdr *hdr; + long unsigned int len; + Elf64_Shdr *sechdrs; + char *secstrings; + char *strtab; + long unsigned int symoffs; + long unsigned int stroffs; + long unsigned int init_typeoffs; + long unsigned int core_typeoffs; + bool sig_ok; + long unsigned int mod_kallsyms_init_off; + struct { + unsigned int sym; + unsigned int str; + unsigned int mod; + unsigned int vers; + unsigned int info; + unsigned int pcpu; + } index; +}; + +struct __kernel_old_itimerval { + struct __kernel_old_timeval it_interval; + struct __kernel_old_timeval it_value; +}; + +struct rusage { + struct __kernel_old_timeval ru_utime; + struct __kernel_old_timeval ru_stime; + __kernel_long_t ru_maxrss; + __kernel_long_t ru_ixrss; + __kernel_long_t ru_idrss; + __kernel_long_t ru_isrss; + __kernel_long_t ru_minflt; + __kernel_long_t ru_majflt; + __kernel_long_t ru_nswap; + __kernel_long_t ru_inblock; + __kernel_long_t ru_oublock; + __kernel_long_t ru_msgsnd; + __kernel_long_t ru_msgrcv; + __kernel_long_t ru_nsignals; + __kernel_long_t ru_nvcsw; + __kernel_long_t ru_nivcsw; +}; + +struct bpf_queue_stack { + struct bpf_map map; + raw_spinlock_t lock; + u32 head; + u32 tail; + u32 size; + char elements[0]; +}; + +struct bpf_mprog_fp { + struct bpf_prog *prog; +}; + +struct bpf_mprog_cp { + struct bpf_link *link; +}; + +struct bpf_mprog_bundle; + +struct bpf_mprog_entry { + struct bpf_mprog_fp fp_items[64]; + struct bpf_mprog_bundle *parent; +}; + +struct bpf_mprog_bundle { + struct bpf_mprog_entry a; + struct bpf_mprog_entry b; + struct bpf_mprog_cp cp_items[64]; + struct bpf_prog *ref; + atomic64_t revision; + u32 count; +}; + +struct bpf_tuple { + struct bpf_prog *prog; + struct bpf_link *link; +}; + +struct mini_Qdisc { + struct tcf_proto *filter_list; + struct tcf_block *block; + struct gnet_stats_basic_sync *cpu_bstats; + struct 
gnet_stats_queue *cpu_qstats; + long unsigned int rcu_state; +}; + +struct tcx_entry { + struct mini_Qdisc *miniq; + struct bpf_mprog_bundle bundle; + u32 miniq_active; + struct callback_head rcu; +}; + +struct tcx_link { + struct bpf_link link; + struct net_device *dev; + u32 location; +}; + +enum vmscan_throttle_state { + VMSCAN_THROTTLE_WRITEBACK = 0, + VMSCAN_THROTTLE_ISOLATED = 1, + VMSCAN_THROTTLE_NOPROGRESS = 2, + VMSCAN_THROTTLE_CONGESTED = 3, + NR_VMSCAN_THROTTLE = 4, +}; + +enum lruvec_flags { + LRUVEC_CGROUP_CONGESTED = 0, + LRUVEC_NODE_CONGESTED = 1, +}; + +enum zone_watermarks { + WMARK_MIN = 0, + WMARK_LOW = 1, + WMARK_HIGH = 2, + WMARK_PROMO = 3, + NR_WMARK = 4, +}; + +enum pgdat_flags { + PGDAT_DIRTY = 0, + PGDAT_WRITEBACK = 1, + PGDAT_RECLAIM_LOCKED = 2, +}; + +enum zone_flags { + ZONE_BOOSTED_WATERMARK = 0, + ZONE_RECLAIM_ACTIVE = 1, + ZONE_BELOW_HIGH = 2, +}; + +enum { + ZONELIST_FALLBACK = 0, + MAX_ZONELISTS = 1, +}; + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 524288, + AOP_TRUNCATED_PAGE = 524289, +}; + +struct reclaim_stat { + unsigned int nr_dirty; + unsigned int nr_unqueued_dirty; + unsigned int nr_congested; + unsigned int nr_writeback; + unsigned int nr_immediate; + unsigned int nr_pageout; + unsigned int nr_activate[2]; + unsigned int nr_ref_keep; + unsigned int nr_unmap_fail; + unsigned int nr_lazyfree_fail; + unsigned int nr_demoted; +}; + +enum { + CGRP_ROOT_NOPREFIX = 2, + CGRP_ROOT_XATTR = 4, + CGRP_ROOT_NS_DELEGATE = 8, + CGRP_ROOT_FAVOR_DYNMODS = 16, + CGRP_ROOT_CPUSET_V2_MODE = 65536, + CGRP_ROOT_MEMORY_LOCAL_EVENTS = 131072, + CGRP_ROOT_MEMORY_RECURSIVE_PROT = 262144, + CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = 524288, + CGRP_ROOT_PIDS_LOCAL_EVENTS = 1048576, +}; + +struct mem_cgroup_reclaim_cookie { + pg_data_t *pgdat; + int generation; +}; + +enum ttu_flags { + TTU_SPLIT_HUGE_PMD = 4, + TTU_IGNORE_MLOCK = 8, + TTU_SYNC = 16, + TTU_HWPOISON = 32, + TTU_BATCH_FLUSH = 64, + TTU_RMAP_LOCKED = 128, +}; + +typedef struct folio *new_folio_t(struct folio *, long unsigned int); + +typedef void free_folio_t(struct folio *, long unsigned int); + +struct migration_target_control { + int nid; + nodemask_t *nmask; + gfp_t gfp_mask; + enum migrate_reason reason; +}; + +struct trace_event_raw_mm_vmscan_kswapd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_kswapd_wake { + struct trace_entry ent; + int nid; + int zid; + int order; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_wakeup_kswapd { + struct trace_entry ent; + int nid; + int zid; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { + struct trace_entry ent; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_end_template { + struct trace_entry ent; + long unsigned int nr_reclaimed; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_start { + struct trace_entry ent; + struct shrinker *shr; + void *shrink; + int nid; + long int nr_objects_to_shrink; + long unsigned int gfp_flags; + long unsigned int cache_items; + long long unsigned int delta; + long unsigned int total_scan; + int priority; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_end { + struct trace_entry ent; + struct shrinker *shr; + int nid; + void *shrink; + long int unused_scan; + long int new_scan; + int retval; + long int total_scan; + char __data[0]; +}; + +struct 
trace_event_raw_mm_vmscan_lru_isolate { + struct trace_entry ent; + int highest_zoneidx; + int order; + long unsigned int nr_requested; + long unsigned int nr_scanned; + long unsigned int nr_skipped; + long unsigned int nr_taken; + int lru; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_write_folio { + struct trace_entry ent; + long unsigned int pfn; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_reclaim_pages { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_inactive { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_active { + struct trace_entry ent; + int nid; + long unsigned int nr_taken; + long unsigned int nr_active; + long unsigned int nr_deactivated; + long unsigned int nr_referenced; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_node_reclaim_begin { + struct trace_entry ent; + int nid; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_throttled { + struct trace_entry ent; + int nid; + int usec_timeout; + int usec_delayed; + int reason; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; + +struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; + +struct trace_event_data_offsets_mm_shrink_slab_start {}; + +struct trace_event_data_offsets_mm_shrink_slab_end {}; + +struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; + +struct trace_event_data_offsets_mm_vmscan_write_folio {}; + +struct trace_event_data_offsets_mm_vmscan_reclaim_pages {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; + +struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {}; + +struct trace_event_data_offsets_mm_vmscan_throttled {}; + +typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int); + +typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int); + +typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, 
long unsigned int); + +typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, struct shrink_control *, long int, long unsigned int, long long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long int, long int, long int); + +typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_vmscan_write_folio)(void *, struct folio *); + +typedef void (*btf_trace_mm_vmscan_reclaim_pages)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *, int, int); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int, int); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_throttled)(void *, int, int, int, int); + +struct scan_control { + long unsigned int nr_to_reclaim; + nodemask_t *nodemask; + struct mem_cgroup *target_mem_cgroup; + long unsigned int anon_cost; + long unsigned int file_cost; + int *proactive_swappiness; + unsigned int may_deactivate: 2; + unsigned int force_deactivate: 1; + unsigned int skipped_deactivate: 1; + unsigned int may_writepage: 1; + unsigned int may_unmap: 1; + unsigned int may_swap: 1; + unsigned int no_cache_trim_mode: 1; + unsigned int cache_trim_mode_failed: 1; + unsigned int proactive: 1; + unsigned int memcg_low_reclaim: 1; + unsigned int memcg_low_skipped: 1; + unsigned int memcg_full_walk: 1; + unsigned int hibernation_mode: 1; + unsigned int compaction_ready: 1; + unsigned int cache_trim_mode: 1; + unsigned int file_is_tiny: 1; + unsigned int no_demotion: 1; + s8 order; + s8 priority; + s8 reclaim_idx; + gfp_t gfp_mask; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + struct { + unsigned int dirty; + unsigned int unqueued_dirty; + unsigned int congested; + unsigned int writeback; + unsigned int immediate; + unsigned int file_taken; + unsigned int taken; + } nr; + struct reclaim_state reclaim_state; +}; + +typedef enum { + PAGE_KEEP = 0, + PAGE_ACTIVATE = 1, + PAGE_SUCCESS = 2, + PAGE_CLEAN = 3, +} pageout_t; + +enum folio_references { + FOLIOREF_RECLAIM = 0, + FOLIOREF_RECLAIM_CLEAN = 1, + FOLIOREF_KEEP = 2, + FOLIOREF_ACTIVATE = 3, +}; + +enum scan_balance { + SCAN_EQUAL = 0, + SCAN_FRACT = 1, + SCAN_ANON = 2, + SCAN_FILE = 3, +}; + +struct stat { + long unsigned int st_dev; + long unsigned int st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + long unsigned int st_rdev; + long unsigned int __pad1; + long int st_size; + int st_blksize; + int __pad2; + long int st_blocks; + long int st_atime; + long unsigned int st_atime_nsec; + long int st_mtime; + long unsigned int st_mtime_nsec; + long int st_ctime; + long unsigned int st_ctime_nsec; + unsigned int __unused4; + unsigned int __unused5; +}; + +struct statx_timestamp { + __s64 tv_sec; + __u32 tv_nsec; + __s32 __reserved; +}; + +struct statx { + __u32 stx_mask; + __u32 stx_blksize; + __u64 stx_attributes; + __u32 stx_nlink; + __u32 stx_uid; + __u32 stx_gid; + __u16 stx_mode; + __u16 __spare0[1]; + __u64 stx_ino; + __u64 stx_size; + __u64 
stx_blocks; + __u64 stx_attributes_mask; + struct statx_timestamp stx_atime; + struct statx_timestamp stx_btime; + struct statx_timestamp stx_ctime; + struct statx_timestamp stx_mtime; + __u32 stx_rdev_major; + __u32 stx_rdev_minor; + __u32 stx_dev_major; + __u32 stx_dev_minor; + __u64 stx_mnt_id; + __u32 stx_dio_mem_align; + __u32 stx_dio_offset_align; + __u64 stx_subvol; + __u32 stx_atomic_write_unit_min; + __u32 stx_atomic_write_unit_max; + __u32 stx_atomic_write_segments_max; + __u32 __spare1[1]; + __u64 __spare3[9]; +}; + +typedef struct fd class_fd_raw_t; + +struct mnt_idmap { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + refcount_t count; +}; + +struct dnotify_struct { + struct dnotify_struct *dn_next; + __u32 dn_mask; + int dn_fd; + struct file *dn_filp; + fl_owner_t dn_owner; +}; + +struct dnotify_mark { + struct fsnotify_mark fsn_mark; + struct dnotify_struct *dn; +}; + +struct kernfs_super_info { + struct super_block *sb; + struct kernfs_root *root; + const void *ns; + struct list_head node; +}; + +struct shortname_info { + unsigned char lower: 1; + unsigned char upper: 1; + unsigned char valid: 1; +}; + +enum btrfs_ref_type { + BTRFS_REF_NOT_SET = 0, + BTRFS_REF_DATA = 1, + BTRFS_REF_METADATA = 2, + BTRFS_REF_LAST = 3, +} __attribute__((mode(byte))); + +struct btrfs_ref { + enum btrfs_ref_type type; + enum btrfs_delayed_ref_action action; + bool skip_qgroup; + u64 bytenr; + u64 num_bytes; + u64 owning_root; + u64 ref_root; + u64 parent; + union { + struct btrfs_data_ref data_ref; + struct btrfs_tree_ref tree_ref; + }; +}; + +struct btrfs_log_ctx { + int log_ret; + int log_transid; + bool log_new_dentries; + bool logging_new_name; + bool logging_new_delayed_dentries; + bool logged_before; + struct btrfs_inode *inode; + struct list_head list; + struct list_head ordered_extents; + struct list_head conflict_inodes; + int num_conflict_inodes; + bool logging_conflict_inodes; + struct extent_buffer *scratch_eb; +}; + +struct falloc_range { + struct list_head list; + u64 start; + u64 len; +}; + +enum { + RANGE_BOUNDARY_WRITTEN_EXTENT = 0, + RANGE_BOUNDARY_PREALLOC_EXTENT = 1, + RANGE_BOUNDARY_HOLE = 2, +}; + +struct prop_handler { + struct hlist_node node; + const char *xattr_name; + int (*validate)(const struct btrfs_inode *, const char *, size_t); + int (*apply)(struct inode *, const char *, size_t); + const char * (*extract)(const struct inode *); + bool (*ignore)(const struct btrfs_inode *); + int inheritable; +}; + +enum key_need_perm { + KEY_NEED_UNSPECIFIED = 0, + KEY_NEED_VIEW = 1, + KEY_NEED_READ = 2, + KEY_NEED_WRITE = 3, + KEY_NEED_SEARCH = 4, + KEY_NEED_LINK = 5, + KEY_NEED_SETATTR = 6, + KEY_NEED_UNLINK = 7, + KEY_SYSADMIN_OVERRIDE = 8, + KEY_AUTHTOKEN_OVERRIDE = 9, + KEY_DEFER_PERM_CHECK = 10, +}; + +enum key_lookup_flag { + KEY_LOOKUP_CREATE = 1, + KEY_LOOKUP_PARTIAL = 2, + KEY_LOOKUP_ALL = 3, +}; + +enum key_state { + KEY_IS_UNINSTANTIATED = 0, + KEY_IS_POSITIVE = 1, +}; + +struct key_user { + struct rb_node node; + struct mutex cons_lock; + spinlock_t lock; + refcount_t usage; + atomic_t nkeys; + atomic_t nikeys; + kuid_t uid; + int qnkeys; + int qnbytes; +}; + +struct keyctl_dh_params { + union { + __s32 private; + __s32 priv; + }; + __s32 prime; + __s32 base; +}; + +struct keyctl_kdf_params { + char *hashname; + char *otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct keyctl_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; + __u32 __spare[10]; 
+}; + +struct keyctl_pkey_params { + __s32 key_id; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + __u32 __spare[7]; +}; + +struct request_key_auth { + struct callback_head rcu; + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +}; + +enum key_notification_subtype { + NOTIFY_KEY_INSTANTIATED = 0, + NOTIFY_KEY_UPDATED = 1, + NOTIFY_KEY_LINKED = 2, + NOTIFY_KEY_UNLINKED = 3, + NOTIFY_KEY_CLEARED = 4, + NOTIFY_KEY_REVOKED = 5, + NOTIFY_KEY_INVALIDATED = 6, + NOTIFY_KEY_SETATTR = 7, +}; + +struct aead_request { + struct crypto_async_request base; + unsigned int assoclen; + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + void *__ctx[0]; +}; + +struct crypto_aead; + +struct aead_alg { + int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); + int (*setauthsize)(struct crypto_aead *, unsigned int); + int (*encrypt)(struct aead_request *); + int (*decrypt)(struct aead_request *); + int (*init)(struct crypto_aead *); + void (*exit)(struct crypto_aead *); + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct rtattr { + short unsigned int rta_len; + short unsigned int rta_type; +}; + +struct aead_instance { + void (*free)(struct aead_instance *); + union { + struct { + char head[64]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct ahash_request { + struct crypto_async_request base; + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + void *priv; + void *__ctx[0]; +}; + +struct crypto_ahash { + bool using_shash; + unsigned int statesize; + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC = 0, + CRYPTO_AUTHENC_KEYA_PARAM = 1, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + unsigned int authkeylen; + unsigned int enckeylen; +}; + +struct authenc_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; + unsigned int reqoff; +}; + +struct crypto_authenc_ctx { + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; +}; + +struct klist_node; + +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +}; + +struct klist_node { + void *n_klist; + struct list_head n_node; + struct kref n_ref; +}; + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + +struct subsys_private; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; + struct subsys_private *sp; +}; + +enum { + GENHD_FL_REMOVABLE = 1, + GENHD_FL_HIDDEN = 2, + GENHD_FL_NO_PART = 4, +}; + +struct uuidcmp { + const char *uuid; + int len; +}; + +struct io_uring_cmd { + struct file *file; + const struct io_uring_sqe *sqe; + void (*task_work_cb)(struct io_uring_cmd *, unsigned int); + u32 cmd_op; + u32 flags; + u8 pdu[32]; +}; + +enum io_uring_sqe_flags_bit { + IOSQE_FIXED_FILE_BIT = 0, + IOSQE_IO_DRAIN_BIT = 1, 
+ IOSQE_IO_LINK_BIT = 2, + IOSQE_IO_HARDLINK_BIT = 3, + IOSQE_ASYNC_BIT = 4, + IOSQE_BUFFER_SELECT_BIT = 5, + IOSQE_CQE_SKIP_SUCCESS_BIT = 6, +}; + +enum io_uring_op { + IORING_OP_NOP = 0, + IORING_OP_READV = 1, + IORING_OP_WRITEV = 2, + IORING_OP_FSYNC = 3, + IORING_OP_READ_FIXED = 4, + IORING_OP_WRITE_FIXED = 5, + IORING_OP_POLL_ADD = 6, + IORING_OP_POLL_REMOVE = 7, + IORING_OP_SYNC_FILE_RANGE = 8, + IORING_OP_SENDMSG = 9, + IORING_OP_RECVMSG = 10, + IORING_OP_TIMEOUT = 11, + IORING_OP_TIMEOUT_REMOVE = 12, + IORING_OP_ACCEPT = 13, + IORING_OP_ASYNC_CANCEL = 14, + IORING_OP_LINK_TIMEOUT = 15, + IORING_OP_CONNECT = 16, + IORING_OP_FALLOCATE = 17, + IORING_OP_OPENAT = 18, + IORING_OP_CLOSE = 19, + IORING_OP_FILES_UPDATE = 20, + IORING_OP_STATX = 21, + IORING_OP_READ = 22, + IORING_OP_WRITE = 23, + IORING_OP_FADVISE = 24, + IORING_OP_MADVISE = 25, + IORING_OP_SEND = 26, + IORING_OP_RECV = 27, + IORING_OP_OPENAT2 = 28, + IORING_OP_EPOLL_CTL = 29, + IORING_OP_SPLICE = 30, + IORING_OP_PROVIDE_BUFFERS = 31, + IORING_OP_REMOVE_BUFFERS = 32, + IORING_OP_TEE = 33, + IORING_OP_SHUTDOWN = 34, + IORING_OP_RENAMEAT = 35, + IORING_OP_UNLINKAT = 36, + IORING_OP_MKDIRAT = 37, + IORING_OP_SYMLINKAT = 38, + IORING_OP_LINKAT = 39, + IORING_OP_MSG_RING = 40, + IORING_OP_FSETXATTR = 41, + IORING_OP_SETXATTR = 42, + IORING_OP_FGETXATTR = 43, + IORING_OP_GETXATTR = 44, + IORING_OP_SOCKET = 45, + IORING_OP_URING_CMD = 46, + IORING_OP_SEND_ZC = 47, + IORING_OP_SENDMSG_ZC = 48, + IORING_OP_READ_MULTISHOT = 49, + IORING_OP_WAITID = 50, + IORING_OP_FUTEX_WAIT = 51, + IORING_OP_FUTEX_WAKE = 52, + IORING_OP_FUTEX_WAITV = 53, + IORING_OP_FIXED_FD_INSTALL = 54, + IORING_OP_FTRUNCATE = 55, + IORING_OP_BIND = 56, + IORING_OP_LISTEN = 57, + IORING_OP_LAST = 58, +}; + +struct io_sqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 flags; + __u32 dropped; + __u32 array; + __u32 resv1; + __u64 user_addr; +}; + +struct io_cqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 overflow; + __u32 cqes; + __u32 flags; + __u32 resv1; + __u64 user_addr; +}; + +struct io_uring_params { + __u32 sq_entries; + __u32 cq_entries; + __u32 flags; + __u32 sq_thread_cpu; + __u32 sq_thread_idle; + __u32 features; + __u32 wq_fd; + __u32 resv[3]; + struct io_sqring_offsets sq_off; + struct io_cqring_offsets cq_off; +}; + +enum io_uring_register_op { + IORING_REGISTER_BUFFERS = 0, + IORING_UNREGISTER_BUFFERS = 1, + IORING_REGISTER_FILES = 2, + IORING_UNREGISTER_FILES = 3, + IORING_REGISTER_EVENTFD = 4, + IORING_UNREGISTER_EVENTFD = 5, + IORING_REGISTER_FILES_UPDATE = 6, + IORING_REGISTER_EVENTFD_ASYNC = 7, + IORING_REGISTER_PROBE = 8, + IORING_REGISTER_PERSONALITY = 9, + IORING_UNREGISTER_PERSONALITY = 10, + IORING_REGISTER_RESTRICTIONS = 11, + IORING_REGISTER_ENABLE_RINGS = 12, + IORING_REGISTER_FILES2 = 13, + IORING_REGISTER_FILES_UPDATE2 = 14, + IORING_REGISTER_BUFFERS2 = 15, + IORING_REGISTER_BUFFERS_UPDATE = 16, + IORING_REGISTER_IOWQ_AFF = 17, + IORING_UNREGISTER_IOWQ_AFF = 18, + IORING_REGISTER_IOWQ_MAX_WORKERS = 19, + IORING_REGISTER_RING_FDS = 20, + IORING_UNREGISTER_RING_FDS = 21, + IORING_REGISTER_PBUF_RING = 22, + IORING_UNREGISTER_PBUF_RING = 23, + IORING_REGISTER_SYNC_CANCEL = 24, + IORING_REGISTER_FILE_ALLOC_RANGE = 25, + IORING_REGISTER_PBUF_STATUS = 26, + IORING_REGISTER_NAPI = 27, + IORING_UNREGISTER_NAPI = 28, + IORING_REGISTER_CLOCK = 29, + IORING_REGISTER_CLONE_BUFFERS = 30, + IORING_REGISTER_SEND_MSG_RING = 31, + IORING_REGISTER_RESIZE_RINGS = 33, 
+ IORING_REGISTER_MEM_REGION = 34, + IORING_REGISTER_LAST = 35, + IORING_REGISTER_USE_REGISTERED_RING = 2147483648, +}; + +struct io_uring_buf { + __u64 addr; + __u32 len; + __u16 bid; + __u16 resv; +}; + +struct io_uring_buf_ring { + union { + struct { + __u64 resv1; + __u32 resv2; + __u16 resv3; + __u16 tail; + }; + struct { + struct {} __empty_bufs; + struct io_uring_buf bufs[0]; + }; + }; +}; + +enum { + IORING_REG_WAIT_TS = 1, +}; + +struct io_uring_reg_wait { + struct __kernel_timespec ts; + __u32 min_wait_usec; + __u32 flags; + __u64 sigmask; + __u32 sigmask_sz; + __u32 pad[3]; + __u64 pad2[2]; +}; + +struct io_uring_getevents_arg { + __u64 sigmask; + __u32 sigmask_sz; + __u32 min_wait_usec; + __u64 ts; +}; + +enum { + IOU_F_TWQ_LAZY_WAKE = 1, +}; + +struct io_sq_data { + refcount_t refs; + atomic_t park_pending; + struct mutex lock; + struct list_head ctx_list; + struct task_struct *thread; + struct wait_queue_head wait; + unsigned int sq_thread_idle; + int sq_cpu; + pid_t task_pid; + pid_t task_tgid; + u64 work_time; + long unsigned int state; + struct completion exited; +}; + +enum { + REQ_F_FIXED_FILE_BIT = 0, + REQ_F_IO_DRAIN_BIT = 1, + REQ_F_LINK_BIT = 2, + REQ_F_HARDLINK_BIT = 3, + REQ_F_FORCE_ASYNC_BIT = 4, + REQ_F_BUFFER_SELECT_BIT = 5, + REQ_F_CQE_SKIP_BIT = 6, + REQ_F_FAIL_BIT = 8, + REQ_F_INFLIGHT_BIT = 9, + REQ_F_CUR_POS_BIT = 10, + REQ_F_NOWAIT_BIT = 11, + REQ_F_LINK_TIMEOUT_BIT = 12, + REQ_F_NEED_CLEANUP_BIT = 13, + REQ_F_POLLED_BIT = 14, + REQ_F_HYBRID_IOPOLL_STATE_BIT = 15, + REQ_F_BUFFER_SELECTED_BIT = 16, + REQ_F_BUFFER_RING_BIT = 17, + REQ_F_REISSUE_BIT = 18, + REQ_F_CREDS_BIT = 19, + REQ_F_REFCOUNT_BIT = 20, + REQ_F_ARM_LTIMEOUT_BIT = 21, + REQ_F_ASYNC_DATA_BIT = 22, + REQ_F_SKIP_LINK_CQES_BIT = 23, + REQ_F_SINGLE_POLL_BIT = 24, + REQ_F_DOUBLE_POLL_BIT = 25, + REQ_F_APOLL_MULTISHOT_BIT = 26, + REQ_F_CLEAR_POLLIN_BIT = 27, + REQ_F_SUPPORT_NOWAIT_BIT = 28, + REQ_F_ISREG_BIT = 29, + REQ_F_POLL_NO_LAZY_BIT = 30, + REQ_F_CAN_POLL_BIT = 31, + REQ_F_BL_EMPTY_BIT = 32, + REQ_F_BL_NO_RECYCLE_BIT = 33, + REQ_F_BUFFERS_COMMIT_BIT = 34, + REQ_F_BUF_NODE_BIT = 35, + __REQ_F_LAST_BIT = 36, +}; + +struct io_buffer { + struct list_head list; + __u64 addr; + __u32 len; + __u16 bid; + __u16 bgid; +}; + +struct io_buffer_list { + union { + struct list_head buf_list; + struct { + struct page **buf_pages; + struct io_uring_buf_ring *buf_ring; + }; + struct callback_head rcu; + }; + __u16 bgid; + __u16 buf_nr_pages; + __u16 nr_entries; + __u16 head; + __u16 mask; + __u16 flags; + atomic_t refs; +}; + +struct io_poll { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + int retries; + struct wait_queue_entry wait; +}; + +struct async_poll { + struct io_poll poll; + struct io_poll *double_poll; +}; + +struct io_overflow_cqe { + struct list_head list; + struct io_uring_cqe cqe; +}; + +struct trace_event_raw_io_uring_create { + struct trace_entry ent; + int fd; + void *ctx; + u32 sq_entries; + u32 cq_entries; + u32 flags; + char __data[0]; +}; + +struct trace_event_raw_io_uring_register { + struct trace_entry ent; + void *ctx; + unsigned int opcode; + unsigned int nr_files; + unsigned int nr_bufs; + long int ret; + char __data[0]; +}; + +struct trace_event_raw_io_uring_file_get { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int fd; + char __data[0]; +}; + +struct trace_event_raw_io_uring_queue_async_work { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + u8 opcode; + long long unsigned int flags; + struct io_wq_work 
*work; + int rw; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_defer { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int data; + u8 opcode; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_link { + struct trace_entry ent; + void *ctx; + void *req; + void *target_req; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqring_wait { + struct trace_entry ent; + void *ctx; + int min_events; + char __data[0]; +}; + +struct trace_event_raw_io_uring_fail_link { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + void *link; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_complete { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int res; + unsigned int cflags; + u64 extra1; + u64 extra2; + char __data[0]; +}; + +struct trace_event_raw_io_uring_submit_req { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + long long unsigned int flags; + bool sq_thread; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_poll_arm { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + int mask; + int events; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_add { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + int mask; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_req_failed { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + u8 flags; + u8 ioprio; + u64 off; + u64 addr; + u32 len; + u32 op_flags; + u16 buf_index; + u16 personality; + u32 file_index; + u64 pad1; + u64 addr3; + int error; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqe_overflow { + struct trace_entry ent; + void *ctx; + long long unsigned int user_data; + s32 res; + u32 cflags; + void *ocqe; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_work_run { + struct trace_entry ent; + void *tctx; + unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_io_uring_short_write { + struct trace_entry ent; + void *ctx; + u64 fpos; + u64 wanted; + u64 got; + char __data[0]; +}; + +struct trace_event_raw_io_uring_local_work_run { + struct trace_entry ent; + void *ctx; + int count; + unsigned int loops; + char __data[0]; +}; + +struct trace_event_data_offsets_io_uring_create {}; + +struct trace_event_data_offsets_io_uring_register {}; + +struct trace_event_data_offsets_io_uring_file_get {}; + +struct trace_event_data_offsets_io_uring_queue_async_work { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_defer { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_link {}; + +struct trace_event_data_offsets_io_uring_cqring_wait {}; + +struct trace_event_data_offsets_io_uring_fail_link { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_complete {}; + +struct trace_event_data_offsets_io_uring_submit_req { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_poll_arm { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_task_add { + u32 op_str; + const void *op_str_ptr_; +}; + +struct 
trace_event_data_offsets_io_uring_req_failed { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_cqe_overflow {}; + +struct trace_event_data_offsets_io_uring_task_work_run {}; + +struct trace_event_data_offsets_io_uring_short_write {}; + +struct trace_event_data_offsets_io_uring_local_work_run {}; + +typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32); + +typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, unsigned int, long int); + +typedef void (*btf_trace_io_uring_file_get)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_queue_async_work)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_defer)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int); + +typedef void (*btf_trace_io_uring_fail_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_complete)(void *, struct io_ring_ctx *, void *, struct io_uring_cqe *); + +typedef void (*btf_trace_io_uring_submit_req)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_poll_arm)(void *, struct io_kiocb *, int, int); + +typedef void (*btf_trace_io_uring_task_add)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_req_failed)(void *, const struct io_uring_sqe *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_cqe_overflow)(void *, void *, long long unsigned int, s32, u32, void *); + +typedef void (*btf_trace_io_uring_task_work_run)(void *, void *, unsigned int); + +typedef void (*btf_trace_io_uring_short_write)(void *, void *, u64, u64, u64); + +typedef void (*btf_trace_io_uring_local_work_run)(void *, void *, int, unsigned int); + +enum { + IO_WQ_WORK_CANCEL = 1, + IO_WQ_WORK_HASHED = 2, + IO_WQ_WORK_UNBOUND = 4, + IO_WQ_WORK_CONCURRENT = 16, + IO_WQ_HASH_SHIFT = 24, +}; + +enum io_wq_cancel { + IO_WQ_CANCEL_OK = 0, + IO_WQ_CANCEL_RUNNING = 1, + IO_WQ_CANCEL_NOTFOUND = 2, +}; + +typedef bool work_cancel_fn(struct io_wq_work *, void *); + +struct io_wait_queue { + struct wait_queue_entry wq; + struct io_ring_ctx *ctx; + unsigned int cq_tail; + unsigned int cq_min_tail; + unsigned int nr_timeouts; + int hit_timeout; + ktime_t min_timeout; + ktime_t timeout; + struct hrtimer t; + ktime_t napi_busy_poll_dt; + bool napi_prefer_busy_poll; +}; + +enum { + IO_CHECK_CQ_OVERFLOW_BIT = 0, + IO_CHECK_CQ_DROPPED_BIT = 1, +}; + +struct io_issue_def { + unsigned int needs_file: 1; + unsigned int plug: 1; + unsigned int hash_reg_file: 1; + unsigned int unbound_nonreg_file: 1; + unsigned int pollin: 1; + unsigned int pollout: 1; + unsigned int poll_exclusive: 1; + unsigned int buffer_select: 1; + unsigned int audit_skip: 1; + unsigned int ioprio: 1; + unsigned int iopoll: 1; + unsigned int iopoll_queue: 1; + unsigned int vectored: 1; + short unsigned int async_size; + int (*issue)(struct io_kiocb *, unsigned int); + int (*prep)(struct io_kiocb *, const struct io_uring_sqe *); +}; + +struct io_cold_def { + const char *name; + void (*cleanup)(struct io_kiocb *); + void (*fail)(struct io_kiocb *); +}; + +struct io_tctx_node { + struct list_head ctx_node; + struct task_struct *task; + struct io_ring_ctx *ctx; +}; + +enum { + IOBL_BUF_RING = 1, + IOBL_MMAP = 2, + IOBL_INC = 4, +}; + +enum { + IO_APOLL_OK = 0, + IO_APOLL_ABORTED = 1, + IO_APOLL_READY = 2, +}; + +struct io_defer_entry { + struct list_head list; + 
struct io_kiocb *req; + u32 seq; +}; + +struct ext_arg { + size_t argsz; + struct timespec64 ts; + const sigset_t *sig; + ktime_t min_time; + bool ts_set; +}; + +struct io_tctx_exit { + struct callback_head task_work; + struct completion completion; + struct io_ring_ctx *ctx; +}; + +struct io_task_cancel { + struct io_uring_task *tctx; + bool all; +}; + +struct creds; + +typedef u64 pci_bus_addr_t; + +struct pci_bus_region { + pci_bus_addr_t start; + pci_bus_addr_t end; +}; + +typedef unsigned char u_char; + +typedef short unsigned int u_short; + +struct fb_fix_screeninfo { + char id[16]; + long unsigned int smem_start; + __u32 smem_len; + __u32 type; + __u32 type_aux; + __u32 visual; + __u16 xpanstep; + __u16 ypanstep; + __u16 ywrapstep; + __u32 line_length; + long unsigned int mmio_start; + __u32 mmio_len; + __u32 accel; + __u16 capabilities; + __u16 reserved[2]; +}; + +struct fb_bitfield { + __u32 offset; + __u32 length; + __u32 msb_right; +}; + +struct fb_var_screeninfo { + __u32 xres; + __u32 yres; + __u32 xres_virtual; + __u32 yres_virtual; + __u32 xoffset; + __u32 yoffset; + __u32 bits_per_pixel; + __u32 grayscale; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + __u32 nonstd; + __u32 activate; + __u32 height; + __u32 width; + __u32 accel_flags; + __u32 pixclock; + __u32 left_margin; + __u32 right_margin; + __u32 upper_margin; + __u32 lower_margin; + __u32 hsync_len; + __u32 vsync_len; + __u32 sync; + __u32 vmode; + __u32 rotate; + __u32 colorspace; + __u32 reserved[4]; +}; + +struct fb_cmap { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_fillrect { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_image { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 fg_color; + __u32 bg_color; + __u8 depth; + const char *data; + struct fb_cmap cmap; +}; + +struct fbcurpos { + __u16 x; + __u16 y; +}; + +struct fb_cursor { + __u16 set; + __u16 enable; + __u16 rop; + const char *mask; + struct fbcurpos hot; + struct fb_image image; +}; + +struct fb_chroma { + __u32 redx; + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_videomode; + +struct fb_monspecs { + struct fb_chroma chroma; + struct fb_videomode *modedb; + __u8 manufacturer[4]; + __u8 monitor[14]; + __u8 serial_no[14]; + __u8 ascii[14]; + __u32 modedb_len; + __u32 model; + __u32 serial; + __u32 year; + __u32 week; + __u32 hfmin; + __u32 hfmax; + __u32 dclkmin; + __u32 dclkmax; + __u16 input; + __u16 dpms; + __u16 signal; + __u16 vfmin; + __u16 vfmax; + __u16 gamma; + __u16 gtf: 1; + __u16 misc; + __u8 version; + __u8 revision; + __u8 max_x; + __u8 max_y; +}; + +struct fb_videomode { + const char *name; + u32 refresh; + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +struct fb_info; + +struct fb_pixmap { + u8 *addr; + u32 size; + u32 offset; + u32 buf_align; + u32 scan_align; + u32 access_align; + u32 flags; + long unsigned int blit_x[1]; + long unsigned int blit_y[2]; + void (*writeio)(struct fb_info *, void *, void *, unsigned int); + void (*readio)(struct fb_info *, void *, void *, unsigned int); +}; + +struct lcd_device; + +struct 
fb_ops; + +struct fb_info { + refcount_t count; + int node; + int flags; + int fbcon_rotate_hint; + struct mutex lock; + struct mutex mm_lock; + struct fb_var_screeninfo var; + struct fb_fix_screeninfo fix; + struct fb_monspecs monspecs; + struct fb_pixmap pixmap; + struct fb_pixmap sprite; + struct fb_cmap cmap; + struct list_head modelist; + struct fb_videomode *mode; + struct lcd_device *lcd_dev; + const struct fb_ops *fbops; + struct device *device; + struct device *dev; + int class_flag; + union { + char *screen_base; + char *screen_buffer; + }; + long unsigned int screen_size; + void *pseudo_palette; + u32 state; + void *fbcon_par; + void *par; + bool skip_vt_switch; + bool skip_panic; +}; + +struct fb_blit_caps { + long unsigned int x[1]; + long unsigned int y[2]; + u32 len; + u32 flags; +}; + +struct fb_ops { + struct module *owner; + int (*fb_open)(struct fb_info *, int); + int (*fb_release)(struct fb_info *, int); + ssize_t (*fb_read)(struct fb_info *, char *, size_t, loff_t *); + ssize_t (*fb_write)(struct fb_info *, const char *, size_t, loff_t *); + int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); + int (*fb_set_par)(struct fb_info *); + int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *); + int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); + int (*fb_blank)(int, struct fb_info *); + int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); + void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); + void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); + void (*fb_imageblit)(struct fb_info *, const struct fb_image *); + int (*fb_cursor)(struct fb_info *, struct fb_cursor *); + int (*fb_sync)(struct fb_info *); + int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); + void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *); + void (*fb_destroy)(struct fb_info *); + int (*fb_debug_enter)(struct fb_info *); + int (*fb_debug_leave)(struct fb_info *); +}; + +struct fbcon_display { + const u_char *fontdata; + int userfont; + u_short inverse; + short int yscroll; + int vrows; + int cursor_shape; + int con_rotate; + u32 xres_virtual; + u32 yres_virtual; + u32 height; + u32 width; + u32 bits_per_pixel; + u32 grayscale; + u32 nonstd; + u32 accel_flags; + u32 rotate; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + const struct fb_videomode *mode; +}; + +struct fbcon_ops { + void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int); + void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int); + void (*putcs)(struct vc_data *, struct fb_info *, const short unsigned int *, int, int, int, int, int); + void (*clear_margins)(struct vc_data *, struct fb_info *, int, int); + void (*cursor)(struct vc_data *, struct fb_info *, bool, int, int); + int (*update_start)(struct fb_info *); + int (*rotate_font)(struct fb_info *, struct vc_data *); + struct fb_var_screeninfo var; + struct delayed_work cursor_work; + struct fb_cursor cursor_state; + struct fbcon_display *p; + struct fb_info *info; + int currcon; + int cur_blink_jiffies; + int cursor_flash; + int cursor_reset; + int blank_state; + int graphics; + int save_graphics; + bool initialized; + int rotate; + int cur_rotate; + char *cursor_data; + u8 
*fontbuffer; + u8 *fontdata; + u8 *cursor_src; + u32 cursor_size; + u32 fd_size; +}; + +enum oom_constraint { + CONSTRAINT_NONE = 0, + CONSTRAINT_CPUSET = 1, + CONSTRAINT_MEMORY_POLICY = 2, + CONSTRAINT_MEMCG = 3, +}; + +struct oom_control { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct mem_cgroup *memcg; + const gfp_t gfp_mask; + const int order; + long unsigned int totalpages; + struct task_struct *chosen; + long int chosen_points; + enum oom_constraint constraint; +}; + +struct sysrq_state { + struct input_handle handle; + struct work_struct reinject_work; + long unsigned int key_down[12]; + unsigned int alt; + unsigned int alt_use; + unsigned int shift; + unsigned int shift_use; + bool active; + bool need_reinject; + bool reinjecting; + bool reset_canceled; + bool reset_requested; + long unsigned int reset_keybit[12]; + int reset_seq_len; + int reset_seq_cnt; + int reset_seq_version; + struct timer_list keyreset_timer; +}; + +struct execute_work { + struct work_struct work; +}; + +struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +}; + +typedef s32 compat_int_t; + +typedef u32 compat_uint_t; + +typedef u32 compat_uptr_t; + +struct class_interface { + struct list_head node; + const struct class *class; + int (*add_dev)(struct device *); + void (*remove_dev)(struct device *); +}; + +struct rq_map_data { + struct page **pages; + long unsigned int offset; + short unsigned int page_order; + short unsigned int nr_entries; + bool null_mapped; + bool from_user; +}; + +enum scsi_msg_byte { + COMMAND_COMPLETE = 0, + EXTENDED_MESSAGE = 1, + SAVE_POINTERS = 2, + RESTORE_POINTERS = 3, + DISCONNECT = 4, + INITIATOR_ERROR = 5, + ABORT_TASK_SET = 6, + MESSAGE_REJECT = 7, + NOP = 8, + MSG_PARITY_ERROR = 9, + LINKED_CMD_COMPLETE = 10, + LINKED_FLG_CMD_COMPLETE = 11, + TARGET_RESET = 12, + ABORT_TASK = 13, + CLEAR_TASK_SET = 14, + INITIATE_RECOVERY = 15, + RELEASE_RECOVERY = 16, + TERMINATE_IO_PROC = 17, + CLEAR_ACA = 22, + LOGICAL_UNIT_RESET = 23, + SIMPLE_QUEUE_TAG = 32, + HEAD_OF_QUEUE_TAG = 33, + ORDERED_QUEUE_TAG = 34, + IGNORE_WIDE_RESIDUE = 35, + ACA = 36, + QAS_REQUEST = 85, + BUS_DEVICE_RESET = 12, + ABORT = 6, +}; + +enum scsi_device_event { + SDEV_EVT_MEDIA_CHANGE = 1, + SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2, + SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3, + SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4, + SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5, + SDEV_EVT_LUN_CHANGE_REPORTED = 6, + SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7, + SDEV_EVT_POWER_ON_RESET_OCCURRED = 8, + SDEV_EVT_FIRST = 1, + SDEV_EVT_LAST = 8, + SDEV_EVT_MAXBITS = 9, +}; + +struct sg_io_hdr { + int interface_id; + int dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + unsigned int dxfer_len; + void *dxferp; + unsigned char *cmdp; + void *sbp; + unsigned int timeout; + unsigned int flags; + int pack_id; + void *usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + int resid; + unsigned int duration; + unsigned int info; +}; + +typedef struct sg_io_hdr sg_io_hdr_t; + +struct compat_sg_io_hdr { + compat_int_t interface_id; + compat_int_t dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + compat_uint_t dxfer_len; + compat_uint_t dxferp; + compat_uptr_t cmdp; + compat_uptr_t sbp; + compat_uint_t timeout; + compat_uint_t 
flags; + compat_int_t pack_id; + compat_uptr_t usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + compat_int_t resid; + compat_uint_t duration; + compat_uint_t info; +}; + +struct sg_scsi_id { + int host_no; + int channel; + int scsi_id; + int lun; + int scsi_type; + short int h_cmd_per_lun; + short int d_queue_depth; + int unused[2]; +}; + +typedef struct sg_scsi_id sg_scsi_id_t; + +struct sg_req_info { + char req_state; + char orphan; + char sg_io_owned; + char problem; + int pack_id; + void *usr_ptr; + unsigned int duration; + int unused; +}; + +typedef struct sg_req_info sg_req_info_t; + +struct sg_header { + int pack_len; + int reply_len; + int pack_id; + int result; + unsigned int twelve_byte: 1; + unsigned int target_status: 5; + unsigned int host_status: 8; + unsigned int driver_status: 8; + unsigned int other_flags: 10; + unsigned char sense_buffer[16]; +}; + +struct sg_scatter_hold { + short unsigned int k_use_sg; + unsigned int sglist_len; + unsigned int bufflen; + struct page **pages; + int page_order; + char dio_in_use; + unsigned char cmd_opcode; +}; + +typedef struct sg_scatter_hold Sg_scatter_hold; + +struct sg_fd; + +struct sg_request { + struct list_head entry; + struct sg_fd *parentfp; + Sg_scatter_hold data; + sg_io_hdr_t header; + unsigned char sense_b[96]; + char res_used; + char orphan; + char sg_io_owned; + char done; + struct request *rq; + struct bio *bio; + struct execute_work ew; +}; + +typedef struct sg_request Sg_request; + +struct sg_device; + +struct sg_fd { + struct list_head sfd_siblings; + struct sg_device *parentdp; + wait_queue_head_t read_wait; + rwlock_t rq_list_lock; + struct mutex f_mutex; + int timeout; + int timeout_user; + Sg_scatter_hold reserve; + struct list_head rq_list; + struct fasync_struct *async_qp; + Sg_request req_arr[16]; + char force_packid; + char cmd_q; + unsigned char next_cmd_len; + char keep_orphan; + char mmap_called; + char res_in_use; + struct kref f_ref; + struct execute_work ew; +}; + +struct sg_device { + struct scsi_device *device; + wait_queue_head_t open_wait; + struct mutex open_rel_lock; + int sg_tablesize; + u32 index; + struct list_head sfds; + rwlock_t sfd_lock; + atomic_t detaching; + bool exclude; + int open_cnt; + char sgdebug; + char name[32]; + struct cdev *cdev; + struct kref d_ref; +}; + +typedef struct sg_fd Sg_fd; + +typedef struct sg_device Sg_device; + +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = 1, + ETH_TEST_FL_FAILED = 2, + ETH_TEST_FL_EXTERNAL_LB = 4, + ETH_TEST_FL_EXTERNAL_LB_DONE = 8, +}; + +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; + +struct e1000_tx_desc { + __le64 buffer_addr; + union { + __le32 data; + struct { + __le16 length; + u8 cso; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; + u8 css; + __le16 special; + } fields; + } upper; +}; + +enum e1000_state_t { + __E1000_TESTING = 0, + __E1000_RESETTING = 1, + __E1000_ACCESS_SHARED_RESOURCE = 2, + __E1000_DOWN = 3, +}; + +enum { + NETDEV_STATS = 0, + E1000_STATS = 1, +}; + +struct e1000_stats { + char stat_string[32]; + int type; + int sizeof_stat; + int stat_offset; +}; + +typedef void 
(*companion_fn)(struct pci_dev *, struct usb_hcd *, struct pci_dev *, struct usb_hcd *); + +typedef class_mutex_t class_mutex_intr_t; + +struct serio_device_id { + __u8 type; + __u8 extra; + __u8 id; + __u8 proto; +}; + +enum dmi_field { + DMI_NONE = 0, + DMI_BIOS_VENDOR = 1, + DMI_BIOS_VERSION = 2, + DMI_BIOS_DATE = 3, + DMI_BIOS_RELEASE = 4, + DMI_EC_FIRMWARE_RELEASE = 5, + DMI_SYS_VENDOR = 6, + DMI_PRODUCT_NAME = 7, + DMI_PRODUCT_VERSION = 8, + DMI_PRODUCT_SERIAL = 9, + DMI_PRODUCT_UUID = 10, + DMI_PRODUCT_SKU = 11, + DMI_PRODUCT_FAMILY = 12, + DMI_BOARD_VENDOR = 13, + DMI_BOARD_NAME = 14, + DMI_BOARD_VERSION = 15, + DMI_BOARD_SERIAL = 16, + DMI_BOARD_ASSET_TAG = 17, + DMI_CHASSIS_VENDOR = 18, + DMI_CHASSIS_TYPE = 19, + DMI_CHASSIS_VERSION = 20, + DMI_CHASSIS_SERIAL = 21, + DMI_CHASSIS_ASSET_TAG = 22, + DMI_STRING_MAX = 23, + DMI_OEM_STRING = 24, +}; + +struct dmi_strmatch { + unsigned char slot: 7; + unsigned char exact_match: 1; + char substr[79]; +}; + +struct dmi_system_id { + int (*callback)(const struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +}; + +struct vivaldi_data { + u32 function_row_physmap[24]; + unsigned int num_function_row_keys; +}; + +struct serio_driver; + +struct serio { + void *port_data; + char name[32]; + char phys[32]; + char firmware_id[128]; + bool manual_bind; + struct serio_device_id id; + spinlock_t lock; + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); + void (*close)(struct serio *); + int (*start)(struct serio *); + void (*stop)(struct serio *); + struct serio *parent; + struct list_head child_node; + struct list_head children; + unsigned int depth; + struct serio_driver *drv; + struct mutex drv_mutex; + struct device dev; + struct list_head node; + struct mutex *ps2_cmd_mutex; +}; + +struct serio_driver { + const char *description; + const struct serio_device_id *id_table; + bool manual_bind; + void (*write_wakeup)(struct serio *); + irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); + int (*connect)(struct serio *, struct serio_driver *); + int (*reconnect)(struct serio *); + int (*fast_reconnect)(struct serio *); + void (*disconnect)(struct serio *); + void (*cleanup)(struct serio *); + struct device_driver driver; +}; + +typedef struct serio *class_serio_pause_rx_t; + +enum ps2_disposition { + PS2_PROCESS = 0, + PS2_IGNORE = 1, + PS2_ERROR = 2, +}; + +struct ps2dev; + +typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8, unsigned int); + +typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8); + +struct ps2dev { + struct serio *serio; + struct mutex cmd_mutex; + wait_queue_head_t wait; + long unsigned int flags; + u8 cmdbuf[8]; + u8 cmdcnt; + u8 nak; + ps2_pre_receive_handler_t pre_receive_handler; + ps2_receive_handler_t receive_handler; +}; + +struct atkbd { + struct ps2dev ps2dev; + struct input_dev *dev; + char name[64]; + char phys[32]; + short unsigned int id; + short unsigned int keycode[512]; + long unsigned int force_release_mask[8]; + unsigned char set; + bool translated; + bool extra; + bool write; + bool softrepeat; + bool softraw; + bool scroll; + bool enabled; + unsigned char emul; + bool resend; + bool release; + long unsigned int xl_bit; + unsigned int last; + long unsigned int time; + long unsigned int err_count; + struct delayed_work event_work; + long unsigned int event_jiffies; + long unsigned int event_mask; + struct mutex mutex; + struct vivaldi_data vdata; +}; + +struct ir_raw_timings_pl { + 
unsigned int header_pulse; + unsigned int bit_space; + unsigned int bit_pulse[2]; + unsigned int trailer_space; + unsigned int msb_first: 1; +}; + +enum sony_state { + STATE_INACTIVE___2 = 0, + STATE_HEADER_SPACE = 1, + STATE_BIT_PULSE = 2, + STATE_BIT_SPACE = 3, + STATE_FINISHED___2 = 4, +}; + +typedef int (*of_init_fn_1_ret)(struct device_node *); + +enum memblock_flags { + MEMBLOCK_NONE = 0, + MEMBLOCK_HOTPLUG = 1, + MEMBLOCK_MIRROR = 2, + MEMBLOCK_NOMAP = 4, + MEMBLOCK_DRIVER_MANAGED = 8, + MEMBLOCK_RSRV_NOINIT = 16, +}; + +struct memblock_region { + phys_addr_t base; + phys_addr_t size; + enum memblock_flags flags; +}; + +struct serial_icounter_struct { + int cts; + int dsr; + int rng; + int dcd; + int rx; + int tx; + int frame; + int overrun; + int parity; + int brk; + int buf_overrun; + int reserved[9]; +}; + +struct serial_struct { + int type; + int line; + unsigned int port; + int irq; + int flags; + int xmit_fifo_size; + int custom_divisor; + int baud_base; + short unsigned int close_delay; + char io_type; + char reserved_char[1]; + int hub6; + short unsigned int closing_wait; + short unsigned int closing_wait2; + unsigned char *iomem_base; + short unsigned int iomem_reg_shift; + unsigned int port_high; + long unsigned int iomap_base; +}; + +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct tso_t { + int next_frag_idx; + int size; + void *data; + u16 ip_id; + u8 tlen; + bool ipv6; + u32 tcp_seq; +}; + +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; + struct { + enum bpf_cgroup_iter_order order; + __u32 cgroup_fd; + __u64 cgroup_id; + } cgroup; + struct { + __u32 tid; + __u32 pid; + __u32 pid_fd; + } task; +}; + +enum { + BPF_LOCAL_STORAGE_GET_F_CREATE = 1, + BPF_SK_STORAGE_GET_F_CREATE = 1, +}; + +struct bpf_local_storage_data; + +struct bpf_local_storage { + struct bpf_local_storage_data *cache[16]; + struct bpf_local_storage_map *smap; + struct hlist_head list; + void *owner; + struct callback_head rcu; + raw_spinlock_t lock; +}; + +enum { + BTF_SOCK_TYPE_INET = 0, + BTF_SOCK_TYPE_INET_CONN = 1, + BTF_SOCK_TYPE_INET_REQ = 2, + BTF_SOCK_TYPE_INET_TW = 3, + BTF_SOCK_TYPE_REQ = 4, + BTF_SOCK_TYPE_SOCK = 5, + BTF_SOCK_TYPE_SOCK_COMMON = 6, + BTF_SOCK_TYPE_TCP = 7, + BTF_SOCK_TYPE_TCP_REQ = 8, + BTF_SOCK_TYPE_TCP_TW = 9, + BTF_SOCK_TYPE_TCP6 = 10, + BTF_SOCK_TYPE_UDP = 11, + BTF_SOCK_TYPE_UDP6 = 12, + BTF_SOCK_TYPE_UNIX = 13, + BTF_SOCK_TYPE_MPTCP = 14, + BTF_SOCK_TYPE_SOCKET = 15, + MAX_BTF_SOCK_TYPE = 16, +}; + +struct bpf_local_storage_map_bucket; + +struct bpf_local_storage_map { + struct bpf_map map; + struct bpf_local_storage_map_bucket *buckets; + u32 bucket_log; + u16 elem_size; + u16 cache_idx; + struct bpf_mem_alloc selem_ma; + struct bpf_mem_alloc storage_ma; + bool bpf_ma; +}; + +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *, union bpf_iter_link_info *, struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *, struct seq_file *); + +typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *, struct bpf_link_info *); + +typedef const struct bpf_func_proto * (*bpf_iter_get_func_proto_t)(enum bpf_func_id, const struct bpf_prog *); + +struct bpf_iter_reg { + const char *target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; + bpf_iter_show_fdinfo_t show_fdinfo; + bpf_iter_fill_link_info_t fill_link_info; + bpf_iter_get_func_proto_t 
get_func_proto; + u32 ctx_arg_info_size; + u32 feature; + struct bpf_ctx_arg_aux ctx_arg_info[2]; + const struct bpf_iter_seq_info *seq_info; +}; + +struct bpf_local_storage_map_bucket { + struct hlist_head list; + raw_spinlock_t lock; +}; + +struct bpf_local_storage_data { + struct bpf_local_storage_map *smap; + u8 data[0]; +}; + +struct bpf_local_storage_elem { + struct hlist_node map_node; + struct hlist_node snode; + struct bpf_local_storage *local_storage; + union { + struct callback_head rcu; + struct hlist_node free_node; + }; + long: 64; + struct bpf_local_storage_data sdata; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_local_storage_cache { + spinlock_t idx_lock; + u64 idx_usage_counts[16]; +}; + +enum { + SK_DIAG_BPF_STORAGE_REQ_NONE = 0, + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1, + __SK_DIAG_BPF_STORAGE_REQ_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_REP_NONE = 0, + SK_DIAG_BPF_STORAGE = 1, + __SK_DIAG_BPF_STORAGE_REP_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_NONE = 0, + SK_DIAG_BPF_STORAGE_PAD = 1, + SK_DIAG_BPF_STORAGE_MAP_ID = 2, + SK_DIAG_BPF_STORAGE_MAP_VALUE = 3, + __SK_DIAG_BPF_STORAGE_MAX = 4, +}; + +typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *); + +typedef u64 (*btf_bpf_sk_storage_get_tracing)(struct bpf_map *, struct sock *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete_tracing)(struct bpf_map *, struct sock *); + +struct bpf_sk_storage_diag { + u32 nr_maps; + struct bpf_map *maps[0]; +}; + +struct bpf_iter_seq_sk_storage_map_info { + struct bpf_map *map; + unsigned int bucket_id; + unsigned int skip_elems; +}; + +struct bpf_iter__bpf_sk_storage_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + struct sock *sk; + }; + union { + void *value; + }; +}; + +enum ethtool_supported_ring_param { + ETHTOOL_RING_USE_RX_BUF_LEN = 1, + ETHTOOL_RING_USE_CQE_SIZE = 2, + ETHTOOL_RING_USE_TX_PUSH = 4, + ETHTOOL_RING_USE_RX_PUSH = 8, + ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = 16, + ETHTOOL_RING_USE_TCP_DATA_SPLIT = 32, +}; + +enum { + ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, + ETHTOOL_TCP_DATA_SPLIT_DISABLED = 1, + ETHTOOL_TCP_DATA_SPLIT_ENABLED = 2, +}; + +struct rings_reply_data { + struct ethnl_reply_data base; + struct ethtool_ringparam ringparam; + struct kernel_ethtool_ringparam kernel_ringparam; + u32 supported_ring_params; +}; + +typedef unsigned int (*bpf_func_t)(const void *, const struct bpf_insn *); + +typedef unsigned int (*bpf_dispatcher_fn)(const void *, const struct bpf_insn *, unsigned int (*)(const void *, const struct bpf_insn *)); + +struct bpf_nf_ctx { + const struct nf_hook_state *state; + struct sk_buff *skb; +}; + +struct bpf_nf_link { + struct bpf_link link; + struct nf_hook_ops hook_ops; + netns_tracker ns_tracker; + struct net *net; + u32 dead; + const struct nf_defrag_hook *defrag_hook; +}; + +enum nft_rt_keys { + NFT_RT_CLASSID = 0, + NFT_RT_NEXTHOP4 = 1, + NFT_RT_NEXTHOP6 = 2, + NFT_RT_TCPMSS = 3, + NFT_RT_XFRM = 4, + __NFT_RT_MAX = 5, +}; + +enum nft_rt_attributes { + NFTA_RT_UNSPEC = 0, + NFTA_RT_DREG = 1, + NFTA_RT_KEY = 2, + __NFTA_RT_MAX = 3, +}; + +struct nft_rt { + enum nft_rt_keys key: 8; + u8 dreg; +}; + +typedef u8 dscp_t; + +struct sockaddr_in { + __kernel_sa_family_t sin_family; + __be16 sin_port; + struct in_addr sin_addr; + unsigned char __pad[8]; +}; + +enum { + IPV4_DEVCONF_FORWARDING = 1, + 
IPV4_DEVCONF_MC_FORWARDING = 2, + IPV4_DEVCONF_PROXY_ARP = 3, + IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, + IPV4_DEVCONF_SECURE_REDIRECTS = 5, + IPV4_DEVCONF_SEND_REDIRECTS = 6, + IPV4_DEVCONF_SHARED_MEDIA = 7, + IPV4_DEVCONF_RP_FILTER = 8, + IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, + IPV4_DEVCONF_BOOTP_RELAY = 10, + IPV4_DEVCONF_LOG_MARTIANS = 11, + IPV4_DEVCONF_TAG = 12, + IPV4_DEVCONF_ARPFILTER = 13, + IPV4_DEVCONF_MEDIUM_ID = 14, + IPV4_DEVCONF_NOXFRM = 15, + IPV4_DEVCONF_NOPOLICY = 16, + IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, + IPV4_DEVCONF_ARP_ANNOUNCE = 18, + IPV4_DEVCONF_ARP_IGNORE = 19, + IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, + IPV4_DEVCONF_ARP_ACCEPT = 21, + IPV4_DEVCONF_ARP_NOTIFY = 22, + IPV4_DEVCONF_ACCEPT_LOCAL = 23, + IPV4_DEVCONF_SRC_VMARK = 24, + IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, + IPV4_DEVCONF_ROUTE_LOCALNET = 26, + IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, + IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, + IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, + IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, + IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, + IPV4_DEVCONF_BC_FORWARDING = 32, + IPV4_DEVCONF_ARP_EVICT_NOCARRIER = 33, + __IPV4_DEVCONF_MAX = 34, +}; + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; + unsigned int flags_changed; +}; + +enum rt_scope_t { + RT_SCOPE_UNIVERSE = 0, + RT_SCOPE_SITE = 200, + RT_SCOPE_LINK = 253, + RT_SCOPE_HOST = 254, + RT_SCOPE_NOWHERE = 255, +}; + +struct arpreq { + struct sockaddr arp_pa; + struct sockaddr arp_ha; + int arp_flags; + struct sockaddr arp_netmask; + char arp_dev[16]; +}; + +struct arphdr { + __be16 ar_hrd; + __be16 ar_pro; + unsigned char ar_hln; + unsigned char ar_pln; + __be16 ar_op; +}; + +struct neigh_seq_state { + struct seq_net_private p; + struct neigh_table *tbl; + struct neigh_hash_table *nht; + void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); + unsigned int bucket; + unsigned int flags; +}; + +struct neighbour_cb { + long unsigned int sched_next; + unsigned int flags; +}; + +struct ip_tunnel_key { + __be64 tun_id; + union { + struct { + __be32 src; + __be32 dst; + } ipv4; + struct { + struct in6_addr src; + struct in6_addr dst; + } ipv6; + } u; + long unsigned int tun_flags[1]; + __be32 label; + u32 nhid; + u8 tos; + u8 ttl; + __be16 tp_src; + __be16 tp_dst; + __u8 flow_flags; +}; + +struct ip_tunnel_encap { + u16 type; + u16 flags; + __be16 sport; + __be16 dport; +}; + +struct dst_cache_pcpu; + +struct dst_cache { + struct dst_cache_pcpu *cache; + long unsigned int reset_ts; +}; + +struct ip_tunnel_info { + struct ip_tunnel_key key; + struct ip_tunnel_encap encap; + struct dst_cache dst_cache; + u8 options_len; + u8 mode; +}; + +enum { + AX25_VALUES_IPDEFMODE = 0, + AX25_VALUES_AXDEFMODE = 1, + AX25_VALUES_BACKOFF = 2, + AX25_VALUES_CONMODE = 3, + AX25_VALUES_WINDOW = 4, + AX25_VALUES_EWINDOW = 5, + AX25_VALUES_T1 = 6, + AX25_VALUES_T2 = 7, + AX25_VALUES_T3 = 8, + AX25_VALUES_IDLE = 9, + AX25_VALUES_N2 = 10, + AX25_VALUES_PACLEN = 11, + AX25_VALUES_PROTOCOL = 12, + AX25_MAX_VALUES = 13, +}; + +typedef u64 sci_t; + +enum metadata_type { + METADATA_IP_TUNNEL = 0, + METADATA_HW_PORT_MUX = 1, + METADATA_MACSEC = 2, + METADATA_XFRM = 3, +}; + +struct hw_port_info { + struct net_device *lower_dev; + u32 port_id; +}; + +struct macsec_info { + sci_t sci; +}; + +struct xfrm_md_info { + u32 if_id; + int link; + struct dst_entry *dst_orig; +}; + +struct metadata_dst { + struct dst_entry dst; + enum metadata_type type; + union { + struct ip_tunnel_info tun_info; + 
struct hw_port_info port_info; + struct macsec_info macsec_info; + struct xfrm_md_info xfrm_info; + } u; +}; + +typedef union { + __be32 a4; + __be32 a6[4]; + struct in6_addr in6; +} xfrm_address_t; + +struct xfrm_id { + xfrm_address_t daddr; + __be32 spi; + __u8 proto; +}; + +struct xfrm_sec_ctx { + __u8 ctx_doi; + __u8 ctx_alg; + __u16 ctx_len; + __u32 ctx_sid; + char ctx_str[0]; +}; + +struct xfrm_selector { + xfrm_address_t daddr; + xfrm_address_t saddr; + __be16 dport; + __be16 dport_mask; + __be16 sport; + __be16 sport_mask; + __u16 family; + __u8 prefixlen_d; + __u8 prefixlen_s; + __u8 proto; + int ifindex; + __kernel_uid32_t user; +}; + +struct xfrm_lifetime_cfg { + __u64 soft_byte_limit; + __u64 hard_byte_limit; + __u64 soft_packet_limit; + __u64 hard_packet_limit; + __u64 soft_add_expires_seconds; + __u64 hard_add_expires_seconds; + __u64 soft_use_expires_seconds; + __u64 hard_use_expires_seconds; +}; + +struct xfrm_lifetime_cur { + __u64 bytes; + __u64 packets; + __u64 add_time; + __u64 use_time; +}; + +struct xfrm_replay_state { + __u32 oseq; + __u32 seq; + __u32 bitmap; +}; + +struct xfrm_replay_state_esn { + unsigned int bmp_len; + __u32 oseq; + __u32 seq; + __u32 oseq_hi; + __u32 seq_hi; + __u32 replay_window; + __u32 bmp[0]; +}; + +struct xfrm_algo { + char alg_name[64]; + unsigned int alg_key_len; + char alg_key[0]; +}; + +struct xfrm_algo_auth { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_trunc_len; + char alg_key[0]; +}; + +struct xfrm_algo_aead { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_icv_len; + char alg_key[0]; +}; + +struct xfrm_stats { + __u32 replay_window; + __u32 replay; + __u32 integrity_failed; +}; + +struct xfrm_encap_tmpl { + __u16 encap_type; + __be16 encap_sport; + __be16 encap_dport; + xfrm_address_t encap_oa; +}; + +struct xfrm_mark { + __u32 v; + __u32 m; +}; + +struct xfrm_address_filter { + xfrm_address_t saddr; + xfrm_address_t daddr; + __u16 family; + __u8 splen; + __u8 dplen; +}; + +struct flow_dissector_key_tags { + u32 flow_label; +}; + +struct flow_dissector_key_tipc { + __be32 key; +}; + +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc tipckey; + }; +}; + +struct flow_dissector_key_icmp { + struct { + u8 type; + u8 code; + }; + u16 id; +}; + +struct flow_keys { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_icmp icmp; + struct flow_dissector_key_addrs addrs; + long: 0; +}; + +enum skb_tstamp_type { + SKB_CLOCK_REALTIME = 0, + SKB_CLOCK_MONOTONIC = 1, + SKB_CLOCK_TAI = 2, + __SKB_CLOCK_MAX = 2, +}; + +struct hop_jumbo_hdr { + u8 nexthdr; + u8 hdrlen; + u8 tlv_type; + u8 tlv_len; + __be32 jumbo_payload_len; +}; + +struct ip6_fraglist_iter { + struct ipv6hdr *tmp_hdr; + struct sk_buff *frag; + int offset; + unsigned int hlen; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_frag_state { + u8 *prevhdr; + unsigned int hlen; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + int hroom; + int troom; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_ra_chain { + struct ip6_ra_chain *next; + struct sock *sk; + int sel; + void (*destructor)(struct sock *); +}; + +struct ipcm6_cookie { + struct 
sockcm_cookie sockc; + __s16 hlimit; + __s16 tclass; + __u16 gso_size; + __s8 dontfrag; + struct ipv6_txoptions *opt; +}; + +struct ipv4_addr_key { + __be32 addr; + int vif; +}; + +struct inetpeer_addr { + union { + struct ipv4_addr_key a4; + struct in6_addr a6; + u32 key[4]; + }; + __u16 family; +}; + +struct inet_peer { + struct rb_node rb_node; + struct inetpeer_addr daddr; + u32 metrics[17]; + u32 rate_tokens; + u32 n_redirects; + long unsigned int rate_last; + union { + struct { + atomic_t rid; + }; + struct callback_head rcu; + }; + __u32 dtime; + refcount_t refcnt; +}; + +enum { + LWTUNNEL_XMIT_DONE = 0, + LWTUNNEL_XMIT_CONTINUE = 256, +}; + +struct xfrm_state_walk { + struct list_head all; + u8 state; + u8 dying; + u8 proto; + u32 seq; + struct xfrm_address_filter *filter; +}; + +struct xfrm_dev_offload { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net_device *real_dev; + long unsigned int offload_handle; + u8 dir: 2; + u8 type: 2; + u8 flags: 2; +}; + +struct xfrm_mode { + u8 encap; + u8 family; + u8 flags; +}; + +enum xfrm_replay_mode { + XFRM_REPLAY_MODE_LEGACY = 0, + XFRM_REPLAY_MODE_BMP = 1, + XFRM_REPLAY_MODE_ESN = 2, +}; + +struct xfrm_type; + +struct xfrm_type_offload; + +struct xfrm_state { + possible_net_t xs_net; + union { + struct hlist_node gclist; + struct hlist_node bydst; + }; + union { + struct hlist_node dev_gclist; + struct hlist_node bysrc; + }; + struct hlist_node byspi; + struct hlist_node byseq; + struct hlist_node state_cache; + struct hlist_node state_cache_input; + refcount_t refcnt; + spinlock_t lock; + u32 pcpu_num; + struct xfrm_id id; + struct xfrm_selector sel; + struct xfrm_mark mark; + u32 if_id; + u32 tfcpad; + u32 genid; + struct xfrm_state_walk km; + struct { + u32 reqid; + u8 mode; + u8 replay_window; + u8 aalgo; + u8 ealgo; + u8 calgo; + u8 flags; + u16 family; + xfrm_address_t saddr; + int header_len; + int trailer_len; + u32 extra_flags; + struct xfrm_mark smark; + } props; + struct xfrm_lifetime_cfg lft; + struct xfrm_algo_auth *aalg; + struct xfrm_algo *ealg; + struct xfrm_algo *calg; + struct xfrm_algo_aead *aead; + const char *geniv; + __be16 new_mapping_sport; + u32 new_mapping; + u32 mapping_maxage; + struct xfrm_encap_tmpl *encap; + struct sock *encap_sk; + u32 nat_keepalive_interval; + time64_t nat_keepalive_expiration; + xfrm_address_t *coaddr; + struct xfrm_state *tunnel; + atomic_t tunnel_users; + struct xfrm_replay_state replay; + struct xfrm_replay_state_esn *replay_esn; + struct xfrm_replay_state preplay; + struct xfrm_replay_state_esn *preplay_esn; + enum xfrm_replay_mode repl_mode; + u32 xflags; + u32 replay_maxage; + u32 replay_maxdiff; + struct timer_list rtimer; + struct xfrm_stats stats; + struct xfrm_lifetime_cur curlft; + struct hrtimer mtimer; + struct xfrm_dev_offload xso; + long int saved_tmo; + time64_t lastused; + struct page_frag xfrag; + const struct xfrm_type *type; + struct xfrm_mode inner_mode; + struct xfrm_mode inner_mode_iaf; + struct xfrm_mode outer_mode; + const struct xfrm_type_offload *type_offload; + struct xfrm_sec_ctx *security; + void *data; + u8 dir; +}; + +struct xfrm_type { + struct module *owner; + u8 proto; + u8 flags; + int (*init_state)(struct xfrm_state *, struct netlink_ext_ack *); + void (*destructor)(struct xfrm_state *); + int (*input)(struct xfrm_state *, struct sk_buff *); + int (*output)(struct xfrm_state *, struct sk_buff *); + int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); +}; + +struct xfrm_type_offload { + struct module *owner; + 
u8 proto; + void (*encap)(struct xfrm_state *, struct sk_buff *); + int (*input_tail)(struct xfrm_state *, struct sk_buff *); + int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t); +}; + +struct xfrm_offload { + struct { + __u32 low; + __u32 hi; + } seq; + __u32 flags; + __u32 status; + __u32 orig_mac_len; + __u8 proto; + __u8 inner_ipproto; +}; + +struct sec_path { + int len; + int olen; + int verified_cnt; + struct xfrm_state *xvec[6]; + struct xfrm_offload ovec[1]; +}; + +struct __bridge_info { + __u64 designated_root; + __u64 bridge_id; + __u32 root_path_cost; + __u32 max_age; + __u32 hello_time; + __u32 forward_delay; + __u32 bridge_max_age; + __u32 bridge_hello_time; + __u32 bridge_forward_delay; + __u8 topology_change; + __u8 topology_change_detected; + __u8 root_port; + __u8 stp_enabled; + __u32 ageing_time; + __u32 gc_interval; + __u32 hello_timer_value; + __u32 tcn_timer_value; + __u32 topology_change_timer_value; + __u32 gc_timer_value; +}; + +struct __port_info { + __u64 designated_root; + __u64 designated_bridge; + __u16 port_id; + __u16 designated_port; + __u32 path_cost; + __u32 designated_cost; + __u8 state; + __u8 top_change_ack; + __u8 config_pending; + __u8 unused0; + __u32 message_age_timer_value; + __u32 forward_delay_timer_value; + __u32 hold_timer_value; +}; + +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING = 1, + UMH_DISABLED = 2, +}; + +enum { + NDD_UNARMED = 1, + NDD_LOCKED = 2, + NDD_SECURITY_OVERWRITE = 3, + NDD_WORK_PENDING = 4, + NDD_LABELING = 6, + NDD_INCOHERENT = 7, + NDD_REGISTER_SYNC = 8, + ND_IOCTL_MAX_BUFLEN = 4194304, + ND_CMD_MAX_ELEM = 5, + ND_CMD_MAX_ENVELOPE = 256, + ND_MAX_MAPPINGS = 32, + ND_REGION_PAGEMAP = 0, + ND_REGION_PERSIST_CACHE = 1, + ND_REGION_PERSIST_MEMCTRL = 2, + ND_REGION_ASYNC = 3, + ND_REGION_CXL = 4, + DPA_RESOURCE_ADJUSTED = 1, +}; + +enum bpf_addr_space_cast { + BPF_ADDR_SPACE_CAST = 1, +}; + +enum aarch64_insn_system_register { + AARCH64_INSN_SYSREG_TPIDR_EL1 = 18052, + AARCH64_INSN_SYSREG_TPIDR_EL2 = 26242, + AARCH64_INSN_SYSREG_SP_EL0 = 16904, +}; + +enum aarch64_insn_condition { + AARCH64_INSN_COND_EQ = 0, + AARCH64_INSN_COND_NE = 1, + AARCH64_INSN_COND_CS = 2, + AARCH64_INSN_COND_CC = 3, + AARCH64_INSN_COND_MI = 4, + AARCH64_INSN_COND_PL = 5, + AARCH64_INSN_COND_VS = 6, + AARCH64_INSN_COND_VC = 7, + AARCH64_INSN_COND_HI = 8, + AARCH64_INSN_COND_LS = 9, + AARCH64_INSN_COND_GE = 10, + AARCH64_INSN_COND_LT = 11, + AARCH64_INSN_COND_GT = 12, + AARCH64_INSN_COND_LE = 13, + AARCH64_INSN_COND_AL = 14, +}; + +enum aarch64_insn_size_type { + AARCH64_INSN_SIZE_8 = 0, + AARCH64_INSN_SIZE_16 = 1, + AARCH64_INSN_SIZE_32 = 2, + AARCH64_INSN_SIZE_64 = 3, +}; + +enum aarch64_insn_ldst_type { + AARCH64_INSN_LDST_LOAD_REG_OFFSET = 0, + AARCH64_INSN_LDST_STORE_REG_OFFSET = 1, + AARCH64_INSN_LDST_LOAD_IMM_OFFSET = 2, + AARCH64_INSN_LDST_STORE_IMM_OFFSET = 3, + AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX = 4, + AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX = 5, + AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX = 6, + AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX = 7, + AARCH64_INSN_LDST_LOAD_EX = 8, + AARCH64_INSN_LDST_LOAD_ACQ_EX = 9, + AARCH64_INSN_LDST_STORE_EX = 10, + AARCH64_INSN_LDST_STORE_REL_EX = 11, + AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET = 12, + AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET = 13, +}; + +enum aarch64_insn_bitfield_type { + AARCH64_INSN_BITFIELD_MOVE = 0, + AARCH64_INSN_BITFIELD_MOVE_UNSIGNED = 1, + AARCH64_INSN_BITFIELD_MOVE_SIGNED = 2, +}; + +enum aarch64_insn_data1_type { + AARCH64_INSN_DATA1_REVERSE_16 = 0, + 
AARCH64_INSN_DATA1_REVERSE_32 = 1, + AARCH64_INSN_DATA1_REVERSE_64 = 2, +}; + +enum aarch64_insn_data2_type { + AARCH64_INSN_DATA2_UDIV = 0, + AARCH64_INSN_DATA2_SDIV = 1, + AARCH64_INSN_DATA2_LSLV = 2, + AARCH64_INSN_DATA2_LSRV = 3, + AARCH64_INSN_DATA2_ASRV = 4, + AARCH64_INSN_DATA2_RORV = 5, +}; + +enum aarch64_insn_data3_type { + AARCH64_INSN_DATA3_MADD = 0, + AARCH64_INSN_DATA3_MSUB = 1, +}; + +enum aarch64_insn_adr_type { + AARCH64_INSN_ADR_TYPE_ADRP = 0, + AARCH64_INSN_ADR_TYPE_ADR = 1, +}; + +enum aarch64_insn_mem_atomic_op { + AARCH64_INSN_MEM_ATOMIC_ADD = 0, + AARCH64_INSN_MEM_ATOMIC_CLR = 1, + AARCH64_INSN_MEM_ATOMIC_EOR = 2, + AARCH64_INSN_MEM_ATOMIC_SET = 3, + AARCH64_INSN_MEM_ATOMIC_SWP = 4, +}; + +enum aarch64_insn_mem_order_type { + AARCH64_INSN_MEM_ORDER_NONE = 0, + AARCH64_INSN_MEM_ORDER_ACQ = 1, + AARCH64_INSN_MEM_ORDER_REL = 2, + AARCH64_INSN_MEM_ORDER_ACQREL = 3, +}; + +enum aarch64_insn_mb_type { + AARCH64_INSN_MB_SY = 0, + AARCH64_INSN_MB_ST = 1, + AARCH64_INSN_MB_LD = 2, + AARCH64_INSN_MB_ISH = 3, + AARCH64_INSN_MB_ISHST = 4, + AARCH64_INSN_MB_ISHLD = 5, + AARCH64_INSN_MB_NSH = 6, + AARCH64_INSN_MB_NSHST = 7, + AARCH64_INSN_MB_NSHLD = 8, + AARCH64_INSN_MB_OSH = 9, + AARCH64_INSN_MB_OSHST = 10, + AARCH64_INSN_MB_OSHLD = 11, +}; + +struct bpf_tramp_run_ctx; + +typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *, struct bpf_tramp_run_ctx *); + +struct bpf_tramp_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + struct bpf_run_ctx *saved_run_ctx; +}; + +typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *, u64, struct bpf_tramp_run_ctx *); + +struct jit_ctx { + const struct bpf_prog *prog; + int idx; + int epilogue_offset; + int *offset; + int exentry_idx; + int nr_used_callee_reg; + u8 used_callee_reg[8]; + __le32 *image; + __le32 *ro_image; + u32 stack_size; + u64 user_vm_start; + u64 arena_vm_start; + bool fp_used; + bool write; +}; + +struct bpf_plt { + u32 insn_ldr; + u32 insn_br; + u64 target; +}; + +struct arm64_jit_data { + struct bpf_binary_header *header; + u8 *ro_image; + struct bpf_binary_header *ro_header; + struct jit_ctx ctx; +}; + +struct kvm_s390_adapter_int { + u64 ind_addr; + u64 summary_addr; + u64 ind_offset; + u32 summary_offset; + u32 adapter_id; +}; + +struct kvm_hv_sint { + u32 vcpu; + u32 sint; +}; + +struct kvm_xen_evtchn { + u32 port; + u32 vcpu_id; + int vcpu_idx; + u32 priority; +}; + +struct kvm_kernel_irq_routing_entry { + u32 gsi; + u32 type; + int (*set)(struct kvm_kernel_irq_routing_entry *, struct kvm *, int, int, bool); + union { + struct { + unsigned int irqchip; + unsigned int pin; + } irqchip; + struct { + u32 address_lo; + u32 address_hi; + u32 data; + u32 flags; + u32 devid; + } msi; + struct kvm_s390_adapter_int adapter; + struct kvm_hv_sint hv_sint; + struct kvm_xen_evtchn xen_evtchn; + }; + struct hlist_node link; +}; + +enum proc_cn_event { + PROC_EVENT_NONE = 0, + PROC_EVENT_FORK = 1, + PROC_EVENT_EXEC = 2, + PROC_EVENT_UID = 4, + PROC_EVENT_GID = 64, + PROC_EVENT_SID = 128, + PROC_EVENT_PTRACE = 256, + PROC_EVENT_COMM = 512, + PROC_EVENT_NONZERO_EXIT = 536870912, + PROC_EVENT_COREDUMP = 1073741824, + PROC_EVENT_EXIT = 2147483648, +}; + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; + +typedef int suspend_state_t; + +struct platform_suspend_ops { + int (*valid)(suspend_state_t); + int (*begin)(suspend_state_t); + int (*prepare)(); + int (*prepare_late)(); + int (*enter)(suspend_state_t); + void (*wake)(); + void (*finish)(); + bool (*suspend_again)(); + void (*end)(); + 
void (*recover)(); +}; + +struct platform_s2idle_ops { + int (*begin)(); + int (*prepare)(); + int (*prepare_late)(); + void (*check)(); + bool (*wake)(); + void (*restore_early)(); + void (*restore)(); + void (*end)(); +}; + +enum s2idle_states { + S2IDLE_STATE_NONE = 0, + S2IDLE_STATE_ENTER = 1, + S2IDLE_STATE_WAKE = 2, +}; + +enum suspend_stat_step { + SUSPEND_WORKING = 0, + SUSPEND_FREEZE = 1, + SUSPEND_PREPARE = 2, + SUSPEND_SUSPEND = 3, + SUSPEND_SUSPEND_LATE = 4, + SUSPEND_SUSPEND_NOIRQ = 5, + SUSPEND_RESUME_NOIRQ = 6, + SUSPEND_RESUME_EARLY = 7, + SUSPEND_RESUME = 8, +}; + +enum { + TEST_NONE = 0, + TEST_CORE = 1, + TEST_CPUS = 2, + TEST_PLATFORM = 3, + TEST_DEVICES = 4, + TEST_FREEZER = 5, + __TEST_AFTER_LAST = 6, +}; + +struct taint_flag { + char c_true; + char c_false; + bool module; + const char *desc; +}; + +enum kernel_read_file_id { + READING_UNKNOWN = 0, + READING_FIRMWARE = 1, + READING_MODULE = 2, + READING_KEXEC_IMAGE = 3, + READING_KEXEC_INITRAMFS = 4, + READING_POLICY = 5, + READING_X509_CERTIFICATE = 6, + READING_MAX_ID = 7, +}; + +enum kernel_load_data_id { + LOADING_UNKNOWN = 0, + LOADING_FIRMWARE = 1, + LOADING_MODULE = 2, + LOADING_KEXEC_IMAGE = 3, + LOADING_KEXEC_INITRAMFS = 4, + LOADING_POLICY = 5, + LOADING_X509_CERTIFICATE = 6, + LOADING_MAX_ID = 7, +}; + +enum mod_license { + NOT_GPL_ONLY = 0, + GPL_ONLY = 1, +}; + +struct find_symbol_arg { + const char *name; + bool gplok; + bool warn; + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + +enum fail_dup_mod_reason { + FAIL_DUP_MOD_BECOMING = 0, + FAIL_DUP_MOD_LOAD = 1, +}; + +struct mod_tree_root { + struct latch_tree_root root; + long unsigned int addr_min; + long unsigned int addr_max; +}; + +struct trace_event_raw_module_load { + struct trace_entry ent; + unsigned int taints; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_free { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_refcnt { + struct trace_entry ent; + long unsigned int ip; + int refcnt; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_request { + struct trace_entry ent; + long unsigned int ip; + bool wait; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_data_offsets_module_load { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_free { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_refcnt { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_request { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_module_load)(void *, struct module *); + +typedef void (*btf_trace_module_free)(void *, struct module *); + +typedef void (*btf_trace_module_get)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_put)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_request)(void *, char *, bool, long unsigned int); + +struct symsearch { + const struct kernel_symbol *start; + const struct kernel_symbol *stop; + const s32 *crcs; + enum mod_license license; +}; + +struct mod_initfree { + struct llist_node node; + void *init_text; + void *init_data; + void *init_rodata; +}; + +struct idempotent { + const void *cookie; + struct hlist_node entry; + struct completion complete; + int ret; +}; + +struct cgroupstats { + __u64 nr_sleeping; + __u64 nr_running; + __u64 nr_stopped; + __u64 nr_uninterruptible; + 
__u64 nr_io_wait; +}; + +struct kernfs_fs_context { + struct kernfs_root *root; + void *ns_tag; + long unsigned int magic; + bool new_sb_created; +}; + +enum { + CGRP_NOTIFY_ON_RELEASE = 0, + CGRP_CPUSET_CLONE_CHILDREN = 1, + CGRP_FREEZE = 2, + CGRP_FROZEN = 3, + CGRP_KILL = 4, +}; + +struct cgroup_taskset { + struct list_head src_csets; + struct list_head dst_csets; + int nr_tasks; + int ssid; + struct list_head *csets; + struct css_set *cur_cset; + struct task_struct *cur_task; +}; + +struct cgroup_of_peak { + long unsigned int value; + struct list_head list; +}; + +struct cgroup_fs_context { + struct kernfs_fs_context kfc; + struct cgroup_root *root; + struct cgroup_namespace *ns; + unsigned int flags; + bool cpuset_clone_children; + bool none; + bool all_ss; + u16 subsys_mask; + char *name; + char *release_agent; +}; + +enum cgroup_filetype { + CGROUP_FILE_PROCS = 0, + CGROUP_FILE_TASKS = 1, +}; + +struct cgroup_pidlist { + struct { + enum cgroup_filetype type; + struct pid_namespace *ns; + } key; + pid_t *list; + int length; + struct list_head links; + struct cgroup *owner; + struct delayed_work destroy_dwork; +}; + +struct cgroup_file_ctx { + struct cgroup_namespace *ns; + struct { + void *trigger; + } psi; + struct { + bool started; + struct css_task_iter iter; + } procs; + struct { + struct cgroup_pidlist *pidlist; + } procs1; + struct cgroup_of_peak peak; +}; + +struct cgrp_cset_link { + struct cgroup *cgrp; + struct css_set *cset; + struct list_head cset_link; + struct list_head cgrp_link; +}; + +struct cgroup_mgctx { + struct list_head preloaded_src_csets; + struct list_head preloaded_dst_csets; + struct cgroup_taskset tset; + u16 ss_mask; +}; + +enum cgroup1_param { + Opt_all = 0, + Opt_clone_children = 1, + Opt_cpuset_v2_mode = 2, + Opt_name = 3, + Opt_none = 4, + Opt_noprefix = 5, + Opt_release_agent = 6, + Opt_xattr = 7, + Opt_favordynmods = 8, + Opt_nofavordynmods = 9, +}; + +enum { + CSD_FLAG_LOCK = 1, + IRQ_WORK_PENDING = 1, + IRQ_WORK_BUSY = 2, + IRQ_WORK_LAZY = 4, + IRQ_WORK_HARD_IRQ = 8, + IRQ_WORK_CLAIMED = 3, + CSD_TYPE_ASYNC = 0, + CSD_TYPE_SYNC = 16, + CSD_TYPE_IRQ_WORK = 32, + CSD_TYPE_TTWU = 48, + CSD_FLAG_TYPE_MASK = 240, +}; + +struct smp_hotplug_thread { + struct task_struct **store; + struct list_head list; + int (*thread_should_run)(unsigned int); + void (*thread_fn)(unsigned int); + void (*create)(unsigned int); + void (*setup)(unsigned int); + void (*cleanup)(unsigned int, bool); + void (*park)(unsigned int); + void (*unpark)(unsigned int); + bool selfparking; + const char *thread_comm; +}; + +struct pcpu_freelist_node; + +struct pcpu_freelist_head { + struct pcpu_freelist_node *first; + raw_spinlock_t lock; +}; + +struct pcpu_freelist_node { + struct pcpu_freelist_node *next; +}; + +struct pcpu_freelist { + struct pcpu_freelist_head *freelist; + struct pcpu_freelist_head extralist; +}; + +enum bpf_cond_pseudo_jmp { + BPF_MAY_GOTO = 0, +}; + +typedef void (*bpf_insn_print_t)(void *, const char *, ...); + +typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); + +typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); + +struct bpf_insn_cbs { + bpf_insn_print_t cb_print; + bpf_insn_revmap_call_t cb_call; + bpf_insn_print_imm_t cb_imm; + void *private_data; +}; + +struct bpf_dispatcher_prog { + struct bpf_prog *prog; + refcount_t users; +}; + +struct bpf_dispatcher { + struct mutex mutex; + void *func; + struct bpf_dispatcher_prog progs[48]; + int num_progs; + void *image; + void *rw_image; + u32 
image_off; + struct bpf_ksym ksym; +}; + +struct static_key_mod { + struct static_key_mod *next; + struct jump_entry *entries; + struct module *mod; +}; + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP = 1, +}; + +struct static_key_deferred { + struct static_key key; + long unsigned int timeout; + struct delayed_work work; +}; + +enum migratetype { + MIGRATE_UNMOVABLE = 0, + MIGRATE_MOVABLE = 1, + MIGRATE_RECLAIMABLE = 2, + MIGRATE_PCPTYPES = 3, + MIGRATE_HIGHATOMIC = 3, + MIGRATE_CMA = 4, + MIGRATE_ISOLATE = 5, + MIGRATE_TYPES = 6, +}; + +enum meminit_context { + MEMINIT_EARLY = 0, + MEMINIT_HOTPLUG = 1, +}; + +struct reciprocal_value { + u32 m; + u8 sh1; + u8 sh2; +}; + +struct kmem_cache_order_objects { + unsigned int x; +}; + +struct kasan_cache { + int alloc_meta_offset; + int free_meta_offset; +}; + +struct kmem_cache_cpu; + +struct kmem_cache_node; + +struct kmem_cache { + struct kmem_cache_cpu *cpu_slab; + slab_flags_t flags; + long unsigned int min_partial; + unsigned int size; + unsigned int object_size; + struct reciprocal_value reciprocal_size; + unsigned int offset; + unsigned int cpu_partial; + unsigned int cpu_partial_slabs; + struct kmem_cache_order_objects oo; + struct kmem_cache_order_objects min; + gfp_t allocflags; + int refcount; + void (*ctor)(void *); + unsigned int inuse; + unsigned int align; + unsigned int red_left_pad; + const char *name; + struct list_head list; + struct kobject kobj; + struct kasan_cache kasan_info; + struct kmem_cache_node *node[1]; +}; + +struct memblock_type { + long unsigned int cnt; + long unsigned int max; + phys_addr_t total_size; + struct memblock_region *regions; + char *name; +}; + +struct memblock { + bool bottom_up; + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +}; + +enum mminit_level { + MMINIT_WARNING = 0, + MMINIT_VERIFY = 1, + MMINIT_TRACE = 2, +}; + +struct sysinfo { + __kernel_long_t uptime; + __kernel_ulong_t loads[3]; + __kernel_ulong_t totalram; + __kernel_ulong_t freeram; + __kernel_ulong_t sharedram; + __kernel_ulong_t bufferram; + __kernel_ulong_t totalswap; + __kernel_ulong_t freeswap; + __u16 procs; + __u16 pad; + __kernel_ulong_t totalhigh; + __kernel_ulong_t freehigh; + __u32 mem_unit; + char _f[0]; +}; + +enum { + PERCPU_REF_INIT_ATOMIC = 1, + PERCPU_REF_INIT_DEAD = 2, + PERCPU_REF_ALLOW_REINIT = 4, +}; + +typedef int filler_t(struct file *, struct folio *); + +union swap_header { + struct { + char reserved[4086]; + char magic[10]; + } magic; + struct { + char bootbits[1024]; + __u32 version; + __u32 last_page; + __u32 nr_badpages; + unsigned char sws_uuid[16]; + unsigned char sws_volume[16]; + __u32 padding[117]; + __u32 badpages[1]; + } info; +}; + +struct swap_extent { + struct rb_node rb_node; + long unsigned int start_page; + long unsigned int nr_pages; + sector_t start_block; +}; + +typedef union { +} release_pages_arg; + +struct splice_desc { + size_t total_len; + unsigned int len; + unsigned int flags; + union { + void *userptr; + struct file *file; + void *data; + } u; + void (*splice_eof)(struct splice_desc *); + loff_t pos; + loff_t *opos; + size_t num_spliced; + bool need_wakeup; +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + long unsigned int private; +}; + +struct splice_pipe_desc { + struct page **pages; + struct partial_page *partial; + int nr_pages; + unsigned int nr_pages_max; + const struct pipe_buf_operations *ops; + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +typedef int 
splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); + +typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); + +enum iter_type { + ITER_UBUF = 0, + ITER_IOVEC = 1, + ITER_BVEC = 2, + ITER_KVEC = 3, + ITER_FOLIOQ = 4, + ITER_XARRAY = 5, + ITER_DISCARD = 6, +}; + +struct iomap_folio_ops; + +struct iomap { + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + struct block_device *bdev; + struct dax_device *dax_dev; + void *inline_data; + void *private; + const struct iomap_folio_ops *folio_ops; + u64 validity_cookie; +}; + +struct iomap_iter; + +struct iomap_folio_ops { + struct folio * (*get_folio)(struct iomap_iter *, loff_t, unsigned int); + void (*put_folio)(struct inode *, loff_t, unsigned int, struct folio *); + bool (*iomap_valid)(struct inode *, const struct iomap *); +}; + +struct iomap_iter { + struct inode *inode; + loff_t pos; + u64 len; + s64 processed; + unsigned int flags; + struct iomap iomap; + struct iomap srcmap; + void *private; +}; + +struct iomap_ops { + int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap *, struct iomap *); + int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap *); +}; + +struct iomap_dio_ops { + int (*end_io)(struct kiocb *, ssize_t, int, unsigned int); + void (*submit_io)(const struct iomap_iter *, struct bio *, loff_t); + struct bio_set *bio_set; +}; + +struct iomap_dio { + struct kiocb *iocb; + const struct iomap_dio_ops *dops; + loff_t i_size; + loff_t size; + atomic_t ref; + unsigned int flags; + int error; + size_t done_before; + bool wait_for_completion; + union { + struct { + struct iov_iter *iter; + struct task_struct *waiter; + } submit; + struct { + struct work_struct work; + } aio; + }; +}; + +struct kernfs_global_locks { + struct mutex open_file_mutex[1024]; +}; + +struct kernfs_open_node { + struct callback_head callback_head; + atomic_t event; + wait_queue_head_t poll; + struct list_head files; + unsigned int nr_mmapped; + unsigned int nr_to_release; +}; + +typedef int (*writepage_t)(struct folio *, struct writeback_control *, void *); + +struct dax_holder_operations { + int (*notify_failure)(struct dax_device *, u64, u64, int); +}; + +struct fs_error_report { + int error; + struct inode *inode; + struct super_block *sb; +}; + +enum criteria { + CR_POWER2_ALIGNED = 0, + CR_GOAL_LEN_FAST = 1, + CR_BEST_AVAIL_LEN = 2, + CR_GOAL_LEN_SLOW = 3, + CR_ANY_FREE = 4, + EXT4_MB_NUM_CRS = 5, +}; + +struct ext4_allocation_request { + struct inode *inode; + unsigned int len; + ext4_lblk_t logical; + ext4_lblk_t lleft; + ext4_lblk_t lright; + ext4_fsblk_t goal; + ext4_fsblk_t pleft; + ext4_fsblk_t pright; + unsigned int flags; +}; + +enum { + ES_WRITTEN_B = 0, + ES_UNWRITTEN_B = 1, + ES_DELAYED_B = 2, + ES_HOLE_B = 3, + ES_REFERENCED_B = 4, + ES_FLAGS = 5, +}; + +struct ext4_locality_group { + struct mutex lg_mutex; + struct list_head lg_prealloc_list[10]; + spinlock_t lg_prealloc_lock; +}; + +enum { + EXT4_MF_MNTDIR_SAMPLED = 0, + EXT4_MF_FC_INELIGIBLE = 1, +}; + +struct ext4_lazy_init { + long unsigned int li_state; + struct list_head li_request_list; + struct mutex li_list_mtx; +}; + +struct partial_cluster { + ext4_fsblk_t pclu; + ext4_lblk_t lblk; + enum { + initial = 0, + tofree = 1, + nofree = 2, + } state; +}; + +struct ext4_journal_cb_entry { + struct list_head jce_list; + void (*jce_func)(struct super_block *, struct ext4_journal_cb_entry *, int); +}; + +struct ext4_prealloc_space { + union { + struct rb_node 
inode_node; + struct list_head lg_list; + } pa_node; + struct list_head pa_group_list; + union { + struct list_head pa_tmp_list; + struct callback_head pa_rcu; + } u; + spinlock_t pa_lock; + atomic_t pa_count; + unsigned int pa_deleted; + ext4_fsblk_t pa_pstart; + ext4_lblk_t pa_lstart; + ext4_grpblk_t pa_len; + ext4_grpblk_t pa_free; + short unsigned int pa_type; + union { + rwlock_t *inode_lock; + spinlock_t *lg_lock; + } pa_node_lock; + struct inode *pa_inode; +}; + +struct ext4_free_extent { + ext4_lblk_t fe_logical; + ext4_grpblk_t fe_start; + ext4_group_t fe_group; + ext4_grpblk_t fe_len; +}; + +struct ext4_allocation_context { + struct inode *ac_inode; + struct super_block *ac_sb; + struct ext4_free_extent ac_o_ex; + struct ext4_free_extent ac_g_ex; + struct ext4_free_extent ac_b_ex; + struct ext4_free_extent ac_f_ex; + ext4_grpblk_t ac_orig_goal_len; + __u32 ac_flags; + __u32 ac_groups_linear_remaining; + __u16 ac_groups_scanned; + __u16 ac_found; + __u16 ac_cX_found[5]; + __u16 ac_tail; + __u16 ac_buddy; + __u8 ac_status; + __u8 ac_criteria; + __u8 ac_2order; + __u8 ac_op; + struct folio *ac_bitmap_folio; + struct folio *ac_buddy_folio; + struct ext4_prealloc_space *ac_pa; + struct ext4_locality_group *ac_lg; +}; + +struct ext4_fsmap { + struct list_head fmr_list; + dev_t fmr_device; + uint32_t fmr_flags; + uint64_t fmr_physical; + uint64_t fmr_owner; + uint64_t fmr_length; +}; + +struct trace_event_raw_ext4_other_inode_update_time { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t orig_ino; + uid_t uid; + gid_t gid; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_free_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + uid_t uid; + gid_t gid; + __u64 blocks; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_request_inode { + struct trace_entry ent; + dev_t dev; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_allocate_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_evict_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int nlink; + char __data[0]; +}; + +struct trace_event_raw_ext4_drop_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int drop; + char __data[0]; +}; + +struct trace_event_raw_ext4_nfs_commit_metadata { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_mark_inode_dirty { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_ext4_begin_ordered_truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t new_size; + char __data[0]; +}; + +struct trace_event_raw_ext4__write_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4__write_end { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + unsigned int copied; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + long unsigned int writeback_index; + int sync_mode; + char for_kupdate; + char range_cyclic; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_write_pages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int first_page; + long int nr_to_write; + int sync_mode; + 
char __data[0]; +}; + +struct trace_event_raw_ext4_da_write_pages_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 lblk; + __u32 len; + __u32 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages_result { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + int pages_written; + long int pages_skipped; + long unsigned int writeback_index; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4__folio_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_ext4_invalidate_folio_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + size_t offset; + size_t length; + char __data[0]; +}; + +struct trace_event_raw_ext4_discard_blocks { + struct trace_entry ent; + dev_t dev; + __u64 blk; + __u64 count; + char __data[0]; +}; + +struct trace_event_raw_ext4__mb_new_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 pa_pstart; + __u64 pa_lstart; + __u32 pa_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_release_inode_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + __u32 count; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_release_group_pa { + struct trace_entry ent; + dev_t dev; + __u64 pa_pstart; + __u32 pa_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_discard_preallocations { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_discard_preallocations { + struct trace_entry ent; + dev_t dev; + int needed; + char __data[0]; +}; + +struct trace_event_raw_ext4_request_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_allocate_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_free_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + long unsigned int count; + int flags; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_file_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + int datasync; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_file_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_fs { + struct trace_entry ent; + dev_t dev; + int wait; + char __data[0]; +}; + +struct trace_event_raw_ext4_alloc_da_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int data_blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_alloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 goal_logical; + int goal_start; + __u32 goal_group; + int goal_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + __u16 found; + __u16 groups; + __u16 buddy; + __u16 flags; + __u16 tail; + __u8 cr; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_prealloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + 
__u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4__mballoc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_forget { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + int is_metadata; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_update_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int used_blocks; + int reserved_data_blocks; + int quota_claim; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int reserve_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_release_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int freed_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4__bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_read_block_bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + bool prefetch; + char __data[0]; +}; + +struct trace_event_raw_ext4__fallocate_mode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + int mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_fallocate_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int blocks; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + loff_t size; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4__truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_fastpath { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + ext4_lblk_t i_lblk; + unsigned int i_len; + ext4_fsblk_t i_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + unsigned int len; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int flags; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + unsigned int len; + unsigned int mflags; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_load_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_load_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_sb { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + int revoke_creds; + int 
type; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_inode { + struct trace_entry ent; + long unsigned int ino; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + int revoke_creds; + int type; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_reserved { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4__trim { + struct trace_entry ent; + int dev_major; + int dev_minor; + __u32 group; + int start; + int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_handle_unwritten_extents { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int flags; + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + unsigned int len; + unsigned int allocated; + ext4_fsblk_t newblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_get_implied_cluster_alloc_exit { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + unsigned int len; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_show_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + short unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_remove_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t from; + ext4_lblk_t to; + ext4_fsblk_t ee_pblk; + ext4_lblk_t ee_lblk; + short unsigned int ee_len; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_leaf { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t ee_lblk; + ext4_fsblk_t ee_pblk; + short int ee_len; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_idx { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space_done { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + short unsigned int eh_entries; + char __data[0]; +}; + +struct trace_event_raw_ext4__es_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_remove_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t lblk; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_lookup_extent_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_lookup_extent_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + int found; + char __data[0]; +}; + +struct trace_event_raw_ext4__es_shrink_enter { + struct trace_entry ent; + 
dev_t dev; + int nr_to_scan; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink_scan_exit { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_collapse_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_insert_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + long long unsigned int scan_time; + int nr_skipped; + int retried; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_insert_delayed_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + bool lclu_allocated; + bool end_allocated; + char __data[0]; +}; + +struct trace_event_raw_ext4_fsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u32 agno; + u64 bno; + u64 len; + u64 owner; + char __data[0]; +}; + +struct trace_event_raw_ext4_getfsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u64 block; + u64 len; + u64 owner; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_shutdown { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_error { + struct trace_entry ent; + dev_t dev; + const char *function; + unsigned int line; + char __data[0]; +}; + +struct trace_event_raw_ext4_prefetch_bitmaps { + struct trace_entry ent; + dev_t dev; + __u32 group; + __u32 next; + __u32 ios; + char __data[0]; +}; + +struct trace_event_raw_ext4_lazy_itable_init { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay_scan { + struct trace_entry ent; + dev_t dev; + int error; + int off; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay { + struct trace_entry ent; + dev_t dev; + int tag; + int ino; + int priv1; + int priv2; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_start { + struct trace_entry ent; + dev_t dev; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_stop { + struct trace_entry ent; + dev_t dev; + int nblks; + int reason; + int num_fc; + int num_fc_ineligible; + int nblks_agg; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_stats { + struct trace_entry ent; + dev_t dev; + unsigned int fc_ineligible_rc[10]; + long unsigned int fc_commits; + long unsigned int fc_ineligible_commits; + long unsigned int fc_numblks; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_dentry { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_inode { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_range { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + long int start; + long int end; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_cleanup { + struct trace_entry ent; + dev_t dev; + int j_fc_off; + int full; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_update_sb { + struct trace_entry ent; + dev_t dev; + ext4_fsblk_t fsblk; + unsigned int flags; + char 
__data[0]; +}; + +struct trace_event_data_offsets_ext4_other_inode_update_time {}; + +struct trace_event_data_offsets_ext4_free_inode {}; + +struct trace_event_data_offsets_ext4_request_inode {}; + +struct trace_event_data_offsets_ext4_allocate_inode {}; + +struct trace_event_data_offsets_ext4_evict_inode {}; + +struct trace_event_data_offsets_ext4_drop_inode {}; + +struct trace_event_data_offsets_ext4_nfs_commit_metadata {}; + +struct trace_event_data_offsets_ext4_mark_inode_dirty {}; + +struct trace_event_data_offsets_ext4_begin_ordered_truncate {}; + +struct trace_event_data_offsets_ext4__write_begin {}; + +struct trace_event_data_offsets_ext4__write_end {}; + +struct trace_event_data_offsets_ext4_writepages {}; + +struct trace_event_data_offsets_ext4_da_write_pages {}; + +struct trace_event_data_offsets_ext4_da_write_pages_extent {}; + +struct trace_event_data_offsets_ext4_writepages_result {}; + +struct trace_event_data_offsets_ext4__folio_op {}; + +struct trace_event_data_offsets_ext4_invalidate_folio_op {}; + +struct trace_event_data_offsets_ext4_discard_blocks {}; + +struct trace_event_data_offsets_ext4__mb_new_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_inode_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_group_pa {}; + +struct trace_event_data_offsets_ext4_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_mb_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_request_blocks {}; + +struct trace_event_data_offsets_ext4_allocate_blocks {}; + +struct trace_event_data_offsets_ext4_free_blocks {}; + +struct trace_event_data_offsets_ext4_sync_file_enter {}; + +struct trace_event_data_offsets_ext4_sync_file_exit {}; + +struct trace_event_data_offsets_ext4_sync_fs {}; + +struct trace_event_data_offsets_ext4_alloc_da_blocks {}; + +struct trace_event_data_offsets_ext4_mballoc_alloc {}; + +struct trace_event_data_offsets_ext4_mballoc_prealloc {}; + +struct trace_event_data_offsets_ext4__mballoc {}; + +struct trace_event_data_offsets_ext4_forget {}; + +struct trace_event_data_offsets_ext4_da_update_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_release_space {}; + +struct trace_event_data_offsets_ext4__bitmap_load {}; + +struct trace_event_data_offsets_ext4_read_block_bitmap_load {}; + +struct trace_event_data_offsets_ext4__fallocate_mode {}; + +struct trace_event_data_offsets_ext4_fallocate_exit {}; + +struct trace_event_data_offsets_ext4_unlink_enter {}; + +struct trace_event_data_offsets_ext4_unlink_exit {}; + +struct trace_event_data_offsets_ext4__truncate {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_enter {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_fastpath {}; + +struct trace_event_data_offsets_ext4__map_blocks_enter {}; + +struct trace_event_data_offsets_ext4__map_blocks_exit {}; + +struct trace_event_data_offsets_ext4_ext_load_extent {}; + +struct trace_event_data_offsets_ext4_load_inode {}; + +struct trace_event_data_offsets_ext4_journal_start_sb {}; + +struct trace_event_data_offsets_ext4_journal_start_inode {}; + +struct trace_event_data_offsets_ext4_journal_start_reserved {}; + +struct trace_event_data_offsets_ext4__trim {}; + +struct trace_event_data_offsets_ext4_ext_handle_unwritten_extents {}; + +struct trace_event_data_offsets_ext4_get_implied_cluster_alloc_exit {}; + +struct trace_event_data_offsets_ext4_ext_show_extent {}; + +struct trace_event_data_offsets_ext4_remove_blocks {}; + +struct 
trace_event_data_offsets_ext4_ext_rm_leaf {}; + +struct trace_event_data_offsets_ext4_ext_rm_idx {}; + +struct trace_event_data_offsets_ext4_ext_remove_space {}; + +struct trace_event_data_offsets_ext4_ext_remove_space_done {}; + +struct trace_event_data_offsets_ext4__es_extent {}; + +struct trace_event_data_offsets_ext4_es_remove_extent {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_enter {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_exit {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_enter {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_exit {}; + +struct trace_event_data_offsets_ext4__es_shrink_enter {}; + +struct trace_event_data_offsets_ext4_es_shrink_scan_exit {}; + +struct trace_event_data_offsets_ext4_collapse_range {}; + +struct trace_event_data_offsets_ext4_insert_range {}; + +struct trace_event_data_offsets_ext4_es_shrink {}; + +struct trace_event_data_offsets_ext4_es_insert_delayed_extent {}; + +struct trace_event_data_offsets_ext4_fsmap_class {}; + +struct trace_event_data_offsets_ext4_getfsmap_class {}; + +struct trace_event_data_offsets_ext4_shutdown {}; + +struct trace_event_data_offsets_ext4_error {}; + +struct trace_event_data_offsets_ext4_prefetch_bitmaps {}; + +struct trace_event_data_offsets_ext4_lazy_itable_init {}; + +struct trace_event_data_offsets_ext4_fc_replay_scan {}; + +struct trace_event_data_offsets_ext4_fc_replay {}; + +struct trace_event_data_offsets_ext4_fc_commit_start {}; + +struct trace_event_data_offsets_ext4_fc_commit_stop {}; + +struct trace_event_data_offsets_ext4_fc_stats {}; + +struct trace_event_data_offsets_ext4_fc_track_dentry {}; + +struct trace_event_data_offsets_ext4_fc_track_inode {}; + +struct trace_event_data_offsets_ext4_fc_track_range {}; + +struct trace_event_data_offsets_ext4_fc_cleanup {}; + +struct trace_event_data_offsets_ext4_update_sb {}; + +typedef void (*btf_trace_ext4_other_inode_update_time)(void *, struct inode *, ino_t); + +typedef void (*btf_trace_ext4_free_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_request_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_allocate_inode)(void *, struct inode *, struct inode *, int); + +typedef void (*btf_trace_ext4_evict_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_drop_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_nfs_commit_metadata)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mark_inode_dirty)(void *, struct inode *, long unsigned int); + +typedef void (*btf_trace_ext4_begin_ordered_truncate)(void *, struct inode *, loff_t); + +typedef void (*btf_trace_ext4_write_begin)(void *, struct inode *, loff_t, unsigned int); + +typedef void (*btf_trace_ext4_da_write_begin)(void *, struct inode *, loff_t, unsigned int); + +typedef void (*btf_trace_ext4_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_journalled_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_da_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_writepages)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages)(void *, struct inode *, long unsigned int, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages_extent)(void *, struct inode *, struct ext4_map_blocks *); + +typedef void (*btf_trace_ext4_writepages_result)(void *, struct inode 
*, struct writeback_control *, int, int); + +typedef void (*btf_trace_ext4_read_folio)(void *, struct inode *, struct folio *); + +typedef void (*btf_trace_ext4_release_folio)(void *, struct inode *, struct folio *); + +typedef void (*btf_trace_ext4_invalidate_folio)(void *, struct folio *, size_t, size_t); + +typedef void (*btf_trace_ext4_journalled_invalidate_folio)(void *, struct folio *, size_t, size_t); + +typedef void (*btf_trace_ext4_discard_blocks)(void *, struct super_block *, long long unsigned int, long long unsigned int); + +typedef void (*btf_trace_ext4_mb_new_inode_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_new_group_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_release_inode_pa)(void *, struct ext4_prealloc_space *, long long unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_mb_release_group_pa)(void *, struct super_block *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_discard_preallocations)(void *, struct inode *, unsigned int); + +typedef void (*btf_trace_ext4_mb_discard_preallocations)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_request_blocks)(void *, struct ext4_allocation_request *); + +typedef void (*btf_trace_ext4_allocate_blocks)(void *, struct ext4_allocation_request *, long long unsigned int); + +typedef void (*btf_trace_ext4_free_blocks)(void *, struct inode *, __u64, long unsigned int, int); + +typedef void (*btf_trace_ext4_sync_file_enter)(void *, struct file *, int); + +typedef void (*btf_trace_ext4_sync_file_exit)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_sync_fs)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_alloc_da_blocks)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mballoc_alloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_prealloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_discard)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_mballoc_free)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_forget)(void *, struct inode *, int, __u64); + +typedef void (*btf_trace_ext4_da_update_reserve_space)(void *, struct inode *, int, int); + +typedef void (*btf_trace_ext4_da_reserve_space)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_da_release_space)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_mb_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_mb_buddy_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_load_inode_bitmap)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_read_block_bitmap_load)(void *, struct super_block *, long unsigned int, bool); + +typedef void (*btf_trace_ext4_fallocate_enter)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_punch_hole)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_zero_range)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_fallocate_exit)(void *, struct inode *, loff_t, unsigned int, int); + +typedef void (*btf_trace_ext4_unlink_enter)(void *, struct inode *, struct dentry *); + +typedef void 
(*btf_trace_ext4_unlink_exit)(void *, struct dentry *, int); + +typedef void (*btf_trace_ext4_truncate_enter)(void *, struct inode *); + +typedef void (*btf_trace_ext4_truncate_exit)(void *, struct inode *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_enter)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_fastpath)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ind_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ext_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ind_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_load_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_load_inode)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_sb)(void *, struct super_block *, int, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_inode)(void *, struct inode *, int, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_reserved)(void *, struct super_block *, int, long unsigned int); + +typedef void (*btf_trace_ext4_trim_extent)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_trim_all_free)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_ext_handle_unwritten_extents)(void *, struct inode *, struct ext4_map_blocks *, int, unsigned int, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_get_implied_cluster_alloc_exit)(void *, struct super_block *, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_show_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t, short unsigned int); + +typedef void (*btf_trace_ext4_remove_blocks)(void *, struct inode *, struct ext4_extent *, ext4_lblk_t, ext4_fsblk_t, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_leaf)(void *, struct inode *, ext4_lblk_t, struct ext4_extent *, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_idx)(void *, struct inode *, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_ext_remove_space)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int); + +typedef void (*btf_trace_ext4_ext_remove_space_done)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, struct partial_cluster *, __le16); + +typedef void (*btf_trace_ext4_es_insert_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_cache_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_remove_extent)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_exit)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_lookup_extent_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_lookup_extent_exit)(void *, struct inode *, struct extent_status *, int); + +typedef void 
(*btf_trace_ext4_es_shrink_count)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_enter)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_exit)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_collapse_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_insert_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_es_shrink)(void *, struct super_block *, int, u64, int, int); + +typedef void (*btf_trace_ext4_es_insert_delayed_extent)(void *, struct inode *, struct extent_status *, bool, bool); + +typedef void (*btf_trace_ext4_fsmap_low_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_high_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_mapping)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_getfsmap_low_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_high_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_mapping)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_shutdown)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_error)(void *, struct super_block *, const char *, unsigned int); + +typedef void (*btf_trace_ext4_prefetch_bitmaps)(void *, struct super_block *, ext4_group_t, ext4_group_t, unsigned int); + +typedef void (*btf_trace_ext4_lazy_itable_init)(void *, struct super_block *, ext4_group_t); + +typedef void (*btf_trace_ext4_fc_replay_scan)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_fc_replay)(void *, struct super_block *, int, int, int, int); + +typedef void (*btf_trace_ext4_fc_commit_start)(void *, struct super_block *, tid_t); + +typedef void (*btf_trace_ext4_fc_commit_stop)(void *, struct super_block *, int, int, tid_t); + +typedef void (*btf_trace_ext4_fc_stats)(void *, struct super_block *); + +typedef void (*btf_trace_ext4_fc_track_create)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_link)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_unlink)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_inode)(void *, handle_t *, struct inode *, int); + +typedef void (*btf_trace_ext4_fc_track_range)(void *, handle_t *, struct inode *, long int, long int, int); + +typedef void (*btf_trace_ext4_fc_cleanup)(void *, journal_t *, int, tid_t); + +typedef void (*btf_trace_ext4_update_sb)(void *, struct super_block *, ext4_fsblk_t, unsigned int); + +struct ext4_err_translation { + int code; + int errno; +}; + +enum { + Opt_bsd_df = 0, + Opt_minix_df = 1, + Opt_grpid = 2, + Opt_nogrpid = 3, + Opt_resgid = 4, + Opt_resuid = 5, + Opt_sb = 6, + Opt_nouid32 = 7, + Opt_debug = 8, + Opt_removed = 9, + Opt_user_xattr = 10, + Opt_acl___2 = 11, + Opt_auto_da_alloc = 12, + Opt_noauto_da_alloc = 13, + Opt_noload = 14, + Opt_commit = 15, + Opt_min_batch_time = 16, + Opt_max_batch_time = 17, + Opt_journal_dev = 18, + Opt_journal_path = 19, + Opt_journal_checksum = 20, + Opt_journal_async_commit = 21, + Opt_abort = 22, + Opt_data_journal = 23, + Opt_data_ordered = 24, + Opt_data_writeback = 25, + Opt_data_err_abort = 26, + Opt_data_err_ignore = 27, + 
Opt_test_dummy_encryption = 28, + Opt_inlinecrypt = 29, + Opt_usrjquota = 30, + Opt_grpjquota = 31, + Opt_quota = 32, + Opt_noquota = 33, + Opt_barrier___2 = 34, + Opt_nobarrier = 35, + Opt_err___2 = 36, + Opt_usrquota = 37, + Opt_grpquota = 38, + Opt_prjquota = 39, + Opt_dax = 40, + Opt_dax_always = 41, + Opt_dax_inode = 42, + Opt_dax_never = 43, + Opt_stripe = 44, + Opt_delalloc = 45, + Opt_nodelalloc = 46, + Opt_warn_on_error = 47, + Opt_nowarn_on_error = 48, + Opt_mblk_io_submit = 49, + Opt_debug_want_extra_isize = 50, + Opt_nomblk_io_submit = 51, + Opt_block_validity = 52, + Opt_noblock_validity = 53, + Opt_inode_readahead_blks = 54, + Opt_journal_ioprio = 55, + Opt_dioread_nolock = 56, + Opt_dioread_lock = 57, + Opt_discard___2 = 58, + Opt_nodiscard = 59, + Opt_init_itable = 60, + Opt_noinit_itable = 61, + Opt_max_dir_size_kb = 62, + Opt_nojournal_checksum = 63, + Opt_nombcache = 64, + Opt_no_prefetch_block_bitmaps = 65, + Opt_mb_optimize_scan = 66, + Opt_errors = 67, + Opt_data = 68, + Opt_data_err = 69, + Opt_jqfmt = 70, + Opt_dax_type = 71, +}; + +struct mount_opts { + int token; + int mount_opt; + int flags; +}; + +struct ext4_fs_context { + char *s_qf_names[3]; + struct fscrypt_dummy_policy dummy_enc_policy; + int s_jquota_fmt; + short unsigned int qname_spec; + long unsigned int vals_s_flags; + long unsigned int mask_s_flags; + long unsigned int journal_devnum; + long unsigned int s_commit_interval; + long unsigned int s_stripe; + unsigned int s_inode_readahead_blks; + unsigned int s_want_extra_isize; + unsigned int s_li_wait_mult; + unsigned int s_max_dir_size_kb; + unsigned int journal_ioprio; + unsigned int vals_s_mount_opt; + unsigned int mask_s_mount_opt; + unsigned int vals_s_mount_opt2; + unsigned int mask_s_mount_opt2; + unsigned int opt_flags; + unsigned int spec; + u32 s_max_batch_time; + u32 s_min_batch_time; + kuid_t s_resuid; + kgid_t s_resgid; + ext4_fsblk_t s_sb_block; +}; + +struct ext4_mount_options { + long unsigned int s_mount_opt; + long unsigned int s_mount_opt2; + kuid_t s_resuid; + kgid_t s_resgid; + long unsigned int s_commit_interval; + u32 s_min_batch_time; + u32 s_max_batch_time; +}; + +struct comp_alg_common { + struct crypto_alg base; +}; + +struct crypto_scomp { + struct crypto_tfm base; +}; + +struct scomp_alg { + void * (*alloc_ctx)(struct crypto_scomp *); + void (*free_ctx)(struct crypto_scomp *, void *); + int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + union { + struct { + struct crypto_alg base; + }; + struct comp_alg_common calg; + }; +}; + +struct lzo_ctx { + void *lzo_comp_mem; +}; + +struct blkg_conf_ctx { + char *input; + char *body; + struct block_device *bdev; + struct blkcg_gq *blkg; +}; + +struct blk_iolatency { + struct rq_qos rqos; + struct timer_list timer; + bool enabled; + atomic_t enable_cnt; + struct work_struct enable_work; +}; + +struct iolatency_grp; + +struct child_latency_info { + spinlock_t lock; + u64 last_scale_event; + u64 scale_lat; + u64 nr_samples; + struct iolatency_grp *scale_grp; + atomic_t scale_cookie; +}; + +struct percentile_stats { + u64 total; + u64 missed; +}; + +struct latency_stat { + union { + struct percentile_stats ps; + struct blk_rq_stat rqs; + }; +}; + +struct iolatency_grp { + struct blkg_policy_data pd; + struct latency_stat *stats; + struct latency_stat cur_stat; + struct blk_iolatency *blkiolat; + unsigned int max_depth; + struct rq_wait rq_wait; 
+ atomic64_t window_start; + atomic_t scale_cookie; + u64 min_lat_nsec; + u64 cur_win_nsec; + u64 lat_avg; + u64 nr_samples; + bool ssd; + struct child_latency_info child_lat; +}; + +enum io_uring_napi_tracking_strategy { + IO_URING_NAPI_TRACKING_DYNAMIC = 0, + IO_URING_NAPI_TRACKING_STATIC = 1, + IO_URING_NAPI_TRACKING_INACTIVE = 255, +}; + +typedef __int128 unsigned __u128; + +typedef __u128 u128; + +struct reciprocal_value_adv { + u32 m; + u8 sh; + u8 exp; + bool is_wide_m; +}; + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32, + BLAKE2S_128_HASH_SIZE = 16, + BLAKE2S_160_HASH_SIZE = 20, + BLAKE2S_224_HASH_SIZE = 28, + BLAKE2S_256_HASH_SIZE = 32, +}; + +struct blake2s_state { + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[64]; + unsigned int buflen; + unsigned int outlen; +}; + +struct karatsuba_ctx { + struct karatsuba_ctx *next; + mpi_ptr_t tspace; + mpi_size_t tspace_size; + mpi_ptr_t tp; + mpi_size_t tp_size; +}; + +typedef struct { + FSE_CTable CTable[59]; + U32 scratchBuffer[41]; + unsigned int count[13]; + S16 norm[13]; +} HUF_CompressWeightsWksp; + +typedef struct { + HUF_CompressWeightsWksp wksp; + BYTE bitsToWeight[13]; + BYTE huffWeight[255]; +} HUF_WriteCTableWksp; + +struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +}; + +typedef struct nodeElt_s nodeElt; + +typedef struct { + U16 base; + U16 curr; +} rankPos; + +typedef nodeElt huffNodeTable[512]; + +typedef struct { + huffNodeTable huffNodeTbl; + rankPos rankPosition[192]; +} HUF_buildCTable_wksp_tables; + +typedef struct { + size_t bitContainer[2]; + size_t bitPos[2]; + BYTE *startPtr; + BYTE *ptr; + BYTE *endPtr; +} HUF_CStream_t; + +typedef enum { + HUF_singleStream = 0, + HUF_fourStreams = 1, +} HUF_nbStreams_e; + +typedef struct { + unsigned int count[256]; + HUF_CElt CTable[257]; + union { + HUF_buildCTable_wksp_tables buildCTable_wksp; + HUF_WriteCTableWksp writeCTable_wksp; + U32 hist_wksp[1024]; + } wksps; +} HUF_compress_tables_t; + +typedef resource_size_t (*resource_alignf)(void *, const struct resource *, resource_size_t, resource_size_t); + +struct pci_host_bridge { + struct device dev; + struct pci_bus *bus; + struct pci_ops *ops; + struct pci_ops *child_ops; + void *sysdata; + int busnr; + int domain_nr; + struct list_head windows; + struct list_head dma_ranges; + u8 (*swizzle_irq)(struct pci_dev *, u8 *); + int (*map_irq)(const struct pci_dev *, u8, u8); + void (*release_fn)(struct pci_host_bridge *); + void *release_data; + unsigned int ignore_reset_delay: 1; + unsigned int no_ext_tags: 1; + unsigned int no_inc_mrrs: 1; + unsigned int native_aer: 1; + unsigned int native_pcie_hotplug: 1; + unsigned int native_shpc_hotplug: 1; + unsigned int native_pme: 1; + unsigned int native_ltr: 1; + unsigned int native_dpc: 1; + unsigned int native_cxl_error: 1; + unsigned int preserve_config: 1; + unsigned int size_windows: 1; + unsigned int msi_domain: 1; + resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int private[0]; +}; + +enum bus_notifier_event { + BUS_NOTIFY_ADD_DEVICE = 0, + BUS_NOTIFY_DEL_DEVICE = 1, + BUS_NOTIFY_REMOVED_DEVICE = 2, + BUS_NOTIFY_BIND_DRIVER = 3, + BUS_NOTIFY_BOUND_DRIVER = 4, + BUS_NOTIFY_UNBIND_DRIVER = 5, + BUS_NOTIFY_UNBOUND_DRIVER = 6, + BUS_NOTIFY_DRIVER_NOT_BOUND = 7, +}; + +struct vga_device { + struct list_head list; + struct pci_dev *pdev; + 
unsigned int decodes; + unsigned int owns; + unsigned int locks; + unsigned int io_lock_cnt; + unsigned int mem_lock_cnt; + unsigned int io_norm_cnt; + unsigned int mem_norm_cnt; + bool bridge_has_one_vga; + bool is_firmware_default; + unsigned int (*set_decode)(struct pci_dev *, bool); +}; + +struct vga_arb_user_card { + struct pci_dev *pdev; + unsigned int mem_cnt; + unsigned int io_cnt; +}; + +struct vga_arb_private { + struct list_head list; + struct pci_dev *target; + struct vga_arb_user_card cards[16]; + spinlock_t lock; +}; + +struct clk_composite { + struct clk_hw hw; + struct clk_ops ops; + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + const struct clk_ops *mux_ops; + const struct clk_ops *rate_ops; + const struct clk_ops *gate_ops; +}; + +enum translation_map { + LAT1_MAP = 0, + GRAF_MAP = 1, + IBMPC_MAP = 2, + USER_MAP = 3, + FIRST_MAP = 0, + LAST_MAP = 3, +}; + +struct uni_pagedict { + u16 **uni_pgdir[32]; + long unsigned int refcount; + long unsigned int sum; + unsigned char *inverse_translations[4]; + u16 *inverse_trans_unicode; +}; + +struct msdos_partition { + u8 boot_ind; + u8 head; + u8 sector; + u8 cyl; + u8 sys_ind; + u8 end_head; + u8 end_sector; + u8 end_cyl; + __le32 start_sect; + __le32 nr_sects; +}; + +enum scsi_disposition { + NEEDS_RETRY = 8193, + SUCCESS = 8194, + FAILED = 8195, + QUEUED = 8196, + SOFT_ERROR = 8197, + ADD_TO_MLQUEUE = 8198, + TIMEOUT_ERROR = 8199, + SCSI_RETURN_NOT_HANDLED = 8200, + FAST_IO_FAIL = 8201, +}; + +enum ata_dev_iter_mode { + ATA_DITER_ENABLED = 0, + ATA_DITER_ENABLED_REVERSE = 1, + ATA_DITER_ALL = 2, + ATA_DITER_ALL_REVERSE = 3, +}; + +enum { + ATA_READID_POSTRESET = 1, + ATA_DNXFER_PIO = 0, + ATA_DNXFER_DMA = 1, + ATA_DNXFER_40C = 2, + ATA_DNXFER_FORCE_PIO = 3, + ATA_DNXFER_FORCE_PIO0 = 4, + ATA_DNXFER_QUIET = -2147483648, +}; + +enum { + ATA_EH_SPDN_NCQ_OFF = 1, + ATA_EH_SPDN_SPEED_DOWN = 2, + ATA_EH_SPDN_FALLBACK_TO_PIO = 4, + ATA_EH_SPDN_KEEP_ERRORS = 8, + ATA_EFLAG_IS_IO = 1, + ATA_EFLAG_DUBIOUS_XFER = 2, + ATA_EFLAG_OLD_ER = -2147483648, + ATA_ECAT_NONE = 0, + ATA_ECAT_ATA_BUS = 1, + ATA_ECAT_TOUT_HSM = 2, + ATA_ECAT_UNK_DEV = 3, + ATA_ECAT_DUBIOUS_NONE = 4, + ATA_ECAT_DUBIOUS_ATA_BUS = 5, + ATA_ECAT_DUBIOUS_TOUT_HSM = 6, + ATA_ECAT_DUBIOUS_UNK_DEV = 7, + ATA_ECAT_NR = 8, + ATA_EH_CMD_DFL_TIMEOUT = 5000, + ATA_EH_RESET_COOL_DOWN = 5000, + ATA_EH_PRERESET_TIMEOUT = 10000, + ATA_EH_FASTDRAIN_INTERVAL = 3000, + ATA_EH_UA_TRIES = 5, + ATA_EH_PROBE_TRIAL_INTERVAL = 60000, + ATA_EH_PROBE_TRIALS = 2, +}; + +struct ata_eh_cmd_timeout_ent { + const u8 *commands; + const unsigned int *timeouts; +}; + +struct speed_down_verdict_arg { + u64 since; + int xfer_ok; + int nr_errors[8]; +}; + +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block *head; +}; + +enum usb_phy_type { + USB_PHY_TYPE_UNDEFINED = 0, + USB_PHY_TYPE_USB2 = 1, + USB_PHY_TYPE_USB3 = 2, +}; + +enum usb_phy_events { + USB_EVENT_NONE = 0, + USB_EVENT_VBUS = 1, + USB_EVENT_ID = 2, + USB_EVENT_CHARGER = 3, + USB_EVENT_ENUMERATED = 4, +}; + +struct extcon_dev; + +enum usb_charger_type { + UNKNOWN_TYPE = 0, + SDP_TYPE = 1, + DCP_TYPE = 2, + CDP_TYPE = 3, + ACA_TYPE = 4, +}; + +enum usb_charger_state { + USB_CHARGER_DEFAULT = 0, + USB_CHARGER_PRESENT = 1, + USB_CHARGER_ABSENT = 2, +}; + +struct usb_charger_current { + unsigned int sdp_min; + unsigned int sdp_max; + unsigned int dcp_min; + unsigned int dcp_max; + unsigned int cdp_min; + unsigned int cdp_max; + unsigned int aca_min; + unsigned int aca_max; +}; + +struct usb_otg; + 
+struct usb_phy_io_ops; + +struct usb_phy { + struct device *dev; + const char *label; + unsigned int flags; + enum usb_phy_type type; + enum usb_phy_events last_event; + struct usb_otg *otg; + struct device *io_dev; + struct usb_phy_io_ops *io_ops; + void *io_priv; + struct extcon_dev *edev; + struct extcon_dev *id_edev; + struct notifier_block vbus_nb; + struct notifier_block id_nb; + struct notifier_block type_nb; + enum usb_charger_type chg_type; + enum usb_charger_state chg_state; + struct usb_charger_current chg_cur; + struct work_struct chg_work; + struct atomic_notifier_head notifier; + u16 port_status; + u16 port_change; + struct list_head head; + int (*init)(struct usb_phy *); + void (*shutdown)(struct usb_phy *); + int (*set_vbus)(struct usb_phy *, int); + int (*set_power)(struct usb_phy *, unsigned int); + int (*set_suspend)(struct usb_phy *, int); + int (*set_wakeup)(struct usb_phy *, bool); + int (*notify_connect)(struct usb_phy *, enum usb_device_speed); + int (*notify_disconnect)(struct usb_phy *, enum usb_device_speed); + enum usb_charger_type (*charger_detect)(struct usb_phy *); +}; + +struct usb_phy_io_ops { + int (*read)(struct usb_phy *, u32); + int (*write)(struct usb_phy *, u32, u32); +}; + +struct usb_gadget; + +struct usb_otg { + u8 default_a; + struct phy *phy; + struct usb_phy *usb_phy; + struct usb_bus *host; + struct usb_gadget *gadget; + enum usb_otg_state state; + int (*set_host)(struct usb_otg *, struct usb_bus *); + int (*set_peripheral)(struct usb_otg *, struct usb_gadget *); + int (*set_vbus)(struct usb_otg *, bool); + int (*start_srp)(struct usb_otg *); + int (*start_hnp)(struct usb_otg *); +}; + +struct platform_driver { + int (*probe)(struct platform_device *); + union { + void (*remove)(struct platform_device *); + void (*remove_new)(struct platform_device *); + }; + void (*shutdown)(struct platform_device *); + int (*suspend)(struct platform_device *, pm_message_t); + int (*resume)(struct platform_device *); + struct device_driver driver; + const struct platform_device_id *id_table; + bool prevent_deferred_probe; + bool driver_managed_dma; +}; + +struct ehci_stats { + long unsigned int normal; + long unsigned int error; + long unsigned int iaa; + long unsigned int lost_iaa; + long unsigned int complete; + long unsigned int unlink; +}; + +struct ehci_per_sched { + struct usb_device *udev; + struct usb_host_endpoint *ep; + struct list_head ps_list; + u16 tt_usecs; + u16 cs_mask; + u16 period; + u16 phase; + u8 bw_phase; + u8 phase_uf; + u8 usecs; + u8 c_usecs; + u8 bw_uperiod; + u8 bw_period; +}; + +enum ehci_rh_state { + EHCI_RH_HALTED = 0, + EHCI_RH_SUSPENDED = 1, + EHCI_RH_RUNNING = 2, + EHCI_RH_STOPPING = 3, +}; + +enum ehci_hrtimer_event { + EHCI_HRTIMER_POLL_ASS = 0, + EHCI_HRTIMER_POLL_PSS = 1, + EHCI_HRTIMER_POLL_DEAD = 2, + EHCI_HRTIMER_UNLINK_INTR = 3, + EHCI_HRTIMER_FREE_ITDS = 4, + EHCI_HRTIMER_ACTIVE_UNLINK = 5, + EHCI_HRTIMER_START_UNLINK_INTR = 6, + EHCI_HRTIMER_ASYNC_UNLINKS = 7, + EHCI_HRTIMER_IAA_WATCHDOG = 8, + EHCI_HRTIMER_DISABLE_PERIODIC = 9, + EHCI_HRTIMER_DISABLE_ASYNC = 10, + EHCI_HRTIMER_IO_WATCHDOG = 11, + EHCI_HRTIMER_NUM_EVENTS = 12, +}; + +struct ehci_caps; + +struct ehci_regs; + +struct ehci_dbg_port; + +struct ehci_qh; + +union ehci_shadow; + +struct ehci_itd; + +struct ehci_sitd; + +struct ehci_hcd { + enum ehci_hrtimer_event next_hrtimer_event; + unsigned int enabled_hrtimer_events; + ktime_t hr_timeouts[12]; + struct hrtimer hrtimer; + int PSS_poll_count; + int ASS_poll_count; + int died_poll_count; + struct ehci_caps 
*caps; + struct ehci_regs *regs; + struct ehci_dbg_port *debug; + __u32 hcs_params; + spinlock_t lock; + enum ehci_rh_state rh_state; + bool scanning: 1; + bool need_rescan: 1; + bool intr_unlinking: 1; + bool iaa_in_progress: 1; + bool async_unlinking: 1; + bool shutdown: 1; + struct ehci_qh *qh_scan_next; + struct ehci_qh *async; + struct ehci_qh *dummy; + struct list_head async_unlink; + struct list_head async_idle; + unsigned int async_unlink_cycle; + unsigned int async_count; + __le32 old_current; + __le32 old_token; + unsigned int periodic_size; + __le32 *periodic; + dma_addr_t periodic_dma; + struct list_head intr_qh_list; + unsigned int i_thresh; + union ehci_shadow *pshadow; + struct list_head intr_unlink_wait; + struct list_head intr_unlink; + unsigned int intr_unlink_wait_cycle; + unsigned int intr_unlink_cycle; + unsigned int now_frame; + unsigned int last_iso_frame; + unsigned int intr_count; + unsigned int isoc_count; + unsigned int periodic_count; + unsigned int uframe_periodic_max; + struct list_head cached_itd_list; + struct ehci_itd *last_itd_to_free; + struct list_head cached_sitd_list; + struct ehci_sitd *last_sitd_to_free; + long unsigned int reset_done[15]; + long unsigned int bus_suspended; + long unsigned int companion_ports; + long unsigned int owned_ports; + long unsigned int port_c_suspend; + long unsigned int suspended_ports; + long unsigned int resuming_ports; + struct dma_pool *qh_pool; + struct dma_pool *qtd_pool; + struct dma_pool *itd_pool; + struct dma_pool *sitd_pool; + unsigned int random_frame; + long unsigned int next_statechange; + ktime_t last_periodic_enable; + u32 command; + unsigned int no_selective_suspend: 1; + unsigned int has_fsl_port_bug: 1; + unsigned int has_fsl_hs_errata: 1; + unsigned int has_fsl_susp_errata: 1; + unsigned int has_ci_pec_bug: 1; + unsigned int big_endian_mmio: 1; + unsigned int big_endian_desc: 1; + unsigned int big_endian_capbase: 1; + unsigned int has_amcc_usb23: 1; + unsigned int need_io_watchdog: 1; + unsigned int amd_pll_fix: 1; + unsigned int use_dummy_qh: 1; + unsigned int has_synopsys_hc_bug: 1; + unsigned int frame_index_bug: 1; + unsigned int need_oc_pp_cycle: 1; + unsigned int imx28_write_fix: 1; + unsigned int spurious_oc: 1; + unsigned int is_aspeed: 1; + unsigned int zx_wakeup_clear_needed: 1; + __le32 *ohci_hcctrl_reg; + unsigned int has_hostpc: 1; + unsigned int has_tdi_phy_lpm: 1; + unsigned int has_ppcd: 1; + u8 sbrn; + struct ehci_stats stats; + struct dentry *debug_dir; + u8 bandwidth[64]; + u8 tt_budget[64]; + struct list_head tt_list; + long unsigned int priv[0]; +}; + +struct ehci_caps { + u32 hc_capbase; + u32 hcs_params; + u32 hcc_params; + u8 portroute[8]; +}; + +struct ehci_regs { + u32 command; + u32 status; + u32 intr_enable; + u32 frame_index; + u32 segment; + u32 frame_list; + u32 async_next; + u32 reserved1[2]; + u32 txfill_tuning; + u32 reserved2[6]; + u32 configured_flag; + union { + u32 port_status[15]; + struct { + u32 reserved3[9]; + u32 usbmode; + }; + }; + union { + struct { + u32 reserved4; + u32 hostpc[15]; + }; + u32 brcm_insnreg[4]; + }; + u32 reserved5[2]; + u32 usbmode_ex; +}; + +struct ehci_dbg_port { + u32 control; + u32 pids; + u32 data03; + u32 data47; + u32 address; +}; + +struct ehci_fstn; + +union ehci_shadow { + struct ehci_qh *qh; + struct ehci_itd *itd; + struct ehci_sitd *sitd; + struct ehci_fstn *fstn; + __le32 *hw_next; + void *ptr; +}; + +struct ehci_qh_hw; + +struct ehci_qtd; + +struct ehci_qh { + struct ehci_qh_hw *hw; + dma_addr_t qh_dma; + union ehci_shadow 
qh_next; + struct list_head qtd_list; + struct list_head intr_node; + struct ehci_qtd *dummy; + struct list_head unlink_node; + struct ehci_per_sched ps; + unsigned int unlink_cycle; + u8 qh_state; + u8 xacterrs; + u8 unlink_reason; + u8 gap_uf; + unsigned int is_out: 1; + unsigned int clearing_tt: 1; + unsigned int dequeue_during_giveback: 1; + unsigned int should_be_inactive: 1; +}; + +struct ehci_iso_stream; + +struct ehci_itd { + __le32 hw_next; + __le32 hw_transaction[8]; + __le32 hw_bufp[7]; + __le32 hw_bufp_hi[7]; + dma_addr_t itd_dma; + union ehci_shadow itd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head itd_list; + unsigned int frame; + unsigned int pg; + unsigned int index[8]; + long: 64; +}; + +struct ehci_sitd { + __le32 hw_next; + __le32 hw_fullspeed_ep; + __le32 hw_uframe; + __le32 hw_results; + __le32 hw_buf[2]; + __le32 hw_backpointer; + __le32 hw_buf_hi[2]; + dma_addr_t sitd_dma; + union ehci_shadow sitd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head sitd_list; + unsigned int frame; + unsigned int index; +}; + +struct ehci_qtd { + __le32 hw_next; + __le32 hw_alt_next; + __le32 hw_token; + __le32 hw_buf[5]; + __le32 hw_buf_hi[5]; + dma_addr_t qtd_dma; + struct list_head qtd_list; + struct urb *urb; + size_t length; +}; + +struct ehci_fstn { + __le32 hw_next; + __le32 hw_prev; + dma_addr_t fstn_dma; + union ehci_shadow fstn_next; + long: 64; +}; + +struct ehci_qh_hw { + __le32 hw_next; + __le32 hw_info1; + __le32 hw_info2; + __le32 hw_current; + __le32 hw_qtd_next; + __le32 hw_alt_next; + __le32 hw_token; + __le32 hw_buf[5]; + __le32 hw_buf_hi[5]; + long: 64; + long: 64; + long: 64; +}; + +struct ehci_iso_packet { + u64 bufp; + __le32 transaction; + u8 cross; + u32 buf1; +}; + +struct ehci_iso_sched { + struct list_head td_list; + unsigned int span; + unsigned int first_packet; + struct ehci_iso_packet packet[0]; +}; + +struct ehci_iso_stream { + struct ehci_qh_hw *hw; + u8 bEndpointAddress; + u8 highspeed; + struct list_head td_list; + struct list_head free_list; + struct ehci_per_sched ps; + unsigned int next_uframe; + __le32 splits; + u16 uperiod; + u16 maxp; + unsigned int bandwidth; + __le32 buf0; + __le32 buf1; + __le32 buf2; + __le32 address; +}; + +struct ehci_tt { + u16 bandwidth[8]; + struct list_head tt_list; + struct list_head ps_list; + struct usb_tt *usb_tt; + int tt_port; +}; + +struct ehci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*port_power)(struct usb_hcd *, int, bool); +}; + +struct debug_buffer { + ssize_t (*fill_func)(struct debug_buffer *); + struct usb_bus *bus; + struct mutex mutex; + size_t count; + char *output_buf; + size_t alloc_size; +}; + +struct thermal_attr { + struct device_attribute attr; + char name[20]; +}; + +struct thermal_trip_attrs { + struct thermal_attr type; + struct thermal_attr temp; + struct thermal_attr hyst; +}; + +struct thermal_trip_desc { + struct thermal_trip trip; + struct thermal_trip_attrs trip_attrs; + struct list_head list_node; + struct list_head thermal_instances; + int threshold; +}; + +struct thermal_governor; + +struct thermal_zone_device { + int id; + char type[20]; + struct device device; + struct completion removal; + struct completion resume; + struct attribute_group trips_attribute_group; + struct list_head trips_high; + struct list_head trips_reached; + struct list_head trips_invalid; + enum thermal_device_mode mode; + void *devdata; + int num_trips; + long unsigned int passive_delay_jiffies; + long 
unsigned int polling_delay_jiffies; + long unsigned int recheck_delay_jiffies; + int temperature; + int last_temperature; + int emul_temperature; + int passive; + int prev_low_trip; + int prev_high_trip; + struct thermal_zone_device_ops ops; + struct thermal_zone_params *tzp; + struct thermal_governor *governor; + void *governor_data; + struct ida ida; + struct mutex lock; + struct list_head node; + struct delayed_work poll_queue; + enum thermal_notify_event notify_event; + u8 state; + struct list_head user_thresholds; + struct thermal_trip_desc trips[0]; +}; + +struct user_threshold { + struct list_head list_node; + int temperature; + int direction; +}; + +struct thermal_governor { + const char *name; + int (*bind_to_tz)(struct thermal_zone_device *); + void (*unbind_from_tz)(struct thermal_zone_device *); + void (*trip_crossed)(struct thermal_zone_device *, const struct thermal_trip *, bool); + void (*manage)(struct thermal_zone_device *); + void (*update_tz)(struct thermal_zone_device *, enum thermal_notify_event); + struct list_head governor_list; +}; + +typedef struct thermal_zone_device *class_thermal_zone_t; + +struct hid_item { + unsigned int format; + __u8 size; + __u8 type; + __u8 tag; + union { + __u8 u8; + __s8 s8; + __u16 u16; + __s16 s16; + __u32 u32; + __s32 s32; + const __u8 *longdata; + } data; +}; + +struct hid_global { + unsigned int usage_page; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned int unit; + unsigned int report_id; + unsigned int report_size; + unsigned int report_count; +}; + +struct hid_local { + unsigned int usage[12288]; + u8 usage_size[12288]; + unsigned int collection_index[12288]; + unsigned int usage_index; + unsigned int usage_minimum; + unsigned int delimiter_depth; + unsigned int delimiter_branch; +}; + +struct hid_parser { + struct hid_global global; + struct hid_global global_stack[4]; + unsigned int global_stack_ptr; + struct hid_local local; + unsigned int *collection_stack; + unsigned int collection_stack_ptr; + unsigned int collection_stack_size; + struct hid_device *device; + unsigned int scan_flags; +}; + +struct hiddev { + int minor; + int exist; + int open; + struct mutex existancelock; + wait_queue_head_t wait; + struct hid_device *hid; + struct list_head list; + spinlock_t list_lock; + bool initialized; +}; + +struct hidraw { + unsigned int minor; + int exist; + int open; + wait_queue_head_t wait; + struct hid_device *hid; + struct device *dev; + spinlock_t list_lock; + struct list_head list; +}; + +struct hid_dynid { + struct list_head list; + struct hid_device_id id; +}; + +struct page_pool_params_fast { + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; +}; + +struct page_pool_alloc_stats { + u64 fast; + u64 slow; + u64 slow_high_order; + u64 empty; + u64 refill; + u64 waive; +}; + +struct pp_alloc_cache { + u32 count; + netmem_ref cache[128]; +}; + +struct ptr_ring { + int producer; + spinlock_t producer_lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + int consumer_head; + int consumer_tail; + spinlock_t consumer_lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + int size; + int batch; + void **queue; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct page_pool_params_slow { + struct net_device *netdev; 
+ unsigned int queue_idx; + unsigned int flags; + void (*init_callback)(netmem_ref, void *); + void *init_arg; +}; + +struct page_pool_recycle_stats; + +struct page_pool { + struct page_pool_params_fast p; + int cpuid; + u32 pages_state_hold_cnt; + bool has_init_callback: 1; + bool dma_map: 1; + bool dma_sync: 1; + bool system: 1; + long: 0; + __u8 __cacheline_group_begin__frag[0]; + long int frag_users; + netmem_ref frag_page; + unsigned int frag_offset; + long: 0; + __u8 __cacheline_group_end__frag[0]; + long: 64; + struct {} __cacheline_group_pad__frag; + struct delayed_work release_dw; + void (*disconnect)(void *); + long unsigned int defer_start; + long unsigned int defer_warn; + struct page_pool_alloc_stats alloc_stats; + u32 xdp_mem_id; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct pp_alloc_cache alloc; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct ptr_ring ring; + void *mp_priv; + struct page_pool_recycle_stats *recycle_stats; + atomic_t pages_state_release_cnt; + refcount_t user_cnt; + u64 destroy_cnt; + struct page_pool_params_slow slow; + struct { + struct hlist_node list; + u64 detach_time; + u32 napi_id; + u32 id; + } user; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct cpu_topology { + int thread_id; + int core_id; + int cluster_id; + int package_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; + cpumask_t cluster_sibling; + cpumask_t llc_sibling; +}; + +enum { + SKB_FCLONE_UNAVAILABLE = 0, + SKB_FCLONE_ORIG = 1, + SKB_FCLONE_CLONE = 2, +}; + +struct skb_checksum_ops { + __wsum (*update)(const void *, int, __wsum); + __wsum (*combine)(__wsum, __wsum, int, int); +}; + +enum nf_dev_hooks { + NF_NETDEV_INGRESS = 0, + NF_NETDEV_EGRESS = 1, + NF_NETDEV_NUMHOOKS = 2, +}; + +enum tcx_action_base { + TCX_NEXT = -1, + TCX_PASS = 0, + TCX_DROP = 2, + TCX_REDIRECT = 7, +}; + +struct ifbond { + __s32 bond_mode; + __s32 num_slaves; + __s32 miimon; +}; + +typedef struct ifbond ifbond; + +struct ifslave { + __s32 slave_id; + char slave_name[16]; + __s8 link; + __s8 state; + __u32 link_failure_count; +}; + +typedef struct ifslave ifslave; + +enum netdev_queue_type { + NETDEV_QUEUE_TYPE_RX = 0, + NETDEV_QUEUE_TYPE_TX = 1, +}; + +enum { + NAPIF_STATE_SCHED = 1, + NAPIF_STATE_MISSED = 2, + NAPIF_STATE_DISABLE = 4, + NAPIF_STATE_NPSVC = 8, + NAPIF_STATE_LISTED = 16, + NAPIF_STATE_NO_BUSY_POLL = 32, + NAPIF_STATE_IN_BUSY_POLL = 64, + NAPIF_STATE_PREFER_BUSY_POLL = 128, + NAPIF_STATE_THREADED = 256, + NAPIF_STATE_SCHED_THREADED = 512, +}; + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF = 0, + __QUEUE_STATE_STACK_XOFF = 1, + __QUEUE_STATE_FROZEN = 2, +}; + +struct net_device_path_stack { + int num_paths; + struct net_device_path path[5]; +}; + +struct bpf_xdp_link { + struct bpf_link link; + struct net_device *dev; + int flags; +}; + +struct netdev_net_notifier { + struct list_head list; + struct notifier_block *nb; +}; + +enum netdev_reg_state { + NETREG_UNINITIALIZED = 0, + NETREG_REGISTERED = 1, + NETREG_UNREGISTERING = 2, + NETREG_UNREGISTERED = 3, + NETREG_RELEASED = 4, + NETREG_DUMMY = 5, +}; + +struct pp_memory_provider_params { + void *mp_priv; +}; + +struct rps_map; + +struct rps_dev_flow_table; + +struct netdev_rx_queue { + struct xdp_rxq_info xdp_rxq; + struct rps_map *rps_map; + struct rps_dev_flow_table *rps_flow_table; + struct kobject kobj; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct napi_struct *napi; + struct pp_memory_provider_params 
mp_params; + long: 64; + long: 64; + long: 64; +}; + +struct cpu_rmap { + struct kref refcount; + u16 size; + void **obj; + struct { + u16 index; + u16 dist; + } near[0]; +}; + +struct phy_link_topology { + struct xarray phys; + u32 next_phy_index; +}; + +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; + union { + u32 mtu; + } ext; +}; + +struct netdev_notifier_changeupper_info { + struct netdev_notifier_info info; + struct net_device *upper_dev; + bool master; + bool linking; + void *upper_info; +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; + void *lower_state_info; +}; + +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; + const unsigned char *dev_addr; +}; + +enum netdev_offload_xstats_type { + NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, +}; + +struct netdev_notifier_offload_xstats_rd { + struct rtnl_hw_stats64 stats; + bool used; +}; + +struct netdev_notifier_offload_xstats_ru { + bool used; +}; + +struct netdev_notifier_offload_xstats_info { + struct netdev_notifier_info info; + enum netdev_offload_xstats_type type; + union { + struct netdev_notifier_offload_xstats_rd *report_delta; + struct netdev_notifier_offload_xstats_ru *report_used; + }; +}; + +enum { + NESTED_SYNC_IMM_BIT = 0, + NESTED_SYNC_TODO_BIT = 1, +}; + +struct netdev_nested_priv { + unsigned char flags; + void *data; +}; + +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; + struct netdev_bonding_info bonding_info; +}; + +struct vlan_ethhdr { + union { + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + }; + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + } addrs; + }; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +enum qdisc_state_t { + __QDISC_STATE_SCHED = 0, + __QDISC_STATE_DEACTIVATED = 1, + __QDISC_STATE_MISSED = 2, + __QDISC_STATE_DRAINING = 3, +}; + +enum qdisc_state2_t { + __QDISC_STATE2_RUNNING = 0, +}; + +struct qdisc_skb_cb { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + unsigned char data[20]; +}; + +struct tc_skb_cb { + struct qdisc_skb_cb qdisc_cb; + u32 drop_reason; + u16 zone; + u16 mru; + u8 post_ct: 1; + u8 post_ct_snat: 1; + u8 post_ct_dnat: 1; +}; + +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_meta; + void *data_end; +}; + +enum xdp_buff_flags { + XDP_FLAGS_HAS_FRAGS = 1, + XDP_FLAGS_FRAGS_PF_MEMALLOC = 2, +}; + +enum { + SCM_TSTAMP_SND = 0, + SCM_TSTAMP_SCHED = 1, + SCM_TSTAMP_ACK = 2, +}; + +enum sctp_msg_flags { + MSG_NOTIFICATION = 32768, +}; + +struct udp_tunnel_info { + short unsigned int type; + sa_family_t sa_family; + __be16 port; + u8 hw_priv; +}; + +struct udp_tunnel_nic_shared { + struct udp_tunnel_nic *udp_tunnel_nic_info; + struct list_head devices; +}; + +struct page_pool_params { + union { + struct { + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; + }; + struct page_pool_params_fast fast; + }; + union { + struct { + struct net_device *netdev; + unsigned int queue_idx; + unsigned int flags; + void (*init_callback)(netmem_ref, void *); + void *init_arg; + }; + struct page_pool_params_slow slow; + }; +}; + +struct page_pool_recycle_stats { + u64 cached; + u64 cache_full; + u64 ring; + u64 ring_full; + u64 released_refcnt; 
+}; + +struct rps_map { + unsigned int len; + struct callback_head rcu; + u16 cpus[0]; +}; + +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; + +struct rps_dev_flow_table { + unsigned int mask; + struct callback_head rcu; + struct rps_dev_flow flows[0]; +}; + +typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *); + +struct dev_kfree_skb_cb { + enum skb_drop_reason reason; +}; + +enum { + NAPI_F_PREFER_BUSY_POLL = 1, + NAPI_F_END_ON_RESCHED = 2, +}; + +struct netdev_adjacent { + struct net_device *dev; + netdevice_tracker dev_tracker; + bool master; + bool ignore; + u16 ref_nr; + void *private; + struct list_head list; + struct callback_head rcu; +}; + +enum { + ETHTOOL_A_STATS_GRP_UNSPEC = 0, + ETHTOOL_A_STATS_GRP_PAD = 1, + ETHTOOL_A_STATS_GRP_ID = 2, + ETHTOOL_A_STATS_GRP_SS_ID = 3, + ETHTOOL_A_STATS_GRP_STAT = 4, + ETHTOOL_A_STATS_GRP_HIST_RX = 5, + ETHTOOL_A_STATS_GRP_HIST_TX = 6, + ETHTOOL_A_STATS_GRP_HIST_BKT_LOW = 7, + ETHTOOL_A_STATS_GRP_HIST_BKT_HI = 8, + ETHTOOL_A_STATS_GRP_HIST_VAL = 9, + __ETHTOOL_A_STATS_GRP_CNT = 10, + ETHTOOL_A_STATS_GRP_MAX = 9, +}; + +typedef const char (* const ethnl_string_array_t)[32]; + +struct stats_req_info { + struct ethnl_req_info base; + long unsigned int stat_mask[1]; + enum ethtool_mac_stats_src src; +}; + +struct stats_reply_data { + struct ethnl_reply_data base; + union { + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + }; + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + } stats; + }; + const struct ethtool_rmon_hist_range *rmon_ranges; +}; + +struct seqcount_rwlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_rwlock seqcount_rwlock_t; + +enum nft_set_elem_flags { + NFT_SET_ELEM_INTERVAL_END = 1, + NFT_SET_ELEM_CATCHALL = 2, +}; + +struct nft_set_type { + const struct nft_set_ops ops; + u32 features; +}; + +struct nft_timeout { + u64 timeout; + u64 expiration; +}; + +struct nft_trans_gc { + struct list_head list; + struct net *net; + struct nft_set *set; + u32 seq; + u16 count; + struct nft_elem_priv *priv[256]; + struct callback_head rcu; +}; + +struct nftables_pernet { + struct list_head tables; + struct list_head commit_list; + struct list_head commit_set_list; + struct list_head binding_list; + struct list_head module_list; + struct list_head notify_list; + struct mutex commit_mutex; + u64 table_handle; + u64 tstamp; + unsigned int base_seq; + unsigned int gc_seq; + u8 validate_state; +}; + +struct nft_rbtree { + struct rb_root root; + rwlock_t lock; + seqcount_rwlock_t count; + long unsigned int last_gc; +}; + +struct nft_rbtree_elem { + struct nft_elem_priv priv; + struct rb_node node; + struct nft_set_ext ext; +}; + +struct inet_request_sock { + struct request_sock req; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u16 tstamp_ok: 1; + u16 sack_ok: 1; + u16 wscale_ok: 1; + u16 ecn_ok: 1; + u16 acked: 1; + u16 no_srccheck: 1; + u16 smc_ok: 1; + u32 ir_mark; + union { + struct ip_options_rcu *ireq_opt; + struct { + struct ipv6_txoptions *ipv6_opt; + struct sk_buff *pktopts; + }; + }; +}; + +union tcp_word_hdr { + struct tcphdr hdr; + __be32 words[5]; +}; + +struct tcp_request_sock_ops; + +struct tcp_request_sock { + struct inet_request_sock req; + const struct tcp_request_sock_ops *af_specific; + u64 snt_synack; + bool 
tfo_listener; + bool is_mptcp; + bool req_usec_ts; + u32 txhash; + u32 rcv_isn; + u32 snt_isn; + u32 ts_off; + u32 last_oow_ack_time; + u32 rcv_nxt; + u8 syn_tos; +}; + +enum tcp_synack_type { + TCP_SYNACK_NORMAL = 0, + TCP_SYNACK_FASTOPEN = 1, + TCP_SYNACK_COOKIE = 2, +}; + +struct tcp_request_sock_ops { + u16 mss_clamp; + __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *); + struct dst_entry * (*route_req)(const struct sock *, struct sk_buff *, struct flowi *, struct request_sock *, u32); + u32 (*init_seq)(const struct sk_buff *); + u32 (*init_ts_off)(const struct net *, const struct sk_buff *); + int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type, struct sk_buff *); +}; + +struct tcp_timewait_sock { + struct inet_timewait_sock tw_sk; + u32 tw_rcv_wnd; + u32 tw_ts_offset; + u32 tw_ts_recent; + u32 tw_last_oow_ack_time; + int tw_ts_recent_stamp; + u32 tw_tx_delay; +}; + +enum tcp_tw_status { + TCP_TW_SUCCESS = 0, + TCP_TW_RST = 1, + TCP_TW_ACK = 2, + TCP_TW_SYN = 3, +}; + +struct icmphdr { + __u8 type; + __u8 code; + __sum16 checksum; + union { + struct { + __be16 id; + __be16 sequence; + } echo; + __be32 gateway; + struct { + __be16 __unused; + __be16 mtu; + } frag; + __u8 reserved[4]; + } un; +}; + +struct rtmsg { + unsigned char rtm_family; + unsigned char rtm_dst_len; + unsigned char rtm_src_len; + unsigned char rtm_tos; + unsigned char rtm_table; + unsigned char rtm_protocol; + unsigned char rtm_scope; + unsigned char rtm_type; + unsigned int rtm_flags; +}; + +enum rt_class_t { + RT_TABLE_UNSPEC = 0, + RT_TABLE_COMPAT = 252, + RT_TABLE_DEFAULT = 253, + RT_TABLE_MAIN = 254, + RT_TABLE_LOCAL = 255, + RT_TABLE_MAX = 4294967295, +}; + +struct nl_info { + struct nlmsghdr *nlh; + struct net *nl_net; + u32 portid; + u8 skip_notify: 1; + u8 skip_notify_kernel: 1; +}; + +struct fib_notifier_info { + int family; + struct netlink_ext_ack *extack; +}; + +enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE = 0, + FIB_EVENT_ENTRY_APPEND = 1, + FIB_EVENT_ENTRY_ADD = 2, + FIB_EVENT_ENTRY_DEL = 3, + FIB_EVENT_RULE_ADD = 4, + FIB_EVENT_RULE_DEL = 5, + FIB_EVENT_NH_ADD = 6, + FIB_EVENT_NH_DEL = 7, + FIB_EVENT_VIF_ADD = 8, + FIB_EVENT_VIF_DEL = 9, +}; + +struct fib_dump_filter { + u32 table_id; + bool filter_set; + bool dump_routes; + bool dump_exceptions; + bool rtnl_held; + unsigned char protocol; + unsigned char rt_type; + unsigned int flags; + struct net_device *dev; +}; + +struct fib6_result { + struct fib6_nh *nh; + struct fib6_info *f6i; + u32 fib6_flags; + u8 fib6_type; + struct rt6_info *rt6; +}; + +struct fib6_gc_args { + int timeout; + int more; +}; + +enum fib6_walk_state { + FWS_L = 0, + FWS_R = 1, + FWS_C = 2, + FWS_U = 3, +}; + +struct fib6_walker { + struct list_head lh; + struct fib6_node *root; + struct fib6_node *node; + struct fib6_info *leaf; + enum fib6_walk_state state; + unsigned int skip; + unsigned int count; + unsigned int skip_in_node; + int (*func)(struct fib6_walker *); + void *args; +}; + +typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); + +struct fib6_entry_notifier_info { + struct fib_notifier_info info; + struct fib6_info *rt; + unsigned int nsiblings; +}; + +struct ipv6_route_iter { + struct seq_net_private p; + struct fib6_walker w; + loff_t skip; + struct fib6_table *tbl; + int sernum; +}; + +struct bpf_iter__ipv6_route { + union { + struct bpf_iter_meta *meta; + }; + union { + struct 
fib6_info *rt; + }; +}; + +struct rt6_rtnl_dump_arg { + struct sk_buff *skb; + struct netlink_callback *cb; + struct net *net; + struct fib_dump_filter filter; +}; + +struct fib6_cleaner { + struct fib6_walker w; + struct net *net; + int (*func)(struct fib6_info *, void *); + int sernum; + void *arg; + bool skip_notify; +}; + +enum { + FIB6_NO_SERNUM_CHANGE = 0, +}; + +struct fib6_dump_arg { + struct net *net; + struct notifier_block *nb; + struct netlink_ext_ack *extack; +}; + +struct fib6_nh_pcpu_arg { + struct fib6_info *from; + const struct fib6_table *table; +}; + +struct lookup_args { + int offset; + const struct in6_addr *addr; +}; + +struct return_address_data { + unsigned int level; + void *addr; +}; + +struct clone_args { + __u64 flags; + __u64 pidfd; + __u64 child_tid; + __u64 parent_tid; + __u64 exit_signal; + __u64 stack; + __u64 stack_size; + __u64 tls; + __u64 set_tid; + __u64 set_tid_size; + __u64 cgroup; +}; + +struct robust_list { + struct robust_list *next; +}; + +struct robust_list_head { + struct robust_list list; + long int futex_offset; + struct robust_list *list_op_pending; +}; + +enum mm_cid_state { + MM_CID_UNSET = 4294967295, + MM_CID_LAZY_PUT = 2147483648, +}; + +struct kernel_clone_args { + u64 flags; + int *pidfd; + int *child_tid; + int *parent_tid; + const char *name; + int exit_signal; + u32 kthread: 1; + u32 io_thread: 1; + u32 user_worker: 1; + u32 no_files: 1; + long unsigned int stack; + long unsigned int stack_size; + long unsigned int tls; + pid_t *set_tid; + size_t set_tid_size; + int cgroup; + int idle; + int (*fn)(void *); + void *fn_arg; + struct cgroup *cgrp; + struct css_set *cset; +}; + +struct multiprocess_signals { + sigset_t signal; + struct hlist_node node; +}; + +typedef int (*proc_visitor)(struct task_struct *, void *); + +struct mempolicy {}; + +struct fd_range { + unsigned int from; + unsigned int to; +}; + +enum { + FUTEX_STATE_OK = 0, + FUTEX_STATE_EXITING = 1, + FUTEX_STATE_DEAD = 2, +}; + +struct trace_event_raw_task_newtask { + struct trace_entry ent; + pid_t pid; + char comm[16]; + long unsigned int clone_flags; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_task_rename { + struct trace_entry ent; + pid_t pid; + char oldcomm[16]; + char newcomm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_data_offsets_task_newtask {}; + +struct trace_event_data_offsets_task_rename {}; + +typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, long unsigned int); + +typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *); + +struct vm_stack { + struct callback_head rcu; + struct vm_struct *stack_vm_area; +}; + +struct audit_context; + +struct trace_event_raw_timer_class { + struct trace_entry ent; + void *timer; + char __data[0]; +}; + +struct trace_event_raw_timer_start { + struct trace_entry ent; + void *timer; + void *function; + long unsigned int expires; + long unsigned int bucket_expiry; + long unsigned int now; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_timer_expire_entry { + struct trace_entry ent; + void *timer; + long unsigned int now; + void *function; + long unsigned int baseclk; + char __data[0]; +}; + +struct trace_event_raw_timer_base_idle { + struct trace_entry ent; + bool is_idle; + unsigned int cpu; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_init { + struct trace_entry ent; + void *hrtimer; + clockid_t clockid; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct 
trace_event_raw_hrtimer_start { + struct trace_entry ent; + void *hrtimer; + void *function; + s64 expires; + s64 softexpires; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_expire_entry { + struct trace_entry ent; + void *hrtimer; + s64 now; + void *function; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_class { + struct trace_entry ent; + void *hrtimer; + char __data[0]; +}; + +struct trace_event_raw_itimer_state { + struct trace_entry ent; + int which; + long long unsigned int expires; + long int value_sec; + long int value_nsec; + long int interval_sec; + long int interval_nsec; + char __data[0]; +}; + +struct trace_event_raw_itimer_expire { + struct trace_entry ent; + int which; + pid_t pid; + long long unsigned int now; + char __data[0]; +}; + +struct trace_event_data_offsets_timer_class {}; + +struct trace_event_data_offsets_timer_start {}; + +struct trace_event_data_offsets_timer_expire_entry {}; + +struct trace_event_data_offsets_timer_base_idle {}; + +struct trace_event_data_offsets_hrtimer_init {}; + +struct trace_event_data_offsets_hrtimer_start {}; + +struct trace_event_data_offsets_hrtimer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_class {}; + +struct trace_event_data_offsets_itimer_state {}; + +struct trace_event_data_offsets_itimer_expire {}; + +typedef void (*btf_trace_timer_init)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_start)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_entry)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_cancel)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_base_idle)(void *, bool, unsigned int); + +typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *); + +typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *); + +typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *); + +typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 * const, long long unsigned int); + +typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, long long unsigned int); + +struct timer_base { + raw_spinlock_t lock; + struct timer_list *running_timer; + long unsigned int clk; + long unsigned int next_expiry; + unsigned int cpu; + bool next_expiry_recalc; + bool is_idle; + bool timers_pending; + long unsigned int pending_map[9]; + struct hlist_head vectors[576]; + long: 64; + long: 64; +}; + +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + 
PERF_SAMPLE_WEIGHT_STRUCT = 16777216, + PERF_SAMPLE_MAX = 33554432, +}; + +enum trace_type { + __TRACE_FIRST_TYPE = 0, + TRACE_FN = 1, + TRACE_CTX = 2, + TRACE_WAKE = 3, + TRACE_STACK = 4, + TRACE_PRINT = 5, + TRACE_BPRINT = 6, + TRACE_MMIO_RW = 7, + TRACE_MMIO_MAP = 8, + TRACE_BRANCH = 9, + TRACE_GRAPH_RET = 10, + TRACE_GRAPH_ENT = 11, + TRACE_GRAPH_RETADDR_ENT = 12, + TRACE_USER_STACK = 13, + TRACE_BLK = 14, + TRACE_BPUTS = 15, + TRACE_HWLAT = 16, + TRACE_OSNOISE = 17, + TRACE_TIMERLAT = 18, + TRACE_RAW_DATA = 19, + TRACE_FUNC_REPEATS = 20, + __TRACE_LAST_TYPE = 21, +}; + +struct ftrace_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; +}; + +typedef long unsigned int perf_trace_t[1024]; + +struct bpf_iter_seq_map_info { + u32 map_id; +}; + +struct bpf_iter__bpf_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; +}; + +struct bpf_dynptr { + __u64 __opaque[2]; +}; + +struct btf_id_dtor_kfunc { + u32 btf_id; + u32 kfunc_btf_id; +}; + +struct bpf_dynptr_kern { + void *data; + u32 size; + u32 offset; +}; + +struct bpf_crypto_type { + void * (*alloc_tfm)(const char *); + void (*free_tfm)(void *); + int (*has_algo)(const char *); + int (*setkey)(void *, const u8 *, unsigned int); + int (*setauthsize)(void *, unsigned int); + int (*encrypt)(void *, const u8 *, u8 *, unsigned int, u8 *); + int (*decrypt)(void *, const u8 *, u8 *, unsigned int, u8 *); + unsigned int (*ivsize)(void *); + unsigned int (*statesize)(void *); + u32 (*get_flags)(void *); + struct module *owner; + char name[14]; +}; + +struct bpf_crypto_type_list { + const struct bpf_crypto_type *type; + struct list_head list; +}; + +struct bpf_crypto_params { + char type[14]; + u8 reserved[2]; + char algo[128]; + u8 key[256]; + u32 key_len; + u32 authsize; +}; + +struct bpf_crypto_ctx { + const struct bpf_crypto_type *type; + void *tfm; + u32 siv_len; + struct callback_head rcu; + refcount_t usage; +}; + +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; + struct rb_node rb; + long unsigned int rb_subtree_last; +}; + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *, struct rb_node *); + void (*copy)(struct rb_node *, struct rb_node *); + void (*rotate)(struct rb_node *, struct rb_node *); +}; + +struct vmap_area { + long unsigned int va_start; + long unsigned int va_end; + struct rb_node rb_node; + struct list_head list; + union { + long unsigned int subtree_max_size; + struct vm_struct *vm; + }; + long unsigned int flags; +}; + +enum memcg_stat_item { + MEMCG_SWAP = 45, + MEMCG_SOCK = 46, + MEMCG_PERCPU_B = 47, + MEMCG_VMALLOC = 48, + MEMCG_KMEM = 49, + MEMCG_ZSWAP_B = 50, + MEMCG_ZSWAPPED = 51, + MEMCG_NR_STAT = 52, +}; + +struct trace_event_raw_alloc_vmap_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int size; + long unsigned int align; + long unsigned int vstart; + long unsigned int vend; + int failed; + char __data[0]; +}; + +struct trace_event_raw_purge_vmap_area_lazy { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + unsigned int npurged; + char __data[0]; +}; + +struct trace_event_raw_free_vmap_area_noflush { + struct trace_entry ent; + long unsigned int va_start; + long unsigned int nr_lazy; + long unsigned int nr_lazy_max; + char __data[0]; +}; + +struct trace_event_data_offsets_alloc_vmap_area {}; + +struct trace_event_data_offsets_purge_vmap_area_lazy {}; + +struct 
trace_event_data_offsets_free_vmap_area_noflush {}; + +typedef void (*btf_trace_alloc_vmap_area)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_purge_vmap_area_lazy)(void *, long unsigned int, long unsigned int, unsigned int); + +typedef void (*btf_trace_free_vmap_area_noflush)(void *, long unsigned int, long unsigned int, long unsigned int); + +struct vfree_deferred { + struct llist_head list; + struct work_struct wq; +}; + +struct rb_list { + struct rb_root root; + struct list_head head; + spinlock_t lock; +}; + +struct vmap_pool { + struct list_head head; + long unsigned int len; +}; + +struct vmap_node { + struct vmap_pool pool[256]; + spinlock_t pool_lock; + bool skip_populate; + struct rb_list busy; + struct rb_list lazy; + struct list_head purge_list; + struct work_struct purge_work; + long unsigned int nr_purged; +}; + +enum fit_type { + NOTHING_FIT = 0, + FL_FIT_TYPE = 1, + LE_FIT_TYPE = 2, + RE_FIT_TYPE = 3, + NE_FIT_TYPE = 4, +}; + +struct vmap_block_queue { + spinlock_t lock; + struct list_head free; + struct xarray vmap_blocks; +}; + +struct vmap_block { + spinlock_t lock; + struct vmap_area *va; + long unsigned int free; + long unsigned int dirty; + long unsigned int used_map[16]; + long unsigned int dirty_min; + long unsigned int dirty_max; + struct list_head free_list; + struct callback_head callback_head; + struct list_head purge; + unsigned int cpu; +}; + +struct inotify_event { + __s32 wd; + __u32 mask; + __u32 cookie; + __u32 len; + char name[0]; +}; + +struct inotify_event_info { + struct fsnotify_event fse; + u32 mask; + int wd; + u32 sync_cookie; + int name_len; + char name[0]; +}; + +typedef u32 nlink_t; + +typedef int (*proc_write_t)(struct file *, char *, size_t); + +struct proc_dir_entry { + atomic_t in_use; + refcount_t refcnt; + struct list_head pde_openers; + spinlock_t pde_unload_lock; + struct completion *pde_unload_completion; + const struct inode_operations *proc_iops; + union { + const struct proc_ops *proc_ops; + const struct file_operations *proc_dir_ops; + }; + const struct dentry_operations *proc_dops; + union { + const struct seq_operations *seq_ops; + int (*single_show)(struct seq_file *, void *); + }; + proc_write_t write; + void *data; + unsigned int state_size; + unsigned int low_ino; + nlink_t nlink; + kuid_t uid; + kgid_t gid; + loff_t size; + struct proc_dir_entry *parent; + struct rb_root subdir; + struct rb_node subdir_node; + char *name; + umode_t mode; + u8 flags; + u8 namelen; + char inline_name[0]; +}; + +typedef int get_block_t(struct inode *, sector_t, struct buffer_head *, int); + +enum hash_algo { + HASH_ALGO_MD4 = 0, + HASH_ALGO_MD5 = 1, + HASH_ALGO_SHA1 = 2, + HASH_ALGO_RIPE_MD_160 = 3, + HASH_ALGO_SHA256 = 4, + HASH_ALGO_SHA384 = 5, + HASH_ALGO_SHA512 = 6, + HASH_ALGO_SHA224 = 7, + HASH_ALGO_RIPE_MD_128 = 8, + HASH_ALGO_RIPE_MD_256 = 9, + HASH_ALGO_RIPE_MD_320 = 10, + HASH_ALGO_WP_256 = 11, + HASH_ALGO_WP_384 = 12, + HASH_ALGO_WP_512 = 13, + HASH_ALGO_TGR_128 = 14, + HASH_ALGO_TGR_160 = 15, + HASH_ALGO_TGR_192 = 16, + HASH_ALGO_SM3_256 = 17, + HASH_ALGO_STREEBOG_256 = 18, + HASH_ALGO_STREEBOG_512 = 19, + HASH_ALGO_SHA3_256 = 20, + HASH_ALGO_SHA3_384 = 21, + HASH_ALGO_SHA3_512 = 22, + HASH_ALGO__LAST = 23, +}; + +struct ext4_dir_entry_hash { + __le32 hash; + __le32 minor_hash; +}; + +struct ext4_dir_entry_2 { + __le32 inode; + __le16 rec_len; + __u8 name_len; + __u8 file_type; + char name[255]; +}; + +struct dx_hash_info { + u32 hash; + u32 
minor_hash; + int hash_version; + u32 *seed; +}; + +struct ext4_filename { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + struct dx_hash_info hinfo; +}; + +struct fname; + +struct dir_private_info { + struct rb_root root; + struct rb_node *curr_node; + struct fname *extra_fname; + loff_t last_pos; + __u32 curr_hash; + __u32 curr_minor_hash; + __u32 next_hash; + u64 cookie; + bool initialized; +}; + +struct iso_directory_record { + __u8 length[1]; + __u8 ext_attr_length[1]; + __u8 extent[8]; + __u8 size[8]; + __u8 date[7]; + __u8 flags[1]; + __u8 file_unit_size[1]; + __u8 interleave[1]; + __u8 volume_sequence_number[4]; + __u8 name_len[1]; + char name[0]; +}; + +enum isofs_file_format { + isofs_file_normal = 0, + isofs_file_sparse = 1, + isofs_file_compressed = 2, +}; + +struct iso_inode_info { + long unsigned int i_iget5_block; + long unsigned int i_iget5_offset; + unsigned int i_first_extent; + unsigned char i_file_format; + unsigned char i_format_parm[3]; + long unsigned int i_next_section_block; + long unsigned int i_next_section_offset; + off_t i_section_size; + struct inode vfs_inode; +}; + +struct isofs_sb_info { + long unsigned int s_ninodes; + long unsigned int s_nzones; + long unsigned int s_firstdatazone; + long unsigned int s_log_zone_size; + long unsigned int s_max_size; + int s_rock_offset; + s32 s_sbsector; + unsigned char s_joliet_level; + unsigned char s_mapping; + unsigned char s_check; + unsigned char s_session; + unsigned int s_high_sierra: 1; + unsigned int s_rock: 2; + unsigned int s_cruft: 1; + unsigned int s_nocompress: 1; + unsigned int s_hide: 1; + unsigned int s_showassoc: 1; + unsigned int s_overriderockperm: 1; + unsigned int s_uid_set: 1; + unsigned int s_gid_set: 1; + umode_t s_fmode; + umode_t s_dmode; + kgid_t s_gid; + kuid_t s_uid; + struct nls_table *s_nls_iocharset; +}; + +struct SU_SP_s { + __u8 magic[2]; + __u8 skip; +}; + +struct SU_CE_s { + __u8 extent[8]; + __u8 offset[8]; + __u8 size[8]; +}; + +struct SU_ER_s { + __u8 len_id; + __u8 len_des; + __u8 len_src; + __u8 ext_ver; + __u8 data[0]; +}; + +struct RR_RR_s { + __u8 flags[1]; +}; + +struct RR_PX_s { + __u8 mode[8]; + __u8 n_links[8]; + __u8 uid[8]; + __u8 gid[8]; +}; + +struct RR_PN_s { + __u8 dev_high[8]; + __u8 dev_low[8]; +}; + +struct SL_component { + __u8 flags; + __u8 len; + __u8 text[0]; +}; + +struct RR_SL_s { + __u8 flags; + struct SL_component link; +}; + +struct RR_NM_s { + __u8 flags; + char name[0]; +}; + +struct RR_CL_s { + __u8 location[8]; +}; + +struct RR_PL_s { + __u8 location[8]; +}; + +struct stamp { + __u8 time[7]; +}; + +struct RR_TF_s { + __u8 flags; + struct stamp times[0]; +}; + +struct RR_ZF_s { + __u8 algorithm[2]; + __u8 parms[2]; + __u8 real_size[8]; +}; + +struct rock_ridge { + __u8 signature[2]; + __u8 len; + __u8 version; + union { + struct SU_SP_s SP; + struct SU_CE_s CE; + struct SU_ER_s ER; + struct RR_RR_s RR; + struct RR_PX_s PX; + struct RR_PN_s PN; + struct RR_SL_s SL; + struct RR_NM_s NM; + struct RR_CL_s CL; + struct RR_PL_s PL; + struct RR_TF_s TF; + struct RR_ZF_s ZF; + } u; +}; + +struct rock_state { + void *buffer; + unsigned char *chr; + int len; + int cont_size; + int cont_extent; + int cont_offset; + int cont_loops; + struct inode *inode; +}; + +struct fuse_open_out { + uint64_t fh; + uint32_t open_flags; + int32_t backing_id; +}; + +struct fuse_release_in { + uint64_t fh; + uint32_t flags; + uint32_t release_flags; + uint64_t lock_owner; +}; + +union fuse_file_args; + +struct fuse_file { + struct fuse_mount *fm; + union 
fuse_file_args *args; + u64 kh; + u64 fh; + u64 nodeid; + refcount_t count; + u32 open_flags; + struct list_head write_entry; + struct { + loff_t pos; + loff_t cache_off; + u64 version; + } readdir; + struct rb_node polled_node; + wait_queue_head_t poll_wait; + enum { + IOM_NONE = 0, + IOM_CACHED = 1, + IOM_UNCACHED = 2, + } iomode; + struct file *passthrough; + const struct cred *cred; + bool flock: 1; +}; + +struct fuse_release_args { + struct fuse_args args; + struct fuse_release_in inarg; + struct inode *inode; +}; + +union fuse_file_args { + struct fuse_open_out open_outarg; + struct fuse_release_args release_args; +}; + +struct folio_iter { + struct folio *folio; + size_t offset; + size_t length; + struct folio *_next; + size_t _seg_count; + int _i; +}; + +struct btrfs_key_ptr { + struct btrfs_disk_key key; + __le64 blockptr; + __le64 generation; +} __attribute__((packed)); + +enum { + EXTENT_BUFFER_UPTODATE = 0, + EXTENT_BUFFER_DIRTY = 1, + EXTENT_BUFFER_CORRUPT = 2, + EXTENT_BUFFER_READAHEAD = 3, + EXTENT_BUFFER_TREE_REF = 4, + EXTENT_BUFFER_STALE = 5, + EXTENT_BUFFER_WRITEBACK = 6, + EXTENT_BUFFER_READ_ERR = 7, + EXTENT_BUFFER_UNMAPPED = 8, + EXTENT_BUFFER_IN_TREE = 9, + EXTENT_BUFFER_WRITE_ERR = 10, + EXTENT_BUFFER_ZONED_ZEROOUT = 11, + EXTENT_BUFFER_READING = 12, +}; + +struct btrfs_eb_write_context { + struct writeback_control *wbc; + struct extent_buffer *eb; + struct btrfs_block_group *zoned_bg; +}; + +enum { + btrfs_bitmap_nr_uptodate = 0, + btrfs_bitmap_nr_dirty = 1, + btrfs_bitmap_nr_writeback = 2, + btrfs_bitmap_nr_ordered = 3, + btrfs_bitmap_nr_checked = 4, + btrfs_bitmap_nr_locked = 5, + btrfs_bitmap_nr_max = 6, +}; + +enum btrfs_subpage_type { + BTRFS_SUBPAGE_METADATA = 0, + BTRFS_SUBPAGE_DATA = 1, +}; + +enum btrfs_map_op { + BTRFS_MAP_READ = 0, + BTRFS_MAP_WRITE = 1, + BTRFS_MAP_GET_READ_MIRRORS = 2, +}; + +struct btrfs_bio_ctrl { + struct btrfs_bio *bbio; + enum btrfs_compression_type compress_type; + u32 len_to_oe_boundary; + blk_opf_t opf; + btrfs_bio_end_io_t end_io_func; + struct writeback_control *wbc; + long unsigned int submit_bitmap; +}; + +struct fsverity_info; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +struct shash_instance { + void (*free)(struct shash_instance *); + union { + struct { + char head[104]; + struct crypto_instance base; + } s; + struct shash_alg alg; + }; +}; + +struct ccm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn mac; +}; + +struct crypto_ccm_ctx { + struct crypto_ahash *mac; + struct crypto_skcipher *ctr; +}; + +struct crypto_rfc4309_ctx { + struct crypto_aead *child; + u8 nonce[3]; +}; + +struct crypto_rfc4309_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct aead_request subreq; +}; + +struct crypto_ccm_req_priv_ctx { + u8 odata[16]; + u8 idata[16]; + u8 auth_tag[16]; + u32 flags; + struct scatterlist src[3]; + struct scatterlist dst[3]; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + }; +}; + +struct cbcmac_tfm_ctx { + struct crypto_cipher *child; +}; + +struct cbcmac_desc_ctx { + unsigned int len; + u8 dg[0]; +}; + +struct sg_append_table { + struct sg_table sgt; + struct scatterlist *prv; + unsigned int total_nents; +}; + +typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); + +typedef void sg_free_fn(struct scatterlist *, unsigned int); + +struct sg_page_iter { + struct scatterlist *sg; + unsigned int sg_pgoffset; + unsigned int __nents; + int __pg_advance; +}; + +struct sg_dma_page_iter { + struct sg_page_iter 
base; +}; + +struct sg_mapping_iter { + struct page *page; + void *addr; + size_t length; + size_t consumed; + struct sg_page_iter piter; + unsigned int __offset; + unsigned int __remaining; + unsigned int __flags; +}; + +typedef unsigned int iov_iter_extraction_t; + +typedef enum { + ZSTD_d_windowLogMax = 100, + ZSTD_d_experimentalParam1 = 1000, + ZSTD_d_experimentalParam2 = 1001, + ZSTD_d_experimentalParam3 = 1002, + ZSTD_d_experimentalParam4 = 1003, +} ZSTD_dParameter; + +typedef enum { + ZSTDnit_frameHeader = 0, + ZSTDnit_blockHeader = 1, + ZSTDnit_block = 2, + ZSTDnit_lastBlock = 3, + ZSTDnit_checksum = 4, + ZSTDnit_skippableFrame = 5, +} ZSTD_nextInputType_e; + +typedef struct { + size_t compressedSize; + long long unsigned int decompressedBound; +} ZSTD_frameSizeInfo; + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +typedef enum { + not_streaming = 0, + is_streaming = 1, +} streaming_operation; + +enum asn1_method { + ASN1_PRIM = 0, + ASN1_CONS = 1, +}; + +enum asn1_tag { + ASN1_EOC = 0, + ASN1_BOOL = 1, + ASN1_INT = 2, + ASN1_BTS = 3, + ASN1_OTS = 4, + ASN1_NULL = 5, + ASN1_OID = 6, + ASN1_ODE = 7, + ASN1_EXT = 8, + ASN1_REAL = 9, + ASN1_ENUM = 10, + ASN1_EPDV = 11, + ASN1_UTF8STR = 12, + ASN1_RELOID = 13, + ASN1_SEQ = 16, + ASN1_SET = 17, + ASN1_NUMSTR = 18, + ASN1_PRNSTR = 19, + ASN1_TEXSTR = 20, + ASN1_VIDSTR = 21, + ASN1_IA5STR = 22, + ASN1_UNITIM = 23, + ASN1_GENTIM = 24, + ASN1_GRASTR = 25, + ASN1_VISSTR = 26, + ASN1_GENSTR = 27, + ASN1_UNISTR = 28, + ASN1_CHRSTR = 29, + ASN1_BMPSTR = 30, + ASN1_LONG_TAG = 31, +}; + +typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); + +struct asn1_decoder { + const unsigned char *machine; + size_t machlen; + const asn1_action_t *actions; +}; + +enum asn1_opcode { + ASN1_OP_MATCH = 0, + ASN1_OP_MATCH_OR_SKIP = 1, + ASN1_OP_MATCH_ACT = 2, + ASN1_OP_MATCH_ACT_OR_SKIP = 3, + ASN1_OP_MATCH_JUMP = 4, + ASN1_OP_MATCH_JUMP_OR_SKIP = 5, + ASN1_OP_MATCH_ANY = 8, + ASN1_OP_MATCH_ANY_OR_SKIP = 9, + ASN1_OP_MATCH_ANY_ACT = 10, + ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, + ASN1_OP_COND_MATCH_OR_SKIP = 17, + ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, + ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, + ASN1_OP_COND_MATCH_ANY = 24, + ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, + ASN1_OP_COND_MATCH_ANY_ACT = 26, + ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, + ASN1_OP_COND_FAIL = 28, + ASN1_OP_COMPLETE = 29, + ASN1_OP_ACT = 30, + ASN1_OP_MAYBE_ACT = 31, + ASN1_OP_END_SEQ = 32, + ASN1_OP_END_SET = 33, + ASN1_OP_END_SEQ_OF = 34, + ASN1_OP_END_SET_OF = 35, + ASN1_OP_END_SEQ_ACT = 36, + ASN1_OP_END_SET_ACT = 37, + ASN1_OP_END_SEQ_OF_ACT = 38, + ASN1_OP_END_SET_OF_ACT = 39, + ASN1_OP_RETURN = 40, + ASN1_OP__NR = 41, +}; + +typedef struct { + raw_spinlock_t *lock; +} class_raw_spinlock_t; + +typedef struct { + raw_spinlock_t *lock; +} class_raw_spinlock_irq_t; + +enum { + MEMREMAP_WB = 1, + MEMREMAP_WT = 2, + MEMREMAP_WC = 4, + MEMREMAP_ENC = 8, + MEMREMAP_DEC = 16, +}; + +struct rdists { + struct { + raw_spinlock_t rd_lock; + void *rd_base; + struct page *pend_page; + phys_addr_t phys_base; + u64 flags; + cpumask_t *vpe_table_mask; + void *vpe_l1_base; + } *rdist; + phys_addr_t prop_table_pa; + void *prop_table_va; + u64 flags; + u32 gicd_typer; + u32 gicd_typer2; + int cpuhp_memreserve_state; + bool has_vlpis; + bool has_rvpeid; + bool has_direct_lpi; + bool has_vpend_valid_dirty; +}; + +enum its_vcpu_info_cmd_type { + MAP_VLPI = 0, + GET_VLPI = 1, + PROP_UPDATE_VLPI = 2, + PROP_UPDATE_AND_INV_VLPI = 3, + 
SCHEDULE_VPE = 4, + DESCHEDULE_VPE = 5, + COMMIT_VPE = 6, + INVALL_VPE = 7, + PROP_UPDATE_VSGI = 8, +}; + +struct its_cmd_info { + enum its_vcpu_info_cmd_type cmd_type; + union { + struct its_vlpi_map *map; + u8 config; + bool req_db; + struct { + bool g0en; + bool g1en; + }; + struct { + u8 priority; + bool group; + }; + }; +}; + +struct gic_quirk { + const char *desc; + const char *compatible; + const char *property; + bool (*init)(void *); + u32 iidr; + u32 mask; +}; + +struct its_collection___2 { + u64 target_address; + u16 col_id; +}; + +struct its_baser { + void *base; + u64 val; + u32 order; + u32 psz; +}; + +struct its_cmd_block; + +struct its_device___2; + +struct its_node { + raw_spinlock_t lock; + struct mutex dev_alloc_lock; + struct list_head entry; + void *base; + void *sgir_base; + phys_addr_t phys_base; + struct its_cmd_block *cmd_base; + struct its_cmd_block *cmd_write; + struct its_baser tables[8]; + struct its_collection___2 *collections; + struct fwnode_handle *fwnode_handle; + u64 (*get_msi_base)(struct its_device___2 *); + u64 typer; + u64 cbaser_save; + u32 ctlr_save; + u32 mpidr; + struct list_head its_device_list; + u64 flags; + long unsigned int list_nr; + int numa_node; + unsigned int msi_domain_flags; + u32 pre_its_base; + int vlpi_redist_offset; +}; + +struct its_cmd_block { + union { + u64 raw_cmd[4]; + __le64 raw_cmd_le[4]; + }; +}; + +struct event_lpi_map { + long unsigned int *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; + raw_spinlock_t vlpi_lock; + struct its_vm *vm; + struct its_vlpi_map *vlpi_maps; + int nr_vlpis; +}; + +struct its_device___2 { + struct list_head entry; + struct its_node *its; + struct event_lpi_map event_map; + void *itt; + u32 itt_sz; + u32 nr_ites; + u32 device_id; + bool shared; +}; + +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +struct its_cmd_desc { + union { + struct { + struct its_device___2 *dev; + u32 event_id; + } its_inv_cmd; + struct { + struct its_device___2 *dev; + u32 event_id; + } its_clear_cmd; + struct { + struct its_device___2 *dev; + u32 event_id; + } its_int_cmd; + struct { + struct its_device___2 *dev; + int valid; + } its_mapd_cmd; + struct { + struct its_collection___2 *col; + int valid; + } its_mapc_cmd; + struct { + struct its_device___2 *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + struct { + struct its_device___2 *dev; + struct its_collection___2 *col; + u32 event_id; + } its_movi_cmd; + struct { + struct its_device___2 *dev; + u32 event_id; + } its_discard_cmd; + struct { + struct its_collection___2 *col; + } its_invall_cmd; + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + struct { + struct its_vpe *vpe; + struct its_collection___2 *col; + bool valid; + } its_vmapp_cmd; + struct { + struct its_vpe *vpe; + struct its_device___2 *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + struct { + struct its_vpe *vpe; + struct its_device___2 *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + struct { + struct its_vpe *vpe; + struct its_collection___2 *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + struct { + struct its_vpe *vpe; + } its_invdb_cmd; + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; + }; +}; + +typedef struct its_collection___2 * (*its_cmd_builder_t)(struct its_node *, struct its_cmd_block *, struct its_cmd_desc *); + +typedef struct its_vpe * (*its_cmd_vbuilder_t)(struct its_node *, struct its_cmd_block *, struct 
its_cmd_desc *); + +struct lpi_range { + struct list_head entry; + u32 base_id; + u32 span; +}; + +typedef __u16 __virtio16; + +typedef __u32 __virtio32; + +typedef __u64 __virtio64; + +struct vring_desc { + __virtio64 addr; + __virtio32 len; + __virtio16 flags; + __virtio16 next; +}; + +struct vring_avail { + __virtio16 flags; + __virtio16 idx; + __virtio16 ring[0]; +}; + +struct vring_used_elem { + __virtio32 id; + __virtio32 len; +}; + +typedef struct vring_used_elem vring_used_elem_t; + +struct vring_used { + __virtio16 flags; + __virtio16 idx; + vring_used_elem_t ring[0]; +}; + +typedef struct vring_desc vring_desc_t; + +typedef struct vring_avail vring_avail_t; + +typedef struct vring_used vring_used_t; + +struct vring { + unsigned int num; + vring_desc_t *desc; + vring_avail_t *avail; + vring_used_t *used; +}; + +struct vring_packed_desc_event { + __le16 off_wrap; + __le16 flags; +}; + +struct vring_packed_desc { + __le64 addr; + __le32 len; + __le16 id; + __le16 flags; +}; + +struct vring_desc_state_split { + void *data; + struct vring_desc *indir_desc; +}; + +struct vring_desc_state_packed { + void *data; + struct vring_packed_desc *indir_desc; + u16 num; + u16 last; +}; + +struct vring_desc_extra { + dma_addr_t addr; + u32 len; + u16 flags; + u16 next; +}; + +struct vring_virtqueue_split { + struct vring vring; + u16 avail_flags_shadow; + u16 avail_idx_shadow; + struct vring_desc_state_split *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + u32 vring_align; + bool may_reduce_num; +}; + +struct vring_virtqueue_packed { + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + bool avail_wrap_counter; + u16 avail_used_flags; + u16 next_avail_idx; + u16 event_flags_shadow; + struct vring_desc_state_packed *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; +}; + +struct vring_virtqueue { + struct virtqueue vq; + bool packed_ring; + bool use_dma_api; + bool weak_barriers; + bool broken; + bool indirect; + bool event; + unsigned int free_head; + unsigned int num_added; + u16 last_used_idx; + bool event_triggered; + union { + struct vring_virtqueue_split split; + struct vring_virtqueue_packed packed; + }; + bool (*notify)(struct virtqueue *); + bool we_own_ring; + struct device *dma_dev; +}; + +struct attribute_container { + struct list_head node; + struct klist containers; + struct class *class; + const struct attribute_group *grp; + struct device_attribute **attrs; + int (*match)(struct attribute_container *, struct device *); + long unsigned int flags; +}; + +struct driver_private { + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + struct module_kobject *mkobj; + struct device_driver *driver; +}; + +struct device_private { + struct klist klist_children; + struct klist_node knode_parent; + struct klist_node knode_driver; + struct klist_node knode_bus; + struct klist_node knode_class; + struct list_head deferred_probe; + const struct device_driver *async_driver; + char *deferred_probe_reason; + struct device *device; + u8 dead: 1; +}; + +struct internal_container { + struct klist_node node; + struct attribute_container *cont; + struct device classdev; +}; + +struct dma_fence_unwrap { + struct dma_fence *chain; + struct dma_fence 
*array; + unsigned int index; +}; + +struct sync_file { + struct file *file; + char user_name[32]; + struct list_head sync_file_list; + wait_queue_head_t wq; + long unsigned int flags; + struct dma_fence *fence; + struct dma_fence_cb cb; +}; + +struct sync_merge_data { + char name[32]; + __s32 fd2; + __s32 fence; + __u32 flags; + __u32 pad; +}; + +struct sync_fence_info { + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u32 flags; + __u64 timestamp_ns; +}; + +struct sync_file_info { + char name[32]; + __s32 status; + __u32 flags; + __u32 num_fences; + __u32 pad; + __u64 sync_fence_info; +}; + +struct sync_set_deadline { + __u64 deadline_ns; + __u64 pad; +}; + +enum { + BLK_MQ_REQ_NOWAIT = 1, + BLK_MQ_REQ_RESERVED = 2, + BLK_MQ_REQ_PM = 4, +}; + +struct nvme_user_io { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; +}; + +struct nvme_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result; +}; + +struct nvme_passthru_cmd64 { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + union { + __u32 data_len; + __u32 vec_cnt; + }; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; + __u64 result; +}; + +struct nvme_uring_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; +}; + +enum nvme_subsys_type { + NVME_NQN_DISC = 1, + NVME_NQN_NVME = 2, + NVME_NQN_CURR = 3, +}; + +enum nvme_ctrl_type { + NVME_CTRL_IO = 1, + NVME_CTRL_DISC = 2, + NVME_CTRL_ADMIN = 3, +}; + +enum nvme_dctype { + NVME_DCTYPE_NOT_REPORTED = 0, + NVME_DCTYPE_DDC = 1, + NVME_DCTYPE_CDC = 2, +}; + +struct nvme_id_power_state { + __le16 max_power; + __u8 rsvd2; + __u8 flags; + __le32 entry_lat; + __le32 exit_lat; + __u8 read_tput; + __u8 read_lat; + __u8 write_tput; + __u8 write_lat; + __le16 idle_power; + __u8 idle_scale; + __u8 rsvd19; + __le16 active_power; + __u8 active_work_scale; + __u8 rsvd23[9]; +}; + +enum { + NVME_CTRL_CMIC_MULTI_PORT = 1, + NVME_CTRL_CMIC_MULTI_CTRL = 2, + NVME_CTRL_CMIC_ANA = 8, + NVME_CTRL_ONCS_COMPARE = 1, + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 2, + NVME_CTRL_ONCS_DSM = 4, + NVME_CTRL_ONCS_WRITE_ZEROES = 8, + NVME_CTRL_ONCS_RESERVATIONS = 32, + NVME_CTRL_ONCS_TIMESTAMP = 64, + NVME_CTRL_VWC_PRESENT = 1, + NVME_CTRL_OACS_SEC_SUPP = 1, + NVME_CTRL_OACS_NS_MNGT_SUPP = 8, + NVME_CTRL_OACS_DIRECTIVES = 32, + NVME_CTRL_OACS_DBBUF_SUPP = 256, + NVME_CTRL_LPA_CMD_EFFECTS_LOG = 2, + NVME_CTRL_CTRATT_128_ID = 1, + NVME_CTRL_CTRATT_NON_OP_PSP = 2, + NVME_CTRL_CTRATT_NVM_SETS = 4, + NVME_CTRL_CTRATT_READ_RECV_LVLS = 8, + NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 16, + NVME_CTRL_CTRATT_PREDICTABLE_LAT = 32, + NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 128, + NVME_CTRL_CTRATT_UUID_LIST = 512, + NVME_CTRL_SGLS_BYTE_ALIGNED = 1, + NVME_CTRL_SGLS_DWORD_ALIGNED = 2, + NVME_CTRL_SGLS_KSDBDS = 4, + NVME_CTRL_SGLS_MSDS = 524288, + 
NVME_CTRL_SGLS_SAOS = 1048576, +}; + +enum { + NVME_ID_CNS_NS = 0, + NVME_ID_CNS_CTRL = 1, + NVME_ID_CNS_NS_ACTIVE_LIST = 2, + NVME_ID_CNS_NS_DESC_LIST = 3, + NVME_ID_CNS_CS_NS = 5, + NVME_ID_CNS_CS_CTRL = 6, + NVME_ID_CNS_NS_ACTIVE_LIST_CS = 7, + NVME_ID_CNS_NS_CS_INDEP = 8, + NVME_ID_CNS_NS_PRESENT_LIST = 16, + NVME_ID_CNS_NS_PRESENT = 17, + NVME_ID_CNS_CTRL_NS_LIST = 18, + NVME_ID_CNS_CTRL_LIST = 19, + NVME_ID_CNS_SCNDRY_CTRL_LIST = 21, + NVME_ID_CNS_NS_GRANULARITY = 22, + NVME_ID_CNS_UUID_LIST = 23, + NVME_ID_CNS_ENDGRP_LIST = 25, +}; + +enum { + NVME_CMD_EFFECTS_CSUPP = 1, + NVME_CMD_EFFECTS_LBCC = 2, + NVME_CMD_EFFECTS_NCC = 4, + NVME_CMD_EFFECTS_NIC = 8, + NVME_CMD_EFFECTS_CCC = 16, + NVME_CMD_EFFECTS_CSER_MASK = 49152, + NVME_CMD_EFFECTS_CSE_MASK = 458752, + NVME_CMD_EFFECTS_UUID_SEL = 524288, + NVME_CMD_EFFECTS_SCOPE_MASK = 4293918720, +}; + +struct nvme_effects_log { + __le32 acs[256]; + __le32 iocs[256]; + __u8 resv[2048]; +}; + +struct nvme_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +struct nvme_keyed_sgl_desc { + __le64 addr; + __u8 length[3]; + __u8 key[4]; + __u8 type; +}; + +union nvme_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct nvme_sgl_desc sgl; + struct nvme_keyed_sgl_desc ksgl; +}; + +struct nvme_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + union { + struct { + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; + }; + struct { + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; + } cdws; + }; +}; + +struct nvme_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2; + __le32 cdw3; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 lbat; + __le16 lbatm; +}; + +enum { + NVME_RW_LR = 32768, + NVME_RW_FUA = 16384, + NVME_RW_APPEND_PIREMAP = 512, + NVME_RW_DSM_FREQ_UNSPEC = 0, + NVME_RW_DSM_FREQ_TYPICAL = 1, + NVME_RW_DSM_FREQ_RARE = 2, + NVME_RW_DSM_FREQ_READS = 3, + NVME_RW_DSM_FREQ_WRITES = 4, + NVME_RW_DSM_FREQ_RW = 5, + NVME_RW_DSM_FREQ_ONCE = 6, + NVME_RW_DSM_FREQ_PREFETCH = 7, + NVME_RW_DSM_FREQ_TEMP = 8, + NVME_RW_DSM_LATENCY_NONE = 0, + NVME_RW_DSM_LATENCY_IDLE = 16, + NVME_RW_DSM_LATENCY_NORM = 32, + NVME_RW_DSM_LATENCY_LOW = 48, + NVME_RW_DSM_SEQ_REQ = 64, + NVME_RW_DSM_COMPRESSED = 128, + NVME_RW_PRINFO_PRCHK_REF = 1024, + NVME_RW_PRINFO_PRCHK_APP = 2048, + NVME_RW_PRINFO_PRCHK_GUARD = 4096, + NVME_RW_PRINFO_PRACT = 8192, + NVME_RW_DTYPE_STREAMS = 16, + NVME_WZ_DEAC = 512, +}; + +struct nvme_dsm_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 nr; + __le32 attributes; + __u32 rsvd12[4]; +}; + +struct nvme_write_zeroes_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 lbat; + __le16 lbatm; +}; + +struct nvme_zone_mgmt_send_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le32 cdw12; + __u8 zsa; + __u8 select_all; + __u8 rsvd13[2]; + __le32 cdw14[2]; +}; + +struct nvme_zone_mgmt_recv_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 
nsid; + __le64 rsvd2[2]; + union nvme_data_ptr dptr; + __le64 slba; + __le32 numd; + __u8 zra; + __u8 zrasf; + __u8 pr; + __u8 rsvd13; + __le32 cdw14[2]; +}; + +struct nvme_identify { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 cns; + __u8 rsvd3; + __le16 ctrlid; + __le16 cnssid; + __u8 rsvd11; + __u8 csi; + __u32 rsvd12[4]; +}; + +struct nvme_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 fid; + __le32 dword11; + __le32 dword12; + __le32 dword13; + __le32 dword14; + __le32 dword15; +}; + +struct nvme_create_cq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct nvme_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct nvme_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct nvme_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 sqid; + __u16 cid; + __u32 rsvd11[5]; +}; + +struct nvme_download_firmware { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + union nvme_data_ptr dptr; + __le32 numd; + __le32 offset; + __u32 rsvd12[4]; +}; + +struct nvme_format_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[4]; + __le32 cdw10; + __u32 rsvd11[5]; +}; + +struct nvme_get_log_page_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 lid; + __u8 lsp; + __le16 numdl; + __le16 numdu; + __le16 lsi; + union { + struct { + __le32 lpol; + __le32 lpou; + }; + __le64 lpo; + }; + __u8 rsvd14[3]; + __u8 csi; + __u32 rsvd15; +}; + +struct nvme_directive_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 numd; + __u8 doper; + __u8 dtype; + __le16 dspec; + __u8 endir; + __u8 tdtype; + __u16 rsvd15; + __u32 rsvd16[3]; +}; + +enum nvmf_fabrics_opcode { + nvme_fabrics_command = 127, +}; + +struct nvmf_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 ts[24]; +}; + +struct nvmf_connect_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __le16 recfmt; + __le16 qid; + __le16 sqsize; + __u8 cattr; + __u8 resv3; + __le32 kato; + __u8 resv4[12]; +}; + +struct nvmf_property_set_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __le64 value; + __u8 resv4[8]; +}; + +struct nvmf_property_get_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __u8 resv4[16]; +}; + +struct nvmf_auth_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al_tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_send_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + 
__u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_receive_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al; + __u8 resv4[16]; +}; + +struct nvme_dbbuf { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __le64 prp2; + __u32 rsvd12[6]; +}; + +struct nvme_command { + union { + struct nvme_common_command common; + struct nvme_rw_command rw; + struct nvme_identify identify; + struct nvme_features features; + struct nvme_create_cq create_cq; + struct nvme_create_sq create_sq; + struct nvme_delete_queue delete_queue; + struct nvme_download_firmware dlfw; + struct nvme_format_cmd format; + struct nvme_dsm_cmd dsm; + struct nvme_write_zeroes_cmd write_zeroes; + struct nvme_zone_mgmt_send_cmd zms; + struct nvme_zone_mgmt_recv_cmd zmr; + struct nvme_abort_cmd abort; + struct nvme_get_log_page_command get_log_page; + struct nvmf_common_command fabrics; + struct nvmf_connect_command connect; + struct nvmf_property_set_command prop_set; + struct nvmf_property_get_command prop_get; + struct nvmf_auth_common_command auth_common; + struct nvmf_auth_send_command auth_send; + struct nvmf_auth_receive_command auth_receive; + struct nvme_dbbuf dbbuf; + struct nvme_directive_cmd directive; + }; +}; + +union nvme_result { + __le16 u16; + __le32 u32; + __le64 u64; +}; + +struct nvme_ctrl; + +struct nvme_request { + struct nvme_command *cmd; + union nvme_result result; + u8 genctr; + u8 retries; + u8 flags; + u16 status; + struct nvme_ctrl *ctrl; +}; + +enum nvme_ctrl_state { + NVME_CTRL_NEW = 0, + NVME_CTRL_LIVE = 1, + NVME_CTRL_RESETTING = 2, + NVME_CTRL_CONNECTING = 3, + NVME_CTRL_DELETING = 4, + NVME_CTRL_DELETING_NOIO = 5, + NVME_CTRL_DEAD = 6, +}; + +struct opal_dev; + +struct nvme_fault_inject {}; + +struct nvme_ctrl_ops; + +struct nvme_subsystem; + +struct nvmf_ctrl_options; + +struct nvme_ctrl { + bool comp_seen; + bool identified; + bool passthru_err_log_enabled; + enum nvme_ctrl_state state; + spinlock_t lock; + struct mutex scan_lock; + const struct nvme_ctrl_ops *ops; + struct request_queue *admin_q; + struct request_queue *connect_q; + struct request_queue *fabrics_q; + struct device *dev; + int instance; + int numa_node; + struct blk_mq_tag_set *tagset; + struct blk_mq_tag_set *admin_tagset; + struct list_head namespaces; + struct mutex namespaces_lock; + struct srcu_struct srcu; + struct device ctrl_device; + struct device *device; + struct cdev cdev; + struct work_struct reset_work; + struct work_struct delete_work; + wait_queue_head_t state_wq; + struct nvme_subsystem *subsys; + struct list_head subsys_entry; + struct opal_dev *opal_dev; + u16 cntlid; + u16 mtfa; + u32 ctrl_config; + u32 queue_count; + u64 cap; + u32 max_hw_sectors; + u32 max_segments; + u32 max_integrity_segments; + u32 max_zeroes_sectors; + u16 crdt[3]; + u16 oncs; + u8 dmrl; + u32 dmrsl; + u16 oacs; + u16 sqsize; + u32 max_namespaces; + atomic_t abort_limit; + u8 vwc; + u32 vs; + u32 sgls; + u16 kas; + u8 npss; + u8 apsta; + u16 wctemp; + u16 cctemp; + u32 oaes; + u32 aen_result; + u32 ctratt; + unsigned int shutdown_timeout; + unsigned int kato; + bool subsystem; + long unsigned int quirks; + struct nvme_id_power_state psd[32]; + struct nvme_effects_log *effects; + struct xarray cels; + struct work_struct scan_work; + struct work_struct async_event_work; + struct delayed_work ka_work; + struct 
delayed_work failfast_work; + struct nvme_command ka_cmd; + long unsigned int ka_last_check_time; + struct work_struct fw_act_work; + long unsigned int events; + key_serial_t tls_pskid; + u64 ps_max_latency_us; + bool apst_enabled; + u16 hmmaxd; + u32 hmpre; + u32 hmmin; + u32 hmminds; + u32 ioccsz; + u32 iorcsz; + u16 icdoff; + u16 maxcmd; + int nr_reconnects; + long unsigned int flags; + struct nvmf_ctrl_options *opts; + struct page *discard_page; + long unsigned int discard_page_busy; + struct nvme_fault_inject fault_inject; + enum nvme_ctrl_type cntrltype; + enum nvme_dctype dctype; +}; + +enum { + NVME_REQ_CANCELLED = 1, + NVME_REQ_USERCMD = 2, + NVME_MPATH_IO_STATS = 4, + NVME_MPATH_CNT_ACTIVE = 8, +}; + +struct nvme_ctrl_ops { + const char *name; + struct module *module; + unsigned int flags; + const struct attribute_group **dev_attr_groups; + int (*reg_read32)(struct nvme_ctrl *, u32, u32 *); + int (*reg_write32)(struct nvme_ctrl *, u32, u32); + int (*reg_read64)(struct nvme_ctrl *, u32, u64 *); + void (*free_ctrl)(struct nvme_ctrl *); + void (*submit_async_event)(struct nvme_ctrl *); + int (*subsystem_reset)(struct nvme_ctrl *); + void (*delete_ctrl)(struct nvme_ctrl *); + void (*stop_ctrl)(struct nvme_ctrl *); + int (*get_address)(struct nvme_ctrl *, char *, int); + void (*print_device_info)(struct nvme_ctrl *); + bool (*supports_pci_p2pdma)(struct nvme_ctrl *); +}; + +struct nvme_subsystem { + int instance; + struct device dev; + struct kref ref; + struct list_head entry; + struct mutex lock; + struct list_head ctrls; + struct list_head nsheads; + char subnqn[223]; + char serial[20]; + char model[40]; + char firmware_rev[8]; + u8 cmic; + enum nvme_subsys_type subtype; + u16 vendor_id; + u16 awupf; + struct ida ns_ida; +}; + +struct nvme_ns_ids { + u8 eui64[8]; + u8 nguid[16]; + uuid_t uuid; + u8 csi; +}; + +struct nvme_ns_head { + struct list_head list; + struct srcu_struct srcu; + struct nvme_subsystem *subsys; + struct nvme_ns_ids ids; + u8 lba_shift; + u16 ms; + u16 pi_size; + u8 pi_type; + u8 guard_type; + struct list_head entry; + struct kref ref; + bool shared; + bool rotational; + bool passthru_err_log_enabled; + struct nvme_effects_log *effects; + u64 nuse; + unsigned int ns_id; + int instance; + long unsigned int features; + struct ratelimit_state rs_nuse; + struct cdev cdev; + struct device cdev_device; + struct gendisk *disk; +}; + +enum nvme_ns_features { + NVME_NS_EXT_LBAS = 1, + NVME_NS_METADATA_SUPPORTED = 2, + NVME_NS_DEAC = 4, +}; + +struct nvme_ns { + struct list_head list; + struct nvme_ctrl *ctrl; + struct request_queue *queue; + struct gendisk *disk; + struct list_head siblings; + struct kref kref; + struct nvme_ns_head *head; + long unsigned int flags; + struct cdev cdev; + struct device cdev_device; + struct nvme_fault_inject fault_inject; +}; + +enum { + NVME_IOCTL_VEC = 1, + NVME_IOCTL_PARTITION = 2, +}; + +struct nvme_uring_data { + __u64 metadata; + __u64 addr; + __u32 data_len; + __u32 metadata_len; + __u32 timeout_ms; +}; + +struct nvme_uring_cmd_pdu { + struct request *req; + struct bio *bio; + u64 result; + int status; +}; + +enum pci_ers_result { + PCI_ERS_RESULT_NONE = 1, + PCI_ERS_RESULT_CAN_RECOVER = 2, + PCI_ERS_RESULT_NEED_RESET = 3, + PCI_ERS_RESULT_DISCONNECT = 4, + PCI_ERS_RESULT_RECOVERED = 5, + PCI_ERS_RESULT_NO_AER_DRIVER = 6, +}; + +struct mii_ioctl_data { + __u16 phy_id; + __u16 reg_num; + __u16 val_in; + __u16 val_out; +}; + +struct mii_if_info { + int phy_id; + int advertising; + int phy_id_mask; + int reg_num_mask; + unsigned int 
full_duplex: 1; + unsigned int force_media: 1; + unsigned int supports_gmii: 1; + struct net_device *dev; + int (*mdio_read)(struct net_device *, int, int); + void (*mdio_write)(struct net_device *, int, int, int); +}; + +struct firmware { + size_t size; + const u8 *data; + void *priv; +}; + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 255, +}; + +enum phy___2 { + phy_100a = 992, + phy_100c = 55575208, + phy_82555_tx = 22020776, + phy_nsc_tx = 1543512064, + phy_82562_et = 53478056, + phy_82562_em = 52429480, + phy_82562_ek = 51380904, + phy_82562_eh = 24117928, + phy_82552_v = 3496017997, + phy_unknown = 4294967295, +}; + +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 8, + rus_ready = 16, + rus_mask = 60, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0, + stat_ack_sw_gen = 4, + stat_ack_rnr = 16, + stat_ack_cu_idle = 32, + stat_ack_frame_rx = 64, + stat_ack_cu_cmd_done = 128, + stat_ack_not_present = 255, + stat_ack_rx = 84, + stat_ack_tx = 160, +}; + +enum scb_cmd_hi { + irq_mask_none = 0, + irq_mask_all = 1, + irq_sw_gen = 2, +}; + +enum scb_cmd_lo { + cuc_nop = 0, + ruc_start = 1, + ruc_load_base = 6, + cuc_start = 16, + cuc_resume = 32, + cuc_dump_addr = 64, + cuc_dump_stats = 80, + cuc_load_base = 96, + cuc_dump_reset = 112, +}; + +enum cuc_dump { + cuc_dump_complete = 40965, + cuc_dump_reset_complete = 40967, +}; + +enum port { + software_reset = 0, + selftest = 1, + selective_reset = 2, +}; + +enum eeprom_ctrl_lo { + eesk = 1, + eecs = 2, + eedi = 4, + eedo = 8, +}; + +enum mdi_ctrl { + mdi_write = 67108864, + mdi_read = 134217728, + mdi_ready = 268435456, +}; + +enum eeprom_op { + op_write = 5, + op_read = 6, + op_ewds = 16, + op_ewen = 19, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 3, + eeprom_phy_iface = 6, + eeprom_id = 10, + eeprom_config_asf = 13, + eeprom_smbus_addr = 144, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 128, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB = 1, + I82553C = 2, + I82503 = 3, + DP83840 = 4, + S80C240 = 5, + S80C24 = 6, + I82555 = 7, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 32, +}; + +enum eeprom_config_asf { + eeprom_asf = 32768, + eeprom_gcl = 16384, +}; + +enum cb_status { + cb_complete = 32768, + cb_ok = 8192, +}; + +enum cb_command { + cb_nop = 0, + cb_iaaddr = 1, + cb_config = 2, + cb_multi = 3, + cb_tx = 4, + cb_ucode = 5, + cb_dump = 6, + cb_tx_sf = 8, + cb_tx_nc = 16, + cb_cid = 7936, + cb_i = 8192, + cb_s = 16384, + cb_el = 32768, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next; + struct rx *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +struct config { + u8 pad0: 2; + u8 byte_count: 6; + u8 pad1: 1; + u8 tx_fifo_limit: 3; + u8 rx_fifo_limit: 4; + u8 adaptive_ifs; + u8 pad3: 4; + u8 term_write_cache_line: 1; + u8 read_align_enable: 1; + u8 type_enable: 1; + u8 mwi_enable: 1; + u8 pad4: 1; + u8 rx_dma_max_count: 7; + u8 
dma_max_count_enable: 1; + u8 tx_dma_max_count: 7; + u8 rx_save_bad_frames: 1; + u8 rx_save_overruns: 1; + u8 standard_stat_counter: 1; + u8 standard_tcb: 1; + u8 cna_intr: 1; + u8 tno_intr: 1; + u8 direct_rx_dma: 1; + u8 late_scb_update: 1; + u8 tx_dynamic_tbd: 1; + u8 tx_two_frames_in_fifo: 1; + u8 rx_extended_rfd: 1; + u8 pad7: 2; + u8 tx_underrun_retry: 2; + u8 rx_discard_short_frames: 1; + u8 csma_disabled: 1; + u8 pad8: 6; + u8 mii_mode: 1; + u8 mcmatch_wake: 1; + u8 arp_wake: 1; + u8 link_status_wake: 1; + u8 vlan_arp_tco: 1; + u8 pad9: 3; + u8 rx_tcpudp_checksum: 1; + u8 loopback: 2; + u8 preamble_length: 2; + u8 no_source_addr_insertion: 1; + u8 pad10: 3; + u8 pad11: 5; + u8 linear_priority: 3; + u8 ifs: 4; + u8 pad12: 3; + u8 linear_priority_mode: 1; + u8 ip_addr_lo; + u8 ip_addr_hi; + u8 crs_or_cdt: 1; + u8 pad15_2: 1; + u8 crc_16_bit: 1; + u8 ignore_ul_bit: 1; + u8 pad15_1: 1; + u8 wait_after_win: 1; + u8 broadcast_disabled: 1; + u8 promiscuous_mode: 1; + u8 fc_delay_lo; + u8 fc_delay_hi; + u8 pad18: 1; + u8 fc_priority_threshold: 3; + u8 rx_long_ok: 1; + u8 rx_crc_transfer: 1; + u8 tx_padding: 1; + u8 rx_stripping: 1; + u8 full_duplex_pin: 1; + u8 full_duplex_force: 1; + u8 fc_reject: 1; + u8 fc_restart: 1; + u8 fc_restop: 1; + u8 fc_disable: 1; + u8 magic_packet_disable: 1; + u8 addr_wake: 1; + u8 pad20_2: 1; + u8 multi_ia: 1; + u8 fc_priority_location: 1; + u8 pad20_1: 5; + u8 pad21_2: 4; + u8 multicast_all: 1; + u8 pad21_1: 3; + u8 pad22: 6; + u8 rx_vlan_drop: 1; + u8 rx_d102_mode: 1; + u8 pad_d102[9]; +}; + +struct multi { + __le16 count; + u8 addr[386]; +}; + +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[6]; + __le32 ucode[134]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next; + struct cb *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, + lb_mac = 1, + lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames; + __le32 tx_max_collisions; + __le32 tx_late_collisions; + __le32 tx_underruns; + __le32 tx_lost_crs; + __le32 tx_deferred; + __le32 tx_single_collisions; + __le32 tx_multiple_collisions; + __le32 tx_total_collisions; + __le32 rx_good_frames; + __le32 rx_crc_errors; + __le32 rx_alignment_errors; + __le32 rx_resource_errors; + __le32 rx_overrun_errors; + __le32 rx_cdt_errors; + __le32 rx_short_frame_errors; + __le32 fc_xmt_pause; + __le32 fc_rcv_pause; + __le32 fc_rcv_unsupported; + __le16 xmt_tco_frames; + __le16 rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + u32 msg_enable; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *, u32, u32, u32, u16); + long: 64; + long: 64; + long: 64; + long: 64; + struct rx *rxs; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + long: 64; + long: 64; + spinlock_t cb_lock; + spinlock_t cmd_lock; + struct csr *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + 
long: 64; + long: 64; + enum { + ich = 1, + promiscuous = 2, + multicast_all = 4, + wol_magic = 8, + ich_10h_workaround = 16, + } flags; + enum mac mac; + enum phy___2 phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + struct mem *mem; + dma_addr_t dma_addr; + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum led_state { + led_on = 1, + led_off = 4, + led_on_559 = 5, + led_on_557 = 7, +}; + +struct usb_phy_roothub { + struct phy *phy; + struct list_head list; +}; + +struct input_dev_poller { + void (*poll)(struct input_dev *); + unsigned int poll_interval; + unsigned int poll_interval_max; + unsigned int poll_interval_min; + struct input_dev *input; + struct delayed_work work; +}; + +struct ir_raw_timings_pd { + unsigned int header_pulse; + unsigned int header_space; + unsigned int bit_pulse; + unsigned int bit_space[2]; + unsigned int trailer_pulse; + unsigned int trailer_space; + unsigned int msb_first: 1; +}; + +enum jvc_state { + STATE_INACTIVE___3 = 0, + STATE_HEADER_SPACE___2 = 1, + STATE_BIT_PULSE___2 = 2, + STATE_BIT_SPACE___2 = 3, + STATE_TRAILER_PULSE = 4, + STATE_TRAILER_SPACE = 5, + STATE_CHECK_REPEAT = 6, +}; + +struct blk_plug_cb; + +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); + +struct blk_plug_cb { + struct list_head list; + blk_plug_cb_fn callback; + void *data; +}; + +struct raid6_calls { + void (*gen_syndrome)(int, size_t, void **); + void (*xor_syndrome)(int, int, int, size_t, void **); + int (*valid)(); + const char *name; + int priority; +}; + +typedef s32 dma_cookie_t; + +enum dma_status { + DMA_COMPLETE = 0, + DMA_IN_PROGRESS = 1, + DMA_PAUSED = 2, + DMA_ERROR = 3, + DMA_OUT_OF_ORDER = 4, +}; + +enum dma_transfer_direction { + DMA_MEM_TO_MEM = 0, + DMA_MEM_TO_DEV = 1, + DMA_DEV_TO_MEM = 2, + DMA_DEV_TO_DEV = 3, + DMA_TRANS_NONE = 4, +}; + +struct data_chunk { + size_t size; + size_t icg; + size_t dst_icg; + size_t src_icg; +}; + +struct dma_interleaved_template { + dma_addr_t src_start; + dma_addr_t dst_start; + enum dma_transfer_direction dir; + bool src_inc; + bool dst_inc; + bool src_sgl; + bool dst_sgl; + size_t numf; + size_t frame_size; + struct data_chunk sgl[0]; +}; + +struct dma_vec { + dma_addr_t addr; + size_t len; +}; + +enum dma_ctrl_flags { + DMA_PREP_INTERRUPT = 1, + DMA_CTRL_ACK = 2, + DMA_PREP_PQ_DISABLE_P = 4, + DMA_PREP_PQ_DISABLE_Q = 8, + DMA_PREP_CONTINUE = 16, + DMA_PREP_FENCE = 32, + DMA_CTRL_REUSE = 64, + DMA_PREP_CMD = 128, + DMA_PREP_REPEAT = 256, + DMA_PREP_LOAD_EOT = 512, +}; + +enum sum_check_flags { + SUM_CHECK_P_RESULT = 1, + SUM_CHECK_Q_RESULT = 2, +}; + +typedef struct { + long unsigned int bits[1]; +} dma_cap_mask_t; + +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = 1, + DESC_METADATA_ENGINE = 2, +}; + +struct dma_chan_percpu { + long unsigned int memcpy_count; + long unsigned int bytes_transferred; +}; + +struct dma_router { + struct device *dev; + void (*route_free)(struct device *, void *); +}; + 
+struct dma_device; + +struct dma_chan_dev; + +struct dma_chan { + struct dma_device *device; + struct device *slave; + dma_cookie_t cookie; + dma_cookie_t completed_cookie; + int chan_id; + struct dma_chan_dev *dev; + const char *name; + char *dbg_client_name; + struct list_head device_node; + struct dma_chan_percpu *local; + int client_count; + int table_count; + struct dma_router *router; + void *route_data; + void *private; +}; + +typedef bool (*dma_filter_fn)(struct dma_chan *, void *); + +struct dma_slave_map; + +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +enum dmaengine_alignment { + DMAENGINE_ALIGN_1_BYTE = 0, + DMAENGINE_ALIGN_2_BYTES = 1, + DMAENGINE_ALIGN_4_BYTES = 2, + DMAENGINE_ALIGN_8_BYTES = 3, + DMAENGINE_ALIGN_16_BYTES = 4, + DMAENGINE_ALIGN_32_BYTES = 5, + DMAENGINE_ALIGN_64_BYTES = 6, + DMAENGINE_ALIGN_128_BYTES = 7, + DMAENGINE_ALIGN_256_BYTES = 8, +}; + +enum dma_residue_granularity { + DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, + DMA_RESIDUE_GRANULARITY_SEGMENT = 1, + DMA_RESIDUE_GRANULARITY_BURST = 2, +}; + +struct dma_async_tx_descriptor; + +struct dma_slave_caps; + +struct dma_slave_config; + +struct dma_tx_state; + +struct dma_device { + struct kref ref; + unsigned int chancnt; + unsigned int privatecnt; + struct list_head channels; + struct list_head global_node; + struct dma_filter filter; + dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; + short unsigned int max_xor; + short unsigned int max_pq; + enum dmaengine_alignment copy_align; + enum dmaengine_alignment xor_align; + enum dmaengine_alignment pq_align; + enum dmaengine_alignment fill_align; + int dev_id; + struct device *dev; + struct module *owner; + struct ida chan_ida; + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool descriptor_reuse; + enum dma_residue_granularity residue_granularity; + int (*device_alloc_chan_resources)(struct dma_chan *); + int (*device_router_config)(struct dma_chan *); + void (*device_free_chan_resources)(struct dma_chan *); + struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan *, dma_addr_t, dma_addr_t, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan *, dma_addr_t, int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan *, struct scatterlist *, unsigned int, int, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_peripheral_dma_vec)(struct dma_chan *, const struct dma_vec *, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan *, struct 
scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *); + struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan *, struct dma_interleaved_template *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan *, dma_addr_t, u64, long unsigned int); + void (*device_caps)(struct dma_chan *, struct dma_slave_caps *); + int (*device_config)(struct dma_chan *, struct dma_slave_config *); + int (*device_pause)(struct dma_chan *); + int (*device_resume)(struct dma_chan *); + int (*device_terminate_all)(struct dma_chan *); + void (*device_synchronize)(struct dma_chan *); + enum dma_status (*device_tx_status)(struct dma_chan *, dma_cookie_t, struct dma_tx_state *); + void (*device_issue_pending)(struct dma_chan *); + void (*device_release)(struct dma_device *); + void (*dbg_summary_show)(struct seq_file *, struct dma_device *); + struct dentry *dbg_dev_root; +}; + +struct dma_chan_dev { + struct dma_chan *chan; + struct device device; + int dev_id; + bool chan_dma_dev; +}; + +enum dma_slave_buswidth { + DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, + DMA_SLAVE_BUSWIDTH_1_BYTE = 1, + DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_3_BYTES = 3, + DMA_SLAVE_BUSWIDTH_4_BYTES = 4, + DMA_SLAVE_BUSWIDTH_8_BYTES = 8, + DMA_SLAVE_BUSWIDTH_16_BYTES = 16, + DMA_SLAVE_BUSWIDTH_32_BYTES = 32, + DMA_SLAVE_BUSWIDTH_64_BYTES = 64, + DMA_SLAVE_BUSWIDTH_128_BYTES = 128, +}; + +struct dma_slave_config { + enum dma_transfer_direction direction; + phys_addr_t src_addr; + phys_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + u32 src_maxburst; + u32 dst_maxburst; + u32 src_port_window_size; + u32 dst_port_window_size; + bool device_fc; + void *peripheral_config; + size_t peripheral_size; +}; + +struct dma_slave_caps { + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool cmd_pause; + bool cmd_resume; + bool cmd_terminate; + enum dma_residue_granularity residue_granularity; + bool descriptor_reuse; +}; + +typedef void (*dma_async_tx_callback)(void *); + +enum dmaengine_tx_result { + DMA_TRANS_NOERROR = 0, + DMA_TRANS_READ_FAILED = 1, + DMA_TRANS_WRITE_FAILED = 2, + DMA_TRANS_ABORTED = 3, +}; + +struct dmaengine_result { + enum dmaengine_tx_result result; + u32 residue; +}; + +typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); + +struct dmaengine_unmap_data { + u8 map_cnt; + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *, void *, size_t); + void * (*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *); + int (*set_len)(struct dma_async_tx_descriptor *, size_t); +}; + +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + enum dma_ctrl_flags flags; + dma_addr_t phys; + struct dma_chan *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); + int (*desc_free)(struct dma_async_tx_descriptor *); + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; + struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; 
+}; + +struct dma_tx_state { + dma_cookie_t last; + dma_cookie_t used; + u32 residue; + u32 in_flight_bytes; +}; + +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +enum async_tx_flags { + ASYNC_TX_XOR_ZERO_DST = 1, + ASYNC_TX_XOR_DROP_DST = 2, + ASYNC_TX_ACK = 4, + ASYNC_TX_FENCE = 8, + ASYNC_TX_PQ_XOR_DST = 16, +}; + +struct async_submit_ctl { + enum async_tx_flags flags; + struct dma_async_tx_descriptor *depend_tx; + dma_async_tx_callback cb_fn; + void *cb_param; + void *scribble; +}; + +typedef union { + long unsigned int addr; + struct page *page; + dma_addr_t dma; +} addr_conv_t; + +enum flag_bits { + Faulty = 0, + In_sync = 1, + Bitmap_sync = 2, + WriteMostly = 3, + AutoDetected = 4, + Blocked = 5, + WriteErrorSeen = 6, + FaultRecorded = 7, + BlockedBadBlocks = 8, + WantReplacement = 9, + Replacement = 10, + Candidate = 11, + Journal = 12, + ClusterRemove = 13, + ExternalBbl = 14, + FailFast = 15, + LastDev = 16, + CollisionCheck = 17, + Nonrot = 18, +}; + +enum mddev_sb_flags { + MD_SB_CHANGE_DEVS = 0, + MD_SB_CHANGE_CLEAN = 1, + MD_SB_CHANGE_PENDING = 2, + MD_SB_NEED_REWRITE = 3, +}; + +struct md_bitmap_stats; + +struct bitmap_operations { + bool (*enabled)(struct mddev *); + int (*create)(struct mddev *, int); + int (*resize)(struct mddev *, sector_t, int, bool); + int (*load)(struct mddev *); + void (*destroy)(struct mddev *); + void (*flush)(struct mddev *); + void (*write_all)(struct mddev *); + void (*dirty_bits)(struct mddev *, long unsigned int, long unsigned int); + void (*unplug)(struct mddev *, bool); + void (*daemon_work)(struct mddev *); + void (*wait_behind_writes)(struct mddev *); + int (*startwrite)(struct mddev *, sector_t, long unsigned int, bool); + void (*endwrite)(struct mddev *, sector_t, long unsigned int, bool, bool); + bool (*start_sync)(struct mddev *, sector_t, sector_t *, bool); + void (*end_sync)(struct mddev *, sector_t, sector_t *); + void (*cond_end_sync)(struct mddev *, sector_t, bool); + void (*close_sync)(struct mddev *); + void (*update_sb)(void *); + int (*get_stats)(void *, struct md_bitmap_stats *); + void (*sync_with_cluster)(struct mddev *, sector_t, sector_t, sector_t, sector_t); + void * (*get_from_slot)(struct mddev *, int); + int (*copy_from_slot)(struct mddev *, int, sector_t *, sector_t *, bool); + void (*set_pages)(void *, long unsigned int); + void (*free)(void *); +}; + +enum recovery_flags { + MD_RECOVERY_NEEDED = 0, + MD_RECOVERY_RUNNING = 1, + MD_RECOVERY_INTR = 2, + MD_RECOVERY_DONE = 3, + MD_RECOVERY_FROZEN = 4, + MD_RECOVERY_WAIT = 5, + MD_RECOVERY_ERROR = 6, + MD_RECOVERY_SYNC = 7, + MD_RECOVERY_REQUESTED = 8, + MD_RECOVERY_CHECK = 9, + MD_RECOVERY_RECOVER = 10, + MD_RECOVERY_RESHAPE = 11, + MD_RESYNCING_REMOTE = 12, +}; + +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct mddev *, char *); + ssize_t (*store)(struct mddev *, const char *, size_t); +}; + +enum check_states { + check_state_idle = 0, + check_state_run = 1, + check_state_run_q = 2, + check_state_run_pq = 3, + check_state_check_result = 4, + check_state_compute_run = 5, + check_state_compute_result = 6, +}; + +enum reconstruct_states { + reconstruct_state_idle = 0, + reconstruct_state_prexor_drain_run = 1, + reconstruct_state_drain_run = 2, + reconstruct_state_run = 3, + reconstruct_state_prexor_drain_result = 4, + reconstruct_state_drain_result = 5, + reconstruct_state_result = 6, +}; + +struct stripe_operations { + int target; + int target2; + enum sum_check_flags zero_sum_result; +}; + +struct 
r5dev { + struct bio req; + struct bio rreq; + struct bio_vec vec; + struct bio_vec rvec; + struct page *page; + struct page *orig_page; + unsigned int offset; + struct bio *toread; + struct bio *read; + struct bio *towrite; + struct bio *written; + sector_t sector; + long unsigned int flags; + u32 log_checksum; + short unsigned int write_hint; +}; + +struct r5conf; + +struct r5worker_group; + +struct r5l_io_unit; + +struct ppl_io_unit; + +struct stripe_head { + struct hlist_node hash; + struct list_head lru; + struct llist_node release_list; + struct r5conf *raid_conf; + short int generation; + sector_t sector; + short int pd_idx; + short int qd_idx; + short int ddf_layout; + short int hash_lock_index; + long unsigned int state; + atomic_t count; + int bm_seq; + int disks; + int overwrite_disks; + enum check_states check_state; + enum reconstruct_states reconstruct_state; + spinlock_t stripe_lock; + int cpu; + struct r5worker_group *group; + struct stripe_head *batch_head; + spinlock_t batch_lock; + struct list_head batch_list; + union { + struct r5l_io_unit *log_io; + struct ppl_io_unit *ppl_io; + }; + struct list_head log_list; + sector_t log_start; + struct list_head r5c; + struct page *ppl_page; + struct stripe_operations ops; + struct r5dev dev[0]; +}; + +struct raid5_percpu; + +struct disk_info; + +struct r5l_log; + +struct r5pending_data; + +struct r5conf { + struct hlist_head *stripe_hashtbl; + spinlock_t hash_locks[8]; + struct mddev *mddev; + int chunk_sectors; + int level; + int algorithm; + int rmw_level; + int max_degraded; + int raid_disks; + int max_nr_stripes; + int min_nr_stripes; + sector_t reshape_progress; + sector_t reshape_safe; + int previous_raid_disks; + int prev_chunk_sectors; + int prev_algo; + short int generation; + seqcount_spinlock_t gen_lock; + long unsigned int reshape_checkpoint; + long long int min_offset_diff; + struct list_head handle_list; + struct list_head loprio_list; + struct list_head hold_list; + struct list_head delayed_list; + struct list_head bitmap_list; + struct bio *retry_read_aligned; + unsigned int retry_read_offset; + struct bio *retry_read_aligned_list; + atomic_t preread_active_stripes; + atomic_t active_aligned_reads; + atomic_t pending_full_writes; + int bypass_count; + int bypass_threshold; + int skip_copy; + struct list_head *last_hold; + atomic_t reshape_stripes; + int active_name; + char cache_name[96]; + struct kmem_cache *slab_cache; + struct mutex cache_size_mutex; + int seq_flush; + int seq_write; + int quiesce; + int fullsync; + int recovery_disabled; + struct raid5_percpu *percpu; + int scribble_disks; + int scribble_sectors; + struct hlist_node node; + atomic_t active_stripes; + struct list_head inactive_list[8]; + atomic_t r5c_cached_full_stripes; + struct list_head r5c_full_stripe_list; + atomic_t r5c_cached_partial_stripes; + struct list_head r5c_partial_stripe_list; + atomic_t r5c_flushing_full_stripes; + atomic_t r5c_flushing_partial_stripes; + atomic_t empty_inactive_list_nr; + struct llist_head released_stripes; + wait_queue_head_t wait_for_quiescent; + wait_queue_head_t wait_for_stripe; + wait_queue_head_t wait_for_reshape; + long unsigned int cache_state; + struct shrinker *shrinker; + int pool_size; + spinlock_t device_lock; + struct disk_info *disks; + struct bio_set bio_split; + struct md_thread *thread; + struct list_head temp_inactive_list[8]; + struct r5worker_group *worker_groups; + int group_cnt; + int worker_cnt_per_group; + struct r5l_log *log; + void *log_private; + spinlock_t pending_bios_lock; + bool 
batch_bio_dispatch; + struct r5pending_data *pending_data; + struct list_head free_list; + struct list_head pending_list; + int pending_data_cnt; + struct r5pending_data *next_pending_data; +}; + +struct r5worker; + +struct r5worker_group { + struct list_head handle_list; + struct list_head loprio_list; + struct r5conf *conf; + struct r5worker *workers; + int stripes_cnt; +}; + +struct stripe_head_state { + int syncing; + int expanding; + int expanded; + int replacing; + int locked; + int uptodate; + int to_read; + int to_write; + int failed; + int written; + int to_fill; + int compute; + int req_compute; + int non_overwrite; + int injournal; + int just_cached; + int failed_num[2]; + int p_failed; + int q_failed; + int dec_preread_active; + long unsigned int ops_request; + struct md_rdev *blocked_rdev; + int handle_bad_blocks; + int log_failed; + int waiting_extra_page; +}; + +enum r5dev_flags { + R5_UPTODATE = 0, + R5_LOCKED = 1, + R5_DOUBLE_LOCKED = 2, + R5_OVERWRITE = 3, + R5_Insync = 4, + R5_Wantread = 5, + R5_Wantwrite = 6, + R5_Overlap = 7, + R5_ReadNoMerge = 8, + R5_ReadError = 9, + R5_ReWrite = 10, + R5_Expanded = 11, + R5_Wantcompute = 12, + R5_Wantfill = 13, + R5_Wantdrain = 14, + R5_WantFUA = 15, + R5_SyncIO = 16, + R5_WriteError = 17, + R5_MadeGood = 18, + R5_ReadRepl = 19, + R5_MadeGoodRepl = 20, + R5_NeedReplace = 21, + R5_WantReplace = 22, + R5_Discard = 23, + R5_SkipCopy = 24, + R5_InJournal = 25, + R5_OrigPageUPTDODATE = 26, +}; + +enum { + STRIPE_ACTIVE = 0, + STRIPE_HANDLE = 1, + STRIPE_SYNC_REQUESTED = 2, + STRIPE_SYNCING = 3, + STRIPE_INSYNC = 4, + STRIPE_REPLACED = 5, + STRIPE_PREREAD_ACTIVE = 6, + STRIPE_DELAYED = 7, + STRIPE_DEGRADED = 8, + STRIPE_BIT_DELAY = 9, + STRIPE_EXPANDING = 10, + STRIPE_EXPAND_SOURCE = 11, + STRIPE_EXPAND_READY = 12, + STRIPE_IO_STARTED = 13, + STRIPE_FULL_WRITE = 14, + STRIPE_BIOFILL_RUN = 15, + STRIPE_COMPUTE_RUN = 16, + STRIPE_ON_UNPLUG_LIST = 17, + STRIPE_DISCARD = 18, + STRIPE_ON_RELEASE_LIST = 19, + STRIPE_BATCH_READY = 20, + STRIPE_BATCH_ERR = 21, + STRIPE_BITMAP_PENDING = 22, + STRIPE_LOG_TRAPPED = 23, + STRIPE_R5C_CACHING = 24, + STRIPE_R5C_PARTIAL_STRIPE = 25, + STRIPE_R5C_FULL_STRIPE = 26, + STRIPE_R5C_PREFLUSH = 27, +}; + +enum { + STRIPE_OP_BIOFILL = 0, + STRIPE_OP_COMPUTE_BLK = 1, + STRIPE_OP_PREXOR = 2, + STRIPE_OP_BIODRAIN = 3, + STRIPE_OP_RECONSTRUCT = 4, + STRIPE_OP_CHECK = 5, + STRIPE_OP_PARTIAL_PARITY = 6, +}; + +enum { + PARITY_DISABLE_RMW = 0, + PARITY_ENABLE_RMW = 1, + PARITY_PREFER_RMW = 2, +}; + +enum { + SYNDROME_SRC_ALL = 0, + SYNDROME_SRC_WANT_DRAIN = 1, + SYNDROME_SRC_WRITTEN = 2, +}; + +struct disk_info { + struct md_rdev *rdev; + struct md_rdev *replacement; + struct page *extra_page; +}; + +struct r5worker { + struct work_struct work; + struct r5worker_group *group; + struct list_head temp_inactive_list[8]; + bool working; +}; + +enum r5_cache_state { + R5_INACTIVE_BLOCKED = 0, + R5_ALLOC_MORE = 1, + R5_DID_ALLOC = 2, + R5C_LOG_TIGHT = 3, + R5C_LOG_CRITICAL = 4, + R5C_EXTRA_PAGE_IN_USE = 5, +}; + +struct r5pending_data { + struct list_head sibling; + sector_t sector; + struct bio_list bios; +}; + +struct raid5_percpu { + struct page *spare_page; + void *scribble; + int scribble_obj_size; + local_lock_t lock; +}; + +struct md_bitmap_stats { + u64 events_cleared; + int behind_writes; + bool behind_wait; + long unsigned int missing_pages; + long unsigned int file_pages; + long unsigned int sync_size; + long unsigned int pages; + struct file *file; +}; + +enum stripe_result { + STRIPE_SUCCESS = 0, + STRIPE_RETRY 
= 1, + STRIPE_SCHEDULE_AND_RETRY = 2, + STRIPE_FAIL = 3, + STRIPE_WAIT_RESHAPE = 4, +}; + +struct stripe_request_ctx { + struct stripe_head *batch_last; + sector_t first_sector; + sector_t last_sector; + long unsigned int sectors_to_do[5]; + bool do_flush; +}; + +struct raid5_plug_cb { + struct blk_plug_cb cb; + struct list_head list; + struct list_head temp_inactive_list[8]; +}; + +enum reshape_loc { + LOC_NO_RESHAPE = 0, + LOC_AHEAD_OF_RESHAPE = 1, + LOC_INSIDE_RESHAPE = 2, + LOC_BEHIND_RESHAPE = 3, +}; + +struct page_pool_stats { + struct page_pool_alloc_stats alloc_stats; + struct page_pool_recycle_stats recycle_stats; +}; + +enum ethtool_header_flags { + ETHTOOL_FLAG_COMPACT_BITSETS = 1, + ETHTOOL_FLAG_OMIT_REPLY = 2, + ETHTOOL_FLAG_STATS = 4, +}; + +struct linkstate_reply_data { + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + struct ethtool_link_ext_stats link_stats; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; +}; + +struct netlink_notify { + struct net *net; + u32 portid; + int protocol; +}; + +struct nf_ct_hook { + int (*update)(struct net *, struct sk_buff *); + void (*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); + void (*attach)(struct sk_buff *, const struct sk_buff *); + void (*set_closing)(struct nf_conntrack *); + int (*confirm)(struct sk_buff *); +}; + +struct nfnl_ct_hook { + size_t (*build_size)(const struct nf_conn *); + int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, u_int16_t, u_int16_t); + int (*parse)(const struct nlattr *, struct nf_conn *); + int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32); + void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32); +}; + +struct nf_ipv6_ops { + void (*route_input)(struct sk_buff *); + int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); +}; + +struct nfgenmsg { + __u8 nfgen_family; + __u8 version; + __be16 res_id; +}; + +struct nfnl_info { + struct net *net; + struct sock *sk; + const struct nlmsghdr *nlh; + const struct nfgenmsg *nfmsg; + struct netlink_ext_ack *extack; +}; + +enum nfnl_callback_type { + NFNL_CB_UNSPEC = 0, + NFNL_CB_MUTEX = 1, + NFNL_CB_RCU = 2, + NFNL_CB_BATCH = 3, +}; + +struct nfnl_callback { + int (*call)(struct sk_buff *, const struct nfnl_info *, const struct nlattr * const *); + const struct nla_policy *policy; + enum nfnl_callback_type type; + __u16 attr_count; +}; + +enum nfnl_abort_action { + NFNL_ABORT_NONE = 0, + NFNL_ABORT_AUTOLOAD = 1, + NFNL_ABORT_VALIDATE = 2, +}; + +struct nfnetlink_subsystem { + const char *name; + __u8 subsys_id; + __u8 cb_count; + const struct nfnl_callback *cb; + struct module *owner; + int (*commit)(struct net *, struct sk_buff *); + int (*abort)(struct net *, struct sk_buff *, enum nfnl_abort_action); + bool (*valid_genid)(struct net *, u32); +}; + +enum nfqnl_msg_types { + NFQNL_MSG_PACKET = 0, + NFQNL_MSG_VERDICT = 1, + NFQNL_MSG_CONFIG = 2, + NFQNL_MSG_VERDICT_BATCH = 3, + NFQNL_MSG_MAX = 4, +}; + +struct nfqnl_msg_packet_hdr { + __be32 packet_id; + __be16 hw_protocol; + __u8 hook; +} __attribute__((packed)); + +struct nfqnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfqnl_msg_packet_timestamp { + __be64 sec; + __be64 usec; +}; + +enum nfqnl_vlan_attr { + NFQA_VLAN_UNSPEC = 0, + 
NFQA_VLAN_PROTO = 1, + NFQA_VLAN_TCI = 2, + __NFQA_VLAN_MAX = 3, +}; + +enum nfqnl_attr_type { + NFQA_UNSPEC = 0, + NFQA_PACKET_HDR = 1, + NFQA_VERDICT_HDR = 2, + NFQA_MARK = 3, + NFQA_TIMESTAMP = 4, + NFQA_IFINDEX_INDEV = 5, + NFQA_IFINDEX_OUTDEV = 6, + NFQA_IFINDEX_PHYSINDEV = 7, + NFQA_IFINDEX_PHYSOUTDEV = 8, + NFQA_HWADDR = 9, + NFQA_PAYLOAD = 10, + NFQA_CT = 11, + NFQA_CT_INFO = 12, + NFQA_CAP_LEN = 13, + NFQA_SKB_INFO = 14, + NFQA_EXP = 15, + NFQA_UID = 16, + NFQA_GID = 17, + NFQA_SECCTX = 18, + NFQA_VLAN = 19, + NFQA_L2HDR = 20, + NFQA_PRIORITY = 21, + NFQA_CGROUP_CLASSID = 22, + __NFQA_MAX = 23, +}; + +struct nfqnl_msg_verdict_hdr { + __be32 verdict; + __be32 id; +}; + +enum nfqnl_msg_config_cmds { + NFQNL_CFG_CMD_NONE = 0, + NFQNL_CFG_CMD_BIND = 1, + NFQNL_CFG_CMD_UNBIND = 2, + NFQNL_CFG_CMD_PF_BIND = 3, + NFQNL_CFG_CMD_PF_UNBIND = 4, +}; + +struct nfqnl_msg_config_cmd { + __u8 command; + __u8 _pad; + __be16 pf; +}; + +enum nfqnl_config_mode { + NFQNL_COPY_NONE = 0, + NFQNL_COPY_META = 1, + NFQNL_COPY_PACKET = 2, +}; + +struct nfqnl_msg_config_params { + __be32 copy_range; + __u8 copy_mode; +} __attribute__((packed)); + +enum nfqnl_attr_config { + NFQA_CFG_UNSPEC = 0, + NFQA_CFG_CMD = 1, + NFQA_CFG_PARAMS = 2, + NFQA_CFG_QUEUE_MAXLEN = 3, + NFQA_CFG_MASK = 4, + NFQA_CFG_FLAGS = 5, + __NFQA_CFG_MAX = 6, +}; + +struct nfqnl_instance { + struct hlist_node hlist; + struct callback_head rcu; + u32 peer_portid; + unsigned int queue_maxlen; + unsigned int copy_range; + unsigned int queue_dropped; + unsigned int queue_user_dropped; + u_int16_t queue_num; + u_int8_t copy_mode; + u_int32_t flags; + long: 0; + spinlock_t lock; + unsigned int queue_total; + unsigned int id_sequence; + struct list_head queue_list; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, long unsigned int); + +struct nfnl_queue_net { + spinlock_t instances_lock; + struct hlist_head instance_table[16]; +}; + +struct iter_state { + struct seq_net_private p; + unsigned int bucket; +}; + +enum nft_exthdr_flags { + NFT_EXTHDR_F_PRESENT = 1, +}; + +enum nft_exthdr_op { + NFT_EXTHDR_OP_IPV6 = 0, + NFT_EXTHDR_OP_TCPOPT = 1, + NFT_EXTHDR_OP_IPV4 = 2, + NFT_EXTHDR_OP_SCTP = 3, + NFT_EXTHDR_OP_DCCP = 4, + __NFT_EXTHDR_OP_MAX = 5, +}; + +enum nft_exthdr_attributes { + NFTA_EXTHDR_UNSPEC = 0, + NFTA_EXTHDR_DREG = 1, + NFTA_EXTHDR_TYPE = 2, + NFTA_EXTHDR_OFFSET = 3, + NFTA_EXTHDR_LEN = 4, + NFTA_EXTHDR_FLAGS = 5, + NFTA_EXTHDR_OP = 6, + NFTA_EXTHDR_SREG = 7, + __NFTA_EXTHDR_MAX = 8, +}; + +struct dccp_hdr { + __be16 dccph_sport; + __be16 dccph_dport; + __u8 dccph_doff; + __u8 dccph_ccval: 4; + __u8 dccph_cscov: 4; + __sum16 dccph_checksum; + __u8 dccph_reserved: 3; + __u8 dccph_type: 4; + __u8 dccph_x: 1; + __u8 dccph_seq2; + __be16 dccph_seq; +}; + +enum dccp_pkt_type { + DCCP_PKT_REQUEST = 0, + DCCP_PKT_RESPONSE = 1, + DCCP_PKT_DATA = 2, + DCCP_PKT_ACK = 3, + DCCP_PKT_DATAACK = 4, + DCCP_PKT_CLOSEREQ = 5, + DCCP_PKT_CLOSE = 6, + DCCP_PKT_RESET = 7, + DCCP_PKT_SYNC = 8, + DCCP_PKT_SYNCACK = 9, + DCCP_PKT_INVALID = 10, +}; + +enum { + DCCPO_PADDING = 0, + DCCPO_MANDATORY = 1, + DCCPO_MIN_RESERVED = 3, + DCCPO_MAX_RESERVED = 31, + DCCPO_CHANGE_L = 32, + DCCPO_CONFIRM_L = 33, + DCCPO_CHANGE_R = 34, + DCCPO_CONFIRM_R = 35, + DCCPO_NDP_COUNT = 37, + DCCPO_ACK_VECTOR_0 = 38, + DCCPO_ACK_VECTOR_1 = 39, + DCCPO_TIMESTAMP = 41, + DCCPO_TIMESTAMP_ECHO = 42, + DCCPO_ELAPSED_TIME = 43, + DCCPO_MAX = 45, + DCCPO_MIN_RX_CCID_SPECIFIC = 128, + DCCPO_MAX_RX_CCID_SPECIFIC = 191, + 
DCCPO_MIN_TX_CCID_SPECIFIC = 192, + DCCPO_MAX_TX_CCID_SPECIFIC = 255, +}; + +struct sctp_chunkhdr { + __u8 type; + __u8 flags; + __be16 length; +}; + +struct nft_exthdr { + u8 type; + u8 offset; + u8 len; + u8 op; + u8 dreg; + u8 sreg; + u8 flags; +}; + +enum sk_action { + SK_DROP = 0, + SK_PASS = 1, +}; + +typedef u32 inet_ehashfn_t(const struct net *, const __be32, const __u16, const __be32, const __be16); + +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + __be16 sport; + u16 dport; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + struct sock *selected_sk; + u32 ingress_ifindex; + bool no_reuseport; +}; + +enum lwtunnel_encap_types { + LWTUNNEL_ENCAP_NONE = 0, + LWTUNNEL_ENCAP_MPLS = 1, + LWTUNNEL_ENCAP_IP = 2, + LWTUNNEL_ENCAP_ILA = 3, + LWTUNNEL_ENCAP_IP6 = 4, + LWTUNNEL_ENCAP_SEG6 = 5, + LWTUNNEL_ENCAP_BPF = 6, + LWTUNNEL_ENCAP_SEG6_LOCAL = 7, + LWTUNNEL_ENCAP_RPL = 8, + LWTUNNEL_ENCAP_IOAM6 = 9, + LWTUNNEL_ENCAP_XFRM = 10, + __LWTUNNEL_ENCAP_MAX = 11, +}; + +enum lwtunnel_ip_t { + LWTUNNEL_IP_UNSPEC = 0, + LWTUNNEL_IP_ID = 1, + LWTUNNEL_IP_DST = 2, + LWTUNNEL_IP_SRC = 3, + LWTUNNEL_IP_TTL = 4, + LWTUNNEL_IP_TOS = 5, + LWTUNNEL_IP_FLAGS = 6, + LWTUNNEL_IP_PAD = 7, + LWTUNNEL_IP_OPTS = 8, + __LWTUNNEL_IP_MAX = 9, +}; + +enum lwtunnel_ip6_t { + LWTUNNEL_IP6_UNSPEC = 0, + LWTUNNEL_IP6_ID = 1, + LWTUNNEL_IP6_DST = 2, + LWTUNNEL_IP6_SRC = 3, + LWTUNNEL_IP6_HOPLIMIT = 4, + LWTUNNEL_IP6_TC = 5, + LWTUNNEL_IP6_FLAGS = 6, + LWTUNNEL_IP6_PAD = 7, + LWTUNNEL_IP6_OPTS = 8, + __LWTUNNEL_IP6_MAX = 9, +}; + +enum { + LWTUNNEL_IP_OPTS_UNSPEC = 0, + LWTUNNEL_IP_OPTS_GENEVE = 1, + LWTUNNEL_IP_OPTS_VXLAN = 2, + LWTUNNEL_IP_OPTS_ERSPAN = 3, + __LWTUNNEL_IP_OPTS_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0, + LWTUNNEL_IP_OPT_GENEVE_CLASS = 1, + LWTUNNEL_IP_OPT_GENEVE_TYPE = 2, + LWTUNNEL_IP_OPT_GENEVE_DATA = 3, + __LWTUNNEL_IP_OPT_GENEVE_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_VXLAN_GBP = 1, + __LWTUNNEL_IP_OPT_VXLAN_MAX = 2, +}; + +enum { + LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_ERSPAN_VER = 1, + LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2, + LWTUNNEL_IP_OPT_ERSPAN_DIR = 3, + LWTUNNEL_IP_OPT_ERSPAN_HWID = 4, + __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5, +}; + +struct lwtunnel_encap_ops { + int (*build_state)(struct net *, struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *); + void (*destroy_state)(struct lwtunnel_state *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + int (*input)(struct sk_buff *); + int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); + int (*get_encap_size)(struct lwtunnel_state *); + int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); + int (*xmit)(struct sk_buff *); + struct module *owner; +}; + +enum { + IFLA_IPTUN_UNSPEC = 0, + IFLA_IPTUN_LINK = 1, + IFLA_IPTUN_LOCAL = 2, + IFLA_IPTUN_REMOTE = 3, + IFLA_IPTUN_TTL = 4, + IFLA_IPTUN_TOS = 5, + IFLA_IPTUN_ENCAP_LIMIT = 6, + IFLA_IPTUN_FLOWINFO = 7, + IFLA_IPTUN_FLAGS = 8, + IFLA_IPTUN_PROTO = 9, + IFLA_IPTUN_PMTUDISC = 10, + IFLA_IPTUN_6RD_PREFIX = 11, + IFLA_IPTUN_6RD_RELAY_PREFIX = 12, + IFLA_IPTUN_6RD_PREFIXLEN = 13, + IFLA_IPTUN_6RD_RELAY_PREFIXLEN = 14, + IFLA_IPTUN_ENCAP_TYPE = 15, + IFLA_IPTUN_ENCAP_FLAGS = 16, + IFLA_IPTUN_ENCAP_SPORT = 17, + IFLA_IPTUN_ENCAP_DPORT = 18, + IFLA_IPTUN_COLLECT_METADATA = 19, + IFLA_IPTUN_FWMARK = 20, + __IFLA_IPTUN_MAX = 21, +}; + +struct ip_tunnel_encap_ops { + 
size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); + int (*err_handler)(struct sk_buff *, u32); +}; + +struct ip6_tnl_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); +}; + +struct geneve_opt { + __be16 opt_class; + u8 type; + u8 r1: 1; + u8 r2: 1; + u8 r3: 1; + u8 length: 5; + u8 opt_data[0]; +}; + +struct vxlan_metadata { + u32 gbp; +}; + +struct erspan_md2 { + __be32 timestamp; + __be16 sgt; + __u8 p: 1; + __u8 ft: 5; + __u8 hwid_upper: 2; + __u8 hwid: 4; + __u8 dir: 1; + __u8 gra: 2; + __u8 o: 1; +}; + +struct erspan_metadata { + int version; + union { + __be32 index; + struct erspan_md2 md2; + } u; +}; + +typedef u64 (*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32); + +struct bpf_struct_ops_tcp_congestion_ops { + struct bpf_struct_ops_common_value common; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct tcp_congestion_ops data; +}; + +struct seg6_pernet_data { + struct mutex lock; + struct in6_addr *tun_src; +}; + +struct ipv6_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u8 first_segment; + __u8 flags; + __u16 tag; + struct in6_addr segments[0]; +}; + +struct sr6_tlv { + __u8 type; + __u8 len; + __u8 data[0]; +}; + +enum { + SEG6_ATTR_UNSPEC = 0, + SEG6_ATTR_DST = 1, + SEG6_ATTR_DSTLEN = 2, + SEG6_ATTR_HMACKEYID = 3, + SEG6_ATTR_SECRET = 4, + SEG6_ATTR_SECRETLEN = 5, + SEG6_ATTR_ALGID = 6, + SEG6_ATTR_HMACINFO = 7, + __SEG6_ATTR_MAX = 8, +}; + +enum { + SEG6_CMD_UNSPEC = 0, + SEG6_CMD_SETHMAC = 1, + SEG6_CMD_DUMPHMAC = 2, + SEG6_CMD_SET_TUNSRC = 3, + SEG6_CMD_GET_TUNSRC = 4, + __SEG6_CMD_MAX = 5, +}; + +typedef u32 inet6_ehashfn_t(const struct net *, const struct in6_addr *, const u16, const struct in6_addr *, const __be16); + +typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, long unsigned int), long int (*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *)); + +struct compress_format { + unsigned char magic[2]; + const char *name; + decompress_fn decompressor; +}; + +enum { + ASSUME_PERFECT = 255, + ASSUME_VALID_DTB = 1, + ASSUME_VALID_INPUT = 2, + ASSUME_LATEST = 4, + ASSUME_NO_ROLLBACK = 8, + ASSUME_LIBFDT_ORDER = 16, + ASSUME_LIBFDT_FLAWLESS = 32, +}; + +struct fdt_errtabent { + const char *str; +}; + +struct word_at_a_time { + const long unsigned int high_bits; + const long unsigned int low_bits; +}; + +struct cacheinfo { + unsigned int id; + enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; + void *fw_token; + bool disable_sysfs; + void *priv; +}; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int per_cpu_data_slice_size; + unsigned int num_levels; + unsigned int num_leaves; + bool cpu_map_populated; + bool early_ci_levels; +}; + +typedef __s64 Elf64_Sxword; + +struct elf64_rela { + Elf64_Addr r_offset; + Elf64_Xword r_info; + Elf64_Sxword r_addend; +}; + +typedef struct elf64_rela Elf64_Rela; + +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short int contexts; + bool contexts_maxed; +}; + 
+struct cpu_fp_state { + struct user_fpsimd_state *st; + void *sve_state; + void *sme_state; + u64 *svcr; + u64 *fpmr; + unsigned int sve_vl; + unsigned int sme_vl; + enum fp_type *fp_type; + enum fp_type to_save; +}; + +struct vgic_reg_attr { + struct kvm_vcpu *vcpu; + gpa_t addr; +}; + +struct prctl_mm_map { + __u64 start_code; + __u64 end_code; + __u64 start_data; + __u64 end_data; + __u64 start_brk; + __u64 brk; + __u64 start_stack; + __u64 arg_start; + __u64 arg_end; + __u64 env_start; + __u64 env_end; + __u64 *auxv; + __u32 auxv_size; + __u32 exe_fd; +}; + +struct rlimit64 { + __u64 rlim_cur; + __u64 rlim_max; +}; + +struct tms { + __kernel_clock_t tms_utime; + __kernel_clock_t tms_stime; + __kernel_clock_t tms_cutime; + __kernel_clock_t tms_cstime; +}; + +struct getcpu_cache { + long unsigned int blob[16]; +}; + +enum con_flush_mode { + CONSOLE_FLUSH_PENDING = 0, + CONSOLE_REPLAY_ALL = 1, +}; + +enum kmsg_dump_reason { + KMSG_DUMP_UNDEF = 0, + KMSG_DUMP_PANIC = 1, + KMSG_DUMP_OOPS = 2, + KMSG_DUMP_EMERG = 3, + KMSG_DUMP_SHUTDOWN = 4, + KMSG_DUMP_MAX = 5, +}; + +struct kmsg_dump_iter { + u64 cur_seq; + u64 next_seq; +}; + +struct kmsg_dump_detail { + enum kmsg_dump_reason reason; + const char *description; +}; + +struct kmsg_dumper { + struct list_head list; + void (*dump)(struct kmsg_dumper *, struct kmsg_dump_detail *); + enum kmsg_dump_reason max_reason; + bool registered; +}; + +struct trace_event_raw_console { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_console { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_console)(void *, const char *, size_t); + +struct prb_reserved_entry { + struct printk_ringbuffer *rb; + long unsigned int irqflags; + long unsigned int id; + unsigned int text_space; +}; + +enum desc_state { + desc_miss = -1, + desc_reserved = 0, + desc_committed = 1, + desc_finalized = 2, + desc_reusable = 3, +}; + +struct console_cmdline { + char name[16]; + int index; + char devname[32]; + bool user_specified; + char *options; +}; + +enum printk_info_flags { + LOG_FORCE_CON = 1, + LOG_NEWLINE = 2, + LOG_CONT = 8, +}; + +enum devkmsg_log_bits { + __DEVKMSG_LOG_BIT_ON = 0, + __DEVKMSG_LOG_BIT_OFF = 1, + __DEVKMSG_LOG_BIT_LOCK = 2, +}; + +enum devkmsg_log_masks { + DEVKMSG_LOG_MASK_ON = 1, + DEVKMSG_LOG_MASK_OFF = 2, + DEVKMSG_LOG_MASK_LOCK = 4, +}; + +enum con_msg_format_flags { + MSG_FORMAT_DEFAULT = 0, + MSG_FORMAT_SYSLOG = 1, +}; + +struct latched_seq { + seqcount_latch_t latch; + u64 val[2]; +}; + +struct devkmsg_user { + atomic64_t seq; + struct ratelimit_state rs; + struct mutex lock; + struct printk_buffers pbufs; +}; + +struct clock_read_data { + u64 epoch_ns; + u64 epoch_cyc; + u64 sched_clock_mask; + u64 (*read_sched_clock)(); + u32 mult; + u32 shift; +}; + +struct clock_data { + seqcount_latch_t seq; + struct clock_read_data read_data[2]; + ktime_t wrap_kt; + long unsigned int rate; + u64 (*actual_read_sched_clock)(); +}; + +struct proc_timens_offset { + int clockid; + struct timespec64 val; +}; + +struct timens_offset { + s64 sec; + u64 nsec; +}; + +enum { + PROC_ROOT_INO = 1, + PROC_IPC_INIT_INO = 4026531839, + PROC_UTS_INIT_INO = 4026531838, + PROC_USER_INIT_INO = 4026531837, + PROC_PID_INIT_INO = 4026531836, + PROC_CGROUP_INIT_INO = 4026531835, + PROC_TIME_INIT_INO = 4026531834, +}; + +struct arch_vdso_time_data {}; + +struct vdso_timestamp { + u64 sec; + u64 nsec; +}; + +struct vdso_data { + u32 seq; + s32 clock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; + 
union { + struct vdso_timestamp basetime[12]; + struct timens_offset offset[12]; + }; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 hrtimer_res; + u32 __unused; + struct arch_vdso_time_data arch_data; +}; + +struct fgraph_ret_regs { + long unsigned int regs[8]; + long unsigned int fp; + long unsigned int __unused; +}; + +enum { + FTRACE_UPDATE_CALLS = 1, + FTRACE_DISABLE_CALLS = 2, + FTRACE_UPDATE_TRACE_FUNC = 4, + FTRACE_START_FUNC_RET = 8, + FTRACE_STOP_FUNC_RET = 16, + FTRACE_MAY_SLEEP = 32, +}; + +struct ftrace_ret_stack { + long unsigned int ret; + long unsigned int func; + long unsigned int fp; + long unsigned int *retp; +}; + +enum { + FGRAPH_TYPE_RESERVED = 0, + FGRAPH_TYPE_BITMAP = 1, + FGRAPH_TYPE_DATA = 2, +}; + +enum { + BTF_TRACING_TYPE_TASK = 0, + BTF_TRACING_TYPE_FILE = 1, + BTF_TRACING_TYPE_VMA = 2, + MAX_BTF_TRACING_TYPE = 3, +}; + +enum { + CSS_TASK_ITER_PROCS = 1, + CSS_TASK_ITER_THREADED = 2, + CSS_TASK_ITER_SKIPPED = 65536, +}; + +enum bpf_iter_feature { + BPF_ITER_RESCHED = 1, +}; + +struct mmap_unlock_irq_work { + struct irq_work irq_work; + struct mm_struct *mm; +}; + +struct bpf_iter_seq_task_common { + struct pid_namespace *ns; + enum bpf_iter_task_type type; + u32 pid; + u32 pid_visiting; +}; + +struct bpf_iter_seq_task_info { + struct bpf_iter_seq_task_common common; + u32 tid; +}; + +struct bpf_iter__task { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; +}; + +struct bpf_iter_seq_task_file_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + u32 tid; + u32 fd; +}; + +struct bpf_iter__task_file { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + u32 fd; + union { + struct file *file; + }; +}; + +struct bpf_iter_seq_task_vma_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + struct mm_struct *mm; + struct vm_area_struct *vma; + u32 tid; + long unsigned int prev_vm_start; + long unsigned int prev_vm_end; +}; + +enum bpf_task_vma_iter_find_op { + task_vma_iter_first_vma = 0, + task_vma_iter_next_vma = 1, + task_vma_iter_find_vma = 2, +}; + +struct bpf_iter__task_vma { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + union { + struct vm_area_struct *vma; + }; +}; + +typedef u64 (*btf_bpf_find_vma)(struct task_struct *, u64, bpf_callback_t, void *, u64); + +struct bpf_iter_task_vma_kern_data { + struct task_struct *task; + struct mm_struct *mm; + struct mmap_unlock_irq_work *work; + struct vma_iterator vmi; +}; + +struct bpf_iter_task_vma { + __u64 __opaque[1]; +}; + +struct bpf_iter_task_vma_kern { + struct bpf_iter_task_vma_kern_data *data; +}; + +struct bpf_iter_css_task { + __u64 __opaque[1]; +}; + +struct bpf_iter_css_task_kern { + struct css_task_iter *css_it; +}; + +struct bpf_iter_task { + __u64 __opaque[3]; +}; + +struct bpf_iter_task_kern { + struct task_struct *task; + struct task_struct *pos; + unsigned int flags; +}; + +enum { + BPF_TASK_ITER_ALL_PROCS = 0, + BPF_TASK_ITER_ALL_THREADS = 1, + BPF_TASK_ITER_PROC_THREADS = 2, +}; + +enum lru_status { + LRU_REMOVED = 0, + LRU_REMOVED_RETRY = 1, + LRU_ROTATE = 2, + LRU_SKIP = 3, + LRU_RETRY = 4, + LRU_STOP = 5, +}; + +struct list_lru_memcg { + struct callback_head rcu; + struct list_lru_one node[0]; +}; + +typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, void *); + +typedef void (*swap_r_func_t)(void *, void *, int, const void *); + +typedef int (*cmp_r_func_t)(const void *, const void *, const void 
*); + +union __u128_halves { + u128 full; + struct { + u64 low; + u64 high; + }; +}; + +typedef struct kmem_cache *kmem_buckets[14]; + +struct slabobj_ext { + struct obj_cgroup *objcg; +}; + +typedef u128 freelist_full_t; + +typedef union { + struct { + void *freelist; + long unsigned int counter; + }; + freelist_full_t full; +} freelist_aba_t; + +struct slab { + long unsigned int __page_flags; + struct kmem_cache *slab_cache; + union { + struct { + union { + struct list_head slab_list; + struct { + struct slab *next; + int slabs; + }; + }; + union { + struct { + void *freelist; + union { + long unsigned int counters; + struct { + unsigned int inuse: 16; + unsigned int objects: 15; + unsigned int frozen: 1; + }; + }; + }; + freelist_aba_t freelist_counter; + }; + }; + struct callback_head callback_head; + }; + unsigned int __page_type; + atomic_t __page_refcount; + long unsigned int obj_exts; +}; + +struct kmem_cache_cpu { + union { + struct { + void **freelist; + long unsigned int tid; + }; + freelist_aba_t freelist_tid; + }; + struct slab *slab; + struct slab *partial; + local_lock_t lock; +}; + +struct kmem_cache_node { + spinlock_t list_lock; + long unsigned int nr_partial; + struct list_head partial; + atomic_long_t nr_slabs; + atomic_long_t total_objects; + struct list_head full; +}; + +enum slab_state { + DOWN = 0, + PARTIAL = 1, + UP = 2, + FULL = 3, +}; + +struct slabinfo { + long unsigned int active_objs; + long unsigned int num_objs; + long unsigned int active_slabs; + long unsigned int num_slabs; + long unsigned int shared_avail; + unsigned int limit; + unsigned int batchcount; + unsigned int shared; + unsigned int objects_per_slab; + unsigned int cache_order; +}; + +struct kmem_obj_info { + void *kp_ptr; + struct slab *kp_slab; + void *kp_objp; + long unsigned int kp_data_offset; + struct kmem_cache *kp_slab_cache; + void *kp_ret; + void *kp_stack[16]; + void *kp_free_stack[16]; +}; + +typedef u32 depot_stack_handle_t; + +struct memory_notify { + long unsigned int altmap_start_pfn; + long unsigned int altmap_nr_pages; + long unsigned int start_pfn; + long unsigned int nr_pages; + int status_change_nid_normal; + int status_change_nid; +}; + +struct partial_context { + gfp_t flags; + unsigned int orig_size; + void *object; +}; + +struct track { + long unsigned int addr; + depot_stack_handle_t handle; + int cpu; + int pid; + long unsigned int when; +}; + +enum track_item { + TRACK_ALLOC = 0, + TRACK_FREE = 1, +}; + +enum stat_item { + ALLOC_FASTPATH = 0, + ALLOC_SLOWPATH = 1, + FREE_FASTPATH = 2, + FREE_SLOWPATH = 3, + FREE_FROZEN = 4, + FREE_ADD_PARTIAL = 5, + FREE_REMOVE_PARTIAL = 6, + ALLOC_FROM_PARTIAL = 7, + ALLOC_SLAB = 8, + ALLOC_REFILL = 9, + ALLOC_NODE_MISMATCH = 10, + FREE_SLAB = 11, + CPUSLAB_FLUSH = 12, + DEACTIVATE_FULL = 13, + DEACTIVATE_EMPTY = 14, + DEACTIVATE_TO_HEAD = 15, + DEACTIVATE_TO_TAIL = 16, + DEACTIVATE_REMOTE_FREES = 17, + DEACTIVATE_BYPASS = 18, + ORDER_FALLBACK = 19, + CMPXCHG_DOUBLE_CPU_FAIL = 20, + CMPXCHG_DOUBLE_FAIL = 21, + CPU_PARTIAL_ALLOC = 22, + CPU_PARTIAL_FREE = 23, + CPU_PARTIAL_NODE = 24, + CPU_PARTIAL_DRAIN = 25, + NR_SLUB_STAT_ITEMS = 26, +}; + +struct rcu_delayed_free { + struct callback_head head; + void *object; +}; + +struct slub_flush_work { + struct work_struct work; + struct kmem_cache *s; + bool skip; +}; + +struct detached_freelist { + struct slab *slab; + void *tail; + void *freelist; + int cnt; + struct kmem_cache *s; +}; + +struct location { + depot_stack_handle_t handle; + long unsigned int count; + long unsigned int addr; + 
long unsigned int waste; + long long int sum_time; + long int min_time; + long int max_time; + long int min_pid; + long int max_pid; + long unsigned int cpus[1]; + nodemask_t nodes; +}; + +struct loc_track { + long unsigned int max; + long unsigned int count; + struct location *loc; + loff_t idx; +}; + +enum slab_stat_type { + SL_ALL = 0, + SL_PARTIAL = 1, + SL_CPU = 2, + SL_OBJECTS = 3, + SL_TOTAL = 4, +}; + +struct slab_attribute { + struct attribute attr; + ssize_t (*show)(struct kmem_cache *, char *); + ssize_t (*store)(struct kmem_cache *, const char *, size_t); +}; + +struct saved_alias { + struct kmem_cache *s; + const char *name; + struct saved_alias *next; +}; + +typedef freelist_full_t pcp_op_T__; + +enum dentry_d_lock_class { + DENTRY_D_LOCK_NORMAL = 0, + DENTRY_D_LOCK_NESTED = 1, +}; + +struct tree_descr { + const char *name; + const struct file_operations *ops; + int mode; +}; + +struct simple_transaction_argresp { + ssize_t size; + char data[0]; +}; + +enum { + DIR_OFFSET_MIN = 2, +}; + +struct simple_attr { + int (*get)(void *, u64 *); + int (*set)(void *, u64); + char get_buf[24]; + char set_buf[24]; + void *data; + const char *fmt; + struct mutex mutex; +}; + +struct ext4_system_zone { + struct rb_node node; + ext4_fsblk_t start_blk; + unsigned int count; + u32 ino; +}; + +struct ext4_orphan_block_tail { + __le32 ob_magic; + __le32 ob_checksum; +}; + +typedef u32 unicode_t; + +struct utf8_table { + int cmask; + int cval; + int shift; + long int lmask; + long int lval; +}; + +struct name_snapshot { + struct qstr name; + unsigned char inline_name[40]; +}; + +typedef struct vfsmount * (*debugfs_automount_t)(struct dentry *, void *); + +struct debugfs_short_fops { + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + loff_t (*llseek)(struct file *, loff_t, int); +}; + +struct debugfs_cancellation { + struct list_head list; + void (*cancel)(struct dentry *, void *); + void *cancel_data; +}; + +struct debugfs_fsdata { + const struct file_operations *real_fops; + const struct debugfs_short_fops *short_fops; + union { + debugfs_automount_t automount; + struct { + refcount_t active_users; + struct completion active_users_drained; + struct mutex cancellations_mtx; + struct list_head cancellations; + }; + }; +}; + +struct debugfs_fs_info { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +enum { + Opt_uid = 0, + Opt_gid = 1, + Opt_mode = 2, + Opt_source = 3, +}; + +struct btrfs_tree_block_info { + struct btrfs_disk_key key; + __u8 level; +}; + +struct btrfs_extent_inline_ref { + __u8 type; + __le64 offset; +} __attribute__((packed)); + +struct ulist_iterator { + struct list_head *cur_list; +}; + +struct rb_simple_node { + struct rb_node rb_node; + u64 bytenr; +}; + +struct btrfs_backref_node; + +struct btrfs_backref_cache { + struct rb_root rb_root; + struct btrfs_backref_node *path[8]; + struct list_head pending[8]; + struct list_head leaves; + struct list_head changed; + struct list_head detached; + u64 last_trans; + int nr_nodes; + int nr_edges; + struct list_head pending_edge; + struct list_head useless_node; + struct btrfs_fs_info *fs_info; + bool is_reloc; +}; + +struct file_extent_cluster { + u64 start; + u64 end; + u64 boundary[128]; + unsigned int nr; + u64 owning_root; +}; + +struct mapping_tree { + struct rb_root rb_root; + spinlock_t lock; +}; + +enum reloc_stage { + MOVE_DATA_EXTENTS = 0, + UPDATE_DATA_PTRS = 1, +}; + +struct reloc_control { + struct btrfs_block_group 
*block_group; + struct btrfs_root *extent_root; + struct inode *data_inode; + struct btrfs_block_rsv *block_rsv; + struct btrfs_backref_cache backref_cache; + struct file_extent_cluster cluster; + struct extent_io_tree processed_blocks; + struct mapping_tree reloc_root_tree; + struct list_head reloc_roots; + struct list_head dirty_subvol_roots; + u64 merging_rsv_size; + u64 nodes_relocated; + u64 reserved_bytes; + u64 search_start; + u64 extents_found; + enum reloc_stage stage; + bool create_reloc_tree; + bool merge_reloc_tree; + bool found_file_extent; +}; + +enum btrfs_qgroup_mode { + BTRFS_QGROUP_MODE_DISABLED = 0, + BTRFS_QGROUP_MODE_FULL = 1, + BTRFS_QGROUP_MODE_SIMPLE = 2, +}; + +struct btrfs_backref_iter { + u64 bytenr; + struct btrfs_path *path; + struct btrfs_fs_info *fs_info; + struct btrfs_key cur_key; + u32 item_ptr; + u32 cur_ptr; + u32 end_ptr; +}; + +struct btrfs_backref_node { + struct { + struct rb_node rb_node; + u64 bytenr; + }; + u64 new_bytenr; + u64 owner; + struct list_head list; + struct list_head upper; + struct list_head lower; + struct btrfs_root *root; + struct extent_buffer *eb; + unsigned int level: 8; + unsigned int cowonly: 1; + unsigned int lowest: 1; + unsigned int locked: 1; + unsigned int processed: 1; + unsigned int checked: 1; + unsigned int pending: 1; + unsigned int detached: 1; + unsigned int is_reloc_root: 1; +}; + +struct btrfs_backref_edge { + struct list_head list[2]; + struct btrfs_backref_node *node[2]; +}; + +enum btrfs_inline_ref_type { + BTRFS_REF_TYPE_INVALID = 0, + BTRFS_REF_TYPE_BLOCK = 1, + BTRFS_REF_TYPE_DATA = 2, + BTRFS_REF_TYPE_ANY = 3, +}; + +struct mapping_node { + struct { + struct rb_node rb_node; + u64 bytenr; + }; + void *data; +}; + +struct tree_block { + struct { + struct rb_node rb_node; + u64 bytenr; + }; + u64 owner; + struct btrfs_key key; + u8 level; + bool key_ready; +}; + +enum blake2b_lengths { + BLAKE2B_BLOCK_SIZE = 128, + BLAKE2B_HASH_SIZE = 64, + BLAKE2B_KEY_SIZE = 64, + BLAKE2B_160_HASH_SIZE = 20, + BLAKE2B_256_HASH_SIZE = 32, + BLAKE2B_384_HASH_SIZE = 48, + BLAKE2B_512_HASH_SIZE = 64, +}; + +struct blake2b_state { + u64 h[8]; + u64 t[2]; + u64 f[2]; + u8 buf[128]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2b_iv { + BLAKE2B_IV0 = 7640891576956012808ULL, + BLAKE2B_IV1 = 13503953896175478587ULL, + BLAKE2B_IV2 = 4354685564936845355ULL, + BLAKE2B_IV3 = 11912009170470909681ULL, + BLAKE2B_IV4 = 5840696475078001361ULL, + BLAKE2B_IV5 = 11170449401992604703ULL, + BLAKE2B_IV6 = 2270897969802886507ULL, + BLAKE2B_IV7 = 6620516959819538809ULL, +}; + +typedef void (*blake2b_compress_t)(struct blake2b_state *, const u8 *, size_t, u32); + +struct blake2b_tfm_ctx { + u8 key[64]; + unsigned int keylen; +}; + +enum asymmetric_payload_bits { + asym_crypto = 0, + asym_subtype = 1, + asym_key_ids = 2, + asym_auth = 3, +}; + +struct asymmetric_key_ids { + void *id[3]; +}; + +typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); + +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = 65535, +}; + +enum { + BLK_MQ_NO_TAG = 4294967295, + BLK_MQ_TAG_MIN = 1, + BLK_MQ_TAG_MAX = 4294967294, +}; + +struct mq_inflight { + struct block_device *part; + unsigned int inflight[2]; +}; + +struct blk_rq_wait { + struct completion done; + blk_status_t ret; +}; + +struct blk_expired_data { + bool has_timedout_rq; + long unsigned int next; + long unsigned int timeout_start; +}; + +struct flush_busy_ctx_data { + struct blk_mq_hw_ctx *hctx; + struct list_head *list; +}; + +struct dispatch_rq_data { + 
struct blk_mq_hw_ctx *hctx; + struct request *rq; +}; + +enum prep_dispatch { + PREP_DISPATCH_OK = 0, + PREP_DISPATCH_NO_TAG = 1, + PREP_DISPATCH_NO_BUDGET = 2, +}; + +struct rq_iter_data { + struct blk_mq_hw_ctx *hctx; + bool has_rq; +}; + +struct blk_mq_qe_pair { + struct list_head node; + struct request_queue *q; + struct elevator_type *type; +}; + +struct io_cancel_data { + struct io_ring_ctx *ctx; + union { + u64 data; + struct file *file; + }; + u8 opcode; + u32 flags; + int seq; +}; + +struct io_timeout_data { + struct io_kiocb *req; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; + u32 flags; +}; + +struct io_timeout { + struct file *file; + u32 off; + u32 target_seq; + u32 repeats; + struct list_head list; + struct io_kiocb *head; + struct io_kiocb *prev; +}; + +struct io_timeout_rem { + struct file *file; + u64 addr; + struct timespec64 ts; + u32 flags; + bool ltimeout; +}; + +struct crypto_aes_ctx { + u32 key_enc[60]; + u32 key_dec[60]; + u32 key_length; +}; + +typedef enum { + trustInput = 0, + checkMaxSymbolValue = 1, +} HIST_checkInput_e; + +typedef ZSTD_ErrorCode ERR_enum; + +typedef unsigned char uint8_t___2; + +typedef signed char int8x16_t[16]; + +typedef unsigned char __Poly8_t; + +typedef __Poly8_t poly8x16_t[16]; + +typedef unsigned char uint8x16_t[16]; + +typedef uint8x16_t unative_t; + +struct irq_domain_chip_generic_info { + const char *name; + irq_flow_handler_t handler; + unsigned int irqs_per_chip; + unsigned int num_ct; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + int (*init)(struct irq_chip_generic *); + void (*exit)(struct irq_chip_generic *); +}; + +struct nmi_ctx { + u64 hcr; + unsigned int cnt; +}; + +typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); + +struct irq_domain_info { + struct fwnode_handle *fwnode; + unsigned int domain_flags; + unsigned int size; + irq_hw_number_t hwirq_max; + int direct_max; + unsigned int hwirq_base; + unsigned int virq_base; + enum irq_domain_bus_token bus_token; + const char *name_suffix; + const struct irq_domain_ops *ops; + void *host_data; + struct irq_domain *parent; + struct irq_domain_chip_generic_info *dgc_info; + int (*init)(struct irq_domain *); + void (*exit)(struct irq_domain *); +}; + +typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); + +enum gic_type { + GIC_V2 = 0, + GIC_V3 = 1, +}; + +struct gic_kvm_info { + enum gic_type type; + struct resource vcpu; + unsigned int maint_irq; + bool no_maint_irq_mask; + struct resource vctrl; + bool has_v4; + bool has_v4_1; + bool no_hw_deactivation; +}; + +struct partition_affinity { + cpumask_t mask; + void *partition_id; +}; + +struct redist_region { + void *redist_base; + phys_addr_t phys_base; + bool single_redist; +}; + +struct partition_desc; + +struct gic_chip_data { + struct fwnode_handle *fwnode; + phys_addr_t dist_phys_base; + void *dist_base; + struct redist_region *redist_regions; + struct rdists rdists; + struct irq_domain *domain; + u64 redist_stride; + u32 nr_redist_regions; + u64 flags; + bool has_rss; + unsigned int ppi_nr; + struct partition_desc **ppi_descs; +}; + +enum gic_intid_range { + SGI_RANGE = 0, + PPI_RANGE = 1, + SPI_RANGE = 2, + EPPI_RANGE = 3, + ESPI_RANGE = 4, + LPI_RANGE = 5, + __INVALID_RANGE__ = 6, +}; + +struct clk_div_table { + unsigned int val; + unsigned int div; +}; + +struct clk_divider { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u16 flags; + const struct clk_div_table *table; + spinlock_t 
*lock; +}; + +struct termios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; +}; + +struct termios2 { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct termio { + short unsigned int c_iflag; + short unsigned int c_oflag; + short unsigned int c_cflag; + short unsigned int c_lflag; + unsigned char c_line; + unsigned char c_cc[8]; +}; + +enum tty_flow_change { + TTY_FLOW_NO_CHANGE = 0, + TTY_THROTTLE_SAFE = 1, + TTY_UNTHROTTLE_SAFE = 2, +}; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(const struct bus_type *, char *); + ssize_t (*store)(const struct bus_type *, const char *, size_t); +}; + +struct subsys_private { + struct kset subsys; + struct kset *devices_kset; + struct list_head interfaces; + struct mutex mutex; + struct kset *drivers_kset; + struct klist klist_devices; + struct klist klist_drivers; + struct blocking_notifier_head bus_notifier; + unsigned int drivers_autoprobe: 1; + const struct bus_type *bus; + struct device *dev_root; + struct kset glue_dirs; + const struct class *class; + struct lock_class_key lock_key; +}; + +struct subsys_interface { + const char *name; + const struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *, struct subsys_interface *); + void (*remove_dev)(struct device *, struct subsys_interface *); +}; + +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +struct builtin_fw { + char *name; + void *data; + long unsigned int size; +}; + +struct scsi_varlen_cdb_hdr { + __u8 opcode; + __u8 control; + __u8 misc[5]; + __u8 additional_cdb_length; + __be16 service_action; +}; + +struct phy_setting { + u32 speed; + u8 duplex; + u8 bit; +}; + +struct e1000_hw_stats___2 { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_info___2 { + s32 (*get_invariants)(struct e1000_hw___2 *); + struct e1000_mac_operations___2 *mac_ops; + const struct e1000_phy_operations___2 *phy_ops; + struct e1000_nvm_operations___2 *nvm_ops; +}; + +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +struct i2c_algo_bit_data { + void *data; + void (*setsda)(void *, int); + void (*setscl)(void *, int); + int (*getsda)(void *); + int (*getscl)(void *); + int 
(*pre_xfer)(struct i2c_adapter *); + void (*post_xfer)(struct i2c_adapter *); + int udelay; + int timeout; + bool can_do_atomic; +}; + +struct vf_data_storage { + unsigned char vf_mac_addresses[6]; + u16 vf_mc_hashes[30]; + u16 num_vf_mc_hashes; + u32 flags; + long unsigned int last_nack; + u16 pf_vlan; + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; + bool trusted; +}; + +struct vf_mac_filter { + struct list_head l; + int vf; + bool free; + u8 vf_mac[6]; +}; + +enum igb_tx_buf_type { + IGB_TYPE_SKB = 0, + IGB_TYPE_XDP = 1, +}; + +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + long unsigned int time_stamp; + enum igb_tx_buf_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + dma_addr_t dma; + __u32 len; + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct page *page; + __u32 page_offset; + __u16 pagecnt_bias; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring; + +struct igb_ring_container { + struct igb_ring *ring; + unsigned int total_bytes; + unsigned int total_packets; + u16 work_limit; + u8 count; + u8 itr; +}; + +struct igb_q_vector; + +struct igb_ring { + struct igb_q_vector *q_vector; + struct net_device *netdev; + struct bpf_prog *xdp_prog; + struct device *dev; + union { + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; + long unsigned int flags; + void *tail; + dma_addr_t dma; + unsigned int size; + u16 count; + u8 queue_index; + u8 reg_idx; + bool launchtime_enable; + bool cbs_enable; + s32 idleslope; + s32 sendslope; + s32 hicredit; + s32 locredit; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + union { + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + struct { + struct sk_buff *skb; + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + }; + }; + long: 64; + long: 64; + long: 64; + long: 64; + struct xdp_rxq_info xdp_rxq; +}; + +struct igb_adapter; + +struct igb_q_vector { + struct igb_adapter *adapter; + int cpu; + u32 eims_value; + u16 itr_val; + u8 set_itr; + void *itr_register; + struct igb_ring_container rx; + struct igb_ring_container tx; + struct napi_struct napi; + struct callback_head rcu; + char name[25]; + long: 64; + long: 64; + struct igb_ring ring[0]; +}; + +struct hwmon_buff; + +struct igb_mac_addr; + +struct igb_adapter { + long unsigned int active_vlans[64]; + struct net_device *netdev; + struct bpf_prog *xdp_prog; + long unsigned int state; + unsigned int flags; + unsigned int num_q_vectors; + struct msix_entry msix_entries[10]; + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + int num_rx_queues; + struct igb_ring *rx_ring[16]; + u32 max_frame_size; + u32 min_frame_size; + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + u8 *io_addr; + struct work_struct reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + long unsigned int led_status; + struct pci_dev *pdev; + spinlock_t 
stats64_lock; + struct rtnl_link_stats64 stats64; + struct e1000_hw___2 hw; + struct e1000_hw_stats___2 stats; + struct e1000_phy_info___2 phy_info; + u32 test_icr; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + int msg_enable; + struct igb_q_vector *q_vector[8]; + u32 eims_enable_mask; + u32 eims_other; + u16 tx_ring_count; + u16 rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + long unsigned int ptp_tx_start; + long unsigned int last_rx_ptp_check; + long unsigned int last_rx_timestamp; + unsigned int ptp_flags; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + bool pps_sys_wrap_on; + struct ptp_pin_desc sdp_config[4]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[2]; + char fw_version[48]; + struct hwmon_buff *igb_hwmon_buff; + bool ets; + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[128]; + long unsigned int link_check_timeout; + int copper_tries; + struct e1000_info___2 ei; + u16 eee_advert; + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; + spinlock_t nfc_lock; + bool etype_bitmap[3]; + struct igb_mac_addr *mac_table; + struct vf_mac_filter vf_macs; + struct vf_mac_filter *vf_mac_list; + spinlock_t vfs_lock; + long: 64; + long: 64; +}; + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw___2 *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[13]; + struct hwmon_attr hwmon_list[12]; + unsigned int n_hwmon; +}; + +struct igb_mac_addr { + u8 addr[6]; + u8 queue; + u8 state; +}; + +enum dev_prop_type { + DEV_PROP_U8 = 0, + DEV_PROP_U16 = 1, + DEV_PROP_U32 = 2, + DEV_PROP_U64 = 3, + DEV_PROP_STRING = 4, + DEV_PROP_REF = 5, +}; + +struct property_entry { + const char *name; + size_t length; + bool is_inline; + enum dev_prop_type type; + union { + const void *pointer; + union { + u8 u8_data[8]; + u16 u16_data[4]; + u32 u32_data[2]; + u64 u64_data[1]; + const char *str[1]; + } value; + }; +}; + +struct software_node { + const char *name; + const struct software_node *parent; + const struct property_entry *properties; +}; + +struct input_event { + __kernel_ulong_t __sec; + __kernel_ulong_t __usec; + __u16 type; + __u16 code; + __s32 value; +}; + +struct hidraw_devinfo { + __u32 bustype; + __s16 vendor; + __s16 product; +}; + +struct hidraw_report { + __u8 *value; + int len; +}; + +struct hidraw_list { + struct hidraw_report buffer[64]; + int head; + int tail; + struct fasync_struct *fasync; + struct hidraw *hidraw; + struct list_head node; + struct mutex read_mutex; + bool revoked; +}; + +struct net_packet_attrs { + const unsigned char *src; + const unsigned char *dst; + u32 ip_src; + u32 ip_dst; + bool tcp; + u16 sport; + u16 dport; + int timeout; + int size; + int max_size; + u8 id; + u16 queue_mapping; +}; + +struct net_test_priv { + struct 
net_packet_attrs *packet; + struct packet_type pt; + struct completion comp; + int double_vlan; + int vlan_id; + int ok; +}; + +struct netsfhdr { + __be32 version; + __be64 magic; + u8 id; +} __attribute__((packed)); + +struct net_test { + char name[32]; + int (*fn)(struct net_device *); +}; + +enum { + ETHTOOL_A_STRING_UNSPEC = 0, + ETHTOOL_A_STRING_INDEX = 1, + ETHTOOL_A_STRING_VALUE = 2, + __ETHTOOL_A_STRING_CNT = 3, + ETHTOOL_A_STRING_MAX = 2, +}; + +enum { + ETHTOOL_A_STRINGS_UNSPEC = 0, + ETHTOOL_A_STRINGS_STRING = 1, + __ETHTOOL_A_STRINGS_CNT = 2, + ETHTOOL_A_STRINGS_MAX = 1, +}; + +enum { + ETHTOOL_A_STRINGSET_UNSPEC = 0, + ETHTOOL_A_STRINGSET_ID = 1, + ETHTOOL_A_STRINGSET_COUNT = 2, + ETHTOOL_A_STRINGSET_STRINGS = 3, + __ETHTOOL_A_STRINGSET_CNT = 4, + ETHTOOL_A_STRINGSET_MAX = 3, +}; + +enum { + ETHTOOL_A_STRINGSETS_UNSPEC = 0, + ETHTOOL_A_STRINGSETS_STRINGSET = 1, + __ETHTOOL_A_STRINGSETS_CNT = 2, + ETHTOOL_A_STRINGSETS_MAX = 1, +}; + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN = 0, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE = 1, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE = 2, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT = 3, +}; + +struct strset_info { + bool per_dev; + bool free_strings; + unsigned int count; + const char (*strings)[32]; +}; + +struct strset_req_info { + struct ethnl_req_info base; + u32 req_ids; + bool counts_only; +}; + +struct strset_reply_data { + struct ethnl_reply_data base; + struct strset_info sets[21]; +}; + +enum ethtool_reset_flags { + ETH_RESET_MGMT = 1, + ETH_RESET_IRQ = 2, + ETH_RESET_DMA = 4, + ETH_RESET_FILTER = 8, + ETH_RESET_OFFLOAD = 16, + ETH_RESET_MAC = 32, + ETH_RESET_PHY = 64, + ETH_RESET_RAM = 128, + ETH_RESET_AP = 256, + ETH_RESET_DEDICATED = 65535, + ETH_RESET_ALL = 4294967295, +}; + +struct ethnl_module_fw_flash_ntf_params { + u32 portid; + u32 seq; + bool closed_sock; +}; + +struct ethtool_module_fw_flash_params { + __be32 password; + u8 password_valid: 1; +}; + +struct ethtool_cmis_fw_update_params { + struct net_device *dev; + struct ethtool_module_fw_flash_params params; + struct ethnl_module_fw_flash_ntf_params ntf_params; + const struct firmware *fw; +}; + +struct ethtool_cmis_cdb { + u8 cmis_rev; + u8 read_write_len_ext; + u16 max_completion_time; +}; + +enum ethtool_cmis_cdb_cmd_id { + ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS = 0, + ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES = 64, + ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 65, + ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 257, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 259, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL = 260, + ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 263, + ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 265, + ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 266, +}; + +struct ethtool_cmis_cdb_request { + __be16 id; + union { + struct { + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[120]; + }; + struct { + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[120]; + } body; + }; + u8 *epl; +}; + +struct ethtool_cmis_cdb_cmd_args { + struct ethtool_cmis_cdb_request req; + u16 max_duration; + u8 read_write_len_ext; + u8 msleep_pre_rpl; + u8 rpl_exp_len; + u8 flags; + char *err_msg; +}; + +struct cmis_fw_update_fw_mng_features { + u8 start_cmd_payload_size; + u8 write_mechanism; + u16 max_duration_start; + u16 max_duration_write; + u16 max_duration_complete; +}; + +struct cmis_cdb_fw_mng_features_rpl { + u8 resv1; + u8 resv2; + u8 start_cmd_payload_size; + u8 resv3; + u8 read_write_len_ext; + u8 write_mechanism; + u8 resv4; + u8 resv5; + __be16 
max_duration_start; + __be16 resv6; + __be16 max_duration_write; + __be16 max_duration_complete; + __be16 resv7; +}; + +enum cmis_cdb_fw_write_mechanism { + CMIS_CDB_FW_WRITE_MECHANISM_NONE = 0, + CMIS_CDB_FW_WRITE_MECHANISM_LPL = 1, + CMIS_CDB_FW_WRITE_MECHANISM_EPL = 16, + CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 17, +}; + +struct cmis_cdb_start_fw_download_pl_h { + __be32 image_size; + __be32 resv1; +}; + +struct cmis_cdb_start_fw_download_pl { + union { + struct { + __be32 image_size; + __be32 resv1; + }; + struct cmis_cdb_start_fw_download_pl_h head; + }; + u8 vendor_data[112]; +}; + +struct cmis_cdb_write_fw_block_lpl_pl { + __be32 block_address; + u8 fw_block[116]; +}; + +struct cmis_cdb_write_fw_block_epl_pl { + u8 fw_block[2048]; +}; + +enum { + CMIS_MODULE_LOW_PWR = 1, + CMIS_MODULE_READY = 3, +}; + +struct cmis_cdb_run_fw_image_pl { + u8 resv1; + u8 image_to_run; + u16 delay_to_reset; +}; + +enum ip_conntrack_events { + IPCT_NEW = 0, + IPCT_RELATED = 1, + IPCT_DESTROY = 2, + IPCT_REPLY = 3, + IPCT_ASSURED = 4, + IPCT_PROTOINFO = 5, + IPCT_HELPER = 6, + IPCT_MARK = 7, + IPCT_SEQADJ = 8, + IPCT_NATSEQADJ = 8, + IPCT_SECMARK = 9, + IPCT_LABEL = 10, + IPCT_SYNPROXY = 11, + __IPCT_MAX = 12, +}; + +struct nf_nat_hook { + int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *); + void (*decode_session)(struct sk_buff *, struct flowi *); + void (*remove_nat_bysrc)(struct nf_conn *); +}; + +struct nf_conntrack_tuple_mask { + struct { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + } src; +}; + +struct nf_ct_iter_data { + struct net *net; + void *data; + u32 portid; + int report; +}; + +struct nf_conntrack_helper; + +struct nf_conntrack_expect { + struct hlist_node lnode; + struct hlist_node hnode; + struct nf_conntrack_tuple tuple; + struct nf_conntrack_tuple_mask mask; + refcount_t use; + unsigned int flags; + unsigned int class; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); + struct nf_conntrack_helper *helper; + struct nf_conn *master; + struct timer_list timeout; + union nf_inet_addr saved_addr; + union nf_conntrack_man_proto saved_proto; + enum ip_conntrack_dir dir; + struct callback_head rcu; +}; + +struct nf_conntrack_expect_policy; + +struct nf_conntrack_helper { + struct hlist_node hnode; + char name[16]; + refcount_t refcnt; + struct module *me; + const struct nf_conntrack_expect_policy *expect_policy; + struct nf_conntrack_tuple tuple; + int (*help)(struct sk_buff *, unsigned int, struct nf_conn *, enum ip_conntrack_info); + void (*destroy)(struct nf_conn *); + int (*from_nlattr)(struct nlattr *, struct nf_conn *); + int (*to_nlattr)(struct sk_buff *, const struct nf_conn *); + unsigned int expect_class_max; + unsigned int flags; + unsigned int queue_num; + u16 data_len; + char nat_mod_name[16]; +}; + +struct nf_conntrack_expect_policy { + unsigned int max_expected; + unsigned int timeout; + char name[16]; +}; + +struct nf_conn_help { + struct nf_conntrack_helper *helper; + struct hlist_head expectations; + u8 expecting[4]; + long: 0; + char data[32]; +}; + +struct nf_ct_helper_expectfn { + struct list_head head; + const char *name; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); +}; + +struct nf_ct_seqadj { + u32 correction_pos; + s32 offset_before; + s32 offset_after; +}; + +struct nf_conn_seqadj { + struct nf_ct_seqadj seq[2]; +}; + +struct nf_conn_tstamp { + u_int64_t start; + u_int64_t stop; +}; + +struct nf_conn_labels { + long unsigned int bits[2]; +}; + +struct nf_conn_synproxy { + u32 
isn; + u32 its; + u32 tsoff; +}; + +enum cntl_msg_types { + IPCTNL_MSG_CT_NEW = 0, + IPCTNL_MSG_CT_GET = 1, + IPCTNL_MSG_CT_DELETE = 2, + IPCTNL_MSG_CT_GET_CTRZERO = 3, + IPCTNL_MSG_CT_GET_STATS_CPU = 4, + IPCTNL_MSG_CT_GET_STATS = 5, + IPCTNL_MSG_CT_GET_DYING = 6, + IPCTNL_MSG_CT_GET_UNCONFIRMED = 7, + IPCTNL_MSG_MAX = 8, +}; + +enum ctnl_exp_msg_types { + IPCTNL_MSG_EXP_NEW = 0, + IPCTNL_MSG_EXP_GET = 1, + IPCTNL_MSG_EXP_DELETE = 2, + IPCTNL_MSG_EXP_GET_STATS_CPU = 3, + IPCTNL_MSG_EXP_MAX = 4, +}; + +enum ctattr_type { + CTA_UNSPEC = 0, + CTA_TUPLE_ORIG = 1, + CTA_TUPLE_REPLY = 2, + CTA_STATUS = 3, + CTA_PROTOINFO = 4, + CTA_HELP = 5, + CTA_NAT_SRC = 6, + CTA_TIMEOUT = 7, + CTA_MARK = 8, + CTA_COUNTERS_ORIG = 9, + CTA_COUNTERS_REPLY = 10, + CTA_USE = 11, + CTA_ID = 12, + CTA_NAT_DST = 13, + CTA_TUPLE_MASTER = 14, + CTA_SEQ_ADJ_ORIG = 15, + CTA_NAT_SEQ_ADJ_ORIG = 15, + CTA_SEQ_ADJ_REPLY = 16, + CTA_NAT_SEQ_ADJ_REPLY = 16, + CTA_SECMARK = 17, + CTA_ZONE = 18, + CTA_SECCTX = 19, + CTA_TIMESTAMP = 20, + CTA_MARK_MASK = 21, + CTA_LABELS = 22, + CTA_LABELS_MASK = 23, + CTA_SYNPROXY = 24, + CTA_FILTER = 25, + CTA_STATUS_MASK = 26, + __CTA_MAX = 27, +}; + +enum ctattr_tuple { + CTA_TUPLE_UNSPEC = 0, + CTA_TUPLE_IP = 1, + CTA_TUPLE_PROTO = 2, + CTA_TUPLE_ZONE = 3, + __CTA_TUPLE_MAX = 4, +}; + +enum ctattr_ip { + CTA_IP_UNSPEC = 0, + CTA_IP_V4_SRC = 1, + CTA_IP_V4_DST = 2, + CTA_IP_V6_SRC = 3, + CTA_IP_V6_DST = 4, + __CTA_IP_MAX = 5, +}; + +enum ctattr_l4proto { + CTA_PROTO_UNSPEC = 0, + CTA_PROTO_NUM = 1, + CTA_PROTO_SRC_PORT = 2, + CTA_PROTO_DST_PORT = 3, + CTA_PROTO_ICMP_ID = 4, + CTA_PROTO_ICMP_TYPE = 5, + CTA_PROTO_ICMP_CODE = 6, + CTA_PROTO_ICMPV6_ID = 7, + CTA_PROTO_ICMPV6_TYPE = 8, + CTA_PROTO_ICMPV6_CODE = 9, + __CTA_PROTO_MAX = 10, +}; + +enum ctattr_protoinfo { + CTA_PROTOINFO_UNSPEC = 0, + CTA_PROTOINFO_TCP = 1, + CTA_PROTOINFO_DCCP = 2, + CTA_PROTOINFO_SCTP = 3, + __CTA_PROTOINFO_MAX = 4, +}; + +enum ctattr_counters { + CTA_COUNTERS_UNSPEC = 0, + CTA_COUNTERS_PACKETS = 1, + CTA_COUNTERS_BYTES = 2, + CTA_COUNTERS32_PACKETS = 3, + CTA_COUNTERS32_BYTES = 4, + CTA_COUNTERS_PAD = 5, + __CTA_COUNTERS_MAX = 6, +}; + +enum ctattr_tstamp { + CTA_TIMESTAMP_UNSPEC = 0, + CTA_TIMESTAMP_START = 1, + CTA_TIMESTAMP_STOP = 2, + CTA_TIMESTAMP_PAD = 3, + __CTA_TIMESTAMP_MAX = 4, +}; + +enum ctattr_seqadj { + CTA_SEQADJ_UNSPEC = 0, + CTA_SEQADJ_CORRECTION_POS = 1, + CTA_SEQADJ_OFFSET_BEFORE = 2, + CTA_SEQADJ_OFFSET_AFTER = 3, + __CTA_SEQADJ_MAX = 4, +}; + +enum ctattr_synproxy { + CTA_SYNPROXY_UNSPEC = 0, + CTA_SYNPROXY_ISN = 1, + CTA_SYNPROXY_ITS = 2, + CTA_SYNPROXY_TSOFF = 3, + __CTA_SYNPROXY_MAX = 4, +}; + +enum ctattr_expect { + CTA_EXPECT_UNSPEC = 0, + CTA_EXPECT_MASTER = 1, + CTA_EXPECT_TUPLE = 2, + CTA_EXPECT_MASK = 3, + CTA_EXPECT_TIMEOUT = 4, + CTA_EXPECT_ID = 5, + CTA_EXPECT_HELP_NAME = 6, + CTA_EXPECT_ZONE = 7, + CTA_EXPECT_FLAGS = 8, + CTA_EXPECT_CLASS = 9, + CTA_EXPECT_NAT = 10, + CTA_EXPECT_FN = 11, + __CTA_EXPECT_MAX = 12, +}; + +enum ctattr_expect_nat { + CTA_EXPECT_NAT_UNSPEC = 0, + CTA_EXPECT_NAT_DIR = 1, + CTA_EXPECT_NAT_TUPLE = 2, + __CTA_EXPECT_NAT_MAX = 3, +}; + +enum ctattr_help { + CTA_HELP_UNSPEC = 0, + CTA_HELP_NAME = 1, + CTA_HELP_INFO = 2, + __CTA_HELP_MAX = 3, +}; + +enum ctattr_stats_cpu { + CTA_STATS_UNSPEC = 0, + CTA_STATS_SEARCHED = 1, + CTA_STATS_FOUND = 2, + CTA_STATS_NEW = 3, + CTA_STATS_INVALID = 4, + CTA_STATS_IGNORE = 5, + CTA_STATS_DELETE = 6, + CTA_STATS_DELETE_LIST = 7, + CTA_STATS_INSERT = 8, + CTA_STATS_INSERT_FAILED = 9, + CTA_STATS_DROP = 10, + 
CTA_STATS_EARLY_DROP = 11, + CTA_STATS_ERROR = 12, + CTA_STATS_SEARCH_RESTART = 13, + CTA_STATS_CLASH_RESOLVE = 14, + CTA_STATS_CHAIN_TOOLONG = 15, + __CTA_STATS_MAX = 16, +}; + +enum ctattr_stats_global { + CTA_STATS_GLOBAL_UNSPEC = 0, + CTA_STATS_GLOBAL_ENTRIES = 1, + CTA_STATS_GLOBAL_MAX_ENTRIES = 2, + __CTA_STATS_GLOBAL_MAX = 3, +}; + +enum ctattr_expect_stats { + CTA_STATS_EXP_UNSPEC = 0, + CTA_STATS_EXP_NEW = 1, + CTA_STATS_EXP_CREATE = 2, + CTA_STATS_EXP_DELETE = 3, + __CTA_STATS_EXP_MAX = 4, +}; + +enum ctattr_filter { + CTA_FILTER_UNSPEC = 0, + CTA_FILTER_ORIG_FLAGS = 1, + CTA_FILTER_REPLY_FLAGS = 2, + __CTA_FILTER_MAX = 3, +}; + +struct ctnetlink_list_dump_ctx { + struct nf_conn *last; + unsigned int cpu; + bool done; +}; + +struct ctnetlink_filter_u32 { + u32 val; + u32 mask; +}; + +struct ctnetlink_filter { + u8 family; + bool zone_filter; + u_int32_t orig_flags; + u_int32_t reply_flags; + struct nf_conntrack_tuple orig; + struct nf_conntrack_tuple reply; + struct nf_conntrack_zone zone; + struct ctnetlink_filter_u32 mark; + struct ctnetlink_filter_u32 status; +}; + +struct nf_nat_ipv4_range { + unsigned int flags; + __be32 min_ip; + __be32 max_ip; + union nf_conntrack_man_proto min; + union nf_conntrack_man_proto max; +}; + +struct nf_nat_ipv4_multi_range_compat { + unsigned int rangesize; + struct nf_nat_ipv4_range range[1]; +}; + +struct udp_seq_afinfo { + sa_family_t family; + struct udp_table *udp_table; +}; + +struct inet_protosw { + struct list_head list; + short unsigned int type; + short unsigned int protocol; + struct proto *prot; + const struct proto_ops *ops; + unsigned char flags; +}; + +struct xfrm_tunnel { + int (*handler)(struct sk_buff *); + int (*cb_handler)(struct sk_buff *, int); + int (*err_handler)(struct sk_buff *, u32); + struct xfrm_tunnel *next; + int priority; +}; + +enum flow_dissector_ctrl_flags { + FLOW_DIS_IS_FRAGMENT = 1, + FLOW_DIS_FIRST_FRAG = 2, + FLOW_DIS_F_TUNNEL_CSUM = 4, + FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = 8, + FLOW_DIS_F_TUNNEL_OAM = 16, + FLOW_DIS_F_TUNNEL_CRIT_OPT = 32, + FLOW_DIS_ENCAPSULATION = 64, +}; + +enum rtattr_type_t { + RTA_UNSPEC = 0, + RTA_DST = 1, + RTA_SRC = 2, + RTA_IIF = 3, + RTA_OIF = 4, + RTA_GATEWAY = 5, + RTA_PRIORITY = 6, + RTA_PREFSRC = 7, + RTA_METRICS = 8, + RTA_MULTIPATH = 9, + RTA_PROTOINFO = 10, + RTA_FLOW = 11, + RTA_CACHEINFO = 12, + RTA_SESSION = 13, + RTA_MP_ALGO = 14, + RTA_TABLE = 15, + RTA_MARK = 16, + RTA_MFC_STATS = 17, + RTA_VIA = 18, + RTA_NEWDST = 19, + RTA_PREF = 20, + RTA_ENCAP_TYPE = 21, + RTA_ENCAP = 22, + RTA_EXPIRES = 23, + RTA_PAD = 24, + RTA_UID = 25, + RTA_TTL_PROPAGATE = 26, + RTA_IP_PROTO = 27, + RTA_SPORT = 28, + RTA_DPORT = 29, + RTA_NH_ID = 30, + __RTA_MAX = 31, +}; + +struct rtnexthop { + short unsigned int rtnh_len; + unsigned char rtnh_flags; + unsigned char rtnh_hops; + int rtnh_ifindex; +}; + +struct uncached_list { + spinlock_t lock; + struct list_head head; +}; + +struct in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + __u32 rtmsg_type; + __u16 rtmsg_dst_len; + __u16 rtmsg_src_len; + __u32 rtmsg_metric; + long unsigned int rtmsg_info; + __u32 rtmsg_flags; + int rtmsg_ifindex; +}; + +struct fib6_config { + u32 fc_table; + u32 fc_metric; + int fc_dst_len; + int fc_src_len; + int fc_ifindex; + u32 fc_flags; + u32 fc_protocol; + u16 fc_type; + u16 fc_delete_all_nh: 1; + u16 fc_ignore_dev_down: 1; + u16 __unused: 14; + u32 fc_nh_id; + struct in6_addr fc_dst; + struct in6_addr fc_src; + struct in6_addr fc_prefsrc; + struct 
in6_addr fc_gateway; + long unsigned int fc_expires; + struct nlattr *fc_mx; + int fc_mx_len; + int fc_mp_len; + struct nlattr *fc_mp; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; + bool fc_is_fdb; +}; + +struct rt6_exception { + struct hlist_node hlist; + struct rt6_info *rt6i; + long unsigned int stamp; + struct callback_head rcu; +}; + +struct rd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + struct in6_addr dest; + __u8 opt[0]; +}; + +struct netevent_redirect { + struct dst_entry *old; + struct dst_entry *new; + struct neighbour *neigh; + const void *daddr; +}; + +enum netevent_notif_type { + NETEVENT_NEIGH_UPDATE = 1, + NETEVENT_REDIRECT = 2, + NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, + NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, + NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, + NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, +}; + +struct trace_event_raw_fib6_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[16]; + __u8 dst[16]; + u16 sport; + u16 dport; + u8 proto; + u8 rt_type; + char name[16]; + __u8 gw[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib6_table_lookup {}; + +typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, const struct fib6_result *, struct fib6_table *, const struct flowi6 *); + +enum rt6_nud_state { + RT6_NUD_FAIL_HARD = -3, + RT6_NUD_FAIL_PROBE = -2, + RT6_NUD_FAIL_DO_RR = -1, + RT6_NUD_SUCCEED = 1, +}; + +struct fib6_nh_dm_arg { + struct net *net; + const struct in6_addr *saddr; + int oif; + int flags; + struct fib6_nh *nh; +}; + +struct fib6_nh_frl_arg { + u32 flags; + int oif; + int strict; + int *mpri; + bool *do_rr; + struct fib6_nh *nh; +}; + +struct fib6_nh_excptn_arg { + struct rt6_info *rt; + int plen; +}; + +struct fib6_nh_match_arg { + const struct net_device *dev; + const struct in6_addr *gw; + struct fib6_nh *match; +}; + +struct fib6_nh_age_excptn_arg { + struct fib6_gc_args *gc_args; + long unsigned int now; +}; + +struct fib6_nh_rd_arg { + struct fib6_result *res; + struct flowi6 *fl6; + const struct in6_addr *gw; + struct rt6_info **ret; +}; + +struct ip6rd_flowi { + struct flowi6 fl6; + struct in6_addr gateway; +}; + +struct fib6_nh_del_cached_rt_arg { + struct fib6_config *cfg; + struct fib6_info *f6i; +}; + +struct arg_dev_net_ip { + struct net *net; + struct in6_addr *addr; +}; + +struct arg_netdev_event { + const struct net_device *dev; + union { + unsigned char nh_flags; + long unsigned int event; + }; +}; + +struct rt6_mtu_change_arg { + struct net_device *dev; + unsigned int mtu; + struct fib6_info *f6i; +}; + +struct rt6_nh { + struct fib6_info *fib6_info; + struct fib6_config r_cfg; + struct list_head next; +}; + +struct fib6_nh_exception_dump_walker { + struct rt6_rtnl_dump_arg *dump; + struct fib6_info *rt; + unsigned int flags; + unsigned int skip; + unsigned int count; +}; + +enum aarch64_insn_imm_type { + AARCH64_INSN_IMM_ADR = 0, + AARCH64_INSN_IMM_26 = 1, + AARCH64_INSN_IMM_19 = 2, + AARCH64_INSN_IMM_16 = 3, + AARCH64_INSN_IMM_14 = 4, + AARCH64_INSN_IMM_12 = 5, + AARCH64_INSN_IMM_9 = 6, + AARCH64_INSN_IMM_7 = 7, + AARCH64_INSN_IMM_6 = 8, + AARCH64_INSN_IMM_S = 9, + AARCH64_INSN_IMM_R = 10, + AARCH64_INSN_IMM_N = 11, + AARCH64_INSN_IMM_MAX = 12, +}; + +typedef __be64 fdt64_t; + +struct fdt_reserve_entry { + fdt64_t address; + fdt64_t size; +}; + +struct fdt_node_header { + fdt32_t tag; + char name[0]; +}; + +struct klist_waiter { + struct list_head list; + struct klist_node *node; + 
struct task_struct *process; + int woken; +}; + +typedef __u32 Elf32_Word; + +struct elf32_note { + Elf32_Word n_namesz; + Elf32_Word n_descsz; + Elf32_Word n_type; +}; + +struct tlb_inv_context { + struct kvm_s2_mmu *mmu; + long unsigned int flags; + u64 tcr; + u64 sctlr; +}; + +enum cc_attr { + CC_ATTR_MEM_ENCRYPT = 0, + CC_ATTR_HOST_MEM_ENCRYPT = 1, + CC_ATTR_GUEST_MEM_ENCRYPT = 2, + CC_ATTR_GUEST_STATE_ENCRYPT = 3, + CC_ATTR_GUEST_UNROLL_STRING_IO = 4, + CC_ATTR_GUEST_SEV_SNP = 5, + CC_ATTR_HOST_SEV_SNP = 6, +}; + +struct io_tlb_area { + long unsigned int used; + unsigned int index; + spinlock_t lock; +}; + +struct io_tlb_slot { + phys_addr_t orig_addr; + size_t alloc_size; + short unsigned int list; + short unsigned int pad_slots; +}; + +struct trace_event_raw_swiotlb_bounced { + struct trace_entry ent; + u32 __data_loc_dev_name; + u64 dma_mask; + dma_addr_t dev_addr; + size_t size; + bool force; + char __data[0]; +}; + +struct trace_event_data_offsets_swiotlb_bounced { + u32 dev_name; + const void *dev_name_ptr_; +}; + +typedef void (*btf_trace_swiotlb_bounced)(void *, struct device *, dma_addr_t, size_t); + +union futex_key { + struct { + u64 i_seq; + long unsigned int pgoff; + unsigned int offset; + } shared; + struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; + long unsigned int address; + unsigned int offset; + } private; + struct { + u64 ptr; + long unsigned int word; + unsigned int offset; + } both; +}; + +struct futex_pi_state { + struct list_head list; + struct rt_mutex_base pi_mutex; + struct task_struct *owner; + refcount_t refcount; + union futex_key key; +}; + +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; +}; + +struct futex_waitv { + __u64 val; + __u64 uaddr; + __u32 flags; + __u32 __reserved; +}; + +struct futex_hash_bucket { + atomic_t waiters; + spinlock_t lock; + struct plist_head chain; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct futex_q; + +typedef void futex_wake_fn(struct wake_q_head *, struct futex_q *); + +struct futex_q { + struct plist_node list; + struct task_struct *task; + spinlock_t *lock_ptr; + futex_wake_fn *wake; + void *wake_data; + union futex_key key; + struct futex_pi_state *pi_state; + struct rt_mutex_waiter *rt_waiter; + union futex_key *requeue_pi_key; + u32 bitset; + atomic_t requeue_state; +}; + +enum futex_access { + FUTEX_READ = 0, + FUTEX_WRITE = 1, +}; + +struct futex_vector { + struct futex_waitv w; + struct futex_q q; +}; + +struct trace_bprintk_fmt { + struct list_head list; + const char *fmt; +}; + +enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + 
BPF_PROG_BIND_MAP = 35, + BPF_TOKEN_CREATE = 36, + __MAX_BPF_CMD = 37, +}; + +enum bpf_perf_event_type { + BPF_PERF_EVENT_UNSPEC = 0, + BPF_PERF_EVENT_UPROBE = 1, + BPF_PERF_EVENT_URETPROBE = 2, + BPF_PERF_EVENT_KPROBE = 3, + BPF_PERF_EVENT_KRETPROBE = 4, + BPF_PERF_EVENT_TRACEPOINT = 5, + BPF_PERF_EVENT_EVENT = 6, +}; + +enum bpf_stats_type { + BPF_STATS_RUN_TIME = 0, +}; + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[8]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __u64 jited_prog_insns; + __u64 xlated_prog_insns; + __u64 load_time; + __u32 created_by_uid; + __u32 nr_map_ids; + __u64 map_ids; + char name[16]; + __u32 ifindex; + __u32 gpl_compatible: 1; + __u64 netns_dev; + __u64 netns_ino; + __u32 nr_jited_ksyms; + __u32 nr_jited_func_lens; + __u64 jited_ksyms; + __u64 jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __u64 func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __u64 line_info; + __u64 jited_line_info; + __u32 nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __u64 prog_tags; + __u64 run_time_ns; + __u64 run_cnt; + __u64 recursion_misses; + __u32 verified_insns; + __u32 attach_btf_obj_id; + __u32 attach_btf_id; +}; + +struct bpf_btf_info { + __u64 btf; + __u32 btf_size; + __u32 id; + __u64 name; + __u32 name_len; + __u32 kernel_btf; +}; + +enum bpf_task_fd_type { + BPF_FD_TYPE_RAW_TRACEPOINT = 0, + BPF_FD_TYPE_TRACEPOINT = 1, + BPF_FD_TYPE_KPROBE = 2, + BPF_FD_TYPE_KRETPROBE = 3, + BPF_FD_TYPE_UPROBE = 4, + BPF_FD_TYPE_URETPROBE = 5, +}; + +struct bpf_spin_lock { + __u32 val; +}; + +struct bpf_attach_target_info { + struct btf_func_model fmodel; + long int tgt_addr; + struct module *tgt_mod; + const char *tgt_name; + const struct btf_type *tgt_type; +}; + +struct bpf_tracing_link { + struct bpf_tramp_link link; + enum bpf_attach_type attach_type; + struct bpf_trampoline *trampoline; + struct bpf_prog *tgt_prog; +}; + +enum perf_bpf_event_type { + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX = 3, +}; + +enum bpf_audit { + BPF_AUDIT_LOAD = 0, + BPF_AUDIT_UNLOAD = 1, + BPF_AUDIT_MAX = 2, +}; + +struct bpf_prog_kstats { + u64 nsecs; + u64 cnt; + u64 misses; +}; + +struct bpf_perf_link { + struct bpf_link link; + struct file *perf_file; +}; + +typedef u64 (*btf_bpf_sys_bpf)(int, union bpf_attr *, u32); + +typedef u64 (*btf_bpf_sys_close)(u32); + +typedef u64 (*btf_bpf_kallsyms_lookup_name)(const char *, int, int, u64 *); + +struct audit_buffer; + +enum ctx_state { + CT_STATE_DISABLED = -1, + CT_STATE_KERNEL = 0, + CT_STATE_IDLE = 1, + CT_STATE_USER = 2, + CT_STATE_GUEST = 3, + CT_STATE_MAX = 4, +}; + +struct context_tracking { + atomic_t state; + long int nesting; + long int nmi_nesting; +}; + +struct trace_event_raw_mm_lru_insertion { + struct trace_entry ent; + struct folio *folio; + long unsigned int pfn; + enum lru_list lru; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_mm_lru_activate { + struct trace_entry ent; + struct folio *folio; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_lru_insertion {}; + +struct trace_event_data_offsets_mm_lru_activate {}; + +typedef void (*btf_trace_mm_lru_insertion)(void *, struct folio *); + +typedef void (*btf_trace_mm_lru_activate)(void *, struct folio *); + +struct cpu_fbatches { + local_lock_t lock; + struct folio_batch lru_add; + struct folio_batch lru_deactivate_file; + struct folio_batch lru_deactivate; + 
struct folio_batch lru_lazyfree; + struct folio_batch lru_activate; + local_lock_t lock_irq; + struct folio_batch lru_move_tail; +}; + +typedef void (*move_fn_t)(struct lruvec *, struct folio *); + +enum vmpressure_levels { + VMPRESSURE_LOW = 0, + VMPRESSURE_MEDIUM = 1, + VMPRESSURE_CRITICAL = 2, + VMPRESSURE_NUM_LEVELS = 3, +}; + +enum vmpressure_modes { + VMPRESSURE_NO_PASSTHROUGH = 0, + VMPRESSURE_HIERARCHY = 1, + VMPRESSURE_LOCAL = 2, + VMPRESSURE_NUM_MODES = 3, +}; + +struct eventfd_ctx; + +struct vmpressure_event { + struct eventfd_ctx *efd; + enum vmpressure_levels level; + enum vmpressure_modes mode; + struct list_head node; +}; + +struct inodes_stat_t { + long int nr_inodes; + long int nr_unused; + long int dummy[5]; +}; + +struct trace_event_raw_ctime { + struct trace_entry ent; + dev_t dev; + ino_t ino; + time64_t ctime_s; + u32 ctime_ns; + u32 gen; + char __data[0]; +}; + +struct trace_event_raw_ctime_ns_xchg { + struct trace_entry ent; + dev_t dev; + ino_t ino; + u32 gen; + u32 old; + u32 new; + u32 cur; + char __data[0]; +}; + +struct trace_event_raw_fill_mg_cmtime { + struct trace_entry ent; + dev_t dev; + ino_t ino; + time64_t ctime_s; + time64_t mtime_s; + u32 ctime_ns; + u32 mtime_ns; + u32 gen; + char __data[0]; +}; + +struct trace_event_data_offsets_ctime {}; + +struct trace_event_data_offsets_ctime_ns_xchg {}; + +struct trace_event_data_offsets_fill_mg_cmtime {}; + +typedef void (*btf_trace_inode_set_ctime_to_ts)(void *, struct inode *, struct timespec64 *); + +typedef void (*btf_trace_ctime_xchg_skip)(void *, struct inode *, struct timespec64 *); + +typedef void (*btf_trace_ctime_ns_xchg)(void *, struct inode *, u32, u32, u32); + +typedef void (*btf_trace_fill_mg_cmtime)(void *, struct inode *, struct timespec64 *, struct timespec64 *); + +struct epoll_event { + __poll_t events; + __u64 data; +}; + +struct epoll_params { + __u32 busy_poll_usecs; + __u16 busy_poll_budget; + __u8 prefer_busy_poll; + __u8 __pad; +}; + +struct epoll_filefd { + struct file *file; + int fd; +} __attribute__((packed)); + +struct epitem; + +struct eppoll_entry { + struct eppoll_entry *next; + struct epitem *base; + wait_queue_entry_t wait; + wait_queue_head_t *whead; +}; + +struct eventpoll; + +struct epitem { + union { + struct rb_node rbn; + struct callback_head rcu; + }; + struct list_head rdllink; + struct epitem *next; + struct epoll_filefd ffd; + bool dying; + struct eppoll_entry *pwqlist; + struct eventpoll *ep; + struct hlist_node fllink; + struct wakeup_source *ws; + struct epoll_event event; +}; + +struct eventpoll { + struct mutex mtx; + wait_queue_head_t wq; + wait_queue_head_t poll_wait; + struct list_head rdllist; + rwlock_t lock; + struct rb_root_cached rbr; + struct epitem *ovflist; + struct wakeup_source *ws; + struct user_struct *user; + struct file *file; + u64 gen; + struct hlist_head refs; + refcount_t refcount; + unsigned int napi_id; + u32 busy_poll_usecs; + u16 busy_poll_budget; + bool prefer_busy_poll; +}; + +struct ep_pqueue { + poll_table pt; + struct epitem *epi; +}; + +struct epitems_head { + struct hlist_head epitems; + struct epitems_head *next; +}; + +struct commit_header { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; + unsigned char h_chksum_type; + unsigned char h_chksum_size; + unsigned char h_padding[2]; + __be32 h_chksum[8]; + __be64 h_commit_sec; + __be32 h_commit_nsec; +}; + +struct journal_block_tag3_s { + __be32 t_blocknr; + __be32 t_flags; + __be32 t_blocknr_high; + __be32 t_checksum; +}; + +typedef struct journal_block_tag3_s 
journal_block_tag3_t; + +struct journal_block_tag_s { + __be32 t_blocknr; + __be16 t_checksum; + __be16 t_flags; + __be32 t_blocknr_high; +}; + +typedef struct journal_block_tag_s journal_block_tag_t; + +enum jbd2_shrink_type { + JBD2_SHRINK_DESTROY = 0, + JBD2_SHRINK_BUSY_STOP = 1, + JBD2_SHRINK_BUSY_SKIP = 2, +}; + +typedef s64 int64_t; + +struct fuse_attr { + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint32_t rdev; + uint32_t blksize; + uint32_t flags; +}; + +struct fuse_sx_time { + int64_t tv_sec; + uint32_t tv_nsec; + int32_t __reserved; +}; + +struct fuse_statx { + uint32_t mask; + uint32_t blksize; + uint64_t attributes; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint16_t mode; + uint16_t __spare0[1]; + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t attributes_mask; + struct fuse_sx_time atime; + struct fuse_sx_time btime; + struct fuse_sx_time ctime; + struct fuse_sx_time mtime; + uint32_t rdev_major; + uint32_t rdev_minor; + uint32_t dev_major; + uint32_t dev_minor; + uint64_t __spare2[14]; +}; + +struct fuse_entry_out { + uint64_t nodeid; + uint64_t generation; + uint64_t entry_valid; + uint64_t attr_valid; + uint32_t entry_valid_nsec; + uint32_t attr_valid_nsec; + struct fuse_attr attr; +}; + +struct fuse_forget_in { + uint64_t nlookup; +}; + +struct fuse_read_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t read_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t write_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_out { + uint32_t size; + uint32_t padding; +}; + +struct fuse_dirent { + uint64_t ino; + uint64_t off; + uint32_t namelen; + uint32_t type; + char name[0]; +}; + +struct fuse_direntplus { + struct fuse_entry_out entry_out; + struct fuse_dirent dirent; +}; + +struct fuse_folio_desc { + unsigned int length; + unsigned int offset; +}; + +struct fuse_args_pages { + struct fuse_args args; + struct folio **folios; + struct fuse_folio_desc *descs; + unsigned int num_folios; +}; + +struct fuse_io_priv { + struct kref refcnt; + int async; + spinlock_t lock; + unsigned int reqs; + ssize_t bytes; + size_t size; + __u64 offset; + bool write; + bool should_dirty; + int err; + struct kiocb *iocb; + struct completion *done; + bool blocking; +}; + +struct fuse_io_args { + union { + struct { + struct fuse_read_in in; + u64 attr_ver; + } read; + struct { + struct fuse_write_in in; + struct fuse_write_out out; + bool folio_locked; + } write; + }; + struct fuse_args_pages ap; + struct fuse_io_priv *io; + struct fuse_file *ff; +}; + +enum fuse_parse_result { + FOUND_ERR = -1, + FOUND_NONE = 0, + FOUND_SOME = 1, + FOUND_ALL = 2, +}; + +struct btrfs_ioctl_balance_args { + __u64 flags; + __u64 state; + struct btrfs_balance_args data; + struct btrfs_balance_args meta; + struct btrfs_balance_args sys; + struct btrfs_balance_progress stat; + __u64 unused[72]; +}; + +struct btrfs_ioctl_get_dev_stats { + __u64 devid; + __u64 nr_items; + __u64 flags; + __u64 values[5]; + __u64 unused[121]; +}; + +enum btrfs_err_code { + BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1, + BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET = 2, + BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET = 3, + BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET = 4, + BTRFS_ERROR_DEV_TGT_REPLACE = 
5, + BTRFS_ERROR_DEV_MISSING_NOT_FOUND = 6, + BTRFS_ERROR_DEV_ONLY_WRITABLE = 7, + BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS = 8, + BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET = 9, + BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET = 10, +}; + +struct btrfs_stripe { + __le64 devid; + __le64 offset; + __u8 dev_uuid[16]; +}; + +struct btrfs_chunk { + __le64 length; + __le64 owner; + __le64 stripe_len; + __le64 type; + __le32 io_align; + __le32 io_width; + __le32 sector_size; + __le16 num_stripes; + __le16 sub_stripes; + struct btrfs_stripe stripe; +}; + +struct btrfs_dev_extent { + __le64 chunk_tree; + __le64 chunk_objectid; + __le64 chunk_offset; + __le64 length; + __u8 chunk_tree_uuid[16]; +}; + +struct btrfs_disk_balance_args { + __le64 profiles; + union { + __le64 usage; + struct { + __le32 usage_min; + __le32 usage_max; + }; + }; + __le64 devid; + __le64 pstart; + __le64 pend; + __le64 vstart; + __le64 vend; + __le64 target; + __le64 flags; + union { + __le64 limit; + struct { + __le32 limit_min; + __le32 limit_max; + }; + }; + __le32 stripes_min; + __le32 stripes_max; + __le64 unused[6]; +}; + +struct btrfs_balance_item { + __le64 flags; + struct btrfs_disk_balance_args data; + struct btrfs_disk_balance_args meta; + struct btrfs_disk_balance_args sys; + __le64 unused[4]; +}; + +struct btrfs_dev_stats_item { + __le64 values[5]; +}; + +struct btrfs_discard_stripe { + struct btrfs_device *dev; + u64 physical; + u64 length; +}; + +struct btrfs_dev_lookup_args { + u64 devid; + u8 *uuid; + u8 *fsid; + bool missing; +}; + +struct btrfs_io_geometry { + u32 stripe_index; + u32 stripe_nr; + int mirror_num; + int num_stripes; + u64 stripe_offset; + u64 raid56_full_stripe_start; + int max_errors; + enum btrfs_map_op op; +}; + +struct alloc_chunk_ctl { + u64 start; + u64 type; + int num_stripes; + int sub_stripes; + int dev_stripes; + int devs_max; + int devs_min; + int devs_increment; + int ncopies; + int nparity; + u64 max_stripe_size; + u64 max_chunk_size; + u64 dev_extent_min; + u64 stripe_size; + u64 chunk_size; + int ndevs; +}; + +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[0]; +}; + +struct crypto_akcipher { + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct akcipher_alg { + int (*encrypt)(struct akcipher_request *); + int (*decrypt)(struct akcipher_request *); + int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int); + unsigned int (*max_size)(struct crypto_akcipher *); + int (*init)(struct crypto_akcipher *); + void (*exit)(struct crypto_akcipher *); + struct crypto_alg base; +}; + +struct akcipher_instance { + void (*free)(struct akcipher_instance *); + union { + struct { + char head[56]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +struct pkcs1pad_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct pkcs1pad_inst_ctx { + struct crypto_akcipher_spawn spawn; +}; + +struct pkcs1pad_request { + struct scatterlist in_sg[2]; + struct scatterlist out_sg[1]; + uint8_t *in_buf; + uint8_t *out_buf; + struct akcipher_request child_req; +}; + +struct chksum_ctx { + u32 key; +}; + +struct chksum_desc_ctx { + u32 crc; +}; + +struct trace_event_raw_block_buffer { + struct trace_entry ent; + dev_t dev; + sector_t sector; + size_t size; + char __data[0]; +}; + +struct 
trace_event_raw_block_rq_requeue { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + short unsigned int ioprio; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq_completion { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + int error; + short unsigned int ioprio; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + unsigned int bytes; + short unsigned int ioprio; + char rwbs[8]; + char comm[16]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_bio_complete { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_bio { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_plug { + struct trace_entry ent; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_unplug { + struct trace_entry ent; + int nr_rq; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_split { + struct trace_entry ent; + dev_t dev; + sector_t sector; + sector_t new_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_remap { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_rq_remap { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + unsigned int nr_bios; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_data_offsets_block_buffer {}; + +struct trace_event_data_offsets_block_rq_requeue { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_rq_completion { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_rq { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_bio_complete {}; + +struct trace_event_data_offsets_block_bio {}; + +struct trace_event_data_offsets_block_plug {}; + +struct trace_event_data_offsets_block_unplug {}; + +struct trace_event_data_offsets_block_split {}; + +struct trace_event_data_offsets_block_bio_remap {}; + +struct trace_event_data_offsets_block_rq_remap {}; + +typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_rq_requeue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_complete)(void *, struct request *, blk_status_t, unsigned int); + +typedef void (*btf_trace_block_rq_error)(void *, struct request *, blk_status_t, unsigned int); + +typedef void (*btf_trace_block_rq_insert)(void *, struct request *); + +typedef void (*btf_trace_block_rq_issue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_merge)(void *, struct request *); + +typedef void (*btf_trace_block_io_start)(void *, struct request *); + +typedef void (*btf_trace_block_io_done)(void *, struct request *); + +typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *); + +typedef void (*btf_trace_block_bio_bounce)(void *, struct bio *); + +typedef void 
(*btf_trace_block_bio_backmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_frontmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_queue)(void *, struct bio *); + +typedef void (*btf_trace_block_getrq)(void *, struct bio *); + +typedef void (*btf_trace_block_plug)(void *, struct request_queue *); + +typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool); + +typedef void (*btf_trace_block_split)(void *, struct bio *, unsigned int); + +typedef void (*btf_trace_block_bio_remap)(void *, struct bio *, dev_t, sector_t); + +typedef void (*btf_trace_block_rq_remap)(void *, struct request *, dev_t, sector_t); + +struct throtl_grp; + +struct throtl_qnode { + struct list_head node; + struct bio_list bios; + struct throtl_grp *tg; +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; + struct list_head queued[2]; + unsigned int nr_queued[2]; + struct rb_root_cached pending_tree; + unsigned int nr_pending; + long unsigned int first_pending_disptime; + struct timer_list pending_timer; +}; + +struct throtl_grp { + struct blkg_policy_data pd; + struct rb_node rb_node; + struct throtl_data *td; + struct throtl_service_queue service_queue; + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + long unsigned int disptime; + unsigned int flags; + bool has_rules_bps[2]; + bool has_rules_iops[2]; + uint64_t bps[2]; + unsigned int iops[2]; + uint64_t bytes_disp[2]; + unsigned int io_disp[2]; + uint64_t last_bytes_disp[2]; + unsigned int last_io_disp[2]; + long long int carryover_bytes[2]; + int carryover_ios[2]; + long unsigned int last_check_time; + long unsigned int slice_start[2]; + long unsigned int slice_end[2]; + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; +}; + +enum io_uring_msg_ring_flags { + IORING_MSG_DATA = 0, + IORING_MSG_SEND_FD = 1, +}; + +struct io_msg { + struct file *file; + struct file *src_file; + struct callback_head tw; + u64 user_data; + u32 len; + u32 cmd; + u32 src_fd; + union { + u32 dst_fd; + u32 cqe_flags; + }; + u32 flags; +}; + +enum devm_ioremap_type { + DEVM_IOREMAP = 0, + DEVM_IOREMAP_UC = 1, + DEVM_IOREMAP_WC = 2, + DEVM_IOREMAP_NP = 3, +}; + +struct arch_io_reserve_memtype_wc_devres { + resource_size_t start; + resource_size_t size; +}; + +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +struct dim_stats { + int ppms; + int bpms; + int epms; + int cpms; + int cpe_ratio; +}; + +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum dim_tune_state { + DIM_PARKING_ON_TOP = 0, + DIM_PARKING_TIRED = 1, + DIM_GOING_RIGHT = 2, + DIM_GOING_LEFT = 3, +}; + +union gic_base { + void *common_base; + void **percpu_base; +}; + +struct gic_chip_data___2 { + union gic_base dist_base; + union gic_base cpu_base; + void *raw_dist_base; + void *raw_cpu_base; + u32 percpu_offset; + u32 saved_spi_enable[32]; + u32 saved_spi_active[32]; + u32 saved_spi_conf[64]; + u32 saved_spi_target[255]; + u32 *saved_ppi_enable; + u32 *saved_ppi_active; + u32 *saved_ppi_conf; + struct irq_domain *domain; + unsigned int gic_irqs; +}; + +enum pci_dev_flags { + PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, + PCI_DEV_FLAGS_NO_D3 = 2, + PCI_DEV_FLAGS_ASSIGNED = 4, + PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, + 
PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, + PCI_DEV_FLAGS_NO_BUS_RESET = 64, + PCI_DEV_FLAGS_NO_PM_RESET = 128, + PCI_DEV_FLAGS_VPD_REF_F0 = 256, + PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, + PCI_DEV_FLAGS_NO_FLR_RESET = 1024, + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, + PCI_DEV_FLAGS_HAS_MSI_MASKING = 4096, +}; + +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = 1, + PCI_BUS_FLAGS_NO_MMRBC = 2, + PCI_BUS_FLAGS_NO_AERSID = 4, + PCI_BUS_FLAGS_NO_EXTCFG = 8, +}; + +enum pci_bus_speed { + PCI_SPEED_33MHz = 0, + PCI_SPEED_66MHz = 1, + PCI_SPEED_66MHz_PCIX = 2, + PCI_SPEED_100MHz_PCIX = 3, + PCI_SPEED_133MHz_PCIX = 4, + PCI_SPEED_66MHz_PCIX_ECC = 5, + PCI_SPEED_100MHz_PCIX_ECC = 6, + PCI_SPEED_133MHz_PCIX_ECC = 7, + PCI_SPEED_66MHz_PCIX_266 = 9, + PCI_SPEED_100MHz_PCIX_266 = 10, + PCI_SPEED_133MHz_PCIX_266 = 11, + AGP_UNKNOWN = 12, + AGP_1X = 13, + AGP_2X = 14, + AGP_4X = 15, + AGP_8X = 16, + PCI_SPEED_66MHz_PCIX_533 = 17, + PCI_SPEED_100MHz_PCIX_533 = 18, + PCI_SPEED_133MHz_PCIX_533 = 19, + PCIE_SPEED_2_5GT = 20, + PCIE_SPEED_5_0GT = 21, + PCIE_SPEED_8_0GT = 22, + PCIE_SPEED_16_0GT = 23, + PCIE_SPEED_32_0GT = 24, + PCIE_SPEED_64_0GT = 25, + PCI_SPEED_UNKNOWN = 255, +}; + +enum pcie_bus_config_types { + PCIE_BUS_TUNE_OFF = 0, + PCIE_BUS_DEFAULT = 1, + PCIE_BUS_SAFE = 2, + PCIE_BUS_PERFORMANCE = 3, + PCIE_BUS_PEER2PEER = 4, +}; + +struct pci_fixup { + u16 vendor; + u16 device; + u32 class; + unsigned int class_shift; + int hook_offset; +}; + +enum { + NVME_REG_CAP = 0, + NVME_REG_VS = 8, + NVME_REG_INTMS = 12, + NVME_REG_INTMC = 16, + NVME_REG_CC = 20, + NVME_REG_CSTS = 28, + NVME_REG_NSSR = 32, + NVME_REG_AQA = 36, + NVME_REG_ASQ = 40, + NVME_REG_ACQ = 48, + NVME_REG_CMBLOC = 56, + NVME_REG_CMBSZ = 60, + NVME_REG_BPINFO = 64, + NVME_REG_BPRSEL = 68, + NVME_REG_BPMBL = 72, + NVME_REG_CMBMSC = 80, + NVME_REG_CRTO = 104, + NVME_REG_PMRCAP = 3584, + NVME_REG_PMRCTL = 3588, + NVME_REG_PMRSTS = 3592, + NVME_REG_PMREBS = 3596, + NVME_REG_PMRSWTP = 3600, + NVME_REG_DBS = 4096, +}; + +enum { + NVME_CC_ENABLE = 1, + NVME_CC_EN_SHIFT = 0, + NVME_CC_CSS_SHIFT = 4, + NVME_CC_MPS_SHIFT = 7, + NVME_CC_AMS_SHIFT = 11, + NVME_CC_SHN_SHIFT = 14, + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_NVM = 0, + NVME_CC_CSS_CSI = 96, + NVME_CC_CSS_MASK = 112, + NVME_CC_AMS_RR = 0, + NVME_CC_AMS_WRRU = 2048, + NVME_CC_AMS_VS = 14336, + NVME_CC_SHN_NONE = 0, + NVME_CC_SHN_NORMAL = 16384, + NVME_CC_SHN_ABRUPT = 32768, + NVME_CC_SHN_MASK = 49152, + NVME_CC_IOSQES = 393216, + NVME_CC_IOCQES = 4194304, + NVME_CC_CRIME = 16777216, +}; + +enum { + NVME_CSTS_RDY = 1, + NVME_CSTS_CFS = 2, + NVME_CSTS_NSSRO = 16, + NVME_CSTS_PP = 32, + NVME_CSTS_SHST_NORMAL = 0, + NVME_CSTS_SHST_OCCUR = 4, + NVME_CSTS_SHST_CMPLT = 8, + NVME_CSTS_SHST_MASK = 12, +}; + +enum { + SWITCHTEC_GAS_MRPC_OFFSET = 0, + SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, + SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, + SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, + SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, + SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, + SWITCHTEC_GAS_NTB_OFFSET = 65536, + SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, +}; + +enum { + SWITCHTEC_NTB_REG_INFO_OFFSET = 0, + SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, + SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, +}; + +struct nt_partition_info { + u32 xlink_enabled; + u32 target_part_low; + u32 target_part_high; + u32 reserved; +}; + +struct ntb_info_regs { + u8 partition_count; + u8 partition_id; + u16 reserved1; + u64 ep_map; + u16 requester_id; + u16 reserved2; + u32 reserved3[4]; + struct nt_partition_info ntp_info[48]; +} 
__attribute__((packed)); + +struct ntb_ctrl_regs { + u32 partition_status; + u32 partition_op; + u32 partition_ctrl; + u32 bar_setup; + u32 bar_error; + u16 lut_table_entries; + u16 lut_table_offset; + u32 lut_error; + u16 req_id_table_size; + u16 req_id_table_offset; + u32 req_id_error; + u32 reserved1[7]; + struct { + u32 ctl; + u32 win_size; + u64 xlate_addr; + } bar_entry[6]; + struct { + u32 win_size; + u32 reserved[3]; + } bar_ext_entry[6]; + u32 reserved2[192]; + u32 req_id_table[512]; + u32 reserved3[256]; + u64 lut_entry[512]; +}; + +struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +struct pci_dev_reset_methods { + u16 vendor; + u16 device; + int (*reset)(struct pci_dev *, bool); +}; + +struct pci_dev_acs_enabled { + u16 vendor; + u16 device; + int (*acs_enabled)(struct pci_dev *, u16); +}; + +struct pci_dev_acs_ops { + u16 vendor; + u16 device; + int (*enable_acs)(struct pci_dev *); + int (*disable_acs_redir)(struct pci_dev *); +}; + +enum arm_smccc_conduit { + SMCCC_CONDUIT_NONE = 0, + SMCCC_CONDUIT_SMC = 1, + SMCCC_CONDUIT_HVC = 2, +}; + +struct timer_rand_state { + long unsigned int last_time; + long int last_delta; + long int last_delta2; +}; + +enum chacha_constants { + CHACHA_CONSTANT_EXPA = 1634760805, + CHACHA_CONSTANT_ND_3 = 857760878, + CHACHA_CONSTANT_2_BY = 2036477234, + CHACHA_CONSTANT_TE_K = 1797285236, +}; + +enum blake2s_iv { + BLAKE2S_IV0 = 1779033703, + BLAKE2S_IV1 = 3144134277, + BLAKE2S_IV2 = 1013904242, + BLAKE2S_IV3 = 2773480762, + BLAKE2S_IV4 = 1359893119, + BLAKE2S_IV5 = 2600822924, + BLAKE2S_IV6 = 528734635, + BLAKE2S_IV7 = 1541459225, +}; + +struct vdso_rng_data { + u64 generation; + u8 is_ready; +}; + +enum { + CRNG_EMPTY = 0, + CRNG_EARLY = 1, + CRNG_READY = 2, +}; + +enum { + CRNG_RESEED_START_INTERVAL = 1000, + CRNG_RESEED_INTERVAL = 60000, +}; + +struct crng { + u8 key[32]; + long unsigned int generation; + local_lock_t lock; +}; + +struct batch_u8 { + u8 entropy[96]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u16 { + u16 entropy[48]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u32 { + u32 entropy[24]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u64 { + u64 entropy[12]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +enum { + POOL_BITS = 256, + POOL_READY_BITS = 256, + POOL_EARLY_BITS = 128, +}; + +struct fast_pool { + long unsigned int pool[4]; + long unsigned int last; + unsigned int count; + struct timer_list mix; +}; + +struct entropy_timer_state { + long unsigned int entropy; + struct timer_list timer; + atomic_t samples; + unsigned int samples_per_bit; +}; + +enum { + NUM_TRIAL_SAMPLES = 8192, + MAX_SAMPLES_PER_BIT = 66, +}; + +enum { + MIX_INFLIGHT = 2147483648, +}; + +enum ata_quirks { + __ATA_QUIRK_DIAGNOSTIC = 0, + __ATA_QUIRK_NODMA = 1, + __ATA_QUIRK_NONCQ = 2, + __ATA_QUIRK_MAX_SEC_128 = 3, + __ATA_QUIRK_BROKEN_HPA = 4, + __ATA_QUIRK_DISABLE = 5, + __ATA_QUIRK_HPA_SIZE = 6, + __ATA_QUIRK_IVB = 7, + __ATA_QUIRK_STUCK_ERR = 8, + __ATA_QUIRK_BRIDGE_OK = 9, + __ATA_QUIRK_ATAPI_MOD16_DMA = 10, + __ATA_QUIRK_FIRMWARE_WARN = 11, + __ATA_QUIRK_1_5_GBPS = 12, + __ATA_QUIRK_NOSETXFER = 13, + __ATA_QUIRK_BROKEN_FPDMA_AA = 14, + __ATA_QUIRK_DUMP_ID = 15, + __ATA_QUIRK_MAX_SEC_LBA48 = 16, + 
__ATA_QUIRK_ATAPI_DMADIR = 17, + __ATA_QUIRK_NO_NCQ_TRIM = 18, + __ATA_QUIRK_NOLPM = 19, + __ATA_QUIRK_WD_BROKEN_LPM = 20, + __ATA_QUIRK_ZERO_AFTER_TRIM = 21, + __ATA_QUIRK_NO_DMA_LOG = 22, + __ATA_QUIRK_NOTRIM = 23, + __ATA_QUIRK_MAX_SEC_1024 = 24, + __ATA_QUIRK_MAX_TRIM_128M = 25, + __ATA_QUIRK_NO_NCQ_ON_ATI = 26, + __ATA_QUIRK_NO_ID_DEV_LOG = 27, + __ATA_QUIRK_NO_LOG_DIR = 28, + __ATA_QUIRK_NO_FUA = 29, + __ATA_QUIRK_MAX = 30, +}; + +enum { + AHCI_PCI_BAR_STA2X11 = 0, + AHCI_PCI_BAR_CAVIUM = 0, + AHCI_PCI_BAR_LOONGSON = 0, + AHCI_PCI_BAR_ENMOTUS = 2, + AHCI_PCI_BAR_CAVIUM_GEN5 = 4, + AHCI_PCI_BAR_STANDARD = 5, +}; + +enum board_ids { + board_ahci = 0, + board_ahci_43bit_dma = 1, + board_ahci_ign_iferr = 2, + board_ahci_no_debounce_delay = 3, + board_ahci_no_msi = 4, + board_ahci_pcs_quirk = 5, + board_ahci_pcs_quirk_no_devslp = 6, + board_ahci_pcs_quirk_no_sntf = 7, + board_ahci_yes_fbs = 8, + board_ahci_al = 9, + board_ahci_avn = 10, + board_ahci_mcp65 = 11, + board_ahci_mcp77 = 12, + board_ahci_mcp89 = 13, + board_ahci_mv = 14, + board_ahci_sb600 = 15, + board_ahci_sb700 = 16, + board_ahci_vt8251 = 17, + board_ahci_mcp_linux = 11, + board_ahci_mcp67 = 11, + board_ahci_mcp73 = 11, + board_ahci_mcp79 = 12, +}; + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf = 1, + e1000_mng_mode_pt = 2, + e1000_mng_mode_ipmi = 3, + e1000_mng_mode_host_if_only = 4, +}; + +struct kthread_work; + +typedef void (*kthread_work_func_t)(struct kthread_work *); + +struct kthread_worker; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + struct kthread_worker *worker; + int canceling; +}; + +struct kthread_worker { + unsigned int flags; + raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; + struct kthread_work *current_work; +}; + +struct kthread_delayed_work { + struct kthread_work work; + struct timer_list timer; +}; + +struct posix_clock; + +struct posix_clock_context; + +struct posix_clock_operations { + struct module *owner; + int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *); + int (*clock_gettime)(struct posix_clock *, struct timespec64 *); + int (*clock_getres)(struct posix_clock *, struct timespec64 *); + int (*clock_settime)(struct posix_clock *, const struct timespec64 *); + long int (*ioctl)(struct posix_clock_context *, unsigned int, long unsigned int); + int (*open)(struct posix_clock_context *, fmode_t); + __poll_t (*poll)(struct posix_clock_context *, struct file *, poll_table *); + int (*release)(struct posix_clock_context *); + ssize_t (*read)(struct posix_clock_context *, uint, char *, size_t); +}; + +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct device *dev; + struct rw_semaphore rwsem; + bool zombie; +}; + +struct posix_clock_context { + struct posix_clock *clk; + void *private_clkdata; +}; + +struct ptp_extts_event { + struct ptp_clock_time t; + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct pps_ktime { + __s64 sec; + __s32 nsec; + __u32 flags; +}; + +struct pps_kparams { + int api_version; + int mode; + struct pps_ktime assert_off_tu; + struct pps_ktime clear_off_tu; +}; + +struct pps_device; + +struct pps_source_info { + char name[32]; + char path[32]; + int mode; + void (*echo)(struct pps_device *, int, void *); + struct module *owner; + struct device *dev; +}; + +struct pps_device { + struct pps_source_info info; + struct pps_kparams params; + __u32 assert_sequence; + __u32 
clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; + unsigned int last_ev; + wait_queue_head_t queue; + unsigned int id; + const void *lookup_cookie; + struct cdev cdev; + struct device *dev; + struct fasync_struct *async_queue; + spinlock_t lock; +}; + +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + +struct timestamp_event_queue { + struct ptp_extts_event buf[128]; + int head; + int tail; + spinlock_t lock; + struct list_head qlist; + long unsigned int *mask; + struct dentry *debugfs_instance; + struct debugfs_u32_array dfs_bitmap; +}; + +struct ptp_clock { + struct posix_clock clock; + struct device dev; + struct ptp_clock_info *info; + dev_t devid; + int index; + struct pps_device *pps_source; + long int dialed_frequency; + struct list_head tsevqs; + spinlock_t tsevqs_lock; + struct mutex pincfg_mux; + wait_queue_head_t tsev_wq; + int defunct; + struct device_attribute *pin_dev_attr; + struct attribute **pin_attr; + struct attribute_group pin_attr_group; + const struct attribute_group *pin_attr_groups[2]; + struct kthread_worker *kworker; + struct kthread_delayed_work aux_work; + unsigned int max_vclocks; + unsigned int n_vclocks; + int *vclock_index; + struct mutex n_vclocks_mux; + bool is_virtual_clock; + bool has_cycles; + struct dentry *debugfs_root; +}; + +struct ptp_vclock { + struct ptp_clock *pclock; + struct ptp_clock_info info; + struct ptp_clock *clock; + struct hlist_node vclock_hash_node; + struct cyclecounter cc; + struct timecounter tc; + struct mutex lock; +}; + +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED = 0, + CLOCK_EVT_STATE_SHUTDOWN = 1, + CLOCK_EVT_STATE_PERIODIC = 2, + CLOCK_EVT_STATE_ONESHOT = 3, + CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, +}; + +struct clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(long unsigned int, struct clock_event_device *); + int (*set_next_ktime)(ktime_t, struct clock_event_device *); + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_state state_use_accessors; + unsigned int features; + long unsigned int retries; + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_oneshot_stopped)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); + void (*broadcast)(const struct cpumask *); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); + long unsigned int min_delta_ticks; + long unsigned int max_delta_ticks; + const char *name; + int rating; + int irq; + int bound_on; + const struct cpumask *cpumask; + struct list_head list; + struct module *owner; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct of_timer_irq { + int irq; + int index; + const char *name; + long unsigned int flags; + irq_handler_t handler; +}; + +struct of_timer_base { + void *base; + const char *name; + int index; +}; + +struct of_timer_clk { + struct clk *clk; + const char *name; + int index; + long unsigned int rate; + long unsigned int period; +}; + +struct timer_of { + unsigned int flags; + struct device_node *np; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct clock_event_device clkevt; + struct of_timer_base of_base; + struct of_timer_irq of_irq; + struct of_timer_clk of_clk; + void *private_data; + long: 64; + long: 64; + long: 
64; +}; + +typedef size_t (*iov_step_f)(void *, size_t, size_t, void *, void *); + +typedef size_t (*iov_ustep_f)(void *, size_t, size_t, void *, void *); + +struct csum_state { + __wsum csum; + size_t off; +}; + +enum { + BPF_F_INGRESS = 1, + BPF_F_BROADCAST = 8, + BPF_F_EXCLUDE_INGRESS = 16, +}; + +struct __una_u32 { + u32 x; +}; + +struct sk_psock_progs { + struct bpf_prog *msg_parser; + struct bpf_prog *stream_parser; + struct bpf_prog *stream_verdict; + struct bpf_prog *skb_verdict; + struct bpf_link *msg_parser_link; + struct bpf_link *stream_parser_link; + struct bpf_link *stream_verdict_link; + struct bpf_link *skb_verdict_link; +}; + +struct sk_psock_work_state { + u32 len; + u32 off; +}; + +struct sk_msg; + +struct sk_psock { + struct sock *sk; + struct sock *sk_redir; + u32 apply_bytes; + u32 cork_bytes; + u32 eval; + bool redir_ingress; + struct sk_msg *cork; + struct sk_psock_progs progs; + struct sk_buff_head ingress_skb; + struct list_head ingress_msg; + spinlock_t ingress_lock; + long unsigned int state; + struct list_head link; + spinlock_t link_lock; + refcount_t refcnt; + void (*saved_unhash)(struct sock *); + void (*saved_destroy)(struct sock *); + void (*saved_close)(struct sock *, long int); + void (*saved_write_space)(struct sock *); + void (*saved_data_ready)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + struct proto *sk_proto; + struct mutex work_mutex; + struct sk_psock_work_state work_state; + struct delayed_work work; + struct sock *sk_pair; + struct rcu_work rwork; +}; + +struct sk_msg_sg { + u32 start; + u32 curr; + u32 end; + u32 size; + u32 copybreak; + long unsigned int copy[1]; + struct scatterlist data[19]; +}; + +struct sk_msg { + struct sk_msg_sg sg; + void *data; + void *data_end; + u32 apply_bytes; + u32 cork_bytes; + u32 flags; + struct sk_buff *skb; + struct sock *sk_redir; + struct sock *sk; + struct list_head list; +}; + +struct sk_psock_link { + struct list_head list; + struct bpf_map *map; + void *link_raw; +}; + +struct bpf_stab { + struct bpf_map map; + struct sock **sks; + struct sk_psock_progs progs; + spinlock_t lock; +}; + +typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_map)(struct sk_buff *, struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64); + +struct sock_map_seq_info { + struct bpf_map *map; + struct sock *sk; + u32 index; +}; + +struct bpf_iter__sockmap { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + struct sock *sk; + }; +}; + +struct bpf_shtab_elem { + struct callback_head rcu; + u32 hash; + struct sock *sk; + struct hlist_node node; + u8 key[0]; +}; + +struct bpf_shtab_bucket { + struct hlist_head head; + spinlock_t lock; +}; + +struct bpf_shtab { + struct bpf_map map; + struct bpf_shtab_bucket *buckets; + u32 buckets_num; + u32 elem_size; + struct sk_psock_progs progs; + atomic_t count; +}; + +typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64); + +struct sock_hash_seq_info { + struct bpf_map *map; + struct bpf_shtab *htab; + u32 bucket_id; +}; + +struct sockmap_link { + struct bpf_link link; + struct bpf_map *map; + enum 
bpf_attach_type attach_type; +}; + +struct eee_reply_data { + struct ethnl_reply_data base; + struct ethtool_keee eee; +}; + +union nf_conntrack_nat_help {}; + +struct nf_conn_nat { + union nf_conntrack_nat_help help; + int masq_index; +}; + +struct nf_nat_lookup_hook_priv { + struct nf_hook_entries *entries; + struct callback_head callback_head; +}; + +struct nf_nat_hooks_net { + struct nf_hook_ops *nat_hook_ops; + unsigned int users; +}; + +struct nat_net { + struct nf_nat_hooks_net nat_proto_net[11]; +}; + +struct nf_nat_proto_clean { + u8 l3proto; + u8 l4proto; +}; + +enum ctattr_nat { + CTA_NAT_UNSPEC = 0, + CTA_NAT_V4_MINIP = 1, + CTA_NAT_V4_MAXIP = 2, + CTA_NAT_PROTO = 3, + CTA_NAT_V6_MINIP = 4, + CTA_NAT_V6_MAXIP = 5, + __CTA_NAT_MAX = 6, +}; + +enum ctattr_protonat { + CTA_PROTONAT_UNSPEC = 0, + CTA_PROTONAT_PORT_MIN = 1, + CTA_PROTONAT_PORT_MAX = 2, + __CTA_PROTONAT_MAX = 3, +}; + +enum nft_nat_types { + NFT_NAT_SNAT = 0, + NFT_NAT_DNAT = 1, +}; + +enum nft_nat_attributes { + NFTA_NAT_UNSPEC = 0, + NFTA_NAT_TYPE = 1, + NFTA_NAT_FAMILY = 2, + NFTA_NAT_REG_ADDR_MIN = 3, + NFTA_NAT_REG_ADDR_MAX = 4, + NFTA_NAT_REG_PROTO_MIN = 5, + NFTA_NAT_REG_PROTO_MAX = 6, + NFTA_NAT_FLAGS = 7, + __NFTA_NAT_MAX = 8, +}; + +struct nf_nat_range { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; +}; + +enum nft_chain_types { + NFT_CHAIN_T_DEFAULT = 0, + NFT_CHAIN_T_ROUTE = 1, + NFT_CHAIN_T_NAT = 2, + NFT_CHAIN_T_MAX = 3, +}; + +struct nft_nat { + u8 sreg_addr_min; + u8 sreg_addr_max; + u8 sreg_proto_min; + u8 sreg_proto_max; + enum nf_nat_manip_type type: 8; + u8 family; + u16 flags; +}; + +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +}; + +struct sockaddr_un { + __kernel_sa_family_t sun_family; + char sun_path[108]; +}; + +struct unix_edge; + +struct scm_fp_list { + short int count; + short int count_unix; + short int max; + bool inflight; + bool dead; + struct list_head vertices; + struct unix_edge *edges; + struct user_struct *user; + struct file *fp[253]; +}; + +struct unix_sock; + +struct unix_edge { + struct unix_sock *predecessor; + struct unix_sock *successor; + struct list_head vertex_entry; + struct list_head stack_entry; +}; + +struct socket_alloc { + struct socket socket; + struct inode vfs_inode; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct unix_vertex { + struct list_head edges; + struct list_head entry; + struct list_head scc_entry; + long unsigned int out_degree; + long unsigned int index; + long unsigned int scc_index; +}; + +struct scm_stat { + atomic_t nr_fds; + long unsigned int nr_unix_fds; +}; + +struct unix_address; + +struct unix_sock { + struct sock sk; + struct unix_address *addr; + struct path path; + struct mutex iolock; + struct mutex bindlock; + struct sock *peer; + struct sock *listener; + struct unix_vertex *vertex; + spinlock_t lock; + long: 64; + long: 64; + long: 64; + long: 64; + struct socket_wq peer_wq; + wait_queue_entry_t peer_wake; + struct scm_stat scm_stat; + struct sk_buff *oob_skb; +}; + +struct unix_address { + refcount_t refcnt; + int len; + struct sockaddr_un name[0]; +}; + +struct unix_skb_parms { + struct pid *pid; + kuid_t uid; + kgid_t gid; + struct scm_fp_list *fp; + u32 consumed; +}; + +enum unix_vertex_index { + UNIX_VERTEX_INDEX_MARK1 = 0, + UNIX_VERTEX_INDEX_MARK2 = 1, + UNIX_VERTEX_INDEX_START = 2, +}; + +struct in6_flowlabel_req { + struct in6_addr flr_dst; + __be32 flr_label; + __u8 
flr_action; + __u8 flr_share; + __u16 flr_flags; + __u16 flr_expires; + __u16 flr_linger; + __u32 __flr_pad; +}; + +enum proc_hidepid { + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, + HIDEPID_NOT_PTRACEABLE = 4, +}; + +enum proc_pidonly { + PROC_PIDONLY_OFF = 0, + PROC_PIDONLY_ON = 1, +}; + +struct proc_fs_info { + struct pid_namespace *pid_ns; + struct dentry *proc_self; + struct dentry *proc_thread_self; + kgid_t pid_gid; + enum proc_hidepid hide_pid; + enum proc_pidonly pidonly; + struct callback_head rcu; +}; + +struct static_key_false_deferred { + struct static_key_false key; + long unsigned int timeout; + struct delayed_work work; +}; + +struct ip6fl_iter_state { + struct seq_net_private p; + struct pid_namespace *pid_ns; + int bucket; +}; + +struct nd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + __u8 opt[0]; +}; + +struct mac_addr { + unsigned char addr[6]; +}; + +typedef struct mac_addr mac_addr; + +struct net_bridge_vlan_group { + struct rhashtable vlan_hash; + struct rhashtable tunnel_hash; + struct list_head vlan_list; + u16 num_vlans; + u16 pvid; + u8 pvid_state; +}; + +struct net_bridge_fdb_key { + mac_addr addr; + u16 vlan_id; +}; + +struct net_bridge_fdb_entry { + struct rhash_head rhnode; + struct net_bridge_port *dst; + struct net_bridge_fdb_key key; + struct hlist_node fdb_node; + long unsigned int flags; + long: 64; + long: 64; + long unsigned int updated; + long unsigned int used; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct net_bridge_mcast_gc { + struct hlist_node gc_node; + void (*destroy)(struct net_bridge_mcast_gc *); +}; + +struct net_bridge_port_group_sg_key { + struct net_bridge_port *port; + struct br_ip addr; +}; + +struct net_bridge_port_group { + struct net_bridge_port_group *next; + struct net_bridge_port_group_sg_key key; + unsigned char eth_addr[6]; + unsigned char flags; + unsigned char filter_mode; + unsigned char grp_query_rexmit_cnt; + unsigned char rt_protocol; + struct hlist_head src_list; + unsigned int src_ents; + struct timer_list timer; + struct timer_list rexmit_timer; + struct hlist_node mglist; + struct rb_root eht_set_tree; + struct rb_root eht_host_tree; + struct rhash_head rhnode; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct net_bridge_mdb_entry { + struct rhash_head rhnode; + struct net_bridge *br; + struct net_bridge_port_group *ports; + struct br_ip addr; + bool host_joined; + struct timer_list timer; + struct hlist_node mdb_node; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct br_input_skb_cb { + struct net_device *brdev; + u16 frag_max_size; + u8 igmp; + u8 mrouters_only: 1; + u8 proxyarp_replied: 1; + u8 src_port_isolated: 1; + u8 promisc: 1; + u8 br_netfilter_broute: 1; + u32 backup_nhid; +}; + +enum br_pkt_type { + BR_PKT_UNICAST = 0, + BR_PKT_MULTICAST = 1, + BR_PKT_BROADCAST = 2, +}; + +struct nf_br_ops { + int (*br_dev_xmit_hook)(struct sk_buff *); +}; + +struct pin_cookie {}; + +enum uclamp_id { + UCLAMP_MIN = 0, + UCLAMP_MAX = 1, + UCLAMP_CNT = 2, +}; + +struct dl_bw { + raw_spinlock_t lock; + u64 bw; + u64 total_bw; +}; + +struct cpudl_item; + +struct cpudl { + raw_spinlock_t lock; + int size; + cpumask_var_t free_cpus; + struct cpudl_item *elements; +}; + +struct cpupri_vec { + atomic_t count; + cpumask_var_t mask; +}; + +struct cpupri { + struct cpupri_vec pri_to_cpu[101]; + int *cpu_to_pri; +}; + +struct perf_domain; + +struct root_domain { + atomic_t refcount; + atomic_t 
rto_count; + struct callback_head rcu; + cpumask_var_t span; + cpumask_var_t online; + bool overloaded; + bool overutilized; + cpumask_var_t dlo_mask; + atomic_t dlo_count; + struct dl_bw dl_bw; + struct cpudl cpudl; + u64 visit_gen; + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; + int rto_loop; + int rto_cpu; + atomic_t rto_loop_next; + atomic_t rto_loop_start; + cpumask_var_t rto_mask; + struct cpupri cpupri; + struct perf_domain *pd; +}; + +struct cfs_rq { + struct load_weight load; + unsigned int nr_running; + unsigned int h_nr_running; + unsigned int idle_nr_running; + unsigned int idle_h_nr_running; + s64 avg_vruntime; + u64 avg_load; + u64 min_vruntime; + unsigned int forceidle_seq; + u64 min_vruntime_fi; + struct rb_root_cached tasks_timeline; + struct sched_entity *curr; + struct sched_entity *next; + long: 64; + long: 64; + long: 64; + struct sched_avg avg; + struct { + raw_spinlock_t lock; + int nr; + long unsigned int load_avg; + long unsigned int util_avg; + long unsigned int runnable_avg; + long: 64; + long: 64; + long: 64; + long: 64; + } removed; + u64 last_update_tg_load_avg; + long unsigned int tg_load_avg_contrib; + long int propagate; + long int prop_runnable_sum; + long unsigned int h_load; + u64 last_h_load_update; + struct sched_entity *h_load_next; + struct rq *rq; + int on_list; + struct list_head leaf_cfs_rq_list; + struct task_group *tg; + int idle; + int runtime_enabled; + s64 runtime_remaining; + u64 throttled_pelt_idle; + u64 throttled_clock; + u64 throttled_clock_pelt; + u64 throttled_clock_pelt_time; + u64 throttled_clock_self; + u64 throttled_clock_self_time; + int throttled; + int throttle_count; + struct list_head throttled_list; + struct list_head throttled_csd_list; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct rt_prio_array { + long unsigned int bitmap[2]; + struct list_head queue[100]; +}; + +struct rt_rq { + struct rt_prio_array active; + unsigned int rt_nr_running; + unsigned int rr_nr_running; + struct { + int curr; + int next; + } highest_prio; + bool overloaded; + struct plist_head pushable_tasks; + int rt_queued; +}; + +struct dl_rq { + struct rb_root_cached root; + unsigned int dl_nr_running; + struct { + u64 curr; + u64 next; + } earliest_dl; + bool overloaded; + struct rb_root_cached pushable_dl_tasks_root; + u64 running_bw; + u64 this_bw; + u64 extra_bw; + u64 max_bw; + u64 bw_ratio; +}; + +struct balance_callback { + struct balance_callback *next; + void (*func)(struct rq *); +}; + +struct scx_rq { + struct scx_dispatch_q local_dsq; + struct list_head runnable_list; + struct list_head ddsp_deferred_locals; + long unsigned int ops_qseq; + u64 extra_enq_flags; + u32 nr_running; + u32 flags; + u32 cpuperf_target; + bool cpu_released; + cpumask_var_t cpus_to_kick; + cpumask_var_t cpus_to_kick_if_idle; + cpumask_var_t cpus_to_preempt; + cpumask_var_t cpus_to_wait; + long unsigned int pnt_seq; + struct balance_callback deferred_bal_cb; + struct irq_work deferred_irq_work; + struct irq_work kick_cpus_irq_work; +}; + +struct cpu_stop_done; + +struct cpu_stop_work { + struct list_head list; + cpu_stop_fn_t fn; + long unsigned int caller; + void *arg; + struct cpu_stop_done *done; +}; + +struct sched_domain; + +struct rq { + raw_spinlock_t __lock; + unsigned int nr_running; + unsigned int ttwu_pending; + u64 nr_switches; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cfs_rq cfs; + struct rt_rq rt; + struct dl_rq dl; + struct scx_rq scx; + struct sched_dl_entity 
fair_server; + struct list_head leaf_cfs_rq_list; + struct list_head *tmp_alone_branch; + unsigned int nr_uninterruptible; + union { + struct task_struct *donor; + struct task_struct *curr; + }; + struct sched_dl_entity *dl_server; + struct task_struct *idle; + struct task_struct *stop; + long unsigned int next_balance; + struct mm_struct *prev_mm; + unsigned int clock_update_flags; + u64 clock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u64 clock_task; + u64 clock_pelt; + long unsigned int lost_idle_time; + u64 clock_pelt_idle; + u64 clock_idle; + atomic_t nr_iowait; + u64 last_seen_need_resched_ns; + int ticks_without_resched; + int membarrier_state; + struct root_domain *rd; + struct sched_domain *sd; + long unsigned int cpu_capacity; + struct balance_callback *balance_callback; + unsigned char nohz_idle_balance; + unsigned char idle_balance; + long unsigned int misfit_task_load; + int active_balance; + int push_cpu; + struct cpu_stop_work active_balance_work; + int cpu; + int online; + struct list_head cfs_tasks; + struct sched_avg avg_rt; + struct sched_avg avg_dl; + u64 idle_stamp; + u64 avg_idle; + u64 max_idle_balance_cost; + struct rcuwait hotplug_wait; + long unsigned int calc_load_update; + long int calc_load_active; + long: 64; + long: 64; + call_single_data_t hrtick_csd; + struct hrtimer hrtick_timer; + ktime_t hrtick_time; + unsigned int nr_pinned; + unsigned int push_busy; + struct cpu_stop_work push_work; + struct rq *core; + struct task_struct *core_pick; + struct sched_dl_entity *core_dl_server; + unsigned int core_enabled; + unsigned int core_sched_seq; + struct rb_root core_tree; + unsigned int core_task_seq; + unsigned int core_pick_seq; + long unsigned int core_cookie; + unsigned int core_forceidle_count; + unsigned int core_forceidle_seq; + unsigned int core_forceidle_occupation; + u64 core_forceidle_start; + cpumask_var_t scratch_mask; + long: 64; + call_single_data_t cfsb_csd; + struct list_head cfsb_csd_list; + long: 64; + long: 64; +}; + +struct cfs_bandwidth { + raw_spinlock_t lock; + ktime_t period; + u64 quota; + u64 runtime; + u64 burst; + u64 runtime_snap; + s64 hierarchical_quota; + u8 idle; + u8 period_active; + u8 slack_started; + struct hrtimer period_timer; + struct hrtimer slack_timer; + struct list_head throttled_cfs_rq; + int nr_periods; + int nr_throttled; + int nr_burst; + u64 throttled_time; + u64 burst_time; +}; + +struct task_group { + struct cgroup_subsys_state css; + int idle; + struct sched_entity **se; + struct cfs_rq **cfs_rq; + long unsigned int shares; + long: 64; + long: 64; + atomic_long_t load_avg; + u32 scx_flags; + u32 scx_weight; + struct callback_head rcu; + struct list_head list; + struct task_group *parent; + struct list_head siblings; + struct list_head children; + struct cfs_bandwidth cfs_bandwidth; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum cpu_idle_type { + __CPU_NOT_IDLE = 0, + CPU_IDLE = 1, + CPU_NEWLY_IDLE = 2, + CPU_MAX_IDLE_TYPES = 3, +}; + +enum { + SD_BALANCE_NEWIDLE = 1, + SD_BALANCE_EXEC = 2, + SD_BALANCE_FORK = 4, + SD_BALANCE_WAKE = 8, + SD_WAKE_AFFINE = 16, + SD_ASYM_CPUCAPACITY = 32, + SD_ASYM_CPUCAPACITY_FULL = 64, + SD_SHARE_CPUCAPACITY = 128, + SD_CLUSTER = 256, + SD_SHARE_LLC = 512, + SD_SERIALIZE = 1024, + SD_ASYM_PACKING = 2048, + SD_PREFER_SIBLING = 4096, + SD_OVERLAP = 8192, + SD_NUMA = 16384, +}; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; + int nr_idle_scan; 
+}; + +struct sched_group; + +struct sched_domain { + struct sched_domain *parent; + struct sched_domain *child; + struct sched_group *groups; + long unsigned int min_interval; + long unsigned int max_interval; + unsigned int busy_factor; + unsigned int imbalance_pct; + unsigned int cache_nice_tries; + unsigned int imb_numa_nr; + int nohz_idle; + int flags; + int level; + long unsigned int last_balance; + unsigned int balance_interval; + unsigned int nr_balance_failed; + u64 max_newidle_lb_cost; + long unsigned int last_decay_max_lb_cost; + char *name; + union { + void *private; + struct callback_head rcu; + }; + struct sched_domain_shared *shared; + unsigned int span_weight; + long unsigned int span[0]; +}; + +struct sched_group_capacity; + +struct sched_group { + struct sched_group *next; + atomic_t ref; + unsigned int group_weight; + unsigned int cores; + struct sched_group_capacity *sgc; + int asym_prefer_cpu; + int flags; + long unsigned int cpumask[0]; +}; + +struct sched_group_capacity { + atomic_t ref; + long unsigned int capacity; + long unsigned int min_capacity; + long unsigned int max_capacity; + long unsigned int next_update; + int imbalance; + int id; + long unsigned int cpumask[0]; +}; + +struct em_perf_state { + long unsigned int performance; + long unsigned int frequency; + long unsigned int power; + long unsigned int cost; + long unsigned int flags; +}; + +struct em_perf_table { + struct callback_head rcu; + struct kref kref; + struct em_perf_state state[0]; +}; + +struct em_perf_domain { + struct em_perf_table *em_table; + int nr_perf_states; + int min_perf_state; + int max_perf_state; + long unsigned int flags; + long unsigned int cpus[0]; +}; + +struct cpuidle_state_usage { + long long unsigned int disable; + long long unsigned int usage; + u64 time_ns; + long long unsigned int above; + long long unsigned int below; + long long unsigned int rejected; + long long unsigned int s2idle_usage; + long long unsigned int s2idle_time; +}; + +struct cpuidle_device; + +struct cpuidle_driver; + +struct cpuidle_state { + char name[16]; + char desc[32]; + s64 exit_latency_ns; + s64 target_residency_ns; + unsigned int flags; + unsigned int exit_latency; + int power_usage; + unsigned int target_residency; + int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); + void (*enter_dead)(struct cpuidle_device *, int); + int (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); +}; + +struct cpuidle_state_kobj; + +struct cpuidle_driver_kobj; + +struct cpuidle_device_kobj; + +struct cpuidle_device { + unsigned int registered: 1; + unsigned int enabled: 1; + unsigned int poll_time_limit: 1; + unsigned int cpu; + ktime_t next_hrtimer; + int last_state_idx; + u64 last_residency_ns; + u64 poll_limit_ns; + u64 forced_idle_latency_limit_ns; + struct cpuidle_state_usage states_usage[10]; + struct cpuidle_state_kobj *kobjs[10]; + struct cpuidle_driver_kobj *kobj_driver; + struct cpuidle_device_kobj *kobj_dev; + struct list_head device_list; +}; + +struct cpuidle_driver { + const char *name; + struct module *owner; + unsigned int bctimer: 1; + struct cpuidle_state states[10]; + int state_count; + int safe_state_index; + struct cpumask *cpumask; + const char *governor; +}; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE = 0, + SCHED_TUNABLESCALING_LOG = 1, + SCHED_TUNABLESCALING_LINEAR = 2, + SCHED_TUNABLESCALING_END = 3, +}; + +struct cpudl_item { + u64 dl; + int cpu; + int idx; +}; + +struct asym_cap_data { + struct list_head link; + struct callback_head 
rcu; + long unsigned int capacity; + long unsigned int cpus[0]; +}; + +typedef int (*tg_visitor)(struct task_group *, void *); + +struct perf_domain { + struct em_perf_domain *em_pd; + struct perf_domain *next; + struct callback_head rcu; +}; + +struct rq_flags { + long unsigned int flags; + struct pin_cookie cookie; + unsigned int clock_update_flags; +}; + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_irqsave_t; + +enum { + __SCHED_FEAT_PLACE_LAG = 0, + __SCHED_FEAT_PLACE_DEADLINE_INITIAL = 1, + __SCHED_FEAT_PLACE_REL_DEADLINE = 2, + __SCHED_FEAT_RUN_TO_PARITY = 3, + __SCHED_FEAT_PREEMPT_SHORT = 4, + __SCHED_FEAT_NEXT_BUDDY = 5, + __SCHED_FEAT_CACHE_HOT_BUDDY = 6, + __SCHED_FEAT_DELAY_DEQUEUE = 7, + __SCHED_FEAT_DELAY_ZERO = 8, + __SCHED_FEAT_WAKEUP_PREEMPTION = 9, + __SCHED_FEAT_HRTICK = 10, + __SCHED_FEAT_HRTICK_DL = 11, + __SCHED_FEAT_NONTASK_CAPACITY = 12, + __SCHED_FEAT_TTWU_QUEUE = 13, + __SCHED_FEAT_SIS_UTIL = 14, + __SCHED_FEAT_WARN_DOUBLE_CLOCK = 15, + __SCHED_FEAT_RT_PUSH_IPI = 16, + __SCHED_FEAT_RT_RUNTIME_SHARE = 17, + __SCHED_FEAT_LB_MIN = 18, + __SCHED_FEAT_ATTACH_AGE_LOAD = 19, + __SCHED_FEAT_WA_IDLE = 20, + __SCHED_FEAT_WA_WEIGHT = 21, + __SCHED_FEAT_WA_BIAS = 22, + __SCHED_FEAT_UTIL_EST = 23, + __SCHED_FEAT_LATENCY_WARN = 24, + __SCHED_FEAT_NR = 25, +}; + +struct affinity_context { + const struct cpumask *new_mask; + struct cpumask *user_mask; + unsigned int flags; +}; + +struct energy_env { + long unsigned int task_busy_time; + long unsigned int pd_busy_time; + long unsigned int cpu_cap; + long unsigned int pd_cap; +}; + +enum fbq_type { + regular = 0, + remote = 1, + all = 2, +}; + +enum group_type { + group_has_spare = 0, + group_fully_busy = 1, + group_misfit_task = 2, + group_smt_balance = 3, + group_asym_packing = 4, + group_imbalanced = 5, + group_overloaded = 6, +}; + +enum migration_type { + migrate_load = 0, + migrate_util = 1, + migrate_task = 2, + migrate_misfit = 3, +}; + +struct lb_env { + struct sched_domain *sd; + struct rq *src_rq; + int src_cpu; + int dst_cpu; + struct rq *dst_rq; + struct cpumask *dst_grpmask; + int new_dst_cpu; + enum cpu_idle_type idle; + long int imbalance; + struct cpumask *cpus; + unsigned int flags; + unsigned int loop; + unsigned int loop_break; + unsigned int loop_max; + enum fbq_type fbq_type; + enum migration_type migration_type; + struct list_head tasks; +}; + +struct sg_lb_stats { + long unsigned int avg_load; + long unsigned int group_load; + long unsigned int group_capacity; + long unsigned int group_util; + long unsigned int group_runnable; + unsigned int sum_nr_running; + unsigned int sum_h_nr_running; + unsigned int idle_cpus; + unsigned int group_weight; + enum group_type group_type; + unsigned int group_asym_packing; + unsigned int group_smt_balance; + long unsigned int group_misfit_task_load; +}; + +struct sd_lb_stats { + struct sched_group *busiest; + struct sched_group *local; + long unsigned int total_load; + long unsigned int total_capacity; + long unsigned int avg_load; + unsigned int prefer_sibling; + struct sg_lb_stats busiest_stat; + struct sg_lb_stats local_stat; +}; + +enum tick_dep_bits { + TICK_DEP_BIT_POSIX_TIMER = 0, + TICK_DEP_BIT_PERF_EVENTS = 1, + TICK_DEP_BIT_SCHED = 2, + TICK_DEP_BIT_CLOCK_UNSTABLE = 3, + TICK_DEP_BIT_RCU = 4, + TICK_DEP_BIT_RCU_EXP = 5, +}; + +struct action_cache { + long unsigned int allow_native[8]; +}; + +struct notification; + +struct seccomp_filter { + refcount_t refs; + refcount_t users; + bool log; + bool wait_killable_recv; + struct 
action_cache cache; + struct seccomp_filter *prev; + struct bpf_prog *prog; + struct notification *notif; + struct mutex notify_lock; + wait_queue_head_t wqh; +}; + +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + __u16 seccomp_data; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +struct seccomp_notif_addfd { + __u64 id; + __u32 flags; + __u32 srcfd; + __u32 newfd; + __u32 newfd_flags; +}; + +struct sock_fprog { + short unsigned int len; + struct sock_filter *filter; +}; + +typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); + +enum notify_state { + SECCOMP_NOTIFY_INIT = 0, + SECCOMP_NOTIFY_SENT = 1, + SECCOMP_NOTIFY_REPLIED = 2, +}; + +struct seccomp_knotif { + struct task_struct *task; + u64 id; + const struct seccomp_data *data; + enum notify_state state; + int error; + long int val; + u32 flags; + struct completion ready; + struct list_head list; + struct list_head addfd; +}; + +struct seccomp_kaddfd { + struct file *file; + int fd; + unsigned int flags; + __u32 ioctl_flags; + union { + bool setfd; + int ret; + }; + struct completion completion; + struct list_head list; +}; + +struct notification { + atomic_t requests; + u32 flags; + u64 next_id; + struct list_head notifications; +}; + +struct seccomp_log_name { + u32 log; + const char *name; +}; + +enum bpf_stack_slot_type { + STACK_INVALID = 0, + STACK_SPILL = 1, + STACK_MISC = 2, + STACK_ZERO = 3, + STACK_DYNPTR = 4, + STACK_ITER = 5, +}; + +struct bpf_netns_link { + struct bpf_link link; + enum bpf_attach_type type; + enum netns_bpf_attach_type netns_type; + struct net *net; + struct list_head node; +}; + +typedef int folio_walk_flags_t; + +enum folio_walk_level { + FW_LEVEL_PTE = 0, + FW_LEVEL_PMD = 1, + FW_LEVEL_PUD = 2, +}; + +struct folio_walk { + struct page *page; + enum folio_walk_level level; + union { + pte_t *ptep; + pud_t *pudp; + pmd_t *pmdp; + }; + union { + pte_t pte; + pud_t pud; + pmd_t pmd; + }; + struct vm_area_struct *vma; + spinlock_t *ptl; +}; + +struct crypto_wait { + struct completion completion; + int err; +}; + +enum zpool_mapmode { + ZPOOL_MM_RW = 0, + ZPOOL_MM_RO = 1, + ZPOOL_MM_WO = 2, + ZPOOL_MM_DEFAULT = 0, +}; + +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + void *__ctx[0]; +}; + +struct crypto_acomp { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct crypto_acomp_ctx { + struct crypto_acomp *acomp; + struct acomp_req *req; + struct crypto_wait wait; + u8 *buffer; + struct mutex mutex; + bool is_sleepable; +}; + +struct zpool; + +struct zswap_pool { + struct zpool *zpool; + struct crypto_acomp_ctx *acomp_ctx; + struct percpu_ref ref; + struct list_head list; + struct work_struct release_work; + struct hlist_node node; + char tfm_name[128]; +}; + +struct zswap_entry { + swp_entry_t swpentry; + unsigned int length; + bool referenced; + struct zswap_pool *pool; + long unsigned int handle; + struct obj_cgroup *objcg; + struct list_head lru; +}; + +enum zswap_init_type { + ZSWAP_UNINIT = 0, + ZSWAP_INIT_SUCCEED = 1, + ZSWAP_INIT_FAILED = 2, +}; + +typedef 
struct { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +} ext4_acl_entry; + +typedef struct { + __le32 a_version; +} ext4_acl_header; + +struct fuse_file_lock { + uint64_t start; + uint64_t end; + uint32_t type; + uint32_t pid; +}; + +struct fuse_open_in { + uint32_t flags; + uint32_t open_flags; +}; + +struct fuse_flush_in { + uint64_t fh; + uint32_t unused; + uint32_t padding; + uint64_t lock_owner; +}; + +struct fuse_fsync_in { + uint64_t fh; + uint32_t fsync_flags; + uint32_t padding; +}; + +struct fuse_lk_in { + uint64_t fh; + uint64_t owner; + struct fuse_file_lock lk; + uint32_t lk_flags; + uint32_t padding; +}; + +struct fuse_lk_out { + struct fuse_file_lock lk; +}; + +struct fuse_bmap_in { + uint64_t block; + uint32_t blocksize; + uint32_t padding; +}; + +struct fuse_bmap_out { + uint64_t block; +}; + +struct fuse_poll_in { + uint64_t fh; + uint64_t kh; + uint32_t flags; + uint32_t events; +}; + +struct fuse_poll_out { + uint32_t revents; + uint32_t padding; +}; + +struct fuse_notify_poll_wakeup_out { + uint64_t kh; +}; + +struct fuse_fallocate_in { + uint64_t fh; + uint64_t offset; + uint64_t length; + uint32_t mode; + uint32_t padding; +}; + +struct fuse_lseek_in { + uint64_t fh; + uint64_t offset; + uint32_t whence; + uint32_t padding; +}; + +struct fuse_lseek_out { + uint64_t offset; +}; + +struct fuse_copy_file_range_in { + uint64_t fh_in; + uint64_t off_in; + uint64_t nodeid_out; + uint64_t fh_out; + uint64_t off_out; + uint64_t len; + uint64_t flags; +}; + +struct fuse_writepage_args { + struct fuse_io_args ia; + struct rb_node writepages_entry; + struct list_head queue_entry; + struct fuse_writepage_args *next; + struct inode *inode; + struct fuse_sync_bucket *bucket; +}; + +struct fuse_fill_wb_data { + struct fuse_writepage_args *wpa; + struct fuse_file *ff; + struct inode *inode; + struct folio **orig_folios; + unsigned int max_folios; +}; + +enum { + BIOSET_NEED_BVECS = 1, + BIOSET_NEED_RESCUER = 2, + BIOSET_PERCPU_CACHE = 4, +}; + +struct compressed_bio { + unsigned int nr_folios; + struct folio **compressed_folios; + u64 start; + unsigned int len; + unsigned int compressed_len; + u8 compress_type; + bool writeback; + union { + struct btrfs_bio *orig_bbio; + struct work_struct write_end_work; + }; + struct btrfs_bio bbio; +}; + +struct workspace_manager { + struct list_head idle_ws; + spinlock_t ws_lock; + int free_ws; + atomic_t total_ws; + wait_queue_head_t ws_wait; +}; + +struct btrfs_compress_op { + struct workspace_manager *workspace_manager; + unsigned int max_level; + unsigned int default_level; +}; + +struct btrfs_compr_pool { + struct shrinker *shrinker; + spinlock_t lock; + struct list_head list; + int count; + int thresh; +}; + +struct bucket_item { + u32 count; +}; + +struct heuristic_ws { + u8 *sample; + u32 sample_size; + struct bucket_item *bucket; + struct bucket_item *bucket_b; + struct list_head list; +}; + +struct keyring_search_context { + struct keyring_index_key index_key; + const struct cred *cred; + struct key_match_data match_data; + unsigned int flags; + int (*iterator)(const void *, void *); + int skipped_ret; + bool possessed; + key_ref_t result; + time64_t now; +}; + +struct authenc_esn_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; +}; + +struct crypto_authenc_esn_ctx { + unsigned int reqoff; + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_esn_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; 
+}; + +enum prio_policy { + POLICY_NO_CHANGE = 0, + POLICY_PROMOTE_TO_RT = 1, + POLICY_RESTRICT_TO_BE = 2, + POLICY_ALL_TO_IDLE = 3, + POLICY_NONE_TO_RT = 4, +}; + +struct ioprio_blkcg { + struct blkcg_policy_data cpd; + enum prio_policy prio_policy; +}; + +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + struct filename *filename; + struct statx *buffer; +}; + +struct lwq { + spinlock_t lock; + struct llist_node *ready; + struct llist_head new; +}; + +struct region { + unsigned int start; + unsigned int off; + unsigned int group_len; + unsigned int end; + unsigned int nbits; +}; + +struct gen_pool_chunk { + struct list_head next_chunk; + atomic_long_t avail; + phys_addr_t phys_addr; + void *owner; + long unsigned int start_addr; + long unsigned int end_addr; + long unsigned int bits[0]; +}; + +struct genpool_data_align { + int align; +}; + +struct genpool_data_fixed { + long unsigned int offset; +}; + +enum { + FB_BLANK_UNBLANK = 0, + FB_BLANK_NORMAL = 1, + FB_BLANK_VSYNC_SUSPEND = 2, + FB_BLANK_HSYNC_SUSPEND = 3, + FB_BLANK_POWERDOWN = 4, +}; + +struct fb_event { + struct fb_info *info; + void *data; +}; + +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY = 0, + BACKLIGHT_UPDATE_SYSFS = 1, +}; + +enum backlight_type { + BACKLIGHT_RAW = 1, + BACKLIGHT_PLATFORM = 2, + BACKLIGHT_FIRMWARE = 3, + BACKLIGHT_TYPE_MAX = 4, +}; + +enum backlight_scale { + BACKLIGHT_SCALE_UNKNOWN = 0, + BACKLIGHT_SCALE_LINEAR = 1, + BACKLIGHT_SCALE_NON_LINEAR = 2, +}; + +struct backlight_device; + +struct backlight_ops { + unsigned int options; + int (*update_status)(struct backlight_device *); + int (*get_brightness)(struct backlight_device *); + bool (*controls_device)(struct backlight_device *, struct device *); +}; + +struct backlight_properties { + int brightness; + int max_brightness; + int power; + enum backlight_type type; + unsigned int state; + enum backlight_scale scale; +}; + +struct backlight_device { + struct backlight_properties props; + struct mutex update_lock; + struct mutex ops_lock; + const struct backlight_ops *ops; + struct notifier_block fb_notif; + struct list_head entry; + struct device dev; + bool fb_bl_on[32]; + int use_count; +}; + +struct virtio_pci_common_cfg { + __le32 device_feature_select; + __le32 device_feature; + __le32 guest_feature_select; + __le32 guest_feature; + __le16 msix_config; + __le16 num_queues; + __u8 device_status; + __u8 config_generation; + __le16 queue_select; + __le16 queue_size; + __le16 queue_msix_vector; + __le16 queue_enable; + __le16 queue_notify_off; + __le32 queue_desc_lo; + __le32 queue_desc_hi; + __le32 queue_avail_lo; + __le32 queue_avail_hi; + __le32 queue_used_lo; + __le32 queue_used_hi; +}; + +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + __le16 queue_notify_data; + __le16 queue_reset; + __le16 admin_queue_index; + __le16 admin_queue_num; +}; + +struct virtio_pci_modern_device { + struct pci_dev *pci_dev; + struct virtio_pci_common_cfg *common; + void *device; + void *notify_base; + resource_size_t notify_pa; + u8 *isr; + size_t notify_len; + size_t device_len; + size_t common_len; + int notify_map_cap; + u32 notify_offset_multiplier; + int modern_bars; + struct virtio_device_id id; + int (*device_id_check)(struct pci_dev *); + u64 dma_mask; +}; + +struct resource_entry { + struct list_head node; + struct resource *res; + resource_size_t offset; + struct resource __res; +}; + +enum pci_p2pdma_map_type { + PCI_P2PDMA_MAP_UNKNOWN = 0, + PCI_P2PDMA_MAP_NOT_SUPPORTED = 
1, + PCI_P2PDMA_MAP_BUS_ADDR = 2, + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE = 3, +}; + +struct pci_p2pdma_map_state { + struct dev_pagemap *pgmap; + int map; + u64 bus_off; +}; + +enum iommu_dma_cookie_type { + IOMMU_DMA_IOVA_COOKIE = 0, + IOMMU_DMA_MSI_COOKIE = 1, +}; + +struct iova { + struct rb_node node; + long unsigned int pfn_hi; + long unsigned int pfn_lo; +}; + +struct iova_rcache; + +struct iova_domain { + spinlock_t iova_rbtree_lock; + struct rb_root rbroot; + struct rb_node *cached_node; + struct rb_node *cached32_node; + long unsigned int granule; + long unsigned int start_pfn; + long unsigned int dma_32bit_pfn; + long unsigned int max32_alloc_size; + struct iova anchor; + struct iova_rcache *rcaches; + struct hlist_node cpuhp_dead; +}; + +enum iommu_dma_queue_type { + IOMMU_DMA_OPTS_PER_CPU_QUEUE = 0, + IOMMU_DMA_OPTS_SINGLE_QUEUE = 1, +}; + +struct iommu_dma_options { + enum iommu_dma_queue_type qt; + size_t fq_size; + unsigned int fq_timeout; +}; + +struct iova_fq; + +struct iommu_dma_cookie { + enum iommu_dma_cookie_type type; + union { + struct { + struct iova_domain iovad; + union { + struct iova_fq *single_fq; + struct iova_fq *percpu_fq; + }; + atomic64_t fq_flush_start_cnt; + atomic64_t fq_flush_finish_cnt; + struct timer_list fq_timer; + atomic_t fq_timer_on; + }; + dma_addr_t msi_iova; + }; + struct list_head msi_page_list; + struct iommu_domain *fq_domain; + struct iommu_dma_options options; + struct mutex mutex; +}; + +enum iommu_resv_type { + IOMMU_RESV_DIRECT = 0, + IOMMU_RESV_DIRECT_RELAXABLE = 1, + IOMMU_RESV_RESERVED = 2, + IOMMU_RESV_MSI = 3, + IOMMU_RESV_SW_MSI = 4, +}; + +struct iommu_resv_region { + struct list_head list; + phys_addr_t start; + size_t length; + int prot; + enum iommu_resv_type type; + void (*free)(struct device *, struct iommu_resv_region *); +}; + +struct iommu_dma_msi_page { + struct list_head list; + dma_addr_t iova; + phys_addr_t phys; +}; + +struct iova_fq_entry { + long unsigned int iova_pfn; + long unsigned int pages; + struct list_head freelist; + u64 counter; +}; + +struct iova_fq { + spinlock_t lock; + unsigned int head; + unsigned int tail; + unsigned int mod_mask; + struct iova_fq_entry entries[0]; +}; + +struct dma_sgt_handle { + struct sg_table sgt; + struct page **pages; +}; + +struct iosys_map { + union { + void *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; +}; + +struct dma_buf_ops { + bool cache_sgt_mapping; + int (*attach)(struct dma_buf *, struct dma_buf_attachment *); + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + int (*pin)(struct dma_buf_attachment *); + void (*unpin)(struct dma_buf_attachment *); + struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); + void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); + void (*release)(struct dma_buf *); + int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*mmap)(struct dma_buf *, struct vm_area_struct *); + int (*vmap)(struct dma_buf *, struct iosys_map *); + void (*vunmap)(struct dma_buf *, struct iosys_map *); +}; + +struct dma_buf_poll_cb_t { + struct dma_fence_cb cb; + wait_queue_head_t *poll; + __poll_t active; +}; + +struct dma_resv; + +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + unsigned int vmapping_counter; + struct iosys_map vmap_ptr; + const char *exp_name; + const char *name; + spinlock_t name_lock; + struct 
module *owner; + struct list_head list_node; + void *priv; + struct dma_resv *resv; + wait_queue_head_t poll; + struct dma_buf_poll_cb_t cb_in; + struct dma_buf_poll_cb_t cb_out; +}; + +struct dma_buf_attach_ops; + +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + struct sg_table *sgt; + enum dma_data_direction dir; + bool peer2peer; + const struct dma_buf_attach_ops *importer_ops; + void *importer_priv; + void *priv; +}; + +struct ww_acquire_ctx; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +}; + +struct dma_resv_list; + +struct dma_resv { + struct ww_mutex lock; + struct dma_resv_list *fences; +}; + +struct dma_buf_attach_ops { + bool allow_peer2peer; + void (*move_notify)(struct dma_buf_attachment *); +}; + +struct dma_buf_export_info { + const char *exp_name; + struct module *owner; + const struct dma_buf_ops *ops; + size_t size; + int flags; + struct dma_resv *resv; + void *priv; +}; + +struct ww_acquire_ctx { + struct task_struct *task; + long unsigned int stamp; + unsigned int acquired; + short unsigned int wounded; + short unsigned int is_wait_die; +}; + +enum dma_resv_usage { + DMA_RESV_USAGE_KERNEL = 0, + DMA_RESV_USAGE_WRITE = 1, + DMA_RESV_USAGE_READ = 2, + DMA_RESV_USAGE_BOOKKEEP = 3, +}; + +struct dma_resv_iter { + struct dma_resv *obj; + enum dma_resv_usage usage; + struct dma_fence *fence; + enum dma_resv_usage fence_usage; + unsigned int index; + struct dma_resv_list *fences; + unsigned int num_fences; + bool is_restarted; +}; + +struct dma_buf_sync { + __u64 flags; +}; + +struct dma_buf_export_sync_file { + __u32 flags; + __s32 fd; +}; + +struct dma_buf_import_sync_file { + __u32 flags; + __s32 fd; +}; + +enum phy_upstream { + PHY_UPSTREAM_MAC = 0, + PHY_UPSTREAM_PHY = 1, +}; + +struct phy_fixup { + struct list_head list; + char bus_id[64]; + u32 phy_uid; + u32 phy_uid_mask; + int (*run)(struct phy_device *); +}; + +struct phylib_stubs { + int (*hwtstamp_get)(struct phy_device *, struct kernel_hwtstamp_config *); + int (*hwtstamp_set)(struct phy_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); +}; + +struct sfp_eeprom_base { + u8 phys_id; + u8 phys_ext_id; + u8 connector; + u8 e10g_base_er: 1; + u8 e10g_base_lrm: 1; + u8 e10g_base_lr: 1; + u8 e10g_base_sr: 1; + u8 if_1x_sx: 1; + u8 if_1x_lx: 1; + u8 if_1x_copper_active: 1; + u8 if_1x_copper_passive: 1; + u8 escon_mmf_1310_led: 1; + u8 escon_smf_1310_laser: 1; + u8 sonet_oc192_short_reach: 1; + u8 sonet_reach_bit1: 1; + u8 sonet_reach_bit2: 1; + u8 sonet_oc48_long_reach: 1; + u8 sonet_oc48_intermediate_reach: 1; + u8 sonet_oc48_short_reach: 1; + u8 unallocated_5_7: 1; + u8 sonet_oc12_smf_long_reach: 1; + u8 sonet_oc12_smf_intermediate_reach: 1; + u8 sonet_oc12_short_reach: 1; + u8 unallocated_5_3: 1; + u8 sonet_oc3_smf_long_reach: 1; + u8 sonet_oc3_smf_intermediate_reach: 1; + u8 sonet_oc3_short_reach: 1; + u8 e_base_px: 1; + u8 e_base_bx10: 1; + u8 e100_base_fx: 1; + u8 e100_base_lx: 1; + u8 e1000_base_t: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_sx: 1; + u8 fc_ll_v: 1; + u8 fc_ll_s: 1; + u8 fc_ll_i: 1; + u8 fc_ll_l: 1; + u8 fc_ll_m: 1; + u8 fc_tech_sa: 1; + u8 fc_tech_lc: 1; + u8 fc_tech_electrical_inter_enclosure: 1; + u8 fc_tech_electrical_intra_enclosure: 1; + u8 fc_tech_sn: 1; + u8 fc_tech_sl: 1; + u8 fc_tech_ll: 1; + u8 sfp_ct_active: 1; + u8 sfp_ct_passive: 1; + u8 unallocated_8_1: 1; + u8 unallocated_8_0: 1; + u8 fc_media_tw: 1; + u8 fc_media_tp: 1; + u8 fc_media_mi: 1; + u8 fc_media_tv: 1; 
+ u8 fc_media_m6: 1; + u8 fc_media_m5: 1; + u8 unallocated_9_1: 1; + u8 fc_media_sm: 1; + u8 fc_speed_1200: 1; + u8 fc_speed_800: 1; + u8 fc_speed_1600: 1; + u8 fc_speed_400: 1; + u8 fc_speed_3200: 1; + u8 fc_speed_200: 1; + u8 unallocated_10_1: 1; + u8 fc_speed_100: 1; + u8 encoding; + u8 br_nominal; + u8 rate_id; + u8 link_len[6]; + char vendor_name[16]; + u8 extended_cc; + char vendor_oui[3]; + char vendor_pn[16]; + char vendor_rev[4]; + union { + __be16 optical_wavelength; + __be16 cable_compliance; + struct { + u8 reserved60_2: 6; + u8 fc_pi_4_app_h: 1; + u8 sff8431_app_e: 1; + u8 reserved61: 8; + } passive; + struct { + u8 reserved60_4: 4; + u8 fc_pi_4_lim: 1; + u8 sff8431_lim: 1; + u8 fc_pi_4_app_h: 1; + u8 sff8431_app_e: 1; + u8 reserved61: 8; + } active; + }; + u8 reserved62; + u8 cc_base; +}; + +struct sfp_eeprom_ext { + __be16 options; + u8 br_max; + u8 br_min; + char vendor_sn[16]; + char datecode[8]; + u8 diagmon; + u8 enhopts; + u8 sff8472_compliance; + u8 cc_ext; +}; + +struct sfp_eeprom_id { + struct sfp_eeprom_base base; + struct sfp_eeprom_ext ext; +}; + +struct sfp_upstream_ops { + void (*attach)(void *, struct sfp_bus *); + void (*detach)(void *, struct sfp_bus *); + int (*module_insert)(void *, const struct sfp_eeprom_id *); + void (*module_remove)(void *); + int (*module_start)(void *); + void (*module_stop)(void *); + void (*link_down)(void *); + void (*link_up)(void *); + int (*connect_phy)(void *, struct phy_device *); + void (*disconnect_phy)(void *, struct phy_device *); +}; + +struct usb_qualifier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __u8 bNumConfigurations; + __u8 bRESERVED; +}; + +struct usb_set_sel_req { + __u8 u1_sel; + __u8 u1_pel; + __le16 u2_sel; + __le16 u2_pel; +}; + +enum usb_port_connect_type { + USB_PORT_CONNECT_TYPE_UNKNOWN = 0, + USB_PORT_CONNECT_TYPE_HOT_PLUG = 1, + USB_PORT_CONNECT_TYPE_HARD_WIRED = 2, + USB_PORT_NOT_USED = 3, +}; + +struct usbdevfs_hub_portinfo { + char nports; + char port[127]; +}; + +struct usb_port_status { + __le16 wPortStatus; + __le16 wPortChange; + __le32 dwExtPortStatus; +}; + +struct usb_hub_status { + __le16 wHubStatus; + __le16 wHubChange; +}; + +enum hub_led_mode { + INDICATOR_AUTO = 0, + INDICATOR_CYCLE = 1, + INDICATOR_GREEN_BLINK = 2, + INDICATOR_GREEN_BLINK_OFF = 3, + INDICATOR_AMBER_BLINK = 4, + INDICATOR_AMBER_BLINK_OFF = 5, + INDICATOR_ALT_BLINK = 6, + INDICATOR_ALT_BLINK_OFF = 7, +} __attribute__((mode(byte))); + +struct usb_tt_clear { + struct list_head clear_list; + unsigned int tt; + u16 devinfo; + struct usb_hcd *hcd; + struct usb_host_endpoint *ep; +}; + +struct typec_connector { + void (*attach)(struct typec_connector *, struct device *); + void (*deattach)(struct typec_connector *, struct device *); +}; + +typedef u32 usb_port_location_t; + +struct usb_port; + +struct usb_hub { + struct device *intfdev; + struct usb_device *hdev; + struct kref kref; + struct urb *urb; + u8 (*buffer)[8]; + union { + struct usb_hub_status hub; + struct usb_port_status port; + } *status; + struct mutex status_mutex; + int error; + int nerrors; + long unsigned int event_bits[1]; + long unsigned int change_bits[1]; + long unsigned int removed_bits[1]; + long unsigned int wakeup_bits[1]; + long unsigned int power_bits[1]; + long unsigned int child_usage_bits[1]; + long unsigned int warm_reset_bits[1]; + struct usb_hub_descriptor *descriptor; + struct usb_tt tt; + unsigned int mA_per_port; + unsigned 
int wakeup_enabled_descendants; + unsigned int limited_power: 1; + unsigned int quiescing: 1; + unsigned int disconnected: 1; + unsigned int in_reset: 1; + unsigned int quirk_disable_autosuspend: 1; + unsigned int quirk_check_port_auto_suspend: 1; + unsigned int has_indicators: 1; + u8 indicator[31]; + struct delayed_work leds; + struct delayed_work init_work; + struct work_struct events; + spinlock_t irq_urb_lock; + struct timer_list irq_urb_retry; + struct usb_port **ports; + struct list_head onboard_devs; +}; + +struct usb_dev_state; + +struct usb_port { + struct usb_device *child; + struct device dev; + struct usb_dev_state *port_owner; + struct usb_port *peer; + struct typec_connector *connector; + struct dev_pm_qos_request *req; + enum usb_port_connect_type connect_type; + enum usb_device_state state; + struct kernfs_node *state_kn; + usb_port_location_t location; + struct mutex status_lock; + u32 over_current_count; + u8 portnum; + u32 quirks; + unsigned int early_stop: 1; + unsigned int ignore_event: 1; + unsigned int is_superspeed: 1; + unsigned int usb3_lpm_u1_permit: 1; + unsigned int usb3_lpm_u2_permit: 1; +}; + +enum hub_activation_type { + HUB_INIT = 0, + HUB_INIT2 = 1, + HUB_INIT3 = 2, + HUB_POST_RESET = 3, + HUB_RESUME = 4, + HUB_RESET_RESUME = 5, +}; + +enum hub_quiescing_type { + HUB_DISCONNECT = 0, + HUB_PRE_RESET = 1, + HUB_SUSPEND = 2, +}; + +struct serport { + struct tty_struct *tty; + wait_queue_head_t wait; + struct serio *serio; + struct serio_device_id id; + spinlock_t lock; + long unsigned int flags; +}; + +struct ipv6_stub { + int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); + int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); + struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *); + int (*ipv6_route_input)(struct sk_buff *); + struct fib6_table * (*fib6_get_table)(struct net *, u32); + int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int); + int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, struct fib6_result *, int); + void (*fib6_select_path)(const struct net *, struct fib6_result *, struct flowi6 *, int, bool, const struct sk_buff *, int); + u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, const struct in6_addr *); + int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, struct netlink_ext_ack *); + void (*fib6_nh_release)(struct fib6_nh *); + void (*fib6_nh_release_dsts)(struct fib6_nh *); + void (*fib6_update_sernum)(struct net *, struct fib6_info *); + int (*ip6_del_rt)(struct net *, struct fib6_info *, bool); + void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *); + void (*udpv6_encap_enable)(); + void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool); + struct neigh_table *nd_tbl; + int (*ipv6_fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + struct net_device * (*ipv6_dev_find)(struct net *, const struct in6_addr *, struct net_device *); + int (*ip6_xmit)(const struct sock *, struct sk_buff *, struct flowi6 *, __u32, struct ipv6_txoptions *, int, u32); +}; + +struct sch_frag_data { + long unsigned int dst; + struct qdisc_skb_cb cb; + __be16 inner_protocol; + u16 vlan_tci; + __be16 vlan_proto; + unsigned int l2_len; + u8 l2_data[18]; + int (*xmit)(struct sk_buff *); +}; + +enum { + 
ETHTOOL_A_MM_STAT_UNSPEC = 0, + ETHTOOL_A_MM_STAT_PAD = 1, + ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS = 2, + ETHTOOL_A_MM_STAT_SMD_ERRORS = 3, + ETHTOOL_A_MM_STAT_REASSEMBLY_OK = 4, + ETHTOOL_A_MM_STAT_RX_FRAG_COUNT = 5, + ETHTOOL_A_MM_STAT_TX_FRAG_COUNT = 6, + ETHTOOL_A_MM_STAT_HOLD_COUNT = 7, + __ETHTOOL_A_MM_STAT_CNT = 8, + ETHTOOL_A_MM_STAT_MAX = 7, +}; + +struct mm_reply_data { + struct ethnl_reply_data base; + struct ethtool_mm_state state; + struct ethtool_mm_stats stats; +}; + +enum nf_ip6_hook_priorities { + NF_IP6_PRI_FIRST = -2147483648, + NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450, + NF_IP6_PRI_CONNTRACK_DEFRAG = -400, + NF_IP6_PRI_RAW = -300, + NF_IP6_PRI_SELINUX_FIRST = -225, + NF_IP6_PRI_CONNTRACK = -200, + NF_IP6_PRI_MANGLE = -150, + NF_IP6_PRI_NAT_DST = -100, + NF_IP6_PRI_FILTER = 0, + NF_IP6_PRI_SECURITY = 50, + NF_IP6_PRI_NAT_SRC = 100, + NF_IP6_PRI_SELINUX_LAST = 225, + NF_IP6_PRI_CONNTRACK_HELPER = 300, + NF_IP6_PRI_LAST = 2147483647, +}; + +enum nft_chain_flags { + NFT_CHAIN_BASE = 1, + NFT_CHAIN_HW_OFFLOAD = 2, + NFT_CHAIN_BINDING = 4, +}; + +enum nft_rule_attributes { + NFTA_RULE_UNSPEC = 0, + NFTA_RULE_TABLE = 1, + NFTA_RULE_CHAIN = 2, + NFTA_RULE_HANDLE = 3, + NFTA_RULE_EXPRESSIONS = 4, + NFTA_RULE_COMPAT = 5, + NFTA_RULE_POSITION = 6, + NFTA_RULE_USERDATA = 7, + NFTA_RULE_PAD = 8, + NFTA_RULE_ID = 9, + NFTA_RULE_POSITION_ID = 10, + NFTA_RULE_CHAIN_ID = 11, + __NFTA_RULE_MAX = 12, +}; + +enum nft_rule_compat_flags { + NFT_RULE_COMPAT_F_UNUSED = 1, + NFT_RULE_COMPAT_F_INV = 2, + NFT_RULE_COMPAT_F_MASK = 2, +}; + +enum nft_rule_compat_attributes { + NFTA_RULE_COMPAT_UNSPEC = 0, + NFTA_RULE_COMPAT_PROTO = 1, + NFTA_RULE_COMPAT_FLAGS = 2, + __NFTA_RULE_COMPAT_MAX = 3, +}; + +enum nft_target_attributes { + NFTA_TARGET_UNSPEC = 0, + NFTA_TARGET_NAME = 1, + NFTA_TARGET_REV = 2, + NFTA_TARGET_INFO = 3, + __NFTA_TARGET_MAX = 4, +}; + +enum nft_match_attributes { + NFTA_MATCH_UNSPEC = 0, + NFTA_MATCH_NAME = 1, + NFTA_MATCH_REV = 2, + NFTA_MATCH_INFO = 3, + __NFTA_MATCH_MAX = 4, +}; + +enum { + NFNL_MSG_COMPAT_GET = 0, + NFNL_MSG_COMPAT_MAX = 1, +}; + +enum { + NFTA_COMPAT_UNSPEC = 0, + NFTA_COMPAT_NAME = 1, + NFTA_COMPAT_REV = 2, + NFTA_COMPAT_TYPE = 3, + __NFTA_COMPAT_MAX = 4, +}; + +struct ip6t_entry { + struct ip6t_ip6 ipv6; + unsigned int nfcache; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct ebt_entry { + unsigned int bitmask; + unsigned int invflags; + __be16 ethproto; + char in[16]; + char logical_in[16]; + char out[16]; + char logical_out[16]; + unsigned char sourcemac[6]; + unsigned char sourcemsk[6]; + unsigned char destmac[6]; + unsigned char destmsk[6]; + union { + struct { + unsigned int watchers_offset; + unsigned int target_offset; + unsigned int next_offset; + }; + struct { + unsigned int watchers_offset; + unsigned int target_offset; + unsigned int next_offset; + } offsets; + }; + unsigned char elems[0]; +}; + +struct arpt_devaddr_info { + char addr[16]; + char mask[16]; +}; + +struct arpt_arp { + struct in_addr src; + struct in_addr tgt; + struct in_addr smsk; + struct in_addr tmsk; + __u8 arhln; + __u8 arhln_mask; + struct arpt_devaddr_info src_devaddr; + struct arpt_devaddr_info tgt_devaddr; + __be16 arpop; + __be16 arpop_mask; + __be16 arhrd; + __be16 arhrd_mask; + __be16 arpro; + __be16 arpro_mask; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u8 flags; + __u16 invflags; +}; + +struct arpt_entry { + 
struct arpt_arp arp; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct nft_chain_type { + const char *name; + enum nft_chain_types type; + int family; + struct module *owner; + unsigned int hook_mask; + nf_hookfn *hooks[6]; + int (*ops_register)(struct net *, const struct nf_hook_ops *); + void (*ops_unregister)(struct net *, const struct nf_hook_ops *); +}; + +struct nft_stats { + u64 bytes; + u64 pkts; + struct u64_stats_sync syncp; +}; + +struct nft_base_chain { + struct nf_hook_ops ops; + struct list_head hook_list; + const struct nft_chain_type *type; + u8 policy; + u8 flags; + struct nft_stats *stats; + struct nft_chain chain; + struct flow_block flow_block; +}; + +struct nf_loginfo { + u_int8_t type; + union { + struct { + u_int32_t copy_len; + u_int16_t group; + u_int16_t qthreshold; + u_int16_t flags; + } ulog; + struct { + u_int8_t level; + u_int8_t logflags; + } log; + } u; +}; + +struct nft_xt_match_priv { + void *info; +}; + +union nft_entry { + struct ipt_entry e4; + struct ip6t_entry e6; + struct ebt_entry ebt; + struct arpt_entry arp; +}; + +enum tcp_metric_index { + TCP_METRIC_RTT = 0, + TCP_METRIC_RTTVAR = 1, + TCP_METRIC_SSTHRESH = 2, + TCP_METRIC_CWND = 3, + TCP_METRIC_REORDERING = 4, + TCP_METRIC_RTT_US = 5, + TCP_METRIC_RTTVAR_US = 6, + __TCP_METRIC_MAX = 7, +}; + +enum { + TCP_METRICS_ATTR_UNSPEC = 0, + TCP_METRICS_ATTR_ADDR_IPV4 = 1, + TCP_METRICS_ATTR_ADDR_IPV6 = 2, + TCP_METRICS_ATTR_AGE = 3, + TCP_METRICS_ATTR_TW_TSVAL = 4, + TCP_METRICS_ATTR_TW_TS_STAMP = 5, + TCP_METRICS_ATTR_VALS = 6, + TCP_METRICS_ATTR_FOPEN_MSS = 7, + TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, + TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, + TCP_METRICS_ATTR_FOPEN_COOKIE = 10, + TCP_METRICS_ATTR_SADDR_IPV4 = 11, + TCP_METRICS_ATTR_SADDR_IPV6 = 12, + TCP_METRICS_ATTR_PAD = 13, + __TCP_METRICS_ATTR_MAX = 14, +}; + +enum { + TCP_METRICS_CMD_UNSPEC = 0, + TCP_METRICS_CMD_GET = 1, + TCP_METRICS_CMD_DEL = 2, + __TCP_METRICS_CMD_MAX = 3, +}; + +struct tcp_fastopen_metrics { + u16 mss; + u16 syn_loss: 10; + u16 try_exp: 2; + long unsigned int last_syn_loss; + struct tcp_fastopen_cookie cookie; +}; + +struct tcp_metrics_block { + struct tcp_metrics_block *tcpm_next; + struct net *tcpm_net; + struct inetpeer_addr tcpm_saddr; + struct inetpeer_addr tcpm_daddr; + long unsigned int tcpm_stamp; + u32 tcpm_lock; + u32 tcpm_vals[5]; + struct tcp_fastopen_metrics tcpm_fastopen; + struct callback_head callback_head; +}; + +struct tcpm_hash_bucket { + struct tcp_metrics_block *chain; +}; + +struct xt_entry_target { + union { + struct { + __u16 target_size; + char name[29]; + __u8 revision; + } user; + struct { + __u16 target_size; + struct xt_target *target; + } kernel; + __u16 target_size; + } u; + unsigned char data[0]; +}; + +struct xt_standard_target { + struct xt_entry_target target; + int verdict; +}; + +struct ipt_standard { + struct ipt_entry entry; + struct xt_standard_target target; +}; + +struct midr_range { + u32 model; + u32 rv_min; + u32 rv_max; +}; + +struct arm64_midr_revidr { + u32 midr_rv; + u32 revidr_mask; +}; + +struct arm64_cpu_capabilities { + const char *desc; + u16 capability; + u16 type; + bool (*matches)(const struct arm64_cpu_capabilities *, int); + void (*cpu_enable)(const struct arm64_cpu_capabilities *); + union { + struct { + struct midr_range midr_range; + const struct arm64_midr_revidr * const fixed_revs; + }; + const struct midr_range *midr_range_list; + struct { + u32 sys_reg; + u8 field_pos; + 
u8 field_width; + u8 min_field_value; + u8 max_field_value; + u8 hwcap_type; + bool sign; + long unsigned int hwcap; + }; + }; + const struct arm64_cpu_capabilities *match_list; + const struct cpumask *cpus; +}; + +enum vec_type { + ARM64_VEC_SVE = 0, + ARM64_VEC_SME = 1, + ARM64_VEC_MAX = 2, +}; + +struct vl_info { + enum vec_type type; + const char *name; + int min_vl; + int max_vl; + int max_virtualisable_vl; + long unsigned int vq_map[8]; + long unsigned int vq_partial_map[8]; +}; + +struct vl_config { + int __default_vl; +}; + +struct s2_walk_info { + int (*read_desc)(phys_addr_t, u64 *, void *); + void *data; + u64 baddr; + unsigned int max_oa_bits; + unsigned int pgshift; + unsigned int sl; + unsigned int t0sz; + bool be; +}; + +enum arm64_hyp_spectre_vector { + HYP_VECTOR_DIRECT = 0, + HYP_VECTOR_SPECTRE_DIRECT = 1, + HYP_VECTOR_INDIRECT = 2, + HYP_VECTOR_SPECTRE_INDIRECT = 3, +}; + +struct kvm_pgtable_visit_ctx { + kvm_pte_t *ptep; + kvm_pte_t old; + void *arg; + struct kvm_pgtable_mm_ops *mm_ops; + u64 start; + u64 addr; + u64 end; + s8 level; + enum kvm_pgtable_walk_flags flags; +}; + +typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *, enum kvm_pgtable_walk_flags); + +struct kvm_pgtable_walker { + const kvm_pgtable_visitor_fn_t cb; + void * const arg; + const enum kvm_pgtable_walk_flags flags; +}; + +struct hyp_fixmap_slot { + u64 addr; + kvm_pte_t *ptep; +}; + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + const char *path; + char **argv; + char **envp; + int wait; + int retval; + int (*init)(struct subprocess_info *, struct cred *); + void (*cleanup)(struct subprocess_info *); + void *data; +}; + +struct umd_info { + const char *driver_name; + struct file *pipe_to_umh; + struct file *pipe_from_umh; + struct path wd; + struct pid *tgid; +}; + +struct rcu_cblist { + struct callback_head *head; + struct callback_head **tail; + long int len; +}; + +struct tick_sched { + long unsigned int flags; + unsigned int stalled_jiffies; + long unsigned int last_tick_jiffies; + struct hrtimer sched_timer; + ktime_t last_tick; + ktime_t next_tick; + long unsigned int idle_jiffies; + ktime_t idle_waketime; + unsigned int got_idle_tick; + seqcount_t idle_sleeptime_seq; + ktime_t idle_entrytime; + long unsigned int last_jiffies; + u64 timer_expires_base; + u64 timer_expires; + u64 next_timer; + ktime_t idle_expires; + long unsigned int idle_calls; + long unsigned int idle_sleeps; + ktime_t idle_exittime; + ktime_t idle_sleeptime; + ktime_t iowait_sleeptime; + atomic_t tick_dep_mask; + long unsigned int check_clocks; +}; + +struct cpu_stop_done { + atomic_t nr_todo; + int ret; + struct completion completion; +}; + +struct cpu_stopper { + struct task_struct *thread; + raw_spinlock_t lock; + bool enabled; + struct list_head works; + struct cpu_stop_work stop_work; + long unsigned int caller; + cpu_stop_fn_t fn; +}; + +enum multi_stop_state { + MULTI_STOP_NONE = 0, + MULTI_STOP_PREPARE = 1, + MULTI_STOP_DISABLE_IRQ = 2, + MULTI_STOP_RUN = 3, + MULTI_STOP_EXIT = 4, +}; + +struct multi_stop_data { + cpu_stop_fn_t fn; + void *data; + unsigned int num_threads; + const struct cpumask *active_cpus; + enum multi_stop_state state; + atomic_t thread_ack; +}; + +struct btf_array { + __u32 type; + __u32 index_type; + __u32 nelems; +}; + +struct ftrace_event_field { + struct list_head link; + const char *name; + const char *type; + int filter_type; + int offset; + int size; + int is_signed; + int len; +}; + +typedef int (*print_type_func_t)(struct 
trace_seq *, void *, void *); + +enum fetch_op { + FETCH_OP_NOP = 0, + FETCH_OP_REG = 1, + FETCH_OP_STACK = 2, + FETCH_OP_STACKP = 3, + FETCH_OP_RETVAL = 4, + FETCH_OP_IMM = 5, + FETCH_OP_COMM = 6, + FETCH_OP_ARG = 7, + FETCH_OP_FOFFS = 8, + FETCH_OP_DATA = 9, + FETCH_OP_EDATA = 10, + FETCH_OP_DEREF = 11, + FETCH_OP_UDEREF = 12, + FETCH_OP_ST_RAW = 13, + FETCH_OP_ST_MEM = 14, + FETCH_OP_ST_UMEM = 15, + FETCH_OP_ST_STRING = 16, + FETCH_OP_ST_USTRING = 17, + FETCH_OP_ST_SYMSTR = 18, + FETCH_OP_ST_EDATA = 19, + FETCH_OP_MOD_BF = 20, + FETCH_OP_LP_ARRAY = 21, + FETCH_OP_TP_ARG = 22, + FETCH_OP_END = 23, + FETCH_NOP_SYMBOL = 24, +}; + +struct fetch_insn { + enum fetch_op op; + union { + unsigned int param; + struct { + unsigned int size; + int offset; + }; + struct { + unsigned char basesize; + unsigned char lshift; + unsigned char rshift; + }; + long unsigned int immediate; + void *data; + }; +}; + +struct fetch_type { + const char *name; + size_t size; + bool is_signed; + bool is_string; + print_type_func_t print; + const char *fmt; + const char *fmttype; +}; + +struct probe_arg { + struct fetch_insn *code; + bool dynamic; + unsigned int offset; + unsigned int count; + const char *name; + const char *comm; + char *fmt; + const struct fetch_type *type; +}; + +struct probe_entry_arg { + struct fetch_insn *code; + unsigned int size; +}; + +struct trace_uprobe_filter { + rwlock_t rwlock; + int nr_systemwide; + struct list_head perf_events; +}; + +struct trace_probe_event { + unsigned int flags; + struct trace_event_class class; + struct trace_event_call call; + struct list_head files; + struct list_head probes; + struct trace_uprobe_filter filter[0]; +}; + +struct trace_probe { + struct list_head list; + struct trace_probe_event *event; + ssize_t size; + unsigned int nr_args; + struct probe_entry_arg *entry_arg; + struct probe_arg args[0]; +}; + +struct event_file_link { + struct trace_event_file *file; + struct list_head list; +}; + +struct traceprobe_parse_context { + struct trace_event_call *event; + const char *funcname; + const struct btf_type *proto; + const struct btf_param *params; + s32 nr_params; + struct btf *btf; + const struct btf_type *last_type; + u32 last_bitoffs; + u32 last_bitsize; + struct trace_probe *tp; + unsigned int flags; + int offset; +}; + +enum probe_print_type { + PROBE_PRINT_NORMAL = 0, + PROBE_PRINT_RETURN = 1, + PROBE_PRINT_EVENT = 2, +}; + +enum { + TP_ERR_FILE_NOT_FOUND = 0, + TP_ERR_NO_REGULAR_FILE = 1, + TP_ERR_BAD_REFCNT = 2, + TP_ERR_REFCNT_OPEN_BRACE = 3, + TP_ERR_BAD_REFCNT_SUFFIX = 4, + TP_ERR_BAD_UPROBE_OFFS = 5, + TP_ERR_BAD_MAXACT_TYPE = 6, + TP_ERR_BAD_MAXACT = 7, + TP_ERR_MAXACT_TOO_BIG = 8, + TP_ERR_BAD_PROBE_ADDR = 9, + TP_ERR_NON_UNIQ_SYMBOL = 10, + TP_ERR_BAD_RETPROBE = 11, + TP_ERR_NO_TRACEPOINT = 12, + TP_ERR_BAD_ADDR_SUFFIX = 13, + TP_ERR_NO_GROUP_NAME = 14, + TP_ERR_GROUP_TOO_LONG = 15, + TP_ERR_BAD_GROUP_NAME = 16, + TP_ERR_NO_EVENT_NAME = 17, + TP_ERR_EVENT_TOO_LONG = 18, + TP_ERR_BAD_EVENT_NAME = 19, + TP_ERR_EVENT_EXIST = 20, + TP_ERR_RETVAL_ON_PROBE = 21, + TP_ERR_NO_RETVAL = 22, + TP_ERR_BAD_STACK_NUM = 23, + TP_ERR_BAD_ARG_NUM = 24, + TP_ERR_BAD_VAR = 25, + TP_ERR_BAD_REG_NAME = 26, + TP_ERR_BAD_MEM_ADDR = 27, + TP_ERR_BAD_IMM = 28, + TP_ERR_IMMSTR_NO_CLOSE = 29, + TP_ERR_FILE_ON_KPROBE = 30, + TP_ERR_BAD_FILE_OFFS = 31, + TP_ERR_SYM_ON_UPROBE = 32, + TP_ERR_TOO_MANY_OPS = 33, + TP_ERR_DEREF_NEED_BRACE = 34, + TP_ERR_BAD_DEREF_OFFS = 35, + TP_ERR_DEREF_OPEN_BRACE = 36, + TP_ERR_COMM_CANT_DEREF = 37, + TP_ERR_BAD_FETCH_ARG = 38, + 
TP_ERR_ARRAY_NO_CLOSE = 39, + TP_ERR_BAD_ARRAY_SUFFIX = 40, + TP_ERR_BAD_ARRAY_NUM = 41, + TP_ERR_ARRAY_TOO_BIG = 42, + TP_ERR_BAD_TYPE = 43, + TP_ERR_BAD_STRING = 44, + TP_ERR_BAD_SYMSTRING = 45, + TP_ERR_BAD_BITFIELD = 46, + TP_ERR_ARG_NAME_TOO_LONG = 47, + TP_ERR_NO_ARG_NAME = 48, + TP_ERR_BAD_ARG_NAME = 49, + TP_ERR_USED_ARG_NAME = 50, + TP_ERR_ARG_TOO_LONG = 51, + TP_ERR_NO_ARG_BODY = 52, + TP_ERR_BAD_INSN_BNDRY = 53, + TP_ERR_FAIL_REG_PROBE = 54, + TP_ERR_DIFF_PROBE_TYPE = 55, + TP_ERR_DIFF_ARG_TYPE = 56, + TP_ERR_SAME_PROBE = 57, + TP_ERR_NO_EVENT_INFO = 58, + TP_ERR_BAD_ATTACH_EVENT = 59, + TP_ERR_BAD_ATTACH_ARG = 60, + TP_ERR_NO_EP_FILTER = 61, + TP_ERR_NOSUP_BTFARG = 62, + TP_ERR_NO_BTFARG = 63, + TP_ERR_NO_BTF_ENTRY = 64, + TP_ERR_BAD_VAR_ARGS = 65, + TP_ERR_NOFENTRY_ARGS = 66, + TP_ERR_DOUBLE_ARGS = 67, + TP_ERR_ARGS_2LONG = 68, + TP_ERR_ARGIDX_2BIG = 69, + TP_ERR_NO_PTR_STRCT = 70, + TP_ERR_NOSUP_DAT_ARG = 71, + TP_ERR_BAD_HYPHEN = 72, + TP_ERR_NO_BTF_FIELD = 73, + TP_ERR_BAD_BTF_TID = 74, + TP_ERR_BAD_TYPE4STR = 75, + TP_ERR_NEED_STRING_TYPE = 76, +}; + +struct trace_probe_log { + const char *subsystem; + const char **argv; + int argc; + int index; +}; + +struct bpf_cpumap_val { + __u32 qsize; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct bpf_cpu_map_entry; + +struct xdp_bulk_queue { + void *q[8]; + struct list_head flush_node; + struct bpf_cpu_map_entry *obj; + unsigned int count; +}; + +struct bpf_cpu_map_entry { + u32 cpu; + int map_id; + struct xdp_bulk_queue *bulkq; + struct ptr_ring *queue; + struct task_struct *kthread; + struct bpf_cpumap_val value; + struct bpf_prog *prog; + struct completion kthread_running; + struct rcu_work free_work; +}; + +struct bpf_cpu_map { + struct bpf_map map; + struct bpf_cpu_map_entry **cpu_map; +}; + +typedef u32 depot_flags_t; + +struct kasan_track { + u32 pid; + depot_stack_handle_t stack; +}; + +enum kasan_report_type { + KASAN_REPORT_ACCESS = 0, + KASAN_REPORT_INVALID_FREE = 1, + KASAN_REPORT_DOUBLE_FREE = 2, +}; + +struct f_owner_ex { + int type; + __kernel_pid_t pid; +}; + +struct flock { + short int l_type; + short int l_whence; + __kernel_off_t l_start; + __kernel_off_t l_len; + __kernel_pid_t l_pid; +}; + +typedef struct fsnotify_mark_connector *fsnotify_connp_t; + +enum { + VERBOSE_STATUS = 1, +}; + +enum { + Enabled = 0, + Magic = 1, +}; + +typedef struct { + struct list_head list; + long unsigned int flags; + int offset; + int size; + char *magic; + char *mask; + const char *interpreter; + char *name; + struct dentry *dentry; + struct file *interp_file; + refcount_t users; +} Node; + +struct fileattr { + u32 flags; + u32 fsx_xflags; + u32 fsx_extsize; + u32 fsx_nextents; + u32 fsx_projid; + u32 fsx_cowextsize; + bool flags_valid: 1; + bool fsx_valid: 1; +}; + +struct fiemap { + __u64 fm_start; + __u64 fm_length; + __u32 fm_flags; + __u32 fm_mapped_extents; + __u32 fm_extent_count; + __u32 fm_reserved; + struct fiemap_extent fm_extents[0]; +}; + +struct fsuuid { + __u32 fsu_len; + __u32 fsu_flags; + __u8 fsu_uuid[0]; +}; + +struct move_extent { + __u32 reserved; + __u32 donor_fd; + __u64 orig_start; + __u64 donor_start; + __u64 len; + __u64 moved_len; +}; + +struct ext4_new_group_input { + __u32 group; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 unused; +}; + +struct fsmap { + __u32 fmr_device; + __u32 fmr_flags; + __u64 fmr_physical; + __u64 fmr_owner; + __u64 fmr_offset; + __u64 fmr_length; + __u64 fmr_reserved[3]; +}; + +struct 
fsmap_head { + __u32 fmh_iflags; + __u32 fmh_oflags; + __u32 fmh_count; + __u32 fmh_entries; + __u64 fmh_reserved[6]; + struct fsmap fmh_keys[2]; + struct fsmap fmh_recs[0]; +}; + +struct ext4_fsmap_head { + uint32_t fmh_iflags; + uint32_t fmh_oflags; + unsigned int fmh_count; + unsigned int fmh_entries; + struct ext4_fsmap fmh_keys[2]; +}; + +typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *); + +typedef void ext4_update_sb_callback(struct ext4_super_block *, const void *); + +struct getfsmap_info { + struct super_block *gi_sb; + struct fsmap_head *gi_data; + unsigned int gi_idx; + __u32 gi_last_flags; +}; + +struct isofs_fid { + u32 block; + u16 offset; + u16 parent_offset; + u32 generation; + u32 parent_block; + u32 parent_generation; +}; + +typedef int (*eventfs_callback)(const char *, umode_t *, void **, const struct file_operations **); + +typedef void (*eventfs_release)(const char *, void *); + +struct eventfs_entry { + const char *name; + eventfs_callback callback; + eventfs_release release; +}; + +enum { + TRACEFS_EVENT_INODE = 2, + TRACEFS_GID_PERM_SET = 4, + TRACEFS_UID_PERM_SET = 8, + TRACEFS_INSTANCE_INODE = 16, +}; + +struct tracefs_inode { + struct inode vfs_inode; + struct list_head list; + long unsigned int flags; + void *private; +}; + +struct eventfs_attr { + int mode; + kuid_t uid; + kgid_t gid; +}; + +struct eventfs_inode { + union { + struct list_head list; + struct callback_head rcu; + }; + struct list_head children; + const struct eventfs_entry *entries; + const char *name; + struct eventfs_attr *entry_attrs; + void *data; + struct eventfs_attr attr; + struct kref kref; + unsigned int is_freed: 1; + unsigned int is_events: 1; + unsigned int nr_entries: 30; + unsigned int ino; +}; + +struct eventfs_root_inode { + struct eventfs_inode ei; + struct dentry *events_dir; +}; + +enum { + EVENTFS_SAVE_MODE = 65536, + EVENTFS_SAVE_UID = 131072, + EVENTFS_SAVE_GID = 262144, +}; + +struct workspace { + z_stream strm; + char *buf; + unsigned int buf_size; + struct list_head list; + int level; +}; + +struct crypto_lskcipher { + struct crypto_tfm base; +}; + +enum { + SKCIPHER_WALK_PHYS = 1, + SKCIPHER_WALK_SLOW = 2, + SKCIPHER_WALK_COPY = 4, + SKCIPHER_WALK_DIFF = 8, + SKCIPHER_WALK_SLEEP = 16, +}; + +struct skcipher_walk_buffer { + struct list_head entry; + struct scatter_walk dst; + unsigned int len; + u8 *data; + u8 buffer[0]; +}; + +struct crypto_rng; + +struct rng_alg { + int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int); + int (*seed)(struct crypto_rng *, const u8 *, unsigned int); + void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); + unsigned int seedsize; + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; + __u8 statelen; + __u8 blocklen_bytes; + char cra_name[128]; + char backend_cra_name[128]; +}; + +struct drbg_state; + +struct drbg_state_ops { + int (*update)(struct drbg_state *, struct list_head *, int); + int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); + int (*crypto_init)(struct drbg_state *); + int (*crypto_fini)(struct drbg_state *); +}; + +enum drbg_seed_state { + DRBG_SEED_STATE_UNSEEDED = 0, + DRBG_SEED_STATE_PARTIAL = 1, + DRBG_SEED_STATE_FULL = 2, +}; + +struct drbg_state { + struct mutex drbg_mutex; + unsigned char *V; + unsigned char *Vbuf; + unsigned char 
*C; + unsigned char *Cbuf; + size_t reseed_ctr; + size_t reseed_threshold; + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; + struct crypto_skcipher *ctr_handle; + struct skcipher_request *ctr_req; + __u8 *outscratchpadbuf; + __u8 *outscratchpad; + struct crypto_wait ctr_wait; + struct scatterlist sg_in; + struct scatterlist sg_out; + enum drbg_seed_state seeded; + long unsigned int last_seed_time; + bool pr; + bool fips_primed; + unsigned char *prev; + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; +}; + +enum drbg_prefixes { + DRBG_PREFIX0 = 0, + DRBG_PREFIX1 = 1, + DRBG_PREFIX2 = 2, + DRBG_PREFIX3 = 3, +}; + +struct sdesc { + struct shash_desc shash; + char ctx[0]; +}; + +struct blk_queue_stats { + struct list_head callbacks; + spinlock_t lock; + int accounting; +}; + +struct io_xattr { + struct file *file; + struct kernel_xattr_ctx ctx; + struct filename *filename; +}; + +struct once_work { + struct work_struct work; + struct static_key_true *key; + struct module *module; +}; + +struct sg_pool { + size_t size; + char *name; + struct kmem_cache *slab; + mempool_t *pool; +}; + +enum { + IRQCHIP_FWNODE_REAL = 0, + IRQCHIP_FWNODE_NAMED = 1, + IRQCHIP_FWNODE_NAMED_ID = 2, +}; + +struct pcim_iomap_devres { + void *table[6]; +}; + +struct pcim_intx_devres { + int orig_intx; +}; + +enum pcim_addr_devres_type { + PCIM_ADDR_DEVRES_TYPE_INVALID = 0, + PCIM_ADDR_DEVRES_TYPE_REGION = 1, + PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING = 2, + PCIM_ADDR_DEVRES_TYPE_MAPPING = 3, +}; + +struct pcim_addr_devres { + enum pcim_addr_devres_type type; + void *baseaddr; + long unsigned int offset; + long unsigned int len; + int bar; +}; + +struct dmt_videomode { + u32 dmt_id; + u32 std_2byte_code; + u32 cvt_3byte_code; + const struct fb_videomode *mode; +}; + +struct broken_edid { + u8 manufacturer[4]; + u32 model; + u32 fix; +}; + +struct __fb_timings { + u32 dclk; + u32 hfreq; + u32 vfreq; + u32 hactive; + u32 vactive; + u32 hblank; + u32 vblank; + u32 htotal; + u32 vtotal; +}; + +struct tty_file_private { + struct tty_struct *tty; + struct file *file; + struct list_head list; +}; + +struct cache_type_info { + const char *size_prop; + const char *line_size_props[2]; + const char *nr_sets_prop; +}; + +typedef struct { + spinlock_t *lock; +} class_spinlock_irq_t; + +enum { + K2_FLAG_SATA_8_PORTS = 16777216, + K2_FLAG_NO_ATAPI_DMA = 33554432, + K2_FLAG_BAR_POS_3 = 67108864, + K2_SATA_TF_CMD_OFFSET = 0, + K2_SATA_TF_DATA_OFFSET = 0, + K2_SATA_TF_ERROR_OFFSET = 4, + K2_SATA_TF_NSECT_OFFSET = 8, + K2_SATA_TF_LBAL_OFFSET = 12, + K2_SATA_TF_LBAM_OFFSET = 16, + K2_SATA_TF_LBAH_OFFSET = 20, + K2_SATA_TF_DEVICE_OFFSET = 24, + K2_SATA_TF_CMDSTAT_OFFSET = 28, + K2_SATA_TF_CTL_OFFSET = 32, + K2_SATA_DMA_CMD_OFFSET = 48, + K2_SATA_SCR_STATUS_OFFSET = 64, + K2_SATA_SCR_ERROR_OFFSET = 68, + K2_SATA_SCR_CONTROL_OFFSET = 72, + K2_SATA_SICR1_OFFSET = 128, + K2_SATA_SICR2_OFFSET = 132, + K2_SATA_SIM_OFFSET = 136, + K2_SATA_PORT_OFFSET = 256, + chip_svw4 = 0, + chip_svw8 = 1, + chip_svw42 = 2, + chip_svw43 = 3, +}; + +struct usbdevfs_ctrltransfer { + __u8 bRequestType; + __u8 bRequest; + __u16 wValue; + __u16 wIndex; + __u16 wLength; + __u32 timeout; + void *data; +}; + +struct usbdevfs_bulktransfer { + unsigned int ep; + unsigned int len; + unsigned int timeout; + void *data; +}; + +struct usbdevfs_setinterface { + unsigned int interface; + unsigned int altsetting; +}; + +struct usbdevfs_disconnectsignal { + unsigned int signr; 
+ void *context; +}; + +struct usbdevfs_getdriver { + unsigned int interface; + char driver[256]; +}; + +struct usbdevfs_connectinfo { + unsigned int devnum; + unsigned char slow; +}; + +struct usbdevfs_conninfo_ex { + __u32 size; + __u32 busnum; + __u32 devnum; + __u32 speed; + __u8 num_ports; + __u8 ports[7]; +}; + +struct usbdevfs_iso_packet_desc { + unsigned int length; + unsigned int actual_length; + unsigned int status; +}; + +struct usbdevfs_urb { + unsigned char type; + unsigned char endpoint; + int status; + unsigned int flags; + void *buffer; + int buffer_length; + int actual_length; + int start_frame; + union { + int number_of_packets; + unsigned int stream_id; + }; + int error_count; + unsigned int signr; + void *usercontext; + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; +}; + +struct usbdevfs_ioctl { + int ifno; + int ioctl_code; + void *data; +}; + +struct usbdevfs_disconnect_claim { + unsigned int interface; + unsigned int flags; + char driver[256]; +}; + +struct usbdevfs_streams { + unsigned int num_streams; + unsigned int num_eps; + unsigned char eps[0]; +}; + +struct usb_dev_state { + struct list_head list; + struct usb_device *dev; + struct file *file; + spinlock_t lock; + struct list_head async_pending; + struct list_head async_completed; + struct list_head memory_list; + wait_queue_head_t wait; + wait_queue_head_t wait_for_resume; + unsigned int discsignr; + struct pid *disc_pid; + const struct cred *cred; + sigval_t disccontext; + long unsigned int ifclaimed; + u32 disabled_bulk_eps; + long unsigned int interface_allowed_mask; + int not_yet_resumed; + bool suspend_allowed; + bool privileges_dropped; +}; + +struct usb_memory { + struct list_head memlist; + int vma_use_count; + int urb_use_count; + u32 size; + void *mem; + dma_addr_t dma_handle; + long unsigned int vm_start; + struct usb_dev_state *ps; +}; + +struct async { + struct list_head asynclist; + struct usb_dev_state *ps; + struct pid *pid; + const struct cred *cred; + unsigned int signr; + unsigned int ifnum; + void *userbuffer; + void *userurb; + sigval_t userurb_sigval; + struct urb *urb; + struct usb_memory *usbm; + unsigned int mem_usage; + int status; + u8 bulk_addr; + u8 bulk_status; +}; + +enum snoop_when { + SUBMIT = 0, + COMPLETE = 1, +}; + +struct i2c_board_info { + char type[20]; + short unsigned int flags; + short unsigned int addr; + const char *dev_name; + void *platform_data; + struct device_node *of_node; + struct fwnode_handle *fwnode; + const struct software_node *swnode; + const struct resource *resources; + unsigned int num_resources; + int irq; +}; + +enum sanyo_state { + STATE_INACTIVE___4 = 0, + STATE_HEADER_SPACE___3 = 1, + STATE_BIT_PULSE___3 = 2, + STATE_BIT_SPACE___3 = 3, + STATE_TRAILER_PULSE___2 = 4, + STATE_TRAILER_SPACE___2 = 5, +}; + +struct firmware_map_entry { + u64 start; + u64 end; + const char *type; + struct list_head list; + struct kobject kobj; +}; + +struct memmap_attribute { + struct attribute attr; + ssize_t (*show)(struct firmware_map_entry *, char *); +}; + +struct amba_cs_uci_id { + unsigned int devarch; + unsigned int devarch_mask; + unsigned int devtype; + void *data; +}; + +struct amba_device { + struct device dev; + struct resource res; + struct clk *pclk; + struct device_dma_parameters dma_parms; + unsigned int periphid; + struct mutex periphid_lock; + unsigned int cid; + struct amba_cs_uci_id uci; + unsigned int irq[9]; + const char *driver_override; +}; + +struct of_dev_auxdata { + char *compatible; + resource_size_t phys_addr; + char *name; + void 
*platform_data; +}; + +struct trace_event_raw_kfree_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + void *rx_sk; + short unsigned int protocol; + enum skb_drop_reason reason; + char __data[0]; +}; + +struct trace_event_raw_consume_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + char __data[0]; +}; + +struct trace_event_raw_skb_copy_datagram_iovec { + struct trace_entry ent; + const void *skbaddr; + int len; + char __data[0]; +}; + +struct trace_event_data_offsets_kfree_skb {}; + +struct trace_event_data_offsets_consume_skb {}; + +struct trace_event_data_offsets_skb_copy_datagram_iovec {}; + +typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *, enum skb_drop_reason, struct sock *); + +typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *, void *); + +typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int); + +struct trace_event_raw_net_dev_start_xmit { + struct trace_entry ent; + u32 __data_loc_name; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + unsigned int len; + unsigned int data_len; + int network_offset; + bool transport_offset_valid; + int transport_offset; + u8 tx_flags; + u16 gso_size; + u16 gso_segs; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + int rc; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit_timeout { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_driver; + int queue_index; + char __data[0]; +}; + +struct trace_event_raw_net_dev_template { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_verbose_template { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int napi_id; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + u32 hash; + bool l4_hash; + unsigned int len; + unsigned int data_len; + unsigned int truesize; + bool mac_header_valid; + int mac_header; + unsigned char nr_frags; + u16 gso_size; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_exit_template { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_net_dev_start_xmit { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_xmit { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_xmit_timeout { + u32 name; + const void *name_ptr_; + u32 driver; + const void *driver_ptr_; +}; + +struct trace_event_data_offsets_net_dev_template { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_rx_verbose_template { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_rx_exit_template {}; + +typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, const struct net_device *); + +typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, unsigned int); + +typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct net_device *, int); + +typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_rx)(void *, struct sk_buff *); + +typedef void 
(*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_exit)(void *, int); + +typedef void (*btf_trace_napi_gro_receive_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_exit)(void *, int); + +typedef void (*btf_trace_netif_rx_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int); + +struct trace_event_raw_napi_poll { + struct trace_entry ent; + struct napi_struct *napi; + u32 __data_loc_dev_name; + int work; + int budget; + char __data[0]; +}; + +struct trace_event_raw_dql_stall_detected { + struct trace_entry ent; + short unsigned int thrs; + unsigned int len; + long unsigned int last_reap; + long unsigned int hist_head; + long unsigned int now; + long unsigned int hist[4]; + char __data[0]; +}; + +struct trace_event_data_offsets_napi_poll { + u32 dev_name; + const void *dev_name_ptr_; +}; + +struct trace_event_data_offsets_dql_stall_detected {}; + +typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int); + +typedef void (*btf_trace_dql_stall_detected)(void *, short unsigned int, unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int *); + +struct trace_event_raw_sock_rcvqueue_full { + struct trace_entry ent; + int rmem_alloc; + unsigned int truesize; + int sk_rcvbuf; + char __data[0]; +}; + +struct trace_event_raw_sock_exceed_buf_limit { + struct trace_entry ent; + char name[32]; + long int sysctl_mem[3]; + long int allocated; + int sysctl_rmem; + int rmem_alloc; + int sysctl_wmem; + int wmem_alloc; + int wmem_queued; + int kind; + char __data[0]; +}; + +struct trace_event_raw_inet_sock_set_state { + struct trace_entry ent; + const void *skaddr; + int oldstate; + int newstate; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_inet_sk_error_report { + struct trace_entry ent; + int error; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_sk_data_ready { + struct trace_entry ent; + const void *skaddr; + __u16 family; + __u16 protocol; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_sock_msg_length { + struct trace_entry ent; + void *sk; + __u16 family; + __u16 protocol; + int ret; + int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_sock_rcvqueue_full {}; + +struct trace_event_data_offsets_sock_exceed_buf_limit {}; + +struct trace_event_data_offsets_inet_sock_set_state {}; + +struct trace_event_data_offsets_inet_sk_error_report {}; + +struct trace_event_data_offsets_sk_data_ready {}; + +struct trace_event_data_offsets_sock_msg_length {}; + +typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_sock_exceed_buf_limit)(void *, struct sock *, struct proto *, long int, int); + +typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, const int); + +typedef void (*btf_trace_inet_sk_error_report)(void *, 
const struct sock *); + +typedef void (*btf_trace_sk_data_ready)(void *, const struct sock *); + +typedef void (*btf_trace_sock_send_length)(void *, struct sock *, int, int); + +typedef void (*btf_trace_sock_recv_length)(void *, struct sock *, int, int); + +struct trace_event_raw_udp_fail_queue_rcv_skb { + struct trace_entry ent; + int rc; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; + +typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *, struct sk_buff *); + +struct trace_event_raw_tcp_event_sk_skb { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_send_reset { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + enum sk_rst_reason reason; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_sk { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_raw_tcp_retransmit_synack { + struct trace_entry ent; + const void *skaddr; + const void *req; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_probe { + struct trace_entry ent; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u32 mark; + __u16 data_len; + __u32 snd_nxt; + __u32 snd_una; + __u32 snd_cwnd; + __u32 ssthresh; + __u32 snd_wnd; + __u32 srtt; + __u32 rcv_wnd; + __u64 sock_cookie; + const void *skbaddr; + const void *skaddr; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_skb { + struct trace_entry ent; + const void *skbaddr; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_raw_tcp_cong_state_set { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + __u8 cong_state; + char __data[0]; +}; + +struct trace_event_raw_tcp_hash_event { + struct trace_entry ent; + __u64 net_cookie; + const void *skbaddr; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + int l3index; + __u16 sport; + __u16 dport; + __u16 family; + bool fin; + bool syn; + bool rst; + bool psh; + bool ack; + char __data[0]; +}; + +struct trace_event_raw_tcp_ao_event { + struct trace_entry ent; + __u64 net_cookie; + const void *skbaddr; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + int l3index; + __u16 sport; + __u16 dport; + __u16 family; + bool fin; + bool syn; + bool rst; + bool psh; + bool ack; + __u8 keyid; + __u8 rnext; + __u8 maclen; + char __data[0]; +}; + +struct trace_event_raw_tcp_ao_event_sk { + struct trace_entry ent; + __u64 net_cookie; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u8 keyid; + __u8 rnext; + char __data[0]; +}; + +struct trace_event_raw_tcp_ao_event_sne { + struct trace_entry ent; + __u64 net_cookie; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 
family; + __u32 new_sne; + char __data[0]; +}; + +struct trace_event_data_offsets_tcp_event_sk_skb {}; + +struct trace_event_data_offsets_tcp_send_reset {}; + +struct trace_event_data_offsets_tcp_event_sk {}; + +struct trace_event_data_offsets_tcp_retransmit_synack {}; + +struct trace_event_data_offsets_tcp_probe {}; + +struct trace_event_data_offsets_tcp_event_skb {}; + +struct trace_event_data_offsets_tcp_cong_state_set {}; + +struct trace_event_data_offsets_tcp_hash_event {}; + +struct trace_event_data_offsets_tcp_ao_event {}; + +struct trace_event_data_offsets_tcp_ao_event_sk {}; + +struct trace_event_data_offsets_tcp_ao_event_sne {}; + +typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *, const enum sk_rst_reason); + +typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *); + +typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *); + +typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *); + +typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, const struct request_sock *); + +typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_tcp_bad_csum)(void *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_cong_state_set)(void *, struct sock *, const u8); + +typedef void (*btf_trace_tcp_hash_bad_header)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_required)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_unexpected)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_mismatch)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_ao_required)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_ao_handshake_failure)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_wrong_maclen)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_mismatch)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_key_not_found)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_rnext_request)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_synack_no_key)(void *, const struct sock *, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_snd_sne_update)(void *, const struct sock *, __u32); + +typedef void (*btf_trace_tcp_ao_rcv_sne_update)(void *, const struct sock *, __u32); + +struct trace_event_raw_fib_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + u8 proto; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[4]; + __u8 dst[4]; + __u8 gw4[4]; + __u8 gw6[16]; + u16 sport; + u16 dport; + char name[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib_table_lookup {}; + +typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, const struct fib_nh_common *, int); + +struct trace_event_raw_qdisc_dequeue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + int packets; + void 
*skbaddr; + int ifindex; + u32 handle; + u32 parent; + long unsigned int txq_state; + char __data[0]; +}; + +struct trace_event_raw_qdisc_enqueue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + char __data[0]; +}; + +struct trace_event_raw_qdisc_reset { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_destroy { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_create { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + char __data[0]; +}; + +struct trace_event_data_offsets_qdisc_dequeue {}; + +struct trace_event_data_offsets_qdisc_enqueue {}; + +struct trace_event_data_offsets_qdisc_reset { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +struct trace_event_data_offsets_qdisc_destroy { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +struct trace_event_data_offsets_qdisc_create { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, const struct netdev_queue *, int, struct sk_buff *); + +typedef void (*btf_trace_qdisc_enqueue)(void *, struct Qdisc *, const struct netdev_queue *, struct sk_buff *); + +typedef void (*btf_trace_qdisc_reset)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, struct net_device *, u32); + +struct trace_event_raw_br_fdb_add { + struct trace_entry ent; + u8 ndm_flags; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + u16 nlh_flags; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_external_learn_add { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_fdb_delete { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_update { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_br_mdb_full { + struct trace_entry ent; + u32 __data_loc_dev; + int af; + u16 vid; + __u8 src[16]; + __u8 grp[16]; + __u8 grpmac[6]; + char __data[0]; +}; + +struct trace_event_data_offsets_br_fdb_add { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_fdb_external_learn_add { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_fdb_delete { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_fdb_update { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_mdb_full { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_br_fdb_add)(void *, struct ndmsg *, struct net_device *, const unsigned char *, u16, u16); + +typedef void (*btf_trace_br_fdb_external_learn_add)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16); + +typedef void (*btf_trace_fdb_delete)(void *, struct 
net_bridge *, struct net_bridge_fdb_entry *); + +typedef void (*btf_trace_br_fdb_update)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16, long unsigned int); + +typedef void (*btf_trace_br_mdb_full)(void *, const struct net_device *, const struct br_ip *); + +struct trace_event_raw_page_pool_release { + struct trace_entry ent; + const struct page_pool *pool; + s32 inflight; + u32 hold; + u32 release; + u64 cnt; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_release { + struct trace_entry ent; + const struct page_pool *pool; + long unsigned int netmem; + u32 release; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_hold { + struct trace_entry ent; + const struct page_pool *pool; + long unsigned int netmem; + u32 hold; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_update_nid { + struct trace_entry ent; + const struct page_pool *pool; + int pool_nid; + int new_nid; + char __data[0]; +}; + +struct trace_event_data_offsets_page_pool_release {}; + +struct trace_event_data_offsets_page_pool_state_release {}; + +struct trace_event_data_offsets_page_pool_state_hold {}; + +struct trace_event_data_offsets_page_pool_update_nid {}; + +typedef void (*btf_trace_page_pool_release)(void *, const struct page_pool *, s32, u32, u32); + +typedef void (*btf_trace_page_pool_state_release)(void *, const struct page_pool *, netmem_ref, u32); + +typedef void (*btf_trace_page_pool_state_hold)(void *, const struct page_pool *, netmem_ref, u32); + +typedef void (*btf_trace_page_pool_update_nid)(void *, const struct page_pool *, int); + +struct trace_event_raw_neigh_create { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + int entries; + u8 created; + u8 gc_exempt; + u8 primary_key4[4]; + u8 primary_key6[16]; + char __data[0]; +}; + +struct trace_event_raw_neigh_update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u8 new_lladdr[32]; + u8 new_state; + u32 update_flags; + u32 pid; + char __data[0]; +}; + +struct trace_event_raw_neigh__update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u32 err; + char __data[0]; +}; + +struct trace_event_data_offsets_neigh_create { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_neigh_update { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_neigh__update { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, const void *, const struct neighbour *, bool); + +typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32); + +typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int); + +typedef void 
(*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int); + +enum ethtool_c33_pse_ext_state { + ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION = 1, + ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID = 2, + ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE = 3, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED = 4, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM = 5, + ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED = 6, + ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE = 7, + ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE = 8, + ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED = 9, +}; + +enum ethtool_c33_pse_ext_substate_error_condition { + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT = 3, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON = 4, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS = 5, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF = 6, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN = 7, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE = 8, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP = 9, +}; + +enum ethtool_c33_pse_ext_substate_mr_pse_enable { + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE = 1, +}; + +enum ethtool_c33_pse_ext_substate_option_detect_ted { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR = 2, +}; + +enum ethtool_c33_pse_ext_substate_option_vport_lim { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION = 3, +}; + +enum ethtool_c33_pse_ext_substate_ovld_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD = 1, +}; + +enum ethtool_c33_pse_ext_substate_power_not_available { + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT = 3, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT = 4, +}; + +enum ethtool_c33_pse_ext_substate_short_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION = 1, +}; + +enum ethtool_c33_pse_admin_state { + ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED = 2, + ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED = 3, +}; + +enum ethtool_c33_pse_pw_d_status { + ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED = 2, + ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING = 3, + ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING = 4, + ETHTOOL_C33_PSE_PW_D_STATUS_TEST = 5, + ETHTOOL_C33_PSE_PW_D_STATUS_FAULT = 6, + ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT = 7, +}; + +enum ethtool_podl_pse_admin_state { + ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED = 2, + ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED = 3, +}; + +enum ethtool_podl_pse_pw_d_status { + ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED = 2, + ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING = 3, + ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING = 4, + ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP = 5, + ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE = 6, + ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR = 7, +}; + +struct 
ethtool_c33_pse_ext_state_info { + enum ethtool_c33_pse_ext_state c33_pse_ext_state; + union { + enum ethtool_c33_pse_ext_substate_error_condition error_condition; + enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable; + enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted; + enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim; + enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected; + enum ethtool_c33_pse_ext_substate_power_not_available power_not_available; + enum ethtool_c33_pse_ext_substate_short_detected short_detected; + u32 __c33_pse_ext_substate; + }; +}; + +struct ethtool_c33_pse_pw_limit_range { + u32 min; + u32 max; +}; + +struct pse_control_config { + enum ethtool_podl_pse_admin_state podl_admin_control; + enum ethtool_c33_pse_admin_state c33_admin_control; +}; + +struct pse_control_status { + enum ethtool_podl_pse_admin_state podl_admin_state; + enum ethtool_podl_pse_pw_d_status podl_pw_status; + enum ethtool_c33_pse_admin_state c33_admin_state; + enum ethtool_c33_pse_pw_d_status c33_pw_status; + u32 c33_pw_class; + u32 c33_actual_pw; + struct ethtool_c33_pse_ext_state_info c33_ext_state_info; + u32 c33_avail_pw_limit; + struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges; + u32 c33_pw_limit_nb_ranges; +}; + +enum { + ETHTOOL_A_C33_PSE_PW_LIMIT_UNSPEC = 0, + ETHTOOL_A_C33_PSE_PW_LIMIT_MIN = 1, + ETHTOOL_A_C33_PSE_PW_LIMIT_MAX = 2, +}; + +struct pse_reply_data { + struct ethnl_reply_data base; + struct pse_control_status status; +}; + +struct masq_dev_work { + struct work_struct work; + struct net *net; + netns_tracker ns_tracker; + union nf_inet_addr addr; + int ifindex; + int (*iter)(struct nf_conn *, void *); +}; + +enum nft_set_attributes { + NFTA_SET_UNSPEC = 0, + NFTA_SET_TABLE = 1, + NFTA_SET_NAME = 2, + NFTA_SET_FLAGS = 3, + NFTA_SET_KEY_TYPE = 4, + NFTA_SET_KEY_LEN = 5, + NFTA_SET_DATA_TYPE = 6, + NFTA_SET_DATA_LEN = 7, + NFTA_SET_POLICY = 8, + NFTA_SET_DESC = 9, + NFTA_SET_ID = 10, + NFTA_SET_TIMEOUT = 11, + NFTA_SET_GC_INTERVAL = 12, + NFTA_SET_USERDATA = 13, + NFTA_SET_PAD = 14, + NFTA_SET_OBJ_TYPE = 15, + NFTA_SET_HANDLE = 16, + NFTA_SET_EXPR = 17, + NFTA_SET_EXPRESSIONS = 18, + __NFTA_SET_MAX = 19, +}; + +struct nft_bitmap_elem { + struct nft_elem_priv priv; + struct list_head head; + struct nft_set_ext ext; +}; + +struct nft_bitmap { + struct list_head list; + u16 bitmap_size; + u8 bitmap[0]; +}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; + struct in_addr imr_address; + int imr_ifindex; +}; + +struct net_proto_family { + int family; + int (*create)(struct net *, struct socket *, int, int); + struct module *owner; +}; + +struct ip_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + __be32 sl_addr[0]; +}; + +struct ip_mc_socklist { + struct ip_mc_socklist *next_rcu; + struct ip_mreqn multi; + unsigned int sfmode; + struct ip_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ip_sf_list { + struct ip_sf_list *sf_next; + long unsigned int sf_count[2]; + __be32 sf_inaddr; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; +}; + +struct ip_mc_list { + struct in_device *interface; + __be32 multiaddr; + unsigned int sfmode; + struct ip_sf_list *sources; + struct ip_sf_list *tomb; + long unsigned int sfcount[2]; + union { + struct ip_mc_list *next; + struct ip_mc_list *next_rcu; + }; + struct ip_mc_list *next_hash; + struct timer_list timer; + int users; + refcount_t refcnt; + spinlock_t lock; + char tm_running; + char 
reporter; + char unsolicit_count; + char loaded; + unsigned char gsquery; + unsigned char crcount; + struct callback_head rcu; +}; + +struct rtentry { + long unsigned int rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + short unsigned int rt_flags; + short int rt_pad2; + long unsigned int rt_pad3; + void *rt_pad4; + short int rt_metric; + char *rt_dev; + long unsigned int rt_mtu; + long unsigned int rt_window; + short unsigned int rt_irtt; +}; + +struct pingv6_ops { + int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); + void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + int (*icmpv6_err_convert)(u8, u8, int *); + void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); +}; + +struct stp_proto { + unsigned char group_address[6]; + void (*rcv)(const struct stp_proto *, struct sk_buff *, struct net_device *); + void *data; +}; + +struct switchdev_brport { + struct net_device *dev; + const void *ctx; + struct notifier_block *atomic_nb; + struct notifier_block *blocking_nb; + bool tx_fwd_offload; +}; + +enum switchdev_notifier_type { + SWITCHDEV_FDB_ADD_TO_BRIDGE = 1, + SWITCHDEV_FDB_DEL_TO_BRIDGE = 2, + SWITCHDEV_FDB_ADD_TO_DEVICE = 3, + SWITCHDEV_FDB_DEL_TO_DEVICE = 4, + SWITCHDEV_FDB_OFFLOADED = 5, + SWITCHDEV_FDB_FLUSH_TO_BRIDGE = 6, + SWITCHDEV_PORT_OBJ_ADD = 7, + SWITCHDEV_PORT_OBJ_DEL = 8, + SWITCHDEV_PORT_ATTR_SET = 9, + SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE = 10, + SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE = 11, + SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE = 12, + SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE = 13, + SWITCHDEV_VXLAN_FDB_OFFLOADED = 14, + SWITCHDEV_BRPORT_OFFLOADED = 15, + SWITCHDEV_BRPORT_UNOFFLOADED = 16, + SWITCHDEV_BRPORT_REPLAY = 17, +}; + +struct switchdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; + const void *ctx; +}; + +struct switchdev_notifier_fdb_info { + struct switchdev_notifier_info info; + const unsigned char *addr; + u16 vid; + u8 added_by_user: 1; + u8 is_local: 1; + u8 locked: 1; + u8 offloaded: 1; +}; + +struct switchdev_notifier_brport_info { + struct switchdev_notifier_info info; + const struct switchdev_brport brport; +}; + +struct br_boolopt_multi { + __u32 optval; + __u32 optmask; +}; + +typedef long unsigned int cycles_t; + +typedef void (*bp_hardening_cb_t)(); + +struct bp_hardening_data { + enum arm64_hyp_spectre_vector slot; + bp_hardening_cb_t fn; +}; + +struct stack_info { + long unsigned int low; + long unsigned int high; +}; + +struct secondary_data { + struct task_struct *task; + long int status; +}; + +struct vgic_vmcr { + u32 grpen0; + u32 grpen1; + u32 ackctl; + u32 fiqen; + u32 cbpr; + u32 eoim; + u32 abpr; + u32 bpr; + u32 pmr; +}; + +struct trace_event_raw_vgic_update_irq_pending { + struct trace_entry ent; + long unsigned int vcpu_id; + __u32 irq; + bool level; + char __data[0]; +}; + +struct trace_event_data_offsets_vgic_update_irq_pending {}; + +typedef void (*btf_trace_vgic_update_irq_pending)(void *, long unsigned int, __u32, bool); + +enum { + IORES_DESC_NONE = 0, + IORES_DESC_CRASH_KERNEL = 1, + IORES_DESC_ACPI_TABLES = 2, + IORES_DESC_ACPI_NV_STORAGE = 3, + IORES_DESC_PERSISTENT_MEMORY = 4, + IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, + IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, + IORES_DESC_RESERVED = 7, + 
IORES_DESC_SOFT_RESERVED = 8, + IORES_DESC_CXL = 9, +}; + +struct resource_constraint { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_alignf alignf; + void *alignf_data; +}; + +enum { + REGION_INTERSECTS = 0, + REGION_DISJOINT = 1, + REGION_MIXED = 2, +}; + +enum { + MAX_IORES_LEVEL = 5, +}; + +struct region_devres { + struct resource *parent; + resource_size_t start; + resource_size_t n; +}; + +struct trace_event_raw_contention_begin { + struct trace_entry ent; + void *lock_addr; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_contention_end { + struct trace_entry ent; + void *lock_addr; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_contention_begin {}; + +struct trace_event_data_offsets_contention_end {}; + +typedef void (*btf_trace_contention_begin)(void *, void *, unsigned int); + +typedef void (*btf_trace_contention_end)(void *, void *, int); + +struct mutex_waiter { + struct list_head list; + struct task_struct *task; + struct ww_acquire_ctx *ww_ctx; +}; + +struct tm { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long int tm_year; + int tm_wday; + int tm_yday; +}; + +typedef __u64 timeu64_t; + +struct rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +struct rtc_wkalrm { + unsigned char enabled; + unsigned char pending; + struct rtc_time time; +}; + +struct rtc_param { + __u64 param; + union { + __u64 uvalue; + __s64 svalue; + __u64 ptr; + }; + __u32 index; + __u32 __pad; +}; + +struct rtc_class_ops { + int (*ioctl)(struct device *, unsigned int, long unsigned int); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*alarm_irq_enable)(struct device *, unsigned int); + int (*read_offset)(struct device *, long int *); + int (*set_offset)(struct device *, long int); + int (*param_get)(struct device *, struct rtc_param *); + int (*param_set)(struct device *, struct rtc_param *); +}; + +struct rtc_device; + +struct rtc_timer { + struct timerqueue_node node; + ktime_t period; + void (*func)(struct rtc_device *); + struct rtc_device *rtc; + int enabled; +}; + +struct rtc_device { + struct device dev; + struct module *owner; + int id; + const struct rtc_class_ops *ops; + struct mutex ops_lock; + struct cdev char_dev; + long unsigned int flags; + long unsigned int irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + int irq_freq; + int max_user_freq; + struct timerqueue_head timerqueue; + struct rtc_timer aie_timer; + struct rtc_timer uie_rtctimer; + struct hrtimer pie_timer; + int pie_enabled; + struct work_struct irqwork; + long unsigned int set_offset_nsec; + long unsigned int features[1]; + time64_t range_min; + timeu64_t range_max; + timeu64_t alarm_offset_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; +}; + +struct trace_event_raw_alarmtimer_suspend { + struct trace_entry ent; + s64 expires; + unsigned char alarm_type; + char __data[0]; +}; + +struct trace_event_raw_alarm_class { + struct trace_entry ent; + void *alarm; + unsigned char alarm_type; + s64 expires; + s64 now; + char __data[0]; +}; + +struct trace_event_data_offsets_alarmtimer_suspend {}; + +struct 
trace_event_data_offsets_alarm_class {}; + +typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int); + +typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t); + +struct alarm_base { + spinlock_t lock; + struct timerqueue_head timerqueue; + ktime_t (*get_ktime)(); + void (*get_timespec)(struct timespec64 *); + clockid_t base_clockid; +}; + +struct ftrace_func_entry { + struct hlist_node hlist; + long unsigned int ip; + long unsigned int direct; +}; + +struct ftrace_graph_ent_entry { + struct trace_entry ent; + struct ftrace_graph_ent graph_ent; +}; + +struct ftrace_graph_ret_entry { + struct trace_entry ent; + struct ftrace_graph_ret ret; +}; + +enum { + FTRACE_HASH_FL_MOD = 1, +}; + +enum { + TRACE_GRAPH_FL = 1, + TRACE_GRAPH_DEPTH_START_BIT = 2, + TRACE_GRAPH_DEPTH_END_BIT = 3, + TRACE_GRAPH_NOTRACE_BIT = 4, +}; + +struct fgraph_cpu_data { + pid_t last_pid; + int depth; + int depth_irq; + int ignore; + long unsigned int enter_funcs[50]; +}; + +struct fgraph_data { + struct fgraph_cpu_data *cpu_data; + union { + struct ftrace_graph_ent_entry ent; + struct ftrace_graph_ent_entry rent; + } ent; + struct ftrace_graph_ret_entry ret; + int failed; + int cpu; + long: 0; +} __attribute__((packed)); + +enum { + FLAGS_FILL_FULL = 268435456, + FLAGS_FILL_START = 536870912, + FLAGS_FILL_END = 805306368, +}; + +struct fgraph_times { + long long unsigned int calltime; + long long unsigned int sleeptime; +}; + +struct uprobe_consumer { + int (*handler)(struct uprobe_consumer *, struct pt_regs *, __u64 *); + int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *, __u64 *); + bool (*filter)(struct uprobe_consumer *, struct mm_struct *); + struct list_head cons_node; + __u64 id; +}; + +struct bpf_trace_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + bool is_uprobe; +}; + +typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *, const void *); + +struct trace_uprobe; + +struct uprobe_dispatch_data { + struct trace_uprobe *tu; + long unsigned int bp_addr; +}; + +struct trace_uprobe { + struct dyn_event devent; + struct uprobe_consumer consumer; + struct path path; + char *filename; + struct uprobe *uprobe; + long unsigned int offset; + long unsigned int ref_ctr_offset; + long unsigned int *nhits; + struct trace_probe tp; +}; + +struct uprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int vaddr[0]; +}; + +struct uprobe_cpu_buffer { + struct mutex mutex; + void *buf; + int dsize; +}; + +typedef bool (*filter_func_t)(struct uprobe_consumer *, struct mm_struct *); + +enum { + BPF_F_SYSCTL_BASE_NAME = 1, +}; + +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; + __u32 ipv6_dst[4]; + }; + }; + __u32 flags; + __be32 flow_label; +}; + +struct bpf_sock { + __u32 bound_dev_if; + __u32 family; + __u32 type; + __u32 protocol; + __u32 mark; + __u32 priority; + __u32 src_ip4; + __u32 src_ip6[4]; + __u32 src_port; + __be16 dst_port; + __u32 dst_ip4; + __u32 dst_ip6[4]; + __u32 state; + __s32 rx_queue_mapping; +}; + +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + 
__u32 vlan_tci; + __u32 vlan_proto; + __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; + __u32 hash; + __u32 tc_classid; + __u32 data; + __u32 data_end; + __u32 napi_id; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 data_meta; + union { + struct bpf_flow_keys *flow_keys; + }; + __u64 tstamp; + __u32 wire_len; + __u32 gso_segs; + union { + struct bpf_sock *sk; + }; + __u32 gso_size; + __u8 tstamp_type; + __u64 hwtstamp; +}; + +struct bpf_cgroup_dev_ctx { + __u32 access_type; + __u32 major; + __u32 minor; +}; + +struct bpf_sysctl { + __u32 write; + __u32 file_pos; +}; + +struct bpf_cg_run_ctx { + struct bpf_run_ctx run_ctx; + const struct bpf_prog_array_item *prog_item; + int retval; +}; + +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + u64 tmp_reg; + void *t_ctx; + u32 uaddrlen; +}; + +struct bpf_sysctl_kern { + struct ctl_table_header *head; + const struct ctl_table *table; + void *cur_val; + size_t cur_len; + void *new_val; + size_t new_len; + int new_updated; + int write; + loff_t *ppos; + u64 tmp_reg; +}; + +struct bpf_sockopt_buf { + u8 data[32]; +}; + +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + struct task_struct *current_task; + u64 tmp_reg; +}; + +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum bpf_attach_type type; +}; + +struct bpf_prog_list { + struct hlist_node node; + struct bpf_prog *prog; + struct bpf_cgroup_link *link; + struct bpf_cgroup_storage *storage[2]; +}; + +typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_retval)(); + +typedef u64 (*btf_bpf_set_retval)(int); + +typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64); + +typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t); + +typedef u64 (*btf_bpf_get_netns_cookie_sockopt)(struct bpf_sockopt_kern *); + +struct kasan_source_location { + const char *filename; + int line_no; + int column_no; +}; + +struct kasan_global { + const void *beg; + size_t size; + size_t size_with_redzone; + const void *name; + const void *module_name; + long unsigned int has_dynamic_init; + struct kasan_source_location *location; + char *odr_indicator; +}; + +struct kasan_alloc_meta { + struct kasan_track alloc_track; + depot_stack_handle_t aux_stack[2]; +}; + +struct qlist_node { + struct qlist_node *next; +}; + +struct kasan_free_meta { + struct qlist_node quarantine_link; + struct kasan_track free_track; +}; + +struct core_name { + char *corename; + int used; + int size; +}; + +typedef __kernel_mode_t mode_t; + +struct orlov_stats { + __u64 free_clusters; + __u32 free_inodes; + __u32 used_dirs; +}; + +struct ramfs_mount_opts { + umode_t mode; +}; + +struct ramfs_fs_info { + struct ramfs_mount_opts mount_opts; +}; + +enum ramfs_param { + Opt_mode___2 = 0, +}; + +enum fuse_ext_type { + FUSE_MAX_NR_SECCTX = 31, + FUSE_EXT_GROUPS = 32, +}; + +struct fuse_getattr_in { + uint32_t getattr_flags; + uint32_t dummy; + uint64_t fh; +}; + +struct fuse_attr_out { + uint64_t attr_valid; + uint32_t attr_valid_nsec; + uint32_t dummy; + struct fuse_attr attr; +}; + +struct fuse_statx_in 
{ + uint32_t getattr_flags; + uint32_t reserved; + uint64_t fh; + uint32_t sx_flags; + uint32_t sx_mask; +}; + +struct fuse_statx_out { + uint64_t attr_valid; + uint32_t attr_valid_nsec; + uint32_t flags; + uint64_t spare[2]; + struct fuse_statx stat; +}; + +struct fuse_mknod_in { + uint32_t mode; + uint32_t rdev; + uint32_t umask; + uint32_t padding; +}; + +struct fuse_mkdir_in { + uint32_t mode; + uint32_t umask; +}; + +struct fuse_rename2_in { + uint64_t newdir; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_link_in { + uint64_t oldnodeid; +}; + +struct fuse_setattr_in { + uint32_t valid; + uint32_t padding; + uint64_t fh; + uint64_t size; + uint64_t lock_owner; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t unused4; + uint32_t uid; + uint32_t gid; + uint32_t unused5; +}; + +struct fuse_create_in { + uint32_t flags; + uint32_t mode; + uint32_t umask; + uint32_t open_flags; +}; + +struct fuse_access_in { + uint32_t mask; + uint32_t padding; +}; + +struct fuse_secctx { + uint32_t size; + uint32_t padding; +}; + +struct fuse_secctx_header { + uint32_t size; + uint32_t nr_secctx; +}; + +struct fuse_ext_header { + uint32_t size; + uint32_t type; +}; + +struct fuse_supp_groups { + uint32_t nr_groups; + uint32_t groups[0]; +}; + +enum { + WORK_DONE_BIT = 0, + WORK_ORDER_DONE_BIT = 1, +}; + +struct posix_acl_xattr_header { + __le32 a_version; +}; + +struct fsverity_descriptor { + __u8 version; + __u8 hash_algorithm; + __u8 log_blocksize; + __u8 salt_size; + __le32 sig_size; + __le64 data_size; + __u8 root_hash[64]; + __u8 salt[32]; + __u8 __reserved[144]; + __u8 signature[0]; +}; + +struct btrfs_stream_header { + char magic[13]; + __le32 version; +} __attribute__((packed)); + +struct btrfs_cmd_header { + __le32 len; + __le16 cmd; + __le32 crc; +} __attribute__((packed)); + +struct btrfs_tlv_header { + __le16 tlv_type; + __le16 tlv_len; +}; + +enum btrfs_send_cmd { + BTRFS_SEND_C_UNSPEC = 0, + BTRFS_SEND_C_SUBVOL = 1, + BTRFS_SEND_C_SNAPSHOT = 2, + BTRFS_SEND_C_MKFILE = 3, + BTRFS_SEND_C_MKDIR = 4, + BTRFS_SEND_C_MKNOD = 5, + BTRFS_SEND_C_MKFIFO = 6, + BTRFS_SEND_C_MKSOCK = 7, + BTRFS_SEND_C_SYMLINK = 8, + BTRFS_SEND_C_RENAME = 9, + BTRFS_SEND_C_LINK = 10, + BTRFS_SEND_C_UNLINK = 11, + BTRFS_SEND_C_RMDIR = 12, + BTRFS_SEND_C_SET_XATTR = 13, + BTRFS_SEND_C_REMOVE_XATTR = 14, + BTRFS_SEND_C_WRITE = 15, + BTRFS_SEND_C_CLONE = 16, + BTRFS_SEND_C_TRUNCATE = 17, + BTRFS_SEND_C_CHMOD = 18, + BTRFS_SEND_C_CHOWN = 19, + BTRFS_SEND_C_UTIMES = 20, + BTRFS_SEND_C_END = 21, + BTRFS_SEND_C_UPDATE_EXTENT = 22, + BTRFS_SEND_C_MAX_V1 = 22, + BTRFS_SEND_C_FALLOCATE = 23, + BTRFS_SEND_C_FILEATTR = 24, + BTRFS_SEND_C_ENCODED_WRITE = 25, + BTRFS_SEND_C_MAX_V2 = 25, + BTRFS_SEND_C_ENABLE_VERITY = 26, + BTRFS_SEND_C_MAX_V3 = 26, + BTRFS_SEND_C_MAX = 26, +}; + +enum { + BTRFS_SEND_A_UNSPEC = 0, + BTRFS_SEND_A_UUID = 1, + BTRFS_SEND_A_CTRANSID = 2, + BTRFS_SEND_A_INO = 3, + BTRFS_SEND_A_SIZE = 4, + BTRFS_SEND_A_MODE = 5, + BTRFS_SEND_A_UID = 6, + BTRFS_SEND_A_GID = 7, + BTRFS_SEND_A_RDEV = 8, + BTRFS_SEND_A_CTIME = 9, + BTRFS_SEND_A_MTIME = 10, + BTRFS_SEND_A_ATIME = 11, + BTRFS_SEND_A_OTIME = 12, + BTRFS_SEND_A_XATTR_NAME = 13, + BTRFS_SEND_A_XATTR_DATA = 14, + BTRFS_SEND_A_PATH = 15, + BTRFS_SEND_A_PATH_TO = 16, + BTRFS_SEND_A_PATH_LINK = 17, + BTRFS_SEND_A_FILE_OFFSET = 18, + BTRFS_SEND_A_DATA = 19, + BTRFS_SEND_A_CLONE_UUID = 20, + BTRFS_SEND_A_CLONE_CTRANSID = 21, + BTRFS_SEND_A_CLONE_PATH = 22, + 
BTRFS_SEND_A_CLONE_OFFSET = 23, + BTRFS_SEND_A_CLONE_LEN = 24, + BTRFS_SEND_A_MAX_V1 = 24, + BTRFS_SEND_A_FALLOCATE_MODE = 25, + BTRFS_SEND_A_FILEATTR = 26, + BTRFS_SEND_A_UNENCODED_FILE_LEN = 27, + BTRFS_SEND_A_UNENCODED_LEN = 28, + BTRFS_SEND_A_UNENCODED_OFFSET = 29, + BTRFS_SEND_A_COMPRESSION = 30, + BTRFS_SEND_A_ENCRYPTION = 31, + BTRFS_SEND_A_MAX_V2 = 31, + BTRFS_SEND_A_VERITY_ALGORITHM = 32, + BTRFS_SEND_A_VERITY_BLOCK_SIZE = 33, + BTRFS_SEND_A_VERITY_SALT_DATA = 34, + BTRFS_SEND_A_VERITY_SIG_DATA = 35, + BTRFS_SEND_A_MAX_V3 = 35, + __BTRFS_SEND_A_MAX = 35, +}; + +struct btrfs_ioctl_send_args { + __s64 send_fd; + __u64 clone_sources_count; + __u64 *clone_sources; + __u64 parent_root; + __u64 flags; + __u32 version; + __u8 reserved[28]; +}; + +struct btrfs_lru_cache_entry { + struct list_head lru_list; + u64 key; + u64 gen; + struct list_head list; +}; + +struct btrfs_lru_cache { + struct list_head lru_list; + struct maple_tree entries; + unsigned int size; + unsigned int max_size; +}; + +struct fs_path { + union { + struct { + char *start; + char *end; + char *buf; + short unsigned int buf_len: 15; + short unsigned int reversed: 1; + char inline_buf[0]; + }; + char pad[256]; + }; +}; + +struct clone_root { + struct btrfs_root *root; + u64 ino; + u64 offset; + u64 num_bytes; + bool found_ref; +}; + +struct backref_cache_entry { + struct btrfs_lru_cache_entry entry; + u64 root_ids[17]; + int num_roots; +}; + +struct send_ctx { + struct file *send_filp; + loff_t send_off; + char *send_buf; + u32 send_size; + u32 send_max_size; + bool put_data; + struct page **send_buf_pages; + u64 flags; + u32 proto; + struct btrfs_root *send_root; + struct btrfs_root *parent_root; + struct clone_root *clone_roots; + int clone_roots_cnt; + struct btrfs_path *left_path; + struct btrfs_path *right_path; + struct btrfs_key *cmp_key; + u64 last_reloc_trans; + u64 cur_ino; + u64 cur_inode_gen; + u64 cur_inode_size; + u64 cur_inode_mode; + u64 cur_inode_rdev; + u64 cur_inode_last_extent; + u64 cur_inode_next_write_offset; + bool cur_inode_new; + bool cur_inode_new_gen; + bool cur_inode_deleted; + bool ignore_cur_inode; + bool cur_inode_needs_verity; + void *verity_descriptor; + u64 send_progress; + struct list_head new_refs; + struct list_head deleted_refs; + struct btrfs_lru_cache name_cache; + struct inode *cur_inode; + struct file_ra_state ra; + u64 page_cache_clear_start; + bool clean_page_cache; + struct rb_root pending_dir_moves; + struct rb_root waiting_dir_moves; + struct rb_root orphan_dirs; + struct rb_root rbtree_new_refs; + struct rb_root rbtree_deleted_refs; + struct btrfs_lru_cache backref_cache; + u64 backref_cache_last_reloc_trans; + struct btrfs_lru_cache dir_created_cache; + struct btrfs_lru_cache dir_utimes_cache; +}; + +struct pending_dir_move { + struct rb_node node; + struct list_head list; + u64 parent_ino; + u64 ino; + u64 gen; + struct list_head update_refs; +}; + +struct waiting_dir_move { + struct rb_node node; + u64 ino; + u64 rmdir_ino; + u64 rmdir_gen; + bool orphanized; +}; + +struct orphan_dir_info { + struct rb_node node; + u64 ino; + u64 gen; + u64 last_dir_index_offset; + u64 dir_high_seq_ino; +}; + +struct name_cache_entry { + struct btrfs_lru_cache_entry entry; + u64 parent_ino; + u64 parent_gen; + int ret; + int need_later_update; + int name_len; + char name[0]; +}; + +enum btrfs_compare_tree_result { + BTRFS_COMPARE_TREE_NEW = 0, + BTRFS_COMPARE_TREE_DELETED = 1, + BTRFS_COMPARE_TREE_CHANGED = 2, + BTRFS_COMPARE_TREE_SAME = 3, +}; + +struct btrfs_inode_info { + u64 size; 
+ u64 gen; + u64 mode; + u64 uid; + u64 gid; + u64 rdev; + u64 fileattr; + u64 nlink; +}; + +typedef int (*iterate_inode_ref_t)(u64, struct fs_path *, void *); + +typedef int (*iterate_dir_item_t)(int, struct btrfs_key *, const char *, int, const char *, int, void *); + +struct backref_ctx { + struct send_ctx *sctx; + u64 found; + u64 cur_objectid; + u64 cur_offset; + u64 extent_len; + u64 bytenr; + u64 backref_owner; + u64 backref_offset; +}; + +enum inode_state { + inode_state_no_change = 0, + inode_state_will_create = 1, + inode_state_did_create = 2, + inode_state_will_delete = 3, + inode_state_did_delete = 4, +}; + +struct recorded_ref { + struct list_head list; + char *name; + struct fs_path *full_path; + u64 dir; + u64 dir_gen; + int name_len; + struct rb_node node; + struct rb_root *root; +}; + +struct find_xattr_ctx { + const char *name; + int name_len; + int found_idx; + char *found_data; + int found_data_len; +}; + +enum devcg_behavior { + DEVCG_DEFAULT_NONE = 0, + DEVCG_DEFAULT_ALLOW = 1, + DEVCG_DEFAULT_DENY = 2, +}; + +struct dev_exception_item { + u32 major; + u32 minor; + short int type; + short int access; + struct list_head list; + struct callback_head rcu; +}; + +struct dev_cgroup { + struct cgroup_subsys_state css; + struct list_head exceptions; + enum devcg_behavior behavior; +}; + +struct asymmetric_key_subtype { + struct module *owner; + const char *name; + short unsigned int name_len; + void (*describe)(const struct key *, struct seq_file *); + void (*destroy)(void *, void *); + int (*query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*verify_signature)(const struct key *, const struct public_key_signature *); +}; + +struct asymmetric_key_parser { + struct list_head link; + struct module *owner; + const char *name; + int (*parse)(struct key_preparsed_payload *); +}; + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP = 2, + IOPRIO_WHO_USER = 3, +}; + +struct open_how { + __u64 flags; + __u64 mode; + __u64 resolve; +}; + +struct open_flags { + int open_flag; + umode_t mode; + int acc_mode; + int intent; + int lookup_flags; +}; + +struct io_open { + struct file *file; + int dfd; + u32 file_slot; + struct filename *filename; + struct open_how how; + long unsigned int nofile; +}; + +struct io_close { + struct file *file; + int fd; + u32 file_slot; +}; + +struct io_fixed_install { + struct file *file; + unsigned int o_flags; +}; + +enum io_uring_napi_op { + IO_URING_NAPI_REGISTER_OP = 0, + IO_URING_NAPI_STATIC_ADD_ID = 1, + IO_URING_NAPI_STATIC_DEL_ID = 2, +}; + +struct io_uring_napi { + __u32 busy_poll_to; + __u8 prefer_busy_poll; + __u8 opcode; + __u8 pad[2]; + __u32 op_param; + __u32 resv; +}; + +struct io_napi_entry { + unsigned int napi_id; + struct list_head list; + long unsigned int timeout; + struct hlist_node node; + struct callback_head rcu; +}; + +struct font_desc { + int idx; + const char *name; + unsigned int width; + unsigned int height; + unsigned int charcount; + const void *data; + int pref; +}; + +struct simple_pm_bus { + struct clk_bulk_data *clks; + int num_clks; +}; + +struct irq_affinity { + unsigned int pre_vectors; + unsigned int post_vectors; + unsigned int nr_sets; + unsigned int set_size[4]; + void (*calc_sets)(struct irq_affinity *, unsigned int); + void *priv; +}; + +enum msi_desc_filter { + MSI_DESC_ALL = 0, + MSI_DESC_NOTASSOCIATED = 1, + MSI_DESC_ASSOCIATED = 2, +}; + +struct clk_gate { + struct clk_hw hw; + void *reg; + u8 bit_idx; + u8 flags; 
+ spinlock_t *lock; +}; + +struct virtio_admin_cmd { + __le16 opcode; + __le16 group_type; + __le64 group_member_id; + struct scatterlist *data_sg; + struct scatterlist *result_sg; + struct completion completion; + u32 result_sg_size; + int ret; +}; + +struct virtio_admin_cmd_hdr { + __le16 opcode; + __le16 group_type; + __u8 reserved1[12]; + __le64 group_member_id; +}; + +struct virtio_admin_cmd_status { + __le16 status; + __le16 status_qualifier; + __u8 reserved2[4]; +}; + +struct virtio_dev_parts_cap { + __u8 get_parts_resource_objects_limit; + __u8 set_parts_resource_objects_limit; +}; + +struct virtio_admin_cmd_query_cap_id_result { + __le64 supported_caps[1]; +}; + +struct virtio_admin_cmd_cap_get_data { + __le16 id; + __u8 reserved[6]; +}; + +struct virtio_admin_cmd_cap_set_data { + __le16 id; + __u8 reserved[6]; + __u8 cap_specific_data[0]; +}; + +struct virtio_admin_cmd_resource_obj_cmd_hdr { + __le16 type; + __u8 reserved[2]; + __le32 id; +}; + +struct virtio_admin_cmd_resource_obj_create_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __le64 flags; + __u8 resource_obj_specific_data[0]; +}; + +struct virtio_resource_obj_dev_parts { + __u8 type; + __u8 reserved[7]; +}; + +struct virtio_admin_cmd_dev_parts_metadata_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; +}; + +struct virtio_dev_part_hdr { + __le16 part_type; + __u8 flags; + __u8 reserved; + union { + struct { + __le32 offset; + __le32 reserved; + } pci_common_cfg; + struct { + __le16 index; + __u8 reserved[6]; + } vq_index; + } selector; + __le32 length; +}; + +struct virtio_admin_cmd_dev_parts_metadata_result { + union { + struct { + __le32 size; + __le32 reserved; + } parts_size; + struct { + __le32 count; + __le32 reserved; + } hdr_list_count; + struct { + __le32 count; + __le32 reserved; + struct virtio_dev_part_hdr hdrs[0]; + } hdr_list; + }; +}; + +struct virtio_admin_cmd_dev_parts_get_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; + struct virtio_dev_part_hdr hdr_list[0]; +}; + +struct virtio_admin_cmd_dev_mode_set_data { + __u8 flags; +}; + +struct virtio_pci_legacy_device { + struct pci_dev *pci_dev; + u8 *isr; + void *ioaddr; + struct virtio_device_id id; +}; + +struct virtio_pci_vq_info { + struct virtqueue *vq; + struct list_head node; + unsigned int msix_vector; +}; + +struct virtio_pci_admin_vq { + struct virtio_pci_vq_info *info; + spinlock_t lock; + u64 supported_cmds; + u64 supported_caps; + u8 max_dev_parts_objects; + struct ida dev_parts_ida; + char name[10]; + u16 vq_index; +}; + +struct virtio_pci_device { + struct virtio_device vdev; + struct pci_dev *pci_dev; + union { + struct virtio_pci_legacy_device ldev; + struct virtio_pci_modern_device mdev; + }; + bool is_legacy; + u8 *isr; + spinlock_t lock; + struct list_head virtqueues; + struct list_head slow_virtqueues; + struct virtio_pci_vq_info **vqs; + struct virtio_pci_admin_vq admin_vq; + int msix_enabled; + int intx_enabled; + cpumask_var_t *msix_affinity_masks; + char (*msix_names)[256]; + unsigned int msix_vectors; + unsigned int msix_used_vectors; + bool per_vq_vectors; + struct virtqueue * (*setup_vq)(struct virtio_pci_device *, struct virtio_pci_vq_info *, unsigned int, void (*)(struct virtqueue *), const char *, bool, u16); + void (*del_vq)(struct virtio_pci_vq_info *); + u16 (*config_vector)(struct virtio_pci_device *, u16); + int (*avq_index)(struct virtio_device *, u16 *, u16 *); +}; + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + 
BLK_SAFE_MAX_SECTORS = 255, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 4294967295, +}; + +struct transport_container { + struct attribute_container ac; + const struct attribute_group *statistics; +}; + +struct scsi_transport_template { + struct transport_container host_attrs; + struct transport_container target_attrs; + struct transport_container device_attrs; + int (*user_scan)(struct Scsi_Host *, uint, uint, u64); + int device_size; + int device_private_offset; + int target_size; + int target_private_offset; + int host_size; + unsigned int create_work_queue: 1; + void (*eh_strategy_handler)(struct Scsi_Host *); +}; + +struct scsi_host_busy_iter_data { + bool (*fn)(struct scsi_cmnd *, void *); + void *priv; +}; + +struct hd_geometry { + unsigned char heads; + unsigned char sectors; + short unsigned int cylinders; + long unsigned int start; +}; + +typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *); + +struct usb_dynid { + struct list_head node; + struct usb_device_id id; +}; + +struct usb_device_driver { + const char *name; + bool (*match)(struct usb_device *); + int (*probe)(struct usb_device *); + void (*disconnect)(struct usb_device *); + int (*suspend)(struct usb_device *, pm_message_t); + int (*resume)(struct usb_device *, pm_message_t); + int (*choose_configuration)(struct usb_device *); + const struct attribute_group **dev_groups; + struct device_driver driver; + const struct usb_device_id *id_table; + unsigned int supports_autosuspend: 1; + unsigned int generic_subclass: 1; +}; + +struct usb_sg_request { + int status; + size_t bytes; + spinlock_t lock; + struct usb_device *dev; + int pipe; + int entries; + struct urb **urbs; + int count; + struct completion complete; +}; + +struct us_data; + +struct us_unusual_dev { + const char *vendorName; + const char *productName; + __u8 useProtocol; + __u8 useTransport; + int (*initFunction)(struct us_data *); +}; + +typedef int (*trans_cmnd)(struct scsi_cmnd *, struct us_data *); + +typedef int (*trans_reset)(struct us_data *); + +typedef void (*proto_cmnd)(struct scsi_cmnd *, struct us_data *); + +typedef void (*extra_data_destructor)(void *); + +typedef void (*pm_hook)(struct us_data *, int); + +struct us_data { + struct mutex dev_mutex; + struct usb_device *pusb_dev; + struct usb_interface *pusb_intf; + const struct us_unusual_dev *unusual_dev; + u64 fflags; + long unsigned int dflags; + unsigned int send_bulk_pipe; + unsigned int recv_bulk_pipe; + unsigned int send_ctrl_pipe; + unsigned int recv_ctrl_pipe; + unsigned int recv_intr_pipe; + char *transport_name; + char *protocol_name; + __le32 bcs_signature; + u8 subclass; + u8 protocol; + u8 max_lun; + u8 ifnum; + u8 ep_bInterval; + trans_cmnd transport; + trans_reset transport_reset; + proto_cmnd proto_handler; + struct scsi_cmnd *srb; + unsigned int tag; + char scsi_name[32]; + struct urb *current_urb; + struct usb_ctrlrequest *cr; + struct usb_sg_request current_sg; + unsigned char *iobuf; + dma_addr_t iobuf_dma; + struct task_struct *ctl_thread; + struct completion cmnd_ready; + struct completion notify; + wait_queue_head_t delay_wait; + struct delayed_work scan_dwork; + void *extra; + extra_data_destructor extra_destructor; + pm_hook suspend_resume_hook; + int use_last_sector_hacks; + int last_sector_retries; +}; + +struct swoc_info { + __u8 rev; + __u8 reserved[8]; + __u16 LinuxSKU; + __u16 LinuxVer; + __u8 reserved2[47]; +} __attribute__((packed)); + +struct pps_kinfo { + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + 
struct pps_ktime clear_tu; + int current_mode; +}; + +struct pps_fdata { + struct pps_kinfo info; + struct pps_ktime timeout; +}; + +struct pps_bind_args { + int tsformat; + int edge; + int consumer; +}; + +struct md_cluster_operations { + int (*join)(struct mddev *, int); + int (*leave)(struct mddev *); + int (*slot_number)(struct mddev *); + int (*resync_info_update)(struct mddev *, sector_t, sector_t); + int (*resync_start_notify)(struct mddev *); + int (*resync_status_get)(struct mddev *); + void (*resync_info_get)(struct mddev *, sector_t *, sector_t *); + int (*metadata_update_start)(struct mddev *); + int (*metadata_update_finish)(struct mddev *); + void (*metadata_update_cancel)(struct mddev *); + int (*resync_start)(struct mddev *); + int (*resync_finish)(struct mddev *); + int (*area_resyncing)(struct mddev *, int, sector_t, sector_t); + int (*add_new_disk)(struct mddev *, struct md_rdev *); + void (*add_new_disk_cancel)(struct mddev *); + int (*new_disk_ack)(struct mddev *, bool); + int (*remove_disk)(struct mddev *, struct md_rdev *); + void (*load_bitmaps)(struct mddev *, int); + int (*gather_bitmaps)(struct md_rdev *); + int (*resize_bitmaps)(struct mddev *, sector_t, sector_t); + int (*lock_all_bitmaps)(struct mddev *); + void (*unlock_all_bitmaps)(struct mddev *); + void (*update_size)(struct mddev *, sector_t); +}; + +typedef __u16 bitmap_counter_t; + +enum bitmap_state { + BITMAP_STALE = 1, + BITMAP_WRITE_ERROR = 2, + BITMAP_HOSTENDIAN = 15, +}; + +struct bitmap_super_s { + __le32 magic; + __le32 version; + __u8 uuid[16]; + __le64 events; + __le64 events_cleared; + __le64 sync_size; + __le32 state; + __le32 chunksize; + __le32 daemon_sleep; + __le32 write_behind; + __le32 sectors_reserved; + __le32 nodes; + __u8 cluster_name[64]; + __u8 pad[120]; +}; + +typedef struct bitmap_super_s bitmap_super_t; + +struct bitmap_page { + char *map; + unsigned int hijacked: 1; + unsigned int pending: 1; + unsigned int count: 30; +}; + +struct bitmap_counts { + spinlock_t lock; + struct bitmap_page *bp; + long unsigned int pages; + long unsigned int missing_pages; + long unsigned int chunkshift; + long unsigned int chunks; +}; + +struct bitmap_storage { + struct file *file; + struct page *sb_page; + long unsigned int sb_index; + struct page **filemap; + long unsigned int *filemap_attr; + long unsigned int file_pages; + long unsigned int bytes; +}; + +struct bitmap { + struct bitmap_counts counts; + struct mddev *mddev; + __u64 events_cleared; + int need_sync; + struct bitmap_storage storage; + long unsigned int flags; + int allclean; + atomic_t behind_writes; + long unsigned int behind_writes_used; + long unsigned int daemon_lastrun; + long unsigned int last_end_sync; + atomic_t pending_writes; + wait_queue_head_t write_wait; + wait_queue_head_t overflow_wait; + wait_queue_head_t behind_wait; + struct kernfs_node *sysfs_can_clear; + int cluster_slot; +}; + +enum bitmap_page_attr { + BITMAP_PAGE_DIRTY = 0, + BITMAP_PAGE_PENDING = 1, + BITMAP_PAGE_NEEDWRITE = 2, +}; + +struct bitmap_unplug_work { + struct work_struct work; + struct bitmap *bitmap; + struct completion *done; +}; + +enum { + NDA_UNSPEC = 0, + NDA_DST = 1, + NDA_LLADDR = 2, + NDA_CACHEINFO = 3, + NDA_PROBES = 4, + NDA_VLAN = 5, + NDA_PORT = 6, + NDA_VNI = 7, + NDA_IFINDEX = 8, + NDA_MASTER = 9, + NDA_LINK_NETNSID = 10, + NDA_SRC_VNI = 11, + NDA_PROTOCOL = 12, + NDA_NH_ID = 13, + NDA_FDB_EXT_ATTRS = 14, + NDA_FLAGS_EXT = 15, + NDA_NDM_STATE_MASK = 16, + NDA_NDM_FLAGS_MASK = 17, + __NDA_MAX = 18, +}; + +struct rtnl_link_stats { 
+ __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; + __u32 collisions; + __u32 rx_length_errors; + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; + __u32 tx_aborted_errors; + __u32 tx_carrier_errors; + __u32 tx_fifo_errors; + __u32 tx_heartbeat_errors; + __u32 tx_window_errors; + __u32 rx_compressed; + __u32 tx_compressed; + __u32 rx_nohandler; +}; + +struct rtnl_link_ifmap { + __u64 mem_start; + __u64 mem_end; + __u64 base_addr; + __u16 irq; + __u8 dma; + __u8 port; +}; + +enum { + IFLA_UNSPEC = 0, + IFLA_ADDRESS = 1, + IFLA_BROADCAST = 2, + IFLA_IFNAME = 3, + IFLA_MTU = 4, + IFLA_LINK = 5, + IFLA_QDISC = 6, + IFLA_STATS = 7, + IFLA_COST = 8, + IFLA_PRIORITY = 9, + IFLA_MASTER = 10, + IFLA_WIRELESS = 11, + IFLA_PROTINFO = 12, + IFLA_TXQLEN = 13, + IFLA_MAP = 14, + IFLA_WEIGHT = 15, + IFLA_OPERSTATE = 16, + IFLA_LINKMODE = 17, + IFLA_LINKINFO = 18, + IFLA_NET_NS_PID = 19, + IFLA_IFALIAS = 20, + IFLA_NUM_VF = 21, + IFLA_VFINFO_LIST = 22, + IFLA_STATS64 = 23, + IFLA_VF_PORTS = 24, + IFLA_PORT_SELF = 25, + IFLA_AF_SPEC = 26, + IFLA_GROUP = 27, + IFLA_NET_NS_FD = 28, + IFLA_EXT_MASK = 29, + IFLA_PROMISCUITY = 30, + IFLA_NUM_TX_QUEUES = 31, + IFLA_NUM_RX_QUEUES = 32, + IFLA_CARRIER = 33, + IFLA_PHYS_PORT_ID = 34, + IFLA_CARRIER_CHANGES = 35, + IFLA_PHYS_SWITCH_ID = 36, + IFLA_LINK_NETNSID = 37, + IFLA_PHYS_PORT_NAME = 38, + IFLA_PROTO_DOWN = 39, + IFLA_GSO_MAX_SEGS = 40, + IFLA_GSO_MAX_SIZE = 41, + IFLA_PAD = 42, + IFLA_XDP = 43, + IFLA_EVENT = 44, + IFLA_NEW_NETNSID = 45, + IFLA_IF_NETNSID = 46, + IFLA_TARGET_NETNSID = 46, + IFLA_CARRIER_UP_COUNT = 47, + IFLA_CARRIER_DOWN_COUNT = 48, + IFLA_NEW_IFINDEX = 49, + IFLA_MIN_MTU = 50, + IFLA_MAX_MTU = 51, + IFLA_PROP_LIST = 52, + IFLA_ALT_IFNAME = 53, + IFLA_PERM_ADDRESS = 54, + IFLA_PROTO_DOWN_REASON = 55, + IFLA_PARENT_DEV_NAME = 56, + IFLA_PARENT_DEV_BUS_NAME = 57, + IFLA_GRO_MAX_SIZE = 58, + IFLA_TSO_MAX_SIZE = 59, + IFLA_TSO_MAX_SEGS = 60, + IFLA_ALLMULTI = 61, + IFLA_DEVLINK_PORT = 62, + IFLA_GSO_IPV4_MAX_SIZE = 63, + IFLA_GRO_IPV4_MAX_SIZE = 64, + IFLA_DPLL_PIN = 65, + IFLA_MAX_PACING_OFFLOAD_HORIZON = 66, + __IFLA_MAX = 67, +}; + +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC = 0, + IFLA_PROTO_DOWN_REASON_MASK = 1, + IFLA_PROTO_DOWN_REASON_VALUE = 2, + __IFLA_PROTO_DOWN_REASON_CNT = 3, + IFLA_PROTO_DOWN_REASON_MAX = 2, +}; + +enum { + IFLA_BRPORT_UNSPEC = 0, + IFLA_BRPORT_STATE = 1, + IFLA_BRPORT_PRIORITY = 2, + IFLA_BRPORT_COST = 3, + IFLA_BRPORT_MODE = 4, + IFLA_BRPORT_GUARD = 5, + IFLA_BRPORT_PROTECT = 6, + IFLA_BRPORT_FAST_LEAVE = 7, + IFLA_BRPORT_LEARNING = 8, + IFLA_BRPORT_UNICAST_FLOOD = 9, + IFLA_BRPORT_PROXYARP = 10, + IFLA_BRPORT_LEARNING_SYNC = 11, + IFLA_BRPORT_PROXYARP_WIFI = 12, + IFLA_BRPORT_ROOT_ID = 13, + IFLA_BRPORT_BRIDGE_ID = 14, + IFLA_BRPORT_DESIGNATED_PORT = 15, + IFLA_BRPORT_DESIGNATED_COST = 16, + IFLA_BRPORT_ID = 17, + IFLA_BRPORT_NO = 18, + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, + IFLA_BRPORT_CONFIG_PENDING = 20, + IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, + IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, + IFLA_BRPORT_HOLD_TIMER = 23, + IFLA_BRPORT_FLUSH = 24, + IFLA_BRPORT_MULTICAST_ROUTER = 25, + IFLA_BRPORT_PAD = 26, + IFLA_BRPORT_MCAST_FLOOD = 27, + IFLA_BRPORT_MCAST_TO_UCAST = 28, + IFLA_BRPORT_VLAN_TUNNEL = 29, + IFLA_BRPORT_BCAST_FLOOD = 30, + IFLA_BRPORT_GROUP_FWD_MASK = 31, + IFLA_BRPORT_NEIGH_SUPPRESS = 32, + IFLA_BRPORT_ISOLATED = 33, 
+ IFLA_BRPORT_BACKUP_PORT = 34, + IFLA_BRPORT_MRP_RING_OPEN = 35, + IFLA_BRPORT_MRP_IN_OPEN = 36, + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 37, + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 38, + IFLA_BRPORT_LOCKED = 39, + IFLA_BRPORT_MAB = 40, + IFLA_BRPORT_MCAST_N_GROUPS = 41, + IFLA_BRPORT_MCAST_MAX_GROUPS = 42, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 43, + IFLA_BRPORT_BACKUP_NHID = 44, + __IFLA_BRPORT_MAX = 45, +}; + +enum { + IFLA_INFO_UNSPEC = 0, + IFLA_INFO_KIND = 1, + IFLA_INFO_DATA = 2, + IFLA_INFO_XSTATS = 3, + IFLA_INFO_SLAVE_KIND = 4, + IFLA_INFO_SLAVE_DATA = 5, + __IFLA_INFO_MAX = 6, +}; + +enum { + IFLA_VF_INFO_UNSPEC = 0, + IFLA_VF_INFO = 1, + __IFLA_VF_INFO_MAX = 2, +}; + +enum { + IFLA_VF_UNSPEC = 0, + IFLA_VF_MAC = 1, + IFLA_VF_VLAN = 2, + IFLA_VF_TX_RATE = 3, + IFLA_VF_SPOOFCHK = 4, + IFLA_VF_LINK_STATE = 5, + IFLA_VF_RATE = 6, + IFLA_VF_RSS_QUERY_EN = 7, + IFLA_VF_STATS = 8, + IFLA_VF_TRUST = 9, + IFLA_VF_IB_NODE_GUID = 10, + IFLA_VF_IB_PORT_GUID = 11, + IFLA_VF_VLAN_LIST = 12, + IFLA_VF_BROADCAST = 13, + __IFLA_VF_MAX = 14, +}; + +struct ifla_vf_mac { + __u32 vf; + __u8 mac[32]; +}; + +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + +struct ifla_vf_vlan { + __u32 vf; + __u32 vlan; + __u32 qos; +}; + +enum { + IFLA_VF_VLAN_INFO_UNSPEC = 0, + IFLA_VF_VLAN_INFO = 1, + __IFLA_VF_VLAN_INFO_MAX = 2, +}; + +struct ifla_vf_vlan_info { + __u32 vf; + __u32 vlan; + __u32 qos; + __be16 vlan_proto; +}; + +struct ifla_vf_tx_rate { + __u32 vf; + __u32 rate; +}; + +struct ifla_vf_rate { + __u32 vf; + __u32 min_tx_rate; + __u32 max_tx_rate; +}; + +struct ifla_vf_spoofchk { + __u32 vf; + __u32 setting; +}; + +struct ifla_vf_link_state { + __u32 vf; + __u32 link_state; +}; + +struct ifla_vf_rss_query_en { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_STATS_RX_PACKETS = 0, + IFLA_VF_STATS_TX_PACKETS = 1, + IFLA_VF_STATS_RX_BYTES = 2, + IFLA_VF_STATS_TX_BYTES = 3, + IFLA_VF_STATS_BROADCAST = 4, + IFLA_VF_STATS_MULTICAST = 5, + IFLA_VF_STATS_PAD = 6, + IFLA_VF_STATS_RX_DROPPED = 7, + IFLA_VF_STATS_TX_DROPPED = 8, + __IFLA_VF_STATS_MAX = 9, +}; + +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_PORT_UNSPEC = 0, + IFLA_VF_PORT = 1, + __IFLA_VF_PORT_MAX = 2, +}; + +enum { + IFLA_PORT_UNSPEC = 0, + IFLA_PORT_VF = 1, + IFLA_PORT_PROFILE = 2, + IFLA_PORT_VSI_TYPE = 3, + IFLA_PORT_INSTANCE_UUID = 4, + IFLA_PORT_HOST_UUID = 5, + IFLA_PORT_REQUEST = 6, + IFLA_PORT_RESPONSE = 7, + __IFLA_PORT_MAX = 8, +}; + +struct if_stats_msg { + __u8 family; + __u8 pad1; + __u16 pad2; + __u32 ifindex; + __u32 filter_mask; +}; + +enum { + IFLA_STATS_UNSPEC = 0, + IFLA_STATS_LINK_64 = 1, + IFLA_STATS_LINK_XSTATS = 2, + IFLA_STATS_LINK_XSTATS_SLAVE = 3, + IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, + IFLA_STATS_AF_SPEC = 5, + __IFLA_STATS_MAX = 6, +}; + +enum { + IFLA_STATS_GETSET_UNSPEC = 0, + IFLA_STATS_GET_FILTERS = 1, + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 2, + __IFLA_STATS_GETSET_MAX = 3, +}; + +enum { + IFLA_OFFLOAD_XSTATS_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 2, + IFLA_OFFLOAD_XSTATS_L3_STATS = 3, + __IFLA_OFFLOAD_XSTATS_MAX = 4, +}; + +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 2, + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX = 3, +}; + +enum { + XDP_ATTACHED_NONE = 0, + XDP_ATTACHED_DRV = 1, + XDP_ATTACHED_SKB = 2, + XDP_ATTACHED_HW = 3, + XDP_ATTACHED_MULTI = 4, +}; + +enum { + IFLA_XDP_UNSPEC = 0, + IFLA_XDP_FD = 1, + IFLA_XDP_ATTACHED = 2, + IFLA_XDP_FLAGS = 3, 
+ IFLA_XDP_PROG_ID = 4, + IFLA_XDP_DRV_PROG_ID = 5, + IFLA_XDP_SKB_PROG_ID = 6, + IFLA_XDP_HW_PROG_ID = 7, + IFLA_XDP_EXPECTED_FD = 8, + __IFLA_XDP_MAX = 9, +}; + +enum { + IFLA_EVENT_NONE = 0, + IFLA_EVENT_REBOOT = 1, + IFLA_EVENT_FEATURES = 2, + IFLA_EVENT_BONDING_FAILOVER = 3, + IFLA_EVENT_NOTIFY_PEERS = 4, + IFLA_EVENT_IGMP_RESEND = 5, + IFLA_EVENT_BONDING_OPTIONS = 6, +}; + +enum { + IFLA_BRIDGE_FLAGS = 0, + IFLA_BRIDGE_MODE = 1, + IFLA_BRIDGE_VLAN_INFO = 2, + IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, + IFLA_BRIDGE_MRP = 4, + IFLA_BRIDGE_CFM = 5, + IFLA_BRIDGE_MST = 6, + __IFLA_BRIDGE_MAX = 7, +}; + +struct br_port_msg { + __u8 family; + __u32 ifindex; +}; + +struct br_mdb_entry { + __u32 ifindex; + __u8 state; + __u8 flags; + __u16 vid; + struct { + union { + __be32 ip4; + struct in6_addr ip6; + unsigned char mac_addr[6]; + } u; + __be16 proto; + } addr; +}; + +enum { + MDBA_SET_ENTRY_UNSPEC = 0, + MDBA_SET_ENTRY = 1, + MDBA_SET_ENTRY_ATTRS = 2, + __MDBA_SET_ENTRY_MAX = 3, +}; + +enum { + MDBA_GET_ENTRY_UNSPEC = 0, + MDBA_GET_ENTRY = 1, + MDBA_GET_ENTRY_ATTRS = 2, + __MDBA_GET_ENTRY_MAX = 3, +}; + +struct rta_cacheinfo { + __u32 rta_clntref; + __u32 rta_lastuse; + __s32 rta_expires; + __u32 rta_error; + __u32 rta_used; + __u32 rta_id; + __u32 rta_ts; + __u32 rta_tsage; +}; + +struct ifinfomsg { + unsigned char ifi_family; + unsigned char __ifi_pad; + short unsigned int ifi_type; + int ifi_index; + unsigned int ifi_flags; + unsigned int ifi_change; +}; + +enum rtnl_kinds { + RTNL_KIND_NEW = 0, + RTNL_KIND_DEL = 1, + RTNL_KIND_GET = 2, + RTNL_KIND_SET = 3, +}; + +struct rtnl_af_ops { + struct list_head list; + struct srcu_struct srcu; + int family; + int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); + size_t (*get_link_af_size)(const struct net_device *, u32); + int (*validate_link_af)(const struct net_device *, const struct nlattr *, struct netlink_ext_ack *); + int (*set_link_af)(struct net_device *, const struct nlattr *, struct netlink_ext_ack *); + int (*fill_stats_af)(struct sk_buff *, const struct net_device *); + size_t (*get_stats_af_size)(const struct net_device *); +}; + +struct rtnl_link { + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + struct module *owner; + unsigned int flags; + struct callback_head rcu; +}; + +struct rtnl_nets { + struct net *net[3]; + unsigned char len; +}; + +struct rtnl_newlink_tbs { + struct nlattr *tb[67]; + struct nlattr *linkinfo[6]; + struct nlattr *attr[51]; + struct nlattr *slave_attr[45]; +}; + +struct rtnl_offload_xstats_request_used { + bool request; + bool used; +}; + +struct rtnl_stats_dump_filters { + u32 mask[6]; +}; + +struct rtnl_mdb_dump_ctx { + long int idx; +}; + +struct flow_keys_basic { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; +}; + +enum { + ETHTOOL_A_FEC_STAT_UNSPEC = 0, + ETHTOOL_A_FEC_STAT_PAD = 1, + ETHTOOL_A_FEC_STAT_CORRECTED = 2, + ETHTOOL_A_FEC_STAT_UNCORR = 3, + ETHTOOL_A_FEC_STAT_CORR_BITS = 4, + __ETHTOOL_A_FEC_STAT_CNT = 5, + ETHTOOL_A_FEC_STAT_MAX = 4, +}; + +struct fec_stat_grp { + u64 stats[9]; + u8 cnt; +}; + +struct fec_reply_data { + struct ethnl_reply_data base; + long unsigned int fec_link_modes[2]; + u32 active_fec; + u8 fec_auto; + struct fec_stat_grp corr; + struct fec_stat_grp uncorr; + struct fec_stat_grp corr_bits; +}; + +enum nf_ct_helper_flags { + NF_CT_HELPER_F_USERSPACE = 1, + NF_CT_HELPER_F_CONFIGURED = 2, +}; + +enum nf_ct_ecache_state { + NFCT_ECACHE_DESTROY_FAIL = 0, + NFCT_ECACHE_DESTROY_SENT = 1, +}; + +struct nf_ct_timeout { + 
__u16 l3num; + const struct nf_conntrack_l4proto *l4proto; + char data[0]; +}; + +struct nf_conn_timeout { + struct nf_ct_timeout *timeout; +}; + +struct conntrack_gc_work { + struct delayed_work dwork; + u32 next_bucket; + u32 avg_timeout; + u32 count; + u32 start_time; + bool exiting; + bool early_drop; +}; + +enum nft_list_attributes { + NFTA_LIST_UNSPEC = 0, + NFTA_LIST_ELEM = 1, + __NFTA_LIST_MAX = 2, +}; + +enum nft_dynset_ops { + NFT_DYNSET_OP_ADD = 0, + NFT_DYNSET_OP_UPDATE = 1, + NFT_DYNSET_OP_DELETE = 2, +}; + +enum nft_dynset_flags { + NFT_DYNSET_F_INV = 1, + NFT_DYNSET_F_EXPR = 2, +}; + +enum nft_dynset_attributes { + NFTA_DYNSET_UNSPEC = 0, + NFTA_DYNSET_SET_NAME = 1, + NFTA_DYNSET_SET_ID = 2, + NFTA_DYNSET_OP = 3, + NFTA_DYNSET_SREG_KEY = 4, + NFTA_DYNSET_SREG_DATA = 5, + NFTA_DYNSET_TIMEOUT = 6, + NFTA_DYNSET_EXPR = 7, + NFTA_DYNSET_PAD = 8, + NFTA_DYNSET_FLAGS = 9, + NFTA_DYNSET_EXPRESSIONS = 10, + __NFTA_DYNSET_MAX = 11, +}; + +struct nft_set_ext_type { + u8 len; + u8 align; +}; + +struct nft_set_ext_tmpl { + u16 len; + u8 offset[8]; + u8 ext_len[8]; +}; + +struct nft_dynset { + struct nft_set *set; + struct nft_set_ext_tmpl tmpl; + enum nft_dynset_ops op: 8; + u8 sreg_key; + u8 sreg_data; + bool invert; + bool expr; + u8 num_exprs; + u64 timeout; + struct nft_expr *expr_array[2]; + struct nft_set_binding binding; +}; + +struct fib_config { + u8 fc_dst_len; + dscp_t fc_dscp; + u8 fc_protocol; + u8 fc_scope; + u8 fc_type; + u8 fc_gw_family; + u32 fc_table; + __be32 fc_dst; + union { + __be32 fc_gw4; + struct in6_addr fc_gw6; + }; + int fc_oif; + u32 fc_flags; + u32 fc_priority; + __be32 fc_prefsrc; + u32 fc_nh_id; + struct nlattr *fc_mx; + struct rtnexthop *fc_mp; + int fc_mx_len; + int fc_mp_len; + u32 fc_flow; + u32 fc_nlflags; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; +}; + +struct fib_table; + +struct fib_result { + __be32 prefix; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + u32 tclassid; + dscp_t dscp; + struct fib_nh_common *nhc; + struct fib_info *fi; + struct fib_table *table; + struct hlist_head *fa_head; +}; + +struct fib_table { + struct hlist_node tb_hlist; + u32 tb_id; + int tb_num_default; + struct callback_head rcu; + long unsigned int *tb_data; + long unsigned int __data[0]; +}; + +struct fib_rt_info { + struct fib_info *fi; + u32 tb_id; + __be32 dst; + int dst_len; + dscp_t dscp; + u8 type; + u8 offload: 1; + u8 trap: 1; + u8 offload_failed: 1; + u8 unused: 5; +}; + +struct fib_entry_notifier_info { + struct fib_notifier_info info; + u32 dst; + int dst_len; + struct fib_info *fi; + dscp_t dscp; + u8 type; + u32 tb_id; +}; + +struct fib_alias { + struct hlist_node fa_list; + struct fib_info *fa_info; + dscp_t fa_dscp; + u8 fa_type; + u8 fa_state; + u8 fa_slen; + u32 tb_id; + s16 fa_default; + u8 offload; + u8 trap; + u8 offload_failed; + struct callback_head rcu; +}; + +struct fib_prop { + int error; + u8 scope; +}; + +typedef unsigned int t_key; + +struct key_vector { + t_key key; + unsigned char pos; + unsigned char bits; + unsigned char slen; + union { + struct hlist_head leaf; + struct { + struct {} __empty_tnode; + struct key_vector *tnode[0]; + }; + }; +}; + +struct tnode { + struct callback_head rcu; + t_key empty_children; + t_key full_children; + struct key_vector *parent; + struct key_vector kv[1]; +}; + +struct trie_stat { + unsigned int totdepth; + unsigned int maxdepth; + unsigned int tnodes; + unsigned int leaves; + unsigned int nullpointers; + unsigned int 
prefixes; + unsigned int nodesizes[32]; +}; + +struct trie { + struct key_vector kv[1]; +}; + +struct fib_trie_iter { + struct seq_net_private p; + struct fib_table *tb; + struct key_vector *tnode; + unsigned int index; + unsigned int depth; +}; + +struct fib_route_iter { + struct seq_net_private p; + struct fib_table *main_tb; + struct key_vector *tnode; + loff_t pos; + t_key key; +}; + +enum __sk_action { + __SK_DROP = 0, + __SK_PASS = 1, + __SK_REDIRECT = 2, + __SK_NONE = 3, +}; + +enum sk_psock_state_bits { + SK_PSOCK_TX_ENABLED = 0, + SK_PSOCK_RX_STRP_ENABLED = 1, +}; + +struct tls_crypto_info { + __u16 version; + __u16 cipher_type; +}; + +struct tls12_crypto_info_aes_gcm_128 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_aes_gcm_256 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[32]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_chacha20_poly1305 { + struct tls_crypto_info info; + unsigned char iv[12]; + unsigned char key[32]; + unsigned char salt[0]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_gcm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_ccm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tx_work { + struct delayed_work work; + struct sock *sk; +}; + +struct tls_rec; + +struct tls_sw_context_tx { + struct crypto_aead *aead_send; + struct crypto_wait async_wait; + struct tx_work tx_work; + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; + u8 async_capable: 1; + long unsigned int tx_bitmask; +}; + +struct tls_prot_info { + u16 version; + u16 cipher_type; + u16 prepend_size; + u16 tag_size; + u16 overhead_size; + u16 iv_size; + u16 salt_size; + u16 rec_seq_size; + u16 aad_size; + u16 tail_size; +}; + +struct cipher_context { + char iv[20]; + char rec_seq[8]; +}; + +union tls_crypto_context { + struct tls_crypto_info info; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; + struct tls12_crypto_info_sm4_gcm sm4_gcm; + struct tls12_crypto_info_sm4_ccm sm4_ccm; + }; +}; + +struct tls_context { + struct tls_prot_info prot_info; + u8 tx_conf: 3; + u8 rx_conf: 3; + u8 zerocopy_sendfile: 1; + u8 rx_no_pad: 1; + int (*push_pending_record)(struct sock *, int); + void (*sk_write_space)(struct sock *); + void *priv_ctx_tx; + void *priv_ctx_rx; + struct net_device *netdev; + struct cipher_context tx; + struct cipher_context rx; + struct scatterlist *partially_sent_record; + u16 partially_sent_offset; + bool splicing_pages; + bool pending_open_record_frags; + struct mutex tx_lock; + long unsigned int flags; + struct proto *sk_proto; + struct sock *sk; + void (*sk_destruct)(struct sock *); + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; + struct list_head list; + refcount_t refcount; + struct callback_head rcu; +}; + +enum { + TCP_BPF_IPV4 = 0, + TCP_BPF_IPV6 = 1, + TCP_BPF_NUM_PROTS = 2, +}; + +enum { + TCP_BPF_BASE = 0, + TCP_BPF_TX = 1, + TCP_BPF_RX = 2, + TCP_BPF_TXRX = 3, + TCP_BPF_NUM_CFGS = 4, +}; + +struct rhashtable_walker { + struct list_head list; + struct bucket_table 
*tbl; +}; + +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +struct ioam6_trace_hdr { + __be16 namespace_id; + __u8 nodelen: 5; + __u8 overflow: 1; + char: 2; + char: 1; + __u8 remlen: 7; + union { + __be32 type_be32; + struct { + __u32 bit0: 1; + __u32 bit1: 1; + __u32 bit2: 1; + __u32 bit3: 1; + __u32 bit4: 1; + __u32 bit5: 1; + __u32 bit6: 1; + __u32 bit7: 1; + __u32 bit8: 1; + __u32 bit9: 1; + __u32 bit10: 1; + __u32 bit11: 1; + __u32 bit12: 1; + __u32 bit13: 1; + __u32 bit14: 1; + __u32 bit15: 1; + __u32 bit16: 1; + __u32 bit17: 1; + __u32 bit18: 1; + __u32 bit19: 1; + __u32 bit20: 1; + __u32 bit21: 1; + __u32 bit22: 1; + __u32 bit23: 1; + } type; + }; + __u8 data[0]; +}; + +enum { + IOAM6_ATTR_UNSPEC = 0, + IOAM6_ATTR_NS_ID = 1, + IOAM6_ATTR_NS_DATA = 2, + IOAM6_ATTR_NS_DATA_WIDE = 3, + IOAM6_ATTR_SC_ID = 4, + IOAM6_ATTR_SC_DATA = 5, + IOAM6_ATTR_SC_NONE = 6, + IOAM6_ATTR_PAD = 7, + __IOAM6_ATTR_MAX = 8, +}; + +enum { + IOAM6_CMD_UNSPEC = 0, + IOAM6_CMD_ADD_NAMESPACE = 1, + IOAM6_CMD_DEL_NAMESPACE = 2, + IOAM6_CMD_DUMP_NAMESPACES = 3, + IOAM6_CMD_ADD_SCHEMA = 4, + IOAM6_CMD_DEL_SCHEMA = 5, + IOAM6_CMD_DUMP_SCHEMAS = 6, + IOAM6_CMD_NS_SET_SCHEMA = 7, + __IOAM6_CMD_MAX = 8, +}; + +enum ioam6_event_type { + IOAM6_EVENT_UNSPEC = 0, + IOAM6_EVENT_TRACE = 1, +}; + +enum ioam6_event_attr { + IOAM6_EVENT_ATTR_UNSPEC = 0, + IOAM6_EVENT_ATTR_TRACE_NAMESPACE = 1, + IOAM6_EVENT_ATTR_TRACE_NODELEN = 2, + IOAM6_EVENT_ATTR_TRACE_TYPE = 3, + IOAM6_EVENT_ATTR_TRACE_DATA = 4, + __IOAM6_EVENT_ATTR_MAX = 5, +}; + +struct ioam6_pernet_data { + struct mutex lock; + struct rhashtable namespaces; + struct rhashtable schemas; +}; + +struct ioam6_schema; + +struct ioam6_namespace { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_schema *schema; + __be16 id; + __be32 data; + __be64 data_wide; +}; + +struct ioam6_schema { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_namespace *ns; + u32 id; + int len; + __be32 hdr; + u8 data[0]; +}; + +enum { + MDBA_UNSPEC = 0, + MDBA_MDB = 1, + MDBA_ROUTER = 2, + __MDBA_MAX = 3, +}; + +enum { + MDBA_MDB_UNSPEC = 0, + MDBA_MDB_ENTRY = 1, + __MDBA_MDB_MAX = 2, +}; + +enum { + MDBA_MDB_ENTRY_UNSPEC = 0, + MDBA_MDB_ENTRY_INFO = 1, + __MDBA_MDB_ENTRY_MAX = 2, +}; + +enum { + MDBA_MDB_EATTR_UNSPEC = 0, + MDBA_MDB_EATTR_TIMER = 1, + MDBA_MDB_EATTR_SRC_LIST = 2, + MDBA_MDB_EATTR_GROUP_MODE = 3, + MDBA_MDB_EATTR_SOURCE = 4, + MDBA_MDB_EATTR_RTPROT = 5, + MDBA_MDB_EATTR_DST = 6, + MDBA_MDB_EATTR_DST_PORT = 7, + MDBA_MDB_EATTR_VNI = 8, + MDBA_MDB_EATTR_IFINDEX = 9, + MDBA_MDB_EATTR_SRC_VNI = 10, + __MDBA_MDB_EATTR_MAX = 11, +}; + +enum { + MDBA_MDB_SRCLIST_UNSPEC = 0, + MDBA_MDB_SRCLIST_ENTRY = 1, + __MDBA_MDB_SRCLIST_MAX = 2, +}; + +enum { + MDBA_MDB_SRCATTR_UNSPEC = 0, + MDBA_MDB_SRCATTR_ADDRESS = 1, + MDBA_MDB_SRCATTR_TIMER = 2, + __MDBA_MDB_SRCATTR_MAX = 3, +}; + +enum { + MDBA_ROUTER_UNSPEC = 0, + MDBA_ROUTER_PORT = 1, + __MDBA_ROUTER_MAX = 2, +}; + +enum { + MDBA_ROUTER_PATTR_UNSPEC = 0, + MDBA_ROUTER_PATTR_TIMER = 1, + MDBA_ROUTER_PATTR_TYPE = 2, + MDBA_ROUTER_PATTR_INET_TIMER = 3, + MDBA_ROUTER_PATTR_INET6_TIMER = 4, + MDBA_ROUTER_PATTR_VID = 5, + __MDBA_ROUTER_PATTR_MAX = 6, +}; + +enum { + MDBE_ATTR_UNSPEC = 0, + MDBE_ATTR_SOURCE = 1, + MDBE_ATTR_SRC_LIST = 2, + MDBE_ATTR_GROUP_MODE = 3, + MDBE_ATTR_RTPROT = 4, + MDBE_ATTR_DST = 5, + MDBE_ATTR_DST_PORT = 6, + MDBE_ATTR_VNI = 7, 
+ MDBE_ATTR_IFINDEX = 8, + MDBE_ATTR_SRC_VNI = 9, + MDBE_ATTR_STATE_MASK = 10, + __MDBE_ATTR_MAX = 11, +}; + +enum { + MDBE_SRC_LIST_UNSPEC = 0, + MDBE_SRC_LIST_ENTRY = 1, + __MDBE_SRC_LIST_MAX = 2, +}; + +enum { + MDBE_SRCATTR_UNSPEC = 0, + MDBE_SRCATTR_ADDRESS = 1, + __MDBE_SRCATTR_MAX = 2, +}; + +struct br_mdb_src_entry { + struct br_ip addr; +}; + +struct br_mdb_config { + struct net_bridge *br; + struct net_bridge_port *p; + struct br_mdb_entry *entry; + struct br_ip group; + bool src_entry; + u8 filter_mode; + u16 nlflags; + struct br_mdb_src_entry *src_entries; + int num_src_entries; + u8 rt_protocol; +}; + +enum { + BR_VLFLAG_PER_PORT_STATS = 1, + BR_VLFLAG_ADDED_BY_SWITCHDEV = 2, + BR_VLFLAG_MCAST_ENABLED = 4, + BR_VLFLAG_GLOBAL_MCAST_ENABLED = 8, + BR_VLFLAG_NEIGH_SUPPRESS_ENABLED = 16, +}; + +struct net_bridge_group_src { + struct hlist_node node; + struct br_ip addr; + struct net_bridge_port_group *pg; + u8 flags; + u8 src_query_rexmit_cnt; + struct timer_list timer; + struct net_bridge *br; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct br_mdb_flush_desc { + u32 port_ifindex; + u16 vid; + u8 rt_protocol; + u8 state; + u8 state_mask; +}; + +enum reboot_mode { + REBOOT_UNDEFINED = -1, + REBOOT_COLD = 0, + REBOOT_WARM = 1, + REBOOT_HARD = 2, + REBOOT_SOFT = 3, + REBOOT_GPIO = 4, +}; + +enum error_detector { + ERROR_DETECTOR_KFENCE = 0, + ERROR_DETECTOR_KASAN = 1, + ERROR_DETECTOR_WARN = 2, +}; + +struct warn_args { + const char *fmt; + va_list args; +}; + +enum { + KERNEL_PARAM_OPS_FL_NOARG = 1, +}; + +enum { + KERNEL_PARAM_FL_UNSAFE = 1, + KERNEL_PARAM_FL_HWPARAM = 2, +}; + +struct param_attribute { + struct module_attribute mattr; + const struct kernel_param *param; +}; + +struct module_param_attrs { + unsigned int num; + struct attribute_group grp; + struct param_attribute attrs[0]; +}; + +struct kmalloced_param { + struct list_head list; + char val[0]; +}; + +struct irqchip_fwid { + struct fwnode_handle fwnode; + unsigned int type; + char *name; + phys_addr_t *pa; +}; + +typedef __kernel_long_t __kernel_suseconds_t; + +typedef __kernel_suseconds_t suseconds_t; + +struct old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 it_value; +}; + +struct saved_cmdlines_buffer { + unsigned int map_pid_to_cmdline[32769]; + unsigned int *map_cmdline_to_pid; + unsigned int cmdline_num; + int cmdline_idx; + char saved_cmdlines[0]; +}; + +enum { + BPF_F_INDEX_MASK = 4294967295ULL, + BPF_F_CURRENT_CPU = 4294967295ULL, + BPF_F_CTXLEN_MASK = 4503595332403200ULL, +}; + +struct bpf_timer { + __u64 __opaque[2]; +}; + +struct bpf_wq { + __u64 __opaque[2]; +}; + +struct bpf_list_head { + __u64 __opaque[2]; +}; + +struct bpf_list_node { + __u64 __opaque[3]; +}; + +struct bpf_rb_root { + __u64 __opaque[2]; +}; + +struct bpf_rb_node { + __u64 __opaque[4]; +}; + +struct bpf_refcount { + __u32 __opaque[1]; +}; + +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; + +enum { + BPF_F_TIMER_ABS = 1, + BPF_F_TIMER_CPU_PIN = 2, +}; + +enum bpf_kfunc_flags { + BPF_F_PAD_ZEROS = 1, +}; + +struct bpf_rb_node_kern { + struct rb_node rb_node; + void *owner; +}; + +struct bpf_list_node_kern { + struct list_head list_head; + void *owner; +}; + +struct bpf_bprintf_data { + u32 *bin_args; + char *buf; + bool get_bin_args; + bool get_buf; +}; + +typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void 
*); + +typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + +typedef u64 (*btf_bpf_get_smp_processor_id)(); + +typedef u64 (*btf_bpf_get_numa_node_id)(); + +typedef u64 (*btf_bpf_ktime_get_ns)(); + +typedef u64 (*btf_bpf_ktime_get_boot_ns)(); + +typedef u64 (*btf_bpf_ktime_get_coarse_ns)(); + +typedef u64 (*btf_bpf_ktime_get_tai_ns)(); + +typedef u64 (*btf_bpf_get_current_pid_tgid)(); + +typedef u64 (*btf_bpf_get_current_uid_gid)(); + +typedef u64 (*btf_bpf_get_current_comm)(char *, u32); + +typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_spin_unlock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_jiffies64)(); + +typedef u64 (*btf_bpf_get_current_cgroup_id)(); + +typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int); + +typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, s64 *); + +typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, u64 *); + +typedef u64 (*btf_bpf_strncmp)(const char *, u32, const char *); + +typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, u32); + +typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_copy_from_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_copy_from_user_task)(void *, u32, const void *, struct task_struct *, u64); + +typedef u64 (*btf_bpf_per_cpu_ptr)(const void *, u32); + +typedef u64 (*btf_bpf_this_cpu_ptr)(const void *); + +struct bpf_bprintf_buffers { + char bin_args[512]; + char buf[1024]; +}; + +typedef u64 (*btf_bpf_snprintf)(char *, u32, char *, const void *, u32); + +struct bpf_async_cb { + struct bpf_map *map; + struct bpf_prog *prog; + void *callback_fn; + void *value; + union { + struct callback_head rcu; + struct work_struct delete_work; + }; + u64 flags; +}; + +struct bpf_hrtimer { + struct bpf_async_cb cb; + struct hrtimer timer; + atomic_t cancelling; +}; + +struct bpf_work { + struct bpf_async_cb cb; + struct work_struct work; + struct work_struct delete_work; +}; + +struct bpf_async_kern { + union { + struct bpf_async_cb *cb; + struct bpf_hrtimer *timer; + struct bpf_work *work; + }; + struct bpf_spin_lock lock; +}; + +enum bpf_async_type { + BPF_ASYNC_TYPE_TIMER = 0, + BPF_ASYNC_TYPE_WQ = 1, +}; + +typedef u64 (*btf_bpf_timer_init)(struct bpf_async_kern *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_timer_set_callback)(struct bpf_async_kern *, void *, struct bpf_prog_aux *); + +typedef u64 (*btf_bpf_timer_start)(struct bpf_async_kern *, u64, u64); + +typedef u64 (*btf_bpf_timer_cancel)(struct bpf_async_kern *); + +typedef u64 (*btf_bpf_kptr_xchg)(void *, void *); + +typedef u64 (*btf_bpf_dynptr_from_mem)(void *, u32, u64, struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_dynptr_read)(void *, u32, const struct bpf_dynptr_kern *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_write)(const struct bpf_dynptr_kern *, u32, void *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_data)(const struct bpf_dynptr_kern *, u32, u32); + +typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32); + +struct bpf_throw_ctx { + struct bpf_prog_aux *aux; + u64 sp; + u64 bp; + int cnt; +}; + +struct bpf_iter_bits { + __u64 __opaque[2]; +}; + +struct bpf_iter_bits_kern { + union { + __u64 *bits; + __u64 bits_copy; + }; + int nr_bits; + int bit; +}; + +struct 
trace_event_raw_oom_score_adj_update { + struct trace_entry ent; + pid_t pid; + char comm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_reclaim_retry_zone { + struct trace_entry ent; + int node; + int zone_idx; + int order; + long unsigned int reclaimable; + long unsigned int available; + long unsigned int min_wmark; + int no_progress_loops; + bool wmark_check; + char __data[0]; +}; + +struct trace_event_raw_mark_victim { + struct trace_entry ent; + int pid; + u32 __data_loc_comm; + long unsigned int total_vm; + long unsigned int anon_rss; + long unsigned int file_rss; + long unsigned int shmem_rss; + uid_t uid; + long unsigned int pgtables; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_wake_reaper { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_start_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_finish_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_skip_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_compact_retry { + struct trace_entry ent; + int order; + int priority; + int result; + int retries; + int max_retries; + bool ret; + char __data[0]; +}; + +struct trace_event_data_offsets_oom_score_adj_update {}; + +struct trace_event_data_offsets_reclaim_retry_zone {}; + +struct trace_event_data_offsets_mark_victim { + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_wake_reaper {}; + +struct trace_event_data_offsets_start_task_reaping {}; + +struct trace_event_data_offsets_finish_task_reaping {}; + +struct trace_event_data_offsets_skip_task_reaping {}; + +struct trace_event_data_offsets_compact_retry {}; + +typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *); + +typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, long unsigned int, long unsigned int, long unsigned int, int, bool); + +typedef void (*btf_trace_mark_victim)(void *, struct task_struct *, uid_t); + +typedef void (*btf_trace_wake_reaper)(void *, int); + +typedef void (*btf_trace_start_task_reaping)(void *, int); + +typedef void (*btf_trace_finish_task_reaping)(void *, int); + +typedef void (*btf_trace_skip_task_reaping)(void *, int); + +typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, enum compact_result, int, int, bool); + +enum pageblock_bits { + PB_migrate = 0, + PB_migrate_end = 2, + PB_migrate_skip = 3, + NR_PAGEBLOCK_BITS = 4, +}; + +struct alloc_context { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct zoneref *preferred_zoneref; + int migratetype; + enum zone_type highest_zoneidx; + bool spread_dirty_pages; +}; + +typedef int fpi_t; + +typedef struct { + rwlock_t *lock; +} class_read_lock_t; + +typedef struct { + rwlock_t *lock; +} class_write_lock_t; + +typedef struct rw_semaphore *class_rwsem_read_t; + +struct mount_attr { + __u64 attr_set; + __u64 attr_clr; + __u64 propagation; + __u64 userns_fd; +}; + +struct statmount { + __u32 size; + __u32 mnt_opts; + __u64 mask; + __u32 sb_dev_major; + __u32 sb_dev_minor; + __u64 sb_magic; + __u32 sb_flags; + __u32 fs_type; + __u64 mnt_id; + __u64 mnt_parent_id; + __u32 mnt_id_old; + __u32 mnt_parent_id_old; + __u64 mnt_attr; + __u64 mnt_propagation; + __u64 mnt_peer_group; + __u64 mnt_master; + __u64 propagate_from; + __u32 mnt_root; + __u32 mnt_point; + __u64 mnt_ns_id; + __u32 fs_subtype; + __u32 sb_source; + 
__u32 opt_num; + __u32 opt_array; + __u32 opt_sec_num; + __u32 opt_sec_array; + __u64 __spare2[46]; + char str[0]; +}; + +struct mnt_id_req { + __u32 size; + __u32 spare; + __u64 mnt_id; + __u64 param; + __u64 mnt_ns_id; +}; + +struct proc_mounts { + struct mnt_namespace *ns; + struct path root; + int (*show)(struct seq_file *, struct vfsmount *); +}; + +struct mount_kattr { + unsigned int attr_set; + unsigned int attr_clr; + unsigned int propagation; + unsigned int lookup_flags; + bool recurse; + struct user_namespace *mnt_userns; + struct mnt_idmap *mnt_idmap; +}; + +enum umount_tree_flags { + UMOUNT_SYNC = 1, + UMOUNT_PROPAGATE = 2, + UMOUNT_CONNECTED = 4, +}; + +enum mnt_tree_flags_t { + MNT_TREE_MOVE = 1, + MNT_TREE_BENEATH = 2, +}; + +struct kstatmount { + struct statmount *buf; + size_t bufsize; + struct vfsmount *mnt; + u64 mask; + struct path root; + struct statmount sm; + struct seq_file seq; +}; + +typedef __kernel_rwf_t rwf_t; + +struct backing_file_ctx { + const struct cred *cred; + void (*accessed)(struct file *); + void (*end_write)(struct kiocb *, ssize_t); +}; + +struct backing_aio { + struct kiocb iocb; + refcount_t ref; + struct kiocb *orig_iocb; + void (*end_write)(struct kiocb *, ssize_t); + struct work_struct work; + long int res; +}; + +union proc_op { + int (*proc_get_link)(struct dentry *, struct path *); + int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); + int lsmid; +}; + +struct proc_inode { + struct pid *pid; + unsigned int fd; + union proc_op op; + struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + const struct ctl_table *sysctl_entry; + struct hlist_node sibling_inodes; + const struct proc_ns_operations *ns_ops; + struct inode vfs_inode; +}; + +typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); + +struct fd_data { + fmode_t mode; + unsigned int fd; +}; + +struct jbd2_journal_block_tail { + __be32 t_checksum; +}; + +struct trace_event_raw_jbd2_checkpoint { + struct trace_entry ent; + dev_t dev; + int result; + char __data[0]; +}; + +struct trace_event_raw_jbd2_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + tid_t transaction; + char __data[0]; +}; + +struct trace_event_raw_jbd2_end_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + tid_t transaction; + tid_t head; + char __data[0]; +}; + +struct trace_event_raw_jbd2_submit_inode_data { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_start_class { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_extend { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int buffer_credits; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int interval; + int sync; + int requested_blocks; + int dirtied_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_run_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + long unsigned int wait; + long unsigned int request_delay; + long unsigned int running; + long unsigned int locked; + long unsigned int flushing; + long unsigned int logging; + __u32 handle_count; + __u32 blocks; + __u32 blocks_logged; + char __data[0]; +}; + +struct 
trace_event_raw_jbd2_checkpoint_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + long unsigned int chp_time; + __u32 forced_to_close; + __u32 written; + __u32 dropped; + char __data[0]; +}; + +struct trace_event_raw_jbd2_update_log_tail { + struct trace_entry ent; + dev_t dev; + tid_t tail_sequence; + tid_t first_tid; + long unsigned int block_nr; + long unsigned int freed; + char __data[0]; +}; + +struct trace_event_raw_jbd2_write_superblock { + struct trace_entry ent; + dev_t dev; + blk_opf_t write_flags; + char __data[0]; +}; + +struct trace_event_raw_jbd2_lock_buffer_stall { + struct trace_entry ent; + dev_t dev; + long unsigned int stall_ms; + char __data[0]; +}; + +struct trace_event_raw_jbd2_journal_shrink { + struct trace_entry ent; + dev_t dev; + long unsigned int nr_to_scan; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_jbd2_shrink_scan_exit { + struct trace_entry ent; + dev_t dev; + long unsigned int nr_to_scan; + long unsigned int nr_shrunk; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_jbd2_shrink_checkpoint_list { + struct trace_entry ent; + dev_t dev; + tid_t first_tid; + tid_t tid; + tid_t last_tid; + long unsigned int nr_freed; + tid_t next_tid; + char __data[0]; +}; + +struct trace_event_data_offsets_jbd2_checkpoint {}; + +struct trace_event_data_offsets_jbd2_commit {}; + +struct trace_event_data_offsets_jbd2_end_commit {}; + +struct trace_event_data_offsets_jbd2_submit_inode_data {}; + +struct trace_event_data_offsets_jbd2_handle_start_class {}; + +struct trace_event_data_offsets_jbd2_handle_extend {}; + +struct trace_event_data_offsets_jbd2_handle_stats {}; + +struct trace_event_data_offsets_jbd2_run_stats {}; + +struct trace_event_data_offsets_jbd2_checkpoint_stats {}; + +struct trace_event_data_offsets_jbd2_update_log_tail {}; + +struct trace_event_data_offsets_jbd2_write_superblock {}; + +struct trace_event_data_offsets_jbd2_lock_buffer_stall {}; + +struct trace_event_data_offsets_jbd2_journal_shrink {}; + +struct trace_event_data_offsets_jbd2_shrink_scan_exit {}; + +struct trace_event_data_offsets_jbd2_shrink_checkpoint_list {}; + +typedef void (*btf_trace_jbd2_checkpoint)(void *, journal_t *, int); + +typedef void (*btf_trace_jbd2_start_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_locking)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_flushing)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_logging)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_drop_transaction)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_end_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_submit_inode_data)(void *, struct inode *); + +typedef void (*btf_trace_jbd2_handle_start)(void *, dev_t, tid_t, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_restart)(void *, dev_t, tid_t, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_extend)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int); + +typedef void (*btf_trace_jbd2_handle_stats)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int, int, int); + +typedef void (*btf_trace_jbd2_run_stats)(void *, dev_t, tid_t, struct transaction_run_stats_s *); + +typedef void (*btf_trace_jbd2_checkpoint_stats)(void *, dev_t, tid_t, struct transaction_chp_stats_s *); + +typedef void (*btf_trace_jbd2_update_log_tail)(void *, journal_t *, 
tid_t, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_write_superblock)(void *, journal_t *, blk_opf_t); + +typedef void (*btf_trace_jbd2_lock_buffer_stall)(void *, dev_t, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_count)(void *, journal_t *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_scan_enter)(void *, journal_t *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_scan_exit)(void *, journal_t *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_checkpoint_list)(void *, journal_t *, tid_t, tid_t, tid_t, long unsigned int, tid_t); + +struct jbd2_stats_proc_session { + journal_t *journal; + struct transaction_stats_s *stats; + int start; + int max; +}; + +enum btrfs_feature_set { + FEAT_COMPAT = 0, + FEAT_COMPAT_RO = 1, + FEAT_INCOMPAT = 2, + FEAT_MAX = 3, +}; + +struct btrfs_feature_attr { + struct kobj_attribute kobj_attr; + enum btrfs_feature_set feature_set; + u64 feature_bit; +}; + +struct raid_kobject { + u64 flags; + struct kobject kobj; +}; + +struct assoc_array_ops { + long unsigned int (*get_key_chunk)(const void *, int); + long unsigned int (*get_object_key_chunk)(const void *, int); + bool (*compare_object)(const void *, const void *); + int (*diff_objects)(const void *, const void *); + void (*free_object)(void *); +}; + +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[16]; + long unsigned int nr_leaves_on_branch; +}; + +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + long unsigned int index_key[0]; +}; + +struct assoc_array_edit { + struct callback_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[16]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long int adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[17]; +}; + +struct keyring_read_iterator_context { + size_t buflen; + size_t count; + key_serial_t *buffer; +}; + +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *, const u8 *, unsigned int); + int (*encrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); + int (*decrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); + int (*init)(struct crypto_lskcipher *); + void (*exit)(struct crypto_lskcipher *); + struct skcipher_alg_common co; +}; + +struct lskcipher_instance { + void (*free)(struct lskcipher_instance *); + union { + struct { + char head[64]; + struct crypto_instance base; + } s; + struct lskcipher_alg alg; + }; +}; + +struct crypto_lskcipher_spawn { + struct crypto_spawn base; +}; + +struct xor_block_template { + struct xor_block_template *next; + const char *name; + int speed; + void (*do_2)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict); + void (*do_3)(long unsigned int, long unsigned int * restrict, const long unsigned int * 
restrict, const long unsigned int * restrict); + void (*do_4)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); + void (*do_5)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); +}; + +struct blkpg_ioctl_arg { + int op; + int flags; + int datalen; + void *data; +}; + +struct blkpg_partition { + long long int start; + long long int length; + int pno; + char devname[64]; + char volname[64]; +}; + +struct pr_reservation { + __u64 key; + __u32 type; + __u32 flags; +}; + +struct pr_registration { + __u64 old_key; + __u64 new_key; + __u32 flags; + __u32 __pad; +}; + +struct pr_preempt { + __u64 old_key; + __u64 new_key; + __u32 type; + __u32 flags; +}; + +struct pr_clear { + __u64 key; + __u32 flags; + __u32 __pad; +}; + +struct pr_keys { + u32 generation; + u32 num_keys; + u64 keys[0]; +}; + +struct pr_held_reservation { + u64 key; + u32 generation; + enum pr_type type; +}; + +struct blk_iou_cmd { + int res; + bool nowait; +}; + +struct io_poll_update { + struct file *file; + u64 old_user_data; + u64 new_user_data; + __poll_t events; + bool update_events; + bool update_user_data; +}; + +struct io_poll_table { + struct poll_table_struct pt; + struct io_kiocb *req; + int nr_entries; + int error; + bool owning; + __poll_t result_mask; +}; + +enum { + IOU_POLL_DONE = 0, + IOU_POLL_NO_ACTION = 1, + IOU_POLL_REMOVE_POLL_USE_RES = 2, + IOU_POLL_REISSUE = 3, + IOU_POLL_REQUEUE = 4, +}; + +struct node_groups { + unsigned int id; + union { + unsigned int ngroups; + unsigned int ncpus; + }; +}; + +enum { + PCI_REASSIGN_ALL_RSRC = 1, + PCI_REASSIGN_ALL_BUS = 2, + PCI_PROBE_ONLY = 4, + PCI_CAN_SKIP_ISA_ALIGN = 8, + PCI_ENABLE_PROC_DOMAINS = 16, + PCI_COMPAT_DOMAIN_0 = 32, + PCI_SCAN_ALL_PCIE_DEVS = 64, +}; + +struct of_bus; + +struct of_pci_range_parser { + struct device_node *node; + const struct of_bus *bus; + const __be32 *range; + const __be32 *end; + int na; + int ns; + int pna; + bool dma; +}; + +struct of_bus { + const char *name; + const char *addresses; + int (*match)(struct device_node *); + void (*count_cells)(struct device_node *, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int, int); + int (*translate)(__be32 *, u64, int); + int flag_cells; + unsigned int (*get_flags)(const __be32 *); +}; + +struct of_pci_range { + union { + u64 pci_addr; + u64 bus_addr; + }; + u64 cpu_addr; + u64 size; + u32 flags; +}; + +struct component_ops { + int (*bind)(struct device *, struct device *, void *); + void (*unbind)(struct device *, struct device *, void *); +}; + +struct component_master_ops { + int (*bind)(struct device *); + void (*unbind)(struct device *); +}; + +struct component; + +struct component_match_array { + void *data; + int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + +struct aggregate_device; + +struct component { + struct list_head node; + struct aggregate_device *adev; + bool bound; + const struct component_ops *ops; + int subcomponent; + struct device *dev; +}; + +struct component_match { + size_t alloc; + size_t num; + struct component_match_array *compare; +}; + +struct aggregate_device { + struct list_head node; + bool bound; + const struct component_master_ops *ops; + struct 
device *parent; + struct component_match *match; +}; + +struct sg_io_v4 { + __s32 guard; + __u32 protocol; + __u32 subprotocol; + __u32 request_len; + __u64 request; + __u64 request_tag; + __u32 request_attr; + __u32 request_priority; + __u32 request_extra; + __u32 max_response_len; + __u64 response; + __u32 dout_iovec_count; + __u32 dout_xfer_len; + __u32 din_iovec_count; + __u32 din_xfer_len; + __u64 dout_xferp; + __u64 din_xferp; + __u32 timeout; + __u32 flags; + __u64 usr_ptr; + __u32 spare_in; + __u32 driver_status; + __u32 transport_status; + __u32 device_status; + __u32 retry_delay; + __u32 info; + __u32 duration; + __u32 response_len; + __s32 din_resid; + __s32 dout_resid; + __u64 generated_tag; + __u32 spare_out; + __u32 padding; +}; + +typedef int bsg_sg_io_fn(struct request_queue *, struct sg_io_v4 *, bool, unsigned int); + +struct fixed_phy_status { + int link; + int speed; + int duplex; + int pause; + int asym_pause; +}; + +struct swmii_regs { + u16 bmsr; + u16 lpa; + u16 lpagb; + u16 estat; +}; + +enum { + SWMII_SPEED_10 = 0, + SWMII_SPEED_100 = 1, + SWMII_SPEED_1000 = 2, + SWMII_DUPLEX_HALF = 0, + SWMII_DUPLEX_FULL = 1, +}; + +struct mdio_device_id { + __u32 phy_id; + __u32 phy_id_mask; +}; + +enum led_trigger_netdev_modes { + TRIGGER_NETDEV_LINK = 0, + TRIGGER_NETDEV_LINK_10 = 1, + TRIGGER_NETDEV_LINK_100 = 2, + TRIGGER_NETDEV_LINK_1000 = 3, + TRIGGER_NETDEV_LINK_2500 = 4, + TRIGGER_NETDEV_LINK_5000 = 5, + TRIGGER_NETDEV_LINK_10000 = 6, + TRIGGER_NETDEV_HALF_DUPLEX = 7, + TRIGGER_NETDEV_FULL_DUPLEX = 8, + TRIGGER_NETDEV_TX = 9, + TRIGGER_NETDEV_RX = 10, + TRIGGER_NETDEV_TX_ERR = 11, + TRIGGER_NETDEV_RX_ERR = 12, + __TRIGGER_NETDEV_MAX = 13, +}; + +struct rtl821x_priv { + u16 phycr1; + u16 phycr2; + bool has_phycr2; + struct clk *clk; +}; + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 1, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 2, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 3, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 4, + E1000_INVM_INVALIDATED_STRUCTURE = 15, +}; + +struct ep_device { + struct usb_endpoint_descriptor *desc; + struct usb_device *udev; + struct device dev; +}; + +struct dbc_regs { + __le32 capability; + __le32 doorbell; + __le32 ersts; + __le32 __reserved_0; + __le64 erstba; + __le64 erdp; + __le32 control; + __le32 status; + __le32 portsc; + __le32 __reserved_1; + __le64 dccp; + __le32 devinfo1; + __le32 devinfo2; +}; + +struct dbc_str_descs { + char string0[64]; + char manufacturer[64]; + char product[64]; + char serial[64]; +}; + +enum dbc_state { + DS_DISABLED = 0, + DS_INITIALIZED = 1, + DS_ENABLED = 2, + DS_CONNECTED = 3, + DS_CONFIGURED = 4, + DS_MAX = 5, +}; + +struct xhci_dbc; + +struct dbc_ep { + struct xhci_dbc *dbc; + struct list_head list_pending; + struct xhci_ring *ring; + unsigned int direction: 1; + unsigned int halted: 1; +}; + +struct dbc_driver; + +struct xhci_dbc { + spinlock_t lock; + struct device *dev; + struct xhci_hcd *xhci; + struct dbc_regs *regs; + struct xhci_ring *ring_evt; + struct xhci_ring *ring_in; + struct xhci_ring *ring_out; + struct xhci_erst erst; + struct xhci_container_ctx *ctx; + struct dbc_str_descs *string; + dma_addr_t string_dma; + size_t string_size; + u16 idVendor; + u16 idProduct; + u16 bcdDevice; + u8 bInterfaceProtocol; + enum dbc_state state; + struct delayed_work event_work; + unsigned int poll_interval; + unsigned int resume_required: 1; + struct dbc_ep eps[2]; + const struct dbc_driver *driver; + void *priv; +}; + +struct dbc_driver { + 
int (*configure)(struct xhci_dbc *); + void (*disconnect)(struct xhci_dbc *); +}; + +struct dbc_request { + void *buf; + unsigned int length; + dma_addr_t dma; + void (*complete)(struct xhci_dbc *, struct dbc_request *); + struct list_head list_pool; + int status; + unsigned int actual; + struct xhci_dbc *dbc; + struct list_head list_pending; + dma_addr_t trb_dma; + union xhci_trb *trb; + unsigned int direction: 1; +}; + +struct trace_event_raw_xhci_log_msg { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctx { + struct trace_entry ent; + int ctx_64; + unsigned int ctx_type; + dma_addr_t ctx_dma; + u8 *ctx_va; + unsigned int ctx_ep_num; + u32 __data_loc_ctx_data; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_trb { + struct trace_entry ent; + dma_addr_t dma; + u32 type; + u32 field0; + u32 field1; + u32 field2; + u32 field3; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_free_virt_dev { + struct trace_entry ent; + void *vdev; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + int slot_id; + u16 current_mel; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_virt_dev { + struct trace_entry ent; + void *vdev; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + int devnum; + int state; + int speed; + u8 portnum; + u8 level; + int slot_id; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_urb { + struct trace_entry ent; + u32 __data_loc_devname; + void *urb; + unsigned int pipe; + unsigned int stream; + int status; + unsigned int flags; + int num_mapped_sgs; + int num_sgs; + int length; + int actual; + int epnum; + int dir_in; + int type; + int slot_id; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_stream_ctx { + struct trace_entry ent; + unsigned int stream_id; + u64 stream_ring; + dma_addr_t ctx_array_dma; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ep_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u64 deq; + u32 tx_info; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_slot_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u32 tt_info; + u32 state; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctrl_ctx { + struct trace_entry ent; + u32 drop; + u32 add; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ring { + struct trace_entry ent; + u32 type; + void *ring; + dma_addr_t enq; + dma_addr_t deq; + unsigned int num_segs; + unsigned int stream_id; + unsigned int cycle_state; + unsigned int bounce_buf_len; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_portsc { + struct trace_entry ent; + u32 busnum; + u32 portnum; + u32 portsc; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_doorbell { + struct trace_entry ent; + u32 slot; + u32 doorbell; + char __data[0]; +}; + +struct trace_event_raw_xhci_dbc_log_request { + struct trace_entry ent; + struct dbc_request *req; + bool dir; + unsigned int actual; + unsigned int length; + int status; + char __data[0]; +}; + +struct trace_event_data_offsets_xhci_log_msg { + u32 msg; + const void *msg_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_ctx { + u32 ctx_data; + const void *ctx_data_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_trb {}; + +struct trace_event_data_offsets_xhci_log_free_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_urb { + u32 devname; + const void *devname_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_stream_ctx {}; + +struct 
trace_event_data_offsets_xhci_log_ep_ctx {}; + +struct trace_event_data_offsets_xhci_log_slot_ctx {}; + +struct trace_event_data_offsets_xhci_log_ctrl_ctx {}; + +struct trace_event_data_offsets_xhci_log_ring {}; + +struct trace_event_data_offsets_xhci_log_portsc {}; + +struct trace_event_data_offsets_xhci_log_doorbell {}; + +struct trace_event_data_offsets_xhci_dbc_log_request {}; + +typedef void (*btf_trace_xhci_dbg_address)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_context_change)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_quirks)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_reset_ep)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_cancel_urb)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_init)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_ring_expansion)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_address_ctx)(void *, struct xhci_hcd *, struct xhci_container_ctx *, unsigned int); + +typedef void (*btf_trace_xhci_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_handle_command)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_queue_trb)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_gadget_ep_queue)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_free_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_alloc_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_addressable_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_stop_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_urb_enqueue)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_giveback)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_dequeue)(void *, struct urb *); + +typedef void (*btf_trace_xhci_alloc_stream_info_ctx)(void *, struct xhci_stream_info *, unsigned int); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq_stream)(void *, struct xhci_stream_info *, unsigned int); + +typedef void (*btf_trace_xhci_handle_cmd_stop_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_config_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_add_endpoint)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_alloc_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_free_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_disable_slot)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_discover_or_reset_device)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_setup_device_slot)(void *, struct 
xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_addr_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_address_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_ring_alloc)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_free)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_expansion)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_inc_enq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_inc_deq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_handle_port_status)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_get_port_status)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_hub_status_data)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_ring_ep_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_ring_host_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_dbc_alloc_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_free_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_queue_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_giveback_request)(void *, struct dbc_request *); + +typedef struct thermal_cooling_device *class_cooling_dev_t; + +struct thermal_instance { + int id; + char name[20]; + struct thermal_cooling_device *cdev; + const struct thermal_trip *trip; + bool initialized; + long unsigned int upper; + long unsigned int lower; + long unsigned int target; + char attr_name[20]; + struct device_attribute attr; + char weight_attr_name[20]; + struct device_attribute weight_attr; + struct list_head trip_node; + struct list_head cdev_node; + unsigned int weight; + bool upper_no_limit; +}; + +struct hid_debug_list { + struct { + union { + struct __kfifo kfifo; + char *type; + const char *const_type; + char (*rectype)[0]; + char *ptr; + const char *ptr_const; + }; + char buf[0]; + } hid_debug_fifo; + struct fasync_struct *fasync; + struct hid_device *hdev; + struct list_head node; + struct mutex read_mutex; +}; + +struct hid_usage_entry { + unsigned int page; + unsigned int usage; + const char *description; +}; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +struct dmabuf_token { + __u32 token_start; + __u32 token_count; +}; + +struct linger { + int l_onoff; + int l_linger; +}; + +struct cmsghdr { + __kernel_size_t cmsg_len; + int cmsg_level; + int cmsg_type; +}; + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; + +enum { + SOCK_WAKE_IO = 0, + SOCK_WAKE_WAITD = 1, + SOCK_WAKE_SPACE = 2, + SOCK_WAKE_URG = 3, +}; + +struct so_timestamping { + int flags; + int bind_phc; +}; + +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = 1, + SOF_TXTIME_REPORT_ERRORS = 2, + SOF_TXTIME_FLAGS_LAST = 2, + SOF_TXTIME_FLAGS_MASK = 3, +}; + +struct sock_txtime { + __kernel_clockid_t clockid; + __u32 flags; +}; + +enum sk_pacing { + SK_PACING_NONE = 0, + SK_PACING_NEEDED = 1, + SK_PACING_FQ = 2, +}; + +struct sock_skb_cb { + u32 dropcount; +}; + +struct sock_ee_data_rfc4884 { + __u16 len; + __u8 flags; + __u8 reserved; 
+}; + +struct sock_extended_err { + __u32 ee_errno; + __u8 ee_origin; + __u8 ee_type; + __u8 ee_code; + __u8 ee_pad; + __u32 ee_info; + union { + __u32 ee_data; + struct sock_ee_data_rfc4884 ee_rfc4884; + }; +}; + +struct sock_exterr_skb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + struct sock_extended_err ee; + u16 addr_offset; + __be16 port; + u8 opt_stats: 1; + u8 unused: 7; +}; + +enum { + SK_MEMINFO_RMEM_ALLOC = 0, + SK_MEMINFO_RCVBUF = 1, + SK_MEMINFO_WMEM_ALLOC = 2, + SK_MEMINFO_SNDBUF = 3, + SK_MEMINFO_FWD_ALLOC = 4, + SK_MEMINFO_WMEM_QUEUED = 5, + SK_MEMINFO_OPTMEM = 6, + SK_MEMINFO_BACKLOG = 7, + SK_MEMINFO_DROPS = 8, + SK_MEMINFO_VARS = 9, +}; + +enum sknetlink_groups { + SKNLGRP_NONE = 0, + SKNLGRP_INET_TCP_DESTROY = 1, + SKNLGRP_INET_UDP_DESTROY = 2, + SKNLGRP_INET6_TCP_DESTROY = 3, + SKNLGRP_INET6_UDP_DESTROY = 4, + __SKNLGRP_MAX = 5, +}; + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; + short unsigned int overhead; + short int cell_align; + short unsigned int mpu; + __u32 rate; +}; + +struct tc_prio_qopt { + int bands; + __u8 priomap[16]; +}; + +struct skb_array { + struct ptr_ring ring; +}; + +struct psched_ratecfg { + u64 rate_bytes_ps; + u32 mult; + u16 overhead; + u16 mpu; + u8 linklayer; + u8 shift; +}; + +struct psched_pktrate { + u64 rate_pkts_ps; + u32 mult; + u8 shift; +}; + +struct mini_Qdisc_pair { + struct mini_Qdisc miniq1; + struct mini_Qdisc miniq2; + struct mini_Qdisc **p_miniq; +}; + +struct pfifo_fast_priv { + struct skb_array q[3]; +}; + +struct phc_vclocks_reply_data { + struct ethnl_reply_data base; + int num; + int *index; +}; + +struct nf_ct_tcp_flags { + __u8 flags; + __u8 mask; +}; + +enum nf_ct_tcp_action { + NFCT_TCP_IGNORE = 0, + NFCT_TCP_INVALID = 1, + NFCT_TCP_ACCEPT = 2, +}; + +enum tcp_bit_set { + TCP_SYN_SET = 0, + TCP_SYNACK_SET = 1, + TCP_FIN_SET = 2, + TCP_ACK_SET = 3, + TCP_RST_SET = 4, + TCP_NONE_SET = 5, +}; + +enum ctattr_protoinfo_tcp { + CTA_PROTOINFO_TCP_UNSPEC = 0, + CTA_PROTOINFO_TCP_STATE = 1, + CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2, + CTA_PROTOINFO_TCP_WSCALE_REPLY = 3, + CTA_PROTOINFO_TCP_FLAGS_ORIGINAL = 4, + CTA_PROTOINFO_TCP_FLAGS_REPLY = 5, + __CTA_PROTOINFO_TCP_MAX = 6, +}; + +struct nft_rhash { + struct rhashtable ht; + struct delayed_work gc_work; +}; + +struct nft_rhash_elem { + struct nft_elem_priv priv; + struct rhash_head node; + struct nft_set_ext ext; +}; + +struct nft_rhash_cmp_arg { + const struct nft_set *set; + const u32 *key; + u8 genmask; + u64 tstamp; +}; + +struct nft_rhash_ctx { + const struct nft_ctx ctx; + const struct nft_set *set; +}; + +struct nft_hash { + u32 seed; + u32 buckets; + struct hlist_head table[0]; +}; + +struct nft_hash_elem { + struct nft_elem_priv priv; + struct hlist_node node; + struct nft_set_ext ext; +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + union { + __be32 imsf_slist[1]; + struct { + struct {} __empty_imsf_slist_flex; + __be32 imsf_slist_flex[0]; + }; + }; +}; + +struct igmphdr { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; +}; + +struct igmp_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *in_dev; +}; + +struct igmp_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *idev; + struct ip_mc_list *im; +}; + +struct ac6_iter_state { + struct seq_net_private 
p; + struct net_device *dev; +}; + +enum { + IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC = 0, + IFLA_BRIDGE_VLAN_TUNNEL_ID = 1, + IFLA_BRIDGE_VLAN_TUNNEL_VID = 2, + IFLA_BRIDGE_VLAN_TUNNEL_FLAGS = 3, + __IFLA_BRIDGE_VLAN_TUNNEL_MAX = 4, +}; + +struct vtunnel_info { + u32 tunid; + u16 vid; + u16 flags; +}; + +enum arch_timer_erratum_match_type { + ate_match_dt = 0, + ate_match_local_cap_id = 1, + ate_match_acpi_oem_info = 2, +}; + +struct arch_timer_erratum_workaround { + enum arch_timer_erratum_match_type match_type; + const void *id; + const char *desc; + u64 (*read_cntpct_el0)(); + u64 (*read_cntvct_el0)(); + int (*set_next_event_phys)(long unsigned int, struct clock_event_device *); + int (*set_next_event_virt)(long unsigned int, struct clock_event_device *); + bool disable_compat_vdso; +}; + +struct wchan_info { + long unsigned int pc; + int count; +}; + +struct irq_bypass_consumer; + +struct irq_bypass_producer { + struct list_head node; + void *token; + int irq; + int (*add_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); + void (*del_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); + void (*stop)(struct irq_bypass_producer *); + void (*start)(struct irq_bypass_producer *); +}; + +struct irq_bypass_consumer { + struct list_head node; + void *token; + int (*add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); + void (*del_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); + void (*stop)(struct irq_bypass_consumer *); + void (*start)(struct irq_bypass_consumer *); +}; + +enum { + kvm_ioeventfd_flag_nr_datamatch = 0, + kvm_ioeventfd_flag_nr_pio = 1, + kvm_ioeventfd_flag_nr_deassign = 2, + kvm_ioeventfd_flag_nr_virtio_ccw_notify = 3, + kvm_ioeventfd_flag_nr_fast_mmio = 4, + kvm_ioeventfd_flag_nr_max = 5, +}; + +struct kvm_ioeventfd { + __u64 datamatch; + __u64 addr; + __u32 len; + __s32 fd; + __u32 flags; + __u8 pad[36]; +}; + +struct kvm_irqfd { + __u32 fd; + __u32 gsi; + __u32 flags; + __u32 resamplefd; + __u8 pad[16]; +}; + +struct kvm_irq_ack_notifier { + struct hlist_node link; + unsigned int gsi; + void (*irq_acked)(struct kvm_irq_ack_notifier *); +}; + +struct kvm_kernel_irqfd_resampler { + struct kvm *kvm; + struct list_head list; + struct kvm_irq_ack_notifier notifier; + struct list_head link; +}; + +struct kvm_kernel_irqfd { + struct kvm *kvm; + wait_queue_entry_t wait; + struct kvm_kernel_irq_routing_entry irq_entry; + seqcount_spinlock_t irq_entry_sc; + int gsi; + struct work_struct inject; + struct kvm_kernel_irqfd_resampler *resampler; + struct eventfd_ctx *resamplefd; + struct list_head resampler_link; + struct eventfd_ctx *eventfd; + struct list_head list; + poll_table pt; + struct work_struct shutdown; + struct irq_bypass_consumer consumer; + struct irq_bypass_producer *producer; +}; + +struct _ioeventfd { + struct list_head list; + u64 addr; + int length; + struct eventfd_ctx *eventfd; + u64 datamatch; + struct kvm_io_device dev; + u8 bus_idx; + bool wildcard; +}; + +typedef struct { + rwlock_t *lock; + long unsigned int flags; +} class_write_lock_irqsave_t; + +enum trans_regime { + TR_EL10 = 0, + TR_EL20 = 1, + TR_EL2 = 2, +}; + +struct s1_walk_info { + u64 baddr; + enum trans_regime regime; + unsigned int max_oa_bits; + unsigned int pgshift; + unsigned int txsz; + int sl; + bool hpd; + bool e0poe; + bool poe; + bool pan; + bool be; + bool s2; +}; + +struct s1_walk_result { + union { + struct { + u64 desc; + u64 pa; + s8 level; + u8 APTable; + bool UXNTable; + bool PXNTable; + bool uwxn; + bool 
uov; + bool ur; + bool uw; + bool ux; + bool pwxn; + bool pov; + bool pr; + bool pw; + bool px; + }; + struct { + u8 fst; + bool ptw; + bool s2; + }; + }; + bool failed; +}; + +struct mmu_config { + u64 ttbr0; + u64 ttbr1; + u64 tcr; + u64 mair; + u64 tcr2; + u64 pir; + u64 pire0; + u64 por_el0; + u64 por_el1; + u64 sctlr; + u64 vttbr; + u64 vtcr; + u64 hcr; +}; + +struct ffa_mem_region_addr_range { + u64 address; + u32 pg_cnt; + u32 reserved; +}; + +struct ffa_composite_mem_region { + u32 total_pg_cnt; + u32 addr_range_cnt; + u64 reserved; + struct ffa_mem_region_addr_range constituents[0]; +}; + +struct ffa_mem_region_attributes { + u16 receiver; + u8 attrs; + u8 flag; + u32 composite_off; + u64 reserved; +}; + +struct ffa_mem_region { + u16 sender_id; + u16 attributes; + u32 flags; + u64 handle; + u64 tag; + u32 ep_mem_size; + u32 ep_count; + u32 ep_mem_offset; + u32 reserved[3]; +}; + +struct psci_0_1_function_ids { + u32 cpu_suspend; + u32 cpu_on; + u32 cpu_off; + u32 migrate; +}; + +struct kvm_host_psci_config { + u32 version; + u32 smccc_version; + struct psci_0_1_function_ids function_ids_0_1; + bool psci_0_1_cpu_suspend_implemented; + bool psci_0_1_cpu_on_implemented; + bool psci_0_1_cpu_off_implemented; + bool psci_0_1_migrate_implemented; +}; + +struct kvm_ffa_descriptor_buffer { + void *buf; + size_t len; +}; + +struct kvm_ffa_buffers { + hyp_spinlock_t lock; + void *tx; + void *rx; +}; + +struct suspend_stats { + unsigned int step_failures[8]; + unsigned int success; + unsigned int fail; + int last_failed_dev; + char failed_devs[80]; + int last_failed_errno; + int errno[2]; + int last_failed_step; + u64 last_hw_sleep; + u64 total_hw_sleep; + u64 max_hw_sleep; + enum suspend_stat_step failed_steps[2]; +}; + +typedef bool (*smp_cond_func_t)(int, void *); + +typedef struct { + unsigned int __softirq_pending; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +} irq_cpustat_t; + +struct trace_event_raw_csd_queue_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *func; + void *csd; + char __data[0]; +}; + +struct trace_event_raw_csd_function { + struct trace_entry ent; + void *func; + void *csd; + char __data[0]; +}; + +struct trace_event_data_offsets_csd_queue_cpu {}; + +struct trace_event_data_offsets_csd_function {}; + +typedef void (*btf_trace_csd_queue_cpu)(void *, const unsigned int, long unsigned int, smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_entry)(void *, smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_exit)(void *, smp_call_func_t, call_single_data_t *); + +struct call_function_data { + call_single_data_t *csd; + cpumask_var_t cpumask; + cpumask_var_t cpumask_ipi; +}; + +struct smp_call_on_cpu_struct { + struct work_struct work; + struct completion done; + int (*func)(void *); + void *data; + int ret; + int cpu; +}; + +struct filter_pred; + +struct prog_entry { + int target; + int when_to_branch; + struct filter_pred *pred; +}; + +struct regex; + +typedef int (*regex_match_func)(char *, struct regex *, int); + +struct regex { + char pattern[256]; + int len; + int field_len; + regex_match_func match; +}; + +enum regex_type { + MATCH_FULL = 0, + MATCH_FRONT_ONLY = 1, + MATCH_MIDDLE_ONLY = 2, + MATCH_END_ONLY = 3, + MATCH_GLOB = 4, + MATCH_INDEX = 5, +}; + +enum filter_op_ids { + OP_GLOB = 0, + OP_NE = 1, + OP_EQ = 2, + OP_LE = 3, + OP_LT = 4, + OP_GE = 5, + OP_GT = 6, + OP_BAND = 7, + OP_MAX = 8, +}; + +enum filter_pred_fn { + 
FILTER_PRED_FN_NOP = 0, + FILTER_PRED_FN_64 = 1, + FILTER_PRED_FN_64_CPUMASK = 2, + FILTER_PRED_FN_S64 = 3, + FILTER_PRED_FN_U64 = 4, + FILTER_PRED_FN_32 = 5, + FILTER_PRED_FN_32_CPUMASK = 6, + FILTER_PRED_FN_S32 = 7, + FILTER_PRED_FN_U32 = 8, + FILTER_PRED_FN_16 = 9, + FILTER_PRED_FN_16_CPUMASK = 10, + FILTER_PRED_FN_S16 = 11, + FILTER_PRED_FN_U16 = 12, + FILTER_PRED_FN_8 = 13, + FILTER_PRED_FN_8_CPUMASK = 14, + FILTER_PRED_FN_S8 = 15, + FILTER_PRED_FN_U8 = 16, + FILTER_PRED_FN_COMM = 17, + FILTER_PRED_FN_STRING = 18, + FILTER_PRED_FN_STRLOC = 19, + FILTER_PRED_FN_STRRELLOC = 20, + FILTER_PRED_FN_PCHAR_USER = 21, + FILTER_PRED_FN_PCHAR = 22, + FILTER_PRED_FN_CPU = 23, + FILTER_PRED_FN_CPU_CPUMASK = 24, + FILTER_PRED_FN_CPUMASK = 25, + FILTER_PRED_FN_CPUMASK_CPU = 26, + FILTER_PRED_FN_FUNCTION = 27, + FILTER_PRED_FN_ = 28, + FILTER_PRED_TEST_VISITED = 29, +}; + +struct filter_pred { + struct regex *regex; + struct cpumask *mask; + short unsigned int *ops; + struct ftrace_event_field *field; + u64 val; + u64 val2; + enum filter_pred_fn fn_num; + int offset; + int not; + int op; +}; + +enum { + FILT_ERR_NONE = 0, + FILT_ERR_INVALID_OP = 1, + FILT_ERR_TOO_MANY_OPEN = 2, + FILT_ERR_TOO_MANY_CLOSE = 3, + FILT_ERR_MISSING_QUOTE = 4, + FILT_ERR_MISSING_BRACE_OPEN = 5, + FILT_ERR_MISSING_BRACE_CLOSE = 6, + FILT_ERR_OPERAND_TOO_LONG = 7, + FILT_ERR_EXPECT_STRING = 8, + FILT_ERR_EXPECT_DIGIT = 9, + FILT_ERR_ILLEGAL_FIELD_OP = 10, + FILT_ERR_FIELD_NOT_FOUND = 11, + FILT_ERR_ILLEGAL_INTVAL = 12, + FILT_ERR_BAD_SUBSYS_FILTER = 13, + FILT_ERR_TOO_MANY_PREDS = 14, + FILT_ERR_INVALID_FILTER = 15, + FILT_ERR_INVALID_CPULIST = 16, + FILT_ERR_IP_FIELD_ONLY = 17, + FILT_ERR_INVALID_VALUE = 18, + FILT_ERR_NO_FUNCTION = 19, + FILT_ERR_ERRNO = 20, + FILT_ERR_NO_FILTER = 21, +}; + +struct filter_parse_error { + int lasterr; + int lasterr_pos; +}; + +typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **); + +enum { + INVERT = 1, + PROCESS_AND = 2, + PROCESS_OR = 4, +}; + +struct ustring_buffer { + char buffer[1024]; +}; + +enum { + TOO_MANY_CLOSE = -1, + TOO_MANY_OPEN = -2, + MISSING_QUOTE = -3, +}; + +struct filter_list { + struct list_head list; + struct event_filter *filter; +}; + +struct function_filter_data { + struct ftrace_ops *ops; + int first_filter; + int first_notrace; +}; + +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *, int, int); + int (*finalize)(struct bpf_verifier_env *); + int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *); + int (*remove_insns)(struct bpf_verifier_env *, u32, u32); + int (*prepare)(struct bpf_prog *); + int (*translate)(struct bpf_prog *); + void (*destroy)(struct bpf_prog *); +}; + +struct bpf_offload_dev { + const struct bpf_prog_offload_ops *ops; + struct list_head netdevs; + void *priv; +}; + +enum xdp_rx_metadata { + XDP_METADATA_KFUNC_RX_TIMESTAMP = 0, + XDP_METADATA_KFUNC_RX_HASH = 1, + XDP_METADATA_KFUNC_RX_VLAN_TAG = 2, + MAX_XDP_METADATA_KFUNC = 3, +}; + +struct bpf_offload_netdev { + struct rhash_head l; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + struct list_head progs; + struct list_head maps; + struct list_head offdev_netdevs; +}; + +struct ns_get_path_bpf_prog_args { + struct bpf_prog *prog; + struct bpf_prog_info *info; +}; + +struct ns_get_path_bpf_map_args { + struct bpf_offloaded_map *offmap; + struct bpf_map_info *info; +}; + +struct page_frag_cache { + long unsigned int encoded_page; + __u32 offset; + __u32 pagecnt_bias; +}; + +typedef 
unsigned int isolate_mode_t; + +struct movable_operations { + bool (*isolate_page)(struct page *, isolate_mode_t); + int (*migrate_page)(struct page *, struct page *, enum migrate_mode); + void (*putback_page)(struct page *); +}; + +struct page_vma_mapped_walk { + long unsigned int pfn; + long unsigned int nr_pages; + long unsigned int pgoff; + struct vm_area_struct *vma; + long unsigned int address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; + +enum rmp_flags { + RMP_LOCKED = 1, + RMP_USE_SHARED_ZEROPAGE = 2, +}; + +struct rmap_walk_control { + void *arg; + bool try_lock; + bool contended; + bool (*rmap_one)(struct folio *, struct vm_area_struct *, long unsigned int, void *); + int (*done)(struct folio *); + struct anon_vma * (*anon_lock)(const struct folio *, struct rmap_walk_control *); + bool (*invalid_vma)(struct vm_area_struct *, void *); +}; + +struct rmap_walk_arg { + struct folio *folio; + bool map_unused_to_zeropage; +}; + +enum { + PAGE_WAS_MAPPED = 1, + PAGE_WAS_MLOCKED = 2, + PAGE_OLD_STATES = 3, +}; + +struct migrate_pages_stats { + int nr_succeeded; + int nr_failed_pages; + int nr_thp_succeeded; + int nr_thp_failed; + int nr_thp_split; + int nr_split; +}; + +struct hugepage_subpool; + +enum legacy_fs_param { + LEGACY_FS_UNSET_PARAMS = 0, + LEGACY_FS_MONOLITHIC_PARAMS = 1, + LEGACY_FS_INDIVIDUAL_PARAMS = 2, +}; + +struct legacy_fs_context { + char *legacy_data; + size_t data_size; + enum legacy_fs_param param_type; +}; + +struct posix_acl_xattr_entry { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +}; + +struct match_token { + int token; + const char *pattern; +}; + +enum { + MAX_OPT_ARGS = 3, +}; + +struct pts_mount_opts { + int setuid; + int setgid; + kuid_t uid; + kgid_t gid; + umode_t mode; + umode_t ptmxmode; + int reserve; + int max; +}; + +enum { + Opt_uid___2 = 0, + Opt_gid___2 = 1, + Opt_mode___3 = 2, + Opt_ptmxmode = 3, + Opt_newinstance = 4, + Opt_max = 5, + Opt_err___3 = 6, +}; + +struct pts_fs_info { + struct ida allocated_ptys; + struct pts_mount_opts mount_opts; + struct super_block *sb; + struct dentry *ptmx_dentry; +}; + +struct btrfs_extent_data_ref { + __le64 root; + __le64 objectid; + __le64 offset; + __le32 count; +} __attribute__((packed)); + +struct btrfs_shared_data_ref { + __le32 count; +}; + +struct btrfs_extent_owner_ref { + __le64 root_id; +}; + +enum btrfs_chunk_alloc_enum { + CHUNK_ALLOC_NO_FORCE = 0, + CHUNK_ALLOC_LIMITED = 1, + CHUNK_ALLOC_FORCE = 2, + CHUNK_ALLOC_FORCE_FOR_EXTENT = 3, +}; + +enum btrfs_caching_type { + BTRFS_CACHE_NO = 0, + BTRFS_CACHE_STARTED = 1, + BTRFS_CACHE_FINISHED = 2, + BTRFS_CACHE_ERROR = 3, +}; + +struct btrfs_squota_delta { + u64 root; + u64 num_bytes; + u64 generation; + bool is_inc; + bool is_data; +}; + +enum btrfs_loop_type { + LOOP_CACHING_NOWAIT = 0, + LOOP_CACHING_WAIT = 1, + LOOP_UNSET_SIZE_CLASS = 2, + LOOP_ALLOC_CHUNK = 3, + LOOP_WRONG_SIZE_CLASS = 4, + LOOP_NO_EMPTY_SIZE = 5, +}; + +struct walk_control { + u64 refs[8]; + u64 flags[8]; + struct btrfs_key update_progress; + struct btrfs_key drop_progress; + int drop_level; + int stage; + int level; + int shared_level; + int update_ref; + int keep_locks; + int reada_slot; + int reada_count; + int restarted; + int lookup_info; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +struct crypto_sig { + struct crypto_tfm base; +}; + +struct sig_alg { + int (*sign)(struct crypto_sig *, const void *, unsigned int, void *, unsigned int); + int (*verify)(struct crypto_sig *, const void *, unsigned int, const void *, 
unsigned int); + int (*set_pub_key)(struct crypto_sig *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_sig *, const void *, unsigned int); + unsigned int (*key_size)(struct crypto_sig *); + unsigned int (*digest_size)(struct crypto_sig *); + unsigned int (*max_size)(struct crypto_sig *); + int (*init)(struct crypto_sig *); + void (*exit)(struct crypto_sig *); + struct crypto_alg base; +}; + +typedef struct { + __u8 b[16]; +} guid_t; + +struct parsed_partitions { + struct gendisk *disk; + char name[32]; + struct { + sector_t from; + sector_t size; + int flags; + bool has_info; + struct partition_meta_info info; + } *parts; + int next; + int limit; + bool access_beyond_eod; + char *pp_buf; +}; + +typedef struct { + struct folio *v; +} Sector; + +typedef guid_t efi_guid_t; + +struct _gpt_header { + __le64 signature; + __le32 revision; + __le32 header_size; + __le32 header_crc32; + __le32 reserved1; + __le64 my_lba; + __le64 alternate_lba; + __le64 first_usable_lba; + __le64 last_usable_lba; + efi_guid_t disk_guid; + __le64 partition_entry_lba; + __le32 num_partition_entries; + __le32 sizeof_partition_entry; + __le32 partition_entry_array_crc32; +} __attribute__((packed)); + +typedef struct _gpt_header gpt_header; + +struct _gpt_entry_attributes { + u64 required_to_function: 1; + u64 reserved: 47; + u64 type_guid_specific: 16; +}; + +typedef struct _gpt_entry_attributes gpt_entry_attributes; + +struct _gpt_entry { + efi_guid_t partition_type_guid; + efi_guid_t unique_partition_guid; + __le64 starting_lba; + __le64 ending_lba; + gpt_entry_attributes attributes; + __le16 partition_name[36]; +}; + +typedef struct _gpt_entry gpt_entry; + +struct _gpt_mbr_record { + u8 boot_indicator; + u8 start_head; + u8 start_sector; + u8 start_track; + u8 os_type; + u8 end_head; + u8 end_sector; + u8 end_track; + __le32 starting_lba; + __le32 size_in_lba; +}; + +typedef struct _gpt_mbr_record gpt_mbr_record; + +struct _legacy_mbr { + u8 boot_code[440]; + __le32 unique_mbr_signature; + __le16 unknown; + gpt_mbr_record partition_record[4]; + __le16 signature; +} __attribute__((packed)); + +typedef struct _legacy_mbr legacy_mbr; + +struct io_nop { + struct file *file; + int result; + int fd; + int buffer; + unsigned int flags; +}; + +typedef unsigned int uInt; + +typedef unsigned char uch; + +typedef short unsigned int ush; + +typedef long unsigned int ulg; + +struct ct_data_s { + union { + ush freq; + ush code; + } fc; + union { + ush dad; + ush len; + } dl; +}; + +typedef struct ct_data_s ct_data; + +struct static_tree_desc_s { + const ct_data *static_tree; + const int *extra_bits; + int extra_base; + int elems; + int max_length; +}; + +typedef struct static_tree_desc_s static_tree_desc; + +struct tree_desc_s { + ct_data *dyn_tree; + int max_code; + static_tree_desc *stat_desc; +}; + +typedef ush Pos; + +typedef unsigned int IPos; + +struct deflate_state { + z_streamp strm; + int status; + Byte *pending_buf; + ulg pending_buf_size; + Byte *pending_out; + int pending; + int noheader; + Byte data_type; + Byte method; + int last_flush; + uInt w_size; + uInt w_bits; + uInt w_mask; + Byte *window; + ulg window_size; + Pos *prev; + Pos *head; + uInt ins_h; + uInt hash_size; + uInt hash_bits; + uInt hash_mask; + uInt hash_shift; + long int block_start; + uInt match_length; + IPos prev_match; + int match_available; + uInt strstart; + uInt match_start; + uInt lookahead; + uInt prev_length; + uInt max_chain_length; + uInt max_lazy_match; + int level; + int strategy; + uInt good_match; + int nice_match; 
+ struct ct_data_s dyn_ltree[573]; + struct ct_data_s dyn_dtree[61]; + struct ct_data_s bl_tree[39]; + struct tree_desc_s l_desc; + struct tree_desc_s d_desc; + struct tree_desc_s bl_desc; + ush bl_count[16]; + int heap[573]; + int heap_len; + int heap_max; + uch depth[573]; + uch *l_buf; + uInt lit_bufsize; + uInt last_lit; + ush *d_buf; + ulg opt_len; + ulg static_len; + ulg compressed_len; + uInt matches; + int last_eob_len; + ush bi_buf; + int bi_valid; +}; + +typedef struct deflate_state deflate_state; + +typedef enum { + need_more = 0, + block_done = 1, + finish_started = 2, + finish_done = 3, +} block_state; + +typedef block_state (*compress_func)(deflate_state *, int); + +struct deflate_workspace { + deflate_state deflate_memory; + Byte *window_memory; + Pos *prev_memory; + Pos *head_memory; + char *overlay_memory; +}; + +typedef struct deflate_workspace deflate_workspace; + +struct config_s { + ush good_length; + ush max_lazy; + ush nice_length; + ush max_chain; + compress_func func; +}; + +typedef struct config_s config; + +typedef struct { + ptrdiff_t value; + const void *stateTable; + const void *symbolTT; + unsigned int stateLog; +} FSE_CState_t; + +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; + +typedef U32 (*ZSTD_getAllMatchesFn)(ZSTD_match_t *, ZSTD_matchState_t *, U32 *, const BYTE *, const BYTE *, const U32 *, const U32, const U32); + +typedef struct { + rawSeqStore_t seqStore; + U32 startPosInBlock; + U32 endPosInBlock; + U32 offset; +} ZSTD_optLdm_t; + +enum { + LOGIC_PIO_INDIRECT = 0, + LOGIC_PIO_CPU_MMIO = 1, +}; + +struct logic_pio_host_ops; + +struct logic_pio_hwaddr { + struct list_head list; + const struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; + long unsigned int flags; + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *, long unsigned int, size_t); + void (*out)(void *, long unsigned int, u32, size_t); + u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int); + void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int); +}; + +struct hotplug_slot_ops; + +struct hotplug_slot { + const struct hotplug_slot_ops *ops; + struct list_head slot_list; + struct pci_slot *pci_slot; + struct module *owner; + const char *mod_name; +}; + +enum pcie_reset_state { + pcie_deassert_reset = 1, + pcie_warm_reset = 2, + pcie_hot_reset = 3, +}; + +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0, + PCIE_LNK_X1 = 1, + PCIE_LNK_X2 = 2, + PCIE_LNK_X4 = 4, + PCIE_LNK_X8 = 8, + PCIE_LNK_X12 = 12, + PCIE_LNK_X16 = 16, + PCIE_LNK_X32 = 32, + PCIE_LNK_WIDTH_UNKNOWN = 255, +}; + +typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); + +struct hotplug_slot_ops { + int (*enable_slot)(struct hotplug_slot *); + int (*disable_slot)(struct hotplug_slot *); + int (*set_attention_status)(struct hotplug_slot *, u8); + int (*hardware_test)(struct hotplug_slot *, u32); + int (*get_power_status)(struct hotplug_slot *, u8 *); + int (*get_attention_status)(struct hotplug_slot *, u8 *); + int (*get_latch_status)(struct hotplug_slot *, u8 *); + int (*get_adapter_status)(struct hotplug_slot *, u8 *); + int (*reset_slot)(struct hotplug_slot *, bool); +}; + +struct pcie_tlp_log { + u32 dw[4]; +}; + +struct pci_reset_fn_method { + int (*reset_fn)(struct pci_dev *, bool); + char *name; +}; + +struct pci_pme_device { + struct list_head list; + struct pci_dev *dev; +}; + +struct pci_acs { + 
u16 cap; + u16 ctrl; + u16 fw_ctrl; +}; + +struct pci_saved_state { + u32 config_space[16]; + struct pci_cap_saved_data cap[0]; +}; + +struct u32_fract { + __u32 numerator; + __u32 denominator; +}; + +struct clk_fractional_divider { + struct clk_hw hw; + void *reg; + u8 mshift; + u8 mwidth; + u8 nshift; + u8 nwidth; + u8 flags; + void (*approximation)(struct clk_hw *, long unsigned int, long unsigned int *, long unsigned int *, long unsigned int *); + spinlock_t *lock; +}; + +typedef int (*pm_callback_t)(struct device *); + +enum scsi_devinfo_key { + SCSI_DEVINFO_GLOBAL = 0, + SCSI_DEVINFO_SPI = 1, +}; + +struct transport_class { + struct class class; + int (*setup)(struct transport_container *, struct device *, struct device *); + int (*configure)(struct transport_container *, struct device *, struct device *); + int (*remove)(struct transport_container *, struct device *, struct device *); +}; + +struct anon_transport_class { + struct transport_class tclass; + struct attribute_container container; +}; + +struct spi_transport_attrs { + int period; + int min_period; + int offset; + int max_offset; + unsigned int width: 1; + unsigned int max_width: 1; + unsigned int iu: 1; + unsigned int max_iu: 1; + unsigned int dt: 1; + unsigned int qas: 1; + unsigned int max_qas: 1; + unsigned int wr_flow: 1; + unsigned int rd_strm: 1; + unsigned int rti: 1; + unsigned int pcomp_en: 1; + unsigned int hold_mcs: 1; + unsigned int initial_dv: 1; + long unsigned int flags; + unsigned int support_sync: 1; + unsigned int support_wide: 1; + unsigned int support_dt: 1; + unsigned int support_dt_only; + unsigned int support_ius; + unsigned int support_qas; + unsigned int dv_pending: 1; + unsigned int dv_in_progress: 1; + struct mutex dv_mutex; +}; + +enum spi_signal_type { + SPI_SIGNAL_UNKNOWN = 1, + SPI_SIGNAL_SE = 2, + SPI_SIGNAL_LVD = 3, + SPI_SIGNAL_HVD = 4, +}; + +struct spi_host_attrs { + enum spi_signal_type signalling; +}; + +struct spi_function_template { + void (*get_period)(struct scsi_target *); + void (*set_period)(struct scsi_target *, int); + void (*get_offset)(struct scsi_target *); + void (*set_offset)(struct scsi_target *, int); + void (*get_width)(struct scsi_target *); + void (*set_width)(struct scsi_target *, int); + void (*get_iu)(struct scsi_target *); + void (*set_iu)(struct scsi_target *, int); + void (*get_dt)(struct scsi_target *); + void (*set_dt)(struct scsi_target *, int); + void (*get_qas)(struct scsi_target *); + void (*set_qas)(struct scsi_target *, int); + void (*get_wr_flow)(struct scsi_target *); + void (*set_wr_flow)(struct scsi_target *, int); + void (*get_rd_strm)(struct scsi_target *); + void (*set_rd_strm)(struct scsi_target *, int); + void (*get_rti)(struct scsi_target *); + void (*set_rti)(struct scsi_target *, int); + void (*get_pcomp_en)(struct scsi_target *); + void (*set_pcomp_en)(struct scsi_target *, int); + void (*get_hold_mcs)(struct scsi_target *); + void (*set_hold_mcs)(struct scsi_target *, int); + void (*get_signalling)(struct Scsi_Host *); + void (*set_signalling)(struct Scsi_Host *, enum spi_signal_type); + int (*deny_binding)(struct scsi_target *); + long unsigned int show_period: 1; + long unsigned int show_offset: 1; + long unsigned int show_width: 1; + long unsigned int show_iu: 1; + long unsigned int show_dt: 1; + long unsigned int show_qas: 1; + long unsigned int show_wr_flow: 1; + long unsigned int show_rd_strm: 1; + long unsigned int show_rti: 1; + long unsigned int show_pcomp_en: 1; + long unsigned int show_hold_mcs: 1; +}; + +enum { + 
SPI_BLIST_NOIUS = 1, +}; + +struct spi_internal { + struct scsi_transport_template t; + struct spi_function_template *f; +}; + +enum spi_compare_returns { + SPI_COMPARE_SUCCESS = 0, + SPI_COMPARE_FAILURE = 1, + SPI_COMPARE_SKIP_TEST = 2, +}; + +struct work_queue_wrapper { + struct work_struct work; + struct scsi_device *sdev; +}; + +enum e1000_state_t___2 { + __IGB_TESTING = 0, + __IGB_RESETTING = 1, + __IGB_DOWN = 2, + __IGB_PTP_TX_IN_PROGRESS = 3, +}; + +struct xhci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); +}; + +typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); + +enum xmp_state { + STATE_INACTIVE___5 = 0, + STATE_LEADER_PULSE = 1, + STATE_NIBBLE_SPACE = 2, +}; + +struct soc_device_attribute { + const char *machine; + const char *family; + const char *revision; + const char *serial_number; + const char *soc_id; + const void *data; + const struct attribute_group *custom_attr_group; +}; + +struct of_endpoint { + unsigned int port; + unsigned int id; + const struct device_node *local_node; +}; + +struct supplier_bindings { + struct device_node * (*parse_prop)(struct device_node *, const char *, int); + struct device_node * (*get_con_dev)(struct device_node *); + bool optional; + u8 fwlink_flags; +}; + +struct sk_buff_fclones { + struct sk_buff skb1; + struct sk_buff skb2; + refcount_t fclone_ref; +}; + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; + __u32 frag_off; +}; + +struct ip_auth_hdr { + __u8 nexthdr; + __u8 hdrlen; + __be16 reserved; + __be32 spi; + __be32 seq_no; + __u8 auth_data[0]; +}; + +struct mpls_shim_hdr { + __be32 label_stack_entry; +}; + +enum skb_drop_reason_subsys { + SKB_DROP_REASON_SUBSYS_CORE = 0, + SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE = 1, + SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR = 2, + SKB_DROP_REASON_SUBSYS_OPENVSWITCH = 3, + SKB_DROP_REASON_SUBSYS_NUM = 4, +}; + +struct drop_reason_list { + const char * const *reasons; + size_t n_reasons; +}; + +struct ts_state { + unsigned int offset; + char cb[48]; +}; + +struct ts_config; + +struct ts_ops { + const char *name; + struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); + unsigned int (*find)(struct ts_config *, struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +struct ts_config { + struct ts_ops *ops; + int flags; + unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *); + void (*finish)(struct ts_config *, struct ts_state *); +}; + +struct page_frag_1k { + void *va; + u16 offset; + bool pfmemalloc; +}; + +struct napi_alloc_cache { + local_lock_t bh_lock; + struct page_frag_cache page; + struct page_frag_1k page_small; + unsigned int skb_count; + void *skb_cache[64]; +}; + +struct skb_free_array { + 
unsigned int skb_count; + void *skb_array[16]; +}; + +typedef int (*sendmsg_func)(struct sock *, struct msghdr *); + +enum { + ETHTOOL_A_BITSET_BIT_UNSPEC = 0, + ETHTOOL_A_BITSET_BIT_INDEX = 1, + ETHTOOL_A_BITSET_BIT_NAME = 2, + ETHTOOL_A_BITSET_BIT_VALUE = 3, + __ETHTOOL_A_BITSET_BIT_CNT = 4, + ETHTOOL_A_BITSET_BIT_MAX = 3, +}; + +enum { + ETHTOOL_A_BITSET_BITS_UNSPEC = 0, + ETHTOOL_A_BITSET_BITS_BIT = 1, + __ETHTOOL_A_BITSET_BITS_CNT = 2, + ETHTOOL_A_BITSET_BITS_MAX = 1, +}; + +enum { + ETHTOOL_A_BITSET_UNSPEC = 0, + ETHTOOL_A_BITSET_NOMASK = 1, + ETHTOOL_A_BITSET_SIZE = 2, + ETHTOOL_A_BITSET_BITS = 3, + ETHTOOL_A_BITSET_VALUE = 4, + ETHTOOL_A_BITSET_MASK = 5, + __ETHTOOL_A_BITSET_CNT = 6, + ETHTOOL_A_BITSET_MAX = 5, +}; + +struct nf_sockopt_ops { + struct list_head list; + u_int8_t pf; + int set_optmin; + int set_optmax; + int (*set)(struct sock *, int, sockptr_t, unsigned int); + int get_optmin; + int get_optmax; + int (*get)(struct sock *, int, void *, int *); + struct module *owner; +}; + +enum { + NFT_PKTINFO_L4PROTO = 1, + NFT_PKTINFO_INNER = 2, + NFT_PKTINFO_INNER_FULL = 4, +}; + +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER = 0, + AUDIT_XT_OP_REPLACE = 1, + AUDIT_XT_OP_UNREGISTER = 2, + AUDIT_NFT_OP_TABLE_REGISTER = 3, + AUDIT_NFT_OP_TABLE_UNREGISTER = 4, + AUDIT_NFT_OP_CHAIN_REGISTER = 5, + AUDIT_NFT_OP_CHAIN_UNREGISTER = 6, + AUDIT_NFT_OP_RULE_REGISTER = 7, + AUDIT_NFT_OP_RULE_UNREGISTER = 8, + AUDIT_NFT_OP_SET_REGISTER = 9, + AUDIT_NFT_OP_SET_UNREGISTER = 10, + AUDIT_NFT_OP_SETELEM_REGISTER = 11, + AUDIT_NFT_OP_SETELEM_UNREGISTER = 12, + AUDIT_NFT_OP_GEN_REGISTER = 13, + AUDIT_NFT_OP_OBJ_REGISTER = 14, + AUDIT_NFT_OP_OBJ_UNREGISTER = 15, + AUDIT_NFT_OP_OBJ_RESET = 16, + AUDIT_NFT_OP_FLOWTABLE_REGISTER = 17, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER = 18, + AUDIT_NFT_OP_SETELEM_RESET = 19, + AUDIT_NFT_OP_RULE_RESET = 20, + AUDIT_NFT_OP_INVALID = 21, +}; + +struct xt_entry_match { + union { + struct { + __u16 match_size; + char name[29]; + __u8 revision; + } user; + struct { + __u16 match_size; + struct xt_match *match; + } kernel; + __u16 match_size; + } u; + unsigned char data[0]; +}; + +struct xt_error_target { + struct xt_entry_target target; + char errorname[30]; +}; + +struct xt_counters_info { + char name[32]; + unsigned int num_counters; + struct xt_counters counters[0]; +}; + +struct xt_percpu_counter_alloc_state { + unsigned int off; + const char *mem; +}; + +struct xt_template { + struct list_head list; + int (*table_init)(struct net *); + struct module *me; + char name[32]; +}; + +struct xt_pernet { + struct list_head tables[11]; +}; + +struct xt_af { + struct mutex mutex; + struct list_head match; + struct list_head target; +}; + +struct nf_mttg_trav { + struct list_head *head; + struct list_head *curr; + uint8_t class; +}; + +enum { + MTTG_TRAV_INIT = 0, + MTTG_TRAV_NFP_UNSPEC = 1, + MTTG_TRAV_NFP_SPEC = 2, + MTTG_TRAV_DONE = 3, +}; + +struct ip_options_data { + struct ip_options_rcu opt; + char data[40]; +}; + +struct ipcm_cookie { + struct sockcm_cookie sockc; + __be32 addr; + int oif; + struct ip_options_rcu *opt; + __u8 protocol; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; +}; + +struct icmp_filter { + __u32 data; +}; + +struct icmp_err { + int errno; + unsigned int fatal: 1; +}; + +struct raw_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct raw_sock { + struct inet_sock inet; + struct icmp_filter filter; + u32 ipmr_table; +}; + +struct raw_frag_vec { + struct msghdr *msg; + union { + struct icmphdr icmph; + char c[1]; + } hdr; + 
int hlen; +}; + +enum ipt_reject_with { + IPT_ICMP_NET_UNREACHABLE = 0, + IPT_ICMP_HOST_UNREACHABLE = 1, + IPT_ICMP_PROT_UNREACHABLE = 2, + IPT_ICMP_PORT_UNREACHABLE = 3, + IPT_ICMP_ECHOREPLY = 4, + IPT_ICMP_NET_PROHIBITED = 5, + IPT_ICMP_HOST_PROHIBITED = 6, + IPT_TCP_RESET = 7, + IPT_ICMP_ADMIN_PROHIBITED = 8, +}; + +struct ipt_reject_info { + enum ipt_reject_with with; +}; + +struct ping_iter_state { + struct seq_net_private p; + int bucket; + sa_family_t family; +}; + +struct pingfakehdr { + struct icmphdr icmph; + struct msghdr *msg; + sa_family_t family; + __wsum wcheck; +}; + +struct user_sve_header { + __u32 size; + __u32 max_size; + __u16 vl; + __u16 max_vl; + __u16 flags; + __u16 __reserved; +}; + +struct user_pac_mask { + __u64 data_mask; + __u64 insn_mask; +}; + +typedef u32 compat_ulong_t; + +typedef struct { + void *lock; +} class_rcu_tasks_trace_t; + +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + PERF_TYPE_MAX = 6, +}; + +enum { + HW_BREAKPOINT_LEN_1 = 1, + HW_BREAKPOINT_LEN_2 = 2, + HW_BREAKPOINT_LEN_3 = 3, + HW_BREAKPOINT_LEN_4 = 4, + HW_BREAKPOINT_LEN_5 = 5, + HW_BREAKPOINT_LEN_6 = 6, + HW_BREAKPOINT_LEN_7 = 7, + HW_BREAKPOINT_LEN_8 = 8, +}; + +enum { + HW_BREAKPOINT_EMPTY = 0, + HW_BREAKPOINT_R = 1, + HW_BREAKPOINT_W = 2, + HW_BREAKPOINT_RW = 3, + HW_BREAKPOINT_X = 4, + HW_BREAKPOINT_INVALID = 7, +}; + +enum bp_type_idx { + TYPE_INST = 0, + TYPE_DATA = 1, + TYPE_MAX = 2, +}; + +struct trace_event_raw_sys_enter { + struct trace_entry ent; + long int id; + long unsigned int args[6]; + char __data[0]; +}; + +struct trace_event_raw_sys_exit { + struct trace_entry ent; + long int id; + long int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_sys_enter {}; + +struct trace_event_data_offsets_sys_exit {}; + +typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long int); + +typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long int); + +struct pt_regs_offset { + const char *name; + int offset; +}; + +enum aarch64_regset { + REGSET_GPR = 0, + REGSET_FPR = 1, + REGSET_TLS = 2, + REGSET_HW_BREAK = 3, + REGSET_HW_WATCH = 4, + REGSET_FPMR = 5, + REGSET_SYSTEM_CALL = 6, + REGSET_SVE = 7, + REGSET_PAC_MASK = 8, + REGSET_PAC_ENABLED_KEYS = 9, + REGSET_TAGGED_ADDR_CTRL = 10, + REGSET_POE = 11, +}; + +enum ptrace_syscall_dir { + PTRACE_SYSCALL_ENTER = 0, + PTRACE_SYSCALL_EXIT = 1, +}; + +enum arm64_bp_harden_el1_vectors { + EL1_VECTOR_BHB_LOOP = 0, + EL1_VECTOR_BHB_FW = 1, + EL1_VECTOR_BHB_CLEAR_INSN = 2, + EL1_VECTOR_KPTI = 3, +}; + +enum spectre_v4_policy { + SPECTRE_V4_POLICY_MITIGATION_DYNAMIC = 0, + SPECTRE_V4_POLICY_MITIGATION_ENABLED = 1, + SPECTRE_V4_POLICY_MITIGATION_DISABLED = 2, +}; + +struct spectre_v4_param { + const char *str; + enum spectre_v4_policy policy; +}; + +enum bhb_mitigation_bits { + BHB_LOOP = 0, + BHB_FW = 1, + BHB_HW = 2, + BHB_INSN = 3, +}; + +enum aarch64_reloc_op { + RELOC_OP_NONE = 0, + RELOC_OP_ABS = 1, + RELOC_OP_PREL = 2, + RELOC_OP_PAGE = 3, +}; + +enum aarch64_insn_movw_imm_type { + AARCH64_INSN_IMM_MOVNZ = 0, + AARCH64_INSN_IMM_MOVKZ = 1, +}; + +struct kvm_vfio_file { + struct list_head node; + struct file *file; +}; + +struct kvm_vfio { + struct list_head file_list; + struct mutex lock; + bool noncoherent; +}; + +struct kvm_nvhe_stacktrace_info { + long unsigned int stack_base; + long unsigned int overflow_stack_base; + long unsigned int fp; + long unsigned int pc; +}; + +struct 
unwind_state { + long unsigned int fp; + long unsigned int pc; + struct stack_info stack; + struct stack_info *stacks; + int nr_stacks; +}; + +struct kvm_pmu_event_filter { + __u16 base_event; + __u16 nevents; + __u8 action; + __u8 pad[3]; +}; + +enum perf_hw_id { + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + PERF_COUNT_HW_MAX = 10, +}; + +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + PERF_COUNT_HW_CACHE_MAX = 7, +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_MAX = 3, +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + PERF_COUNT_HW_CACHE_RESULT_MAX = 2, +}; + +struct arm_pmu_entry { + struct list_head entry; + struct arm_pmu *arm_pmu; +}; + +struct pmu_hw_events; + +struct arm_pmu { + struct pmu pmu; + cpumask_t supported_cpus; + char *name; + int pmuver; + irqreturn_t (*handle_irq)(struct arm_pmu *); + void (*enable)(struct perf_event *); + void (*disable)(struct perf_event *); + int (*get_event_idx)(struct pmu_hw_events *, struct perf_event *); + void (*clear_event_idx)(struct pmu_hw_events *, struct perf_event *); + int (*set_event_filter)(struct hw_perf_event *, struct perf_event_attr *); + u64 (*read_counter)(struct perf_event *); + void (*write_counter)(struct perf_event *, u64); + void (*start)(struct arm_pmu *); + void (*stop)(struct arm_pmu *); + void (*reset)(void *); + int (*map_event)(struct perf_event *); + long unsigned int cntr_mask[1]; + bool secure_access; + long unsigned int pmceid_bitmap[1]; + long unsigned int pmceid_ext_bitmap[1]; + struct platform_device *plat_device; + struct pmu_hw_events *hw_events; + struct hlist_node node; + struct notifier_block cpu_pm_nb; + const struct attribute_group *attr_groups[5]; + u64 reg_pmmir; + long unsigned int acpi_cpuid; +}; + +struct pmu_hw_events { + struct perf_event *events[33]; + long unsigned int used_mask[1]; + struct arm_pmu *percpu_pmu; + int irq; +}; + +enum armpmu_attr_groups { + ARMPMU_ATTR_GROUP_COMMON = 0, + ARMPMU_ATTR_GROUP_EVENTS = 1, + ARMPMU_ATTR_GROUP_FORMATS = 2, + ARMPMU_ATTR_GROUP_CAPS = 3, + ARMPMU_NR_ATTR_GROUPS = 4, +}; + +struct wq_flusher; + +struct worker; + +struct workqueue_attrs; + +struct pool_workqueue; + +struct wq_device; + +struct wq_node_nr_active; + +struct workqueue_struct { + struct list_head pwqs; + struct list_head list; + struct mutex mutex; + int work_color; + int flush_color; + atomic_t nr_pwqs_to_flush; + struct wq_flusher *first_flusher; + struct list_head flusher_queue; + struct list_head flusher_overflow; + struct list_head maydays; + struct worker *rescuer; + int nr_drainers; + int max_active; + int min_active; + int saved_max_active; + int saved_min_active; + struct workqueue_attrs *unbound_attrs; + struct pool_workqueue *dfl_pwq; + struct wq_device *wq_dev; + char name[32]; + struct callback_head rcu; + long: 64; + long: 64; + unsigned int flags; + struct pool_workqueue 
**cpu_pwq; + struct wq_node_nr_active *node_nr_active[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum wq_affn_scope { + WQ_AFFN_DFL = 0, + WQ_AFFN_CPU = 1, + WQ_AFFN_SMT = 2, + WQ_AFFN_CACHE = 3, + WQ_AFFN_NUMA = 4, + WQ_AFFN_SYSTEM = 5, + WQ_AFFN_NR_TYPES = 6, +}; + +struct workqueue_attrs { + int nice; + cpumask_var_t cpumask; + cpumask_var_t __pod_cpumask; + bool affn_strict; + enum wq_affn_scope affn_scope; + bool ordered; +}; + +enum wq_consts { + WQ_MAX_ACTIVE = 2048, + WQ_UNBOUND_MAX_ACTIVE = 2048, + WQ_DFL_ACTIVE = 1024, + WQ_DFL_MIN_ACTIVE = 8, +}; + +struct worker_pool; + +struct worker { + union { + struct list_head entry; + struct hlist_node hentry; + }; + struct work_struct *current_work; + work_func_t current_func; + struct pool_workqueue *current_pwq; + u64 current_at; + unsigned int current_color; + int sleeping; + work_func_t last_func; + struct list_head scheduled; + struct task_struct *task; + struct worker_pool *pool; + struct list_head node; + long unsigned int last_active; + unsigned int flags; + int id; + char desc[32]; + struct workqueue_struct *rescue_wq; +}; + +struct pool_workqueue { + struct worker_pool *pool; + struct workqueue_struct *wq; + int work_color; + int flush_color; + int refcnt; + int nr_in_flight[16]; + bool plugged; + int nr_active; + struct list_head inactive_works; + struct list_head pending_node; + struct list_head pwqs_node; + struct list_head mayday_node; + u64 stats[8]; + struct kthread_work release_work; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct worker_pool { + raw_spinlock_t lock; + int cpu; + int node; + int id; + unsigned int flags; + long unsigned int watchdog_ts; + bool cpu_stall; + int nr_running; + struct list_head worklist; + int nr_workers; + int nr_idle; + struct list_head idle_list; + struct timer_list idle_timer; + struct work_struct idle_cull_work; + struct timer_list mayday_timer; + struct hlist_head busy_hash[64]; + struct worker *manager; + struct list_head workers; + struct ida worker_ida; + struct workqueue_attrs *attrs; + struct hlist_node hash_node; + int refcnt; + struct callback_head rcu; +}; + +enum worker_pool_flags { + POOL_BH = 1, + POOL_MANAGER_ACTIVE = 2, + POOL_DISASSOCIATED = 4, + POOL_BH_DRAINING = 8, +}; + +enum worker_flags { + WORKER_DIE = 2, + WORKER_IDLE = 4, + WORKER_PREP = 8, + WORKER_CPU_INTENSIVE = 64, + WORKER_UNBOUND = 128, + WORKER_REBOUND = 256, + WORKER_NOT_RUNNING = 456, +}; + +enum work_cancel_flags { + WORK_CANCEL_DELAYED = 1, + WORK_CANCEL_DISABLE = 2, +}; + +enum wq_internal_consts { + NR_STD_WORKER_POOLS = 2, + UNBOUND_POOL_HASH_ORDER = 6, + BUSY_WORKER_HASH_ORDER = 6, + MAX_IDLE_WORKERS_RATIO = 4, + IDLE_WORKER_TIMEOUT = 300000, + MAYDAY_INITIAL_TIMEOUT = 10, + MAYDAY_INTERVAL = 100, + CREATE_COOLDOWN = 1000, + RESCUER_NICE_LEVEL = -20, + HIGHPRI_NICE_LEVEL = -20, + WQ_NAME_LEN = 32, + WORKER_ID_LEN = 42, +}; + +enum pool_workqueue_stats { + PWQ_STAT_STARTED = 0, + PWQ_STAT_COMPLETED = 1, + PWQ_STAT_CPU_TIME = 2, + PWQ_STAT_CPU_INTENSIVE = 3, + PWQ_STAT_CM_WAKEUP = 4, + PWQ_STAT_REPATRIATED = 5, + PWQ_STAT_MAYDAY = 6, + PWQ_STAT_RESCUED = 7, + PWQ_NR_STATS = 8, +}; + +struct wq_flusher { + struct list_head list; + int 
flush_color; + struct completion done; +}; + +struct wq_node_nr_active { + int max; + atomic_t nr; + raw_spinlock_t lock; + struct list_head pending_pwqs; +}; + +struct wq_device { + struct workqueue_struct *wq; + struct device dev; +}; + +struct wq_pod_type { + int nr_pods; + cpumask_var_t *pod_cpus; + int *pod_node; + int *cpu_pod; +}; + +struct work_offq_data { + u32 pool_id; + u32 disable; + u32 flags; +}; + +struct trace_event_raw_workqueue_queue_work { + struct trace_entry ent; + void *work; + void *function; + u32 __data_loc_workqueue; + int req_cpu; + int cpu; + char __data[0]; +}; + +struct trace_event_raw_workqueue_activate_work { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_data_offsets_workqueue_queue_work { + u32 workqueue; + const void *workqueue_ptr_; +}; + +struct trace_event_data_offsets_workqueue_activate_work {}; + +struct trace_event_data_offsets_workqueue_execute_start {}; + +struct trace_event_data_offsets_workqueue_execute_end {}; + +typedef void (*btf_trace_workqueue_queue_work)(void *, int, struct pool_workqueue *, struct work_struct *); + +typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t); + +struct wq_drain_dead_softirq_work { + struct work_struct work; + struct worker_pool *pool; + struct completion done; +}; + +struct wq_barrier { + struct work_struct work; + struct completion done; + struct task_struct *task; +}; + +struct apply_wqattrs_ctx { + struct workqueue_struct *wq; + struct workqueue_attrs *attrs; + struct list_head list; + struct pool_workqueue *dfl_pwq; + struct pool_workqueue *pwq_tbl[0]; +}; + +struct pr_cont_work_struct { + bool comma; + work_func_t func; + long int ctr; +}; + +struct work_for_cpu { + struct work_struct work; + long int (*fn)(void *); + void *arg; + long int ret; +}; + +struct dyn_arch_ftrace {}; + +enum ftrace_bug_type { + FTRACE_BUG_UNKNOWN = 0, + FTRACE_BUG_INIT = 1, + FTRACE_BUG_NOP = 2, + FTRACE_BUG_CALL = 3, + FTRACE_BUG_UPDATE = 4, +}; + +enum { + FTRACE_FL_ENABLED = 2147483648, + FTRACE_FL_REGS = 1073741824, + FTRACE_FL_REGS_EN = 536870912, + FTRACE_FL_TRAMP = 268435456, + FTRACE_FL_TRAMP_EN = 134217728, + FTRACE_FL_IPMODIFY = 67108864, + FTRACE_FL_DISABLED = 33554432, + FTRACE_FL_DIRECT = 16777216, + FTRACE_FL_DIRECT_EN = 8388608, + FTRACE_FL_CALL_OPS = 4194304, + FTRACE_FL_CALL_OPS_EN = 2097152, + FTRACE_FL_TOUCHED = 1048576, + FTRACE_FL_MODIFIED = 524288, +}; + +struct dyn_ftrace { + long unsigned int ip; + long unsigned int flags; + struct dyn_arch_ftrace arch; +}; + +enum { + FTRACE_UPDATE_IGNORE = 0, + FTRACE_UPDATE_MAKE_CALL = 1, + FTRACE_UPDATE_MODIFY_CALL = 2, + FTRACE_UPDATE_MAKE_NOP = 3, +}; + +enum { + FTRACE_ITER_FILTER = 1, + FTRACE_ITER_NOTRACE = 2, + FTRACE_ITER_PRINTALL = 4, + FTRACE_ITER_DO_PROBES = 8, + FTRACE_ITER_PROBE = 16, + FTRACE_ITER_MOD = 32, + FTRACE_ITER_ENABLED = 64, + FTRACE_ITER_TOUCHED = 128, + FTRACE_ITER_ADDRS = 256, +}; + +enum perf_record_ksymbol_type { + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, + PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + PERF_RECORD_KSYMBOL_TYPE_OOL = 2, + 
PERF_RECORD_KSYMBOL_TYPE_MAX = 3, +}; + +enum { + TRACE_PIDS = 1, + TRACE_NO_PIDS = 2, +}; + +struct ftrace_mod_load { + struct list_head list; + char *func; + char *module; + int enable; +}; + +struct trace_parser { + bool cont; + char *buffer; + unsigned int idx; + unsigned int size; +}; + +enum { + FTRACE_MODIFY_ENABLE_FL = 1, + FTRACE_MODIFY_MAY_SLEEP_FL = 2, +}; + +struct ftrace_func_probe { + struct ftrace_probe_ops *probe_ops; + struct ftrace_ops ops; + struct trace_array *tr; + struct list_head list; + void *data; + int ref; +}; + +struct ftrace_page { + struct ftrace_page *next; + struct dyn_ftrace *records; + int index; + int order; +}; + +struct ftrace_rec_iter { + struct ftrace_page *pg; + int index; +}; + +struct ftrace_iterator { + loff_t pos; + loff_t func_pos; + loff_t mod_pos; + struct ftrace_page *pg; + struct dyn_ftrace *func; + struct ftrace_func_probe *probe; + struct ftrace_func_entry *probe_entry; + struct trace_parser parser; + struct ftrace_hash *hash; + struct ftrace_ops *ops; + struct trace_array *tr; + struct list_head *mod_list; + int pidx; + int idx; + unsigned int flags; +}; + +struct ftrace_glob { + char *search; + unsigned int len; + int type; +}; + +struct ftrace_func_map { + struct ftrace_func_entry entry; + void *data; +}; + +struct ftrace_func_mapper { + struct ftrace_hash hash; +}; + +enum graph_filter_type { + GRAPH_FILTER_NOTRACE = 0, + GRAPH_FILTER_FUNCTION = 1, +}; + +struct ftrace_graph_data { + struct ftrace_hash *hash; + struct ftrace_func_entry *entry; + int idx; + enum graph_filter_type type; + struct ftrace_hash *new_hash; + const struct seq_operations *seq_ops; + struct trace_parser parser; +}; + +struct ftrace_mod_func { + struct list_head list; + char *name; + long unsigned int ip; + unsigned int size; +}; + +struct ftrace_mod_map { + struct callback_head rcu; + struct list_head list; + struct module *mod; + long unsigned int start_addr; + long unsigned int end_addr; + struct list_head funcs; + unsigned int num_funcs; +}; + +struct ftrace_init_func { + struct list_head list; + long unsigned int ip; +}; + +struct kallsyms_data { + long unsigned int *addrs; + const char **syms; + size_t cnt; + size_t found; +}; + +enum { + XA_CHECK_SCHED = 4096, +}; + +enum wb_state { + WB_registered = 0, + WB_writeback_running = 1, + WB_has_dirty_io = 2, + WB_start_all = 3, +}; + +struct wb_lock_cookie { + bool locked; + long unsigned int flags; +}; + +struct dirty_throttle_control { + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + long unsigned int avail; + long unsigned int dirty; + long unsigned int thresh; + long unsigned int bg_thresh; + long unsigned int wb_dirty; + long unsigned int wb_thresh; + long unsigned int wb_bg_thresh; + long unsigned int pos_ratio; + bool freerun; + bool dirty_exceeded; +}; + +struct anon_vma_name { + struct kref kref; + char name[0]; +}; + +struct vma_prepare { + struct vm_area_struct *vma; + struct vm_area_struct *adj_next; + struct file *file; + struct address_space *mapping; + struct anon_vma *anon_vma; + struct vm_area_struct *insert; + struct vm_area_struct *remove; + struct vm_area_struct *remove2; +}; + +struct vma_munmap_struct { + struct vma_iterator *vmi; + struct vm_area_struct *vma; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct list_head *uf; + long unsigned int start; + long unsigned int end; + long unsigned int unmap_start; + long unsigned int unmap_end; + int vma_count; + bool unlock; + bool 
clear_ptes; + long unsigned int nr_pages; + long unsigned int locked_vm; + long unsigned int nr_accounted; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int data_vm; +}; + +enum vma_merge_state { + VMA_MERGE_START = 0, + VMA_MERGE_ERROR_NOMEM = 1, + VMA_MERGE_NOMERGE = 2, + VMA_MERGE_SUCCESS = 3, +}; + +enum vma_merge_flags { + VMG_FLAG_DEFAULT = 0, + VMG_FLAG_JUST_EXPAND = 1, +}; + +struct vma_merge_struct { + struct mm_struct *mm; + struct vma_iterator *vmi; + long unsigned int pgoff; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct vm_area_struct *vma; + long unsigned int start; + long unsigned int end; + long unsigned int flags; + struct file *file; + struct anon_vma *anon_vma; + struct mempolicy *policy; + struct vm_userfaultfd_ctx uffd_ctx; + struct anon_vma_name *anon_name; + enum vma_merge_flags merge_flags; + enum vma_merge_state state; +}; + +struct mmap_state { + struct mm_struct *mm; + struct vma_iterator *vmi; + long unsigned int addr; + long unsigned int end; + long unsigned int pgoff; + long unsigned int pglen; + long unsigned int flags; + struct file *file; + long unsigned int charged; + bool retry_merge; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct vma_munmap_struct vms; + struct ma_state mas_detach; + struct maple_tree mt_detach; +}; + +typedef struct fd class_fd_pos_t; + +typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *); + +enum { + DIO_LOCKING = 1, + DIO_SKIP_HOLES = 2, +}; + +struct dio_submit { + struct bio *bio; + unsigned int blkbits; + unsigned int blkfactor; + unsigned int start_zero_done; + int pages_in_io; + sector_t block_in_file; + unsigned int blocks_available; + int reap_counter; + sector_t final_block_in_request; + int boundary; + get_block_t *get_block; + loff_t logical_offset_in_bio; + sector_t final_block_in_bio; + sector_t next_block_for_io; + struct page *cur_page; + unsigned int cur_page_offset; + unsigned int cur_page_len; + sector_t cur_page_block; + loff_t cur_page_fs_offset; + struct iov_iter *iter; + unsigned int head; + unsigned int tail; + size_t from; + size_t to; +}; + +struct dio { + int flags; + blk_opf_t opf; + struct gendisk *bio_disk; + struct inode *inode; + loff_t i_size; + dio_iodone_t *end_io; + bool is_pinned; + void *private; + spinlock_t bio_lock; + int page_errors; + int is_async; + bool defer_completion; + bool should_dirty; + int io_error; + long unsigned int refcount; + struct bio *bio_list; + struct task_struct *waiter; + struct kiocb *iocb; + ssize_t result; + union { + struct page *pages[64]; + struct work_struct complete_work; + }; + long: 64; +}; + +struct mb_cache { + struct hlist_bl_head *c_hash; + int c_bucket_bits; + long unsigned int c_max_entries; + spinlock_t c_list_lock; + struct list_head c_list; + long unsigned int c_entry_count; + struct shrinker *c_shrink; + struct work_struct c_shrink_work; +}; + +struct ext4_dir_entry { + __le32 inode; + __le16 rec_len; + __le16 name_len; + char name[255]; +}; + +struct ext4_dir_entry_tail { + __le32 det_reserved_zero1; + __le16 det_rec_len; + __u8 det_reserved_zero2; + __u8 det_reserved_ft; + __le32 det_checksum; +}; + +typedef enum { + EITHER = 0, + INDEX = 1, + DIRENT = 2, + DIRENT_HTREE = 3, +} dirblock_type_t; + +struct fake_dirent { + __le32 inode; + __le16 rec_len; + u8 name_len; + u8 file_type; +}; + +struct dx_countlimit { + __le16 limit; + __le16 count; +}; + +struct dx_entry { + __le32 hash; + __le32 block; +}; + +struct dx_root_info { + __le32 reserved_zero; + u8 
hash_version; + u8 info_length; + u8 indirect_levels; + u8 unused_flags; +}; + +struct dx_root { + struct fake_dirent dot; + char dot_name[4]; + struct fake_dirent dotdot; + char dotdot_name[4]; + struct dx_root_info info; + struct dx_entry entries[0]; +}; + +struct dx_node { + struct fake_dirent fake; + struct dx_entry entries[0]; +}; + +struct dx_frame { + struct buffer_head *bh; + struct dx_entry *entries; + struct dx_entry *at; +}; + +struct dx_map_entry { + u32 hash; + u16 offs; + u16 size; +}; + +struct dx_tail { + u32 dt_reserved; + __le32 dt_checksum; +}; + +struct ext4_renament { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool is_dir; + int dir_nlink_delta; + struct buffer_head *bh; + struct ext4_dir_entry_2 *de; + int inlined; + struct buffer_head *dir_bh; + struct ext4_dir_entry_2 *parent_de; + int dir_inlined; +}; + +typedef unsigned int autofs_wqt_t; + +struct autofs_sb_info; + +struct autofs_info { + struct dentry *dentry; + int flags; + struct completion expire_complete; + struct list_head active; + struct list_head expiring; + struct autofs_sb_info *sbi; + long unsigned int exp_timeout; + long unsigned int last_used; + int count; + kuid_t uid; + kgid_t gid; + struct callback_head rcu; +}; + +struct autofs_wait_queue; + +struct autofs_sb_info { + u32 magic; + int pipefd; + struct file *pipe; + struct pid *oz_pgrp; + int version; + int sub_version; + int min_proto; + int max_proto; + unsigned int flags; + long unsigned int exp_timeout; + unsigned int type; + struct super_block *sb; + struct mutex wq_mutex; + struct mutex pipe_mutex; + spinlock_t fs_lock; + struct autofs_wait_queue *queues; + spinlock_t lookup_lock; + struct list_head active_list; + struct list_head expiring_list; + struct callback_head rcu; +}; + +struct autofs_wait_queue { + wait_queue_head_t queue; + struct autofs_wait_queue *next; + autofs_wqt_t wait_queue_token; + struct qstr name; + u32 offset; + u32 dev; + u64 ino; + kuid_t uid; + kgid_t gid; + pid_t pid; + pid_t tgid; + int status; + unsigned int wait_ctr; +}; + +enum btrfs_delayed_ref_flags { + BTRFS_DELAYED_REFS_FLUSHING = 0, +}; + +enum { + ____TRANS_FREEZABLE_BIT = 0, + __TRANS_FREEZABLE = 1, + ____TRANS_FREEZABLE_SEQ = 0, + ____TRANS_START_BIT = 1, + __TRANS_START = 2, + ____TRANS_START_SEQ = 1, + ____TRANS_ATTACH_BIT = 2, + __TRANS_ATTACH = 4, + ____TRANS_ATTACH_SEQ = 2, + ____TRANS_JOIN_BIT = 3, + __TRANS_JOIN = 8, + ____TRANS_JOIN_SEQ = 3, + ____TRANS_JOIN_NOLOCK_BIT = 4, + __TRANS_JOIN_NOLOCK = 16, + ____TRANS_JOIN_NOLOCK_SEQ = 4, + ____TRANS_DUMMY_BIT = 5, + __TRANS_DUMMY = 32, + ____TRANS_DUMMY_SEQ = 5, + ____TRANS_JOIN_NOSTART_BIT = 6, + __TRANS_JOIN_NOSTART = 64, + ____TRANS_JOIN_NOSTART_SEQ = 6, +}; + +struct msgbuf { + __kernel_long_t mtype; + char mtext[1]; +}; + +struct msg; + +struct msqid_ds { + struct ipc_perm msg_perm; + struct msg *msg_first; + struct msg *msg_last; + __kernel_old_time_t msg_stime; + __kernel_old_time_t msg_rtime; + __kernel_old_time_t msg_ctime; + long unsigned int msg_lcbytes; + long unsigned int msg_lqbytes; + short unsigned int msg_cbytes; + short unsigned int msg_qnum; + short unsigned int msg_qbytes; + __kernel_ipc_pid_t msg_lspid; + __kernel_ipc_pid_t msg_lrpid; +}; + +struct msqid64_ds { + struct ipc64_perm msg_perm; + long int msg_stime; + long int msg_rtime; + long int msg_ctime; + long unsigned int msg_cbytes; + long unsigned int msg_qnum; + long unsigned int msg_qbytes; + __kernel_pid_t msg_lspid; + __kernel_pid_t msg_lrpid; + long unsigned int __unused4; + long unsigned int 
__unused5; +}; + +struct msginfo { + int msgpool; + int msgmap; + int msgmax; + int msgmnb; + int msgmni; + int msgssz; + int msgtql; + short unsigned int msgseg; +}; + +struct msg_queue { + struct kern_ipc_perm q_perm; + time64_t q_stime; + time64_t q_rtime; + time64_t q_ctime; + long unsigned int q_cbytes; + long unsigned int q_qnum; + long unsigned int q_qbytes; + struct pid *q_lspid; + struct pid *q_lrpid; + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; + long: 64; + long: 64; +}; + +struct msg_receiver { + struct list_head r_list; + struct task_struct *r_tsk; + int r_mode; + long int r_msgtype; + long int r_maxsize; + struct msg_msg *r_msg; +}; + +struct msg_sender { + struct list_head list; + struct task_struct *tsk; + size_t msgsz; +}; + +typedef struct { + __le64 b; + __le64 a; +} le128; + +struct xts_tfm_ctx { + struct crypto_skcipher *child; + struct crypto_cipher *tweak; +}; + +struct xts_instance_ctx { + struct crypto_skcipher_spawn spawn; + struct crypto_cipher_spawn tweak_spawn; +}; + +struct xts_request_ctx { + le128 t; + struct scatterlist *tail; + struct scatterlist sg[2]; + struct skcipher_request subreq; +}; + +struct bvec_iter_all { + struct bio_vec bv; + int idx; + unsigned int done; +}; + +struct bio_alloc_cache { + struct bio *free_list; + struct bio *free_list_irq; + unsigned int nr; + unsigned int nr_irq; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; + +struct io_uring_rsrc_register { + __u32 nr; + __u32 flags; + __u64 resv2; + __u64 data; + __u64 tags; +}; + +struct io_uring_rsrc_update2 { + __u32 offset; + __u32 resv; + __u64 data; + __u64 tags; + __u32 nr; + __u32 resv2; +}; + +enum { + IORING_REGISTER_SRC_REGISTERED = 1, + IORING_REGISTER_DST_REPLACE = 2, +}; + +struct io_uring_clone_buffers { + __u32 src_fd; + __u32 flags; + __u32 src_off; + __u32 dst_off; + __u32 nr; + __u32 pad[3]; +}; + +enum { + IORING_RSRC_FILE = 0, + IORING_RSRC_BUFFER = 1, +}; + +struct io_imu_folio_data { + unsigned int nr_pages_head; + unsigned int nr_pages_mid; + unsigned int folio_shift; +}; + +struct io_rsrc_update { + struct file *file; + u64 arg; + u32 nr_args; + u32 offset; +}; + +typedef struct { + size_t bitContainer; + unsigned int bitsConsumed; + const char *ptr; + const char *start; + const char *limitPtr; +} BIT_DStream_t; + +typedef enum { + BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3, +} BIT_DStream_status; + +typedef unsigned int FSE_DTable; + +typedef struct { + size_t state; + const void *table; +} FSE_DState_t; + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; + +typedef struct { + short unsigned int newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; + +typedef struct { + short int ncount[256]; + FSE_DTable dtable[0]; +} FSE_DecompressWksp; + +struct pci_dev_resource { + struct list_head list; + struct resource *res; + struct pci_dev *dev; + resource_size_t start; + resource_size_t end; + resource_size_t add_size; + resource_size_t min_align; + long unsigned int flags; +}; + +enum release_type { + leaf_only = 0, + whole_subtree = 1, +}; + +enum enable_type { + undefined = -1, + user_disabled = 0, + auto_disabled = 1, + user_enabled = 2, + auto_enabled = 3, +}; + +struct fb_cmap_user { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 
*blue; + __u16 *transp; +}; + +struct memdev { + const char *name; + const struct file_operations *fops; + fmode_t fmode; + umode_t mode; +}; + +typedef void (*async_func_t)(void *, async_cookie_t); + +struct async_domain { + struct list_head pending; + unsigned int registered: 1; +}; + +enum fw_opt { + FW_OPT_UEVENT = 1, + FW_OPT_NOWAIT = 2, + FW_OPT_USERHELPER = 4, + FW_OPT_NO_WARN = 8, + FW_OPT_NOCACHE = 16, + FW_OPT_NOFALLBACK_SYSFS = 32, + FW_OPT_FALLBACK_PLATFORM = 64, + FW_OPT_PARTIAL = 128, +}; + +enum fw_status { + FW_STATUS_UNKNOWN = 0, + FW_STATUS_LOADING = 1, + FW_STATUS_DONE = 2, + FW_STATUS_ABORTED = 3, +}; + +struct fw_state { + struct completion completion; + enum fw_status status; +}; + +struct firmware_cache; + +struct fw_priv { + struct kref ref; + struct list_head list; + struct firmware_cache *fwc; + struct fw_state fw_st; + void *data; + size_t size; + size_t allocated_size; + size_t offset; + u32 opt_flags; + const char *fw_name; +}; + +struct firmware_cache { + spinlock_t lock; + struct list_head head; + int state; + spinlock_t name_lock; + struct list_head fw_names; + struct delayed_work work; + struct notifier_block pm_notify; +}; + +struct fw_cache_entry { + struct list_head list; + const char *name; +}; + +struct fw_name_devm { + long unsigned int magic; + const char *name; +}; + +struct firmware_work { + struct work_struct work; + struct module *module; + const char *name; + struct device *device; + void *context; + void (*cont)(const struct firmware *, void *); + u32 opt_flags; +}; + +enum hsm_task_states { + HSM_ST_IDLE = 0, + HSM_ST_FIRST = 1, + HSM_ST = 2, + HSM_ST_LAST = 3, + HSM_ST_ERR = 4, +}; + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE = 0, + PKT_HASH_TYPE_L2 = 1, + PKT_HASH_TYPE_L3 = 2, + PKT_HASH_TYPE_L4 = 3, +}; + +enum gro_result { + GRO_MERGED = 0, + GRO_MERGED_FREE = 1, + GRO_HELD = 2, + GRO_NORMAL = 3, + GRO_CONSUMED = 4, +}; + +typedef enum gro_result gro_result_t; + +union e1000_rx_desc_packet_split { + struct { + __le64 buffer_addr[4]; + } read; + struct { + struct { + __le32 mrq; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length0; + __le16 vlan; + } middle; + struct { + __le16 header_status; + __le16 length[3]; + } upper; + __le64 reserved; + } wb; +}; + +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; + u8 ipcso; + __le16 ipcse; + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; + u8 tucso; + __le16 tucse; + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; + u8 hdr_len; + __le16 mss; + } fields; + } tcp_seg_setup; +}; + +enum e1000_boards { + board_82571 = 0, + board_82572 = 1, + board_82573 = 2, + board_82574 = 3, + board_82583 = 4, + board_80003es2lan = 5, + board_ich8lan = 6, + board_ich9lan = 7, + board_ich10lan = 8, + board_pchlan = 9, + board_pch2lan = 10, + board_pch_lpt = 11, + board_pch_spt = 12, + board_pch_cnp = 13, + board_pch_tgp = 14, + board_pch_adp = 15, + board_pch_mtp = 16, +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255, +}; + +struct trace_event_raw_e1000e_trace_mac_register { + struct trace_entry ent; + uint32_t reg; + char __data[0]; +}; + +struct trace_event_data_offsets_e1000e_trace_mac_register {}; + +typedef void (*btf_trace_e1000e_trace_mac_register)(void *, uint32_t); + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + 
+struct my_u0 { + __le64 a; + __le64 b; +}; + +struct my_u1 { + __le64 a; + __le64 b; + __le64 c; + __le64 d; +}; + +struct kvm_ptp_clock { + struct ptp_clock *ptp_clock; + struct ptp_clock_info caps; +}; + +struct usage_priority { + __u32 usage; + bool global; + unsigned int slot_overwrite; +}; + +typedef bool (*hid_usage_cmp_t)(struct hid_usage *, unsigned int, unsigned int); + +struct scm_cookie { + struct pid *pid; + struct scm_fp_list *fp; + struct scm_creds creds; +}; + +struct scm_timestamping { + struct __kernel_old_timespec ts[3]; +}; + +struct scm_timestamping64 { + struct __kernel_timespec ts[3]; +}; + +struct netdev_queue_stats_rx { + u64 bytes; + u64 packets; + u64 alloc_fail; + u64 hw_drops; + u64 hw_drop_overruns; + u64 csum_unnecessary; + u64 csum_none; + u64 csum_bad; + u64 hw_gro_packets; + u64 hw_gro_bytes; + u64 hw_gro_wire_packets; + u64 hw_gro_wire_bytes; + u64 hw_drop_ratelimits; +}; + +struct netdev_queue_stats_tx { + u64 bytes; + u64 packets; + u64 hw_drops; + u64 hw_drop_errors; + u64 csum_none; + u64 needs_csum; + u64 hw_gso_packets; + u64 hw_gso_bytes; + u64 hw_gso_wire_packets; + u64 hw_gso_wire_bytes; + u64 hw_drop_ratelimits; + u64 stop; + u64 wake; +}; + +struct sockaddr_nl { + __kernel_sa_family_t nl_family; + short unsigned int nl_pad; + __u32 nl_pid; + __u32 nl_groups; +}; + +struct nlmsgerr { + int error; + struct nlmsghdr msg; +}; + +enum nlmsgerr_attrs { + NLMSGERR_ATTR_UNUSED = 0, + NLMSGERR_ATTR_MSG = 1, + NLMSGERR_ATTR_OFFS = 2, + NLMSGERR_ATTR_COOKIE = 3, + NLMSGERR_ATTR_POLICY = 4, + NLMSGERR_ATTR_MISS_TYPE = 5, + NLMSGERR_ATTR_MISS_NEST = 6, + __NLMSGERR_ATTR_MAX = 7, + NLMSGERR_ATTR_MAX = 6, +}; + +struct nl_pktinfo { + __u32 group; +}; + +enum { + NETLINK_UNCONNECTED = 0, + NETLINK_CONNECTED = 1, +}; + +enum netlink_skb_flags { + NETLINK_SKB_DST = 8, +}; + +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +struct trace_event_raw_netlink_extack { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_netlink_extack { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_netlink_extack)(void *, const char *); + +enum { + NETLINK_F_KERNEL_SOCKET = 0, + NETLINK_F_RECV_PKTINFO = 1, + NETLINK_F_BROADCAST_SEND_ERROR = 2, + NETLINK_F_RECV_NO_ENOBUFS = 3, + NETLINK_F_LISTEN_ALL_NSID = 4, + NETLINK_F_CAP_ACK = 5, + NETLINK_F_EXT_ACK = 6, + NETLINK_F_STRICT_CHK = 7, +}; + +struct netlink_sock { + struct sock sk; + long unsigned int flags; + u32 portid; + u32 dst_portid; + u32 dst_group; + u32 subscriptions; + u32 ngroups; + long unsigned int *groups; + long unsigned int state; + size_t max_recvmsg_len; + wait_queue_head_t wait; + bool bound; + bool cb_running; + int dump_done_errno; + struct netlink_callback cb; + struct mutex nl_cb_mutex; + void (*netlink_rcv)(struct sk_buff *); + int (*netlink_bind)(struct net *, int); + void (*netlink_unbind)(struct net *, int); + void (*netlink_release)(struct sock *, long unsigned int *); + struct module *module; + struct rhash_head node; + struct callback_head rcu; +}; + +struct listeners; + +struct netlink_table { + struct rhashtable hash; + struct hlist_head mc_list; + struct listeners *listeners; + unsigned int flags; + unsigned int groups; + struct mutex *cb_mutex; + struct module *module; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, long unsigned int *); + int registered; +}; + +struct listeners { + struct callback_head rcu; + long 
unsigned int masks[0]; +}; + +struct netlink_tap_net { + struct list_head netlink_tap_all; + struct mutex netlink_tap_lock; +}; + +struct netlink_compare_arg { + possible_net_t pnet; + u32 portid; +}; + +struct netlink_broadcast_data { + struct sock *exclude_sk; + struct net *net; + u32 portid; + u32 group; + int failure; + int delivery_failure; + int congested; + int delivered; + gfp_t allocation; + struct sk_buff *skb; + struct sk_buff *skb2; + int (*tx_filter)(struct sock *, struct sk_buff *, void *); + void *tx_data; +}; + +struct netlink_set_err_data { + struct sock *exclude_sk; + u32 portid; + u32 group; + int code; +}; + +struct nl_seq_iter { + struct seq_net_private p; + struct rhashtable_iter hti; + int link; +}; + +struct bpf_iter__netlink { + union { + struct bpf_iter_meta *meta; + }; + union { + struct netlink_sock *sk; + }; +}; + +struct tasklet_struct { + struct tasklet_struct *next; + long unsigned int state; + atomic_t count; + bool use_callback; + union { + void (*func)(long unsigned int); + void (*callback)(struct tasklet_struct *); + }; + long unsigned int data; +}; + +enum { + TASKLET_STATE_SCHED = 0, + TASKLET_STATE_RUN = 1, +}; + +enum { + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, +}; + +enum tsq_flags { + TSQF_THROTTLED = 1, + TSQF_QUEUED = 2, + TCPF_TSQ_DEFERRED = 4, + TCPF_WRITE_TIMER_DEFERRED = 8, + TCPF_DELACK_TIMER_DEFERRED = 16, + TCPF_MTU_REDUCED_DEFERRED = 32, + TCPF_ACK_DEFERRED = 64, +}; + +struct tcp_ao_key { + struct hlist_node node; + union tcp_ao_addr addr; + u8 key[80]; + unsigned int tcp_sigpool_id; + unsigned int digest_size; + int l3index; + u8 prefixlen; + u8 family; + u8 keylen; + u8 keyflags; + u8 sndid; + u8 rcvid; + u8 maclen; + struct callback_head rcu; + atomic64_t pkt_good; + atomic64_t pkt_bad; + u8 traffic_keys[0]; +}; + +struct mptcp_out_options {}; + +enum tcp_queue { + TCP_FRAG_IN_WRITE_QUEUE = 0, + TCP_FRAG_IN_RTX_QUEUE = 1, +}; + +struct tcp_key { + union { + struct { + struct tcp_ao_key *ao_key; + char *traffic_key; + u32 sne; + u8 rcv_next; + }; + struct tcp_md5sig_key *md5_key; + }; + enum { + TCP_KEY_NONE = 0, + TCP_KEY_MD5 = 1, + TCP_KEY_AO = 2, + } type; +}; + +struct tcp_out_options { + u16 options; + u16 mss; + u8 ws; + u8 num_sack_blocks; + u8 hash_size; + u8 bpf_opt_len; + __u8 *hash_location; + __u32 tsval; + __u32 tsecr; + struct tcp_fastopen_cookie *fastopen_cookie; + struct mptcp_out_options mptcp; +}; + +struct tsq_tasklet { + struct tasklet_struct tasklet; + struct list_head head; +}; + +struct iptable_nat_pernet { + struct nf_hook_ops *nf_nat_ops; +}; + +struct rt0_hdr { + struct ipv6_rt_hdr rt_hdr; + __u32 reserved; + struct in6_addr addr[0]; +}; + +struct ipv6_rpl_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u32 cmpri: 4; + __u32 cmpre: 4; + __u32 pad: 4; + __u32 reserved: 20; + union { + struct { + struct {} __empty_addr; + struct in6_addr addr[0]; + }; + struct { + struct {} __empty_data; + __u8 data[0]; + }; + } segments; +}; + +struct ioam6_hdr { + __u8 opt_type; + __u8 opt_len; + char: 8; + __u8 type; +}; + +struct sockaddr_pkt { + short unsigned int spkt_family; + unsigned char spkt_device[14]; + __be16 spkt_protocol; +}; + +struct sockaddr_ll { + short unsigned int sll_family; + __be16 sll_protocol; + int sll_ifindex; + short unsigned int sll_hatype; + unsigned char sll_pkttype; + unsigned char sll_halen; + unsigned char sll_addr[8]; +}; + +struct tpacket_stats { + unsigned int tp_packets; + unsigned int tp_drops; +}; + +struct tpacket_stats_v3 
{ + unsigned int tp_packets; + unsigned int tp_drops; + unsigned int tp_freeze_q_cnt; +}; + +struct tpacket_rollover_stats { + __u64 tp_all; + __u64 tp_huge; + __u64 tp_failed; +}; + +union tpacket_stats_u { + struct tpacket_stats stats1; + struct tpacket_stats_v3 stats3; +}; + +struct tpacket_auxdata { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; +}; + +struct tpacket_hdr { + long unsigned int tp_status; + unsigned int tp_len; + unsigned int tp_snaplen; + short unsigned int tp_mac; + short unsigned int tp_net; + unsigned int tp_sec; + unsigned int tp_usec; +}; + +struct tpacket2_hdr { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u32 tp_sec; + __u32 tp_nsec; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u8 tp_padding[4]; +}; + +struct tpacket_hdr_variant1 { + __u32 tp_rxhash; + __u32 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u16 tp_padding; +}; + +struct tpacket3_hdr { + __u32 tp_next_offset; + __u32 tp_sec; + __u32 tp_nsec; + __u32 tp_snaplen; + __u32 tp_len; + __u32 tp_status; + __u16 tp_mac; + __u16 tp_net; + union { + struct tpacket_hdr_variant1 hv1; + }; + __u8 tp_padding[8]; +}; + +struct tpacket_bd_ts { + unsigned int ts_sec; + union { + unsigned int ts_usec; + unsigned int ts_nsec; + }; +}; + +struct tpacket_hdr_v1 { + __u32 block_status; + __u32 num_pkts; + __u32 offset_to_first_pkt; + __u32 blk_len; + __u64 seq_num; + struct tpacket_bd_ts ts_first_pkt; + struct tpacket_bd_ts ts_last_pkt; +}; + +union tpacket_bd_header_u { + struct tpacket_hdr_v1 bh1; +}; + +struct tpacket_block_desc { + __u32 version; + __u32 offset_to_priv; + union tpacket_bd_header_u hdr; +}; + +enum tpacket_versions { + TPACKET_V1 = 0, + TPACKET_V2 = 1, + TPACKET_V3 = 2, +}; + +struct tpacket_req { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; +}; + +struct tpacket_req3 { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; + unsigned int tp_retire_blk_tov; + unsigned int tp_sizeof_priv; + unsigned int tp_feature_req_word; +}; + +union tpacket_req_u { + struct tpacket_req req; + struct tpacket_req3 req3; +}; + +struct fanout_args { + __u16 type_flags; + __u16 id; + __u32 max_num_members; +}; + +struct virtio_net_hdr { + __u8 flags; + __u8 gso_type; + __virtio16 hdr_len; + __virtio16 gso_size; + __virtio16 csum_start; + __virtio16 csum_offset; +}; + +struct virtio_net_hdr_mrg_rxbuf { + struct virtio_net_hdr hdr; + __virtio16 num_buffers; +}; + +struct packet_mclist { + struct packet_mclist *next; + int ifindex; + int count; + short unsigned int type; + short unsigned int alen; + unsigned char addr[32]; +}; + +struct pgv; + +struct tpacket_kbdq_core { + struct pgv *pkbdq; + unsigned int feature_req_word; + unsigned int hdrlen; + unsigned char reset_pending_on_curr_blk; + unsigned char delete_blk_timer; + short unsigned int kactive_blk_num; + short unsigned int blk_sizeof_priv; + short unsigned int last_kactive_blk_num; + char *pkblk_start; + char *pkblk_end; + int kblk_size; + unsigned int max_frame_len; + unsigned int knum_blocks; + uint64_t knxt_seq_num; + char *prev; + char *nxt_offset; + struct sk_buff *skb; + rwlock_t blk_fill_in_prog_lock; + short unsigned int retire_blk_tov; + short unsigned int version; + long unsigned int tov_in_jiffies; + struct timer_list retire_blk_timer; +}; + +struct pgv { + char *buffer; +}; + +struct packet_ring_buffer { + 
struct pgv *pg_vec; + unsigned int head; + unsigned int frames_per_block; + unsigned int frame_size; + unsigned int frame_max; + unsigned int pg_vec_order; + unsigned int pg_vec_pages; + unsigned int pg_vec_len; + unsigned int *pending_refcnt; + union { + long unsigned int *rx_owner_map; + struct tpacket_kbdq_core prb_bdqc; + }; +}; + +struct packet_fanout { + possible_net_t net; + unsigned int num_members; + u32 max_num_members; + u16 id; + u8 type; + u8 flags; + union { + atomic_t rr_cur; + struct bpf_prog *bpf_prog; + }; + struct list_head list; + spinlock_t lock; + refcount_t sk_ref; + long: 64; + struct packet_type prot_hook; + struct sock *arr[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct packet_rollover { + int sock; + atomic_long_t num; + atomic_long_t num_huge; + atomic_long_t num_failed; + long: 64; + long: 64; + long: 64; + long: 64; + u32 history[16]; +}; + +struct packet_sock { + struct sock sk; + struct packet_fanout *fanout; + union tpacket_stats_u stats; + struct packet_ring_buffer rx_ring; + struct packet_ring_buffer tx_ring; + int copy_thresh; + spinlock_t bind_lock; + struct mutex pg_vec_lock; + long unsigned int flags; + int ifindex; + u8 vnet_hdr_sz; + __be16 num; + struct packet_rollover *rollover; + struct packet_mclist *mclist; + atomic_long_t mapped; + enum tpacket_versions tp_version; + unsigned int tp_hdrlen; + unsigned int tp_reserve; + unsigned int tp_tstamp; + struct completion skb_completion; + struct net_device *cached_dev; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct packet_type prot_hook; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + atomic_t tp_drops; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum packet_sock_flags { + PACKET_SOCK_ORIGDEV = 0, + PACKET_SOCK_AUXDATA = 1, + PACKET_SOCK_TX_HAS_OFF = 2, + PACKET_SOCK_TP_LOSS = 3, + PACKET_SOCK_RUNNING = 4, + PACKET_SOCK_PRESSURE = 5, + PACKET_SOCK_QDISC_BYPASS = 6, +}; + +struct packet_mreq_max { + int mr_ifindex; + short unsigned int mr_type; + short unsigned int mr_alen; + unsigned char mr_address[32]; +}; + +union tpacket_uhdr { + struct tpacket_hdr *h1; + struct tpacket2_hdr *h2; + struct tpacket3_hdr *h3; + void *raw; +}; + +struct packet_skb_cb { + union { + struct sockaddr_pkt pkt; + union { + unsigned int origlen; + struct sockaddr_ll ll; + }; + } sa; +}; + +struct mpidr_hash { + u64 mask; + u32 shift_aff[4]; + u32 bits; +}; + +enum dbg_active_el { + DBG_ACTIVE_EL0 = 0, + DBG_ACTIVE_EL1 = 1, +}; + +enum hw_breakpoint_ops { + HW_BREAKPOINT_INSTALL = 0, + HW_BREAKPOINT_UNINSTALL = 1, + HW_BREAKPOINT_RESTORE = 2, +}; + +enum aarch64_insn_special_register { + AARCH64_INSN_SPCLREG_SPSR_EL1 = 49664, + AARCH64_INSN_SPCLREG_ELR_EL1 = 49665, + AARCH64_INSN_SPCLREG_SP_EL0 = 49672, + AARCH64_INSN_SPCLREG_SPSEL = 49680, + AARCH64_INSN_SPCLREG_CURRENTEL = 49682, + AARCH64_INSN_SPCLREG_DAIF = 55825, + AARCH64_INSN_SPCLREG_NZCV = 55824, + AARCH64_INSN_SPCLREG_FPCR = 55840, + AARCH64_INSN_SPCLREG_DSPSR_EL0 = 55848, + AARCH64_INSN_SPCLREG_DLR_EL0 = 55849, + AARCH64_INSN_SPCLREG_SPSR_EL2 = 57856, + AARCH64_INSN_SPCLREG_ELR_EL2 = 57857, + AARCH64_INSN_SPCLREG_SP_EL1 = 57864, + AARCH64_INSN_SPCLREG_SPSR_INQ = 57880, + AARCH64_INSN_SPCLREG_SPSR_ABT = 57881, + AARCH64_INSN_SPCLREG_SPSR_UND = 57882, + AARCH64_INSN_SPCLREG_SPSR_FIQ = 57883, + AARCH64_INSN_SPCLREG_SPSR_EL3 = 61952, + AARCH64_INSN_SPCLREG_ELR_EL3 = 61953, + AARCH64_INSN_SPCLREG_SP_EL2 = 61968, 
+}; + +typedef __le32 kprobe_opcode_t; + +struct arch_specific_insn { + struct arch_probe_insn api; + kprobe_opcode_t *xol_insn; + long unsigned int xol_restore; +}; + +enum probe_insn { + INSN_REJECTED = 0, + INSN_GOOD_NO_SLOT = 1, + INSN_GOOD = 2, +}; + +struct arm_smccc_quirk { + int id; + union { + long unsigned int a6; + } state; +}; + +enum ripas { + RSI_RIPAS_EMPTY = 0, + RSI_RIPAS_RAM = 1, + RSI_RIPAS_DESTROYED = 2, + RSI_RIPAS_DEV = 3, +}; + +struct arm64_mem_crypt_ops { + int (*encrypt)(long unsigned int, int); + int (*decrypt)(long unsigned int, int); +}; + +struct page_change_data { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +struct kvm_irq_routing_irqchip { + __u32 irqchip; + __u32 pin; +}; + +struct kvm_irq_routing_msi { + __u32 address_lo; + __u32 address_hi; + __u32 data; + union { + __u32 pad; + __u32 devid; + }; +}; + +struct kvm_irq_routing_s390_adapter { + __u64 ind_addr; + __u64 summary_addr; + __u64 ind_offset; + __u32 summary_offset; + __u32 adapter_id; +}; + +struct kvm_irq_routing_hv_sint { + __u32 vcpu; + __u32 sint; +}; + +struct kvm_irq_routing_xen_evtchn { + __u32 port; + __u32 vcpu; + __u32 priority; +}; + +struct kvm_irq_routing_entry { + __u32 gsi; + __u32 type; + __u32 flags; + __u32 pad; + union { + struct kvm_irq_routing_irqchip irqchip; + struct kvm_irq_routing_msi msi; + struct kvm_irq_routing_s390_adapter adapter; + struct kvm_irq_routing_hv_sint hv_sint; + struct kvm_irq_routing_xen_evtchn xen_evtchn; + __u32 pad[8]; + } u; +}; + +enum kvm_mode { + KVM_MODE_DEFAULT = 0, + KVM_MODE_PROTECTED = 1, + KVM_MODE_NV = 2, + KVM_MODE_NONE = 3, +}; + +struct dma_map_ops { + void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int); + void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int); + struct page * (*alloc_pages_op)(struct device *, size_t, dma_addr_t *, enum dma_data_direction, gfp_t); + void (*free_pages)(struct device *, size_t, struct page *, dma_addr_t, enum dma_data_direction); + int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int); + int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int); + dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); + int (*dma_supported)(struct device *, u64); + u64 (*get_required_mask)(struct device *); + size_t (*max_mapping_size)(struct device *); + size_t (*opt_mapping_size)(); + long unsigned int 
(*get_merge_boundary)(struct device *); +}; + +struct trace_event_raw_dma_map { + struct trace_entry ent; + u32 __data_loc_device; + u64 phys_addr; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_unmap { + struct trace_entry ent; + u32 __data_loc_device; + u64 addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_alloc_class { + struct trace_entry ent; + u32 __data_loc_device; + void *virt_addr; + u64 dma_addr; + size_t size; + gfp_t flags; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_alloc_sgt { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + gfp_t flags; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_free_class { + struct trace_entry ent; + u32 __data_loc_device; + void *virt_addr; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_free_sgt { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_raw_dma_map_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u32 __data_loc_dma_addrs; + u32 __data_loc_lengths; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_map_sg_err { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + int err; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_unmap_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_addrs; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_sync_single { + struct trace_entry ent; + u32 __data_loc_device; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_raw_dma_sync_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_dma_addrs; + u32 __data_loc_lengths; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_map { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_unmap { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_alloc_class { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_alloc_sgt { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_free_class { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_free_sgt { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_map_sg { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; + u32 dma_addrs; + const void *dma_addrs_ptr_; + u32 lengths; + const void *lengths_ptr_; +}; + +struct trace_event_data_offsets_dma_map_sg_err { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_unmap_sg { + u32 device; + const void *device_ptr_; + u32 
addrs; + const void *addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_sync_single { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_sync_sg { + u32 device; + const void *device_ptr_; + u32 dma_addrs; + const void *dma_addrs_ptr_; + u32 lengths; + const void *lengths_ptr_; +}; + +typedef void (*btf_trace_dma_map_page)(void *, struct device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_map_resource)(void *, struct device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_page)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_resource)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_alloc)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_pages)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_sgt_err)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_sgt)(void *, struct device *, struct sg_table *, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_free)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_free_pages)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_free_sgt)(void *, struct device *, struct sg_table *, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_map_sg)(void *, struct device *, struct scatterlist *, int, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_map_sg_err)(void *, struct device *, struct scatterlist *, int, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_sg)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_sync_single_for_cpu)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_single_for_device)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_sg_for_cpu)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_sg_for_device)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); + +struct dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; + long unsigned int attrs; +}; + +struct trace_buffer_meta { + __u32 meta_page_size; + __u32 meta_struct_len; + __u32 subbuf_size; + __u32 nr_subbufs; + struct { + __u64 lost_events; + __u32 id; + __u32 read; + } reader; + __u64 flags; + __u64 entries; + __u64 overrun; + __u64 read; + __u64 Reserved1; + __u64 Reserved2; +}; + +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING = 29, + RINGBUF_TYPE_TIME_EXTEND = 30, + RINGBUF_TYPE_TIME_STAMP = 31, +}; + +typedef bool (*ring_buffer_cond_fn)(void *); + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1, +}; + +struct ring_buffer_per_cpu; + +struct buffer_page; + +struct 
ring_buffer_iter { + struct ring_buffer_per_cpu *cpu_buffer; + long unsigned int head; + long unsigned int next_event; + struct buffer_page *head_page; + struct buffer_page *cache_reader_page; + long unsigned int cache_read; + long unsigned int cache_pages_removed; + u64 read_stamp; + u64 page_stamp; + struct ring_buffer_event *event; + size_t event_size; + int missed_events; +}; + +struct rb_irq_work { + struct irq_work work; + wait_queue_head_t waiters; + wait_queue_head_t full_waiters; + atomic_t seq; + bool waiters_pending; + bool full_waiters_pending; + bool wakeup_full; +}; + +struct trace_buffer { + unsigned int flags; + int cpus; + atomic_t record_disabled; + atomic_t resizing; + cpumask_var_t cpumask; + struct lock_class_key *reader_lock_key; + struct mutex mutex; + struct ring_buffer_per_cpu **buffers; + struct hlist_node node; + u64 (*clock)(); + struct rb_irq_work irq_work; + bool time_stamp_abs; + long unsigned int range_addr_start; + long unsigned int range_addr_end; + long int last_text_delta; + long int last_data_delta; + unsigned int subbuf_size; + unsigned int subbuf_order; + unsigned int max_data_size; +}; + +struct ring_buffer_meta { + int magic; + int struct_size; + long unsigned int text_addr; + long unsigned int data_addr; + long unsigned int first_buffer; + long unsigned int head_buffer; + long unsigned int commit_buffer; + __u32 subbuf_size; + __u32 nr_subbufs; + int buffers[0]; +}; + +enum { + RB_LEN_TIME_EXTEND = 8, + RB_LEN_TIME_STAMP = 8, +}; + +struct buffer_data_page { + u64 time_stamp; + local_t commit; + unsigned char data[0]; +}; + +struct buffer_data_read_page { + unsigned int order; + struct buffer_data_page *data; +}; + +struct buffer_page { + struct list_head list; + local_t write; + unsigned int read; + local_t entries; + long unsigned int real_end; + unsigned int order; + u32 id: 30; + u32 range: 1; + struct buffer_data_page *page; +}; + +struct rb_event_info { + u64 ts; + u64 delta; + u64 before; + u64 after; + long unsigned int length; + struct buffer_page *tail_page; + int add_timestamp; +}; + +enum { + RB_ADD_STAMP_NONE = 0, + RB_ADD_STAMP_EXTEND = 2, + RB_ADD_STAMP_ABSOLUTE = 4, + RB_ADD_STAMP_FORCE = 8, +}; + +enum { + RB_CTX_TRANSITION = 0, + RB_CTX_NMI = 1, + RB_CTX_IRQ = 2, + RB_CTX_SOFTIRQ = 3, + RB_CTX_NORMAL = 4, + RB_CTX_MAX = 5, +}; + +struct rb_time_struct { + local64_t time; +}; + +typedef struct rb_time_struct rb_time_t; + +struct ring_buffer_per_cpu { + int cpu; + atomic_t record_disabled; + atomic_t resize_disabled; + struct trace_buffer *buffer; + raw_spinlock_t reader_lock; + arch_spinlock_t lock; + struct lock_class_key lock_key; + struct buffer_data_page *free_page; + long unsigned int nr_pages; + unsigned int current_context; + struct list_head *pages; + long unsigned int cnt; + struct buffer_page *head_page; + struct buffer_page *tail_page; + struct buffer_page *commit_page; + struct buffer_page *reader_page; + long unsigned int lost_events; + long unsigned int last_overrun; + long unsigned int nest; + local_t entries_bytes; + local_t entries; + local_t overrun; + local_t commit_overrun; + local_t dropped_events; + local_t committing; + local_t commits; + local_t pages_touched; + local_t pages_lost; + local_t pages_read; + long int last_pages_touch; + size_t shortest_full; + long unsigned int read; + long unsigned int read_bytes; + rb_time_t write_stamp; + rb_time_t before_stamp; + u64 event_stamp[5]; + u64 read_stamp; + long unsigned int pages_removed; + unsigned int mapped; + unsigned int user_mapped; + struct mutex 
mapping_lock; + long unsigned int *subbuf_ids; + struct trace_buffer_meta *meta_page; + struct ring_buffer_meta *ring_meta; + long int nr_pages_to_update; + struct list_head new_pages; + struct work_struct update_pages_work; + struct completion update_done; + struct rb_irq_work irq_work; +}; + +struct rb_wait_data { + struct rb_irq_work *irq_work; + int seq; +}; + +enum bpf_core_relo_kind { + BPF_CORE_FIELD_BYTE_OFFSET = 0, + BPF_CORE_FIELD_BYTE_SIZE = 1, + BPF_CORE_FIELD_EXISTS = 2, + BPF_CORE_FIELD_SIGNED = 3, + BPF_CORE_FIELD_LSHIFT_U64 = 4, + BPF_CORE_FIELD_RSHIFT_U64 = 5, + BPF_CORE_TYPE_ID_LOCAL = 6, + BPF_CORE_TYPE_ID_TARGET = 7, + BPF_CORE_TYPE_EXISTS = 8, + BPF_CORE_TYPE_SIZE = 9, + BPF_CORE_ENUMVAL_EXISTS = 10, + BPF_CORE_ENUMVAL_VALUE = 11, + BPF_CORE_TYPE_MATCHES = 12, +}; + +struct bpf_core_relo { + __u32 insn_off; + __u32 type_id; + __u32 access_str_off; + enum bpf_core_relo_kind kind; +}; + +struct btf_enum { + __u32 name_off; + __s32 val; +}; + +struct btf_enum64 { + __u32 name_off; + __u32 val_lo32; + __u32 val_hi32; +}; + +struct bpf_core_cand { + const struct btf *btf; + __u32 id; +}; + +struct bpf_core_cand_list { + struct bpf_core_cand *cands; + int len; +}; + +struct bpf_core_accessor { + __u32 type_id; + __u32 idx; + const char *name; +}; + +struct bpf_core_spec { + const struct btf *btf; + struct bpf_core_accessor spec[64]; + __u32 root_type_id; + enum bpf_core_relo_kind relo_kind; + int len; + int raw_spec[64]; + int raw_len; + __u32 bit_offset; +}; + +struct bpf_core_relo_res { + __u64 orig_val; + __u64 new_val; + bool poison; + bool validate; + bool fail_memsz_adjust; + __u32 orig_sz; + __u32 orig_type_id; + __u32 new_sz; + __u32 new_type_id; +}; + +typedef __le32 uprobe_opcode_t; + +struct uprobe { + struct rb_node rb_node; + refcount_t ref; + struct rw_semaphore register_rwsem; + struct rw_semaphore consumer_rwsem; + struct list_head pending_list; + struct list_head consumers; + struct inode *inode; + union { + struct callback_head rcu; + struct work_struct work; + }; + loff_t offset; + loff_t ref_ctr_offset; + long unsigned int flags; + struct arch_uprobe arch; +}; + +enum rp_check { + RP_CHECK_CALL = 0, + RP_CHECK_CHAIN_CALL = 1, + RP_CHECK_RET = 2, +}; + +struct xol_area { + wait_queue_head_t wq; + long unsigned int *bitmap; + struct page *page; + long unsigned int vaddr; +}; + +struct vm_special_mapping { + const char *name; + struct page **pages; + vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); + int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); + void (*close)(const struct vm_special_mapping *, struct vm_area_struct *); +}; + +typedef struct { + struct srcu_struct *lock; + int idx; +} class_srcu_t; + +struct delayed_uprobe { + struct list_head list; + struct uprobe *uprobe; + struct mm_struct *mm; +}; + +struct __uprobe_key { + struct inode *inode; + loff_t offset; +}; + +struct map_info { + struct map_info *next; + struct mm_struct *mm; + long unsigned int vaddr; +}; + +struct zpool_driver { + char *type; + struct module *owner; + atomic_t refcount; + struct list_head list; + void * (*create)(const char *, gfp_t); + void (*destroy)(void *); + bool malloc_support_movable; + int (*malloc)(void *, size_t, gfp_t, long unsigned int *); + void (*free)(void *, long unsigned int); + bool sleep_mapped; + void * (*map)(void *, long unsigned int, enum zpool_mapmode); + void (*unmap)(void *, long unsigned int); + u64 (*total_pages)(void *); +}; + +struct zpool { + struct zpool_driver 
*driver; + void *pool; +}; + +struct saved { + struct path link; + struct delayed_call done; + const char *name; + unsigned int seq; +}; + +struct nameidata { + struct path path; + struct qstr last; + struct path root; + struct inode *inode; + unsigned int flags; + unsigned int state; + unsigned int seq; + unsigned int next_seq; + unsigned int m_seq; + unsigned int r_seq; + int last_type; + unsigned int depth; + int total_link_count; + struct saved *stack; + struct saved internal[2]; + struct filename *name; + const char *pathname; + struct nameidata *saved; + unsigned int root_seq; + int dfd; + vfsuid_t dir_vfsuid; + umode_t dir_mode; +}; + +struct renamedata { + struct mnt_idmap *old_mnt_idmap; + struct inode *old_dir; + struct dentry *old_dentry; + struct mnt_idmap *new_mnt_idmap; + struct inode *new_dir; + struct dentry *new_dentry; + struct inode **delegated_inode; + unsigned int flags; +}; + +enum { + LAST_NORM = 0, + LAST_ROOT = 1, + LAST_DOT = 2, + LAST_DOTDOT = 3, +}; + +enum { + WALK_TRAILING = 1, + WALK_MORE = 2, + WALK_NOFOLLOW = 4, +}; + +struct timerfd_ctx { + union { + struct hrtimer tmr; + struct alarm alarm; + } t; + ktime_t tintv; + ktime_t moffs; + wait_queue_head_t wqh; + u64 ticks; + int clockid; + short unsigned int expired; + short unsigned int settime_flags; + struct callback_head rcu; + struct list_head clist; + spinlock_t cancel_lock; + bool might_cancel; +}; + +struct pending_reservation { + struct rb_node rb_node; + ext4_lblk_t lclu; +}; + +struct rsvd_count { + int ndelayed; + bool first_do_lblk_found; + ext4_lblk_t first_do_lblk; + ext4_lblk_t last_do_lblk; + struct extent_status *left_es; + bool partial; + ext4_lblk_t lclu; +}; + +struct fat_cache { + struct list_head cache_list; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct fat_cache_id { + unsigned int id; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct reserve_ticket { + u64 bytes; + int error; + bool steal; + struct list_head list; + wait_queue_head_t wait; +}; + +enum { + Opt_err___4 = 0, + Opt_enc = 1, + Opt_hash = 2, +}; + +struct sig_instance { + void (*free)(struct sig_instance *); + union { + struct { + char head[72]; + struct crypto_instance base; + }; + struct sig_alg alg; + }; +}; + +struct crypto_sig_spawn { + struct crypto_spawn base; +}; + +struct rq_qos_wait_data { + struct wait_queue_entry wq; + struct task_struct *task; + struct rq_wait *rqw; + acquire_inflight_cb_t *cb; + void *private_data; + bool got_token; +}; + +struct io_uring_file_index_range { + __u32 off; + __u32 len; + __u64 resv; +}; + +struct waitid_info { + pid_t pid; + uid_t uid; + int status; + int cause; +}; + +struct wait_opts { + enum pid_type wo_type; + int wo_flags; + struct pid *wo_pid; + struct waitid_info *wo_info; + int wo_stat; + struct rusage *wo_rusage; + wait_queue_entry_t child_wait; + int notask_error; +}; + +struct io_waitid_async { + struct io_kiocb *req; + struct wait_opts wo; +}; + +struct io_waitid { + struct file *file; + int which; + pid_t upid; + int options; + atomic_t refs; + struct wait_queue_head *head; + struct siginfo *infop; + struct waitid_info info; +}; + +struct pci_bus_resource { + struct list_head list; + struct resource *res; +}; + +struct kbdiacr { + unsigned char diacr; + unsigned char base; + unsigned char result; +}; + +struct kbdiacrs { + unsigned int kb_cnt; + struct kbdiacr kbdiacr[256]; +}; + +struct kbdiacruc { + unsigned int diacr; + unsigned int base; + unsigned int result; +}; + +struct kbdiacrsuc { + unsigned int kb_cnt; + struct kbdiacruc 
kbdiacruc[256]; +}; + +struct keyboard_notifier_param { + struct vc_data *vc; + int down; + int shift; + int ledstate; + unsigned int value; +}; + +struct kbd_struct { + unsigned char lockstate; + unsigned char slockstate; + unsigned char ledmode: 1; + unsigned char ledflagstate: 4; + char: 3; + unsigned char default_ledflagstate: 4; + unsigned char kbdmode: 3; + int: 1; + unsigned char modeflags: 5; +}; + +typedef void k_handler_fn(struct vc_data *, unsigned char, char); + +typedef void fn_handler_fn(struct vc_data *); + +struct getset_keycode_data { + struct input_keymap_entry ke; + int error; +}; + +enum pm_qos_flags_status { + PM_QOS_FLAGS_UNDEFINED = -1, + PM_QOS_FLAGS_NONE = 0, + PM_QOS_FLAGS_SOME = 1, + PM_QOS_FLAGS_ALL = 2, +}; + +enum pm_qos_req_action { + PM_QOS_ADD_REQ = 0, + PM_QOS_UPDATE_REQ = 1, + PM_QOS_REMOVE_REQ = 2, +}; + +enum blk_integrity_flags { + BLK_INTEGRITY_NOVERIFY = 1, + BLK_INTEGRITY_NOGENERATE = 2, + BLK_INTEGRITY_DEVICE_CAPABLE = 4, + BLK_INTEGRITY_REF_TAG = 8, + BLK_INTEGRITY_STACKED = 16, +}; + +enum { + NVME_CAP_CSS_NVM = 1, + NVME_CAP_CSS_CSI = 64, +}; + +enum { + NVME_CAP_CRMS_CRWMS = 576460752303423488ULL, + NVME_CAP_CRMS_CRIMS = 1152921504606846976ULL, +}; + +enum { + NVME_PS_FLAGS_MAX_POWER_SCALE = 1, + NVME_PS_FLAGS_NON_OP_STATE = 2, +}; + +enum nvme_ctrl_attr { + NVME_CTRL_ATTR_HID_128_BIT = 1, + NVME_CTRL_ATTR_TBKAS = 64, + NVME_CTRL_ATTR_ELBAS = 32768, +}; + +struct nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 cmic; + __u8 mdts; + __le16 cntlid; + __le32 ver; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __u8 rsvd100[11]; + __u8 cntrltype; + __u8 fguid[16]; + __le16 crdt1; + __le16 crdt2; + __le16 crdt3; + __u8 rsvd134[122]; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 avscc; + __u8 apsta; + __le16 wctemp; + __le16 cctemp; + __le16 mtfa; + __le32 hmpre; + __le32 hmmin; + __u8 tnvmcap[16]; + __u8 unvmcap[16]; + __le32 rpmbs; + __le16 edstt; + __u8 dsto; + __u8 fwug; + __le16 kas; + __le16 hctma; + __le16 mntmt; + __le16 mxtmt; + __le32 sanicap; + __le32 hmminds; + __le16 hmmaxd; + __le16 nvmsetidmax; + __le16 endgidmax; + __u8 anatt; + __u8 anacap; + __le32 anagrpmax; + __le32 nanagrpid; + __u8 rsvd352[160]; + __u8 sqes; + __u8 cqes; + __le16 maxcmd; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 nvscc; + __u8 nwpc; + __le16 acwu; + __u8 rsvd534[2]; + __le32 sgls; + __le32 mnan; + __u8 rsvd544[224]; + char subnqn[256]; + __u8 rsvd1024[768]; + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 ctrattr; + __u8 msdbd; + __u8 rsvd1804[2]; + __u8 dctype; + __u8 rsvd1807[241]; + struct nvme_id_power_state psd[32]; + __u8 vs[1024]; +}; + +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; +}; + +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __u8 dlfeat; + __le16 nawun; + __le16 nawupf; + __le16 nacwu; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __le16 noiob; + __u8 nvmcap[16]; + __le16 npwg; + __le16 npwa; + __le16 npdg; + __le16 npda; + __le16 nows; + __u8 rsvd74[18]; + __le32 anagrpid; + __u8 rsvd96[3]; + __u8 nsattr; + __le16 nvmsetid; + __le16 endgid; + __u8 nguid[16]; + __u8 eui64[8]; + struct nvme_lbaf lbaf[64]; + __u8 vs[3712]; +}; + +struct nvme_id_ns_cs_indep { + __u8 nsfeat; + __u8 nmic; 
+ __u8 rescap; + __u8 fpi; + __le32 anagrpid; + __u8 nsattr; + __u8 rsvd9; + __le16 nvmsetid; + __le16 endgid; + __u8 nstat; + __u8 rsvd15[4081]; +}; + +struct nvme_id_ns_nvm { + __le64 lbstm; + __u8 pic; + __u8 rsvd9[3]; + __le32 elbaf[64]; + __u8 rsvd268[3828]; +}; + +enum { + NVME_ID_NS_NVM_STS_MASK = 127, + NVME_ID_NS_NVM_GUARD_SHIFT = 7, + NVME_ID_NS_NVM_GUARD_MASK = 3, + NVME_ID_NS_NVM_QPIF_SHIFT = 9, + NVME_ID_NS_NVM_QPIF_MASK = 15, + NVME_ID_NS_NVM_QPIFS = 8, +}; + +struct nvme_id_ctrl_nvm { + __u8 vsl; + __u8 wzsl; + __u8 wusl; + __u8 dmrl; + __le32 dmrsl; + __le64 dmsl; + __u8 rsvd16[4080]; +}; + +enum { + NVME_CSI_NVM = 0, + NVME_CSI_ZNS = 2, +}; + +enum { + NVME_NS_FEAT_THIN = 1, + NVME_NS_FEAT_ATOMICS = 2, + NVME_NS_FEAT_IO_OPT = 16, + NVME_NS_ATTR_RO = 1, + NVME_NS_FLBAS_LBA_MASK = 15, + NVME_NS_FLBAS_LBA_UMASK = 96, + NVME_NS_FLBAS_LBA_SHIFT = 1, + NVME_NS_FLBAS_META_EXT = 16, + NVME_NS_NMIC_SHARED = 1, + NVME_NS_ROTATIONAL = 16, + NVME_NS_VWC_NOT_PRESENT = 32, + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, + NVME_NS_DPC_PI_LAST = 16, + NVME_NS_DPC_PI_FIRST = 8, + NVME_NS_DPC_PI_TYPE3 = 4, + NVME_NS_DPC_PI_TYPE2 = 2, + NVME_NS_DPC_PI_TYPE1 = 1, + NVME_NS_DPS_PI_FIRST = 8, + NVME_NS_DPS_PI_MASK = 7, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, +}; + +enum { + NVME_NSTAT_NRDY = 1, +}; + +enum { + NVME_NVM_NS_16B_GUARD = 0, + NVME_NVM_NS_32B_GUARD = 1, + NVME_NVM_NS_64B_GUARD = 2, + NVME_NVM_NS_QTYPE_GUARD = 3, +}; + +struct nvme_ns_id_desc { + __u8 nidt; + __u8 nidl; + __le16 reserved; +}; + +enum { + NVME_NIDT_EUI64 = 1, + NVME_NIDT_NGUID = 2, + NVME_NIDT_UUID = 3, + NVME_NIDT_CSI = 4, +}; + +struct nvme_fw_slot_info_log { + __u8 afi; + __u8 rsvd1[7]; + __le64 frs[7]; + __u8 rsvd64[448]; +}; + +enum { + NVME_AER_ERROR = 0, + NVME_AER_SMART = 1, + NVME_AER_NOTICE = 2, + NVME_AER_CSS = 6, + NVME_AER_VS = 7, +}; + +enum { + NVME_AER_ERROR_PERSIST_INT_ERR = 3, +}; + +enum { + NVME_AER_NOTICE_NS_CHANGED = 0, + NVME_AER_NOTICE_FW_ACT_STARTING = 1, + NVME_AER_NOTICE_ANA = 3, + NVME_AER_NOTICE_DISC_CHANGED = 240, +}; + +enum { + NVME_AEN_CFG_NS_ATTR = 256, + NVME_AEN_CFG_FW_ACT = 512, + NVME_AEN_CFG_ANA_CHANGE = 2048, + NVME_AEN_CFG_DISC_CHANGE = -2147483648, +}; + +enum { + NVME_CMD_FUSE_FIRST = 1, + NVME_CMD_FUSE_SECOND = 2, + NVME_CMD_SGL_METABUF = 64, + NVME_CMD_SGL_METASEG = 128, + NVME_CMD_SGL_ALL = 192, +}; + +enum { + NVME_DSMGMT_IDR = 1, + NVME_DSMGMT_IDW = 2, + NVME_DSMGMT_AD = 4, +}; + +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +enum nvme_zone_mgmt_action { + NVME_ZONE_CLOSE = 1, + NVME_ZONE_FINISH = 2, + NVME_ZONE_OPEN = 3, + NVME_ZONE_RESET = 4, + NVME_ZONE_OFFLINE = 5, + NVME_ZONE_SET_DESC_EXT = 16, +}; + +struct nvme_feat_auto_pst { + __le64 entries[32]; +}; + +struct nvme_feat_host_behavior { + __u8 acre; + __u8 etdas; + __u8 lbafee; + __u8 resv1[509]; +}; + +enum { + NVME_ENABLE_ACRE = 1, + NVME_ENABLE_LBAFEE = 1, +}; + +enum { + NVME_QUEUE_PHYS_CONTIG = 1, + NVME_CQ_IRQ_ENABLED = 2, + NVME_SQ_PRIO_URGENT = 0, + NVME_SQ_PRIO_HIGH = 2, + NVME_SQ_PRIO_MEDIUM = 4, + NVME_SQ_PRIO_LOW = 6, + NVME_FEAT_ARBITRATION = 1, + NVME_FEAT_POWER_MGMT = 2, + NVME_FEAT_LBA_RANGE = 3, + NVME_FEAT_TEMP_THRESH = 4, + NVME_FEAT_ERR_RECOVERY = 5, + NVME_FEAT_VOLATILE_WC = 6, + NVME_FEAT_NUM_QUEUES = 7, + NVME_FEAT_IRQ_COALESCE = 8, + NVME_FEAT_IRQ_CONFIG = 9, + NVME_FEAT_WRITE_ATOMIC = 10, + NVME_FEAT_ASYNC_EVENT = 11, + NVME_FEAT_AUTO_PST = 12, + 
NVME_FEAT_HOST_MEM_BUF = 13, + NVME_FEAT_TIMESTAMP = 14, + NVME_FEAT_KATO = 15, + NVME_FEAT_HCTM = 16, + NVME_FEAT_NOPSC = 17, + NVME_FEAT_RRL = 18, + NVME_FEAT_PLM_CONFIG = 19, + NVME_FEAT_PLM_WINDOW = 20, + NVME_FEAT_HOST_BEHAVIOR = 22, + NVME_FEAT_SANITIZE = 23, + NVME_FEAT_SW_PROGRESS = 128, + NVME_FEAT_HOST_ID = 129, + NVME_FEAT_RESV_MASK = 130, + NVME_FEAT_RESV_PERSIST = 131, + NVME_FEAT_WRITE_PROTECT = 132, + NVME_FEAT_VENDOR_START = 192, + NVME_FEAT_VENDOR_END = 255, + NVME_LOG_SUPPORTED = 0, + NVME_LOG_ERROR = 1, + NVME_LOG_SMART = 2, + NVME_LOG_FW_SLOT = 3, + NVME_LOG_CHANGED_NS = 4, + NVME_LOG_CMD_EFFECTS = 5, + NVME_LOG_DEVICE_SELF_TEST = 6, + NVME_LOG_TELEMETRY_HOST = 7, + NVME_LOG_TELEMETRY_CTRL = 8, + NVME_LOG_ENDURANCE_GROUP = 9, + NVME_LOG_ANA = 12, + NVME_LOG_FEATURES = 18, + NVME_LOG_RMI = 22, + NVME_LOG_DISC = 112, + NVME_LOG_RESERVATION = 128, + NVME_FWACT_REPL = 0, + NVME_FWACT_REPL_ACTV = 8, + NVME_FWACT_ACTV = 16, +}; + +enum { + NVME_SCT_GENERIC = 0, + NVME_SC_SUCCESS = 0, + NVME_SC_INVALID_OPCODE = 1, + NVME_SC_INVALID_FIELD = 2, + NVME_SC_CMDID_CONFLICT = 3, + NVME_SC_DATA_XFER_ERROR = 4, + NVME_SC_POWER_LOSS = 5, + NVME_SC_INTERNAL = 6, + NVME_SC_ABORT_REQ = 7, + NVME_SC_ABORT_QUEUE = 8, + NVME_SC_FUSED_FAIL = 9, + NVME_SC_FUSED_MISSING = 10, + NVME_SC_INVALID_NS = 11, + NVME_SC_CMD_SEQ_ERROR = 12, + NVME_SC_SGL_INVALID_LAST = 13, + NVME_SC_SGL_INVALID_COUNT = 14, + NVME_SC_SGL_INVALID_DATA = 15, + NVME_SC_SGL_INVALID_METADATA = 16, + NVME_SC_SGL_INVALID_TYPE = 17, + NVME_SC_CMB_INVALID_USE = 18, + NVME_SC_PRP_INVALID_OFFSET = 19, + NVME_SC_ATOMIC_WU_EXCEEDED = 20, + NVME_SC_OP_DENIED = 21, + NVME_SC_SGL_INVALID_OFFSET = 22, + NVME_SC_RESERVED = 23, + NVME_SC_HOST_ID_INCONSIST = 24, + NVME_SC_KA_TIMEOUT_EXPIRED = 25, + NVME_SC_KA_TIMEOUT_INVALID = 26, + NVME_SC_ABORTED_PREEMPT_ABORT = 27, + NVME_SC_SANITIZE_FAILED = 28, + NVME_SC_SANITIZE_IN_PROGRESS = 29, + NVME_SC_SGL_INVALID_GRANULARITY = 30, + NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 31, + NVME_SC_NS_WRITE_PROTECTED = 32, + NVME_SC_CMD_INTERRUPTED = 33, + NVME_SC_TRANSIENT_TR_ERR = 34, + NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 36, + NVME_SC_INVALID_IO_CMD_SET = 44, + NVME_SC_LBA_RANGE = 128, + NVME_SC_CAP_EXCEEDED = 129, + NVME_SC_NS_NOT_READY = 130, + NVME_SC_RESERVATION_CONFLICT = 131, + NVME_SC_FORMAT_IN_PROGRESS = 132, + NVME_SCT_COMMAND_SPECIFIC = 256, + NVME_SC_CQ_INVALID = 256, + NVME_SC_QID_INVALID = 257, + NVME_SC_QUEUE_SIZE = 258, + NVME_SC_ABORT_LIMIT = 259, + NVME_SC_ABORT_MISSING = 260, + NVME_SC_ASYNC_LIMIT = 261, + NVME_SC_FIRMWARE_SLOT = 262, + NVME_SC_FIRMWARE_IMAGE = 263, + NVME_SC_INVALID_VECTOR = 264, + NVME_SC_INVALID_LOG_PAGE = 265, + NVME_SC_INVALID_FORMAT = 266, + NVME_SC_FW_NEEDS_CONV_RESET = 267, + NVME_SC_INVALID_QUEUE = 268, + NVME_SC_FEATURE_NOT_SAVEABLE = 269, + NVME_SC_FEATURE_NOT_CHANGEABLE = 270, + NVME_SC_FEATURE_NOT_PER_NS = 271, + NVME_SC_FW_NEEDS_SUBSYS_RESET = 272, + NVME_SC_FW_NEEDS_RESET = 273, + NVME_SC_FW_NEEDS_MAX_TIME = 274, + NVME_SC_FW_ACTIVATE_PROHIBITED = 275, + NVME_SC_OVERLAPPING_RANGE = 276, + NVME_SC_NS_INSUFFICIENT_CAP = 277, + NVME_SC_NS_ID_UNAVAILABLE = 278, + NVME_SC_NS_ALREADY_ATTACHED = 280, + NVME_SC_NS_IS_PRIVATE = 281, + NVME_SC_NS_NOT_ATTACHED = 282, + NVME_SC_THIN_PROV_NOT_SUPP = 283, + NVME_SC_CTRL_LIST_INVALID = 284, + NVME_SC_SELT_TEST_IN_PROGRESS = 285, + NVME_SC_BP_WRITE_PROHIBITED = 286, + NVME_SC_CTRL_ID_INVALID = 287, + NVME_SC_SEC_CTRL_STATE_INVALID = 288, + NVME_SC_CTRL_RES_NUM_INVALID = 289, + NVME_SC_RES_ID_INVALID = 290, + 
NVME_SC_PMR_SAN_PROHIBITED = 291, + NVME_SC_ANA_GROUP_ID_INVALID = 292, + NVME_SC_ANA_ATTACH_FAILED = 293, + NVME_SC_BAD_ATTRIBUTES = 384, + NVME_SC_INVALID_PI = 385, + NVME_SC_READ_ONLY = 386, + NVME_SC_ONCS_NOT_SUPPORTED = 387, + NVME_SC_CONNECT_FORMAT = 384, + NVME_SC_CONNECT_CTRL_BUSY = 385, + NVME_SC_CONNECT_INVALID_PARAM = 386, + NVME_SC_CONNECT_RESTART_DISC = 387, + NVME_SC_CONNECT_INVALID_HOST = 388, + NVME_SC_DISCOVERY_RESTART = 400, + NVME_SC_AUTH_REQUIRED = 401, + NVME_SC_ZONE_BOUNDARY_ERROR = 440, + NVME_SC_ZONE_FULL = 441, + NVME_SC_ZONE_READ_ONLY = 442, + NVME_SC_ZONE_OFFLINE = 443, + NVME_SC_ZONE_INVALID_WRITE = 444, + NVME_SC_ZONE_TOO_MANY_ACTIVE = 445, + NVME_SC_ZONE_TOO_MANY_OPEN = 446, + NVME_SC_ZONE_INVALID_TRANSITION = 447, + NVME_SCT_MEDIA_ERROR = 512, + NVME_SC_WRITE_FAULT = 640, + NVME_SC_READ_ERROR = 641, + NVME_SC_GUARD_CHECK = 642, + NVME_SC_APPTAG_CHECK = 643, + NVME_SC_REFTAG_CHECK = 644, + NVME_SC_COMPARE_FAILED = 645, + NVME_SC_ACCESS_DENIED = 646, + NVME_SC_UNWRITTEN_BLOCK = 647, + NVME_SCT_PATH = 768, + NVME_SC_INTERNAL_PATH_ERROR = 768, + NVME_SC_ANA_PERSISTENT_LOSS = 769, + NVME_SC_ANA_INACCESSIBLE = 770, + NVME_SC_ANA_TRANSITION = 771, + NVME_SC_CTRL_PATH_ERROR = 864, + NVME_SC_HOST_PATH_ERROR = 880, + NVME_SC_HOST_ABORTED_CMD = 881, + NVME_SC_MASK = 255, + NVME_SCT_MASK = 1792, + NVME_SCT_SC_MASK = 2047, + NVME_STATUS_CRD = 6144, + NVME_STATUS_MORE = 8192, + NVME_STATUS_DNR = 16384, +}; + +enum nvme_quirks { + NVME_QUIRK_STRIPE_SIZE = 1, + NVME_QUIRK_IDENTIFY_CNS = 2, + NVME_QUIRK_DEALLOCATE_ZEROES = 4, + NVME_QUIRK_DELAY_BEFORE_CHK_RDY = 8, + NVME_QUIRK_NO_APST = 16, + NVME_QUIRK_NO_DEEPEST_PS = 32, + NVME_QUIRK_QDEPTH_ONE = 64, + NVME_QUIRK_MEDIUM_PRIO_SQ = 128, + NVME_QUIRK_IGNORE_DEV_SUBNQN = 256, + NVME_QUIRK_DISABLE_WRITE_ZEROES = 512, + NVME_QUIRK_SIMPLE_SUSPEND = 1024, + NVME_QUIRK_SINGLE_VECTOR = 2048, + NVME_QUIRK_128_BYTES_SQES = 4096, + NVME_QUIRK_SHARED_TAGS = 8192, + NVME_QUIRK_NO_TEMP_THRESH_CHANGE = 16384, + NVME_QUIRK_NO_NS_DESC_LIST = 32768, + NVME_QUIRK_DMA_ADDRESS_BITS_48 = 65536, + NVME_QUIRK_SKIP_CID_GEN = 131072, + NVME_QUIRK_BOGUS_NID = 262144, + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = 524288, + NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = 1048576, + NVME_QUIRK_BROKEN_MSI = 2097152, +}; + +enum nvme_ctrl_flags { + NVME_CTRL_FAILFAST_EXPIRED = 0, + NVME_CTRL_ADMIN_Q_STOPPED = 1, + NVME_CTRL_STARTED_ONCE = 2, + NVME_CTRL_STOPPED = 3, + NVME_CTRL_SKIP_ID_CNS_CS = 4, + NVME_CTRL_DIRTY_CAPABILITY = 5, + NVME_CTRL_FROZEN = 6, +}; + +struct nvmf_host; + +struct nvmf_ctrl_options { + unsigned int mask; + int max_reconnects; + char *transport; + char *subsysnqn; + char *traddr; + char *trsvcid; + char *host_traddr; + char *host_iface; + size_t queue_size; + unsigned int nr_io_queues; + unsigned int reconnect_delay; + bool discovery_nqn; + bool duplicate_connect; + unsigned int kato; + struct nvmf_host *host; + char *dhchap_secret; + char *dhchap_ctrl_secret; + struct key *keyring; + struct key *tls_key; + bool tls; + bool disable_sqflow; + bool hdr_digest; + bool data_digest; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; + int tos; + int fast_io_fail_tmo; +}; + +enum { + NVME_SUBMIT_AT_HEAD = 1, + NVME_SUBMIT_NOWAIT = 2, + NVME_SUBMIT_RESERVED = 4, + NVME_SUBMIT_RETRY = 8, +}; + +struct nvme_zone_info { + u64 zone_size; + unsigned int max_open_zones; + unsigned int max_active_zones; +}; + +struct nvmf_host { + struct kref ref; + struct list_head list; + char nqn[223]; + uuid_t id; +}; + +struct trace_event_raw_nvme_setup_cmd { + 
struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + u8 opcode; + u8 flags; + u8 fctype; + u16 cid; + u32 nsid; + bool metadata; + u8 cdw10[24]; + char __data[0]; +}; + +struct trace_event_raw_nvme_complete_rq { + struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + int cid; + u64 result; + u8 retries; + u8 flags; + u16 status; + char __data[0]; +}; + +struct trace_event_raw_nvme_async_event { + struct trace_entry ent; + int ctrl_id; + u32 result; + char __data[0]; +}; + +struct trace_event_raw_nvme_sq { + struct trace_entry ent; + int ctrl_id; + char disk[32]; + int qid; + u16 sq_head; + u16 sq_tail; + char __data[0]; +}; + +struct trace_event_data_offsets_nvme_setup_cmd {}; + +struct trace_event_data_offsets_nvme_complete_rq {}; + +struct trace_event_data_offsets_nvme_async_event {}; + +struct trace_event_data_offsets_nvme_sq {}; + +typedef void (*btf_trace_nvme_setup_cmd)(void *, struct request *, struct nvme_command *); + +typedef void (*btf_trace_nvme_complete_rq)(void *, struct request *); + +typedef void (*btf_trace_nvme_async_event)(void *, struct nvme_ctrl *, u32); + +typedef void (*btf_trace_nvme_sq)(void *, struct request *, __le16, int); + +struct nvme_ns_info { + struct nvme_ns_ids ids; + u32 nsid; + __le32 anagrpid; + u8 pi_offset; + bool is_shared; + bool is_readonly; + bool is_ready; + bool is_removed; + bool is_rotational; + bool no_vwc; +}; + +enum nvme_disposition { + COMPLETE___2 = 0, + RETRY = 1, + FAILOVER = 2, + AUTHENTICATE = 3, +}; + +struct nvme_core_quirk_entry { + u16 vid; + const char *mn; + const char *fr; + long unsigned int quirks; +}; + +struct async_scan_info { + struct nvme_ctrl *ctrl; + atomic_t next_nsid; + __le32 *ns_list; +}; + +enum usb_led_event { + USB_LED_EVENT_HOST = 0, + USB_LED_EVENT_GADGET = 1, +}; + +enum xhci_overhead_type { + LS_OVERHEAD_TYPE = 0, + FS_OVERHEAD_TYPE = 1, + HS_OVERHEAD_TYPE = 2, +}; + +struct pps_event_time { + struct timespec64 ts_real; +}; + +enum ptp_clock_events { + PTP_CLOCK_ALARM = 0, + PTP_CLOCK_EXTTS = 1, + PTP_CLOCK_EXTOFF = 2, + PTP_CLOCK_PPS = 3, + PTP_CLOCK_PPSUSR = 4, +}; + +struct ptp_clock_event { + int type; + int index; + union { + u64 timestamp; + s64 offset; + struct pps_event_time pps_times; + }; +}; + +struct alias_prop { + struct list_head link; + const char *alias; + struct device_node *np; + int id; + char stem[0]; +}; + +typedef int (*armpmu_init_fn)(struct arm_pmu *); + +struct pmu_probe_info { + unsigned int cpuid; + unsigned int mask; + armpmu_init_fn init; +}; + +enum ethtool_multicast_groups { + ETHNL_MCGRP_MONITOR = 0, +}; + +struct phy_device_node { + enum phy_upstream upstream_type; + union { + struct net_device *netdev; + struct phy_device *phydev; + } upstream; + struct sfp_bus *parent_sfp_bus; + struct phy_device *phy; +}; + +enum ethnl_sock_type { + ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH = 0, +}; + +struct ethnl_sock_priv { + struct net_device *dev; + u32 portid; + enum ethnl_sock_type type; +}; + +struct ethnl_dump_ctx { + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct ethnl_reply_data *reply_data; + long unsigned int pos_ifindex; +}; + +typedef void (*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *); + +enum ethtool_module_fw_flash_status { + ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED = 1, + ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS = 2, + ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED = 3, + ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR = 4, +}; + +enum { + SFF8024_ID_UNK = 0, + SFF8024_ID_SFF_8472 = 2, + SFF8024_ID_SFP 
= 3, + SFF8024_ID_DWDM_SFP = 11, + SFF8024_ID_QSFP_8438 = 12, + SFF8024_ID_QSFP_8436_8636 = 13, + SFF8024_ID_QSFP28_8636 = 17, + SFF8024_ID_QSFP_DD = 24, + SFF8024_ID_OSFP = 25, + SFF8024_ID_DSFP = 27, + SFF8024_ID_QSFP_PLUS_CMIS = 30, + SFF8024_ID_SFP_DD_CMIS = 31, + SFF8024_ID_SFP_PLUS_CMIS = 32, + SFF8024_ENCODING_UNSPEC = 0, + SFF8024_ENCODING_8B10B = 1, + SFF8024_ENCODING_4B5B = 2, + SFF8024_ENCODING_NRZ = 3, + SFF8024_ENCODING_8472_MANCHESTER = 4, + SFF8024_ENCODING_8472_SONET = 5, + SFF8024_ENCODING_8472_64B66B = 6, + SFF8024_ENCODING_8436_MANCHESTER = 6, + SFF8024_ENCODING_8436_SONET = 4, + SFF8024_ENCODING_8436_64B66B = 5, + SFF8024_ENCODING_256B257B = 7, + SFF8024_ENCODING_PAM4 = 8, + SFF8024_CONNECTOR_UNSPEC = 0, + SFF8024_CONNECTOR_SC = 1, + SFF8024_CONNECTOR_FIBERJACK = 6, + SFF8024_CONNECTOR_LC = 7, + SFF8024_CONNECTOR_MT_RJ = 8, + SFF8024_CONNECTOR_MU = 9, + SFF8024_CONNECTOR_SG = 10, + SFF8024_CONNECTOR_OPTICAL_PIGTAIL = 11, + SFF8024_CONNECTOR_MPO_1X12 = 12, + SFF8024_CONNECTOR_MPO_2X16 = 13, + SFF8024_CONNECTOR_HSSDC_II = 32, + SFF8024_CONNECTOR_COPPER_PIGTAIL = 33, + SFF8024_CONNECTOR_RJ45 = 34, + SFF8024_CONNECTOR_NOSEPARATE = 35, + SFF8024_CONNECTOR_MXC_2X16 = 36, + SFF8024_ECC_UNSPEC = 0, + SFF8024_ECC_100G_25GAUI_C2M_AOC = 1, + SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 2, + SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 3, + SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 4, + SFF8024_ECC_100GBASE_SR10 = 5, + SFF8024_ECC_100GBASE_CR4 = 11, + SFF8024_ECC_25GBASE_CR_S = 12, + SFF8024_ECC_25GBASE_CR_N = 13, + SFF8024_ECC_10GBASE_T_SFI = 22, + SFF8024_ECC_10GBASE_T_SR = 28, + SFF8024_ECC_5GBASE_T = 29, + SFF8024_ECC_2_5GBASE_T = 30, +}; + +enum { + SFP_PHYS_ID = 0, + SFP_PHYS_EXT_ID = 1, + SFP_PHYS_EXT_ID_SFP = 4, + SFP_CONNECTOR = 2, + SFP_COMPLIANCE = 3, + SFP_ENCODING = 11, + SFP_BR_NOMINAL = 12, + SFP_RATE_ID = 13, + SFF_RID_8079 = 1, + SFF_RID_8431_RX_ONLY = 2, + SFF_RID_8431_TX_ONLY = 4, + SFF_RID_8431 = 6, + SFF_RID_10G8G = 14, + SFP_LINK_LEN_SM_KM = 14, + SFP_LINK_LEN_SM_100M = 15, + SFP_LINK_LEN_50UM_OM2_10M = 16, + SFP_LINK_LEN_62_5UM_OM1_10M = 17, + SFP_LINK_LEN_COPPER_1M = 18, + SFP_LINK_LEN_50UM_OM4_10M = 18, + SFP_LINK_LEN_50UM_OM3_10M = 19, + SFP_VENDOR_NAME = 20, + SFP_VENDOR_OUI = 37, + SFP_VENDOR_PN = 40, + SFP_VENDOR_REV = 56, + SFP_OPTICAL_WAVELENGTH_MSB = 60, + SFP_OPTICAL_WAVELENGTH_LSB = 61, + SFP_CABLE_SPEC = 60, + SFP_CC_BASE = 63, + SFP_OPTIONS = 64, + SFP_OPTIONS_HIGH_POWER_LEVEL = 8192, + SFP_OPTIONS_PAGING_A2 = 4096, + SFP_OPTIONS_RETIMER = 2048, + SFP_OPTIONS_COOLED_XCVR = 1024, + SFP_OPTIONS_POWER_DECL = 512, + SFP_OPTIONS_RX_LINEAR_OUT = 256, + SFP_OPTIONS_RX_DECISION_THRESH = 128, + SFP_OPTIONS_TUNABLE_TX = 64, + SFP_OPTIONS_RATE_SELECT = 32, + SFP_OPTIONS_TX_DISABLE = 16, + SFP_OPTIONS_TX_FAULT = 8, + SFP_OPTIONS_LOS_INVERTED = 4, + SFP_OPTIONS_LOS_NORMAL = 2, + SFP_BR_MAX = 66, + SFP_BR_MIN = 67, + SFP_VENDOR_SN = 68, + SFP_DATECODE = 84, + SFP_DIAGMON = 92, + SFP_DIAGMON_DDM = 64, + SFP_DIAGMON_INT_CAL = 32, + SFP_DIAGMON_EXT_CAL = 16, + SFP_DIAGMON_RXPWR_AVG = 8, + SFP_DIAGMON_ADDRMODE = 4, + SFP_ENHOPTS = 93, + SFP_ENHOPTS_ALARMWARN = 128, + SFP_ENHOPTS_SOFT_TX_DISABLE = 64, + SFP_ENHOPTS_SOFT_TX_FAULT = 32, + SFP_ENHOPTS_SOFT_RX_LOS = 16, + SFP_ENHOPTS_SOFT_RATE_SELECT = 8, + SFP_ENHOPTS_APP_SELECT_SFF8079 = 4, + SFP_ENHOPTS_SOFT_RATE_SFF8431 = 2, + SFP_SFF8472_COMPLIANCE = 94, + SFP_SFF8472_COMPLIANCE_NONE = 0, + SFP_SFF8472_COMPLIANCE_REV9_3 = 1, + SFP_SFF8472_COMPLIANCE_REV9_5 = 2, + SFP_SFF8472_COMPLIANCE_REV10_2 = 3, + SFP_SFF8472_COMPLIANCE_REV10_4 
= 4, + SFP_SFF8472_COMPLIANCE_REV11_0 = 5, + SFP_SFF8472_COMPLIANCE_REV11_3 = 6, + SFP_SFF8472_COMPLIANCE_REV11_4 = 7, + SFP_SFF8472_COMPLIANCE_REV12_0 = 8, + SFP_CC_EXT = 95, +}; + +struct ethtool_module_fw_flash { + struct list_head list; + netdevice_tracker dev_tracker; + struct work_struct work; + struct ethtool_cmis_fw_update_params fw_update; +}; + +struct module_reply_data { + struct ethnl_reply_data base; + struct ethtool_module_power_mode_params power; +}; + +enum nft_range_ops { + NFT_RANGE_EQ = 0, + NFT_RANGE_NEQ = 1, +}; + +enum nft_range_attributes { + NFTA_RANGE_UNSPEC = 0, + NFTA_RANGE_SREG = 1, + NFTA_RANGE_OP = 2, + NFTA_RANGE_FROM_DATA = 3, + NFTA_RANGE_TO_DATA = 4, + __NFTA_RANGE_MAX = 5, +}; + +struct nft_data_desc { + enum nft_data_types type; + unsigned int size; + unsigned int len; + unsigned int flags; +}; + +struct nft_range_expr { + struct nft_data data_from; + struct nft_data data_to; + u8 sreg; + u8 len; + enum nft_range_ops op: 8; +}; + +enum { + IFLA_INET6_UNSPEC = 0, + IFLA_INET6_FLAGS = 1, + IFLA_INET6_CONF = 2, + IFLA_INET6_STATS = 3, + IFLA_INET6_MCAST = 4, + IFLA_INET6_CACHEINFO = 5, + IFLA_INET6_ICMP6STATS = 6, + IFLA_INET6_TOKEN = 7, + IFLA_INET6_ADDR_GEN_MODE = 8, + IFLA_INET6_RA_MTU = 9, + __IFLA_INET6_MAX = 10, +}; + +enum in6_addr_gen_mode { + IN6_ADDR_GEN_MODE_EUI64 = 0, + IN6_ADDR_GEN_MODE_NONE = 1, + IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, + IN6_ADDR_GEN_MODE_RANDOM = 3, +}; + +struct ifla_cacheinfo { + __u32 max_reasm_len; + __u32 tstamp; + __u32 reachable_time; + __u32 retrans_time; +}; + +struct ifaddrmsg { + __u8 ifa_family; + __u8 ifa_prefixlen; + __u8 ifa_flags; + __u8 ifa_scope; + __u32 ifa_index; +}; + +enum { + IFA_UNSPEC = 0, + IFA_ADDRESS = 1, + IFA_LOCAL = 2, + IFA_LABEL = 3, + IFA_BROADCAST = 4, + IFA_ANYCAST = 5, + IFA_CACHEINFO = 6, + IFA_MULTICAST = 7, + IFA_FLAGS = 8, + IFA_RT_PRIORITY = 9, + IFA_TARGET_NETNSID = 10, + IFA_PROTO = 11, + __IFA_MAX = 12, +}; + +struct ifa_cacheinfo { + __u32 ifa_prefered; + __u32 ifa_valid; + __u32 cstamp; + __u32 tstamp; +}; + +struct prefixmsg { + unsigned char prefix_family; + unsigned char prefix_pad1; + short unsigned int prefix_pad2; + int prefix_ifindex; + unsigned char prefix_type; + unsigned char prefix_len; + unsigned char prefix_flags; + unsigned char prefix_pad3; +}; + +enum { + PREFIX_UNSPEC = 0, + PREFIX_ADDRESS = 1, + PREFIX_CACHEINFO = 2, + __PREFIX_MAX = 3, +}; + +struct prefix_cacheinfo { + __u32 preferred_time; + __u32 valid_time; +}; + +struct in6_ifreq { + struct in6_addr ifr6_addr; + __u32 ifr6_prefixlen; + int ifr6_ifindex; +}; + +enum { + DEVCONF_FORWARDING = 0, + DEVCONF_HOPLIMIT = 1, + DEVCONF_MTU6 = 2, + DEVCONF_ACCEPT_RA = 3, + DEVCONF_ACCEPT_REDIRECTS = 4, + DEVCONF_AUTOCONF = 5, + DEVCONF_DAD_TRANSMITS = 6, + DEVCONF_RTR_SOLICITS = 7, + DEVCONF_RTR_SOLICIT_INTERVAL = 8, + DEVCONF_RTR_SOLICIT_DELAY = 9, + DEVCONF_USE_TEMPADDR = 10, + DEVCONF_TEMP_VALID_LFT = 11, + DEVCONF_TEMP_PREFERED_LFT = 12, + DEVCONF_REGEN_MAX_RETRY = 13, + DEVCONF_MAX_DESYNC_FACTOR = 14, + DEVCONF_MAX_ADDRESSES = 15, + DEVCONF_FORCE_MLD_VERSION = 16, + DEVCONF_ACCEPT_RA_DEFRTR = 17, + DEVCONF_ACCEPT_RA_PINFO = 18, + DEVCONF_ACCEPT_RA_RTR_PREF = 19, + DEVCONF_RTR_PROBE_INTERVAL = 20, + DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, + DEVCONF_PROXY_NDP = 22, + DEVCONF_OPTIMISTIC_DAD = 23, + DEVCONF_ACCEPT_SOURCE_ROUTE = 24, + DEVCONF_MC_FORWARDING = 25, + DEVCONF_DISABLE_IPV6 = 26, + DEVCONF_ACCEPT_DAD = 27, + DEVCONF_FORCE_TLLAO = 28, + DEVCONF_NDISC_NOTIFY = 29, + DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL 
= 30, + DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, + DEVCONF_SUPPRESS_FRAG_NDISC = 32, + DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, + DEVCONF_USE_OPTIMISTIC = 34, + DEVCONF_ACCEPT_RA_MTU = 35, + DEVCONF_STABLE_SECRET = 36, + DEVCONF_USE_OIF_ADDRS_ONLY = 37, + DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, + DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, + DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, + DEVCONF_DROP_UNSOLICITED_NA = 41, + DEVCONF_KEEP_ADDR_ON_DOWN = 42, + DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, + DEVCONF_SEG6_ENABLED = 44, + DEVCONF_SEG6_REQUIRE_HMAC = 45, + DEVCONF_ENHANCED_DAD = 46, + DEVCONF_ADDR_GEN_MODE = 47, + DEVCONF_DISABLE_POLICY = 48, + DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, + DEVCONF_NDISC_TCLASS = 50, + DEVCONF_RPL_SEG_ENABLED = 51, + DEVCONF_RA_DEFRTR_METRIC = 52, + DEVCONF_IOAM6_ENABLED = 53, + DEVCONF_IOAM6_ID = 54, + DEVCONF_IOAM6_ID_WIDE = 55, + DEVCONF_NDISC_EVICT_NOCARRIER = 56, + DEVCONF_ACCEPT_UNTRACKED_NA = 57, + DEVCONF_ACCEPT_RA_MIN_LFT = 58, + DEVCONF_MAX = 59, +}; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; + +enum { + INET6_IFADDR_STATE_PREDAD = 0, + INET6_IFADDR_STATE_DAD = 1, + INET6_IFADDR_STATE_POSTDAD = 2, + INET6_IFADDR_STATE_ERRDAD = 3, + INET6_IFADDR_STATE_DEAD = 4, +}; + +struct in6_validator_info { + struct in6_addr i6vi_addr; + struct inet6_dev *i6vi_dev; + struct netlink_ext_ack *extack; +}; + +struct ifa6_config { + const struct in6_addr *pfx; + unsigned int plen; + u8 ifa_proto; + const struct in6_addr *peer_pfx; + u32 rt_priority; + u32 ifa_flags; + u32 preferred_lft; + u32 valid_lft; + u16 scope; +}; + +union fwnet_hwaddr { + u8 u[16]; + struct { + __be64 uniq_id; + u8 max_rec; + u8 sspd; + u8 fifo[6]; + } uc; +}; + +struct netconfmsg { + __u8 ncm_family; +}; + +enum { + NETCONFA_UNSPEC = 0, + NETCONFA_IFINDEX = 1, + NETCONFA_FORWARDING = 2, + NETCONFA_RP_FILTER = 3, + NETCONFA_MC_FORWARDING = 4, + NETCONFA_PROXY_NEIGH = 5, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, + NETCONFA_INPUT = 7, + NETCONFA_BC_FORWARDING = 8, + __NETCONFA_MAX = 9, +}; + +enum cleanup_prefix_rt_t { + CLEANUP_PREFIX_RT_NOP = 0, + CLEANUP_PREFIX_RT_DEL = 1, + CLEANUP_PREFIX_RT_EXPIRE = 2, +}; + +enum { + IPV6_SADDR_RULE_INIT = 0, + IPV6_SADDR_RULE_LOCAL = 1, + IPV6_SADDR_RULE_SCOPE = 2, + IPV6_SADDR_RULE_PREFERRED = 3, + IPV6_SADDR_RULE_OIF = 4, + IPV6_SADDR_RULE_LABEL = 5, + IPV6_SADDR_RULE_PRIVACY = 6, + IPV6_SADDR_RULE_ORCHID = 7, + IPV6_SADDR_RULE_PREFIX = 8, + IPV6_SADDR_RULE_MAX = 9, +}; + +struct ipv6_saddr_score { + int rule; + int addr_type; + struct inet6_ifaddr *ifa; + long unsigned int scorebits[1]; + int scopedist; + int matchlen; +}; + +struct ipv6_saddr_dst { + const struct in6_addr *addr; + int ifindex; + int scope; + int label; + unsigned int prefs; +}; + +struct if6_iter_state { + struct seq_net_private p; + int bucket; + int offset; +}; + +enum addr_type_t { + UNICAST_ADDR = 0, + MULTICAST_ADDR = 1, + ANYCAST_ADDR = 2, +}; + +struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; + enum addr_type_t type; +}; + +enum { + DAD_PROCESS = 0, + DAD_BEGIN = 1, + DAD_ABORT = 2, +}; + +struct igmpv3_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + __be32 grec_mca; + __be32 grec_src[0]; +}; + +struct igmpv3_report { + __u8 type; + __u8 resv1; + __sum16 csum; + __be16 resv2; + __be16 ngrec; + struct igmpv3_grec grec[0]; +}; + +struct igmpv3_query { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; + __u8 resv: 4; + __u8 suppress: 1; + __u8 qrv: 3; + __u8 
qqic; + __be16 nsrcs; + __be32 srcs[0]; +}; + +enum { + PIM_TYPE_HELLO = 0, + PIM_TYPE_REGISTER = 1, + PIM_TYPE_REGISTER_STOP = 2, + PIM_TYPE_JOIN_PRUNE = 3, + PIM_TYPE_BOOTSTRAP = 4, + PIM_TYPE_ASSERT = 5, + PIM_TYPE_GRAFT = 6, + PIM_TYPE_GRAFT_ACK = 7, + PIM_TYPE_CANDIDATE_RP_ADV = 8, +}; + +struct pimhdr { + __u8 type; + __u8 reserved; + __be16 csum; +}; + +enum { + MDB_RTR_TYPE_DISABLED = 0, + MDB_RTR_TYPE_TEMP_QUERY = 1, + MDB_RTR_TYPE_PERM = 2, + MDB_RTR_TYPE_TEMP = 3, +}; + +enum { + BRIDGE_QUERIER_UNSPEC = 0, + BRIDGE_QUERIER_IP_ADDRESS = 1, + BRIDGE_QUERIER_IP_PORT = 2, + BRIDGE_QUERIER_IP_OTHER_TIMER = 3, + BRIDGE_QUERIER_PAD = 4, + BRIDGE_QUERIER_IPV6_ADDRESS = 5, + BRIDGE_QUERIER_IPV6_PORT = 6, + BRIDGE_QUERIER_IPV6_OTHER_TIMER = 7, + __BRIDGE_QUERIER_MAX = 8, +}; + +struct br_ip_list { + struct list_head list; + struct br_ip addr; +}; + +struct sigcontext { + __u64 fault_address; + __u64 regs[31]; + __u64 sp; + __u64 pc; + __u64 pstate; + long: 64; + __u8 __reserved[4096]; +}; + +struct _aarch64_ctx { + __u32 magic; + __u32 size; +}; + +struct fpsimd_context { + struct _aarch64_ctx head; + __u32 fpsr; + __u32 fpcr; + __int128 unsigned vregs[32]; +}; + +struct esr_context { + struct _aarch64_ctx head; + __u64 esr; +}; + +struct poe_context { + struct _aarch64_ctx head; + __u64 por_el0; +}; + +struct extra_context { + struct _aarch64_ctx head; + __u64 datap; + __u32 size; + __u32 __reserved[3]; +}; + +struct sve_context { + struct _aarch64_ctx head; + __u16 vl; + __u16 flags; + __u16 __reserved[2]; +}; + +struct tpidr2_context { + struct _aarch64_ctx head; + __u64 tpidr2; +}; + +struct fpmr_context { + struct _aarch64_ctx head; + __u64 fpmr; +}; + +struct za_context { + struct _aarch64_ctx head; + __u16 vl; + __u16 __reserved[3]; +}; + +struct zt_context { + struct _aarch64_ctx head; + __u16 nregs; + __u16 __reserved[3]; +}; + +struct gcs_context { + struct _aarch64_ctx head; + __u64 gcspr; + __u64 features_enabled; + __u64 reserved; +}; + +struct sigaltstack { + void *ss_sp; + int ss_flags; + __kernel_size_t ss_size; +}; + +typedef struct sigaltstack stack_t; + +struct ucontext { + long unsigned int uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + sigset_t uc_sigmask; + __u8 __unused[120]; + long: 64; + struct sigcontext uc_mcontext; +}; + +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; +}; + +struct rt_sigframe_user_layout { + struct rt_sigframe *sigframe; + struct frame_record *next_frame; + long unsigned int size; + long unsigned int limit; + long unsigned int fpsimd_offset; + long unsigned int esr_offset; + long unsigned int gcs_offset; + long unsigned int sve_offset; + long unsigned int tpidr2_offset; + long unsigned int za_offset; + long unsigned int zt_offset; + long unsigned int fpmr_offset; + long unsigned int poe_offset; + long unsigned int extra_offset; + long unsigned int end_offset; +}; + +struct user_access_state { + u64 por_el0; +}; + +struct user_ctxs { + struct fpsimd_context *fpsimd; + u32 fpsimd_size; + struct sve_context *sve; + u32 sve_size; + struct tpidr2_context *tpidr2; + u32 tpidr2_size; + struct za_context *za; + u32 za_size; + struct zt_context *zt; + u32 zt_size; + struct fpmr_context *fpmr; + u32 fpmr_size; + struct poe_context *poe; + u32 poe_size; + struct gcs_context *gcs; + u32 gcs_size; +}; + +struct softirq_action { + void (*action)(); +}; + +struct trace_event_raw_irq_handler_entry { + struct trace_entry ent; + int irq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_irq_handler_exit { + 
struct trace_entry ent; + int irq; + int ret; + char __data[0]; +}; + +struct trace_event_raw_softirq { + struct trace_entry ent; + unsigned int vec; + char __data[0]; +}; + +struct trace_event_raw_tasklet { + struct trace_entry ent; + void *tasklet; + void *func; + char __data[0]; +}; + +struct trace_event_data_offsets_irq_handler_entry { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_irq_handler_exit {}; + +struct trace_event_data_offsets_softirq {}; + +struct trace_event_data_offsets_tasklet {}; + +typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *); + +typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int); + +typedef void (*btf_trace_softirq_entry)(void *, unsigned int); + +typedef void (*btf_trace_softirq_exit)(void *, unsigned int); + +typedef void (*btf_trace_softirq_raise)(void *, unsigned int); + +typedef void (*btf_trace_tasklet_entry)(void *, struct tasklet_struct *, void *); + +typedef void (*btf_trace_tasklet_exit)(void *, struct tasklet_struct *, void *); + +struct tasklet_head { + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; + +typedef struct { + void *lock; + long unsigned int flags; +} class_irqsave_t; + +enum scx_public_consts { + SCX_OPS_NAME_LEN = 128ULL, + SCX_SLICE_DFL = 20000000ULL, + SCX_SLICE_INF = 18446744073709551615ULL, +}; + +enum scx_dsq_id_flags { + SCX_DSQ_FLAG_BUILTIN = 9223372036854775808ULL, + SCX_DSQ_FLAG_LOCAL_ON = 4611686018427387904ULL, + SCX_DSQ_INVALID = 9223372036854775808ULL, + SCX_DSQ_GLOBAL = 9223372036854775809ULL, + SCX_DSQ_LOCAL = 9223372036854775810ULL, + SCX_DSQ_LOCAL_ON = 13835058055282163712ULL, + SCX_DSQ_LOCAL_CPU_MASK = 4294967295ULL, +}; + +enum scx_ent_flags { + SCX_TASK_QUEUED = 1, + SCX_TASK_RESET_RUNNABLE_AT = 4, + SCX_TASK_DEQD_FOR_SLEEP = 8, + SCX_TASK_STATE_SHIFT = 8, + SCX_TASK_STATE_BITS = 2, + SCX_TASK_STATE_MASK = 768, + SCX_TASK_CURSOR = -2147483648, +}; + +enum scx_task_state { + SCX_TASK_NONE = 0, + SCX_TASK_INIT = 1, + SCX_TASK_READY = 2, + SCX_TASK_ENABLED = 3, + SCX_TASK_NR_STATES = 4, +}; + +enum scx_ent_dsq_flags { + SCX_TASK_DSQ_ON_PRIQ = 1, +}; + +enum scx_kf_mask { + SCX_KF_UNLOCKED = 0, + SCX_KF_CPU_RELEASE = 1, + SCX_KF_DISPATCH = 2, + SCX_KF_ENQUEUE = 4, + SCX_KF_SELECT_CPU = 8, + SCX_KF_REST = 16, + __SCX_KF_RQ_LOCKED = 31, + __SCX_KF_TERMINAL = 28, +}; + +enum scx_dsq_lnode_flags { + SCX_DSQ_LNODE_ITER_CURSOR = 1, + __SCX_DSQ_LNODE_PRIV_SHIFT = 16, +}; + +struct sched_param { + int sched_priority; +}; + +struct sched_attr { + __u32 size; + __u32 sched_policy; + __u64 sched_flags; + __s32 sched_nice; + __u32 sched_priority; + __u64 sched_runtime; + __u64 sched_deadline; + __u64 sched_period; + __u32 sched_util_min; + __u32 sched_util_max; +}; + +enum scx_rq_flags { + SCX_RQ_ONLINE = 1, + SCX_RQ_CAN_STOP_TICK = 2, + SCX_RQ_BAL_PENDING = 4, + SCX_RQ_BAL_KEEP = 8, + SCX_RQ_BYPASSING = 16, + SCX_RQ_IN_WAKEUP = 65536, + SCX_RQ_IN_BALANCE = 131072, +}; + +typedef struct { + struct task_struct *lock; + struct rq *rq; + struct rq_flags rf; +} class_task_rq_lock_t; + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_t; + +typedef struct { + struct rq *lock; + struct rq *lock2; +} class_double_rq_lock_t; + +struct sched_enq_and_set_ctx { + struct task_struct *p; + int queue_flags; + bool queued; + bool running; +}; + +struct idle_timer { + struct hrtimer timer; + int done; +}; + +typedef struct rt_rq *rt_rq_iter_t; + +enum dl_bw_request { + dl_bw_req_check_overflow = 0, + dl_bw_req_alloc = 1, + 
dl_bw_req_free = 2, +}; + +enum scx_consts { + SCX_DSP_DFL_MAX_BATCH = 32, + SCX_DSP_MAX_LOOPS = 32, + SCX_WATCHDOG_MAX_TIMEOUT = 30000, + SCX_EXIT_BT_LEN = 64, + SCX_EXIT_MSG_LEN = 1024, + SCX_EXIT_DUMP_DFL_LEN = 32768, + SCX_CPUPERF_ONE = 1024, + SCX_OPS_TASK_ITER_BATCH = 32, +}; + +enum scx_exit_kind { + SCX_EXIT_NONE = 0, + SCX_EXIT_DONE = 1, + SCX_EXIT_UNREG = 64, + SCX_EXIT_UNREG_BPF = 65, + SCX_EXIT_UNREG_KERN = 66, + SCX_EXIT_SYSRQ = 67, + SCX_EXIT_ERROR = 1024, + SCX_EXIT_ERROR_BPF = 1025, + SCX_EXIT_ERROR_STALL = 1026, +}; + +enum scx_exit_code { + SCX_ECODE_RSN_HOTPLUG = 4294967296ULL, + SCX_ECODE_ACT_RESTART = 281474976710656ULL, +}; + +struct scx_exit_info { + enum scx_exit_kind kind; + s64 exit_code; + const char *reason; + long unsigned int *bt; + u32 bt_len; + char *msg; + char *dump; +}; + +enum scx_ops_flags { + SCX_OPS_KEEP_BUILTIN_IDLE = 1, + SCX_OPS_ENQ_LAST = 2, + SCX_OPS_ENQ_EXITING = 4, + SCX_OPS_SWITCH_PARTIAL = 8, + SCX_OPS_HAS_CGROUP_WEIGHT = 65536, + SCX_OPS_ALL_FLAGS = 65551, +}; + +struct scx_init_task_args { + bool fork; + struct cgroup *cgroup; +}; + +struct scx_exit_task_args { + bool cancelled; +}; + +struct scx_cgroup_init_args { + u32 weight; +}; + +enum scx_cpu_preempt_reason { + SCX_CPU_PREEMPT_RT = 0, + SCX_CPU_PREEMPT_DL = 1, + SCX_CPU_PREEMPT_STOP = 2, + SCX_CPU_PREEMPT_UNKNOWN = 3, +}; + +struct scx_cpu_acquire_args {}; + +struct scx_cpu_release_args { + enum scx_cpu_preempt_reason reason; + struct task_struct *task; +}; + +struct scx_dump_ctx { + enum scx_exit_kind kind; + s64 exit_code; + const char *reason; + u64 at_ns; + u64 at_jiffies; +}; + +struct sched_ext_ops { + s32 (*select_cpu)(struct task_struct *, s32, u64); + void (*enqueue)(struct task_struct *, u64); + void (*dequeue)(struct task_struct *, u64); + void (*dispatch)(s32, struct task_struct *); + void (*tick)(struct task_struct *); + void (*runnable)(struct task_struct *, u64); + void (*running)(struct task_struct *); + void (*stopping)(struct task_struct *, bool); + void (*quiescent)(struct task_struct *, u64); + bool (*yield)(struct task_struct *, struct task_struct *); + bool (*core_sched_before)(struct task_struct *, struct task_struct *); + void (*set_weight)(struct task_struct *, u32); + void (*set_cpumask)(struct task_struct *, const struct cpumask *); + void (*update_idle)(s32, bool); + void (*cpu_acquire)(s32, struct scx_cpu_acquire_args *); + void (*cpu_release)(s32, struct scx_cpu_release_args *); + s32 (*init_task)(struct task_struct *, struct scx_init_task_args *); + void (*exit_task)(struct task_struct *, struct scx_exit_task_args *); + void (*enable)(struct task_struct *); + void (*disable)(struct task_struct *); + void (*dump)(struct scx_dump_ctx *); + void (*dump_cpu)(struct scx_dump_ctx *, s32, bool); + void (*dump_task)(struct scx_dump_ctx *, struct task_struct *); + s32 (*cgroup_init)(struct cgroup *, struct scx_cgroup_init_args *); + void (*cgroup_exit)(struct cgroup *); + s32 (*cgroup_prep_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_cancel_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_set_weight)(struct cgroup *, u32); + void (*cpu_online)(s32); + void (*cpu_offline)(s32); + s32 (*init)(); + void (*exit)(struct scx_exit_info *); + u32 dispatch_max_batch; + u64 flags; + u32 timeout_ms; + u32 exit_dump_len; + u64 hotplug_seq; + char name[128]; +}; + +enum scx_opi { + SCX_OPI_BEGIN = 0, + SCX_OPI_NORMAL_BEGIN = 0, + 
SCX_OPI_NORMAL_END = 29, + SCX_OPI_CPU_HOTPLUG_BEGIN = 29, + SCX_OPI_CPU_HOTPLUG_END = 31, + SCX_OPI_END = 31, +}; + +enum scx_wake_flags { + SCX_WAKE_FORK = 4, + SCX_WAKE_TTWU = 8, + SCX_WAKE_SYNC = 16, +}; + +enum scx_enq_flags { + SCX_ENQ_WAKEUP = 1ULL, + SCX_ENQ_HEAD = 16ULL, + SCX_ENQ_CPU_SELECTED = 1024ULL, + SCX_ENQ_PREEMPT = 4294967296ULL, + SCX_ENQ_REENQ = 1099511627776ULL, + SCX_ENQ_LAST = 2199023255552ULL, + __SCX_ENQ_INTERNAL_MASK = 18374686479671623680ULL, + SCX_ENQ_CLEAR_OPSS = 72057594037927936ULL, + SCX_ENQ_DSQ_PRIQ = 144115188075855872ULL, +}; + +enum scx_deq_flags { + SCX_DEQ_SLEEP = 1ULL, + SCX_DEQ_CORE_SCHED_EXEC = 4294967296ULL, +}; + +enum scx_pick_idle_cpu_flags { + SCX_PICK_IDLE_CORE = 1, +}; + +enum scx_kick_flags { + SCX_KICK_IDLE = 1, + SCX_KICK_PREEMPT = 2, + SCX_KICK_WAIT = 4, +}; + +enum scx_tg_flags { + SCX_TG_ONLINE = 1, + SCX_TG_INITED = 2, +}; + +enum scx_ops_enable_state { + SCX_OPS_ENABLING = 0, + SCX_OPS_ENABLED = 1, + SCX_OPS_DISABLING = 2, + SCX_OPS_DISABLED = 3, +}; + +enum scx_ops_state { + SCX_OPSS_NONE = 0, + SCX_OPSS_QUEUEING = 1, + SCX_OPSS_QUEUED = 2, + SCX_OPSS_DISPATCHING = 3, + SCX_OPSS_QSEQ_SHIFT = 2, +}; + +struct scx_dsp_buf_ent { + struct task_struct *task; + long unsigned int qseq; + u64 dsq_id; + u64 enq_flags; +}; + +struct scx_dsp_ctx { + struct rq *rq; + u32 cursor; + u32 nr_tasks; + struct scx_dsp_buf_ent buf[0]; +}; + +struct scx_bstr_buf { + u64 data[12]; + char line[1024]; +}; + +struct scx_dump_data { + s32 cpu; + bool first; + s32 cursor; + struct seq_buf *s; + const char *prefix; + struct scx_bstr_buf buf; +}; + +struct trace_event_raw_sched_ext_dump { + struct trace_entry ent; + u32 __data_loc_line; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_ext_dump { + u32 line; + const void *line_ptr_; +}; + +typedef void (*btf_trace_sched_ext_dump)(void *, const char *); + +enum scx_dsq_iter_flags { + SCX_DSQ_ITER_REV = 65536, + __SCX_DSQ_ITER_HAS_SLICE = 1073741824, + __SCX_DSQ_ITER_HAS_VTIME = 2147483648, + __SCX_DSQ_ITER_USER_FLAGS = 65536, + __SCX_DSQ_ITER_ALL_FLAGS = 3221291008, +}; + +struct bpf_iter_scx_dsq_kern { + struct scx_dsq_list_node cursor; + struct scx_dispatch_q *dsq; + u64 slice; + u64 vtime; +}; + +struct bpf_iter_scx_dsq { + u64 __opaque[6]; +}; + +struct scx_task_iter { + struct sched_ext_entity cursor; + struct task_struct *locked; + struct rq *rq; + struct rq_flags rf; + u32 cnt; +}; + +typedef struct task_struct *class_find_get_task_t; + +struct bpf_struct_ops_sched_ext_ops { + struct bpf_struct_ops_common_value common; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct sched_ext_ops data; + long: 64; + long: 64; + long: 64; +}; + +typedef int (*tracing_map_cmp_fn_t)(void *, void *); + +struct tracing_map_field { + tracing_map_cmp_fn_t cmp_fn; + union { + atomic64_t sum; + unsigned int offset; + }; +}; + +struct tracing_map; + +struct tracing_map_elt { + struct tracing_map *map; + struct tracing_map_field *fields; + atomic64_t *vars; + bool *var_set; + void *key; + void *private_data; +}; + +struct tracing_map_sort_key { + unsigned int field_idx; + bool descending; +}; + +struct tracing_map_array; + +struct tracing_map_ops; + +struct tracing_map { + unsigned int key_size; + unsigned int map_bits; + unsigned int map_size; + unsigned int max_elts; + atomic_t next_elt; + struct tracing_map_array *elts; + struct tracing_map_array *map; + const struct tracing_map_ops *ops; + void *private_data; + struct tracing_map_field fields[6]; + unsigned int n_fields; + 
int key_idx[3]; + unsigned int n_keys; + struct tracing_map_sort_key sort_key; + unsigned int n_vars; + atomic64_t hits; + atomic64_t drops; +}; + +struct tracing_map_entry { + u32 key; + struct tracing_map_elt *val; +}; + +struct tracing_map_sort_entry { + void *key; + struct tracing_map_elt *elt; + bool elt_copied; + bool dup; +}; + +struct tracing_map_array { + unsigned int entries_per_page; + unsigned int entry_size_shift; + unsigned int entry_shift; + unsigned int entry_mask; + unsigned int n_pages; + void **pages; +}; + +struct tracing_map_ops { + int (*elt_alloc)(struct tracing_map_elt *); + void (*elt_free)(struct tracing_map_elt *); + void (*elt_clear)(struct tracing_map_elt *); + void (*elt_init)(struct tracing_map_elt *); +}; + +enum blktrace_notify { + __BLK_TN_PROCESS = 0, + __BLK_TN_TIMESTAMP = 1, + __BLK_TN_MESSAGE = 2, + __BLK_TN_CGROUP = 256, +}; + +struct blk_io_trace { + __u32 magic; + __u32 sequence; + __u64 time; + __u64 sector; + __u32 bytes; + __u32 action; + __u32 pid; + __u32 device; + __u32 cpu; + __u16 error; + __u16 pdu_len; +}; + +struct blk_io_trace_remap { + __be32 device_from; + __be32 device_to; + __be64 sector_from; +}; + +enum { + Blktrace_setup = 1, + Blktrace_running = 2, + Blktrace_stopped = 3, +}; + +struct blk_user_trace_setup { + char name[32]; + __u16 act_mask; + __u32 buf_size; + __u32 buf_nr; + __u64 start_lba; + __u64 end_lba; + __u32 pid; +}; + +typedef void blk_log_action_t(struct trace_iterator *, const char *, bool); + +enum { + BPF_RB_NO_WAKEUP = 1, + BPF_RB_FORCE_WAKEUP = 2, +}; + +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + +enum { + BPF_RINGBUF_BUSY_BIT = 2147483648, + BPF_RINGBUF_DISCARD_BIT = 1073741824, + BPF_RINGBUF_HDR_SZ = 8, +}; + +struct bpf_ringbuf { + wait_queue_head_t waitq; + struct irq_work work; + u64 mask; + struct page **pages; + int nr_pages; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + raw_spinlock_t spinlock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + atomic_t busy; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int consumer_pos; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; 
+ long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int producer_pos; + long unsigned int pending_pos; + long: 64; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + char data[0]; +}; + +struct bpf_ringbuf_map { + struct bpf_map map; + struct bpf_ringbuf *rb; +}; + +struct bpf_ringbuf_hdr { + u32 len; + u32 pg_off; +}; + +typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_ringbuf_reserve_dynptr)(struct bpf_map *, u32, u64, struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_ringbuf_submit_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_user_ringbuf_drain)(struct bpf_map *, void *, void *, u64); + +struct bpf_cpumask { + cpumask_t cpumask; + refcount_t usage; +}; + +struct vm_unmapped_area_info { + long unsigned int flags; + long unsigned int length; + long unsigned int 
low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; + long unsigned int start_gap; +}; + +struct trace_event_raw_vm_unmapped_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int total_vm; + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; + char __data[0]; +}; + +struct trace_event_raw_vma_mas_szero { + struct trace_entry ent; + struct maple_tree *mt; + long unsigned int start; + long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_vma_store { + struct trace_entry ent; + struct maple_tree *mt; + struct vm_area_struct *vma; + long unsigned int vm_start; + long unsigned int vm_end; + char __data[0]; +}; + +struct trace_event_raw_exit_mmap { + struct trace_entry ent; + struct mm_struct *mm; + struct maple_tree *mt; + char __data[0]; +}; + +struct trace_event_data_offsets_vm_unmapped_area {}; + +struct trace_event_data_offsets_vma_mas_szero {}; + +struct trace_event_data_offsets_vma_store {}; + +struct trace_event_data_offsets_exit_mmap {}; + +typedef void (*btf_trace_vm_unmapped_area)(void *, long unsigned int, struct vm_unmapped_area_info *); + +typedef void (*btf_trace_vma_mas_szero)(void *, struct maple_tree *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_vma_store)(void *, struct maple_tree *, struct vm_area_struct *); + +typedef void (*btf_trace_exit_mmap)(void *, struct mm_struct *); + +struct qlist_head { + struct qlist_node *head; + struct qlist_node *tail; + size_t bytes; + bool offline; +}; + +struct cpu_shrink_qlist { + raw_spinlock_t lock; + struct qlist_head qlist; +}; + +struct user_arg_ptr { + union { + const char * const *native; + } ptr; +}; + +struct flock64 { + short int l_type; + short int l_whence; + __kernel_loff_t l_start; + __kernel_loff_t l_len; + __kernel_pid_t l_pid; +}; + +struct trace_event_raw_locks_get_lock_context { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + unsigned char type; + struct file_lock_context *ctx; + char __data[0]; +}; + +struct trace_event_raw_filelock_lock { + struct trace_entry ent; + struct file_lock *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock_core *blocker; + fl_owner_t owner; + unsigned int pid; + unsigned int flags; + unsigned char type; + loff_t fl_start; + loff_t fl_end; + int ret; + char __data[0]; +}; + +struct trace_event_raw_filelock_lease { + struct trace_entry ent; + struct file_lease *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock_core *blocker; + fl_owner_t owner; + unsigned int flags; + unsigned char type; + long unsigned int break_time; + long unsigned int downgrade_time; + char __data[0]; +}; + +struct trace_event_raw_generic_add_lease { + struct trace_entry ent; + long unsigned int i_ino; + int wcount; + int rcount; + int icount; + dev_t s_dev; + fl_owner_t owner; + unsigned int flags; + unsigned char type; + char __data[0]; +}; + +struct trace_event_raw_leases_conflict { + struct trace_entry ent; + void *lease; + void *breaker; + unsigned int l_fl_flags; + unsigned int b_fl_flags; + unsigned char l_fl_type; + unsigned char b_fl_type; + bool conflict; + char __data[0]; +}; + +struct trace_event_data_offsets_locks_get_lock_context {}; + +struct trace_event_data_offsets_filelock_lock {}; + +struct trace_event_data_offsets_filelock_lease {}; + +struct trace_event_data_offsets_generic_add_lease {}; + +struct 
trace_event_data_offsets_leases_conflict {}; + +typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, struct file_lock_context *); + +typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_locks_remove_posix)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lease *, struct file_lease *); + +struct file_lock_list_struct { + spinlock_t lock; + struct hlist_head hlist; +}; + +struct locks_iterator { + int li_cpu; + loff_t li_pos; +}; + +typedef short unsigned int __kernel_uid16_t; + +typedef short unsigned int __kernel_gid16_t; + +typedef __kernel_uid16_t uid16_t; + +typedef __kernel_gid16_t gid16_t; + +struct ext4_io_end_vec { + struct list_head list; + loff_t offset; + ssize_t size; +}; + +struct ext4_io_end { + struct list_head list; + handle_t *handle; + struct inode *inode; + struct bio *bio; + unsigned int flag; + refcount_t count; + struct list_head list_vec; +}; + +typedef struct ext4_io_end ext4_io_end_t; + +struct ext4_io_submit { + struct writeback_control *io_wbc; + struct bio *io_bio; + ext4_io_end_t *io_end; + sector_t io_next_block; +}; + +struct mpage_da_data { + struct inode *inode; + struct writeback_control *wbc; + unsigned int can_map: 1; + long unsigned int first_page; + long unsigned int next_page; + long unsigned int last_page; + struct ext4_map_blocks map; + struct ext4_io_submit io_submit; + unsigned int do_map: 1; + unsigned int scanned_until_end: 1; + unsigned int journalled_more_data: 1; +}; + +struct fscrypt_inode_info; + +struct fuse_backing_map { + int32_t fd; + uint32_t flags; + uint64_t padding; +}; + +struct btrfs_fid { + u64 objectid; + u64 root_objectid; + u32 gen; + u64 parent_objectid; + u32 parent_gen; + u64 parent_root_objectid; +} __attribute__((packed)); + +struct btrfs_block_group_item { + __le64 used; + __le64 chunk_objectid; + __le64 flags; +}; + +enum btrfs_tree_block_status { + BTRFS_TREE_BLOCK_CLEAN = 0, + BTRFS_TREE_BLOCK_INVALID_NRITEMS = 1, + BTRFS_TREE_BLOCK_INVALID_PARENT_KEY = 2, + BTRFS_TREE_BLOCK_BAD_KEY_ORDER = 3, + BTRFS_TREE_BLOCK_INVALID_LEVEL = 4, + BTRFS_TREE_BLOCK_INVALID_FREE_SPACE = 5, + BTRFS_TREE_BLOCK_INVALID_OFFSETS = 6, + BTRFS_TREE_BLOCK_INVALID_BLOCKPTR = 7, + BTRFS_TREE_BLOCK_INVALID_ITEM = 8, + BTRFS_TREE_BLOCK_INVALID_OWNER = 9, + BTRFS_TREE_BLOCK_WRITTEN_NOT_SET = 10, +}; + +struct user_key_payload { + struct callback_head rcu; + short unsigned int datalen; + long: 0; + char data[0]; +}; + +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[0]; +}; + +struct crypto_kpp { + unsigned int reqsize; + struct 
crypto_tfm base; +}; + +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); + int (*generate_public_key)(struct kpp_request *); + int (*compute_shared_secret)(struct kpp_request *); + unsigned int (*max_size)(struct crypto_kpp *); + int (*init)(struct crypto_kpp *); + void (*exit)(struct crypto_kpp *); + struct crypto_alg base; +}; + +struct kpp_instance { + void (*free)(struct kpp_instance *); + union { + struct { + char head[48]; + struct crypto_instance base; + } s; + struct kpp_alg alg; + }; +}; + +struct crypto_kpp_spawn { + struct crypto_spawn base; +}; + +struct bsg_device { + struct request_queue *queue; + struct device device; + struct cdev cdev; + int max_queue; + unsigned int timeout; + unsigned int reserved_size; + bsg_sg_io_fn *sg_io_fn; +}; + +struct user_msghdr { + void *msg_name; + int msg_namelen; + struct iovec *msg_iov; + __kernel_size_t msg_iovlen; + void *msg_control; + __kernel_size_t msg_controllen; + unsigned int msg_flags; +}; + +typedef u32 compat_size_t; + +struct compat_msghdr { + compat_uptr_t msg_name; + compat_int_t msg_namelen; + compat_uptr_t msg_iov; + compat_size_t msg_iovlen; + compat_uptr_t msg_control; + compat_size_t msg_controllen; + compat_uint_t msg_flags; +}; + +struct io_uring_recvmsg_out { + __u32 namelen; + __u32 controllen; + __u32 payloadlen; + __u32 flags; +}; + +enum { + KBUF_MODE_EXPAND = 1, + KBUF_MODE_FREE = 2, +}; + +struct buf_sel_arg { + struct iovec *iovs; + size_t out_len; + size_t max_len; + short unsigned int nr_iovs; + short unsigned int mode; +}; + +struct io_async_msghdr { + struct iovec fast_iov; + struct iovec *free_iov; + int free_iov_nr; + int namelen; + __kernel_size_t controllen; + __kernel_size_t payloadlen; + struct sockaddr *uaddr; + struct msghdr msg; + struct __kernel_sockaddr_storage addr; +}; + +struct io_notif_data { + struct file *file; + struct ubuf_info uarg; + struct io_notif_data *next; + struct io_notif_data *head; + unsigned int account_pages; + bool zc_report; + bool zc_used; + bool zc_copied; +}; + +struct io_shutdown { + struct file *file; + int how; +}; + +struct io_accept { + struct file *file; + struct sockaddr *addr; + int *addr_len; + int flags; + int iou_flags; + u32 file_slot; + long unsigned int nofile; +}; + +struct io_socket { + struct file *file; + int domain; + int type; + int protocol; + int flags; + u32 file_slot; + long unsigned int nofile; +}; + +struct io_connect { + struct file *file; + struct sockaddr *addr; + int addr_len; + bool in_progress; + bool seen_econnaborted; +}; + +struct io_bind { + struct file *file; + int addr_len; +}; + +struct io_listen { + struct file *file; + int backlog; +}; + +struct io_sr_msg { + struct file *file; + union { + struct compat_msghdr *umsg_compat; + struct user_msghdr *umsg; + void *buf; + }; + int len; + unsigned int done_io; + unsigned int msg_flags; + unsigned int nr_multishot_loops; + u16 flags; + u16 buf_group; + u16 buf_index; + void *msg_control; + struct io_kiocb *notif; +}; + +struct io_recvmsg_multishot_hdr { + struct io_uring_recvmsg_out msg; + struct __kernel_sockaddr_storage addr; +}; + +typedef struct { + U32 fastMode; + U32 tableLog; +} ZSTD_seqSymbol_header; + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; +} seq_t; + +typedef struct { + size_t state; + const ZSTD_seqSymbol *table; +} ZSTD_fseState; + +typedef struct { + BIT_DStream_t DStream; + ZSTD_fseState stateLL; + ZSTD_fseState stateOffb; + ZSTD_fseState stateML; + size_t prevOffset[3]; +} seqState_t; + +typedef 
enum { + ZSTD_lo_isRegularOffset = 0, + ZSTD_lo_isLongOffset = 1, +} ZSTD_longOffset_e; + +struct mbi_range { + u32 spi_start; + u32 nr_spis; + long unsigned int *bm; +}; + +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +enum vp_vq_vector_policy { + VP_VQ_VECTOR_POLICY_EACH = 0, + VP_VQ_VECTOR_POLICY_SHARED_SLOW = 1, + VP_VQ_VECTOR_POLICY_SHARED = 2, +}; + +enum dpm_order { + DPM_ORDER_NONE = 0, + DPM_ORDER_DEV_AFTER_PARENT = 1, + DPM_ORDER_PARENT_BEFORE_DEV = 2, + DPM_ORDER_DEV_LAST = 3, +}; + +struct fwnode_link { + struct fwnode_handle *supplier; + struct list_head s_hook; + struct fwnode_handle *consumer; + struct list_head c_hook; + u8 flags; +}; + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +struct class_dir { + struct kobject kobj; + const struct class *class; +}; + +struct root_device { + struct device dev; + struct module *owner; +}; + +enum { + NVME_CMBSZ_SQS = 1, + NVME_CMBSZ_CQS = 2, + NVME_CMBSZ_LISTS = 4, + NVME_CMBSZ_RDS = 8, + NVME_CMBSZ_WDS = 16, + NVME_CMBSZ_SZ_SHIFT = 12, + NVME_CMBSZ_SZ_MASK = 1048575, + NVME_CMBSZ_SZU_SHIFT = 8, + NVME_CMBSZ_SZU_MASK = 15, +}; + +enum { + NVME_CMBMSC_CRE = 1, + NVME_CMBMSC_CMSE = 2, +}; + +enum { + NVME_SGL_FMT_DATA_DESC = 0, + NVME_SGL_FMT_SEG_DESC = 2, + NVME_SGL_FMT_LAST_SEG_DESC = 3, + NVME_KEY_SGL_FMT_DATA_DESC = 4, + NVME_TRANSPORT_SGL_DATA_DESC = 5, +}; + +enum { + NVME_HOST_MEM_ENABLE = 1, + NVME_HOST_MEM_RETURN = 2, +}; + +struct nvme_host_mem_buf_desc { + __le64 addr; + __le32 size; + __u32 rsvd; +}; + +struct nvme_completion { + union nvme_result result; + __le16 sq_head; + __le16 sq_id; + __u16 command_id; + __le16 status; +}; + +struct nvme_queue; + +struct nvme_dev { + struct nvme_queue *queues; + struct blk_mq_tag_set tagset; + struct blk_mq_tag_set admin_tagset; + u32 *dbs; + struct device *dev; + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool; + unsigned int online_queues; + unsigned int max_qid; + unsigned int io_queues[3]; + unsigned int num_vecs; + u32 q_depth; + int io_sqes; + u32 db_stride; + void *bar; + long unsigned int bar_mapped_size; + struct mutex shutdown_lock; + bool subsystem; + u64 cmb_size; + bool cmb_use_sqes; + u32 cmbsz; + u32 cmbloc; + struct nvme_ctrl ctrl; + u32 last_ps; + bool hmb; + struct sg_table *hmb_sgt; + mempool_t *iod_mempool; + mempool_t *iod_meta_mempool; + __le32 *dbbuf_dbs; + dma_addr_t dbbuf_dbs_dma_addr; + __le32 *dbbuf_eis; + dma_addr_t dbbuf_eis_dma_addr; + u64 host_mem_size; + u32 nr_host_mem_descs; + u32 host_mem_descs_size; + dma_addr_t host_mem_descs_dma; + struct nvme_host_mem_buf_desc *host_mem_descs; + void **host_mem_desc_bufs; + unsigned int nr_allocated_queues; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; +}; + +struct nvme_queue { + struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t cq_poll_lock; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 *q_db; + u32 q_depth; + u16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + long unsigned int flags; + __le32 *dbbuf_sq_db; + __le32 *dbbuf_cq_db; + __le32 *dbbuf_sq_ei; + __le32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +union nvme_descriptor { + struct nvme_sgl_desc *sg_list; + __le64 *prp_list; +}; + +struct nvme_iod { + struct nvme_request req; + struct nvme_command cmd; + bool aborted; + s8 nr_allocations; + 
unsigned int dma_len; + dma_addr_t first_dma; + dma_addr_t meta_dma; + struct sg_table sgt; + struct sg_table meta_sgt; + union nvme_descriptor meta_list; + union nvme_descriptor list[5]; +}; + +struct usb_class_driver { + char *name; + char * (*devnode)(const struct device *, umode_t *); + const struct file_operations *fops; + int minor_base; +}; + +enum usb_phy_interface { + USBPHY_INTERFACE_MODE_UNKNOWN = 0, + USBPHY_INTERFACE_MODE_UTMI = 1, + USBPHY_INTERFACE_MODE_UTMIW = 2, + USBPHY_INTERFACE_MODE_ULPI = 3, + USBPHY_INTERFACE_MODE_SERIAL = 4, + USBPHY_INTERFACE_MODE_HSIC = 5, +}; + +struct i2c_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct i2c_device_identity { + u16 manufacturer_id; + u16 part_id; + u8 die_revision; +}; + +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT = 0, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1, +}; + +struct i2c_driver { + unsigned int class; + int (*probe)(struct i2c_client *); + void (*remove)(struct i2c_client *); + void (*shutdown)(struct i2c_client *); + void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned int); + int (*command)(struct i2c_client *, unsigned int, void *); + struct device_driver driver; + const struct i2c_device_id *id_table; + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const short unsigned int *address_list; + struct list_head clients; + u32 flags; +}; + +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; + u32 sda_hold_ns; + u32 digital_filter_width_ns; + u32 analog_filter_cutoff_freq_hz; +}; + +struct i2c_devinfo { + struct list_head list; + int busnum; + struct i2c_board_info board_info; +}; + +struct trace_event_raw_i2c_write { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_read { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + char __data[0]; +}; + +struct trace_event_raw_i2c_reply { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_result { + struct trace_entry ent; + int adapter_nr; + __u16 nr_msgs; + __s16 ret; + char __data[0]; +}; + +struct trace_event_data_offsets_i2c_write { + u32 buf; + const void *buf_ptr_; +}; + +struct trace_event_data_offsets_i2c_read {}; + +struct trace_event_data_offsets_i2c_reply { + u32 buf; + const void *buf_ptr_; +}; + +struct trace_event_data_offsets_i2c_result {}; + +typedef void (*btf_trace_i2c_write)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_read)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_reply)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_result)(void *, const struct i2c_adapter *, int, int); + +struct i2c_cmd_arg { + unsigned int cmd; + void *arg; +}; + +struct clock_identity { + u8 id[8]; +}; + +struct port_identity { + struct clock_identity clock_identity; + __be16 port_number; +}; + +struct ptp_header { + u8 tsmt; + u8 ver; + __be16 message_length; + u8 domain_number; + u8 reserved1; + u8 flag_field[2]; + __be64 correction; + __be32 reserved2; + struct port_identity source_port_identity; + __be16 sequence_id; + u8 control; + u8 log_message_interval; +} __attribute__((packed)); + +struct 
link_mode_info { + int speed; + u8 lanes; + u8 duplex; +}; + +struct linkmodes_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; + bool peer_empty; +}; + +struct plca_reply_data { + struct ethnl_reply_data base; + struct phy_plca_cfg plca_cfg; + struct phy_plca_status plca_st; +}; + +enum nft_payload_bases { + NFT_PAYLOAD_LL_HEADER = 0, + NFT_PAYLOAD_NETWORK_HEADER = 1, + NFT_PAYLOAD_TRANSPORT_HEADER = 2, + NFT_PAYLOAD_INNER_HEADER = 3, + NFT_PAYLOAD_TUN_HEADER = 4, +}; + +enum nft_trace_types { + NFT_TRACETYPE_UNSPEC = 0, + NFT_TRACETYPE_POLICY = 1, + NFT_TRACETYPE_RETURN = 2, + NFT_TRACETYPE_RULE = 3, + __NFT_TRACETYPE_MAX = 4, +}; + +struct nft_rule_dp { + u64 is_last: 1; + u64 dlen: 12; + u64 handle: 42; + long: 0; + unsigned char data[0]; +}; + +struct nft_traceinfo { + bool trace; + bool nf_trace; + bool packet_dumped; + enum nft_trace_types type: 8; + u32 skbid; + const struct nft_base_chain *basechain; +}; + +struct nft_bitwise_fast_expr { + u32 mask; + u32 xor; + u8 sreg; + u8 dreg; +}; + +struct nft_cmp_fast_expr { + u32 data; + u32 mask; + u8 sreg; + u8 len; + bool inv; +}; + +struct nft_cmp16_fast_expr { + struct nft_data data; + struct nft_data mask; + u8 sreg; + u8 len; + bool inv; +}; + +struct nft_payload { + enum nft_payload_bases base: 8; + u8 offset; + u8 len; + u8 dreg; +}; + +struct nft_jumpstack { + const struct nft_rule_dp *rule; +}; + +struct ping_table { + struct hlist_head hash[64]; + spinlock_t lock; +}; + +enum { + XFRM_LOOKUP_ICMP = 1, + XFRM_LOOKUP_QUEUE = 2, + XFRM_LOOKUP_KEEP_DST_REF = 4, +}; + +typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *, const struct inet6_skb_parm *); + +enum flowlabel_reflect { + FLOWLABEL_REFLECT_ESTABLISHED = 1, + FLOWLABEL_REFLECT_TCP_RESET = 2, + FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, +}; + +struct icmpv6_msg { + struct sk_buff *skb; + int offset; + uint8_t type; +}; + +struct icmp6_err { + int err; + int fatal; +}; + +enum { + PER_LINUX = 0, + PER_LINUX_32BIT = 8388608, + PER_LINUX_FDPIC = 524288, + PER_SVR4 = 68157441, + PER_SVR3 = 83886082, + PER_SCOSVR3 = 117440515, + PER_OSR5 = 100663299, + PER_WYSEV386 = 83886084, + PER_ISCR4 = 67108869, + PER_BSD = 6, + PER_SUNOS = 67108870, + PER_XENIX = 83886087, + PER_LINUX32 = 8, + PER_LINUX32_3GB = 134217736, + PER_IRIX32 = 67108873, + PER_IRIXN32 = 67108874, + PER_IRIX64 = 67108875, + PER_RISCOS = 12, + PER_SOLARIS = 67108877, + PER_UW7 = 68157454, + PER_OSF4 = 15, + PER_HPUX = 16, + PER_MASK = 255, +}; + +typedef long int (*syscall_fn_t)(const struct pt_regs *); + +typedef struct { + rwlock_t *lock; +} class_write_lock_irq_t; + +typedef struct task_struct *class_task_lock_t; + +struct ptrace_peeksiginfo_args { + __u64 off; + __u32 flags; + __s32 nr; +}; + +struct ptrace_syscall_info { + __u8 op; + __u8 pad[3]; + __u32 arch; + __u64 instruction_pointer; + __u64 stack_pointer; + union { + struct { + __u64 nr; + __u64 args[6]; + } entry; + struct { + __s64 rval; + __u8 is_error; + } exit; + struct { + __u64 nr; + __u64 args[6]; + __u32 ret_data; + } seccomp; + }; +}; + +struct ptrace_rseq_configuration { + __u64 rseq_abi_pointer; + __u32 rseq_abi_size; + __u32 signature; + __u32 flags; + __u32 pad; +}; + +typedef int (*task_call_f)(struct task_struct *, void *); + +struct trace_event_raw_rcu_utilization { + struct trace_entry ent; + const char *s; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period { + struct trace_entry ent; + const char *rcuname; + 
long int gp_seq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_future_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + long int gp_seq_req; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period_init { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + u8 level; + int grplo; + int grphi; + long unsigned int qsmask; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gpseq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_funnel_lock { + struct trace_entry ent; + const char *rcuname; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_preempt_task { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_unlock_preempted_task { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_quiescent_state_report { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + long unsigned int mask; + long unsigned int qsmask; + u8 level; + int grplo; + int grphi; + u8 gp_tasks; + char __data[0]; +}; + +struct trace_event_raw_rcu_fqs { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int cpu; + const char *qsevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_stall_warning { + struct trace_entry ent; + const char *rcuname; + const char *msg; + char __data[0]; +}; + +struct trace_event_raw_rcu_watching { + struct trace_entry ent; + const char *polarity; + long int oldnesting; + long int newnesting; + int counter; + char __data[0]; +}; + +struct trace_event_raw_rcu_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + long int qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_segcb_stats { + struct trace_entry ent; + const char *ctx; + long unsigned int gp_seq[4]; + long int seglen[4]; + char __data[0]; +}; + +struct trace_event_raw_rcu_kvfree_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + long unsigned int offset; + long int qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_start { + struct trace_entry ent; + const char *rcuname; + long int qlen; + long int blimit; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kvfree_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + long unsigned int offset; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kfree_bulk_callback { + struct trace_entry ent; + const char *rcuname; + long unsigned int nr_records; + void **p; + char __data[0]; +}; + +struct trace_event_raw_rcu_sr_normal { + struct trace_entry ent; + const char *rcuname; + void *rhp; + const char *srevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_end { + struct trace_entry ent; + const char *rcuname; + int callbacks_invoked; + char cb; + char nr; + char iit; + char risk; + char __data[0]; +}; + +struct trace_event_raw_rcu_torture_read { + struct trace_entry ent; + char rcutorturename[8]; + struct callback_head *rhp; + long unsigned int secs; + long unsigned int c_old; + long unsigned int c; + char __data[0]; +}; + 
+struct trace_event_raw_rcu_barrier { + struct trace_entry ent; + const char *rcuname; + const char *s; + int cpu; + int cnt; + long unsigned int done; + char __data[0]; +}; + +struct trace_event_data_offsets_rcu_utilization {}; + +struct trace_event_data_offsets_rcu_grace_period {}; + +struct trace_event_data_offsets_rcu_future_grace_period {}; + +struct trace_event_data_offsets_rcu_grace_period_init {}; + +struct trace_event_data_offsets_rcu_exp_grace_period {}; + +struct trace_event_data_offsets_rcu_exp_funnel_lock {}; + +struct trace_event_data_offsets_rcu_preempt_task {}; + +struct trace_event_data_offsets_rcu_unlock_preempted_task {}; + +struct trace_event_data_offsets_rcu_quiescent_state_report {}; + +struct trace_event_data_offsets_rcu_fqs {}; + +struct trace_event_data_offsets_rcu_stall_warning {}; + +struct trace_event_data_offsets_rcu_watching {}; + +struct trace_event_data_offsets_rcu_callback {}; + +struct trace_event_data_offsets_rcu_segcb_stats {}; + +struct trace_event_data_offsets_rcu_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_batch_start {}; + +struct trace_event_data_offsets_rcu_invoke_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kfree_bulk_callback {}; + +struct trace_event_data_offsets_rcu_sr_normal {}; + +struct trace_event_data_offsets_rcu_batch_end {}; + +struct trace_event_data_offsets_rcu_torture_read {}; + +struct trace_event_data_offsets_rcu_barrier {}; + +typedef void (*btf_trace_rcu_utilization)(void *, const char *); + +typedef void (*btf_trace_rcu_grace_period)(void *, const char *, long unsigned int, const char *); + +typedef void (*btf_trace_rcu_future_grace_period)(void *, const char *, long unsigned int, long unsigned int, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_grace_period_init)(void *, const char *, long unsigned int, u8, int, int, long unsigned int); + +typedef void (*btf_trace_rcu_exp_grace_period)(void *, const char *, long unsigned int, const char *); + +typedef void (*btf_trace_rcu_exp_funnel_lock)(void *, const char *, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_preempt_task)(void *, const char *, int, long unsigned int); + +typedef void (*btf_trace_rcu_unlock_preempted_task)(void *, const char *, long unsigned int, int); + +typedef void (*btf_trace_rcu_quiescent_state_report)(void *, const char *, long unsigned int, long unsigned int, long unsigned int, u8, int, int, int); + +typedef void (*btf_trace_rcu_fqs)(void *, const char *, long unsigned int, int, const char *); + +typedef void (*btf_trace_rcu_stall_warning)(void *, const char *, const char *); + +typedef void (*btf_trace_rcu_watching)(void *, const char *, long int, long int, int); + +typedef void (*btf_trace_rcu_callback)(void *, const char *, struct callback_head *, long int); + +typedef void (*btf_trace_rcu_segcb_stats)(void *, struct rcu_segcblist *, const char *); + +typedef void (*btf_trace_rcu_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int, long int); + +typedef void (*btf_trace_rcu_batch_start)(void *, const char *, long int, long int); + +typedef void (*btf_trace_rcu_invoke_callback)(void *, const char *, struct callback_head *); + +typedef void (*btf_trace_rcu_invoke_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int); + +typedef void (*btf_trace_rcu_invoke_kfree_bulk_callback)(void *, const char *, long unsigned int, void **); + +typedef void (*btf_trace_rcu_sr_normal)(void *, 
const char *, struct callback_head *, const char *); + +typedef void (*btf_trace_rcu_batch_end)(void *, const char *, int, char, char, char, char); + +typedef void (*btf_trace_rcu_torture_read)(void *, const char *, struct callback_head *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_rcu_barrier)(void *, const char *, const char *, int, int, long unsigned int); + +struct rcu_tasks; + +typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *); + +typedef void (*pregp_func_t)(struct list_head *); + +typedef void (*pertask_func_t)(struct task_struct *, struct list_head *); + +typedef void (*postscan_func_t)(struct list_head *); + +typedef void (*holdouts_func_t)(struct list_head *, bool, bool *); + +typedef void (*postgp_func_t)(struct rcu_tasks *); + +struct rcu_tasks_percpu; + +struct rcu_tasks { + struct rcuwait cbs_wait; + raw_spinlock_t cbs_gbl_lock; + struct mutex tasks_gp_mutex; + int gp_state; + int gp_sleep; + int init_fract; + long unsigned int gp_jiffies; + long unsigned int gp_start; + long unsigned int tasks_gp_seq; + long unsigned int n_ipis; + long unsigned int n_ipis_fails; + struct task_struct *kthread_ptr; + long unsigned int lazy_jiffies; + rcu_tasks_gp_func_t gp_func; + pregp_func_t pregp_func; + pertask_func_t pertask_func; + postscan_func_t postscan_func; + holdouts_func_t holdouts_func; + postgp_func_t postgp_func; + call_rcu_func_t call_func; + unsigned int wait_state; + struct rcu_tasks_percpu *rtpcpu; + struct rcu_tasks_percpu **rtpcp_array; + int percpu_enqueue_shift; + int percpu_enqueue_lim; + int percpu_dequeue_lim; + long unsigned int percpu_dequeue_gpseq; + struct mutex barrier_q_mutex; + atomic_t barrier_q_count; + struct completion barrier_q_completion; + long unsigned int barrier_q_seq; + long unsigned int barrier_q_start; + char *name; + char *kname; +}; + +struct rcu_tasks_percpu { + struct rcu_segcblist cblist; + raw_spinlock_t lock; + long unsigned int rtp_jiffies; + long unsigned int rtp_n_lock_retries; + struct timer_list lazy_timer; + unsigned int urgent_gp; + struct work_struct rtp_work; + struct irq_work rtp_irq_work; + struct callback_head barrier_q_head; + struct list_head rtp_blkd_tasks; + struct list_head rtp_exit_list; + int cpu; + int index; + struct rcu_tasks *rtpp; +}; + +struct trc_stall_chk_rdr { + int nesting; + int ipi_to_cpu; + u8 needqs; +}; + +enum event_trigger_type { + ETT_NONE = 0, + ETT_TRACE_ONOFF = 1, + ETT_SNAPSHOT = 2, + ETT_STACKTRACE = 4, + ETT_EVENT_ENABLE = 8, + ETT_EVENT_HIST = 16, + ETT_HIST_ENABLE = 32, + ETT_EVENT_EPROBE = 64, +}; + +typedef bool (*cond_update_fn_t)(struct trace_array *, void *); + +struct event_trigger_ops; + +struct event_command; + +struct event_trigger_data { + long unsigned int count; + int ref; + int flags; + struct event_trigger_ops *ops; + struct event_command *cmd_ops; + struct event_filter *filter; + char *filter_str; + void *private_data; + bool paused; + bool paused_tmp; + struct list_head list; + char *name; + struct list_head named_list; + struct event_trigger_data *named_data; +}; + +struct event_trigger_ops { + void (*trigger)(struct event_trigger_data *, struct trace_buffer *, void *, struct ring_buffer_event *); + int (*init)(struct event_trigger_data *); + void (*free)(struct event_trigger_data *); + int (*print)(struct seq_file *, struct event_trigger_data *); +}; + +struct event_command { + struct list_head list; + char *name; + enum event_trigger_type trigger_type; + int flags; + int (*parse)(struct event_command *, struct trace_event_file *, 
char *, char *, char *); + int (*reg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg_all)(struct trace_event_file *); + int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *); + struct event_trigger_ops * (*get_trigger_ops)(char *, char *); +}; + +struct enable_trigger_data { + struct trace_event_file *file; + bool enable; + bool hist; +}; + +enum event_command_flags { + EVENT_CMD_FL_POST_TRIGGER = 1, + EVENT_CMD_FL_NEEDS_REC = 2, +}; + +enum { + HIST_ERR_NONE = 0, + HIST_ERR_DUPLICATE_VAR = 1, + HIST_ERR_VAR_NOT_UNIQUE = 2, + HIST_ERR_TOO_MANY_VARS = 3, + HIST_ERR_MALFORMED_ASSIGNMENT = 4, + HIST_ERR_NAMED_MISMATCH = 5, + HIST_ERR_TRIGGER_EEXIST = 6, + HIST_ERR_TRIGGER_ENOENT_CLEAR = 7, + HIST_ERR_SET_CLOCK_FAIL = 8, + HIST_ERR_BAD_FIELD_MODIFIER = 9, + HIST_ERR_TOO_MANY_SUBEXPR = 10, + HIST_ERR_TIMESTAMP_MISMATCH = 11, + HIST_ERR_TOO_MANY_FIELD_VARS = 12, + HIST_ERR_EVENT_FILE_NOT_FOUND = 13, + HIST_ERR_HIST_NOT_FOUND = 14, + HIST_ERR_HIST_CREATE_FAIL = 15, + HIST_ERR_SYNTH_VAR_NOT_FOUND = 16, + HIST_ERR_SYNTH_EVENT_NOT_FOUND = 17, + HIST_ERR_SYNTH_TYPE_MISMATCH = 18, + HIST_ERR_SYNTH_COUNT_MISMATCH = 19, + HIST_ERR_FIELD_VAR_PARSE_FAIL = 20, + HIST_ERR_VAR_CREATE_FIND_FAIL = 21, + HIST_ERR_ONX_NOT_VAR = 22, + HIST_ERR_ONX_VAR_NOT_FOUND = 23, + HIST_ERR_ONX_VAR_CREATE_FAIL = 24, + HIST_ERR_FIELD_VAR_CREATE_FAIL = 25, + HIST_ERR_TOO_MANY_PARAMS = 26, + HIST_ERR_PARAM_NOT_FOUND = 27, + HIST_ERR_INVALID_PARAM = 28, + HIST_ERR_ACTION_NOT_FOUND = 29, + HIST_ERR_NO_SAVE_PARAMS = 30, + HIST_ERR_TOO_MANY_SAVE_ACTIONS = 31, + HIST_ERR_ACTION_MISMATCH = 32, + HIST_ERR_NO_CLOSING_PAREN = 33, + HIST_ERR_SUBSYS_NOT_FOUND = 34, + HIST_ERR_INVALID_SUBSYS_EVENT = 35, + HIST_ERR_INVALID_REF_KEY = 36, + HIST_ERR_VAR_NOT_FOUND = 37, + HIST_ERR_FIELD_NOT_FOUND = 38, + HIST_ERR_EMPTY_ASSIGNMENT = 39, + HIST_ERR_INVALID_SORT_MODIFIER = 40, + HIST_ERR_EMPTY_SORT_FIELD = 41, + HIST_ERR_TOO_MANY_SORT_FIELDS = 42, + HIST_ERR_INVALID_SORT_FIELD = 43, + HIST_ERR_INVALID_STR_OPERAND = 44, + HIST_ERR_EXPECT_NUMBER = 45, + HIST_ERR_UNARY_MINUS_SUBEXPR = 46, + HIST_ERR_DIVISION_BY_ZERO = 47, + HIST_ERR_NEED_NOHC_VAL = 48, +}; + +enum hist_field_fn { + HIST_FIELD_FN_NOP = 0, + HIST_FIELD_FN_VAR_REF = 1, + HIST_FIELD_FN_COUNTER = 2, + HIST_FIELD_FN_CONST = 3, + HIST_FIELD_FN_LOG2 = 4, + HIST_FIELD_FN_BUCKET = 5, + HIST_FIELD_FN_TIMESTAMP = 6, + HIST_FIELD_FN_CPU = 7, + HIST_FIELD_FN_STRING = 8, + HIST_FIELD_FN_DYNSTRING = 9, + HIST_FIELD_FN_RELDYNSTRING = 10, + HIST_FIELD_FN_PSTRING = 11, + HIST_FIELD_FN_S64 = 12, + HIST_FIELD_FN_U64 = 13, + HIST_FIELD_FN_S32 = 14, + HIST_FIELD_FN_U32 = 15, + HIST_FIELD_FN_S16 = 16, + HIST_FIELD_FN_U16 = 17, + HIST_FIELD_FN_S8 = 18, + HIST_FIELD_FN_U8 = 19, + HIST_FIELD_FN_UMINUS = 20, + HIST_FIELD_FN_MINUS = 21, + HIST_FIELD_FN_PLUS = 22, + HIST_FIELD_FN_DIV = 23, + HIST_FIELD_FN_MULT = 24, + HIST_FIELD_FN_DIV_POWER2 = 25, + HIST_FIELD_FN_DIV_NOT_POWER2 = 26, + HIST_FIELD_FN_DIV_MULT_SHIFT = 27, + HIST_FIELD_FN_EXECNAME = 28, + HIST_FIELD_FN_STACK = 29, +}; + +struct hist_trigger_data; + +struct hist_var { + char *name; + struct hist_trigger_data *hist_data; + unsigned int idx; +}; + +enum field_op_id { + FIELD_OP_NONE = 0, + FIELD_OP_PLUS = 1, + FIELD_OP_MINUS = 2, + FIELD_OP_UNARY_MINUS = 3, + FIELD_OP_DIV = 4, + FIELD_OP_MULT = 5, +}; + +struct hist_field { + struct ftrace_event_field *field; + long unsigned int flags; + long unsigned int 
buckets; + const char *type; + struct hist_field *operands[2]; + struct hist_trigger_data *hist_data; + enum hist_field_fn fn_num; + unsigned int ref; + unsigned int size; + unsigned int offset; + unsigned int is_signed; + struct hist_var var; + enum field_op_id operator; + char *system; + char *event_name; + char *name; + unsigned int var_ref_idx; + bool read_once; + unsigned int var_str_idx; + u64 constant; + u64 div_multiplier; +}; + +struct hist_trigger_attrs; + +struct action_data; + +struct field_var; + +struct field_var_hist; + +struct hist_trigger_data { + struct hist_field *fields[22]; + unsigned int n_vals; + unsigned int n_keys; + unsigned int n_fields; + unsigned int n_vars; + unsigned int n_var_str; + unsigned int key_size; + struct tracing_map_sort_key sort_keys[2]; + unsigned int n_sort_keys; + struct trace_event_file *event_file; + struct hist_trigger_attrs *attrs; + struct tracing_map *map; + bool enable_timestamps; + bool remove; + struct hist_field *var_refs[16]; + unsigned int n_var_refs; + struct action_data *actions[8]; + unsigned int n_actions; + struct field_var *field_vars[64]; + unsigned int n_field_vars; + unsigned int n_field_var_str; + struct field_var_hist *field_var_hists[64]; + unsigned int n_field_var_hists; + struct field_var *save_vars[64]; + unsigned int n_save_vars; + unsigned int n_save_var_str; +}; + +enum hist_field_flags { + HIST_FIELD_FL_HITCOUNT = 1, + HIST_FIELD_FL_KEY = 2, + HIST_FIELD_FL_STRING = 4, + HIST_FIELD_FL_HEX = 8, + HIST_FIELD_FL_SYM = 16, + HIST_FIELD_FL_SYM_OFFSET = 32, + HIST_FIELD_FL_EXECNAME = 64, + HIST_FIELD_FL_SYSCALL = 128, + HIST_FIELD_FL_STACKTRACE = 256, + HIST_FIELD_FL_LOG2 = 512, + HIST_FIELD_FL_TIMESTAMP = 1024, + HIST_FIELD_FL_TIMESTAMP_USECS = 2048, + HIST_FIELD_FL_VAR = 4096, + HIST_FIELD_FL_EXPR = 8192, + HIST_FIELD_FL_VAR_REF = 16384, + HIST_FIELD_FL_CPU = 32768, + HIST_FIELD_FL_ALIAS = 65536, + HIST_FIELD_FL_BUCKET = 131072, + HIST_FIELD_FL_CONST = 262144, + HIST_FIELD_FL_PERCENT = 524288, + HIST_FIELD_FL_GRAPH = 1048576, +}; + +struct var_defs { + unsigned int n_vars; + char *name[16]; + char *expr[16]; +}; + +struct hist_trigger_attrs { + char *keys_str; + char *vals_str; + char *sort_key_str; + char *name; + char *clock; + bool pause; + bool cont; + bool clear; + bool ts_in_usecs; + bool no_hitcount; + unsigned int map_bits; + char *assignment_str[16]; + unsigned int n_assignments; + char *action_str[8]; + unsigned int n_actions; + struct var_defs var_defs; +}; + +struct field_var { + struct hist_field *var; + struct hist_field *val; +}; + +struct field_var_hist { + struct hist_trigger_data *hist_data; + char *cmd; +}; + +enum handler_id { + HANDLER_ONMATCH = 1, + HANDLER_ONMAX = 2, + HANDLER_ONCHANGE = 3, +}; + +enum action_id { + ACTION_SAVE = 1, + ACTION_TRACE = 2, + ACTION_SNAPSHOT = 3, +}; + +typedef void (*action_fn_t)(struct hist_trigger_data *, struct tracing_map_elt *, struct trace_buffer *, void *, struct ring_buffer_event *, void *, struct action_data *, u64 *); + +typedef bool (*check_track_val_fn_t)(u64, u64); + +struct action_data { + enum handler_id handler; + enum action_id action; + char *action_name; + action_fn_t fn; + unsigned int n_params; + char *params[64]; + unsigned int var_ref_idx[64]; + struct synth_event *synth_event; + bool use_trace_keyword; + char *synth_event_name; + union { + struct { + char *event; + char *event_system; + } match_data; + struct { + char *var_str; + struct hist_field *var_ref; + struct hist_field *track_var; + check_track_val_fn_t check_val; + action_fn_t 
save_data; + } track_data; + }; +}; + +struct track_data { + u64 track_val; + bool updated; + unsigned int key_len; + void *key; + struct tracing_map_elt elt; + struct action_data *action_data; + struct hist_trigger_data *hist_data; +}; + +struct hist_elt_data { + char *comm; + u64 *var_ref_vals; + char **field_var_str; + int n_field_var_str; +}; + +typedef void (*synth_probe_func_t)(void *, u64 *, unsigned int *); + +struct hist_var_data { + struct list_head list; + struct hist_trigger_data *hist_data; +}; + +struct hist_val_stat { + u64 max; + u64 total; +}; + +struct bpf_lpm_trie_key_hdr { + __u32 prefixlen; +}; + +struct bpf_lpm_trie_key_u8 { + union { + struct bpf_lpm_trie_key_hdr hdr; + __u32 prefixlen; + }; + __u8 data[0]; +}; + +struct lpm_trie_node { + struct callback_head rcu; + struct lpm_trie_node *child[2]; + u32 prefixlen; + u32 flags; + u8 data[0]; +}; + +struct lpm_trie { + struct bpf_map map; + struct lpm_trie_node *root; + size_t n_entries; + size_t max_prefixlen; + size_t data_size; + spinlock_t lock; +}; + +enum btf_field_iter_kind { + BTF_FIELD_ITER_IDS = 0, + BTF_FIELD_ITER_STRS = 1, +}; + +struct btf_field_desc { + int t_off_cnt; + int t_offs[2]; + int m_sz; + int m_off_cnt; + int m_offs[1]; +}; + +struct btf_field_iter { + struct btf_field_desc desc; + void *p; + int m_idx; + int off_idx; + int vlen; +}; + +struct btf_relocate { + struct btf *btf; + const struct btf *base_btf; + const struct btf *dist_base_btf; + unsigned int nr_base_types; + unsigned int nr_split_types; + unsigned int nr_dist_base_types; + int dist_str_len; + int base_str_len; + __u32 *id_map; + __u32 *str_map; +}; + +struct btf_name_info { + const char *name; + bool needs_size: 1; + unsigned int size: 31; + __u32 id; +}; + +enum { + FOLL_TOUCH = 65536, + FOLL_TRIED = 131072, + FOLL_REMOTE = 262144, + FOLL_PIN = 524288, + FOLL_FAST_ONLY = 1048576, + FOLL_UNLOCKABLE = 2097152, + FOLL_MADV_POPULATE = 4194304, +}; + +struct follow_page_context { + struct dev_pagemap *pgmap; + unsigned int page_mask; +}; + +struct pages_or_folios { + union { + struct page **pages; + struct folio **folios; + void **entries; + }; + bool has_folios; + long int nr_entries; +}; + +union thread_union { + struct task_struct task; + long unsigned int stack[4096]; +}; + +struct kasan_report_info { + enum kasan_report_type type; + const void *access_addr; + size_t access_size; + bool is_write; + long unsigned int ip; + const void *first_bad_addr; + struct kmem_cache *cache; + void *object; + size_t alloc_size; + const char *bug_type; + struct kasan_track alloc_track; + struct kasan_track free_track; +}; + +enum kasan_arg_fault { + KASAN_ARG_FAULT_DEFAULT = 0, + KASAN_ARG_FAULT_REPORT = 1, + KASAN_ARG_FAULT_PANIC = 2, + KASAN_ARG_FAULT_PANIC_ON_WRITE = 3, +}; + +struct swap_cgroup_ctrl { + struct page **map; + long unsigned int length; + spinlock_t lock; +}; + +struct swap_cgroup { + short unsigned int id; +}; + +struct mpage_readpage_args { + struct bio *bio; + struct folio *folio; + unsigned int nr_pages; + bool is_readahead; + sector_t last_block_in_bio; + struct buffer_head map_bh; + long unsigned int first_logical_block; + get_block_t *get_block; +}; + +struct mpage_data { + struct bio *bio; + sector_t last_block_in_bio; + get_block_t *get_block; +}; + +struct iomap_swapfile_info { + struct iomap iomap; + struct swap_info_struct *sis; + uint64_t lowest_ppage; + uint64_t highest_ppage; + long unsigned int nr_pages; + int nr_extents; + struct file *file; +}; + +enum bio_post_read_step { + STEP_INITIAL = 0, + STEP_DECRYPT = 1, 
+ STEP_VERITY = 2, + STEP_MAX = 3, +}; + +struct bio_post_read_ctx { + struct bio *bio; + struct work_struct work; + unsigned int cur_step; + unsigned int enabled_steps; +}; + +struct fatent_ra { + sector_t cur; + sector_t limit; + unsigned int ra_blocks; + sector_t ra_advance; + sector_t ra_next; + sector_t ra_limit; +}; + +struct btrfs_failed_bio { + struct btrfs_bio *bbio; + int num_copies; + atomic_t repair_count; +}; + +struct async_submit_bio { + struct btrfs_bio *bbio; + struct btrfs_io_context *bioc; + struct btrfs_io_stripe smap; + int mirror_num; + struct btrfs_work work; +}; + +struct sha3_state { + u64 st[25]; + unsigned int rsiz; + unsigned int rsizw; + unsigned int partial; + u8 buf[144]; +}; + +struct sbq_wait { + struct sbitmap_queue *sbq; + struct wait_queue_entry wait; +}; + +struct trace_event_raw_kyber_latency { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char type[8]; + u8 percentile; + u8 numerator; + u8 denominator; + unsigned int samples; + char __data[0]; +}; + +struct trace_event_raw_kyber_adjust { + struct trace_entry ent; + dev_t dev; + char domain[16]; + unsigned int depth; + char __data[0]; +}; + +struct trace_event_raw_kyber_throttled { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_kyber_latency {}; + +struct trace_event_data_offsets_kyber_adjust {}; + +struct trace_event_data_offsets_kyber_throttled {}; + +typedef void (*btf_trace_kyber_latency)(void *, dev_t, const char *, const char *, unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_kyber_adjust)(void *, dev_t, const char *, unsigned int); + +typedef void (*btf_trace_kyber_throttled)(void *, dev_t, const char *); + +enum { + KYBER_READ = 0, + KYBER_WRITE = 1, + KYBER_DISCARD = 2, + KYBER_OTHER = 3, + KYBER_NUM_DOMAINS = 4, +}; + +enum { + KYBER_ASYNC_PERCENT = 75, +}; + +enum { + KYBER_LATENCY_SHIFT = 2, + KYBER_GOOD_BUCKETS = 4, + KYBER_LATENCY_BUCKETS = 8, +}; + +enum { + KYBER_TOTAL_LATENCY = 0, + KYBER_IO_LATENCY = 1, +}; + +struct kyber_cpu_latency { + atomic_t buckets[48]; +}; + +struct kyber_ctx_queue { + spinlock_t lock; + struct list_head rq_list[4]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct kyber_queue_data { + struct request_queue *q; + dev_t dev; + struct sbitmap_queue domain_tokens[4]; + unsigned int async_depth; + struct kyber_cpu_latency *cpu_latency; + struct timer_list timer; + unsigned int latency_buckets[48]; + long unsigned int latency_timeout[3]; + int domain_p99[3]; + u64 latency_targets[3]; +}; + +struct kyber_hctx_data { + spinlock_t lock; + struct list_head rqs[4]; + unsigned int cur_domain; + unsigned int batching; + struct kyber_ctx_queue *kcqs; + struct sbitmap kcq_map[4]; + struct sbq_wait domain_wait[4]; + struct sbq_wait_state *domain_ws[4]; + atomic_t wait_index[4]; +}; + +struct flush_kcq_data { + struct kyber_hctx_data *khd; + unsigned int sched_domain; + struct list_head *list; +}; + +struct dim_cq_moder { + u16 usec; + u16 pkts; + u16 comps; + u8 cq_period_mode; + struct callback_head rcu; +}; + +struct dim_irq_moder { + u8 profile_flags; + u8 coal_flags; + u8 dim_rx_mode; + u8 dim_tx_mode; + struct dim_cq_moder *rx_profile; + struct dim_cq_moder *tx_profile; + void (*rx_dim_work)(struct work_struct *); + void (*tx_dim_work)(struct work_struct *); +}; + +enum dim_cq_period_mode { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 1, + DIM_CQ_PERIOD_NUM_MODES = 2, +}; + 
+enum dim_state { + DIM_START_MEASURE = 0, + DIM_MEASURE_IN_PROGRESS = 1, + DIM_APPLY_NEW_PROFILE = 2, +}; + +enum dim_stats_state { + DIM_STATS_WORSE = 0, + DIM_STATS_SAME = 1, + DIM_STATS_BETTER = 2, +}; + +enum dim_step_result { + DIM_STEPPED = 0, + DIM_TOO_TIRED = 1, + DIM_ON_EDGE = 2, +}; + +typedef unsigned int u_int; + +typedef void * (*devcon_match_fn_t)(const struct fwnode_handle *, const char *, void *); + +typedef void (*activate_complete)(void *, int); + +struct scsi_device_handler { + struct list_head list; + struct module *module; + const char *name; + enum scsi_disposition (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); + int (*attach)(struct scsi_device *); + void (*detach)(struct scsi_device *); + int (*activate)(struct scsi_device *, activate_complete, void *); + blk_status_t (*prep_fn)(struct scsi_device *, struct request *); + int (*set_params)(struct scsi_device *, const char *); + void (*rescan)(struct scsi_device *); +}; + +struct scsi_eh_save { + int result; + unsigned int resid_len; + int eh_eflags; + enum dma_data_direction data_direction; + unsigned int underflow; + unsigned char cmd_len; + unsigned char prot_op; + unsigned char cmnd[32]; + struct scsi_data_buffer sdb; + struct scatterlist sense_sgl; +}; + +enum scsi_ml_status { + SCSIML_STAT_OK = 0, + SCSIML_STAT_RESV_CONFLICT = 1, + SCSIML_STAT_NOSPC = 2, + SCSIML_STAT_MED_ERROR = 3, + SCSIML_STAT_TGT_FAILURE = 4, + SCSIML_STAT_DL_TIMEOUT = 5, +}; + +struct mdio_board_entry { + struct list_head list; + struct mdio_board_info board_info; +}; + +struct usb_cdc_header_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdCDC; +} __attribute__((packed)); + +struct usb_cdc_call_mgmt_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; + __u8 bDataInterface; +}; + +struct usb_cdc_acm_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; +}; + +struct usb_cdc_union_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bMasterInterface0; + __u8 bSlaveInterface0; +}; + +struct usb_cdc_country_functional_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 iCountryCodeRelDate; + __le16 wCountyCode0; +}; + +struct usb_cdc_network_terminal_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bEntityId; + __u8 iName; + __u8 bChannelIndex; + __u8 bPhysicalInterface; +}; + +struct usb_cdc_ether_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 iMACAddress; + __le32 bmEthernetStatistics; + __le16 wMaxSegmentSize; + __le16 wNumberMCFilters; + __u8 bNumberPowerFilters; +} __attribute__((packed)); + +struct usb_cdc_dmm_desc { + __u8 bFunctionLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u16 bcdVersion; + __le16 wMaxCommand; +} __attribute__((packed)); + +struct usb_cdc_mdlm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; + __u8 bGUID[16]; +} __attribute__((packed)); + +struct usb_cdc_mdlm_detail_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bGuidDescriptorType; + __u8 bDetailData[0]; +}; + +struct usb_cdc_obex_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; +} __attribute__((packed)); + +struct usb_cdc_ncm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdNcmVersion; + __u8 bmNetworkCapabilities; 
+} __attribute__((packed)); + +struct usb_cdc_mbim_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMVersion; + __le16 wMaxControlMessage; + __u8 bNumberFilters; + __u8 bMaxFilterSize; + __le16 wMaxSegmentSize; + __u8 bmNetworkCapabilities; +} __attribute__((packed)); + +struct usb_cdc_mbim_extended_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMExtendedVersion; + __u8 bMaxOutstandingCommandMessages; + __le16 wMTU; +} __attribute__((packed)); + +struct usb_cdc_parsed_header { + struct usb_cdc_union_desc *usb_cdc_union_desc; + struct usb_cdc_header_desc *usb_cdc_header_desc; + struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; + struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; + struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; + struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; + struct usb_cdc_ether_desc *usb_cdc_ether_desc; + struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; + struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; + struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; + struct usb_cdc_obex_desc *usb_cdc_obex_desc; + struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; + struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; + struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; + bool phonet_magic_present; +}; + +struct api_context { + struct completion done; + int status; +}; + +struct set_config_request { + struct usb_device *udev; + int config; + struct work_struct work; + struct list_head node; +}; + +enum sharp_state { + STATE_INACTIVE___6 = 0, + STATE_BIT_PULSE___4 = 1, + STATE_BIT_SPACE___4 = 2, + STATE_TRAILER_PULSE___3 = 3, + STATE_ECHO_SPACE = 4, + STATE_TRAILER_SPACE___3 = 5, +}; + +struct mdu_array_info_s { + int major_version; + int minor_version; + int patch_version; + unsigned int ctime; + int level; + int size; + int nr_disks; + int raid_disks; + int md_minor; + int not_persistent; + unsigned int utime; + int state; + int active_disks; + int working_disks; + int failed_disks; + int spare_disks; + int layout; + int chunk_size; +}; + +struct mdu_disk_info_s { + int number; + int major; + int minor; + int raid_disk; + int state; +}; + +struct md_setup_args { + int minor; + int partitioned; + int level; + int chunk; + char *device_names; +}; + +struct of_phandle_iterator { + const char *cells_name; + int cell_count; + const struct device_node *parent; + const __be32 *list_end; + const __be32 *phandle_end; + const __be32 *cur; + uint32_t cur_count; + phandle phandle; + struct device_node *node; +}; + +struct of_intc_desc { + struct list_head list; + of_irq_init_cb_t irq_init_cb; + struct device_node *dev; + struct device_node *interrupt_parent; +}; + +struct nda_cacheinfo { + __u32 ndm_confirmed; + __u32 ndm_used; + __u32 ndm_updated; + __u32 ndm_refcnt; +}; + +struct ndt_stats { + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; + __u64 ndts_table_fulls; +}; + +enum { + NDTPA_UNSPEC = 0, + NDTPA_IFINDEX = 1, + NDTPA_REFCNT = 2, + NDTPA_REACHABLE_TIME = 3, + NDTPA_BASE_REACHABLE_TIME = 4, + NDTPA_RETRANS_TIME = 5, + NDTPA_GC_STALETIME = 6, + NDTPA_DELAY_PROBE_TIME = 7, + NDTPA_QUEUE_LEN = 8, + NDTPA_APP_PROBES = 9, + NDTPA_UCAST_PROBES = 10, + NDTPA_MCAST_PROBES = 11, + NDTPA_ANYCAST_DELAY = 12, + NDTPA_PROXY_DELAY = 13, + 
NDTPA_PROXY_QLEN = 14, + NDTPA_LOCKTIME = 15, + NDTPA_QUEUE_LENBYTES = 16, + NDTPA_MCAST_REPROBES = 17, + NDTPA_PAD = 18, + NDTPA_INTERVAL_PROBE_TIME_MS = 19, + __NDTPA_MAX = 20, +}; + +struct ndtmsg { + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config { + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; + __u32 ndtc_last_rand; + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +enum { + NDTA_UNSPEC = 0, + NDTA_NAME = 1, + NDTA_THRESH1 = 2, + NDTA_THRESH2 = 3, + NDTA_THRESH3 = 4, + NDTA_CONFIG = 5, + NDTA_PARMS = 6, + NDTA_STATS = 7, + NDTA_GC_INTERVAL = 8, + NDTA_PAD = 9, + __NDTA_MAX = 10, +}; + +struct neigh_dump_filter { + int master_idx; + int dev_idx; +}; + +struct neigh_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table neigh_vars[21]; +}; + +enum netlink_attribute_type { + NL_ATTR_TYPE_INVALID = 0, + NL_ATTR_TYPE_FLAG = 1, + NL_ATTR_TYPE_U8 = 2, + NL_ATTR_TYPE_U16 = 3, + NL_ATTR_TYPE_U32 = 4, + NL_ATTR_TYPE_U64 = 5, + NL_ATTR_TYPE_S8 = 6, + NL_ATTR_TYPE_S16 = 7, + NL_ATTR_TYPE_S32 = 8, + NL_ATTR_TYPE_S64 = 9, + NL_ATTR_TYPE_BINARY = 10, + NL_ATTR_TYPE_STRING = 11, + NL_ATTR_TYPE_NUL_STRING = 12, + NL_ATTR_TYPE_NESTED = 13, + NL_ATTR_TYPE_NESTED_ARRAY = 14, + NL_ATTR_TYPE_BITFIELD32 = 15, + NL_ATTR_TYPE_SINT = 16, + NL_ATTR_TYPE_UINT = 17, +}; + +enum netlink_policy_type_attr { + NL_POLICY_TYPE_ATTR_UNSPEC = 0, + NL_POLICY_TYPE_ATTR_TYPE = 1, + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2, + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3, + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4, + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5, + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6, + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7, + NL_POLICY_TYPE_ATTR_POLICY_IDX = 8, + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9, + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10, + NL_POLICY_TYPE_ATTR_PAD = 11, + NL_POLICY_TYPE_ATTR_MASK = 12, + __NL_POLICY_TYPE_ATTR_MAX = 13, + NL_POLICY_TYPE_ATTR_MAX = 12, +}; + +struct netlink_policy_dump_state { + unsigned int policy_idx; + unsigned int attr_idx; + unsigned int n_alloc; + struct { + const struct nla_policy *policy; + unsigned int maxtype; + } policies[0]; +}; + +struct bpf_dummy_ops_state { + int val; +}; + +struct bpf_dummy_ops { + int (*test_1)(struct bpf_dummy_ops_state *); + int (*test_2)(struct bpf_dummy_ops_state *, int, short unsigned int, char, long unsigned int); + int (*test_sleepable)(struct bpf_dummy_ops_state *); +}; + +typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *, ...); + +struct bpf_dummy_ops_test_args { + u64 args[12]; + struct bpf_dummy_ops_state state; +}; + +struct bpf_struct_ops_bpf_dummy_ops { + struct bpf_struct_ops_common_value common; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct bpf_dummy_ops data; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct nf_hook_entries_rcu_head { + struct callback_head head; + void *allocation; +}; + +struct ip_esp_hdr { + __be32 spi; + __be32 seq_no; + __u8 enc_data[0]; +}; + +struct arppayload { + unsigned char mac_src[6]; + unsigned char ip_src[4]; + unsigned char mac_dst[6]; + unsigned char ip_dst[4]; +}; + +struct nft_pipapo_elem; + +union nft_pipapo_map_bucket { + struct { + u32 to; + u32 n; + }; + struct nft_pipapo_elem *e; +}; + +struct nft_pipapo_elem { + struct nft_elem_priv priv; + struct nft_set_ext ext; +}; + +struct nft_pipapo_field { + unsigned int rules; + unsigned int bsize; + unsigned int rules_alloc; + u8 
groups; + u8 bb; + long unsigned int *lt; + union nft_pipapo_map_bucket *mt; +}; + +struct nft_pipapo_scratch { + u8 map_index; + u32 align_off; + long unsigned int map[0]; +}; + +struct nft_pipapo_match { + u8 field_count; + unsigned int bsize_max; + struct nft_pipapo_scratch **scratch; + struct callback_head rcu; + struct nft_pipapo_field f[0]; +}; + +struct nft_pipapo { + struct nft_pipapo_match *match; + struct nft_pipapo_match *clone; + int width; + long unsigned int last_gc; +}; + +struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; + int ipv6mr_ifindex; +}; + +struct group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct compat_group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +} __attribute__((packed)); + +struct compat_group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +} __attribute__((packed)); + +struct compat_group_filter { + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + } __attribute__((packed)); + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + } __attribute__((packed)); + }; +}; + +struct ip6_mtuinfo { + struct sockaddr_in6 ip6m_addr; + __u32 ip6m_mtu; +}; + +struct kvm_regs { + struct user_pt_regs regs; + __u64 sp_el1; + __u64 elr_el1; + __u64 spsr[5]; + long: 64; + struct user_fpsimd_state fp_regs; +}; + +struct kvm_sregs {}; + +struct kvm_fpu {}; + +struct kvm_vcpu_events { + struct { + __u8 serror_pending; + __u8 serror_has_esr; + __u8 ext_dabt_pending; + __u8 pad[5]; + __u64 serror_esr; + } exception; + __u32 reserved[12]; +}; + +struct kvm_arm_copy_mte_tags { + __u64 guest_ipa; + __u64 length; + void *addr; + __u64 flags; + __u64 reserved[2]; +}; + +struct kvm_translation { + __u64 linear_address; + __u64 physical_address; + __u8 valid; + __u8 writeable; + __u8 usermode; + __u8 pad[5]; +}; + +struct kvm_guest_debug { + __u32 control; + __u32 pad; + struct kvm_guest_debug_arch arch; +}; + +struct kvm_stats_header { + __u32 flags; + __u32 name_size; + __u32 num_desc; + __u32 id_offset; + __u32 desc_offset; + __u32 data_offset; +}; + +struct sve_state_reg_region { + unsigned int koffset; + unsigned int klen; + unsigned int upad; +}; + +struct kvm_nvhe_init_params { + long unsigned int mair_el2; + long unsigned int tcr_el2; + long unsigned int tpidr_el2; + long unsigned int stack_hyp_va; + long unsigned int stack_pa; + phys_addr_t pgd_pa; + long unsigned int hcr_el2; + long unsigned int vttbr; + long unsigned int vtcr; + long unsigned int tmp; +}; + +struct kvm_exception_table_entry { + int insn; + int fixup; +}; + +typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *); + +struct prb_data_block { + long unsigned int id; + char data[0]; +}; + +enum audit_ntp_type { + AUDIT_NTP_OFFSET = 0, + AUDIT_NTP_FREQ = 1, + AUDIT_NTP_STATUS = 2, + AUDIT_NTP_TAI = 3, + AUDIT_NTP_TICK = 4, + AUDIT_NTP_ADJUST = 5, + AUDIT_NTP_NVALS = 6, +}; + +struct ntp_data { + long unsigned int tick_usec; + u64 tick_length; + u64 tick_length_base; + int time_state; + int time_status; + s64 time_offset; + long int time_constant; + long int time_maxerror; + long int time_esterror; + s64 time_freq; + time64_t time_reftime; + long int time_adjust; + s64 ntp_tick_adj; + time64_t 
ntp_next_leap_sec; +}; + +enum tick_device_mode { + TICKDEV_MODE_PERIODIC = 0, + TICKDEV_MODE_ONESHOT = 1, +}; + +struct tick_device { + struct clock_event_device *evtdev; + enum tick_device_mode mode; +}; + +struct timer_list_iter { + int cpu; + bool second_pass; + u64 now; +}; + +struct rt_waiter_node { + struct rb_node entry; + int prio; + u64 deadline; +}; + +struct rt_mutex_waiter { + struct rt_waiter_node tree; + struct rt_waiter_node pi_tree; + struct task_struct *task; + struct rt_mutex_base *lock; + unsigned int wake_state; + struct ww_acquire_ctx *ww_ctx; +}; + +enum { + Q_REQUEUE_PI_NONE = 0, + Q_REQUEUE_PI_IGNORE = 1, + Q_REQUEUE_PI_IN_PROGRESS = 2, + Q_REQUEUE_PI_WAIT = 3, + Q_REQUEUE_PI_DONE = 4, + Q_REQUEUE_PI_LOCKED = 5, +}; + +enum pidcg_event { + PIDCG_MAX = 0, + PIDCG_FORKFAIL = 1, + NR_PIDCG_EVENTS = 2, +}; + +struct pids_cgroup { + struct cgroup_subsys_state css; + atomic64_t counter; + atomic64_t limit; + int64_t watermark; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + atomic64_t events[2]; + atomic64_t events_local[2]; +}; + +enum die_val { + DIE_UNUSED = 0, + DIE_OOPS = 1, +}; + +struct trace_export { + struct trace_export *next; + void (*write)(struct trace_export *, const void *, unsigned int); + int flags; +}; + +enum trace_iter_flags { + TRACE_FILE_LAT_FMT = 1, + TRACE_FILE_ANNOTATE = 2, + TRACE_FILE_TIME_IN_NS = 4, +}; + +struct stack_entry { + struct trace_entry ent; + int size; + long unsigned int caller[0]; +}; + +struct userstack_entry { + struct trace_entry ent; + unsigned int tgid; + long unsigned int caller[8]; +}; + +struct bprint_entry { + struct trace_entry ent; + long unsigned int ip; + const char *fmt; + u32 buf[0]; +}; + +struct print_entry { + struct trace_entry ent; + long unsigned int ip; + char buf[0]; +}; + +struct raw_data_entry { + struct trace_entry ent; + unsigned int id; + char buf[0]; +}; + +struct bputs_entry { + struct trace_entry ent; + long unsigned int ip; + const char *str; +}; + +struct func_repeats_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; + u16 count; + u16 top_delta_ts; + u32 bottom_delta_ts; +}; + +struct trace_min_max_param { + struct mutex *lock; + u64 *val; + u64 *min; + u64 *max; +}; + +struct pipe_wait { + struct trace_iterator *iter; + int wait_index; +}; + +struct ftrace_stack { + long unsigned int calls[1024]; +}; + +struct ftrace_stacks { + struct ftrace_stack stacks[4]; +}; + +struct trace_buffer_struct { + int nesting; + char buffer[4096]; +}; + +struct ftrace_buffer_info { + struct trace_iterator iter; + void *spare; + unsigned int spare_cpu; + unsigned int spare_size; + unsigned int read; +}; + +struct err_info { + const char **errs; + u8 type; + u16 pos; + u64 ts; +}; + +struct tracing_log_err { + struct list_head list; + struct err_info info; + char loc[128]; + char *cmd; +}; + +struct buffer_ref { + struct trace_buffer *buffer; + void *page; + int cpu; + refcount_t refcount; +}; + +struct trace_event_raw_mmap_lock { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + char __data[0]; +}; + +struct trace_event_raw_mmap_lock_acquire_returned { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + bool success; + char __data[0]; +}; + +struct trace_event_data_offsets_mmap_lock { + u32 memcg_path; + const void *memcg_path_ptr_; +}; + +struct trace_event_data_offsets_mmap_lock_acquire_returned { + u32 memcg_path; + const void *memcg_path_ptr_; +}; + +typedef 
void (*btf_trace_mmap_lock_start_locking)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_released)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_acquire_returned)(void *, struct mm_struct *, const char *, bool, bool); + +typedef struct { + long unsigned int fds_bits[16]; +} __kernel_fd_set; + +typedef __kernel_fd_set fd_set; + +struct poll_table_entry { + struct file *filp; + __poll_t key; + wait_queue_entry_t wait; + wait_queue_head_t *wait_address; +}; + +struct poll_table_page; + +struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[9]; +}; + +struct poll_table_page { + struct poll_table_page *next; + struct poll_table_entry *entry; + struct poll_table_entry entries[0]; +}; + +enum poll_time_type { + PT_TIMEVAL = 0, + PT_OLD_TIMEVAL = 1, + PT_TIMESPEC = 2, + PT_OLD_TIMESPEC = 3, +}; + +typedef struct { + long unsigned int *in; + long unsigned int *out; + long unsigned int *ex; + long unsigned int *res_in; + long unsigned int *res_out; + long unsigned int *res_ex; +} fd_set_bits; + +struct sigset_argpack { + sigset_t *p; + size_t size; +}; + +struct poll_list { + struct poll_list *next; + unsigned int len; + struct pollfd entries[0]; +}; + +struct trace_event_raw_iomap_readpage_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + int nr_pages; + char __data[0]; +}; + +struct trace_event_raw_iomap_range_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + loff_t size; + loff_t offset; + u64 length; + char __data[0]; +}; + +struct trace_event_raw_iomap_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_writepage_map { + struct trace_entry ent; + dev_t dev; + u64 ino; + u64 pos; + u64 dirty_len; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_iter { + struct trace_entry ent; + dev_t dev; + u64 ino; + loff_t pos; + u64 length; + s64 processed; + unsigned int flags; + const void *ops; + long unsigned int caller; + char __data[0]; +}; + +struct trace_event_raw_iomap_dio_rw_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + size_t count; + size_t done_before; + int ki_flags; + unsigned int dio_flags; + bool aio; + char __data[0]; +}; + +struct trace_event_raw_iomap_dio_complete { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + int ki_flags; + bool aio; + int error; + ssize_t ret; + char __data[0]; +}; + +struct trace_event_data_offsets_iomap_readpage_class {}; + +struct trace_event_data_offsets_iomap_range_class {}; + +struct trace_event_data_offsets_iomap_class {}; + +struct trace_event_data_offsets_iomap_writepage_map {}; + +struct trace_event_data_offsets_iomap_iter {}; + +struct trace_event_data_offsets_iomap_dio_rw_begin {}; + +struct trace_event_data_offsets_iomap_dio_complete {}; + +typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_release_folio)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_invalidate_folio)(void 
*, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_invalidate_fail)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_rw_queued)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_iter_dstmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_iter_srcmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_writepage_map)(void *, struct inode *, u64, unsigned int, struct iomap *); + +typedef void (*btf_trace_iomap_iter)(void *, struct iomap_iter *, const void *, long unsigned int); + +typedef void (*btf_trace_iomap_dio_rw_begin)(void *, struct kiocb *, struct iov_iter *, unsigned int, size_t); + +typedef void (*btf_trace_iomap_dio_complete)(void *, struct kiocb *, int, ssize_t); + +struct cdrom_msf0 { + __u8 minute; + __u8 second; + __u8 frame; +}; + +union cdrom_addr { + struct cdrom_msf0 msf; + int lba; +}; + +struct cdrom_tocentry { + __u8 cdte_track; + __u8 cdte_adr: 4; + __u8 cdte_ctrl: 4; + __u8 cdte_format; + union cdrom_addr cdte_addr; + __u8 cdte_datamode; +}; + +struct cdrom_multisession { + union cdrom_addr addr; + __u8 xa_flag; + __u8 addr_format; +}; + +struct cdrom_mcn { + __u8 medium_catalog_number[14]; +}; + +struct packet_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct scsi_sense_hdr *sshdr; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +struct cdrom_device_ops; + +struct cdrom_device_info { + const struct cdrom_device_ops *ops; + struct list_head list; + struct gendisk *disk; + void *handle; + int mask; + int speed; + int capacity; + unsigned int options: 30; + unsigned int mc_flags: 2; + unsigned int vfs_events; + unsigned int ioctl_events; + int use_count; + char name[20]; + __u8 sanyo_slot: 2; + __u8 keeplocked: 1; + __u8 reserved: 5; + int cdda_method; + __u8 last_sense; + __u8 media_written; + short unsigned int mmc3_profile; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; + bool opened_for_data; + __s64 last_media_change_ms; +}; + +struct cdrom_device_ops { + int (*open)(struct cdrom_device_info *, int); + void (*release)(struct cdrom_device_info *); + int (*drive_status)(struct cdrom_device_info *, int); + unsigned int (*check_events)(struct cdrom_device_info *, unsigned int, int); + int (*tray_move)(struct cdrom_device_info *, int); + int (*lock_door)(struct cdrom_device_info *, int); + int (*select_speed)(struct cdrom_device_info *, long unsigned int); + int (*get_last_session)(struct cdrom_device_info *, struct cdrom_multisession *); + int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *); + int (*reset)(struct cdrom_device_info *); + int (*audio_ioctl)(struct cdrom_device_info *, unsigned int, void *); + int (*generic_packet)(struct cdrom_device_info *, struct packet_command *); + int (*read_cdda_bpc)(struct cdrom_device_info *, void *, u32, u32, u8 *); + const int capability; +}; + +struct iso_volume_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 data[2041]; +}; + +struct iso_primary_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 unused1[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 unused3[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 opt_type_l_path_table[4]; + __u8 type_m_path_table[4]; + __u8 
opt_type_m_path_table[4]; + __u8 root_directory_record[34]; + char volume_set_id[128]; + char publisher_id[128]; + char preparer_id[128]; + char application_id[128]; + char copyright_file_id[37]; + char abstract_file_id[37]; + char bibliographic_file_id[37]; + __u8 creation_date[17]; + __u8 modification_date[17]; + __u8 expiration_date[17]; + __u8 effective_date[17]; + __u8 file_structure_version[1]; + __u8 unused4[1]; + __u8 application_data[512]; + __u8 unused5[653]; +}; + +struct iso_supplementary_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 flags[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 escape[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 opt_type_l_path_table[4]; + __u8 type_m_path_table[4]; + __u8 opt_type_m_path_table[4]; + __u8 root_directory_record[34]; + char volume_set_id[128]; + char publisher_id[128]; + char preparer_id[128]; + char application_id[128]; + char copyright_file_id[37]; + char abstract_file_id[37]; + char bibliographic_file_id[37]; + __u8 creation_date[17]; + __u8 modification_date[17]; + __u8 expiration_date[17]; + __u8 effective_date[17]; + __u8 file_structure_version[1]; + __u8 unused4[1]; + __u8 application_data[512]; + __u8 unused5[653]; +}; + +struct hs_volume_descriptor { + __u8 foo[8]; + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 data[2033]; +}; + +struct hs_primary_descriptor { + __u8 foo[8]; + __u8 type[1]; + __u8 id[5]; + __u8 version[1]; + __u8 unused1[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 unused3[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 unused4[28]; + __u8 root_directory_record[34]; +}; + +struct isofs_options { + unsigned int rock: 1; + unsigned int joliet: 1; + unsigned int cruft: 1; + unsigned int hide: 1; + unsigned int showassoc: 1; + unsigned int nocompress: 1; + unsigned int overriderockperm: 1; + unsigned int uid_set: 1; + unsigned int gid_set: 1; + unsigned char map; + unsigned char check; + unsigned int blocksize; + umode_t fmode; + umode_t dmode; + kgid_t gid; + kuid_t uid; + char *iocharset; + s32 session; + s32 sbsector; +}; + +enum { + Opt_block = 0, + Opt_check = 1, + Opt_cruft = 2, + Opt_gid___3 = 3, + Opt_ignore = 4, + Opt_iocharset = 5, + Opt_map = 6, + Opt_mode___4 = 7, + Opt_nojoliet = 8, + Opt_norock = 9, + Opt_sb___2 = 10, + Opt_session = 11, + Opt_uid___3 = 12, + Opt_unhide = 13, + Opt_utf8 = 14, + Opt_err___5 = 15, + Opt_nocompress = 16, + Opt_hide = 17, + Opt_showassoc = 18, + Opt_dmode = 19, + Opt_overriderockperm = 20, +}; + +struct isofs_iget5_callback_data { + long unsigned int block; + long unsigned int offset; +}; + +struct btrfs_csum_item { + __u8 csum; +}; + +struct btrfs_ioctl_quota_ctl_args { + __u64 cmd; + __u64 status; +}; + +struct btrfs_qgroup_status_item { + __le64 version; + __le64 generation; + __le64 flags; + __le64 rescan; + __le64 enable_gen; +}; + +struct btrfs_qgroup_info_item { + __le64 generation; + __le64 rfer; + __le64 rfer_cmpr; + __le64 excl; + __le64 excl_cmpr; +}; + +struct btrfs_qgroup_limit_item { + __le64 flags; + __le64 max_rfer; + __le64 max_excl; + __le64 rsv_rfer; + __le64 rsv_excl; +}; + +struct btrfs_qgroup_swapped_block { + struct rb_node node; + int level; + bool trace_leaf; + u64 subvol_bytenr; + u64 
subvol_generation; + u64 reloc_bytenr; + u64 reloc_generation; + u64 last_snapshot; + struct btrfs_key first_key; +}; + +struct btrfs_qgroup_list { + struct list_head next_group; + struct list_head next_member; + struct btrfs_qgroup *group; + struct btrfs_qgroup *member; +}; + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[0]; +}; + +enum submit_disposition { + ASYNC_TX_SUBMITTED = 0, + ASYNC_TX_CHANNEL_SWITCH = 1, + ASYNC_TX_DIRECT_SUBMIT = 2, +}; + +struct fat_boot_sector { + __u8 ignored[3]; + __u8 system_id[8]; + __u8 sector_size[2]; + __u8 sec_per_clus; + __le16 reserved; + __u8 fats; + __u8 dir_entries[2]; + __u8 sectors[2]; + __u8 media; + __le16 fat_length; + __le16 secs_track; + __le16 heads; + __le32 hidden; + __le32 total_sect; + union { + struct { + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat16; + struct { + __le32 length; + __le16 flags; + __u8 version[2]; + __le32 root_cluster; + __le16 info_sector; + __le16 backup_boot; + __le16 reserved2[6]; + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat32; + }; +}; + +enum msdos_sys_ind { + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 133, + WIN98_EXTENDED_PARTITION = 15, + LINUX_DATA_PARTITION = 131, + LINUX_LVM_PARTITION = 142, + LINUX_RAID_PARTITION = 253, + SOLARIS_X86_PARTITION = 130, + NEW_SOLARIS_X86_PARTITION = 191, + DM6_AUX1PARTITION = 81, + DM6_AUX3PARTITION = 83, + DM6_PARTITION = 84, + EZD_PARTITION = 85, + FREEBSD_PARTITION = 165, + OPENBSD_PARTITION = 166, + NETBSD_PARTITION = 169, + BSDI_PARTITION = 183, + MINIX_PARTITION = 129, + UNIXWARE_PARTITION = 99, +}; + +struct io_sync { + struct file *file; + loff_t len; + loff_t off; + int flags; + int mode; +}; + +enum assoc_array_walk_status { + assoc_array_walk_tree_empty = 0, + assoc_array_walk_found_terminal_node = 1, + assoc_array_walk_found_wrong_shortcut = 2, +}; + +struct assoc_array_walk_result { + struct { + struct assoc_array_node *node; + int level; + int slot; + } terminal_node; + struct { + struct assoc_array_shortcut *shortcut; + int level; + int sc_level; + long unsigned int sc_segments; + long unsigned int dissimilarity; + } wrong_shortcut; +}; + +struct assoc_array_delete_collapse_context { + struct assoc_array_node *node; + const void *skip_leaf; + int slot; +}; + +typedef u64 unative_t___2; + +enum pci_mmap_state { + pci_mmap_io = 0, + pci_mmap_mem = 1, +}; + +enum pci_mmap_api { + PCI_MMAP_SYSFS = 0, + PCI_MMAP_PROCFS = 1, +}; + +struct cpu { + int node_id; + int hotpluggable; + struct device dev; +}; + +typedef u32 note_buf_t[106]; + +struct cpu_attr { + struct device_attribute attr; + const struct cpumask * const map; +}; + +struct ich8_hsfsts { + u16 flcdone: 1; + u16 flcerr: 1; + u16 dael: 1; + u16 berasesz: 2; + u16 flcinprog: 1; + u16 reserved1: 2; + u16 reserved2: 6; + u16 fldesvalid: 1; + u16 flockdn: 1; +}; + +union ich8_hws_flash_status { + struct ich8_hsfsts hsf_status; + u16 regval; +}; + +struct ich8_hsflctl { + u16 flcgo: 1; + u16 flcycle: 2; + u16 reserved: 5; + u16 fldbcount: 2; + u16 flockdn: 6; +}; + +union ich8_hws_flash_ctrl { + struct ich8_hsflctl hsf_ctrl; + u16 regval; +}; + +struct ich8_pr { + u32 base: 13; + u32 reserved1: 2; + u32 rpe: 1; + u32 limit: 13; + u32 reserved2: 2; + u32 wpe: 1; +}; + +union ich8_flash_protected_range { + struct ich8_pr range; + u32 regval; +}; + +enum { + US_FL_SINGLE_LUN = 1, 
+ US_FL_NEED_OVERRIDE = 2, + US_FL_SCM_MULT_TARG = 4, + US_FL_FIX_INQUIRY = 8, + US_FL_FIX_CAPACITY = 16, + US_FL_IGNORE_RESIDUE = 32, + US_FL_BULK32 = 64, + US_FL_NOT_LOCKABLE = 128, + US_FL_GO_SLOW = 256, + US_FL_NO_WP_DETECT = 512, + US_FL_MAX_SECTORS_64 = 1024, + US_FL_IGNORE_DEVICE = 2048, + US_FL_CAPACITY_HEURISTICS = 4096, + US_FL_MAX_SECTORS_MIN = 8192, + US_FL_BULK_IGNORE_TAG = 16384, + US_FL_SANE_SENSE = 32768, + US_FL_CAPACITY_OK = 65536, + US_FL_BAD_SENSE = 131072, + US_FL_NO_READ_DISC_INFO = 262144, + US_FL_NO_READ_CAPACITY_16 = 524288, + US_FL_INITIAL_READ10 = 1048576, + US_FL_WRITE_CACHE = 2097152, + US_FL_NEEDS_CAP16 = 4194304, + US_FL_IGNORE_UAS = 8388608, + US_FL_BROKEN_FUA = 16777216, + US_FL_NO_ATA_1X = 33554432, + US_FL_NO_REPORT_OPCODES = 67108864, + US_FL_MAX_SECTORS_240 = 134217728, + US_FL_NO_REPORT_LUNS = 268435456, + US_FL_ALWAYS_SYNC = 536870912, + US_FL_NO_SAME = 1073741824, + US_FL_SENSE_AFTER_SYNC = 2147483648, +}; + +struct ignore_entry { + u16 vid; + u16 pid; + u16 bcdmin; + u16 bcdmax; +}; + +struct i2c_smbus_alert_setup { + int irq; +}; + +struct i2c_smbus_alert { + struct work_struct alert; + struct i2c_client *ara; +}; + +struct alert_data { + short unsigned int addr; + enum i2c_alert_protocol type; + unsigned int data; +}; + +struct lirc_scancode { + __u64 timestamp; + __u16 flags; + __u16 rc_proto; + __u32 keycode; + __u64 scancode; +}; + +enum mce_kbd_mode { + MCIR2_MODE_KEYBOARD = 0, + MCIR2_MODE_MOUSE = 1, + MCIR2_MODE_UNKNOWN = 2, +}; + +enum mce_kbd_state { + STATE_INACTIVE___7 = 0, + STATE_HEADER_BIT_START = 1, + STATE_HEADER_BIT_END = 2, + STATE_BODY_BIT_START = 3, + STATE_BODY_BIT_END = 4, + STATE_FINISHED___3 = 5, +}; + +struct mdp_device_descriptor_s { + __u32 number; + __u32 major; + __u32 minor; + __u32 raid_disk; + __u32 state; + __u32 reserved[27]; +}; + +typedef struct mdp_device_descriptor_s mdp_disk_t; + +struct mdp_superblock_s { + __u32 md_magic; + __u32 major_version; + __u32 minor_version; + __u32 patch_version; + __u32 gvalid_words; + __u32 set_uuid0; + __u32 ctime; + __u32 level; + __u32 size; + __u32 nr_disks; + __u32 raid_disks; + __u32 md_minor; + __u32 not_persistent; + __u32 set_uuid1; + __u32 set_uuid2; + __u32 set_uuid3; + __u32 gstate_creserved[16]; + __u32 utime; + __u32 state; + __u32 active_disks; + __u32 working_disks; + __u32 failed_disks; + __u32 spare_disks; + __u32 sb_csum; + __u32 events_hi; + __u32 events_lo; + __u32 cp_events_hi; + __u32 cp_events_lo; + __u32 recovery_cp; + __u64 reshape_position; + __u32 new_level; + __u32 delta_disks; + __u32 new_layout; + __u32 new_chunk; + __u32 gstate_sreserved[14]; + __u32 layout; + __u32 chunk_size; + __u32 root_pv; + __u32 root_block; + __u32 pstate_reserved[60]; + mdp_disk_t disks[27]; + __u32 reserved[0]; + mdp_disk_t this_disk; +}; + +typedef struct mdp_superblock_s mdp_super_t; + +struct mdp_superblock_1 { + __le32 magic; + __le32 major_version; + __le32 feature_map; + __le32 pad0; + __u8 set_uuid[16]; + char set_name[32]; + __le64 ctime; + __le32 level; + __le32 layout; + __le64 size; + __le32 chunksize; + __le32 raid_disks; + union { + __le32 bitmap_offset; + struct { + __le16 offset; + __le16 size; + } ppl; + }; + __le32 new_level; + __le64 reshape_position; + __le32 delta_disks; + __le32 new_layout; + __le32 new_chunk; + __le32 new_offset; + __le64 data_offset; + __le64 data_size; + __le64 super_offset; + union { + __le64 recovery_offset; + __le64 journal_tail; + }; + __le32 dev_number; + __le32 cnt_corrected_read; + __u8 device_uuid[16]; + __u8 devflags; + 
__u8 bblog_shift; + __le16 bblog_size; + __le32 bblog_offset; + __le64 utime; + __le64 events; + __le64 resync_offset; + __le32 sb_csum; + __le32 max_dev; + __u8 pad3[32]; + __le16 dev_roles[0]; +}; + +struct mdu_version_s { + int major; + int minor; + int patchlevel; +}; + +typedef struct mdu_version_s mdu_version_t; + +typedef struct mdu_array_info_s mdu_array_info_t; + +typedef struct mdu_disk_info_s mdu_disk_info_t; + +struct mdu_bitmap_file_s { + char pathname[4096]; +}; + +typedef struct mdu_bitmap_file_s mdu_bitmap_file_t; + +enum { + MD_RESYNC_NONE = 0, + MD_RESYNC_YIELDED = 1, + MD_RESYNC_DELAYED = 2, + MD_RESYNC_ACTIVE = 3, +}; + +enum md_ro_state { + MD_RDWR = 0, + MD_RDONLY = 1, + MD_AUTO_READ = 2, + MD_MAX_STATE = 3, +}; + +struct md_io_clone { + struct mddev *mddev; + struct bio *orig_bio; + long unsigned int start_time; + struct bio bio_clone; +}; + +struct super_type { + char *name; + struct module *owner; + int (*load_super)(struct md_rdev *, struct md_rdev *, int); + int (*validate_super)(struct mddev *, struct md_rdev *, struct md_rdev *); + void (*sync_super)(struct mddev *, struct md_rdev *); + long long unsigned int (*rdev_size_change)(struct md_rdev *, sector_t); + int (*allow_new_offset)(struct md_rdev *, long long unsigned int); +}; + +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct md_rdev *, char *); + ssize_t (*store)(struct md_rdev *, const char *, size_t); +}; + +enum array_state { + clear = 0, + inactive = 1, + suspended = 2, + readonly = 3, + read_auto = 4, + clean = 5, + active = 6, + write_pending = 7, + active_idle = 8, + broken = 9, + bad_word = 10, +}; + +struct detected_devices_node { + struct list_head list; + dev_t dev; +}; + +enum netdev_xdp_act { + NETDEV_XDP_ACT_BASIC = 1, + NETDEV_XDP_ACT_REDIRECT = 2, + NETDEV_XDP_ACT_NDO_XMIT = 4, + NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, + NETDEV_XDP_ACT_HW_OFFLOAD = 16, + NETDEV_XDP_ACT_RX_SG = 32, + NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + NETDEV_XDP_ACT_MASK = 127, +}; + +enum netdev_xdp_rx_metadata { + NETDEV_XDP_RX_METADATA_TIMESTAMP = 1, + NETDEV_XDP_RX_METADATA_HASH = 2, + NETDEV_XDP_RX_METADATA_VLAN_TAG = 4, +}; + +enum netdev_xsk_flags { + NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, + NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, +}; + +enum netdev_qstats_scope { + NETDEV_QSTATS_SCOPE_QUEUE = 1, +}; + +struct netdev_nl_dump_ctx { + long unsigned int ifindex; + unsigned int rxq_idx; + unsigned int txq_idx; + unsigned int napi_id; +}; + +struct tc_qopt_offload_stats { + struct gnet_stats_basic_sync *bstats; + struct gnet_stats_queue *qstats; +}; + +enum tc_mq_command { + TC_MQ_CREATE = 0, + TC_MQ_DESTROY = 1, + TC_MQ_STATS = 2, + TC_MQ_GRAFT = 3, +}; + +struct tc_mq_opt_offload_graft_params { + long unsigned int queue; + u32 child_handle; +}; + +struct tc_mq_qopt_offload { + enum tc_mq_command command; + u32 handle; + union { + struct tc_qopt_offload_stats stats; + struct tc_mq_opt_offload_graft_params graft_params; + }; +}; + +struct mq_sched { + struct Qdisc **qdiscs; +}; + +struct eeprom_req_info { + struct ethnl_req_info base; + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; +}; + +struct eeprom_reply_data { + struct ethnl_reply_data base; + u32 length; + u8 *data; +}; + +struct nf_conntrack_nat_helper { + struct list_head list; + char mod_name[16]; + struct module *module; +}; + +enum nft_bitwise_ops { + NFT_BITWISE_MASK_XOR = 0, + NFT_BITWISE_LSHIFT = 1, + NFT_BITWISE_RSHIFT = 2, + NFT_BITWISE_AND = 3, + NFT_BITWISE_OR = 4, + NFT_BITWISE_XOR = 5, +}; + +enum nft_bitwise_attributes { + 
NFTA_BITWISE_UNSPEC = 0, + NFTA_BITWISE_SREG = 1, + NFTA_BITWISE_DREG = 2, + NFTA_BITWISE_LEN = 3, + NFTA_BITWISE_MASK = 4, + NFTA_BITWISE_XOR = 5, + NFTA_BITWISE_OP = 6, + NFTA_BITWISE_DATA = 7, + NFTA_BITWISE_SREG2 = 8, + __NFTA_BITWISE_MAX = 9, +}; + +struct nft_bitwise { + u8 sreg; + u8 sreg2; + u8 dreg; + enum nft_bitwise_ops op: 8; + u8 len; + struct nft_data mask; + struct nft_data xor; + struct nft_data data; +}; + +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + +struct rt_cache_stat { + unsigned int in_slow_tot; + unsigned int in_slow_mc; + unsigned int in_no_route; + unsigned int in_brd; + unsigned int in_martian_dst; + unsigned int in_martian_src; + unsigned int out_slow_tot; + unsigned int out_slow_mc; +}; + +struct ifaddrlblmsg { + __u8 ifal_family; + __u8 __ifal_reserved; + __u8 ifal_prefixlen; + __u8 ifal_flags; + __u32 ifal_index; + __u32 ifal_seq; +}; + +enum { + IFAL_ADDRESS = 1, + IFAL_LABEL = 2, + __IFAL_MAX = 3, +}; + +struct ip6addrlbl_entry { + struct in6_addr prefix; + int prefixlen; + int ifindex; + int addrtype; + u32 label; + struct hlist_node list; + struct callback_head rcu; +}; + +struct ip6addrlbl_init_table { + const struct in6_addr *prefix; + int prefixlen; + u32 label; +}; + +struct cpio_data { + void *data; + size_t size; + char name[18]; +}; + +enum cpio_fields { + C_MAGIC = 0, + C_INO = 1, + C_MODE = 2, + C_UID = 3, + C_GID = 4, + C_NLINK = 5, + C_MTIME = 6, + C_FILESIZE = 7, + C_MAJ = 8, + C_MIN = 9, + C_RMAJ = 10, + C_RMIN = 11, + C_NAMESIZE = 12, + C_CHKSUM = 13, + C_NFIELDS = 14, +}; + +struct uevent_sock { + struct list_head list; + struct sock *sk; +}; + +enum kunwind_source { + KUNWIND_SOURCE_UNKNOWN = 0, + KUNWIND_SOURCE_FRAME = 1, + KUNWIND_SOURCE_CALLER = 2, + KUNWIND_SOURCE_TASK = 3, + KUNWIND_SOURCE_REGS_PC = 4, + KUNWIND_SOURCE_REGS_LR = 5, +}; + +union unwind_flags { + long unsigned int all; + struct { + long unsigned int fgraph: 1; + long unsigned int kretprobe: 1; + }; +}; + +struct kunwind_state { + struct unwind_state common; + struct task_struct *task; + int graph_idx; + struct llist_node *kr_cur; + enum kunwind_source source; + union unwind_flags flags; + struct pt_regs *regs; +}; + +typedef bool (*kunwind_consume_fn)(const struct kunwind_state *, void *); + +struct kunwind_consume_entry_data { + stack_trace_consume_fn consume_entry; + void *cookie; +}; + +struct bpf_unwind_consume_entry_data { + bool (*consume_entry)(void *, u64, u64, u64); + void *cookie; +}; + +struct frame_tail { + struct frame_tail *fp; + long unsigned int lr; +}; + +enum pkvm_page_state { + PKVM_PAGE_OWNED = 0ULL, + PKVM_PAGE_SHARED_OWNED = 36028797018963968ULL, + PKVM_PAGE_SHARED_BORROWED = 72057594037927936ULL, + __PKVM_PAGE_RESERVED = 108086391056891904ULL, + PKVM_NOPAGE = 108086391056891905ULL, +}; + +enum pkvm_component_id { + PKVM_ID_HOST = 0, + PKVM_ID_HYP = 1, + PKVM_ID_FFA = 2, +}; + +enum { + __SD_BALANCE_NEWIDLE = 0, + __SD_BALANCE_EXEC = 1, + __SD_BALANCE_FORK = 2, + __SD_BALANCE_WAKE = 3, + __SD_WAKE_AFFINE = 4, + __SD_ASYM_CPUCAPACITY = 5, + __SD_ASYM_CPUCAPACITY_FULL = 6, + __SD_SHARE_CPUCAPACITY = 7, + __SD_CLUSTER = 8, + __SD_SHARE_LLC = 9, + __SD_SERIALIZE = 10, + __SD_ASYM_PACKING = 11, + __SD_PREFER_SIBLING = 12, + __SD_OVERLAP = 13, + __SD_NUMA = 14, + __SD_FLAG_CNT = 15, +}; + +struct sd_flag_debug { + unsigned int meta_flags; + char *name; +}; + +struct sched_domain_attr { + int relax_domain_level; +}; + +typedef const struct cpumask * (*sched_domain_mask_f)(int); + +typedef int (*sched_domain_flags_f)(); 
+ +struct sd_data { + struct sched_domain **sd; + struct sched_domain_shared **sds; + struct sched_group **sg; + struct sched_group_capacity **sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; + char *name; +}; + +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, + MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, + MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, +}; + +enum { + MEMBARRIER_FLAG_SYNC_CORE = 1, + MEMBARRIER_FLAG_RSEQ = 2, +}; + +enum membarrier_cmd { + MEMBARRIER_CMD_QUERY = 0, + MEMBARRIER_CMD_GLOBAL = 1, + MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, + MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, + MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = 128, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = 256, + MEMBARRIER_CMD_GET_REGISTRATIONS = 512, + MEMBARRIER_CMD_SHARED = 1, +}; + +enum membarrier_cmd_flag { + MEMBARRIER_CMD_FLAG_CPU = 1, +}; + +struct psi_window { + u64 size; + u64 start_time; + u64 start_value; + u64 prev_growth; +}; + +struct psi_trigger { + enum psi_states state; + u64 threshold; + struct list_head node; + struct psi_group *group; + wait_queue_head_t event_wait; + struct kernfs_open_file *of; + int event; + struct psi_window win; + u64 last_event_time; + bool pending_event; + enum psi_aggregators aggregator; +}; + +enum cpuacct_stat_index { + CPUACCT_STAT_USER = 0, + CPUACCT_STAT_SYSTEM = 1, + CPUACCT_STAT_NSTATS = 2, +}; + +struct cpuacct { + struct cgroup_subsys_state css; + u64 *cpuusage; + struct kernel_cpustat *cpustat; +}; + +enum dl_param { + DL_RUNTIME = 0, + DL_PERIOD = 1, +}; + +struct s_data { + struct sched_domain **sd; + struct root_domain *rd; +}; + +enum s_alloc { + sa_rootdomain = 0, + sa_sd = 1, + sa_sd_storage = 2, + sa_none = 3, +}; + +struct sched_core_cookie { + refcount_t refcnt; +}; + +enum hk_flags { + HK_FLAG_TIMER = 1, + HK_FLAG_RCU = 2, + HK_FLAG_MISC = 4, + HK_FLAG_SCHED = 8, + HK_FLAG_TICK = 16, + HK_FLAG_DOMAIN = 32, + HK_FLAG_WQ = 64, + HK_FLAG_MANAGED_IRQ = 128, + HK_FLAG_KTHREAD = 256, +}; + +struct housekeeping { + struct cpumask cpumasks[9]; + long unsigned int flags; +}; + +enum misc_res_type { + MISC_CG_RES_TYPES = 0, +}; + +struct misc_res { + u64 max; + atomic64_t watermark; + atomic64_t usage; + atomic64_t events; + atomic64_t events_local; +}; + +struct misc_cg { + struct cgroup_subsys_state css; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct misc_res res[0]; +}; + +enum { + TASKSTATS_TYPE_UNSPEC = 0, + TASKSTATS_TYPE_PID = 1, + TASKSTATS_TYPE_TGID = 2, + TASKSTATS_TYPE_STATS = 3, + TASKSTATS_TYPE_AGGR_PID = 4, + TASKSTATS_TYPE_AGGR_TGID = 5, + TASKSTATS_TYPE_NULL = 6, + __TASKSTATS_TYPE_MAX = 7, +}; + +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID = 1, + TASKSTATS_CMD_ATTR_TGID = 2, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, + __TASKSTATS_CMD_ATTR_MAX = 5, +}; + +enum { + CGROUPSTATS_CMD_UNSPEC = 3, + CGROUPSTATS_CMD_GET = 4, + CGROUPSTATS_CMD_NEW = 5, + __CGROUPSTATS_CMD_MAX 
= 6, +}; + +enum { + CGROUPSTATS_TYPE_UNSPEC = 0, + CGROUPSTATS_TYPE_CGROUP_STATS = 1, + __CGROUPSTATS_TYPE_MAX = 2, +}; + +enum { + CGROUPSTATS_CMD_ATTR_UNSPEC = 0, + CGROUPSTATS_CMD_ATTR_FD = 1, + __CGROUPSTATS_CMD_ATTR_MAX = 2, +}; + +struct listener { + struct list_head list; + pid_t pid; + char valid; +}; + +struct listener_list { + struct rw_semaphore sem; + struct list_head list; +}; + +enum actions { + REGISTER = 0, + DEREGISTER = 1, + CPU_DONT_CARE = 2, +}; + +enum { + BPF_F_UPROBE_MULTI_RETURN = 1, +}; + +enum { + BPF_F_GET_BRANCH_RECORDS_SIZE = 1, +}; + +struct bpf_perf_event_value { + __u64 counter; + __u64 enabled; + __u64 running; +}; + +struct bpf_raw_tracepoint_args { + __u64 args[0]; +}; + +struct btf_ptr { + void *ptr; + __u32 type_id; + __u32 flags; +}; + +enum { + BTF_F_COMPACT = 1, + BTF_F_NONAME = 2, + BTF_F_PTR_RAW = 4, + BTF_F_ZERO = 8, +}; + +struct btf_id_set { + u32 cnt; + u32 ids[0]; +}; + +struct kprobe; + +typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); + +typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int); + +struct kprobe { + struct hlist_node hlist; + struct list_head list; + long unsigned int nmissed; + kprobe_opcode_t *addr; + const char *symbol_name; + unsigned int offset; + kprobe_pre_handler_t pre_handler; + kprobe_post_handler_t post_handler; + kprobe_opcode_t opcode; + struct arch_specific_insn ainsn; + u32 flags; +}; + +struct bpf_key { + struct key *key; + bool has_ref; +}; + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +struct bpf_perf_event_data { + bpf_user_pt_regs_t regs; + __u64 sample_period; + __u64 addr; +}; + +struct perf_event_query_bpf { + __u32 ids_len; + __u32 prog_cnt; + __u32 ids[0]; +}; + +struct bpf_perf_event_data_kern { + bpf_user_pt_regs_t *regs; + struct perf_sample_data *data; + struct perf_event *event; +}; + +typedef int perf_snapshot_branch_stack_t(struct perf_branch_entry *, unsigned int); + +struct trace_event_raw_bpf_trace_printk { + struct trace_entry ent; + u32 __data_loc_bpf_string; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trace_printk { + u32 bpf_string; + const void *bpf_string_ptr_; +}; + +typedef void (*btf_trace_bpf_trace_printk)(void *, const char *); + +struct bpf_trace_module { + struct module *module; + struct list_head list; +}; + +typedef u64 (*btf_bpf_override_return)(struct pt_regs *, long unsigned int); + +typedef u64 (*btf_bpf_probe_read_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_user_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_write_user)(void *, const void *, u32); + +typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_trace_vprintk)(char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf_btf)(struct seq_file *, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, struct bpf_perf_event_value *, u32); + +struct bpf_trace_sample_data { + struct 
perf_sample_data sds[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64); + +struct bpf_nested_pt_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_get_current_task)(); + +typedef u64 (*btf_bpf_get_current_task_btf)(); + +typedef u64 (*btf_bpf_task_pt_regs)(struct task_struct *); + +struct send_signal_irq_work { + struct irq_work irq_work; + struct task_struct *task; + u32 sig; + enum pid_type type; + bool has_siginfo; + struct kernel_siginfo info; +}; + +typedef u64 (*btf_bpf_send_signal)(u32); + +typedef u64 (*btf_bpf_send_signal_thread)(u32); + +typedef u64 (*btf_bpf_d_path)(struct path *, char *, u32); + +typedef u64 (*btf_bpf_snprintf_btf)(char *, u32, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_get_func_ip_tracing)(void *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_trace)(void *); + +typedef u64 (*btf_bpf_get_attach_cookie_pe)(struct bpf_perf_event_data_kern *); + +typedef u64 (*btf_bpf_get_attach_cookie_tracing)(void *); + +typedef u64 (*btf_bpf_get_branch_snapshot)(void *, u32, u64); + +typedef u64 (*btf_get_func_arg)(void *, u32, u64 *); + +typedef u64 (*btf_get_func_ret)(void *, u64 *); + +typedef u64 (*btf_get_func_arg_cnt)(void *); + +typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64); + +typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, struct bpf_perf_event_value *, u32); + +typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct bpf_raw_tp_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64); + +struct bpf_session_run_ctx { + struct bpf_run_ctx run_ctx; + bool is_return; + void *data; +}; + +struct bpf_uprobe_multi_link; + +struct bpf_uprobe { + struct bpf_uprobe_multi_link *link; + loff_t offset; + long unsigned int ref_ctr_offset; + u64 cookie; + struct uprobe *uprobe; + struct uprobe_consumer consumer; + bool session; +}; + +struct bpf_uprobe_multi_link { + struct path path; + struct bpf_link link; + u32 cnt; + u32 flags; + struct bpf_uprobe *uprobes; + struct task_struct *task; +}; + +struct bpf_uprobe_multi_run_ctx { + struct bpf_session_run_ctx session_ctx; + long unsigned int entry_ip; + struct bpf_uprobe *uprobe; +}; + +struct wb_stats { + long unsigned int nr_dirty; + long unsigned int nr_io; + long unsigned int nr_more_io; + long unsigned int nr_dirty_time; + long unsigned int nr_writeback; + long unsigned int nr_reclaimable; + long unsigned int nr_dirtied; + long unsigned int nr_written; + long unsigned int dirty_thresh; + long unsigned int wb_thresh; +}; + +enum fsconfig_command { + FSCONFIG_SET_FLAG = 0, + FSCONFIG_SET_STRING = 1, + FSCONFIG_SET_BINARY = 2, + FSCONFIG_SET_PATH = 3, + 
FSCONFIG_SET_PATH_EMPTY = 4, + FSCONFIG_SET_FD = 5, + FSCONFIG_CMD_CREATE = 6, + FSCONFIG_CMD_RECONFIGURE = 7, + FSCONFIG_CMD_CREATE_EXCL = 8, +}; + +enum { + attr_noop = 0, + attr_delayed_allocation_blocks = 1, + attr_session_write_kbytes = 2, + attr_lifetime_write_kbytes = 3, + attr_reserved_clusters = 4, + attr_sra_exceeded_retry_limit = 5, + attr_inode_readahead = 6, + attr_trigger_test_error = 7, + attr_first_error_time = 8, + attr_last_error_time = 9, + attr_clusters_in_group = 10, + attr_mb_order = 11, + attr_feature = 12, + attr_pointer_pi = 13, + attr_pointer_ui = 14, + attr_pointer_ul = 15, + attr_pointer_u64 = 16, + attr_pointer_u8 = 17, + attr_pointer_string = 18, + attr_pointer_atomic = 19, + attr_journal_task = 20, +}; + +enum { + ptr_explicit = 0, + ptr_ext4_sb_info_offset = 1, + ptr_ext4_super_block_offset = 2, +}; + +struct ext4_attr { + struct attribute attr; + short int attr_id; + short int attr_ptr; + short unsigned int attr_size; + union { + int offset; + void *explicit_ptr; + } u; +}; + +enum btrfs_delayed_item_type { + BTRFS_DELAYED_INSERTION_ITEM = 0, + BTRFS_DELAYED_DELETION_ITEM = 1, +}; + +struct btrfs_delayed_item { + struct rb_node rb_node; + u64 index; + struct list_head tree_list; + struct list_head readdir_list; + struct list_head log_list; + u64 bytes_reserved; + struct btrfs_delayed_node *delayed_node; + refcount_t refs; + enum btrfs_delayed_item_type type: 8; + bool logged; + u16 data_len; + char data[0]; +}; + +struct btrfs_async_delayed_work { + struct btrfs_delayed_root *delayed_root; + int nr; + struct btrfs_work work; +}; + +enum { + CRYPTO_MSG_ALG_REQUEST = 0, + CRYPTO_MSG_ALG_REGISTER = 1, + CRYPTO_MSG_ALG_LOADED = 2, +}; + +struct crypto_larval { + struct crypto_alg alg; + struct crypto_alg *adult; + struct completion completion; + u32 mask; + bool test_started; +}; + +struct acomp_alg { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + int (*init)(struct crypto_acomp *); + void (*exit)(struct crypto_acomp *); + unsigned int reqsize; + union { + struct { + struct crypto_alg base; + }; + struct comp_alg_common calg; + }; +}; + +enum asn1_class { + ASN1_UNIV = 0, + ASN1_APPL = 1, + ASN1_CONT = 2, + ASN1_PRIV = 3, +}; + +enum x509_actions { + ACT_x509_extract_key_data = 0, + ACT_x509_extract_name_segment = 1, + ACT_x509_note_OID = 2, + ACT_x509_note_issuer = 3, + ACT_x509_note_not_after = 4, + ACT_x509_note_not_before = 5, + ACT_x509_note_params = 6, + ACT_x509_note_serial = 7, + ACT_x509_note_sig_algo = 8, + ACT_x509_note_signature = 9, + ACT_x509_note_subject = 10, + ACT_x509_note_tbs_certificate = 11, + ACT_x509_process_extension = 12, + NR__x509_actions = 13, +}; + +enum blacklist_hash_type { + BLACKLIST_HASH_X509_TBS = 1, + BLACKLIST_HASH_BINARY = 2, +}; + +struct disk_events { + struct list_head node; + struct gendisk *disk; + spinlock_t lock; + struct mutex block_mutex; + int block; + unsigned int pending; + unsigned int clearing; + long int poll_msecs; + struct delayed_work dwork; +}; + +struct io_rename { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct io_unlink { + struct file *file; + int dfd; + int flags; + struct filename *filename; +}; + +struct io_mkdir { + struct file *file; + int dfd; + umode_t mode; + struct filename *filename; +}; + +struct io_link { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + 
+enum string_size_units { + STRING_UNITS_10 = 0, + STRING_UNITS_2 = 1, + STRING_UNITS_MASK = 1, + STRING_UNITS_NO_SPACE = 1073741824, + STRING_UNITS_NO_BYTES = 2147483648, +}; + +struct strarray { + char **array; + size_t n; +}; + +typedef struct { + size_t bitContainer; + unsigned int bitPos; + char *startPtr; + char *ptr; + char *endPtr; +} BIT_CStream_t; + +typedef struct { + S16 norm[53]; + U32 wksp[285]; +} ZSTD_BuildCTableWksp; + +struct raid6_recov_calls { + void (*data2)(int, size_t, int, int, void **); + void (*datap)(int, size_t, int, void **); + int (*valid)(); + const char *name; + int priority; +}; + +struct irq_glue { + struct irq_affinity_notify notify; + struct cpu_rmap *rmap; + u16 index; +}; + +struct v2m_data { + struct list_head entry; + struct fwnode_handle *fwnode; + struct resource res; + void *base; + u32 spi_start; + u32 nr_spis; + u32 spi_offset; + long unsigned int *bm; + u32 flags; +}; + +struct fb_cvt_data { + u32 xres; + u32 yres; + u32 refresh; + u32 f_refresh; + u32 pixclock; + u32 hperiod; + u32 hblank; + u32 hfreq; + u32 htotal; + u32 vtotal; + u32 vsync; + u32 hsync; + u32 h_front_porch; + u32 h_back_porch; + u32 v_front_porch; + u32 v_back_porch; + u32 h_margin; + u32 v_margin; + u32 interlace; + u32 aspect_ratio; + u32 active_pixels; + u32 flags; + u32 status; +}; + +struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +enum { + FBCON_LOGO_CANSHOW = -1, + FBCON_LOGO_DRAW = -2, + FBCON_LOGO_DONTSHOW = -3, +}; + +struct vt_notifier_param { + struct vc_data *vc; + unsigned int c; +}; + +struct tiocl_selection { + short unsigned int xs; + short unsigned int ys; + short unsigned int xe; + short unsigned int ye; + short unsigned int sel_mode; +}; + +struct con_driver { + const struct consw *con; + const char *desc; + struct device *dev; + int node; + int first; + int last; + int flag; +}; + +enum { + blank_off = 0, + blank_normal_wait = 1, + blank_vesa_wait = 2, +}; + +enum { + EPecma = 0, + EPdec = 1, + EPeq = 2, + EPgt = 3, + EPlt = 4, +}; + +enum CSI_J { + CSI_J_CURSOR_TO_END = 0, + CSI_J_START_TO_CURSOR = 1, + CSI_J_VISIBLE = 2, + CSI_J_FULL = 3, +}; + +enum { + CSI_K_CURSOR_TO_LINEEND = 0, + CSI_K_LINESTART_TO_CURSOR = 1, + CSI_K_LINE = 2, +}; + +struct rgb { + u8 r; + u8 g; + u8 b; +}; + +enum { + CSI_m_DEFAULT = 0, + CSI_m_BOLD = 1, + CSI_m_HALF_BRIGHT = 2, + CSI_m_ITALIC = 3, + CSI_m_UNDERLINE = 4, + CSI_m_BLINK = 5, + CSI_m_REVERSE = 7, + CSI_m_PRI_FONT = 10, + CSI_m_ALT_FONT1 = 11, + CSI_m_ALT_FONT2 = 12, + CSI_m_DOUBLE_UNDERLINE = 21, + CSI_m_NORMAL_INTENSITY = 22, + CSI_m_NO_ITALIC = 23, + CSI_m_NO_UNDERLINE = 24, + CSI_m_NO_BLINK = 25, + CSI_m_NO_REVERSE = 27, + CSI_m_FG_COLOR_BEG = 30, + CSI_m_FG_COLOR_END = 37, + CSI_m_FG_COLOR = 38, + CSI_m_DEFAULT_FG_COLOR = 39, + CSI_m_BG_COLOR_BEG = 40, + CSI_m_BG_COLOR_END = 47, + CSI_m_BG_COLOR = 48, + CSI_m_DEFAULT_BG_COLOR = 49, + CSI_m_BRIGHT_FG_COLOR_BEG = 90, + CSI_m_BRIGHT_FG_COLOR_END = 97, + CSI_m_BRIGHT_FG_COLOR_OFF = 60, + CSI_m_BRIGHT_BG_COLOR_BEG = 100, + CSI_m_BRIGHT_BG_COLOR_END = 107, + CSI_m_BRIGHT_BG_COLOR_OFF = 60, +}; + +enum { + CSI_DEC_hl_CURSOR_KEYS = 1, + CSI_DEC_hl_132_COLUMNS = 3, + CSI_DEC_hl_REVERSE_VIDEO = 5, + CSI_DEC_hl_ORIGIN_MODE = 6, + CSI_DEC_hl_AUTOWRAP = 7, + CSI_DEC_hl_AUTOREPEAT = 8, + CSI_DEC_hl_MOUSE_X10 = 9, + CSI_DEC_hl_SHOW_CURSOR = 25, + CSI_DEC_hl_MOUSE_VT200 = 1000, +}; + +enum { + CSI_hl_DISPLAY_CTRL = 3, + CSI_hl_INSERT = 4, + CSI_hl_AUTO_NL = 20, +}; + +enum CSI_right_square_bracket { + CSI_RSB_COLOR_FOR_UNDERLINE = 1, + 
CSI_RSB_COLOR_FOR_HALF_BRIGHT = 2, + CSI_RSB_MAKE_CUR_COLOR_DEFAULT = 8, + CSI_RSB_BLANKING_INTERVAL = 9, + CSI_RSB_BELL_FREQUENCY = 10, + CSI_RSB_BELL_DURATION = 11, + CSI_RSB_BRING_CONSOLE_TO_FRONT = 12, + CSI_RSB_UNBLANK = 13, + CSI_RSB_VESA_OFF_INTERVAL = 14, + CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT = 15, + CSI_RSB_CURSOR_BLINK_INTERVAL = 16, +}; + +enum vc_ctl_state { + ESnormal = 0, + ESesc = 1, + ESsquare = 2, + ESgetpars = 3, + ESfunckey = 4, + EShash = 5, + ESsetG0 = 6, + ESsetG1 = 7, + ESpercent = 8, + EScsiignore = 9, + ESnonstd = 10, + ESpalette = 11, + ESosc = 12, + ESANSI_first = 12, + ESapc = 13, + ESpm = 14, + ESdcs = 15, + ESANSI_last = 15, +}; + +enum { + ASCII_NULL = 0, + ASCII_BELL = 7, + ASCII_BACKSPACE = 8, + ASCII_IGNORE_FIRST = 8, + ASCII_HTAB = 9, + ASCII_LINEFEED = 10, + ASCII_VTAB = 11, + ASCII_FORMFEED = 12, + ASCII_CAR_RET = 13, + ASCII_IGNORE_LAST = 13, + ASCII_SHIFTOUT = 14, + ASCII_SHIFTIN = 15, + ASCII_CANCEL = 24, + ASCII_SUBSTITUTE = 26, + ASCII_ESCAPE = 27, + ASCII_CSI_IGNORE_FIRST = 32, + ASCII_CSI_IGNORE_LAST = 63, + ASCII_DEL = 127, + ASCII_EXT_CSI = 155, +}; + +struct interval { + uint32_t first; + uint32_t last; +}; + +struct vc_draw_region { + long unsigned int from; + long unsigned int to; + int x; +}; + +struct dma_fence_array; + +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +struct dma_fence_array { + struct dma_fence base; + spinlock_t lock; + unsigned int num_fences; + atomic_t num_pending; + struct dma_fence **fences; + struct irq_work work; + struct dma_fence_array_cb callbacks[0]; +}; + +struct dma_fence_chain { + struct dma_fence base; + struct dma_fence *prev; + u64 prev_seqno; + struct dma_fence *fence; + union { + struct dma_fence_cb cb; + struct irq_work work; + }; + spinlock_t lock; +}; + +struct scsi_event { + enum scsi_device_event evt_type; + struct list_head node; +}; + +enum scsi_host_prot_capabilities { + SHOST_DIF_TYPE1_PROTECTION = 1, + SHOST_DIF_TYPE2_PROTECTION = 2, + SHOST_DIF_TYPE3_PROTECTION = 4, + SHOST_DIX_TYPE0_PROTECTION = 8, + SHOST_DIX_TYPE1_PROTECTION = 16, + SHOST_DIX_TYPE2_PROTECTION = 32, + SHOST_DIX_TYPE3_PROTECTION = 64, +}; + +enum { + ACTION_FAIL = 0, + ACTION_REPREP = 1, + ACTION_DELAYED_REPREP = 2, + ACTION_RETRY = 3, + ACTION_DELAYED_RETRY = 4, +}; + +enum { + VETH_INFO_UNSPEC = 0, + VETH_INFO_PEER = 1, + __VETH_INFO_MAX = 2, +}; + +struct veth_stats { + u64 rx_drops; + u64 xdp_packets; + u64 xdp_bytes; + u64 xdp_redirect; + u64 xdp_drops; + u64 xdp_tx; + u64 xdp_tx_err; + u64 peer_tq_xdp_xmit; + u64 peer_tq_xdp_xmit_err; +}; + +struct veth_rq_stats { + struct veth_stats vs; + struct u64_stats_sync syncp; +}; + +struct veth_rq { + struct napi_struct xdp_napi; + struct napi_struct *napi; + struct net_device *dev; + struct bpf_prog *xdp_prog; + struct xdp_mem_info xdp_mem; + struct veth_rq_stats stats; + bool rx_notify_masked; + long: 64; + long: 64; + long: 64; + long: 64; + struct ptr_ring xdp_ring; + struct xdp_rxq_info xdp_rxq; + struct page_pool *page_pool; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct veth_priv { + struct net_device *peer; + atomic64_t dropped; + struct bpf_prog *_xdp_prog; + struct veth_rq *rq; + unsigned int requested_headroom; +}; + +struct veth_xdp_tx_bq { + struct xdp_frame *q[16]; + unsigned int count; +}; + +struct veth_q_stat_desc { + char desc[32]; + size_t offset; +}; + +struct veth_xdp_buff { + struct xdp_buff xdp; + struct sk_buff *skb; +}; + +struct usb_descriptor_header { + 
__u8 bLength; + __u8 bDescriptorType; +}; + +struct usb_dev_cap_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +struct quirks_list_struct { + struct hid_device_id hid_bl_item; + struct list_head node; +}; + +struct gnet_estimator { + signed char interval; + unsigned char ewma_log; +}; + +struct net_rate_estimator { + struct gnet_stats_basic_sync *bstats; + spinlock_t *stats_lock; + bool running; + struct gnet_stats_basic_sync *cpu_bstats; + u8 ewma_log; + u8 intvl_log; + seqcount_t seq; + u64 last_packets; + u64 last_bytes; + u64 avpps; + u64 avbps; + long unsigned int next_jiffies; + struct timer_list timer; + struct callback_head rcu; +}; + +struct dst_cache_pcpu { + long unsigned int refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; + +struct bpf_sk_lookup { + union { + union { + struct bpf_sock *sk; + }; + __u64 cookie; + }; + __u32 family; + __u32 protocol; + __u32 remote_ip4; + __u32 remote_ip6[4]; + __be16 remote_port; + __u32 local_ip4; + __u32 local_ip6[4]; + __u32 local_port; + __u32 ingress_ifindex; +}; + +struct bpf_flow_dissector { + struct bpf_flow_keys *flow_keys; + const struct sk_buff *skb; + const void *data; + const void *data_end; +}; + +struct trace_event_raw_bpf_trigger_tp { + struct trace_entry ent; + int nonce; + char __data[0]; +}; + +struct trace_event_raw_bpf_test_finish { + struct trace_entry ent; + int err; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trigger_tp {}; + +struct trace_event_data_offsets_bpf_test_finish {}; + +typedef void (*btf_trace_bpf_trigger_tp)(void *, int); + +typedef void (*btf_trace_bpf_test_finish)(void *, int *); + +struct bpf_test_timer { + enum { + NO_PREEMPT = 0, + NO_MIGRATE = 1, + } mode; + u32 i; + u64 time_start; + u64 time_spent; +}; + +struct xdp_page_head { + struct xdp_buff orig_ctx; + struct xdp_buff ctx; + union { + struct { + struct {} __empty_frame; + struct xdp_frame frame[0]; + }; + struct { + struct {} __empty_data; + u8 data[0]; + }; + }; +}; + +struct xdp_test_data { + struct xdp_buff *orig_ctx; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct xdp_rxq_info rxq; + struct net_device *dev; + struct page_pool *pp; + struct xdp_frame **frames; + struct sk_buff **skbs; + struct xdp_mem_info mem; + u32 batch_size; + u32 frame_cnt; + long: 64; + long: 64; +}; + +struct bpf_fentry_test_t { + struct bpf_fentry_test_t *a; +}; + +struct prog_test_member1 { + int a; +}; + +struct prog_test_member { + struct prog_test_member1 m; + int c; +}; + +struct prog_test_ref_kfunc { + int a; + int b; + struct prog_test_member memb; + struct prog_test_ref_kfunc *next; + refcount_t cnt; +}; + +struct bpf_raw_tp_test_run_info { + struct bpf_prog *prog; + void *ctx; + u32 retval; +}; + +struct phy_req_info { + struct ethnl_req_info base; + struct phy_device_node *pdn; +}; + +struct ethnl_phy_dump_ctx { + struct phy_req_info *phy_req_info; + long unsigned int ifindex; + long unsigned int phy_index; +}; + +struct nf_conn___init { + struct nf_conn ct; +}; + +struct xt_tcp { + __u16 spts[2]; + __u16 dpts[2]; + __u8 option; + __u8 flg_mask; + __u8 flg_cmp; + __u8 invflags; +}; + +struct xt_udp { + __u16 spts[2]; + __u16 dpts[2]; + __u8 invflags; +}; + +struct ipt_icmp { + __u8 type; + __u8 code[2]; + __u8 invflags; +}; + +struct ip6t_icmp { + __u8 type; + __u8 code[2]; + __u8 invflags; +}; + +struct xt_get_revision { + char name[29]; + __u8 revision; +}; + +struct ipt_getinfo { + 
char name[32]; + unsigned int valid_hooks; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int num_entries; + unsigned int size; +}; + +struct ipt_get_entries { + char name[32]; + unsigned int size; + struct ipt_entry entrytable[0]; +}; + +struct ipt_error { + struct ipt_entry entry; + struct xt_error_target target; +}; + +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + struct in_addr ipi_addr; +}; + +union net_bridge_eht_addr { + __be32 ip4; + struct in6_addr ip6; +}; + +struct net_bridge_group_eht_host { + struct rb_node rb_node; + union net_bridge_eht_addr h_addr; + struct hlist_head set_entries; + unsigned int num_entries; + unsigned char filter_mode; + struct net_bridge_port_group *pg; +}; + +struct net_bridge_group_eht_set; + +struct net_bridge_group_eht_set_entry { + struct rb_node rb_node; + struct hlist_node host_list; + union net_bridge_eht_addr h_addr; + struct timer_list timer; + struct net_bridge *br; + struct net_bridge_group_eht_set *eht_set; + struct net_bridge_group_eht_host *h_parent; + struct net_bridge_mcast_gc mcast_gc; +}; + +struct net_bridge_group_eht_set { + struct rb_node rb_node; + union net_bridge_eht_addr src_addr; + struct rb_root entry_tree; + struct timer_list timer; + struct net_bridge_port_group *pg; + struct net_bridge *br; + struct net_bridge_mcast_gc mcast_gc; +}; + +typedef bool pstate_check_t(long unsigned int); + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +enum siginfo_layout { + SIL_KILL = 0, + SIL_TIMER = 1, + SIL_POLL = 2, + SIL_FAULT = 3, + SIL_FAULT_TRAPNO = 4, + SIL_FAULT_MCEERR = 5, + SIL_FAULT_BNDERR = 6, + SIL_FAULT_PKUERR = 7, + SIL_FAULT_PERF_EVENT = 8, + SIL_CHLD = 9, + SIL_RT = 10, + SIL_SYS = 11, +}; + +struct break_hook { + struct list_head node; + int (*fn)(struct pt_regs *, long unsigned int); + u16 imm; + u16 mask; +}; + +struct sys64_hook { + long unsigned int esr_mask; + long unsigned int esr_val; + void (*handler)(long unsigned int, struct pt_regs *); +}; + +struct arm_cpuidle_irq_context {}; + +struct cpu_suspend_ctx { + u64 ctx_regs[13]; + u64 sp; +}; + +struct sleep_stack_data { + struct cpu_suspend_ctx system_regs; + long unsigned int callee_saved_regs[12]; +}; + +struct trans_pgd_info { + void * (*trans_alloc_page)(void *); + void *trans_alloc_arg; +}; + +struct objpool_slot { + uint32_t head; + uint32_t tail; + uint32_t last; + uint32_t mask; + void *entries[0]; +}; + +struct objpool_head; + +typedef int (*objpool_fini_cb)(struct objpool_head *, void *); + +struct objpool_head { + int obj_size; + int nr_objs; + int nr_possible_cpus; + int capacity; + gfp_t gfp; + refcount_t ref; + long unsigned int flags; + struct objpool_slot **cpu_slots; + objpool_fini_cb release; + void *context; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +struct kprobe_ctlblk { + unsigned int kprobe_status; + long unsigned int saved_irqflag; + struct prev_kprobe prev_kprobe; +}; + +struct kretprobe_instance; + +typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); + +struct kretprobe_holder; + +struct kretprobe_instance { + struct callback_head rcu; + struct llist_node llist; + struct kretprobe_holder *rph; + kprobe_opcode_t *ret_addr; + void *fp; + char data[0]; +}; + +struct kretprobe; + +struct kretprobe_holder { + struct kretprobe *rp; + struct objpool_head pool; +}; + +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + kretprobe_handler_t entry_handler; + int 
maxactive; + int nmissed; + size_t data_size; + struct kretprobe_holder *rph; +}; + +struct kprobe_insn_cache { + struct mutex mutex; + void * (*alloc)(); + void (*free)(void *); + const char *sym; + struct list_head pages; + size_t insn_size; + int nr_garbage; +}; + +struct fault_info { + int (*fn)(long unsigned int, long unsigned int, struct pt_regs *); + int sig; + int code; + const char *name; +}; + +enum { + KVM_REG_ARM_STD_BIT_TRNG_V1_0 = 0, + KVM_REG_ARM_STD_BMAP_BIT_COUNT = 1, +}; + +enum { + KVM_REG_ARM_STD_HYP_BIT_PV_TIME = 0, + KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT = 1, +}; + +enum { + KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0, + KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1, + KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT = 2, +}; + +enum kvm_smccc_filter_action { + KVM_SMCCC_FILTER_HANDLE = 0, + KVM_SMCCC_FILTER_DENY = 1, + KVM_SMCCC_FILTER_FWD_TO_USER = 2, + NR_SMCCC_FILTER_ACTIONS = 3, +}; + +struct kvm_smccc_filter { + __u32 base; + __u32 nr_functions; + __u8 action; + __u8 pad[15]; +}; + +typedef int (*objpool_init_obj_cb)(void *, void *); + +struct kprobe_blacklist_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; +}; + +struct kprobe_insn_page { + struct list_head list; + kprobe_opcode_t *insns; + struct kprobe_insn_cache *cache; + int nused; + int ngarbage; + char slot_used[0]; +}; + +enum kprobe_slot_state { + SLOT_CLEAN = 0, + SLOT_DIRTY = 1, + SLOT_USED = 2, +}; + +enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + BTF_FUNC_EXTERN = 2, +}; + +struct btf_var_secinfo { + __u32 type; + __u32 offset; + __u32 size; +}; + +struct bpf_kfunc_desc { + struct btf_func_model func_model; + u32 func_id; + s32 imm; + u16 offset; + long unsigned int addr; +}; + +struct bpf_kfunc_desc_tab { + struct bpf_kfunc_desc descs[256]; + u32 nr_descs; +}; + +struct bpf_kfunc_btf { + struct btf *btf; + struct module *module; + u16 offset; +}; + +struct bpf_kfunc_btf_tab { + struct bpf_kfunc_btf descs[256]; + u32 nr_descs; +}; + +enum { + BPF_MAX_LOOPS = 8388608, +}; + +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + +struct bpf_core_ctx { + struct bpf_verifier_log *log; + const struct btf *btf; +}; + +struct bpf_verifier_stack_elem { + struct bpf_verifier_state st; + int insn_idx; + int prev_insn_idx; + struct bpf_verifier_stack_elem *next; + u32 log_pos; +}; + +struct bpf_call_arg_meta { + struct bpf_map *map_ptr; + bool raw_mode; + bool pkt_access; + u8 release_regno; + int regno; + int access_size; + int mem_size; + u64 msize_max_value; + int ref_obj_id; + int dynptr_id; + int map_uid; + int func_id; + struct btf *btf; + u32 btf_id; + struct btf *ret_btf; + u32 ret_btf_id; + u32 subprogno; + struct btf_field *kptr_field; +}; + +struct bpf_kfunc_call_arg_meta { + struct btf *btf; + u32 func_id; + u32 kfunc_flags; + const struct btf_type *func_proto; + const char *func_name; + u32 ref_obj_id; + u8 release_regno; + bool r0_rdonly; + u32 ret_btf_id; + u64 r0_size; + u32 subprogno; + struct { + u64 value; + bool found; + } arg_constant; + struct btf *arg_btf; + u32 arg_btf_id; + bool arg_owning_ref; + struct { + struct btf_field *field; + } arg_list_head; + struct { + struct btf_field *field; + } arg_rbtree_root; + struct { + enum bpf_dynptr_type type; + u32 id; + u32 ref_obj_id; + } initialized_dynptr; + struct { + u8 spi; + u8 frameno; + } iter; + struct { + struct bpf_map *ptr; + int uid; + } map; + u64 mem_size; +}; + +enum reg_arg_type { + SRC_OP = 0, + DST_OP = 
1, + DST_OP_NO_MARK = 2, +}; + +struct linked_reg { + u8 frameno; + union { + u8 spi; + u8 regno; + }; + bool is_reg; +}; + +struct linked_regs { + int cnt; + struct linked_reg entries[6]; +}; + +enum bpf_access_src { + ACCESS_DIRECT = 1, + ACCESS_HELPER = 2, +}; + +struct task_struct__safe_rcu { + const cpumask_t *cpus_ptr; + struct css_set *cgroups; + struct task_struct *real_parent; + struct task_struct *group_leader; +}; + +struct cgroup__safe_rcu { + struct kernfs_node *kn; +}; + +struct css_set__safe_rcu { + struct cgroup *dfl_cgrp; +}; + +struct mm_struct__safe_rcu_or_null { + struct file *exe_file; +}; + +struct sk_buff__safe_rcu_or_null { + struct sock *sk; +}; + +struct request_sock__safe_rcu_or_null { + struct sock *sk; +}; + +struct bpf_iter_meta__safe_trusted { + struct seq_file *seq; +}; + +struct bpf_iter__task__safe_trusted { + struct bpf_iter_meta *meta; + struct task_struct *task; +}; + +struct linux_binprm__safe_trusted { + struct file *file; +}; + +struct file__safe_trusted { + struct inode *f_inode; +}; + +struct dentry__safe_trusted { + struct inode *d_inode; +}; + +struct socket__safe_trusted_or_null { + struct sock *sk; +}; + +struct bpf_reg_types { + const enum bpf_reg_type types[10]; + u32 *btf_id; +}; + +enum { + AT_PKT_END = -1, + BEYOND_PKT_END = -2, +}; + +typedef int (*set_callee_state_fn)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *, int); + +enum { + KF_ARG_DYNPTR_ID = 0, + KF_ARG_LIST_HEAD_ID = 1, + KF_ARG_LIST_NODE_ID = 2, + KF_ARG_RB_ROOT_ID = 3, + KF_ARG_RB_NODE_ID = 4, + KF_ARG_WORKQUEUE_ID = 5, +}; + +enum kfunc_ptr_arg_type { + KF_ARG_PTR_TO_CTX = 0, + KF_ARG_PTR_TO_ALLOC_BTF_ID = 1, + KF_ARG_PTR_TO_REFCOUNTED_KPTR = 2, + KF_ARG_PTR_TO_DYNPTR = 3, + KF_ARG_PTR_TO_ITER = 4, + KF_ARG_PTR_TO_LIST_HEAD = 5, + KF_ARG_PTR_TO_LIST_NODE = 6, + KF_ARG_PTR_TO_BTF_ID = 7, + KF_ARG_PTR_TO_MEM = 8, + KF_ARG_PTR_TO_MEM_SIZE = 9, + KF_ARG_PTR_TO_CALLBACK = 10, + KF_ARG_PTR_TO_RB_ROOT = 11, + KF_ARG_PTR_TO_RB_NODE = 12, + KF_ARG_PTR_TO_NULL = 13, + KF_ARG_PTR_TO_CONST_STR = 14, + KF_ARG_PTR_TO_MAP = 15, + KF_ARG_PTR_TO_WORKQUEUE = 16, +}; + +enum special_kfunc_type { + KF_bpf_obj_new_impl = 0, + KF_bpf_obj_drop_impl = 1, + KF_bpf_refcount_acquire_impl = 2, + KF_bpf_list_push_front_impl = 3, + KF_bpf_list_push_back_impl = 4, + KF_bpf_list_pop_front = 5, + KF_bpf_list_pop_back = 6, + KF_bpf_cast_to_kern_ctx = 7, + KF_bpf_rdonly_cast = 8, + KF_bpf_rcu_read_lock = 9, + KF_bpf_rcu_read_unlock = 10, + KF_bpf_rbtree_remove = 11, + KF_bpf_rbtree_add_impl = 12, + KF_bpf_rbtree_first = 13, + KF_bpf_dynptr_from_skb = 14, + KF_bpf_dynptr_from_xdp = 15, + KF_bpf_dynptr_slice = 16, + KF_bpf_dynptr_slice_rdwr = 17, + KF_bpf_dynptr_clone = 18, + KF_bpf_percpu_obj_new_impl = 19, + KF_bpf_percpu_obj_drop_impl = 20, + KF_bpf_throw = 21, + KF_bpf_wq_set_callback_impl = 22, + KF_bpf_preempt_disable = 23, + KF_bpf_preempt_enable = 24, + KF_bpf_iter_css_task_new = 25, + KF_bpf_session_cookie = 26, + KF_bpf_get_kmem_cache = 27, +}; + +enum { + REASON_BOUNDS = -1, + REASON_TYPE = -2, + REASON_PATHS = -3, + REASON_LIMIT = -4, + REASON_STACK = -5, +}; + +struct bpf_sanitize_info { + struct bpf_insn_aux_data aux; + bool mask_to_left; +}; + +enum { + DISCOVERED = 16, + EXPLORED = 32, + FALLTHROUGH = 1, + BRANCH = 2, +}; + +enum { + DONE_EXPLORING = 0, + KEEP_EXPLORING = 1, +}; + +enum exact_level { + NOT_EXACT = 0, + EXACT = 1, + RANGE_WITHIN = 2, +}; + +struct bpf_iter; + +typedef int cydp_t; + +struct madvise_walk_private { + struct mmu_gather *tlb; + bool 
pageout; +}; + +typedef struct { + void *lock; +} class_preempt_t; + +struct files_stat_struct { + long unsigned int nr_files; + long unsigned int nr_free_files; + long unsigned int max_files; +}; + +struct backing_file { + struct file file; + union { + struct path user_path; + freeptr_t bf_freeptr; + }; +}; + +struct ext4_fc_tl { + __le16 fc_tag; + __le16 fc_len; +}; + +struct ext4_fc_head { + __le32 fc_features; + __le32 fc_tid; +}; + +struct ext4_fc_add_range { + __le32 fc_ino; + __u8 fc_ex[12]; +}; + +struct ext4_fc_del_range { + __le32 fc_ino; + __le32 fc_lblk; + __le32 fc_len; +}; + +struct ext4_fc_dentry_info { + __le32 fc_parent_ino; + __le32 fc_ino; + __u8 fc_dname[0]; +}; + +struct ext4_fc_inode { + __le32 fc_ino; + __u8 fc_raw_inode[0]; +}; + +struct ext4_fc_tail { + __le32 fc_tid; + __le32 fc_crc; +}; + +enum { + EXT4_FC_STATUS_OK = 0, + EXT4_FC_STATUS_INELIGIBLE = 1, + EXT4_FC_STATUS_SKIPPED = 2, + EXT4_FC_STATUS_FAILED = 3, +}; + +struct ext4_fc_dentry_update { + int fcd_op; + int fcd_parent; + int fcd_ino; + struct qstr fcd_name; + unsigned char fcd_iname[40]; + struct list_head fcd_list; + struct list_head fcd_dilist; +}; + +struct __track_dentry_update_args { + struct dentry *dentry; + int op; +}; + +struct __track_range_args { + ext4_lblk_t start; + ext4_lblk_t end; +}; + +struct dentry_info_args { + int parent_ino; + int dname_len; + int ino; + int inode_len; + char *dname; +}; + +struct ext4_fc_tl_mem { + u16 fc_tag; + u16 fc_len; +}; + +enum fuse_notify_code { + FUSE_NOTIFY_POLL = 1, + FUSE_NOTIFY_INVAL_INODE = 2, + FUSE_NOTIFY_INVAL_ENTRY = 3, + FUSE_NOTIFY_STORE = 4, + FUSE_NOTIFY_RETRIEVE = 5, + FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_RESEND = 7, + FUSE_NOTIFY_CODE_MAX = 8, +}; + +struct fuse_batch_forget_in { + uint32_t count; + uint32_t dummy; +}; + +struct fuse_interrupt_in { + uint64_t unique; +}; + +struct fuse_notify_inval_inode_out { + uint64_t ino; + int64_t off; + int64_t len; +}; + +struct fuse_notify_inval_entry_out { + uint64_t parent; + uint32_t namelen; + uint32_t flags; +}; + +struct fuse_notify_delete_out { + uint64_t parent; + uint64_t child; + uint32_t namelen; + uint32_t padding; +}; + +struct fuse_notify_store_out { + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_out { + uint64_t notify_unique; + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_in { + uint64_t dummy1; + uint64_t offset; + uint32_t size; + uint32_t dummy2; + uint64_t dummy3; + uint64_t dummy4; +}; + +enum fuse_req_flag { + FR_ISREPLY = 0, + FR_FORCE = 1, + FR_BACKGROUND = 2, + FR_WAITING = 3, + FR_ABORTED = 4, + FR_INTERRUPTED = 5, + FR_LOCKED = 6, + FR_PENDING = 7, + FR_SENT = 8, + FR_FINISHED = 9, + FR_PRIVATE = 10, + FR_ASYNC = 11, +}; + +struct fuse_pqueue { + unsigned int connected; + spinlock_t lock; + struct list_head *processing; + struct list_head io; +}; + +struct fuse_dev { + struct fuse_conn *fc; + struct fuse_pqueue pq; + struct list_head entry; +}; + +struct trace_event_raw_fuse_request_send { + struct trace_entry ent; + dev_t connection; + uint64_t unique; + enum fuse_opcode opcode; + uint32_t len; + char __data[0]; +}; + +struct trace_event_raw_fuse_request_end { + struct trace_entry ent; + dev_t connection; + uint64_t unique; + uint32_t len; + int32_t error; + char __data[0]; +}; + +struct trace_event_data_offsets_fuse_request_send {}; + +struct trace_event_data_offsets_fuse_request_end {}; + +typedef void (*btf_trace_fuse_request_send)(void *, 
const struct fuse_req *); + +typedef void (*btf_trace_fuse_request_end)(void *, const struct fuse_req *); + +struct fuse_copy_state { + int write; + struct fuse_req *req; + struct iov_iter *iter; + struct pipe_buffer *pipebufs; + struct pipe_buffer *currbuf; + struct pipe_inode_info *pipe; + long unsigned int nr_segs; + struct page *pg; + unsigned int len; + unsigned int offset; + unsigned int move_pages: 1; +}; + +struct fuse_retrieve_args { + struct fuse_args_pages ap; + struct fuse_notify_retrieve_in inarg; +}; + +struct btrfs_ioctl_dev_replace_start_params { + __u64 srcdevid; + __u64 cont_reading_from_srcdev_mode; + __u8 srcdev_name[1025]; + __u8 tgtdev_name[1025]; +}; + +struct btrfs_ioctl_dev_replace_status_params { + __u64 replace_state; + __u64 progress_1000; + __u64 time_started; + __u64 time_stopped; + __u64 num_write_errors; + __u64 num_uncorrectable_read_errors; +}; + +struct btrfs_ioctl_dev_replace_args { + __u64 cmd; + __u64 result; + union { + struct btrfs_ioctl_dev_replace_start_params start; + struct btrfs_ioctl_dev_replace_status_params status; + }; + __u64 spare[64]; +}; + +struct btrfs_dev_replace_item { + __le64 src_devid; + __le64 cursor_left; + __le64 cursor_right; + __le64 cont_reading_from_srcdev_mode; + __le64 replace_state; + __le64 time_started; + __le64 time_stopped; + __le64 num_write_errors; + __le64 num_uncorrectable_read_errors; +}; + +struct ipc_proc_iface { + const char *path; + const char *header; + int ids; + int (*show)(struct seq_file *, void *); +}; + +struct ipc_proc_iter { + struct ipc_namespace *ns; + struct pid_namespace *pid_ns; + struct ipc_proc_iface *iface; +}; + +struct sha256_state { + u32 state[8]; + u64 count; + u8 buf[64]; +}; + +enum dd_data_dir { + DD_READ = 0, + DD_WRITE = 1, +}; + +enum { + DD_DIR_COUNT = 2, +}; + +enum dd_prio { + DD_RT_PRIO = 0, + DD_BE_PRIO = 1, + DD_IDLE_PRIO = 2, + DD_PRIO_MAX = 2, +}; + +enum { + DD_PRIO_COUNT = 3, +}; + +struct io_stats_per_prio { + uint32_t inserted; + uint32_t merged; + uint32_t dispatched; + atomic_t completed; +}; + +struct dd_per_prio { + struct list_head dispatch; + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + sector_t latest_pos[2]; + struct io_stats_per_prio stats; +}; + +struct deadline_data { + struct dd_per_prio per_prio[3]; + enum dd_data_dir last_dir; + unsigned int batching; + unsigned int starved; + int fifo_expire[2]; + int fifo_batch; + int writes_starved; + int front_merges; + u32 async_depth; + int prio_aging_expire; + spinlock_t lock; +}; + +struct io_epoll { + struct file *file; + int epfd; + int op; + int fd; + struct epoll_event event; +}; + +typedef struct { + __be64 a; + __be64 b; +} be128; + +struct gf128mul_4k { + be128 t[256]; +}; + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +typedef struct { + U32 litLength; + U32 matchLength; +} ZSTD_sequenceLength; + +struct font_data { + unsigned int extra[4]; + const unsigned char data[0]; +}; + +struct partition_desc { + int nr_parts; + struct partition_affinity *parts; + struct irq_domain *domain; + struct irq_desc *chained_desc; + long unsigned int *bitmap; + struct irq_domain_ops ops; +}; + +struct pcie_link_state { + struct pci_dev *pdev; + struct pci_dev *downstream; + struct pcie_link_state *root; + struct pcie_link_state *parent; + struct list_head sibling; + u32 aspm_support: 7; + u32 aspm_enabled: 7; + u32 
aspm_capable: 7; + u32 aspm_default: 7; + int: 4; + u32 aspm_disable: 7; + u32 clkpm_capable: 1; + u32 clkpm_enabled: 1; + u32 clkpm_default: 1; + u32 clkpm_disable: 1; +}; + +struct clk_multiplier { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +struct clk_mux { + struct clk_hw hw; + void *reg; + const u32 *table; + u32 mask; + u8 shift; + u8 flags; + spinlock_t *lock; +}; + +struct eeprom_93cx6 { + void *data; + void (*register_read)(struct eeprom_93cx6 *); + void (*register_write)(struct eeprom_93cx6 *); + int width; + unsigned int quirks; + char drive_data; + char reg_data_in; + char reg_data_out; + char reg_data_clock; + char reg_chip_select; +}; + +struct pci_bits { + unsigned int reg; + unsigned int width; + long unsigned int mask; + long unsigned int val; +}; + +enum { + PIIX_IOCFG = 84, + ICH5_PMR = 144, + ICH5_PCS = 146, + PIIX_SIDPR_BAR = 5, + PIIX_SIDPR_LEN = 16, + PIIX_SIDPR_IDX = 0, + PIIX_SIDPR_DATA = 4, + PIIX_FLAG_CHECKINTR = 268435456, + PIIX_FLAG_SIDPR = 536870912, + PIIX_PATA_FLAGS = 1, + PIIX_SATA_FLAGS = 268435458, + PIIX_FLAG_PIO16 = 1073741824, + PIIX_80C_PRI = 48, + PIIX_80C_SEC = 192, + P0 = 0, + P1 = 1, + P2 = 2, + P3 = 3, + IDE = -1, + NA = -2, + RV = -3, + PIIX_AHCI_DEVICE = 6, + PIIX_HOST_BROKEN_SUSPEND = 16777216, +}; + +enum piix_controller_ids { + piix_pata_mwdma = 0, + piix_pata_33 = 1, + ich_pata_33 = 2, + ich_pata_66 = 3, + ich_pata_100 = 4, + ich_pata_100_nomwdma1 = 5, + ich5_sata = 6, + ich6_sata = 7, + ich6m_sata = 8, + ich8_sata = 9, + ich8_2port_sata = 10, + ich8m_apple_sata = 11, + tolapai_sata = 12, + piix_pata_vmw = 13, + ich8_sata_snb = 14, + ich8_2port_sata_snb = 15, + ich8_2port_sata_byt = 16, +}; + +struct piix_map_db { + const u32 mask; + const u16 port_enable; + const int map[0]; +}; + +struct piix_host_priv { + const int *map; + u32 saved_iocfg; + void *sidpr; +}; + +struct ich_laptop { + u16 device; + u16 subvendor; + u16 subdevice; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key; + struct flow_dissector_key_basic *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key; + struct flow_dissector_key_control *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key; + struct flow_dissector_key_eth_addrs *mask; +}; + +struct flow_match_vlan { + struct flow_dissector_key_vlan *key; + struct flow_dissector_key_vlan *mask; +}; + +enum flow_block_binder_type { + FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP = 3, + FLOW_BLOCK_BINDER_TYPE_RED_MARK = 4, +}; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + bool block_shared; + bool unlocked_driver_cb; + struct net *net; + struct flow_block *block; + struct list_head cb_list; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; + struct Qdisc *sch; + struct list_head *cb_list_head; +}; + +enum flow_cls_command { + FLOW_CLS_REPLACE = 0, + FLOW_CLS_DESTROY = 1, + FLOW_CLS_STATS = 2, + FLOW_CLS_TMPLT_CREATE = 3, + FLOW_CLS_TMPLT_DESTROY = 4, +}; + +struct flow_cls_common_offload { + u32 chain_index; + __be16 protocol; + u32 prio; + bool skip_sw; + struct netlink_ext_ack *extack; +}; + +struct flow_cls_offload { + struct flow_cls_common_offload common; + enum flow_cls_command command; + bool use_act_stats; + long unsigned int cookie; + struct flow_rule *rule; + struct flow_stats 
stats; + u32 classid; +}; + +struct tc_query_caps_base { + enum tc_setup_type type; + void *caps; +}; + +struct tc_cbs_qopt_offload { + u8 enable; + s32 queue; + s32 hicredit; + s32 locredit; + s32 idleslope; + s32 sendslope; +}; + +struct tc_etf_qopt_offload { + u8 enable; + s32 queue; +}; + +struct tc_taprio_caps { + bool supports_queue_max_sdu: 1; + bool gate_mask_per_txq: 1; + bool broken_mqprio: 1; +}; + +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; + __le64 hdr_addr; + } read; + struct { + struct { + struct { + __le16 pkt_info; + __le16 hdr_info; + } lo_dword; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; + +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +enum igb_tx_flags { + IGB_TX_FLAGS_VLAN = 1, + IGB_TX_FLAGS_TSO = 2, + IGB_TX_FLAGS_TSTAMP = 4, + IGB_TX_FLAGS_IPV4 = 16, + IGB_TX_FLAGS_CSUM = 32, +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_3K_BUFFER = 0, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED = 1, + IGB_RING_FLAG_RX_SCTP_CSUM = 2, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP = 3, + IGB_RING_FLAG_TX_CTX_IDX = 4, + IGB_RING_FLAG_TX_DETECT_HANG = 5, +}; + +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 1, + IGB_FILTER_FLAG_VLAN_TCI = 2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 8, +}; + +struct igb_nfc_input { + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[6]; + u8 dst_addr[6]; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + long unsigned int cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + +enum igb_boards { + board_82575 = 0, +}; + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY = 0, + QUEUE_MODE_STREAM_RESERVATION = 1, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH = 0, + TX_QUEUE_PRIO_LOW = 1, +}; + +struct igb_reg_info { + u32 ofs; + char *name; +}; + +enum rc6_mode { + RC6_MODE_0 = 0, + RC6_MODE_6A = 1, + RC6_MODE_UNKNOWN = 2, +}; + +enum rc6_state { + STATE_INACTIVE___8 = 0, + STATE_PREFIX_SPACE = 1, + STATE_HEADER_BIT_START___2 = 2, + STATE_HEADER_BIT_END___2 = 3, + STATE_TOGGLE_START = 4, + STATE_TOGGLE_END = 5, + STATE_BODY_BIT_START___2 = 6, + STATE_BODY_BIT_END___2 = 7, + STATE_FINISHED___4 = 8, +}; + +struct dm_kobject_holder { + struct kobject kobj; + struct completion completion; +}; + +struct of_bus___2 { + void (*count_cells)(const void *, int, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int); + int (*translate)(__be32 *, u64, int); +}; + +struct reserved_mem_ops; + +struct reserved_mem { + const char *name; + long unsigned int fdt_node; + const struct reserved_mem_ops *ops; + phys_addr_t base; + phys_addr_t size; + void *priv; +}; + +struct reserved_mem_ops { + int (*device_init)(struct reserved_mem *, struct device *); + void (*device_release)(struct reserved_mem *, struct device *); +}; + +typedef int (*reservedmem_of_init_fn)(struct reserved_mem *); + +struct rmem_assigned_device { + struct device *dev; + struct reserved_mem *rmem; + struct list_head list; +}; + +typedef int (*pp_nl_fill_cb)(struct sk_buff *, const struct page_pool *, const struct genl_info *); + +struct page_pool_dump_cb { + long unsigned int ifindex; + u32 pp_id; +}; + +struct ethtool_forced_speed_map { + u32 speed; + long unsigned int caps[2]; + const u32 *cap_arr; + u32 arr_size; +}; + +struct 
ethtool_cmis_cdb_rpl_hdr { + u8 rpl_len; + u8 rpl_chk_code; +}; + +struct ethtool_cmis_cdb_rpl { + struct ethtool_cmis_cdb_rpl_hdr hdr; + u8 payload[120]; +}; + +struct cmis_rev_rpl { + u8 rev; +}; + +struct cmis_cdb_advert_rpl { + u8 inst_supported; + u8 read_write_len_ext; + u8 resv1; + u8 resv2; +}; + +struct cmis_password_entry_pl { + __be32 password; +}; + +struct cmis_cdb_query_status_pl { + u16 response_delay; +}; + +struct cmis_cdb_query_status_rpl { + u8 length; + u8 status; +}; + +struct cmis_cdb_module_features_rpl { + u8 resv1[34]; + __be16 max_completion_time; +}; + +struct cmis_wait_for_cond_rpl { + u8 state; +}; + +enum nft_last_attributes { + NFTA_LAST_UNSPEC = 0, + NFTA_LAST_SET = 1, + NFTA_LAST_MSECS = 2, + NFTA_LAST_PAD = 3, + __NFTA_LAST_MAX = 4, +}; + +struct nft_last { + long unsigned int jiffies; + unsigned int set; +}; + +struct nft_last_priv { + struct nft_last *last; +}; + +struct ip_fraglist_iter { + struct sk_buff *frag; + struct iphdr *iph; + int offset; + unsigned int hlen; +}; + +struct ip_frag_state { + bool DF; + unsigned int hlen; + unsigned int ll_rs; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + __be16 not_last_frag; +}; + +struct ip_reply_arg { + struct kvec iov[1]; + int flags; + __wsum csum; + int csumoffset; + int bound_dev_if; + u8 tos; + kuid_t uid; +}; + +struct udp_tunnel_nic_ops { + void (*get_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + void (*set_port_priv)(struct net_device *, unsigned int, unsigned int, u8); + void (*add_port)(struct net_device *, struct udp_tunnel_info *); + void (*del_port)(struct net_device *, struct udp_tunnel_info *); + void (*reset_ntf)(struct net_device *); + size_t (*dump_size)(struct net_device *, unsigned int); + int (*dump_write)(struct net_device *, unsigned int, struct sk_buff *); +}; + +struct nf_bridge_frag_data; + +enum format_type { + FORMAT_TYPE_NONE = 0, + FORMAT_TYPE_WIDTH = 1, + FORMAT_TYPE_PRECISION = 2, + FORMAT_TYPE_CHAR = 3, + FORMAT_TYPE_STR = 4, + FORMAT_TYPE_PTR = 5, + FORMAT_TYPE_PERCENT_CHAR = 6, + FORMAT_TYPE_INVALID = 7, + FORMAT_TYPE_LONG_LONG = 8, + FORMAT_TYPE_ULONG = 9, + FORMAT_TYPE_LONG = 10, + FORMAT_TYPE_UBYTE = 11, + FORMAT_TYPE_BYTE = 12, + FORMAT_TYPE_USHORT = 13, + FORMAT_TYPE_SHORT = 14, + FORMAT_TYPE_UINT = 15, + FORMAT_TYPE_INT = 16, + FORMAT_TYPE_SIZE_T = 17, + FORMAT_TYPE_PTRDIFF = 18, +}; + +struct printf_spec { + unsigned int type: 8; + int field_width: 24; + unsigned int flags: 8; + unsigned int base: 8; + int precision: 16; +}; + +struct page_flags_fields { + int width; + int shift; + int mask; + const struct printf_spec *spec; + const char *name; +}; + +typedef void text_poke_f(void *, void *, size_t, size_t); + +struct aarch64_insn_patch { + void **text_addrs; + u32 *new_insns; + int insn_cnt; + atomic_t cpu_count; +}; + +enum exception_type { + except_type_sync = 0, + except_type_irq = 128, + except_type_fiq = 256, + except_type_serror = 384, +}; + +enum { + KTW_FREEZABLE = 1, +}; + +struct kthread_create_info { + char *full_name; + int (*threadfn)(void *); + void *data; + int node; + struct task_struct *result; + struct completion *done; + struct list_head list; +}; + +struct kthread { + long unsigned int flags; + unsigned int cpu; + int result; + int (*threadfn)(void *); + void *data; + struct completion parked; + struct completion exited; + struct cgroup_subsys_state *blkcg_css; + char *full_name; +}; + +enum KTHREAD_BITS { + KTHREAD_IS_PER_CPU = 0, + KTHREAD_SHOULD_STOP = 1, + KTHREAD_SHOULD_PARK = 2, +}; 
+ +struct kthread_flush_work { + struct kthread_work work; + struct completion done; +}; + +struct optimistic_spin_node { + struct optimistic_spin_node *next; + struct optimistic_spin_node *prev; + int locked; + int cpu; +}; + +struct pm_vt_switch { + struct list_head head; + struct device *dev; + bool required; +}; + +struct msi_dev_domain { + struct xarray store; + struct irq_domain *domain; +}; + +struct msi_device_data { + long unsigned int properties; + struct mutex mutex; + struct msi_dev_domain __domains[1]; + long unsigned int __iter_idx; +}; + +struct msi_map { + int index; + int virq; +}; + +struct msi_ctrl { + unsigned int domid; + unsigned int first; + unsigned int last; + unsigned int nirqs; +}; + +enum freezer_state_flags { + CGROUP_FREEZER_ONLINE = 1, + CGROUP_FREEZING_SELF = 2, + CGROUP_FREEZING_PARENT = 4, + CGROUP_FROZEN = 8, + CGROUP_FREEZING = 6, +}; + +struct freezer { + struct cgroup_subsys_state css; + unsigned int state; +}; + +struct rchan_percpu_buf_dispatcher { + struct rchan_buf *buf; + struct dentry *dentry; +}; + +struct bpf_iter_num { + __u64 __opaque[1]; +}; + +struct bpf_iter_target_info { + struct list_head list; + const struct bpf_iter_reg *reg_info; + u32 btf_id; +}; + +struct bpf_iter_link { + struct bpf_link link; + struct bpf_iter_aux_info aux; + struct bpf_iter_target_info *tinfo; +}; + +struct bpf_iter_priv_data { + struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; + struct bpf_prog *prog; + u64 session_id; + u64 seq_num; + bool done_stop; + long: 0; + u8 target_private[0]; +}; + +typedef u64 (*btf_bpf_for_each_map_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_loop)(u32, void *, void *, u64); + +struct bpf_iter_num_kern { + int cur; + int end; +}; + +enum bpf_stack_build_id_status { + BPF_STACK_BUILD_ID_EMPTY = 0, + BPF_STACK_BUILD_ID_VALID = 1, + BPF_STACK_BUILD_ID_IP = 2, +}; + +struct bpf_stack_build_id { + __s32 status; + unsigned char build_id[20]; + union { + __u64 offset; + __u64 ip; + }; +}; + +enum { + BPF_F_SKIP_FIELD_MASK = 255, + BPF_F_USER_STACK = 256, + BPF_F_FAST_STACK_CMP = 512, + BPF_F_REUSE_STACKID = 1024, + BPF_F_USER_BUILD_ID = 2048, +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = 18446744073709551584ULL, + PERF_CONTEXT_KERNEL = 18446744073709551488ULL, + PERF_CONTEXT_USER = 18446744073709551104ULL, + PERF_CONTEXT_GUEST = 18446744073709549568ULL, + PERF_CONTEXT_GUEST_KERNEL = 18446744073709549440ULL, + PERF_CONTEXT_GUEST_USER = 18446744073709549056ULL, + PERF_CONTEXT_MAX = 18446744073709547521ULL, +}; + +struct stack_map_bucket { + struct pcpu_freelist_node fnode; + u32 hash; + u32 nr; + u64 data[0]; +}; + +struct bpf_stack_map { + struct bpf_map map; + void *elems; + struct pcpu_freelist freelist; + u32 n_buckets; + struct stack_map_bucket *buckets[0]; +}; + +typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stackid_pe)(struct bpf_perf_event_data_kern *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_sleepable)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack_sleepable)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_pe)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct mmu_notifier_subscriptions { + struct hlist_head list; + bool has_itree; + spinlock_t lock; + long unsigned 
int invalidate_seq; + long unsigned int active_invalidate_ranges; + struct rb_root_cached itree; + wait_queue_head_t wq; + struct hlist_head deferred_list; +}; + +struct mmu_interval_notifier; + +struct mmu_interval_notifier_ops { + bool (*invalidate)(struct mmu_interval_notifier *, const struct mmu_notifier_range *, long unsigned int); +}; + +struct mmu_interval_notifier { + struct interval_tree_node interval_tree; + const struct mmu_interval_notifier_ops *ops; + struct mm_struct *mm; + struct hlist_node deferred_item; + long unsigned int invalidate_seq; +}; + +struct zbud_pool { + spinlock_t lock; + union { + struct list_head buddied; + struct list_head unbuddied[63]; + }; + u64 pages_nr; +}; + +struct zbud_header { + struct list_head buddy; + unsigned int first_chunks; + unsigned int last_chunks; +}; + +enum buddy { + FIRST = 0, + LAST = 1, +}; + +typedef void (*iomap_punch_t)(struct inode *, loff_t, loff_t, struct iomap *); + +struct iomap_ioend { + struct list_head io_list; + u16 io_type; + u16 io_flags; + struct inode *io_inode; + size_t io_size; + loff_t io_offset; + sector_t io_sector; + struct bio io_bio; +}; + +struct iomap_writepage_ctx; + +struct iomap_writeback_ops { + int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t, unsigned int); + int (*prepare_ioend)(struct iomap_ioend *, int); + void (*discard_folio)(struct folio *, loff_t); +}; + +struct iomap_writepage_ctx { + struct iomap iomap; + struct iomap_ioend *ioend; + const struct iomap_writeback_ops *ops; + u32 nr_folios; +}; + +struct iomap_folio_state { + spinlock_t state_lock; + unsigned int read_bytes_pending; + atomic_t write_bytes_pending; + long unsigned int state[0]; +}; + +struct iomap_readpage_ctx { + struct folio *cur_folio; + bool cur_folio_in_bio; + struct bio *bio; + struct readahead_control *rac; +}; + +struct migrate_struct { + ext4_lblk_t first_block; + ext4_lblk_t last_block; + ext4_lblk_t curr_block; + ext4_fsblk_t first_pblock; + ext4_fsblk_t last_pblock; +}; + +struct fat_boot_fsinfo { + __le32 signature1; + __le32 reserved1[120]; + __le32 signature2; + __le32 free_clusters; + __le32 next_cluster; + __le32 reserved2[4]; +}; + +struct fat_bios_param_block { + u16 fat_sector_size; + u8 fat_sec_per_clus; + u16 fat_reserved; + u8 fat_fats; + u16 fat_dir_entries; + u16 fat_sectors; + u16 fat_fat_length; + u32 fat_total_sect; + u8 fat16_state; + u32 fat16_vol_id; + u32 fat32_length; + u32 fat32_root_cluster; + u16 fat32_info_sector; + u8 fat32_state; + u32 fat32_vol_id; +}; + +struct fat_floppy_defaults { + unsigned int nr_sectors; + unsigned int sec_per_clus; + unsigned int dir_entries; + unsigned int media; + unsigned int fat_length; +}; + +enum { + Opt_check___2 = 0, + Opt_uid___4 = 1, + Opt_gid___4 = 2, + Opt_umask = 3, + Opt_dmask = 4, + Opt_fmask = 5, + Opt_allow_utime = 6, + Opt_codepage = 7, + Opt_usefree = 8, + Opt_nocase = 9, + Opt_quiet = 10, + Opt_showexec = 11, + Opt_debug___2 = 12, + Opt_immutable = 13, + Opt_dots = 14, + Opt_dotsOK = 15, + Opt_charset = 16, + Opt_shortname = 17, + Opt_utf8___2 = 18, + Opt_utf8_bool = 19, + Opt_uni_xl = 20, + Opt_uni_xl_bool = 21, + Opt_nonumtail = 22, + Opt_nonumtail_bool = 23, + Opt_obsolete = 24, + Opt_flush = 25, + Opt_tz = 26, + Opt_rodir = 27, + Opt_errors___2 = 28, + Opt_discard___3 = 29, + Opt_nfs = 30, + Opt_nfs_enum = 31, + Opt_time_offset = 32, + Opt_dos1xfloppy = 33, +}; + +struct sem_undo_list { + refcount_t refcnt; + spinlock_t lock; + struct list_head list_proc; +}; + +struct sem; + +struct sem_queue; + +struct sem_undo; + 
+struct semid_ds { + struct ipc_perm sem_perm; + __kernel_old_time_t sem_otime; + __kernel_old_time_t sem_ctime; + struct sem *sem_base; + struct sem_queue *sem_pending; + struct sem_queue **sem_pending_last; + struct sem_undo *undo; + short unsigned int sem_nsems; +}; + +struct sem { + int semval; + struct pid *sempid; + spinlock_t lock; + struct list_head pending_alter; + struct list_head pending_const; + time64_t sem_otime; +}; + +struct sembuf; + +struct sem_queue { + struct list_head list; + struct task_struct *sleeper; + struct sem_undo *undo; + struct pid *pid; + int status; + struct sembuf *sops; + struct sembuf *blocking; + int nsops; + bool alter; + bool dupsop; +}; + +struct sem_undo { + struct list_head list_proc; + struct callback_head rcu; + struct sem_undo_list *ulp; + struct list_head list_id; + int semid; + short int semadj[0]; +}; + +struct semid64_ds { + struct ipc64_perm sem_perm; + long int sem_otime; + long int sem_ctime; + long unsigned int sem_nsems; + long unsigned int __unused3; + long unsigned int __unused4; +}; + +struct sembuf { + short unsigned int sem_num; + short int sem_op; + short int sem_flg; +}; + +struct seminfo { + int semmap; + int semmni; + int semmns; + int semmnu; + int semmsl; + int semopm; + int semume; + int semusz; + int semvmx; + int semaem; +}; + +struct sem_array { + struct kern_ipc_perm sem_perm; + time64_t sem_ctime; + struct list_head pending_alter; + struct list_head pending_const; + struct list_head list_id; + int sem_nsems; + int complex_count; + unsigned int use_global_lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct sem sems[0]; +}; + +struct hmac_ctx { + struct crypto_shash *hash; + u8 pads[0]; +}; + +struct blk_ia_range_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_independent_access_range *, char *); +}; + +struct bd_holder_disk { + struct list_head list; + struct kobject *holder_dir; + int refcnt; +}; + +struct io_splice { + struct file *file_out; + loff_t off_out; + loff_t off_in; + u64 len; + int splice_fd_in; + unsigned int flags; + struct io_rsrc_node *rsrc_node; +}; + +struct wrapper { + cmp_func_t cmp; + swap_func_t swap; +}; + +struct iov_iter_state { + size_t iov_offset; + size_t count; + long unsigned int nr_segs; +}; + +typedef s32 compat_ssize_t; + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +struct device_attach_data { + struct device *dev; + bool check_async; + bool want_async; + bool have_async; +}; + +struct ata_internal { + struct scsi_transport_template t; + struct device_attribute private_port_attrs[3]; + struct device_attribute private_link_attrs[3]; + struct device_attribute private_dev_attrs[9]; + struct transport_container link_attr_cont; + struct transport_container dev_attr_cont; + struct device_attribute *link_attrs[4]; + struct device_attribute *port_attrs[4]; + struct device_attribute *dev_attrs[10]; +}; + +struct ata_show_ering_arg { + char *buf; + int written; +}; + +struct e1000_opt_list { + int i; + char *str; +}; + +struct e1000_option { + enum { + enable_option = 0, + range_option = 1, + list_option = 2, + } type; + const char *name; + const char *err; + int def; + union { + struct { + int min; + int max; + } r; + struct { + int nr; + struct e1000_opt_list *p; + } l; + } arg; +}; + +struct trace_event_raw_smbus_write { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct 
trace_event_raw_smbus_read { + struct trace_entry ent; + int adapter_nr; + __u16 flags; + __u16 addr; + __u8 command; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_reply { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_result { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 read_write; + __u8 command; + __s16 res; + __u32 protocol; + char __data[0]; +}; + +struct trace_event_data_offsets_smbus_write {}; + +struct trace_event_data_offsets_smbus_read {}; + +struct trace_event_data_offsets_smbus_reply {}; + +struct trace_event_data_offsets_smbus_result {}; + +typedef void (*btf_trace_smbus_write)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *); + +typedef void (*btf_trace_smbus_read)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int); + +typedef void (*btf_trace_smbus_reply)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *, int); + +typedef void (*btf_trace_smbus_result)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, int); + +struct thermal_hwmon_device { + char type[20]; + struct device *device; + int count; + struct list_head tz_list; + struct list_head node; +}; + +struct thermal_hwmon_attr { + struct device_attribute attr; + char name[16]; +}; + +struct thermal_hwmon_temp { + struct list_head hwmon_node; + struct thermal_zone_device *tz; + struct thermal_hwmon_attr temp_input; + struct thermal_hwmon_attr temp_crit; +}; + +enum { + ETHTOOL_A_TS_STAT_UNSPEC = 0, + ETHTOOL_A_TS_STAT_TX_PKTS = 1, + ETHTOOL_A_TS_STAT_TX_LOST = 2, + ETHTOOL_A_TS_STAT_TX_ERR = 3, + __ETHTOOL_A_TS_STAT_CNT = 4, + ETHTOOL_A_TS_STAT_MAX = 3, +}; + +struct tsinfo_reply_data { + struct ethnl_reply_data base; + struct kernel_ethtool_ts_info ts_info; + struct ethtool_ts_stats stats; +}; + +enum nfnetlink_groups { + NFNLGRP_NONE = 0, + NFNLGRP_CONNTRACK_NEW = 1, + NFNLGRP_CONNTRACK_UPDATE = 2, + NFNLGRP_CONNTRACK_DESTROY = 3, + NFNLGRP_CONNTRACK_EXP_NEW = 4, + NFNLGRP_CONNTRACK_EXP_UPDATE = 5, + NFNLGRP_CONNTRACK_EXP_DESTROY = 6, + NFNLGRP_NFTABLES = 7, + NFNLGRP_ACCT_QUOTA = 8, + NFNLGRP_NFTRACE = 9, + __NFNLGRP_MAX = 10, +}; + +enum nfnl_batch_attributes { + NFNL_BATCH_UNSPEC = 0, + NFNL_BATCH_GENID = 1, + __NFNL_BATCH_MAX = 2, +}; + +struct nfnl_net { + struct sock *nfnl; +}; + +struct nfnl_err { + struct list_head head; + struct nlmsghdr *nlh; + int err; + struct netlink_ext_ack extack; +}; + +enum { + NFNL_BATCH_FAILURE = 1, + NFNL_BATCH_DONE = 2, + NFNL_BATCH_REPLAY = 4, +}; + +enum nf_tables_msg_types { + NFT_MSG_NEWTABLE = 0, + NFT_MSG_GETTABLE = 1, + NFT_MSG_DELTABLE = 2, + NFT_MSG_NEWCHAIN = 3, + NFT_MSG_GETCHAIN = 4, + NFT_MSG_DELCHAIN = 5, + NFT_MSG_NEWRULE = 6, + NFT_MSG_GETRULE = 7, + NFT_MSG_DELRULE = 8, + NFT_MSG_NEWSET = 9, + NFT_MSG_GETSET = 10, + NFT_MSG_DELSET = 11, + NFT_MSG_NEWSETELEM = 12, + NFT_MSG_GETSETELEM = 13, + NFT_MSG_DELSETELEM = 14, + NFT_MSG_NEWGEN = 15, + NFT_MSG_GETGEN = 16, + NFT_MSG_TRACE = 17, + NFT_MSG_NEWOBJ = 18, + NFT_MSG_GETOBJ = 19, + NFT_MSG_DELOBJ = 20, + NFT_MSG_GETOBJ_RESET = 21, + NFT_MSG_NEWFLOWTABLE = 22, + NFT_MSG_GETFLOWTABLE = 23, + NFT_MSG_DELFLOWTABLE = 24, + NFT_MSG_GETRULE_RESET = 25, + NFT_MSG_DESTROYTABLE = 26, + NFT_MSG_DESTROYCHAIN = 27, + NFT_MSG_DESTROYRULE = 28, + 
NFT_MSG_DESTROYSET = 29, + NFT_MSG_DESTROYSETELEM = 30, + NFT_MSG_DESTROYOBJ = 31, + NFT_MSG_DESTROYFLOWTABLE = 32, + NFT_MSG_GETSETELEM_RESET = 33, + NFT_MSG_MAX = 34, +}; + +enum nft_trace_attributes { + NFTA_TRACE_UNSPEC = 0, + NFTA_TRACE_TABLE = 1, + NFTA_TRACE_CHAIN = 2, + NFTA_TRACE_RULE_HANDLE = 3, + NFTA_TRACE_TYPE = 4, + NFTA_TRACE_VERDICT = 5, + NFTA_TRACE_ID = 6, + NFTA_TRACE_LL_HEADER = 7, + NFTA_TRACE_NETWORK_HEADER = 8, + NFTA_TRACE_TRANSPORT_HEADER = 9, + NFTA_TRACE_IIF = 10, + NFTA_TRACE_IIFTYPE = 11, + NFTA_TRACE_OIF = 12, + NFTA_TRACE_OIFTYPE = 13, + NFTA_TRACE_MARK = 14, + NFTA_TRACE_NFPROTO = 15, + NFTA_TRACE_POLICY = 16, + NFTA_TRACE_PAD = 17, + __NFTA_TRACE_MAX = 18, +}; + +struct nft_rule_dp_last { + struct nft_rule_dp end; + struct callback_head h; + struct nft_rule_blob *blob; + const struct nft_chain *chain; +}; + +struct ipq { + struct inet_frag_queue q; + u8 ecn; + u16 max_df_size; + int iif; + unsigned int rid; + struct inet_peer *peer; +}; + +struct icmp_ext_hdr { + __u8 version: 4; + __u8 reserved1: 4; + __u8 reserved2; + __sum16 checksum; +}; + +struct icmp_extobj_hdr { + __be16 length; + __u8 class_num; + __u8 class_type; +}; + +struct icmp_ext_echo_ctype3_hdr { + __be16 afi; + __u8 addrlen; + __u8 reserved; +}; + +struct icmp_ext_echo_iio { + struct icmp_extobj_hdr extobj_hdr; + union { + char name[16]; + __be32 ifindex; + struct { + struct icmp_ext_echo_ctype3_hdr ctype3_hdr; + union { + __be32 ipv4_addr; + struct in6_addr ipv6_addr; + } ip_addr; + } addr; + } ident; +}; + +struct trace_event_raw_icmp_send { + struct trace_entry ent; + const void *skbaddr; + int type; + int code; + __u8 saddr[4]; + __u8 daddr[4]; + __u16 sport; + __u16 dport; + short unsigned int ulen; + char __data[0]; +}; + +struct trace_event_data_offsets_icmp_send {}; + +typedef void (*btf_trace_icmp_send)(void *, const struct sk_buff *, int, int); + +struct icmp_bxm { + struct sk_buff *skb; + int offset; + int data_len; + struct { + struct icmphdr icmph; + __be32 times[3]; + } data; + int head_len; + struct ip_options_data replyopts; +}; + +struct icmp_control { + enum skb_drop_reason (*handler)(struct sk_buff *); + short int error; +}; + +struct ipv6_bpf_stub { + int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32); + struct sock * (*udp6_lib_lookup)(const struct net *, const struct in6_addr *, __be16, const struct in6_addr *, __be16, int, int, struct udp_table *, struct sk_buff *); + int (*ipv6_setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*ipv6_getsockopt)(struct sock *, int, int, sockptr_t, sockptr_t); + int (*ipv6_dev_get_saddr)(struct net *, const struct net_device *, const struct in6_addr *, unsigned int, struct in6_addr *); +}; + +enum tunnel_encap_types { + TUNNEL_ENCAP_NONE = 0, + TUNNEL_ENCAP_FOU = 1, + TUNNEL_ENCAP_GUE = 2, + TUNNEL_ENCAP_MPLS = 3, +}; + +struct ip_tunnel_prl { + __be32 addr; + __u16 flags; + __u16 __reserved; + __u32 datalen; + __u32 __reserved2; +}; + +struct gro_cell; + +struct gro_cells { + struct gro_cell *cells; +}; + +struct ip_tunnel_prl_entry { + struct ip_tunnel_prl_entry *next; + __be32 addr; + u16 flags; + struct callback_head callback_head; +}; + +struct ip_tunnel { + struct ip_tunnel *next; + struct hlist_node hash_node; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net *net; + long unsigned int err_time; + int err_count; + u32 i_seqno; + atomic_t o_seqno; + int tun_hlen; + u32 index; + u8 erspan_ver; + u8 dir; + u16 hwid; + struct dst_cache dst_cache; + struct ip_tunnel_parm_kern parms; 
+ int mlink; + int encap_hlen; + int hlen; + struct ip_tunnel_encap encap; + struct ip_tunnel_prl_entry *prl; + unsigned int prl_count; + unsigned int ip_tnl_net_id; + struct gro_cells gro_cells; + __u32 fwmark; + bool collect_md; + bool ignore_df; +}; + +struct tnl_ptk_info { + long unsigned int flags[1]; + __be16 proto; + __be32 key; + __be32 seq; + int hdr_len; +}; + +struct sit_net { + struct ip_tunnel *tunnels_r_l[16]; + struct ip_tunnel *tunnels_r[16]; + struct ip_tunnel *tunnels_l[16]; + struct ip_tunnel *tunnels_wc[1]; + struct ip_tunnel **tunnels[4]; + struct net_device *fb_tunnel_dev; +}; + +union vdso_data_store { + struct vdso_data data[2]; + u8 page[4096]; +}; + +enum vvar_pages { + VVAR_DATA_PAGE_OFFSET = 0, + VVAR_TIMENS_PAGE_OFFSET = 1, + VVAR_NR_PAGES = 2, +}; + +enum vdso_abi { + VDSO_ABI_AA64 = 0, + VDSO_ABI_AA32 = 1, +}; + +struct vdso_abi_info { + const char *name; + const char *vdso_code_start; + const char *vdso_code_end; + long unsigned int vdso_pages; + struct vm_special_mapping *cm; +}; + +struct vgic_state_iter { + int nr_cpus; + int nr_spis; + int nr_lpis; + int dist_id; + int vcpu_id; + long unsigned int intid; + int lpi_idx; +}; + +struct async_entry { + struct list_head domain_list; + struct list_head global_list; + struct work_struct work; + async_cookie_t cookie; + async_func_t func; + void *data; + struct async_domain *domain; +}; + +struct semaphore_waiter { + struct list_head list; + struct task_struct *task; + bool up; +}; + +struct ce_unbind { + struct clock_event_device *ce; + int res; +}; + +struct trace_event_raw_cgroup_root { + struct trace_entry ent; + int root; + u16 ss_mask; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_cgroup { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + char __data[0]; +}; + +struct trace_event_raw_cgroup_migrate { + struct trace_entry ent; + int dst_root; + int dst_level; + u64 dst_id; + int pid; + u32 __data_loc_dst_path; + u32 __data_loc_comm; + char __data[0]; +}; + +struct trace_event_raw_cgroup_event { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + int val; + char __data[0]; +}; + +struct trace_event_raw_cgroup_rstat { + struct trace_entry ent; + int root; + int level; + u64 id; + int cpu; + bool contended; + char __data[0]; +}; + +struct trace_event_data_offsets_cgroup_root { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cgroup { + u32 path; + const void *path_ptr_; +}; + +struct trace_event_data_offsets_cgroup_migrate { + u32 dst_path; + const void *dst_path_ptr_; + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_cgroup_event { + u32 path; + const void *path_ptr_; +}; + +struct trace_event_data_offsets_cgroup_rstat {}; + +typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *); + +typedef void 
(*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_rstat_lock_contended)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_locked)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_unlock)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_lock_contended)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_lock_contended_fastpath)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_locked)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_locked_fastpath)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_unlock)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_unlock_fastpath)(void *, struct cgroup *, int, bool); + +enum cgroup_opt_features { + OPT_FEATURE_PRESSURE = 0, + OPT_FEATURE_COUNT = 1, +}; + +enum cgroup2_param { + Opt_nsdelegate = 0, + Opt_favordynmods___2 = 1, + Opt_memory_localevents = 2, + Opt_memory_recursiveprot = 3, + Opt_memory_hugetlb_accounting = 4, + Opt_pids_localevents = 5, + nr__cgroup2_params = 6, +}; + +enum bpf_lru_list_type { + BPF_LRU_LIST_T_ACTIVE = 0, + BPF_LRU_LIST_T_INACTIVE = 1, + BPF_LRU_LIST_T_FREE = 2, + BPF_LRU_LOCAL_LIST_T_FREE = 3, + BPF_LRU_LOCAL_LIST_T_PENDING = 4, +}; + +struct bpf_lru_node { + struct list_head list; + u16 cpu; + u8 type; + u8 ref; +}; + +struct bpf_lru_list { + struct list_head lists[3]; + unsigned int counts[2]; + struct list_head *next_inactive_rotation; + raw_spinlock_t lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_lru_locallist { + struct list_head lists[2]; + u16 next_steal; + raw_spinlock_t lock; +}; + +struct bpf_common_lru { + struct bpf_lru_list lru_list; + struct bpf_lru_locallist *local_list; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); + +struct bpf_lru { + union { + struct bpf_common_lru common_lru; + struct bpf_lru_list *percpu_lru; + }; + del_from_htab_func del_from_htab; + void *del_arg; + unsigned int hash_offset; + unsigned int nr_scans; + bool percpu; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_cgroup_storage_map { + struct bpf_map map; + spinlock_t lock; + struct rb_root root; + struct list_head list; +}; + +struct perf_event_mmap_page { + __u32 version; + __u32 compat_version; + __u32 lock; + __u32 index; + __s64 offset; + __u64 time_enabled; + __u64 time_running; + union { + __u64 capabilities; + struct { + __u64 cap_bit0: 1; + __u64 cap_bit0_is_deprecated: 1; + __u64 cap_user_rdpmc: 1; + __u64 cap_user_time: 1; + __u64 cap_user_time_zero: 1; + __u64 cap_user_time_short: 1; + __u64 cap_____res: 58; + }; + }; + __u16 pmc_width; + __u16 time_shift; + __u32 time_mult; + __u64 time_offset; + __u64 time_zero; + __u32 size; + __u32 __reserved_1; + __u64 time_cycles; + __u64 time_mask; + __u8 __reserved[928]; + __u64 data_head; + __u64 data_tail; + __u64 data_offset; + __u64 data_size; + __u64 
aux_head; + __u64 aux_tail; + __u64 aux_offset; + __u64 aux_size; +}; + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_AUX_OUTPUT_HW_ID = 21, + PERF_RECORD_MAX = 22, +}; + +struct perf_buffer { + refcount_t refcount; + struct callback_head callback_head; + int nr_pages; + int overwrite; + int paused; + atomic_t poll; + local_t head; + unsigned int nest; + local_t events; + local_t wakeup; + local_t lost; + long int watermark; + long int aux_watermark; + spinlock_t event_lock; + struct list_head event_list; + atomic_t mmap_count; + long unsigned int mmap_locked; + struct user_struct *mmap_user; + struct mutex aux_mutex; + long int aux_head; + unsigned int aux_nest; + long int aux_wakeup; + long unsigned int aux_pgoff; + int aux_nr_pages; + int aux_overwrite; + atomic_t aux_mmap_count; + long unsigned int aux_mmap_locked; + void (*free_aux)(void *); + refcount_t aux_refcount; + int aux_in_sampling; + int aux_in_pause_resume; + void **aux_pages; + void *aux_priv; + struct perf_event_mmap_page *user_page; + void *data_pages[0]; +}; + +struct file_dedupe_range_info { + __s64 dest_fd; + __u64 dest_offset; + __u64 bytes_deduped; + __s32 status; + __u32 reserved; +}; + +struct file_dedupe_range { + __u64 src_offset; + __u64 src_length; + __u16 dest_count; + __u16 reserved1; + __u32 reserved2; + struct file_dedupe_range_info info[0]; +}; + +typedef long int intptr_t; + +struct genradix_root; + +struct __genradix { + struct genradix_root *root; +}; + +struct genradix_node { + union { + struct genradix_node *children[64]; + u8 data[512]; + }; +}; + +struct syscall_info { + __u64 sp; + struct seccomp_data data; +}; + +enum resctrl_conf_type { + CDP_NONE = 0, + CDP_CODE = 1, + CDP_DATA = 2, +}; + +enum proc_mem_force { + PROC_MEM_FORCE_ALWAYS = 0, + PROC_MEM_FORCE_PTRACE = 1, + PROC_MEM_FORCE_NEVER = 2, +}; + +struct pid_entry { + const char *name; + unsigned int len; + umode_t mode; + const struct inode_operations *iop; + const struct file_operations *fop; + union proc_op op; +}; + +struct limit_names { + const char *name; + const char *unit; +}; + +struct map_files_info { + long unsigned int start; + long unsigned int end; + fmode_t mode; +}; + +struct tgid_iter { + unsigned int tgid; + struct task_struct *task; +}; + +struct getdents_callback { + struct dir_context ctx; + char *name; + u64 ino; + int found; + int sequence; +}; + +enum btrfs_csum_type { + BTRFS_CSUM_TYPE_CRC32 = 0, + BTRFS_CSUM_TYPE_XXHASH = 1, + BTRFS_CSUM_TYPE_SHA256 = 2, + BTRFS_CSUM_TYPE_BLAKE2 = 3, +}; + +enum btrfs_mod_log_op { + BTRFS_MOD_LOG_KEY_REPLACE = 0, + BTRFS_MOD_LOG_KEY_ADD = 1, + BTRFS_MOD_LOG_KEY_REMOVE = 2, + BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING = 3, + BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING = 4, + BTRFS_MOD_LOG_MOVE_KEYS = 5, + BTRFS_MOD_LOG_ROOT_REPLACE = 6, +}; + +struct btrfs_csums { + u16 size; + const char name[10]; + const char driver[12]; +}; + +struct btrfs_backref_shared_cache_entry { + u64 
bytenr; + u64 gen; + bool is_shared; +}; + +struct btrfs_backref_share_check_ctx { + struct ulist refs; + u64 curr_leaf_bytenr; + u64 prev_leaf_bytenr; + struct btrfs_backref_shared_cache_entry path_cache_entries[8]; + bool use_path_cache; + struct { + u64 bytenr; + bool is_shared; + } prev_extents_cache[8]; + int prev_extents_cache_slot; +}; + +struct extent_inode_elem { + u64 inum; + u64 offset; + u64 num_bytes; + struct extent_inode_elem *next; +}; + +struct btrfs_seq_list { + struct list_head list; + u64 seq; +}; + +struct preftree { + struct rb_root_cached root; + unsigned int count; +}; + +struct preftrees { + struct preftree direct; + struct preftree indirect; + struct preftree indirect_missing_keys; +}; + +struct share_check { + struct btrfs_backref_share_check_ctx *ctx; + struct btrfs_root *root; + u64 inum; + u64 data_bytenr; + u64 data_extent_gen; + int share_count; + int self_ref_count; + bool have_delayed_delete_refs; +}; + +struct badblocks_context { + sector_t start; + sector_t len; + int ack; +}; + +enum io_uring_register_pbuf_ring_flags { + IOU_PBUF_RING_MMAP = 1, + IOU_PBUF_RING_INC = 2, +}; + +struct io_uring_buf_reg { + __u64 ring_addr; + __u32 ring_entries; + __u16 bgid; + __u16 flags; + __u64 resv[3]; +}; + +struct io_uring_buf_status { + __u32 buf_group; + __u32 head; + __u32 resv[8]; +}; + +struct io_provide_buf { + struct file *file; + __u64 addr; + __u32 len; + __u32 bgid; + __u32 nbufs; + __u16 bid; +}; + +enum { + IORING_MEM_REGION_TYPE_USER = 1, +}; + +struct io_uring_region_desc { + __u64 user_addr; + __u64 size; + __u32 flags; + __u32 id; + __u64 mmap_offset; + __u64 __resv[4]; +}; + +typedef struct { + BYTE maxTableLog; + BYTE tableType; + BYTE tableLog; + BYTE reserved; +} DTableDesc; + +typedef struct { + BYTE nbBits; + BYTE byte; +} HUF_DEltX1; + +typedef struct { + U32 rankVal[13]; + U32 rankStart[13]; + U32 statsWksp[218]; + BYTE symbols[256]; + BYTE huffWeight[256]; +} HUF_ReadDTableX1_Workspace; + +typedef struct { + U16 sequence; + BYTE nbBits; + BYTE length; +} HUF_DEltX2; + +typedef struct { + BYTE symbol; +} sortedSymbol_t; + +typedef U32 rankValCol_t[13]; + +typedef struct { + U32 rankVal[156]; + U32 rankStats[13]; + U32 rankStart0[15]; + sortedSymbol_t sortedSymbol[256]; + BYTE weightList[256]; + U32 calleeWksp[218]; +} HUF_ReadDTableX2_Workspace; + +typedef struct { + U32 tableTime; + U32 decode256Time; +} algo_time_t; + +struct ldsem_waiter { + struct list_head list; + struct task_struct *task; +}; + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(const struct class *, const struct class_attribute *, char *); + ssize_t (*store)(const struct class *, const struct class_attribute *, const char *, size_t); +}; + +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +struct class_compat { + struct kobject *kobj; +}; + +struct pm_clk_notifier_block { + struct notifier_block nb; + struct dev_pm_domain *pm_domain; + char *con_ids[0]; +}; + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED = 1, + PCE_STATUS_PREPARED = 2, + PCE_STATUS_ENABLED = 3, + PCE_STATUS_ERROR = 4, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; + bool enabled_when_prepared; +}; + +enum scsi_scan_mode { + SCSI_SCAN_INITIAL = 0, + SCSI_SCAN_RESCAN = 1, + SCSI_SCAN_MANUAL = 2, +}; + +struct platform_device_info { + struct device *parent; + struct fwnode_handle *fwnode; + bool of_node_reused; + const char *name; + int id; + const struct resource *res; 
+ unsigned int num_res; + const void *data; + size_t size_data; + u64 dma_mask; + const struct property_entry *properties; +}; + +struct fixed_mdio_bus { + struct mii_bus *mii_bus; + struct list_head phys; +}; + +struct fixed_phy { + int addr; + struct phy_device *phydev; + struct fixed_phy_status status; + bool no_carrier; + int (*link_update)(struct net_device *, struct fixed_phy_status *); + struct list_head node; + struct gpio_desc *link_gpiod; +}; + +typedef u64 acpi_size; + +typedef u64 acpi_io_address; + +typedef u32 acpi_status; + +typedef char *acpi_string; + +typedef void *acpi_handle; + +typedef u32 acpi_object_type; + +union acpi_object { + acpi_object_type type; + struct { + acpi_object_type type; + u64 value; + } integer; + struct { + acpi_object_type type; + u32 length; + char *pointer; + } string; + struct { + acpi_object_type type; + u32 length; + u8 *pointer; + } buffer; + struct { + acpi_object_type type; + u32 count; + union acpi_object *elements; + } package; + struct { + acpi_object_type type; + acpi_object_type actual_type; + acpi_handle handle; + } reference; + struct { + acpi_object_type type; + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + } processor; + struct { + acpi_object_type type; + u32 system_level; + u32 resource_order; + } power_resource; +}; + +struct acpi_object_list { + u32 count; + union acpi_object *pointer; +}; + +struct acpi_buffer { + acpi_size length; + void *pointer; +}; + +enum spd_duplex { + NWAY_10M_HALF = 0, + NWAY_10M_FULL = 1, + NWAY_100M_HALF = 2, + NWAY_100M_FULL = 3, + NWAY_1000M_FULL = 4, + FORCE_10M_HALF = 5, + FORCE_10M_FULL = 6, + FORCE_100M_HALF = 7, + FORCE_100M_FULL = 8, + FORCE_1000M_FULL = 9, + NWAY_2500M_FULL = 10, +}; + +enum rtl_register_content { + _2500bps = 1024, + _1250bps = 512, + _500bps = 256, + _tx_flow = 64, + _rx_flow = 32, + _1000bps = 16, + _100bps = 8, + _10bps = 4, + LINK_STATUS = 2, + FULL_DUP = 1, +}; + +enum rtl8152_flags { + RTL8152_INACCESSIBLE = 0, + RTL8152_SET_RX_MODE = 1, + WORK_ENABLE = 2, + RTL8152_LINK_CHG = 3, + SELECTIVE_SUSPEND = 4, + PHY_RESET = 5, + SCHEDULE_TASKLET = 6, + GREEN_ETHERNET = 7, + RX_EPROTO = 8, + IN_PRE_RESET = 9, + PROBED_WITH_NO_ERRORS = 10, + PROBE_SHOULD_RETRY = 11, +}; + +struct tally_counter { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underrun; +}; + +struct rx_desc { + __le32 opts1; + __le32 opts2; + __le32 opts3; + __le32 opts4; + __le32 opts5; + __le32 opts6; +}; + +struct tx_desc { + __le32 opts1; + __le32 opts2; +}; + +struct r8152; + +struct rx_agg { + struct list_head list; + struct list_head info_list; + struct urb *urb; + struct r8152 *context; + struct page *page; + void *buffer; +}; + +struct tx_agg { + struct list_head list; + struct urb *urb; + struct r8152 *context; + void *buffer; + void *head; + u32 skb_num; + u32 skb_len; +}; + +struct rtl_ops { + void (*init)(struct r8152 *); + int (*enable)(struct r8152 *); + void (*disable)(struct r8152 *); + void (*up)(struct r8152 *); + void (*down)(struct r8152 *); + void (*unload)(struct r8152 *); + int (*eee_get)(struct r8152 *, struct ethtool_keee *); + int (*eee_set)(struct r8152 *, struct ethtool_keee *); + bool (*in_nway)(struct r8152 *); + void (*hw_phy_cfg)(struct r8152 *); + void (*autosuspend_en)(struct r8152 *, bool); + void (*change_mtu)(struct r8152 
*); +}; + +struct ups_info { + u32 r_tune: 1; + u32 _10m_ckdiv: 1; + u32 _250m_ckdiv: 1; + u32 aldps: 1; + u32 lite_mode: 2; + u32 speed_duplex: 4; + u32 eee: 1; + u32 eee_lite: 1; + u32 eee_ckdiv: 1; + u32 eee_plloff_100: 1; + u32 eee_plloff_giga: 1; + u32 eee_cmod_lv: 1; + u32 green: 1; + u32 flow_control: 1; + u32 ctap_short_off: 1; +}; + +struct rtl_fw { + const char *fw_name; + const struct firmware *fw; + char version[32]; + int (*pre_fw)(struct r8152 *); + int (*post_fw)(struct r8152 *); + bool retry; +}; + +struct r8152 { + long unsigned int flags; + struct usb_device *udev; + struct napi_struct napi; + struct usb_interface *intf; + struct net_device *netdev; + struct urb *intr_urb; + struct tx_agg tx_info[4]; + struct list_head rx_info; + struct list_head rx_used; + struct list_head rx_done; + struct list_head tx_free; + struct sk_buff_head tx_queue; + struct sk_buff_head rx_queue; + spinlock_t rx_lock; + spinlock_t tx_lock; + struct delayed_work schedule; + struct delayed_work hw_phy_work; + struct mii_if_info mii; + struct mutex control; + struct notifier_block pm_notifier; + struct tasklet_struct tx_tl; + struct rtl_ops rtl_ops; + struct ups_info ups_info; + struct rtl_fw rtl_fw; + atomic_t rx_count; + bool eee_en; + int intr_interval; + u32 saved_wolopts; + u32 msg_enable; + u32 tx_qlen; + u32 coalesce; + u32 advertising; + u32 rx_buf_sz; + u32 rx_copybreak; + u32 rx_pending; + u32 fc_pause_on; + u32 fc_pause_off; + unsigned int pipe_in; + unsigned int pipe_out; + unsigned int pipe_intr; + unsigned int pipe_ctrl_in; + unsigned int pipe_ctrl_out; + u32 support_2500full: 1; + u32 lenovo_macpassthru: 1; + u32 dell_tb_rx_agg_bug: 1; + u16 ocp_base; + u16 speed; + u16 eee_adv; + u8 *intr_buff; + u8 version; + u8 duplex; + u8 autoneg; + unsigned int reg_access_reset_count; +}; + +struct fw_block { + __le32 type; + __le32 length; +}; + +struct fw_header { + u8 checksum[32]; + char version[32]; + struct fw_block blocks[0]; +}; + +enum rtl8152_fw_flags { + FW_FLAGS_USB = 0, + FW_FLAGS_PLA = 1, + FW_FLAGS_START = 2, + FW_FLAGS_STOP = 3, + FW_FLAGS_NC = 4, + FW_FLAGS_NC1 = 5, + FW_FLAGS_NC2 = 6, + FW_FLAGS_UC2 = 7, + FW_FLAGS_UC = 8, + FW_FLAGS_SPEED_UP = 9, + FW_FLAGS_VER = 10, +}; + +enum rtl8152_fw_fixup_cmd { + FW_FIXUP_AND = 0, + FW_FIXUP_OR = 1, + FW_FIXUP_NOT = 2, + FW_FIXUP_XOR = 3, +}; + +struct fw_phy_set { + __le16 addr; + __le16 data; +}; + +struct fw_phy_speed_up { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 version; + __le16 fw_reg; + __le16 reserved; + char info[0]; +}; + +struct fw_phy_ver { + struct fw_block blk_hdr; + struct fw_phy_set ver; + __le32 reserved; +}; + +struct fw_phy_fixup { + struct fw_block blk_hdr; + struct fw_phy_set setting; + __le16 bit_cmd; + __le16 reserved; +}; + +struct fw_phy_union { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + struct fw_phy_set pre_set[2]; + struct fw_phy_set bp[8]; + struct fw_phy_set bp_en; + u8 pre_num; + u8 bp_num; + char info[0]; +} __attribute__((packed)); + +struct fw_mac { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + __le16 bp_ba_addr; + __le16 bp_ba_value; + __le16 bp_en_addr; + __le16 bp_en_value; + __le16 bp_start; + __le16 bp_num; + __le16 bp[16]; + __le32 reserved; + __le16 fw_ver_reg; + u8 fw_ver_data; + char info[0]; +} __attribute__((packed)); + +struct fw_phy_patch_key { + struct fw_block blk_hdr; + __le16 key_reg; + __le16 key_data; + __le32 reserved; +}; + +struct fw_phy_nc { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + __le16 ba_reg; + 
__le16 ba_data; + __le16 patch_en_addr; + __le16 patch_en_value; + __le16 mode_reg; + __le16 mode_pre; + __le16 mode_post; + __le16 reserved; + __le16 bp_start; + __le16 bp_num; + __le16 bp[4]; + char info[0]; +}; + +enum rtl_fw_type { + RTL_FW_END = 0, + RTL_FW_PLA = 1, + RTL_FW_USB = 2, + RTL_FW_PHY_START = 3, + RTL_FW_PHY_STOP = 4, + RTL_FW_PHY_NC = 5, + RTL_FW_PHY_FIXUP = 6, + RTL_FW_PHY_UNION_NC = 7, + RTL_FW_PHY_UNION_NC1 = 8, + RTL_FW_PHY_UNION_NC2 = 9, + RTL_FW_PHY_UNION_UC2 = 10, + RTL_FW_PHY_UNION_UC = 11, + RTL_FW_PHY_UNION_MISC = 12, + RTL_FW_PHY_SPEED_UP = 13, + RTL_FW_PHY_VER = 14, +}; + +enum rtl_version { + RTL_VER_UNKNOWN = 0, + RTL_VER_01 = 1, + RTL_VER_02 = 2, + RTL_VER_03 = 3, + RTL_VER_04 = 4, + RTL_VER_05 = 5, + RTL_VER_06 = 6, + RTL_VER_07 = 7, + RTL_VER_08 = 8, + RTL_VER_09 = 9, + RTL_TEST_01 = 10, + RTL_VER_10 = 11, + RTL_VER_11 = 12, + RTL_VER_12 = 13, + RTL_VER_13 = 14, + RTL_VER_14 = 15, + RTL_VER_15 = 16, + RTL_VER_MAX = 17, +}; + +enum tx_csum_stat { + TX_CSUM_SUCCESS = 0, + TX_CSUM_TSO = 1, + TX_CSUM_NONE = 2, +}; + +struct r5l_payload_header { + __le16 type; + __le16 flags; +}; + +enum r5l_payload_type { + R5LOG_PAYLOAD_DATA = 0, + R5LOG_PAYLOAD_PARITY = 1, + R5LOG_PAYLOAD_FLUSH = 2, +}; + +struct r5l_payload_data_parity { + struct r5l_payload_header header; + __le32 size; + __le64 location; + __le32 checksum[0]; +}; + +struct r5l_payload_flush { + struct r5l_payload_header header; + __le32 size; + __le64 flush_stripes[0]; +}; + +struct r5l_meta_block { + __le32 magic; + __le32 checksum; + __u8 version; + __u8 __zero_pading_1; + __le16 __zero_pading_2; + __le32 meta_size; + __le64 seq; + __le64 position; + struct r5l_payload_header payloads[0]; +}; + +struct r5l_io_unit { + struct r5l_log *log; + struct page *meta_page; + int meta_offset; + struct bio *current_bio; + atomic_t pending_stripe; + u64 seq; + sector_t log_start; + sector_t log_end; + struct list_head log_sibling; + struct list_head stripe_list; + int state; + bool need_split_bio; + struct bio *split_bio; + unsigned int has_flush: 1; + unsigned int has_fua: 1; + unsigned int has_null_flush: 1; + unsigned int has_flush_payload: 1; + unsigned int io_deferred: 1; + struct bio_list flush_barriers; +}; + +enum r5c_journal_mode { + R5C_JOURNAL_MODE_WRITE_THROUGH = 0, + R5C_JOURNAL_MODE_WRITE_BACK = 1, +}; + +struct r5l_log { + struct md_rdev *rdev; + u32 uuid_checksum; + sector_t device_size; + sector_t max_free_space; + sector_t last_checkpoint; + u64 last_cp_seq; + sector_t log_start; + u64 seq; + sector_t next_checkpoint; + struct mutex io_mutex; + struct r5l_io_unit *current_io; + spinlock_t io_list_lock; + struct list_head running_ios; + struct list_head io_end_ios; + struct list_head flushing_ios; + struct list_head finished_ios; + struct bio flush_bio; + struct list_head no_mem_stripes; + struct kmem_cache *io_kc; + mempool_t io_pool; + struct bio_set bs; + mempool_t meta_pool; + struct md_thread *reclaim_thread; + long unsigned int reclaim_target; + wait_queue_head_t iounit_wait; + struct list_head no_space_stripes; + spinlock_t no_space_stripes_lock; + bool need_cache_flush; + enum r5c_journal_mode r5c_journal_mode; + struct list_head stripe_in_journal_list; + spinlock_t stripe_in_journal_lock; + atomic_t stripe_in_journal_count; + struct work_struct deferred_io_work; + struct work_struct disable_writeback_work; + spinlock_t tree_lock; + struct xarray big_stripe_tree; +}; + +enum r5l_io_unit_state { + IO_UNIT_RUNNING = 0, + IO_UNIT_IO_START = 1, + IO_UNIT_IO_END = 2, + IO_UNIT_STRIPE_END = 3, 
+}; + +struct r5l_recovery_ctx { + struct page *meta_page; + sector_t meta_total_blocks; + sector_t pos; + u64 seq; + int data_parity_stripes; + int data_only_stripes; + struct list_head cached_list; + struct page *ra_pool[256]; + struct bio_vec ra_bvec[256]; + sector_t pool_offset; + int total_pages; + int valid_pages; +}; + +struct gro_cell { + struct sk_buff_head napi_skbs; + struct napi_struct napi; +}; + +struct percpu_free_defer { + struct callback_head rcu; + void *ptr; +}; + +struct debug_reply_data { + struct ethnl_reply_data base; + u32 msg_mask; +}; + +enum nft_hook_attributes { + NFTA_HOOK_UNSPEC = 0, + NFTA_HOOK_HOOKNUM = 1, + NFTA_HOOK_PRIORITY = 2, + NFTA_HOOK_DEV = 3, + NFTA_HOOK_DEVS = 4, + __NFTA_HOOK_MAX = 5, +}; + +enum nft_table_flags { + NFT_TABLE_F_DORMANT = 1, + NFT_TABLE_F_OWNER = 2, + NFT_TABLE_F_PERSIST = 4, +}; + +enum nft_table_attributes { + NFTA_TABLE_UNSPEC = 0, + NFTA_TABLE_NAME = 1, + NFTA_TABLE_FLAGS = 2, + NFTA_TABLE_USE = 3, + NFTA_TABLE_HANDLE = 4, + NFTA_TABLE_PAD = 5, + NFTA_TABLE_USERDATA = 6, + NFTA_TABLE_OWNER = 7, + __NFTA_TABLE_MAX = 8, +}; + +enum nft_chain_attributes { + NFTA_CHAIN_UNSPEC = 0, + NFTA_CHAIN_TABLE = 1, + NFTA_CHAIN_HANDLE = 2, + NFTA_CHAIN_NAME = 3, + NFTA_CHAIN_HOOK = 4, + NFTA_CHAIN_POLICY = 5, + NFTA_CHAIN_USE = 6, + NFTA_CHAIN_TYPE = 7, + NFTA_CHAIN_COUNTERS = 8, + NFTA_CHAIN_PAD = 9, + NFTA_CHAIN_FLAGS = 10, + NFTA_CHAIN_ID = 11, + NFTA_CHAIN_USERDATA = 12, + __NFTA_CHAIN_MAX = 13, +}; + +enum nft_set_policies { + NFT_SET_POL_PERFORMANCE = 0, + NFT_SET_POL_MEMORY = 1, +}; + +enum nft_set_desc_attributes { + NFTA_SET_DESC_UNSPEC = 0, + NFTA_SET_DESC_SIZE = 1, + NFTA_SET_DESC_CONCAT = 2, + __NFTA_SET_DESC_MAX = 3, +}; + +enum nft_set_field_attributes { + NFTA_SET_FIELD_UNSPEC = 0, + NFTA_SET_FIELD_LEN = 1, + __NFTA_SET_FIELD_MAX = 2, +}; + +enum nft_set_elem_attributes { + NFTA_SET_ELEM_UNSPEC = 0, + NFTA_SET_ELEM_KEY = 1, + NFTA_SET_ELEM_DATA = 2, + NFTA_SET_ELEM_FLAGS = 3, + NFTA_SET_ELEM_TIMEOUT = 4, + NFTA_SET_ELEM_EXPIRATION = 5, + NFTA_SET_ELEM_USERDATA = 6, + NFTA_SET_ELEM_EXPR = 7, + NFTA_SET_ELEM_PAD = 8, + NFTA_SET_ELEM_OBJREF = 9, + NFTA_SET_ELEM_KEY_END = 10, + NFTA_SET_ELEM_EXPRESSIONS = 11, + __NFTA_SET_ELEM_MAX = 12, +}; + +enum nft_set_elem_list_attributes { + NFTA_SET_ELEM_LIST_UNSPEC = 0, + NFTA_SET_ELEM_LIST_TABLE = 1, + NFTA_SET_ELEM_LIST_SET = 2, + NFTA_SET_ELEM_LIST_ELEMENTS = 3, + NFTA_SET_ELEM_LIST_SET_ID = 4, + __NFTA_SET_ELEM_LIST_MAX = 5, +}; + +enum nft_data_attributes { + NFTA_DATA_UNSPEC = 0, + NFTA_DATA_VALUE = 1, + NFTA_DATA_VERDICT = 2, + __NFTA_DATA_MAX = 3, +}; + +enum nft_verdict_attributes { + NFTA_VERDICT_UNSPEC = 0, + NFTA_VERDICT_CODE = 1, + NFTA_VERDICT_CHAIN = 2, + NFTA_VERDICT_CHAIN_ID = 3, + __NFTA_VERDICT_MAX = 4, +}; + +enum nft_expr_attributes { + NFTA_EXPR_UNSPEC = 0, + NFTA_EXPR_NAME = 1, + NFTA_EXPR_DATA = 2, + __NFTA_EXPR_MAX = 3, +}; + +enum nft_gen_attributes { + NFTA_GEN_UNSPEC = 0, + NFTA_GEN_ID = 1, + NFTA_GEN_PROC_PID = 2, + NFTA_GEN_PROC_NAME = 3, + __NFTA_GEN_MAX = 4, +}; + +enum nft_object_attributes { + NFTA_OBJ_UNSPEC = 0, + NFTA_OBJ_TABLE = 1, + NFTA_OBJ_NAME = 2, + NFTA_OBJ_TYPE = 3, + NFTA_OBJ_DATA = 4, + NFTA_OBJ_USE = 5, + NFTA_OBJ_HANDLE = 6, + NFTA_OBJ_PAD = 7, + NFTA_OBJ_USERDATA = 8, + __NFTA_OBJ_MAX = 9, +}; + +enum nft_flowtable_flags { + NFT_FLOWTABLE_HW_OFFLOAD = 1, + NFT_FLOWTABLE_COUNTER = 2, + NFT_FLOWTABLE_MASK = 3, +}; + +enum nft_flowtable_attributes { + NFTA_FLOWTABLE_UNSPEC = 0, + NFTA_FLOWTABLE_TABLE = 1, + NFTA_FLOWTABLE_NAME = 2, + 
NFTA_FLOWTABLE_HOOK = 3, + NFTA_FLOWTABLE_USE = 4, + NFTA_FLOWTABLE_HANDLE = 5, + NFTA_FLOWTABLE_PAD = 6, + NFTA_FLOWTABLE_FLAGS = 7, + __NFTA_FLOWTABLE_MAX = 8, +}; + +enum nft_flowtable_hook_attributes { + NFTA_FLOWTABLE_HOOK_UNSPEC = 0, + NFTA_FLOWTABLE_HOOK_NUM = 1, + NFTA_FLOWTABLE_HOOK_PRIORITY = 2, + NFTA_FLOWTABLE_HOOK_DEVS = 3, + __NFTA_FLOWTABLE_HOOK_MAX = 4, +}; + +enum nft_devices_attributes { + NFTA_DEVICE_UNSPEC = 0, + NFTA_DEVICE_NAME = 1, + __NFTA_DEVICE_MAX = 2, +}; + +enum nft_data_desc_flags { + NFT_DATA_DESC_SETELEM = 1, +}; + +struct nft_userdata { + u8 len; + unsigned char data[0]; +}; + +struct nft_rule { + struct list_head list; + u64 handle: 42; + u64 genmask: 2; + u64 dlen: 12; + u64 udata: 1; + unsigned char data[0]; +}; + +struct nft_hook { + struct list_head list; + struct nf_hook_ops ops; + struct callback_head rcu; +}; + +struct nft_flowtable { + struct list_head list; + struct nft_table *table; + char *name; + int hooknum; + int ops_len; + u32 genmask: 2; + u32 use; + u64 handle; + long: 64; + struct list_head hook_list; + struct nf_flowtable data; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct nft_trans { + struct list_head list; + struct net *net; + struct nft_table *table; + int msg_type; + u32 seq; + u16 flags; + u8 report: 1; + u8 put_net: 1; +}; + +struct nft_trans_binding { + struct nft_trans nft_trans; + struct list_head binding_list; +}; + +struct nft_trans_rule { + struct nft_trans nft_trans; + struct nft_rule *rule; + struct nft_chain *chain; + struct nft_flow_rule *flow; + u32 rule_id; + bool bound; +}; + +struct nft_trans_set { + struct nft_trans_binding nft_trans_binding; + struct list_head list_trans_newset; + struct nft_set *set; + u32 set_id; + u32 gc_int; + u64 timeout; + bool update; + bool bound; + u32 size; +}; + +struct nft_trans_chain { + struct nft_trans_binding nft_trans_binding; + struct nft_chain *chain; + char *name; + struct nft_stats *stats; + u8 policy; + bool update; + bool bound; + u32 chain_id; + struct nft_base_chain *basechain; + struct list_head hook_list; +}; + +struct nft_trans_table { + struct nft_trans nft_trans; + bool update; +}; + +enum nft_trans_elem_flags { + NFT_TRANS_UPD_TIMEOUT = 1, + NFT_TRANS_UPD_EXPIRATION = 2, +}; + +struct nft_elem_update { + u64 timeout; + u64 expiration; + u8 flags; +}; + +struct nft_trans_one_elem { + struct nft_elem_priv *priv; + struct nft_elem_update *update; +}; + +struct nft_trans_elem { + struct nft_trans nft_trans; + struct nft_set *set; + bool bound; + unsigned int nelems; + struct nft_trans_one_elem elems[0]; +}; + +struct nft_trans_obj { + struct nft_trans nft_trans; + struct nft_object *obj; + struct nft_object *newobj; + bool update; +}; + +struct nft_trans_flowtable { + struct nft_trans nft_trans; + struct nft_flowtable *flowtable; + struct list_head hook_list; + u32 flags; + bool update; +}; + +enum { + NFT_VALIDATE_SKIP = 0, + NFT_VALIDATE_NEED = 1, + NFT_VALIDATE_DO = 2, +}; + +struct nft_audit_data { + struct nft_table *table; + int entries; + int op; + struct list_head list; +}; + +struct nft_set_elem_catchall { + struct list_head list; + struct callback_head rcu; + struct nft_elem_priv *elem; +}; + +struct nft_module_request { + struct list_head list; + char module[56]; + bool done; +}; + +struct nftnl_skb_parms { + bool report; +}; + +struct nft_chain_hook { + u32 num; + s32 priority; + const struct nft_chain_type *type; + struct list_head list; +}; + +struct nft_expr_info { + const struct nft_expr_ops *ops; + const struct 
nlattr *attr; + struct nlattr *tb[17]; +}; + +struct nft_rule_dump_ctx { + unsigned int s_idx; + char *table; + char *chain; + bool reset; +}; + +struct nft_set_dump_args { + const struct netlink_callback *cb; + struct nft_set_iter iter; + struct sk_buff *skb; + bool reset; +}; + +struct nft_set_dump_ctx { + const struct nft_set *set; + struct nft_ctx ctx; + bool reset; +}; + +struct nft_obj_dump_ctx { + unsigned int s_idx; + char *table; + u32 type; + bool reset; +}; + +struct nft_flowtable_hook { + u32 num; + int priority; + struct list_head list; +}; + +struct nft_flowtable_filter { + char *table; +}; + +struct fib_nh_notifier_info { + struct fib_notifier_info info; + struct fib_nh *fib_nh; +}; + +struct udp_dev_scratch { + u32 _tsize_state; + u16 len; + bool is_linear; + bool csum_unnecessary; +}; + +struct udp_iter_state { + struct seq_net_private p; + int bucket; +}; + +typedef __u32 Elf32_Addr; + +typedef __u16 Elf32_Half; + +typedef __u32 Elf32_Off; + +struct elf32_hdr { + unsigned char e_ident[16]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; + Elf32_Off e_phoff; + Elf32_Off e_shoff; + Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; +}; + +typedef struct elf32_hdr Elf32_Ehdr; + +struct elf32_phdr { + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; +}; + +typedef struct elf32_phdr Elf32_Phdr; + +typedef struct elf64_phdr Elf64_Phdr; + +typedef struct elf32_note Elf32_Nhdr; + +struct freader { + void *buf; + u32 buf_sz; + int err; + union { + struct { + struct file *file; + struct folio *folio; + void *addr; + loff_t folio_off; + bool may_fault; + }; + struct { + const char *data; + u64 data_sz; + }; + }; +}; + +struct step_hook { + struct list_head node; + int (*fn)(struct pt_regs *, long unsigned int); +}; + +enum execmem_range_flags { + EXECMEM_KASAN_SHADOW = 1, + EXECMEM_ROX_CACHE = 2, +}; + +struct execmem_range { + long unsigned int start; + long unsigned int end; + long unsigned int fallback_start; + long unsigned int fallback_end; + pgprot_t pgprot; + unsigned int alignment; + enum execmem_range_flags flags; +}; + +struct execmem_info { + struct execmem_range ranges[5]; +}; + +struct tlb_inv_context___2 { + struct kvm_s2_mmu *mmu; + u64 tcr; + u64 sctlr; +}; + +struct smpboot_thread_data { + unsigned int cpu; + unsigned int status; + struct smp_hotplug_thread *ht; +}; + +enum { + HP_THREAD_NONE = 0, + HP_THREAD_ACTIVE = 1, + HP_THREAD_PARKED = 2, +}; + +enum tick_broadcast_state { + TICK_BROADCAST_EXIT = 0, + TICK_BROADCAST_ENTER = 1, +}; + +struct trace_event_raw_rpm_internal { + struct trace_entry ent; + u32 __data_loc_name; + int flags; + int usage_count; + int disable_depth; + int runtime_auto; + int request_pending; + int irq_safe; + int child_count; + char __data[0]; +}; + +struct trace_event_raw_rpm_return_int { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int ip; + int ret; + char __data[0]; +}; + +struct trace_event_raw_rpm_status { + struct trace_entry ent; + u32 __data_loc_name; + int status; + char __data[0]; +}; + +struct trace_event_data_offsets_rpm_internal { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_rpm_return_int { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_rpm_status { + u32 name; + const 
void *name_ptr_; +}; + +typedef void (*btf_trace_rpm_suspend)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_resume)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_idle)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_usage)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_return_int)(void *, struct device *, long unsigned int, int); + +typedef void (*btf_trace_rpm_status)(void *, struct device *, enum rpm_status); + +struct range_node { + struct rb_node rn_rbnode; + struct rb_node rb_range_size; + u32 rn_start; + u32 rn_last; + u32 __rn_subtree_last; +}; + +struct bpf_iter_kmem_cache { + __u64 __opaque[1]; +}; + +struct bpf_iter_kmem_cache_kern { + struct kmem_cache *pos; +}; + +struct bpf_iter__kmem_cache { + union { + struct bpf_iter_meta *meta; + }; + union { + struct kmem_cache *s; + }; +}; + +union kmem_cache_iter_priv { + struct bpf_iter_kmem_cache it; + struct bpf_iter_kmem_cache_kern kit; +}; + +struct kmalloc_info_struct { + const char *name[4]; + unsigned int size; +}; + +struct trace_event_raw_kmem_cache_alloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + long unsigned int gfp_flags; + int node; + bool accounted; + char __data[0]; +}; + +struct trace_event_raw_kmalloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + long unsigned int gfp_flags; + int node; + char __data[0]; +}; + +struct trace_event_raw_kfree { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + char __data[0]; +}; + +struct trace_event_raw_kmem_cache_free { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free_batched { + struct trace_entry ent; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + long unsigned int gfp_flags; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + int percpu_refill; + char __data[0]; +}; + +struct trace_event_raw_mm_page_pcpu_drain { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc_extfrag { + struct trace_entry ent; + long unsigned int pfn; + int alloc_order; + int fallback_order; + int alloc_migratetype; + int fallback_migratetype; + int change_ownership; + char __data[0]; +}; + +struct trace_event_raw_mm_alloc_contig_migrate_range_info { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + long unsigned int nr_migrated; + long unsigned int nr_reclaimed; + long unsigned int nr_mapped; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_rss_stat { + struct trace_entry ent; + unsigned int mm_id; + unsigned int curr; + int member; + long int size; + char __data[0]; +}; + +struct trace_event_data_offsets_kmem_cache_alloc {}; + +struct trace_event_data_offsets_kmalloc {}; + +struct trace_event_data_offsets_kfree {}; + +struct trace_event_data_offsets_kmem_cache_free { + u32 name; + const void *name_ptr_; +}; + +struct 
trace_event_data_offsets_mm_page_free {}; + +struct trace_event_data_offsets_mm_page_free_batched {}; + +struct trace_event_data_offsets_mm_page_alloc {}; + +struct trace_event_data_offsets_mm_page {}; + +struct trace_event_data_offsets_mm_page_pcpu_drain {}; + +struct trace_event_data_offsets_mm_page_alloc_extfrag {}; + +struct trace_event_data_offsets_mm_alloc_contig_migrate_range_info {}; + +struct trace_event_data_offsets_rss_stat {}; + +typedef void (*btf_trace_kmem_cache_alloc)(void *, long unsigned int, const void *, struct kmem_cache *, gfp_t, int); + +typedef void (*btf_trace_kmalloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int); + +typedef void (*btf_trace_kfree)(void *, long unsigned int, const void *); + +typedef void (*btf_trace_kmem_cache_free)(void *, long unsigned int, const void *, const struct kmem_cache *); + +typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int); + +typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *); + +typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int); + +typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, int, int); + +typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int); + +typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int); + +typedef void (*btf_trace_mm_alloc_contig_migrate_range_info)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int); + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + short unsigned int d_reclen; + unsigned char d_type; + char d_name[0]; +}; + +struct linux_dirent { + long unsigned int d_ino; + long unsigned int d_off; + short unsigned int d_reclen; + char d_name[0]; +}; + +struct getdents_callback___2 { + struct dir_context ctx; + struct linux_dirent *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct getdents_callback64 { + struct dir_context ctx; + struct linux_dirent64 *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct kioctx; + +struct kioctx_table { + struct callback_head rcu; + unsigned int nr; + struct kioctx *table[0]; +}; + +typedef __kernel_ulong_t aio_context_t; + +enum { + IOCB_CMD_PREAD = 0, + IOCB_CMD_PWRITE = 1, + IOCB_CMD_FSYNC = 2, + IOCB_CMD_FDSYNC = 3, + IOCB_CMD_POLL = 5, + IOCB_CMD_NOOP = 6, + IOCB_CMD_PREADV = 7, + IOCB_CMD_PWRITEV = 8, +}; + +struct io_event { + __u64 data; + __u64 obj; + __s64 res; + __s64 res2; +}; + +struct iocb { + __u64 aio_data; + __kernel_rwf_t aio_rw_flags; + __u32 aio_key; + __u16 aio_lio_opcode; + __s16 aio_reqprio; + __u32 aio_fildes; + __u64 aio_buf; + __u64 aio_nbytes; + __s64 aio_offset; + __u64 aio_reserved2; + __u32 aio_flags; + __u32 aio_resfd; +}; + +typedef int kiocb_cancel_fn(struct kiocb *); + +struct aio_ring { + unsigned int id; + unsigned int nr; + unsigned int head; + unsigned int tail; + unsigned int magic; + unsigned int compat_features; + unsigned int incompat_features; + unsigned int header_length; + struct io_event io_events[0]; +}; + +struct kioctx_cpu; + +struct ctx_rq_wait; + +struct kioctx { + struct percpu_ref users; + atomic_t dead; + struct percpu_ref reqs; + long unsigned int user_id; + struct kioctx_cpu *cpu; + unsigned int req_batch; + unsigned int max_reqs; + unsigned int nr_events; + long unsigned int mmap_base; + long unsigned int mmap_size; + struct folio 
**ring_folios; + long int nr_pages; + struct rcu_work free_rwork; + struct ctx_rq_wait *rq_wait; + long: 64; + long: 64; + long: 64; + struct { + atomic_t reqs_available; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct { + spinlock_t ctx_lock; + struct list_head active_reqs; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct { + struct mutex ring_lock; + wait_queue_head_t wait; + long: 64; + }; + struct { + unsigned int tail; + unsigned int completed_events; + spinlock_t completion_lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct folio *internal_folios[8]; + struct file *aio_ring_file; + unsigned int id; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct kioctx_cpu { + unsigned int reqs_available; +}; + +struct ctx_rq_wait { + struct completion comp; + atomic_t count; +}; + +struct fsync_iocb { + struct file *file; + struct work_struct work; + bool datasync; + struct cred *creds; +}; + +struct poll_iocb { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + bool cancelled; + bool work_scheduled; + bool work_need_resched; + struct wait_queue_entry wait; + struct work_struct work; +}; + +struct aio_kiocb { + union { + struct file *ki_filp; + struct kiocb rw; + struct fsync_iocb fsync; + struct poll_iocb poll; + }; + struct kioctx *ki_ctx; + kiocb_cancel_fn *ki_cancel; + struct io_event ki_res; + struct list_head ki_list; + refcount_t ki_refcnt; + struct eventfd_ctx *ki_eventfd; +}; + +struct aio_waiter { + struct wait_queue_entry w; + size_t min_nr; +}; + +struct aio_poll_table { + struct poll_table_struct pt; + struct aio_kiocb *iocb; + bool queued; + int error; +}; + +struct __aio_sigset { + const sigset_t *sigmask; + size_t sigsetsize; +}; + +struct fat_fid { + u32 i_gen; + u32 i_pos_low; + u16 i_pos_hi; + u16 parent_i_pos_hi; + u32 parent_i_pos_low; + u32 parent_i_gen; +}; + +struct fuse_ioctl_in { + uint64_t fh; + uint32_t flags; + uint32_t cmd; + uint64_t arg; + uint32_t in_size; + uint32_t out_size; +}; + +struct fuse_ioctl_iovec { + uint64_t base; + uint64_t len; +}; + +struct fuse_ioctl_out { + int32_t result; + uint32_t flags; + uint32_t in_iovs; + uint32_t out_iovs; +}; + +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + __u32 fsx_cowextsize; + unsigned char fsx_pad[8]; +}; + +struct fsverity_enable_arg { + __u32 version; + __u32 hash_algorithm; + __u32 block_size; + __u32 salt_size; + __u64 salt_ptr; + __u32 sig_size; + __u32 __reserved1; + __u64 sig_ptr; + __u64 __reserved2[11]; +}; + +struct fsverity_digest { + __u16 digest_algorithm; + __u16 digest_size; + __u8 digest[0]; +}; + +struct workspace___2 { + void *mem; + void *buf; + void *cbuf; + struct list_head list; +}; + +struct btrfs_fiemap_entry { + u64 offset; + u64 phys; + u64 len; + u32 flags; +}; + +struct fiemap_cache { + struct btrfs_fiemap_entry *entries; + int entries_size; + int entries_pos; + u64 next_search_offset; + unsigned int extents_mapped; + u64 offset; + u64 phys; + u64 len; + u32 flags; + bool cached; +}; + +struct pkcs7_parse_context { + struct pkcs7_message *msg; + struct pkcs7_signed_info *sinfo; + struct pkcs7_signed_info **ppsinfo; + struct x509_certificate *certs; + struct x509_certificate **ppcerts; + long unsigned int data; + enum OID last_oid; + unsigned int x509_index; + unsigned int sinfo_index; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int 
raw_issuer_size; + const void *raw_issuer; + const void *raw_skid; + unsigned int raw_skid_size; + bool expect_skid; +}; + +struct bio_map_data { + bool is_our_pages: 1; + bool is_null_mapped: 1; + struct iov_iter iter; + struct iovec iov[0]; +}; + +struct wait_page_key { + struct folio *folio; + int bit_nr; + int page_match; +}; + +struct io_async_rw { + size_t bytes_done; + struct iov_iter iter; + struct iov_iter_state iter_state; + struct iovec fast_iov; + struct iovec *free_iovec; + int free_iov_nr; + struct wait_page_queue wpq; +}; + +struct io_rw { + struct kiocb kiocb; + u64 addr; + u32 len; + rwf_t flags; +}; + +enum io_wq_type { + IO_WQ_BOUND = 0, + IO_WQ_UNBOUND = 1, +}; + +typedef struct io_wq_work *free_work_fn(struct io_wq_work *); + +typedef void io_wq_work_fn(struct io_wq_work *); + +struct io_wq_acct { + unsigned int nr_workers; + unsigned int max_workers; + int index; + atomic_t nr_running; + raw_spinlock_t lock; + struct io_wq_work_list work_list; + long unsigned int flags; +}; + +struct io_wq { + long unsigned int state; + free_work_fn *free_work; + io_wq_work_fn *do_work; + struct io_wq_hash *hash; + atomic_t worker_refs; + struct completion worker_done; + struct hlist_node cpuhp_node; + struct task_struct *task; + struct io_wq_acct acct[2]; + raw_spinlock_t lock; + struct hlist_nulls_head free_list; + struct list_head all_list; + struct wait_queue_entry wait; + struct io_wq_work *hash_tail[64]; + cpumask_var_t cpu_mask; +}; + +struct io_wq_data { + struct io_wq_hash *hash; + struct task_struct *task; + io_wq_work_fn *do_work; + free_work_fn *free_work; +}; + +enum { + IO_WORKER_F_UP = 0, + IO_WORKER_F_RUNNING = 1, + IO_WORKER_F_FREE = 2, + IO_WORKER_F_BOUND = 3, +}; + +enum { + IO_WQ_BIT_EXIT = 0, +}; + +enum { + IO_ACCT_STALLED_BIT = 0, +}; + +struct io_worker { + refcount_t ref; + int create_index; + long unsigned int flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + struct io_wq *wq; + struct io_wq_work *cur_work; + raw_spinlock_t lock; + struct completion ref_done; + long unsigned int create_state; + struct callback_head create_work; + int init_retries; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +enum { + IO_WQ_ACCT_BOUND = 0, + IO_WQ_ACCT_UNBOUND = 1, + IO_WQ_ACCT_NR = 2, +}; + +struct io_cb_cancel_data { + work_cancel_fn *fn; + void *data; + int nr_running; + int nr_pending; + bool cancel_all; +}; + +struct online_data { + unsigned int cpu; + bool online; +}; + +struct ddebug_class_param { + union { + long unsigned int *bits; + unsigned int *lvl; + }; + char flags[8]; + const struct ddebug_class_map *map; +}; + +enum ib_uverbs_write_cmds { + IB_USER_VERBS_CMD_GET_CONTEXT = 0, + IB_USER_VERBS_CMD_QUERY_DEVICE = 1, + IB_USER_VERBS_CMD_QUERY_PORT = 2, + IB_USER_VERBS_CMD_ALLOC_PD = 3, + IB_USER_VERBS_CMD_DEALLOC_PD = 4, + IB_USER_VERBS_CMD_CREATE_AH = 5, + IB_USER_VERBS_CMD_MODIFY_AH = 6, + IB_USER_VERBS_CMD_QUERY_AH = 7, + IB_USER_VERBS_CMD_DESTROY_AH = 8, + IB_USER_VERBS_CMD_REG_MR = 9, + IB_USER_VERBS_CMD_REG_SMR = 10, + IB_USER_VERBS_CMD_REREG_MR = 11, + IB_USER_VERBS_CMD_QUERY_MR = 12, + IB_USER_VERBS_CMD_DEREG_MR = 13, + IB_USER_VERBS_CMD_ALLOC_MW = 14, + IB_USER_VERBS_CMD_BIND_MW = 15, + IB_USER_VERBS_CMD_DEALLOC_MW = 16, + IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = 17, + IB_USER_VERBS_CMD_CREATE_CQ = 18, + IB_USER_VERBS_CMD_RESIZE_CQ = 19, + IB_USER_VERBS_CMD_DESTROY_CQ = 20, + IB_USER_VERBS_CMD_POLL_CQ = 21, + IB_USER_VERBS_CMD_PEEK_CQ = 22, + IB_USER_VERBS_CMD_REQ_NOTIFY_CQ 
= 23, + IB_USER_VERBS_CMD_CREATE_QP = 24, + IB_USER_VERBS_CMD_QUERY_QP = 25, + IB_USER_VERBS_CMD_MODIFY_QP = 26, + IB_USER_VERBS_CMD_DESTROY_QP = 27, + IB_USER_VERBS_CMD_POST_SEND = 28, + IB_USER_VERBS_CMD_POST_RECV = 29, + IB_USER_VERBS_CMD_ATTACH_MCAST = 30, + IB_USER_VERBS_CMD_DETACH_MCAST = 31, + IB_USER_VERBS_CMD_CREATE_SRQ = 32, + IB_USER_VERBS_CMD_MODIFY_SRQ = 33, + IB_USER_VERBS_CMD_QUERY_SRQ = 34, + IB_USER_VERBS_CMD_DESTROY_SRQ = 35, + IB_USER_VERBS_CMD_POST_SRQ_RECV = 36, + IB_USER_VERBS_CMD_OPEN_XRCD = 37, + IB_USER_VERBS_CMD_CLOSE_XRCD = 38, + IB_USER_VERBS_CMD_CREATE_XSRQ = 39, + IB_USER_VERBS_CMD_OPEN_QP = 40, +}; + +enum ib_uverbs_wc_opcode { + IB_UVERBS_WC_SEND = 0, + IB_UVERBS_WC_RDMA_WRITE = 1, + IB_UVERBS_WC_RDMA_READ = 2, + IB_UVERBS_WC_COMP_SWAP = 3, + IB_UVERBS_WC_FETCH_ADD = 4, + IB_UVERBS_WC_BIND_MW = 5, + IB_UVERBS_WC_LOCAL_INV = 6, + IB_UVERBS_WC_TSO = 7, + IB_UVERBS_WC_FLUSH = 8, + IB_UVERBS_WC_ATOMIC_WRITE = 9, +}; + +enum ib_uverbs_create_qp_mask { + IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1, +}; + +enum ib_uverbs_wr_opcode { + IB_UVERBS_WR_RDMA_WRITE = 0, + IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, + IB_UVERBS_WR_SEND = 2, + IB_UVERBS_WR_SEND_WITH_IMM = 3, + IB_UVERBS_WR_RDMA_READ = 4, + IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, + IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, + IB_UVERBS_WR_LOCAL_INV = 7, + IB_UVERBS_WR_BIND_MW = 8, + IB_UVERBS_WR_SEND_WITH_INV = 9, + IB_UVERBS_WR_TSO = 10, + IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, + IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, + IB_UVERBS_WR_FLUSH = 14, + IB_UVERBS_WR_ATOMIC_WRITE = 15, +}; + +enum ib_uverbs_device_cap_flags { + IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1ULL, + IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 2ULL, + IB_UVERBS_DEVICE_BAD_QKEY_CNTR = 4ULL, + IB_UVERBS_DEVICE_RAW_MULTI = 8ULL, + IB_UVERBS_DEVICE_AUTO_PATH_MIG = 16ULL, + IB_UVERBS_DEVICE_CHANGE_PHY_PORT = 32ULL, + IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE = 64ULL, + IB_UVERBS_DEVICE_CURR_QP_STATE_MOD = 128ULL, + IB_UVERBS_DEVICE_SHUTDOWN_PORT = 256ULL, + IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT = 1024ULL, + IB_UVERBS_DEVICE_SYS_IMAGE_GUID = 2048ULL, + IB_UVERBS_DEVICE_RC_RNR_NAK_GEN = 4096ULL, + IB_UVERBS_DEVICE_SRQ_RESIZE = 8192ULL, + IB_UVERBS_DEVICE_N_NOTIFY_CQ = 16384ULL, + IB_UVERBS_DEVICE_MEM_WINDOW = 131072ULL, + IB_UVERBS_DEVICE_UD_IP_CSUM = 262144ULL, + IB_UVERBS_DEVICE_XRC = 1048576ULL, + IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS = 2097152ULL, + IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A = 8388608ULL, + IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B = 16777216ULL, + IB_UVERBS_DEVICE_RC_IP_CSUM = 33554432ULL, + IB_UVERBS_DEVICE_RAW_IP_CSUM = 67108864ULL, + IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING = 536870912ULL, + IB_UVERBS_DEVICE_RAW_SCATTER_FCS = 17179869184ULL, + IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING = 68719476736ULL, + IB_UVERBS_DEVICE_FLUSH_GLOBAL = 274877906944ULL, + IB_UVERBS_DEVICE_FLUSH_PERSISTENT = 549755813888ULL, + IB_UVERBS_DEVICE_ATOMIC_WRITE = 1099511627776ULL, +}; + +enum ib_uverbs_raw_packet_caps { + IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING = 1, + IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS = 2, + IB_UVERBS_RAW_PACKET_CAP_IP_CSUM = 4, + IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP = 8, +}; + +enum ib_uverbs_access_flags { + IB_UVERBS_ACCESS_LOCAL_WRITE = 1, + IB_UVERBS_ACCESS_REMOTE_WRITE = 2, + IB_UVERBS_ACCESS_REMOTE_READ = 4, + IB_UVERBS_ACCESS_REMOTE_ATOMIC = 8, + IB_UVERBS_ACCESS_MW_BIND = 16, + IB_UVERBS_ACCESS_ZERO_BASED = 32, + IB_UVERBS_ACCESS_ON_DEMAND = 64, + IB_UVERBS_ACCESS_HUGETLB = 128, + IB_UVERBS_ACCESS_FLUSH_GLOBAL = 256, + 
IB_UVERBS_ACCESS_FLUSH_PERSISTENT = 512, + IB_UVERBS_ACCESS_RELAXED_ORDERING = 1048576, + IB_UVERBS_ACCESS_OPTIONAL_RANGE = 1072693248, +}; + +enum ib_uverbs_srq_type { + IB_UVERBS_SRQT_BASIC = 0, + IB_UVERBS_SRQT_XRC = 1, + IB_UVERBS_SRQT_TM = 2, +}; + +enum ib_uverbs_wq_type { + IB_UVERBS_WQT_RQ = 0, +}; + +enum ib_uverbs_wq_flags { + IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1, + IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 2, + IB_UVERBS_WQ_FLAGS_DELAY_DROP = 4, + IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 8, +}; + +enum ib_uverbs_qp_type { + IB_UVERBS_QPT_RC = 2, + IB_UVERBS_QPT_UC = 3, + IB_UVERBS_QPT_UD = 4, + IB_UVERBS_QPT_RAW_PACKET = 8, + IB_UVERBS_QPT_XRC_INI = 9, + IB_UVERBS_QPT_XRC_TGT = 10, + IB_UVERBS_QPT_DRIVER = 255, +}; + +enum ib_uverbs_qp_create_flags { + IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2, + IB_UVERBS_QP_CREATE_SCATTER_FCS = 256, + IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 512, + IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 2048, + IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 4096, +}; + +enum ib_uverbs_gid_type { + IB_UVERBS_GID_TYPE_IB = 0, + IB_UVERBS_GID_TYPE_ROCE_V1 = 1, + IB_UVERBS_GID_TYPE_ROCE_V2 = 2, +}; + +enum ib_poll_context { + IB_POLL_SOFTIRQ = 0, + IB_POLL_WORKQUEUE = 1, + IB_POLL_UNBOUND_WORKQUEUE = 2, + IB_POLL_LAST_POOL_TYPE = 2, + IB_POLL_DIRECT = 3, +}; + +struct ddebug_table { + struct list_head link; + struct list_head maps; + const char *mod_name; + unsigned int num_ddebugs; + struct _ddebug *ddebugs; +}; + +struct ddebug_query { + const char *filename; + const char *module; + const char *function; + const char *format; + const char *class_string; + unsigned int first_lineno; + unsigned int last_lineno; +}; + +struct ddebug_iter { + struct ddebug_table *table; + int idx; +}; + +struct flag_settings { + unsigned int flags; + unsigned int mask; +}; + +struct flagsbuf { + char buf[8]; +}; + +struct clk_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct clk *clk; + struct clk_hw *clk_hw; +}; + +struct clk_lookup_alloc { + struct clk_lookup cl; + char dev_id[24]; + char con_id[16]; +}; + +struct devres_node { + struct list_head entry; + dr_release_t release; + const char *name; + size_t size; +}; + +struct devres { + struct devres_node node; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u8 data[0]; +}; + +struct devres_group { + struct devres_node node[2]; + void *id; + int color; +}; + +struct action_devres { + void *data; + void (*action)(void *); +}; + +struct pages_devres { + long unsigned int addr; + unsigned int order; +}; + +struct trace_event_raw_devres { + struct trace_entry ent; + u32 __data_loc_devname; + struct device *dev; + const char *op; + void *node; + u32 __data_loc_name; + size_t size; + char __data[0]; +}; + +struct trace_event_data_offsets_devres { + u32 devname; + const void *devname_ptr_; + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_devres_log)(void *, struct device *, const char *, void *, const char *, size_t); + +struct mdiobus_devres { + struct mii_bus *mii; +}; + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct quirk_entry { + u16 vid; + u16 pid; + u32 flags; +}; + +struct bulk_cb_wrap { + __le32 Signature; + __u32 Tag; + __le32 DataTransferLength; + __u8 Flags; + __u8 Lun; + __u8 Length; + __u8 CDB[16]; +}; + +struct bulk_cs_wrap { + __le32 Signature; + __u32 Tag; + __le32 Residue; + __u8 Status; +}; + 
+struct power_supply_hwmon { + struct power_supply *psy; + long unsigned int *props; +}; + +struct hwmon_type_attr_list { + const u32 *attrs; + size_t n_attrs; +}; + +struct pmu_irq_ops { + void (*enable_pmuirq)(unsigned int); + void (*disable_pmuirq)(unsigned int); + void (*free_pmuirq)(unsigned int, int, void *); +}; + +struct fib_notifier_net { + struct list_head fib_notifier_ops; + struct atomic_notifier_head fib_chain; +}; + +struct strp_msg { + int full_len; + int offset; +}; + +struct channels_reply_data { + struct ethnl_reply_data base; + struct ethtool_channels channels; +}; + +enum nft_meta_keys { + NFT_META_LEN = 0, + NFT_META_PROTOCOL = 1, + NFT_META_PRIORITY = 2, + NFT_META_MARK = 3, + NFT_META_IIF = 4, + NFT_META_OIF = 5, + NFT_META_IIFNAME = 6, + NFT_META_OIFNAME = 7, + NFT_META_IFTYPE = 8, + NFT_META_OIFTYPE = 9, + NFT_META_SKUID = 10, + NFT_META_SKGID = 11, + NFT_META_NFTRACE = 12, + NFT_META_RTCLASSID = 13, + NFT_META_SECMARK = 14, + NFT_META_NFPROTO = 15, + NFT_META_L4PROTO = 16, + NFT_META_BRI_IIFNAME = 17, + NFT_META_BRI_OIFNAME = 18, + NFT_META_PKTTYPE = 19, + NFT_META_CPU = 20, + NFT_META_IIFGROUP = 21, + NFT_META_OIFGROUP = 22, + NFT_META_CGROUP = 23, + NFT_META_PRANDOM = 24, + NFT_META_SECPATH = 25, + NFT_META_IIFKIND = 26, + NFT_META_OIFKIND = 27, + NFT_META_BRI_IIFPVID = 28, + NFT_META_BRI_IIFVPROTO = 29, + NFT_META_TIME_NS = 30, + NFT_META_TIME_DAY = 31, + NFT_META_TIME_HOUR = 32, + NFT_META_SDIF = 33, + NFT_META_SDIFNAME = 34, + NFT_META_BRI_BROUTE = 35, + __NFT_META_IIFTYPE = 36, +}; + +enum nft_meta_attributes { + NFTA_META_UNSPEC = 0, + NFTA_META_DREG = 1, + NFTA_META_KEY = 2, + NFTA_META_SREG = 3, + __NFTA_META_MAX = 4, +}; + +enum { + NFT_PAYLOAD_CTX_INNER_TUN = 1, + NFT_PAYLOAD_CTX_INNER_LL = 2, + NFT_PAYLOAD_CTX_INNER_NH = 4, + NFT_PAYLOAD_CTX_INNER_TH = 8, +}; + +struct nft_inner_tun_ctx { + u16 type; + u16 inner_tunoff; + u16 inner_lloff; + u16 inner_nhoff; + u16 inner_thoff; + __be16 llproto; + u8 l4proto; + u8 flags; +}; + +struct nft_meta { + enum nft_meta_keys key: 8; + u8 len; + union { + u8 dreg; + u8 sreg; + }; +}; + +enum { + IFLA_INET_UNSPEC = 0, + IFLA_INET_CONF = 1, + __IFLA_INET_MAX = 2, +}; + +struct in_validator_info { + __be32 ivi_addr; + struct in_device *ivi_dev; + struct netlink_ext_ack *extack; +}; + +struct inet_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; +}; + +struct devinet_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table devinet_vars[33]; +}; + +struct bictcp { + u32 cnt; + u32 last_max_cwnd; + u32 last_cwnd; + u32 last_time; + u32 bic_origin_point; + u32 bic_K; + u32 delay_min; + u32 epoch_start; + u32 ack_cnt; + u32 tcp_cwnd; + u16 unused; + u8 sample_cnt; + u8 found; + u32 round_start; + u32 end_seq; + u32 last_ack; + u32 curr_rtt; +}; + +struct nft_ct_frag6_pernet { + struct ctl_table_header *nf_frag_frags_hdr; + struct fqdir *fqdir; +}; + +enum { + IFLA_BR_UNSPEC = 0, + IFLA_BR_FORWARD_DELAY = 1, + IFLA_BR_HELLO_TIME = 2, + IFLA_BR_MAX_AGE = 3, + IFLA_BR_AGEING_TIME = 4, + IFLA_BR_STP_STATE = 5, + IFLA_BR_PRIORITY = 6, + IFLA_BR_VLAN_FILTERING = 7, + IFLA_BR_VLAN_PROTOCOL = 8, + IFLA_BR_GROUP_FWD_MASK = 9, + IFLA_BR_ROOT_ID = 10, + IFLA_BR_BRIDGE_ID = 11, + IFLA_BR_ROOT_PORT = 12, + IFLA_BR_ROOT_PATH_COST = 13, + IFLA_BR_TOPOLOGY_CHANGE = 14, + IFLA_BR_TOPOLOGY_CHANGE_DETECTED = 15, + IFLA_BR_HELLO_TIMER = 16, + IFLA_BR_TCN_TIMER = 17, + IFLA_BR_TOPOLOGY_CHANGE_TIMER = 18, + IFLA_BR_GC_TIMER = 19, + IFLA_BR_GROUP_ADDR = 20, + 
IFLA_BR_FDB_FLUSH = 21, + IFLA_BR_MCAST_ROUTER = 22, + IFLA_BR_MCAST_SNOOPING = 23, + IFLA_BR_MCAST_QUERY_USE_IFADDR = 24, + IFLA_BR_MCAST_QUERIER = 25, + IFLA_BR_MCAST_HASH_ELASTICITY = 26, + IFLA_BR_MCAST_HASH_MAX = 27, + IFLA_BR_MCAST_LAST_MEMBER_CNT = 28, + IFLA_BR_MCAST_STARTUP_QUERY_CNT = 29, + IFLA_BR_MCAST_LAST_MEMBER_INTVL = 30, + IFLA_BR_MCAST_MEMBERSHIP_INTVL = 31, + IFLA_BR_MCAST_QUERIER_INTVL = 32, + IFLA_BR_MCAST_QUERY_INTVL = 33, + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL = 34, + IFLA_BR_MCAST_STARTUP_QUERY_INTVL = 35, + IFLA_BR_NF_CALL_IPTABLES = 36, + IFLA_BR_NF_CALL_IP6TABLES = 37, + IFLA_BR_NF_CALL_ARPTABLES = 38, + IFLA_BR_VLAN_DEFAULT_PVID = 39, + IFLA_BR_PAD = 40, + IFLA_BR_VLAN_STATS_ENABLED = 41, + IFLA_BR_MCAST_STATS_ENABLED = 42, + IFLA_BR_MCAST_IGMP_VERSION = 43, + IFLA_BR_MCAST_MLD_VERSION = 44, + IFLA_BR_VLAN_STATS_PER_PORT = 45, + IFLA_BR_MULTI_BOOLOPT = 46, + IFLA_BR_MCAST_QUERIER_STATE = 47, + IFLA_BR_FDB_N_LEARNED = 48, + IFLA_BR_FDB_MAX_LEARNED = 49, + __IFLA_BR_MAX = 50, +}; + +enum { + LINK_XSTATS_TYPE_UNSPEC = 0, + LINK_XSTATS_TYPE_BRIDGE = 1, + LINK_XSTATS_TYPE_BOND = 2, + __LINK_XSTATS_TYPE_MAX = 3, +}; + +struct bridge_vlan_info { + __u16 flags; + __u16 vid; +}; + +struct bridge_vlan_xstats { + __u64 rx_bytes; + __u64 rx_packets; + __u64 tx_bytes; + __u64 tx_packets; + __u16 vid; + __u16 flags; + __u32 pad2; +}; + +enum { + BRIDGE_XSTATS_UNSPEC = 0, + BRIDGE_XSTATS_VLAN = 1, + BRIDGE_XSTATS_MCAST = 2, + BRIDGE_XSTATS_PAD = 3, + BRIDGE_XSTATS_STP = 4, + __BRIDGE_XSTATS_MAX = 5, +}; + +struct psci_operations { + u32 (*get_version)(); + int (*cpu_suspend)(u32, long unsigned int); + int (*cpu_off)(u32); + int (*cpu_on)(long unsigned int, long unsigned int); + int (*migrate)(long unsigned int); + int (*affinity_info)(long unsigned int, long unsigned int); + int (*migrate_info_type)(); +}; + +typedef void (*hcall_t)(struct kvm_cpu_context *); + +struct die_args { + struct pt_regs *regs; + const char *str; + long int err; + int trapnr; + int signr; +}; + +struct trace_event_raw_notifier_info { + struct trace_entry ent; + void *cb; + char __data[0]; +}; + +struct trace_event_data_offsets_notifier_info {}; + +typedef void (*btf_trace_notifier_register)(void *, void *); + +typedef void (*btf_trace_notifier_unregister)(void *, void *); + +typedef void (*btf_trace_notifier_run)(void *, void *); + +struct irq_devres { + unsigned int irq; + void *dev_id; +}; + +struct irq_desc_devres { + unsigned int from; + unsigned int cnt; +}; + +enum { + GP_IDLE = 0, + GP_ENTER = 1, + GP_PASSED = 2, + GP_EXIT = 3, + GP_REPLAY = 4, +}; + +struct rcu_gp_oldstate { + long unsigned int rgos_norm; + long unsigned int rgos_exp; +}; + +struct rcu_exp_work { + long unsigned int rew_s; + struct kthread_work rew_work; +}; + +struct rcu_node { + raw_spinlock_t lock; + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + long unsigned int completedqs; + long unsigned int qsmask; + long unsigned int rcu_gp_init_mask; + long unsigned int qsmaskinit; + long unsigned int qsmaskinitnext; + long unsigned int expmask; + long unsigned int expmaskinit; + long unsigned int expmaskinitnext; + struct kthread_worker *exp_kworker; + long unsigned int cbovldmask; + long unsigned int ffmask; + long unsigned int grpmask; + int grplo; + int grphi; + u8 grpnum; + u8 level; + bool wait_blkd_tasks; + struct rcu_node *parent; + struct list_head blkd_tasks; + struct list_head *gp_tasks; + struct list_head *exp_tasks; + struct list_head *boost_tasks; + struct rt_mutex boost_mtx; + long unsigned int 
boost_time; + struct mutex kthread_mutex; + struct task_struct *boost_kthread_task; + unsigned int boost_kthread_status; + long unsigned int n_boosts; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + raw_spinlock_t fqslock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t exp_lock; + long unsigned int exp_seq_rq; + wait_queue_head_t exp_wq[4]; + struct rcu_exp_work rew; + bool exp_need_flush; + raw_spinlock_t exp_poll_lock; + long unsigned int exp_seq_poll_rq; + struct work_struct exp_poll_wq; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +union rcu_noqs { + struct { + u8 norm; + u8 exp; + } b; + u16 s; +}; + +struct rcu_snap_record { + long unsigned int gp_seq; + u64 cputime_irq; + u64 cputime_softirq; + u64 cputime_system; + long unsigned int nr_hardirqs; + unsigned int nr_softirqs; + long long unsigned int nr_csw; + long unsigned int jiffies; +}; + +struct rcu_data { + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + union rcu_noqs cpu_no_qs; + bool core_needs_qs; + bool beenonline; + bool gpwrap; + bool cpu_started; + struct rcu_node *mynode; + long unsigned int grpmask; + long unsigned int ticks_this_gp; + struct irq_work defer_qs_iw; + bool defer_qs_iw_pending; + struct work_struct strict_work; + struct rcu_segcblist cblist; + long int qlen_last_fqs_check; + long unsigned int n_cbs_invoked; + long unsigned int n_force_qs_snap; + long int blimit; + int watching_snap; + bool rcu_need_heavy_qs; + bool rcu_urgent_qs; + bool rcu_forced_tick; + bool rcu_forced_tick_exp; + long unsigned int barrier_seq_snap; + struct callback_head barrier_head; + int exp_watching_snap; + struct task_struct *rcu_cpu_kthread_task; + unsigned int rcu_cpu_kthread_status; + char rcu_cpu_has_work; + long unsigned int rcuc_activity; + unsigned int softirq_snap; + struct irq_work rcu_iw; + bool rcu_iw_pending; + long unsigned int rcu_iw_gp_seq; + long unsigned int rcu_ofl_gp_seq; + short int rcu_ofl_gp_state; + long unsigned int rcu_onl_gp_seq; + short int rcu_onl_gp_state; + long unsigned int last_fqs_resched; + long unsigned int last_sched_clock; + struct rcu_snap_record snap_record; + long int lazy_len; + int cpu; +}; + +struct sr_wait_node { + atomic_t inuse; + struct llist_node node; +}; + +struct rcu_state { + struct rcu_node node[3]; + struct rcu_node *level[3]; + int ncpus; + int n_online_cpus; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int gp_seq; + long unsigned int gp_max; + struct task_struct *gp_kthread; + struct swait_queue_head gp_wq; + short int gp_flags; + short int gp_state; + long unsigned int gp_wake_time; + long unsigned int gp_wake_seq; + long unsigned int gp_seq_polled; + long unsigned int gp_seq_polled_snap; + long unsigned int gp_seq_polled_exp_snap; + struct mutex barrier_mutex; + atomic_t barrier_cpu_count; + struct completion barrier_completion; + long unsigned int barrier_sequence; + raw_spinlock_t barrier_lock; + struct mutex exp_mutex; + struct mutex exp_wake_mutex; + long unsigned int expedited_sequence; + atomic_t expedited_need_qs; + struct swait_queue_head expedited_wq; + int ncpus_snap; + u8 cbovld; + u8 cbovldnext; + long unsigned int jiffies_force_qs; + long unsigned int jiffies_kick_kthreads; + long unsigned int n_force_qs; + long unsigned int gp_start; + long unsigned int gp_end; + long unsigned int gp_activity; + long unsigned int gp_req_activity; + long unsigned int jiffies_stall; + int nr_fqs_jiffies_stall; + long unsigned int jiffies_resched; + 
long unsigned int n_force_qs_gpstart; + const char *name; + char abbr; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + arch_spinlock_t ofl_lock; + struct llist_head srs_next; + struct llist_node *srs_wait_tail; + struct llist_node *srs_done_tail; + struct sr_wait_node srs_wait_nodes[5]; + struct work_struct srs_cleanup_work; + atomic_t srs_cleanups_pending; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct kvfree_rcu_bulk_data { + struct list_head list; + struct rcu_gp_oldstate gp_snap; + long unsigned int nr_records; + void *records[0]; +}; + +struct kfree_rcu_cpu; + +struct kfree_rcu_cpu_work { + struct rcu_work rcu_work; + struct callback_head *head_free; + struct rcu_gp_oldstate head_free_gp_snap; + struct list_head bulk_head_free[2]; + struct kfree_rcu_cpu *krcp; +}; + +struct kfree_rcu_cpu { + struct callback_head *head; + long unsigned int head_gp_snap; + atomic_t head_count; + struct list_head bulk_head[2]; + atomic_t bulk_count[2]; + struct kfree_rcu_cpu_work krw_arr[2]; + raw_spinlock_t lock; + struct delayed_work monitor_work; + bool initialized; + struct delayed_work page_cache_work; + atomic_t backoff_page_cache_fill; + atomic_t work_in_progress; + struct hrtimer hrtimer; + struct llist_head bkvcache; + int nr_bkv_objs; +}; + +struct rcu_stall_chk_rdr { + int nesting; + union rcu_special rs; + bool on_blkd_list; +}; + +enum { + EVENT_TRIGGER_FL_PROBE = 1, +}; + +struct bpf_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; + u64 delegate_cmds; + u64 delegate_maps; + u64 delegate_progs; + u64 delegate_attachs; +}; + +enum { + BTF_VAR_STATIC = 0, + BTF_VAR_GLOBAL_ALLOCATED = 1, + BTF_VAR_GLOBAL_EXTERN = 2, +}; + +struct btf_var { + __u32 linkage; +}; + +struct btf_decl_tag { + __s32 component_idx; +}; + +struct sk_msg_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 size; + union { + struct bpf_sock *sk; + }; +}; + +struct sk_reuseport_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 len; + __u32 eth_protocol; + __u32 ip_protocol; + __u32 bind_inany; + __u32 hash; + union { + struct bpf_sock *sk; + }; + union { + struct bpf_sock *migrating_sk; + }; +}; + +struct bpf_sock_addr { + __u32 user_family; + __u32 user_ip4; + __u32 user_ip6[4]; + __u32 user_port; + __u32 family; + __u32 type; + __u32 protocol; + __u32 msg_src_ip4; + __u32 msg_src_ip6[4]; + union { + struct bpf_sock *sk; + }; +}; + +struct bpf_sock_ops { + __u32 op; + union { + __u32 args[4]; + __u32 reply; + __u32 replylong[4]; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 is_fullsock; + __u32 snd_cwnd; + __u32 srtt_us; + __u32 bpf_sock_ops_cb_flags; + __u32 state; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u32 sk_txhash; + __u64 bytes_received; + __u64 bytes_acked; + union { + struct bpf_sock *sk; + }; + union { + void *skb_data; + }; + union { + void *skb_data_end; + }; + __u32 skb_len; + __u32 skb_tcp_flags; + __u64 skb_hwtstamp; +}; + +struct 
bpf_sockopt { + union { + struct bpf_sock *sk; + }; + union { + void *optval; + }; + union { + void *optval_end; + }; + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + +struct btf_struct_metas { + u32 cnt; + struct btf_struct_meta types[0]; +}; + +enum { + BTF_FIELDS_MAX = 11, +}; + +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + struct sock *migrating_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; + +enum btf_kfunc_hook { + BTF_KFUNC_HOOK_COMMON = 0, + BTF_KFUNC_HOOK_XDP = 1, + BTF_KFUNC_HOOK_TC = 2, + BTF_KFUNC_HOOK_STRUCT_OPS = 3, + BTF_KFUNC_HOOK_TRACING = 4, + BTF_KFUNC_HOOK_SYSCALL = 5, + BTF_KFUNC_HOOK_FMODRET = 6, + BTF_KFUNC_HOOK_CGROUP = 7, + BTF_KFUNC_HOOK_SCHED_ACT = 8, + BTF_KFUNC_HOOK_SK_SKB = 9, + BTF_KFUNC_HOOK_SOCKET_FILTER = 10, + BTF_KFUNC_HOOK_LWT = 11, + BTF_KFUNC_HOOK_NETFILTER = 12, + BTF_KFUNC_HOOK_KPROBE = 13, + BTF_KFUNC_HOOK_MAX = 14, +}; + +enum { + BTF_KFUNC_SET_MAX_CNT = 256, + BTF_DTOR_KFUNC_MAX_CNT = 256, + BTF_KFUNC_FILTER_MAX_CNT = 16, +}; + +struct btf_kfunc_hook_filter { + btf_kfunc_filter_t filters[16]; + u32 nr_filters; +}; + +struct btf_kfunc_set_tab { + struct btf_id_set8 *sets[14]; + struct btf_kfunc_hook_filter hook_filters[14]; +}; + +struct btf_id_dtor_kfunc_tab { + u32 cnt; + struct btf_id_dtor_kfunc dtors[0]; +}; + +struct btf_struct_ops_tab { + u32 cnt; + u32 capacity; + struct bpf_struct_ops_desc ops[0]; +}; + +enum verifier_phase { + CHECK_META = 0, + CHECK_TYPE = 1, +}; + +struct resolve_vertex { + const struct btf_type *t; + u32 type_id; + u16 next_member; +}; + +enum visit_state { + NOT_VISITED = 0, + VISITED = 1, + RESOLVED = 2, +}; + +enum resolve_mode { + RESOLVE_TBD = 0, + RESOLVE_PTR = 1, + RESOLVE_STRUCT_OR_ARRAY = 2, +}; + +struct btf_sec_info { + u32 off; + u32 len; +}; + +struct btf_verifier_env { + struct btf *btf; + u8 *visit_states; + struct resolve_vertex stack[32]; + struct bpf_verifier_log log; + u32 log_type_id; + u32 top_stack; + enum verifier_phase phase; + enum resolve_mode resolve_mode; +}; + +struct btf_show { + u64 flags; + void *target; + void (*showfn)(struct btf_show *, const char *, va_list); + const struct btf *btf; + struct { + u8 depth; + u8 depth_to_show; + u8 depth_check; + u8 array_member: 1; + u8 array_terminated: 1; + u16 array_encoding; + u32 type_id; + int status; + const struct btf_type *type; + const struct btf_member *member; + char name[80]; + } state; + struct { + u32 size; + void *head; + void *data; + u8 safe[32]; + } obj; +}; + +struct btf_kind_operations { + s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); + int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); + int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + void (*log_details)(struct btf_verifier_env *, const struct btf_type *); + void (*show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct btf_show *); +}; + +enum { + BTF_FIELD_IGNORE = 0, + BTF_FIELD_FOUND = 1, +}; + +struct btf_field_info { + enum btf_field_type type; + u32 off; + union { + struct { + u32 type_id; + } kptr; + struct { + const char *node_name; + u32 value_btf_id; + } graph_root; + }; +}; + +struct bpf_ctx_convert { + struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog; + struct sk_buff 
BPF_PROG_TYPE_SOCKET_FILTER_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern; + struct xdp_md BPF_PROG_TYPE_XDP_prog; + struct xdp_buff BPF_PROG_TYPE_XDP_kern; + struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog; + struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern; + struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog; + struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern; + struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog; + struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog; + struct sk_buff BPF_PROG_TYPE_LWT_IN_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog; + struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern; + struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog; + struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern; + struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog; + struct sk_buff BPF_PROG_TYPE_SK_SKB_kern; + struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog; + struct sk_msg BPF_PROG_TYPE_SK_MSG_kern; + struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog; + struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern; + bpf_user_pt_regs_t BPF_PROG_TYPE_KPROBE_prog; + struct pt_regs BPF_PROG_TYPE_KPROBE_kern; + __u64 BPF_PROG_TYPE_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_TRACEPOINT_kern; + struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog; + struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern; + void *BPF_PROG_TYPE_TRACING_prog; + void *BPF_PROG_TYPE_TRACING_kern; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern; + struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog; + struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern; + struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog; + struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern; + struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog; + struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern; + struct bpf_sk_lookup BPF_PROG_TYPE_SK_LOOKUP_prog; + struct bpf_sk_lookup_kern BPF_PROG_TYPE_SK_LOOKUP_kern; + void *BPF_PROG_TYPE_STRUCT_OPS_prog; + void *BPF_PROG_TYPE_STRUCT_OPS_kern; + void *BPF_PROG_TYPE_EXT_prog; + void *BPF_PROG_TYPE_EXT_kern; + void *BPF_PROG_TYPE_SYSCALL_prog; + void *BPF_PROG_TYPE_SYSCALL_kern; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_prog; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_kern; +}; + +enum { + __ctx_convertBPF_PROG_TYPE_SOCKET_FILTER = 0, + __ctx_convertBPF_PROG_TYPE_SCHED_CLS = 1, + __ctx_convertBPF_PROG_TYPE_SCHED_ACT = 2, + __ctx_convertBPF_PROG_TYPE_XDP = 3, + __ctx_convertBPF_PROG_TYPE_CGROUP_SKB = 4, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK = 5, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK_ADDR = 6, + __ctx_convertBPF_PROG_TYPE_LWT_IN = 7, + __ctx_convertBPF_PROG_TYPE_LWT_OUT = 8, + __ctx_convertBPF_PROG_TYPE_LWT_XMIT = 9, + __ctx_convertBPF_PROG_TYPE_LWT_SEG6LOCAL = 10, + __ctx_convertBPF_PROG_TYPE_SOCK_OPS = 11, + __ctx_convertBPF_PROG_TYPE_SK_SKB = 12, + __ctx_convertBPF_PROG_TYPE_SK_MSG = 13, + 
__ctx_convertBPF_PROG_TYPE_FLOW_DISSECTOR = 14, + __ctx_convertBPF_PROG_TYPE_KPROBE = 15, + __ctx_convertBPF_PROG_TYPE_TRACEPOINT = 16, + __ctx_convertBPF_PROG_TYPE_PERF_EVENT = 17, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT = 18, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 19, + __ctx_convertBPF_PROG_TYPE_TRACING = 20, + __ctx_convertBPF_PROG_TYPE_CGROUP_DEVICE = 21, + __ctx_convertBPF_PROG_TYPE_CGROUP_SYSCTL = 22, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCKOPT = 23, + __ctx_convertBPF_PROG_TYPE_SK_REUSEPORT = 24, + __ctx_convertBPF_PROG_TYPE_SK_LOOKUP = 25, + __ctx_convertBPF_PROG_TYPE_STRUCT_OPS = 26, + __ctx_convertBPF_PROG_TYPE_EXT = 27, + __ctx_convertBPF_PROG_TYPE_SYSCALL = 28, + __ctx_convertBPF_PROG_TYPE_NETFILTER = 29, + __ctx_convert_unused = 30, +}; + +enum bpf_struct_walk_result { + WALK_SCALAR = 0, + WALK_PTR = 1, + WALK_STRUCT = 2, +}; + +struct bpf_cand_cache { + const char *name; + u32 name_len; + u16 kind; + u16 cnt; + struct { + const struct btf *btf; + u32 id; + } cands[0]; +}; + +enum btf_arg_tag { + ARG_TAG_CTX = 1, + ARG_TAG_NONNULL = 2, + ARG_TAG_TRUSTED = 4, + ARG_TAG_NULLABLE = 8, + ARG_TAG_ARENA = 16, +}; + +struct btf_show_snprintf { + struct btf_show show; + int len_left; + int len; +}; + +enum { + BTF_MODULE_F_LIVE = 1, +}; + +struct btf_module { + struct list_head list; + struct module *module; + struct btf *btf; + struct bin_attribute *sysfs_attr; + int flags; +}; + +typedef u64 (*btf_bpf_btf_find_by_name_kind)(char *, int, u32, int); + +enum vm_stat_item { + NR_DIRTY_THRESHOLD = 0, + NR_DIRTY_BG_THRESHOLD = 1, + NR_MEMMAP_PAGES = 2, + NR_MEMMAP_BOOT_PAGES = 3, + NR_VM_STAT_ITEMS = 4, +}; + +struct contig_page_info { + long unsigned int free_pages; + long unsigned int free_blocks_total; + long unsigned int free_blocks_suitable; +}; + +struct lruvec_stats_percpu { + long int state[30]; + long int state_prev[30]; +}; + +struct lruvec_stats { + long int state[30]; + long int state_local[30]; + long int state_pending[30]; +}; + +struct memcg_vmstats { + long int state[37]; + long unsigned int events[20]; + long int state_local[37]; + long unsigned int events_local[20]; + long int state_pending[37]; + long unsigned int events_pending[20]; + atomic64_t stats_updates; +}; + +struct memcg_vmstats_percpu { + unsigned int stats_updates; + struct memcg_vmstats_percpu *parent; + struct memcg_vmstats *vmstats; + long int state[37]; + long unsigned int events[20]; + long int state_prev[37]; + long unsigned int events_prev[20]; + long: 64; + long: 64; + long: 64; +}; + +struct trace_event_raw_memcg_rstat_stats { + struct trace_entry ent; + u64 id; + int item; + int val; + char __data[0]; +}; + +struct trace_event_raw_memcg_rstat_events { + struct trace_entry ent; + u64 id; + int item; + long unsigned int val; + char __data[0]; +}; + +struct trace_event_raw_memcg_flush_stats { + struct trace_entry ent; + u64 id; + s64 stats_updates; + bool force; + bool needs_flush; + char __data[0]; +}; + +struct trace_event_data_offsets_memcg_rstat_stats {}; + +struct trace_event_data_offsets_memcg_rstat_events {}; + +struct trace_event_data_offsets_memcg_flush_stats {}; + +typedef void (*btf_trace_mod_memcg_state)(void *, struct mem_cgroup *, int, int); + +typedef void (*btf_trace_mod_memcg_lruvec_state)(void *, struct mem_cgroup *, int, int); + +typedef void (*btf_trace_count_memcg_events)(void *, struct mem_cgroup *, int, long unsigned int); + +typedef void (*btf_trace_memcg_flush_stats)(void *, struct mem_cgroup *, s64, bool, bool); + +struct memory_stat { + const char *name; 
+ unsigned int idx; +}; + +struct memcg_stock_pcp { + local_lock_t stock_lock; + struct mem_cgroup *cached; + unsigned int nr_pages; + struct obj_cgroup *cached_objcg; + struct pglist_data *cached_pgdat; + unsigned int nr_bytes; + int nr_slab_reclaimable_b; + int nr_slab_unreclaimable_b; + struct work_struct work; + long unsigned int flags; +}; + +struct aggregate_control { + long int *aggregate; + long int *local; + long int *pending; + long int *ppending; + long int *cstat; + long int *cstat_prev; + int size; +}; + +enum { + MEMORY_RECLAIM_SWAPPINESS = 0, + MEMORY_RECLAIM_NULL = 1, +}; + +struct uncharge_gather { + struct mem_cgroup *memcg; + long unsigned int nr_memory; + long unsigned int pgpgout; + long unsigned int nr_kmem; + int nid; +}; + +enum SHIFT_DIRECTION { + SHIFT_LEFT = 0, + SHIFT_RIGHT = 1, +}; + +struct ext4_extent_tail { + __le32 et_checksum; +}; + +struct btrfs_raid_stride { + __le64 devid; + __le64 physical; +}; + +struct btrfs_stripe_extent { + struct { + struct {} __empty_strides; + struct btrfs_raid_stride strides[0]; + }; +}; + +struct root_name_map { + u64 id; + const char *name; +}; + +struct cmac_tfm_ctx { + struct crypto_cipher *child; + __be64 consts[0]; +}; + +struct cmac_desc_ctx { + unsigned int len; + u8 odds[0]; +}; + +struct queue_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct gendisk *, char *); + ssize_t (*store)(struct gendisk *, const char *, size_t); + void (*load_module)(struct gendisk *, const char *, size_t); +}; + +struct io_fadvise { + struct file *file; + u64 offset; + u64 len; + u32 advice; +}; + +struct io_madvise { + struct file *file; + u64 addr; + u64 len; + u32 advice; +}; + +typedef enum { + HEAD = 0, + FLAGS = 1, + TIME = 2, + OS = 3, + EXLEN = 4, + EXTRA = 5, + NAME = 6, + COMMENT = 7, + HCRC = 8, + DICTID = 9, + DICT = 10, + TYPE = 11, + TYPEDO = 12, + STORED = 13, + COPY = 14, + TABLE = 15, + LENLENS = 16, + CODELENS = 17, + LEN = 18, + LENEXT = 19, + DIST = 20, + DISTEXT = 21, + MATCH = 22, + LIT = 23, + CHECK = 24, + LENGTH = 25, + DONE = 26, + BAD = 27, + MEM = 28, + SYNC = 29, +} inflate_mode; + +struct inflate_state { + inflate_mode mode; + int last; + int wrap; + int havedict; + int flags; + unsigned int dmax; + long unsigned int check; + long unsigned int total; + unsigned int wbits; + unsigned int wsize; + unsigned int whave; + unsigned int write; + unsigned char *window; + long unsigned int hold; + unsigned int bits; + unsigned int length; + unsigned int offset; + unsigned int extra; + const code *lencode; + const code *distcode; + unsigned int lenbits; + unsigned int distbits; + unsigned int ncode; + unsigned int nlen; + unsigned int ndist; + unsigned int have; + code *next; + short unsigned int lens[320]; + short unsigned int work[288]; + code codes[2048]; +}; + +struct inflate_workspace { + struct inflate_state inflate_state; + unsigned char working_window[32768]; +}; + +struct pci_slot_attribute { + struct attribute attr; + ssize_t (*show)(struct pci_slot *, char *); + ssize_t (*store)(struct pci_slot *, const char *, size_t); +}; + +typedef void (*of_init_fn_1)(struct device_node *); + +struct clk_fixed_rate { + struct clk_hw hw; + long unsigned int fixed_rate; + long unsigned int fixed_accuracy; + long unsigned int flags; +}; + +struct irq_affinity_devres { + unsigned int count; + unsigned int irq[0]; +}; + +struct platform_object { + struct platform_device pdev; + char name[0]; +}; + +struct virtio_blk_geometry { + __virtio16 cylinders; + __u8 heads; + __u8 sectors; +}; + +struct 
virtio_blk_zoned_characteristics { + __virtio32 zone_sectors; + __virtio32 max_open_zones; + __virtio32 max_active_zones; + __virtio32 max_append_sectors; + __virtio32 write_granularity; + __u8 model; + __u8 unused2[3]; +}; + +struct virtio_blk_config { + __virtio64 capacity; + __virtio32 size_max; + __virtio32 seg_max; + struct virtio_blk_geometry geometry; + __virtio32 blk_size; + __u8 physical_block_exp; + __u8 alignment_offset; + __virtio16 min_io_size; + __virtio32 opt_io_size; + __u8 wce; + __u8 unused; + __virtio16 num_queues; + __virtio32 max_discard_sectors; + __virtio32 max_discard_seg; + __virtio32 discard_sector_alignment; + __virtio32 max_write_zeroes_sectors; + __virtio32 max_write_zeroes_seg; + __u8 write_zeroes_may_unmap; + __u8 unused1[3]; + __virtio32 max_secure_erase_sectors; + __virtio32 max_secure_erase_seg; + __virtio32 secure_erase_sector_alignment; + struct virtio_blk_zoned_characteristics zoned; +}; + +struct virtio_blk_outhdr { + __virtio32 type; + __virtio32 ioprio; + __virtio64 sector; +}; + +struct virtio_blk_discard_write_zeroes { + __le64 sector; + __le32 num_sectors; + __le32 flags; +}; + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[16]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct virtio_blk { + struct mutex vdev_mutex; + struct virtio_device *vdev; + struct gendisk *disk; + struct blk_mq_tag_set tag_set; + struct work_struct config_work; + int index; + int num_vqs; + int io_queues[3]; + struct virtio_blk_vq *vqs; + unsigned int zone_sectors; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + union { + u8 status; + struct { + __virtio64 sector; + u8 status; + } zone_append; + } in_hdr; + size_t in_hdr_len; + struct sg_table sg_table; + struct scatterlist sg[0]; +}; + +enum ata_xfer_mask { + ATA_MASK_PIO = 127, + ATA_MASK_MWDMA = 3968, + ATA_MASK_UDMA = 1044480, +}; + +struct trace_event_raw_ata_qc_issue_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char cmd; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char feature; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + unsigned char proto; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_qc_complete_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char status; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char error; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_tf_load { + struct trace_entry ent; + unsigned int ata_port; + unsigned char cmd; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char feature; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char proto; + char __data[0]; +}; + +struct trace_event_raw_ata_exec_command_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int tag; + unsigned char cmd; + unsigned char feature; + unsigned char 
hob_nsect; + unsigned char proto; + char __data[0]; +}; + +struct trace_event_raw_ata_bmdma_status { + struct trace_entry ent; + unsigned int ata_port; + unsigned int tag; + unsigned char host_stat; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int eh_action; + unsigned int eh_err_mask; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy_qc { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int qc_flags; + unsigned int eh_err_mask; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_action_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int eh_action; + char __data[0]; +}; + +struct trace_event_raw_ata_link_reset_begin_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int class[2]; + long unsigned int deadline; + char __data[0]; +}; + +struct trace_event_raw_ata_link_reset_end_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int class[2]; + int rc; + char __data[0]; +}; + +struct trace_event_raw_ata_port_eh_begin_template { + struct trace_entry ent; + unsigned int ata_port; + char __data[0]; +}; + +struct trace_event_raw_ata_sff_hsm_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int qc_flags; + unsigned int protocol; + unsigned int hsm_state; + unsigned char dev_state; + char __data[0]; +}; + +struct trace_event_raw_ata_transfer_data_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int flags; + unsigned int offset; + unsigned int bytes; + char __data[0]; +}; + +struct trace_event_raw_ata_sff_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned char hsm_state; + char __data[0]; +}; + +struct trace_event_data_offsets_ata_qc_issue_template {}; + +struct trace_event_data_offsets_ata_qc_complete_template {}; + +struct trace_event_data_offsets_ata_tf_load {}; + +struct trace_event_data_offsets_ata_exec_command_template {}; + +struct trace_event_data_offsets_ata_bmdma_status {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy_qc {}; + +struct trace_event_data_offsets_ata_eh_action_template {}; + +struct trace_event_data_offsets_ata_link_reset_begin_template {}; + +struct trace_event_data_offsets_ata_link_reset_end_template {}; + +struct trace_event_data_offsets_ata_port_eh_begin_template {}; + +struct trace_event_data_offsets_ata_sff_hsm_template {}; + +struct trace_event_data_offsets_ata_transfer_data_template {}; + +struct trace_event_data_offsets_ata_sff_template {}; + +typedef void (*btf_trace_ata_qc_prep)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_issue)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_internal)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_failed)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_done)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_tf_load)(void *, struct ata_port *, const struct ata_taskfile *); + +typedef void (*btf_trace_ata_exec_command)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_setup)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + 
+typedef void (*btf_trace_ata_bmdma_start)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_stop)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_status)(void *, struct ata_port *, unsigned int); + +typedef void (*btf_trace_ata_eh_link_autopsy)(void *, struct ata_device *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_eh_link_autopsy_qc)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_eh_about_to_do)(void *, struct ata_link *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_eh_done)(void *, struct ata_link *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_link_hardreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_slave_hardreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_link_softreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_link_hardreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_slave_hardreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_link_softreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_link_postreset)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_slave_postreset)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_std_sched_eh)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_port_freeze)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_port_thaw)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_sff_hsm_state)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_hsm_command_complete)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_port_intr)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_pio_transfer_data)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_atapi_pio_transfer_data)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_atapi_send_cdb)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_sff_flush_pio_task)(void *, struct ata_port *); + +struct ata_force_param { + const char *name; + u8 cbl; + u8 spd_limit; + unsigned int xfer_mask; + unsigned int quirk_on; + unsigned int quirk_off; + u16 lflags_on; + u16 lflags_off; +}; + +struct ata_force_ent { + int port; + int device; + struct ata_force_param param; +}; + +struct ata_xfer_ent { + int shift; + int bits; + u8 base; +}; + +struct ata_dev_quirks_entry { + const char *model_num; + const char *model_rev; + unsigned int quirks; +}; + +enum mac_version { + RTL_GIGA_MAC_VER_02 = 0, + RTL_GIGA_MAC_VER_03 = 1, + RTL_GIGA_MAC_VER_04 = 2, + RTL_GIGA_MAC_VER_05 = 3, + RTL_GIGA_MAC_VER_06 = 4, + RTL_GIGA_MAC_VER_07 = 5, + RTL_GIGA_MAC_VER_08 = 6, + RTL_GIGA_MAC_VER_09 = 7, + RTL_GIGA_MAC_VER_10 = 8, + RTL_GIGA_MAC_VER_11 = 9, + RTL_GIGA_MAC_VER_14 = 10, + RTL_GIGA_MAC_VER_17 = 11, + RTL_GIGA_MAC_VER_18 = 12, + RTL_GIGA_MAC_VER_19 = 13, + RTL_GIGA_MAC_VER_20 = 14, + RTL_GIGA_MAC_VER_21 = 15, + RTL_GIGA_MAC_VER_22 = 16, + RTL_GIGA_MAC_VER_23 = 17, + RTL_GIGA_MAC_VER_24 = 18, + RTL_GIGA_MAC_VER_25 = 19, + RTL_GIGA_MAC_VER_26 = 20, + 
RTL_GIGA_MAC_VER_28 = 21, + RTL_GIGA_MAC_VER_29 = 22, + RTL_GIGA_MAC_VER_30 = 23, + RTL_GIGA_MAC_VER_31 = 24, + RTL_GIGA_MAC_VER_32 = 25, + RTL_GIGA_MAC_VER_33 = 26, + RTL_GIGA_MAC_VER_34 = 27, + RTL_GIGA_MAC_VER_35 = 28, + RTL_GIGA_MAC_VER_36 = 29, + RTL_GIGA_MAC_VER_37 = 30, + RTL_GIGA_MAC_VER_38 = 31, + RTL_GIGA_MAC_VER_39 = 32, + RTL_GIGA_MAC_VER_40 = 33, + RTL_GIGA_MAC_VER_42 = 34, + RTL_GIGA_MAC_VER_43 = 35, + RTL_GIGA_MAC_VER_44 = 36, + RTL_GIGA_MAC_VER_46 = 37, + RTL_GIGA_MAC_VER_48 = 38, + RTL_GIGA_MAC_VER_51 = 39, + RTL_GIGA_MAC_VER_52 = 40, + RTL_GIGA_MAC_VER_53 = 41, + RTL_GIGA_MAC_VER_61 = 42, + RTL_GIGA_MAC_VER_63 = 43, + RTL_GIGA_MAC_VER_64 = 44, + RTL_GIGA_MAC_VER_65 = 45, + RTL_GIGA_MAC_VER_66 = 46, + RTL_GIGA_MAC_NONE = 47, +}; + +struct rtl8169_private; + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *, struct phy_device *); + +struct phy_reg { + u16 reg; + u16 val; +}; + +struct mmsghdr { + struct user_msghdr msg_hdr; + unsigned int msg_len; +}; + +struct ifconf { + int ifc_len; + union { + char *ifcu_buf; + struct ifreq *ifcu_req; + } ifc_ifcu; +}; + +typedef u32 compat_caddr_t; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_if_settings { + unsigned int type; + unsigned int size; + compat_uptr_t ifs_ifsu; +}; + +struct compat_ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +enum sock_shutdown_cmd { + SHUT_RD = 0, + SHUT_WR = 1, + SHUT_RDWR = 2, +}; + +struct compat_mmsghdr { + struct compat_msghdr msg_hdr; + compat_uint_t msg_len; +}; + +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; +}; + +struct used_address { + struct __kernel_sockaddr_storage name; + unsigned int name_len; +}; + +enum netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN = 0, + NETDEV_LAG_TX_TYPE_RANDOM = 1, + NETDEV_LAG_TX_TYPE_BROADCAST = 2, + NETDEV_LAG_TX_TYPE_ROUNDROBIN = 3, + NETDEV_LAG_TX_TYPE_ACTIVEBACKUP = 4, + NETDEV_LAG_TX_TYPE_HASH = 5, +}; + +enum netdev_lag_hash { + NETDEV_LAG_HASH_NONE = 0, + NETDEV_LAG_HASH_L2 = 1, + NETDEV_LAG_HASH_L34 = 2, + NETDEV_LAG_HASH_L23 = 3, + NETDEV_LAG_HASH_E23 = 4, + NETDEV_LAG_HASH_E34 = 5, + NETDEV_LAG_HASH_VLAN_SRCMAC = 6, + NETDEV_LAG_HASH_UNKNOWN = 7, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; +}; + +struct failover_ops { + int (*slave_pre_register)(struct net_device *, struct net_device *); + int (*slave_register)(struct net_device *, struct net_device *); + int (*slave_pre_unregister)(struct net_device *, struct net_device *); + int (*slave_unregister)(struct net_device *, struct net_device *); + int (*slave_link_change)(struct net_device *, struct net_device *); + int (*slave_name_change)(struct net_device *, struct net_device *); + rx_handler_result_t (*slave_handle_frame)(struct sk_buff **); +}; + +struct failover { + struct list_head list; + struct net_device *failover_dev; + netdevice_tracker dev_tracker; + struct failover_ops *ops; +}; + +struct linkinfo_reply_data { + struct 
ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; +}; + +enum nfnl_acct_msg_types { + NFNL_MSG_ACCT_NEW = 0, + NFNL_MSG_ACCT_GET = 1, + NFNL_MSG_ACCT_GET_CTRZERO = 2, + NFNL_MSG_ACCT_DEL = 3, + NFNL_MSG_ACCT_OVERQUOTA = 4, + NFNL_MSG_ACCT_MAX = 5, +}; + +enum nfnl_acct_flags { + NFACCT_F_QUOTA_PKTS = 1, + NFACCT_F_QUOTA_BYTES = 2, + NFACCT_F_OVERQUOTA = 4, +}; + +enum nfnl_acct_type { + NFACCT_UNSPEC = 0, + NFACCT_NAME = 1, + NFACCT_PKTS = 2, + NFACCT_BYTES = 3, + NFACCT_USE = 4, + NFACCT_FLAGS = 5, + NFACCT_QUOTA = 6, + NFACCT_FILTER = 7, + NFACCT_PAD = 8, + __NFACCT_MAX = 9, +}; + +enum nfnl_attr_filter_type { + NFACCT_FILTER_UNSPEC = 0, + NFACCT_FILTER_MASK = 1, + NFACCT_FILTER_VALUE = 2, + __NFACCT_FILTER_MAX = 3, +}; + +enum { + NFACCT_NO_QUOTA = -1, + NFACCT_UNDERQUOTA = 0, + NFACCT_OVERQUOTA = 1, +}; + +struct nf_acct { + atomic64_t pkts; + atomic64_t bytes; + long unsigned int flags; + struct list_head head; + refcount_t refcnt; + char name[32]; + struct callback_head callback_head; + char data[0]; +}; + +struct nfacct_filter { + u32 value; + u32 mask; +}; + +struct nfnl_acct_net { + struct list_head nfnl_acct_list; +}; + +enum nft_immediate_attributes { + NFTA_IMMEDIATE_UNSPEC = 0, + NFTA_IMMEDIATE_DREG = 1, + NFTA_IMMEDIATE_DATA = 2, + __NFTA_IMMEDIATE_MAX = 3, +}; + +struct nft_immediate_expr { + struct nft_data data; + u8 dreg; + u8 dlen; +}; + +struct ip_tunnel_parm { + char name[16]; + int link; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + struct iphdr iph; +}; + +struct ip_tunnel_net { + struct net_device *fb_tunnel_dev; + struct rtnl_link_ops *rtnl_link_ops; + struct hlist_head tunnels[128]; + struct ip_tunnel *collect_md_tun; + int type; +}; + +struct br_frame_type { + __be16 type; + int (*frame_handler)(struct net_bridge_port *, struct sk_buff *); + struct hlist_node list; +}; + +typedef int (*ioremap_prot_hook_t)(phys_addr_t, size_t, pgprot_t *); + +enum { + AFFINITY = 0, + AFFINITY_LIST = 1, + EFFECTIVE = 2, + EFFECTIVE_LIST = 3, +}; + +struct dma_coherent_mem { + void *virt_base; + dma_addr_t device_base; + long unsigned int pfn_base; + int size; + long unsigned int *bitmap; + spinlock_t spinlock; + bool use_dev_dma_pfn_offset; +}; + +struct kallsym_iter { + loff_t pos; + loff_t pos_mod_end; + loff_t pos_ftrace_mod_end; + loff_t pos_bpf_end; + long unsigned int value; + unsigned int nameoff; + char type; + char name[512]; + char module_name[56]; + int exported; + int show_value; +}; + +struct bpf_iter__ksym { + union { + struct bpf_iter_meta *meta; + }; + union { + struct kallsym_iter *ksym; + }; +}; + +struct tp_module { + struct list_head list; + struct module *mod; +}; + +enum tp_func_state { + TP_FUNC_0 = 0, + TP_FUNC_1 = 1, + TP_FUNC_2 = 2, + TP_FUNC_N = 3, +}; + +enum tp_transition_sync { + TP_TRANSITION_SYNC_1_0_1 = 0, + TP_TRANSITION_SYNC_N_2_1 = 1, + _NR_TP_TRANSITION_SYNC = 2, +}; + +struct tp_transition_snapshot { + long unsigned int rcu; + bool ongoing; +}; + +struct tp_probes { + struct callback_head rcu; + struct tracepoint_func probes[0]; +}; + +struct bpf_preload_info { + char link_name[16]; + struct bpf_link *link; +}; + +struct bpf_preload_ops { + int (*preload)(struct bpf_preload_info *); + struct module *owner; +}; + +enum bpf_type { + BPF_TYPE_UNSPEC = 0, + BPF_TYPE_PROG = 1, + BPF_TYPE_MAP = 2, + BPF_TYPE_LINK = 3, +}; + +struct map_iter { + void *key; + bool done; +}; + +struct bpffs_btf_enums { + const struct btf *btf; + const struct btf_type *cmd_t; + const 
struct btf_type *map_t; + const struct btf_type *prog_t; + const struct btf_type *attach_t; +}; + +enum { + OPT_UID = 0, + OPT_GID = 1, + OPT_MODE = 2, + OPT_DELEGATE_CMDS = 3, + OPT_DELEGATE_MAPS = 4, + OPT_DELEGATE_PROGS = 5, + OPT_DELEGATE_ATTACHS = 6, +}; + +struct bpf_iter__cgroup { + union { + struct bpf_iter_meta *meta; + }; + union { + struct cgroup *cgroup; + }; +}; + +struct cgroup_iter_priv { + struct cgroup_subsys_state *start_css; + bool visited_all; + bool terminate; + int order; +}; + +struct bpf_iter_css { + __u64 __opaque[3]; +}; + +struct bpf_iter_css_kern { + struct cgroup_subsys_state *start; + struct cgroup_subsys_state *pos; + unsigned int flags; +}; + +struct shmem_quota_limits { + qsize_t usrquota_bhardlimit; + qsize_t usrquota_ihardlimit; + qsize_t grpquota_bhardlimit; + qsize_t grpquota_ihardlimit; +}; + +struct shmem_sb_info { + long unsigned int max_blocks; + struct percpu_counter used_blocks; + long unsigned int max_inodes; + long unsigned int free_ispace; + raw_spinlock_t stat_lock; + umode_t mode; + unsigned char huge; + kuid_t uid; + kgid_t gid; + bool full_inums; + bool noswap; + ino_t next_ino; + ino_t *ino_batch; + struct mempolicy *mpol; + spinlock_t shrinklist_lock; + struct list_head shrinklist; + long unsigned int shrinklist_len; + struct shmem_quota_limits qlimits; +}; + +enum sgp_type { + SGP_READ = 0, + SGP_NOALLOC = 1, + SGP_CACHE = 2, + SGP_WRITE = 3, + SGP_FALLOC = 4, +}; + +struct shmem_falloc { + wait_queue_head_t *waitq; + long unsigned int start; + long unsigned int next; + long unsigned int nr_falloced; + long unsigned int nr_unswapped; +}; + +struct shmem_options { + long long unsigned int blocks; + long long unsigned int inodes; + struct mempolicy *mpol; + kuid_t uid; + kgid_t gid; + umode_t mode; + bool full_inums; + int huge; + int seen; + bool noswap; + short unsigned int quota_types; + struct shmem_quota_limits qlimits; +}; + +enum shmem_param { + Opt_gid___5 = 0, + Opt_huge = 1, + Opt_mode___5 = 2, + Opt_mpol = 3, + Opt_nr_blocks = 4, + Opt_nr_inodes = 5, + Opt_size = 6, + Opt_uid___5 = 7, + Opt_inode32 = 8, + Opt_inode64 = 9, + Opt_noswap = 10, + Opt_quota___2 = 11, + Opt_usrquota___2 = 12, + Opt_grpquota___2 = 13, + Opt_usrquota_block_hardlimit = 14, + Opt_usrquota_inode_hardlimit = 15, + Opt_grpquota_block_hardlimit = 16, + Opt_grpquota_inode_hardlimit = 17, + Opt_casefold_version = 18, + Opt_casefold = 19, + Opt_strict_encoding = 20, +}; + +struct dma_block { + struct dma_block *next_block; + dma_addr_t dma; +}; + +struct dma_pool { + struct list_head page_list; + spinlock_t lock; + struct dma_block *next_block; + size_t nr_blocks; + size_t nr_active; + size_t nr_pages; + struct device *dev; + unsigned int size; + unsigned int allocation; + unsigned int boundary; + char name[32]; + struct list_head pools; +}; + +struct dma_page { + struct list_head page_list; + void *vaddr; + dma_addr_t dma; +}; + +struct trace_event_raw_cma_release { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int pfn; + const struct page *page; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_cma_alloc_start { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int count; + unsigned int align; + char __data[0]; +}; + +struct trace_event_raw_cma_alloc_finish { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int pfn; + const struct page *page; + long unsigned int count; + unsigned int align; + int errorno; + char __data[0]; +}; + +struct trace_event_raw_cma_alloc_busy_retry { + struct 
trace_entry ent; + u32 __data_loc_name; + long unsigned int pfn; + const struct page *page; + long unsigned int count; + unsigned int align; + char __data[0]; +}; + +struct trace_event_data_offsets_cma_release { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cma_alloc_start { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cma_alloc_finish { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cma_alloc_busy_retry { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_cma_release)(void *, const char *, long unsigned int, const struct page *, long unsigned int); + +typedef void (*btf_trace_cma_alloc_start)(void *, const char *, long unsigned int, unsigned int); + +typedef void (*btf_trace_cma_alloc_finish)(void *, const char *, long unsigned int, const struct page *, long unsigned int, unsigned int, int); + +typedef void (*btf_trace_cma_alloc_busy_retry)(void *, const char *, long unsigned int, const struct page *, long unsigned int, unsigned int); + +struct cma { + long unsigned int base_pfn; + long unsigned int count; + long unsigned int *bitmap; + unsigned int order_per_bit; + spinlock_t lock; + char name[64]; + bool reserve_pages_on_error; +}; + +typedef int __kernel_daddr_t; + +struct ustat { + __kernel_daddr_t f_tfree; + long unsigned int f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +struct statfs { + __kernel_long_t f_type; + __kernel_long_t f_bsize; + __kernel_long_t f_blocks; + __kernel_long_t f_bfree; + __kernel_long_t f_bavail; + __kernel_long_t f_files; + __kernel_long_t f_ffree; + __kernel_fsid_t f_fsid; + __kernel_long_t f_namelen; + __kernel_long_t f_frsize; + __kernel_long_t f_flags; + __kernel_long_t f_spare[4]; +}; + +struct statfs64 { + __kernel_long_t f_type; + __kernel_long_t f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __kernel_long_t f_namelen; + __kernel_long_t f_frsize; + __kernel_long_t f_flags; + __kernel_long_t f_spare[4]; +}; + +struct signalfd_siginfo { + __u32 ssi_signo; + __s32 ssi_errno; + __s32 ssi_code; + __u32 ssi_pid; + __u32 ssi_uid; + __s32 ssi_fd; + __u32 ssi_tid; + __u32 ssi_band; + __u32 ssi_overrun; + __u32 ssi_trapno; + __s32 ssi_status; + __s32 ssi_int; + __u64 ssi_ptr; + __u64 ssi_utime; + __u64 ssi_stime; + __u64 ssi_addr; + __u16 ssi_addr_lsb; + __u16 __pad2; + __s32 ssi_syscall; + __u64 ssi_call_addr; + __u32 ssi_arch; + __u8 __pad[28]; +}; + +struct signalfd_ctx { + sigset_t sigmask; +}; + +struct sysctl_alias { + const char *kernel_param; + const char *sysctl_param; +}; + +struct debugfs_blob_wrapper { + void *data; + long unsigned int size; +}; + +struct debugfs_devm_entry { + int (*read)(struct seq_file *, void *); + struct device *dev; +}; + +struct btrfs_free_space_entry { + __le64 offset; + __le64 bytes; + __u8 type; +} __attribute__((packed)); + +struct btrfs_free_space_header { + struct btrfs_disk_key location; + __le64 generation; + __le64 num_entries; + __le64 num_bitmaps; +} __attribute__((packed)); + +enum btrfs_disk_cache_state { + BTRFS_DC_WRITTEN = 0, + BTRFS_DC_ERROR = 1, + BTRFS_DC_CLEAR = 2, + BTRFS_DC_SETUP = 3, +}; + +struct btrfs_trim_range { + u64 start; + u64 bytes; + struct list_head list; +}; + +struct crypto_akcipher_sync_data { + struct crypto_akcipher *tfm; + const void *src; + void *dst; + unsigned int slen; + unsigned int dlen; + struct akcipher_request *req; + struct crypto_wait cwait; + struct scatterlist sg; + u8 *buf; +}; + 
+typedef unsigned char u8___2; + +struct rand_data { + void *hash_state; + __u64 prev_time; + __u64 last_delta; + __s64 last_delta2; + unsigned int flags; + unsigned int osr; + unsigned char *mem; + unsigned int memlocation; + unsigned int memblocks; + unsigned int memblocksize; + unsigned int memaccessloops; + unsigned int rct_count; + unsigned int apt_cutoff; + unsigned int apt_cutoff_permanent; + unsigned int apt_observations; + unsigned int apt_count; + unsigned int apt_base; + unsigned int health_failure; + unsigned int apt_base_set: 1; +}; + +struct jitterentropy { + spinlock_t jent_lock; + struct rand_data *entropy_collector; + struct crypto_shash *tfm; + struct shash_desc *sdesc; +}; + +struct bt_iter_data { + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + busy_tag_iter_fn *fn; + void *data; + bool reserved; +}; + +struct bt_tags_iter_data { + struct blk_mq_tags *tags; + busy_tag_iter_fn *fn; + void *data; + unsigned int flags; +}; + +struct blkg_rwstat_sample { + u64 cnt[5]; +}; + +struct bfq_group_data { + struct blkcg_policy_data pd; + unsigned int weight; +}; + +struct io_futex { + struct file *file; + union { + u32 *uaddr; + struct futex_waitv *uwaitv; + }; + long unsigned int futex_val; + long unsigned int futex_mask; + long unsigned int futexv_owned; + u32 futex_flags; + unsigned int futex_nr; + bool futexv_unqueued; +}; + +struct io_futex_data { + struct futex_q q; + struct io_kiocb *req; +}; + +typedef ZSTD_compressionParameters zstd_compression_parameters; + +typedef ZSTD_parameters zstd_parameters; + +typedef ZSTD_CCtx zstd_cctx; + +typedef ZSTD_CDict zstd_cdict; + +typedef ZSTD_CStream zstd_cstream; + +struct n_tty_data { + size_t read_head; + size_t commit_head; + size_t canon_head; + size_t echo_head; + size_t echo_commit; + size_t echo_mark; + long unsigned int char_map[4]; + long unsigned int overrun_time; + unsigned int num_overrun; + bool no_room; + unsigned char lnext: 1; + unsigned char erasing: 1; + unsigned char raw: 1; + unsigned char real_raw: 1; + unsigned char icanon: 1; + unsigned char push: 1; + u8 read_buf[4096]; + long unsigned int read_flags[64]; + u8 echo_buf[4096]; + size_t read_tail; + size_t line_start; + size_t lookahead_count; + unsigned int column; + unsigned int canon_column; + size_t echo_tail; + struct mutex atomic_read_lock; + struct mutex output_lock; +}; + +enum { + ERASE = 0, + WERASE = 1, + KILL = 2, +}; + +struct container_dev { + struct device dev; + int (*offline)(struct container_dev *); +}; + +enum scsi_timeouts { + SCSI_DEFAULT_EH_TIMEOUT = 10000, +}; + +struct async_scan_data { + struct list_head list; + struct Scsi_Host *shost; + struct completion prev_finished; +}; + +enum phy_state_work { + PHY_STATE_WORK_NONE = 0, + PHY_STATE_WORK_ANEG = 1, + PHY_STATE_WORK_SUSPEND = 2, +}; + +struct netdev_lag_lower_state_info { + u8 link_up: 1; + u8 tx_enabled: 1; +}; + +struct net_failover_info { + struct net_device *primary_dev; + struct net_device *standby_dev; + struct rtnl_link_stats64 primary_stats; + struct rtnl_link_stats64 standby_stats; + struct rtnl_link_stats64 failover_stats; + spinlock_t stats_lock; +}; + +enum xfer_buf_dir { + TO_XFER_BUF = 0, + FROM_XFER_BUF = 1, +}; + +typedef struct thermal_zone_device *class_thermal_zone_reverse_t; + +struct trace_event_raw_thermal_temperature { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int temp_prev; + int temp; + char __data[0]; +}; + +struct trace_event_raw_cdev_update { + struct trace_entry ent; + u32 __data_loc_type; + long unsigned int target; + 
char __data[0]; +}; + +struct trace_event_raw_thermal_zone_trip { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int trip; + enum thermal_trip_type trip_type; + char __data[0]; +}; + +struct trace_event_data_offsets_thermal_temperature { + u32 thermal_zone; + const void *thermal_zone_ptr_; +}; + +struct trace_event_data_offsets_cdev_update { + u32 type; + const void *type_ptr_; +}; + +struct trace_event_data_offsets_thermal_zone_trip { + u32 thermal_zone; + const void *thermal_zone_ptr_; +}; + +typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *); + +typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, long unsigned int); + +typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, enum thermal_trip_type); + +struct mbox_client { + struct device *dev; + bool tx_block; + long unsigned int tx_tout; + bool knows_txdone; + void (*rx_callback)(struct mbox_client *, void *); + void (*tx_prepare)(struct mbox_client *, void *); + void (*tx_done)(struct mbox_client *, void *, int); +}; + +struct mbox_chan; + +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *, void *); + int (*flush)(struct mbox_chan *, long unsigned int); + int (*startup)(struct mbox_chan *); + void (*shutdown)(struct mbox_chan *); + bool (*last_tx_done)(struct mbox_chan *); + bool (*peek_data)(struct mbox_chan *); +}; + +struct mbox_controller; + +struct mbox_chan { + struct mbox_controller *mbox; + unsigned int txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned int msg_count; + unsigned int msg_free; + void *msg_data[20]; + spinlock_t lock; + void *con_priv; +}; + +struct mbox_controller { + struct device *dev; + const struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned int txpoll_period; + struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); + struct hrtimer poll_hrt; + spinlock_t poll_hrt_lock; + struct list_head node; +}; + +enum { + INET_DIAG_REQ_NONE = 0, + INET_DIAG_REQ_BYTECODE = 1, + INET_DIAG_REQ_SK_BPF_STORAGES = 2, + INET_DIAG_REQ_PROTOCOL = 3, + __INET_DIAG_REQ_MAX = 4, +}; + +struct sock_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; +}; + +struct sock_diag_handler { + struct module *owner; + __u8 family; + int (*dump)(struct sk_buff *, struct nlmsghdr *); + int (*get_info)(struct sk_buff *, struct sock *); + int (*destroy)(struct sk_buff *, struct nlmsghdr *); +}; + +struct sock_diag_inet_compat { + struct module *owner; + int (*fn)(struct sk_buff *, struct nlmsghdr *); +}; + +struct broadcast_sk { + struct sock *sk; + struct work_struct work; +}; + +struct datalink_proto { + unsigned char type[8]; + struct llc_sap *sap; + short unsigned int header_length; + int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + int (*request)(struct datalink_proto *, struct sk_buff *, const unsigned char *); + struct list_head node; +}; + +struct privflags_reply_data { + struct ethnl_reply_data base; + const char (*priv_flag_names)[32]; + unsigned int n_priv_flags; + u32 priv_flags; +}; + +struct nf_ct_bridge_info { + struct nf_hook_ops *ops; + unsigned int ops_size; + struct module *me; +}; + +enum nft_payload_csum_types { + NFT_PAYLOAD_CSUM_NONE = 0, + NFT_PAYLOAD_CSUM_INET = 1, + NFT_PAYLOAD_CSUM_SCTP = 2, +}; + +enum nft_payload_csum_flags { + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 1, +}; + +enum 
nft_payload_attributes { + NFTA_PAYLOAD_UNSPEC = 0, + NFTA_PAYLOAD_DREG = 1, + NFTA_PAYLOAD_BASE = 2, + NFTA_PAYLOAD_OFFSET = 3, + NFTA_PAYLOAD_LEN = 4, + NFTA_PAYLOAD_SREG = 5, + NFTA_PAYLOAD_CSUM_TYPE = 6, + NFTA_PAYLOAD_CSUM_OFFSET = 7, + NFTA_PAYLOAD_CSUM_FLAGS = 8, + __NFTA_PAYLOAD_MAX = 9, +}; + +enum nft_offload_reg_flags { + NFT_OFFLOAD_F_NETWORK2HOST = 1, +}; + +struct gre_full_hdr { + struct gre_base_hdr fixed_header; + __be16 csum; + __be16 reserved1; + __be32 key; + __be32 seq; +}; + +struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +}; + +struct nft_payload_set { + enum nft_payload_bases base: 8; + u8 offset; + u8 len; + u8 sreg; + u8 csum_type; + u8 csum_offset; + u8 csum_flags; +}; + +struct nft_payload_vlan_hdr { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; +}; + +struct fib_result_nl { + __be32 fl_addr; + u32 fl_mark; + unsigned char fl_tos; + unsigned char fl_scope; + unsigned char tb_id_in; + unsigned char tb_id; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + int err; +}; + +struct nduseroptmsg { + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + short unsigned int nduseropt_opts_len; + int nduseropt_ifindex; + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + short unsigned int nduseropt_pad2; + unsigned int nduseropt_pad3; +}; + +enum { + NDUSEROPT_UNSPEC = 0, + NDUSEROPT_SRCADDR = 1, + __NDUSEROPT_MAX = 2, +}; + +struct rs_msg { + struct icmp6hdr icmph; + __u8 opt[0]; +}; + +struct ra_msg { + struct icmp6hdr icmph; + __be32 reachable_time; + __be32 retrans_timer; +}; + +struct kvm_vcpu_init { + __u32 target; + __u32 features[7]; +}; + +struct kvm_arm_counter_offset { + __u64 counter_offset; + __u64 reserved; +}; + +struct kvm_irq_level { + union { + __u32 irq; + __s32 status; + }; + __u32 level; +}; + +struct kvm_enable_cap { + __u32 cap; + __u32 flags; + __u64 args[4]; + __u8 pad[64]; +}; + +struct kvm_reg_list { + __u64 n; + __u64 reg[0]; +}; + +struct kvm_arm_device_addr { + __u64 id; + __u64 addr; +}; + +struct timer_map { + struct arch_timer_context *direct_vtimer; + struct arch_timer_context *direct_ptimer; + struct arch_timer_context *emul_vtimer; + struct arch_timer_context *emul_ptimer; +}; + +enum { + OUTSIDE_GUEST_MODE = 0, + IN_GUEST_MODE = 1, + EXITING_GUEST_MODE = 2, + READING_SHADOW_PAGE_TABLES = 3, +}; + +struct trace_event_raw_kvm_entry { + struct trace_entry ent; + long unsigned int vcpu_pc; + char __data[0]; +}; + +struct trace_event_raw_kvm_exit { + struct trace_entry ent; + int ret; + unsigned int esr_ec; + long unsigned int vcpu_pc; + char __data[0]; +}; + +struct trace_event_raw_kvm_guest_fault { + struct trace_entry ent; + long unsigned int vcpu_pc; + long unsigned int hsr; + long unsigned int hxfar; + long long unsigned int ipa; + char __data[0]; +}; + +struct trace_event_raw_kvm_access_fault { + struct trace_entry ent; + long unsigned int ipa; + char __data[0]; +}; + +struct trace_event_raw_kvm_irq_line { + struct trace_entry ent; + unsigned int type; + int vcpu_idx; + int irq_num; + int level; + char __data[0]; +}; + +struct trace_event_raw_kvm_mmio_emulate { + struct trace_entry ent; + long unsigned int vcpu_pc; + long unsigned int instr; + long unsigned int cpsr; + char __data[0]; +}; + +struct trace_event_raw_kvm_mmio_nisv { + struct trace_entry ent; + long unsigned int vcpu_pc; + long unsigned int esr; + long unsigned int far; + long unsigned int ipa; + char __data[0]; +}; + +struct trace_event_raw_kvm_set_way_flush { + struct 
trace_entry ent; + long unsigned int vcpu_pc; + bool cache; + char __data[0]; +}; + +struct trace_event_raw_kvm_toggle_cache { + struct trace_entry ent; + long unsigned int vcpu_pc; + bool was; + bool now; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_update_irq { + struct trace_entry ent; + long unsigned int vcpu_id; + __u32 irq; + int level; + char __data[0]; +}; + +struct trace_event_raw_kvm_get_timer_map { + struct trace_entry ent; + long unsigned int vcpu_id; + int direct_vtimer; + int direct_ptimer; + int emul_vtimer; + int emul_ptimer; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_save_state { + struct trace_entry ent; + long unsigned int ctl; + long long unsigned int cval; + int timer_idx; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_restore_state { + struct trace_entry ent; + long unsigned int ctl; + long long unsigned int cval; + int timer_idx; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_hrtimer_expire { + struct trace_entry ent; + int timer_idx; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_emulate { + struct trace_entry ent; + int timer_idx; + bool should_fire; + char __data[0]; +}; + +struct trace_event_raw_kvm_nested_eret { + struct trace_entry ent; + struct kvm_vcpu *vcpu; + long unsigned int elr_el2; + long unsigned int spsr_el2; + long unsigned int target_mode; + long unsigned int hcr_el2; + char __data[0]; +}; + +struct trace_event_raw_kvm_inject_nested_exception { + struct trace_entry ent; + struct kvm_vcpu *vcpu; + long unsigned int esr_el2; + int type; + long unsigned int spsr_el2; + long unsigned int pc; + long unsigned int source_mode; + long unsigned int hcr_el2; + char __data[0]; +}; + +struct trace_event_raw_kvm_forward_sysreg_trap { + struct trace_entry ent; + u64 pc; + u32 sysreg; + bool is_read; + char __data[0]; +}; + +struct trace_event_data_offsets_kvm_entry {}; + +struct trace_event_data_offsets_kvm_exit {}; + +struct trace_event_data_offsets_kvm_guest_fault {}; + +struct trace_event_data_offsets_kvm_access_fault {}; + +struct trace_event_data_offsets_kvm_irq_line {}; + +struct trace_event_data_offsets_kvm_mmio_emulate {}; + +struct trace_event_data_offsets_kvm_mmio_nisv {}; + +struct trace_event_data_offsets_kvm_set_way_flush {}; + +struct trace_event_data_offsets_kvm_toggle_cache {}; + +struct trace_event_data_offsets_kvm_timer_update_irq {}; + +struct trace_event_data_offsets_kvm_get_timer_map {}; + +struct trace_event_data_offsets_kvm_timer_save_state {}; + +struct trace_event_data_offsets_kvm_timer_restore_state {}; + +struct trace_event_data_offsets_kvm_timer_hrtimer_expire {}; + +struct trace_event_data_offsets_kvm_timer_emulate {}; + +struct trace_event_data_offsets_kvm_nested_eret {}; + +struct trace_event_data_offsets_kvm_inject_nested_exception {}; + +struct trace_event_data_offsets_kvm_forward_sysreg_trap {}; + +typedef void (*btf_trace_kvm_entry)(void *, long unsigned int); + +typedef void (*btf_trace_kvm_exit)(void *, int, unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_guest_fault)(void *, long unsigned int, long unsigned int, long unsigned int, long long unsigned int); + +typedef void (*btf_trace_kvm_access_fault)(void *, long unsigned int); + +typedef void (*btf_trace_kvm_irq_line)(void *, unsigned int, int, int, int); + +typedef void (*btf_trace_kvm_mmio_emulate)(void *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_mmio_nisv)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + 
+typedef void (*btf_trace_kvm_set_way_flush)(void *, long unsigned int, bool); + +typedef void (*btf_trace_kvm_toggle_cache)(void *, long unsigned int, bool, bool); + +typedef void (*btf_trace_kvm_timer_update_irq)(void *, long unsigned int, __u32, int); + +typedef void (*btf_trace_kvm_get_timer_map)(void *, long unsigned int, struct timer_map *); + +typedef void (*btf_trace_kvm_timer_save_state)(void *, struct arch_timer_context *); + +typedef void (*btf_trace_kvm_timer_restore_state)(void *, struct arch_timer_context *); + +typedef void (*btf_trace_kvm_timer_hrtimer_expire)(void *, struct arch_timer_context *); + +typedef void (*btf_trace_kvm_timer_emulate)(void *, struct arch_timer_context *, bool); + +typedef void (*btf_trace_kvm_nested_eret)(void *, struct kvm_vcpu *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_inject_nested_exception)(void *, struct kvm_vcpu *, u64, int); + +typedef void (*btf_trace_kvm_forward_sysreg_trap)(void *, struct kvm_vcpu *, u32, bool); + +enum kvm_wfx_trap_policy { + KVM_WFX_NOTRAP_SINGLE_TASK = 0, + KVM_WFX_NOTRAP = 1, + KVM_WFX_TRAP = 2, +}; + +typedef struct { + void *lock; +} class_irq_t; + +typedef struct { + void *lock; +} class_cpus_read_lock_t; + +struct trace_event_raw_sched_kthread_stop { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_stop_ret { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_queue_work { + struct trace_entry ent; + void *work; + void *function; + void *worker; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_wakeup_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int target_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_switch { + struct trace_entry ent; + char prev_comm[16]; + pid_t prev_pid; + int prev_prio; + long int prev_state; + char next_comm[16]; + pid_t next_pid; + int next_prio; + char __data[0]; +}; + +struct trace_event_raw_sched_migrate_task { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int orig_cpu; + int dest_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_process_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_wait { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_fork { + struct trace_entry ent; + char parent_comm[16]; + pid_t parent_pid; + char child_comm[16]; + pid_t child_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_process_exec { + struct trace_entry ent; + u32 __data_loc_filename; + pid_t pid; + pid_t old_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_prepare_exec { + struct trace_entry ent; + u32 __data_loc_interp; + u32 __data_loc_filename; + pid_t pid; + u32 __data_loc_comm; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_runtime { + struct trace_entry ent; + char comm[16]; + pid_t pid; + u64 runtime; + char __data[0]; +}; + +struct trace_event_raw_sched_pi_setprio { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int oldprio; + int newprio; + char __data[0]; +}; + +struct 
trace_event_raw_sched_process_hang { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_move_numa { + struct trace_entry ent; + pid_t pid; + pid_t tgid; + pid_t ngid; + int src_cpu; + int src_nid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_numa_pair_template { + struct trace_entry ent; + pid_t src_pid; + pid_t src_tgid; + pid_t src_ngid; + int src_cpu; + int src_nid; + pid_t dst_pid; + pid_t dst_tgid; + pid_t dst_ngid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_wake_idle_without_ipi { + struct trace_entry ent; + int cpu; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_kthread_stop {}; + +struct trace_event_data_offsets_sched_kthread_stop_ret {}; + +struct trace_event_data_offsets_sched_kthread_work_queue_work {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_start {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_end {}; + +struct trace_event_data_offsets_sched_wakeup_template {}; + +struct trace_event_data_offsets_sched_switch {}; + +struct trace_event_data_offsets_sched_migrate_task {}; + +struct trace_event_data_offsets_sched_process_template {}; + +struct trace_event_data_offsets_sched_process_wait {}; + +struct trace_event_data_offsets_sched_process_fork {}; + +struct trace_event_data_offsets_sched_process_exec { + u32 filename; + const void *filename_ptr_; +}; + +struct trace_event_data_offsets_sched_prepare_exec { + u32 interp; + const void *interp_ptr_; + u32 filename; + const void *filename_ptr_; + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_sched_stat_runtime {}; + +struct trace_event_data_offsets_sched_pi_setprio {}; + +struct trace_event_data_offsets_sched_process_hang {}; + +struct trace_event_data_offsets_sched_move_numa {}; + +struct trace_event_data_offsets_sched_numa_pair_template {}; + +struct trace_event_data_offsets_sched_wake_idle_without_ipi {}; + +typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int); + +typedef void (*btf_trace_sched_kthread_work_queue_work)(void *, struct kthread_worker *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_start)(void *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_end)(void *, struct kthread_work *, kthread_work_func_t); + +typedef void (*btf_trace_sched_waking)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, struct task_struct *, unsigned int); + +typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int); + +typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_wait)(void *, struct pid *); + +typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, struct linux_binprm *); + +typedef void (*btf_trace_sched_prepare_exec)(void *, struct task_struct *, struct linux_binprm *); + +typedef void 
(*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_hang)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int); + +typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int); + +typedef void (*btf_trace_pelt_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_pelt_rt_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_dl_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_hw_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_irq_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_cpu_capacity_tp)(void *, struct rq *); + +typedef void (*btf_trace_sched_overutilized_tp)(void *, struct root_domain *, bool); + +typedef void (*btf_trace_sched_util_est_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_sched_util_est_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_update_nr_running_tp)(void *, struct rq *, int); + +typedef void (*btf_trace_sched_compute_energy_tp)(void *, struct task_struct *, int, long unsigned int, long unsigned int, long unsigned int); + +struct trace_event_raw_ipi_raise { + struct trace_entry ent; + u32 __data_loc_target_cpus; + const char *reason; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *callback; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpumask { + struct trace_entry ent; + u32 __data_loc_cpumask; + void *callsite; + void *callback; + char __data[0]; +}; + +struct trace_event_raw_ipi_handler { + struct trace_entry ent; + const char *reason; + char __data[0]; +}; + +struct trace_event_data_offsets_ipi_raise { + u32 target_cpus; + const void *target_cpus_ptr_; +}; + +struct trace_event_data_offsets_ipi_send_cpu {}; + +struct trace_event_data_offsets_ipi_send_cpumask { + u32 cpumask; + const void *cpumask_ptr_; +}; + +struct trace_event_data_offsets_ipi_handler {}; + +typedef void (*btf_trace_ipi_raise)(void *, const struct cpumask *, const char *); + +typedef void (*btf_trace_ipi_send_cpu)(void *, const unsigned int, long unsigned int, void *); + +typedef void (*btf_trace_ipi_send_cpumask)(void *, const struct cpumask *, long unsigned int, void *); + +typedef void (*btf_trace_ipi_entry)(void *, const char *); + +typedef void (*btf_trace_ipi_exit)(void *, const char *); + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_irq_t; + +struct set_affinity_pending; + +struct migration_arg { + struct task_struct *task; + int dest_cpu; + struct set_affinity_pending *pending; +}; + +struct set_affinity_pending { + refcount_t refs; + unsigned int stop_pending; + struct completion done; + struct cpu_stop_work stop_work; + struct migration_arg arg; +}; + +typedef struct { + int *lock; + long unsigned int flags; +} class_core_lock_t; + +struct cfs_schedulable_data { + struct task_group *tg; + u64 period; + u64 quota; +}; + +enum { + cpuset = 0, + possible = 1, + fail = 2, +}; + +union cpumask_rcuhead { + cpumask_t cpumask; + struct callback_head rcu; +}; + +struct 
eprobe_trace_entry_head { + struct trace_entry ent; +}; + +struct trace_eprobe { + const char *event_system; + const char *event_name; + char *filter_str; + struct trace_event_call *event; + struct dyn_event devent; + struct trace_probe tp; +}; + +struct eprobe_data { + struct trace_event_file *file; + struct trace_eprobe *ep; +}; + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +struct bpf_iter__bpf_prog { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_prog *prog; + }; +}; + +struct reuseport_array { + struct bpf_map map; + struct sock *ptrs[0]; +}; + +enum tlb_flush_reason { + TLB_FLUSH_ON_TASK_SWITCH = 0, + TLB_REMOTE_SHOOTDOWN = 1, + TLB_LOCAL_SHOOTDOWN = 2, + TLB_LOCAL_MM_SHOOTDOWN = 3, + TLB_REMOTE_SEND_IPI = 4, + NR_TLB_FLUSH_REASONS = 5, +}; + +struct trace_event_raw_tlb_flush { + struct trace_entry ent; + int reason; + long unsigned int pages; + char __data[0]; +}; + +struct trace_event_data_offsets_tlb_flush {}; + +typedef void (*btf_trace_tlb_flush)(void *, int, long unsigned int); + +struct trace_event_raw_mm_migrate_pages { + struct trace_entry ent; + long unsigned int succeeded; + long unsigned int failed; + long unsigned int thp_succeeded; + long unsigned int thp_failed; + long unsigned int thp_split; + long unsigned int large_folio_split; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_mm_migrate_pages_start { + struct trace_entry ent; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_migration_pte { + struct trace_entry ent; + long unsigned int addr; + long unsigned int pte; + int order; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_migrate_pages {}; + +struct trace_event_data_offsets_mm_migrate_pages_start {}; + +struct trace_event_data_offsets_migration_pte {}; + +typedef void (*btf_trace_mm_migrate_pages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, enum migrate_mode, int); + +typedef void (*btf_trace_mm_migrate_pages_start)(void *, enum migrate_mode, int); + +typedef void (*btf_trace_set_migration_pte)(void *, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_remove_migration_pte)(void *, long unsigned int, long unsigned int, int); + +struct folio_referenced_arg { + int mapcount; + int referenced; + long unsigned int vm_flags; + struct mem_cgroup *memcg; +}; + +struct dentry_stat_t { + long int nr_dentry; + long int nr_unused; + long int age_limit; + long int want_pages; + long int nr_negative; + long int dummy; +}; + +struct external_name { + union { + atomic_t count; + struct callback_head head; + } u; + unsigned char name[0]; +}; + +enum d_walk_ret { + D_WALK_CONTINUE = 0, + D_WALK_QUIT = 1, + D_WALK_NORETRY = 2, + D_WALK_SKIP = 3, +}; + +struct check_mount { + struct vfsmount *mnt; + unsigned int mounted; +}; + +struct select_data { + struct dentry *start; + union { + long int found; + struct dentry *victim; + }; + struct list_head dispose; +}; + +struct proc_fs_context { + struct pid_namespace *pid_ns; + unsigned int mask; + enum proc_hidepid hidepid; + int gid; + enum proc_pidonly pidonly; +}; + +enum proc_param { + Opt_gid___6 = 0, + Opt_hidepid = 1, + Opt_subset = 2, +}; + +struct mmp_struct { + __le32 mmp_magic; + __le32 mmp_seq; + __le64 mmp_time; + char mmp_nodename[64]; + char mmp_bdevname[32]; + __le16 mmp_check_interval; + __le16 mmp_pad1; + __le32 mmp_pad2[226]; + __le32 mmp_checksum; +}; + +struct args_protover { + __u32 version; +}; + 
+struct args_protosubver { + __u32 sub_version; +}; + +struct args_openmount { + __u32 devid; +}; + +struct args_ready { + __u32 token; +}; + +struct args_fail { + __u32 token; + __s32 status; +}; + +struct args_setpipefd { + __s32 pipefd; +}; + +struct args_timeout { + __u64 timeout; +}; + +struct args_requester { + __u32 uid; + __u32 gid; +}; + +struct args_expire { + __u32 how; +}; + +struct args_askumount { + __u32 may_umount; +}; + +struct args_in { + __u32 type; +}; + +struct args_out { + __u32 devid; + __u32 magic; +}; + +struct args_ismountpoint { + union { + struct args_in in; + struct args_out out; + }; +}; + +struct autofs_dev_ioctl { + __u32 ver_major; + __u32 ver_minor; + __u32 size; + __s32 ioctlfd; + union { + struct args_protover protover; + struct args_protosubver protosubver; + struct args_openmount openmount; + struct args_ready ready; + struct args_fail fail; + struct args_setpipefd setpipefd; + struct args_timeout timeout; + struct args_requester requester; + struct args_expire expire; + struct args_askumount askumount; + struct args_ismountpoint ismountpoint; + }; + char path[0]; +}; + +enum { + AUTOFS_DEV_IOCTL_VERSION_CMD = 113, + AUTOFS_DEV_IOCTL_PROTOVER_CMD = 114, + AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD = 115, + AUTOFS_DEV_IOCTL_OPENMOUNT_CMD = 116, + AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD = 117, + AUTOFS_DEV_IOCTL_READY_CMD = 118, + AUTOFS_DEV_IOCTL_FAIL_CMD = 119, + AUTOFS_DEV_IOCTL_SETPIPEFD_CMD = 120, + AUTOFS_DEV_IOCTL_CATATONIC_CMD = 121, + AUTOFS_DEV_IOCTL_TIMEOUT_CMD = 122, + AUTOFS_DEV_IOCTL_REQUESTER_CMD = 123, + AUTOFS_DEV_IOCTL_EXPIRE_CMD = 124, + AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD = 125, + AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD = 126, +}; + +typedef int (*ioctl_fn)(struct file *, struct autofs_sb_info *, struct autofs_dev_ioctl *); + +struct btrfs_ioctl_qgroup_limit_args { + __u64 qgroupid; + struct btrfs_qgroup_limit lim; +}; + +struct btrfs_ioctl_vol_args_v2 { + __s64 fd; + __u64 transid; + __u64 flags; + union { + struct { + __u64 size; + struct btrfs_qgroup_inherit *qgroup_inherit; + }; + __u64 unused[4]; + }; + union { + char name[4040]; + __u64 devid; + __u64 subvolid; + }; +}; + +struct btrfs_ioctl_scrub_args { + __u64 devid; + __u64 start; + __u64 end; + __u64 flags; + struct btrfs_scrub_progress progress; + __u64 unused[109]; +}; + +struct btrfs_ioctl_dev_info_args { + __u64 devid; + __u8 uuid[16]; + __u64 bytes_used; + __u64 total_bytes; + __u8 fsid[16]; + __u64 unused[377]; + __u8 path[1024]; +}; + +struct btrfs_ioctl_fs_info_args { + __u64 max_id; + __u64 num_devices; + __u8 fsid[16]; + __u32 nodesize; + __u32 sectorsize; + __u32 clone_alignment; + __u16 csum_type; + __u16 csum_size; + __u64 flags; + __u64 generation; + __u8 metadata_uuid[16]; + __u8 reserved[944]; +}; + +struct btrfs_ioctl_feature_flags { + __u64 compat_flags; + __u64 compat_ro_flags; + __u64 incompat_flags; +}; + +struct btrfs_ioctl_ino_lookup_args { + __u64 treeid; + __u64 objectid; + char name[4080]; +}; + +struct btrfs_ioctl_ino_lookup_user_args { + __u64 dirid; + __u64 treeid; + char name[256]; + char path[3824]; +}; + +struct btrfs_ioctl_search_key { + __u64 tree_id; + __u64 min_objectid; + __u64 max_objectid; + __u64 min_offset; + __u64 max_offset; + __u64 min_transid; + __u64 max_transid; + __u32 min_type; + __u32 max_type; + __u32 nr_items; + __u32 unused; + __u64 unused1; + __u64 unused2; + __u64 unused3; + __u64 unused4; +}; + +struct btrfs_ioctl_search_header { + __u64 transid; + __u64 objectid; + __u64 offset; + __u32 type; + __u32 len; +}; + +struct btrfs_ioctl_search_args { + 
struct btrfs_ioctl_search_key key; + char buf[3992]; +}; + +struct btrfs_ioctl_search_args_v2 { + struct btrfs_ioctl_search_key key; + __u64 buf_size; + __u64 buf[0]; +}; + +struct btrfs_ioctl_defrag_range_args { + __u64 start; + __u64 len; + __u64 flags; + __u32 extent_thresh; + __u32 compress_type; + __u32 unused[4]; +}; + +struct btrfs_ioctl_space_info { + __u64 flags; + __u64 total_bytes; + __u64 used_bytes; +}; + +struct btrfs_ioctl_space_args { + __u64 space_slots; + __u64 total_spaces; + struct btrfs_ioctl_space_info spaces[0]; +}; + +struct btrfs_ioctl_ino_path_args { + __u64 inum; + __u64 size; + __u64 reserved[4]; + __u64 fspath; +}; + +struct btrfs_ioctl_logical_ino_args { + __u64 logical; + __u64 size; + __u64 reserved[3]; + __u64 flags; + __u64 inodes; +}; + +struct btrfs_ioctl_quota_rescan_args { + __u64 flags; + __u64 progress; + __u64 reserved[6]; +}; + +struct btrfs_ioctl_qgroup_assign_args { + __u64 assign; + __u64 src; + __u64 dst; +}; + +struct btrfs_ioctl_qgroup_create_args { + __u64 create; + __u64 qgroupid; +}; + +struct btrfs_ioctl_timespec { + __u64 sec; + __u32 nsec; +}; + +struct btrfs_ioctl_received_subvol_args { + char uuid[16]; + __u64 stransid; + __u64 rtransid; + struct btrfs_ioctl_timespec stime; + struct btrfs_ioctl_timespec rtime; + __u64 flags; + __u64 reserved[16]; +}; + +struct btrfs_ioctl_get_subvol_info_args { + __u64 treeid; + char name[256]; + __u64 parent_id; + __u64 dirid; + __u64 generation; + __u64 flags; + __u8 uuid[16]; + __u8 parent_uuid[16]; + __u8 received_uuid[16]; + __u64 ctransid; + __u64 otransid; + __u64 stransid; + __u64 rtransid; + struct btrfs_ioctl_timespec ctime; + struct btrfs_ioctl_timespec otime; + struct btrfs_ioctl_timespec stime; + struct btrfs_ioctl_timespec rtime; + __u64 reserved[8]; +}; + +struct btrfs_ioctl_get_subvol_rootref_args { + __u64 min_treeid; + struct { + __u64 treeid; + __u64 dirid; + } rootref[255]; + __u8 num_items; + __u8 align[7]; +}; + +struct btrfs_ioctl_subvol_wait { + __u64 subvolid; + __u32 mode; + __u32 count; +}; + +struct btrfs_ioctl_timespec_32 { + __u64 sec; + __u32 nsec; +} __attribute__((packed)); + +struct btrfs_ioctl_received_subvol_args_32 { + char uuid[16]; + __u64 stransid; + __u64 rtransid; + struct btrfs_ioctl_timespec_32 stime; + struct btrfs_ioctl_timespec_32 rtime; + __u64 flags; + __u64 reserved[16]; +}; + +struct btrfs_uring_priv { + struct io_uring_cmd *cmd; + struct page **pages; + long unsigned int nr_pages; + struct kiocb iocb; + struct iovec *iov; + struct iov_iter iter; + struct extent_state *cached_state; + u64 count; + u64 start; + u64 lockend; + int err; + bool compressed; +}; + +struct io_btrfs_cmd { + struct btrfs_uring_priv *priv; +}; + +struct msg_msgseg { + struct msg_msgseg *next; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + unsigned int qlen; + unsigned int max_qlen; +}; + +struct crypto_attr_alg { + char name[128]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +enum { + CRYPTOA_UNSPEC = 0, + CRYPTOA_ALG = 1, + CRYPTOA_TYPE = 2, + __CRYPTOA_MAX = 3, +}; + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[16]; + u32 bytes; +}; + +struct blk_major_name { + struct blk_major_name *next; + int major; + char name[16]; + void (*probe)(dev_t); +}; + +struct io_ev_fd { + struct eventfd_ctx *cq_ev_fd; + unsigned int eventfd_async; + unsigned int last_cq_tail; + refcount_t refs; + atomic_t ops; + struct callback_head rcu; +}; + +enum { + IO_EVENTFD_OP_SIGNAL_BIT = 0, +}; + 
+struct io_uring_sync_cancel_reg { + __u64 addr; + __s32 fd; + __u32 flags; + struct __kernel_timespec timeout; + __u8 opcode; + __u8 pad[7]; + __u64 pad2[3]; +}; + +struct io_cancel { + struct file *file; + u64 addr; + u32 flags; + s32 fd; + u8 opcode; +}; + +struct fb_modelist { + struct list_head list; + struct fb_videomode mode; +}; + +struct iova_magazine; + +struct iova_cpu_rcache; + +struct iova_rcache { + spinlock_t lock; + unsigned int depot_size; + struct iova_magazine *depot; + struct iova_cpu_rcache *cpu_rcaches; + struct iova_domain *iovad; + struct delayed_work work; +}; + +struct iova_magazine { + union { + long unsigned int size; + struct iova_magazine *next; + }; + long unsigned int pfns[127]; +}; + +struct iova_cpu_rcache { + spinlock_t lock; + struct iova_magazine *loaded; + struct iova_magazine *prev; +}; + +typedef unsigned int __kernel_old_dev_t; + +enum { + LO_FLAGS_READ_ONLY = 1, + LO_FLAGS_AUTOCLEAR = 4, + LO_FLAGS_PARTSCAN = 8, + LO_FLAGS_DIRECT_IO = 16, +}; + +struct loop_info { + int lo_number; + __kernel_old_dev_t lo_device; + long unsigned int lo_inode; + __kernel_old_dev_t lo_rdevice; + int lo_offset; + int lo_encrypt_type; + int lo_encrypt_key_size; + int lo_flags; + char lo_name[64]; + unsigned char lo_encrypt_key[32]; + long unsigned int lo_init[2]; + char reserved[4]; +}; + +struct loop_info64 { + __u64 lo_device; + __u64 lo_inode; + __u64 lo_rdevice; + __u64 lo_offset; + __u64 lo_sizelimit; + __u32 lo_number; + __u32 lo_encrypt_type; + __u32 lo_encrypt_key_size; + __u32 lo_flags; + __u8 lo_file_name[64]; + __u8 lo_crypt_name[64]; + __u8 lo_encrypt_key[32]; + __u64 lo_init[2]; +}; + +struct loop_config { + __u32 fd; + __u32 block_size; + struct loop_info64 info; + __u64 __reserved[8]; +}; + +enum { + Lo_unbound = 0, + Lo_bound = 1, + Lo_rundown = 2, + Lo_deleting = 3, +}; + +struct loop_device { + int lo_number; + loff_t lo_offset; + loff_t lo_sizelimit; + int lo_flags; + char lo_file_name[64]; + struct file *lo_backing_file; + struct block_device *lo_device; + gfp_t old_gfp_mask; + spinlock_t lo_lock; + int lo_state; + spinlock_t lo_work_lock; + struct workqueue_struct *workqueue; + struct work_struct rootcg_work; + struct list_head rootcg_cmd_list; + struct list_head idle_worker_list; + struct rb_root worker_tree; + struct timer_list timer; + bool use_dio; + bool sysfs_inited; + struct request_queue *lo_queue; + struct blk_mq_tag_set tag_set; + struct gendisk *lo_disk; + struct mutex lo_mutex; + bool idr_visible; +}; + +struct loop_cmd { + struct list_head list_entry; + bool use_aio; + atomic_t ref; + long int ret; + struct kiocb iocb; + struct bio_vec *bvec; + struct cgroup_subsys_state *blkcg_css; + struct cgroup_subsys_state *memcg_css; +}; + +struct loop_worker { + struct rb_node rb_node; + struct work_struct work; + struct list_head cmd_list; + struct list_head idle_list; + struct loop_device *lo; + struct cgroup_subsys_state *blkcg_css; + long unsigned int last_ran_at; +}; + +struct input_mt_slot { + int abs[14]; + unsigned int frame; + unsigned int key; +}; + +struct input_mt { + int trkid; + int num_slots; + int slot; + unsigned int flags; + unsigned int frame; + int *red; + struct input_mt_slot slots[0]; +}; + +struct input_seq_state { + short unsigned int pos; + bool mutex_acquired; + int input_devices_state; +}; + +struct input_devres { + struct input_dev *input; +}; + +struct serial_info { + struct rb_node node; + sector_t start; + sector_t last; + sector_t _subtree_last; +}; + +struct raid1_info { + struct md_rdev *rdev; + sector_t 
head_position; + sector_t next_seq_sect; + sector_t seq_start; +}; + +struct pool_info { + struct mddev *mddev; + int raid_disks; +}; + +struct r1conf { + struct mddev *mddev; + struct raid1_info *mirrors; + int raid_disks; + int nonrot_disks; + spinlock_t device_lock; + struct list_head retry_list; + struct list_head bio_end_io_list; + struct bio_list pending_bio_list; + wait_queue_head_t wait_barrier; + spinlock_t resync_lock; + atomic_t nr_sync_pending; + atomic_t *nr_pending; + atomic_t *nr_waiting; + atomic_t *nr_queued; + atomic_t *barrier; + int array_frozen; + int fullsync; + int recovery_disabled; + struct pool_info *poolinfo; + mempool_t r1bio_pool; + mempool_t r1buf_pool; + struct bio_set bio_split; + struct page *tmppage; + struct md_thread *thread; + sector_t cluster_sync_low; + sector_t cluster_sync_high; +}; + +struct r1bio { + atomic_t remaining; + atomic_t behind_remaining; + sector_t sector; + int sectors; + long unsigned int state; + struct mddev *mddev; + struct bio *master_bio; + int read_disk; + struct list_head retry_list; + struct bio *behind_master_bio; + struct bio *bios[0]; +}; + +enum r1bio_state { + R1BIO_Uptodate = 0, + R1BIO_IsSync = 1, + R1BIO_Degraded = 2, + R1BIO_BehindIO = 3, + R1BIO_ReadError = 4, + R1BIO_Returned = 5, + R1BIO_MadeGood = 6, + R1BIO_WriteError = 7, + R1BIO_FailFast = 8, +}; + +struct resync_pages { + void *raid_bio; + struct page *pages[16]; +}; + +struct raid1_plug_cb { + struct blk_plug_cb cb; + struct bio_list pending; + unsigned int count; +}; + +struct read_balance_ctl { + sector_t closest_dist; + int closest_dist_disk; + int min_pending; + int min_pending_disk; + int sequential_disk; + int readable_disks; +}; + +enum { + BPF_F_RECOMPUTE_CSUM = 1, + BPF_F_INVALIDATE_HASH = 2, +}; + +enum { + BPF_F_HDR_FIELD_MASK = 15, +}; + +enum { + BPF_F_PSEUDO_HDR = 16, + BPF_F_MARK_MANGLED_0 = 32, + BPF_F_MARK_ENFORCE = 64, +}; + +enum { + BPF_F_TUNINFO_IPV6 = 1, +}; + +enum { + BPF_F_ZERO_CSUM_TX = 2, + BPF_F_DONT_FRAGMENT = 4, + BPF_F_SEQ_NUMBER = 8, + BPF_F_NO_TUNNEL_KEY = 16, +}; + +enum { + BPF_F_TUNINFO_FLAGS = 16, +}; + +enum { + BPF_CSUM_LEVEL_QUERY = 0, + BPF_CSUM_LEVEL_INC = 1, + BPF_CSUM_LEVEL_DEC = 2, + BPF_CSUM_LEVEL_RESET = 3, +}; + +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = 1, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4, + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8, + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16, + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32, + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64, + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128, + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256, +}; + +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; + +enum { + BPF_SK_LOOKUP_F_REPLACE = 1, + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2, +}; + +enum bpf_adj_room_mode { + BPF_ADJ_ROOM_NET = 0, + BPF_ADJ_ROOM_MAC = 1, +}; + +enum bpf_hdr_start_off { + BPF_HDR_START_MAC = 0, + BPF_HDR_START_NET = 1, +}; + +enum { + BPF_SKB_TSTAMP_UNSPEC = 0, + BPF_SKB_TSTAMP_DELIVERY_MONO = 1, + BPF_SKB_CLOCK_REALTIME = 0, + BPF_SKB_CLOCK_MONOTONIC = 1, + BPF_SKB_CLOCK_TAI = 2, +}; + +struct bpf_tunnel_key { + __u32 tunnel_id; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; + __u8 tunnel_tos; + __u8 tunnel_ttl; + union { + __u16 tunnel_ext; + __be16 tunnel_flags; + }; + __u32 tunnel_label; + union { + __u32 local_ipv4; + __u32 local_ipv6[4]; + }; +}; + +struct bpf_tcp_sock { + __u32 snd_cwnd; + __u32 srtt_us; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 
rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u64 bytes_received; + __u64 bytes_acked; + __u32 dsack_dups; + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + +struct bpf_xdp_sock { + __u32 queue_id; +}; + +enum { + TCP_BPF_IW = 1001, + TCP_BPF_SNDCWND_CLAMP = 1002, + TCP_BPF_DELACK_MAX = 1003, + TCP_BPF_RTO_MIN = 1004, + TCP_BPF_SYN = 1005, + TCP_BPF_SYN_IP = 1006, + TCP_BPF_SYN_MAC = 1007, + TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, +}; + +enum { + BPF_LOAD_HDR_OPT_TCP_SYN = 1, +}; + +enum { + BPF_FIB_LOOKUP_DIRECT = 1, + BPF_FIB_LOOKUP_OUTPUT = 2, + BPF_FIB_LOOKUP_SKIP_NEIGH = 4, + BPF_FIB_LOOKUP_TBID = 8, + BPF_FIB_LOOKUP_SRC = 16, + BPF_FIB_LOOKUP_MARK = 32, +}; + +enum { + BPF_FIB_LKUP_RET_SUCCESS = 0, + BPF_FIB_LKUP_RET_BLACKHOLE = 1, + BPF_FIB_LKUP_RET_UNREACHABLE = 2, + BPF_FIB_LKUP_RET_PROHIBIT = 3, + BPF_FIB_LKUP_RET_NOT_FWDED = 4, + BPF_FIB_LKUP_RET_FWD_DISABLED = 5, + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, + BPF_FIB_LKUP_RET_NO_NEIGH = 7, + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, + BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9, +}; + +struct bpf_fib_lookup { + __u8 family; + __u8 l4_protocol; + __be16 sport; + __be16 dport; + union { + __u16 tot_len; + __u16 mtu_result; + }; + __u32 ifindex; + union { + __u8 tos; + __be32 flowinfo; + __u32 rt_metric; + }; + union { + __be32 ipv4_src; + __u32 ipv6_src[4]; + }; + union { + __be32 ipv4_dst; + __u32 ipv6_dst[4]; + }; + union { + struct { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + }; + __u32 tbid; + }; + union { + struct { + __u32 mark; + }; + struct { + __u8 smac[6]; + __u8 dmac[6]; + }; + }; +}; + +struct bpf_redir_neigh { + __u32 nh_family; + union { + __be32 ipv4_nh; + __u32 ipv6_nh[4]; + }; +}; + +enum bpf_check_mtu_flags { + BPF_MTU_CHK_SEGS = 1, +}; + +enum bpf_check_mtu_ret { + BPF_MTU_CHK_RET_SUCCESS = 0, + BPF_MTU_CHK_RET_FRAG_NEEDED = 1, + BPF_MTU_CHK_RET_SEGS_TOOBIG = 2, +}; + +struct compat_sock_fprog { + u16 len; + compat_uptr_t filter; +}; + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct mptcp_sock {}; + +struct bpf_tcp_req_attrs { + u32 rcv_tsval; + u32 rcv_tsecr; + u16 mss; + u8 rcv_wscale; + u8 snd_wscale; + u8 ecn_ok; + u8 wscale_ok; + u8 sack_ok; + u8 tstamp_ok; + u8 usec_ts_ok; + u8 reserved[3]; +}; + +struct _strp_msg { + struct strp_msg strp; + int accum_len; +}; + +struct tls_msg { + u8 control; +}; + +struct sk_skb_cb { + unsigned char data[20]; + unsigned char pad[4]; + struct _strp_msg strp; + struct tls_msg tls; + u64 temp_reg; +}; + +struct xdp_sock { + struct sock sk; + long: 64; + long: 64; + long: 64; + struct xsk_queue *rx; + struct net_device *dev; + struct xdp_umem *umem; + struct list_head flush_node; + struct xsk_buff_pool *pool; + u16 queue_id; + bool zc; + bool sg; + enum { + XSK_READY = 0, + XSK_BOUND = 1, + XSK_UNBOUND = 2, + } state; + long: 64; + struct xsk_queue *tx; + struct list_head tx_list; + u32 tx_budget_spent; + spinlock_t rx_lock; + u64 rx_dropped; + u64 
rx_queue_full; + struct sk_buff *skb; + struct list_head map_list; + spinlock_t map_list_lock; + struct mutex mutex; + struct xsk_queue *fq_tmp; + struct xsk_queue *cq_tmp; +}; + +struct tls_strparser { + struct sock *sk; + u32 mark: 8; + u32 stopped: 1; + u32 copy_mode: 1; + u32 mixed_decrypted: 1; + bool msg_ready; + struct strp_msg stm; + struct sk_buff *anchor; + struct work_struct work; +}; + +struct tls_sw_context_rx { + struct crypto_aead *aead_recv; + struct crypto_wait async_wait; + struct sk_buff_head rx_list; + void (*saved_data_ready)(struct sock *); + u8 reader_present; + u8 async_capable: 1; + u8 zc_capable: 1; + u8 reader_contended: 1; + struct tls_strparser strp; + atomic_t decrypt_pending; + struct sk_buff_head async_hold; + struct wait_queue_head wq; +}; + +typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64); + +typedef u64 (*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, void *, u32); + +typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_sk_fullsock)(struct sock *); + +typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum); + +typedef u64 (*btf_bpf_csum_update)(struct sk_buff *, __wsum); + +typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64); + +enum { + BPF_F_NEIGH = 65536, + BPF_F_PEER = 131072, + BPF_F_NEXTHOP = 262144, +}; + +typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_redirect)(u32, u64); + +typedef u64 (*btf_bpf_redirect_peer)(u32, u64); + +typedef u64 (*btf_bpf_redirect_neigh)(u32, struct bpf_redir_neigh *, int, u64); + +typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_get_cgroup_classid_curr)(); + +typedef u64 (*btf_bpf_skb_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_hash_recalc)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash)(struct 
sk_buff *, u32); + +typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16); + +typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64); + +typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32); + +typedef u64 (*btf_sk_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_xdp_get_buff_len)(struct xdp_buff *); + +typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_load_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_store_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_redirect)(u32, u64); + +typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct sk_buff *, u8 *, u32); + +typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct sk_buff *, const struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32); + +typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32); + +typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *); + +typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *); + +typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int); + +typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_ptr_cookie)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sk_msg)(struct sk_msg *); + +typedef u64 (*btf_bpf_get_socket_uid)(struct sk_buff *); + +typedef u64 (*btf_bpf_sk_setsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_unlocked_sk_setsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_unlocked_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct 
bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int); + +typedef u64 (*btf_bpf_bind)(struct bpf_sock_addr_kern *, struct sockaddr *, int); + +typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_check_mtu)(struct sk_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_xdp_check_mtu)(struct xdp_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_release)(struct sock *); + +typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_listener_sock)(struct sock *); + +typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *); + +typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64); + +typedef u64 (*btf_bpf_sock_ops_load_hdr_opt)(struct bpf_sock_ops_kern *, void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_store_hdr_opt)(struct bpf_sock_ops_kern *, const void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_reserve_hdr_opt)(struct bpf_sock_ops_kern *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tstamp)(struct sk_buff *, u64, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *, struct tcphdr *); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *); + +typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, u32, void *, u32); + +typedef u64 
(*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_sk_lookup_assign)(struct bpf_sk_lookup_kern *, struct sock *, u64); + +typedef u64 (*btf_bpf_skc_to_tcp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_timewait_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_request_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_udp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_unix_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_mptcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_sock_from_file)(struct file *); + +struct nf_log_buf { + unsigned int count; + char buf[1020]; +}; + +enum nfnl_cthelper_msg_types { + NFNL_MSG_CTHELPER_NEW = 0, + NFNL_MSG_CTHELPER_GET = 1, + NFNL_MSG_CTHELPER_DEL = 2, + NFNL_MSG_CTHELPER_MAX = 3, +}; + +enum nfnl_cthelper_type { + NFCTH_UNSPEC = 0, + NFCTH_NAME = 1, + NFCTH_TUPLE = 2, + NFCTH_QUEUE_NUM = 3, + NFCTH_POLICY = 4, + NFCTH_PRIV_DATA_LEN = 5, + NFCTH_STATUS = 6, + __NFCTH_MAX = 7, +}; + +enum nfnl_cthelper_policy_type { + NFCTH_POLICY_SET_UNSPEC = 0, + NFCTH_POLICY_SET_NUM = 1, + NFCTH_POLICY_SET = 2, + NFCTH_POLICY_SET1 = 2, + NFCTH_POLICY_SET2 = 3, + NFCTH_POLICY_SET3 = 4, + NFCTH_POLICY_SET4 = 5, + __NFCTH_POLICY_SET_MAX = 6, +}; + +enum nfnl_cthelper_pol_type { + NFCTH_POLICY_UNSPEC = 0, + NFCTH_POLICY_NAME = 1, + NFCTH_POLICY_EXPECT_MAX = 2, + NFCTH_POLICY_EXPECT_TIMEOUT = 3, + __NFCTH_POLICY_MAX = 4, +}; + +enum nfnl_cthelper_tuple_type { + NFCTH_TUPLE_UNSPEC = 0, + NFCTH_TUPLE_L3PROTONUM = 1, + NFCTH_TUPLE_L4PROTONUM = 2, + __NFCTH_TUPLE_MAX = 3, +}; + +struct nfnl_cthelper { + struct list_head list; + struct nf_conntrack_helper helper; +}; + +struct flow_block_cb; + +struct flow_block_indr { + struct list_head list; + struct net_device *dev; + struct Qdisc *sch; + enum flow_block_binder_type binder_type; + void *data; + void *cb_priv; + void (*cleanup)(struct flow_block_cb *); +}; + +struct flow_block_cb { + struct list_head driver_list; + struct list_head list; + flow_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *); + struct flow_block_indr indr; + unsigned int refcnt; +}; + +struct nft_offload_ethertype { + __be16 value; + __be16 mask; +}; + +enum tcp_fastopen_client_fail { + TFO_STATUS_UNSPEC = 0, + TFO_COOKIE_UNAVAILABLE = 1, + TFO_DATA_NOT_ACKED = 2, + TFO_SYN_RETRANSMITTED = 3, +}; + +enum nf_br_hook_priorities { + NF_BR_PRI_FIRST = -2147483648, + NF_BR_PRI_NAT_DST_BRIDGED = -300, + NF_BR_PRI_FILTER_BRIDGED = -200, + NF_BR_PRI_BRNF = 0, + NF_BR_PRI_NAT_DST_OTHER = 100, + NF_BR_PRI_FILTER_OTHER = 200, + NF_BR_PRI_NAT_SRC = 300, + NF_BR_PRI_LAST = 2147483647, +}; + +struct brnf_net { + bool enabled; + struct ctl_table_header *ctl_hdr; + int call_iptables; + int call_ip6tables; + int call_arptables; + int filter_vlan_tagged; + int filter_pppoe_tagged; + int pass_vlan_indev; +}; + +struct brnf_frag_data { + local_lock_t bh_lock; + char mac[22]; + u8 encap_size; + u8 size; + u16 vlan_tci; + __be16 vlan_proto; +}; + +struct cpuinfo_32bit { + u32 reg_id_dfr0; + u32 reg_id_dfr1; + u32 reg_id_isar0; + u32 reg_id_isar1; + u32 reg_id_isar2; + u32 reg_id_isar3; + u32 reg_id_isar4; + u32 reg_id_isar5; + u32 reg_id_isar6; + u32 reg_id_mmfr0; + u32 reg_id_mmfr1; + u32 reg_id_mmfr2; + u32 reg_id_mmfr3; + u32 reg_id_mmfr4; + u32 reg_id_mmfr5; + u32 reg_id_pfr0; + u32 reg_id_pfr1; + u32 reg_id_pfr2; + u32 reg_mvfr0; + u32 reg_mvfr1; + u32 reg_mvfr2; +}; + +struct 
cpuinfo_arm64 { + struct kobject kobj; + u64 reg_ctr; + u64 reg_cntfrq; + u64 reg_dczid; + u64 reg_midr; + u64 reg_revidr; + u64 reg_gmid; + u64 reg_smidr; + u64 reg_mpamidr; + u64 reg_id_aa64dfr0; + u64 reg_id_aa64dfr1; + u64 reg_id_aa64isar0; + u64 reg_id_aa64isar1; + u64 reg_id_aa64isar2; + u64 reg_id_aa64isar3; + u64 reg_id_aa64mmfr0; + u64 reg_id_aa64mmfr1; + u64 reg_id_aa64mmfr2; + u64 reg_id_aa64mmfr3; + u64 reg_id_aa64mmfr4; + u64 reg_id_aa64pfr0; + u64 reg_id_aa64pfr1; + u64 reg_id_aa64pfr2; + u64 reg_id_aa64zfr0; + u64 reg_id_aa64smfr0; + u64 reg_id_aa64fpfr0; + struct cpuinfo_32bit aarch32; +}; + +enum trap_behaviour { + BEHAVE_HANDLE_LOCALLY = 0, + BEHAVE_FORWARD_READ = 1, + BEHAVE_FORWARD_WRITE = 2, + BEHAVE_FORWARD_RW = 3, + BEHAVE_FORWARD_IN_HOST_EL0 = 4, +}; + +struct trap_bits { + const enum vcpu_sysreg index; + const enum trap_behaviour behaviour; + const u64 value; + const u64 mask; +}; + +enum cgt_group_id { + __RESERVED__ = 0, + CGT_HCR_TID1 = 1, + CGT_HCR_TID2 = 2, + CGT_HCR_TID3 = 3, + CGT_HCR_IMO = 4, + CGT_HCR_FMO = 5, + CGT_HCR_TIDCP = 6, + CGT_HCR_TACR = 7, + CGT_HCR_TSW = 8, + CGT_HCR_TPC = 9, + CGT_HCR_TPU = 10, + CGT_HCR_TTLB = 11, + CGT_HCR_TVM = 12, + CGT_HCR_TDZ = 13, + CGT_HCR_TRVM = 14, + CGT_HCR_TLOR = 15, + CGT_HCR_TERR = 16, + CGT_HCR_APK = 17, + CGT_HCR_NV = 18, + CGT_HCR_NV_nNV2 = 19, + CGT_HCR_NV1_nNV2 = 20, + CGT_HCR_AT = 21, + CGT_HCR_nFIEN = 22, + CGT_HCR_TID4 = 23, + CGT_HCR_TICAB = 24, + CGT_HCR_TOCU = 25, + CGT_HCR_ENSCXT = 26, + CGT_HCR_TTLBIS = 27, + CGT_HCR_TTLBOS = 28, + CGT_MDCR_TPMCR = 29, + CGT_MDCR_TPM = 30, + CGT_MDCR_TDE = 31, + CGT_MDCR_TDA = 32, + CGT_MDCR_TDOSA = 33, + CGT_MDCR_TDRA = 34, + CGT_MDCR_E2PB = 35, + CGT_MDCR_TPMS = 36, + CGT_MDCR_TTRF = 37, + CGT_MDCR_E2TB = 38, + CGT_MDCR_TDCC = 39, + CGT_CPTR_TAM = 40, + CGT_CPTR_TCPAC = 41, + CGT_HCRX_EnFPM = 42, + CGT_HCRX_TCR2En = 43, + CGT_ICH_HCR_TC = 44, + CGT_ICH_HCR_TALL0 = 45, + CGT_ICH_HCR_TALL1 = 46, + CGT_ICH_HCR_TDIR = 47, + __MULTIPLE_CONTROL_BITS__ = 48, + CGT_HCR_IMO_FMO_ICH_HCR_TC = 48, + CGT_HCR_TID2_TID4 = 49, + CGT_HCR_TTLB_TTLBIS = 50, + CGT_HCR_TTLB_TTLBOS = 51, + CGT_HCR_TVM_TRVM = 52, + CGT_HCR_TVM_TRVM_HCRX_TCR2En = 53, + CGT_HCR_TPU_TICAB = 54, + CGT_HCR_TPU_TOCU = 55, + CGT_HCR_NV1_nNV2_ENSCXT = 56, + CGT_MDCR_TPM_TPMCR = 57, + CGT_MDCR_TPM_HPMN = 58, + CGT_MDCR_TDE_TDA = 59, + CGT_MDCR_TDE_TDOSA = 60, + CGT_MDCR_TDE_TDRA = 61, + CGT_MDCR_TDCC_TDE_TDA = 62, + CGT_ICH_HCR_TC_TDIR = 63, + __COMPLEX_CONDITIONS__ = 64, + CGT_CNTHCTL_EL1PCTEN = 64, + CGT_CNTHCTL_EL1PTEN = 65, + CGT_CPTR_TTA = 66, + CGT_MDCR_HPMN = 67, + __NR_CGT_GROUP_IDS__ = 68, +}; + +typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *); + +union trap_config { + u64 val; + struct { + long unsigned int cgt: 10; + long unsigned int fgt: 4; + long unsigned int bit: 6; + long unsigned int pol: 1; + long unsigned int fgf: 5; + long unsigned int sri: 10; + long unsigned int unused: 27; + long unsigned int mbz: 1; + }; +}; + +struct encoding_to_trap_config { + const u32 encoding; + const u32 end; + const union trap_config tc; + const unsigned int line; +}; + +enum fg_filter_id { + __NO_FGF__ = 0, + HCRX_FGTnXS = 1, + __NR_FG_FILTER_IDS__ = 2, +}; + +enum cpuhp_smt_control { + CPU_SMT_ENABLED = 0, + CPU_SMT_DISABLED = 1, + CPU_SMT_FORCE_DISABLED = 2, + CPU_SMT_NOT_SUPPORTED = 3, + CPU_SMT_NOT_IMPLEMENTED = 4, +}; + +struct trace_event_raw_cpuhp_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct 
trace_event_raw_cpuhp_multi_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_exit { + struct trace_entry ent; + unsigned int cpu; + int state; + int idx; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_cpuhp_enter {}; + +struct trace_event_data_offsets_cpuhp_multi_enter {}; + +struct trace_event_data_offsets_cpuhp_exit {}; + +typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int)); + +typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, int (*)(unsigned int, struct hlist_node *), struct hlist_node *); + +typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int); + +struct cpuhp_cpu_state { + enum cpuhp_state state; + enum cpuhp_state target; + enum cpuhp_state fail; + struct task_struct *thread; + bool should_run; + bool rollback; + bool single; + bool bringup; + struct hlist_node *node; + struct hlist_node *last; + enum cpuhp_state cb_state; + int result; + atomic_t ap_sync_state; + struct completion done_up; + struct completion done_down; +}; + +struct cpuhp_step { + const char *name; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } startup; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } teardown; + struct hlist_head list; + bool cant_stop; + bool multi_instance; +}; + +enum cpuhp_sync_state { + SYNC_STATE_DEAD = 0, + SYNC_STATE_KICKED = 1, + SYNC_STATE_SHOULD_DIE = 2, + SYNC_STATE_ALIVE = 3, + SYNC_STATE_SHOULD_ONLINE = 4, + SYNC_STATE_ONLINE = 5, +}; + +struct cpu_down_work { + unsigned int cpu; + enum cpuhp_state target; +}; + +enum cpu_mitigations { + CPU_MITIGATIONS_OFF = 0, + CPU_MITIGATIONS_AUTO = 1, + CPU_MITIGATIONS_AUTO_NOSMT = 2, +}; + +struct mcs_spinlock { + struct mcs_spinlock *next; + int locked; + int count; +}; + +struct qnode { + struct mcs_spinlock mcs; +}; + +struct process_timer { + struct timer_list timer; + struct task_struct *task; +}; + +struct posix_clock_desc { + struct file *fp; + struct posix_clock *clk; +}; + +struct fmeter { + int cnt; + int val; + time64_t time; + spinlock_t lock; +}; + +enum prs_errcode { + PERR_NONE = 0, + PERR_INVCPUS = 1, + PERR_INVPARENT = 2, + PERR_NOTPART = 3, + PERR_NOTEXCL = 4, + PERR_NOCPUS = 5, + PERR_HOTPLUG = 6, + PERR_CPUSEMPTY = 7, + PERR_HKEEPING = 8, + PERR_ACCESS = 9, +}; + +typedef enum { + CS_ONLINE = 0, + CS_CPU_EXCLUSIVE = 1, + CS_MEM_EXCLUSIVE = 2, + CS_MEM_HARDWALL = 3, + CS_MEMORY_MIGRATE = 4, + CS_SCHED_LOAD_BALANCE = 5, + CS_SPREAD_PAGE = 6, + CS_SPREAD_SLAB = 7, +} cpuset_flagbits_t; + +typedef enum { + FILE_MEMORY_MIGRATE = 0, + FILE_CPULIST = 1, + FILE_MEMLIST = 2, + FILE_EFFECTIVE_CPULIST = 3, + FILE_EFFECTIVE_MEMLIST = 4, + FILE_SUBPARTS_CPULIST = 5, + FILE_EXCLUSIVE_CPULIST = 6, + FILE_EFFECTIVE_XCPULIST = 7, + FILE_ISOLATED_CPULIST = 8, + FILE_CPU_EXCLUSIVE = 9, + FILE_MEM_EXCLUSIVE = 10, + FILE_MEM_HARDWALL = 11, + FILE_SCHED_LOAD_BALANCE = 12, + FILE_PARTITION_ROOT = 13, + FILE_SCHED_RELAX_DOMAIN_LEVEL = 14, + FILE_MEMORY_PRESSURE_ENABLED = 15, + FILE_MEMORY_PRESSURE = 16, + FILE_SPREAD_PAGE = 17, + FILE_SPREAD_SLAB = 18, +} cpuset_filetype_t; + +struct cpuset { + struct cgroup_subsys_state css; + long unsigned int flags; + cpumask_var_t cpus_allowed; + nodemask_t mems_allowed; + cpumask_var_t effective_cpus; + nodemask_t effective_mems; + cpumask_var_t effective_xcpus; + cpumask_var_t exclusive_cpus; + nodemask_t 
old_mems_allowed; + struct fmeter fmeter; + int attach_in_progress; + int relax_domain_level; + int nr_subparts; + int partition_root_state; + int nr_deadline_tasks; + int nr_migrate_dl_tasks; + u64 sum_migrate_dl_bw; + enum prs_errcode prs_err; + struct cgroup_file partition_file; + struct list_head remote_sibling; + struct uf_node node; +}; + +struct tmpmasks { + cpumask_var_t addmask; + cpumask_var_t delmask; + cpumask_var_t new_cpus; +}; + +enum partition_cmd { + partcmd_enable = 0, + partcmd_enablei = 1, + partcmd_disable = 2, + partcmd_update = 3, + partcmd_invalidate = 4, +}; + +struct cpuset_migrate_mm_work { + struct work_struct work; + struct mm_struct *mm; + nodemask_t from; + nodemask_t to; +}; + +struct trace_event_raw_cpu { + struct trace_entry ent; + u32 state; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_idle_miss { + struct trace_entry ent; + u32 cpu_id; + u32 state; + bool below; + char __data[0]; +}; + +struct trace_event_raw_powernv_throttle { + struct trace_entry ent; + int chip_id; + u32 __data_loc_reason; + int pmax; + char __data[0]; +}; + +struct trace_event_raw_pstate_sample { + struct trace_entry ent; + u32 core_busy; + u32 scaled_busy; + u32 from; + u32 to; + u64 mperf; + u64 aperf; + u64 tsc; + u32 freq; + u32 io_boost; + char __data[0]; +}; + +struct trace_event_raw_cpu_frequency_limits { + struct trace_entry ent; + u32 min_freq; + u32 max_freq; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_start { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u32 __data_loc_parent; + u32 __data_loc_pm_ops; + int event; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_end { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + int error; + char __data[0]; +}; + +struct trace_event_raw_suspend_resume { + struct trace_entry ent; + const char *action; + int val; + bool start; + char __data[0]; +}; + +struct trace_event_raw_wakeup_source { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + char __data[0]; +}; + +struct trace_event_raw_clock { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_power_domain { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_latency_qos_request { + struct trace_entry ent; + s32 value; + char __data[0]; +}; + +struct trace_event_raw_pm_qos_update { + struct trace_entry ent; + enum pm_qos_req_action action; + int prev_value; + int curr_value; + char __data[0]; +}; + +struct trace_event_raw_dev_pm_qos_request { + struct trace_entry ent; + u32 __data_loc_name; + enum dev_pm_qos_req_type type; + s32 new_value; + char __data[0]; +}; + +struct trace_event_raw_guest_halt_poll_ns { + struct trace_entry ent; + bool grow; + unsigned int new; + unsigned int old; + char __data[0]; +}; + +struct trace_event_data_offsets_cpu {}; + +struct trace_event_data_offsets_cpu_idle_miss {}; + +struct trace_event_data_offsets_powernv_throttle { + u32 reason; + const void *reason_ptr_; +}; + +struct trace_event_data_offsets_pstate_sample {}; + +struct trace_event_data_offsets_cpu_frequency_limits {}; + +struct trace_event_data_offsets_device_pm_callback_start { + u32 device; + const void *device_ptr_; + u32 driver; + const void *driver_ptr_; + u32 parent; + const void *parent_ptr_; + u32 pm_ops; + const void *pm_ops_ptr_; +}; + +struct trace_event_data_offsets_device_pm_callback_end { + 
u32 device; + const void *device_ptr_; + u32 driver; + const void *driver_ptr_; +}; + +struct trace_event_data_offsets_suspend_resume {}; + +struct trace_event_data_offsets_wakeup_source { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clock { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_power_domain { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cpu_latency_qos_request {}; + +struct trace_event_data_offsets_pm_qos_update {}; + +struct trace_event_data_offsets_dev_pm_qos_request { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_guest_halt_poll_ns {}; + +typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_idle_miss)(void *, unsigned int, unsigned int, bool); + +typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int); + +typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32); + +typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *); + +typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int); + +typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int); + +typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool); + +typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_pm_qos_add_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_remove_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_guest_halt_poll_ns)(void *, bool, unsigned int, unsigned int); + +typedef u64 (*btf_bpf_task_storage_get_recur)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_get)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_delete_recur)(struct bpf_map *, struct task_struct *); + +typedef u64 (*btf_bpf_task_storage_delete)(struct bpf_map *, struct task_struct *); + +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_sw_context = 1, + perf_nr_task_contexts = 2, +}; + +struct bp_slots_histogram { + atomic_t *count; +}; + +struct bp_cpuinfo { + unsigned int cpu_pinned; + struct bp_slots_histogram tsk_pinned; +}; + +struct reserve_mem_table { + char name[16]; + phys_addr_t start; + 
phys_addr_t size; +}; + +struct trace_event_raw_test_pages_isolated { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int fin_pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_test_pages_isolated {}; + +typedef void (*btf_trace_test_pages_isolated)(void *, long unsigned int, long unsigned int, long unsigned int); + +struct wb_writeback_work { + long int nr_pages; + struct super_block *sb; + enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages: 1; + unsigned int for_kupdate: 1; + unsigned int range_cyclic: 1; + unsigned int for_background: 1; + unsigned int for_sync: 1; + unsigned int auto_free: 1; + enum wb_reason reason; + struct list_head list; + struct wb_completion *done; +}; + +struct trace_event_raw_writeback_folio_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_writeback_dirty_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_inode_foreign_history { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t cgroup_ino; + unsigned int history; + char __data[0]; +}; + +struct trace_event_raw_inode_switch_wbs { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t old_cgroup_ino; + ino_t new_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_track_foreign_dirty { + struct trace_entry ent; + char name[32]; + u64 bdi_id; + ino_t ino; + unsigned int memcg_id; + ino_t cgroup_ino; + ino_t page_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_flush_foreign { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + unsigned int frn_bdi_id; + unsigned int frn_memcg_id; + char __data[0]; +}; + +struct trace_event_raw_writeback_write_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + int sync_mode; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_work_class { + struct trace_entry ent; + char name[32]; + long int nr_pages; + dev_t sb_dev; + int sync_mode; + int for_kupdate; + int range_cyclic; + int for_background; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_pages_written { + struct trace_entry ent; + long int pages; + char __data[0]; +}; + +struct trace_event_raw_writeback_class { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_bdi_register { + struct trace_entry ent; + char name[32]; + char __data[0]; +}; + +struct trace_event_raw_wbc_class { + struct trace_entry ent; + char name[32]; + long int nr_to_write; + long int pages_skipped; + int sync_mode; + int for_kupdate; + int for_background; + int for_reclaim; + int range_cyclic; + long int range_start; + long int range_end; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_queue_io { + struct trace_entry ent; + char name[32]; + long unsigned int older; + long int age; + int moved; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_global_dirty_state { + struct trace_entry ent; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int background_thresh; + long unsigned int dirty_thresh; + long unsigned int dirty_limit; + long unsigned int nr_dirtied; + long unsigned int nr_written; + char __data[0]; +}; + +struct trace_event_raw_bdi_dirty_ratelimit { + 
struct trace_entry ent; + char bdi[32]; + long unsigned int write_bw; + long unsigned int avg_write_bw; + long unsigned int dirty_rate; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + long unsigned int balanced_dirty_ratelimit; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_balance_dirty_pages { + struct trace_entry ent; + char bdi[32]; + long unsigned int limit; + long unsigned int setpoint; + long unsigned int dirty; + long unsigned int bdi_setpoint; + long unsigned int bdi_dirty; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + unsigned int dirtied; + unsigned int dirtied_pause; + long unsigned int paused; + long int pause; + long unsigned int period; + long int think; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_sb_inodes_requeue { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_single_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + long unsigned int writeback_index; + long int nr_to_write; + long unsigned int wrote; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_inode_template { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int state; + __u16 mode; + long unsigned int dirtied_when; + char __data[0]; +}; + +struct trace_event_data_offsets_writeback_folio_template {}; + +struct trace_event_data_offsets_writeback_dirty_inode_template {}; + +struct trace_event_data_offsets_inode_foreign_history {}; + +struct trace_event_data_offsets_inode_switch_wbs {}; + +struct trace_event_data_offsets_track_foreign_dirty {}; + +struct trace_event_data_offsets_flush_foreign {}; + +struct trace_event_data_offsets_writeback_write_inode_template {}; + +struct trace_event_data_offsets_writeback_work_class {}; + +struct trace_event_data_offsets_writeback_pages_written {}; + +struct trace_event_data_offsets_writeback_class {}; + +struct trace_event_data_offsets_writeback_bdi_register {}; + +struct trace_event_data_offsets_wbc_class {}; + +struct trace_event_data_offsets_writeback_queue_io {}; + +struct trace_event_data_offsets_global_dirty_state {}; + +struct trace_event_data_offsets_bdi_dirty_ratelimit {}; + +struct trace_event_data_offsets_balance_dirty_pages {}; + +struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; + +struct trace_event_data_offsets_writeback_single_inode_template {}; + +struct trace_event_data_offsets_writeback_inode_template {}; + +typedef void (*btf_trace_writeback_dirty_folio)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_folio_wait_writeback)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, struct writeback_control *, unsigned int); + +typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, struct bdi_writeback *); + +typedef void (*btf_trace_track_foreign_dirty)(void *, struct folio *, struct bdi_writeback *); + +typedef void (*btf_trace_flush_foreign)(void *, struct bdi_writeback *, unsigned 
int, unsigned int); + +typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_queue)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_pages_written)(void *, long int); + +typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *); + +typedef void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *); + +typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, struct backing_dev_info *); + +typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, struct wb_writeback_work *, long unsigned int, int); + +typedef void (*btf_trace_global_dirty_state)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long int, long unsigned int); + +typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *); + +typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *); + +typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *); + +typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *); + +typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *); + +typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *); + +struct inode_switch_wbs_context { + struct rcu_work work; + struct bdi_writeback *new_wb; + struct inode *inodes[0]; +}; + +struct page_region { + __u64 start; + __u64 end; + __u64 categories; +}; + +struct pm_scan_arg { + __u64 size; + __u64 flags; + __u64 start; + __u64 end; + __u64 walk_end; + __u64 vec; + __u64 vec_len; + __u64 max_pages; + __u64 category_inverted; + __u64 category_mask; + __u64 category_anyof_mask; + __u64 return_mask; +}; + +enum procmap_query_flags { + PROCMAP_QUERY_VMA_READABLE = 1, + PROCMAP_QUERY_VMA_WRITABLE = 2, + PROCMAP_QUERY_VMA_EXECUTABLE = 4, + PROCMAP_QUERY_VMA_SHARED = 8, + PROCMAP_QUERY_COVERING_OR_NEXT_VMA = 16, + PROCMAP_QUERY_FILE_BACKED_VMA = 32, +}; + +struct procmap_query { + __u64 size; + __u64 query_flags; + __u64 query_addr; + __u64 vma_start; + __u64 vma_end; + __u64 vma_flags; + __u64 vma_page_size; + __u64 vma_offset; + __u64 inode; + __u32 dev_major; + __u32 dev_minor; + __u32 vma_name_size; + __u32 build_id_size; + __u64 vma_name_addr; + __u64 build_id_addr; +}; + +struct proc_maps_private { + struct inode 
*inode; + struct task_struct *task; + struct mm_struct *mm; + struct vma_iterator iter; +}; + +struct mem_size_stats { + long unsigned int resident; + long unsigned int shared_clean; + long unsigned int shared_dirty; + long unsigned int private_clean; + long unsigned int private_dirty; + long unsigned int referenced; + long unsigned int anonymous; + long unsigned int lazyfree; + long unsigned int anonymous_thp; + long unsigned int shmem_thp; + long unsigned int file_thp; + long unsigned int swap; + long unsigned int shared_hugetlb; + long unsigned int private_hugetlb; + long unsigned int ksm; + u64 pss; + u64 pss_anon; + u64 pss_file; + u64 pss_shmem; + u64 pss_dirty; + u64 pss_locked; + u64 swap_pss; +}; + +enum clear_refs_types { + CLEAR_REFS_ALL = 1, + CLEAR_REFS_ANON = 2, + CLEAR_REFS_MAPPED = 3, + CLEAR_REFS_SOFT_DIRTY = 4, + CLEAR_REFS_MM_HIWATER_RSS = 5, + CLEAR_REFS_LAST = 6, +}; + +struct clear_refs_private { + enum clear_refs_types type; +}; + +typedef struct { + u64 pme; +} pagemap_entry_t; + +struct pagemapread { + int pos; + int len; + pagemap_entry_t *buffer; + bool show_pfn; +}; + +struct pagemap_scan_private { + struct pm_scan_arg arg; + long unsigned int masks_of_interest; + long unsigned int cur_vma_category; + struct page_region *vec_buf; + long unsigned int vec_buf_len; + long unsigned int vec_buf_index; + long unsigned int found_pages; + struct page_region *vec_out; +}; + +typedef struct { + __le32 *p; + __le32 key; + struct buffer_head *bh; +} Indirect; + +struct jbd2_journal_revoke_header_s { + journal_header_t r_header; + __be32 r_count; +}; + +typedef struct jbd2_journal_revoke_header_s jbd2_journal_revoke_header_t; + +struct jbd2_revoke_table_s { + int hash_size; + int hash_shift; + struct list_head *hash_table; +}; + +struct jbd2_revoke_record_s { + struct list_head hash; + tid_t sequence; + long long unsigned int blocknr; +}; + +struct fuse_kstatfs { + uint64_t blocks; + uint64_t bfree; + uint64_t bavail; + uint64_t files; + uint64_t ffree; + uint32_t bsize; + uint32_t namelen; + uint32_t frsize; + uint32_t padding; + uint32_t spare[6]; +}; + +struct fuse_statfs_out { + struct fuse_kstatfs st; +}; + +struct fuse_init_in { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint32_t flags2; + uint32_t unused[11]; +}; + +struct fuse_init_out { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint16_t max_background; + uint16_t congestion_threshold; + uint32_t max_write; + uint32_t time_gran; + uint16_t max_pages; + uint16_t map_alignment; + uint32_t flags2; + uint32_t max_stack_depth; + uint32_t unused[6]; +}; + +struct fuse_syncfs_in { + uint64_t padding; +}; + +enum fuse_dax_mode { + FUSE_DAX_INODE_DEFAULT = 0, + FUSE_DAX_ALWAYS = 1, + FUSE_DAX_NEVER = 2, + FUSE_DAX_INODE_USER = 3, +}; + +struct fuse_fs_context { + int fd; + struct file *file; + unsigned int rootmode; + kuid_t user_id; + kgid_t group_id; + bool is_bdev: 1; + bool fd_present: 1; + bool rootmode_present: 1; + bool user_id_present: 1; + bool group_id_present: 1; + bool default_permissions: 1; + bool allow_other: 1; + bool destroy: 1; + bool no_control: 1; + bool no_force_umount: 1; + bool legacy_opts_show: 1; + enum fuse_dax_mode dax_mode; + unsigned int max_read; + unsigned int blksize; + const char *subtype; + struct dax_device *dax_dev; + void **fudptr; +}; + +enum { + OPT_SOURCE = 0, + OPT_SUBTYPE = 1, + OPT_FD = 2, + OPT_ROOTMODE = 3, + OPT_USER_ID = 4, + OPT_GROUP_ID = 5, + OPT_DEFAULT_PERMISSIONS = 6, + OPT_ALLOW_OTHER = 7, + 
OPT_MAX_READ = 8, + OPT_BLKSIZE = 9, + OPT_ERR = 10, +}; + +struct fuse_inode_handle { + u64 nodeid; + u32 generation; +}; + +struct fuse_init_args { + struct fuse_args args; + struct fuse_init_in in; + struct fuse_init_out out; +}; + +struct scrub_sector_verification; + +struct scrub_stripe { + struct scrub_ctx *sctx; + struct btrfs_block_group *bg; + struct page *pages[16]; + struct scrub_sector_verification *sectors; + struct btrfs_device *dev; + u64 logical; + u64 physical; + u16 mirror_num; + u16 nr_sectors; + u16 nr_data_extents; + u16 nr_meta_extents; + atomic_t pending_io; + wait_queue_head_t io_wait; + wait_queue_head_t repair_wait; + long unsigned int state; + long unsigned int extent_sector_bitmap; + long unsigned int init_error_bitmap; + unsigned int init_nr_io_errors; + unsigned int init_nr_csum_errors; + unsigned int init_nr_meta_errors; + long unsigned int error_bitmap; + long unsigned int io_error_bitmap; + long unsigned int csum_error_bitmap; + long unsigned int meta_error_bitmap; + long unsigned int write_error_bitmap; + spinlock_t write_error_lock; + u8 *csums; + struct work_struct work; +}; + +struct scrub_ctx { + struct scrub_stripe stripes[128]; + struct scrub_stripe *raid56_data_stripes; + struct btrfs_fs_info *fs_info; + struct btrfs_path extent_path; + struct btrfs_path csum_path; + int first_free; + int cur_stripe; + atomic_t cancel_req; + int readonly; + ktime_t throttle_deadline; + u64 throttle_sent; + int is_dev_replace; + u64 write_pointer; + struct mutex wr_lock; + struct btrfs_device *wr_tgtdev; + struct btrfs_scrub_progress stat; + spinlock_t stat_lock; + refcount_t refs; +}; + +struct scrub_sector_verification { + bool is_metadata; + union { + u8 *csum; + u64 generation; + }; +}; + +enum scrub_stripe_flags { + SCRUB_STRIPE_FLAG_INITIALIZED = 0, + SCRUB_STRIPE_FLAG_REPAIR_DONE = 1, + SCRUB_STRIPE_FLAG_NO_REPORT = 2, +}; + +struct scrub_warning { + struct btrfs_path *path; + u64 extent_item_size; + const char *errstr; + u64 physical; + u64 logical; + struct btrfs_device *dev; +}; + +enum rsapubkey_actions { + ACT_rsa_get_e = 0, + ACT_rsa_get_n = 1, + NR__rsapubkey_actions = 2, +}; + +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +struct rsa_mpi_key { + MPI n; + MPI e; + MPI d; + MPI p; + MPI q; + MPI dp; + MPI dq; + MPI qinv; +}; + +struct gcm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn ghash; +}; + +struct crypto_gcm_ctx { + struct crypto_skcipher *ctr; + struct crypto_ahash *ghash; +}; + +struct crypto_rfc4106_ctx { + struct crypto_aead *child; + u8 nonce[4]; +}; + +struct crypto_rfc4106_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct aead_request subreq; +}; + +struct crypto_rfc4543_instance_ctx { + struct crypto_aead_spawn aead; +}; + +struct crypto_rfc4543_ctx { + struct crypto_aead *child; + struct crypto_sync_skcipher *null; + u8 nonce[4]; +}; + +struct crypto_rfc4543_req_ctx { + struct aead_request subreq; +}; + +struct crypto_gcm_ghash_ctx { + unsigned int cryptlen; + struct scatterlist *src; + int (*complete)(struct aead_request *, u32); +}; + +struct crypto_gcm_req_priv_ctx { + u8 iv[16]; + u8 auth_tag[16]; + u8 iauth_tag[16]; + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct scatterlist sg; + struct crypto_gcm_ghash_ctx ghash_ctx; + union { + 
struct ahash_request ahreq; + struct skcipher_request skreq; + } u; +}; + +union nested_table { + union nested_table *table; + struct rhash_lock_head *bucket; +}; + +typedef struct { + U64 rolling; + U64 stopMask; +} ldmRollingHashState_t; + +union handle_parts { + depot_stack_handle_t handle; + struct { + u32 pool_index_plus_1: 17; + u32 offset: 10; + u32 extra: 5; + }; +}; + +struct stack_record { + struct list_head hash_list; + u32 hash; + u32 size; + union handle_parts handle; + refcount_t count; + union { + long unsigned int entries[64]; + struct { + struct list_head free_list; + long unsigned int rcu_state; + }; + }; +}; + +enum depot_counter_id { + DEPOT_COUNTER_REFD_ALLOCS = 0, + DEPOT_COUNTER_REFD_FREES = 1, + DEPOT_COUNTER_REFD_INUSE = 2, + DEPOT_COUNTER_FREELIST_SIZE = 3, + DEPOT_COUNTER_PERSIST_COUNT = 4, + DEPOT_COUNTER_PERSIST_BYTES = 5, + DEPOT_COUNTER_COUNT = 6, +}; + +struct clk_fixed_factor { + struct clk_hw hw; + unsigned int mult; + unsigned int div; + long unsigned int acc; + unsigned int flags; +}; + +struct iommu_group { + struct kobject kobj; + struct kobject *devices_kobj; + struct list_head devices; + struct xarray pasid_array; + struct mutex mutex; + void *iommu_data; + void (*iommu_data_release)(void *); + char *name; + int id; + struct iommu_domain *default_domain; + struct iommu_domain *blocking_domain; + struct iommu_domain *domain; + struct list_head entry; + unsigned int owner_cnt; + void *owner; +}; + +struct fsl_mc_obj_desc { + char type[16]; + int id; + u16 vendor; + u16 ver_major; + u16 ver_minor; + u8 irq_count; + u8 region_count; + u32 state; + char label[16]; + u16 flags; +}; + +struct fsl_mc_io; + +struct fsl_mc_device_irq; + +struct fsl_mc_resource; + +struct fsl_mc_device { + struct device dev; + u64 dma_mask; + u16 flags; + u32 icid; + u16 mc_handle; + struct fsl_mc_io *mc_io; + struct fsl_mc_obj_desc obj_desc; + struct resource *regions; + struct fsl_mc_device_irq **irqs; + struct fsl_mc_resource *resource; + struct device_link *consumer_link; + const char *driver_override; +}; + +enum fsl_mc_pool_type { + FSL_MC_POOL_DPMCP = 0, + FSL_MC_POOL_DPBP = 1, + FSL_MC_POOL_DPCON = 2, + FSL_MC_POOL_IRQ = 3, + FSL_MC_NUM_POOL_TYPES = 4, +}; + +struct fsl_mc_resource_pool; + +struct fsl_mc_resource { + enum fsl_mc_pool_type type; + s32 id; + void *data; + struct fsl_mc_resource_pool *parent_pool; + struct list_head node; +}; + +struct fsl_mc_device_irq { + unsigned int virq; + struct fsl_mc_device *mc_dev; + u8 dev_irq_index; + struct fsl_mc_resource resource; +}; + +struct fsl_mc_io { + struct device *dev; + u16 flags; + u32 portal_size; + phys_addr_t portal_phys_addr; + void *portal_virt_addr; + struct fsl_mc_device *dpmcp_dev; + union { + struct mutex mutex; + raw_spinlock_t spinlock; + }; +}; + +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 2, + IOMMU_HWPT_FAULT_ID_VALID = 4, + IOMMU_HWPT_ALLOC_PASID = 8, +}; + +struct group_device { + struct list_head list; + struct device *dev; + char *name; +}; + +struct iommu_group_attribute { + struct attribute attr; + ssize_t (*show)(struct iommu_group *, char *); + ssize_t (*store)(struct iommu_group *, const char *, size_t); +}; + +enum { + IOMMU_SET_DOMAIN_MUST_SUCCEED = 1, +}; + +struct group_for_pci_data { + struct pci_dev *pdev; + struct iommu_group *group; +}; + +struct ww_class { + atomic_long_t stamp; + struct lock_class_key acquire_key; + struct lock_class_key mutex_key; + const char *acquire_name; + const char *mutex_name; + unsigned int 
is_wait_die; +}; + +struct dma_resv_list { + struct callback_head rcu; + u32 num_fences; + u32 max_fences; + struct dma_fence *table[0]; +}; + +enum pr_status { + PR_STS_SUCCESS = 0, + PR_STS_IOERR = 2, + PR_STS_RESERVATION_CONFLICT = 24, + PR_STS_RETRY_PATH_FAILURE = 917504, + PR_STS_PATH_FAST_FAILED = 983040, + PR_STS_PATH_FAILED = 65536, +}; + +enum nvme_pr_type { + NVME_PR_WRITE_EXCLUSIVE = 1, + NVME_PR_EXCLUSIVE_ACCESS = 2, + NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +enum nvme_eds { + NVME_EXTENDED_DATA_STRUCT = 1, +}; + +struct nvme_registered_ctrl { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 hostid; + __le64 rkey; +}; + +struct nvme_reservation_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[14]; + struct nvme_registered_ctrl regctl_ds[0]; +}; + +struct nvme_registered_ctrl_ext { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 rkey; + __u8 hostid[16]; + __u8 rsvd32[32]; +}; + +struct nvme_reservation_status_ext { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[14]; + __u8 rsvd24[40]; + struct nvme_registered_ctrl_ext regctl_eds[0]; +}; + +struct nvmet_pr_register_data { + __le64 crkey; + __le64 nrkey; +}; + +struct nvmet_pr_acquire_data { + __le64 crkey; + __le64 prkey; +}; + +struct nvmet_pr_release_data { + __le64 crkey; +}; + +enum nvme_pr_register_action { + NVME_PR_REGISTER_ACT_REG = 0, + NVME_PR_REGISTER_ACT_UNREG = 1, + NVME_PR_REGISTER_ACT_REPLACE = 2, +}; + +enum nvme_pr_acquire_action { + NVME_PR_ACQUIRE_ACT_ACQUIRE = 0, + NVME_PR_ACQUIRE_ACT_PREEMPT = 1, + NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 2, +}; + +enum nvme_pr_release_action { + NVME_PR_RELEASE_ACT_RELEASE = 0, + NVME_PR_RELEASE_ACT_CLEAR = 1, +}; + +enum nvme_pr_change_ptpl { + NVME_PR_CPTPL_NO_CHANGE = 0, + NVME_PR_CPTPL_RESV = 1073741824, + NVME_PR_CPTPL_CLEARED = -2147483648, + NVME_PR_CPTPL_PERSIST = -1073741824, +}; + +struct igb_stats { + char stat_string[32]; + int sizeof_stat; + int stat_offset; +}; + +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP = 1, + TEST_IRQ = 2, + TEST_LOOP = 3, + TEST_LINK = 4, +}; + +struct igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +struct input_mt_pos { + s16 x; + s16 y; +}; + +enum nec_state { + STATE_INACTIVE___9 = 0, + STATE_HEADER_SPACE___4 = 1, + STATE_BIT_PULSE___5 = 2, + STATE_BIT_SPACE___5 = 3, + STATE_TRAILER_PULSE___4 = 4, + STATE_TRAILER_SPACE___4 = 5, +}; + +struct raid10_info { + struct md_rdev *rdev; + struct md_rdev *replacement; + sector_t head_position; + int recovery_disabled; +}; + +struct geom { + int raid_disks; + int near_copies; + int far_copies; + int far_offset; + sector_t stride; + int far_set_size; + int chunk_shift; + sector_t chunk_mask; +}; + +struct r10conf { + struct mddev *mddev; + struct raid10_info *mirrors; + struct raid10_info *mirrors_new; + struct raid10_info *mirrors_old; + spinlock_t device_lock; + struct geom prev; + struct geom geo; + int copies; + sector_t dev_sectors; + sector_t reshape_progress; + sector_t reshape_safe; + long unsigned int reshape_checkpoint; + sector_t offset_diff; + struct list_head retry_list; + struct list_head bio_end_io_list; + struct bio_list pending_bio_list; + seqlock_t resync_lock; + atomic_t nr_pending; + int nr_waiting; + int nr_queued; + int barrier; + int array_freeze_pending; + sector_t 
next_resync; + int fullsync; + int have_replacement; + wait_queue_head_t wait_barrier; + mempool_t r10bio_pool; + mempool_t r10buf_pool; + struct page *tmppage; + struct bio_set bio_split; + struct md_thread *thread; + sector_t cluster_sync_low; + sector_t cluster_sync_high; +}; + +struct r10dev { + struct bio *bio; + union { + struct bio *repl_bio; + struct md_rdev *rdev; + }; + sector_t addr; + int devnum; +}; + +struct r10bio { + atomic_t remaining; + sector_t sector; + int sectors; + long unsigned int state; + struct mddev *mddev; + struct bio *master_bio; + int read_slot; + struct list_head retry_list; + struct r10dev devs[0]; +}; + +enum r10bio_state { + R10BIO_Uptodate = 0, + R10BIO_IsSync = 1, + R10BIO_IsRecover = 2, + R10BIO_IsReshape = 3, + R10BIO_Degraded = 4, + R10BIO_ReadError = 5, + R10BIO_MadeGood = 6, + R10BIO_WriteError = 7, + R10BIO_Previous = 8, + R10BIO_FailFast = 9, + R10BIO_Discard = 10, +}; + +enum geo_type { + geo_new = 0, + geo_old = 1, + geo_start = 2, +}; + +struct rsync_pages; + +struct compat_ifconf { + compat_int_t ifc_len; + compat_caddr_t ifcbuf; +}; + +enum hwtstamp_flags { + HWTSTAMP_FLAG_BONDED_PHC_INDEX = 1, + HWTSTAMP_FLAG_LAST = 1, + HWTSTAMP_FLAG_MASK = 1, +}; + +struct features_reply_data { + struct ethnl_reply_data base; + u32 hw[2]; + u32 wanted[2]; + u32 active[2]; + u32 nochange[2]; + u32 all[2]; +}; + +enum ip_conntrack_expect_events { + IPEXP_NEW = 0, + IPEXP_DESTROY = 1, +}; + +struct ct_expect_iter_state { + struct seq_net_private p; + unsigned int bucket; +}; + +enum nft_byteorder_ops { + NFT_BYTEORDER_NTOH = 0, + NFT_BYTEORDER_HTON = 1, +}; + +enum nft_byteorder_attributes { + NFTA_BYTEORDER_UNSPEC = 0, + NFTA_BYTEORDER_SREG = 1, + NFTA_BYTEORDER_DREG = 2, + NFTA_BYTEORDER_OP = 3, + NFTA_BYTEORDER_LEN = 4, + NFTA_BYTEORDER_SIZE = 5, + __NFTA_BYTEORDER_MAX = 6, +}; + +struct nft_byteorder { + u8 sreg; + u8 dreg; + enum nft_byteorder_ops op: 8; + u8 len; + u8 size; +}; + +struct xt_log_info { + unsigned char level; + unsigned char logflags; + char prefix[30]; +}; + +struct sock_bh_locked { + struct sock *sock; + local_lock_t bh_lock; +}; + +enum tcp_seq_states { + TCP_SEQ_STATE_LISTENING = 0, + TCP_SEQ_STATE_ESTABLISHED = 1, +}; + +struct tcp_seq_afinfo { + sa_family_t family; +}; + +struct tcp_iter_state { + struct seq_net_private p; + enum tcp_seq_states state; + struct sock *syn_wait_sk; + int bucket; + int offset; + int sbucket; + int num; + loff_t last_pos; +}; + +struct bpf_tcp_iter_state { + struct tcp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; +}; + +struct bpf_iter__tcp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct sock_common *sk_common; + }; + uid_t uid; +}; + +enum { + UDP_BPF_IPV4 = 0, + UDP_BPF_IPV6 = 1, + UDP_BPF_NUM_PROTS = 2, +}; + +struct maple_metadata { + unsigned char end; + unsigned char gap; +}; + +struct maple_pnode; + +struct maple_range_64 { + struct maple_pnode *parent; + long unsigned int pivot[15]; + union { + void *slot[16]; + struct { + void *pad[15]; + struct maple_metadata meta; + }; + }; +}; + +struct maple_arange_64 { + struct maple_pnode *parent; + long unsigned int pivot[9]; + void *slot[10]; + long unsigned int gap[10]; + struct maple_metadata meta; +}; + +struct maple_topiary { + struct maple_pnode *parent; + struct maple_enode *next; +}; + +enum maple_type { + maple_dense = 0, + maple_leaf_64 = 1, + maple_range_64 = 2, + maple_arange_64 = 3, +}; + +struct maple_node { + union { + struct { + 
struct maple_pnode *parent; + void *slot[31]; + }; + struct { + void *pad; + struct callback_head rcu; + struct maple_enode *piv_parent; + unsigned char parent_slot; + enum maple_type type; + unsigned char slot_len; + unsigned int ma_flags; + }; + struct maple_range_64 mr64; + struct maple_arange_64 ma64; + struct maple_alloc alloc; + }; +}; + +struct ma_topiary { + struct maple_enode *head; + struct maple_enode *tail; + struct maple_tree *mtree; +}; + +struct ma_wr_state { + struct ma_state *mas; + struct maple_node *node; + long unsigned int r_min; + long unsigned int r_max; + enum maple_type type; + unsigned char offset_end; + long unsigned int *pivots; + long unsigned int end_piv; + void **slots; + void *entry; + void *content; +}; + +struct trace_event_raw_ma_op { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_read { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_write { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + long unsigned int piv; + void *val; + void *node; + char __data[0]; +}; + +struct trace_event_data_offsets_ma_op {}; + +struct trace_event_data_offsets_ma_read {}; + +struct trace_event_data_offsets_ma_write {}; + +typedef void (*btf_trace_ma_op)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_read)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_write)(void *, const char *, struct ma_state *, long unsigned int, void *); + +struct maple_big_node { + long unsigned int pivot[33]; + union { + struct maple_enode *slot[34]; + struct { + long unsigned int padding[21]; + long unsigned int gap[21]; + }; + }; + unsigned char b_end; + enum maple_type type; +}; + +struct maple_subtree_state { + struct ma_state *orig_l; + struct ma_state *orig_r; + struct ma_state *l; + struct ma_state *m; + struct ma_state *r; + struct ma_topiary *free; + struct ma_topiary *destroy; + struct maple_big_node *bn; +}; + +typedef volatile long int prel64_t; + +typedef bool filter_t(u64); + +struct ftr_set_desc { + char name[20]; + union { + struct arm64_ftr_override *override; + prel64_t override_prel; + }; + struct { + char name[10]; + u8 shift; + u8 width; + union { + filter_t *filter; + prel64_t filter_prel; + }; + } fields[0]; +}; + +struct realm_config { + union { + struct { + long unsigned int ipa_bits; + long unsigned int hash_algo; + }; + u8 pad[512]; + }; + union { + u8 rpv[64]; + u8 pad2[3584]; + }; +}; + +struct fregs_offset { + const char *name; + int offset; +}; + +struct arch_timer_kvm_info { + struct timecounter timecounter; + int virtual_irq; + int physical_irq; +}; + +struct psci_boot_args { + atomic_t lock; + long unsigned int pc; + long unsigned int r0; +}; + +enum { + TRACE_SIGNAL_DELIVERED = 0, + TRACE_SIGNAL_IGNORED = 1, + TRACE_SIGNAL_ALREADY_PENDING = 2, + TRACE_SIGNAL_OVERFLOW_FAIL = 3, + TRACE_SIGNAL_LOSE_INFO = 4, +}; + +struct trace_event_raw_signal_generate { + struct trace_entry ent; + int sig; + int errno; + int code; + char comm[16]; + pid_t pid; + int group; + int result; + char __data[0]; +}; + +struct trace_event_raw_signal_deliver { + struct trace_entry ent; + int sig; + int errno; + int code; + 
long unsigned int sa_handler; + long unsigned int sa_flags; + char __data[0]; +}; + +struct trace_event_data_offsets_signal_generate {}; + +struct trace_event_data_offsets_signal_deliver {}; + +typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, struct task_struct *, int, int); + +typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, struct k_sigaction *); + +enum sig_handler { + HANDLER_CURRENT = 0, + HANDLER_SIG_DFL = 1, + HANDLER_EXIT = 2, +}; + +struct module_string { + struct list_head next; + struct module *module; + char *str; +}; + +enum { + FORMAT_HEADER = 1, + FORMAT_FIELD_SEPERATOR = 2, + FORMAT_PRINTFMT = 3, +}; + +struct boot_triggers { + const char *event; + char *trigger; +}; + +struct event_probe_data { + struct trace_event_file *file; + long unsigned int count; + int ref; + bool enable; +}; + +struct bpf_iter_seq_link_info { + u32 link_id; +}; + +struct bpf_iter__bpf_link { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_link *link; + }; +}; + +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1, + PERF_SAMPLE_BRANCH_KERNEL = 2, + PERF_SAMPLE_BRANCH_HV = 4, + PERF_SAMPLE_BRANCH_ANY = 8, + PERF_SAMPLE_BRANCH_ANY_CALL = 16, + PERF_SAMPLE_BRANCH_ANY_RETURN = 32, + PERF_SAMPLE_BRANCH_IND_CALL = 64, + PERF_SAMPLE_BRANCH_ABORT_TX = 128, + PERF_SAMPLE_BRANCH_IN_TX = 256, + PERF_SAMPLE_BRANCH_NO_TX = 512, + PERF_SAMPLE_BRANCH_COND = 1024, + PERF_SAMPLE_BRANCH_CALL_STACK = 2048, + PERF_SAMPLE_BRANCH_IND_JUMP = 4096, + PERF_SAMPLE_BRANCH_CALL = 8192, + PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, + PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, + PERF_SAMPLE_BRANCH_HW_INDEX = 131072, + PERF_SAMPLE_BRANCH_PRIV_SAVE = 262144, + PERF_SAMPLE_BRANCH_COUNTERS = 524288, + PERF_SAMPLE_BRANCH_MAX = 1048576, +}; + +enum perf_sample_regs_abi { + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, +}; + +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1, + PERF_FORMAT_TOTAL_TIME_RUNNING = 2, + PERF_FORMAT_ID = 4, + PERF_FORMAT_GROUP = 8, + PERF_FORMAT_LOST = 16, + PERF_FORMAT_MAX = 32, +}; + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1, +}; + +struct perf_ns_link_info { + __u64 dev; + __u64 ino; +}; + +enum { + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + NR_NAMESPACES = 7, +}; + +struct perf_guest_info_callbacks { + unsigned int (*state)(); + long unsigned int (*get_ip)(); + unsigned int (*handle_intel_pt_intr)(); +}; + +enum perf_event_arm_regs { + PERF_REG_ARM64_X0 = 0, + PERF_REG_ARM64_X1 = 1, + PERF_REG_ARM64_X2 = 2, + PERF_REG_ARM64_X3 = 3, + PERF_REG_ARM64_X4 = 4, + PERF_REG_ARM64_X5 = 5, + PERF_REG_ARM64_X6 = 6, + PERF_REG_ARM64_X7 = 7, + PERF_REG_ARM64_X8 = 8, + PERF_REG_ARM64_X9 = 9, + PERF_REG_ARM64_X10 = 10, + PERF_REG_ARM64_X11 = 11, + PERF_REG_ARM64_X12 = 12, + PERF_REG_ARM64_X13 = 13, + PERF_REG_ARM64_X14 = 14, + PERF_REG_ARM64_X15 = 15, + PERF_REG_ARM64_X16 = 16, + PERF_REG_ARM64_X17 = 17, + PERF_REG_ARM64_X18 = 18, + PERF_REG_ARM64_X19 = 19, + PERF_REG_ARM64_X20 = 20, + PERF_REG_ARM64_X21 = 21, + PERF_REG_ARM64_X22 = 22, + PERF_REG_ARM64_X23 = 23, + PERF_REG_ARM64_X24 = 24, + PERF_REG_ARM64_X25 = 25, + PERF_REG_ARM64_X26 = 26, + PERF_REG_ARM64_X27 = 27, + PERF_REG_ARM64_X28 = 28, + PERF_REG_ARM64_X29 = 29, + PERF_REG_ARM64_LR = 30, + PERF_REG_ARM64_SP = 31, + PERF_REG_ARM64_PC = 32, + PERF_REG_ARM64_MAX = 33, + 
PERF_REG_ARM64_VG = 46, + PERF_REG_ARM64_EXTENDED_MAX = 47, +}; + +enum perf_pmu_scope { + PERF_PMU_SCOPE_NONE = 0, + PERF_PMU_SCOPE_CORE = 1, + PERF_PMU_SCOPE_DIE = 2, + PERF_PMU_SCOPE_CLUSTER = 3, + PERF_PMU_SCOPE_PKG = 4, + PERF_PMU_SCOPE_SYS_WIDE = 5, + PERF_PMU_MAX_SCOPE = 6, +}; + +enum perf_addr_filter_action_t { + PERF_ADDR_FILTER_ACTION_STOP = 0, + PERF_ADDR_FILTER_ACTION_START = 1, + PERF_ADDR_FILTER_ACTION_FILTER = 2, +}; + +struct perf_addr_filter { + struct list_head entry; + struct path path; + long unsigned int offset; + long unsigned int size; + enum perf_addr_filter_action_t action; +}; + +struct swevent_hlist { + struct hlist_head heads[256]; + struct callback_head callback_head; +}; + +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int online; + struct perf_cgroup *cgrp; + int heap_size; + struct perf_event **heap; + struct perf_event *heap_default[2]; +}; + +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; +}; + +struct min_heap_char { + int nr; + int size; + char *data; + char preallocated[0]; +}; + +typedef struct min_heap_char min_heap_char; + +struct min_heap_callbacks { + bool (*less)(const void *, const void *, void *); + void (*swp)(void *, void *, void *); +}; + +typedef int (*remote_function_f)(void *); + +struct remote_function_call { + struct task_struct *p; + remote_function_f func; + void *info; + int ret; +}; + +enum event_type_t { + EVENT_FLEXIBLE = 1, + EVENT_PINNED = 2, + EVENT_TIME = 4, + EVENT_FROZEN = 8, + EVENT_CPU = 16, + EVENT_CGROUP = 32, + EVENT_ALL = 3, + EVENT_TIME_FROZEN = 12, +}; + +typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *); + +struct event_function_struct { + struct perf_event *event; + event_f func; + void *data; +}; + +struct __group_key { + int cpu; + struct pmu *pmu; + struct cgroup *cgroup; +}; + +struct stop_event_data { + struct perf_event *event; + unsigned int restart; +}; + +struct perf_event_min_heap { + int nr; + int size; + struct perf_event **data; + struct perf_event *preallocated[0]; +}; + +struct perf_read_data { + struct perf_event *event; + bool group; + int ret; +}; + +struct perf_read_event { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +typedef void perf_iterate_f(struct perf_event *, void *); + +struct remote_output { + struct perf_buffer *rb; + int err; +}; + +struct perf_task_event { + struct task_struct *task; + struct perf_event_context *task_ctx; + struct { + struct perf_event_header header; + u32 pid; + u32 ppid; + u32 tid; + u32 ptid; + u64 time; + } event_id; +}; + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + } event_id; +}; + +struct perf_namespaces_event { + struct task_struct *task; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 nr_namespaces; + struct perf_ns_link_info link_info[7]; + } event_id; +}; + +struct perf_cgroup_event { + char *path; + int path_size; + struct { + struct perf_event_header header; + u64 id; + char path[0]; + } event_id; +}; + +struct perf_mmap_event { + struct vm_area_struct *vma; + const char *file_name; + int file_size; + int maj; + int min; + u64 ino; + u64 ino_generation; + u32 prot; + u32 flags; + u8 build_id[20]; + u32 build_id_size; + struct { + struct perf_event_header header; + 
u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event_id; +}; + +struct perf_switch_event { + struct task_struct *task; + struct task_struct *next_prev; + struct { + struct perf_event_header header; + u32 next_prev_pid; + u32 next_prev_tid; + } event_id; +}; + +struct perf_ksymbol_event { + const char *name; + int name_len; + struct { + struct perf_event_header header; + u64 addr; + u32 len; + u16 ksym_type; + u16 flags; + } event_id; +}; + +struct perf_bpf_event { + struct bpf_prog *prog; + struct { + struct perf_event_header header; + u16 type; + u16 flags; + u32 id; + u8 tag[8]; + } event_id; +}; + +struct perf_text_poke_event { + const void *old_bytes; + const void *new_bytes; + size_t pad; + u16 old_len; + u16 new_len; + struct { + struct perf_event_header header; + u64 addr; + } event_id; +}; + +struct swevent_htable { + struct swevent_hlist *swevent_hlist; + struct mutex hlist_mutex; + int hlist_refcount; +}; + +enum perf_probe_config { + PERF_PROBE_CONFIG_IS_RETPROBE = 1, + PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, + PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32, +}; + +enum { + IF_ACT_NONE = -1, + IF_ACT_FILTER = 0, + IF_ACT_START = 1, + IF_ACT_STOP = 2, + IF_SRC_FILE = 3, + IF_SRC_KERNEL = 4, + IF_SRC_FILEADDR = 5, + IF_SRC_KERNELADDR = 6, +}; + +enum { + IF_STATE_ACTION = 0, + IF_STATE_SOURCE = 1, + IF_STATE_END = 2, +}; + +struct perf_aux_event { + struct perf_event_header header; + u64 hw_id; +}; + +struct perf_aux_event___2 { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +struct perf_aux_event___3 { + struct perf_event_header header; + u64 offset; + u64 size; + u64 flags; +}; + +struct pde_opener { + struct list_head lh; + struct file *file; + bool closing; + struct completion *c; +}; + +enum { + BIAS = 2147483648, +}; + +struct recovery_info { + tid_t start_transaction; + tid_t end_transaction; + long unsigned int head_block; + int nr_replays; + int nr_revokes; + int nr_revoke_hits; +}; + +struct autofs_packet_hdr { + int proto_version; + int type; +}; + +struct autofs_packet_expire { + struct autofs_packet_hdr hdr; + int len; + char name[256]; +}; + +enum autofs_notify { + NFY_NONE = 0, + NFY_MOUNT = 1, + NFY_EXPIRE = 2, +}; + +struct inode_defrag { + struct rb_node rb_node; + u64 ino; + u64 transid; + u64 root; + u32 extent_thresh; +}; + +struct defrag_target_range { + struct list_head list; + u64 start; + u64 len; +}; + +struct btrfs_stripe_hash { + struct list_head hash_list; + spinlock_t lock; +}; + +struct btrfs_stripe_hash_table { + struct list_head stripe_cache; + spinlock_t cache_lock; + int cache_size; + struct btrfs_stripe_hash table[0]; +}; + +struct sector_ptr { + struct page *page; + unsigned int pgoff: 24; + unsigned int uptodate: 8; +}; + +struct btrfs_plug_cb { + struct blk_plug_cb cb; + struct btrfs_fs_info *info; + struct list_head rbio_list; +}; + +struct vfs_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; +}; + +struct vfs_ns_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; + __le32 rootid; +}; + +struct cpu_vfs_cap_data { + __u32 magic_etc; + kuid_t rootid; + kernel_cap_t permitted; + kernel_cap_t inheritable; +}; + +enum x509_akid_actions { + ACT_x509_akid_note_kid = 0, + ACT_x509_akid_note_name = 1, + ACT_x509_akid_note_serial = 2, + ACT_x509_extract_name_segment___2 = 3, + ACT_x509_note_OID___2 = 4, + NR__x509_akid_actions = 5, +}; + +struct x509_parse_context { + struct x509_certificate *cert; + long unsigned int data; + const void 
*key; + size_t key_size; + const void *params; + size_t params_size; + enum OID key_algo; + enum OID last_oid; + enum OID sig_algo; + u8 o_size; + u8 cn_size; + u8 email_size; + u16 o_offset; + u16 cn_offset; + u16 email_offset; + unsigned int raw_akid_size; + const void *raw_akid; + const void *akid_raw_issuer; + unsigned int akid_raw_issuer_size; +}; + +enum { + REQ_FSEQ_PREFLUSH = 1, + REQ_FSEQ_DATA = 2, + REQ_FSEQ_POSTFLUSH = 4, + REQ_FSEQ_DONE = 8, + REQ_FSEQ_ACTIONS = 7, + FLUSH_PENDING_TIMEOUT = 5000, +}; + +struct throtl_data { + struct throtl_service_queue service_queue; + struct request_queue *queue; + unsigned int nr_queued[2]; + unsigned int throtl_slice; + struct work_struct dispatch_work; + bool track_bio_latency; +}; + +enum tg_state_flags { + THROTL_TG_PENDING = 1, + THROTL_TG_WAS_EMPTY = 2, + THROTL_TG_CANCELING = 4, +}; + +enum { + IORING_MEM_REGION_REG_WAIT_ARG = 1, +}; + +struct io_uring_mem_region_reg { + __u64 region_uptr; + __u64 flags; + __u64 __resv[2]; +}; + +struct io_uring_probe_op { + __u8 op; + __u8 resv; + __u16 flags; + __u32 resv2; +}; + +struct io_uring_probe { + __u8 last_op; + __u8 ops_len; + __u16 resv; + __u32 resv2[3]; + struct io_uring_probe_op ops[0]; +}; + +struct io_uring_restriction { + __u16 opcode; + union { + __u8 register_op; + __u8 sqe_op; + __u8 sqe_flags; + }; + __u8 resv; + __u32 resv2[3]; +}; + +struct io_uring_clock_register { + __u32 clockid; + __u32 __resv[3]; +}; + +enum io_uring_register_restriction_op { + IORING_RESTRICTION_REGISTER_OP = 0, + IORING_RESTRICTION_SQE_OP = 1, + IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2, + IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3, + IORING_RESTRICTION_LAST = 4, +}; + +struct io_ring_ctx_rings { + short unsigned int n_ring_pages; + short unsigned int n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; + struct io_uring_sqe *sq_sqes; + struct io_rings *rings; +}; + +enum pci_bar_type { + pci_bar_unknown = 0, + pci_bar_io = 1, + pci_bar_mem32 = 2, + pci_bar_mem64 = 3, +}; + +struct pci_domain_busn_res { + struct list_head list; + struct resource res; + int domain_nr; +}; + +struct amba_id { + unsigned int id; + unsigned int mask; + void *data; +}; + +struct amba_driver { + struct device_driver drv; + int (*probe)(struct amba_device *, const struct amba_id *); + void (*remove)(struct amba_device *); + void (*shutdown)(struct amba_device *); + const struct amba_id *id_table; + bool driver_managed_dma; +}; + +struct of_pci_iommu_alias_info { + struct device *dev; + struct device_node *np; +}; + +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 args[8]; +}; + +struct swnode { + struct kobject kobj; + struct fwnode_handle fwnode; + const struct software_node *node; + int id; + struct ida child_ids; + struct list_head entry; + struct list_head children; + struct swnode *parent; + unsigned int allocated: 1; + unsigned int managed: 1; +}; + +struct request_sense; + +struct cdrom_generic_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct request_sense *sense; + unsigned char data_direction; + int quiet; + int timeout; + union { + void *reserved[1]; + void *unused; + }; +}; + +struct request_sense { + __u8 valid: 1; + __u8 error_code: 7; + __u8 segment_number; + __u8 reserved1: 2; + __u8 ili: 1; + __u8 reserved2: 1; + __u8 sense_key: 4; + __u8 information[4]; + __u8 add_sense_len; + __u8 command_info[4]; + __u8 asc; + __u8 ascq; + __u8 fruc; + __u8 sks[3]; + __u8 asb[46]; +}; + +struct 
scsi_ioctl_command { + unsigned int inlen; + unsigned int outlen; + unsigned char data[0]; +}; + +struct scsi_idlun { + __u32 dev_id; + __u32 host_unique_id; +}; + +struct scsi_disk { + struct scsi_device *device; + struct device disk_dev; + struct gendisk *disk; + struct opal_dev *opal_dev; + atomic_t openers; + sector_t capacity; + int max_retries; + u32 min_xfer_blocks; + u32 max_xfer_blocks; + u32 opt_xfer_blocks; + u32 max_ws_blocks; + u32 max_unmap_blocks; + u32 unmap_granularity; + u32 unmap_alignment; + u32 max_atomic; + u32 atomic_alignment; + u32 atomic_granularity; + u32 max_atomic_with_boundary; + u32 max_atomic_boundary; + u32 index; + unsigned int physical_block_size; + unsigned int max_medium_access_timeouts; + unsigned int medium_access_timed_out; + u16 permanent_stream_count; + u8 media_present; + u8 write_prot; + u8 protection_type; + u8 provisioning_mode; + u8 zeroing_mode; + u8 nr_actuators; + bool suspended; + unsigned int ATO: 1; + unsigned int cache_override: 1; + unsigned int WCE: 1; + unsigned int RCD: 1; + unsigned int DPOFUA: 1; + unsigned int first_scan: 1; + unsigned int lbpme: 1; + unsigned int lbprz: 1; + unsigned int lbpu: 1; + unsigned int lbpws: 1; + unsigned int lbpws10: 1; + unsigned int lbpvpd: 1; + unsigned int ws10: 1; + unsigned int ws16: 1; + unsigned int rc_basis: 2; + unsigned int zoned: 2; + unsigned int urswrz: 1; + unsigned int security: 1; + unsigned int ignore_medium_access_errors: 1; + unsigned int rscs: 1; + unsigned int use_atomic_write_boundary: 1; +}; + +struct ppl_header_entry { + __le64 data_sector; + __le32 pp_size; + __le32 data_size; + __le32 parity_disk; + __le32 checksum; +}; + +struct ppl_header { + __u8 reserved[512]; + __le32 signature; + __le32 padding; + __le64 generation; + __le32 entries_count; + __le32 checksum; + struct ppl_header_entry entries[148]; +}; + +struct ppl_log; + +struct ppl_io_unit { + struct ppl_log *log; + struct page *header_page; + unsigned int entries_count; + unsigned int pp_size; + u64 seq; + struct list_head log_sibling; + struct list_head stripe_list; + atomic_t pending_stripes; + atomic_t pending_flushes; + bool submitted; + struct bio bio; + struct bio_vec biovec[32]; +}; + +struct ppl_conf { + struct mddev *mddev; + struct ppl_log *child_logs; + int count; + int block_size; + u32 signature; + atomic64_t seq; + struct kmem_cache *io_kc; + mempool_t io_pool; + struct bio_set bs; + struct bio_set flush_bs; + int recovered_entries; + int mismatch_count; + struct list_head no_mem_stripes; + spinlock_t no_mem_stripes_lock; + short unsigned int write_hint; +}; + +struct ppl_log { + struct ppl_conf *ppl_conf; + struct md_rdev *rdev; + struct mutex io_mutex; + struct ppl_io_unit *current_io; + spinlock_t io_list_lock; + struct list_head io_list; + sector_t next_io_sector; + unsigned int entry_space; + bool use_multippl; + bool wb_cache_on; + long unsigned int disk_flush_bitmap; +}; + +struct nvmem_cell_lookup { + const char *nvmem_name; + const char *cell_name; + const char *dev_id; + const char *con_id; + struct list_head node; +}; + +enum { + NVMEM_ADD = 1, + NVMEM_REMOVE = 2, + NVMEM_CELL_ADD = 3, + NVMEM_CELL_REMOVE = 4, + NVMEM_LAYOUT_ADD = 5, + NVMEM_LAYOUT_REMOVE = 6, +}; + +typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_cell_post_process_t)(void *, const char *, int, unsigned int, void *, size_t); + +enum nvmem_type { + NVMEM_TYPE_UNKNOWN = 0, + NVMEM_TYPE_EEPROM = 1, + 
NVMEM_TYPE_OTP = 2, + NVMEM_TYPE_BATTERY_BACKED = 3, + NVMEM_TYPE_FRAM = 4, +}; + +struct nvmem_keepout { + unsigned int start; + unsigned int end; + unsigned char value; +}; + +struct nvmem_cell_info { + const char *name; + unsigned int offset; + size_t raw_len; + unsigned int bytes; + unsigned int bit_offset; + unsigned int nbits; + struct device_node *np; + nvmem_cell_post_process_t read_post_process; + void *priv; +}; + +struct nvmem_device; + +struct nvmem_layout; + +struct nvmem_config { + struct device *dev; + const char *name; + int id; + struct module *owner; + const struct nvmem_cell_info *cells; + int ncells; + bool add_legacy_fixed_of_cells; + void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + enum nvmem_type type; + bool read_only; + bool root_only; + bool ignore_wp; + struct nvmem_layout *layout; + struct device_node *of_node; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; + bool compat; + struct device *base_dev; +}; + +struct nvmem_device { + struct module *owner; + struct device dev; + struct list_head node; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + struct nvmem_layout *layout; + void *priv; + bool sysfs_cells_populated; +}; + +struct nvmem_layout { + struct device dev; + struct nvmem_device *nvmem; + int (*add_cells)(struct nvmem_layout *); +}; + +struct nvmem_cell_table { + const char *nvmem_name; + const struct nvmem_cell_info *cells; + size_t ncells; + struct list_head node; +}; + +struct nvmem_cell_entry { + const char *name; + int offset; + size_t raw_len; + int bytes; + int bit_offset; + int nbits; + nvmem_cell_post_process_t read_post_process; + void *priv; + struct device_node *np; + struct nvmem_device *nvmem; + struct list_head node; +}; + +struct nvmem_cell { + struct nvmem_cell_entry *entry; + const char *id; + int index; +}; + +struct xdp_frame_bulk { + int count; + void *xa; + void *q[16]; +}; + +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +struct wol_reply_data { + struct ethnl_reply_data base; + struct ethtool_wolinfo wol; + bool show_sopass; +}; + +enum { + BPF_F_CURRENT_NETNS = -1, +}; + +struct bpf_ct_opts { + s32 netns_id; + s32 error; + u8 l4proto; + u8 dir; + u16 ct_zone_id; + u8 ct_zone_dir; + u8 reserved[3]; +}; + +enum { + NF_BPF_CT_OPTS_SZ = 16, +}; + +struct ipfrag_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + }; + struct sk_buff *next_frag; + int frag_run_len; + int ip_defrag_offset; +}; + +enum { + IP6_FH_F_FRAG = 1, + IP6_FH_F_AUTH = 2, + IP6_FH_F_SKIP_RH = 4, +}; + +struct brport_attribute { + struct attribute attr; + ssize_t (*show)(struct net_bridge_port *, char *); + int (*store)(struct net_bridge_port *, long unsigned int); + int (*store_raw)(struct net_bridge_port *, char *); +}; + +enum { + CAP_HWCAP = 1, +}; + +struct __ftr_reg_entry { + u32 sys_id; + struct arm64_ftr_reg *reg; +}; + +typedef void kpti_remap_fn(int, int, phys_addr_t, long unsigned int); + +typedef void 
ttbr_replace_func(phys_addr_t); + +struct pvclock_vcpu_stolen_time { + __le32 revision; + __le32 attributes; + __le64 stolen_time; + u8 padding[48]; +}; + +enum reboot_type { + BOOT_TRIPLE = 116, + BOOT_KBD = 107, + BOOT_BIOS = 98, + BOOT_ACPI = 97, + BOOT_EFI = 101, + BOOT_CF9_FORCE = 112, + BOOT_CF9_SAFE = 113, +}; + +enum sys_off_mode { + SYS_OFF_MODE_POWER_OFF_PREPARE = 0, + SYS_OFF_MODE_POWER_OFF = 1, + SYS_OFF_MODE_RESTART_PREPARE = 2, + SYS_OFF_MODE_RESTART = 3, +}; + +struct sys_off_data { + int mode; + void *cb_data; + const char *cmd; + struct device *dev; +}; + +struct sys_off_handler { + struct notifier_block nb; + int (*sys_off_cb)(struct sys_off_data *); + void *cb_data; + enum sys_off_mode mode; + bool blocking; + void *list; + struct device *dev; +}; + +struct ctx_switch_entry { + struct trace_entry ent; + unsigned int prev_pid; + unsigned int next_pid; + unsigned int next_cpu; + unsigned char prev_prio; + unsigned char prev_state; + unsigned char next_prio; + unsigned char next_state; +}; + +struct hwlat_entry { + struct trace_entry ent; + u64 duration; + u64 outer_duration; + u64 nmi_total_ts; + struct timespec64 timestamp; + unsigned int nmi_count; + unsigned int seqnum; + unsigned int count; +}; + +struct osnoise_entry { + struct trace_entry ent; + u64 noise; + u64 runtime; + u64 max_sample; + unsigned int hw_count; + unsigned int nmi_count; + unsigned int irq_count; + unsigned int softirq_count; + unsigned int thread_count; +}; + +struct timerlat_entry { + struct trace_entry ent; + unsigned int seqnum; + int context; + u64 timer_latency; +}; + +struct trace_mark { + long long unsigned int val; + char sym; +}; + +struct kprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int ip; +}; + +struct kretprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int func; + long unsigned int ret_ip; +}; + +struct trace_kprobe { + struct dyn_event devent; + struct kretprobe rp; + long unsigned int *nhit; + const char *symbol; + struct trace_probe tp; +}; + +struct sym_count_ctx { + unsigned int count; + const char *name; +}; + +struct bpf_devmap_val { + __u32 ifindex; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct xdp_dev_bulk_queue { + struct xdp_frame *q[16]; + struct list_head flush_node; + struct net_device *dev; + struct net_device *dev_rx; + struct bpf_prog *xdp_prog; + unsigned int count; +}; + +struct bpf_dtab_netdev { + struct net_device *dev; + struct hlist_node index_hlist; + struct bpf_prog *xdp_prog; + struct callback_head rcu; + unsigned int idx; + struct bpf_devmap_val val; +}; + +struct bpf_dtab { + struct bpf_map map; + struct bpf_dtab_netdev **netdev_map; + struct list_head list; + struct hlist_head *dev_index_head; + spinlock_t index_lock; + unsigned int items; + u32 n_buckets; +}; + +struct trace_event_raw_mm_compaction_isolate_template { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int nr_scanned; + long unsigned int nr_taken; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_migratepages { + struct trace_entry ent; + long unsigned int nr_migrated; + long unsigned int nr_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_begin { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_end { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int 
migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_try_to_compact_pages { + struct trace_entry ent; + int order; + long unsigned int gfp_mask; + int prio; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_suitable_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + int ret; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_defer_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + unsigned int considered; + unsigned int defer_shift; + int order_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_kcompactd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_kcompactd_wake_template { + struct trace_entry ent; + int nid; + int order; + enum zone_type highest_zoneidx; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_compaction_isolate_template {}; + +struct trace_event_data_offsets_mm_compaction_migratepages {}; + +struct trace_event_data_offsets_mm_compaction_begin {}; + +struct trace_event_data_offsets_mm_compaction_end {}; + +struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; + +struct trace_event_data_offsets_mm_compaction_suitable_template {}; + +struct trace_event_data_offsets_mm_compaction_defer_template {}; + +struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; + +struct trace_event_data_offsets_kcompactd_wake_template {}; + +typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_fast_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_migratepages)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_mm_compaction_begin)(void *, struct compact_control *, long unsigned int, long unsigned int, bool); + +typedef void (*btf_trace_mm_compaction_end)(void *, struct compact_control *, long unsigned int, long unsigned int, bool, int); + +typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, int, gfp_t, int); + +typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, int); + +typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum zone_type); + +typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type); + +typedef enum { + ISOLATE_ABORT = 0, + ISOLATE_NONE = 1, + ISOLATE_SUCCESS = 2, +} isolate_migrate_t; + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct fsuuid2 { + __u8 len; + __u8 uuid[16]; +}; + +struct fs_sysfs_path { + __u8 len; + __u8 name[128]; +}; + +struct space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; + __s32 
l_sysid; + __u32 l_pid; + __s32 l_pad[4]; +}; + +struct postprocess_bh_ctx { + struct work_struct work; + struct buffer_head *bh; +}; + +struct bh_lru { + struct buffer_head *bhs[16]; +}; + +struct bh_accounting { + int nr; + int ratelimit; +}; + +struct fname { + __u32 hash; + __u32 minor_hash; + struct rb_node rb_hash; + struct fname *next; + __u32 inode; + __u8 name_len; + __u8 file_type; + char name[0]; +}; + +struct autofs_packet_missing { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[256]; +}; + +struct autofs_packet_expire_multi { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[256]; +}; + +union autofs_packet_union { + struct autofs_packet_hdr hdr; + struct autofs_packet_missing missing; + struct autofs_packet_expire expire; + struct autofs_packet_expire_multi expire_multi; +}; + +struct autofs_v5_packet { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + __u32 dev; + __u64 ino; + __u32 uid; + __u32 gid; + __u32 pid; + __u32 tgid; + __u32 len; + char name[256]; +}; + +typedef struct autofs_v5_packet autofs_packet_missing_indirect_t; + +typedef struct autofs_v5_packet autofs_packet_expire_indirect_t; + +typedef struct autofs_v5_packet autofs_packet_missing_direct_t; + +typedef struct autofs_v5_packet autofs_packet_expire_direct_t; + +union autofs_v5_packet_union { + struct autofs_packet_hdr hdr; + struct autofs_v5_packet v5_packet; + autofs_packet_missing_indirect_t missing_indirect; + autofs_packet_expire_indirect_t expire_indirect; + autofs_packet_missing_direct_t missing_direct; + autofs_packet_expire_direct_t expire_direct; +}; + +struct btrfs_em_shrink_ctx { + long int nr_to_scan; + long int scanned; +}; + +struct scomp_scratch { + spinlock_t lock; + void *src; + void *dst; +}; + +enum pkcs7_actions { + ACT_pkcs7_check_content_type = 0, + ACT_pkcs7_extract_cert = 1, + ACT_pkcs7_note_OID = 2, + ACT_pkcs7_note_certificate_list = 3, + ACT_pkcs7_note_content = 4, + ACT_pkcs7_note_data = 5, + ACT_pkcs7_note_signed_info = 6, + ACT_pkcs7_note_signeddata_version = 7, + ACT_pkcs7_note_signerinfo_version = 8, + ACT_pkcs7_sig_note_authenticated_attr = 9, + ACT_pkcs7_sig_note_digest_algo = 10, + ACT_pkcs7_sig_note_issuer = 11, + ACT_pkcs7_sig_note_pkey_algo = 12, + ACT_pkcs7_sig_note_serial = 13, + ACT_pkcs7_sig_note_set_of_authattrs = 14, + ACT_pkcs7_sig_note_signature = 15, + ACT_pkcs7_sig_note_skid = 16, + NR__pkcs7_actions = 17, +}; + +enum { + DIO_SHOULD_DIRTY = 1, + DIO_IS_SYNC = 2, +}; + +struct blkdev_dio { + union { + struct kiocb *iocb; + struct task_struct *waiter; + }; + size_t size; + atomic_t ref; + unsigned int flags; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct bio bio; + long: 64; +}; + +enum io_uring_socket_op { + SOCKET_URING_OP_SIOCINQ = 0, + SOCKET_URING_OP_SIOCOUTQ = 1, + SOCKET_URING_OP_GETSOCKOPT = 2, + SOCKET_URING_OP_SETSOCKOPT = 3, +}; + +struct uring_cache { + struct io_uring_sqe sqes[2]; +}; + +struct req { + struct req *next; + struct completion done; + int err; + const char *name; + umode_t mode; + kuid_t uid; + kgid_t gid; + struct device *dev; +}; + +struct find_interface_arg { + int minor; + struct device_driver *drv; +}; + +struct each_dev_arg { + void *data; + int (*fn)(struct usb_device *, void *); +}; + +struct class_info { + int class; + char *class_name; +}; + +enum rc_filter_type { + RC_FILTER_NORMAL = 0, + RC_FILTER_WAKEUP = 1, + RC_FILTER_MAX = 2, +}; + +struct led_trigger {}; + +struct rc_filter_attribute { + struct 
device_attribute attr; + enum rc_filter_type type; + bool mask; +}; + +typedef long unsigned int psci_fn(long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef int (*psci_initcall_t)(const struct device_node *); + +struct armv8pmu_probe_info { + struct arm_pmu *pmu; + bool present; +}; + +struct rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *, char *); + ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); +}; + +struct netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *, char *); + ssize_t (*store)(struct netdev_queue *, const char *, size_t); +}; + +enum { + ETHTOOL_A_PAUSE_STAT_UNSPEC = 0, + ETHTOOL_A_PAUSE_STAT_PAD = 1, + ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 2, + ETHTOOL_A_PAUSE_STAT_RX_FRAMES = 3, + __ETHTOOL_A_PAUSE_STAT_CNT = 4, + ETHTOOL_A_PAUSE_STAT_MAX = 3, +}; + +struct pause_req_info { + struct ethnl_req_info base; + enum ethtool_mac_stats_src src; +}; + +struct pause_reply_data { + struct ethnl_reply_data base; + struct ethtool_pauseparam pauseparam; + struct ethtool_pause_stats pausestat; +}; + +enum nft_objref_attributes { + NFTA_OBJREF_UNSPEC = 0, + NFTA_OBJREF_IMM_TYPE = 1, + NFTA_OBJREF_IMM_NAME = 2, + NFTA_OBJREF_SET_SREG = 3, + NFTA_OBJREF_SET_NAME = 4, + NFTA_OBJREF_SET_ID = 5, + __NFTA_OBJREF_MAX = 6, +}; + +struct nft_objref_map { + struct nft_set *set; + u8 sreg; + struct nft_set_binding binding; +}; + +struct tcp_sack_block_wire { + __be32 start_seq; + __be32 end_seq; +}; + +enum tcp_ca_ack_event_flags { + CA_ACK_SLOWPATH = 1, + CA_ACK_WIN_UPDATE = 2, + CA_ACK_ECE = 4, +}; + +struct tcp_sacktag_state { + u64 first_sackt; + u64 last_sackt; + u32 reord; + u32 sack_delivered; + int flag; + unsigned int mss_now; + struct rate_sample *rate; +}; + +struct unix_stream_read_state { + int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); + struct socket *socket; + struct msghdr *msg; + struct pipe_inode_info *pipe; + size_t size; + int flags; + unsigned int splice_flags; +}; + +struct bpf_unix_iter_state { + struct seq_net_private p; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; +}; + +struct bpf_iter__unix { + union { + struct bpf_iter_meta *meta; + }; + union { + struct unix_sock *unix_sk; + }; + uid_t uid; +}; + +typedef void (*alternative_cb_t)(struct alt_instr *, __le32 *, __le32 *, int); + +struct alt_region { + struct alt_instr *begin; + struct alt_instr *end; +}; + +struct kvm_pgtable_walk_data { + struct kvm_pgtable_walker *walker; + const u64 start; + u64 addr; + const u64 end; +}; + +struct leaf_walk_data { + kvm_pte_t pte; + s8 level; +}; + +struct hyp_map_data { + const u64 phys; + kvm_pte_t attr; +}; + +struct stage2_map_data { + const u64 phys; + kvm_pte_t attr; + u8 owner_id; + kvm_pte_t *anchor; + kvm_pte_t *childp; + struct kvm_s2_mmu *mmu; + void *memcache; + bool force_pte; +}; + +struct stage2_attr_data { + kvm_pte_t attr_set; + kvm_pte_t attr_clr; + kvm_pte_t pte; + s8 level; +}; + +struct stage2_age_data { + bool mkold; + bool young; +}; + +struct rt_wake_q_head { + struct wake_q_head head; + struct task_struct *rtlock_task; +}; + +struct tracer_stat { + const char *name; + void * (*stat_start)(struct tracer_stat *); + void * (*stat_next)(void *, int); + cmp_func_t stat_cmp; + int (*stat_show)(struct seq_file *, void *); + void (*stat_release)(void *); + int (*stat_headers)(struct seq_file *); +}; + +struct stat_node { + struct rb_node 
node; + void *stat; +}; + +struct stat_session { + struct list_head session_list; + struct tracer_stat *ts; + struct rb_root stat_root; + struct mutex stat_mutex; + struct dentry *file; +}; + +struct trace_event_raw_error_report_template { + struct trace_entry ent; + enum error_detector error_detector; + long unsigned int id; + char __data[0]; +}; + +struct trace_event_data_offsets_error_report_template {}; + +typedef void (*btf_trace_error_report_end)(void *, enum error_detector, long unsigned int); + +struct bpf_mem_cache { + struct llist_head free_llist; + local_t active; + struct llist_head free_llist_extra; + struct irq_work refill_work; + struct obj_cgroup *objcg; + int unit_size; + int free_cnt; + int low_watermark; + int high_watermark; + int batch; + int percpu_size; + bool draining; + struct bpf_mem_cache *tgt; + struct llist_head free_by_rcu; + struct llist_node *free_by_rcu_tail; + struct llist_head waiting_for_gp; + struct llist_node *waiting_for_gp_tail; + struct callback_head rcu; + atomic_t call_rcu_in_progress; + struct llist_head free_llist_extra_rcu; + struct llist_head free_by_rcu_ttrace; + struct llist_head waiting_for_gp_ttrace; + struct callback_head rcu_ttrace; + atomic_t call_rcu_ttrace_in_progress; +}; + +struct bpf_mem_caches { + struct bpf_mem_cache cache[11]; +}; + +struct callchain_cpus_entries { + struct callback_head callback_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +struct pcpu_group_info { + int nr_units; + long unsigned int base_offset; + unsigned int *cpu_map; +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; + int nr_groups; + struct pcpu_group_info groups[0]; +}; + +typedef int pcpu_fc_cpu_to_node_fn_t(int); + +typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); + +struct trace_event_raw_percpu_alloc_percpu { + struct trace_entry ent; + long unsigned int call_site; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + void *base_addr; + int off; + void *ptr; + size_t bytes_alloc; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_percpu_free_percpu { + struct trace_entry ent; + void *base_addr; + int off; + void *ptr; + char __data[0]; +}; + +struct trace_event_raw_percpu_alloc_percpu_fail { + struct trace_entry ent; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + char __data[0]; +}; + +struct trace_event_raw_percpu_create_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_raw_percpu_destroy_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_data_offsets_percpu_alloc_percpu {}; + +struct trace_event_data_offsets_percpu_free_percpu {}; + +struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; + +struct trace_event_data_offsets_percpu_create_chunk {}; + +struct trace_event_data_offsets_percpu_destroy_chunk {}; + +typedef void (*btf_trace_percpu_alloc_percpu)(void *, long unsigned int, bool, bool, size_t, size_t, void *, int, void *, size_t, gfp_t); + +typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, void *); + +typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, size_t); + +typedef void (*btf_trace_percpu_create_chunk)(void *, void *); + +typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *); + +struct pcpu_block_md { + int scan_hint; + int scan_hint_start; + int contig_hint; + int 
contig_hint_start; + int left_free; + int right_free; + int first_free; + int nr_bits; +}; + +struct pcpuobj_ext { + struct obj_cgroup *cgroup; +}; + +struct pcpu_chunk { + struct list_head list; + int free_bytes; + struct pcpu_block_md chunk_md; + long unsigned int *bound_map; + void *base_addr; + long unsigned int *alloc_map; + struct pcpu_block_md *md_blocks; + void *data; + bool immutable; + bool isolated; + int start_offset; + int end_offset; + struct pcpuobj_ext *obj_exts; + int nr_pages; + int nr_populated; + int nr_empty_pop_pages; + long unsigned int populated[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef struct kobject *kobj_probe_t(dev_t, int *, void *); + +struct char_device_struct { + struct char_device_struct *next; + unsigned int major; + unsigned int baseminor; + int minorct; + char name[64]; + struct cdev *cdev; +}; + +struct pidfd_info { + __u64 mask; + __u64 cgroupid; + __u32 pid; + __u32 tgid; + __u32 ppid; + __u32 ruid; + __u32 rgid; + __u32 euid; + __u32 egid; + __u32 suid; + __u32 sgid; + __u32 fsuid; + __u32 fsgid; + __u32 spare0[1]; +}; + +struct vmcore { + struct list_head list; + long long unsigned int paddr; + long long unsigned int size; + loff_t offset; +}; + +typedef struct elf64_note Elf64_Nhdr; + +struct vmcore_cb { + bool (*pfn_is_ram)(struct vmcore_cb *, long unsigned int); + struct list_head next; +}; + +struct ext4_free_data { + struct list_head efd_list; + struct rb_node efd_node; + ext4_group_t efd_group; + ext4_grpblk_t efd_start_cluster; + ext4_grpblk_t efd_count; + tid_t efd_tid; +}; + +enum { + MB_INODE_PA = 0, + MB_GROUP_PA = 1, +}; + +struct ext4_buddy { + struct folio *bd_buddy_folio; + void *bd_buddy; + struct folio *bd_bitmap_folio; + void *bd_bitmap; + struct ext4_group_info *bd_info; + struct super_block *bd_sb; + __u16 bd_blkbits; + ext4_group_t bd_group; +}; + +typedef int (*ext4_mballoc_query_range_fn)(struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t, void *); + +struct sg { + struct ext4_group_info info; + ext4_grpblk_t counters[18]; +}; + +enum { + AUTOFS_IOC_READY_CMD = 96, + AUTOFS_IOC_FAIL_CMD = 97, + AUTOFS_IOC_CATATONIC_CMD = 98, + AUTOFS_IOC_PROTOVER_CMD = 99, + AUTOFS_IOC_SETTIMEOUT_CMD = 100, + AUTOFS_IOC_EXPIRE_CMD = 101, +}; + +enum { + AUTOFS_IOC_EXPIRE_MULTI_CMD = 102, + AUTOFS_IOC_PROTOSUBVER_CMD = 103, + AUTOFS_IOC_ASKUMOUNT_CMD = 112, +}; + +struct ahash_alg { + int (*init)(struct ahash_request *); + int (*update)(struct ahash_request *); + int (*final)(struct ahash_request *); + int (*finup)(struct ahash_request *); + int (*digest)(struct ahash_request *); + int (*export)(struct ahash_request *, void *); + int (*import)(struct ahash_request *, const void *); + int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_ahash *); + void (*exit_tfm)(struct crypto_ahash *); + int (*clone_tfm)(struct crypto_ahash *, struct crypto_ahash *); + struct hash_alg_common halg; +}; + +struct crypto_hash_walk { + char *data; + unsigned int offset; + unsigned int flags; + struct page *pg; + unsigned int entrylen; + unsigned int total; + struct scatterlist *sg; +}; + +struct ahash_instance { + void (*free)(struct ahash_instance *); + union { + struct { + char head[96]; + struct crypto_instance base; + } s; + struct ahash_alg alg; + }; +}; + +struct essiv_instance_ctx { + union { + struct crypto_skcipher_spawn skcipher_spawn; + struct crypto_aead_spawn aead_spawn; + } u; + char essiv_cipher_name[128]; + char 
shash_driver_name[128]; +}; + +struct essiv_tfm_ctx { + union { + struct crypto_skcipher *skcipher; + struct crypto_aead *aead; + } u; + struct crypto_cipher *essiv_cipher; + struct crypto_shash *hash; + int ivoffset; +}; + +struct essiv_aead_request_ctx { + struct scatterlist sg[4]; + u8 *assoc; + struct aead_request aead_req; +}; + +struct io_uring_rsrc_update { + __u32 offset; + __u32 resv; + __u64 data; +}; + +typedef long unsigned int uint64x2_t[2]; + +struct genradix_iter { + size_t offset; + size_t pos; +}; + +typedef long int mpi_limb_signed_t; + +union uu { + short unsigned int us; + unsigned char b[2]; +}; + +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +struct clk_gated_fixed { + struct clk_gpio clk_gpio; + struct regulator *supply; + long unsigned int rate; +}; + +struct vc_selection { + struct mutex lock; + struct vc_data *cons; + char *buffer; + unsigned int buf_len; + volatile int start; + int end; +}; + +struct scsi_dev_info_list { + struct list_head dev_info_list; + char vendor[8]; + char model[16]; + blist_flags_t flags; + unsigned int compatible; +}; + +struct scsi_dev_info_list_table { + struct list_head node; + struct list_head scsi_dev_info_list; + const char *name; + int key; +}; + +typedef void (*rtl_fw_write_t)(struct rtl8169_private *, int, int); + +typedef int (*rtl_fw_read_t)(struct rtl8169_private *, int); + +struct rtl_fw_phy_action { + __le32 *code; + size_t size; +}; + +struct rtl_fw___2 { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + char version[32]; + struct rtl_fw_phy_action phy_action; +}; + +enum rtl_fw_opcode { + PHY_READ = 0, + PHY_DATA_OR = 1, + PHY_DATA_AND = 2, + PHY_BJMPN = 3, + PHY_MDIO_CHG = 4, + PHY_CLEAR_READCOUNT = 7, + PHY_WRITE = 8, + PHY_READCOUNT_EQ_SKIP = 9, + PHY_COMP_EQ_SKIPN = 10, + PHY_COMP_NEQ_SKIPN = 11, + PHY_WRITE_PREVIOUS = 12, + PHY_SKIPN = 13, + PHY_DELAY_MS = 14, +}; + +struct fw_info { + u32 magic; + char version[32]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __attribute__((packed)); + +struct touchscreen_properties { + unsigned int max_x; + unsigned int max_y; + bool invert_x; + bool invert_y; + bool swap_x_y; +}; + +struct ptp_clock_caps { + int max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int pps; + int n_pins; + int cross_timestamping; + int adjust_phase; + int max_phase_adj; + int rsv[11]; +}; + +struct ptp_sys_offset { + unsigned int n_samples; + unsigned int rsv[3]; + struct ptp_clock_time ts[51]; +}; + +struct ptp_sys_offset_extended { + unsigned int n_samples; + __kernel_clockid_t clockid; + unsigned int rsv[2]; + struct ptp_clock_time ts[75]; +}; + +struct ptp_sys_offset_precise { + struct ptp_clock_time device; + struct ptp_clock_time sys_realtime; + struct ptp_clock_time sys_monoraw; + unsigned int rsv[4]; +}; + +enum { + TCA_FLOWER_KEY_CT_FLAGS_NEW = 1, + TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 2, + TCA_FLOWER_KEY_CT_FLAGS_RELATED = 4, + TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 8, + TCA_FLOWER_KEY_CT_FLAGS_INVALID = 16, + TCA_FLOWER_KEY_CT_FLAGS_REPLY = 32, + __TCA_FLOWER_KEY_CT_FLAGS_MAX = 33, +}; + +enum flow_dissect_ret { + FLOW_DISSECT_RET_OUT_GOOD = 0, + FLOW_DISSECT_RET_OUT_BAD = 1, + FLOW_DISSECT_RET_PROTO_AGAIN = 2, + FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, + FLOW_DISSECT_RET_CONTINUE = 4, +}; + +struct flow_dissector_mpls_lse { + u32 mpls_ttl: 8; + u32 mpls_bos: 1; + u32 mpls_tc: 3; + u32 mpls_label: 20; +}; + +struct 
flow_dissector_key_mpls { + struct flow_dissector_mpls_lse ls[7]; + u8 used_lses; +}; + +struct flow_dissector_key_enc_opts { + u8 data[255]; + u8 len; + u32 dst_opt_type; +}; + +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[6]; + unsigned char tha[6]; +}; + +struct flow_dissector_key_ct { + u16 ct_state; + u16 ct_zone; + u32 ct_mark; + u32 ct_labels[4]; +}; + +struct flow_dissector_key_hash { + u32 hash; +}; + +struct flow_dissector_key_num_of_vlans { + u8 num_of_vlans; +}; + +struct flow_dissector_key_pppoe { + __be16 session_id; + __be16 ppp_proto; + __be16 type; +}; + +struct flow_dissector_key_l2tpv3 { + __be32 session_id; +}; + +struct flow_dissector_key_ipsec { + __be32 spi; +}; + +struct flow_dissector_key_cfm { + u8 mdl_ver; + u8 opcode; +}; + +struct flow_dissector_key { + enum flow_dissector_key_id key_id; + size_t offset; +}; + +struct flow_keys_digest { + u8 data[16]; +}; + +enum bpf_ret_code { + BPF_OK = 0, + BPF_DROP = 2, + BPF_REDIRECT = 7, + BPF_LWT_REROUTE = 128, + BPF_FLOW_DISSECTOR_CONTINUE = 129, +}; + +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1, + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2, + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4, +}; + +struct pptp_gre_header { + struct gre_base_hdr gre_hd; + __be16 payload_len; + __be16 call_id; + __be32 seq; + __be32 ack; +}; + +struct tipc_basic_hdr { + __be32 w[4]; +}; + +enum dccp_state { + DCCP_OPEN = 1, + DCCP_REQUESTING = 2, + DCCP_LISTEN = 10, + DCCP_RESPOND = 3, + DCCP_ACTIVE_CLOSEREQ = 4, + DCCP_PASSIVE_CLOSE = 8, + DCCP_CLOSING = 11, + DCCP_TIME_WAIT = 6, + DCCP_CLOSED = 7, + DCCP_NEW_SYN_RECV = 12, + DCCP_PARTOPEN = 14, + DCCP_PASSIVE_CLOSEREQ = 15, + DCCP_MAX_STATES = 16, +}; + +struct pppoe_tag { + __be16 tag_type; + __be16 tag_len; + char tag_data[0]; +}; + +struct pppoe_hdr { + __u8 ver: 4; + __u8 type: 4; + __u8 code; + __be16 sid; + __be16 length; + struct pppoe_tag tag[0]; +}; + +struct hsr_tag { + __be16 path_and_LSDU_size; + __be16 sequence_nr; + __be16 encap_proto; +}; + +struct mpls_label { + __be32 entry; +}; + +enum batadv_packettype { + BATADV_IV_OGM = 0, + BATADV_BCAST = 1, + BATADV_CODED = 2, + BATADV_ELP = 3, + BATADV_OGM2 = 4, + BATADV_MCAST = 5, + BATADV_UNICAST = 64, + BATADV_UNICAST_FRAG = 65, + BATADV_UNICAST_4ADDR = 66, + BATADV_ICMP = 67, + BATADV_UNICAST_TVLV = 68, +}; + +struct batadv_unicast_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 ttvn; + __u8 dest[6]; +}; + +struct _flow_keys_digest_data { + __be16 n_proto; + u8 ip_proto; + u8 padding; + __be32 ports; + __be32 src; + __be32 dst; +}; + +enum { + ETHTOOL_A_CABLE_PAIR_A = 0, + ETHTOOL_A_CABLE_PAIR_B = 1, + ETHTOOL_A_CABLE_PAIR_C = 2, + ETHTOOL_A_CABLE_PAIR_D = 3, +}; + +enum { + ETHTOOL_A_CABLE_INF_SRC_UNSPEC = 0, + ETHTOOL_A_CABLE_INF_SRC_TDR = 1, + ETHTOOL_A_CABLE_INF_SRC_ALCD = 2, +}; + +enum { + ETHTOOL_A_CABLE_RESULT_UNSPEC = 0, + ETHTOOL_A_CABLE_RESULT_PAIR = 1, + ETHTOOL_A_CABLE_RESULT_CODE = 2, + ETHTOOL_A_CABLE_RESULT_SRC = 3, + __ETHTOOL_A_CABLE_RESULT_CNT = 4, + ETHTOOL_A_CABLE_RESULT_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0, + ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1, + ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2, + ETHTOOL_A_CABLE_FAULT_LENGTH_SRC = 3, + __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 4, + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2, +}; + +enum { + ETHTOOL_A_CABLE_NEST_UNSPEC = 0, + 
ETHTOOL_A_CABLE_NEST_RESULT = 1, + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2, + __ETHTOOL_A_CABLE_NEST_CNT = 3, + ETHTOOL_A_CABLE_NEST_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2, + ETHTOOL_A_CABLE_TEST_NTF_NEST = 3, + __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4, + ETHTOOL_A_CABLE_TEST_NTF_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2, + ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3, + ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4, + __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5, + ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4, +}; + +enum { + ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0, + ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1, + ETHTOOL_A_CABLE_AMPLITUDE_mV = 2, + __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3, + ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_PULSE_UNSPEC = 0, + ETHTOOL_A_CABLE_PULSE_mV = 1, + __ETHTOOL_A_CABLE_PULSE_CNT = 2, + ETHTOOL_A_CABLE_PULSE_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_STEP_UNSPEC = 0, + ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1, + ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2, + ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3, + __ETHTOOL_A_CABLE_STEP_CNT = 4, + ETHTOOL_A_CABLE_STEP_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TDR_NEST_STEP = 1, + ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2, + ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3, + __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4, + ETHTOOL_A_CABLE_TDR_NEST_MAX = 3, +}; + +enum nft_inner_type { + NFT_INNER_UNSPEC = 0, + NFT_INNER_VXLAN = 1, + NFT_INNER_GENEVE = 2, +}; + +enum nft_inner_flags { + NFT_INNER_HDRSIZE = 1, + NFT_INNER_LL = 2, + NFT_INNER_NH = 4, + NFT_INNER_TH = 8, +}; + +enum nft_inner_attributes { + NFTA_INNER_UNSPEC = 0, + NFTA_INNER_NUM = 1, + NFTA_INNER_TYPE = 2, + NFTA_INNER_FLAGS = 3, + NFTA_INNER_HDRSIZE = 4, + NFTA_INNER_EXPR = 5, + __NFTA_INNER_MAX = 6, +}; + +struct genevehdr { + u8 ver: 2; + u8 opt_len: 6; + u8 oam: 1; + u8 critical: 1; + u8 rsvd1: 6; + __be16 proto_type; + u8 vni[3]; + u8 rsvd2; + u8 options[0]; +}; + +struct __nft_expr { + const struct nft_expr_ops *ops; + union { + struct nft_payload payload; + struct nft_meta meta; + }; +}; + +enum { + NFT_INNER_EXPR_PAYLOAD = 0, + NFT_INNER_EXPR_META = 1, +}; + +struct nft_inner { + u8 flags; + u8 hdrsize; + u8 type; + u8 expr_type; + struct __nft_expr expr; +}; + +struct nhmsg { + unsigned char nh_family; + unsigned char nh_scope; + unsigned char nh_protocol; + unsigned char resvd; + unsigned int nh_flags; +}; + +struct nexthop_grp { + __u32 id; + __u8 weight; + __u8 weight_high; + __u16 resvd2; +}; + +enum { + NEXTHOP_GRP_TYPE_MPATH = 0, + NEXTHOP_GRP_TYPE_RES = 1, + __NEXTHOP_GRP_TYPE_MAX = 2, +}; + +enum { + NHA_UNSPEC = 0, + NHA_ID = 1, + NHA_GROUP = 2, + NHA_GROUP_TYPE = 3, + NHA_BLACKHOLE = 4, + NHA_OIF = 5, + NHA_GATEWAY = 6, + NHA_ENCAP_TYPE = 7, + NHA_ENCAP = 8, + NHA_GROUPS = 9, + NHA_MASTER = 10, + NHA_FDB = 11, + NHA_RES_GROUP = 12, + NHA_RES_BUCKET = 13, + NHA_OP_FLAGS = 14, + NHA_GROUP_STATS = 15, + NHA_HW_STATS_ENABLE = 16, + NHA_HW_STATS_USED = 17, + __NHA_MAX = 18, +}; + +enum { + NHA_RES_GROUP_UNSPEC = 0, + NHA_RES_GROUP_PAD = 0, + NHA_RES_GROUP_BUCKETS = 1, + NHA_RES_GROUP_IDLE_TIMER = 2, + NHA_RES_GROUP_UNBALANCED_TIMER = 3, + NHA_RES_GROUP_UNBALANCED_TIME = 4, + __NHA_RES_GROUP_MAX = 5, +}; + +enum { + NHA_RES_BUCKET_UNSPEC = 0, + NHA_RES_BUCKET_PAD = 0, + NHA_RES_BUCKET_INDEX = 1, + NHA_RES_BUCKET_IDLE_TIME = 2, + NHA_RES_BUCKET_NH_ID = 3, + 
__NHA_RES_BUCKET_MAX = 4, +}; + +enum { + NHA_GROUP_STATS_UNSPEC = 0, + NHA_GROUP_STATS_ENTRY = 1, + __NHA_GROUP_STATS_MAX = 2, +}; + +enum { + NHA_GROUP_STATS_ENTRY_UNSPEC = 0, + NHA_GROUP_STATS_ENTRY_ID = 1, + NHA_GROUP_STATS_ENTRY_PACKETS = 2, + NHA_GROUP_STATS_ENTRY_PACKETS_HW = 3, + __NHA_GROUP_STATS_ENTRY_MAX = 4, +}; + +struct nh_config { + u32 nh_id; + u8 nh_family; + u8 nh_protocol; + u8 nh_blackhole; + u8 nh_fdb; + u32 nh_flags; + int nh_ifindex; + struct net_device *dev; + union { + __be32 ipv4; + struct in6_addr ipv6; + } gw; + struct nlattr *nh_grp; + u16 nh_grp_type; + u16 nh_grp_res_num_buckets; + long unsigned int nh_grp_res_idle_timer; + long unsigned int nh_grp_res_unbalanced_timer; + bool nh_grp_res_has_num_buckets; + bool nh_grp_res_has_idle_timer; + bool nh_grp_res_has_unbalanced_timer; + bool nh_hw_stats; + struct nlattr *nh_encap; + u16 nh_encap_type; + u32 nlflags; + struct nl_info nlinfo; +}; + +enum nexthop_event_type { + NEXTHOP_EVENT_DEL = 0, + NEXTHOP_EVENT_REPLACE = 1, + NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE = 2, + NEXTHOP_EVENT_BUCKET_REPLACE = 3, + NEXTHOP_EVENT_HW_STATS_REPORT_DELTA = 4, +}; + +enum nh_notifier_info_type { + NH_NOTIFIER_INFO_TYPE_SINGLE = 0, + NH_NOTIFIER_INFO_TYPE_GRP = 1, + NH_NOTIFIER_INFO_TYPE_RES_TABLE = 2, + NH_NOTIFIER_INFO_TYPE_RES_BUCKET = 3, + NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS = 4, +}; + +struct nh_notifier_single_info { + struct net_device *dev; + u8 gw_family; + union { + __be32 ipv4; + struct in6_addr ipv6; + }; + u32 id; + u8 is_reject: 1; + u8 is_fdb: 1; + u8 has_encap: 1; +}; + +struct nh_notifier_grp_entry_info { + u16 weight; + struct nh_notifier_single_info nh; +}; + +struct nh_notifier_grp_info { + u16 num_nh; + bool is_fdb; + bool hw_stats; + struct nh_notifier_grp_entry_info nh_entries[0]; +}; + +struct nh_notifier_res_bucket_info { + u16 bucket_index; + unsigned int idle_timer_ms; + bool force; + struct nh_notifier_single_info old_nh; + struct nh_notifier_single_info new_nh; +}; + +struct nh_notifier_res_table_info { + u16 num_nh_buckets; + bool hw_stats; + struct nh_notifier_single_info nhs[0]; +}; + +struct nh_notifier_grp_hw_stats_entry_info { + u32 id; + u64 packets; +}; + +struct nh_notifier_grp_hw_stats_info { + u16 num_nh; + bool hw_stats_used; + struct nh_notifier_grp_hw_stats_entry_info stats[0]; +}; + +struct nh_notifier_info { + struct net *net; + struct netlink_ext_ack *extack; + u32 id; + enum nh_notifier_info_type type; + union { + struct nh_notifier_single_info *nh; + struct nh_notifier_grp_info *nh_grp; + struct nh_notifier_res_table_info *nh_res_table; + struct nh_notifier_res_bucket_info *nh_res_bucket; + struct nh_notifier_grp_hw_stats_info *nh_grp_hw_stats; + }; +}; + +struct nh_dump_filter { + u32 nh_id; + int dev_idx; + int master_idx; + bool group_filter; + bool fdb_filter; + u32 res_bucket_nh_id; + u32 op_flags; +}; + +struct rtm_dump_nh_ctx { + u32 idx; +}; + +struct rtm_dump_res_bucket_ctx { + struct rtm_dump_nh_ctx nh; + u16 bucket_index; +}; + +struct rtm_dump_nexthop_bucket_data { + struct rtm_dump_res_bucket_ctx *ctx; + struct nh_dump_filter filter; +}; + +enum rtmutex_chainwalk { + RT_MUTEX_MIN_CHAINWALK = 0, + RT_MUTEX_FULL_CHAINWALK = 1, +}; + +struct kexec_load_limit { + struct mutex mutex; + int limit; +}; + +enum { + TRACE_NOP_OPT_ACCEPT = 1, + TRACE_NOP_OPT_REFUSE = 2, +}; + +struct bpf_bloom_filter { + struct bpf_map map; + u32 bitset_mask; + u32 hash_seed; + u32 nr_hash_funcs; + long unsigned int bitset[0]; +}; + +typedef u64 (*btf_bpf_cgrp_storage_get)(struct bpf_map *, struct 
cgroup *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_cgrp_storage_delete)(struct bpf_map *, struct cgroup *); + +enum pgt_entry { + NORMAL_PMD = 0, + HPAGE_PMD = 1, + NORMAL_PUD = 2, + HPAGE_PUD = 3, +}; + +struct proc_fs_opts { + int flag; + const char *str; +}; + +struct ext4_getfsmap_info { + struct ext4_fsmap_head *gfi_head; + ext4_fsmap_format_t gfi_formatter; + void *gfi_format_arg; + ext4_fsblk_t gfi_next_fsblk; + u32 gfi_dev; + ext4_group_t gfi_agno; + struct ext4_fsmap gfi_low; + struct ext4_fsmap gfi_high; + struct ext4_fsmap gfi_lastfree; + struct list_head gfi_meta_list; + bool gfi_last; +}; + +struct ext4_getfsmap_dev { + int (*gfd_fn)(struct super_block *, struct ext4_fsmap *, struct ext4_getfsmap_info *); + u32 gfd_dev; +}; + +struct tracefs_dir_ops { + int (*mkdir)(const char *); + int (*rmdir)(const char *); +}; + +struct tracefs_fs_info { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +enum { + Opt_uid___6 = 0, + Opt_gid___7 = 1, + Opt_mode___6 = 2, +}; + +struct btrfs_dir_log_item { + __le64 end; +}; + +enum { + LOG_INODE_ALL = 0, + LOG_INODE_EXISTS = 1, +}; + +enum { + LOG_WALK_PIN_ONLY = 0, + LOG_WALK_REPLAY_INODES = 1, + LOG_WALK_REPLAY_DIR_INDEX = 2, + LOG_WALK_REPLAY_ALL = 3, +}; + +struct walk_control___2 { + int free; + int pin; + int stage; + bool ignore_cur_inode; + struct btrfs_root *replay_dest; + struct btrfs_trans_handle *trans; + int (*process_func)(struct btrfs_root *, struct extent_buffer *, struct walk_control___2 *, u64, int); +}; + +struct btrfs_dir_list { + u64 ino; + struct list_head list; +}; + +struct btrfs_ino_list { + u64 ino; + u64 parent; + struct list_head list; +}; + +struct btrfs_dio_data { + ssize_t submitted; + struct extent_changeset *data_reserved; + struct btrfs_ordered_extent *ordered; + bool data_space_reserved; + bool nocow_done; +}; + +struct btrfs_dio_private { + u64 file_offset; + u32 bytes; + struct btrfs_bio bbio; +}; + +struct cryptomgr_param { + struct rtattr *tb[34]; + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } attrs[32]; + char template[128]; + struct crypto_larval *larval; + u32 otype; + u32 omask; +}; + +struct crypto_test_param { + char driver[128]; + char alg[128]; + u32 type; +}; + +struct blk_mq_hw_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_hw_ctx *, char *); +}; + +enum { + MILLION = 1000000, + MIN_PERIOD = 1000, + MAX_PERIOD = 1000000, + MARGIN_MIN_PCT = 10, + MARGIN_LOW_PCT = 20, + MARGIN_TARGET_PCT = 50, + INUSE_ADJ_STEP_PCT = 25, + TIMER_SLACK_PCT = 1, + WEIGHT_ONE = 65536, +}; + +enum { + VTIME_PER_SEC_SHIFT = 37ULL, + VTIME_PER_SEC = 137438953472ULL, + VTIME_PER_USEC = 137438ULL, + VTIME_PER_NSEC = 137ULL, + VRATE_MIN_PPM = 10000ULL, + VRATE_MAX_PPM = 100000000ULL, + VRATE_MIN = 1374ULL, + VRATE_CLAMP_ADJ_PCT = 4ULL, + AUTOP_CYCLE_NSEC = 10000000000ULL, +}; + +enum { + RQ_WAIT_BUSY_PCT = 5, + UNBUSY_THR_PCT = 75, + MIN_DELAY_THR_PCT = 500, + MAX_DELAY_THR_PCT = 25000, + MIN_DELAY = 250, + MAX_DELAY = 250000, + DFGV_USAGE_PCT = 50, + DFGV_PERIOD = 100000, + MAX_LAGGING_PERIODS = 10, + IOC_PAGE_SHIFT = 12, + IOC_PAGE_SIZE = 4096, + IOC_SECT_TO_PAGE_SHIFT = 3, + LCOEF_RANDIO_PAGES = 4096, +}; + +enum ioc_running { + IOC_IDLE = 0, + IOC_RUNNING = 1, + IOC_STOP = 2, +}; + +enum { + QOS_ENABLE = 0, + QOS_CTRL = 1, + NR_QOS_CTRL_PARAMS = 2, +}; + +enum { + QOS_RPPM = 0, + QOS_RLAT = 1, + QOS_WPPM = 2, + QOS_WLAT = 3, + QOS_MIN = 4, + QOS_MAX = 5, + NR_QOS_PARAMS = 
6, +}; + +enum { + COST_CTRL = 0, + COST_MODEL = 1, + NR_COST_CTRL_PARAMS = 2, +}; + +enum { + I_LCOEF_RBPS = 0, + I_LCOEF_RSEQIOPS = 1, + I_LCOEF_RRANDIOPS = 2, + I_LCOEF_WBPS = 3, + I_LCOEF_WSEQIOPS = 4, + I_LCOEF_WRANDIOPS = 5, + NR_I_LCOEFS = 6, +}; + +enum { + LCOEF_RPAGE = 0, + LCOEF_RSEQIO = 1, + LCOEF_RRANDIO = 2, + LCOEF_WPAGE = 3, + LCOEF_WSEQIO = 4, + LCOEF_WRANDIO = 5, + NR_LCOEFS = 6, +}; + +enum { + AUTOP_INVALID = 0, + AUTOP_HDD = 1, + AUTOP_SSD_QD1 = 2, + AUTOP_SSD_DFL = 3, + AUTOP_SSD_FAST = 4, +}; + +struct ioc_params { + u32 qos[6]; + u64 i_lcoefs[6]; + u64 lcoefs[6]; + u32 too_fast_vrate_pct; + u32 too_slow_vrate_pct; +}; + +struct ioc_margins { + s64 min; + s64 low; + s64 target; +}; + +struct ioc_missed { + local_t nr_met; + local_t nr_missed; + u32 last_met; + u32 last_missed; +}; + +struct ioc_pcpu_stat { + struct ioc_missed missed[2]; + local64_t rq_wait_ns; + u64 last_rq_wait_ns; +}; + +struct ioc { + struct rq_qos rqos; + bool enabled; + struct ioc_params params; + struct ioc_margins margins; + u32 period_us; + u32 timer_slack_ns; + u64 vrate_min; + u64 vrate_max; + spinlock_t lock; + struct timer_list timer; + struct list_head active_iocgs; + struct ioc_pcpu_stat *pcpu_stat; + enum ioc_running running; + atomic64_t vtime_rate; + u64 vtime_base_rate; + s64 vtime_err; + seqcount_spinlock_t period_seqcount; + u64 period_at; + u64 period_at_vtime; + atomic64_t cur_period; + int busy_level; + bool weights_updated; + atomic_t hweight_gen; + u64 dfgv_period_at; + u64 dfgv_period_rem; + u64 dfgv_usage_us_sum; + u64 autop_too_fast_at; + u64 autop_too_slow_at; + int autop_idx; + bool user_qos_params: 1; + bool user_cost_model: 1; +}; + +struct iocg_pcpu_stat { + local64_t abs_vusage; +}; + +struct iocg_stat { + u64 usage_us; + u64 wait_us; + u64 indebt_us; + u64 indelay_us; +}; + +struct ioc_gq { + struct blkg_policy_data pd; + struct ioc *ioc; + u32 cfg_weight; + u32 weight; + u32 active; + u32 inuse; + u32 last_inuse; + s64 saved_margin; + sector_t cursor; + atomic64_t vtime; + atomic64_t done_vtime; + u64 abs_vdebt; + u64 delay; + u64 delay_at; + atomic64_t active_period; + struct list_head active_list; + u64 child_active_sum; + u64 child_inuse_sum; + u64 child_adjusted_sum; + int hweight_gen; + u32 hweight_active; + u32 hweight_inuse; + u32 hweight_donating; + u32 hweight_after_donation; + struct list_head walk_list; + struct list_head surplus_list; + struct wait_queue_head waitq; + struct hrtimer waitq_timer; + u64 activated_at; + struct iocg_pcpu_stat *pcpu_stat; + struct iocg_stat stat; + struct iocg_stat last_stat; + u64 last_stat_abs_vusage; + u64 usage_delta_us; + u64 wait_since; + u64 indebt_since; + u64 indelay_since; + int level; + struct ioc_gq *ancestors[0]; +}; + +struct ioc_cgrp { + struct blkcg_policy_data cpd; + unsigned int dfl_weight; +}; + +struct ioc_now { + u64 now_ns; + u64 now; + u64 vnow; +}; + +struct iocg_wait { + struct wait_queue_entry wait; + struct bio *bio; + u64 abs_cost; + bool committed; +}; + +struct iocg_wake_ctx { + struct ioc_gq *iocg; + u32 hw_inuse; + s64 vbudget; +}; + +struct trace_event_raw_iocost_iocg_state { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u64 vrate; + u64 last_period; + u64 cur_period; + u64 vtime; + u32 weight; + u32 inuse; + u64 hweight_active; + u64 hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocg_inuse_update { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u32 old_inuse; + u32 new_inuse; + 
u64 old_hweight_inuse; + u64 new_hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocost_ioc_vrate_adj { + struct trace_entry ent; + u32 __data_loc_devname; + u64 old_vrate; + u64 new_vrate; + int busy_level; + u32 read_missed_ppm; + u32 write_missed_ppm; + u32 rq_wait_pct; + int nr_lagging; + int nr_shortages; + char __data[0]; +}; + +struct trace_event_raw_iocost_iocg_forgive_debt { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u32 usage_pct; + u64 old_debt; + u64 new_debt; + u64 old_delay; + u64 new_delay; + char __data[0]; +}; + +struct trace_event_data_offsets_iocost_iocg_state { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +struct trace_event_data_offsets_iocg_inuse_update { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +struct trace_event_data_offsets_iocost_ioc_vrate_adj { + u32 devname; + const void *devname_ptr_; +}; + +struct trace_event_data_offsets_iocost_iocg_forgive_debt { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +typedef void (*btf_trace_iocost_iocg_activate)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_iocg_idle)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_inuse_shortage)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_transfer)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_adjust)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_ioc_vrate_adj)(void *, struct ioc *, u64, u32 *, u32, int, int); + +typedef void (*btf_trace_iocost_iocg_forgive_debt)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u64, u64, u64, u64); + +typedef void sha256_block_fn(struct sha256_state *, const u8 *, int); + +struct trace_event_raw_hw_pressure_update { + struct trace_entry ent; + long unsigned int hw_pressure; + int cpu; + char __data[0]; +}; + +struct trace_event_data_offsets_hw_pressure_update {}; + +typedef void (*btf_trace_hw_pressure_update)(void *, int, long unsigned int); + +enum rtl_dash_type { + RTL_DASH_NONE = 0, + RTL_DASH_DP = 1, + RTL_DASH_EP = 2, +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +struct r8169_led_classdev; + +struct TxDesc; + +struct RxDesc; + +struct rtl8169_counters; + +struct rtl8169_private { + void *mmio_addr; + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; + u32 cur_tx; + u32 dirty_tx; + struct TxDesc *TxDescArray; + struct RxDesc *RxDescArray; + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[256]; + struct ring_info tx_skb[256]; + u16 cp_cmd; + u16 tx_lpi_timer; + u32 irq_mask; + int irq; + struct clk *clk; + struct { + long unsigned int flags[1]; + struct work_struct work; + } wk; + raw_spinlock_t mac_ocp_lock; + struct mutex led_lock; + unsigned int supports_gmii: 1; + unsigned int aspm_manageable: 1; + unsigned int dash_enabled: 1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters 
*counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + const char *fw_name; + struct rtl_fw___2 *rtl_fw; + struct r8169_led_classdev *leds; + u32 ocp_base; +}; + +enum rtl_registers { + MAC0 = 0, + MAC4 = 4, + MAR0 = 8, + CounterAddrLow = 16, + CounterAddrHigh = 20, + TxDescStartAddrLow = 32, + TxDescStartAddrHigh = 36, + TxHDescStartAddrLow = 40, + TxHDescStartAddrHigh = 44, + FLASH = 48, + ERSR = 54, + ChipCmd = 55, + TxPoll = 56, + IntrMask = 60, + IntrStatus = 62, + TxConfig = 64, + RxConfig = 68, + Cfg9346 = 80, + Config0 = 81, + Config1 = 82, + Config2 = 83, + Config3 = 84, + Config4 = 85, + Config5 = 86, + PHYAR = 96, + PHYstatus = 108, + RxMaxSize = 218, + CPlusCmd = 224, + IntrMitigate = 226, + RxDescAddrLow = 228, + RxDescAddrHigh = 232, + EarlyTxThres = 236, + MaxTxPacketSize = 236, + FuncEvent = 240, + FuncEventMask = 244, + FuncPresetState = 248, + IBCR0 = 248, + IBCR2 = 249, + IBIMR0 = 250, + IBISR0 = 251, + FuncForceEvent = 252, +}; + +enum rtl8168_8101_registers { + CSIDR = 100, + CSIAR = 104, + PMCH = 111, + EPHYAR = 128, + DLLPR = 208, + DBG_REG = 209, + TWSI = 210, + MCU = 211, + EFUSEAR = 220, + MISC_1 = 242, +}; + +enum rtl8168_registers { + LED_CTRL = 24, + LED_FREQ = 26, + EEE_LED = 27, + ERIDR = 112, + ERIAR = 116, + EPHY_RXER_NUM = 124, + OCPDR = 176, + OCPAR = 180, + GPHY_OCP = 184, + RDSAR1 = 208, + MISC = 240, +}; + +enum rtl8125_registers { + LEDSEL0 = 24, + INT_CFG0_8125 = 52, + IntrMask_8125 = 56, + IntrStatus_8125 = 60, + INT_CFG1_8125 = 122, + LEDSEL2 = 132, + LEDSEL1 = 134, + TxPoll_8125 = 144, + LEDSEL3 = 150, + MAC0_BKP = 6624, + RSS_CTRL_8125 = 17664, + Q_NUM_CTRL_8125 = 18432, + EEE_TXIDLE_TIMER_8125 = 24648, +}; + +enum rtl_register_content___2 { + SYSErr = 32768, + PCSTimeout = 16384, + SWInt = 256, + TxDescUnavail = 128, + RxFIFOOver = 64, + LinkChg = 32, + RxOverflow = 16, + TxErr = 8, + TxOK = 4, + RxErr = 2, + RxOK = 1, + RxRWT = 4194304, + RxRES = 2097152, + RxRUNT = 1048576, + RxCRC = 524288, + StopReq = 128, + CmdReset = 16, + CmdRxEnb = 8, + CmdTxEnb = 4, + RxBufEmpty = 1, + HPQ = 128, + NPQ = 64, + FSWInt = 1, + Cfg9346_Lock = 0, + Cfg9346_Unlock = 192, + AcceptErr = 32, + AcceptRunt = 16, + AcceptBroadcast = 8, + AcceptMulticast = 4, + AcceptMyPhys = 2, + AcceptAllPhys = 1, + TxInterFrameGapShift = 24, + TxDMAShift = 8, + LEDS1 = 128, + LEDS0 = 64, + Speed_down = 16, + MEMMAP = 8, + IOMAP = 4, + VPD = 2, + PMEnable = 1, + ClkReqEn = 128, + MSIEnable = 32, + PCI_Clock_66MHz = 1, + PCI_Clock_33MHz = 0, + MagicPacket = 32, + LinkUp = 16, + Jumbo_En0 = 4, + Rdy_to_L23 = 2, + Beacon_en = 1, + Jumbo_En1 = 2, + BWF = 64, + MWF = 32, + UWF = 16, + Spi_en = 8, + LanWake = 2, + PMEStatus = 1, + ASPM_en = 1, + EnableBist = 32768, + Mac_dbgo_oe = 16384, + EnAnaPLL = 16384, + Normal_mode = 8192, + Force_half_dup = 4096, + Force_rxflow_en = 2048, + Force_txflow_en = 1024, + Cxpl_dbg_sel = 512, + ASF = 256, + PktCntrDisable = 128, + Mac_dbgo_sel = 28, + RxVlan = 64, + RxChkSum = 32, + PCIDAC = 16, + PCIMulRW = 8, + TBI_Enable = 128, + TxFlowCtrl = 64, + RxFlowCtrl = 32, + _1000bpsF = 16, + _100bps___2 = 8, + _10bps___2 = 4, + LinkStatus = 2, + FullDup = 1, + CounterReset = 1, + CounterDump = 8, + MagicPacket_v2 = 65536, +}; + +enum rtl_desc_bit { + DescOwn = -2147483648, + RingEnd = 1073741824, + FirstFrag = 536870912, + LastFrag = 268435456, +}; + +enum rtl_tx_desc_bit { + TD_LSO = 134217728, + TxVlanTag = 131072, +}; + +enum rtl_tx_desc_bit_0 { + TD0_TCP_CS = 65536, + TD0_UDP_CS = 131072, + TD0_IP_CS = 262144, +}; + +enum 
rtl_tx_desc_bit_1 { + TD1_GTSENV4 = 67108864, + TD1_GTSENV6 = 33554432, + TD1_IPv6_CS = 268435456, + TD1_IPv4_CS = 536870912, + TD1_TCP_CS = 1073741824, + TD1_UDP_CS = -2147483648, +}; + +enum rtl_rx_desc_bit { + PID1 = 262144, + PID0 = 131072, + IPFail = 65536, + UDPFail = 32768, + TCPFail = 16384, + RxVlanTag = 65536, +}; + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underrun; + __le64 tx_octets; + __le64 rx_octets; + __le64 rx_multicast64; + __le64 tx_unicast64; + __le64 tx_broadcast64; + __le64 tx_multicast64; + __le32 tx_pause_on; + __le32 tx_pause_off; + __le32 tx_pause_all; + __le32 tx_deferred; + __le32 tx_late_collision; + __le32 tx_all_collision; + __le32 tx_aborted32; + __le32 align_errors32; + __le32 rx_frame_too_long; + __le32 rx_runt; + __le32 rx_pause_on; + __le32 rx_pause_off; + __le32 rx_pause_all; + __le32 rx_unknown_opcode; + __le32 rx_mac_error; + __le32 tx_underrun32; + __le32 rx_mac_missed; + __le32 rx_tcam_dropped; + __le32 tdu; + __le32 rdu; +}; + +enum rtl_flag { + RTL_FLAG_TASK_RESET_PENDING = 0, + RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE = 1, + RTL_FLAG_TASK_TX_TIMEOUT = 2, + RTL_FLAG_MAX = 3, +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *); + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; +}; + +struct net_device_devres { + struct net_device *ndev; +}; + +struct flow_dissector_key_ports_range { + union { + struct flow_dissector_key_ports tp; + struct { + struct flow_dissector_key_ports tp_min; + struct flow_dissector_key_ports tp_max; + }; + }; +}; + +struct flow_match_meta { + struct flow_dissector_key_meta *key; + struct flow_dissector_key_meta *mask; +}; + +struct flow_match_arp { + struct flow_dissector_key_arp *key; + struct flow_dissector_key_arp *mask; +}; + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key; + struct flow_dissector_key_ipv4_addrs *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key; + struct flow_dissector_key_ipv6_addrs *mask; +}; + +struct flow_match_ip { + struct flow_dissector_key_ip *key; + struct flow_dissector_key_ip *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key; + struct flow_dissector_key_ports *mask; +}; + +struct flow_match_ports_range { + struct flow_dissector_key_ports_range *key; + struct flow_dissector_key_ports_range *mask; +}; + +struct flow_match_icmp { + struct flow_dissector_key_icmp *key; + struct flow_dissector_key_icmp *mask; +}; + +struct flow_match_tcp { + struct flow_dissector_key_tcp *key; + struct flow_dissector_key_tcp *mask; +}; + +struct flow_match_ipsec { + struct flow_dissector_key_ipsec *key; + struct flow_dissector_key_ipsec *mask; +}; + +struct flow_match_mpls { + struct flow_dissector_key_mpls *key; + struct flow_dissector_key_mpls *mask; +}; + +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key; + struct flow_dissector_key_keyid *mask; 
+}; + +struct flow_match_enc_opts { + struct flow_dissector_key_enc_opts *key; + struct flow_dissector_key_enc_opts *mask; +}; + +struct flow_match_ct { + struct flow_dissector_key_ct *key; + struct flow_dissector_key_ct *mask; +}; + +struct flow_match_pppoe { + struct flow_dissector_key_pppoe *key; + struct flow_dissector_key_pppoe *mask; +}; + +struct flow_match_l2tpv3 { + struct flow_dissector_key_l2tpv3 *key; + struct flow_dissector_key_l2tpv3 *mask; +}; + +enum offload_act_command { + FLOW_ACT_REPLACE = 0, + FLOW_ACT_DESTROY = 1, + FLOW_ACT_STATS = 2, +}; + +struct flow_offload_action { + struct netlink_ext_ack *extack; + enum offload_act_command command; + enum flow_action_id id; + u32 index; + long unsigned int cookie; + struct flow_stats stats; + struct flow_action action; +}; + +typedef int flow_indr_block_bind_cb_t(struct net_device *, struct Qdisc *, void *, enum tc_setup_type, void *, void *, void (*)(struct flow_block_cb *)); + +struct flow_indr_dev { + struct list_head list; + flow_indr_block_bind_cb_t *cb; + void *cb_priv; + refcount_t refcnt; +}; + +struct flow_indir_dev_info { + void *data; + struct net_device *dev; + struct Qdisc *sch; + enum tc_setup_type type; + void (*cleanup)(struct flow_block_cb *); + struct list_head list; + enum flow_block_command command; + enum flow_block_binder_type binder_type; + struct list_head *cb_list; +}; + +struct llc_pdu_sn { + u8 dsap; + u8 ssap; + u8 ctrl_1; + u8 ctrl_2; +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT = 1, + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE = 2, + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT = 3, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE = 1, + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES = 2, + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY = 3, + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT = 4, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE = 1, + __ETHTOOL_A_TUNNEL_UDP_CNT = 2, + ETHTOOL_A_TUNNEL_UDP_MAX = 1, +}; + +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN = 1, + UDP_TUNNEL_TYPE_GENEVE = 2, + UDP_TUNNEL_TYPE_VXLAN_GPE = 4, +}; + +enum udp_tunnel_nic_info_flags { + UDP_TUNNEL_NIC_INFO_MAY_SLEEP = 1, + UDP_TUNNEL_NIC_INFO_OPEN_ONLY = 2, + UDP_TUNNEL_NIC_INFO_IPV4_ONLY = 4, + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = 8, +}; + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + long unsigned int ifindex; +}; + +enum nft_ct_keys { + NFT_CT_STATE = 0, + NFT_CT_DIRECTION = 1, + NFT_CT_STATUS = 2, + NFT_CT_MARK = 3, + NFT_CT_SECMARK = 4, + NFT_CT_EXPIRATION = 5, + NFT_CT_HELPER = 6, + NFT_CT_L3PROTOCOL = 7, + NFT_CT_SRC = 8, + NFT_CT_DST = 9, + NFT_CT_PROTOCOL = 10, + NFT_CT_PROTO_SRC = 11, + NFT_CT_PROTO_DST = 12, + NFT_CT_LABELS = 13, + NFT_CT_PKTS = 14, + NFT_CT_BYTES = 15, + NFT_CT_AVGPKT = 16, + NFT_CT_ZONE = 17, + NFT_CT_EVENTMASK = 18, + NFT_CT_SRC_IP = 19, + NFT_CT_DST_IP = 20, + NFT_CT_SRC_IP6 = 21, + NFT_CT_DST_IP6 = 22, + NFT_CT_ID = 23, + __NFT_CT_MAX = 24, +}; + +enum nft_ct_attributes { + NFTA_CT_UNSPEC = 0, + NFTA_CT_DREG = 1, + NFTA_CT_KEY = 2, + NFTA_CT_DIRECTION = 3, + NFTA_CT_SREG = 4, + __NFTA_CT_MAX = 5, +}; + +enum nft_ct_helper_attributes { + NFTA_CT_HELPER_UNSPEC = 0, + NFTA_CT_HELPER_NAME = 1, + NFTA_CT_HELPER_L3PROTO = 2, + NFTA_CT_HELPER_L4PROTO = 3, + __NFTA_CT_HELPER_MAX = 4, +}; + +enum nft_ct_expectation_attributes { + NFTA_CT_EXPECT_UNSPEC = 0, + NFTA_CT_EXPECT_L3PROTO = 1, + NFTA_CT_EXPECT_L4PROTO = 2, + 
NFTA_CT_EXPECT_DPORT = 3, + NFTA_CT_EXPECT_TIMEOUT = 4, + NFTA_CT_EXPECT_SIZE = 5, + __NFTA_CT_EXPECT_MAX = 6, +}; + +struct nft_ct { + enum nft_ct_keys key: 8; + enum ip_conntrack_dir dir: 8; + u8 len; + union { + u8 dreg; + u8 sreg; + }; +}; + +struct nft_ct_helper_obj { + struct nf_conntrack_helper *helper4; + struct nf_conntrack_helper *helper6; + u8 l4proto; +}; + +struct nft_ct_expect_obj { + u16 l3num; + __be16 dport; + u8 l4proto; + u8 size; + u32 timeout; +}; + +enum ipi_msg_type { + IPI_RESCHEDULE = 0, + IPI_CALL_FUNC = 1, + IPI_CPU_STOP = 2, + IPI_CPU_STOP_NMI = 3, + IPI_TIMER = 4, + IPI_IRQ_WORK = 5, + NR_IPI = 6, + IPI_CPU_BACKTRACE = 6, + IPI_KGDB_ROUNDUP = 7, + MAX_IPI = 8, +}; + +typedef u64 hpa_t; + +struct gfn_to_hva_cache { + u64 generation; + gpa_t gpa; + long unsigned int hva; + long unsigned int len; + struct kvm_memory_slot *memslot; +}; + +struct kvm_userspace_memory_region { + __u32 slot; + __u32 flags; + __u64 guest_phys_addr; + __u64 memory_size; + __u64 userspace_addr; +}; + +struct kvm_userspace_memory_region2 { + __u32 slot; + __u32 flags; + __u64 guest_phys_addr; + __u64 memory_size; + __u64 userspace_addr; + __u64 guest_memfd_offset; + __u32 guest_memfd; + __u32 pad1; + __u64 pad2[14]; +}; + +struct kvm_dirty_log { + __u32 slot; + __u32 padding1; + union { + void *dirty_bitmap; + __u64 padding2; + }; +}; + +struct kvm_clear_dirty_log { + __u32 slot; + __u32 num_pages; + __u64 first_page; + union { + void *dirty_bitmap; + __u64 padding2; + }; +}; + +struct kvm_signal_mask { + __u32 len; + __u8 sigset[0]; +}; + +struct kvm_irq_routing { + __u32 nr; + __u32 flags; + struct kvm_irq_routing_entry entries[0]; +}; + +struct kvm_create_device { + __u32 type; + __u32 fd; + __u32 flags; +}; + +struct kvm_host_map { + struct page *pinned_page; + struct page *page; + void *hva; + kvm_pfn_t pfn; + kvm_pfn_t gfn; + bool writable; +}; + +struct kvm_memslot_iter { + struct kvm_memslots *slots; + struct rb_node *node; + struct kvm_memory_slot *slot; +}; + +struct kvm_follow_pfn { + const struct kvm_memory_slot *slot; + const gfn_t gfn; + long unsigned int hva; + unsigned int flags; + bool pin; + bool *map_writable; + struct page **refcounted_page; +}; + +struct trace_event_raw_kvm_userspace_exit { + struct trace_entry ent; + __u32 reason; + int errno; + char __data[0]; +}; + +struct trace_event_raw_kvm_vcpu_wakeup { + struct trace_entry ent; + __u64 ns; + bool waited; + bool valid; + char __data[0]; +}; + +struct trace_event_raw_kvm_set_irq { + struct trace_entry ent; + unsigned int gsi; + int level; + int irq_source_id; + char __data[0]; +}; + +struct trace_event_raw_kvm_ack_irq { + struct trace_entry ent; + unsigned int irqchip; + unsigned int pin; + char __data[0]; +}; + +struct trace_event_raw_kvm_mmio { + struct trace_entry ent; + u32 type; + u32 len; + u64 gpa; + u64 val; + char __data[0]; +}; + +struct trace_event_raw_kvm_iocsr { + struct trace_entry ent; + u32 type; + u32 len; + u64 gpa; + u64 val; + char __data[0]; +}; + +struct trace_event_raw_kvm_fpu { + struct trace_entry ent; + u32 load; + char __data[0]; +}; + +struct trace_event_raw_kvm_halt_poll_ns { + struct trace_entry ent; + bool grow; + unsigned int vcpu_id; + unsigned int new; + unsigned int old; + char __data[0]; +}; + +struct trace_event_raw_kvm_dirty_ring_push { + struct trace_entry ent; + int index; + u32 dirty_index; + u32 reset_index; + u32 slot; + u64 offset; + char __data[0]; +}; + +struct trace_event_raw_kvm_dirty_ring_reset { + struct trace_entry ent; + int index; + u32 dirty_index; + u32 
reset_index; + char __data[0]; +}; + +struct trace_event_raw_kvm_dirty_ring_exit { + struct trace_entry ent; + int vcpu_id; + char __data[0]; +}; + +struct trace_event_raw_kvm_unmap_hva_range { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_kvm_age_hva { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_kvm_test_age_hva { + struct trace_entry ent; + long unsigned int hva; + char __data[0]; +}; + +struct trace_event_data_offsets_kvm_userspace_exit {}; + +struct trace_event_data_offsets_kvm_vcpu_wakeup {}; + +struct trace_event_data_offsets_kvm_set_irq {}; + +struct trace_event_data_offsets_kvm_ack_irq {}; + +struct trace_event_data_offsets_kvm_mmio {}; + +struct trace_event_data_offsets_kvm_iocsr {}; + +struct trace_event_data_offsets_kvm_fpu {}; + +struct trace_event_data_offsets_kvm_halt_poll_ns {}; + +struct trace_event_data_offsets_kvm_dirty_ring_push {}; + +struct trace_event_data_offsets_kvm_dirty_ring_reset {}; + +struct trace_event_data_offsets_kvm_dirty_ring_exit {}; + +struct trace_event_data_offsets_kvm_unmap_hva_range {}; + +struct trace_event_data_offsets_kvm_age_hva {}; + +struct trace_event_data_offsets_kvm_test_age_hva {}; + +typedef void (*btf_trace_kvm_userspace_exit)(void *, __u32, int); + +typedef void (*btf_trace_kvm_vcpu_wakeup)(void *, __u64, bool, bool); + +typedef void (*btf_trace_kvm_set_irq)(void *, unsigned int, int, int); + +typedef void (*btf_trace_kvm_ack_irq)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_kvm_mmio)(void *, int, int, u64, void *); + +typedef void (*btf_trace_kvm_iocsr)(void *, int, int, u64, void *); + +typedef void (*btf_trace_kvm_fpu)(void *, int); + +typedef void (*btf_trace_kvm_halt_poll_ns)(void *, bool, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_kvm_dirty_ring_push)(void *, struct kvm_dirty_ring *, u32, u64); + +typedef void (*btf_trace_kvm_dirty_ring_reset)(void *, struct kvm_dirty_ring *); + +typedef void (*btf_trace_kvm_dirty_ring_exit)(void *, struct kvm_vcpu *); + +typedef void (*btf_trace_kvm_unmap_hva_range)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_age_hva)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_test_age_hva)(void *, long unsigned int); + +typedef bool (*gfn_handler_t)(struct kvm *, struct kvm_gfn_range *); + +typedef void (*on_lock_fn_t)(struct kvm *); + +struct kvm_mmu_notifier_range { + u64 start; + u64 end; + union kvm_mmu_notifier_arg arg; + gfn_handler_t handler; + on_lock_fn_t on_lock; + bool flush_on_ret; + bool may_block; +}; + +struct kvm_mmu_notifier_return { + bool ret; + bool found_memslot; +}; + +typedef struct kvm_mmu_notifier_return kvm_mn_ret_t; + +struct kvm_mem_range { + u64 start; + u64 end; +}; + +struct pkvm_mem_transition { + u64 nr_pages; + struct { + enum pkvm_component_id id; + u64 addr; + union { + struct { + u64 completer_addr; + } host; + struct { + u64 completer_addr; + } hyp; + }; + } initiator; + struct { + enum pkvm_component_id id; + } completer; +}; + +struct pkvm_mem_share { + const struct pkvm_mem_transition tx; + const enum kvm_pgtable_prot completer_prot; +}; + +struct pkvm_mem_donation { + const struct pkvm_mem_transition tx; +}; + +struct check_walk_data { + enum pkvm_page_state desired; + enum pkvm_page_state (*get_page_state)(kvm_pte_t, u64); +}; + +enum rwsem_waiter_type { + RWSEM_WAITING_FOR_WRITE = 0, + 
RWSEM_WAITING_FOR_READ = 1, +}; + +struct rwsem_waiter { + struct list_head list; + struct task_struct *task; + enum rwsem_waiter_type type; + long unsigned int timeout; + bool handoff_set; +}; + +enum rwsem_wake_type { + RWSEM_WAKE_ANY = 0, + RWSEM_WAKE_READERS = 1, + RWSEM_WAKE_READ_OWNED = 2, +}; + +enum owner_state { + OWNER_NULL = 1, + OWNER_WRITER = 2, + OWNER_READER = 4, + OWNER_NONSPINNABLE = 8, +}; + +enum tick_broadcast_mode { + TICK_BROADCAST_OFF = 0, + TICK_BROADCAST_ON = 1, + TICK_BROADCAST_FORCE = 2, +}; + +struct crash_mem { + unsigned int max_nr_ranges; + unsigned int nr_ranges; + struct range ranges[0]; +}; + +struct syscall_trace_enter { + struct trace_entry ent; + int nr; + long unsigned int args[0]; +}; + +struct syscall_trace_exit { + struct trace_entry ent; + int nr; + long int ret; +}; + +struct syscall_tp_t { + struct trace_entry ent; + int syscall_nr; + long unsigned int ret; +}; + +struct syscall_tp_t___2 { + struct trace_entry ent; + int syscall_nr; + long unsigned int args[6]; +}; + +struct bucket { + struct hlist_nulls_head head; + raw_spinlock_t raw_lock; +}; + +struct htab_elem; + +struct bpf_htab { + struct bpf_map map; + struct bpf_mem_alloc ma; + struct bpf_mem_alloc pcpu_ma; + struct bucket *buckets; + void *elems; + union { + struct pcpu_freelist freelist; + struct bpf_lru lru; + }; + struct htab_elem **extra_elems; + struct percpu_counter pcount; + atomic_t count; + bool use_percpu_counter; + u32 n_buckets; + u32 elem_size; + u32 hashrnd; + struct lock_class_key lockdep_key; + int *map_locked[8]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct htab_elem { + union { + struct hlist_nulls_node hash_node; + struct { + void *padding; + union { + struct pcpu_freelist_node fnode; + struct htab_elem *batch_flink; + }; + }; + }; + union { + void *ptr_to_pptr; + struct bpf_lru_node lru_node; + }; + u32 hash; + long: 0; + char key[0]; +}; + +struct bpf_iter_seq_hash_map_info { + struct bpf_map *map; + struct bpf_htab *htab; + void *percpu_value_buf; + u32 bucket_id; + u32 skip_elems; +}; + +struct cachestat_range { + __u64 off; + __u64 len; +}; + +struct cachestat { + __u64 nr_cache; + __u64 nr_dirty; + __u64 nr_writeback; + __u64 nr_evicted; + __u64 nr_recently_evicted; +}; + +struct trace_event_raw_mm_filemap_op_page_cache { + struct trace_entry ent; + long unsigned int pfn; + long unsigned int i_ino; + long unsigned int index; + dev_t s_dev; + unsigned char order; + char __data[0]; +}; + +struct trace_event_raw_mm_filemap_op_page_cache_range { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + long unsigned int index; + long unsigned int last_index; + char __data[0]; +}; + +struct trace_event_raw_mm_filemap_fault { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_filemap_set_wb_err { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + errseq_t errseq; + char __data[0]; +}; + +struct trace_event_raw_file_check_and_advance_wb_err { + struct trace_entry ent; + struct file *file; + long unsigned int i_ino; + dev_t s_dev; + errseq_t old; + errseq_t new; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_filemap_op_page_cache {}; + +struct trace_event_data_offsets_mm_filemap_op_page_cache_range {}; + +struct trace_event_data_offsets_mm_filemap_fault {}; + +struct trace_event_data_offsets_filemap_set_wb_err {}; + +struct trace_event_data_offsets_file_check_and_advance_wb_err {}; + 
+typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_mm_filemap_get_pages)(void *, struct address_space *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_filemap_map_pages)(void *, struct address_space *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_filemap_fault)(void *, struct address_space *, long unsigned int); + +typedef void (*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t); + +typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t); + +enum behavior { + EXCLUSIVE = 0, + SHARED = 1, + DROP = 2, +}; + +struct swap_iocb { + struct kiocb iocb; + struct bio_vec bvec[32]; + int pages; + int len; +}; + +struct eventfd_ctx { + struct kref kref; + wait_queue_head_t wqh; + __u64 count; + unsigned int flags; + int id; +}; + +enum { + Opt_direct = 0, + Opt_fd = 1, + Opt_gid___8 = 2, + Opt_ignore___2 = 3, + Opt_indirect = 4, + Opt_maxproto = 5, + Opt_minproto = 6, + Opt_offset = 7, + Opt_pgrp = 8, + Opt_strictexpire = 9, + Opt_uid___7 = 10, +}; + +struct autofs_fs_context { + kuid_t uid; + kgid_t gid; + int pgrp; + bool pgrp_set; +}; + +struct workspace___3 { + void *mem; + size_t size; + char *buf; + unsigned int level; + unsigned int req_level; + long unsigned int last_used; + struct list_head list; + struct list_head lru_list; + zstd_in_buffer in_buf; + zstd_out_buffer out_buf; +}; + +struct zstd_workspace_manager { + const struct btrfs_compress_op *ops; + spinlock_t lock; + struct list_head lru_list; + struct list_head idle_ws[15]; + long unsigned int active_map; + wait_queue_head_t wait; + struct timer_list timer; +}; + +struct tree_mod_root { + u64 logical; + u8 level; +}; + +struct tree_mod_elem { + struct rb_node node; + u64 logical; + u64 seq; + enum btrfs_mod_log_op op; + int slot; + u64 generation; + struct btrfs_disk_key key; + u64 blockptr; + struct { + int dst_slot; + int nr_items; + } move; + struct tree_mod_root old_root; +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +enum rsaprivkey_actions { + ACT_rsa_get_d = 0, + ACT_rsa_get_dp = 1, + ACT_rsa_get_dq = 2, + ACT_rsa_get_e___2 = 3, + ACT_rsa_get_n___2 = 4, + ACT_rsa_get_p = 5, + ACT_rsa_get_q = 6, + ACT_rsa_get_qinv = 7, + NR__rsaprivkey_actions = 8, +}; + +struct hash_prefix { + const char *name; + const u8 *data; + size_t size; +}; + +struct rsassa_pkcs1_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct rsassa_pkcs1_inst_ctx { + struct crypto_akcipher_spawn spawn; + const struct hash_prefix *hash_prefix; +}; + +struct lzorle_ctx { + void *lzorle_comp_mem; +}; + +enum { + IO_SQ_THREAD_SHOULD_STOP = 0, + IO_SQ_THREAD_SHOULD_PARK = 1, +}; + +typedef struct tree_desc_s tree_desc; + +struct ei_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; + int etype; + void *priv; +}; + +struct vcs_poll_data { + struct notifier_block notifier; + unsigned int cons_num; + int event; + wait_queue_head_t waitq; + struct fasync_struct *fasync; +}; + +struct probe { + struct probe *next; + dev_t dev; + long unsigned int range; + struct module *owner; + kobj_probe_t *get; + int (*lock)(dev_t, void *); + void *data; +}; + +struct kobj_map { + struct probe *probes[255]; + struct mutex *lock; +}; + +struct soc_device { + struct device dev; + struct soc_device_attribute *attr; + int soc_dev_num; +}; + +enum bip_flags { + 
BIP_BLOCK_INTEGRITY = 1, + BIP_MAPPED_INTEGRITY = 2, + BIP_CTRL_NOCHECK = 4, + BIP_DISK_NOCHECK = 8, + BIP_IP_CHECKSUM = 16, + BIP_COPY_USER = 32, +}; + +enum t10_dif_type { + T10_PI_TYPE0_PROTECTION = 0, + T10_PI_TYPE1_PROTECTION = 1, + T10_PI_TYPE2_PROTECTION = 2, + T10_PI_TYPE3_PROTECTION = 3, +}; + +struct scsi_io_group_descriptor { + u8 io_advice_hints_mode: 2; + u8 reserved1: 3; + u8 st_enble: 1; + u8 cs_enble: 1; + u8 ic_enable: 1; + u8 reserved2[3]; + u8 acdlu: 1; + u8 reserved3: 1; + u8 rlbsr: 2; + u8 lbm_descriptor_type: 4; + u8 params[2]; + u8 reserved4; + u8 reserved5[8]; +}; + +struct scsi_stream_status { + u8 perm: 1; + u8 reserved1: 7; + u8 reserved2; + __be16 stream_identifier; + u8 reserved3: 2; + u8 rel_lifetime: 6; + u8 reserved4[3]; +}; + +struct scsi_stream_status_header { + __be32 len; + u16 reserved; + __be16 number_of_open_streams; + struct { + struct {} __empty_stream_status; + struct scsi_stream_status stream_status[0]; + }; +}; + +enum scsi_prot_flags { + SCSI_PROT_TRANSFER_PI = 1, + SCSI_PROT_GUARD_CHECK = 2, + SCSI_PROT_REF_CHECK = 4, + SCSI_PROT_REF_INCREMENT = 8, + SCSI_PROT_IP_CHECKSUM = 16, +}; + +enum { + SD_EXT_CDB_SIZE = 32, + SD_MEMPOOL_SIZE = 2, +}; + +enum { + SD_DEF_XFER_BLOCKS = 65535, + SD_MAX_XFER_BLOCKS = 4294967295, + SD_MAX_WS10_BLOCKS = 65535, + SD_MAX_WS16_BLOCKS = 8388607, +}; + +enum { + SD_LBP_FULL = 0, + SD_LBP_UNMAP = 1, + SD_LBP_WS16 = 2, + SD_LBP_WS10 = 3, + SD_LBP_ZERO = 4, + SD_LBP_DISABLE = 5, +}; + +enum { + SD_ZERO_WRITE = 0, + SD_ZERO_WS = 1, + SD_ZERO_WS16_UNMAP = 2, + SD_ZERO_WS10_UNMAP = 3, +}; + +struct virtio_net_config { + __u8 mac[6]; + __virtio16 status; + __virtio16 max_virtqueue_pairs; + __virtio16 mtu; + __le32 speed; + __u8 duplex; + __u8 rss_max_key_size; + __le16 rss_max_indirection_table_length; + __le32 supported_hash_types; +}; + +struct virtio_net_hdr_v1 { + __u8 flags; + __u8 gso_type; + __virtio16 hdr_len; + __virtio16 gso_size; + union { + struct { + __virtio16 csum_start; + __virtio16 csum_offset; + }; + struct { + __virtio16 start; + __virtio16 offset; + } csum; + struct { + __le16 segments; + __le16 dup_acks; + } rsc; + }; + __virtio16 num_buffers; +}; + +struct virtio_net_hdr_v1_hash { + struct virtio_net_hdr_v1 hdr; + __le32 hash_value; + __le16 hash_report; + __le16 padding; +}; + +struct virtio_net_ctrl_hdr { + __u8 class; + __u8 cmd; +}; + +typedef __u8 virtio_net_ctrl_ack; + +struct virtio_net_ctrl_mac { + __virtio32 entries; + __u8 macs[0]; +}; + +struct virtio_net_ctrl_mq { + __virtio16 virtqueue_pairs; +}; + +struct virtio_net_ctrl_coal_tx { + __le32 tx_max_packets; + __le32 tx_usecs; +}; + +struct virtio_net_ctrl_coal_rx { + __le32 rx_max_packets; + __le32 rx_usecs; +}; + +struct virtio_net_ctrl_coal { + __le32 max_packets; + __le32 max_usecs; +}; + +struct virtio_net_ctrl_coal_vq { + __le16 vqn; + __le16 reserved; + struct virtio_net_ctrl_coal coal; +}; + +struct virtio_net_stats_capabilities { + __le64 supported_stats_types[1]; +}; + +struct virtio_net_ctrl_queue_stats { + struct { + __le16 vq_index; + __le16 reserved[3]; + __le64 types_bitmap[1]; + } stats[1]; +}; + +struct virtio_net_stats_reply_hdr { + __u8 type; + __u8 reserved; + __le16 vq_index; + __le16 reserved1; + __le16 size; +}; + +struct ewma_pkt_len { + long unsigned int internal; +}; + +struct virtnet_stat_desc { + char desc[32]; + size_t offset; + size_t qstat_offset; +}; + +struct virtnet_sq_free_stats { + u64 packets; + u64 bytes; + u64 napi_packets; + u64 napi_bytes; + u64 xsk; +}; + +struct virtnet_sq_stats { + struct 
u64_stats_sync syncp; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t xdp_tx; + u64_stats_t xdp_tx_drops; + u64_stats_t kicks; + u64_stats_t tx_timeouts; + u64_stats_t stop; + u64_stats_t wake; +}; + +struct virtnet_rq_stats { + struct u64_stats_sync syncp; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t drops; + u64_stats_t xdp_packets; + u64_stats_t xdp_tx; + u64_stats_t xdp_redirects; + u64_stats_t xdp_drops; + u64_stats_t kicks; +}; + +struct virtnet_interrupt_coalesce { + u32 max_packets; + u32 max_usecs; +}; + +struct virtnet_rq_dma { + dma_addr_t addr; + u32 ref; + u16 len; + u16 need_sync; +}; + +struct send_queue { + struct virtqueue *vq; + struct scatterlist sg[19]; + char name[16]; + struct virtnet_sq_stats stats; + struct virtnet_interrupt_coalesce intr_coal; + struct napi_struct napi; + bool reset; + struct xsk_buff_pool *xsk_pool; + dma_addr_t xsk_hdr_dma_addr; +}; + +struct receive_queue { + struct virtqueue *vq; + struct napi_struct napi; + struct bpf_prog *xdp_prog; + struct virtnet_rq_stats stats; + u16 calls; + bool dim_enabled; + struct mutex dim_lock; + struct dim dim; + u32 packets_in_napi; + struct virtnet_interrupt_coalesce intr_coal; + struct page *pages; + struct ewma_pkt_len mrg_avg_pkt_len; + struct page_frag alloc_frag; + struct scatterlist sg[19]; + unsigned int min_buf_len; + char name[16]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct xdp_rxq_info xdp_rxq; + struct virtnet_rq_dma *last_dma; + struct xsk_buff_pool *xsk_pool; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct xdp_rxq_info xsk_rxq_info; + struct xdp_buff **xsk_buffs; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct virtio_net_ctrl_rss { + u32 hash_types; + u16 indirection_table_mask; + u16 unclassified_queue; + u16 hash_cfg_reserved; + u16 max_tx_vq; + u8 hash_key_length; + u8 key[40]; + u16 *indirection_table; +}; + +struct control_buf { + struct virtio_net_ctrl_hdr hdr; + virtio_net_ctrl_ack status; +}; + +struct virtnet_info { + struct virtio_device *vdev; + struct virtqueue *cvq; + struct net_device *dev; + struct send_queue *sq; + struct receive_queue *rq; + unsigned int status; + u16 max_queue_pairs; + u16 curr_queue_pairs; + u16 xdp_queue_pairs; + bool xdp_enabled; + bool big_packets; + unsigned int big_packets_num_skbfrags; + bool mergeable_rx_bufs; + bool has_rss; + bool has_rss_hash_report; + u8 rss_key_size; + u16 rss_indir_table_size; + u32 rss_hash_types_supported; + u32 rss_hash_types_saved; + struct virtio_net_ctrl_rss rss; + bool has_cvq; + struct mutex cvq_lock; + bool any_header_sg; + u8 hdr_len; + struct delayed_work refill; + bool refill_enabled; + spinlock_t refill_lock; + struct work_struct config_work; + struct work_struct rx_mode_work; + bool rx_mode_work_enabled; + bool affinity_hint_set; + struct hlist_node node; + struct hlist_node node_dead; + struct control_buf *ctrl; + u8 duplex; + u32 speed; + bool rx_dim_enabled; + struct virtnet_interrupt_coalesce intr_coal_tx; + struct virtnet_interrupt_coalesce intr_coal_rx; + long unsigned int guest_offloads; + long unsigned int guest_offloads_capable; + struct failover *failover; + u64 device_stats_cap; +}; + +struct virtio_net_common_hdr { + union { + struct virtio_net_hdr hdr; + struct virtio_net_hdr_mrg_rxbuf mrg_hdr; + struct virtio_net_hdr_v1_hash hash_v1_hdr; + }; +}; + +enum virtnet_xmit_type { + VIRTNET_XMIT_TYPE_SKB = 0, + VIRTNET_XMIT_TYPE_SKB_ORPHAN = 1, + 
VIRTNET_XMIT_TYPE_XDP = 2, + VIRTNET_XMIT_TYPE_XSK = 3, +}; + +struct virtnet_stats_ctx { + bool to_qstat; + u32 desc_num[3]; + u64 bitmap[3]; + u32 size[3]; + u64 *data; +}; + +enum serio_event_type { + SERIO_RESCAN_PORT = 0, + SERIO_RECONNECT_PORT = 1, + SERIO_RECONNECT_SUBTREE = 2, + SERIO_REGISTER_PORT = 3, + SERIO_ATTACH_DRIVER = 4, +}; + +struct serio_event { + enum serio_event_type type; + void *object; + struct module *owner; + struct list_head node; +}; + +enum { + POWER_SUPPLY_STATUS_UNKNOWN = 0, + POWER_SUPPLY_STATUS_CHARGING = 1, + POWER_SUPPLY_STATUS_DISCHARGING = 2, + POWER_SUPPLY_STATUS_NOT_CHARGING = 3, + POWER_SUPPLY_STATUS_FULL = 4, +}; + +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE = 1, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE = 2, + POWER_SUPPLY_CHARGE_TYPE_FAST = 3, + POWER_SUPPLY_CHARGE_TYPE_STANDARD = 4, + POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE = 5, + POWER_SUPPLY_CHARGE_TYPE_CUSTOM = 6, + POWER_SUPPLY_CHARGE_TYPE_LONGLIFE = 7, + POWER_SUPPLY_CHARGE_TYPE_BYPASS = 8, +}; + +enum { + POWER_SUPPLY_HEALTH_UNKNOWN = 0, + POWER_SUPPLY_HEALTH_GOOD = 1, + POWER_SUPPLY_HEALTH_OVERHEAT = 2, + POWER_SUPPLY_HEALTH_DEAD = 3, + POWER_SUPPLY_HEALTH_OVERVOLTAGE = 4, + POWER_SUPPLY_HEALTH_UNSPEC_FAILURE = 5, + POWER_SUPPLY_HEALTH_COLD = 6, + POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE = 7, + POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE = 8, + POWER_SUPPLY_HEALTH_OVERCURRENT = 9, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED = 10, + POWER_SUPPLY_HEALTH_WARM = 11, + POWER_SUPPLY_HEALTH_COOL = 12, + POWER_SUPPLY_HEALTH_HOT = 13, + POWER_SUPPLY_HEALTH_NO_BATTERY = 14, +}; + +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1, + POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4, + POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5, +}; + +enum power_supply_usb_type { + POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, + POWER_SUPPLY_USB_TYPE_SDP = 1, + POWER_SUPPLY_USB_TYPE_DCP = 2, + POWER_SUPPLY_USB_TYPE_CDP = 3, + POWER_SUPPLY_USB_TYPE_ACA = 4, + POWER_SUPPLY_USB_TYPE_C = 5, + POWER_SUPPLY_USB_TYPE_PD = 6, + POWER_SUPPLY_USB_TYPE_PD_DRP = 7, + POWER_SUPPLY_USB_TYPE_PD_PPS = 8, + POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, +}; + +enum power_supply_charge_behaviour { + POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0, + POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE = 1, + POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE = 2, +}; + +struct power_supply_attr { + const char *prop_name; + char attr_name[31]; + struct device_attribute dev_attr; + const char * const *text_values; + int text_values_len; +}; + +enum arch_timer_reg { + ARCH_TIMER_REG_CTRL = 0, + ARCH_TIMER_REG_CVAL = 1, +}; + +enum arch_timer_ppi_nr { + ARCH_TIMER_PHYS_SECURE_PPI = 0, + ARCH_TIMER_PHYS_NONSECURE_PPI = 1, + ARCH_TIMER_VIRT_PPI = 2, + ARCH_TIMER_HYP_PPI = 3, + ARCH_TIMER_HYP_VIRT_PPI = 4, + ARCH_TIMER_MAX_TIMER_PPI = 5, +}; + +enum arch_timer_spi_nr { + ARCH_TIMER_PHYS_SPI = 0, + ARCH_TIMER_VIRT_SPI = 1, + ARCH_TIMER_MAX_TIMER_SPI = 2, +}; + +struct arch_timer_mem_frame { + bool valid; + phys_addr_t cntbase; + size_t size; + int phys_irq; + int virt_irq; +}; + +struct arch_timer_mem { + phys_addr_t cntctlbase; + size_t size; + struct arch_timer_mem_frame frame[8]; +}; + +struct acpi_table_header { + char signature[4]; + u32 length; + u8 revision; + u8 checksum; + char oem_id[6]; + char oem_table_id[8]; + u32 oem_revision; + char asl_compiler_id[4]; + u32 asl_compiler_revision; +}; + +struct arch_timer { + void *base; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct clock_event_device evt; +}; + +struct ate_acpi_oem_info { + char oem_id[7]; + char oem_table_id[9]; + u32 oem_revision; +}; + +typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, const void *); + +struct nvmem_layout_driver { + struct device_driver driver; + int (*probe)(struct nvmem_layout *); + void (*remove)(struct nvmem_layout *); +}; + +enum { + ETHTOOL_A_PROFILE_UNSPEC = 0, + ETHTOOL_A_PROFILE_IRQ_MODERATION = 1, + __ETHTOOL_A_PROFILE_CNT = 2, + ETHTOOL_A_PROFILE_MAX = 1, +}; + +enum { + ETHTOOL_A_IRQ_MODERATION_UNSPEC = 0, + ETHTOOL_A_IRQ_MODERATION_USEC = 1, + ETHTOOL_A_IRQ_MODERATION_PKTS = 2, + ETHTOOL_A_IRQ_MODERATION_COMPS = 3, + __ETHTOOL_A_IRQ_MODERATION_CNT = 4, + ETHTOOL_A_IRQ_MODERATION_MAX = 3, +}; + +struct coalesce_reply_data { + struct ethnl_reply_data base; + struct ethtool_coalesce coalesce; + struct kernel_ethtool_coalesce kernel_coalesce; + u32 supported_params; +}; + +enum nfulnl_msg_types { + NFULNL_MSG_PACKET = 0, + NFULNL_MSG_CONFIG = 1, + NFULNL_MSG_MAX = 2, +}; + +struct nfulnl_msg_packet_hdr { + __be16 hw_protocol; + __u8 hook; + __u8 _pad; +}; + +struct nfulnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfulnl_msg_packet_timestamp { + __be64 sec; + __be64 usec; +}; + +enum nfulnl_vlan_attr { + NFULA_VLAN_UNSPEC = 0, + NFULA_VLAN_PROTO = 1, + NFULA_VLAN_TCI = 2, + __NFULA_VLAN_MAX = 3, +}; + +enum nfulnl_attr_type { + NFULA_UNSPEC = 0, + NFULA_PACKET_HDR = 1, + NFULA_MARK = 2, + NFULA_TIMESTAMP = 3, + NFULA_IFINDEX_INDEV = 4, + NFULA_IFINDEX_OUTDEV = 5, + NFULA_IFINDEX_PHYSINDEV = 6, + NFULA_IFINDEX_PHYSOUTDEV = 7, + NFULA_HWADDR = 8, + NFULA_PAYLOAD = 9, + NFULA_PREFIX = 10, + NFULA_UID = 11, + NFULA_SEQ = 12, + NFULA_SEQ_GLOBAL = 13, + NFULA_GID = 14, + NFULA_HWTYPE = 15, + NFULA_HWHEADER = 16, + NFULA_HWLEN = 17, + NFULA_CT = 18, + NFULA_CT_INFO = 19, + NFULA_VLAN = 20, + NFULA_L2HDR = 21, + __NFULA_MAX = 22, +}; + +enum nfulnl_msg_config_cmds { + NFULNL_CFG_CMD_NONE = 0, + NFULNL_CFG_CMD_BIND = 1, + NFULNL_CFG_CMD_UNBIND = 2, + NFULNL_CFG_CMD_PF_BIND = 3, + NFULNL_CFG_CMD_PF_UNBIND = 4, +}; + +struct nfulnl_msg_config_cmd { + __u8 command; +}; + +struct nfulnl_msg_config_mode { + __be32 copy_range; + __u8 copy_mode; + __u8 _pad; +} __attribute__((packed)); + +enum nfulnl_attr_config { + NFULA_CFG_UNSPEC = 0, + NFULA_CFG_CMD = 1, + NFULA_CFG_MODE = 2, + NFULA_CFG_NLBUFSIZ = 3, + NFULA_CFG_TIMEOUT = 4, + NFULA_CFG_QTHRESH = 5, + NFULA_CFG_FLAGS = 6, + __NFULA_CFG_MAX = 7, +}; + +struct nfulnl_instance { + struct hlist_node hlist; + spinlock_t lock; + refcount_t use; + unsigned int qlen; + struct sk_buff *skb; + struct timer_list timer; + struct net *net; + netns_tracker ns_tracker; + struct user_namespace *peer_user_ns; + u32 peer_portid; + unsigned int flushtimeout; + unsigned int nlbufsiz; + unsigned int qthreshold; + u_int32_t copy_range; + u_int32_t seq; + u_int16_t group_num; + u_int16_t flags; + u_int8_t copy_mode; + struct callback_head rcu; +}; + +struct nfnl_log_net { + spinlock_t instances_lock; + struct hlist_head instance_table[16]; + atomic_t global_seq; +}; + +enum nft_cmp_ops { + NFT_CMP_EQ = 0, + NFT_CMP_NEQ = 1, + NFT_CMP_LT = 2, + NFT_CMP_LTE = 3, + NFT_CMP_GT = 4, + NFT_CMP_GTE = 5, +}; + +enum nft_cmp_attributes { + NFTA_CMP_UNSPEC = 0, + NFTA_CMP_SREG = 1, + NFTA_CMP_OP = 2, + NFTA_CMP_DATA = 3, + __NFTA_CMP_MAX = 4, +}; + +struct nft_cmp_expr { + struct nft_data data; + u8 sreg; + u8 len; + enum 
nft_cmp_ops op: 8; +}; + +union nft_cmp_offload_data { + u16 val16; + u32 val32; + u64 val64; +}; + +struct xt_cgroup_info_v0 { + __u32 id; + __u32 invert; +}; + +struct xt_cgroup_info_v1 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + char path[4096]; + __u32 classid; + void *priv; +}; + +struct xt_cgroup_info_v2 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + union { + char path[512]; + __u32 classid; + }; + void *priv; +}; + +struct bpf_iter__udp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct udp_sock *udp_sk; + }; + uid_t uid; + long: 0; + int bucket; +}; + +struct bpf_udp_iter_state { + struct udp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + int offset; + struct sock **batch; + bool st_bucket_done; +}; + +struct icmp6_filter { + __u32 data[8]; +}; + +struct raw6_sock { + struct inet_sock inet; + __u32 checksum; + __u32 offset; + struct icmp6_filter filter; + __u32 ip6mr_table; + struct ipv6_pinfo inet6; +}; + +struct raw6_frag_vec { + struct msghdr *msg; + int hlen; + char c[4]; +}; + +enum { + FDB_NOTIFY_BIT = 1, + FDB_NOTIFY_INACTIVE_BIT = 2, +}; + +enum { + NFEA_UNSPEC = 0, + NFEA_ACTIVITY_NOTIFY = 1, + NFEA_DONT_REFRESH = 2, + __NFEA_MAX = 3, +}; + +struct __fdb_entry { + __u8 mac_addr[6]; + __u8 port_no; + __u8 is_local; + __u32 ageing_timer_value; + __u8 port_hi; + __u8 pad0; + __u16 unused; +}; + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute pop +#endif + +#endif /* __VMLINUX_H__ */ diff --git a/selftest/struct-ops/dep/include/arch/mips/vmlinux-v6.13-rc1-g353b2a7de2a3.h b/selftest/struct-ops/dep/include/arch/mips/vmlinux-v6.13-rc1-g353b2a7de2a3.h new file mode 100644 index 00000000..7b0761ed --- /dev/null +++ b/selftest/struct-ops/dep/include/arch/mips/vmlinux-v6.13-rc1-g353b2a7de2a3.h @@ -0,0 +1,105667 @@ +#ifndef __VMLINUX_H__ +#define __VMLINUX_H__ + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record) +#endif + +typedef signed char __s8; + +typedef unsigned char __u8; + +typedef short unsigned int __u16; + +typedef int __s32; + +typedef unsigned int __u32; + +typedef long long int __s64; + +typedef long long unsigned int __u64; + +typedef __s8 s8; + +typedef __u8 u8; + +typedef __u16 u16; + +typedef __s32 s32; + +typedef __u32 u32; + +typedef __s64 s64; + +typedef __u64 u64; + +enum { + false = 0, + true = 1, +}; + +typedef long int __kernel_long_t; + +typedef long unsigned int __kernel_ulong_t; + +typedef int __kernel_pid_t; + +typedef unsigned int __kernel_uid32_t; + +typedef unsigned int __kernel_gid32_t; + +typedef unsigned int __kernel_size_t; + +typedef int __kernel_ssize_t; + +typedef long long int __kernel_loff_t; + +typedef long long int __kernel_time64_t; + +typedef __kernel_long_t __kernel_clock_t; + +typedef int __kernel_timer_t; + +typedef int __kernel_clockid_t; + +typedef unsigned int __poll_t; + +typedef u32 __kernel_dev_t; + +typedef __kernel_dev_t dev_t; + +typedef short unsigned int umode_t; + +typedef __kernel_pid_t pid_t; + +typedef __kernel_clockid_t clockid_t; + +typedef _Bool bool; + +typedef __kernel_uid32_t uid_t; + +typedef __kernel_gid32_t gid_t; + +typedef __kernel_loff_t loff_t; + +typedef __kernel_size_t size_t; + +typedef __kernel_ssize_t ssize_t; + +typedef s32 int32_t; + +typedef u32 uint32_t; + +typedef s64 ktime_t; + +typedef u64 sector_t; + +typedef u64 blkcnt_t; + +typedef unsigned int gfp_t; + +typedef unsigned 
int fmode_t; + +typedef struct { + int counter; +} atomic_t; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct hlist_node; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +struct cache_desc { + unsigned int waysize; + short unsigned int sets; + unsigned char ways; + unsigned char linesz; + unsigned char waybit; + unsigned char flags; +}; + +struct guest_info { + long unsigned int ases; + long unsigned int ases_dyn; + long long unsigned int options; + long long unsigned int options_dyn; + int tlbsize; + u8 conf; + u8 kscratch_mask; +}; + +struct cpuinfo_mips { + u64 asid_cache; + long unsigned int ases; + long: 32; + long long unsigned int options; + unsigned int udelay_val; + unsigned int processor_id; + unsigned int fpu_id; + unsigned int fpu_csr31; + unsigned int fpu_msk31; + unsigned int msa_id; + unsigned int cputype; + int isa_level; + int tlbsize; + int tlbsizevtlb; + int tlbsizeftlbsets; + int tlbsizeftlbways; + struct cache_desc icache; + struct cache_desc dcache; + struct cache_desc vcache; + struct cache_desc scache; + struct cache_desc tcache; + int srsets; + int package; + unsigned int globalnumber; + void *data; + unsigned int watch_reg_count; + unsigned int watch_reg_use_cnt; + u16 watch_reg_masks[4]; + unsigned int kscratch_mask; + unsigned int writecombine; + unsigned int htw_seq; + struct guest_info guest; + unsigned int gtoffset_mask; + unsigned int guestid_mask; + unsigned int guestid_cache; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef atomic_t atomic_long_t; + +struct qspinlock { + union { + atomic_t val; + struct { + u16 tail; + u16 locked_pending; + }; + struct { + u8 reserved[2]; + u8 pending; + u8 locked; + }; + }; +}; + +typedef struct qspinlock arch_spinlock_t; + +struct raw_spinlock { + arch_spinlock_t raw_lock; +}; + +typedef struct raw_spinlock raw_spinlock_t; + +struct mutex { + atomic_long_t owner; + raw_spinlock_t wait_lock; + struct list_head wait_list; +}; + +struct seq_operations; + +struct file; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + long: 32; + loff_t index; + loff_t read_pos; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; +}; + +typedef __s64 time64_t; + +struct __kernel_timespec { + __kernel_time64_t tv_sec; + long long int tv_nsec; +}; + +struct timespec64 { + time64_t tv_sec; + long int tv_nsec; + long: 32; +}; + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct lock_class_key {}; + +struct fs_context; + +struct fs_parameter_spec; + +struct dentry; + +struct super_block; + +struct module; + +struct file_system_type { + const char *name; + int fs_flags; + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); + void (*kill_sb)(struct super_block *); + struct module *owner; + struct file_system_type *next; + struct hlist_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[3]; + struct lock_class_key i_lock_key; + struct 
lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; + struct lock_class_key i_mutex_dir_key; +}; + +struct qrwlock { + union { + atomic_t cnts; + struct { + u8 __lstate[3]; + u8 wlocked; + }; + }; + arch_spinlock_t wait_lock; +}; + +typedef struct qrwlock arch_rwlock_t; + +struct lockdep_map {}; + +struct ratelimit_state { + raw_spinlock_t lock; + int interval; + int burst; + int printed; + int missed; + unsigned int flags; + long unsigned int begin; +}; + +typedef u32 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +struct static_key_mod; + +struct static_key { + atomic_t enabled; + union { + long unsigned int type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; + +struct static_key_true { + struct static_key key; +}; + +struct static_key_false { + struct static_key key; +}; + +struct _ddebug { + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno: 18; + unsigned int class_id: 6; + unsigned int flags: 8; + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; + long: 32; +}; + +enum class_map_type { + DD_CLASS_TYPE_DISJOINT_BITS = 0, + DD_CLASS_TYPE_LEVEL_NUM = 1, + DD_CLASS_TYPE_DISJOINT_NAMES = 2, + DD_CLASS_TYPE_LEVEL_NAMES = 3, +}; + +struct ddebug_class_map { + struct list_head link; + struct module *mod; + const char *mod_name; + const char **class_names; + const int length; + const int base; + enum class_map_type map_type; +}; + +enum module_state { + MODULE_STATE_LIVE = 0, + MODULE_STATE_COMING = 1, + MODULE_STATE_GOING = 2, + MODULE_STATE_UNFORMED = 3, +}; + +struct refcount_struct { + atomic_t refs; +}; + +typedef struct refcount_struct refcount_t; + +struct kref { + refcount_t refcount; +}; + +struct kset; + +struct kobj_type; + +struct kernfs_node; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + const struct kobj_type *ktype; + struct kernfs_node *sd; + struct kref kref; + unsigned int state_initialized: 1; + unsigned int state_in_sysfs: 1; + unsigned int state_add_uevent_sent: 1; + unsigned int state_remove_uevent_sent: 1; + unsigned int uevent_suppress: 1; +}; + +struct module_param_attrs; + +struct completion; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +}; + +struct rb_node { + long unsigned int __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +struct latch_tree_node { + struct rb_node node[2]; +}; + +struct mod_tree_node { + struct module *mod; + struct latch_tree_node node; +}; + +struct module_memory { + void *base; + void *rw_copy; + bool is_rox; + unsigned int size; + struct mod_tree_node mtn; +}; + +struct exception_table_entry; + +struct mips_hi16; + +struct mod_arch_specific { + struct list_head dbe_list; + const struct exception_table_entry *dbe_start; + const struct exception_table_entry *dbe_end; + struct mips_hi16 *r_mips_hi16_list; +}; + +struct elf32_sym; + +typedef struct elf32_sym Elf32_Sym; + +struct mod_kallsyms { + Elf32_Sym *symtab; + unsigned int num_symtab; + char *strtab; + char *typetab; +}; + +struct _ddebug_info { + struct _ddebug *descs; + struct ddebug_class_map *classes; + unsigned int num_descs; + unsigned int num_classes; +}; + +struct module_attribute; + +struct kernel_symbol; + +struct kernel_param; + +struct 
module_sect_attrs; + +struct module_notes_attrs; + +struct tracepoint; + +typedef struct tracepoint * const tracepoint_ptr_t; + +struct srcu_struct; + +struct bpf_raw_event_map; + +struct trace_event_call; + +struct trace_eval_map; + +struct module { + enum module_state state; + struct list_head list; + char name[60]; + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + const struct kernel_symbol *syms; + const s32 *crcs; + unsigned int num_syms; + struct mutex param_lock; + struct kernel_param *kp; + unsigned int num_kp; + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const s32 *gpl_crcs; + bool using_gplonly_symbols; + bool async_probe_requested; + unsigned int num_exentries; + struct exception_table_entry *extable; + int (*init)(); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct module_memory mem[7]; + struct mod_arch_specific arch; + long unsigned int taints; + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + struct module_sect_attrs *sect_attrs; + struct module_notes_attrs *notes_attrs; + char *args; + void *percpu; + unsigned int percpu_size; + void *noinstr_text_start; + unsigned int noinstr_text_size; + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; + unsigned int num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; + unsigned int btf_data_size; + unsigned int btf_base_data_size; + void *btf_data; + void *btf_base_data; + struct jump_entry *jump_entries; + unsigned int num_jump_entries; + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; + struct trace_event_call **trace_events; + unsigned int num_trace_events; + struct trace_eval_map **trace_evals; + unsigned int num_trace_evals; + void *kprobes_text_start; + unsigned int kprobes_text_size; + long unsigned int *kprobe_blacklist; + unsigned int num_kprobe_blacklist; + struct list_head source_list; + struct list_head target_list; + void (*exit)(); + atomic_t refcnt; + struct _ddebug_info dyndbg_info; + long: 32; +}; + +struct kernel_param_ops { + unsigned int flags; + int (*set)(const char *, const struct kernel_param *); + int (*get)(char *, const struct kernel_param *); + void (*free)(void *); +}; + +typedef unsigned int fop_flags_t; + +typedef void *fl_owner_t; + +struct kiocb; + +struct iov_iter; + +struct io_comp_batch; + +struct dir_context; + +struct poll_table_struct; + +struct vm_area_struct; + +struct inode; + +struct file_lock; + +struct pipe_inode_info; + +struct file_lease; + +struct io_uring_cmd; + +struct file_operations { + struct module *owner; + fop_flags_t fop_flags; + loff_t (*llseek)(struct file *, loff_t, int); + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *, struct io_comp_batch *, unsigned int); + int (*iterate_shared)(struct file *, struct dir_context *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); + long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int); + int (*mmap)(struct file *, 
struct vm_area_struct *); + int (*open)(struct inode *, struct file *); + int (*flush)(struct file *, fl_owner_t); + int (*release)(struct inode *, struct file *); + int (*fsync)(struct file *, loff_t, loff_t, int); + int (*fasync)(int, struct file *, int); + int (*lock)(struct file *, int, struct file_lock *); + long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*check_flags)(int); + int (*flock)(struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct file *); + int (*setlease)(struct file *, int, struct file_lease **, void **); + long int (*fallocate)(struct file *, int, loff_t, loff_t); + void (*show_fdinfo)(struct seq_file *, struct file *); + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); + loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int); + int (*fadvise)(struct file *, loff_t, loff_t, int); + int (*uring_cmd)(struct io_uring_cmd *, unsigned int); + int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int); +}; + +struct static_call_key { + void *func; +}; + +struct cpumask { + long unsigned int bits[128]; +}; + +typedef struct cpumask cpumask_t; + +typedef struct { + s64 counter; +} atomic64_t; + +struct llist_node; + +struct llist_head { + struct llist_node *first; +}; + +struct llist_node { + struct llist_node *next; +}; + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; +}; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + +struct pollfd { + int fd; + short int events; + short int revents; +}; + +struct restart_block { + long unsigned int arch_data; + long int (*fn)(struct restart_block *); + union { + struct { + u32 *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 *uaddr2; + long: 32; + } futex; + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec *rmtp; + struct old_timespec32 *compat_rmtp; + }; + long: 32; + u64 expires; + } nanosleep; + struct { + struct pollfd *ufds; + int nfds; + int has_timeout; + long unsigned int tv_sec; + long unsigned int tv_nsec; + } poll; + }; +}; + +union fpureg { + __u32 val32[2]; + __u64 val64[1]; +}; + +struct mips_fpu_struct { + union fpureg fpr[32]; + unsigned int fcr31; + unsigned int msacsr; +}; + +typedef long unsigned int dspreg_t; + +struct mips_dsp_state { + dspreg_t dspr[6]; + unsigned int dspcontrol; +}; + +struct mips3264_watch_reg_state { + long unsigned int watchlo[4]; + u16 watchhi[4]; +}; + +union mips_watch_reg_state { + struct mips3264_watch_reg_state mips3264; +}; + +struct mips_abi; + +struct thread_struct { + long unsigned int reg16; + long unsigned int reg17; + long unsigned int reg18; + long unsigned int reg19; + long unsigned int reg20; + long unsigned int reg21; + long unsigned int reg22; + long unsigned int reg23; + long unsigned int reg29; + long unsigned int reg30; + long unsigned int reg31; + long unsigned int cp0_status; + struct mips_fpu_struct fpu; + atomic_t bd_emu_frame; + long unsigned int bd_emu_branch_pc; + long unsigned int bd_emu_cont_pc; + long unsigned int emulated_fp; + cpumask_t user_cpus_allowed; + struct mips_dsp_state dsp; + union 
mips_watch_reg_state watch; + long unsigned int cp0_badvaddr; + long unsigned int cp0_baduaddr; + long unsigned int error_code; + long unsigned int trap_nr; + struct mips_abi *abi; + long: 32; +}; + +struct task_struct; + +struct pt_regs; + +struct thread_info { + struct task_struct *task; + long unsigned int flags; + long unsigned int tp_value; + __u32 cpu; + int preempt_count; + struct pt_regs *regs; + long int syscall; +}; + +struct load_weight { + long unsigned int weight; + u32 inv_weight; +}; + +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + long unsigned int load_avg; + long unsigned int runnable_avg; + long unsigned int util_avg; + unsigned int util_est; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct cfs_rq; + +struct sched_entity { + struct load_weight load; + struct rb_node run_node; + long: 32; + u64 deadline; + u64 min_vruntime; + u64 min_slice; + struct list_head group_node; + unsigned char on_rq; + unsigned char sched_delayed; + unsigned char rel_deadline; + unsigned char custom_slice; + long: 32; + u64 exec_start; + u64 sum_exec_runtime; + u64 prev_sum_exec_runtime; + u64 vruntime; + s64 vlag; + u64 slice; + u64 nr_migrations; + int depth; + struct sched_entity *parent; + struct cfs_rq *cfs_rq; + struct cfs_rq *my_q; + long unsigned int runnable_weight; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_avg avg; +}; + +struct sched_rt_entity { + struct list_head run_list; + long unsigned int timeout; + long unsigned int watchdog_stamp; + unsigned int time_slice; + short unsigned int on_rq; + short unsigned int on_list; + struct sched_rt_entity *back; +}; + +struct timerqueue_node { + struct rb_node node; + long: 32; + ktime_t expires; +}; + +enum hrtimer_restart { + HRTIMER_NORESTART = 0, + HRTIMER_RESTART = 1, +}; + +struct hrtimer_clock_base; + +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; + long: 32; +}; + +struct sched_dl_entity; + +typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); + +typedef struct task_struct * (*dl_server_pick_f)(struct sched_dl_entity *); + +struct rq; + +struct sched_dl_entity { + struct rb_node rb_node; + long: 32; + u64 dl_runtime; + u64 dl_deadline; + u64 dl_period; + u64 dl_bw; + u64 dl_density; + s64 runtime; + u64 deadline; + unsigned int flags; + unsigned int dl_throttled: 1; + unsigned int dl_yielded: 1; + unsigned int dl_non_contending: 1; + unsigned int dl_overrun: 1; + unsigned int dl_server: 1; + unsigned int dl_defer: 1; + unsigned int dl_defer_armed: 1; + unsigned int dl_defer_running: 1; + struct hrtimer dl_timer; + struct hrtimer inactive_timer; + struct rq *rq; + dl_server_has_tasks_f server_has_tasks; + dl_server_pick_f server_pick_task; + struct sched_dl_entity *pi_se; +}; + +struct scx_dsq_list_node { + struct list_head node; + u32 flags; + u32 priv; +}; + +struct 
scx_dispatch_q; + +struct cgroup; + +struct sched_ext_entity { + struct scx_dispatch_q *dsq; + struct scx_dsq_list_node dsq_list; + struct rb_node dsq_priq; + u32 dsq_seq; + u32 dsq_flags; + u32 flags; + u32 weight; + s32 sticky_cpu; + s32 holding_cpu; + u32 kf_mask; + struct task_struct *kf_tasks[2]; + atomic_long_t ops_state; + struct list_head runnable_node; + long unsigned int runnable_at; + long: 32; + u64 ddsp_dsq_id; + u64 ddsp_enq_flags; + u64 slice; + u64 dsq_vtime; + bool disallow; + struct cgroup *cgrp_moving_from; + struct list_head tasks_node; +}; + +struct sched_statistics {}; + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; + u8 need_mb; + } b; + u32 s; +}; + +struct sched_info { + long unsigned int pcount; + long: 32; + long long unsigned int run_delay; + long long unsigned int last_arrival; + long long unsigned int last_queued; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +struct prev_cputime { + u64 utime; + u64 stime; + raw_spinlock_t lock; + long: 32; +}; + +struct rb_root { + struct rb_node *rb_node; +}; + +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +struct posix_cputimers { + struct posix_cputimer_base bases[3]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +struct sem_undo_list; + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +struct sysv_shm { + struct list_head shm_clist; +}; + +typedef struct { + long unsigned int sig[4]; +} sigset_t; + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +struct seccomp_filter; + +struct seccomp { + int mode; + atomic_t filter_count; + struct seccomp_filter *filter; +}; + +struct syscall_user_dispatch {}; + +struct spinlock { + union { + struct raw_spinlock rlock; + }; +}; + +typedef struct spinlock spinlock_t; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_io_accounting { + u64 rchar; + u64 wchar; + u64 syscr; + u64 syscw; + u64 read_bytes; + u64 write_bytes; + u64 cancelled_write_bytes; +}; + +typedef struct { + long unsigned int bits[1]; +} nodemask_t; + +struct seqcount { + unsigned int sequence; +}; + +typedef struct seqcount seqcount_t; + +struct seqcount_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_spinlock seqcount_spinlock_t; + +struct tlbflush_unmap_batch {}; + +struct page; + +struct page_frag { + struct page *page; + __u16 offset; + __u16 size; +}; + +struct kmap_ctrl {}; + +struct timer_list { + struct hlist_node entry; + long unsigned int expires; + void (*function)(struct timer_list *); + u32 flags; +}; + +struct sched_class; + +struct task_group; + +struct rcu_node; + +struct mm_struct; + +struct address_space; + +struct pid; + +struct cred; + +struct key; + +struct nameidata; + +struct fs_struct; + +struct files_struct; + +struct io_uring_task; + +struct nsproxy; + +struct signal_struct; + +struct sighand_struct; + +struct rt_mutex_waiter; + +struct bio_list; + +struct blk_plug; + +struct reclaim_state; + +struct io_context; + +struct capture_control; + +struct kernel_siginfo; + +typedef struct kernel_siginfo kernel_siginfo_t; + +struct css_set; + +struct robust_list_head; + +struct futex_pi_state; + +struct perf_event_context; + +struct rseq; + +struct task_delay_info; + +struct mem_cgroup; + +struct obj_cgroup; + +struct gendisk; + +struct 
uprobe_task; + +struct bpf_local_storage; + +struct bpf_run_ctx; + +struct bpf_net_context; + +struct task_struct { + unsigned int __state; + unsigned int saved_state; + void *stack; + refcount_t usage; + unsigned int flags; + unsigned int ptrace; + int on_cpu; + struct __call_single_node wake_entry; + unsigned int wakee_flips; + long unsigned int wakee_flip_decay_ts; + struct task_struct *last_wakee; + int recent_used_cpu; + int wake_cpu; + int on_rq; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_entity se; + struct sched_rt_entity rt; + long: 32; + struct sched_dl_entity dl; + struct sched_dl_entity *dl_server; + long: 32; + struct sched_ext_entity scx; + const struct sched_class *sched_class; + struct task_group *sched_task_group; + long: 32; + long: 32; + struct sched_statistics stats; + unsigned int btrace_seq; + unsigned int policy; + long unsigned int max_allowed_capacity; + int nr_cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t *user_cpus_ptr; + cpumask_t cpus_mask; + void *migration_pending; + short unsigned int migration_disabled; + short unsigned int migration_flags; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; + long unsigned int rcu_tasks_nvcsw; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; + int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_exit_cpu; + struct list_head rcu_tasks_exit_list; + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + struct list_head trc_holdout_list; + struct list_head trc_blkd_node; + int trc_blkd_cpu; + struct sched_info sched_info; + struct list_head tasks; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct address_space *faults_disabled_mapping; + int exit_state; + int exit_code; + int exit_signal; + int pdeath_signal; + long unsigned int jobctl; + unsigned int personality; + unsigned int sched_reset_on_fork: 1; + unsigned int sched_contributes_to_load: 1; + unsigned int sched_migrated: 1; + long: 29; + unsigned int sched_remote_wakeup: 1; + unsigned int sched_rt_mutex: 1; + unsigned int in_execve: 1; + unsigned int in_iowait: 1; + unsigned int no_cgroup_migration: 1; + unsigned int frozen: 1; + unsigned int use_memdelay: 1; + unsigned int in_memstall: 1; + unsigned int in_eventfd: 1; + unsigned int in_thrashing: 1; + long unsigned int atomic_flags; + struct restart_block restart_block; + pid_t pid; + pid_t tgid; + long unsigned int stack_canary; + struct task_struct *real_parent; + struct task_struct *parent; + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + struct list_head ptraced; + struct list_head ptrace_entry; + struct pid *thread_pid; + struct hlist_node pid_links[4]; + struct list_head thread_node; + struct completion *vfork_done; + int *set_child_tid; + int *clear_child_tid; + void *worker_private; + long: 32; + u64 utime; + u64 stime; + u64 gtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + u64 start_time; + u64 start_boottime; + long unsigned int min_flt; + long unsigned int maj_flt; + struct posix_cputimers posix_cputimers; + const struct cred *ptracer_cred; + const struct cred 
*real_cred; + const struct cred *cred; + struct key *cached_requested_key; + char comm[16]; + struct nameidata *nameidata; + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; + long unsigned int last_switch_count; + long unsigned int last_switch_time; + struct fs_struct *fs; + struct files_struct *files; + struct io_uring_task *io_uring; + struct nsproxy *nsproxy; + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + sigset_t saved_sigmask; + struct sigpending pending; + long unsigned int sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + struct callback_head *task_works; + struct seccomp seccomp; + struct syscall_user_dispatch syscall_dispatch; + long: 32; + u64 parent_exec_id; + u64 self_exec_id; + spinlock_t alloc_lock; + raw_spinlock_t pi_lock; + struct wake_q_node wake_q; + struct rb_root_cached pi_waiters; + struct task_struct *pi_top_task; + struct rt_mutex_waiter *pi_blocked_on; + void *journal_info; + struct bio_list *bio_list; + struct blk_plug *plug; + struct reclaim_state *reclaim_state; + struct io_context *io_context; + struct capture_control *capture_control; + long unsigned int ptrace_message; + kernel_siginfo_t *last_siginfo; + long: 32; + struct task_io_accounting ioac; + unsigned int psi_flags; + long: 32; + u64 acct_rss_mem1; + u64 acct_vm_mem1; + u64 acct_timexpd; + nodemask_t mems_allowed; + seqcount_spinlock_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + struct css_set *cgroups; + struct list_head cg_list; + struct robust_list_head *robust_list; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; + u8 perf_recursion[4]; + struct perf_event_context *perf_event_ctxp; + struct mutex perf_event_mutex; + struct list_head perf_event_list; + struct rseq *rseq; + u32 rseq_len; + u32 rseq_sig; + long unsigned int rseq_event_mask; + int mm_cid; + int last_mm_cid; + int migrate_from_cpu; + int mm_cid_active; + struct callback_head cid_work; + struct tlbflush_unmap_batch tlb_ubc; + struct pipe_inode_info *splice_pipe; + struct page_frag task_frag; + struct task_delay_info *delays; + int nr_dirtied; + int nr_dirtied_pause; + long unsigned int dirty_paused_when; + u64 timer_slack_ns; + u64 default_timer_slack_ns; + long unsigned int trace_recursion; + unsigned int memcg_nr_pages_over_high; + struct mem_cgroup *active_memcg; + struct obj_cgroup *objcg; + struct gendisk *throttle_disk; + struct uprobe_task *utask; + struct kmap_ctrl kmap_ctrl; + struct callback_head rcu; + refcount_t rcu_users; + int pagefault_disabled; + struct task_struct *oom_reaper_list; + struct timer_list oom_reaper_timer; + struct bpf_local_storage *bpf_storage; + struct bpf_run_ctx *bpf_ctx; + struct bpf_net_context *bpf_net_context; + struct llist_head kretprobe_instances; + struct thread_struct thread; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct pt_regs { + long unsigned int pad0[8]; + long unsigned int regs[32]; + long unsigned int cp0_status; + long unsigned int hi; + long unsigned int lo; + long unsigned int cp0_badvaddr; + long unsigned int cp0_cause; + long unsigned int cp0_epc; + long unsigned int __last[0]; +}; + +typedef struct { + 
arch_rwlock_t raw_lock; +} rwlock_t; + +enum pid_type { + PIDTYPE_PID = 0, + PIDTYPE_TGID = 1, + PIDTYPE_PGID = 2, + PIDTYPE_SID = 3, + PIDTYPE_MAX = 4, +}; + +struct page_pool; + +struct dev_pagemap; + +struct page { + long unsigned int flags; + union { + struct { + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + struct list_head buddy_list; + struct list_head pcp_list; + }; + struct address_space *mapping; + union { + long unsigned int index; + long unsigned int share; + }; + long unsigned int private; + }; + struct { + long unsigned int pp_magic; + struct page_pool *pp; + long unsigned int _pp_mapping_pad; + long unsigned int dma_addr; + atomic_long_t pp_ref_count; + }; + struct { + long unsigned int compound_head; + }; + struct { + struct dev_pagemap *pgmap; + void *zone_device_data; + }; + struct callback_head callback_head; + }; + union { + unsigned int page_type; + atomic_t _mapcount; + }; + atomic_t _refcount; + long unsigned int memcg_data; +}; + +typedef struct { + long unsigned int pgd; +} pgd_t; + +typedef struct { + long unsigned int pgprot; +} pgprot_t; + +struct seqcount_raw_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; + +struct hrtimer_cpu_base; + +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_raw_spinlock_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(); + ktime_t offset; +}; + +struct rlimit { + __kernel_ulong_t rlim_cur; + __kernel_ulong_t rlim_max; +}; + +typedef void __signalfn_t(int); + +typedef __signalfn_t *__sighandler_t; + +union sigval { + int sival_int; + void *sival_ptr; +}; + +typedef union sigval sigval_t; + +union __sifields { + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + } _kill; + struct { + __kernel_timer_t _tid; + int _overrun; + sigval_t _sigval; + int _sys_private; + } _timer; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + sigval_t _sigval; + } _rt; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + int _status; + __kernel_clock_t _utime; + __kernel_clock_t _stime; + } _sigchld; + struct { + void *_addr; + union { + int _trapno; + short int _addr_lsb; + struct { + char _dummy_bnd[4]; + void *_lower; + void *_upper; + } _addr_bnd; + struct { + char _dummy_pkey[4]; + __u32 _pkey; + } _addr_pkey; + struct { + long unsigned int _data; + __u32 _type; + __u32 _flags; + } _perf; + }; + } _sigfault; + struct { + long int _band; + int _fd; + } _sigpoll; + struct { + void *_call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; +}; + +struct kernel_siginfo { + struct { + int si_signo; + int si_code; + int si_errno; + union __sifields _sifields; + }; +}; + +struct sigaction { + unsigned int sa_flags; + __sighandler_t sa_handler; + sigset_t sa_mask; +}; + +struct k_sigaction { + struct sigaction sa; +}; + +struct rseq { + __u32 cpu_id_start; + __u32 cpu_id; + __u64 rseq_cs; + __u32 flags; + __u32 node_id; + __u32 mm_cid; + char end[0]; + long: 32; +}; + +typedef struct { + seqcount_spinlock_t seqcount; + spinlock_t lock; +} seqlock_t; + +typedef struct { + uid_t val; +} kuid_t; + +typedef struct { + gid_t val; +} kgid_t; + +struct work_struct; + +typedef void (*work_func_t)(struct work_struct *); + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +}; + +struct rhash_head { + struct rhash_head *next; +}; + +struct scx_dispatch_q { + raw_spinlock_t lock; + struct 
list_head list; + struct rb_root priq; + u32 nr; + u32 seq; + u64 id; + struct rhash_head hash_node; + struct llist_node free_node; + struct callback_head rcu; +}; + +struct rq_flags; + +struct affinity_context; + +struct sched_class { + void (*enqueue_task)(struct rq *, struct task_struct *, int); + bool (*dequeue_task)(struct rq *, struct task_struct *, int); + void (*yield_task)(struct rq *); + bool (*yield_to_task)(struct rq *, struct task_struct *); + void (*wakeup_preempt)(struct rq *, struct task_struct *, int); + int (*balance)(struct rq *, struct task_struct *, struct rq_flags *); + struct task_struct * (*pick_task)(struct rq *); + struct task_struct * (*pick_next_task)(struct rq *, struct task_struct *); + void (*put_prev_task)(struct rq *, struct task_struct *, struct task_struct *); + void (*set_next_task)(struct rq *, struct task_struct *, bool); + int (*select_task_rq)(struct task_struct *, int, int); + void (*migrate_task_rq)(struct task_struct *, int); + void (*task_woken)(struct rq *, struct task_struct *); + void (*set_cpus_allowed)(struct task_struct *, struct affinity_context *); + void (*rq_online)(struct rq *); + void (*rq_offline)(struct rq *); + struct rq * (*find_lock_rq)(struct task_struct *, struct rq *); + void (*task_tick)(struct rq *, struct task_struct *, int); + void (*task_fork)(struct task_struct *); + void (*task_dead)(struct task_struct *); + void (*switching_to)(struct rq *, struct task_struct *); + void (*switched_from)(struct rq *, struct task_struct *); + void (*switched_to)(struct rq *, struct task_struct *); + void (*reweight_task)(struct rq *, struct task_struct *, const struct load_weight *); + void (*prio_changed)(struct rq *, struct task_struct *, int); + unsigned int (*get_rr_interval)(struct rq *, struct task_struct *); + void (*update_curr)(struct rq *); + void (*task_change_group)(struct task_struct *); +}; + +typedef struct {} lockdep_map_p; + +struct maple_tree { + union { + spinlock_t ma_lock; + lockdep_map_p ma_external_lock; + }; + unsigned int ma_flags; + void *ma_root; +}; + +struct rw_semaphore { + atomic_long_t count; + atomic_long_t owner; + raw_spinlock_t wait_lock; + struct list_head wait_list; +}; + +struct percpu_counter { + raw_spinlock_t lock; + long: 32; + s64 count; + s32 *counters; + long: 32; +}; + +struct wait_queue_head { + spinlock_t lock; + struct list_head head; +}; + +typedef struct wait_queue_head wait_queue_head_t; + +typedef struct { + union { + u64 asid[32]; + atomic64_t mmid; + }; + void *vdso; + spinlock_t bd_emupage_lock; + long unsigned int *bd_emupage_allocmap; + wait_queue_head_t bd_emupage_queue; +} mm_context_t; + +struct xol_area; + +struct uprobes_state { + struct xol_area *xol_area; +}; + +struct mm_cid; + +struct linux_binfmt; + +struct kioctx_table; + +struct user_namespace; + +struct mm_struct { + struct { + struct { + atomic_t mm_count; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct maple_tree mm_mt; + long unsigned int mmap_base; + long unsigned int mmap_legacy_base; + long unsigned int task_size; + pgd_t *pgd; + atomic_t membarrier_state; + atomic_t mm_users; + struct mm_cid *pcpu_cid; + long unsigned int mm_cid_next_scan; + unsigned int nr_cpus_allowed; + 
atomic_t max_nr_cid; + raw_spinlock_t cpus_allowed_lock; + atomic_long_t pgtables_bytes; + int map_count; + spinlock_t page_table_lock; + struct rw_semaphore mmap_lock; + struct list_head mmlist; + long unsigned int hiwater_rss; + long unsigned int hiwater_vm; + long unsigned int total_vm; + long unsigned int locked_vm; + atomic64_t pinned_vm; + long unsigned int data_vm; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int def_flags; + seqcount_t write_protect_seq; + spinlock_t arg_lock; + long unsigned int start_code; + long unsigned int end_code; + long unsigned int start_data; + long unsigned int end_data; + long unsigned int start_brk; + long unsigned int brk; + long unsigned int start_stack; + long unsigned int arg_start; + long unsigned int arg_end; + long unsigned int env_start; + long unsigned int env_end; + long unsigned int saved_auxv[48]; + long: 32; + struct percpu_counter rss_stat[4]; + struct linux_binfmt *binfmt; + long: 32; + mm_context_t context; + long unsigned int flags; + spinlock_t ioctx_lock; + struct kioctx_table *ioctx_table; + struct task_struct *owner; + struct user_namespace *user_ns; + struct file *exe_file; + atomic_t tlb_flush_pending; + struct uprobes_state uprobes_state; + struct work_struct async_put_work; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + long unsigned int cpu_bitmap[0]; +}; + +struct xarray { + spinlock_t xa_lock; + gfp_t xa_flags; + void *xa_head; +}; + +typedef u32 errseq_t; + +struct address_space_operations; + +struct address_space { + struct inode *host; + struct xarray i_pages; + struct rw_semaphore invalidate_lock; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + struct rb_root_cached i_mmap; + long unsigned int nrpages; + long unsigned int writeback_index; + const struct address_space_operations *a_ops; + long unsigned int flags; + errseq_t wb_err; + spinlock_t i_private_lock; + struct list_head i_private_list; + struct rw_semaphore i_mmap_rwsem; + void *i_private_data; +}; + +struct pid_namespace; + +struct upid { + int nr; + struct pid_namespace *ns; +}; + +struct pid { + refcount_t count; + unsigned int level; + spinlock_t lock; + struct dentry *stashed; + u64 ino; + struct hlist_head tasks[4]; + struct hlist_head inodes; + wait_queue_head_t wait_pidfd; + struct callback_head rcu; + struct upid numbers[0]; +}; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + +typedef struct { + u64 val; +} kernel_cap_t; + +struct user_struct; + +struct ucounts; + +struct group_info; + +struct cred { + atomic_long_t usage; + kuid_t uid; + kgid_t gid; + kuid_t suid; + kgid_t sgid; + kuid_t euid; + kgid_t egid; + kuid_t fsuid; + kgid_t fsgid; + unsigned int securebits; + kernel_cap_t cap_inheritable; + kernel_cap_t cap_permitted; + kernel_cap_t cap_effective; + kernel_cap_t cap_bset; + kernel_cap_t cap_ambient; + unsigned char jit_keyring; + struct key *session_keyring; + struct key *process_keyring; + struct key *thread_keyring; + struct key *request_key_auth; + struct user_struct *user; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct group_info *group_info; + union { + int non_rcu; + struct callback_head rcu; + }; + long: 32; +}; + +typedef int32_t key_serial_t; + +typedef uint32_t 
key_perm_t; + +struct key_type; + +struct key_tag; + +struct keyring_index_key { + long unsigned int hash; + union { + struct { + char desc[2]; + u16 desc_len; + }; + long unsigned int x; + }; + struct key_type *type; + struct key_tag *domain_tag; + const char *description; +}; + +union key_payload { + void *rcu_data0; + void *data[4]; +}; + +struct assoc_array_ptr; + +struct assoc_array { + struct assoc_array_ptr *root; + long unsigned int nr_leaves_on_tree; +}; + +struct key_user; + +struct key_restriction; + +struct key { + refcount_t usage; + key_serial_t serial; + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + struct rw_semaphore sem; + struct key_user *user; + void *security; + union { + time64_t expiry; + time64_t revoked_at; + }; + time64_t last_used_at; + kuid_t uid; + kgid_t gid; + key_perm_t perm; + short unsigned int quotalen; + short unsigned int datalen; + short int state; + long unsigned int flags; + union { + struct keyring_index_key index_key; + struct { + long unsigned int hash; + long unsigned int len_desc; + struct key_type *type; + struct key_tag *domain_tag; + char *description; + }; + }; + union { + union key_payload payload; + struct { + struct list_head name_link; + struct assoc_array keys; + }; + }; + struct key_restriction *restrict_link; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; +}; + +struct pacct_struct { + int ac_flag; + long int ac_exitcode; + long unsigned int ac_mem; + long: 32; + u64 ac_utime; + u64 ac_stime; + long unsigned int ac_minflt; + long unsigned int ac_majflt; +}; + +struct core_state; + +struct tty_struct; + +struct taskstats; + +struct signal_struct { + refcount_t sigcnt; + atomic_t live; + int nr_threads; + int quick_threads; + struct list_head thread_head; + wait_queue_head_t wait_chldexit; + struct task_struct *curr_target; + struct sigpending shared_pending; + struct hlist_head multiprocess; + int group_exit_code; + int notify_count; + struct task_struct *group_exec_task; + int group_stop_count; + unsigned int flags; + struct core_state *core_state; + unsigned int is_child_subreaper: 1; + unsigned int has_child_subreaper: 1; + unsigned int next_posix_timer_id; + struct hlist_head posix_timers; + struct hlist_head ignored_posix_timers; + long: 32; + struct hrtimer real_timer; + ktime_t it_real_incr; + struct cpu_itimer it[2]; + struct thread_group_cputimer cputimer; + struct posix_cputimers posix_cputimers; + struct pid *pids[4]; + struct pid *tty_old_pgrp; + int leader; + struct tty_struct *tty; + seqlock_t stats_lock; + long: 32; + u64 utime; + u64 stime; + u64 cutime; + u64 cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + long unsigned int cnvcsw; + long unsigned int cnivcsw; + long unsigned int min_flt; + long unsigned int maj_flt; + long unsigned int cmin_flt; + long unsigned int cmaj_flt; + long unsigned int inblock; + long unsigned int oublock; + long unsigned int cinblock; + long unsigned int coublock; + long unsigned int maxrss; + long unsigned int cmaxrss; + struct task_io_accounting ioac; + long long unsigned int sum_sched_runtime; + struct rlimit rlim[16]; + struct pacct_struct pacct; + struct taskstats *stats; + bool oom_flag_origin; + short int oom_score_adj; + short int oom_score_adj_min; + struct mm_struct *oom_mm; + 
struct mutex cred_guard_mutex; + struct rw_semaphore exec_update_lock; + long: 32; +}; + +struct sighand_struct { + spinlock_t siglock; + refcount_t count; + wait_queue_head_t signalfd_wqh; + struct k_sigaction action[128]; +}; + +struct io_cq; + +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + short unsigned int ioprio; + spinlock_t lock; + struct xarray icq_tree; + struct io_cq *icq_hint; + struct hlist_head icq_list; + struct work_struct release_work; +}; + +enum uprobe_task_state { + UTASK_RUNNING = 0, + UTASK_SSTEP = 1, + UTASK_SSTEP_ACK = 2, + UTASK_SSTEP_TRAPPED = 3, +}; + +struct arch_uprobe_task { + long unsigned int saved_trap_nr; +}; + +struct return_instance; + +struct uprobe; + +struct arch_uprobe; + +struct uprobe_task { + enum uprobe_task_state state; + unsigned int depth; + struct return_instance *return_instances; + union { + struct { + struct arch_uprobe_task autask; + long unsigned int vaddr; + }; + struct { + struct callback_head dup_xol_work; + long unsigned int dup_xol_addr; + }; + }; + struct uprobe *active_uprobe; + struct timer_list ri_timer; + long unsigned int xol_vaddr; + struct arch_uprobe *auprobe; +}; + +struct workqueue_struct; + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + struct workqueue_struct *wq; + int cpu; +}; + +struct rcu_segcblist { + struct callback_head *head; + struct callback_head **tails[4]; + long unsigned int gp_seq[4]; + long int len; + long int seglen[4]; + u8 flags; +}; + +struct srcu_node; + +struct srcu_data { + atomic_long_t srcu_lock_count[2]; + atomic_long_t srcu_unlock_count[2]; + int srcu_reader_flavor; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t lock; + struct rcu_segcblist srcu_cblist; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + bool srcu_cblist_invoking; + struct timer_list delay_work; + struct work_struct work; + struct callback_head srcu_barrier_head; + struct srcu_node *mynode; + long unsigned int grpmask; + int cpu; + struct srcu_struct *ssp; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct srcu_node { + spinlock_t lock; + long unsigned int srcu_have_cbs[4]; + long unsigned int srcu_data_have_cbs[4]; + long unsigned int srcu_gp_seq_needed_exp; + struct srcu_node *srcu_parent; + int grplo; + int grphi; +}; + +struct srcu_usage; + +struct srcu_struct { + unsigned int srcu_idx; + struct srcu_data *sda; + struct lockdep_map dep_map; + struct srcu_usage *srcu_sup; +}; + +struct srcu_usage { + struct srcu_node *node; + struct srcu_node *level[3]; + int srcu_size_state; + struct mutex srcu_cb_mutex; + spinlock_t lock; + struct mutex srcu_gp_mutex; + long unsigned int srcu_gp_seq; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + long unsigned int srcu_gp_start; + long unsigned int srcu_last_gp_end; + long unsigned int srcu_size_jiffies; + long unsigned int srcu_n_lock_retries; + long unsigned 
int srcu_n_exp_nodelay; + bool sda_is_static; + long unsigned int srcu_barrier_seq; + struct mutex srcu_barrier_mutex; + struct completion srcu_barrier_completion; + atomic_t srcu_barrier_cpu_cnt; + long unsigned int reschedule_jiffies; + long unsigned int reschedule_count; + struct delayed_work work; + struct srcu_struct *srcu_ssp; +}; + +struct arch_uprobe { + long unsigned int resume_epc; + u32 insn[2]; + u32 ixol[2]; +}; + +enum hprobe_state { + HPROBE_LEASED = 0, + HPROBE_STABLE = 1, + HPROBE_GONE = 2, + HPROBE_CONSUMED = 3, +}; + +struct hprobe { + enum hprobe_state state; + int srcu_idx; + struct uprobe *uprobe; +}; + +struct return_consumer { + __u64 cookie; + __u64 id; +}; + +struct return_instance { + struct hprobe hprobe; + long unsigned int func; + long unsigned int stack; + long unsigned int orig_ret_vaddr; + bool chained; + int consumers_cnt; + struct return_instance *next; + struct callback_head rcu; + long: 32; + struct return_consumer consumers[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef struct { + long unsigned int val; +} swp_entry_t; + +struct folio { + union { + struct { + long unsigned int flags; + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + }; + struct address_space *mapping; + long unsigned int index; + union { + void *private; + swp_entry_t swap; + }; + atomic_t _mapcount; + atomic_t _refcount; + long unsigned int memcg_data; + }; + struct page page; + }; + union { + struct { + long unsigned int _flags_1; + long unsigned int _head_1; + atomic_t _large_mapcount; + atomic_t _entire_mapcount; + atomic_t _nr_pages_mapped; + atomic_t _pincount; + }; + struct page __page_1; + }; + union { + struct { + long unsigned int _flags_2; + long unsigned int _head_2; + void *_hugetlb_subpool; + void *_hugetlb_cgroup; + void *_hugetlb_cgroup_rsvd; + void *_hugetlb_hwpoison; + }; + struct { + long unsigned int _flags_2a; + long unsigned int _head_2a; + struct list_head _deferred_list; + }; + struct page __page_2; + }; +}; + +typedef long unsigned int vm_flags_t; + +typedef struct { + atomic_t refcnt; +} file_ref_t; + +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +}; + +struct file_ra_state { + long unsigned int start; + unsigned int size; + unsigned int async_size; + unsigned int ra_pages; + unsigned int mmap_miss; + long: 32; + loff_t prev_pos; +}; + +typedef struct { + long unsigned int v; +} freeptr_t; + +struct fown_struct; + +struct file { + file_ref_t f_ref; + spinlock_t f_lock; + fmode_t f_mode; + const struct file_operations *f_op; + struct address_space *f_mapping; + void *private_data; + struct inode *f_inode; + unsigned int f_flags; + unsigned int f_iocb_flags; + const struct cred *f_cred; + struct path f_path; + union { + struct mutex f_pos_lock; + u64 f_pipe; + }; + loff_t f_pos; + struct fown_struct *f_owner; + errseq_t f_wb_err; + errseq_t f_sb_err; + struct hlist_head *f_ep; + union { + struct callback_head f_task_work; + struct llist_node f_llist; + struct file_ra_state f_ra; + freeptr_t f_freeptr; + }; +}; + +struct vm_userfaultfd_ctx {}; + +struct anon_vma; + +struct vm_operations_struct; + +struct vm_area_struct { + union { + struct { + long unsigned int vm_start; + long unsigned int vm_end; + }; + }; + struct mm_struct *vm_mm; + pgprot_t vm_page_prot; + union { + 
const vm_flags_t vm_flags; + vm_flags_t __vm_flags; + }; + struct { + struct rb_node rb; + long unsigned int rb_subtree_last; + } shared; + struct list_head anon_vma_chain; + struct anon_vma *anon_vma; + const struct vm_operations_struct *vm_ops; + long unsigned int vm_pgoff; + struct file *vm_file; + void *vm_private_data; + atomic_long_t swap_readahead_info; + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +}; + +typedef unsigned int vm_fault_t; + +struct vm_fault; + +struct vm_operations_struct { + void (*open)(struct vm_area_struct *); + void (*close)(struct vm_area_struct *); + int (*may_split)(struct vm_area_struct *, long unsigned int); + int (*mremap)(struct vm_area_struct *); + int (*mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int, long unsigned int); + vm_fault_t (*fault)(struct vm_fault *); + vm_fault_t (*huge_fault)(struct vm_fault *, unsigned int); + vm_fault_t (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int); + long unsigned int (*pagesize)(struct vm_area_struct *); + vm_fault_t (*page_mkwrite)(struct vm_fault *); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *); + int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int); + const char * (*name)(struct vm_area_struct *); + struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int); +}; + +struct mm_cid { + u64 time; + int cid; + int recent_cid; +}; + +struct list_lru_node; + +struct list_lru { + struct list_lru_node *node; + struct list_head list; + int shrinker_id; + bool memcg_aware; + struct xarray xa; +}; + +struct kernfs_root; + +struct kernfs_elem_dir { + long unsigned int subdirs; + struct rb_root children; + struct kernfs_root *root; + long unsigned int rev; +}; + +struct kernfs_elem_symlink { + struct kernfs_node *target_kn; +}; + +struct kernfs_ops; + +struct kernfs_open_node; + +struct kernfs_elem_attr { + const struct kernfs_ops *ops; + struct kernfs_open_node *open; + loff_t size; + struct kernfs_node *notify_next; + long: 32; +}; + +struct kernfs_iattrs; + +struct kernfs_node { + atomic_t count; + atomic_t active; + struct kernfs_node *parent; + const char *name; + struct rb_node rb; + const void *ns; + unsigned int hash; + short unsigned int flags; + umode_t mode; + union { + struct kernfs_elem_dir dir; + struct kernfs_elem_symlink symlink; + struct kernfs_elem_attr attr; + }; + u64 id; + void *priv; + struct kernfs_iattrs *iattr; + struct callback_head rcu; +}; + +struct kernfs_open_file; + +struct kernfs_ops { + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); + size_t atomic_write_len; + bool prealloc; + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); + loff_t (*llseek)(struct kernfs_open_file *, loff_t, int); +}; + +struct kernfs_open_file { + struct kernfs_node *kn; + struct file *file; + struct seq_file *seq_file; + void *priv; + struct mutex mutex; + struct mutex prealloc_mutex; + int event; + struct list_head list; + char *prealloc_buf; + size_t atomic_write_len; + bool mmapped: 1; + bool released: 1; + const struct vm_operations_struct *vm_ops; +}; + +typedef 
void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +}; + +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET = 1, + KOBJ_NS_TYPES = 2, +}; + +struct sock; + +struct kobj_ns_type_operations { + enum kobj_ns_type type; + bool (*current_may_mount)(); + void * (*grab_current_ns)(); + const void * (*netlink_ns)(struct sock *); + const void * (*initial_ns)(); + void (*drop_ns)(void *); +}; + +struct kstat { + u32 result_mask; + umode_t mode; + unsigned int nlink; + uint32_t blksize; + u64 attributes; + u64 attributes_mask; + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; + u64 blocks; + u64 mnt_id; + u32 dio_mem_align; + u32 dio_offset_align; + u64 change_cookie; + u64 subvol; + u32 atomic_write_unit_min; + u32 atomic_write_unit_max; + u32 atomic_write_segments_max; + long: 32; +}; + +struct attribute { + const char *name; + umode_t mode; +}; + +struct bin_attribute { + struct attribute attr; + size_t size; + void *private; + struct address_space * (*f_mapping)(); + ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, loff_t, int); + int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *, struct vm_area_struct *); +}; + +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, struct attribute *, int); + umode_t (*is_bin_visible)(struct kobject *, const struct bin_attribute *, int); + size_t (*bin_size)(struct kobject *, const struct bin_attribute *, int); + struct attribute **attrs; + union { + struct bin_attribute **bin_attrs; + const struct bin_attribute * const *bin_attrs_new; + }; +}; + +struct sysfs_ops { + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); +}; + +struct kset_uevent_ops; + +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +}; + +struct kobj_type { + void (*release)(struct kobject *); + const struct sysfs_ops *sysfs_ops; + const struct attribute_group **default_groups; + const struct kobj_ns_type_operations * (*child_ns_type)(const struct kobject *); + const void * (*namespace)(const struct kobject *); + void (*get_ownership)(const struct kobject *, kuid_t *, kgid_t *); +}; + +struct kobj_uevent_env { + char *argv[3]; + char *envp[64]; + int envp_idx; + char buf[2048]; + int buflen; +}; + +struct kset_uevent_ops { + int (* const filter)(const struct kobject *); + const char * (* const name)(const struct kobject *); + int (* const uevent)(const struct kobject *, struct kobj_uevent_env *); +}; + +typedef struct { + __u8 b[16]; +} uuid_t; + +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED = 0, + CLOCK_EVT_STATE_SHUTDOWN = 1, + CLOCK_EVT_STATE_PERIODIC = 2, + CLOCK_EVT_STATE_ONESHOT = 3, + CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, +}; + +struct 
clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(long unsigned int, struct clock_event_device *); + int (*set_next_ktime)(ktime_t, struct clock_event_device *); + long: 32; + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_state state_use_accessors; + unsigned int features; + long unsigned int retries; + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_oneshot_stopped)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); + void (*broadcast)(const struct cpumask *); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); + long unsigned int min_delta_ticks; + long unsigned int max_delta_ticks; + const char *name; + int rating; + int irq; + int bound_on; + const struct cpumask *cpumask; + struct list_head list; + struct module *owner; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum irqreturn { + IRQ_NONE = 0, + IRQ_HANDLED = 1, + IRQ_WAKE_THREAD = 2, +}; + +typedef enum irqreturn irqreturn_t; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +struct proc_dir_entry; + +struct irqaction { + irq_handler_t handler; + void *dev_id; + void *percpu_dev_id; + struct irqaction *next; + irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + long unsigned int thread_flags; + long unsigned int thread_mask; + const char *name; + struct proc_dir_entry *dir; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active: 1; + unsigned int in_hrtirq: 1; + unsigned int hang_detected: 1; + unsigned int softirq_activated: 1; + unsigned int online: 1; + unsigned int nr_events; + short unsigned int nr_retries; + short unsigned int nr_hangs; + unsigned int max_hang_time; + ktime_t expires_next; + struct hrtimer *next_timer; + long: 32; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + long: 32; + struct hrtimer_clock_base clock_base[8]; +}; + +struct hlist_bl_node; + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next; + struct hlist_bl_node **pprev; +}; + +struct lockref { + union { + struct { + spinlock_t lock; + int count; + }; + }; +}; + +struct qstr { + union { + struct { + u32 len; + u32 hash; + }; + u64 hash_len; + }; + const unsigned char *name; + long: 32; +}; + +struct dentry_operations; + +struct dentry { + unsigned int d_flags; + seqcount_spinlock_t d_seq; + struct hlist_bl_node d_hash; + struct dentry *d_parent; + long: 32; + struct qstr d_name; + struct inode *d_inode; + unsigned char d_iname[36]; + const struct dentry_operations *d_op; + struct super_block *d_sb; + long 
unsigned int d_time; + void *d_fsdata; + struct lockref d_lockref; + union { + struct list_head d_lru; + wait_queue_head_t *d_wait; + }; + struct hlist_node d_sib; + struct hlist_head d_children; + union { + struct hlist_node d_alias; + struct hlist_bl_node d_in_lookup_hash; + struct callback_head d_rcu; + } d_u; + long: 32; +}; + +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = 1, + WRITE_LIFE_SHORT = 2, + WRITE_LIFE_MEDIUM = 3, + WRITE_LIFE_LONG = 4, + WRITE_LIFE_EXTREME = 5, +} __attribute__((mode(byte))); + +struct posix_acl; + +struct inode_operations; + +struct bdi_writeback; + +struct file_lock_context; + +struct cdev; + +struct fsnotify_mark_connector; + +struct inode { + umode_t i_mode; + short unsigned int i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + long unsigned int i_ino; + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + time64_t i_atime_sec; + time64_t i_mtime_sec; + time64_t i_ctime_sec; + u32 i_atime_nsec; + u32 i_mtime_nsec; + u32 i_ctime_nsec; + u32 i_generation; + spinlock_t i_lock; + short unsigned int i_bytes; + u8 i_blkbits; + enum rw_hint i_write_hint; + blkcnt_t i_blocks; + seqcount_t i_size_seqcount; + u32 i_state; + struct rw_semaphore i_rwsem; + long unsigned int dirtied_when; + long unsigned int dirtied_time_when; + struct hlist_node i_hash; + struct list_head i_io_list; + struct bdi_writeback *i_wb; + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + struct list_head i_lru; + struct list_head i_sb_list; + struct list_head i_wb_list; + union { + struct hlist_head i_dentry; + struct callback_head i_rcu; + }; + atomic64_t i_version; + atomic64_t i_sequence; + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; + atomic_t i_readcount; + union { + const struct file_operations *i_fop; + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct cdev *i_cdev; + char *i_link; + unsigned int i_dir_seq; + }; + __u32 i_fsnotify_mask; + struct fsnotify_mark_connector *i_fsnotify_marks; + void *i_private; + long: 32; +}; + +enum d_real_type { + D_REAL_DATA = 0, + D_REAL_METADATA = 1, +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, struct qstr *); + int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char * (*d_dname)(struct dentry *, char *, int); + struct vfsmount * (*d_automount)(struct path *); + int (*d_manage)(const struct path *, bool); + struct dentry * (*d_real)(struct dentry *, enum d_real_type); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct mtd_info; + +typedef long long int qsize_t; + +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int 
dqi_fmt_id; + struct list_head dqi_dirty_list; + long unsigned int dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + long: 32; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; + void *dqi_priv; + long: 32; +}; + +struct quota_format_ops; + +struct quota_info { + unsigned int flags; + struct rw_semaphore dqio_sem; + struct inode *files[3]; + long: 32; + struct mem_dqinfo info[3]; + const struct quota_format_ops *ops[3]; + long: 32; +}; + +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + struct callback_head cb_head; +}; + +struct rcuwait { + struct task_struct *task; +}; + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int *read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +}; + +struct sb_writers { + short unsigned int frozen; + int freeze_kcount; + int freeze_ucount; + struct percpu_rw_semaphore rw_sem[3]; +}; + +struct super_operations; + +struct dquot_operations; + +struct quotactl_ops; + +struct export_operations; + +struct xattr_handler; + +struct block_device; + +struct backing_dev_info; + +struct fsnotify_sb_info; + +struct shrinker; + +struct super_block { + struct list_head s_list; + dev_t s_dev; + unsigned char s_blocksize_bits; + long unsigned int s_blocksize; + long: 32; + loff_t s_maxbytes; + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + long unsigned int s_flags; + long unsigned int s_iflags; + long unsigned int s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; + const struct xattr_handler * const *s_xattr; + struct hlist_bl_head s_roots; + struct list_head s_mounts; + struct block_device *s_bdev; + struct file *s_bdev_file; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; + long: 32; + struct quota_info s_dquot; + struct sb_writers s_writers; + void *s_fs_info; + u32 s_time_gran; + time64_t s_time_min; + time64_t s_time_max; + u32 s_fsnotify_mask; + struct fsnotify_sb_info *s_fsnotify_info; + char s_id[32]; + uuid_t s_uuid; + u8 s_uuid_len; + char s_sysfs_name[37]; + unsigned int s_max_links; + struct mutex s_vfs_rename_mutex; + const char *s_subtype; + const struct dentry_operations *s_d_op; + struct shrinker *s_shrink; + atomic_long_t s_remove_count; + int s_readonly_remount; + errseq_t s_wb_err; + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + struct user_namespace *s_user_ns; + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; + struct callback_head rcu; + struct work_struct destroy_work; + struct mutex s_sync_lock; + int s_stack_depth; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t s_inode_list_lock; + struct list_head s_inodes; + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct mnt_idmap; + +struct vfsmount { + struct dentry 
*mnt_root; + struct super_block *mnt_sb; + int mnt_flags; + struct mnt_idmap *mnt_idmap; +}; + +struct shrink_control { + gfp_t gfp_mask; + int nid; + long unsigned int nr_to_scan; + long unsigned int nr_scanned; + struct mem_cgroup *memcg; +}; + +struct shrinker { + long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *); + long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *); + long int batch; + int seeks; + unsigned int flags; + refcount_t refcount; + struct completion done; + struct callback_head rcu; + void *private_data; + struct list_head list; + int id; + atomic_long_t *nr_deferred; +}; + +struct list_lru_one { + struct list_head list; + long int nr_items; + spinlock_t lock; +}; + +struct list_lru_node { + struct list_lru_one lru; + atomic_long_t nr_items; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum migrate_mode { + MIGRATE_ASYNC = 0, + MIGRATE_SYNC_LIGHT = 1, + MIGRATE_SYNC = 2, +}; + +struct exception_table_entry { + long unsigned int insn; + long unsigned int nextinsn; +}; + +struct key_tag { + struct callback_head rcu; + refcount_t usage; + bool removed; +}; + +typedef int (*request_key_actor_t)(struct key *, void *); + +struct key_preparsed_payload; + +struct key_match_data; + +struct kernel_pkey_params; + +struct kernel_pkey_query; + +struct key_type { + const char *name; + size_t def_datalen; + unsigned int flags; + int (*vet_description)(const char *); + int (*preparse)(struct key_preparsed_payload *); + void (*free_preparse)(struct key_preparsed_payload *); + int (*instantiate)(struct key *, struct key_preparsed_payload *); + int (*update)(struct key *, struct key_preparsed_payload *); + int (*match_preparse)(struct key_match_data *); + void (*match_free)(struct key_match_data *); + void (*revoke)(struct key *); + void (*destroy)(struct key *); + void (*describe)(const struct key *, struct seq_file *); + long int (*read)(const struct key *, char *, size_t); + request_key_actor_t request_key; + struct key_restriction * (*lookup_restriction)(const char *); + int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); + struct list_head link; + struct lock_class_key lock_class; +}; + +typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +struct user_struct { + refcount_t __count; + long: 32; + struct percpu_counter epoll_watches; + long unsigned int unix_inflight; + atomic_long_t pipe_bufs; + struct hlist_node uidhash_node; + kuid_t uid; + atomic_long_t locked_vm; + struct ratelimit_state ratelimit; + long: 32; +}; + +struct group_info { + refcount_t usage; + int ngroups; + kgid_t gid[0]; +}; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +struct request_queue; + +struct 
kmem_cache; + +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct callback_head __rcu_head; + }; + unsigned int flags; +}; + +typedef struct { + uid_t val; +} vfsuid_t; + +typedef struct { + gid_t val; +} vfsgid_t; + +struct wait_page_queue; + +struct kiocb { + struct file *ki_filp; + long: 32; + loff_t ki_pos; + void (*ki_complete)(struct kiocb *, long int); + void *private; + int ki_flags; + u16 ki_ioprio; + union { + struct wait_page_queue *ki_waitq; + ssize_t (*dio_complete)(void *); + }; + long: 32; +}; + +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + union { + kuid_t ia_uid; + vfsuid_t ia_vfsuid; + }; + union { + kgid_t ia_gid; + vfsgid_t ia_vfsgid; + }; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct file *ia_file; + long: 32; +}; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +enum quota_type { + USRQUOTA = 0, + GRPQUOTA = 1, + PRJQUOTA = 2, +}; + +struct kqid { + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; +}; + +struct mem_dqblk { + qsize_t dqb_bhardlimit; + qsize_t dqb_bsoftlimit; + qsize_t dqb_curspace; + qsize_t dqb_rsvspace; + qsize_t dqb_ihardlimit; + qsize_t dqb_isoftlimit; + qsize_t dqb_curinodes; + time64_t dqb_btime; + time64_t dqb_itime; +}; + +struct dquot { + struct hlist_node dq_hash; + struct list_head dq_inuse; + struct list_head dq_free; + struct list_head dq_dirty; + struct mutex dq_lock; + spinlock_t dq_dqb_lock; + atomic_t dq_count; + struct super_block *dq_sb; + struct kqid dq_id; + long: 32; + loff_t dq_off; + long unsigned int dq_flags; + long: 32; + struct mem_dqblk dq_dqb; +}; + +struct quota_format_type { + int qf_fmt_id; + const struct quota_format_ops *qf_ops; + struct module *qf_owner; + struct quota_format_type *qf_next; +}; + +struct quota_format_ops { + int (*check_quota_file)(struct super_block *, int); + int (*read_file_info)(struct super_block *, int); + int (*write_file_info)(struct super_block *, int); + int (*free_file_info)(struct super_block *, int); + int (*read_dqblk)(struct dquot *); + int (*commit_dqblk)(struct dquot *); + int (*release_dqblk)(struct dquot *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct dquot_operations { + int (*write_dquot)(struct dquot *); + struct dquot * (*alloc_dquot)(struct super_block *, int); + void (*destroy_dquot)(struct dquot *); + int (*acquire_dquot)(struct dquot *); + int (*release_dquot)(struct dquot *); + int (*mark_dirty)(struct dquot *); + int (*write_info)(struct super_block *, int); + qsize_t * (*get_reserved_space)(struct inode *); + int (*get_projid)(struct inode *, kprojid_t *); + int (*get_inode_usage)(struct inode *, qsize_t *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct qc_dqblk { + int d_fieldmask; + long: 32; + u64 d_spc_hardlimit; + u64 d_spc_softlimit; + u64 d_ino_hardlimit; + u64 d_ino_softlimit; + u64 d_space; + u64 d_ino_count; + s64 d_ino_timer; + s64 d_spc_timer; + int d_ino_warns; + int d_spc_warns; + u64 d_rt_spc_hardlimit; + u64 d_rt_spc_softlimit; + u64 d_rt_space; + s64 d_rt_spc_timer; + int d_rt_spc_warns; + long: 32; +}; + +struct qc_type_state { + unsigned int flags; + unsigned int spc_timelimit; + unsigned int ino_timelimit; + unsigned int rt_spc_timelimit; + unsigned int spc_warnlimit; + unsigned int 
ino_warnlimit; + unsigned int rt_spc_warnlimit; + long: 32; + long long unsigned int ino; + blkcnt_t blocks; + blkcnt_t nextents; +}; + +struct qc_state { + unsigned int s_incoredqs; + long: 32; + struct qc_type_state s_state[3]; +}; + +struct qc_info { + int i_fieldmask; + unsigned int i_flags; + unsigned int i_spc_timelimit; + unsigned int i_ino_timelimit; + unsigned int i_rt_spc_timelimit; + unsigned int i_spc_warnlimit; + unsigned int i_ino_warnlimit; + unsigned int i_rt_spc_warnlimit; +}; + +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int (*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int (*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct writeback_control; + +struct readahead_control; + +struct swap_info_struct; + +struct address_space_operations { + int (*writepage)(struct page *, struct writeback_control *); + int (*read_folio)(struct file *, struct folio *); + int (*writepages)(struct address_space *, struct writeback_control *); + bool (*dirty_folio)(struct address_space *, struct folio *); + void (*readahead)(struct readahead_control *); + int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, struct folio **, void **); + int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct folio *, void *); + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidate_folio)(struct folio *, size_t, size_t); + bool (*release_folio)(struct folio *, gfp_t); + void (*free_folio)(struct folio *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); + int (*migrate_folio)(struct address_space *, struct folio *, struct folio *, enum migrate_mode); + int (*launder_folio)(struct folio *); + bool (*is_partially_uptodate)(struct folio *, size_t, size_t); + void (*is_dirty_writeback)(struct folio *, bool *, bool *); + int (*error_remove_folio)(struct address_space *, struct folio *); + int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); + void (*swap_deactivate)(struct file *); + int (*swap_rw)(struct kiocb *, struct iov_iter *); +}; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +}; + +struct fiemap_extent_info; + +struct fileattr; + +struct offset_ctx; + +struct inode_operations { + struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); + const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); + int (*permission)(struct mnt_idmap *, struct inode *, int); + struct posix_acl * (*get_inode_acl)(struct inode *, int, bool); + int (*readlink)(struct dentry *, char *, int); + int (*create)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, bool); + int (*link)(struct dentry *, struct inode *, struct dentry *); + int (*unlink)(struct inode *, struct dentry *); + int (*symlink)(struct mnt_idmap *, struct inode *, struct dentry *, const char *); + int (*mkdir)(struct 
mnt_idmap *, struct inode *, struct dentry *, umode_t); + int (*rmdir)(struct inode *, struct dentry *); + int (*mknod)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, dev_t); + int (*rename)(struct mnt_idmap *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); + int (*setattr)(struct mnt_idmap *, struct dentry *, struct iattr *); + int (*getattr)(struct mnt_idmap *, const struct path *, struct kstat *, u32, unsigned int); + ssize_t (*listxattr)(struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); + int (*update_time)(struct inode *, int); + int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); + int (*tmpfile)(struct mnt_idmap *, struct inode *, struct file *, umode_t); + struct posix_acl * (*get_acl)(struct mnt_idmap *, struct dentry *, int); + int (*set_acl)(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); + int (*fileattr_set)(struct mnt_idmap *, struct dentry *, struct fileattr *); + int (*fileattr_get)(struct dentry *, struct fileattr *); + struct offset_ctx * (*get_offset_ctx)(struct inode *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct fown_struct { + struct file *file; + rwlock_t lock; + struct pid *pid; + enum pid_type pid_type; + kuid_t uid; + kuid_t euid; + int signum; +}; + +enum freeze_holder { + FREEZE_HOLDER_KERNEL = 1, + FREEZE_HOLDER_USERSPACE = 2, + FREEZE_MAY_NEST = 4, +}; + +struct kstatfs; + +struct super_operations { + struct inode * (*alloc_inode)(struct super_block *); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + void (*dirty_inode)(struct inode *, int); + int (*write_inode)(struct inode *, struct writeback_control *); + int (*drop_inode)(struct inode *); + void (*evict_inode)(struct inode *); + void (*put_super)(struct super_block *); + int (*sync_fs)(struct super_block *, int); + int (*freeze_super)(struct super_block *, enum freeze_holder); + int (*freeze_fs)(struct super_block *); + int (*thaw_super)(struct super_block *, enum freeze_holder); + int (*unfreeze_fs)(struct super_block *); + int (*statfs)(struct dentry *, struct kstatfs *); + int (*remount_fs)(struct super_block *, int *, char *); + void (*umount_begin)(struct super_block *); + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); + long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); + long int (*free_cached_objects)(struct super_block *, struct shrink_control *); + void (*shutdown)(struct super_block *); +}; + +struct fid; + +struct iomap; + +struct export_operations { + int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); + struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); + struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); + int (*get_name)(struct dentry *, char *, struct dentry *); + struct dentry * (*get_parent)(struct dentry *); + int (*commit_metadata)(struct inode *); + int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); + int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); + int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); + long unsigned int flags; +}; + +struct xattr_handler { + const char *name; + const char *prefix; + int 
flags; + bool (*list)(struct dentry *); + int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); + int (*set)(const struct xattr_handler *, struct mnt_idmap *, struct dentry *, struct inode *, const char *, const void *, size_t, int); +}; + +typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); + +struct dir_context { + filldir_t actor; + long: 32; + loff_t pos; +}; + +struct offset_ctx { + struct maple_tree mt; + long unsigned int next_offset; +}; + +struct p_log; + +struct fs_parameter; + +struct fs_parse_result; + +typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *); + +struct fs_parameter_spec { + const char *name; + fs_param_type *type; + u8 opt; + short unsigned int flags; + const void *data; +}; + +typedef __u32 Elf32_Addr; + +typedef __u16 Elf32_Half; + +typedef __u32 Elf32_Word; + +struct elf32_sym { + Elf32_Word st_name; + Elf32_Addr st_value; + Elf32_Word st_size; + unsigned char st_info; + unsigned char st_other; + Elf32_Half st_shndx; +}; + +struct kparam_string; + +struct kparam_array; + +struct kernel_param { + const char *name; + struct module *mod; + const struct kernel_param_ops *ops; + const u16 perm; + s8 level; + u8 flags; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +struct kparam_array { + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct tracepoint_ext { + int (*regfunc)(); + void (*unregfunc)(); + unsigned int faultable: 1; +}; + +struct tracepoint { + const char *name; + struct static_key_false key; + struct static_call_key *static_call_key; + void *static_call_tramp; + void *iterator; + void *probestub; + struct tracepoint_func *funcs; + struct tracepoint_ext *ext; +}; + +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; + u32 writable_size; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +struct kernel_symbol { + long unsigned int value; + const char *name; + const char *namespace; +}; + +struct seq_operations { + void * (*start)(struct seq_file *, loff_t *); + void (*stop)(struct seq_file *, void *); + void * (*next)(struct seq_file *, void *, loff_t *); + int (*show)(struct seq_file *, void *); +}; + +enum cpu_type_enum { + CPU_UNKNOWN = 0, + CPU_R2000 = 1, + CPU_R3000 = 2, + CPU_R3000A = 3, + CPU_R3041 = 4, + CPU_R3051 = 5, + CPU_R3052 = 6, + CPU_R3081 = 7, + CPU_R3081E = 8, + CPU_R4000PC = 9, + CPU_R4000SC = 10, + CPU_R4000MC = 11, + CPU_R4200 = 12, + CPU_R4300 = 13, + CPU_R4310 = 14, + CPU_R4400PC = 15, + CPU_R4400SC = 16, + CPU_R4400MC = 17, + CPU_R4600 = 18, + CPU_R4640 = 19, + CPU_R4650 = 20, + CPU_R4700 = 21, + CPU_R5000 = 22, + CPU_R5500 = 23, + CPU_NEVADA = 24, + CPU_R10000 = 25, + CPU_R12000 = 26, + CPU_R14000 = 27, + CPU_R16000 = 28, + CPU_RM7000 = 29, + CPU_SR71000 = 30, + CPU_TX49XX = 31, + CPU_4KC = 32, + CPU_4KEC = 33, + CPU_4KSC = 
34, + CPU_24K = 35, + CPU_34K = 36, + CPU_1004K = 37, + CPU_74K = 38, + CPU_ALCHEMY = 39, + CPU_PR4450 = 40, + CPU_BMIPS32 = 41, + CPU_BMIPS3300 = 42, + CPU_BMIPS4350 = 43, + CPU_BMIPS4380 = 44, + CPU_BMIPS5000 = 45, + CPU_XBURST = 46, + CPU_LOONGSON32 = 47, + CPU_M14KC = 48, + CPU_M14KEC = 49, + CPU_INTERAPTIV = 50, + CPU_P5600 = 51, + CPU_PROAPTIV = 52, + CPU_1074K = 53, + CPU_M5150 = 54, + CPU_I6400 = 55, + CPU_P6600 = 56, + CPU_M6250 = 57, + CPU_5KC = 58, + CPU_5KE = 59, + CPU_20KC = 60, + CPU_25KF = 61, + CPU_SB1 = 62, + CPU_SB1A = 63, + CPU_LOONGSON2EF = 64, + CPU_LOONGSON64 = 65, + CPU_CAVIUM_OCTEON = 66, + CPU_CAVIUM_OCTEON_PLUS = 67, + CPU_CAVIUM_OCTEON2 = 68, + CPU_CAVIUM_OCTEON3 = 69, + CPU_I6500 = 70, + CPU_QEMU_GENERIC = 71, + CPU_LAST = 72, +}; + +typedef struct { + long unsigned int pte; +} pte_t; + +typedef struct page *pgtable_t; + +struct vmem_altmap { + long unsigned int base_pfn; + const long unsigned int end_pfn; + const long unsigned int reserve; + long unsigned int free; + long unsigned int align; + long unsigned int alloc; + bool inaccessible; +}; + +struct percpu_ref_data; + +struct percpu_ref { + long unsigned int percpu_count_ptr; + struct percpu_ref_data *data; +}; + +enum memory_type { + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_COHERENT = 2, + MEMORY_DEVICE_FS_DAX = 3, + MEMORY_DEVICE_GENERIC = 4, + MEMORY_DEVICE_PCI_P2PDMA = 5, +}; + +struct range { + u64 start; + u64 end; +}; + +struct dev_pagemap_ops; + +struct dev_pagemap { + struct vmem_altmap altmap; + struct percpu_ref ref; + struct completion done; + enum memory_type type; + unsigned int flags; + long unsigned int vmemmap_shift; + const struct dev_pagemap_ops *ops; + void *owner; + int nr_range; + long: 32; + union { + struct range range; + struct { + struct {} __empty_ranges; + struct range ranges[0]; + }; + }; +}; + +enum fault_flag { + FAULT_FLAG_WRITE = 1, + FAULT_FLAG_MKWRITE = 2, + FAULT_FLAG_ALLOW_RETRY = 4, + FAULT_FLAG_RETRY_NOWAIT = 8, + FAULT_FLAG_KILLABLE = 16, + FAULT_FLAG_TRIED = 32, + FAULT_FLAG_USER = 64, + FAULT_FLAG_REMOTE = 128, + FAULT_FLAG_INSTRUCTION = 256, + FAULT_FLAG_INTERRUPTIBLE = 512, + FAULT_FLAG_UNSHARE = 1024, + FAULT_FLAG_ORIG_PTE_VALID = 2048, + FAULT_FLAG_VMA_LOCK = 4096, +}; + +typedef struct { + pgd_t pgd; +} p4d_t; + +typedef struct { + p4d_t p4d; +} pud_t; + +typedef struct { + pud_t pud; +} pmd_t; + +struct vm_fault { + const struct { + struct vm_area_struct *vma; + gfp_t gfp_mask; + long unsigned int pgoff; + long unsigned int address; + long unsigned int real_address; + }; + enum fault_flag flags; + pmd_t *pmd; + pud_t *pud; + union { + pte_t orig_pte; + pmd_t orig_pmd; + }; + struct page *cow_page; + struct page *page; + pte_t *pte; + spinlock_t *ptl; + pgtable_t prealloc_pte; +}; + +enum zone_stat_item { + NR_FREE_PAGES = 0, + NR_ZONE_LRU_BASE = 1, + NR_ZONE_INACTIVE_ANON = 1, + NR_ZONE_ACTIVE_ANON = 2, + NR_ZONE_INACTIVE_FILE = 3, + NR_ZONE_ACTIVE_FILE = 4, + NR_ZONE_UNEVICTABLE = 5, + NR_ZONE_WRITE_PENDING = 6, + NR_MLOCK = 7, + NR_BOUNCE = 8, + NR_FREE_CMA_PAGES = 9, + NR_VM_ZONE_STAT_ITEMS = 10, +}; + +enum node_stat_item { + NR_LRU_BASE = 0, + NR_INACTIVE_ANON = 0, + NR_ACTIVE_ANON = 1, + NR_INACTIVE_FILE = 2, + NR_ACTIVE_FILE = 3, + NR_UNEVICTABLE = 4, + NR_SLAB_RECLAIMABLE_B = 5, + NR_SLAB_UNRECLAIMABLE_B = 6, + NR_ISOLATED_ANON = 7, + NR_ISOLATED_FILE = 8, + WORKINGSET_NODES = 9, + WORKINGSET_REFAULT_BASE = 10, + WORKINGSET_REFAULT_ANON = 10, + WORKINGSET_REFAULT_FILE = 11, + WORKINGSET_ACTIVATE_BASE = 12, + WORKINGSET_ACTIVATE_ANON = 12, + 
WORKINGSET_ACTIVATE_FILE = 13, + WORKINGSET_RESTORE_BASE = 14, + WORKINGSET_RESTORE_ANON = 14, + WORKINGSET_RESTORE_FILE = 15, + WORKINGSET_NODERECLAIM = 16, + NR_ANON_MAPPED = 17, + NR_FILE_MAPPED = 18, + NR_FILE_PAGES = 19, + NR_FILE_DIRTY = 20, + NR_WRITEBACK = 21, + NR_WRITEBACK_TEMP = 22, + NR_SHMEM = 23, + NR_SHMEM_THPS = 24, + NR_SHMEM_PMDMAPPED = 25, + NR_FILE_THPS = 26, + NR_FILE_PMDMAPPED = 27, + NR_ANON_THPS = 28, + NR_VMSCAN_WRITE = 29, + NR_VMSCAN_IMMEDIATE = 30, + NR_DIRTIED = 31, + NR_WRITTEN = 32, + NR_THROTTLED_WRITTEN = 33, + NR_KERNEL_MISC_RECLAIMABLE = 34, + NR_FOLL_PIN_ACQUIRED = 35, + NR_FOLL_PIN_RELEASED = 36, + NR_KERNEL_STACK_KB = 37, + NR_PAGETABLE = 38, + NR_SECONDARY_PAGETABLE = 39, + NR_IOMMU_PAGES = 40, + NR_SWAPCACHE = 41, + PGDEMOTE_KSWAPD = 42, + PGDEMOTE_DIRECT = 43, + PGDEMOTE_KHUGEPAGED = 44, + NR_VM_NODE_STAT_ITEMS = 45, +}; + +typedef void percpu_ref_func_t(struct percpu_ref *); + +struct percpu_ref_data { + atomic_long_t count; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic: 1; + bool allow_reinit: 1; + struct callback_head rcu; + struct percpu_ref *ref; +}; + +struct dev_pagemap_ops { + void (*page_free)(struct page *); + vm_fault_t (*migrate_to_ram)(struct vm_fault *); + int (*memory_failure)(struct dev_pagemap *, long unsigned int, long unsigned int, int); +}; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC = 0, + HRTIMER_BASE_REALTIME = 1, + HRTIMER_BASE_BOOTTIME = 2, + HRTIMER_BASE_TAI = 3, + HRTIMER_BASE_MONOTONIC_SOFT = 4, + HRTIMER_BASE_REALTIME_SOFT = 5, + HRTIMER_BASE_BOOTTIME_SOFT = 6, + HRTIMER_BASE_TAI_SOFT = 7, + HRTIMER_MAX_CLOCK_BASES = 8, +}; + +enum utf8_normalization { + UTF8_NFDI = 0, + UTF8_NFDICF = 1, + UTF8_NMAX = 2, +}; + +enum { + DQF_ROOT_SQUASH_B = 0, + DQF_SYS_FILE_B = 16, + DQF_PRIVATE = 17, +}; + +enum { + DQST_LOOKUPS = 0, + DQST_DROPS = 1, + DQST_READS = 2, + DQST_WRITES = 3, + DQST_CACHE_HITS = 4, + DQST_ALLOC_DQUOTS = 5, + DQST_FREE_DQUOTS = 6, + DQST_SYNCS = 7, + _DQST_DQSTAT_LAST = 8, +}; + +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_PAGEFAULT = 2, + SB_FREEZE_FS = 3, + SB_FREEZE_COMPLETE = 4, +}; + +enum vm_event_item { + PGPGIN = 0, + PGPGOUT = 1, + PSWPIN = 2, + PSWPOUT = 3, + PGALLOC_NORMAL = 4, + PGALLOC_MOVABLE = 5, + ALLOCSTALL_NORMAL = 6, + ALLOCSTALL_MOVABLE = 7, + PGSCAN_SKIP_NORMAL = 8, + PGSCAN_SKIP_MOVABLE = 9, + PGFREE = 10, + PGACTIVATE = 11, + PGDEACTIVATE = 12, + PGLAZYFREE = 13, + PGFAULT = 14, + PGMAJFAULT = 15, + PGLAZYFREED = 16, + PGREFILL = 17, + PGREUSE = 18, + PGSTEAL_KSWAPD = 19, + PGSTEAL_DIRECT = 20, + PGSTEAL_KHUGEPAGED = 21, + PGSCAN_KSWAPD = 22, + PGSCAN_DIRECT = 23, + PGSCAN_KHUGEPAGED = 24, + PGSCAN_DIRECT_THROTTLE = 25, + PGSCAN_ANON = 26, + PGSCAN_FILE = 27, + PGSTEAL_ANON = 28, + PGSTEAL_FILE = 29, + PGINODESTEAL = 30, + SLABS_SCANNED = 31, + KSWAPD_INODESTEAL = 32, + KSWAPD_LOW_WMARK_HIT_QUICKLY = 33, + KSWAPD_HIGH_WMARK_HIT_QUICKLY = 34, + PAGEOUTRUN = 35, + PGROTATED = 36, + DROP_PAGECACHE = 37, + DROP_SLAB = 38, + OOM_KILL = 39, + PGMIGRATE_SUCCESS = 40, + PGMIGRATE_FAIL = 41, + THP_MIGRATION_SUCCESS = 42, + THP_MIGRATION_FAIL = 43, + THP_MIGRATION_SPLIT = 44, + COMPACTMIGRATE_SCANNED = 45, + COMPACTFREE_SCANNED = 46, + COMPACTISOLATED = 47, + COMPACTSTALL = 48, + COMPACTFAIL = 49, + COMPACTSUCCESS = 50, + KCOMPACTD_WAKE = 51, + KCOMPACTD_MIGRATE_SCANNED = 52, + KCOMPACTD_FREE_SCANNED = 53, + CMA_ALLOC_SUCCESS = 54, + CMA_ALLOC_FAIL = 55, + UNEVICTABLE_PGCULLED = 56, + UNEVICTABLE_PGSCANNED = 57, + 
UNEVICTABLE_PGRESCUED = 58, + UNEVICTABLE_PGMLOCKED = 59, + UNEVICTABLE_PGMUNLOCKED = 60, + UNEVICTABLE_PGCLEARED = 61, + UNEVICTABLE_PGSTRANDED = 62, + SWAP_RA = 63, + SWAP_RA_HIT = 64, + SWPIN_ZERO = 65, + SWPOUT_ZERO = 66, + ZSWPIN = 67, + ZSWPOUT = 68, + ZSWPWB = 69, + NR_VM_EVENT_ITEMS = 70, +}; + +struct uasm_label { + u32 *addr; + int lab; +}; + +struct uasm_reloc { + u32 *addr; + unsigned int type; + int lab; +}; + +enum label_id { + label_clear_nopref = 1, + label_clear_pref = 2, + label_copy_nopref = 3, + label_copy_pref_both = 4, + label_copy_pref_store = 5, +}; + +typedef void *va_list; + +enum major_op { + spec_op = 0, + bcond_op = 1, + j_op = 2, + jal_op = 3, + beq_op = 4, + bne_op = 5, + blez_op = 6, + bgtz_op = 7, + addi_op = 8, + pop10_op = 8, + addiu_op = 9, + slti_op = 10, + sltiu_op = 11, + andi_op = 12, + ori_op = 13, + xori_op = 14, + lui_op = 15, + cop0_op = 16, + cop1_op = 17, + cop2_op = 18, + cop1x_op = 19, + beql_op = 20, + bnel_op = 21, + blezl_op = 22, + bgtzl_op = 23, + daddi_op = 24, + pop30_op = 24, + daddiu_op = 25, + ldl_op = 26, + ldr_op = 27, + spec2_op = 28, + jalx_op = 29, + mdmx_op = 30, + msa_op = 30, + spec3_op = 31, + lb_op = 32, + lh_op = 33, + lwl_op = 34, + lw_op = 35, + lbu_op = 36, + lhu_op = 37, + lwr_op = 38, + lwu_op = 39, + sb_op = 40, + sh_op = 41, + swl_op = 42, + sw_op = 43, + sdl_op = 44, + sdr_op = 45, + swr_op = 46, + cache_op = 47, + ll_op = 48, + lwc1_op = 49, + lwc2_op = 50, + bc6_op = 50, + pref_op = 51, + lld_op = 52, + ldc1_op = 53, + ldc2_op = 54, + pop66_op = 54, + ld_op = 55, + sc_op = 56, + swc1_op = 57, + swc2_op = 58, + balc6_op = 58, + major_3b_op = 59, + scd_op = 60, + sdc1_op = 61, + sdc2_op = 62, + pop76_op = 62, + sd_op = 63, +}; + +enum spec_op { + sll_op = 0, + movc_op = 1, + srl_op = 2, + sra_op = 3, + sllv_op = 4, + pmon_op = 5, + srlv_op = 6, + srav_op = 7, + jr_op = 8, + jalr_op = 9, + movz_op = 10, + movn_op = 11, + syscall_op = 12, + break_op = 13, + spim_op = 14, + sync_op = 15, + mfhi_op = 16, + mthi_op = 17, + mflo_op = 18, + mtlo_op = 19, + dsllv_op = 20, + spec2_unused_op = 21, + dsrlv_op = 22, + dsrav_op = 23, + mult_op = 24, + multu_op = 25, + div_op = 26, + divu_op = 27, + dmult_op = 28, + dmultu_op = 29, + ddiv_op = 30, + ddivu_op = 31, + add_op = 32, + addu_op = 33, + sub_op = 34, + subu_op = 35, + and_op = 36, + or_op = 37, + xor_op = 38, + nor_op = 39, + spec3_unused_op = 40, + spec4_unused_op = 41, + slt_op = 42, + sltu_op = 43, + dadd_op = 44, + daddu_op = 45, + dsub_op = 46, + dsubu_op = 47, + tge_op = 48, + tgeu_op = 49, + tlt_op = 50, + tltu_op = 51, + teq_op = 52, + seleqz_op = 53, + tne_op = 54, + selnez_op = 55, + dsll_op = 56, + spec5_unused_op = 57, + dsrl_op = 58, + dsra_op = 59, + dsll32_op = 60, + spec6_unused_op = 61, + dsrl32_op = 62, + dsra32_op = 63, +}; + +enum spec2_op { + madd_op = 0, + maddu_op = 1, + mul_op = 2, + spec2_3_unused_op = 3, + msub_op = 4, + msubu_op = 5, + clz_op = 32, + clo_op = 33, + dclz_op = 36, + dclo_op = 37, + sdbpp_op = 63, +}; + +enum spec3_op { + ext_op = 0, + dextm_op = 1, + dextu_op = 2, + dext_op = 3, + ins_op = 4, + dinsm_op = 5, + dinsu_op = 6, + dins_op = 7, + yield_op = 9, + lx_op = 10, + lwle_op = 25, + lwre_op = 26, + cachee_op = 27, + sbe_op = 28, + she_op = 29, + sce_op = 30, + swe_op = 31, + bshfl_op = 32, + swle_op = 33, + swre_op = 34, + prefe_op = 35, + dbshfl_op = 36, + cache6_op = 37, + sc6_op = 38, + scd6_op = 39, + lbue_op = 40, + lhue_op = 41, + lbe_op = 44, + lhe_op = 45, + lle_op = 46, + lwe_op = 47, + pref6_op = 53, + ll6_op = 54, 
+ lld6_op = 55, + rdhwr_op = 59, +}; + +enum multu_op { + multu_multu_op = 0, + multu_mulu_op = 2, + multu_muhu_op = 3, +}; + +enum divu_op { + divu_divu_op = 0, + divu_divu6_op = 2, + divu_modu_op = 3, +}; + +enum dmultu_op { + dmultu_dmultu_op = 0, + dmultu_dmulu_op = 2, + dmultu_dmuhu_op = 3, +}; + +enum ddivu_op { + ddivu_ddivu_op = 0, + ddivu_ddivu6_op = 2, + ddivu_dmodu_op = 3, +}; + +enum rt_op { + bltz_op = 0, + bgez_op = 1, + bltzl_op = 2, + bgezl_op = 3, + spimi_op = 4, + unused_rt_op_0x05 = 5, + unused_rt_op_0x06 = 6, + unused_rt_op_0x07 = 7, + tgei_op = 8, + tgeiu_op = 9, + tlti_op = 10, + tltiu_op = 11, + teqi_op = 12, + unused_0x0d_rt_op = 13, + tnei_op = 14, + unused_0x0f_rt_op = 15, + bltzal_op = 16, + bgezal_op = 17, + bltzall_op = 18, + bgezall_op = 19, + rt_op_0x14 = 20, + rt_op_0x15 = 21, + rt_op_0x16 = 22, + rt_op_0x17 = 23, + rt_op_0x18 = 24, + rt_op_0x19 = 25, + rt_op_0x1a = 26, + rt_op_0x1b = 27, + bposge32_op = 28, + rt_op_0x1d = 29, + rt_op_0x1e = 30, + synci_op = 31, +}; + +enum cop_op { + mfc_op = 0, + dmfc_op = 1, + cfc_op = 2, + mfhc0_op = 2, + mfhc_op = 3, + mtc_op = 4, + dmtc_op = 5, + ctc_op = 6, + mthc0_op = 6, + mthc_op = 7, + bc_op = 8, + bc1eqz_op = 9, + mfmc0_op = 11, + bc1nez_op = 13, + wrpgpr_op = 14, + cop_op = 16, + copm_op = 24, +}; + +enum cop0_coi_func { + tlbr_op = 1, + tlbwi_op = 2, + tlbwr_op = 6, + tlbp_op = 8, + rfe_op = 16, + eret_op = 24, + wait_op = 32, + hypcall_op = 40, +}; + +enum ptw_func { + lwdir_op = 0, + lwpte_op = 1, + lddir_op = 2, + ldpte_op = 3, +}; + +enum lx_func { + lwx_op = 0, + lhx_op = 4, + lbux_op = 6, + ldx_op = 8, + lwux_op = 16, + lhux_op = 20, + lbx_op = 22, +}; + +enum bshfl_func { + wsbh_op = 2, + seb_op = 16, + seh_op = 24, +}; + +enum dbshfl_func { + dsbh_op = 2, + dshd_op = 5, +}; + +enum msa_func { + msa_elm_op = 25, +}; + +enum msa_elm { + msa_ctc_op = 62, + msa_cfc_op = 126, +}; + +enum kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_DMA = 0, + KMALLOC_RANDOM_START = 0, + KMALLOC_RANDOM_END = 0, + KMALLOC_RECLAIM = 1, + KMALLOC_CGROUP = 2, + NR_KMALLOC_TYPES = 3, +}; + +enum fields { + RS = 1, + RT = 2, + RD = 4, + RE = 8, + SIMM = 16, + UIMM = 32, + BIMM = 64, + JIMM = 128, + FUNC = 256, + SET = 512, + SCIMM = 1024, + SIMM9 = 2048, +}; + +enum opcode { + insn_addiu = 0, + insn_addu = 1, + insn_and = 2, + insn_andi = 3, + insn_bbit0 = 4, + insn_bbit1 = 5, + insn_beq = 6, + insn_beql = 7, + insn_bgez = 8, + insn_bgezl = 9, + insn_bgtz = 10, + insn_blez = 11, + insn_bltz = 12, + insn_bltzl = 13, + insn_bne = 14, + insn_break = 15, + insn_cache = 16, + insn_cfc1 = 17, + insn_cfcmsa = 18, + insn_ctc1 = 19, + insn_ctcmsa = 20, + insn_daddiu = 21, + insn_daddu = 22, + insn_ddivu = 23, + insn_ddivu_r6 = 24, + insn_di = 25, + insn_dins = 26, + insn_dinsm = 27, + insn_dinsu = 28, + insn_divu = 29, + insn_divu_r6 = 30, + insn_dmfc0 = 31, + insn_dmodu = 32, + insn_dmtc0 = 33, + insn_dmultu = 34, + insn_dmulu = 35, + insn_drotr = 36, + insn_drotr32 = 37, + insn_dsbh = 38, + insn_dshd = 39, + insn_dsll = 40, + insn_dsll32 = 41, + insn_dsllv = 42, + insn_dsra = 43, + insn_dsra32 = 44, + insn_dsrav = 45, + insn_dsrl = 46, + insn_dsrl32 = 47, + insn_dsrlv = 48, + insn_dsubu = 49, + insn_eret = 50, + insn_ext = 51, + insn_ins = 52, + insn_j = 53, + insn_jal = 54, + insn_jalr = 55, + insn_jr = 56, + insn_lb = 57, + insn_lbu = 58, + insn_ld = 59, + insn_lddir = 60, + insn_ldpte = 61, + insn_ldx = 62, + insn_lh = 63, + insn_lhu = 64, + insn_ll = 65, + insn_lld = 66, + insn_lui = 67, + insn_lw = 68, + insn_lwu = 69, + 
insn_lwx = 70, + insn_mfc0 = 71, + insn_mfhc0 = 72, + insn_mfhi = 73, + insn_mflo = 74, + insn_modu = 75, + insn_movn = 76, + insn_movz = 77, + insn_mtc0 = 78, + insn_mthc0 = 79, + insn_mthi = 80, + insn_mtlo = 81, + insn_mul = 82, + insn_multu = 83, + insn_mulu = 84, + insn_muhu = 85, + insn_nor = 86, + insn_or = 87, + insn_ori = 88, + insn_pref = 89, + insn_rfe = 90, + insn_rotr = 91, + insn_sb = 92, + insn_sc = 93, + insn_scd = 94, + insn_seleqz = 95, + insn_selnez = 96, + insn_sd = 97, + insn_sh = 98, + insn_sll = 99, + insn_sllv = 100, + insn_slt = 101, + insn_slti = 102, + insn_sltiu = 103, + insn_sltu = 104, + insn_sra = 105, + insn_srav = 106, + insn_srl = 107, + insn_srlv = 108, + insn_subu = 109, + insn_sw = 110, + insn_sync = 111, + insn_syscall = 112, + insn_tlbp = 113, + insn_tlbr = 114, + insn_tlbwi = 115, + insn_tlbwr = 116, + insn_wait = 117, + insn_wsbh = 118, + insn_xor = 119, + insn_xori = 120, + insn_yield = 121, + insn_invalid = 122, +}; + +struct insn { + u32 match; + enum fields fields; +}; + +typedef long unsigned int uintptr_t; + +typedef u32 phys_addr_t; + +typedef int (*initcall_t)(); + +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +typedef void (*smp_call_func_t)(void *); + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *); + +struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block *next; + int priority; +}; + +enum cpu_pm_event { + CPU_PM_ENTER = 0, + CPU_PM_ENTER_FAILED = 1, + CPU_PM_EXIT = 2, + CPU_CLUSTER_PM_ENTER = 3, + CPU_CLUSTER_PM_ENTER_FAILED = 4, + CPU_CLUSTER_PM_EXIT = 5, +}; + +enum pgtable_bits { + _PAGE_PRESENT_SHIFT = 0, + _PAGE_NO_READ_SHIFT = 1, + _PAGE_WRITE_SHIFT = 2, + _PAGE_ACCESSED_SHIFT = 3, + _PAGE_MODIFIED_SHIFT = 4, + _PAGE_SPECIAL_SHIFT = 5, + _PAGE_GLOBAL_SHIFT = 6, + _PAGE_VALID_SHIFT = 7, + _PAGE_DIRTY_SHIFT = 8, + _CACHE_SHIFT = 9, +}; + +enum pageflags { + PG_locked = 0, + PG_writeback = 1, + PG_referenced = 2, + PG_uptodate = 3, + PG_dirty = 4, + PG_lru = 5, + PG_head = 6, + PG_waiters = 7, + PG_active = 8, + PG_workingset = 9, + PG_owner_priv_1 = 10, + PG_owner_2 = 11, + PG_arch_1 = 12, + PG_reserved = 13, + PG_private = 14, + PG_private_2 = 15, + PG_reclaim = 16, + PG_swapbacked = 17, + PG_unevictable = 18, + PG_mlocked = 19, + __NR_PAGEFLAGS = 20, + PG_readahead = 16, + PG_swapcache = 10, + PG_checked = 10, + PG_anon_exclusive = 11, + PG_mappedtodisk = 11, + PG_fscache = 15, + PG_pinned = 10, + PG_savepinned = 4, + PG_foreign = 10, + PG_xen_remapped = 10, + PG_isolated = 16, + PG_reported = 3, + PG_has_hwpoisoned = 8, + PG_large_rmappable = 9, + PG_partially_mapped = 16, +}; + +enum pagetype { + PGTY_buddy = 240, + PGTY_offline = 241, + PGTY_table = 242, + PGTY_guard = 243, + PGTY_hugetlb = 244, + PGTY_slab = 245, + PGTY_zsmalloc = 246, + PGTY_unaccepted = 247, + PGTY_mapcount_underflow = 255, +}; + +struct bcache_ops { + void (*bc_enable)(); + void (*bc_disable)(); + void (*bc_wback_inv)(long unsigned int, long unsigned int); + void (*bc_inv)(long unsigned int, long unsigned int); + void (*bc_prefetch_enable)(); + void (*bc_prefetch_disable)(); + bool (*bc_prefetch_is_enabled)(); +}; + +struct flush_cache_page_args { + struct vm_area_struct *vma; + long unsigned int addr; + long unsigned int pfn; +}; + +struct flush_icache_range_args { + long unsigned int start; + long unsigned int end; + unsigned int type; + bool user; +}; + +struct flush_kernel_vmap_range_args { + long unsigned int vaddr; + int 
size; +}; + +typedef short int __s16; + +struct cacheline_padding { + char x[0]; +}; + +enum { + ___GFP_DMA_BIT = 0, + ___GFP_HIGHMEM_BIT = 1, + ___GFP_DMA32_BIT = 2, + ___GFP_MOVABLE_BIT = 3, + ___GFP_RECLAIMABLE_BIT = 4, + ___GFP_HIGH_BIT = 5, + ___GFP_IO_BIT = 6, + ___GFP_FS_BIT = 7, + ___GFP_ZERO_BIT = 8, + ___GFP_UNUSED_BIT = 9, + ___GFP_DIRECT_RECLAIM_BIT = 10, + ___GFP_KSWAPD_RECLAIM_BIT = 11, + ___GFP_WRITE_BIT = 12, + ___GFP_NOWARN_BIT = 13, + ___GFP_RETRY_MAYFAIL_BIT = 14, + ___GFP_NOFAIL_BIT = 15, + ___GFP_NORETRY_BIT = 16, + ___GFP_MEMALLOC_BIT = 17, + ___GFP_COMP_BIT = 18, + ___GFP_NOMEMALLOC_BIT = 19, + ___GFP_HARDWALL_BIT = 20, + ___GFP_THISNODE_BIT = 21, + ___GFP_ACCOUNT_BIT = 22, + ___GFP_ZEROTAGS_BIT = 23, + ___GFP_NO_OBJ_EXT_BIT = 24, + ___GFP_LAST_BIT = 25, +}; + +struct wait_queue_entry; + +typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); + +struct wait_queue_entry { + unsigned int flags; + void *private; + wait_queue_func_t func; + struct list_head entry; +}; + +typedef struct wait_queue_entry wait_queue_entry_t; + +enum dev_dma_attr { + DEV_DMA_NOT_SUPPORTED = 0, + DEV_DMA_NON_COHERENT = 1, + DEV_DMA_COHERENT = 2, +}; + +struct fwnode_operations; + +struct device; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; + struct device *dev; + struct list_head suppliers; + struct list_head consumers; + u8 flags; +}; + +struct fwnode_reference_args; + +struct fwnode_endpoint; + +struct fwnode_operations { + struct fwnode_handle * (*get)(struct fwnode_handle *); + void (*put)(struct fwnode_handle *); + bool (*device_is_available)(const struct fwnode_handle *); + const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); + bool (*device_dma_supported)(const struct fwnode_handle *); + enum dev_dma_attr (*device_get_dma_attr)(const struct fwnode_handle *); + bool (*property_present)(const struct fwnode_handle *, const char *); + int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); + int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); + const char * (*get_name)(const struct fwnode_handle *); + const char * (*get_name_prefix)(const struct fwnode_handle *); + struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); + struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); + int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); + struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *); + struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); + int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); + void * (*iomap)(struct fwnode_handle *, int); + int (*irq_get)(const struct fwnode_handle *, unsigned int); + int (*add_links)(struct fwnode_handle *); +}; + +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING = 1, + DL_DEV_DRIVER_BOUND = 2, + DL_DEV_UNBINDING = 3, +}; + +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + struct list_head defer_sync; + enum dl_dev_state status; +}; + +struct pm_message { + 
int event; +}; + +typedef struct pm_message pm_message_t; + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE = 1, + RPM_REQ_SUSPEND = 2, + RPM_REQ_AUTOSUSPEND = 3, + RPM_REQ_RESUME = 4, +}; + +enum rpm_status { + RPM_INVALID = -1, + RPM_ACTIVE = 0, + RPM_RESUMING = 1, + RPM_SUSPENDED = 2, + RPM_SUSPENDING = 3, +}; + +struct wake_irq; + +struct pm_subsys_data; + +struct dev_pm_qos; + +struct dev_pm_info { + pm_message_t power_state; + bool can_wakeup: 1; + bool async_suspend: 1; + bool in_dpm_list: 1; + bool is_prepared: 1; + bool is_suspended: 1; + bool is_noirq_suspended: 1; + bool is_late_suspended: 1; + bool no_pm: 1; + bool early_init: 1; + bool direct_complete: 1; + u32 driver_flags; + spinlock_t lock; + bool should_wakeup: 1; + long: 32; + struct hrtimer suspend_timer; + u64 timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth: 3; + bool idle_notification: 1; + bool request_pending: 1; + bool deferred_resume: 1; + bool needs_force_resume: 1; + bool runtime_auto: 1; + bool ignore_children: 1; + bool no_callbacks: 1; + bool irq_safe: 1; + bool use_autosuspend: 1; + bool timer_autosuspends: 1; + bool memalloc_noio: 1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + enum rpm_status last_status; + int runtime_error; + int autosuspend_delay; + long: 32; + u64 last_busy; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; + struct pm_subsys_data *subsys_data; + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; + long: 32; +}; + +struct irq_domain; + +struct msi_device_data; + +struct dev_msi_info { + struct irq_domain *domain; + struct msi_device_data *data; +}; + +struct dev_archdata {}; + +struct dev_iommu; + +enum device_removable { + DEVICE_REMOVABLE_NOT_SUPPORTED = 0, + DEVICE_REMOVABLE_UNKNOWN = 1, + DEVICE_FIXED = 2, + DEVICE_REMOVABLE = 3, +}; + +struct device_private; + +struct device_type; + +struct bus_type; + +struct device_driver; + +struct dev_pm_domain; + +struct bus_dma_region; + +struct device_dma_parameters; + +struct dma_coherent_mem; + +struct device_node; + +struct class; + +struct iommu_group; + +struct device_physical_location; + +struct device { + struct kobject kobj; + struct device *parent; + struct device_private *p; + const char *init_name; + const struct device_type *type; + const struct bus_type *bus; + struct device_driver *driver; + void *platform_data; + void *driver_data; + struct mutex mutex; + struct dev_links_info links; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + struct dev_msi_info msi; + u64 *dma_mask; + u64 coherent_dma_mask; + u64 bus_dma_limit; + const struct bus_dma_region *dma_range_map; + struct device_dma_parameters *dma_parms; + struct list_head dma_pools; + struct dma_coherent_mem *dma_mem; + struct dev_archdata archdata; + struct device_node *of_node; + struct fwnode_handle *fwnode; + dev_t devt; + u32 id; + spinlock_t devres_lock; + struct list_head devres_head; + const struct class *class; + const struct attribute_group **groups; + void (*release)(struct device *); + struct iommu_group *iommu_group; + struct dev_iommu *iommu; + struct device_physical_location *physical_location; + enum device_removable removable; + bool offline_disabled: 1; + bool offline: 1; + bool of_node_reused: 1; + bool state_synced: 1; + bool can_match: 1; + bool dma_coherent: 1; + bool dma_skip_sync: 1; +}; + +struct 
fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct fwnode_handle *local_fwnode; +}; + +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[8]; +}; + +enum refcount_saturation_type { + REFCOUNT_ADD_NOT_ZERO_OVF = 0, + REFCOUNT_ADD_OVF = 1, + REFCOUNT_ADD_UAF = 2, + REFCOUNT_SUB_UAF = 3, + REFCOUNT_DEC_LEAK = 4, +}; + +struct rcu_work { + struct work_struct work; + struct callback_head rcu; + struct workqueue_struct *wq; +}; + +struct task_cputime { + u64 stime; + u64 utime; + long long unsigned int sum_exec_runtime; +}; + +struct ucounts { + struct hlist_node node; + struct user_namespace *ns; + kuid_t uid; + atomic_t count; + atomic_long_t ucount[10]; + atomic_long_t rlimit[4]; +}; + +struct cgroup_subsys; + +struct cgroup_subsys_state { + struct cgroup *cgroup; + struct cgroup_subsys *ss; + struct percpu_ref refcnt; + struct list_head sibling; + struct list_head children; + struct list_head rstat_css_node; + int id; + unsigned int flags; + u64 serial_nr; + atomic_t online_cnt; + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + struct cgroup_subsys_state *parent; + int nr_descendants; +}; + +struct cgroup_file { + struct kernfs_node *kn; + long unsigned int notified_at; + struct timer_list notify_timer; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; + u64 ntime; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + struct bpf_prog_array *effective[28]; + struct hlist_head progs[28]; + u8 flags[28]; + struct list_head storages; + struct bpf_prog_array *inactive; + struct percpu_ref refcnt; + struct work_struct release_work; +}; + +struct cgroup_freezer_state { + bool freeze; + bool e_freeze; + int nr_frozen_descendants; + int nr_frozen_tasks; +}; + +struct cgroup_root; + +struct cgroup_rstat_cpu; + +struct psi_group; + +struct cgroup { + struct cgroup_subsys_state self; + long unsigned int flags; + int level; + int max_depth; + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + int nr_populated_csets; + int nr_populated_domain_children; + int nr_populated_threaded_children; + int nr_threaded_children; + struct kernfs_node *kn; + struct cgroup_file procs_file; + struct cgroup_file events_file; + struct cgroup_file psi_files[3]; + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + struct cgroup_subsys_state *subsys[13]; + int nr_dying_subsys[13]; + struct cgroup_root *root; + struct list_head cset_links; + struct list_head e_csets[13]; + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; + struct cgroup_rstat_cpu *rstat_cpu; + struct list_head rstat_css_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad_; + struct cgroup *rstat_flush_next; + long: 32; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; + struct list_head pidlists; + struct mutex pidlist_mutex; + wait_queue_head_t offline_waitq; + struct work_struct release_agent_work; + struct psi_group *psi; + struct cgroup_bpf bpf; + struct cgroup_freezer_state freezer; + struct bpf_local_storage *bpf_cgrp_storage; + struct cgroup *ancestors[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct fs_struct { + int users; + spinlock_t lock; + seqcount_spinlock_t seq; + int umask; + int in_exec; + struct path root; + struct path pwd; +}; + +struct uts_namespace; + +struct ipc_namespace; + +struct mnt_namespace; + +struct net; + +struct time_namespace; + +struct cgroup_namespace; + +struct nsproxy { + refcount_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct time_namespace *time_ns; + struct time_namespace *time_ns_for_children; + struct cgroup_namespace *cgroup_ns; +}; + +struct bio; + +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +struct reclaim_state { + long unsigned int reclaimed; +}; + +struct css_set { + struct cgroup_subsys_state *subsys[13]; + refcount_t refcount; + struct css_set *dom_cset; + struct cgroup *dfl_cgrp; + int nr_tasks; + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + struct list_head task_iters; + struct list_head e_cset_node[13]; + struct list_head threaded_csets; + struct list_head threaded_csets_node; + struct hlist_node hlist; + struct list_head cgrp_links; + struct list_head mg_src_preload_node; + struct list_head mg_dst_preload_node; + struct list_head mg_node; + struct cgroup *mg_src_cgrp; + struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + bool dead; + struct callback_head callback_head; +}; + +struct fasync_struct; + +struct pipe_buffer; + +struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t rd_wait; + wait_queue_head_t wr_wait; + unsigned int head; + unsigned int tail; + unsigned int max_usage; + unsigned int ring_size; + unsigned int nr_accounted; + unsigned int readers; + unsigned int writers; + unsigned int files; + unsigned int r_counter; + unsigned int w_counter; + bool poll_usage; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct pipe_buffer *bufs; + struct user_struct *user; +}; + +struct mem_cgroup_id { + int id; + refcount_t ref; +}; + +struct page_counter { + atomic_long_t usage; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + long unsigned int emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + long unsigned int elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + long unsigned int watermark; + long unsigned int local_watermark; + long unsigned int failcnt; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + bool protection_support; + long unsigned int min; + long unsigned int low; + long unsigned int high; + long unsigned int max; + struct page_counter *parent; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct vmpressure { + long unsigned int scanned; + long unsigned int reclaimed; + long unsigned int tree_scanned; + long unsigned int tree_reclaimed; + spinlock_t sr_lock; + struct list_head events; + struct mutex events_lock; + struct work_struct work; +}; + +struct fprop_global { + struct percpu_counter events; + unsigned int period; + seqcount_t sequence; +}; + +struct wb_domain { + spinlock_t lock; + long: 32; + struct fprop_global completions; + struct timer_list period_timer; + long unsigned int period_time; + long unsigned int dirty_limit_tstamp; + long unsigned int dirty_limit; +}; + +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; + +struct memcg_cgwb_frn { + u64 bdi_id; + int memcg_id; + long: 32; + u64 at; + struct wb_completion done; +}; + +struct memcg_vmstats; + +struct memcg_vmstats_percpu; + +struct mem_cgroup_per_node; + +struct mem_cgroup { + struct cgroup_subsys_state css; + struct mem_cgroup_id id; + long: 32; + long: 32; + struct page_counter memory; + union { + struct page_counter swap; + struct page_counter memsw; + }; + struct list_head memory_peaks; + struct list_head swap_peaks; + spinlock_t peaks_lock; + struct work_struct high_work; + long unsigned int zswap_max; + bool zswap_writeback; + struct vmpressure vmpressure; + bool oom_group; + int swappiness; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct cgroup_file swap_events_file; + struct memcg_vmstats *vmstats; + atomic_long_t memory_events[9]; + atomic_long_t memory_events_local[9]; + long unsigned int socket_pressure; + int kmemcg_id; + struct obj_cgroup *objcg; + struct obj_cgroup *orig_objcg; + struct list_head objcg_list; + struct memcg_vmstats_percpu *vmstats_percpu; + struct list_head cgwb_list; + long: 32; + struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[4]; + struct mem_cgroup_per_node *nodeinfo[0]; +}; + +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct callback_head rcu; + }; +}; + +struct bpf_run_ctx {}; + +struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; +}; + +struct uid_gid_map { + union { + struct { + struct uid_gid_extent extent[5]; + u32 nr_extents; + }; + struct { + struct uid_gid_extent *forward; + struct uid_gid_extent *reverse; + }; + }; +}; + +struct proc_ns_operations; + +struct ns_common { + struct dentry *stashed; + const struct proc_ns_operations *ops; + unsigned int inum; + refcount_t count; +}; + +struct ctl_table; + +struct ctl_table_root; + +struct ctl_table_set; + +struct ctl_dir; + +struct ctl_node; + +struct ctl_table_header { + union { + struct { + const struct ctl_table *ctl_table; + int ctl_table_size; + int used; + int count; + int nreg; + }; + struct callback_head rcu; + }; + struct completion *unregistering; + const struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; + struct hlist_head inodes; + enum { + SYSCTL_TABLE_TYPE_DEFAULT = 0, + SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY = 1, + } type; +}; + +struct ctl_dir { + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct binfmt_misc; + +struct user_namespace { + struct uid_gid_map uid_map; + struct 
uid_gid_map gid_map; + struct uid_gid_map projid_map; + struct user_namespace *parent; + int level; + kuid_t owner; + kgid_t group; + struct ns_common ns; + long unsigned int flags; + bool parent_could_setfcap; + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; + struct work_struct work; + struct ctl_table_set set; + struct ctl_table_header *sysctls; + struct ucounts *ucounts; + long int ucount_max[10]; + long int rlimit_max[4]; + struct binfmt_misc *binfmt_misc; +}; + +struct zswap_lruvec_state { + atomic_long_t nr_disk_swapins; +}; + +struct free_area { + struct list_head free_list[6]; + long unsigned int nr_free; +}; + +struct pglist_data; + +struct lruvec { + struct list_head lists[5]; + spinlock_t lru_lock; + long unsigned int anon_cost; + long unsigned int file_cost; + atomic_long_t nonresident_age; + long unsigned int refaults[2]; + long unsigned int flags; + struct pglist_data *pgdat; + struct zswap_lruvec_state zswap_lruvec_state; +}; + +struct per_cpu_pages; + +struct per_cpu_zonestat; + +struct zone { + long unsigned int _watermark[4]; + long unsigned int watermark_boost; + long unsigned int nr_reserved_highatomic; + long unsigned int nr_free_highatomic; + long int lowmem_reserve[2]; + struct pglist_data *zone_pgdat; + struct per_cpu_pages *per_cpu_pageset; + struct per_cpu_zonestat *per_cpu_zonestats; + int pageset_high_min; + int pageset_high_max; + int pageset_batch; + long unsigned int *pageblock_flags; + long unsigned int zone_start_pfn; + atomic_long_t managed_pages; + long unsigned int spanned_pages; + long unsigned int present_pages; + long unsigned int cma_pages; + const char *name; + long unsigned int nr_isolate_pageblock; + int initialized; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + struct free_area free_area[11]; + long unsigned int flags; + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + long unsigned int percpu_drift_mark; + long unsigned int compact_cached_free_pfn; + long unsigned int compact_cached_migrate_pfn[2]; + long unsigned int compact_init_migrate_pfn; + long unsigned int compact_init_free_pfn; + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; + bool compact_blockskip_flush; + bool contiguous; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad3_; + atomic_long_t vm_stat[10]; + atomic_long_t vm_numa_event[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct zoneref { + struct zone *zone; + int zone_idx; +}; + +struct zonelist { + struct zoneref _zonerefs[3]; +}; + +enum zone_type { + ZONE_NORMAL = 0, + ZONE_MOVABLE = 1, + __MAX_NR_ZONES = 2, +}; + +struct per_cpu_nodestat; + +struct pglist_data { + struct zone node_zones[2]; + struct zonelist node_zonelists[1]; + int nr_zones; + struct page *node_mem_map; + long 
unsigned int node_start_pfn; + long unsigned int node_present_pages; + long unsigned int node_spanned_pages; + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + wait_queue_head_t reclaim_wait[4]; + atomic_t nr_writeback_throttled; + long unsigned int nr_reclaim_start; + struct task_struct *kswapd; + int kswapd_order; + enum zone_type kswapd_highest_zoneidx; + int kswapd_failures; + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; + bool proactive_compact_trigger; + long unsigned int totalreserve_pages; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + struct lruvec __lruvec; + long unsigned int flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + struct per_cpu_nodestat *per_cpu_nodestats; + atomic_long_t vm_stat[45]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct per_cpu_pages { + spinlock_t lock; + int count; + int high; + int high_min; + int high_max; + int batch; + u8 flags; + u8 alloc_factor; + short int free_count; + struct list_head lists[12]; + long: 32; +}; + +struct per_cpu_zonestat { + s8 vm_stat_diff[10]; + s8 stat_threshold; +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[45]; +}; + +struct taskstats { + __u16 version; + __u32 ac_exitcode; + __u8 ac_flag; + __u8 ac_nice; + long: 32; + __u64 cpu_count; + __u64 cpu_delay_total; + __u64 blkio_count; + __u64 blkio_delay_total; + __u64 swapin_count; + __u64 swapin_delay_total; + __u64 cpu_run_real_total; + __u64 cpu_run_virtual_total; + char ac_comm[32]; + __u8 ac_sched; + __u8 ac_pad[3]; + long: 32; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + long: 32; + __u64 ac_etime; + __u64 ac_utime; + __u64 ac_stime; + __u64 ac_minflt; + __u64 ac_majflt; + __u64 coremem; + __u64 virtmem; + __u64 hiwater_rss; + __u64 hiwater_vm; + __u64 read_char; + __u64 write_char; + __u64 read_syscalls; + __u64 write_syscalls; + __u64 read_bytes; + __u64 write_bytes; + __u64 cancelled_write_bytes; + __u64 nvcsw; + __u64 nivcsw; + __u64 ac_utimescaled; + __u64 ac_stimescaled; + __u64 cpu_scaled_run_real_total; + __u64 freepages_count; + __u64 freepages_delay_total; + __u64 thrashing_count; + __u64 thrashing_delay_total; + __u64 ac_btime64; + __u64 compact_count; + __u64 compact_delay_total; + __u32 ac_tgid; + long: 32; + __u64 ac_tgetime; + __u64 ac_exe_dev; + __u64 ac_exe_inode; + __u64 wpcopy_count; + __u64 wpcopy_delay_total; + __u64 irq_count; + __u64 irq_delay_total; +}; + +struct shrinker_info_unit { + atomic_long_t nr_deferred[32]; + long unsigned int map[1]; +}; + +struct shrinker_info { + struct callback_head rcu; + int map_nr_max; + struct shrinker_info_unit *unit[0]; +}; + +typedef int proc_handler(const struct ctl_table *, int, void *, size_t *, loff_t *); + +struct ctl_table_poll; + +struct ctl_table { + const char *procname; + void *data; + int maxlen; + umode_t mode; + proc_handler *proc_handler; + struct 
ctl_table_poll *poll; + void *extra1; + void *extra2; +}; + +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set * (*lookup)(struct ctl_table_root *); + void (*set_ownership)(struct ctl_table_header *, kuid_t *, kgid_t *); + int (*permissions)(struct ctl_table_header *, const struct ctl_table *); +}; + +struct wait_page_queue { + struct folio *folio; + int bit_nr; + wait_queue_entry_t wait; +}; + +enum writeback_sync_modes { + WB_SYNC_NONE = 0, + WB_SYNC_ALL = 1, +}; + +struct folio_batch { + unsigned char nr; + unsigned char i; + bool percpu_pvec_drained; + struct folio *folios[31]; +}; + +struct swap_iocb; + +struct writeback_control { + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + enum writeback_sync_modes sync_mode; + unsigned int for_kupdate: 1; + unsigned int for_background: 1; + unsigned int tagged_writepages: 1; + unsigned int for_reclaim: 1; + unsigned int range_cyclic: 1; + unsigned int for_sync: 1; + unsigned int unpinned_netfs_wb: 1; + unsigned int no_cgroup_owner: 1; + struct swap_iocb **swap_plug; + struct list_head *list; + struct folio_batch fbatch; + long unsigned int index; + int saved_err; + struct bdi_writeback *wb; + struct inode *inode; + int wb_id; + int wb_lcand_id; + int wb_tcand_id; + size_t wb_bytes; + size_t wb_lcand_bytes; + size_t wb_tcand_bytes; +}; + +struct readahead_control { + struct file *file; + struct address_space *mapping; + struct file_ra_state *ra; + long unsigned int _index; + unsigned int _nr_pages; + unsigned int _batch_count; + bool _workingset; + long unsigned int _pflags; +}; + +struct iovec { + void *iov_base; + __kernel_size_t iov_len; +}; + +struct kvec; + +struct bio_vec; + +struct folio_queue; + +struct iov_iter { + u8 iter_type; + bool nofault; + bool data_source; + size_t iov_offset; + union { + struct iovec __ubuf_iovec; + struct { + union { + const struct iovec *__iov; + const struct kvec *kvec; + const struct bio_vec *bvec; + const struct folio_queue *folioq; + struct xarray *xarray; + void *ubuf; + }; + size_t count; + }; + }; + union { + long unsigned int nr_segs; + u8 folioq_slot; + loff_t xarray_start; + }; +}; + +struct swap_cluster_info; + +struct percpu_cluster; + +struct swap_info_struct { + struct percpu_ref users; + long unsigned int flags; + short int prio; + struct plist_node list; + signed char type; + unsigned int max; + unsigned char *swap_map; + long unsigned int *zeromap; + struct swap_cluster_info *cluster_info; + struct list_head free_clusters; + struct list_head full_clusters; + struct list_head nonfull_clusters[1]; + struct list_head frag_clusters[1]; + unsigned int frag_cluster_nr[1]; + unsigned int lowest_bit; + unsigned int highest_bit; + unsigned int pages; + unsigned int inuse_pages; + unsigned int cluster_next; + unsigned int cluster_nr; + unsigned int *cluster_next_cpu; + struct percpu_cluster *percpu_cluster; + struct rb_root swap_extent_root; + struct block_device *bdev; + struct file *swap_file; + struct completion comp; + spinlock_t lock; + spinlock_t cont_lock; + struct work_struct discard_work; + struct work_struct reclaim_work; + struct list_head discard_clusters; + struct plist_node avail_lists[0]; +}; + +struct fprop_local_percpu { + struct percpu_counter events; + unsigned int period; + raw_spinlock_t lock; +}; + +enum wb_reason { + WB_REASON_BACKGROUND = 0, + 
WB_REASON_VMSCAN = 1, + WB_REASON_SYNC = 2, + WB_REASON_PERIODIC = 3, + WB_REASON_LAPTOP_TIMER = 4, + WB_REASON_FS_FREE_SPACE = 5, + WB_REASON_FORKER_THREAD = 6, + WB_REASON_FOREIGN_FLUSH = 7, + WB_REASON_MAX = 8, +}; + +struct bdi_writeback { + struct backing_dev_info *bdi; + long unsigned int state; + long unsigned int last_old_flush; + struct list_head b_dirty; + struct list_head b_io; + struct list_head b_more_io; + struct list_head b_dirty_time; + spinlock_t list_lock; + atomic_t writeback_inodes; + long: 32; + struct percpu_counter stat[4]; + long unsigned int bw_time_stamp; + long unsigned int dirtied_stamp; + long unsigned int written_stamp; + long unsigned int write_bandwidth; + long unsigned int avg_write_bandwidth; + long unsigned int dirty_ratelimit; + long unsigned int balanced_dirty_ratelimit; + long: 32; + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + spinlock_t work_lock; + struct list_head work_list; + struct delayed_work dwork; + struct delayed_work bw_dwork; + struct list_head bdi_node; + struct percpu_ref refcnt; + long: 32; + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; + struct list_head memcg_node; + struct list_head blkcg_node; + struct list_head b_attached; + struct list_head offline_node; + union { + struct work_struct release_work; + struct callback_head rcu; + }; +}; + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; + struct file *fa_file; + struct callback_head fa_rcu; +}; + +struct disk_stats; + +struct blk_holder_ops; + +struct partition_meta_info; + +struct block_device { + sector_t bd_start_sect; + sector_t bd_nr_sectors; + struct gendisk *bd_disk; + struct request_queue *bd_queue; + struct disk_stats *bd_stats; + long unsigned int bd_stamp; + atomic_t __bd_flags; + dev_t bd_dev; + struct address_space *bd_mapping; + atomic_t bd_openers; + spinlock_t bd_size_lock; + void *bd_claiming; + void *bd_holder; + const struct blk_holder_ops *bd_holder_ops; + struct mutex bd_holder_lock; + int bd_holders; + struct kobject *bd_holder_dir; + atomic_t bd_fsfreeze_count; + struct mutex bd_fsfreeze_mutex; + struct partition_meta_info *bd_meta_info; + int bd_writers; + long: 32; + struct device bd_device; +}; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; + struct list_head bdi_list; + long unsigned int ra_pages; + long unsigned int io_pages; + struct kref refcnt; + unsigned int capabilities; + unsigned int min_ratio; + unsigned int max_ratio; + unsigned int max_prop_frac; + atomic_long_t tot_write_bandwidth; + long unsigned int last_bdp_sleep; + struct bdi_writeback wb; + struct list_head wb_list; + struct xarray cgwb_tree; + struct mutex cgwb_release_mutex; + struct rw_semaphore wb_switch_rwsem; + wait_queue_head_t wb_waitq; + struct device *dev; + char dev_name[64]; + struct device *owner; + struct timer_list laptop_mode_wb_timer; + struct dentry *debug_dir; + long: 32; +}; + +struct fc_log; + +struct p_log { + const char *prefix; + struct fc_log *log; +}; + +enum fs_context_purpose { + FS_CONTEXT_FOR_MOUNT = 0, + FS_CONTEXT_FOR_SUBMOUNT = 1, + FS_CONTEXT_FOR_RECONFIGURE = 2, +}; + +enum fs_context_phase { + FS_CONTEXT_CREATE_PARAMS = 0, + FS_CONTEXT_CREATING = 1, + FS_CONTEXT_AWAITING_MOUNT = 2, + FS_CONTEXT_AWAITING_RECONF = 3, + FS_CONTEXT_RECONF_PARAMS = 4, + FS_CONTEXT_RECONFIGURING = 5, + FS_CONTEXT_FAILED = 6, +}; + +struct fs_context_operations; + +struct 
fs_context { + const struct fs_context_operations *ops; + struct mutex uapi_mutex; + struct file_system_type *fs_type; + void *fs_private; + void *sget_key; + struct dentry *root; + struct user_namespace *user_ns; + struct net *net_ns; + const struct cred *cred; + struct p_log log; + const char *source; + void *security; + void *s_fs_info; + unsigned int sb_flags; + unsigned int sb_flags_mask; + unsigned int s_iflags; + enum fs_context_purpose purpose: 8; + enum fs_context_phase phase: 8; + bool need_free: 1; + bool global: 1; + bool oldapi: 1; + bool exclusive: 1; +}; + +struct audit_names; + +struct filename { + const char *name; + const char *uptr; + atomic_t refcnt; + struct audit_names *aname; + const char iname[0]; +}; + +typedef __u32 blk_opf_t; + +typedef u8 blk_status_t; + +struct bvec_iter { + sector_t bi_sector; + unsigned int bi_size; + unsigned int bi_idx; + unsigned int bi_bvec_done; +}; + +typedef unsigned int blk_qc_t; + +typedef void bio_end_io_t(struct bio *); + +struct bio_issue { + u64 value; +}; + +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct blkcg_gq; + +struct bio_set; + +struct bio { + struct bio *bi_next; + struct block_device *bi_bdev; + blk_opf_t bi_opf; + short unsigned int bi_flags; + short unsigned int bi_ioprio; + enum rw_hint bi_write_hint; + blk_status_t bi_status; + atomic_t __bi_remaining; + struct bvec_iter bi_iter; + union { + blk_qc_t bi_cookie; + unsigned int __bi_nr_segments; + }; + bio_end_io_t *bi_end_io; + void *bi_private; + struct blkcg_gq *bi_blkg; + long: 32; + struct bio_issue bi_issue; + u64 bi_iocost_cost; + short unsigned int bi_vcnt; + short unsigned int bi_max_vecs; + atomic_t __bi_cnt; + struct bio_vec *bi_io_vec; + struct bio_set *bi_pool; + struct bio_vec bi_inline_vecs[0]; +}; + +struct idr { + struct xarray idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +struct nsset; + +struct proc_ns_operations { + const char *name; + const char *real_ns_name; + int type; + struct ns_common * (*get)(struct task_struct *); + void (*put)(struct ns_common *); + int (*install)(struct nsset *, struct ns_common *); + struct user_namespace * (*owner)(struct ns_common *); + struct ns_common * (*get_parent)(struct ns_common *); +}; + +struct cgroup_namespace { + struct ns_common ns; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct css_set *root_cset; +}; + +struct u64_stats_sync { + seqcount_t seq; +}; + +struct bpf_prog; + +struct bpf_cgroup_storage; + +struct bpf_prog_array_item { + struct bpf_prog *prog; + long: 32; + union { + struct bpf_cgroup_storage *cgroup_storage[2]; + u64 bpf_cookie; + }; +}; + +struct bpf_prog_array { + struct callback_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct psi_group_cpu { + seqcount_t seq; + unsigned int tasks[4]; + u32 state_mask; + u32 times[7]; + long: 32; + u64 state_start; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u32 times_prev[14]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct psi_group { + struct psi_group *parent; + bool enabled; + struct mutex avgs_lock; + struct psi_group_cpu *pcpu; + long: 32; + u64 avg_total[6]; + u64 avg_last_update; + u64 avg_next_update; + struct 
delayed_work avgs_work; + struct list_head avg_triggers; + u32 avg_nr_triggers[6]; + long: 32; + u64 total[12]; + long unsigned int avg[18]; + struct task_struct *rtpoll_task; + struct timer_list rtpoll_timer; + wait_queue_head_t rtpoll_wait; + atomic_t rtpoll_wakeup; + atomic_t rtpoll_scheduled; + struct mutex rtpoll_trigger_lock; + struct list_head rtpoll_triggers; + u32 rtpoll_nr_triggers[6]; + u32 rtpoll_states; + u64 rtpoll_min_period; + u64 rtpoll_total[6]; + u64 rtpoll_next_update; + u64 rtpoll_until; +}; + +struct cgroup_taskset; + +struct cftype; + +struct cgroup_subsys { + struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); + int (*css_online)(struct cgroup_subsys_state *); + void (*css_offline)(struct cgroup_subsys_state *); + void (*css_released)(struct cgroup_subsys_state *); + void (*css_free)(struct cgroup_subsys_state *); + void (*css_reset)(struct cgroup_subsys_state *); + void (*css_rstat_flush)(struct cgroup_subsys_state *, int); + int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*css_local_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*can_attach)(struct cgroup_taskset *); + void (*cancel_attach)(struct cgroup_taskset *); + void (*attach)(struct cgroup_taskset *); + void (*post_attach)(); + int (*can_fork)(struct task_struct *, struct css_set *); + void (*cancel_fork)(struct task_struct *, struct css_set *); + void (*fork)(struct task_struct *); + void (*exit)(struct task_struct *); + void (*release)(struct task_struct *); + void (*bind)(struct cgroup_subsys_state *); + bool early_init: 1; + bool implicit_on_dfl: 1; + bool threaded: 1; + int id; + const char *name; + const char *legacy_name; + struct cgroup_root *root; + struct idr css_idr; + struct list_head cfts; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + unsigned int depends_on; +}; + +struct cgroup_rstat_cpu { + struct u64_stats_sync bsync; + long: 32; + struct cgroup_base_stat bstat; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat subtree_bstat; + struct cgroup_base_stat last_subtree_bstat; + struct cgroup *updated_children; + struct cgroup *updated_next; +}; + +struct cgroup_root { + struct kernfs_root *kf_root; + unsigned int subsys_mask; + int hierarchy_id; + struct list_head root_list; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cgroup cgrp; + struct cgroup *cgrp_ancestor_storage; + atomic_t nr_cgrps; + unsigned int flags; + char release_agent_path[4096]; + char name[64]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct cftype { + char name[64]; + long unsigned int private; + size_t max_write_len; + unsigned int flags; + unsigned int file_offset; + struct cgroup_subsys *ss; + struct list_head node; + struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); + s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, 
loff_t *); + void (*seq_stop)(struct seq_file *, void *); + int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); + int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + struct lock_class_key lockdep_key; +}; + +struct dev_pm_ops { + int (*prepare)(struct device *); + void (*complete)(struct device *); + int (*suspend)(struct device *); + int (*resume)(struct device *); + int (*freeze)(struct device *); + int (*thaw)(struct device *); + int (*poweroff)(struct device *); + int (*restore)(struct device *); + int (*suspend_late)(struct device *); + int (*resume_early)(struct device *); + int (*freeze_late)(struct device *); + int (*thaw_early)(struct device *); + int (*poweroff_late)(struct device *); + int (*restore_early)(struct device *); + int (*suspend_noirq)(struct device *); + int (*resume_noirq)(struct device *); + int (*freeze_noirq)(struct device *); + int (*thaw_noirq)(struct device *); + int (*poweroff_noirq)(struct device *); + int (*restore_noirq)(struct device *); + int (*runtime_suspend)(struct device *); + int (*runtime_resume)(struct device *); + int (*runtime_idle)(struct device *); +}; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; + unsigned int clock_op_might_sleep; + struct mutex clock_mutex; + struct list_head clock_list; +}; + +struct dev_pm_domain { + struct dev_pm_ops ops; + int (*start)(struct device *); + void (*detach)(struct device *, bool); + int (*activate)(struct device *); + void (*sync)(struct device *); + void (*dismiss)(struct device *); + int (*set_performance_state)(struct device *, unsigned int); +}; + +struct bus_type { + const char *name; + const char *dev_name; + const struct attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + int (*match)(struct device *, const struct device_driver *); + int (*uevent)(const struct device *, struct kobj_uevent_env *); + int (*probe)(struct device *); + void (*sync_state)(struct device *); + void (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*online)(struct device *); + int (*offline)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + int (*num_vf)(struct device *); + int (*dma_configure)(struct device *); + void (*dma_cleanup)(struct device *); + const struct dev_pm_ops *pm; + bool need_parent_lock; +}; + +enum probe_type { + PROBE_DEFAULT_STRATEGY = 0, + PROBE_PREFER_ASYNCHRONOUS = 1, + PROBE_FORCE_SYNCHRONOUS = 2, +}; + +struct of_device_id; + +struct acpi_device_id; + +struct driver_private; + +struct device_driver { + const char *name; + const struct bus_type *bus; + struct module *owner; + const char *mod_name; + bool suppress_bind_attrs; + enum probe_type probe_type; + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + const struct dev_pm_ops *pm; + void (*coredump)(struct device *); + struct driver_private *p; +}; + +struct class { + const char *name; + const struct attribute_group **class_groups; + const struct 
attribute_group **dev_groups; + int (*dev_uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *); + void (*class_release)(const struct class *); + void (*dev_release)(struct device *); + int (*shutdown_pre)(struct device *); + const struct kobj_ns_type_operations *ns_type; + const void * (*namespace)(const struct device *); + void (*get_ownership)(const struct device *, kuid_t *, kgid_t *); + const struct dev_pm_ops *pm; +}; + +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *, kuid_t *, kgid_t *); + void (*release)(struct device *); + const struct dev_pm_ops *pm; +}; + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + const char *path; + char **argv; + char **envp; + int wait; + int retval; + int (*init)(struct subprocess_info *, struct cred *); + void (*cleanup)(struct subprocess_info *); + void *data; +}; + +struct of_device_id { + char name[32]; + char type[32]; + char compatible[128]; + const void *data; +}; + +typedef long unsigned int kernel_ulong_t; + +struct acpi_device_id { + __u8 id[16]; + kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; +}; + +struct device_dma_parameters { + unsigned int max_segment_size; + unsigned int min_align_mask; + long unsigned int segment_boundary_mask; +}; + +enum device_physical_location_panel { + DEVICE_PANEL_TOP = 0, + DEVICE_PANEL_BOTTOM = 1, + DEVICE_PANEL_LEFT = 2, + DEVICE_PANEL_RIGHT = 3, + DEVICE_PANEL_FRONT = 4, + DEVICE_PANEL_BACK = 5, + DEVICE_PANEL_UNKNOWN = 6, +}; + +enum device_physical_location_vertical_position { + DEVICE_VERT_POS_UPPER = 0, + DEVICE_VERT_POS_CENTER = 1, + DEVICE_VERT_POS_LOWER = 2, +}; + +enum device_physical_location_horizontal_position { + DEVICE_HORI_POS_LEFT = 0, + DEVICE_HORI_POS_CENTER = 1, + DEVICE_HORI_POS_RIGHT = 2, +}; + +struct device_physical_location { + enum device_physical_location_panel panel; + enum device_physical_location_vertical_position vertical_position; + enum device_physical_location_horizontal_position horizontal_position; + bool dock; + bool lid; +}; + +typedef u32 dma_addr_t; + +struct bus_dma_region { + phys_addr_t cpu_start; + dma_addr_t dma_start; + u64 size; +}; + +struct blk_holder_ops { + void (*mark_dead)(struct block_device *, bool); + void (*sync)(struct block_device *); + int (*freeze)(struct block_device *); + int (*thaw)(struct block_device *); +}; + +typedef void *mempool_alloc_t(gfp_t, void *); + +typedef void mempool_free_t(void *, void *); + +struct mempool_s { + spinlock_t lock; + int min_nr; + int curr_nr; + void **elements; + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +typedef struct mempool_s mempool_t; + +struct bio_alloc_cache; + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + struct bio_alloc_cache *cache; + mempool_t bio_pool; + mempool_t bvec_pool; + unsigned int back_pad; + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; + struct hlist_node cpuhp_dead; +}; + +struct kvec { + void *iov_base; + size_t iov_len; +}; + +struct folio_queue { + struct folio_batch vec; + u8 orders[31]; + struct folio_queue *next; + struct folio_queue *prev; + long unsigned int marks; + long unsigned int marks2; + long unsigned int marks3; +}; + +struct 
mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + atomic_t generation; +}; + +struct lruvec_stats_percpu; + +struct lruvec_stats; + +struct mem_cgroup_per_node { + struct mem_cgroup *memcg; + struct lruvec_stats_percpu *lruvec_stats_percpu; + struct lruvec_stats *lruvec_stats; + struct shrinker_info *shrinker_info; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + struct lruvec lruvec; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + long unsigned int lru_zone_size[10]; + struct mem_cgroup_reclaim_iter iter; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct swap_cluster_info { + spinlock_t lock; + u16 count; + u8 flags; + u8 order; + struct list_head list; +}; + +struct percpu_cluster { + unsigned int next[1]; +}; + +struct bpf_insn { + __u8 code; + __u8 dst_reg: 4; + __u8 src_reg: 4; + __s16 off; + __s32 imm; +}; + +enum bpf_cgroup_iter_order { + BPF_CGROUP_ITER_ORDER_UNSPEC = 0, + BPF_CGROUP_ITER_SELF_ONLY = 1, + BPF_CGROUP_ITER_DESCENDANTS_PRE = 2, + BPF_CGROUP_ITER_DESCENDANTS_POST = 3, + BPF_CGROUP_ITER_ANCESTORS_UP = 4, +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, + BPF_MAP_TYPE_BLOOM_FILTER = 30, + BPF_MAP_TYPE_USER_RINGBUF = 31, + BPF_MAP_TYPE_CGRP_STORAGE = 32, + BPF_MAP_TYPE_ARENA = 33, + __MAX_BPF_MAP_TYPE = 34, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + 
BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, + BPF_PROG_TYPE_NETFILTER = 32, + __MAX_BPF_PROG_TYPE = 33, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + BPF_PERF_EVENT = 41, + BPF_TRACE_KPROBE_MULTI = 42, + BPF_LSM_CGROUP = 43, + BPF_STRUCT_OPS = 44, + BPF_NETFILTER = 45, + BPF_TCX_INGRESS = 46, + BPF_TCX_EGRESS = 47, + BPF_TRACE_UPROBE_MULTI = 48, + BPF_CGROUP_UNIX_CONNECT = 49, + BPF_CGROUP_UNIX_SENDMSG = 50, + BPF_CGROUP_UNIX_RECVMSG = 51, + BPF_CGROUP_UNIX_GETPEERNAME = 52, + BPF_CGROUP_UNIX_GETSOCKNAME = 53, + BPF_NETKIT_PRIMARY = 54, + BPF_NETKIT_PEER = 55, + BPF_TRACE_KPROBE_SESSION = 56, + BPF_TRACE_UPROBE_SESSION = 57, + __MAX_BPF_ATTACH_TYPE = 58, +}; + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 inner_map_fd; + __u32 numa_node; + char map_name[16]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + __u64 map_extra; + __s32 value_type_btf_obj_fd; + __s32 map_token_fd; + }; + struct { + __u32 map_fd; + long: 32; + __u64 key; + union { + __u64 value; + __u64 next_key; + }; + __u64 flags; + }; + struct { + __u64 in_batch; + __u64 out_batch; + __u64 keys; + __u64 values; + __u32 count; + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 insns; + __u64 license; + __u32 log_level; + __u32 log_size; + __u64 log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[16]; + __u32 prog_ifindex; + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + union { + __u32 attach_prog_fd; + __u32 attach_btf_obj_fd; + }; + __u32 core_relo_cnt; + __u64 fd_array; + __u64 core_relos; + __u32 core_relo_rec_size; + 
__u32 log_true_size; + __s32 prog_token_fd; + long: 32; + }; + struct { + __u64 pathname; + __u32 bpf_fd; + __u32 file_flags; + __s32 path_fd; + long: 32; + }; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + }; + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __u64 data_in; + __u64 data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + __u64 ctx_in; + __u64 ctx_out; + __u32 flags; + __u32 cpu; + __u32 batch_size; + long: 32; + } test; + struct { + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 info; + } info; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 prog_ids; + union { + __u32 prog_cnt; + __u32 count; + }; + long: 32; + __u64 prog_attach_flags; + __u64 link_ids; + __u64 link_attach_flags; + __u64 revision; + } query; + struct { + __u64 name; + __u32 prog_fd; + long: 32; + __u64 cookie; + } raw_tracepoint; + struct { + __u64 btf; + __u64 btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + __u32 btf_log_true_size; + __u32 btf_flags; + __s32 btf_token_fd; + }; + struct { + __u32 pid; + __u32 fd; + __u32 flags; + __u32 buf_len; + __u64 buf; + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + struct { + union { + __u32 prog_fd; + __u32 map_fd; + }; + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 flags; + union { + __u32 target_btf_id; + struct { + __u64 iter_info; + __u32 iter_info_len; + long: 32; + }; + struct { + __u64 bpf_cookie; + } perf_event; + struct { + __u32 flags; + __u32 cnt; + __u64 syms; + __u64 addrs; + __u64 cookies; + } kprobe_multi; + struct { + __u32 target_btf_id; + long: 32; + __u64 cookie; + } tracing; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + long: 32; + __u64 expected_revision; + } tcx; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 cnt; + __u32 flags; + __u32 pid; + long: 32; + } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + long: 32; + __u64 expected_revision; + } netkit; + }; + } link_create; + struct { + __u32 link_fd; + union { + __u32 new_prog_fd; + __u32 new_map_fd; + }; + __u32 flags; + union { + __u32 old_prog_fd; + __u32 old_map_fd; + }; + } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { + __u32 type; + } enable_stats; + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + struct { + __u32 prog_fd; + __u32 map_fd; + __u32 flags; + } prog_bind_map; + struct { + __u32 flags; + __u32 bpffs_fd; + } token_create; +}; + +struct bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + +struct sock_filter { + __u16 code; + __u8 jt; + __u8 jf; + __u32 k; +}; + +struct btf_type { + __u32 name_off; + __u32 info; + union { + __u32 size; + __u32 type; + }; +}; + +struct bpf_prog_stats; + +struct bpf_prog_aux; + +struct 
sock_fprog_kern; + +struct bpf_prog { + u16 pages; + u16 jited: 1; + u16 jit_requested: 1; + u16 gpl_compatible: 1; + u16 cb_access: 1; + u16 dst_needed: 1; + u16 blinding_requested: 1; + u16 blinded: 1; + u16 is_func: 1; + u16 kprobe_override: 1; + u16 has_callchain_buf: 1; + u16 enforce_expected_attach_type: 1; + u16 call_get_stack: 1; + u16 call_get_func_ip: 1; + u16 tstamp_type_access: 1; + u16 sleepable: 1; + enum bpf_prog_type type; + enum bpf_attach_type expected_attach_type; + u32 len; + u32 jited_len; + u8 tag[8]; + struct bpf_prog_stats *stats; + int *active; + unsigned int (*bpf_func)(const void *, const struct bpf_insn *); + struct bpf_prog_aux *aux; + struct sock_fprog_kern *orig_prog; + union { + struct { + struct {} __empty_insns; + struct sock_filter insns[0]; + }; + struct { + struct {} __empty_insnsi; + struct bpf_insn insnsi[0]; + }; + }; +}; + +enum btf_field_type { + BPF_SPIN_LOCK = 1, + BPF_TIMER = 2, + BPF_KPTR_UNREF = 4, + BPF_KPTR_REF = 8, + BPF_KPTR_PERCPU = 16, + BPF_KPTR = 28, + BPF_LIST_HEAD = 32, + BPF_LIST_NODE = 64, + BPF_RB_ROOT = 128, + BPF_RB_NODE = 256, + BPF_GRAPH_NODE = 320, + BPF_GRAPH_ROOT = 160, + BPF_REFCOUNT = 512, + BPF_WORKQUEUE = 1024, + BPF_UPTR = 2048, +}; + +typedef void (*btf_dtor_kfunc_t)(void *); + +struct btf; + +struct btf_field_kptr { + struct btf *btf; + struct module *module; + btf_dtor_kfunc_t dtor; + u32 btf_id; +}; + +struct btf_record; + +struct btf_field_graph_root { + struct btf *btf; + u32 value_btf_id; + u32 node_offset; + struct btf_record *value_rec; +}; + +struct btf_field { + u32 offset; + u32 size; + enum btf_field_type type; + union { + struct btf_field_kptr kptr; + struct btf_field_graph_root graph_root; + }; +}; + +struct btf_record { + u32 cnt; + u32 field_mask; + int spin_lock_off; + int timer_off; + int wq_off; + int refcount_off; + struct btf_field fields[0]; +}; + +typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); + +struct bpf_iter_aux_info; + +typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); + +enum bpf_iter_task_type { + BPF_TASK_ITER_ALL = 0, + BPF_TASK_ITER_TID = 1, + BPF_TASK_ITER_TGID = 2, +}; + +struct bpf_map; + +struct bpf_iter_aux_info { + struct bpf_map *map; + struct { + struct cgroup *start; + enum bpf_cgroup_iter_order order; + } cgroup; + struct { + enum bpf_iter_task_type type; + u32 pid; + } task; +}; + +typedef void (*bpf_iter_fini_seq_priv_t)(void *); + +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + +struct bpf_local_storage_map; + +struct bpf_verifier_env; + +struct bpf_func_state; + +struct bpf_map_ops { + int (*map_alloc_check)(union bpf_attr *); + struct bpf_map * (*map_alloc)(union bpf_attr *); + void (*map_release)(struct bpf_map *, struct file *); + void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *, void *, void *); + void (*map_release_uref)(struct bpf_map *); + void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *); + int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_lookup_and_delete_elem)(struct bpf_map *, void *, void *, u64); + int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_update_batch)(struct bpf_map *, struct file *, const union bpf_attr *, union bpf_attr *); + int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + void * 
(*map_lookup_elem)(struct bpf_map *, void *); + long int (*map_update_elem)(struct bpf_map *, void *, void *, u64); + long int (*map_delete_elem)(struct bpf_map *, void *); + long int (*map_push_elem)(struct bpf_map *, void *, u64); + long int (*map_pop_elem)(struct bpf_map *, void *); + long int (*map_peek_elem)(struct bpf_map *, void *); + void * (*map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); + void (*map_fd_put_ptr)(struct bpf_map *, void *, bool); + int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); + u32 (*map_fd_sys_lookup_elem)(void *); + void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); + int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *); + int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); + int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32); + int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); + int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); + __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); + long unsigned int (*map_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); + void (*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); + struct bpf_local_storage ** (*map_owner_storage_ptr)(void *); + long int (*map_redirect)(struct bpf_map *, u64, u64); + bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); + int (*map_set_for_each_callback_args)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *); + long int (*map_for_each_callback)(struct bpf_map *, bpf_callback_t, void *, u64); + u64 (*map_mem_usage)(const struct bpf_map *); + int *map_btf_id; + const struct bpf_iter_seq_info *iter_seq_info; +}; + +struct bpf_map { + const struct bpf_map_ops *ops; + struct bpf_map *inner_map_meta; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u64 map_extra; + u32 map_flags; + u32 id; + struct btf_record *record; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + u32 btf_vmlinux_value_type_id; + struct btf *btf; + struct obj_cgroup *objcg; + char name[16]; + struct mutex freeze_mutex; + long: 32; + atomic64_t refcnt; + atomic64_t usercnt; + union { + struct work_struct work; + struct callback_head rcu; + }; + atomic64_t writecnt; + struct { + const struct btf_type *attach_func_proto; + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + } owner; + bool bypass_spec_v1; + bool frozen; + bool free_after_mult_rcu_gp; + bool free_after_rcu_gp; + long: 32; + atomic64_t sleepable_refcnt; + s64 *elem_count; + long: 32; +}; + +struct btf_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + __u32 type_off; + __u32 type_len; + __u32 str_off; + __u32 str_len; +}; + +struct btf_kfunc_set_tab; + +struct btf_id_dtor_kfunc_tab; + +struct btf_struct_metas; + +struct btf_struct_ops_tab; + +struct btf { + void *data; + struct btf_type **types; + u32 *resolved_ids; + u32 *resolved_sizes; + const char *strings; + void *nohdr_data; + struct btf_header hdr; + u32 nr_types; + u32 types_size; + u32 data_size; + 
refcount_t refcnt; + u32 id; + struct callback_head rcu; + struct btf_kfunc_set_tab *kfunc_set_tab; + struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; + struct btf_struct_metas *struct_meta_tab; + struct btf_struct_ops_tab *struct_ops_tab; + struct btf *base_btf; + u32 start_id; + u32 start_str_off; + char name[60]; + bool kernel_btf; + __u32 *base_id_map; +}; + +struct bpf_arena; + +struct bpf_ksym { + long unsigned int start; + long unsigned int end; + char name[512]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + +struct bpf_ctx_arg_aux; + +struct bpf_trampoline; + +struct bpf_jit_poke_descriptor; + +struct bpf_kfunc_desc_tab; + +struct bpf_kfunc_btf_tab; + +struct bpf_prog_ops; + +struct btf_mod_pair; + +struct bpf_token; + +struct bpf_prog_offload; + +struct bpf_func_info_aux; + +struct bpf_prog_aux { + atomic64_t refcnt; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; + u32 real_func_cnt; + u32 func_idx; + u32 attach_btf_id; + u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; + struct btf *attach_btf; + const struct bpf_ctx_arg_aux *ctx_arg_info; + void *priv_stack_ptr; + struct mutex dst_mutex; + struct bpf_prog *dst_prog; + struct bpf_trampoline *dst_trampoline; + enum bpf_prog_type saved_dst_prog_type; + enum bpf_attach_type saved_dst_attach_type; + bool verifier_zext; + bool dev_bound; + bool offload_requested; + bool attach_btf_trace; + bool attach_tracing_prog; + bool func_proto_unreliable; + bool tail_call_reachable; + bool xdp_has_frags; + bool exception_cb; + bool exception_boundary; + bool is_extended; + bool jits_use_priv_stack; + bool priv_stack_requested; + long: 32; + u64 prog_array_member_cnt; + struct mutex ext_mutex; + struct bpf_arena *arena; + void (*recursion_detected)(struct bpf_prog *); + const struct btf_type *attach_func_proto; + const char *attach_func_name; + struct bpf_prog **func; + void *jit_data; + struct bpf_jit_poke_descriptor *poke_tab; + struct bpf_kfunc_desc_tab *kfunc_tab; + struct bpf_kfunc_btf_tab *kfunc_btf_tab; + u32 size_poke_tab; + struct bpf_ksym ksym; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct mutex used_maps_mutex; + struct btf_mod_pair *used_btfs; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; + u32 verified_insns; + int cgroup_atype; + struct bpf_map *cgroup_storage[2]; + char name[16]; + u64 (*bpf_exception_cb)(u64, u64, u64, u64, u64); + struct bpf_token *token; + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; + struct bpf_line_info *linfo; + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + u32 linfo_idx; + struct module *mod; + u32 num_exentries; + struct exception_table_entry *extable; + union { + struct work_struct work; + struct callback_head rcu; + }; +}; + +enum bpf_reg_type { + NOT_INIT = 0, + SCALAR_VALUE = 1, + PTR_TO_CTX = 2, + CONST_PTR_TO_MAP = 3, + PTR_TO_MAP_VALUE = 4, + PTR_TO_MAP_KEY = 5, + PTR_TO_STACK = 6, + PTR_TO_PACKET_META = 7, + PTR_TO_PACKET = 8, + PTR_TO_PACKET_END = 9, + PTR_TO_FLOW_KEYS = 10, + PTR_TO_SOCKET = 11, + PTR_TO_SOCK_COMMON = 12, + PTR_TO_TCP_SOCK = 13, + PTR_TO_TP_BUFFER = 14, + PTR_TO_XDP_SOCK = 15, + PTR_TO_BTF_ID = 16, + PTR_TO_MEM = 17, + PTR_TO_ARENA = 18, + PTR_TO_BUF = 19, + PTR_TO_FUNC = 20, + CONST_PTR_TO_DYNPTR = 21, + __BPF_REG_TYPE_MAX = 22, + PTR_TO_MAP_VALUE_OR_NULL = 260, + 
PTR_TO_SOCKET_OR_NULL = 267, + PTR_TO_SOCK_COMMON_OR_NULL = 268, + PTR_TO_TCP_SOCK_OR_NULL = 269, + PTR_TO_BTF_ID_OR_NULL = 272, + __BPF_REG_TYPE_LIMIT = 134217727, +}; + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *); +}; + +struct net_device; + +struct bpf_offload_dev; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +struct btf_func_model { + u8 ret_size; + u8 ret_flags; + u8 nr_args; + u8 arg_size[12]; + u8 arg_flags[12]; +}; + +struct bpf_tramp_image { + void *image; + int size; + struct bpf_ksym ksym; + struct percpu_ref pcref; + void *ip_after_call; + void *ip_epilogue; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct ftrace_ops; + +struct bpf_trampoline { + struct hlist_node hlist; + struct ftrace_ops *fops; + struct mutex mutex; + refcount_t refcnt; + u32 flags; + long: 32; + u64 key; + struct { + struct btf_func_model model; + void *addr; + bool ftrace_managed; + } func; + struct bpf_prog *extension_prog; + struct hlist_head progs_hlist[3]; + int progs_cnt[3]; + struct bpf_tramp_image *cur_image; + long: 32; +}; + +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; + bool called: 1; + bool verified: 1; +}; + +struct bpf_jit_poke_descriptor { + void *tailcall_target; + void *tailcall_bypass; + void *bypass_addr; + void *aux; + union { + struct { + struct bpf_map *map; + u32 key; + } tail_call; + }; + bool tailcall_target_stable; + u8 adj_off; + u16 reason; + u32 insn_idx; +}; + +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + struct btf *btf; + u32 btf_id; +}; + +struct btf_mod_pair { + struct btf *btf; + struct module *module; +}; + +struct bpf_token { + struct work_struct work; + atomic64_t refcnt; + struct user_namespace *userns; + long: 32; + u64 allowed_cmds; + u64 allowed_maps; + u64 allowed_progs; + u64 allowed_attachs; +}; + +enum fs_value_type { + fs_value_is_undefined = 0, + fs_value_is_flag = 1, + fs_value_is_string = 2, + fs_value_is_blob = 3, + fs_value_is_filename = 4, + fs_value_is_file = 5, +}; + +struct fs_parameter { + const char *key; + enum fs_value_type type: 8; + union { + char *string; + void *blob; + struct filename *name; + struct file *file; + }; + size_t size; + int dirfd; +}; + +struct fc_log { + refcount_t usage; + u8 head; + u8 tail; + u8 need_free; + struct module *owner; + char *buffer[8]; +}; + +struct fs_context_operations { + void (*free)(struct fs_context *); + int (*dup)(struct fs_context *, struct fs_context *); + int (*parse_param)(struct fs_context *, struct fs_parameter *); + int (*parse_monolithic)(struct fs_context *, void *); + int (*get_tree)(struct fs_context *); + int (*reconfigure)(struct fs_context *); +}; + +struct fs_parse_result { + bool negated; + long: 32; + union { + bool boolean; + int int_32; + unsigned int uint_32; + u64 uint_64; + kuid_t uid; + kgid_t gid; + }; +}; + +struct pipe_buf_operations; + +struct pipe_buffer { + struct page *page; + unsigned int offset; + unsigned int len; + const struct pipe_buf_operations *ops; + unsigned int flags; + long unsigned int private; +}; + +struct pipe_buf_operations { + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + 
bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +struct umd_info { + const char *driver_name; + struct file *pipe_to_umh; + struct file *pipe_from_umh; + struct path wd; + struct pid *tgid; +}; + +typedef long unsigned int irq_hw_number_t; + +typedef struct cpumask cpumask_var_t[1]; + +struct irq_desc; + +typedef void (*irq_flow_handler_t)(struct irq_desc *); + +struct msi_desc; + +struct irq_common_data { + unsigned int state_use_accessors; + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; + cpumask_var_t effective_affinity; + unsigned int ipi_offset; +}; + +struct irq_chip; + +struct irq_data { + u32 mask; + unsigned int irq; + irq_hw_number_t hwirq; + struct irq_common_data *common; + struct irq_chip *chip; + struct irq_domain *domain; + struct irq_data *parent_data; + void *chip_data; +}; + +struct irqstat; + +struct irq_affinity_notify; + +struct irq_desc { + struct irq_common_data irq_common_data; + struct irq_data irq_data; + struct irqstat *kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; + unsigned int wake_depth; + unsigned int tot_count; + unsigned int irq_count; + long unsigned int last_unhandled; + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; + const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; + long unsigned int threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; + struct proc_dir_entry *dir; + struct mutex request_mutex; + int parent_irq; + struct module *owner; + const char *name; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct codetag { + unsigned int flags; + unsigned int lineno; + const char *modname; + const char *function; + const char *filename; + long: 32; +}; + +struct alloc_tag_counters { + u64 bytes; + u64 calls; +}; + +struct alloc_tag { + struct codetag ct; + struct alloc_tag_counters *counters; + long: 32; +}; + +typedef struct { + void *lock; +} class_rcu_t; + +struct maple_alloc { + long unsigned int total; + unsigned char node_count; + unsigned int request_count; + struct maple_alloc *slot[61]; +}; + +struct maple_enode; + +enum store_type { + wr_invalid = 0, + wr_new_root = 1, + wr_store_root = 2, + wr_exact_fit = 3, + wr_spanning_store = 4, + wr_split_store = 5, + wr_rebalance = 6, + wr_append = 7, + wr_node_store = 8, + wr_slot_store = 9, +}; + +enum maple_status { + ma_active = 0, + ma_start = 1, + ma_root = 2, + ma_none = 3, + ma_pause = 4, + ma_overflow = 5, + ma_underflow = 6, + ma_error = 7, +}; + +struct ma_state { + struct maple_tree *tree; + long unsigned int index; + long unsigned int last; + struct maple_enode *node; + long unsigned int min; + long unsigned int max; + struct maple_alloc *alloc; + enum maple_status status; + unsigned char depth; + unsigned char offset; + unsigned char mas_flags; + unsigned char end; + enum store_type store_type; +}; + +enum { + IRQ_TYPE_NONE = 0, + IRQ_TYPE_EDGE_RISING = 1, + IRQ_TYPE_EDGE_FALLING = 2, + IRQ_TYPE_EDGE_BOTH = 3, + IRQ_TYPE_LEVEL_HIGH = 4, + IRQ_TYPE_LEVEL_LOW = 8, + IRQ_TYPE_LEVEL_MASK = 12, + IRQ_TYPE_SENSE_MASK = 15, + 
IRQ_TYPE_DEFAULT = 15, + IRQ_TYPE_PROBE = 16, + IRQ_LEVEL = 256, + IRQ_PER_CPU = 512, + IRQ_NOPROBE = 1024, + IRQ_NOREQUEST = 2048, + IRQ_NOAUTOEN = 4096, + IRQ_NO_BALANCING = 8192, + IRQ_MOVE_PCNTXT = 16384, + IRQ_NESTED_THREAD = 32768, + IRQ_NOTHREAD = 65536, + IRQ_PER_CPU_DEVID = 131072, + IRQ_IS_POLLED = 262144, + IRQ_DISABLE_UNLAZY = 524288, + IRQ_HIDDEN = 1048576, + IRQ_NO_DEBUG = 2097152, +}; + +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING = 0, + IRQCHIP_STATE_ACTIVE = 1, + IRQCHIP_STATE_MASKED = 2, + IRQCHIP_STATE_LINE_LEVEL = 3, +}; + +struct msi_msg; + +struct irq_chip { + const char *name; + unsigned int (*irq_startup)(struct irq_data *); + void (*irq_shutdown)(struct irq_data *); + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_ack)(struct irq_data *); + void (*irq_mask)(struct irq_data *); + void (*irq_mask_ack)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_eoi)(struct irq_data *); + int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); + int (*irq_retrigger)(struct irq_data *); + int (*irq_set_type)(struct irq_data *, unsigned int); + int (*irq_set_wake)(struct irq_data *, unsigned int); + void (*irq_bus_lock)(struct irq_data *); + void (*irq_bus_sync_unlock)(struct irq_data *); + void (*irq_suspend)(struct irq_data *); + void (*irq_resume)(struct irq_data *); + void (*irq_pm_shutdown)(struct irq_data *); + void (*irq_calc_mask)(struct irq_data *); + void (*irq_print_chip)(struct irq_data *, struct seq_file *); + int (*irq_request_resources)(struct irq_data *); + void (*irq_release_resources)(struct irq_data *); + void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); + void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); + int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); + int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); + int (*irq_set_vcpu_affinity)(struct irq_data *, void *); + void (*ipi_send_single)(struct irq_data *, unsigned int); + void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); + int (*irq_nmi_setup)(struct irq_data *); + void (*irq_nmi_teardown)(struct irq_data *); + long unsigned int flags; +}; + +enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_WIRED = 1, + DOMAIN_BUS_GENERIC_MSI = 2, + DOMAIN_BUS_PCI_MSI = 3, + DOMAIN_BUS_PLATFORM_MSI = 4, + DOMAIN_BUS_NEXUS = 5, + DOMAIN_BUS_IPI = 6, + DOMAIN_BUS_FSL_MC_MSI = 7, + DOMAIN_BUS_TI_SCI_INTA_MSI = 8, + DOMAIN_BUS_WAKEUP = 9, + DOMAIN_BUS_VMD_MSI = 10, + DOMAIN_BUS_PCI_DEVICE_MSI = 11, + DOMAIN_BUS_PCI_DEVICE_MSIX = 12, + DOMAIN_BUS_DMAR = 13, + DOMAIN_BUS_AMDVI = 14, + DOMAIN_BUS_DEVICE_MSI = 15, + DOMAIN_BUS_WIRED_TO_MSI = 16, +}; + +struct irq_domain_ops; + +struct irq_domain_chip_generic; + +struct msi_parent_ops; + +struct irq_domain { + struct list_head link; + const char *name; + const struct irq_domain_ops *ops; + void *host_data; + unsigned int flags; + unsigned int mapcount; + struct mutex mutex; + struct irq_domain *root; + struct fwnode_handle *fwnode; + enum irq_domain_bus_token bus_token; + struct irq_domain_chip_generic *gc; + struct device *dev; + struct device *pm_dev; + struct irq_domain *parent; + const struct msi_parent_ops *msi_parent_ops; + void (*exit)(struct irq_domain *); + irq_hw_number_t hwirq_max; + unsigned int revmap_size; + struct xarray revmap_tree; + struct irq_data *revmap[0]; +}; + +enum { + IRQD_TRIGGER_MASK = 15, + IRQD_SETAFFINITY_PENDING = 256, + IRQD_ACTIVATED = 512, 
+ IRQD_NO_BALANCING = 1024, + IRQD_PER_CPU = 2048, + IRQD_AFFINITY_SET = 4096, + IRQD_LEVEL = 8192, + IRQD_WAKEUP_STATE = 16384, + IRQD_MOVE_PCNTXT = 32768, + IRQD_IRQ_DISABLED = 65536, + IRQD_IRQ_MASKED = 131072, + IRQD_IRQ_INPROGRESS = 262144, + IRQD_WAKEUP_ARMED = 524288, + IRQD_FORWARDED_TO_VCPU = 1048576, + IRQD_AFFINITY_MANAGED = 2097152, + IRQD_IRQ_STARTED = 4194304, + IRQD_MANAGED_SHUTDOWN = 8388608, + IRQD_SINGLE_TARGET = 16777216, + IRQD_DEFAULT_TRIGGER_SET = 33554432, + IRQD_CAN_RESERVE = 67108864, + IRQD_HANDLE_ENFORCE_IRQCTX = 134217728, + IRQD_AFFINITY_ON_ACTIVATE = 268435456, + IRQD_IRQ_ENABLED_ON_SUSPEND = 536870912, + IRQD_RESEND_WHEN_IN_PROGRESS = 1073741824, +}; + +struct irqstat { + unsigned int cnt; +}; + +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *); + void (*release)(struct kref *); +}; + +struct irq_chip_regs { + long unsigned int enable; + long unsigned int disable; + long unsigned int mask; + long unsigned int ack; + long unsigned int eoi; + long unsigned int type; +}; + +struct irq_chip_type { + struct irq_chip chip; + struct irq_chip_regs regs; + irq_flow_handler_t handler; + u32 type; + u32 mask_cache_priv; + u32 *mask_cache; +}; + +struct irq_chip_generic { + raw_spinlock_t lock; + void *reg_base; + u32 (*reg_readl)(void *); + void (*reg_writel)(u32, void *); + void (*suspend)(struct irq_chip_generic *); + void (*resume)(struct irq_chip_generic *); + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; + u32 wake_enabled; + u32 wake_active; + unsigned int num_ct; + void *private; + long unsigned int installed; + long unsigned int unused; + struct irq_domain *domain; + struct list_head list; + struct irq_chip_type chip_types[0]; +}; + +enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1, + IRQ_GC_INIT_NESTED_LOCK = 2, + IRQ_GC_MASK_CACHE_PER_TYPE = 4, + IRQ_GC_NO_MASK = 8, + IRQ_GC_BE_IO = 16, +}; + +struct irq_domain_chip_generic { + unsigned int irqs_per_chip; + unsigned int num_chips; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + void (*exit)(struct irq_chip_generic *); + struct irq_chip_generic *gc[0]; +}; + +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed: 1; +}; + +struct kernel_stat { + long unsigned int irqs_sum; + unsigned int softirqs[10]; +}; + +typedef u32 phandle; + +struct property { + char *name; + int length; + void *value; + struct property *next; + struct bin_attribute attr; +}; + +struct device_node { + const char *name; + phandle phandle; + const char *full_name; + struct fwnode_handle fwnode; + struct property *properties; + struct property *deadprops; + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; + struct kobject kobj; + long unsigned int _flags; + void *data; +}; + +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[16]; +}; + +struct irq_domain_ops { + int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); + int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); + int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t); + void (*unmap)(struct irq_domain *, unsigned int); + int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *); + int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); + void (*free)(struct irq_domain 
*, unsigned int, unsigned int); + int (*activate)(struct irq_domain *, struct irq_data *, bool); + void (*deactivate)(struct irq_domain *, struct irq_data *); + int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *); +}; + +struct msi_domain_info; + +struct msi_parent_ops { + u32 supported_flags; + u32 required_flags; + u32 bus_select_token; + u32 bus_select_mask; + const char *prefix; + bool (*init_dev_msi_info)(struct device *, struct irq_domain *, struct irq_domain *, struct msi_domain_info *); +}; + +enum { + IRQS_AUTODETECT = 1, + IRQS_SPURIOUS_DISABLED = 2, + IRQS_POLL_INPROGRESS = 8, + IRQS_ONESHOT = 32, + IRQS_REPLAY = 64, + IRQS_WAITING = 128, + IRQS_PENDING = 512, + IRQS_SUSPENDED = 2048, + IRQS_TIMINGS = 4096, + IRQS_NMI = 8192, + IRQS_SYSFS = 16384, +}; + +enum { + _IRQ_DEFAULT_INIT_FLAGS = 0, + _IRQ_PER_CPU = 512, + _IRQ_LEVEL = 256, + _IRQ_NOPROBE = 1024, + _IRQ_NOREQUEST = 2048, + _IRQ_NOTHREAD = 65536, + _IRQ_NOAUTOEN = 4096, + _IRQ_MOVE_PCNTXT = 16384, + _IRQ_NO_BALANCING = 8192, + _IRQ_NESTED_THREAD = 32768, + _IRQ_PER_CPU_DEVID = 131072, + _IRQ_IS_POLLED = 262144, + _IRQ_DISABLE_UNLAZY = 524288, + _IRQ_HIDDEN = 1048576, + _IRQ_NO_DEBUG = 2097152, + _IRQF_MODIFY_MASK = 2096911, +}; + +typedef struct { + void *lock; +} class_preempt_notrace_t; + +struct fs_pin; + +struct pid_namespace { + struct idr idr; + struct callback_head rcu; + unsigned int pid_allocated; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; + struct fs_pin *bacct; + struct user_namespace *user_ns; + struct ucounts *ucounts; + int reboot; + struct ns_common ns; + int memfd_noexec_scope; +}; + +struct perf_event_groups { + struct rb_root tree; + long: 32; + u64 index; +}; + +typedef struct { + atomic_long_t a; +} local_t; + +struct perf_event_context { + raw_spinlock_t lock; + struct mutex mutex; + struct list_head pmu_ctx_list; + long: 32; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; + struct list_head event_list; + int nr_events; + int nr_user; + int is_active; + int nr_task_data; + int nr_stat; + int nr_freq; + int rotate_disable; + refcount_t refcount; + struct task_struct *task; + long: 32; + u64 time; + u64 timestamp; + u64 timeoffset; + struct perf_event_context *parent_ctx; + long: 32; + u64 parent_gen; + u64 generation; + int pin_count; + int nr_cgroups; + struct callback_head callback_head; + local_t nr_no_switch_fast; + long: 32; +}; + +struct itimerspec64 { + struct timespec64 it_interval; + struct timespec64 it_value; +}; + +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0, + HRTIMER_MODE_REL = 1, + HRTIMER_MODE_PINNED = 2, + HRTIMER_MODE_SOFT = 4, + HRTIMER_MODE_HARD = 8, + HRTIMER_MODE_ABS_PINNED = 2, + HRTIMER_MODE_REL_PINNED = 3, + HRTIMER_MODE_ABS_SOFT = 4, + HRTIMER_MODE_REL_SOFT = 5, + HRTIMER_MODE_ABS_PINNED_SOFT = 6, + HRTIMER_MODE_REL_PINNED_SOFT = 7, + HRTIMER_MODE_ABS_HARD = 8, + HRTIMER_MODE_REL_HARD = 9, + HRTIMER_MODE_ABS_PINNED_HARD = 10, + HRTIMER_MODE_REL_PINNED_HARD = 11, +}; + +enum { + HI_SOFTIRQ = 0, + TIMER_SOFTIRQ = 1, + NET_TX_SOFTIRQ = 2, + NET_RX_SOFTIRQ = 3, + BLOCK_SOFTIRQ = 4, + IRQ_POLL_SOFTIRQ = 5, + TASKLET_SOFTIRQ = 6, + SCHED_SOFTIRQ = 7, + HRTIMER_SOFTIRQ = 8, + RCU_SOFTIRQ = 9, + NR_SOFTIRQS = 10, +}; + +struct trace_print_flags { + long unsigned int mask; + const char *name; +}; + +struct trace_event_functions; + +struct trace_event { + struct hlist_node node; + int type; + struct trace_event_functions 
*funcs; +}; + +struct trace_event_class; + +struct perf_event; + +struct trace_event_call { + struct list_head list; + struct trace_event_class *class; + union { + char *name; + struct tracepoint *tp; + }; + struct trace_event event; + char *print_fmt; + union { + void *module; + atomic_t refcnt; + }; + void *data; + int flags; + int perf_refcount; + struct hlist_head *perf_events; + struct bpf_prog_array *prog_array; + int (*perf_perm)(struct trace_event_call *, struct perf_event *); +}; + +struct trace_eval_map { + const char *system; + const char *eval_string; + long unsigned int eval_value; +}; + +struct ring_buffer_event { + u32 type_len: 5; + u32 time_delta: 27; + u32 array[0]; +}; + +struct seq_buf { + char *buffer; + size_t size; + size_t len; +}; + +struct trace_seq { + char buffer[8172]; + struct seq_buf seq; + size_t readpos; + int full; +}; + +struct perf_event_attr { + __u32 type; + __u32 size; + __u64 config; + union { + __u64 sample_period; + __u64 sample_freq; + }; + __u64 sample_type; + __u64 read_format; + __u64 disabled: 1; + __u64 inherit: 1; + __u64 pinned: 1; + __u64 exclusive: 1; + __u64 exclude_user: 1; + __u64 exclude_kernel: 1; + __u64 exclude_hv: 1; + __u64 exclude_idle: 1; + __u64 mmap: 1; + __u64 comm: 1; + __u64 freq: 1; + __u64 inherit_stat: 1; + __u64 enable_on_exec: 1; + __u64 task: 1; + __u64 watermark: 1; + __u64 precise_ip: 2; + __u64 mmap_data: 1; + __u64 sample_id_all: 1; + __u64 exclude_host: 1; + __u64 exclude_guest: 1; + __u64 exclude_callchain_kernel: 1; + __u64 exclude_callchain_user: 1; + __u64 mmap2: 1; + __u64 comm_exec: 1; + __u64 use_clockid: 1; + __u64 context_switch: 1; + __u64 write_backward: 1; + __u64 namespaces: 1; + __u64 ksymbol: 1; + __u64 bpf_event: 1; + __u64 aux_output: 1; + __u64 cgroup: 1; + __u64 text_poke: 1; + __u64 build_id: 1; + __u64 inherit_thread: 1; + __u64 remove_on_exec: 1; + __u64 sigtrap: 1; + __u64 __reserved_1: 26; + union { + __u32 wakeup_events; + __u32 wakeup_watermark; + }; + __u32 bp_type; + union { + __u64 bp_addr; + __u64 kprobe_func; + __u64 uprobe_path; + __u64 config1; + }; + union { + __u64 bp_len; + __u64 kprobe_addr; + __u64 probe_offset; + __u64 config2; + }; + __u64 branch_sample_type; + __u64 sample_regs_user; + __u32 sample_stack_user; + __s32 clockid; + __u64 sample_regs_intr; + __u32 aux_watermark; + __u16 sample_max_stack; + __u16 __reserved_2; + __u32 aux_sample_size; + union { + __u32 aux_action; + struct { + __u32 aux_start_paused: 1; + __u32 aux_pause: 1; + __u32 aux_resume: 1; + __u32 __reserved_3: 29; + }; + }; + __u64 sig_data; + __u64 config3; +}; + +union perf_mem_data_src { + __u64 val; + struct { + __u64 mem_rsvd: 18; + __u64 mem_hops: 3; + __u64 mem_blk: 3; + __u64 mem_snoopx: 2; + __u64 mem_remote: 1; + __u64 mem_lvl_num: 4; + __u64 mem_dtlb: 7; + __u64 mem_lock: 2; + __u64 mem_snoop: 5; + __u64 mem_lvl: 14; + __u64 mem_op: 5; + }; +}; + +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 mispred: 1; + __u64 predicted: 1; + __u64 in_tx: 1; + __u64 abort: 1; + __u64 cycles: 16; + __u64 type: 4; + __u64 spec: 2; + __u64 new_type: 4; + __u64 priv: 3; + __u64 reserved: 31; +}; + +union perf_sample_weight { + __u64 full; + struct { + __u16 var3_w; + __u16 var2_w; + __u32 var1_dw; + }; +}; + +typedef struct { + atomic64_t a; +} local64_t; + +struct irq_work { + struct __call_single_node node; + void (*func)(struct irq_work *); + struct rcuwait irqwait; +}; + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; + long: 32; +}; + +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 
0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, + BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, + BPF_LINK_TYPE_STRUCT_OPS = 9, + BPF_LINK_TYPE_NETFILTER = 10, + BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SOCKMAP = 14, + __MAX_BPF_LINK_TYPE = 15, +}; + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + long: 32; + union { + struct { + __u64 tp_name; + __u32 tp_name_len; + long: 32; + } raw_tracepoint; + struct { + __u32 attach_type; + __u32 target_obj_id; + __u32 target_btf_id; + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + long: 32; + } cgroup; + struct { + __u64 target_name; + __u32 target_name_len; + union { + struct { + __u32 map_id; + } map; + }; + union { + struct { + __u64 cgroup_id; + __u32 order; + long: 32; + } cgroup; + struct { + __u32 tid; + __u32 pid; + } task; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + struct { + __u32 map_id; + } struct_ops; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + __u64 addrs; + __u32 count; + __u32 flags; + __u64 missed; + __u64 cookies; + } kprobe_multi; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 path_size; + __u32 count; + __u32 flags; + __u32 pid; + } uprobe_multi; + struct { + __u32 type; + long: 32; + union { + struct { + __u64 file_name; + __u32 name_len; + __u32 offset; + __u64 cookie; + } uprobe; + struct { + __u64 func_name; + __u32 name_len; + __u32 offset; + __u64 addr; + __u64 missed; + __u64 cookie; + } kprobe; + struct { + __u64 tp_name; + __u32 name_len; + long: 32; + __u64 cookie; + } tracepoint; + struct { + __u64 config; + __u32 type; + long: 32; + __u64 cookie; + } event; + }; + } perf_event; + struct { + __u32 ifindex; + __u32 attach_type; + } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } sockmap; + }; +}; + +struct bpf_link_ops; + +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + bool sleepable; + union { + struct callback_head rcu; + struct work_struct work; + }; + long: 32; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *); + void (*dealloc)(struct bpf_link *); + void (*dealloc_deferred)(struct bpf_link *); + int (*detach)(struct bpf_link *); + int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); + void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); + int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); + int (*update_map)(struct bpf_link *, struct bpf_map *, struct bpf_map *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); +}; + +struct bpf_raw_tp_link { + struct bpf_link link; + struct bpf_raw_event_map *btp; + long: 32; + u64 cookie; +}; + +enum perf_event_state { + PERF_EVENT_STATE_DEAD = -4, + PERF_EVENT_STATE_EXIT = -3, + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +struct hw_perf_event_extra { + u64 config; + unsigned int reg; + int alloc; + int idx; + long: 32; +}; + +struct hw_perf_event { + union { + struct { + u64 config; + u64 last_tag; + long unsigned int config_base; + long 
unsigned int event_base; + int event_base_rdpmc; + int idx; + int last_cpu; + int flags; + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { + u64 aux_config; + unsigned int aux_paused; + long: 32; + }; + struct { + struct hrtimer hrtimer; + }; + struct { + struct list_head tp_list; + }; + struct { + u64 pwr_acc; + u64 ptsc; + }; + struct { + u8 iommu_bank; + u8 iommu_cntr; + u16 padding; + long: 32; + u64 conf; + u64 conf1; + }; + }; + struct task_struct *target; + void *addr_filters; + long unsigned int addr_filters_gen; + int state; + local64_t prev_count; + u64 sample_period; + union { + struct { + u64 last_period; + local64_t period_left; + }; + struct { + u64 saved_metric; + u64 saved_slots; + }; + }; + u64 interrupts_seq; + u64 interrupts; + u64 freq_time_stamp; + u64 freq_count_stamp; +}; + +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; +}; + +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); + +struct pmu; + +struct perf_event_pmu_context; + +struct perf_buffer; + +struct perf_addr_filter_range; + +struct event_filter; + +struct perf_cgroup; + +struct perf_event { + struct list_head event_entry; + struct list_head sibling_list; + struct list_head active_list; + struct rb_node group_node; + long: 32; + u64 group_index; + struct list_head migrate_entry; + struct hlist_node hlist_entry; + struct list_head active_entry; + int nr_siblings; + int event_caps; + int group_caps; + unsigned int group_generation; + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; + enum perf_event_state state; + unsigned int attach_state; + long: 32; + local64_t count; + atomic64_t child_count; + u64 total_time_enabled; + u64 total_time_running; + u64 tstamp; + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + struct perf_event_context *ctx; + struct perf_event_pmu_context *pmu_ctx; + atomic_long_t refcount; + long: 32; + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + int oncpu; + int cpu; + struct list_head owner_entry; + struct task_struct *owner; + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_buffer *rb; + struct list_head rb_entry; + long unsigned int rcu_batches; + int rcu_pending; + wait_queue_head_t waitq; + struct fasync_struct *fasync; + unsigned int pending_wakeup; + unsigned int pending_kill; + unsigned int pending_disable; + long unsigned int pending_addr; + struct irq_work pending_irq; + struct irq_work pending_disable_irq; + struct callback_head pending_task; + unsigned int pending_work; + struct rcuwait pending_work_wait; + atomic_t event_limit; + struct perf_addr_filters_head addr_filters; + struct perf_addr_filter_range *addr_filter_ranges; + long unsigned int addr_filters_gen; + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); + struct callback_head callback_head; + struct pid_namespace *ns; + u64 id; + atomic64_t lost_samples; + u64 (*clock)(); + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; + struct bpf_prog *prog; + u64 bpf_cookie; + struct trace_event_call *tp_event; + struct event_filter *filter; + struct perf_cgroup *cgrp; + struct list_head sb_list; + __u32 orig_type; +}; + +struct perf_callchain_entry { + __u64 nr; + 
__u64 ip[0]; +}; + +typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + long unsigned int pad; + }; + perf_copy_f copy; + void *data; + u32 size; +}; + +struct perf_raw_record { + struct perf_raw_frag frag; + u32 size; +}; + +struct perf_branch_stack { + __u64 nr; + __u64 hw_idx; + struct perf_branch_entry entries[0]; +}; + +struct perf_cpu_pmu_context; + +struct perf_output_handle; + +struct pmu { + struct list_head entry; + struct module *module; + struct device *dev; + struct device *parent; + const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; + const char *name; + int type; + int capabilities; + unsigned int scope; + int *pmu_disable_count; + struct perf_cpu_pmu_context *cpu_pmu_context; + atomic_t exclusive_cnt; + int task_ctx_nr; + int hrtimer_interval_ms; + unsigned int nr_addr_filters; + void (*pmu_enable)(struct pmu *); + void (*pmu_disable)(struct pmu *); + int (*event_init)(struct perf_event *); + void (*event_mapped)(struct perf_event *, struct mm_struct *); + void (*event_unmapped)(struct perf_event *, struct mm_struct *); + int (*add)(struct perf_event *, int); + void (*del)(struct perf_event *, int); + void (*start)(struct perf_event *, int); + void (*stop)(struct perf_event *, int); + void (*read)(struct perf_event *); + void (*start_txn)(struct pmu *, unsigned int); + int (*commit_txn)(struct pmu *); + void (*cancel_txn)(struct pmu *); + int (*event_idx)(struct perf_event *); + void (*sched_task)(struct perf_event_pmu_context *, bool); + struct kmem_cache *task_ctx_cache; + void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); + void * (*setup_aux)(struct perf_event *, void **, int, bool); + void (*free_aux)(void *); + long int (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, long unsigned int); + int (*addr_filters_validate)(struct list_head *); + void (*addr_filters_sync)(struct perf_event *); + int (*aux_output_match)(struct perf_event *); + bool (*filter)(struct pmu *, int); + int (*check_period)(struct perf_event *, u64); +}; + +struct perf_event_pmu_context { + struct pmu *pmu; + struct perf_event_context *ctx; + struct list_head pmu_ctx_entry; + struct list_head pinned_active; + struct list_head flexible_active; + unsigned int embedded: 1; + unsigned int nr_events; + unsigned int nr_cgroups; + unsigned int nr_freq; + atomic_t refcount; + struct callback_head callback_head; + void *task_ctx_data; + int rotate_necessary; +}; + +struct perf_cpu_pmu_context { + struct perf_event_pmu_context epc; + struct perf_event_pmu_context *task_epc; + struct list_head sched_cb_entry; + int sched_cb_usage; + int active_oncpu; + int exclusive; + raw_spinlock_t hrtimer_lock; + struct hrtimer hrtimer; + ktime_t hrtimer_interval; + unsigned int hrtimer_active; + long: 32; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_buffer *rb; + long unsigned int wakeup; + long unsigned int size; + u64 aux_flags; + union { + void *addr; + long unsigned int head; + }; + int page; +}; + +struct perf_addr_filter_range { + long unsigned int start; + long unsigned int size; +}; + +struct perf_sample_data { + u64 sample_flags; + u64 period; + u64 dyn_size; + u64 type; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + u64 ip; + struct perf_callchain_entry *callchain; + struct perf_raw_record *raw; + 
struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; + union perf_sample_weight weight; + union perf_mem_data_src data_src; + u64 txn; + struct perf_regs regs_user; + struct perf_regs regs_intr; + u64 stack_user_size; + u64 stream_id; + u64 cgroup; + u64 addr; + u64 phys_addr; + u64 data_page_size; + u64 code_page_size; + u64 aux_size; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct perf_cgroup_info; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; + long: 32; +}; + +struct perf_cgroup_info { + u64 time; + u64 timestamp; + u64 timeoffset; + int active; + long: 32; +}; + +struct trace_entry { + short unsigned int type; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; + +struct trace_array; + +struct tracer; + +struct array_buffer; + +struct ring_buffer_iter; + +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + struct array_buffer *array_buffer; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter **buffer_iter; + long unsigned int iter_flags; + void *temp; + unsigned int temp_size; + char *fmt; + unsigned int fmt_size; + atomic_t wait_index; + struct trace_seq tmp_seq; + cpumask_var_t started; + bool closed; + bool snapshot; + struct trace_seq seq; + struct trace_entry *ent; + long unsigned int lost_events; + int leftover; + int ent_size; + int cpu; + long: 32; + u64 ts; + loff_t pos; + long int idx; + long: 32; +}; + +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, + TRACE_TYPE_NO_CONSUME = 3, +}; + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +enum trace_reg { + TRACE_REG_REGISTER = 0, + TRACE_REG_UNREGISTER = 1, + TRACE_REG_PERF_REGISTER = 2, + TRACE_REG_PERF_UNREGISTER = 3, + TRACE_REG_PERF_OPEN = 4, + TRACE_REG_PERF_CLOSE = 5, + TRACE_REG_PERF_ADD = 6, + TRACE_REG_PERF_DEL = 7, +}; + +struct trace_event_fields { + const char *type; + union { + struct { + const char *name; + const int size; + const int align; + const int is_signed; + const int filter_type; + const int len; + }; + int (*define_fields)(struct trace_event_call *); + }; +}; + +struct trace_event_class { + const char *system; + void *probe; + void *perf_probe; + int (*reg)(struct trace_event_call *, enum trace_reg, void *); + struct trace_event_fields *fields_array; + struct list_head * (*get_fields)(struct trace_event_call *); + struct list_head fields; + int (*raw_init)(struct trace_event_call *); +}; + +struct trace_buffer; + +struct trace_event_file; + +struct trace_event_buffer { + struct trace_buffer *buffer; + struct ring_buffer_event *event; + struct trace_event_file *trace_file; + void *entry; + unsigned int trace_ctx; + struct pt_regs *regs; +}; + +struct eventfs_inode; + +struct trace_subsystem_dir; + +struct trace_event_file { + struct list_head list; + struct trace_event_call *event_call; + struct event_filter *filter; + struct eventfs_inode *ei; + struct trace_array *tr; + struct trace_subsystem_dir *system; + struct list_head triggers; + long unsigned int flags; + refcount_t ref; + atomic_t sm_ref; + atomic_t tm_ref; +}; + +enum { + TRACE_EVENT_FL_CAP_ANY = 1, + TRACE_EVENT_FL_NO_SET_FILTER = 2, + TRACE_EVENT_FL_IGNORE_ENABLE = 4, + 
TRACE_EVENT_FL_TRACEPOINT = 8, + TRACE_EVENT_FL_DYNAMIC = 16, + TRACE_EVENT_FL_KPROBE = 32, + TRACE_EVENT_FL_UPROBE = 64, + TRACE_EVENT_FL_EPROBE = 128, + TRACE_EVENT_FL_FPROBE = 256, + TRACE_EVENT_FL_CUSTOM = 512, +}; + +enum { + EVENT_FILE_FL_ENABLED = 1, + EVENT_FILE_FL_RECORDED_CMD = 2, + EVENT_FILE_FL_RECORDED_TGID = 4, + EVENT_FILE_FL_FILTERED = 8, + EVENT_FILE_FL_NO_SET_FILTER = 16, + EVENT_FILE_FL_SOFT_MODE = 32, + EVENT_FILE_FL_SOFT_DISABLED = 64, + EVENT_FILE_FL_TRIGGER_MODE = 128, + EVENT_FILE_FL_TRIGGER_COND = 256, + EVENT_FILE_FL_PID_FILTER = 512, + EVENT_FILE_FL_WAS_ENABLED = 1024, + EVENT_FILE_FL_FREED = 2048, +}; + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING = 1, + FILTER_DYN_STRING = 2, + FILTER_RDYN_STRING = 3, + FILTER_PTR_STRING = 4, + FILTER_TRACE_FN = 5, + FILTER_CPUMASK = 6, + FILTER_COMM = 7, + FILTER_CPU = 8, + FILTER_STACKTRACE = 9, +}; + +struct trace_event_raw_timer_class { + struct trace_entry ent; + void *timer; + char __data[0]; +}; + +struct trace_event_raw_timer_start { + struct trace_entry ent; + void *timer; + void *function; + long unsigned int expires; + long unsigned int bucket_expiry; + long unsigned int now; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_timer_expire_entry { + struct trace_entry ent; + void *timer; + long unsigned int now; + void *function; + long unsigned int baseclk; + char __data[0]; +}; + +struct trace_event_raw_timer_base_idle { + struct trace_entry ent; + bool is_idle; + unsigned int cpu; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_init { + struct trace_entry ent; + void *hrtimer; + clockid_t clockid; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_start { + struct trace_entry ent; + void *hrtimer; + void *function; + s64 expires; + s64 softexpires; + enum hrtimer_mode mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_hrtimer_expire_entry { + struct trace_entry ent; + void *hrtimer; + long: 32; + s64 now; + void *function; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_hrtimer_class { + struct trace_entry ent; + void *hrtimer; + char __data[0]; +}; + +struct trace_event_raw_itimer_state { + struct trace_entry ent; + int which; + long: 32; + long long unsigned int expires; + long int value_sec; + long int value_nsec; + long int interval_sec; + long int interval_nsec; + char __data[0]; +}; + +struct trace_event_raw_itimer_expire { + struct trace_entry ent; + int which; + pid_t pid; + long long unsigned int now; + char __data[0]; +}; + +struct trace_event_data_offsets_timer_class {}; + +struct trace_event_data_offsets_timer_start {}; + +struct trace_event_data_offsets_timer_expire_entry {}; + +struct trace_event_data_offsets_timer_base_idle {}; + +struct trace_event_data_offsets_hrtimer_init {}; + +struct trace_event_data_offsets_hrtimer_start {}; + +struct trace_event_data_offsets_hrtimer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_class {}; + +struct trace_event_data_offsets_itimer_state {}; + +struct trace_event_data_offsets_itimer_expire {}; + +typedef void (*btf_trace_timer_init)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_start)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_entry)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_cancel)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_base_idle)(void *, bool, unsigned 
int); + +typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *); + +typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *); + +typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *); + +typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 * const, long long unsigned int); + +typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, long long unsigned int); + +struct timer_base { + raw_spinlock_t lock; + struct timer_list *running_timer; + long unsigned int clk; + long unsigned int next_expiry; + unsigned int cpu; + bool next_expiry_recalc; + bool is_idle; + bool timers_pending; + long unsigned int pending_map[18]; + struct hlist_head vectors[576]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum { + TASK_COMM_LEN = 16, +}; + +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, + PERF_COUNT_SW_MAX = 12, +}; + +struct prog_entry; + +struct event_filter { + struct prog_entry *prog; + char *filter_string; +}; + +struct trace_array_cpu; + +struct array_buffer { + struct trace_array *tr; + struct trace_buffer *buffer; + struct trace_array_cpu *data; + long: 32; + u64 time_start; + int cpu; + long: 32; +}; + +struct trace_pid_list; + +struct trace_options; + +struct trace_func_repeats; + +struct trace_array { + struct list_head list; + char *name; + long: 32; + struct array_buffer array_buffer; + unsigned int mapped; + long unsigned int range_addr_start; + long unsigned int range_addr_size; + long int text_delta; + long int data_delta; + struct trace_pid_list *filtered_pids; + struct trace_pid_list *filtered_no_pids; + arch_spinlock_t max_lock; + int buffer_disabled; + int sys_refcount_enter; + int sys_refcount_exit; + struct trace_event_file *enter_syscall_files[4467]; + struct trace_event_file *exit_syscall_files[4467]; + int stop_count; + int clock_id; + int nr_topts; + bool clear_trace; + int buffer_percent; + unsigned int n_err_log_entries; + struct tracer *current_trace; + unsigned int trace_flags; + unsigned char trace_flags_index[32]; + unsigned int flags; + raw_spinlock_t start_lock; + const char *system_names; + struct list_head err_log; + struct dentry *dir; + struct dentry *options; + struct dentry *percpu_dir; + struct eventfs_inode *event_dir; + struct trace_options *topts; + struct list_head systems; + struct list_head events; + struct trace_event_file *trace_marker_file; + cpumask_var_t tracing_cpumask; + cpumask_var_t pipe_cpumask; + int ref; + int trace_ref; + int no_filter_buffering_ref; + struct list_head hist_vars; + struct trace_func_repeats *last_func_repeats; + bool ring_buffer_expanded; + long: 32; +}; + +struct tracer_flags; + +struct tracer { + const char *name; + int (*init)(struct trace_array *); + void (*reset)(struct trace_array *); + void (*start)(struct trace_array *); + void (*stop)(struct trace_array *); + int (*update_thresh)(struct trace_array *); + 
void (*open)(struct trace_iterator *); + void (*pipe_open)(struct trace_iterator *); + void (*close)(struct trace_iterator *); + void (*pipe_close)(struct trace_iterator *); + ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *); + ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*print_header)(struct seq_file *); + enum print_line_t (*print_line)(struct trace_iterator *); + int (*set_flag)(struct trace_array *, u32, u32, int); + int (*flag_changed)(struct trace_array *, u32, int); + struct tracer *next; + struct tracer_flags *flags; + int enabled; + bool print_max; + bool allow_instances; + bool noboot; +}; + +enum { + TRACE_EVENT_FL_CAP_ANY_BIT = 0, + TRACE_EVENT_FL_NO_SET_FILTER_BIT = 1, + TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 2, + TRACE_EVENT_FL_TRACEPOINT_BIT = 3, + TRACE_EVENT_FL_DYNAMIC_BIT = 4, + TRACE_EVENT_FL_KPROBE_BIT = 5, + TRACE_EVENT_FL_UPROBE_BIT = 6, + TRACE_EVENT_FL_EPROBE_BIT = 7, + TRACE_EVENT_FL_FPROBE_BIT = 8, + TRACE_EVENT_FL_CUSTOM_BIT = 9, +}; + +enum { + EVENT_FILE_FL_ENABLED_BIT = 0, + EVENT_FILE_FL_RECORDED_CMD_BIT = 1, + EVENT_FILE_FL_RECORDED_TGID_BIT = 2, + EVENT_FILE_FL_FILTERED_BIT = 3, + EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, + EVENT_FILE_FL_SOFT_MODE_BIT = 5, + EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, + EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, + EVENT_FILE_FL_TRIGGER_COND_BIT = 8, + EVENT_FILE_FL_PID_FILTER_BIT = 9, + EVENT_FILE_FL_WAS_ENABLED_BIT = 10, + EVENT_FILE_FL_FREED_BIT = 11, +}; + +struct event_subsystem; + +struct trace_subsystem_dir { + struct list_head list; + struct event_subsystem *subsystem; + struct trace_array *tr; + struct eventfs_inode *ei; + int ref_count; + int nr_events; +}; + +union lower_chunk { + union lower_chunk *next; + long unsigned int data[512]; +}; + +union upper_chunk { + union upper_chunk *next; + union lower_chunk *data[256]; +}; + +struct trace_pid_list { + raw_spinlock_t lock; + struct irq_work refill_irqwork; + union upper_chunk *upper[256]; + union upper_chunk *upper_list; + union lower_chunk *lower_list; + int free_upper_chunks; + int free_lower_chunks; +}; + +struct trace_array_cpu { + atomic_t disabled; + void *buffer_page; + long unsigned int entries; + long unsigned int saved_latency; + long unsigned int critical_start; + long unsigned int critical_end; + long unsigned int critical_sequence; + long unsigned int nice; + long unsigned int policy; + long unsigned int rt_priority; + long unsigned int skipped_entries; + long: 32; + u64 preempt_timestamp; + pid_t pid; + kuid_t uid; + char comm[16]; + bool ignore_pid; + long: 32; +}; + +struct trace_option_dentry; + +struct trace_options { + struct tracer *tracer; + struct trace_option_dentry *topts; +}; + +struct tracer_opt; + +struct trace_option_dentry { + struct tracer_opt *opt; + struct tracer_flags *flags; + struct trace_array *tr; + struct dentry *entry; +}; + +struct trace_func_repeats { + long unsigned int ip; + long unsigned int parent_ip; + long unsigned int count; + long: 32; + u64 ts_last_call; +}; + +struct tracer_opt { + const char *name; + u32 bit; +}; + +struct tracer_flags { + u32 val; + struct tracer_opt *opts; + struct tracer *trace; +}; + +enum trace_iterator_bits { + TRACE_ITER_PRINT_PARENT_BIT = 0, + TRACE_ITER_SYM_OFFSET_BIT = 1, + TRACE_ITER_SYM_ADDR_BIT = 2, + TRACE_ITER_VERBOSE_BIT = 3, + TRACE_ITER_RAW_BIT = 4, + TRACE_ITER_HEX_BIT = 5, + TRACE_ITER_BIN_BIT = 6, + TRACE_ITER_BLOCK_BIT = 7, + TRACE_ITER_FIELDS_BIT = 8, + TRACE_ITER_PRINTK_BIT = 9, 
+ TRACE_ITER_ANNOTATE_BIT = 10, + TRACE_ITER_USERSTACKTRACE_BIT = 11, + TRACE_ITER_SYM_USEROBJ_BIT = 12, + TRACE_ITER_PRINTK_MSGONLY_BIT = 13, + TRACE_ITER_CONTEXT_INFO_BIT = 14, + TRACE_ITER_LATENCY_FMT_BIT = 15, + TRACE_ITER_RECORD_CMD_BIT = 16, + TRACE_ITER_RECORD_TGID_BIT = 17, + TRACE_ITER_OVERWRITE_BIT = 18, + TRACE_ITER_STOP_ON_FREE_BIT = 19, + TRACE_ITER_IRQ_INFO_BIT = 20, + TRACE_ITER_MARKERS_BIT = 21, + TRACE_ITER_EVENT_FORK_BIT = 22, + TRACE_ITER_TRACE_PRINTK_BIT = 23, + TRACE_ITER_PAUSE_ON_TRACE_BIT = 24, + TRACE_ITER_HASH_PTR_BIT = 25, + TRACE_ITER_STACKTRACE_BIT = 26, + TRACE_ITER_LAST_BIT = 27, +}; + +struct event_subsystem { + struct list_head list; + const char *name; + struct event_filter *filter; + int ref_count; +}; + +enum { + TRACE_NOP_OPT_ACCEPT = 1, + TRACE_NOP_OPT_REFUSE = 2, +}; + +typedef __s16 s16; + +typedef __u16 __be16; + +typedef __u32 __be32; + +typedef __u32 __wsum; + +typedef unsigned int slab_flags_t; + +typedef struct { + atomic_t refcnt; +} rcuref_t; + +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; + __u32 attach_type; + long: 32; +}; + +enum bpf_task_fd_type { + BPF_FD_TYPE_RAW_TRACEPOINT = 0, + BPF_FD_TYPE_TRACEPOINT = 1, + BPF_FD_TYPE_KPROBE = 2, + BPF_FD_TYPE_KRETPROBE = 3, + BPF_FD_TYPE_UPROBE = 4, + BPF_FD_TYPE_URETPROBE = 5, +}; + +struct rhashtable; + +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *, u32, u32); + +typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); + +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); + +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +struct bucket_table; + +struct rhashtable { + struct bucket_table *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +struct in6_addr { + union { + __u8 u6_addr8[16]; + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + } in6_u; +}; + +struct bpf_nh_params { + u32 nh_family; + union { + u32 ipv4_nh; + struct in6_addr ipv6_nh; + }; +}; + +struct bpf_redirect_info { + u64 tgt_index; + void *tgt_value; + struct bpf_map *map; + u32 flags; + u32 map_id; + enum bpf_map_type map_type; + struct bpf_nh_params nh; + u32 kern_flags; + long: 32; +}; + +struct bpf_net_context { + struct bpf_redirect_info ri; + struct list_head cpu_map_flush_list; + struct list_head dev_map_flush_list; + struct list_head xskmap_map_flush_list; +}; + +struct uprobe_consumer { + int (*handler)(struct uprobe_consumer *, struct pt_regs *, __u64 *); + int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *, __u64 *); + bool (*filter)(struct uprobe_consumer *, struct mm_struct *); + struct list_head cons_node; + long: 32; + __u64 id; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block *head; +}; + +struct raw_notifier_head { + struct notifier_block *head; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next; + struct hlist_nulls_node **pprev; +}; + +enum node_states { + N_POSSIBLE = 0, + N_ONLINE = 1, + N_NORMAL_MEMORY = 2, + N_HIGH_MEMORY = 2, + N_MEMORY = 3, + N_CPU = 4, + N_GENERIC_INITIATOR = 5, + NR_NODE_STATES = 6, +}; + +typedef __u64 __addrpair; + +typedef __u32 
__portpair; + +typedef struct { + struct net *net; +} possible_net_t; + +struct proto; + +struct inet_timewait_death_row; + +struct sock_common { + union { + __addrpair skc_addrpair; + struct { + __be32 skc_daddr; + __be32 skc_rcv_saddr; + }; + }; + union { + unsigned int skc_hash; + __u16 skc_u16hashes[2]; + }; + union { + __portpair skc_portpair; + struct { + __be16 skc_dport; + __u16 skc_num; + }; + }; + short unsigned int skc_family; + volatile unsigned char skc_state; + unsigned char skc_reuse: 4; + unsigned char skc_reuseport: 1; + unsigned char skc_ipv6only: 1; + unsigned char skc_net_refcnt: 1; + int skc_bound_dev_if; + union { + struct hlist_node skc_bind_node; + struct hlist_node skc_portaddr_node; + }; + struct proto *skc_prot; + possible_net_t skc_net; + struct in6_addr skc_v6_daddr; + struct in6_addr skc_v6_rcv_saddr; + atomic64_t skc_cookie; + union { + long unsigned int skc_flags; + struct sock *skc_listener; + struct inet_timewait_death_row *skc_tw_dr; + }; + int skc_dontcopy_begin[0]; + union { + struct hlist_node skc_node; + struct hlist_nulls_node skc_nulls_node; + }; + short unsigned int skc_tx_queue_mapping; + short unsigned int skc_rx_queue_mapping; + union { + int skc_incoming_cpu; + u32 skc_rcv_wnd; + u32 skc_tw_rcv_nxt; + }; + refcount_t skc_refcnt; + int skc_dontcopy_end[0]; + union { + u32 skc_rxhash; + u32 skc_window_clamp; + u32 skc_tw_snd_nxt; + }; + long: 32; +}; + +struct sk_buff; + +struct sk_buff_list { + struct sk_buff *next; + struct sk_buff *prev; +}; + +struct sk_buff_head { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + }; + struct sk_buff_list list; + }; + __u32 qlen; + spinlock_t lock; +}; + +typedef struct { + spinlock_t slock; + int owned; + wait_queue_head_t wq; +} socket_lock_t; + +typedef u64 netdev_features_t; + +struct sock_cgroup_data { + struct cgroup *cgroup; + u32 classid; +}; + +typedef struct {} netns_tracker; + +struct dst_entry; + +struct sk_filter; + +struct socket_wq; + +struct socket; + +struct sock_reuseport; + +struct sock { + struct sock_common __sk_common; + __u8 __cacheline_group_begin__sock_write_rx[0]; + atomic_t sk_drops; + __s32 sk_peek_off; + struct sk_buff_head sk_error_queue; + struct sk_buff_head sk_receive_queue; + struct { + atomic_t rmem_alloc; + int len; + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; + __u8 __cacheline_group_end__sock_write_rx[0]; + __u8 __cacheline_group_begin__sock_read_rx[0]; + struct dst_entry *sk_rx_dst; + int sk_rx_dst_ifindex; + u32 sk_rx_dst_cookie; + unsigned int sk_ll_usec; + unsigned int sk_napi_id; + u16 sk_busy_poll_budget; + u8 sk_prefer_busy_poll; + u8 sk_userlocks; + int sk_rcvbuf; + struct sk_filter *sk_filter; + union { + struct socket_wq *sk_wq; + struct socket_wq *sk_wq_raw; + }; + void (*sk_data_ready)(struct sock *); + long int sk_rcvtimeo; + int sk_rcvlowat; + __u8 __cacheline_group_end__sock_read_rx[0]; + __u8 __cacheline_group_begin__sock_read_rxtx[0]; + int sk_err; + struct socket *sk_socket; + struct mem_cgroup *sk_memcg; + __u8 __cacheline_group_end__sock_read_rxtx[0]; + __u8 __cacheline_group_begin__sock_write_rxtx[0]; + socket_lock_t sk_lock; + u32 sk_reserved_mem; + int sk_forward_alloc; + u32 sk_tsflags; + __u8 __cacheline_group_end__sock_write_rxtx[0]; + __u8 __cacheline_group_begin__sock_write_tx[0]; + int sk_write_pending; + atomic_t sk_omem_alloc; + int sk_sndbuf; + int sk_wmem_queued; + refcount_t sk_wmem_alloc; + long unsigned int sk_tsq_flags; + union { + struct sk_buff *sk_send_head; + struct rb_root tcp_rtx_queue; + 
}; + struct sk_buff_head sk_write_queue; + u32 sk_dst_pending_confirm; + u32 sk_pacing_status; + struct page_frag sk_frag; + struct timer_list sk_timer; + long unsigned int sk_pacing_rate; + atomic_t sk_zckey; + atomic_t sk_tskey; + __u8 __cacheline_group_end__sock_write_tx[0]; + __u8 __cacheline_group_begin__sock_read_tx[0]; + long unsigned int sk_max_pacing_rate; + long int sk_sndtimeo; + u32 sk_priority; + u32 sk_mark; + struct dst_entry *sk_dst_cache; + long: 32; + netdev_features_t sk_route_caps; + u16 sk_gso_type; + u16 sk_gso_max_segs; + unsigned int sk_gso_max_size; + gfp_t sk_allocation; + u32 sk_txhash; + u8 sk_pacing_shift; + bool sk_use_task_frag; + __u8 __cacheline_group_end__sock_read_tx[0]; + u8 sk_gso_disabled: 1; + u8 sk_kern_sock: 1; + u8 sk_no_check_tx: 1; + u8 sk_no_check_rx: 1; + u8 sk_shutdown; + u16 sk_type; + u16 sk_protocol; + long unsigned int sk_lingertime; + struct proto *sk_prot_creator; + rwlock_t sk_callback_lock; + int sk_err_soft; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; + kuid_t sk_uid; + spinlock_t sk_peer_lock; + int sk_bind_phc; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; + ktime_t sk_stamp; + seqlock_t sk_stamp_seq; + int sk_disconnects; + u8 sk_txrehash; + u8 sk_clockid; + u8 sk_txtime_deadline_mode: 1; + u8 sk_txtime_report_errors: 1; + u8 sk_txtime_unused: 6; + void *sk_user_data; + struct sock_cgroup_data sk_cgrp_data; + void (*sk_state_change)(struct sock *); + void (*sk_write_space)(struct sock *); + void (*sk_error_report)(struct sock *); + int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); + void (*sk_destruct)(struct sock *); + struct sock_reuseport *sk_reuseport_cb; + struct bpf_local_storage *sk_bpf_storage; + struct callback_head sk_rcu; + netns_tracker ns_tracker; + struct xarray sk_user_frags; + long: 32; +}; + +typedef struct { + union { + void *kernel; + void *user; + }; + bool is_kernel: 1; +} sockptr_t; + +struct btf_param { + __u32 name_off; + __u32 type; +}; + +struct ref_tracker_dir {}; + +struct prot_inuse; + +struct netns_core { + struct ctl_table_header *sysctl_hdr; + int sysctl_somaxconn; + int sysctl_optmem_max; + u8 sysctl_txrehash; + u8 sysctl_tstamp_allow_data; + struct prot_inuse *prot_inuse; + struct cpumask *rps_default_mask; +}; + +struct ipstats_mib; + +struct tcp_mib; + +struct linux_mib; + +struct udp_mib; + +struct icmp_mib; + +struct icmpmsg_mib; + +struct icmpv6_mib; + +struct icmpv6msg_mib; + +struct netns_mib { + struct ipstats_mib *ip_statistics; + struct ipstats_mib *ipv6_statistics; + struct tcp_mib *tcp_statistics; + struct linux_mib *net_statistics; + struct udp_mib *udp_statistics; + struct udp_mib *udp_stats_in6; + struct udp_mib *udplite_statistics; + struct udp_mib *udplite_stats_in6; + struct icmp_mib *icmp_statistics; + struct icmpmsg_mib *icmpmsg_statistics; + struct icmpv6_mib *icmpv6_statistics; + struct icmpv6msg_mib *icmpv6msg_statistics; + struct proc_dir_entry *proc_net_devsnmp6; +}; + +struct netns_packet { + struct mutex sklist_lock; + struct hlist_head sklist; +}; + +struct unix_table { + spinlock_t *locks; + struct hlist_head *buckets; +}; + +struct netns_unix { + struct unix_table table; + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; + +struct netns_nexthop { + struct rb_root rb_root; + struct hlist_head *devhash; + unsigned int seq; + u32 last_id_allocated; + struct blocking_notifier_head notifier_chain; +}; + +struct inet_hashinfo; + +struct inet_timewait_death_row { + refcount_t tw_refcount; + long: 32; + long: 32; + long: 32; + long: 32; 
+ long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct inet_hashinfo *hashinfo; + int sysctl_max_tw_buckets; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct local_ports { + u32 range; + bool warned; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + +typedef struct { + u64 key[2]; +} siphash_key_t; + +struct udp_table; + +struct ipv4_devconf; + +struct ip_ra_chain; + +struct inet_peer_base; + +struct fqdir; + +struct tcp_congestion_ops; + +struct tcp_fastopen_context; + +struct fib_notifier_ops; + +struct netns_ipv4 { + __u8 __cacheline_group_begin__netns_ipv4_read_tx[0]; + u8 sysctl_tcp_early_retrans; + u8 sysctl_tcp_tso_win_divisor; + u8 sysctl_tcp_tso_rtt_log; + u8 sysctl_tcp_autocorking; + int sysctl_tcp_min_snd_mss; + unsigned int sysctl_tcp_notsent_lowat; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_min_rtt_wlen; + int sysctl_tcp_wmem[3]; + u8 sysctl_ip_fwd_use_pmtu; + __u8 __cacheline_group_end__netns_ipv4_read_tx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_txrx[0]; + u8 sysctl_tcp_moderate_rcvbuf; + __u8 __cacheline_group_end__netns_ipv4_read_txrx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_rx[0]; + u8 sysctl_ip_early_demux; + u8 sysctl_tcp_early_demux; + u8 sysctl_tcp_l3mdev_accept; + int sysctl_tcp_reordering; + int sysctl_tcp_rmem[3]; + __u8 __cacheline_group_end__netns_ipv4_read_rx[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct inet_timewait_death_row tcp_death_row; + struct udp_table *udp_table; + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; + struct ip_ra_chain *ra_chain; + struct mutex ra_mutex; + bool fib_has_custom_local_routes; + bool fib_offload_disabled; + u8 sysctl_tcp_shrink_window; + struct hlist_head *fib_table_hash; + struct sock *fibnl; + struct sock *mc_autojoin_sk; + struct inet_peer_base *peers; + struct fqdir *fqdir; + u8 sysctl_icmp_echo_ignore_all; + u8 sysctl_icmp_echo_enable_probe; + u8 sysctl_icmp_echo_ignore_broadcasts; + u8 sysctl_icmp_ignore_bogus_error_responses; + u8 sysctl_icmp_errors_use_inbound_ifaddr; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + int sysctl_icmp_msgs_per_sec; + int sysctl_icmp_msgs_burst; + atomic_t icmp_global_credit; + u32 icmp_global_stamp; + u32 ip_rt_min_pmtu; + int ip_rt_mtu_expires; + int ip_rt_min_advmss; + struct local_ports ip_local_ports; + u8 sysctl_tcp_ecn; + u8 sysctl_tcp_ecn_fallback; + u8 sysctl_ip_default_ttl; + u8 sysctl_ip_no_pmtu_disc; + u8 sysctl_ip_fwd_update_priority; + u8 sysctl_ip_nonlocal_bind; + u8 sysctl_ip_autobind_reuse; + u8 sysctl_ip_dynaddr; + u8 
sysctl_udp_early_demux; + u8 sysctl_nexthop_compat_mode; + u8 sysctl_fwmark_reflect; + u8 sysctl_tcp_fwmark_accept; + u8 sysctl_tcp_mtu_probing; + int sysctl_tcp_mtu_probe_floor; + int sysctl_tcp_base_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; + int sysctl_tcp_keepalive_time; + int sysctl_tcp_keepalive_intvl; + u8 sysctl_tcp_keepalive_probes; + u8 sysctl_tcp_syn_retries; + u8 sysctl_tcp_synack_retries; + u8 sysctl_tcp_syncookies; + u8 sysctl_tcp_migrate_req; + u8 sysctl_tcp_comp_sack_nr; + u8 sysctl_tcp_backlog_ack_defer; + u8 sysctl_tcp_pingpong_thresh; + u8 sysctl_tcp_retries1; + u8 sysctl_tcp_retries2; + u8 sysctl_tcp_orphan_retries; + u8 sysctl_tcp_tw_reuse; + int sysctl_tcp_fin_timeout; + u8 sysctl_tcp_sack; + u8 sysctl_tcp_window_scaling; + u8 sysctl_tcp_timestamps; + int sysctl_tcp_rto_min_us; + u8 sysctl_tcp_recovery; + u8 sysctl_tcp_thin_linear_timeouts; + u8 sysctl_tcp_slow_start_after_idle; + u8 sysctl_tcp_retrans_collapse; + u8 sysctl_tcp_stdurg; + u8 sysctl_tcp_rfc1337; + u8 sysctl_tcp_abort_on_overflow; + u8 sysctl_tcp_fack; + int sysctl_tcp_max_reordering; + int sysctl_tcp_adv_win_scale; + u8 sysctl_tcp_dsack; + u8 sysctl_tcp_app_win; + u8 sysctl_tcp_frto; + u8 sysctl_tcp_nometrics_save; + u8 sysctl_tcp_no_ssthresh_metrics_save; + u8 sysctl_tcp_workaround_signed_windows; + int sysctl_tcp_challenge_ack_limit; + u8 sysctl_tcp_min_tso_segs; + u8 sysctl_tcp_reflect_tos; + int sysctl_tcp_invalid_ratelimit; + int sysctl_tcp_pacing_ss_ratio; + int sysctl_tcp_pacing_ca_ratio; + unsigned int sysctl_tcp_child_ehash_entries; + long unsigned int sysctl_tcp_comp_sack_delay_ns; + long unsigned int sysctl_tcp_comp_sack_slack_ns; + int sysctl_max_syn_backlog; + int sysctl_tcp_fastopen; + const struct tcp_congestion_ops *tcp_congestion_control; + struct tcp_fastopen_context *tcp_fastopen_ctx; + unsigned int sysctl_tcp_fastopen_blackhole_timeout; + atomic_t tfo_active_disable_times; + long unsigned int tfo_active_disable_stamp; + u32 tcp_challenge_timestamp; + u32 tcp_challenge_count; + u8 sysctl_tcp_plb_enabled; + u8 sysctl_tcp_plb_idle_rehash_rounds; + u8 sysctl_tcp_plb_rehash_rounds; + u8 sysctl_tcp_plb_suspend_rto_sec; + int sysctl_tcp_plb_cong_thresh; + int sysctl_udp_wmem_min; + int sysctl_udp_rmem_min; + u8 sysctl_fib_notify_on_flag_change; + u8 sysctl_tcp_syn_linear_timeouts; + u8 sysctl_igmp_llm_reports; + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; + atomic_t dev_addr_genid; + unsigned int sysctl_udp_child_hash_entries; + long unsigned int *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; + struct fib_notifier_ops *notifier_ops; + unsigned int fib_seq; + struct fib_notifier_ops *ipmr_notifier_ops; + unsigned int ipmr_seq; + atomic_t rt_genid; + siphash_key_t ip_id_key; + struct hlist_head *inet_addr_lst; + struct delayed_work addr_chk_work; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct neighbour; + +struct dst_ops { + short unsigned int family; + unsigned int gc_thresh; + void (*gc)(struct dst_ops *); + struct dst_entry * (*check)(struct dst_entry *, __u32); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); + void (*destroy)(struct dst_entry 
*); + void (*ifdown)(struct dst_entry *, struct net_device *); + void (*negative_advice)(struct sock *, struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); + void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); + int (*local_out)(struct net *, struct sock *, struct sk_buff *); + struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); + void (*confirm_neigh)(const struct dst_entry *, const void *); + struct kmem_cache *kmem_cachep; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct percpu_counter pcpuc_entries; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct netns_sysctl_ipv6 { + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; + int flush_delay; + int ip6_rt_max_size; + int ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int ip6_rt_min_advmss; + u32 multipath_hash_fields; + u8 multipath_hash_policy; + u8 bindv6only; + u8 flowlabel_consistency; + u8 auto_flowlabels; + int icmpv6_time; + u8 icmpv6_echo_ignore_all; + u8 icmpv6_echo_ignore_multicast; + u8 icmpv6_echo_ignore_anycast; + long unsigned int icmpv6_ratemask[8]; + long unsigned int *icmpv6_ratemask_ptr; + u8 anycast_src_echo_reply; + u8 ip_nonlocal_bind; + u8 fwmark_reflect; + u8 flowlabel_state_ranges; + int idgen_retries; + int idgen_delay; + int flowlabel_reflect; + int max_dst_opts_cnt; + int max_hbh_opts_cnt; + int max_dst_opts_len; + int max_hbh_opts_len; + int seg6_flowlabel; + u32 ioam6_id; + u64 ioam6_id_wide; + u8 skip_notify_on_dev_down; + u8 fib_notify_on_flag_change; + u8 icmpv6_error_anycast_as_unicast; + long: 32; +}; + +struct ipv6_devconf; + +struct fib6_info; + +struct rt6_info; + +struct rt6_statistics; + +struct fib6_table; + +struct seg6_pernet_data; + +struct ioam6_pernet_data; + +struct netns_ipv6 { + struct dst_ops ip6_dst_ops; + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; + struct fqdir *fqdir; + struct fib6_info *fib6_null_entry; + struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; + atomic_t ip6_rt_gc_expire; + long unsigned int ip6_rt_last_gc; + unsigned char flowlabel_has_excl; + struct sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; + struct sock *mc_autojoin_sk; + struct hlist_head *inet6_addr_lst; + spinlock_t addrconf_hash_lock; + struct delayed_work addr_chk_work; + atomic_t dev_addr_genid; + atomic_t fib6_sernum; + struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; + struct fib_notifier_ops *ip6mr_notifier_ops; + unsigned int ipmr_seq; + struct { + struct hlist_head head; + spinlock_t 
lock; + u32 seq; + } ip6addrlbl_table; + struct ioam6_pernet_data *ioam6_data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nf_logger; + +struct nf_hook_entries; + +struct netns_nf { + struct proc_dir_entry *proc_netfilter; + const struct nf_logger *nf_loggers[11]; + struct ctl_table_header *nf_log_dir_header; + struct nf_hook_entries *hooks_ipv4[5]; + struct nf_hook_entries *hooks_ipv6[5]; + struct nf_hook_entries *hooks_bridge[5]; + unsigned int defrag_ipv4_users; + unsigned int defrag_ipv6_users; +}; + +struct nf_ct_event_notifier; + +struct nf_generic_net { + unsigned int timeout; +}; + +struct nf_tcp_net { + unsigned int timeouts[14]; + u8 tcp_loose; + u8 tcp_be_liberal; + u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; +}; + +struct nf_udp_net { + unsigned int timeouts[2]; +}; + +struct nf_icmp_net { + unsigned int timeout; +}; + +struct nf_ip_net { + struct nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; +}; + +struct ip_conntrack_stat; + +struct netns_ct { + u8 sysctl_log_invalid; + u8 sysctl_events; + u8 sysctl_acct; + u8 sysctl_tstamp; + u8 sysctl_checksum; + struct ip_conntrack_stat *stat; + struct nf_ct_event_notifier *nf_conntrack_event_cb; + struct nf_ip_net nf_ct_proto; +}; + +struct netns_nftables { + u8 gencursor; +}; + +struct netns_bpf { + struct bpf_prog_array *run_array[2]; + struct bpf_prog *progs[2]; + struct list_head links[2]; +}; + +struct uevent_sock; + +struct net_generic; + +struct net { + refcount_t passive; + spinlock_t rules_mod_lock; + unsigned int dev_base_seq; + u32 ifindex; + spinlock_t nsid_lock; + atomic_t fnhe_genid; + struct list_head list; + struct list_head exit_list; + struct llist_node cleanup_list; + struct key_tag *key_domain; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct idr netns_ids; + struct ns_common ns; + struct ref_tracker_dir refcnt_tracker; + struct ref_tracker_dir notrefcnt_tracker; + struct list_head dev_base_head; + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct ctl_table_set sysctls; + struct sock *rtnl; + struct sock *genl_sock; + struct uevent_sock *uevent_sock; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; + struct xarray dev_by_index; + struct raw_notifier_head netdev_chain; + u32 hash_mix; + struct net_device *loopback_dev; + struct list_head rules_ops; + struct netns_core core; + struct netns_mib mib; + struct netns_packet packet; + struct netns_unix unx; + struct netns_nexthop nexthop; + long: 32; + long: 32; + long: 32; + struct netns_ipv4 ipv4; + struct netns_ipv6 ipv6; + struct netns_nf nf; + struct netns_ct ct; + struct netns_nftables nft; + struct net_generic *gen; + struct netns_bpf bpf; + u64 net_cookie; + struct sock *diag_nlsk; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef struct { + u64 v; +} u64_stats_t; + +typedef unsigned int (*bpf_func_t)(const void *, const struct bpf_insn *); + +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); + int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); + int 
(*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); + int (*map_delete_elem)(struct bpf_offloaded_map *, void *); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; + long: 32; +}; + +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + +enum rx_handler_result { + RX_HANDLER_CONSUMED = 0, + RX_HANDLER_ANOTHER = 1, + RX_HANDLER_EXACT = 2, + RX_HANDLER_PASS = 3, +}; + +typedef enum rx_handler_result rx_handler_result_t; + +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); + +typedef u32 xdp_features_t; + +struct net_device_stats { + union { + long unsigned int rx_packets; + atomic_long_t __rx_packets; + }; + union { + long unsigned int tx_packets; + atomic_long_t __tx_packets; + }; + union { + long unsigned int rx_bytes; + atomic_long_t __rx_bytes; + }; + union { + long unsigned int tx_bytes; + atomic_long_t __tx_bytes; + }; + union { + long unsigned int rx_errors; + atomic_long_t __rx_errors; + }; + union { + long unsigned int tx_errors; + atomic_long_t __tx_errors; + }; + union { + long unsigned int rx_dropped; + atomic_long_t __rx_dropped; + }; + union { + long unsigned int tx_dropped; + atomic_long_t __tx_dropped; + }; + union { + long unsigned int multicast; + atomic_long_t __multicast; + }; + union { + long unsigned int collisions; + atomic_long_t __collisions; + }; + union { + long unsigned int rx_length_errors; + atomic_long_t __rx_length_errors; + }; + union { + long unsigned int rx_over_errors; + atomic_long_t __rx_over_errors; + }; + union { + long unsigned int rx_crc_errors; + atomic_long_t __rx_crc_errors; + }; + union { + long unsigned int rx_frame_errors; + atomic_long_t __rx_frame_errors; + }; + union { + long unsigned int rx_fifo_errors; + atomic_long_t __rx_fifo_errors; + }; + union { + long unsigned int rx_missed_errors; + atomic_long_t __rx_missed_errors; + }; + union { + long unsigned int tx_aborted_errors; + atomic_long_t __tx_aborted_errors; + }; + union { + long unsigned int tx_carrier_errors; + atomic_long_t __tx_carrier_errors; + }; + union { + long unsigned int tx_fifo_errors; + atomic_long_t __tx_fifo_errors; + }; + union { + long unsigned int tx_heartbeat_errors; + atomic_long_t __tx_heartbeat_errors; + }; + union { + long unsigned int tx_window_errors; + atomic_long_t __tx_window_errors; + }; + union { + long unsigned int rx_compressed; + atomic_long_t __rx_compressed; + }; + union { + long unsigned int tx_compressed; + atomic_long_t __tx_compressed; + }; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; + struct rb_root tree; +}; + +enum netdev_ml_priv_type { + ML_PRIV_NONE = 0, + ML_PRIV_CAN = 1, +}; + +enum netdev_stat_type { + NETDEV_PCPU_STAT_NONE = 0, + NETDEV_PCPU_STAT_LSTATS = 1, + NETDEV_PCPU_STAT_TSTATS = 2, + NETDEV_PCPU_STAT_DSTATS = 3, +}; + +struct sfp_bus; + +struct udp_tunnel_nic; + +struct bpf_xdp_link; + +struct bpf_xdp_entity { + struct bpf_prog *prog; + struct bpf_xdp_link *link; +}; + +typedef struct {} netdevice_tracker; + +struct net_device_ops; + +struct header_ops; + +struct netdev_queue; + +struct xps_dev_maps; + +struct bpf_mprog_entry; + +struct pcpu_lstats; + +struct pcpu_sw_netstats; + +struct pcpu_dstats; + +struct inet6_dev; + +struct netdev_rx_queue; + +struct netdev_name_node; + +struct dev_ifalias; + +struct xdp_metadata_ops; + +struct xsk_tx_metadata_ops; + +struct net_device_core_stats; + +struct ethtool_ops; + +struct ndisc_ops; + +struct 
in_device; + +struct cpu_rmap; + +struct Qdisc; + +struct xdp_dev_bulk_queue; + +struct rtnl_link_ops; + +struct netdev_stat_ops; + +struct netdev_queue_mgmt_ops; + +struct phy_link_topology; + +struct phy_device; + +struct udp_tunnel_nic_info; + +struct ethtool_netdev_state; + +struct rtnl_hw_stats64; + +struct devlink_port; + +struct dim_irq_moder; + +struct napi_config; + +struct net_device { + __u8 __cacheline_group_begin__net_device_read_tx[0]; + union { + struct { + long unsigned int priv_flags: 32; + long unsigned int lltx: 1; + }; + struct { + long unsigned int priv_flags: 32; + long unsigned int lltx: 1; + } priv_flags_fast; + }; + const struct net_device_ops *netdev_ops; + const struct header_ops *header_ops; + struct netdev_queue *_tx; + long: 32; + netdev_features_t gso_partial_features; + unsigned int real_num_tx_queues; + unsigned int gso_max_size; + unsigned int gso_ipv4_max_size; + u16 gso_max_segs; + s16 num_tc; + unsigned int mtu; + short unsigned int needed_headroom; + struct netdev_tc_txq tc_to_txq[16]; + struct xps_dev_maps *xps_maps[2]; + struct nf_hook_entries *nf_hooks_egress; + struct bpf_mprog_entry *tcx_egress; + __u8 __cacheline_group_end__net_device_read_tx[0]; + __u8 __cacheline_group_begin__net_device_read_txrx[0]; + union { + struct pcpu_lstats *lstats; + struct pcpu_sw_netstats *tstats; + struct pcpu_dstats *dstats; + }; + long unsigned int state; + unsigned int flags; + short unsigned int hard_header_len; + netdev_features_t features; + struct inet6_dev *ip6_ptr; + __u8 __cacheline_group_end__net_device_read_txrx[0]; + __u8 __cacheline_group_begin__net_device_read_rx[0]; + struct bpf_prog *xdp_prog; + struct list_head ptype_specific; + int ifindex; + unsigned int real_num_rx_queues; + struct netdev_rx_queue *_rx; + unsigned int gro_max_size; + unsigned int gro_ipv4_max_size; + rx_handler_func_t *rx_handler; + void *rx_handler_data; + possible_net_t nd_net; + struct bpf_mprog_entry *tcx_ingress; + __u8 __cacheline_group_end__net_device_read_rx[0]; + char name[16]; + struct netdev_name_node *name_node; + struct dev_ifalias *ifalias; + long unsigned int mem_end; + long unsigned int mem_start; + long unsigned int base_addr; + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + struct list_head close_list; + struct list_head ptype_all; + struct { + struct list_head upper; + struct list_head lower; + } adj_list; + xdp_features_t xdp_features; + const struct xdp_metadata_ops *xdp_metadata_ops; + const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; + short unsigned int gflags; + short unsigned int needed_tailroom; + netdev_features_t hw_features; + netdev_features_t wanted_features; + netdev_features_t vlan_features; + netdev_features_t hw_enc_features; + netdev_features_t mpls_features; + unsigned int min_mtu; + unsigned int max_mtu; + short unsigned int type; + unsigned char min_header_len; + unsigned char name_assign_type; + int group; + struct net_device_stats stats; + struct net_device_core_stats *core_stats; + atomic_t carrier_up_count; + atomic_t carrier_down_count; + const struct ethtool_ops *ethtool_ops; + const struct ndisc_ops *ndisc_ops; + unsigned int operstate; + unsigned char link_mode; + unsigned char if_port; + unsigned char dma; + unsigned char perm_addr[32]; + unsigned char addr_assign_type; + unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; + short unsigned int neigh_priv_len; + short unsigned int dev_id; + short unsigned int dev_port; + int irq; + u32 priv_len; + 
spinlock_t addr_list_lock; + struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + struct kset *queues_kset; + unsigned int promiscuity; + unsigned int allmulti; + bool uc_promisc; + struct in_device *ip_ptr; + struct hlist_head fib_nh_head; + const unsigned char *dev_addr; + unsigned int num_rx_queues; + unsigned int xdp_zc_max_segs; + struct netdev_queue *ingress_queue; + struct nf_hook_entries *nf_hooks_ingress; + unsigned char broadcast[32]; + struct cpu_rmap *rx_cpu_rmap; + struct hlist_node index_hlist; + unsigned int num_tx_queues; + struct Qdisc *qdisc; + unsigned int tx_queue_len; + spinlock_t tx_global_lock; + struct xdp_dev_bulk_queue *xdp_bulkq; + struct timer_list watchdog_timer; + int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; + int *pcpu_refcnt; + struct ref_tracker_dir refcnt_tracker; + struct list_head link_watch_list; + u8 reg_state; + bool dismantle; + enum { + RTNL_LINK_INITIALIZED = 0, + RTNL_LINK_INITIALIZING = 1, + } rtnl_link_state: 16; + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *); + void *ml_priv; + enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type: 8; + long: 32; + struct device dev; + const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_rx_queue_group; + const struct rtnl_link_ops *rtnl_link_ops; + const struct netdev_stat_ops *stat_ops; + const struct netdev_queue_mgmt_ops *queue_mgmt_ops; + unsigned int tso_max_size; + u16 tso_max_segs; + u8 prio_tc_map[16]; + struct phy_link_topology *link_topo; + struct phy_device *phydev; + struct sfp_bus *sfp_bus; + struct lock_class_key *qdisc_tx_busylock; + bool proto_down; + bool threaded; + long unsigned int see_all_hwtstamp_requests: 1; + long unsigned int change_proto_down: 1; + long unsigned int netns_local: 1; + long unsigned int fcoe_mtu: 1; + struct list_head net_notifier_list; + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; + struct ethtool_netdev_state *ethtool; + struct bpf_xdp_entity xdp_state[3]; + u8 dev_addr_shadow[32]; + netdevice_tracker linkwatch_dev_tracker; + netdevice_tracker watchdog_dev_tracker; + netdevice_tracker dev_registered_tracker; + struct rtnl_hw_stats64 *offload_xstats_l3; + struct devlink_port *devlink_port; + struct hlist_head page_pools; + struct dim_irq_moder *irq_moder; + u64 max_pacing_offload_horizon; + struct napi_config *napi_config; + long unsigned int gro_flush_timeout; + u32 napi_defer_hard_irqs; + struct mutex lock; + struct hlist_head neighbours[2]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u8 priv[0]; +}; + +struct bpf_prog_stats { + u64_stats_t cnt; + u64_stats_t nsecs; + u64_stats_t misses; + struct u64_stats_sync syncp; + long: 32; +}; + +struct sock_fprog_kern { + u16 len; + struct sock_filter *filter; +}; + +struct bpf_storage_buffer; + +struct bpf_cgroup_storage_map; + +struct bpf_cgroup_storage { + union { + struct bpf_storage_buffer *buf; + void *percpu_buf; + }; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list_map; + struct list_head list_cg; + struct rb_node node; + struct callback_head rcu; + long: 
32; +}; + +struct bpf_trace_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + bool is_uprobe; + long: 32; +}; + +typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *, const void *); + +typedef unsigned char *sk_buff_data_t; + +struct skb_ext; + +struct sk_buff { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + union { + struct net_device *dev; + long unsigned int dev_scratch; + }; + }; + struct rb_node rbnode; + struct list_head list; + struct llist_node ll_node; + }; + struct sock *sk; + union { + ktime_t tstamp; + u64 skb_mstamp_ns; + }; + char cb[48]; + union { + struct { + long unsigned int _skb_refdst; + void (*destructor)(struct sk_buff *); + }; + struct list_head tcp_tsorted_anchor; + long unsigned int _sk_redir; + }; + long unsigned int _nfct; + unsigned int len; + unsigned int data_len; + __u16 mac_len; + __u16 hdr_len; + __u16 queue_mapping; + __u8 __cloned_offset[0]; + __u8 cloned: 1; + __u8 nohdr: 1; + __u8 fclone: 2; + __u8 peeked: 1; + __u8 head_frag: 1; + __u8 pfmemalloc: 1; + __u8 pp_recycle: 1; + __u8 active_extensions; + union { + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + }; + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + 
__be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + } headers; + }; + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head; + unsigned char *data; + unsigned int truesize; + refcount_t users; + struct skb_ext *extensions; + long: 32; +}; + +struct dql { + unsigned int num_queued; + unsigned int adj_limit; + unsigned int last_obj_cnt; + short unsigned int stall_thrs; + long unsigned int history_head; + long unsigned int history[4]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + unsigned int limit; + unsigned int num_completed; + unsigned int prev_ovlimit; + unsigned int prev_num_queued; + unsigned int prev_last_obj_cnt; + unsigned int lowest_slack; + long unsigned int slack_start_time; + unsigned int max_limit; + unsigned int min_limit; + unsigned int slack_hold_time; + short unsigned int stall_max; + long unsigned int last_reap; + long unsigned int stall_cnt; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct prot_inuse { + int all; + int val[64]; +}; + +struct ipstats_mib { + u64 mibs[38]; + struct u64_stats_sync syncp; + long: 32; +}; + +struct icmp_mib { + long unsigned int mibs[30]; +}; + +struct icmpmsg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6_mib { + long unsigned int mibs[7]; +}; + +struct icmpv6msg_mib { + atomic_long_t mibs[512]; +}; + +struct tcp_mib { + long unsigned int mibs[16]; +}; + +struct udp_mib { + long unsigned int mibs[10]; +}; + +struct linux_mib { + long unsigned int mibs[132]; +}; + +struct inet_frags; + +struct fqdir { + long int high_thresh; + long int low_thresh; + int timeout; + int max_dist; + struct inet_frags *f; + struct net *net; + bool dead; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct rhashtable rhashtable; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_long_t mem; + struct work_struct destroy_work; + struct llist_node free_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct inet_frag_queue; + +struct inet_frags { + unsigned int qsize; + void (*constructor)(struct inet_frag_queue *, const void *); + void (*destructor)(struct inet_frag_queue *); + void (*frag_expire)(struct timer_list *); + struct kmem_cache *frags_cachep; + const char *frags_cache_name; + struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; +}; + +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 
user; + __be32 id; + u32 iif; +}; + +struct inet_frag_queue { + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; + struct timer_list timer; + spinlock_t lock; + refcount_t refcnt; + struct rb_root rb_fragments; + struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; + ktime_t stamp; + int len; + int meat; + u8 tstamp_type; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; + struct callback_head rcu; +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, +}; + +struct ack_sample; + +struct rate_sample; + +union tcp_cc_info; + +struct tcp_congestion_ops { + u32 (*ssthresh)(struct sock *); + void (*cong_avoid)(struct sock *, u32, u32); + void (*set_state)(struct sock *, u8); + void (*cwnd_event)(struct sock *, enum tcp_ca_event); + void (*in_ack_event)(struct sock *, u32); + void (*pkts_acked)(struct sock *, const struct ack_sample *); + u32 (*min_tso_segs)(struct sock *); + void (*cong_control)(struct sock *, u32, int, const struct rate_sample *); + u32 (*undo_cwnd)(struct sock *); + u32 (*sndbuf_expand)(struct sock *); + size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); + char name[16]; + struct module *owner; + struct list_head list; + u32 key; + u32 flags; + void (*init)(struct sock *); + void (*release)(struct sock *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct netlink_ext_ack; + +struct fib_notifier_ops { + int family; + struct list_head list; + unsigned int (*fib_seq_read)(const struct net *); + int (*fib_dump)(struct net *, struct notifier_block *, struct netlink_ext_ack *); + struct module *owner; + struct callback_head rcu; +}; + +struct lwtunnel_state; + +struct uncached_list; + +struct dst_entry { + struct net_device *dev; + struct dst_ops *ops; + long unsigned int _metrics; + long unsigned int expires; + void *__pad1; + int (*input)(struct sk_buff *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + short unsigned int flags; + short int obsolete; + short unsigned int header_len; + short unsigned int trailer_len; + int __use; + long unsigned int lastuse; + struct callback_head callback_head; + short int error; + short int __pad; + __u32 tclassid; + struct lwtunnel_state *lwtstate; + rcuref_t __rcuref; + netdevice_tracker dev_tracker; + struct list_head rt_uncached; + struct uncached_list *rt_uncached_list; +}; + +struct hh_cache { + unsigned int hh_len; + seqlock_t hh_lock; + long unsigned int hh_data[24]; +}; + +struct neigh_table; + +struct neigh_parms; + +struct neigh_ops; + +struct neighbour { + struct hlist_node hash; + struct hlist_node dev_list; + struct neigh_table *tbl; + struct neigh_parms *parms; + long unsigned int confirmed; + long unsigned int updated; + rwlock_t lock; + refcount_t refcnt; + unsigned int arp_queue_len_bytes; + struct sk_buff_head arp_queue; + struct timer_list timer; + long unsigned int used; + atomic_t probes; + u8 nud_state; + u8 type; + u8 dead; + u8 protocol; + u32 flags; + seqlock_t ha_lock; + long: 32; + unsigned char ha[32]; + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); + const struct neigh_ops *ops; + struct list_head gc_list; + struct list_head managed_list; + struct callback_head rcu; + struct net_device *dev; + netdevice_tracker dev_tracker; + u8 primary_key[0]; +}; + +typedef short 
unsigned int __kernel_sa_family_t; + +typedef __kernel_sa_family_t sa_family_t; + +struct sockaddr { + sa_family_t sa_family; + union { + char sa_data_min[14]; + struct { + struct {} __empty_sa_data; + char sa_data[0]; + }; + }; +}; + +struct ubuf_info; + +struct msghdr { + void *msg_name; + int msg_namelen; + int msg_inq; + long: 32; + struct iov_iter msg_iter; + union { + void *msg_control; + void *msg_control_user; + }; + bool msg_control_is_user: 1; + bool msg_get_inq: 1; + unsigned int msg_flags; + __kernel_size_t msg_controllen; + struct kiocb *msg_iocb; + struct ubuf_info *msg_ubuf; + int (*sg_from_iter)(struct sk_buff *, struct iov_iter *, size_t); + long: 32; +}; + +struct ubuf_info_ops; + +struct ubuf_info { + const struct ubuf_info_ops *ops; + refcount_t refcnt; + u8 flags; +}; + +enum nf_log_type { + NF_LOG_TYPE_LOG = 0, + NF_LOG_TYPE_ULOG = 1, + NF_LOG_TYPE_MAX = 2, +}; + +typedef u8 u_int8_t; + +struct nf_loginfo; + +typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); + +struct nf_logger { + char *name; + enum nf_log_type type; + nf_logfn *logfn; + struct module *me; +}; + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int insert; + unsigned int insert_failed; + unsigned int clash_resolve; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; + unsigned int chaintoolong; +}; + +struct tc_stats { + __u64 bytes; + __u32 packets; + __u32 drops; + __u32 overlimits; + __u32 bps; + __u32 pps; + __u32 qlen; + __u32 backlog; + long: 32; +}; + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short int cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +struct skb_shared_hwtstamps { + union { + ktime_t hwtstamp; + void *netdev_data; + }; +}; + +struct ubuf_info_ops { + void (*complete)(struct sk_buff *, struct ubuf_info *, bool); + int (*link_skb)(struct sk_buff *, struct ubuf_info *); +}; + +struct skb_ext { + refcount_t refcnt; + u8 offset[1]; + u8 chunks; + long: 0; + char data[0]; +}; + +typedef enum { + SS_FREE = 0, + SS_UNCONNECTED = 1, + SS_CONNECTING = 2, + SS_CONNECTED = 3, + SS_DISCONNECTING = 4, +} socket_state; + +struct socket_wq { + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + long unsigned int flags; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct proto_ops; + +struct socket { + socket_state state; + short int type; + long unsigned int flags; + struct file *file; + struct sock *sk; + const struct proto_ops *ops; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct socket_wq wq; +}; + +typedef struct { + size_t written; + size_t count; + union { + char *buf; + void *data; + } arg; + int error; +} 
read_descriptor_t; + +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); + +typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *); + +struct proto_accept_arg; + +struct proto_ops { + int family; + struct module *owner; + int (*release)(struct socket *); + int (*bind)(struct socket *, struct sockaddr *, int); + int (*connect)(struct socket *, struct sockaddr *, int, int); + int (*socketpair)(struct socket *, struct socket *); + int (*accept)(struct socket *, struct socket *, struct proto_accept_arg *); + int (*getname)(struct socket *, struct sockaddr *, int); + __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); + int (*ioctl)(struct socket *, unsigned int, long unsigned int); + int (*gettstamp)(struct socket *, void *, bool, bool); + int (*listen)(struct socket *, int); + int (*shutdown)(struct socket *, int); + int (*setsockopt)(struct socket *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct socket *, int, int, char *, int *); + void (*show_fdinfo)(struct seq_file *, struct socket *); + int (*sendmsg)(struct socket *, struct msghdr *, size_t); + int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); + int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); + ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct socket *); + int (*set_peek_off)(struct sock *, int); + int (*peek_len)(struct socket *); + int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); + int (*read_skb)(struct sock *, skb_read_actor_t); + int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t); + int (*set_rcvlowat)(struct sock *, int); +}; + +struct proto_accept_arg { + int flags; + int err; + int is_empty; + bool kern; +}; + +enum lockdown_reason { + LOCKDOWN_NONE = 0, + LOCKDOWN_MODULE_SIGNATURE = 1, + LOCKDOWN_DEV_MEM = 2, + LOCKDOWN_EFI_TEST = 3, + LOCKDOWN_KEXEC = 4, + LOCKDOWN_HIBERNATION = 5, + LOCKDOWN_PCI_ACCESS = 6, + LOCKDOWN_IOPORT = 7, + LOCKDOWN_MSR = 8, + LOCKDOWN_ACPI_TABLES = 9, + LOCKDOWN_DEVICE_TREE = 10, + LOCKDOWN_PCMCIA_CIS = 11, + LOCKDOWN_TIOCSSERIAL = 12, + LOCKDOWN_MODULE_PARAMETERS = 13, + LOCKDOWN_MMIOTRACE = 14, + LOCKDOWN_DEBUGFS = 15, + LOCKDOWN_XMON_WR = 16, + LOCKDOWN_BPF_WRITE_USER = 17, + LOCKDOWN_DBG_WRITE_KERNEL = 18, + LOCKDOWN_RTAS_ERROR_INJECTION = 19, + LOCKDOWN_INTEGRITY_MAX = 20, + LOCKDOWN_KCORE = 21, + LOCKDOWN_KPROBES = 22, + LOCKDOWN_BPF_READ_KERNEL = 23, + LOCKDOWN_DBG_READ_KERNEL = 24, + LOCKDOWN_PERF = 25, + LOCKDOWN_TRACEFS = 26, + LOCKDOWN_XMON_RW = 27, + LOCKDOWN_XFRM_SECRET = 28, + LOCKDOWN_CONFIDENTIALITY_MAX = 29, +}; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; +} sync_serial_settings; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; + unsigned int slot_map; +} te1_settings; + +typedef struct { + short unsigned int encoding; + short unsigned int parity; +} raw_hdlc_proto; + +typedef struct { + unsigned int t391; + unsigned int t392; + unsigned int n391; + unsigned int n392; + unsigned int n393; + short unsigned int lmi; + short unsigned int dce; +} fr_proto; + +typedef struct { + unsigned int dlci; +} fr_proto_pvc; + +typedef struct { + unsigned int dlci; + char master[16]; +} fr_proto_pvc_info; + +typedef struct { + unsigned int interval; + unsigned int timeout; +} cisco_proto; + +typedef struct { + short unsigned int dce; + unsigned int 
modulo; + unsigned int window; + unsigned int t1; + unsigned int t2; + unsigned int n2; +} x25_hdlc_proto; + +struct ifmap { + long unsigned int mem_start; + long unsigned int mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct if_settings { + unsigned int type; + unsigned int size; + union { + raw_hdlc_proto *raw_hdlc; + cisco_proto *cisco; + fr_proto *fr; + fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; + x25_hdlc_proto *x25; + sync_serial_settings *sync; + te1_settings *te1; + } ifs_ifsu; +}; + +struct ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + void *ifru_data; + struct if_settings ifru_settings; + } ifr_ifru; +}; + +struct nlmsghdr { + __u32 nlmsg_len; + __u16 nlmsg_type; + __u16 nlmsg_flags; + __u32 nlmsg_seq; + __u32 nlmsg_pid; +}; + +struct nlattr { + __u16 nla_len; + __u16 nla_type; +}; + +struct nla_policy; + +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + const struct nla_policy *policy; + const struct nlattr *miss_nest; + u16 miss_type; + u8 cookie[20]; + u8 cookie_len; + char _msg_buf[80]; +}; + +struct netlink_range_validation; + +struct netlink_range_validation_signed; + +struct nla_policy { + u8 type; + u8 validation_type; + u16 len; + union { + u16 strict_start_type; + const u32 bitfield32_valid; + const u32 mask; + const char *reject_message; + const struct nla_policy *nested_policy; + const struct netlink_range_validation *range; + const struct netlink_range_validation_signed *range_signed; + struct { + s16 min; + s16 max; + }; + int (*validate)(const struct nlattr *, struct netlink_ext_ack *); + }; +}; + +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + struct netlink_ext_ack *extack; + u16 family; + u16 answer_flags; + u32 min_dump_alloc; + unsigned int prev_seq; + unsigned int seq; + int flags; + bool strict_check; + union { + u8 ctx[48]; + long int args[6]; + }; +}; + +struct ndmsg { + __u8 ndm_family; + __u8 ndm_pad1; + __u16 ndm_pad2; + __s32 ndm_ifindex; + __u16 ndm_state; + __u8 ndm_flags; + __u8 ndm_type; +}; + +struct rtnl_link_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; + __u64 collisions; + __u64 rx_length_errors; + __u64 rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; + __u64 tx_aborted_errors; + __u64 tx_carrier_errors; + __u64 tx_fifo_errors; + __u64 tx_heartbeat_errors; + __u64 tx_window_errors; + __u64 rx_compressed; + __u64 tx_compressed; + __u64 rx_nohandler; + __u64 rx_otherhost_dropped; +}; + +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; +}; + +struct ifla_vf_guid { + __u32 vf; + long: 32; + __u64 guid; +}; + +struct ifla_vf_stats { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 
broadcast; + __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; +}; + +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 spoofchk; + __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; + __u32 rss_query_en; + __u32 trusted; + __be16 vlan_proto; +}; + +enum netdev_tx { + __NETDEV_TX_MIN = -2147483648, + NETDEV_TX_OK = 0, + NETDEV_TX_BUSY = 16, +}; + +typedef enum netdev_tx netdev_tx_t; + +struct net_device_core_stats { + long unsigned int rx_dropped; + long unsigned int tx_dropped; + long unsigned int rx_nohandler; + long unsigned int rx_otherhost_dropped; +}; + +struct header_ops { + int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int); + int (*parse)(const struct sk_buff *, unsigned char *); + int (*cache)(const struct neighbour *, struct hh_cache *, __be16); + void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); + bool (*validate)(const char *, unsigned int); + __be16 (*parse_protocol)(const struct sk_buff *); +}; + +struct gro_list { + struct list_head list; + int count; +}; + +struct napi_config { + u64 gro_flush_timeout; + u64 irq_suspend_timeout; + u32 defer_hard_irqs; + unsigned int napi_id; +}; + +struct napi_struct { + struct list_head poll_list; + long unsigned int state; + int weight; + u32 defer_hard_irqs_count; + long unsigned int gro_bitmask; + int (*poll)(struct napi_struct *, int); + int list_owner; + struct net_device *dev; + struct gro_list gro_hash[8]; + struct sk_buff *skb; + struct list_head rx_list; + int rx_count; + unsigned int napi_id; + struct hrtimer timer; + struct task_struct *thread; + long unsigned int gro_flush_timeout; + long unsigned int irq_suspend_timeout; + u32 defer_hard_irqs; + struct list_head dev_list; + struct hlist_node napi_hash_node; + int irq; + int index; + struct napi_config *config; + long: 32; +}; + +struct netdev_queue { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct Qdisc *qdisc; + struct Qdisc *qdisc_sleeping; + struct kobject kobj; + long unsigned int tx_maxrate; + atomic_long_t trans_timeout; + struct net_device *sb_dev; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct dql dql; + spinlock_t _xmit_lock; + int xmit_lock_owner; + long unsigned int trans_start; + long unsigned int state; + struct napi_struct *napi; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct qdisc_skb_head { + struct sk_buff *head; + struct sk_buff *tail; + __u32 qlen; + spinlock_t lock; +}; + +struct gnet_stats_basic_sync { + u64_stats_t bytes; + u64_stats_t packets; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; +}; + +struct gnet_stats_queue { + __u32 qlen; + __u32 backlog; + __u32 drops; + __u32 requeues; + __u32 overlimits; +}; + +struct Qdisc_ops; + +struct qdisc_size_table; + +struct net_rate_estimator; + +struct Qdisc { + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + unsigned int flags; + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table 
*stab; + struct hlist_node hash; + u32 handle; + u32 parent; + struct netdev_queue *dev_queue; + struct net_rate_estimator *rate_est; + struct gnet_stats_basic_sync *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + int pad; + refcount_t refcnt; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sk_buff_head gso_skb; + struct qdisc_skb_head q; + struct gnet_stats_basic_sync bstats; + struct gnet_stats_queue qstats; + int owner; + long unsigned int state; + long unsigned int state2; + struct Qdisc *next_sched; + struct sk_buff_head skb_bad_txq; + long: 32; + long: 32; + long: 32; + spinlock_t busylock; + spinlock_t seqlock; + struct callback_head rcu; + netdevice_tracker dev_tracker; + struct lock_class_key root_lock_key; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long int privdata[0]; +}; + +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct callback_head rcu; + u16 queues[0]; +}; + +struct xps_dev_maps { + struct callback_head rcu; + unsigned int nr_ids; + s16 num_tc; + struct xps_map *attr_map[0]; +}; + +struct netdev_phys_item_id { + unsigned char id[32]; + unsigned char id_len; +}; + +enum net_device_path_type { + DEV_PATH_ETHERNET = 0, + DEV_PATH_VLAN = 1, + DEV_PATH_BRIDGE = 2, + DEV_PATH_PPPOE = 3, + DEV_PATH_DSA = 4, + DEV_PATH_MTK_WDMA = 5, +}; + +struct net_device_path { + enum net_device_path_type type; + const struct net_device *dev; + union { + struct { + u16 id; + __be16 proto; + u8 h_dest[6]; + } encap; + struct { + enum { + DEV_PATH_BR_VLAN_KEEP = 0, + DEV_PATH_BR_VLAN_TAG = 1, + DEV_PATH_BR_VLAN_UNTAG = 2, + DEV_PATH_BR_VLAN_UNTAG_HW = 3, + } vlan_mode; + u16 vlan_id; + __be16 vlan_proto; + } bridge; + struct { + int port; + u16 proto; + } dsa; + struct { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; + u8 amsdu; + } mtk_wdma; + }; +}; + +struct net_device_path_ctx { + const struct net_device *dev; + u8 daddr[6]; + int num_vlans; + struct { + u16 id; + __be16 proto; + } vlan[2]; +}; + +enum tc_setup_type { + TC_QUERY_CAPS = 0, + TC_SETUP_QDISC_MQPRIO = 1, + TC_SETUP_CLSU32 = 2, + TC_SETUP_CLSFLOWER = 3, + TC_SETUP_CLSMATCHALL = 4, + TC_SETUP_CLSBPF = 5, + TC_SETUP_BLOCK = 6, + TC_SETUP_QDISC_CBS = 7, + TC_SETUP_QDISC_RED = 8, + TC_SETUP_QDISC_PRIO = 9, + TC_SETUP_QDISC_MQ = 10, + TC_SETUP_QDISC_ETF = 11, + TC_SETUP_ROOT_QDISC = 12, + TC_SETUP_QDISC_GRED = 13, + TC_SETUP_QDISC_TAPRIO = 14, + TC_SETUP_FT = 15, + TC_SETUP_QDISC_ETS = 16, + TC_SETUP_QDISC_TBF = 17, + TC_SETUP_QDISC_FIFO = 18, + TC_SETUP_QDISC_HTB = 19, + TC_SETUP_ACT = 20, +}; + +enum bpf_netdev_command { + XDP_SETUP_PROG = 0, + XDP_SETUP_PROG_HW = 1, + BPF_OFFLOAD_MAP_ALLOC = 2, + BPF_OFFLOAD_MAP_FREE = 3, + XDP_SETUP_XSK_POOL = 4, +}; + +struct xsk_buff_pool; + +struct netdev_bpf { + enum bpf_netdev_command command; + union { + struct { + u32 flags; + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; + struct { + struct bpf_offloaded_map *offmap; + }; + struct { + struct xsk_buff_pool *pool; + u16 queue_id; + } xsk; + }; +}; + +struct dev_ifalias { + struct callback_head rcuhead; + char ifalias[0]; +}; + +struct xdp_frame; + +struct 
xdp_buff; + +struct ip_tunnel_parm_kern; + +struct kernel_hwtstamp_config; + +struct net_device_ops { + int (*ndo_init)(struct net_device *); + void (*ndo_uninit)(struct net_device *); + int (*ndo_open)(struct net_device *); + int (*ndo_stop)(struct net_device *); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); + netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t); + u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *); + void (*ndo_change_rx_flags)(struct net_device *, int); + void (*ndo_set_rx_mode)(struct net_device *); + int (*ndo_set_mac_address)(struct net_device *, void *); + int (*ndo_validate_addr)(struct net_device *); + int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_eth_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_siocbond)(struct net_device *, struct ifreq *, int); + int (*ndo_siocwandev)(struct net_device *, struct if_settings *); + int (*ndo_siocdevprivate)(struct net_device *, struct ifreq *, void *, int); + int (*ndo_set_config)(struct net_device *, struct ifmap *); + int (*ndo_change_mtu)(struct net_device *, int); + int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); + void (*ndo_tx_timeout)(struct net_device *, unsigned int); + void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); + bool (*ndo_has_offload_stats)(const struct net_device *, int); + int (*ndo_get_offload_stats)(int, const struct net_device *, void *); + struct net_device_stats * (*ndo_get_stats)(struct net_device *); + int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); + int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); + int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); + int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); + int (*ndo_set_vf_rate)(struct net_device *, int, int, int); + int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); + int (*ndo_set_vf_trust)(struct net_device *, int, bool); + int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); + int (*ndo_set_vf_link_state)(struct net_device *, int, int); + int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); + int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); + int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); + int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, struct ifla_vf_guid *); + int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); + int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); + int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); + int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); + int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *); + int (*ndo_del_slave)(struct net_device *, struct net_device *); + struct net_device * (*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool); + struct net_device * (*ndo_sk_get_lower_dev)(struct net_device *, struct sock *); + netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); + int (*ndo_set_features)(struct net_device *, netdev_features_t); + int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); + void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); + int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16, bool *, struct netlink_ext_ack *); + int 
(*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, bool *, struct netlink_ext_ack *); + int (*ndo_fdb_del_bulk)(struct nlmsghdr *, struct net_device *, struct netlink_ext_ack *); + int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); + int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, const unsigned char *, u16, u32, u32, struct netlink_ext_ack *); + int (*ndo_mdb_add)(struct net_device *, struct nlattr **, u16, struct netlink_ext_ack *); + int (*ndo_mdb_del)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); + int (*ndo_mdb_del_bulk)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); + int (*ndo_mdb_dump)(struct net_device *, struct sk_buff *, struct netlink_callback *); + int (*ndo_mdb_get)(struct net_device *, struct nlattr **, u32, u32, struct netlink_ext_ack *); + int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, struct netlink_ext_ack *); + int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); + int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); + int (*ndo_change_carrier)(struct net_device *, bool); + int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); + void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); + void (*ndo_dfwd_del_station)(struct net_device *, void *); + int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); + int (*ndo_get_iflink)(const struct net_device *); + int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); + void (*ndo_set_rx_headroom)(struct net_device *, int); + int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); + int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); + struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *, struct xdp_buff *); + int (*ndo_xsk_wakeup)(struct net_device *, u32, u32); + int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm_kern *, int); + struct net_device * (*ndo_get_peer_dev)(struct net_device *); + int (*ndo_fill_forward_path)(struct net_device_path_ctx *, struct net_device_path *); + ktime_t (*ndo_get_tstamp)(struct net_device *, const struct skb_shared_hwtstamps *, bool); + int (*ndo_hwtstamp_get)(struct net_device *, struct kernel_hwtstamp_config *); + int (*ndo_hwtstamp_set)(struct net_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); +}; + +struct neigh_parms { + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + int (*neigh_setup)(struct neighbour *); + struct neigh_table *tbl; + void *sysctl_table; + int dead; + refcount_t refcnt; + struct callback_head callback_head; + int reachable_time; + u32 qlen; + int data[14]; + long unsigned int data_state[1]; +}; + +enum hwtstamp_source { + HWTSTAMP_SOURCE_UNSPEC = 0, + HWTSTAMP_SOURCE_NETDEV = 1, + HWTSTAMP_SOURCE_PHYLIB = 2, +}; + +struct kernel_hwtstamp_config { + int flags; + int tx_type; + int rx_filter; + struct ifreq *ifr; + bool copied_to_user; + enum hwtstamp_source source; +}; + +struct pcpu_lstats { + u64_stats_t packets; + u64_stats_t bytes; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; +}; + +struct pcpu_sw_netstats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + 
u64_stats_t tx_packets; + u64_stats_t tx_bytes; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct pcpu_dstats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_drops; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_drops; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; +}; + +enum xdp_rss_hash_type { + XDP_RSS_L3_IPV4 = 1, + XDP_RSS_L3_IPV6 = 2, + XDP_RSS_L3_DYNHDR = 4, + XDP_RSS_L4 = 8, + XDP_RSS_L4_TCP = 16, + XDP_RSS_L4_UDP = 32, + XDP_RSS_L4_SCTP = 64, + XDP_RSS_L4_IPSEC = 128, + XDP_RSS_L4_ICMP = 256, + XDP_RSS_TYPE_NONE = 0, + XDP_RSS_TYPE_L2 = 0, + XDP_RSS_TYPE_L3_IPV4 = 1, + XDP_RSS_TYPE_L3_IPV6 = 2, + XDP_RSS_TYPE_L3_IPV4_OPT = 5, + XDP_RSS_TYPE_L3_IPV6_EX = 6, + XDP_RSS_TYPE_L4_ANY = 8, + XDP_RSS_TYPE_L4_IPV4_TCP = 25, + XDP_RSS_TYPE_L4_IPV4_UDP = 41, + XDP_RSS_TYPE_L4_IPV4_SCTP = 73, + XDP_RSS_TYPE_L4_IPV4_IPSEC = 137, + XDP_RSS_TYPE_L4_IPV4_ICMP = 265, + XDP_RSS_TYPE_L4_IPV6_TCP = 26, + XDP_RSS_TYPE_L4_IPV6_UDP = 42, + XDP_RSS_TYPE_L4_IPV6_SCTP = 74, + XDP_RSS_TYPE_L4_IPV6_IPSEC = 138, + XDP_RSS_TYPE_L4_IPV6_ICMP = 266, + XDP_RSS_TYPE_L4_IPV6_TCP_EX = 30, + XDP_RSS_TYPE_L4_IPV6_UDP_EX = 46, + XDP_RSS_TYPE_L4_IPV6_SCTP_EX = 78, +}; + +struct xdp_md; + +struct xdp_metadata_ops { + int (*xmo_rx_timestamp)(const struct xdp_md *, u64 *); + int (*xmo_rx_hash)(const struct xdp_md *, u32 *, enum xdp_rss_hash_type *); + int (*xmo_rx_vlan_tag)(const struct xdp_md *, __be16 *, u16 *); +}; + +struct xsk_tx_metadata_ops { + void (*tmo_request_timestamp)(void *); + u64 (*tmo_fill_timestamp)(void *); + void (*tmo_request_checksum)(u16, u16, void *); +}; + +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE = 0, + ETHTOOL_ID_ACTIVE = 1, + ETHTOOL_ID_ON = 2, + ETHTOOL_ID_OFF = 3, +}; + +struct ethtool_drvinfo; + +struct ethtool_regs; + +struct ethtool_wolinfo; + +struct ethtool_link_ext_state_info; + +struct ethtool_link_ext_stats; + +struct ethtool_eeprom; + +struct ethtool_coalesce; + +struct kernel_ethtool_coalesce; + +struct ethtool_ringparam; + +struct kernel_ethtool_ringparam; + +struct ethtool_pause_stats; + +struct ethtool_pauseparam; + +struct ethtool_test; + +struct ethtool_stats; + +struct ethtool_rxnfc; + +struct ethtool_flash; + +struct ethtool_rxfh_param; + +struct ethtool_rxfh_context; + +struct ethtool_channels; + +struct ethtool_dump; + +struct kernel_ethtool_ts_info; + +struct ethtool_ts_stats; + +struct ethtool_modinfo; + +struct ethtool_keee; + +struct ethtool_tunable; + +struct ethtool_link_ksettings; + +struct ethtool_fec_stats; + +struct ethtool_fecparam; + +struct ethtool_module_eeprom; + +struct ethtool_eth_phy_stats; + +struct ethtool_eth_mac_stats; + +struct ethtool_eth_ctrl_stats; + +struct ethtool_rmon_stats; + +struct ethtool_rmon_hist_range; + +struct ethtool_module_power_mode_params; + +struct ethtool_mm_state; + +struct ethtool_mm_cfg; + +struct ethtool_mm_stats; + +struct ethtool_ops { + u32 cap_link_lanes_supported: 1; + u32 cap_rss_ctx_supported: 1; + u32 cap_rss_sym_xor_supported: 1; + u32 rxfh_per_ctx_key: 1; + u32 cap_rss_rxnfc_adds: 1; + u32 rxfh_indir_space; + u16 rxfh_key_space; + u16 rxfh_priv_size; + u32 rxfh_max_num_contexts; + u32 supported_coalesce_params; + u32 supported_ring_params; + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct 
ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, struct ethtool_link_ext_state_info *); + void (*get_link_ext_stats)(struct net_device *, struct ethtool_link_ext_stats *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + void (*get_pause_stats)(struct net_device *, struct ethtool_pause_stats *); + void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct net_device *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *); + int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*create_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*modify_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*remove_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, u32, struct netlink_ext_ack *); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *); + void (*get_ts_stats)(struct net_device *, struct ethtool_ts_stats *); + int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_eee)(struct net_device *, struct ethtool_keee *); + int (*set_eee)(struct net_device *, 
struct ethtool_keee *); + int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); + void (*get_fec_stats)(struct net_device *, struct ethtool_fec_stats *); + int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); + void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*get_phy_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); + int (*set_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); + void (*get_eth_phy_stats)(struct net_device *, struct ethtool_eth_phy_stats *); + void (*get_eth_mac_stats)(struct net_device *, struct ethtool_eth_mac_stats *); + void (*get_eth_ctrl_stats)(struct net_device *, struct ethtool_eth_ctrl_stats *); + void (*get_rmon_stats)(struct net_device *, struct ethtool_rmon_stats *, const struct ethtool_rmon_hist_range **); + int (*get_module_power_mode)(struct net_device *, struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); + int (*set_module_power_mode)(struct net_device *, const struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); + int (*get_mm)(struct net_device *, struct ethtool_mm_state *); + int (*set_mm)(struct net_device *, struct ethtool_mm_cfg *, struct netlink_ext_ack *); + void (*get_mm_stats)(struct net_device *, struct ethtool_mm_stats *); +}; + +struct nd_opt_hdr; + +struct ndisc_options; + +struct prefix_info; + +struct ndisc_ops { + int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); + void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *); + int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); + void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); + void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool); +}; + +struct rtnl_link_ops { + struct list_head list; + struct srcu_struct srcu; + const char *kind; + size_t priv_size; + struct net_device * (*alloc)(struct nlattr **, const char *, unsigned char, unsigned int, unsigned int); + void (*setup)(struct net_device *); + bool netns_refund; + const u16 peer_type; + unsigned int maxtype; + const struct nla_policy *policy; + int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + void (*dellink)(struct net_device *, struct list_head *); + size_t (*get_size)(const struct net_device *); + int 
(*fill_info)(struct sk_buff *, const struct net_device *); + size_t (*get_xstats_size)(const struct net_device *); + int (*fill_xstats)(struct sk_buff *, const struct net_device *); + unsigned int (*get_num_tx_queues)(); + unsigned int (*get_num_rx_queues)(); + unsigned int slave_maxtype; + const struct nla_policy *slave_policy; + int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + size_t (*get_slave_size)(const struct net_device *, const struct net_device *); + int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); + struct net * (*get_link_net)(const struct net_device *); + size_t (*get_linkxstats_size)(const struct net_device *, int); + int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); +}; + +struct netdev_queue_stats_rx; + +struct netdev_queue_stats_tx; + +struct netdev_stat_ops { + void (*get_queue_stats_rx)(struct net_device *, int, struct netdev_queue_stats_rx *); + void (*get_queue_stats_tx)(struct net_device *, int, struct netdev_queue_stats_tx *); + void (*get_base_stats)(struct net_device *, struct netdev_queue_stats_rx *, struct netdev_queue_stats_tx *); +}; + +struct netdev_queue_mgmt_ops { + size_t ndo_queue_mem_size; + int (*ndo_queue_mem_alloc)(struct net_device *, void *, int); + void (*ndo_queue_mem_free)(struct net_device *, void *); + int (*ndo_queue_start)(struct net_device *, void *, int); + int (*ndo_queue_stop)(struct net_device *, void *, int); +}; + +struct udp_tunnel_nic_table_info { + unsigned int n_entries; + unsigned int tunnel_types; +}; + +struct udp_tunnel_info; + +struct udp_tunnel_nic_shared; + +struct udp_tunnel_nic_info { + int (*set_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*unset_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*sync_table)(struct net_device *, unsigned int); + struct udp_tunnel_nic_shared *shared; + unsigned int flags; + struct udp_tunnel_nic_table_info tables[4]; +}; + +struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + short unsigned int tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + __u32 tcm_info; +}; + +struct netlink_range_validation { + u64 min; + u64 max; +}; + +struct netlink_range_validation_signed { + s64 min; + s64 max; +}; + +struct pneigh_entry; + +struct neigh_statistics; + +struct neigh_hash_table; + +struct neigh_table { + int family; + unsigned int entry_size; + unsigned int key_len; + __be16 protocol; + __u32 (*hash)(const void *, const struct net_device *, __u32 *); + bool (*key_eq)(const struct neighbour *, const void *); + int (*constructor)(struct neighbour *); + int (*pconstructor)(struct pneigh_entry *); + void (*pdestructor)(struct pneigh_entry *); + void (*proxy_redo)(struct sk_buff *); + int (*is_multicast)(const void *); + bool (*allow_add)(const struct net_device *, struct netlink_ext_ack *); + char *id; + struct neigh_parms parms; + struct list_head parms_list; + int gc_interval; + int gc_thresh1; + int gc_thresh2; + int gc_thresh3; + long unsigned int last_flush; + struct delayed_work gc_work; + struct delayed_work managed_work; + struct timer_list proxy_timer; + struct sk_buff_head proxy_queue; + atomic_t entries; + atomic_t gc_entries; + struct list_head gc_list; + struct list_head managed_list; + rwlock_t lock; + long unsigned int last_rand; + struct neigh_statistics *stats; + struct neigh_hash_table *nht; + struct 
pneigh_entry **phash_buckets; +}; + +struct neigh_statistics { + long unsigned int allocs; + long unsigned int destroys; + long unsigned int hash_grows; + long unsigned int res_failed; + long unsigned int lookups; + long unsigned int hits; + long unsigned int rcv_probes_mcast; + long unsigned int rcv_probes_ucast; + long unsigned int periodic_gc_runs; + long unsigned int forced_gc_runs; + long unsigned int unres_discards; + long unsigned int table_fulls; +}; + +struct neigh_ops { + int family; + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +}; + +struct pneigh_entry { + struct pneigh_entry *next; + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; + u8 protocol; + u32 key[0]; +}; + +struct neigh_hash_table { + struct hlist_head *hash_heads; + unsigned int hash_shift; + __u32 hash_rnd[4]; + struct callback_head rcu; +}; + +struct smc_hashinfo; + +struct sk_psock; + +struct request_sock_ops; + +struct timewait_sock_ops; + +struct raw_hashinfo; + +struct proto { + void (*close)(struct sock *, long int); + int (*pre_connect)(struct sock *, struct sockaddr *, int); + int (*connect)(struct sock *, struct sockaddr *, int); + int (*disconnect)(struct sock *, int); + struct sock * (*accept)(struct sock *, struct proto_accept_arg *); + int (*ioctl)(struct sock *, int, int *); + int (*init)(struct sock *); + void (*destroy)(struct sock *); + void (*shutdown)(struct sock *, int); + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*keepalive)(struct sock *, int); + int (*sendmsg)(struct sock *, struct msghdr *, size_t); + int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int *); + void (*splice_eof)(struct socket *); + int (*bind)(struct sock *, struct sockaddr *, int); + int (*bind_add)(struct sock *, struct sockaddr *, int); + int (*backlog_rcv)(struct sock *, struct sk_buff *); + bool (*bpf_bypass_getsockopt)(int, int); + void (*release_cb)(struct sock *); + int (*hash)(struct sock *); + void (*unhash)(struct sock *); + void (*rehash)(struct sock *); + int (*get_port)(struct sock *, short unsigned int); + void (*put_port)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + unsigned int inuse_idx; + bool (*stream_memory_free)(const struct sock *, int); + bool (*sock_is_readable)(struct sock *); + void (*enter_memory_pressure)(struct sock *); + void (*leave_memory_pressure)(struct sock *); + atomic_long_t *memory_allocated; + int *per_cpu_fw_alloc; + struct percpu_counter *sockets_allocated; + long unsigned int *memory_pressure; + long int *sysctl_mem; + int *sysctl_wmem; + int *sysctl_rmem; + u32 sysctl_wmem_offset; + u32 sysctl_rmem_offset; + int max_header; + bool no_autobind; + struct kmem_cache *slab; + unsigned int obj_size; + unsigned int ipv6_pinfo_offset; + slab_flags_t slab_flags; + unsigned int useroffset; + unsigned int usersize; + unsigned int *orphan_count; + struct request_sock_ops *rsk_prot; + struct timewait_sock_ops *twsk_prot; + union { + struct inet_hashinfo *hashinfo; + struct udp_table *udp_table; + struct raw_hashinfo *raw_hash; + struct smc_hashinfo *smc_hash; + } h; + struct module *owner; + char name[32]; + struct list_head node; + int (*diag_destroy)(struct sock *, int); +}; + +struct sk_filter { + refcount_t 
refcnt; + struct callback_head rcu; + struct bpf_prog *prog; +}; + +struct bpf_storage_buffer { + struct callback_head rcu; + char data[0]; +}; + +struct gnet_dump { + spinlock_t *lock; + struct sk_buff *skb; + struct nlattr *tail; + int compat_tc_stats; + int compat_xstats; + int padattr; + void *xstats; + int xstats_len; + struct tc_stats tc_stats; +}; + +enum flow_action_hw_stats_bit { + FLOW_ACTION_HW_STATS_IMMEDIATE_BIT = 0, + FLOW_ACTION_HW_STATS_DELAYED_BIT = 1, + FLOW_ACTION_HW_STATS_DISABLED_BIT = 2, + FLOW_ACTION_HW_STATS_NUM_BITS = 3, +}; + +struct flow_block { + struct list_head cb_list; +}; + +typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *); + +struct qdisc_size_table { + struct callback_head rcu; + struct list_head list; + struct tc_sizespec szopts; + int refcnt; + u16 data[0]; +}; + +struct Qdisc_class_ops; + +struct Qdisc_ops { + struct Qdisc_ops *next; + const struct Qdisc_class_ops *cl_ops; + char id[16]; + int priv_size; + unsigned int static_flags; + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + struct sk_buff * (*peek)(struct Qdisc *); + int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*reset)(struct Qdisc *); + void (*destroy)(struct Qdisc *); + int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*attach)(struct Qdisc *); + int (*change_tx_queue_len)(struct Qdisc *, unsigned int); + void (*change_real_num_tx)(struct Qdisc *, unsigned int); + int (*dump)(struct Qdisc *, struct sk_buff *); + int (*dump_stats)(struct Qdisc *, struct gnet_dump *); + void (*ingress_block_set)(struct Qdisc *, u32); + void (*egress_block_set)(struct Qdisc *, u32); + u32 (*ingress_block_get)(struct Qdisc *); + u32 (*egress_block_get)(struct Qdisc *); + struct module *owner; +}; + +struct qdisc_walker; + +struct tcf_block; + +struct Qdisc_class_ops { + unsigned int flags; + struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); + int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *); + struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int); + void (*qlen_notify)(struct Qdisc *, long unsigned int); + long unsigned int (*find)(struct Qdisc *, u32); + int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *); + int (*delete)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + void (*walk)(struct Qdisc *, struct qdisc_walker *); + struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32); + void (*unbind_tcf)(struct Qdisc *, long unsigned int); + int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *); + int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *); +}; + +struct tcf_chain; + +struct tcf_block { + struct xarray ports; + struct mutex lock; + struct list_head chain_list; + u32 index; + u32 classid; + refcount_t refcnt; + struct net *net; + struct Qdisc *q; + struct rw_semaphore cb_lock; + struct flow_block flow_block; + struct list_head owner_list; + bool keep_dst; + bool bypass_wanted; + atomic_t filtercnt; + atomic_t skipswcnt; + atomic_t offloadcnt; + unsigned int nooffloaddevcnt; + unsigned int lockeddevcnt; + struct { + struct tcf_chain *chain; + struct list_head filter_chain_list; + } chain0; + struct callback_head rcu; + struct hlist_head 
proto_destroy_ht[128]; + struct mutex proto_destroy_lock; +}; + +struct tcf_result; + +struct tcf_proto_ops; + +struct tcf_proto { + struct tcf_proto *next; + void *root; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + __be16 protocol; + u32 prio; + void *data; + const struct tcf_proto_ops *ops; + struct tcf_chain *chain; + spinlock_t lock; + bool deleting; + bool counted; + refcount_t refcnt; + struct callback_head rcu; + struct hlist_node destroy_ht_node; +}; + +struct tcf_result { + union { + struct { + long unsigned int class; + u32 classid; + }; + const struct tcf_proto *goto_tp; + }; +}; + +struct tcf_walker; + +struct tcf_exts; + +struct tcf_proto_ops { + struct list_head head; + char kind[16]; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + int (*init)(struct tcf_proto *); + void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *); + void * (*get)(struct tcf_proto *, u32); + void (*put)(struct tcf_proto *, void *); + int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, u32, struct netlink_ext_ack *); + int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *); + void (*walk)(struct tcf_proto *, struct tcf_walker *, bool); + int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, struct netlink_ext_ack *); + void (*hw_add)(struct tcf_proto *, void *); + void (*hw_del)(struct tcf_proto *, void *); + void (*bind_class)(void *, u32, long unsigned int, void *, long unsigned int); + void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *); + void (*tmplt_destroy)(void *); + void (*tmplt_reoffload)(struct tcf_chain *, bool, flow_setup_cb_t *, void *); + struct tcf_exts * (*get_exts)(const struct tcf_proto *, u32); + int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*tmplt_dump)(struct sk_buff *, struct net *, void *); + struct module *owner; + int flags; +}; + +struct tcf_chain { + struct mutex filter_chain_lock; + struct tcf_proto *filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; + unsigned int refcnt; + unsigned int action_refcnt; + bool explicitly_created; + bool flushing; + const struct tcf_proto_ops *tmplt_ops; + void *tmplt_priv; + struct callback_head rcu; +}; + +typedef unsigned int (*bpf_dispatcher_fn)(const void *, const struct bpf_insn *, unsigned int (*)(const void *, const struct bpf_insn *)); + +enum perf_branch_sample_type_shift { + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, + PERF_SAMPLE_BRANCH_COND_SHIFT = 10, + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, 
+ PERF_SAMPLE_BRANCH_MAX_SHIFT = 20, +}; + +struct dyn_event; + +struct dyn_event_operations { + struct list_head list; + int (*create)(const char *); + int (*show)(struct seq_file *, struct dyn_event *); + bool (*is_busy)(struct dyn_event *); + int (*free)(struct dyn_event *); + bool (*match)(const char *, const char *, int, const char **, struct dyn_event *); +}; + +struct dyn_event { + struct list_head list; + struct dyn_event_operations *ops; +}; + +typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); + +enum fetch_op { + FETCH_OP_NOP = 0, + FETCH_OP_REG = 1, + FETCH_OP_STACK = 2, + FETCH_OP_STACKP = 3, + FETCH_OP_RETVAL = 4, + FETCH_OP_IMM = 5, + FETCH_OP_COMM = 6, + FETCH_OP_ARG = 7, + FETCH_OP_FOFFS = 8, + FETCH_OP_DATA = 9, + FETCH_OP_EDATA = 10, + FETCH_OP_DEREF = 11, + FETCH_OP_UDEREF = 12, + FETCH_OP_ST_RAW = 13, + FETCH_OP_ST_MEM = 14, + FETCH_OP_ST_UMEM = 15, + FETCH_OP_ST_STRING = 16, + FETCH_OP_ST_USTRING = 17, + FETCH_OP_ST_SYMSTR = 18, + FETCH_OP_ST_EDATA = 19, + FETCH_OP_MOD_BF = 20, + FETCH_OP_LP_ARRAY = 21, + FETCH_OP_TP_ARG = 22, + FETCH_OP_END = 23, + FETCH_NOP_SYMBOL = 24, +}; + +struct fetch_insn { + enum fetch_op op; + union { + unsigned int param; + struct { + unsigned int size; + int offset; + }; + struct { + unsigned char basesize; + unsigned char lshift; + unsigned char rshift; + }; + long unsigned int immediate; + void *data; + }; +}; + +struct fetch_type { + const char *name; + size_t size; + bool is_signed; + bool is_string; + print_type_func_t print; + const char *fmt; + const char *fmttype; +}; + +struct probe_arg { + struct fetch_insn *code; + bool dynamic; + unsigned int offset; + unsigned int count; + const char *name; + const char *comm; + char *fmt; + const struct fetch_type *type; +}; + +struct probe_entry_arg { + struct fetch_insn *code; + unsigned int size; +}; + +struct trace_uprobe_filter { + rwlock_t rwlock; + int nr_systemwide; + struct list_head perf_events; +}; + +struct trace_probe_event { + unsigned int flags; + struct trace_event_class class; + struct trace_event_call call; + struct list_head files; + struct list_head probes; + struct trace_uprobe_filter filter[0]; +}; + +struct trace_probe { + struct list_head list; + struct trace_probe_event *event; + ssize_t size; + unsigned int nr_args; + struct probe_entry_arg *entry_arg; + struct probe_arg args[0]; +}; + +struct event_file_link { + struct trace_event_file *file; + struct list_head list; +}; + +struct traceprobe_parse_context { + struct trace_event_call *event; + const char *funcname; + const struct btf_type *proto; + const struct btf_param *params; + s32 nr_params; + struct btf *btf; + const struct btf_type *last_type; + u32 last_bitoffs; + u32 last_bitsize; + struct trace_probe *tp; + unsigned int flags; + int offset; +}; + +enum probe_print_type { + PROBE_PRINT_NORMAL = 0, + PROBE_PRINT_RETURN = 1, + PROBE_PRINT_EVENT = 2, +}; + +enum { + TP_ERR_FILE_NOT_FOUND = 0, + TP_ERR_NO_REGULAR_FILE = 1, + TP_ERR_BAD_REFCNT = 2, + TP_ERR_REFCNT_OPEN_BRACE = 3, + TP_ERR_BAD_REFCNT_SUFFIX = 4, + TP_ERR_BAD_UPROBE_OFFS = 5, + TP_ERR_BAD_MAXACT_TYPE = 6, + TP_ERR_BAD_MAXACT = 7, + TP_ERR_MAXACT_TOO_BIG = 8, + TP_ERR_BAD_PROBE_ADDR = 9, + TP_ERR_NON_UNIQ_SYMBOL = 10, + TP_ERR_BAD_RETPROBE = 11, + TP_ERR_NO_TRACEPOINT = 12, + TP_ERR_BAD_ADDR_SUFFIX = 13, + TP_ERR_NO_GROUP_NAME = 14, + TP_ERR_GROUP_TOO_LONG = 15, + TP_ERR_BAD_GROUP_NAME = 16, + TP_ERR_NO_EVENT_NAME = 17, + TP_ERR_EVENT_TOO_LONG = 18, + TP_ERR_BAD_EVENT_NAME = 19, + TP_ERR_EVENT_EXIST = 20, + TP_ERR_RETVAL_ON_PROBE 
= 21, + TP_ERR_NO_RETVAL = 22, + TP_ERR_BAD_STACK_NUM = 23, + TP_ERR_BAD_ARG_NUM = 24, + TP_ERR_BAD_VAR = 25, + TP_ERR_BAD_REG_NAME = 26, + TP_ERR_BAD_MEM_ADDR = 27, + TP_ERR_BAD_IMM = 28, + TP_ERR_IMMSTR_NO_CLOSE = 29, + TP_ERR_FILE_ON_KPROBE = 30, + TP_ERR_BAD_FILE_OFFS = 31, + TP_ERR_SYM_ON_UPROBE = 32, + TP_ERR_TOO_MANY_OPS = 33, + TP_ERR_DEREF_NEED_BRACE = 34, + TP_ERR_BAD_DEREF_OFFS = 35, + TP_ERR_DEREF_OPEN_BRACE = 36, + TP_ERR_COMM_CANT_DEREF = 37, + TP_ERR_BAD_FETCH_ARG = 38, + TP_ERR_ARRAY_NO_CLOSE = 39, + TP_ERR_BAD_ARRAY_SUFFIX = 40, + TP_ERR_BAD_ARRAY_NUM = 41, + TP_ERR_ARRAY_TOO_BIG = 42, + TP_ERR_BAD_TYPE = 43, + TP_ERR_BAD_STRING = 44, + TP_ERR_BAD_SYMSTRING = 45, + TP_ERR_BAD_BITFIELD = 46, + TP_ERR_ARG_NAME_TOO_LONG = 47, + TP_ERR_NO_ARG_NAME = 48, + TP_ERR_BAD_ARG_NAME = 49, + TP_ERR_USED_ARG_NAME = 50, + TP_ERR_ARG_TOO_LONG = 51, + TP_ERR_NO_ARG_BODY = 52, + TP_ERR_BAD_INSN_BNDRY = 53, + TP_ERR_FAIL_REG_PROBE = 54, + TP_ERR_DIFF_PROBE_TYPE = 55, + TP_ERR_DIFF_ARG_TYPE = 56, + TP_ERR_SAME_PROBE = 57, + TP_ERR_NO_EVENT_INFO = 58, + TP_ERR_BAD_ATTACH_EVENT = 59, + TP_ERR_BAD_ATTACH_ARG = 60, + TP_ERR_NO_EP_FILTER = 61, + TP_ERR_NOSUP_BTFARG = 62, + TP_ERR_NO_BTFARG = 63, + TP_ERR_NO_BTF_ENTRY = 64, + TP_ERR_BAD_VAR_ARGS = 65, + TP_ERR_NOFENTRY_ARGS = 66, + TP_ERR_DOUBLE_ARGS = 67, + TP_ERR_ARGS_2LONG = 68, + TP_ERR_ARGIDX_2BIG = 69, + TP_ERR_NO_PTR_STRCT = 70, + TP_ERR_NOSUP_DAT_ARG = 71, + TP_ERR_BAD_HYPHEN = 72, + TP_ERR_NO_BTF_FIELD = 73, + TP_ERR_BAD_BTF_TID = 74, + TP_ERR_BAD_TYPE4STR = 75, + TP_ERR_NEED_STRING_TYPE = 76, +}; + +struct trace_uprobe; + +struct uprobe_dispatch_data { + struct trace_uprobe *tu; + long unsigned int bp_addr; +}; + +struct trace_uprobe { + struct dyn_event devent; + long: 32; + struct uprobe_consumer consumer; + struct path path; + char *filename; + struct uprobe *uprobe; + long unsigned int offset; + long unsigned int ref_ctr_offset; + long unsigned int *nhits; + struct trace_probe tp; + long: 32; +}; + +struct uprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int vaddr[0]; +}; + +struct uprobe_cpu_buffer { + struct mutex mutex; + void *buf; + int dsize; +}; + +typedef bool (*filter_func_t)(struct uprobe_consumer *, struct mm_struct *); + +typedef void (*swap_func_t)(void *, void *, int); + +typedef int (*cmp_func_t)(const void *, const void *); + +struct static_key_mod { + struct static_key_mod *next; + struct jump_entry *entries; + struct module *mod; +}; + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP = 1, +}; + +enum system_states { + SYSTEM_BOOTING = 0, + SYSTEM_SCHEDULING = 1, + SYSTEM_FREEING_INITMEM = 2, + SYSTEM_RUNNING = 3, + SYSTEM_HALT = 4, + SYSTEM_POWER_OFF = 5, + SYSTEM_RESTART = 6, + SYSTEM_SUSPEND = 7, +}; + +typedef struct mutex *class_mutex_t; + +enum work_bits { + WORK_STRUCT_PENDING_BIT = 0, + WORK_STRUCT_INACTIVE_BIT = 1, + WORK_STRUCT_PWQ_BIT = 2, + WORK_STRUCT_LINKED_BIT = 3, + WORK_STRUCT_FLAG_BITS = 4, + WORK_STRUCT_COLOR_SHIFT = 4, + WORK_STRUCT_COLOR_BITS = 4, + WORK_STRUCT_PWQ_SHIFT = 8, + WORK_OFFQ_FLAG_SHIFT = 4, + WORK_OFFQ_BH_BIT = 4, + WORK_OFFQ_FLAG_END = 5, + WORK_OFFQ_FLAG_BITS = 1, + WORK_OFFQ_DISABLE_SHIFT = 5, + WORK_OFFQ_DISABLE_BITS = 16, + WORK_OFFQ_POOL_SHIFT = 21, + WORK_OFFQ_LEFT = 11, + WORK_OFFQ_POOL_BITS = 11, +}; + +enum wq_misc_consts { + WORK_NR_COLORS = 16, + WORK_CPU_UNBOUND = 32, + WORK_BUSY_PENDING = 1, + WORK_BUSY_RUNNING = 2, + WORKER_DESC_LEN = 32, +}; + +enum mod_mem_type { + MOD_TEXT = 0, + MOD_DATA = 1, + MOD_RODATA = 2, + MOD_RO_AFTER_INIT = 
3, + MOD_INIT_TEXT = 4, + MOD_INIT_DATA = 5, + MOD_INIT_RODATA = 6, + MOD_MEM_NUM_TYPES = 7, + MOD_INVALID = -1, +}; + +struct static_key_deferred { + struct static_key key; + long unsigned int timeout; + struct delayed_work work; +}; + +struct compact_control; + +struct capture_control { + struct compact_control *cc; + struct page *page; +}; + +struct anon_vma { + struct anon_vma *root; + struct rw_semaphore rwsem; + atomic_t refcount; + long unsigned int num_children; + long unsigned int num_active_vmas; + struct anon_vma *parent; + struct rb_root_cached rb_root; +}; + +struct xa_node { + unsigned char shift; + unsigned char offset; + unsigned char count; + unsigned char nr_values; + struct xa_node *parent; + struct xarray *array; + union { + struct list_head private_list; + struct callback_head callback_head; + }; + void *slots[64]; + union { + long unsigned int tags[6]; + long unsigned int marks[6]; + }; +}; + +typedef void (*xa_update_node_t)(struct xa_node *); + +struct xa_state { + struct xarray *xa; + long unsigned int xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; + struct list_lru *xa_lru; +}; + +enum mapping_flags { + AS_EIO = 0, + AS_ENOSPC = 1, + AS_MM_ALL_LOCKS = 2, + AS_UNEVICTABLE = 3, + AS_EXITING = 4, + AS_NO_WRITEBACK_TAGS = 5, + AS_RELEASE_ALWAYS = 6, + AS_STABLE_WRITES = 7, + AS_INACCESSIBLE = 8, + AS_FOLIO_ORDER_BITS = 5, + AS_FOLIO_ORDER_MIN = 16, + AS_FOLIO_ORDER_MAX = 21, +}; + +typedef unsigned int fgf_t; + +struct compact_control { + struct list_head freepages[11]; + struct list_head migratepages; + unsigned int nr_freepages; + unsigned int nr_migratepages; + long unsigned int free_pfn; + long unsigned int migrate_pfn; + long unsigned int fast_start_pfn; + struct zone *zone; + long unsigned int total_migrate_scanned; + long unsigned int total_free_scanned; + short unsigned int fast_search_fail; + short int search_order; + const gfp_t gfp_mask; + int order; + int migratetype; + const unsigned int alloc_flags; + const int highest_zoneidx; + enum migrate_mode mode; + bool ignore_skip_hint; + bool no_set_skip_hint; + bool ignore_block_suitable; + bool direct_compaction; + bool proactive_compaction; + bool whole_zone; + bool contended; + bool finish_pageblock; + bool alloc_contig; +}; + +typedef phys_addr_t resource_size_t; + +enum lockdep_ok { + LOCKDEP_STILL_OK = 0, + LOCKDEP_NOW_UNRELIABLE = 1, +}; + +typedef struct { + u64 val; +} pfn_t; + +enum { + MM_FILEPAGES = 0, + MM_ANONPAGES = 1, + MM_SWAPENTS = 2, + MM_SHMEMPAGES = 3, + NR_MM_COUNTERS = 4, +}; + +struct task_delay_info { + raw_spinlock_t lock; + long: 32; + u64 blkio_start; + u64 blkio_delay; + u64 swapin_start; + u64 swapin_delay; + u32 blkio_count; + u32 swapin_count; + u64 freepages_start; + u64 freepages_delay; + u64 thrashing_start; + u64 thrashing_delay; + u64 compact_start; + u64 compact_delay; + u64 wpcopy_start; + u64 wpcopy_delay; + u64 irq_delay; + u32 freepages_count; + u32 thrashing_count; + u32 compact_count; + u32 wpcopy_count; + u32 irq_count; + long: 32; +}; + +struct ptdesc { + long unsigned int __page_flags; + union { + struct callback_head pt_rcu_head; + struct list_head pt_list; + struct { + long unsigned int _pt_pad_1; + pgtable_t pmd_huge_pte; + }; + }; + long unsigned int __page_mapping; + union { + long unsigned int pt_index; + struct mm_struct *pt_mm; + atomic_t pt_frag_refcount; + }; + union { + long unsigned int _pt_pad_2; + 
spinlock_t ptl; + }; + unsigned int __page_type; + atomic_t __page_refcount; + long unsigned int pt_memcg_data; +}; + +enum vm_fault_reason { + VM_FAULT_OOM = 1, + VM_FAULT_SIGBUS = 2, + VM_FAULT_MAJOR = 4, + VM_FAULT_HWPOISON = 16, + VM_FAULT_HWPOISON_LARGE = 32, + VM_FAULT_SIGSEGV = 64, + VM_FAULT_NOPAGE = 256, + VM_FAULT_LOCKED = 512, + VM_FAULT_RETRY = 1024, + VM_FAULT_FALLBACK = 2048, + VM_FAULT_DONE_COW = 4096, + VM_FAULT_NEEDDSYNC = 8192, + VM_FAULT_COMPLETED = 16384, + VM_FAULT_HINDEX_MASK = 983040, +}; + +typedef unsigned int zap_flags_t; + +enum { + FOLL_WRITE = 1, + FOLL_GET = 2, + FOLL_DUMP = 4, + FOLL_FORCE = 8, + FOLL_NOWAIT = 16, + FOLL_NOFAULT = 32, + FOLL_HWPOISON = 64, + FOLL_ANON = 128, + FOLL_LONGTERM = 256, + FOLL_SPLIT_PMD = 512, + FOLL_PCI_P2PDMA = 1024, + FOLL_INTERRUPTIBLE = 2048, + FOLL_HONOR_NUMA_FAULT = 4096, +}; + +enum { + __PERCPU_REF_ATOMIC = 1, + __PERCPU_REF_DEAD = 2, + __PERCPU_REF_ATOMIC_DEAD = 3, + __PERCPU_REF_FLAG_BITS = 2, +}; + +typedef unsigned int pgtbl_mod_mask; + +enum mthp_stat_item { + MTHP_STAT_ANON_FAULT_ALLOC = 0, + MTHP_STAT_ANON_FAULT_FALLBACK = 1, + MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE = 2, + MTHP_STAT_ZSWPOUT = 3, + MTHP_STAT_SWPIN = 4, + MTHP_STAT_SWPOUT = 5, + MTHP_STAT_SWPOUT_FALLBACK = 6, + MTHP_STAT_SHMEM_ALLOC = 7, + MTHP_STAT_SHMEM_FALLBACK = 8, + MTHP_STAT_SHMEM_FALLBACK_CHARGE = 9, + MTHP_STAT_SPLIT = 10, + MTHP_STAT_SPLIT_FAILED = 11, + MTHP_STAT_SPLIT_DEFERRED = 12, + MTHP_STAT_NR_ANON = 13, + MTHP_STAT_NR_ANON_PARTIALLY_MAPPED = 14, + __MTHP_STAT_COUNT = 15, +}; + +struct encoded_page; + +struct vm_event_state { + long unsigned int event[70]; +}; + +struct zap_details { + struct folio *single_folio; + bool even_cows; + zap_flags_t zap_flags; +}; + +struct follow_pfnmap_args { + struct vm_area_struct *vma; + long unsigned int address; + spinlock_t *lock; + pte_t *ptep; + long unsigned int pfn; + pgprot_t pgprot; + bool writable; + bool special; +}; + +typedef int (*pte_fn_t)(pte_t *, long unsigned int, void *); + +enum { + SWP_USED = 1, + SWP_WRITEOK = 2, + SWP_DISCARDABLE = 4, + SWP_DISCARDING = 8, + SWP_SOLIDSTATE = 16, + SWP_CONTINUED = 32, + SWP_BLKDEV = 64, + SWP_ACTIVATED = 128, + SWP_FS_OPS = 256, + SWP_AREA_DISCARD = 512, + SWP_PAGE_DISCARD = 1024, + SWP_STABLE_WRITES = 2048, + SWP_SYNCHRONOUS_IO = 4096, + SWP_SCANNING = 16384, +}; + +typedef long unsigned int pte_marker; + +typedef int rmap_t; + +enum rmap_level { + RMAP_LEVEL_PTE = 0, + RMAP_LEVEL_PMD = 1, +}; + +struct mmu_notifier_range { + long unsigned int start; + long unsigned int end; +}; + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct encoded_page *encoded_pages[0]; +}; + +struct mmu_gather { + struct mm_struct *mm; + long unsigned int start; + long unsigned int end; + unsigned int fullmm: 1; + unsigned int need_flush_all: 1; + unsigned int freed_tables: 1; + unsigned int delayed_rmap: 1; + unsigned int cleared_ptes: 1; + unsigned int cleared_pmds: 1; + unsigned int cleared_puds: 1; + unsigned int cleared_p4ds: 1; + unsigned int vma_exec: 1; + unsigned int vma_huge: 1; + unsigned int vma_pfn: 1; + unsigned int batch_count; + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[8]; +}; + +struct unlink_vma_file_batch { + int count; + struct vm_area_struct *vmas[8]; +}; + +typedef int fpb_t; + +struct saved { + struct path link; + struct delayed_call done; + const char *name; + unsigned int seq; +}; + +struct nameidata { + struct path path; + struct qstr last; + 
struct path root; + struct inode *inode; + unsigned int flags; + unsigned int state; + unsigned int seq; + unsigned int next_seq; + unsigned int m_seq; + unsigned int r_seq; + int last_type; + unsigned int depth; + int total_link_count; + struct saved *stack; + struct saved internal[2]; + struct filename *name; + const char *pathname; + struct nameidata *saved; + unsigned int root_seq; + int dfd; + vfsuid_t dir_vfsuid; + umode_t dir_mode; +}; + +struct name_snapshot { + struct qstr name; + unsigned char inline_name[36]; + long: 32; +}; + +struct __large_struct { + long unsigned int buf[100]; +}; + +struct posix_acl_entry { + short int e_tag; + short unsigned int e_perm; + union { + kuid_t e_uid; + kgid_t e_gid; + }; +}; + +struct posix_acl { + refcount_t a_refcount; + unsigned int a_count; + struct callback_head a_rcu; + struct posix_acl_entry a_entries[0]; +}; + +struct file_lock_context { + spinlock_t flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; + +struct fsnotify_mark_connector { + spinlock_t lock; + unsigned char type; + unsigned char prio; + short unsigned int flags; + union { + void *obj; + struct fsnotify_mark_connector *destroy_next; + }; + struct hlist_head list; +}; + +enum inode_i_mutex_lock_class { + I_MUTEX_NORMAL = 0, + I_MUTEX_PARENT = 1, + I_MUTEX_CHILD = 2, + I_MUTEX_XATTR = 3, + I_MUTEX_NONDIR2 = 4, + I_MUTEX_PARENT2 = 5, +}; + +struct fsnotify_sb_info { + struct fsnotify_mark_connector *sb_marks; + atomic_long_t watched_objects[3]; +}; + +struct renamedata { + struct mnt_idmap *old_mnt_idmap; + struct inode *old_dir; + struct dentry *old_dentry; + struct mnt_idmap *new_mnt_idmap; + struct inode *new_dir; + struct dentry *new_dentry; + struct inode **delegated_inode; + unsigned int flags; +}; + +struct file_lock_core { + struct file_lock_core *flc_blocker; + struct list_head flc_list; + struct hlist_node flc_link; + struct list_head flc_blocked_requests; + struct list_head flc_blocked_member; + fl_owner_t flc_owner; + unsigned int flc_flags; + unsigned char flc_type; + pid_t flc_pid; + int flc_link_cpu; + wait_queue_head_t flc_wait; + struct file *flc_file; +}; + +struct nlm_lockowner; + +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; + +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + +struct file_lock_operations; + +struct lock_manager_operations; + +struct file_lock { + struct file_lock_core c; + loff_t fl_start; + loff_t fl_end; + const struct file_lock_operations *fl_ops; + const struct lock_manager_operations *fl_lmops; + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; + int state; + unsigned int debug_id; + } afs; + struct { + struct inode *inode; + } ceph; + } fl_u; +}; + +struct lease_manager_operations; + +struct file_lease { + struct file_lock_core c; + struct fasync_struct *fl_fasync; + long unsigned int fl_break_time; + long unsigned int fl_downgrade_time; + const struct lease_manager_operations *fl_lmops; +}; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct lock_manager_operations { + void *lm_mod_owner; + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); + void (*lm_notify)(struct file_lock *); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_lock_expirable)(struct file_lock *); + void (*lm_expire_lock)(); +}; 
+ +struct lease_manager_operations { + bool (*lm_break)(struct file_lease *); + int (*lm_change)(struct file_lease *, int, struct list_head *); + void (*lm_setup)(struct file_lease *, void **); + bool (*lm_breaker_owns_lease)(struct file_lease *); +}; + +enum { + LAST_NORM = 0, + LAST_ROOT = 1, + LAST_DOT = 2, + LAST_DOTDOT = 3, +}; + +typedef int filler_t(struct file *, struct folio *); + +struct new_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; + char domainname[65]; +}; + +struct uts_namespace { + struct new_utsname name; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; +}; + +struct mount; + +struct mnt_namespace { + struct ns_common ns; + struct mount *root; + struct rb_root mounts; + struct user_namespace *user_ns; + struct ucounts *ucounts; + u64 seq; + wait_queue_head_t poll; + long: 32; + u64 event; + unsigned int nr_mounts; + unsigned int pending_mounts; + struct rb_node mnt_ns_tree_node; + refcount_t passive; +}; + +enum fsnotify_group_prio { + FSNOTIFY_PRIO_NORMAL = 0, + FSNOTIFY_PRIO_CONTENT = 1, + FSNOTIFY_PRIO_PRE_CONTENT = 2, + __FSNOTIFY_PRIO_NUM = 3, +}; + +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE = 0, + FSNOTIFY_EVENT_PATH = 1, + FSNOTIFY_EVENT_INODE = 2, + FSNOTIFY_EVENT_DENTRY = 3, + FSNOTIFY_EVENT_ERROR = 4, +}; + +struct fs_pin { + wait_queue_head_t wait; + int done; + struct hlist_node s_list; + struct hlist_node m_list; + void (*kill)(struct fs_pin *); +}; + +struct fd { + long unsigned int word; +}; + +typedef struct fd class_fd_raw_t; + +struct syscall_metadata { + const char *name; + int syscall_nr; + int nb_args; + const char **types; + const char **args; + struct list_head enter_fields; + struct trace_event_call *enter_event; + struct trace_event_call *exit_event; +}; + +enum { + IPSTATS_MIB_NUM = 0, + IPSTATS_MIB_INPKTS = 1, + IPSTATS_MIB_INOCTETS = 2, + IPSTATS_MIB_INDELIVERS = 3, + IPSTATS_MIB_OUTFORWDATAGRAMS = 4, + IPSTATS_MIB_OUTREQUESTS = 5, + IPSTATS_MIB_OUTOCTETS = 6, + IPSTATS_MIB_INHDRERRORS = 7, + IPSTATS_MIB_INTOOBIGERRORS = 8, + IPSTATS_MIB_INNOROUTES = 9, + IPSTATS_MIB_INADDRERRORS = 10, + IPSTATS_MIB_INUNKNOWNPROTOS = 11, + IPSTATS_MIB_INTRUNCATEDPKTS = 12, + IPSTATS_MIB_INDISCARDS = 13, + IPSTATS_MIB_OUTDISCARDS = 14, + IPSTATS_MIB_OUTNOROUTES = 15, + IPSTATS_MIB_REASMTIMEOUT = 16, + IPSTATS_MIB_REASMREQDS = 17, + IPSTATS_MIB_REASMOKS = 18, + IPSTATS_MIB_REASMFAILS = 19, + IPSTATS_MIB_FRAGOKS = 20, + IPSTATS_MIB_FRAGFAILS = 21, + IPSTATS_MIB_FRAGCREATES = 22, + IPSTATS_MIB_INMCASTPKTS = 23, + IPSTATS_MIB_OUTMCASTPKTS = 24, + IPSTATS_MIB_INBCASTPKTS = 25, + IPSTATS_MIB_OUTBCASTPKTS = 26, + IPSTATS_MIB_INMCASTOCTETS = 27, + IPSTATS_MIB_OUTMCASTOCTETS = 28, + IPSTATS_MIB_INBCASTOCTETS = 29, + IPSTATS_MIB_OUTBCASTOCTETS = 30, + IPSTATS_MIB_CSUMERRORS = 31, + IPSTATS_MIB_NOECTPKTS = 32, + IPSTATS_MIB_ECT1PKTS = 33, + IPSTATS_MIB_ECT0PKTS = 34, + IPSTATS_MIB_CEPKTS = 35, + IPSTATS_MIB_REASM_OVERLAPS = 36, + IPSTATS_MIB_OUTPKTS = 37, + __IPSTATS_MIB_MAX = 38, +}; + +enum { + ICMP_MIB_NUM = 0, + ICMP_MIB_INMSGS = 1, + ICMP_MIB_INERRORS = 2, + ICMP_MIB_INDESTUNREACHS = 3, + ICMP_MIB_INTIMEEXCDS = 4, + ICMP_MIB_INPARMPROBS = 5, + ICMP_MIB_INSRCQUENCHS = 6, + ICMP_MIB_INREDIRECTS = 7, + ICMP_MIB_INECHOS = 8, + ICMP_MIB_INECHOREPS = 9, + ICMP_MIB_INTIMESTAMPS = 10, + ICMP_MIB_INTIMESTAMPREPS = 11, + ICMP_MIB_INADDRMASKS = 12, + ICMP_MIB_INADDRMASKREPS = 13, + ICMP_MIB_OUTMSGS = 14, + ICMP_MIB_OUTERRORS = 15, + ICMP_MIB_OUTDESTUNREACHS = 16, + 
ICMP_MIB_OUTTIMEEXCDS = 17, + ICMP_MIB_OUTPARMPROBS = 18, + ICMP_MIB_OUTSRCQUENCHS = 19, + ICMP_MIB_OUTREDIRECTS = 20, + ICMP_MIB_OUTECHOS = 21, + ICMP_MIB_OUTECHOREPS = 22, + ICMP_MIB_OUTTIMESTAMPS = 23, + ICMP_MIB_OUTTIMESTAMPREPS = 24, + ICMP_MIB_OUTADDRMASKS = 25, + ICMP_MIB_OUTADDRMASKREPS = 26, + ICMP_MIB_CSUMERRORS = 27, + ICMP_MIB_RATELIMITGLOBAL = 28, + ICMP_MIB_RATELIMITHOST = 29, + __ICMP_MIB_MAX = 30, +}; + +enum { + ICMP6_MIB_NUM = 0, + ICMP6_MIB_INMSGS = 1, + ICMP6_MIB_INERRORS = 2, + ICMP6_MIB_OUTMSGS = 3, + ICMP6_MIB_OUTERRORS = 4, + ICMP6_MIB_CSUMERRORS = 5, + ICMP6_MIB_RATELIMITHOST = 6, + __ICMP6_MIB_MAX = 7, +}; + +enum { + TCP_MIB_NUM = 0, + TCP_MIB_RTOALGORITHM = 1, + TCP_MIB_RTOMIN = 2, + TCP_MIB_RTOMAX = 3, + TCP_MIB_MAXCONN = 4, + TCP_MIB_ACTIVEOPENS = 5, + TCP_MIB_PASSIVEOPENS = 6, + TCP_MIB_ATTEMPTFAILS = 7, + TCP_MIB_ESTABRESETS = 8, + TCP_MIB_CURRESTAB = 9, + TCP_MIB_INSEGS = 10, + TCP_MIB_OUTSEGS = 11, + TCP_MIB_RETRANSSEGS = 12, + TCP_MIB_INERRS = 13, + TCP_MIB_OUTRSTS = 14, + TCP_MIB_CSUMERRORS = 15, + __TCP_MIB_MAX = 16, +}; + +enum { + UDP_MIB_NUM = 0, + UDP_MIB_INDATAGRAMS = 1, + UDP_MIB_NOPORTS = 2, + UDP_MIB_INERRORS = 3, + UDP_MIB_OUTDATAGRAMS = 4, + UDP_MIB_RCVBUFERRORS = 5, + UDP_MIB_SNDBUFERRORS = 6, + UDP_MIB_CSUMERRORS = 7, + UDP_MIB_IGNOREDMULTI = 8, + UDP_MIB_MEMERRORS = 9, + __UDP_MIB_MAX = 10, +}; + +enum { + LINUX_MIB_NUM = 0, + LINUX_MIB_SYNCOOKIESSENT = 1, + LINUX_MIB_SYNCOOKIESRECV = 2, + LINUX_MIB_SYNCOOKIESFAILED = 3, + LINUX_MIB_EMBRYONICRSTS = 4, + LINUX_MIB_PRUNECALLED = 5, + LINUX_MIB_RCVPRUNED = 6, + LINUX_MIB_OFOPRUNED = 7, + LINUX_MIB_OUTOFWINDOWICMPS = 8, + LINUX_MIB_LOCKDROPPEDICMPS = 9, + LINUX_MIB_ARPFILTER = 10, + LINUX_MIB_TIMEWAITED = 11, + LINUX_MIB_TIMEWAITRECYCLED = 12, + LINUX_MIB_TIMEWAITKILLED = 13, + LINUX_MIB_PAWSACTIVEREJECTED = 14, + LINUX_MIB_PAWSESTABREJECTED = 15, + LINUX_MIB_DELAYEDACKS = 16, + LINUX_MIB_DELAYEDACKLOCKED = 17, + LINUX_MIB_DELAYEDACKLOST = 18, + LINUX_MIB_LISTENOVERFLOWS = 19, + LINUX_MIB_LISTENDROPS = 20, + LINUX_MIB_TCPHPHITS = 21, + LINUX_MIB_TCPPUREACKS = 22, + LINUX_MIB_TCPHPACKS = 23, + LINUX_MIB_TCPRENORECOVERY = 24, + LINUX_MIB_TCPSACKRECOVERY = 25, + LINUX_MIB_TCPSACKRENEGING = 26, + LINUX_MIB_TCPSACKREORDER = 27, + LINUX_MIB_TCPRENOREORDER = 28, + LINUX_MIB_TCPTSREORDER = 29, + LINUX_MIB_TCPFULLUNDO = 30, + LINUX_MIB_TCPPARTIALUNDO = 31, + LINUX_MIB_TCPDSACKUNDO = 32, + LINUX_MIB_TCPLOSSUNDO = 33, + LINUX_MIB_TCPLOSTRETRANSMIT = 34, + LINUX_MIB_TCPRENOFAILURES = 35, + LINUX_MIB_TCPSACKFAILURES = 36, + LINUX_MIB_TCPLOSSFAILURES = 37, + LINUX_MIB_TCPFASTRETRANS = 38, + LINUX_MIB_TCPSLOWSTARTRETRANS = 39, + LINUX_MIB_TCPTIMEOUTS = 40, + LINUX_MIB_TCPLOSSPROBES = 41, + LINUX_MIB_TCPLOSSPROBERECOVERY = 42, + LINUX_MIB_TCPRENORECOVERYFAIL = 43, + LINUX_MIB_TCPSACKRECOVERYFAIL = 44, + LINUX_MIB_TCPRCVCOLLAPSED = 45, + LINUX_MIB_TCPDSACKOLDSENT = 46, + LINUX_MIB_TCPDSACKOFOSENT = 47, + LINUX_MIB_TCPDSACKRECV = 48, + LINUX_MIB_TCPDSACKOFORECV = 49, + LINUX_MIB_TCPABORTONDATA = 50, + LINUX_MIB_TCPABORTONCLOSE = 51, + LINUX_MIB_TCPABORTONMEMORY = 52, + LINUX_MIB_TCPABORTONTIMEOUT = 53, + LINUX_MIB_TCPABORTONLINGER = 54, + LINUX_MIB_TCPABORTFAILED = 55, + LINUX_MIB_TCPMEMORYPRESSURES = 56, + LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, + LINUX_MIB_TCPSACKDISCARD = 58, + LINUX_MIB_TCPDSACKIGNOREDOLD = 59, + LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, + LINUX_MIB_TCPSPURIOUSRTOS = 61, + LINUX_MIB_TCPMD5NOTFOUND = 62, + LINUX_MIB_TCPMD5UNEXPECTED = 63, + LINUX_MIB_TCPMD5FAILURE = 64, + LINUX_MIB_SACKSHIFTED = 
65, + LINUX_MIB_SACKMERGED = 66, + LINUX_MIB_SACKSHIFTFALLBACK = 67, + LINUX_MIB_TCPBACKLOGDROP = 68, + LINUX_MIB_PFMEMALLOCDROP = 69, + LINUX_MIB_TCPMINTTLDROP = 70, + LINUX_MIB_TCPDEFERACCEPTDROP = 71, + LINUX_MIB_IPRPFILTER = 72, + LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, + LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, + LINUX_MIB_TCPREQQFULLDROP = 75, + LINUX_MIB_TCPRETRANSFAIL = 76, + LINUX_MIB_TCPRCVCOALESCE = 77, + LINUX_MIB_TCPBACKLOGCOALESCE = 78, + LINUX_MIB_TCPOFOQUEUE = 79, + LINUX_MIB_TCPOFODROP = 80, + LINUX_MIB_TCPOFOMERGE = 81, + LINUX_MIB_TCPCHALLENGEACK = 82, + LINUX_MIB_TCPSYNCHALLENGE = 83, + LINUX_MIB_TCPFASTOPENACTIVE = 84, + LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, + LINUX_MIB_TCPFASTOPENPASSIVE = 86, + LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, + LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, + LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, + LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, + LINUX_MIB_BUSYPOLLRXPACKETS = 92, + LINUX_MIB_TCPAUTOCORKING = 93, + LINUX_MIB_TCPFROMZEROWINDOWADV = 94, + LINUX_MIB_TCPTOZEROWINDOWADV = 95, + LINUX_MIB_TCPWANTZEROWINDOWADV = 96, + LINUX_MIB_TCPSYNRETRANS = 97, + LINUX_MIB_TCPORIGDATASENT = 98, + LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, + LINUX_MIB_TCPHYSTARTTRAINCWND = 100, + LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, + LINUX_MIB_TCPHYSTARTDELAYCWND = 102, + LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, + LINUX_MIB_TCPACKSKIPPEDPAWS = 104, + LINUX_MIB_TCPACKSKIPPEDSEQ = 105, + LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, + LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, + LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, + LINUX_MIB_TCPWINPROBE = 109, + LINUX_MIB_TCPKEEPALIVE = 110, + LINUX_MIB_TCPMTUPFAIL = 111, + LINUX_MIB_TCPMTUPSUCCESS = 112, + LINUX_MIB_TCPDELIVERED = 113, + LINUX_MIB_TCPDELIVEREDCE = 114, + LINUX_MIB_TCPACKCOMPRESSED = 115, + LINUX_MIB_TCPZEROWINDOWDROP = 116, + LINUX_MIB_TCPRCVQDROP = 117, + LINUX_MIB_TCPWQUEUETOOBIG = 118, + LINUX_MIB_TCPFASTOPENPASSIVEALTKEY = 119, + LINUX_MIB_TCPTIMEOUTREHASH = 120, + LINUX_MIB_TCPDUPLICATEDATAREHASH = 121, + LINUX_MIB_TCPDSACKRECVSEGS = 122, + LINUX_MIB_TCPDSACKIGNOREDDUBIOUS = 123, + LINUX_MIB_TCPMIGRATEREQSUCCESS = 124, + LINUX_MIB_TCPMIGRATEREQFAILURE = 125, + LINUX_MIB_TCPPLBREHASH = 126, + LINUX_MIB_TCPAOREQUIRED = 127, + LINUX_MIB_TCPAOBAD = 128, + LINUX_MIB_TCPAOKEYNOTFOUND = 129, + LINUX_MIB_TCPAOGOOD = 130, + LINUX_MIB_TCPAODROPPEDICMPS = 131, + __LINUX_MIB_MAX = 132, +}; + +enum { + LINUX_MIB_XFRMNUM = 0, + LINUX_MIB_XFRMINERROR = 1, + LINUX_MIB_XFRMINBUFFERERROR = 2, + LINUX_MIB_XFRMINHDRERROR = 3, + LINUX_MIB_XFRMINNOSTATES = 4, + LINUX_MIB_XFRMINSTATEPROTOERROR = 5, + LINUX_MIB_XFRMINSTATEMODEERROR = 6, + LINUX_MIB_XFRMINSTATESEQERROR = 7, + LINUX_MIB_XFRMINSTATEEXPIRED = 8, + LINUX_MIB_XFRMINSTATEMISMATCH = 9, + LINUX_MIB_XFRMINSTATEINVALID = 10, + LINUX_MIB_XFRMINTMPLMISMATCH = 11, + LINUX_MIB_XFRMINNOPOLS = 12, + LINUX_MIB_XFRMINPOLBLOCK = 13, + LINUX_MIB_XFRMINPOLERROR = 14, + LINUX_MIB_XFRMOUTERROR = 15, + LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16, + LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17, + LINUX_MIB_XFRMOUTNOSTATES = 18, + LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19, + LINUX_MIB_XFRMOUTSTATEMODEERROR = 20, + LINUX_MIB_XFRMOUTSTATESEQERROR = 21, + LINUX_MIB_XFRMOUTSTATEEXPIRED = 22, + LINUX_MIB_XFRMOUTPOLBLOCK = 23, + LINUX_MIB_XFRMOUTPOLDEAD = 24, + LINUX_MIB_XFRMOUTPOLERROR = 25, + LINUX_MIB_XFRMFWDHDRERROR = 26, + LINUX_MIB_XFRMOUTSTATEINVALID = 27, + LINUX_MIB_XFRMACQUIREERROR = 28, + LINUX_MIB_XFRMOUTSTATEDIRERROR = 29, + LINUX_MIB_XFRMINSTATEDIRERROR = 30, + __LINUX_MIB_XFRMMAX = 31, +}; + 
+enum { + LINUX_MIB_TLSNUM = 0, + LINUX_MIB_TLSCURRTXSW = 1, + LINUX_MIB_TLSCURRRXSW = 2, + LINUX_MIB_TLSCURRTXDEVICE = 3, + LINUX_MIB_TLSCURRRXDEVICE = 4, + LINUX_MIB_TLSTXSW = 5, + LINUX_MIB_TLSRXSW = 6, + LINUX_MIB_TLSTXDEVICE = 7, + LINUX_MIB_TLSRXDEVICE = 8, + LINUX_MIB_TLSDECRYPTERROR = 9, + LINUX_MIB_TLSRXDEVICERESYNC = 10, + LINUX_MIB_TLSDECRYPTRETRY = 11, + LINUX_MIB_TLSRXNOPADVIOL = 12, + __LINUX_MIB_TLSMAX = 13, +}; + +enum nf_inet_hooks { + NF_INET_PRE_ROUTING = 0, + NF_INET_LOCAL_IN = 1, + NF_INET_FORWARD = 2, + NF_INET_LOCAL_OUT = 3, + NF_INET_POST_ROUTING = 4, + NF_INET_NUMHOOKS = 5, + NF_INET_INGRESS = 5, +}; + +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_INET = 1, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_NETDEV = 5, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_NUMPROTO = 11, +}; + +enum tcp_conntrack { + TCP_CONNTRACK_NONE = 0, + TCP_CONNTRACK_SYN_SENT = 1, + TCP_CONNTRACK_SYN_RECV = 2, + TCP_CONNTRACK_ESTABLISHED = 3, + TCP_CONNTRACK_FIN_WAIT = 4, + TCP_CONNTRACK_CLOSE_WAIT = 5, + TCP_CONNTRACK_LAST_ACK = 6, + TCP_CONNTRACK_TIME_WAIT = 7, + TCP_CONNTRACK_CLOSE = 8, + TCP_CONNTRACK_LISTEN = 9, + TCP_CONNTRACK_MAX = 10, + TCP_CONNTRACK_IGNORE = 11, + TCP_CONNTRACK_RETRANS = 12, + TCP_CONNTRACK_UNACK = 13, + TCP_CONNTRACK_TIMEOUT_MAX = 14, +}; + +enum udp_conntrack { + UDP_CT_UNREPLIED = 0, + UDP_CT_REPLIED = 1, + UDP_CT_MAX = 2, +}; + +enum { + XFRM_POLICY_IN = 0, + XFRM_POLICY_OUT = 1, + XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, + XFRM_POLICY_MAX = 3, +}; + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = -1, + NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP = 1, + MAX_NETNS_BPF_ATTACH_TYPE = 2, +}; + +enum { + TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = 1, + TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = 2, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM = 4, + TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT = 8, + TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM = 16, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT = 32, + __TCA_FLOWER_KEY_FLAGS_MAX = 33, +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL = 0, + FLOW_DISSECTOR_KEY_BASIC = 1, + FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, + FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, + FLOW_DISSECTOR_KEY_PORTS = 4, + FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, + FLOW_DISSECTOR_KEY_ICMP = 6, + FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, + FLOW_DISSECTOR_KEY_TIPC = 8, + FLOW_DISSECTOR_KEY_ARP = 9, + FLOW_DISSECTOR_KEY_VLAN = 10, + FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, + FLOW_DISSECTOR_KEY_GRE_KEYID = 12, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, + FLOW_DISSECTOR_KEY_ENC_KEYID = 14, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, + FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, + FLOW_DISSECTOR_KEY_ENC_PORTS = 18, + FLOW_DISSECTOR_KEY_MPLS = 19, + FLOW_DISSECTOR_KEY_TCP = 20, + FLOW_DISSECTOR_KEY_IP = 21, + FLOW_DISSECTOR_KEY_CVLAN = 22, + FLOW_DISSECTOR_KEY_ENC_IP = 23, + FLOW_DISSECTOR_KEY_ENC_OPTS = 24, + FLOW_DISSECTOR_KEY_META = 25, + FLOW_DISSECTOR_KEY_CT = 26, + FLOW_DISSECTOR_KEY_HASH = 27, + FLOW_DISSECTOR_KEY_NUM_OF_VLANS = 28, + FLOW_DISSECTOR_KEY_PPPOE = 29, + FLOW_DISSECTOR_KEY_L2TPV3 = 30, + FLOW_DISSECTOR_KEY_CFM = 31, + FLOW_DISSECTOR_KEY_IPSEC = 32, + FLOW_DISSECTOR_KEY_MAX = 33, +}; + +enum skb_ext_id { + SKB_EXT_BRIDGE_NF = 0, + SKB_EXT_NUM = 1, +}; + +struct open_flags { + int open_flag; + umode_t mode; + int acc_mode; + int intent; + int lookup_flags; +}; + +struct mnt_pcp; + +struct mountpoint; + +struct mount { + struct hlist_node mnt_hash; + struct mount *mnt_parent; + struct dentry *mnt_mountpoint; + struct vfsmount mnt; + union { + struct 
callback_head mnt_rcu; + struct llist_node mnt_llist; + }; + struct mnt_pcp *mnt_pcp; + struct list_head mnt_mounts; + struct list_head mnt_child; + struct list_head mnt_instance; + const char *mnt_devname; + union { + struct rb_node mnt_node; + struct list_head mnt_list; + }; + struct list_head mnt_expire; + struct list_head mnt_share; + struct list_head mnt_slave_list; + struct list_head mnt_slave; + struct mount *mnt_master; + struct mnt_namespace *mnt_ns; + struct mountpoint *mnt_mp; + union { + struct hlist_node mnt_mp_list; + struct hlist_node mnt_umount; + }; + struct list_head mnt_umounting; + struct fsnotify_mark_connector *mnt_fsnotify_marks; + __u32 mnt_fsnotify_mask; + int mnt_id; + long: 32; + u64 mnt_id_unique; + int mnt_group_id; + int mnt_expiry_mark; + struct hlist_head mnt_pins; + struct hlist_head mnt_stuck_children; +}; + +struct mnt_pcp { + int mnt_count; + int mnt_writers; +}; + +struct mountpoint { + struct hlist_node m_hash; + struct dentry *m_dentry; + struct hlist_head m_list; + int m_count; +}; + +enum { + WALK_TRAILING = 1, + WALK_MORE = 2, + WALK_NOFOLLOW = 4, +}; + +struct request; + +struct rq_list { + struct request *head; + struct request *tail; +}; + +struct blk_plug { + struct rq_list mq_list; + struct rq_list cached_rqs; + u64 cur_ktime; + short unsigned int nr_ios; + short unsigned int rq_count; + bool multiple_queues; + bool has_elevator; + struct list_head cb_list; +}; + +typedef unsigned int blk_mode_t; + +struct block_device_operations; + +struct timer_rand_state; + +struct disk_events; + +struct badblocks; + +struct blk_independent_access_ranges; + +struct gendisk { + int major; + int first_minor; + int minors; + char disk_name[32]; + short unsigned int events; + short unsigned int event_flags; + struct xarray part_tbl; + struct block_device *part0; + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + struct bio_set bio_split; + int flags; + long unsigned int state; + struct mutex open_mutex; + unsigned int open_partitions; + struct backing_dev_info *bdi; + struct kobject queue_kobj; + struct kobject *slave_dir; + struct list_head slave_bdevs; + struct timer_rand_state *random; + atomic_t sync_io; + struct disk_events *ev; + int node_id; + struct badblocks *bb; + struct lockdep_map lockdep_map; + u64 diskseq; + blk_mode_t open_mode; + struct blk_independent_access_ranges *ia_ranges; +}; + +enum lru_list { + LRU_INACTIVE_ANON = 0, + LRU_ACTIVE_ANON = 1, + LRU_INACTIVE_FILE = 2, + LRU_ACTIVE_FILE = 3, + LRU_UNEVICTABLE = 4, + NR_LRU_LISTS = 5, +}; + +typedef unsigned int blk_features_t; + +typedef unsigned int blk_flags_t; + +enum blk_integrity_checksum { + BLK_INTEGRITY_CSUM_NONE = 0, + BLK_INTEGRITY_CSUM_IP = 1, + BLK_INTEGRITY_CSUM_CRC = 2, + BLK_INTEGRITY_CSUM_CRC64 = 3, +} __attribute__((mode(byte))); + +struct blk_integrity { + unsigned char flags; + enum blk_integrity_checksum csum_type; + unsigned char tuple_size; + unsigned char pi_offset; + unsigned char interval_exp; + unsigned char tag_size; +}; + +struct queue_limits { + blk_features_t features; + blk_flags_t flags; + long unsigned int seg_boundary_mask; + long unsigned int virt_boundary_mask; + unsigned int max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_user_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int 
max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_user_discard_sectors; + unsigned int max_secure_erase_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int max_hw_zone_append_sectors; + unsigned int max_zone_append_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + unsigned int zone_write_granularity; + unsigned int atomic_write_hw_max; + unsigned int atomic_write_max_sectors; + unsigned int atomic_write_hw_boundary; + unsigned int atomic_write_boundary_sectors; + unsigned int atomic_write_hw_unit_min; + unsigned int atomic_write_unit_min; + unsigned int atomic_write_hw_unit_max; + unsigned int atomic_write_unit_max; + short unsigned int max_segments; + short unsigned int max_integrity_segments; + short unsigned int max_discard_segments; + unsigned int max_open_zones; + unsigned int max_active_zones; + unsigned int dma_alignment; + unsigned int dma_pad_mask; + struct blk_integrity integrity; +}; + +struct elevator_queue; + +struct blk_mq_ops; + +struct blk_mq_ctx; + +struct blk_queue_stats; + +struct rq_qos; + +struct blk_mq_tags; + +struct blk_trace; + +struct blk_flush_queue; + +struct throtl_data; + +struct blk_mq_tag_set; + +struct request_queue { + void *queuedata; + struct elevator_queue *elevator; + const struct blk_mq_ops *mq_ops; + struct blk_mq_ctx *queue_ctx; + long unsigned int queue_flags; + unsigned int rq_timeout; + unsigned int queue_depth; + refcount_t refs; + unsigned int nr_hw_queues; + struct xarray hctx_table; + struct percpu_ref q_usage_counter; + struct lock_class_key io_lock_cls_key; + struct lockdep_map io_lockdep_map; + struct lock_class_key q_lock_cls_key; + struct lockdep_map q_lockdep_map; + struct request *last_merge; + spinlock_t queue_lock; + int quiesce_depth; + struct gendisk *disk; + struct kobject *mq_kobj; + struct queue_limits limits; + struct device *dev; + enum rpm_status rpm_status; + atomic_t pm_only; + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + struct mutex rq_qos_mutex; + int id; + long unsigned int nr_requests; + struct timer_list timeout; + struct work_struct timeout_work; + atomic_t nr_active_requests_shared_tags; + struct blk_mq_tags *sched_shared_tags; + struct list_head icq_list; + long unsigned int blkcg_pols[1]; + struct blkcg_gq *root_blkg; + struct list_head blkg_list; + struct mutex blkcg_mutex; + int node; + spinlock_t requeue_lock; + struct list_head requeue_list; + struct delayed_work requeue_work; + struct blk_trace *blk_trace; + struct blk_flush_queue *fq; + struct list_head flush_list; + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + struct mutex limits_lock; + struct list_head unused_hctx_list; + spinlock_t unused_hctx_lock; + int mq_freeze_depth; + struct throtl_data *td; + struct callback_head callback_head; + wait_queue_head_t mq_freeze_wq; + struct mutex mq_freeze_lock; + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct dentry *rqos_debugfs_dir; + struct mutex debugfs_mutex; + bool mq_sysfs_init_done; +}; + +struct buffer_head; + +typedef int get_block_t(struct inode *, sector_t, struct buffer_head *, int); + +typedef void bh_end_io_t(struct buffer_head *, int); + +struct buffer_head { + long unsigned int b_state; + struct buffer_head *b_this_page; + union { + struct page *b_page; + struct folio *b_folio; + }; + long: 32; + sector_t b_blocknr; + size_t b_size; + char *b_data; + struct block_device *b_bdev; + bh_end_io_t *b_end_io; + void 
*b_private; + struct list_head b_assoc_buffers; + struct address_space *b_assoc_map; + atomic_t b_count; + spinlock_t b_uptodate_lock; +}; + +struct io_comp_batch { + struct rq_list req_list; + bool need_ts; + void (*complete)(struct io_comp_batch *); +}; + +struct partition_meta_info { + char uuid[37]; + u8 volname[64]; +}; + +enum { + BIO_PAGE_PINNED = 0, + BIO_CLONED = 1, + BIO_BOUNCED = 2, + BIO_QUIET = 3, + BIO_CHAIN = 4, + BIO_REFFED = 5, + BIO_BPS_THROTTLED = 6, + BIO_TRACE_COMPLETION = 7, + BIO_CGROUP_ACCT = 8, + BIO_QOS_THROTTLED = 9, + BIO_QOS_MERGED = 10, + BIO_REMAPPED = 11, + BIO_ZONE_WRITE_PLUGGING = 12, + BIO_EMULATES_ZONE_APPEND = 13, + BIO_FLAG_LAST = 14, +}; + +enum req_op { + REQ_OP_READ = 0, + REQ_OP_WRITE = 1, + REQ_OP_FLUSH = 2, + REQ_OP_DISCARD = 3, + REQ_OP_SECURE_ERASE = 5, + REQ_OP_ZONE_APPEND = 7, + REQ_OP_WRITE_ZEROES = 9, + REQ_OP_ZONE_OPEN = 10, + REQ_OP_ZONE_CLOSE = 11, + REQ_OP_ZONE_FINISH = 12, + REQ_OP_ZONE_RESET = 13, + REQ_OP_ZONE_RESET_ALL = 15, + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + REQ_OP_LAST = 36, +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = 8, + __REQ_FAILFAST_TRANSPORT = 9, + __REQ_FAILFAST_DRIVER = 10, + __REQ_SYNC = 11, + __REQ_META = 12, + __REQ_PRIO = 13, + __REQ_NOMERGE = 14, + __REQ_IDLE = 15, + __REQ_INTEGRITY = 16, + __REQ_FUA = 17, + __REQ_PREFLUSH = 18, + __REQ_RAHEAD = 19, + __REQ_BACKGROUND = 20, + __REQ_NOWAIT = 21, + __REQ_POLLED = 22, + __REQ_ALLOC_CACHE = 23, + __REQ_SWAP = 24, + __REQ_DRV = 25, + __REQ_FS_PRIVATE = 26, + __REQ_ATOMIC = 27, + __REQ_NOUNMAP = 28, + __REQ_NR_BITS = 29, +}; + +struct folio_iter { + struct folio *folio; + size_t offset; + size_t length; + struct folio *_next; + size_t _seg_count; + int _i; +}; + +enum bh_state_bits { + BH_Uptodate = 0, + BH_Dirty = 1, + BH_Lock = 2, + BH_Req = 3, + BH_Mapped = 4, + BH_New = 5, + BH_Async_Read = 6, + BH_Async_Write = 7, + BH_Delay = 8, + BH_Boundary = 9, + BH_Write_EIO = 10, + BH_Unwritten = 11, + BH_Quiet = 12, + BH_Meta = 13, + BH_Prio = 14, + BH_Defer_Completion = 15, + BH_PrivateStart = 16, +}; + +struct blk_zone { + __u64 start; + __u64 len; + __u64 wp; + __u8 type; + __u8 cond; + __u8 non_seq; + __u8 reset; + __u8 resv[4]; + __u64 capacity; + __u8 reserved[24]; +}; + +typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *); + +enum blk_unique_id { + BLK_UID_T10 = 1, + BLK_UID_EUI64 = 2, + BLK_UID_NAA = 3, +}; + +struct hd_geometry; + +struct pr_ops; + +struct block_device_operations { + void (*submit_bio)(struct bio *); + int (*poll_bio)(struct bio *, struct io_comp_batch *, unsigned int); + int (*open)(struct gendisk *, blk_mode_t); + void (*release)(struct gendisk *); + int (*ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); + int (*compat_ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); + unsigned int (*check_events)(struct gendisk *, unsigned int); + void (*unlock_native_capacity)(struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + int (*set_read_only)(struct block_device *, bool); + void (*free_disk)(struct gendisk *); + void (*swap_slot_free_notify)(struct block_device *, long unsigned int); + int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *); + char * (*devnode)(struct gendisk *, umode_t *); + int (*get_unique_id)(struct gendisk *, u8 *, enum blk_unique_id); + struct module *owner; + const struct pr_ops *pr_ops; + int (*alternative_gpt_sector)(struct gendisk *, sector_t *); +}; + +struct 
blk_independent_access_range { + struct kobject kobj; + long: 32; + sector_t sector; + sector_t nr_sectors; +}; + +struct blk_independent_access_ranges { + struct kobject kobj; + bool sysfs_registered; + unsigned int nr_ia_ranges; + long: 32; + struct blk_independent_access_range ia_range[0]; +}; + +enum blk_eh_timer_return { + BLK_EH_DONE = 0, + BLK_EH_RESET_TIMER = 1, +}; + +struct blk_mq_hw_ctx; + +struct blk_mq_queue_data; + +struct blk_mq_ops { + blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); + void (*commit_rqs)(struct blk_mq_hw_ctx *); + void (*queue_rqs)(struct rq_list *); + int (*get_budget)(struct request_queue *); + void (*put_budget)(struct request_queue *, int); + void (*set_rq_budget_token)(struct request *, int); + int (*get_rq_budget_token)(struct request *); + enum blk_eh_timer_return (*timeout)(struct request *); + int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); + void (*complete)(struct request *); + int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + int (*init_request)(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int); + void (*exit_request)(struct blk_mq_tag_set *, struct request *, unsigned int); + void (*cleanup_rq)(struct request *); + bool (*busy)(struct request_queue *); + void (*map_queues)(struct blk_mq_tag_set *); + void (*show_rq)(struct seq_file *, struct request *); +}; + +enum pr_type { + PR_WRITE_EXCLUSIVE = 1, + PR_EXCLUSIVE_ACCESS = 2, + PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +struct pr_keys; + +struct pr_held_reservation; + +struct pr_ops { + int (*pr_register)(struct block_device *, u64, u64, u32); + int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); + int (*pr_release)(struct block_device *, u64, enum pr_type); + int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); + int (*pr_clear)(struct block_device *, u64); + int (*pr_read_keys)(struct block_device *, struct pr_keys *); + int (*pr_read_reservation)(struct block_device *, struct pr_held_reservation *); +}; + +enum cpu_usage_stat { + CPUTIME_USER = 0, + CPUTIME_NICE = 1, + CPUTIME_SYSTEM = 2, + CPUTIME_SOFTIRQ = 3, + CPUTIME_IRQ = 4, + CPUTIME_IDLE = 5, + CPUTIME_IOWAIT = 6, + CPUTIME_STEAL = 7, + CPUTIME_GUEST = 8, + CPUTIME_GUEST_NICE = 9, + NR_STATS = 10, +}; + +enum cgroup_bpf_attach_type { + CGROUP_BPF_ATTACH_TYPE_INVALID = -1, + CGROUP_INET_INGRESS = 0, + CGROUP_INET_EGRESS = 1, + CGROUP_INET_SOCK_CREATE = 2, + CGROUP_SOCK_OPS = 3, + CGROUP_DEVICE = 4, + CGROUP_INET4_BIND = 5, + CGROUP_INET6_BIND = 6, + CGROUP_INET4_CONNECT = 7, + CGROUP_INET6_CONNECT = 8, + CGROUP_UNIX_CONNECT = 9, + CGROUP_INET4_POST_BIND = 10, + CGROUP_INET6_POST_BIND = 11, + CGROUP_UDP4_SENDMSG = 12, + CGROUP_UDP6_SENDMSG = 13, + CGROUP_UNIX_SENDMSG = 14, + CGROUP_SYSCTL = 15, + CGROUP_UDP4_RECVMSG = 16, + CGROUP_UDP6_RECVMSG = 17, + CGROUP_UNIX_RECVMSG = 18, + CGROUP_GETSOCKOPT = 19, + CGROUP_SETSOCKOPT = 20, + CGROUP_INET4_GETPEERNAME = 21, + CGROUP_INET6_GETPEERNAME = 22, + CGROUP_UNIX_GETPEERNAME = 23, + CGROUP_INET4_GETSOCKNAME = 24, + CGROUP_INET6_GETSOCKNAME = 25, + CGROUP_UNIX_GETSOCKNAME = 26, + CGROUP_INET_SOCK_RELEASE = 27, + CGROUP_LSM_START = 28, + CGROUP_LSM_END = 27, + MAX_CGROUP_BPF_ATTACH_TYPE = 28, +}; + +enum psi_task_count { + NR_IOWAIT = 0, + NR_MEMSTALL = 1, + NR_RUNNING = 2, + NR_MEMSTALL_RUNNING = 3, + 
NR_PSI_TASK_COUNTS = 4, +}; + +enum psi_res { + PSI_IO = 0, + PSI_MEM = 1, + PSI_CPU = 2, + NR_PSI_RESOURCES = 3, +}; + +enum psi_states { + PSI_IO_SOME = 0, + PSI_IO_FULL = 1, + PSI_MEM_SOME = 2, + PSI_MEM_FULL = 3, + PSI_CPU_SOME = 4, + PSI_CPU_FULL = 5, + PSI_NONIDLE = 6, + NR_PSI_STATES = 7, +}; + +enum psi_aggregators { + PSI_AVGS = 0, + PSI_POLL = 1, + NR_PSI_AGGREGATORS = 2, +}; + +enum cgroup_subsys_id { + cpuset_cgrp_id = 0, + cpu_cgrp_id = 1, + cpuacct_cgrp_id = 2, + io_cgrp_id = 3, + memory_cgrp_id = 4, + devices_cgrp_id = 5, + freezer_cgrp_id = 6, + net_cls_cgrp_id = 7, + perf_event_cgrp_id = 8, + pids_cgrp_id = 9, + rdma_cgrp_id = 10, + misc_cgrp_id = 11, + debug_cgrp_id = 12, + CGROUP_SUBSYS_COUNT = 13, +}; + +enum wb_stat_item { + WB_RECLAIMABLE = 0, + WB_WRITEBACK = 1, + WB_DIRTIED = 2, + WB_WRITTEN = 3, + NR_WB_STAT_ITEMS = 4, +}; + +typedef int (*writepage_t)(struct folio *, struct writeback_control *, void *); + +enum memcg_memory_event { + MEMCG_LOW = 0, + MEMCG_HIGH = 1, + MEMCG_MAX = 2, + MEMCG_OOM = 3, + MEMCG_OOM_KILL = 4, + MEMCG_OOM_GROUP_KILL = 5, + MEMCG_SWAP_HIGH = 6, + MEMCG_SWAP_MAX = 7, + MEMCG_SWAP_FAIL = 8, + MEMCG_NR_MEMORY_EVENTS = 9, +}; + +enum page_memcg_data_flags { + MEMCG_DATA_OBJEXTS = 1, + MEMCG_DATA_KMEM = 2, + __NR_MEMCG_DATA_FLAGS = 4, +}; + +struct mpage_readpage_args { + struct bio *bio; + struct folio *folio; + unsigned int nr_pages; + bool is_readahead; + sector_t last_block_in_bio; + struct buffer_head map_bh; + long unsigned int first_logical_block; + get_block_t *get_block; +}; + +struct mpage_data { + struct bio *bio; + long: 32; + sector_t last_block_in_bio; + get_block_t *get_block; + long: 32; +}; + +enum _slab_flag_bits { + _SLAB_CONSISTENCY_CHECKS = 0, + _SLAB_RED_ZONE = 1, + _SLAB_POISON = 2, + _SLAB_KMALLOC = 3, + _SLAB_HWCACHE_ALIGN = 4, + _SLAB_CACHE_DMA = 5, + _SLAB_CACHE_DMA32 = 6, + _SLAB_STORE_USER = 7, + _SLAB_PANIC = 8, + _SLAB_TYPESAFE_BY_RCU = 9, + _SLAB_TRACE = 10, + _SLAB_NOLEAKTRACE = 11, + _SLAB_NO_MERGE = 12, + _SLAB_ACCOUNT = 13, + _SLAB_NO_USER_FLAGS = 14, + _SLAB_RECLAIM_ACCOUNT = 15, + _SLAB_OBJECT_POISON = 16, + _SLAB_CMPXCHG_DOUBLE = 17, + _SLAB_NO_OBJ_EXT = 18, + _SLAB_FLAGS_LAST_BIT = 19, +}; + +struct kmem_cache_args { + unsigned int align; + unsigned int useroffset; + unsigned int usersize; + unsigned int freeptr_offset; + bool use_freeptr_offset; + void (*ctor)(void *); +}; + +typedef int __kernel_rwf_t; + +typedef __kernel_rwf_t rwf_t; + +struct backing_file_ctx { + const struct cred *cred; + void (*accessed)(struct file *); + void (*end_write)(struct kiocb *, ssize_t); +}; + +struct backing_aio { + struct kiocb iocb; + refcount_t ref; + struct kiocb *orig_iocb; + void (*end_write)(struct kiocb *, ssize_t); + struct work_struct work; + long int res; +}; + +typedef u32 nlink_t; + +struct linux_binprm; + +struct coredump_params; + +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *); + long unsigned int min_coredump; +}; + +struct ld_semaphore { + atomic_long_t count; + raw_spinlock_t wait_lock; + unsigned int wait_readers; + struct list_head read_wait; + struct list_head write_wait; +}; + +typedef unsigned int tcflag_t; + +typedef unsigned char cc_t; + +typedef unsigned int speed_t; + +struct ktermios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[23]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + 
+struct winsize { + short unsigned int ws_row; + short unsigned int ws_col; + short unsigned int ws_xpixel; + short unsigned int ws_ypixel; +}; + +struct tty_driver; + +struct tty_port; + +struct tty_operations; + +struct tty_ldisc; + +struct tty_struct { + struct kref kref; + int index; + struct device *dev; + struct tty_driver *driver; + struct tty_port *port; + const struct tty_operations *ops; + struct tty_ldisc *ldisc; + struct ld_semaphore ldisc_sem; + struct mutex atomic_write_lock; + struct mutex legacy_mutex; + struct mutex throttle_mutex; + struct rw_semaphore termios_rwsem; + struct mutex winsize_mutex; + struct ktermios termios; + struct ktermios termios_locked; + char name[64]; + long unsigned int flags; + int count; + unsigned int receive_room; + struct winsize winsize; + struct { + spinlock_t lock; + bool stopped; + bool tco_stopped; + } flow; + struct { + struct pid *pgrp; + struct pid *session; + spinlock_t lock; + unsigned char pktstatus; + bool packet; + } ctrl; + bool hw_stopped; + bool closing; + int flow_change; + struct tty_struct *link; + struct fasync_struct *fasync; + wait_queue_head_t write_wait; + wait_queue_head_t read_wait; + struct work_struct hangup_work; + void *disc_data; + void *driver_data; + spinlock_t files_lock; + int write_cnt; + u8 *write_buf; + struct list_head tty_files; + struct work_struct SAK_work; +}; + +struct proc_ops { + unsigned int proc_flags; + int (*proc_open)(struct inode *, struct file *); + ssize_t (*proc_read)(struct file *, char *, size_t, loff_t *); + ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*proc_write)(struct file *, const char *, size_t, loff_t *); + loff_t (*proc_lseek)(struct file *, loff_t, int); + int (*proc_release)(struct inode *, struct file *); + __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); + long int (*proc_ioctl)(struct file *, unsigned int, long unsigned int); + int (*proc_mmap)(struct file *, struct vm_area_struct *); + long unsigned int (*proc_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); +}; + +typedef int (*proc_write_t)(struct file *, char *, size_t); + +struct serial_icounter_struct; + +struct serial_struct; + +struct tty_operations { + struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int); + int (*install)(struct tty_driver *, struct tty_struct *); + void (*remove)(struct tty_driver *, struct tty_struct *); + int (*open)(struct tty_struct *, struct file *); + void (*close)(struct tty_struct *, struct file *); + void (*shutdown)(struct tty_struct *); + void (*cleanup)(struct tty_struct *); + ssize_t (*write)(struct tty_struct *, const u8 *, size_t); + int (*put_char)(struct tty_struct *, u8); + void (*flush_chars)(struct tty_struct *); + unsigned int (*write_room)(struct tty_struct *); + unsigned int (*chars_in_buffer)(struct tty_struct *); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + void (*throttle)(struct tty_struct *); + void (*unthrottle)(struct tty_struct *); + void (*stop)(struct tty_struct *); + void (*start)(struct tty_struct *); + void (*hangup)(struct tty_struct *); + int (*break_ctl)(struct tty_struct *, int); + void (*flush_buffer)(struct tty_struct *); + int (*ldisc_ok)(struct tty_struct *, int); + void (*set_ldisc)(struct tty_struct *); + void (*wait_until_sent)(struct tty_struct *, int); 
+ void (*send_xchar)(struct tty_struct *, u8); + int (*tiocmget)(struct tty_struct *); + int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); + int (*resize)(struct tty_struct *, struct winsize *); + int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); + int (*get_serial)(struct tty_struct *, struct serial_struct *); + int (*set_serial)(struct tty_struct *, struct serial_struct *); + void (*show_fdinfo)(struct tty_struct *, struct seq_file *); + int (*proc_show)(struct seq_file *, void *); +}; + +struct tty_driver { + struct kref kref; + struct cdev **cdevs; + struct module *owner; + const char *driver_name; + const char *name; + int name_base; + int major; + int minor_start; + unsigned int num; + short int type; + short int subtype; + struct ktermios init_termios; + long unsigned int flags; + struct proc_dir_entry *proc_entry; + struct tty_driver *other; + struct tty_struct **ttys; + struct tty_port **ports; + struct ktermios **termios; + void *driver_state; + const struct tty_operations *ops; + struct list_head tty_drivers; +}; + +struct proc_dir_entry { + atomic_t in_use; + refcount_t refcnt; + struct list_head pde_openers; + spinlock_t pde_unload_lock; + struct completion *pde_unload_completion; + const struct inode_operations *proc_iops; + union { + const struct proc_ops *proc_ops; + const struct file_operations *proc_dir_ops; + }; + const struct dentry_operations *proc_dops; + union { + const struct seq_operations *seq_ops; + int (*single_show)(struct seq_file *, void *); + }; + proc_write_t write; + void *data; + unsigned int state_size; + unsigned int low_ino; + nlink_t nlink; + kuid_t uid; + kgid_t gid; + long: 32; + loff_t size; + struct proc_dir_entry *parent; + struct rb_root subdir; + struct rb_node subdir_node; + char *name; + umode_t mode; + u8 flags; + u8 namelen; + char inline_name[0]; + long: 32; +}; + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + unsigned int used; + unsigned int size; + unsigned int commit; + unsigned int lookahead; + unsigned int read; + bool flags; + long: 0; + u8 data[0]; +}; + +struct tty_bufhead { + struct tty_buffer *head; + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; + atomic_t mem_used; + int mem_limit; + struct tty_buffer *tail; +}; + +struct __kfifo { + unsigned int in; + unsigned int out; + unsigned int mask; + unsigned int esize; + void *data; +}; + +struct tty_port_operations; + +struct tty_port_client_operations; + +struct tty_port { + struct tty_bufhead buf; + struct tty_struct *tty; + struct tty_struct *itty; + const struct tty_port_operations *ops; + const struct tty_port_client_operations *client_ops; + spinlock_t lock; + int blocked_open; + int count; + wait_queue_head_t open_wait; + wait_queue_head_t delta_msr_wait; + long unsigned int flags; + long unsigned int iflags; + unsigned char console: 1; + struct mutex mutex; + struct mutex buf_mutex; + u8 *xmit_buf; + struct { + union { + struct __kfifo kfifo; + u8 *type; + const u8 *const_type; + char (*rectype)[0]; + u8 *ptr; + const u8 *ptr_const; + }; + u8 buf[0]; + } xmit_fifo; + unsigned int close_delay; + unsigned int closing_wait; + int drain_delay; + struct kref kref; + void *client_data; +}; + +struct tty_ldisc_ops { + char *name; + int num; + int (*open)(struct tty_struct *); + void (*close)(struct tty_struct *); + void (*flush_buffer)(struct tty_struct *); + ssize_t (*read)(struct tty_struct *, struct file *, u8 *, size_t, 
void **, long unsigned int); + ssize_t (*write)(struct tty_struct *, struct file *, const u8 *, size_t); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); + void (*hangup)(struct tty_struct *); + void (*receive_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_struct *); + void (*dcd_change)(struct tty_struct *, bool); + size_t (*receive_buf2)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + struct module *owner; +}; + +struct tty_ldisc { + struct tty_ldisc_ops *ops; + struct tty_struct *tty; +}; + +struct tty_port_operations { + bool (*carrier_raised)(struct tty_port *); + void (*dtr_rts)(struct tty_port *, bool); + void (*shutdown)(struct tty_port *); + int (*activate)(struct tty_port *, struct tty_struct *); + void (*destruct)(struct tty_port *); +}; + +struct tty_port_client_operations { + size_t (*receive_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_port *); +}; + +struct linux_binprm { + struct vm_area_struct *vma; + long unsigned int vma_pages; + long unsigned int argmin; + struct mm_struct *mm; + long unsigned int p; + unsigned int have_execfd: 1; + unsigned int execfd_creds: 1; + unsigned int secureexec: 1; + unsigned int point_of_no_return: 1; + struct file *executable; + struct file *interpreter; + struct file *file; + struct cred *cred; + int unsafe; + unsigned int per_clear; + int argc; + int envc; + const char *filename; + const char *interp; + const char *fdpath; + unsigned int interp_flags; + int execfd; + long unsigned int loader; + long unsigned int exec; + struct rlimit rlim_stack; + char buf[256]; +}; + +typedef __u16 __le16; + +typedef __u32 __le32; + +typedef __u64 __le64; + +struct fiemap_extent; + +struct fiemap_extent_info { + unsigned int fi_flags; + unsigned int fi_extents_mapped; + unsigned int fi_extents_max; + struct fiemap_extent *fi_extents_start; +}; + +typedef unsigned int tid_t; + +struct transaction_chp_stats_s { + long unsigned int cs_chp_time; + __u32 cs_forced_to_close; + __u32 cs_written; + __u32 cs_dropped; +}; + +struct journal_s; + +typedef struct journal_s journal_t; + +struct journal_head; + +struct transaction_s; + +typedef struct transaction_s transaction_t; + +struct transaction_s { + journal_t *t_journal; + tid_t t_tid; + enum { + T_RUNNING = 0, + T_LOCKED = 1, + T_SWITCH = 2, + T_FLUSH = 3, + T_COMMIT = 4, + T_COMMIT_DFLUSH = 5, + T_COMMIT_JFLUSH = 6, + T_COMMIT_CALLBACK = 7, + T_FINISHED = 8, + } t_state; + long unsigned int t_log_start; + int t_nr_buffers; + struct journal_head *t_reserved_list; + struct journal_head *t_buffers; + struct journal_head *t_forget; + struct journal_head *t_checkpoint_list; + struct journal_head *t_shadow_list; + struct list_head t_inode_list; + long unsigned int t_max_wait; + long unsigned int t_start; + long unsigned int t_requested; + struct transaction_chp_stats_s t_chp_stats; + atomic_t t_updates; + atomic_t t_outstanding_credits; + atomic_t t_outstanding_revokes; + atomic_t t_handle_count; + transaction_t *t_cpnext; + transaction_t *t_cpprev; + long unsigned int t_expires; + ktime_t t_start_time; + unsigned int 
t_synchronous_commit: 1; + int t_need_data_flush; + struct list_head t_private_list; +}; + +struct jbd2_buffer_trigger_type; + +struct journal_head { + struct buffer_head *b_bh; + spinlock_t b_state_lock; + int b_jcount; + unsigned int b_jlist; + unsigned int b_modified; + char *b_frozen_data; + char *b_committed_data; + transaction_t *b_transaction; + transaction_t *b_next_transaction; + struct journal_head *b_tnext; + struct journal_head *b_tprev; + transaction_t *b_cp_transaction; + struct journal_head *b_cpnext; + struct journal_head *b_cpprev; + struct jbd2_buffer_trigger_type *b_triggers; + struct jbd2_buffer_trigger_type *b_frozen_triggers; +}; + +struct jbd2_buffer_trigger_type { + void (*t_frozen)(struct jbd2_buffer_trigger_type *, struct buffer_head *, void *, size_t); + void (*t_abort)(struct jbd2_buffer_trigger_type *, struct buffer_head *); +}; + +struct crypto_alg; + +struct crypto_tfm { + refcount_t refcnt; + u32 crt_flags; + int node; + void (*exit)(struct crypto_tfm *); + struct crypto_alg *__crt_alg; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__crt_ctx[0]; +}; + +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); + void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); + void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); +}; + +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); + int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); +}; + +struct crypto_type; + +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + int cra_priority; + refcount_t cra_refcnt; + char cra_name[128]; + char cra_driver_name[128]; + const struct crypto_type *cra_type; + union { + struct cipher_alg cipher; + struct compress_alg compress; + } cra_u; + int (*cra_init)(struct crypto_tfm *); + void (*cra_exit)(struct crypto_tfm *); + void (*cra_destroy)(struct crypto_alg *); + struct module *cra_module; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct crypto_instance; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); + unsigned int (*extsize)(struct crypto_alg *); + int (*init_tfm)(struct crypto_tfm *); + void (*show)(struct seq_file *, struct crypto_alg *); + int (*report)(struct sk_buff *, struct crypto_alg *); + void (*free)(struct crypto_instance *); + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct crypto_shash { + unsigned int descsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct jbd2_journal_handle; + +typedef 
struct jbd2_journal_handle handle_t; + +struct jbd2_journal_handle { + union { + transaction_t *h_transaction; + journal_t *h_journal; + }; + handle_t *h_rsv_handle; + int h_total_credits; + int h_revoke_credits; + int h_revoke_credits_requested; + int h_ref; + int h_err; + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; + long unsigned int h_start_jiffies; + unsigned int h_requested_credits; + unsigned int saved_alloc_context; +}; + +struct transaction_run_stats_s { + long unsigned int rs_wait; + long unsigned int rs_request_delay; + long unsigned int rs_running; + long unsigned int rs_locked; + long unsigned int rs_flushing; + long unsigned int rs_logging; + __u32 rs_handle_count; + __u32 rs_blocks; + __u32 rs_blocks_logged; +}; + +struct transaction_stats_s { + long unsigned int ts_tid; + long unsigned int ts_requested; + struct transaction_run_stats_s run; +}; + +enum passtype { + PASS_SCAN = 0, + PASS_REVOKE = 1, + PASS_REPLAY = 2, +}; + +struct journal_superblock_s; + +typedef struct journal_superblock_s journal_superblock_t; + +struct jbd2_revoke_table_s; + +struct jbd2_inode; + +struct journal_s { + long unsigned int j_flags; + int j_errno; + struct mutex j_abort_mutex; + struct buffer_head *j_sb_buffer; + journal_superblock_t *j_superblock; + rwlock_t j_state_lock; + int j_barrier_count; + struct mutex j_barrier; + transaction_t *j_running_transaction; + transaction_t *j_committing_transaction; + transaction_t *j_checkpoint_transactions; + wait_queue_head_t j_wait_transaction_locked; + wait_queue_head_t j_wait_done_commit; + wait_queue_head_t j_wait_commit; + wait_queue_head_t j_wait_updates; + wait_queue_head_t j_wait_reserved; + wait_queue_head_t j_fc_wait; + struct mutex j_checkpoint_mutex; + struct buffer_head *j_chkpt_bhs[64]; + struct shrinker *j_shrinker; + long: 32; + struct percpu_counter j_checkpoint_jh_count; + transaction_t *j_shrink_transaction; + long unsigned int j_head; + long unsigned int j_tail; + long unsigned int j_free; + long unsigned int j_first; + long unsigned int j_last; + long unsigned int j_fc_first; + long unsigned int j_fc_off; + long unsigned int j_fc_last; + struct block_device *j_dev; + int j_blocksize; + long: 32; + long long unsigned int j_blk_offset; + char j_devname[56]; + struct block_device *j_fs_dev; + errseq_t j_fs_dev_wb_err; + unsigned int j_total_len; + atomic_t j_reserved_credits; + spinlock_t j_list_lock; + struct inode *j_inode; + tid_t j_tail_sequence; + tid_t j_transaction_sequence; + tid_t j_commit_sequence; + tid_t j_commit_request; + __u8 j_uuid[16]; + struct task_struct *j_task; + int j_max_transaction_buffers; + int j_revoke_records_per_block; + int j_transaction_overhead_buffers; + long unsigned int j_commit_interval; + struct timer_list j_commit_timer; + spinlock_t j_revoke_lock; + struct jbd2_revoke_table_s *j_revoke; + struct jbd2_revoke_table_s *j_revoke_table[2]; + struct buffer_head **j_wbuf; + struct buffer_head **j_fc_wbuf; + int j_wbufsize; + int j_fc_wbufsize; + pid_t j_last_sync_writer; + long: 32; + u64 j_average_commit_time; + u32 j_min_batch_time; + u32 j_max_batch_time; + void (*j_commit_callback)(journal_t *, transaction_t *); + int (*j_submit_inode_data_buffers)(struct jbd2_inode *); + int (*j_finish_inode_data_buffers)(struct jbd2_inode *); + spinlock_t j_history_lock; + struct proc_dir_entry *j_proc_entry; + struct transaction_stats_s j_stats; + unsigned int j_failed_commit; + void 
*j_private; + struct crypto_shash *j_chksum_driver; + __u32 j_csum_seed; + void (*j_fc_cleanup_callback)(struct journal_s *, int, tid_t); + int (*j_fc_replay_callback)(struct journal_s *, struct buffer_head *, enum passtype, int, tid_t); + int (*j_bmap)(struct journal_s *, sector_t *); + long: 32; +}; + +struct journal_header_s { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; +}; + +typedef struct journal_header_s journal_header_t; + +struct journal_superblock_s { + journal_header_t s_header; + __be32 s_blocksize; + __be32 s_maxlen; + __be32 s_first; + __be32 s_sequence; + __be32 s_start; + __be32 s_errno; + __be32 s_feature_compat; + __be32 s_feature_incompat; + __be32 s_feature_ro_compat; + __u8 s_uuid[16]; + __be32 s_nr_users; + __be32 s_dynsuper; + __be32 s_max_transaction; + __be32 s_max_trans_data; + __u8 s_checksum_type; + __u8 s_padding2[3]; + __be32 s_num_fc_blks; + __be32 s_head; + __u32 s_padding[40]; + __be32 s_checksum; + __u8 s_users[768]; +}; + +struct jbd2_inode { + transaction_t *i_transaction; + transaction_t *i_next_transaction; + struct list_head i_list; + struct inode *i_vfs_inode; + long unsigned int i_flags; + loff_t i_dirty_start; + loff_t i_dirty_end; +}; + +struct bgl_lock { + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct blockgroup_lock { + struct bgl_lock locks[128]; +}; + +struct fiemap_extent { + __u64 fe_logical; + __u64 fe_physical; + __u64 fe_length; + __u64 fe_reserved64[2]; + __u32 fe_flags; + __u32 fe_reserved[3]; +}; + +struct fscrypt_dummy_policy {}; + +typedef int ext4_grpblk_t; + +typedef long long unsigned int ext4_fsblk_t; + +typedef __u32 ext4_lblk_t; + +typedef unsigned int ext4_group_t; + +struct ext4_system_blocks { + struct rb_root root; + struct callback_head rcu; +}; + +struct flex_groups { + atomic64_t free_clusters; + atomic_t free_inodes; + atomic_t used_dirs; +}; + +enum { + EXT4_INODE_SECRM = 0, + EXT4_INODE_UNRM = 1, + EXT4_INODE_COMPR = 2, + EXT4_INODE_SYNC = 3, + EXT4_INODE_IMMUTABLE = 4, + EXT4_INODE_APPEND = 5, + EXT4_INODE_NODUMP = 6, + EXT4_INODE_NOATIME = 7, + EXT4_INODE_DIRTY = 8, + EXT4_INODE_COMPRBLK = 9, + EXT4_INODE_NOCOMPR = 10, + EXT4_INODE_ENCRYPT = 11, + EXT4_INODE_INDEX = 12, + EXT4_INODE_IMAGIC = 13, + EXT4_INODE_JOURNAL_DATA = 14, + EXT4_INODE_NOTAIL = 15, + EXT4_INODE_DIRSYNC = 16, + EXT4_INODE_TOPDIR = 17, + EXT4_INODE_HUGE_FILE = 18, + EXT4_INODE_EXTENTS = 19, + EXT4_INODE_VERITY = 20, + EXT4_INODE_EA_INODE = 21, + EXT4_INODE_DAX = 25, + EXT4_INODE_INLINE_DATA = 28, + EXT4_INODE_PROJINHERIT = 29, + EXT4_INODE_CASEFOLD = 30, + EXT4_INODE_RESERVED = 31, +}; + +struct extent_status { + struct rb_node rb_node; + ext4_lblk_t es_lblk; + ext4_lblk_t es_len; + long: 32; + ext4_fsblk_t es_pblk; +}; + +struct ext4_es_tree { + struct rb_root root; + struct extent_status *cache_es; +}; + +struct ext4_es_stats { + long unsigned int es_stats_shrunk; + long: 32; + struct percpu_counter es_stats_cache_hits; + struct percpu_counter es_stats_cache_misses; + u64 es_stats_scan_time; + u64 es_stats_max_scan_time; + struct percpu_counter es_stats_all_cnt; + struct percpu_counter es_stats_shk_cnt; +}; + +struct ext4_pending_tree { + struct rb_root root; +}; + +struct 
ext4_fc_stats { + unsigned int fc_ineligible_reason_count[10]; + long unsigned int fc_num_commits; + long unsigned int fc_ineligible_commits; + long unsigned int fc_failed_commits; + long unsigned int fc_skipped_commits; + long unsigned int fc_numblks; + long: 32; + u64 s_fc_avg_commit_time; +}; + +struct ext4_fc_alloc_region { + ext4_lblk_t lblk; + long: 32; + ext4_fsblk_t pblk; + int ino; + int len; +}; + +struct ext4_fc_replay_state { + int fc_replay_num_tags; + int fc_replay_expected_off; + int fc_current_pass; + int fc_cur_tag; + int fc_crc; + struct ext4_fc_alloc_region *fc_regions; + int fc_regions_size; + int fc_regions_used; + int fc_regions_valid; + int *fc_modified_inodes; + int fc_modified_inodes_used; + int fc_modified_inodes_size; +}; + +struct ext4_inode_info { + __le32 i_data[15]; + __u32 i_dtime; + ext4_fsblk_t i_file_acl; + ext4_group_t i_block_group; + ext4_lblk_t i_dir_start_lookup; + long unsigned int i_state_flags; + long unsigned int i_flags; + struct rw_semaphore xattr_sem; + union { + struct list_head i_orphan; + unsigned int i_orphan_idx; + }; + struct list_head i_fc_dilist; + struct list_head i_fc_list; + ext4_lblk_t i_fc_lblk_start; + ext4_lblk_t i_fc_lblk_len; + atomic_t i_fc_updates; + atomic_t i_unwritten; + wait_queue_head_t i_fc_wait; + struct mutex i_fc_lock; + loff_t i_disksize; + struct rw_semaphore i_data_sem; + long: 32; + struct inode vfs_inode; + struct jbd2_inode *jinode; + spinlock_t i_raw_lock; + struct timespec64 i_crtime; + atomic_t i_prealloc_active; + unsigned int i_reserved_data_blocks; + struct rb_root i_prealloc_node; + rwlock_t i_prealloc_lock; + struct ext4_es_tree i_es_tree; + rwlock_t i_es_lock; + struct list_head i_es_list; + unsigned int i_es_all_nr; + unsigned int i_es_shk_nr; + ext4_lblk_t i_es_shrink_lblk; + ext4_group_t i_last_alloc_group; + struct ext4_pending_tree i_pending_tree; + __u16 i_extra_isize; + u16 i_inline_off; + u16 i_inline_size; + spinlock_t i_completed_io_lock; + struct list_head i_rsv_conversion_list; + struct work_struct i_rsv_conversion_work; + spinlock_t i_block_reservation_lock; + tid_t i_sync_tid; + tid_t i_datasync_tid; + __u32 i_csum_seed; + kprojid_t i_projid; +}; + +struct ext4_super_block { + __le32 s_inodes_count; + __le32 s_blocks_count_lo; + __le32 s_r_blocks_count_lo; + __le32 s_free_blocks_count_lo; + __le32 s_free_inodes_count; + __le32 s_first_data_block; + __le32 s_log_block_size; + __le32 s_log_cluster_size; + __le32 s_blocks_per_group; + __le32 s_clusters_per_group; + __le32 s_inodes_per_group; + __le32 s_mtime; + __le32 s_wtime; + __le16 s_mnt_count; + __le16 s_max_mnt_count; + __le16 s_magic; + __le16 s_state; + __le16 s_errors; + __le16 s_minor_rev_level; + __le32 s_lastcheck; + __le32 s_checkinterval; + __le32 s_creator_os; + __le32 s_rev_level; + __le16 s_def_resuid; + __le16 s_def_resgid; + __le32 s_first_ino; + __le16 s_inode_size; + __le16 s_block_group_nr; + __le32 s_feature_compat; + __le32 s_feature_incompat; + __le32 s_feature_ro_compat; + __u8 s_uuid[16]; + char s_volume_name[16]; + char s_last_mounted[64]; + __le32 s_algorithm_usage_bitmap; + __u8 s_prealloc_blocks; + __u8 s_prealloc_dir_blocks; + __le16 s_reserved_gdt_blocks; + __u8 s_journal_uuid[16]; + __le32 s_journal_inum; + __le32 s_journal_dev; + __le32 s_last_orphan; + __le32 s_hash_seed[4]; + __u8 s_def_hash_version; + __u8 s_jnl_backup_type; + __le16 s_desc_size; + __le32 s_default_mount_opts; + __le32 s_first_meta_bg; + __le32 s_mkfs_time; + __le32 s_jnl_blocks[17]; + __le32 s_blocks_count_hi; + __le32 
s_r_blocks_count_hi; + __le32 s_free_blocks_count_hi; + __le16 s_min_extra_isize; + __le16 s_want_extra_isize; + __le32 s_flags; + __le16 s_raid_stride; + __le16 s_mmp_update_interval; + __le64 s_mmp_block; + __le32 s_raid_stripe_width; + __u8 s_log_groups_per_flex; + __u8 s_checksum_type; + __u8 s_encryption_level; + __u8 s_reserved_pad; + __le64 s_kbytes_written; + __le32 s_snapshot_inum; + __le32 s_snapshot_id; + __le64 s_snapshot_r_blocks_count; + __le32 s_snapshot_list; + __le32 s_error_count; + __le32 s_first_error_time; + __le32 s_first_error_ino; + __le64 s_first_error_block; + __u8 s_first_error_func[32]; + __le32 s_first_error_line; + __le32 s_last_error_time; + __le32 s_last_error_ino; + __le32 s_last_error_line; + __le64 s_last_error_block; + __u8 s_last_error_func[32]; + __u8 s_mount_opts[64]; + __le32 s_usr_quota_inum; + __le32 s_grp_quota_inum; + __le32 s_overhead_clusters; + __le32 s_backup_bgs[2]; + __u8 s_encrypt_algos[4]; + __u8 s_encrypt_pw_salt[16]; + __le32 s_lpf_ino; + __le32 s_prj_quota_inum; + __le32 s_checksum_seed; + __u8 s_wtime_hi; + __u8 s_mtime_hi; + __u8 s_mkfs_time_hi; + __u8 s_lastcheck_hi; + __u8 s_first_error_time_hi; + __u8 s_last_error_time_hi; + __u8 s_first_error_errcode; + __u8 s_last_error_errcode; + __le16 s_encoding; + __le16 s_encoding_flags; + __le32 s_orphan_file_inum; + __le32 s_reserved[94]; + __le32 s_checksum; +}; + +enum ext4_journal_trigger_type { + EXT4_JTR_ORPHAN_FILE = 0, + EXT4_JTR_NONE = 1, +}; + +struct ext4_journal_trigger { + struct jbd2_buffer_trigger_type tr_triggers; + struct super_block *sb; +}; + +struct ext4_orphan_block { + atomic_t ob_free_entries; + struct buffer_head *ob_bh; +}; + +struct ext4_orphan_info { + int of_blocks; + __u32 of_csum_seed; + struct ext4_orphan_block *of_binfo; +}; + +struct dax_device; + +struct ext4_group_info; + +struct ext4_locality_group; + +struct ext4_li_request; + +struct mb_cache; + +struct ext4_sb_info { + long unsigned int s_desc_size; + long unsigned int s_inodes_per_block; + long unsigned int s_blocks_per_group; + long unsigned int s_clusters_per_group; + long unsigned int s_inodes_per_group; + long unsigned int s_itb_per_group; + long unsigned int s_gdb_count; + long unsigned int s_desc_per_block; + ext4_group_t s_groups_count; + ext4_group_t s_blockfile_groups; + long unsigned int s_overhead; + unsigned int s_cluster_ratio; + unsigned int s_cluster_bits; + long: 32; + loff_t s_bitmap_maxbytes; + struct buffer_head *s_sbh; + struct ext4_super_block *s_es; + struct buffer_head **s_group_desc; + unsigned int s_mount_opt; + unsigned int s_mount_opt2; + long unsigned int s_mount_flags; + unsigned int s_def_mount_opt; + unsigned int s_def_mount_opt2; + ext4_fsblk_t s_sb_block; + atomic64_t s_resv_clusters; + kuid_t s_resuid; + kgid_t s_resgid; + short unsigned int s_mount_state; + short unsigned int s_pad; + int s_addr_per_block_bits; + int s_desc_per_block_bits; + int s_inode_size; + int s_first_ino; + unsigned int s_inode_readahead_blks; + unsigned int s_inode_goal; + u32 s_hash_seed[4]; + int s_def_hash_version; + int s_hash_unsigned; + long: 32; + struct percpu_counter s_freeclusters_counter; + struct percpu_counter s_freeinodes_counter; + struct percpu_counter s_dirs_counter; + struct percpu_counter s_dirtyclusters_counter; + struct percpu_counter s_sra_exceeded_retry_limit; + struct blockgroup_lock *s_blockgroup_lock; + struct proc_dir_entry *s_proc; + struct kobject s_kobj; + struct completion s_kobj_unregister; + struct super_block *s_sb; + struct buffer_head *s_mmp_bh; + struct 
journal_s *s_journal; + long unsigned int s_ext4_flags; + struct mutex s_orphan_lock; + struct list_head s_orphan; + struct ext4_orphan_info s_orphan_info; + long unsigned int s_commit_interval; + u32 s_max_batch_time; + u32 s_min_batch_time; + struct file *s_journal_bdev_file; + unsigned int s_want_extra_isize; + struct ext4_system_blocks *s_system_blks; + struct ext4_group_info ***s_group_info; + struct inode *s_buddy_cache; + spinlock_t s_md_lock; + short unsigned int *s_mb_offsets; + unsigned int *s_mb_maxs; + unsigned int s_group_info_size; + unsigned int s_mb_free_pending; + struct list_head s_freed_data_list[2]; + struct list_head s_discard_list; + struct work_struct s_discard_work; + atomic_t s_retry_alloc_pending; + struct list_head *s_mb_avg_fragment_size; + rwlock_t *s_mb_avg_fragment_size_locks; + struct list_head *s_mb_largest_free_orders; + rwlock_t *s_mb_largest_free_orders_locks; + long unsigned int s_stripe; + unsigned int s_mb_max_linear_groups; + unsigned int s_mb_stream_request; + unsigned int s_mb_max_to_scan; + unsigned int s_mb_min_to_scan; + unsigned int s_mb_stats; + unsigned int s_mb_order2_reqs; + unsigned int s_mb_group_prealloc; + unsigned int s_max_dir_size_kb; + long unsigned int s_mb_last_group; + long unsigned int s_mb_last_start; + unsigned int s_mb_prefetch; + unsigned int s_mb_prefetch_limit; + unsigned int s_mb_best_avail_max_trim_order; + atomic_t s_bal_reqs; + atomic_t s_bal_success; + atomic_t s_bal_allocated; + atomic_t s_bal_ex_scanned; + atomic_t s_bal_cX_ex_scanned[5]; + atomic_t s_bal_groups_scanned; + atomic_t s_bal_goals; + atomic_t s_bal_len_goals; + atomic_t s_bal_breaks; + atomic_t s_bal_2orders; + atomic_t s_bal_p2_aligned_bad_suggestions; + atomic_t s_bal_goal_fast_bad_suggestions; + atomic_t s_bal_best_avail_bad_suggestions; + long: 32; + atomic64_t s_bal_cX_groups_considered[5]; + atomic64_t s_bal_cX_hits[5]; + atomic64_t s_bal_cX_failed[5]; + atomic_t s_mb_buddies_generated; + long: 32; + atomic64_t s_mb_generation_time; + atomic_t s_mb_lost_chunks; + atomic_t s_mb_preallocated; + atomic_t s_mb_discarded; + atomic_t s_lock_busy; + struct ext4_locality_group *s_locality_groups; + long unsigned int s_sectors_written_start; + u64 s_kbytes_written; + unsigned int s_extent_max_zeroout_kb; + unsigned int s_log_groups_per_flex; + struct flex_groups **s_flex_groups; + ext4_group_t s_flex_groups_allocated; + struct workqueue_struct *rsv_conversion_wq; + struct timer_list s_err_report; + struct ext4_li_request *s_li_request; + unsigned int s_li_wait_mult; + struct task_struct *s_mmp_tsk; + long unsigned int s_last_trim_minblks; + struct crypto_shash *s_chksum_driver; + __u32 s_csum_seed; + struct shrinker *s_es_shrinker; + struct list_head s_es_list; + long int s_es_nr_inode; + struct ext4_es_stats s_es_stats; + struct mb_cache *s_ea_block_cache; + struct mb_cache *s_ea_inode_cache; + spinlock_t s_es_lock; + struct ext4_journal_trigger s_journal_triggers[1]; + struct ratelimit_state s_err_ratelimit_state; + struct ratelimit_state s_warning_ratelimit_state; + struct ratelimit_state s_msg_ratelimit_state; + atomic_t s_warning_count; + atomic_t s_msg_count; + struct fscrypt_dummy_policy s_dummy_enc_policy; + struct percpu_rw_semaphore s_writepages_rwsem; + struct dax_device *s_daxdev; + long: 32; + u64 s_dax_part_off; + errseq_t s_bdev_wb_err; + spinlock_t s_bdev_wb_lock; + spinlock_t s_error_lock; + int s_add_error_count; + int s_first_error_code; + __u32 s_first_error_line; + __u32 s_first_error_ino; + long: 32; + __u64 s_first_error_block; + 
const char *s_first_error_func; + long: 32; + time64_t s_first_error_time; + int s_last_error_code; + __u32 s_last_error_line; + __u32 s_last_error_ino; + long: 32; + __u64 s_last_error_block; + const char *s_last_error_func; + long: 32; + time64_t s_last_error_time; + struct work_struct s_sb_upd_work; + unsigned int s_awu_min; + unsigned int s_awu_max; + atomic_t s_fc_subtid; + struct list_head s_fc_q[2]; + struct list_head s_fc_dentry_q[2]; + unsigned int s_fc_bytes; + spinlock_t s_fc_lock; + struct buffer_head *s_fc_bh; + struct ext4_fc_stats s_fc_stats; + tid_t s_fc_ineligible_tid; + struct ext4_fc_replay_state s_fc_replay_state; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ext4_group_info { + long unsigned int bb_state; + struct rb_root bb_free_root; + ext4_grpblk_t bb_first_free; + ext4_grpblk_t bb_free; + ext4_grpblk_t bb_fragments; + int bb_avg_fragment_size_order; + ext4_grpblk_t bb_largest_free_order; + ext4_group_t bb_group; + struct list_head bb_prealloc_list; + struct rw_semaphore alloc_sem; + struct list_head bb_avg_fragment_size_node; + struct list_head bb_largest_free_order_node; + ext4_grpblk_t bb_counters[0]; +}; + +enum ext4_li_mode { + EXT4_LI_MODE_PREFETCH_BBITMAP = 0, + EXT4_LI_MODE_ITABLE = 1, +}; + +struct ext4_li_request { + struct super_block *lr_super; + enum ext4_li_mode lr_mode; + ext4_group_t lr_first_not_zeroed; + ext4_group_t lr_next_group; + struct list_head lr_request; + long unsigned int lr_next_sched; + long unsigned int lr_timeout; +}; + +struct xattr; + +typedef int (*initxattrs)(struct inode *, const struct xattr *, void *); + +struct xattr { + const char *name; + void *value; + size_t value_len; +}; + +struct ramfs_mount_opts { + umode_t mode; +}; + +struct ramfs_fs_info { + struct ramfs_mount_opts mount_opts; +}; + +enum ramfs_param { + Opt_mode = 0, +}; + +enum dentry_d_lock_class { + DENTRY_D_LOCK_NORMAL = 0, + DENTRY_D_LOCK_NESTED = 1, +}; + +typedef unsigned int autofs_wqt_t; + +struct autofs_packet_hdr { + int proto_version; + int type; +}; + +struct autofs_packet_expire { + struct autofs_packet_hdr hdr; + int len; + char name[256]; +}; + +enum autofs_notify { + NFY_NONE = 0, + NFY_MOUNT = 1, + NFY_EXPIRE = 2, +}; + +struct autofs_sb_info; + +struct autofs_info { + struct dentry *dentry; + int flags; + struct completion expire_complete; + struct list_head active; + struct list_head expiring; + struct autofs_sb_info *sbi; + long unsigned int exp_timeout; + long unsigned int last_used; + int count; + kuid_t uid; + kgid_t gid; + struct callback_head rcu; +}; + +struct autofs_wait_queue; + +struct autofs_sb_info { + u32 magic; + int pipefd; + struct file *pipe; + struct pid *oz_pgrp; + int version; + int sub_version; + int min_proto; + int max_proto; + unsigned int flags; + long unsigned int exp_timeout; + unsigned int type; + struct super_block *sb; + struct mutex wq_mutex; + struct mutex pipe_mutex; + spinlock_t fs_lock; + struct autofs_wait_queue *queues; + spinlock_t lookup_lock; + struct list_head active_list; + struct list_head expiring_list; + struct callback_head rcu; +}; + +struct autofs_wait_queue { + wait_queue_head_t queue; + struct autofs_wait_queue *next; + autofs_wqt_t wait_queue_token; + long: 32; + struct qstr name; + u32 offset; + u32 dev; + u64 ino; + kuid_t uid; + kgid_t gid; + pid_t pid; + pid_t tgid; + int status; + unsigned int wait_ctr; +}; + +struct semaphore { + raw_spinlock_t lock; + unsigned int count; + struct list_head 
wait_list; +}; + +struct btrfs_qgroup_limit { + __u64 flags; + __u64 max_rfer; + __u64 max_excl; + __u64 rsv_rfer; + __u64 rsv_excl; +}; + +struct btrfs_qgroup_inherit { + __u64 flags; + __u64 num_qgroups; + __u64 num_ref_copies; + __u64 num_excl_copies; + struct btrfs_qgroup_limit lim; + __u64 qgroups[0]; +}; + +struct btrfs_scrub_progress { + __u64 data_extents_scrubbed; + __u64 tree_extents_scrubbed; + __u64 data_bytes_scrubbed; + __u64 tree_bytes_scrubbed; + __u64 read_errors; + __u64 csum_errors; + __u64 verify_errors; + __u64 no_csum; + __u64 csum_discards; + __u64 super_errors; + __u64 malloc_errors; + __u64 uncorrectable_errors; + __u64 corrected_errors; + __u64 last_physical; + __u64 unverified_errors; +}; + +struct btrfs_ioctl_defrag_range_args { + __u64 start; + __u64 len; + __u64 flags; + __u32 extent_thresh; + __u32 compress_type; + __u32 unused[4]; +}; + +struct btrfs_disk_key { + __le64 objectid; + __u8 type; + __le64 offset; +} __attribute__((packed)); + +struct btrfs_key { + __u64 objectid; + __u8 type; + __u64 offset; +} __attribute__((packed)); + +struct btrfs_header { + __u8 csum[32]; + __u8 fsid[16]; + __le64 bytenr; + __le64 flags; + __u8 chunk_tree_uuid[16]; + __le64 generation; + __le64 owner; + __le32 nritems; + __u8 level; +} __attribute__((packed)); + +struct btrfs_root_backup { + __le64 tree_root; + __le64 tree_root_gen; + __le64 chunk_root; + __le64 chunk_root_gen; + __le64 extent_root; + __le64 extent_root_gen; + __le64 fs_root; + __le64 fs_root_gen; + __le64 dev_root; + __le64 dev_root_gen; + __le64 csum_root; + __le64 csum_root_gen; + __le64 total_bytes; + __le64 bytes_used; + __le64 num_devices; + __le64 unused_64[4]; + __u8 tree_root_level; + __u8 chunk_root_level; + __u8 extent_root_level; + __u8 fs_root_level; + __u8 dev_root_level; + __u8 csum_root_level; + __u8 unused_8[10]; +}; + +struct btrfs_item { + struct btrfs_disk_key key; + __le32 offset; + __le32 size; +} __attribute__((packed)); + +struct btrfs_key_ptr { + struct btrfs_disk_key key; + __le64 blockptr; + __le64 generation; +} __attribute__((packed)); + +struct btrfs_dev_item { + __le64 devid; + __le64 total_bytes; + __le64 bytes_used; + __le32 io_align; + __le32 io_width; + __le32 sector_size; + __le64 type; + __le64 generation; + __le64 start_offset; + __le32 dev_group; + __u8 seek_speed; + __u8 bandwidth; + __u8 uuid[16]; + __u8 fsid[16]; +} __attribute__((packed)); + +struct btrfs_super_block { + __u8 csum[32]; + __u8 fsid[16]; + __le64 bytenr; + __le64 flags; + __le64 magic; + __le64 generation; + __le64 root; + __le64 chunk_root; + __le64 log_root; + __le64 __unused_log_root_transid; + __le64 total_bytes; + __le64 bytes_used; + __le64 root_dir_objectid; + __le64 num_devices; + __le32 sectorsize; + __le32 nodesize; + __le32 __unused_leafsize; + __le32 stripesize; + __le32 sys_chunk_array_size; + __le64 chunk_root_generation; + __le64 compat_flags; + __le64 compat_ro_flags; + __le64 incompat_flags; + __le16 csum_type; + __u8 root_level; + __u8 chunk_root_level; + __u8 log_root_level; + struct btrfs_dev_item dev_item; + char label[256]; + __le64 cache_generation; + __le64 uuid_tree_generation; + __u8 metadata_uuid[16]; + __u64 nr_global_roots; + __le64 reserved[27]; + __u8 sys_chunk_array[2048]; + struct btrfs_root_backup super_roots[4]; + __u8 padding[565]; +} __attribute__((packed)); + +struct btrfs_timespec { + __le64 sec; + __le32 nsec; +}; + +struct btrfs_inode_item { + __le64 generation; + __le64 transid; + __le64 size; + __le64 nbytes; + __le64 block_group; + __le32 nlink; + __le32 
uid; + __le32 gid; + __le32 mode; + __le64 rdev; + __le64 flags; + __le64 sequence; + __le64 reserved[4]; + struct btrfs_timespec atime; + struct btrfs_timespec ctime; + struct btrfs_timespec mtime; + struct btrfs_timespec otime; +}; + +struct btrfs_root_item { + struct btrfs_inode_item inode; + __le64 generation; + __le64 root_dirid; + __le64 bytenr; + __le64 byte_limit; + __le64 bytes_used; + __le64 last_snapshot; + __le64 flags; + __le32 refs; + struct btrfs_disk_key drop_progress; + __u8 drop_level; + __u8 level; + __le64 generation_v2; + __u8 uuid[16]; + __u8 parent_uuid[16]; + __u8 received_uuid[16]; + __le64 ctransid; + __le64 otransid; + __le64 stransid; + __le64 rtransid; + struct btrfs_timespec ctime; + struct btrfs_timespec otime; + struct btrfs_timespec stime; + struct btrfs_timespec rtime; + __le64 reserved[8]; +} __attribute__((packed)); + +struct btrfs_file_extent_item { + __le64 generation; + __le64 ram_bytes; + __u8 compression; + __u8 encryption; + __le16 other_encoding; + __u8 type; + __le64 disk_bytenr; + __le64 disk_num_bytes; + __le64 offset; + __le64 num_bytes; +} __attribute__((packed)); + +struct btrfs_work; + +typedef void (*btrfs_func_t)(struct btrfs_work *); + +typedef void (*btrfs_ordered_func_t)(struct btrfs_work *, bool); + +struct btrfs_workqueue; + +struct btrfs_work { + btrfs_func_t func; + btrfs_ordered_func_t ordered_func; + struct work_struct normal_work; + struct list_head ordered_list; + struct btrfs_workqueue *wq; + long unsigned int flags; +}; + +struct btrfs_inode; + +struct btrfs_ordered_extent { + u64 file_offset; + u64 num_bytes; + u64 ram_bytes; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 offset; + u64 bytes_left; + u64 truncated_len; + long unsigned int flags; + int compress_type; + int qgroup_rsv; + refcount_t refs; + struct btrfs_inode *inode; + struct list_head list; + struct list_head log_list; + wait_queue_head_t wait; + struct rb_node rb_node; + struct list_head root_extent_list; + struct btrfs_work work; + struct completion completion; + struct btrfs_work flush_work; + struct list_head work_list; + struct list_head bioc_list; + long: 32; +}; + +struct extent_map_tree { + struct rb_root root; + struct list_head modified_extents; + rwlock_t lock; +}; + +struct btrfs_fs_info; + +struct extent_io_tree { + struct rb_root state; + union { + struct btrfs_fs_info *fs_info; + struct btrfs_inode *inode; + }; + u8 owner; + spinlock_t lock; +}; + +enum btrfs_rsv_type { + BTRFS_BLOCK_RSV_GLOBAL = 0, + BTRFS_BLOCK_RSV_DELALLOC = 1, + BTRFS_BLOCK_RSV_TRANS = 2, + BTRFS_BLOCK_RSV_CHUNK = 3, + BTRFS_BLOCK_RSV_DELOPS = 4, + BTRFS_BLOCK_RSV_DELREFS = 5, + BTRFS_BLOCK_RSV_EMPTY = 6, + BTRFS_BLOCK_RSV_TEMP = 7, +}; + +struct btrfs_space_info; + +struct btrfs_block_rsv { + u64 size; + u64 reserved; + struct btrfs_space_info *space_info; + spinlock_t lock; + bool full; + bool failfast; + enum btrfs_rsv_type type: 8; + long: 32; + u64 qgroup_rsv_size; + u64 qgroup_rsv_reserved; +}; + +struct btrfs_root; + +struct btrfs_delayed_node; + +struct btrfs_inode { + struct btrfs_root *root; + long: 32; + u64 objectid; + u8 prop_compress; + u8 defrag_compress; + spinlock_t lock; + struct extent_map_tree extent_tree; + struct extent_io_tree io_tree; + struct extent_io_tree *file_extent_tree; + struct mutex log_mutex; + unsigned int outstanding_extents; + spinlock_t ordered_tree_lock; + struct rb_root ordered_tree; + struct rb_node *ordered_tree_last; + struct list_head delalloc_inodes; + long unsigned int runtime_flags; + long: 32; + u64 generation; + u64 last_trans; 
+ u64 logged_trans; + int last_sub_trans; + int last_log_commit; + union { + u64 delalloc_bytes; + u64 first_dir_index_to_log; + }; + union { + u64 new_delalloc_bytes; + u64 last_dir_index_offset; + }; + union { + u64 defrag_bytes; + u64 reloc_block_group_start; + }; + u64 disk_i_size; + union { + u64 index_cnt; + u64 csum_bytes; + }; + u64 dir_index; + u64 last_unlink_trans; + union { + u64 last_reflink_trans; + u64 ref_root_id; + }; + u32 flags; + u32 ro_flags; + struct btrfs_block_rsv block_rsv; + struct btrfs_delayed_node *delayed_node; + long: 32; + u64 i_otime_sec; + u32 i_otime_nsec; + struct list_head delayed_iput; + struct rw_semaphore i_mmap_lock; + struct inode vfs_inode; +}; + +struct btrfs_block_group; + +struct btrfs_free_cluster { + spinlock_t lock; + spinlock_t refill_lock; + struct rb_root root; + long: 32; + u64 max_size; + u64 window_start; + bool fragmented; + struct btrfs_block_group *block_group; + struct list_head block_group_list; +}; + +struct btrfs_discard_ctl { + struct workqueue_struct *discard_workers; + struct delayed_work work; + spinlock_t lock; + struct btrfs_block_group *block_group; + struct list_head discard_list[3]; + u64 prev_discard; + u64 prev_discard_time; + atomic_t discardable_extents; + long: 32; + atomic64_t discardable_bytes; + u64 max_discard_size; + u64 delay_ms; + u32 iops_limit; + u32 kbps_limit; + u64 discard_extent_bytes; + u64 discard_bitmap_bytes; + atomic64_t discard_bytes_saved; +}; + +struct btrfs_device; + +struct btrfs_dev_replace { + u64 replace_state; + time64_t time_started; + time64_t time_stopped; + atomic64_t num_write_errors; + atomic64_t num_uncorrectable_read_errors; + u64 cursor_left; + u64 committed_cursor_left; + u64 cursor_left_last_write_of_item; + u64 cursor_right; + u64 cont_reading_from_srcdev_mode; + int is_valid; + int item_needs_writeback; + struct btrfs_device *srcdev; + struct btrfs_device *tgtdev; + struct mutex lock_finishing_cancel_unmount; + struct rw_semaphore rwsem; + long: 32; + struct btrfs_scrub_progress scrub_progress; + struct percpu_counter bio_counter; + wait_queue_head_t replace_wait; + struct task_struct *replace_task; +}; + +enum btrfs_exclusive_operation { + BTRFS_EXCLOP_NONE = 0, + BTRFS_EXCLOP_BALANCE_PAUSED = 1, + BTRFS_EXCLOP_BALANCE = 2, + BTRFS_EXCLOP_DEV_ADD = 3, + BTRFS_EXCLOP_DEV_REMOVE = 4, + BTRFS_EXCLOP_DEV_REPLACE = 5, + BTRFS_EXCLOP_RESIZE = 6, + BTRFS_EXCLOP_SWAP_ACTIVATE = 7, +}; + +struct btrfs_commit_stats { + u64 commit_count; + u64 max_commit_dur; + u64 last_commit_dur; + u64 total_commit_dur; +}; + +struct btrfs_transaction; + +struct btrfs_stripe_hash_table; + +struct btrfs_fs_devices; + +struct reloc_control; + +struct btrfs_balance_control; + +struct ulist; + +struct btrfs_delayed_root; + +struct btrfs_fs_info { + u8 chunk_tree_uuid[16]; + long unsigned int flags; + struct btrfs_root *tree_root; + struct btrfs_root *chunk_root; + struct btrfs_root *dev_root; + struct btrfs_root *fs_root; + struct btrfs_root *quota_root; + struct btrfs_root *uuid_root; + struct btrfs_root *data_reloc_root; + struct btrfs_root *block_group_root; + struct btrfs_root *stripe_root; + struct btrfs_root *log_root_tree; + rwlock_t global_root_lock; + struct rb_root global_root_tree; + spinlock_t fs_roots_radix_lock; + struct xarray fs_roots_radix; + rwlock_t block_group_cache_lock; + struct rb_root_cached block_group_cache_tree; + atomic64_t free_chunk_space; + struct extent_io_tree excluded_extents; + struct rb_root_cached mapping_tree; + rwlock_t mapping_tree_lock; + struct btrfs_block_rsv 
global_block_rsv; + struct btrfs_block_rsv trans_block_rsv; + struct btrfs_block_rsv chunk_block_rsv; + struct btrfs_block_rsv delayed_block_rsv; + struct btrfs_block_rsv delayed_refs_rsv; + struct btrfs_block_rsv empty_block_rsv; + u64 generation; + u64 last_trans_committed; + u64 last_reloc_trans; + u64 last_trans_log_full_commit; + long long unsigned int mount_opt; + long unsigned int compress_type: 4; + unsigned int compress_level; + u32 commit_interval; + long: 32; + u64 max_inline; + struct btrfs_transaction *running_transaction; + wait_queue_head_t transaction_throttle; + wait_queue_head_t transaction_wait; + wait_queue_head_t transaction_blocked_wait; + wait_queue_head_t async_submit_wait; + spinlock_t super_lock; + struct btrfs_super_block *super_copy; + struct btrfs_super_block *super_for_commit; + struct super_block *sb; + struct inode *btree_inode; + struct mutex tree_log_mutex; + struct mutex transaction_kthread_mutex; + struct mutex cleaner_mutex; + struct mutex chunk_mutex; + struct mutex ro_block_group_mutex; + struct btrfs_stripe_hash_table *stripe_hash_table; + struct mutex ordered_operations_mutex; + struct rw_semaphore commit_root_sem; + struct rw_semaphore cleanup_work_sem; + struct rw_semaphore subvol_sem; + spinlock_t trans_lock; + struct mutex reloc_mutex; + struct list_head trans_list; + struct list_head dead_roots; + struct list_head caching_block_groups; + spinlock_t delayed_iput_lock; + struct list_head delayed_iputs; + atomic_t nr_delayed_iputs; + wait_queue_head_t delayed_iputs_wait; + atomic64_t tree_mod_seq; + rwlock_t tree_mod_log_lock; + struct rb_root tree_mod_log; + struct list_head tree_mod_seq_list; + atomic_t async_delalloc_pages; + spinlock_t ordered_root_lock; + struct list_head ordered_roots; + struct mutex delalloc_root_mutex; + spinlock_t delalloc_root_lock; + struct list_head delalloc_roots; + struct btrfs_workqueue *workers; + struct btrfs_workqueue *delalloc_workers; + struct btrfs_workqueue *flush_workers; + struct workqueue_struct *endio_workers; + struct workqueue_struct *endio_meta_workers; + struct workqueue_struct *rmw_workers; + struct workqueue_struct *compressed_write_workers; + struct btrfs_workqueue *endio_write_workers; + struct btrfs_workqueue *endio_freespace_worker; + struct btrfs_workqueue *caching_workers; + struct btrfs_workqueue *fixup_workers; + struct btrfs_workqueue *delayed_workers; + struct task_struct *transaction_kthread; + struct task_struct *cleaner_kthread; + u32 thread_pool_size; + struct kobject *space_info_kobj; + struct kobject *qgroups_kobj; + struct kobject *discard_kobj; + struct percpu_counter dirty_metadata_bytes; + struct percpu_counter delalloc_bytes; + struct percpu_counter ordered_bytes; + s32 dirty_metadata_batch; + s32 delalloc_batch; + struct percpu_counter evictable_extent_maps; + u64 em_shrinker_last_root; + u64 em_shrinker_last_ino; + atomic64_t em_shrinker_nr_to_scan; + struct work_struct em_shrinker_work; + struct list_head dirty_cowonly_roots; + struct btrfs_fs_devices *fs_devices; + struct list_head space_info; + struct btrfs_space_info *data_sinfo; + struct reloc_control *reloc_ctl; + long: 32; + struct btrfs_free_cluster data_alloc_cluster; + struct btrfs_free_cluster meta_alloc_cluster; + spinlock_t defrag_inodes_lock; + struct rb_root defrag_inodes; + atomic_t defrag_running; + seqlock_t profiles_lock; + long: 32; + u64 avail_data_alloc_bits; + u64 avail_metadata_alloc_bits; + u64 avail_system_alloc_bits; + spinlock_t balance_lock; + struct mutex balance_mutex; + atomic_t 
balance_pause_req; + atomic_t balance_cancel_req; + struct btrfs_balance_control *balance_ctl; + wait_queue_head_t balance_wait_q; + atomic_t reloc_cancel_req; + u32 data_chunk_allocations; + u32 metadata_ratio; + void *bdev_holder; + struct mutex scrub_lock; + atomic_t scrubs_running; + atomic_t scrub_pause_req; + atomic_t scrubs_paused; + atomic_t scrub_cancel_req; + wait_queue_head_t scrub_pause_wait; + refcount_t scrub_workers_refcnt; + u32 sectors_per_page; + struct workqueue_struct *scrub_workers; + long: 32; + struct btrfs_discard_ctl discard_ctl; + u64 qgroup_flags; + struct rb_root qgroup_tree; + spinlock_t qgroup_lock; + struct ulist *qgroup_ulist; + struct mutex qgroup_ioctl_lock; + struct list_head dirty_qgroups; + long: 32; + u64 qgroup_seq; + struct mutex qgroup_rescan_lock; + struct btrfs_key qgroup_rescan_progress; + struct btrfs_workqueue *qgroup_rescan_workers; + struct completion qgroup_rescan_completion; + struct btrfs_work qgroup_rescan_work; + bool qgroup_rescan_running; + u8 qgroup_drop_subtree_thres; + long: 32; + u64 qgroup_enable_gen; + int fs_error; + long unsigned int fs_state; + struct btrfs_delayed_root *delayed_root; + spinlock_t buffer_lock; + struct xarray buffer_radix; + int backup_root_index; + struct btrfs_dev_replace dev_replace; + struct semaphore uuid_tree_rescan_sem; + struct work_struct async_reclaim_work; + struct work_struct async_data_reclaim_work; + struct work_struct preempt_reclaim_work; + struct work_struct reclaim_bgs_work; + struct list_head reclaim_bgs; + int bg_reclaim_threshold; + spinlock_t unused_bgs_lock; + struct list_head unused_bgs; + struct mutex unused_bg_unpin_mutex; + struct mutex reclaim_bgs_lock; + u32 nodesize; + u32 sectorsize; + u32 sectorsize_bits; + u32 csum_size; + u32 csums_per_leaf; + u32 stripesize; + u64 max_extent_size; + spinlock_t swapfile_pins_lock; + struct rb_root swapfile_pins; + struct crypto_shash *csum_shash; + enum btrfs_exclusive_operation exclusive_operation; + u64 zone_size; + struct queue_limits limits; + long: 32; + u64 max_zone_append_size; + struct mutex zoned_meta_io_lock; + spinlock_t treelog_bg_lock; + long: 32; + u64 treelog_bg; + spinlock_t relocation_bg_lock; + long: 32; + u64 data_reloc_bg; + struct mutex zoned_data_reloc_io_lock; + struct btrfs_block_group *active_meta_bg; + struct btrfs_block_group *active_system_bg; + u64 nr_global_roots; + spinlock_t zone_active_bgs_lock; + struct list_head zone_active_bgs; + long: 32; + struct btrfs_commit_stats commit_stats; + u64 last_root_drop_gen; + struct lockdep_map btrfs_trans_num_writers_map; + struct lockdep_map btrfs_trans_num_extwriters_map; + struct lockdep_map btrfs_state_change_map[4]; + struct lockdep_map btrfs_trans_pending_ordered_map; + struct lockdep_map btrfs_ordered_extent_map; +}; + +enum btrfs_compression_type { + BTRFS_COMPRESS_NONE = 0, + BTRFS_COMPRESS_ZLIB = 1, + BTRFS_COMPRESS_LZO = 2, + BTRFS_COMPRESS_ZSTD = 3, + BTRFS_NR_COMPRESS_TYPES = 4, +}; + +struct ulist_node { + u64 val; + u64 aux; + struct list_head list; + struct rb_node rb_node; + long: 32; +}; + +struct ulist { + long unsigned int nnodes; + struct list_head nodes; + struct rb_root root; + struct ulist_node *prealloc; +}; + +struct extent_buffer { + u64 start; + u32 len; + u32 folio_size; + long unsigned int bflags; + struct btrfs_fs_info *fs_info; + void *addr; + spinlock_t refs_lock; + atomic_t refs; + int read_mirror; + s8 log_index; + u8 folio_shift; + struct callback_head callback_head; + struct rw_semaphore lock; + struct folio *folios[16]; +}; + +struct 
extent_changeset { + u64 bytes_changed; + struct ulist range_changed; + long: 32; +}; + +enum btrfs_lock_nesting { + BTRFS_NESTING_NORMAL = 0, + BTRFS_NESTING_COW = 1, + BTRFS_NESTING_LEFT = 2, + BTRFS_NESTING_RIGHT = 3, + BTRFS_NESTING_LEFT_COW = 4, + BTRFS_NESTING_RIGHT_COW = 5, + BTRFS_NESTING_SPLIT = 6, + BTRFS_NESTING_NEW_ROOT = 7, + BTRFS_NESTING_MAX = 8, +}; + +struct btrfs_drew_lock { + atomic_t readers; + atomic_t writers; + wait_queue_head_t pending_writers; + wait_queue_head_t pending_readers; +}; + +enum { + __EXTENT_DIRTY_BIT = 0, + EXTENT_DIRTY = 1, + __EXTENT_DIRTY_SEQ = 0, + __EXTENT_UPTODATE_BIT = 1, + EXTENT_UPTODATE = 2, + __EXTENT_UPTODATE_SEQ = 1, + __EXTENT_LOCKED_BIT = 2, + EXTENT_LOCKED = 4, + __EXTENT_LOCKED_SEQ = 2, + __EXTENT_DIO_LOCKED_BIT = 3, + EXTENT_DIO_LOCKED = 8, + __EXTENT_DIO_LOCKED_SEQ = 3, + __EXTENT_NEW_BIT = 4, + EXTENT_NEW = 16, + __EXTENT_NEW_SEQ = 4, + __EXTENT_DELALLOC_BIT = 5, + EXTENT_DELALLOC = 32, + __EXTENT_DELALLOC_SEQ = 5, + __EXTENT_DEFRAG_BIT = 6, + EXTENT_DEFRAG = 64, + __EXTENT_DEFRAG_SEQ = 6, + __EXTENT_BOUNDARY_BIT = 7, + EXTENT_BOUNDARY = 128, + __EXTENT_BOUNDARY_SEQ = 7, + __EXTENT_NODATASUM_BIT = 8, + EXTENT_NODATASUM = 256, + __EXTENT_NODATASUM_SEQ = 8, + __EXTENT_CLEAR_META_RESV_BIT = 9, + EXTENT_CLEAR_META_RESV = 512, + __EXTENT_CLEAR_META_RESV_SEQ = 9, + __EXTENT_NEED_WAIT_BIT = 10, + EXTENT_NEED_WAIT = 1024, + __EXTENT_NEED_WAIT_SEQ = 10, + __EXTENT_NORESERVE_BIT = 11, + EXTENT_NORESERVE = 2048, + __EXTENT_NORESERVE_SEQ = 11, + __EXTENT_QGROUP_RESERVED_BIT = 12, + EXTENT_QGROUP_RESERVED = 4096, + __EXTENT_QGROUP_RESERVED_SEQ = 12, + __EXTENT_CLEAR_DATA_RESV_BIT = 13, + EXTENT_CLEAR_DATA_RESV = 8192, + __EXTENT_CLEAR_DATA_RESV_SEQ = 13, + __EXTENT_DELALLOC_NEW_BIT = 14, + EXTENT_DELALLOC_NEW = 16384, + __EXTENT_DELALLOC_NEW_SEQ = 14, + __EXTENT_ADD_INODE_BYTES_BIT = 15, + EXTENT_ADD_INODE_BYTES = 32768, + __EXTENT_ADD_INODE_BYTES_SEQ = 15, + __EXTENT_CLEAR_ALL_BITS_BIT = 16, + EXTENT_CLEAR_ALL_BITS = 65536, + __EXTENT_CLEAR_ALL_BITS_SEQ = 16, + __EXTENT_NOWAIT_BIT = 17, + EXTENT_NOWAIT = 131072, + __EXTENT_NOWAIT_SEQ = 17, +}; + +struct extent_state { + u64 start; + u64 end; + struct rb_node rb_node; + wait_queue_head_t wq; + refcount_t refs; + u32 state; +}; + +enum { + BTRFS_FS_STATE_REMOUNTING = 0, + BTRFS_FS_STATE_RO = 1, + BTRFS_FS_STATE_TRANS_ABORTED = 2, + BTRFS_FS_STATE_DEV_REPLACING = 3, + BTRFS_FS_STATE_DUMMY_FS_INFO = 4, + BTRFS_FS_STATE_NO_DATA_CSUMS = 5, + BTRFS_FS_STATE_SKIP_META_CSUMS = 6, + BTRFS_FS_STATE_LOG_CLEANUP_ERROR = 7, + BTRFS_FS_STATE_COUNT = 8, +}; + +enum { + BTRFS_FS_CLOSING_START = 0, + BTRFS_FS_CLOSING_DONE = 1, + BTRFS_FS_LOG_RECOVERING = 2, + BTRFS_FS_OPEN = 3, + BTRFS_FS_QUOTA_ENABLED = 4, + BTRFS_FS_UPDATE_UUID_TREE_GEN = 5, + BTRFS_FS_CREATING_FREE_SPACE_TREE = 6, + BTRFS_FS_BTREE_ERR = 7, + BTRFS_FS_LOG1_ERR = 8, + BTRFS_FS_LOG2_ERR = 9, + BTRFS_FS_QUOTA_OVERRIDE = 10, + BTRFS_FS_FROZEN = 11, + BTRFS_FS_BALANCE_RUNNING = 12, + BTRFS_FS_RELOC_RUNNING = 13, + BTRFS_FS_CLEANER_RUNNING = 14, + BTRFS_FS_CSUM_IMPL_FAST = 15, + BTRFS_FS_DISCARD_RUNNING = 16, + BTRFS_FS_CLEANUP_SPACE_CACHE_V1 = 17, + BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED = 18, + BTRFS_FS_TREE_MOD_LOG_USERS = 19, + BTRFS_FS_COMMIT_TRANS = 20, + BTRFS_FS_UNFINISHED_DROPS = 21, + BTRFS_FS_NEED_ZONE_FINISH = 22, + BTRFS_FS_NEED_TRANS_COMMIT = 23, + BTRFS_FS_ACTIVE_ZONE_TRACKING = 24, + BTRFS_FS_FEATURE_CHANGED = 25, + BTRFS_FS_UNALIGNED_TREE_BLOCK = 26, + BTRFS_FS_32BIT_ERROR = 27, + BTRFS_FS_32BIT_WARN = 28, +}; + +enum { + 
BTRFS_MOUNT_NODATASUM = 1ULL, + BTRFS_MOUNT_NODATACOW = 2ULL, + BTRFS_MOUNT_NOBARRIER = 4ULL, + BTRFS_MOUNT_SSD = 8ULL, + BTRFS_MOUNT_DEGRADED = 16ULL, + BTRFS_MOUNT_COMPRESS = 32ULL, + BTRFS_MOUNT_NOTREELOG = 64ULL, + BTRFS_MOUNT_FLUSHONCOMMIT = 128ULL, + BTRFS_MOUNT_SSD_SPREAD = 256ULL, + BTRFS_MOUNT_NOSSD = 512ULL, + BTRFS_MOUNT_DISCARD_SYNC = 1024ULL, + BTRFS_MOUNT_FORCE_COMPRESS = 2048ULL, + BTRFS_MOUNT_SPACE_CACHE = 4096ULL, + BTRFS_MOUNT_CLEAR_CACHE = 8192ULL, + BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = 16384ULL, + BTRFS_MOUNT_ENOSPC_DEBUG = 32768ULL, + BTRFS_MOUNT_AUTO_DEFRAG = 65536ULL, + BTRFS_MOUNT_USEBACKUPROOT = 131072ULL, + BTRFS_MOUNT_SKIP_BALANCE = 262144ULL, + BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = 524288ULL, + BTRFS_MOUNT_RESCAN_UUID_TREE = 1048576ULL, + BTRFS_MOUNT_FRAGMENT_DATA = 2097152ULL, + BTRFS_MOUNT_FRAGMENT_METADATA = 4194304ULL, + BTRFS_MOUNT_FREE_SPACE_TREE = 8388608ULL, + BTRFS_MOUNT_NOLOGREPLAY = 16777216ULL, + BTRFS_MOUNT_REF_VERIFY = 33554432ULL, + BTRFS_MOUNT_DISCARD_ASYNC = 67108864ULL, + BTRFS_MOUNT_IGNOREBADROOTS = 134217728ULL, + BTRFS_MOUNT_IGNOREDATACSUMS = 268435456ULL, + BTRFS_MOUNT_NODISCARD = 536870912ULL, + BTRFS_MOUNT_NOSPACECACHE = 1073741824ULL, + BTRFS_MOUNT_IGNOREMETACSUMS = 2147483648ULL, + BTRFS_MOUNT_IGNORESUPERFLAGS = 4294967296ULL, +}; + +struct btrfs_qgroup_swapped_blocks { + spinlock_t lock; + bool swapped; + struct rb_root blocks[8]; +}; + +struct btrfs_root { + struct rb_node rb_node; + struct extent_buffer *node; + struct extent_buffer *commit_root; + struct btrfs_root *log_root; + struct btrfs_root *reloc_root; + long unsigned int state; + struct btrfs_root_item root_item; + struct btrfs_key root_key; + struct btrfs_fs_info *fs_info; + struct extent_io_tree dirty_log_pages; + struct mutex objectid_mutex; + spinlock_t accounting_lock; + struct btrfs_block_rsv *block_rsv; + struct mutex log_mutex; + wait_queue_head_t log_writer_wait; + wait_queue_head_t log_commit_wait[2]; + struct list_head log_ctxs[2]; + atomic_t log_writers; + atomic_t log_commit[2]; + atomic_t log_batch; + int log_transid; + int log_transid_committed; + int last_log_commit; + pid_t log_start_pid; + u64 last_trans; + u64 free_objectid; + struct btrfs_key defrag_progress; + struct btrfs_key defrag_max; + struct list_head dirty_list; + struct list_head root_list; + struct xarray inodes; + struct xarray delayed_nodes; + dev_t anon_dev; + spinlock_t root_item_lock; + refcount_t refs; + struct mutex delalloc_mutex; + spinlock_t delalloc_lock; + struct list_head delalloc_inodes; + struct list_head delalloc_root; + long: 32; + u64 nr_delalloc_inodes; + struct mutex ordered_extent_mutex; + spinlock_t ordered_extent_lock; + struct list_head ordered_extents; + struct list_head ordered_root; + long: 32; + u64 nr_ordered_extents; + struct list_head reloc_dirty_list; + int send_in_progress; + int dedupe_in_progress; + struct btrfs_drew_lock snapshot_lock; + atomic_t snapshot_force_cow; + spinlock_t qgroup_meta_rsv_lock; + u64 qgroup_meta_rsv_pertrans; + u64 qgroup_meta_rsv_prealloc; + wait_queue_head_t qgroup_flush_wait; + atomic_t nr_swapfiles; + struct btrfs_qgroup_swapped_blocks swapped_blocks; + struct extent_io_tree log_csum_range; + u64 relocation_src_root; +}; + +enum btrfs_trans_state { + TRANS_STATE_RUNNING = 0, + TRANS_STATE_COMMIT_PREP = 1, + TRANS_STATE_COMMIT_START = 2, + TRANS_STATE_COMMIT_DOING = 3, + TRANS_STATE_UNBLOCKED = 4, + TRANS_STATE_SUPER_COMMITTED = 5, + TRANS_STATE_COMPLETED = 6, + TRANS_STATE_MAX = 7, +}; + +struct btrfs_delayed_ref_root { + struct xarray 
head_refs; + struct xarray dirty_extents; + spinlock_t lock; + long unsigned int num_heads; + long unsigned int num_heads_ready; + long: 32; + u64 pending_csums; + long unsigned int flags; + long: 32; + u64 run_delayed_start; + u64 qgroup_to_skip; +}; + +struct btrfs_transaction { + u64 transid; + atomic_t num_extwriters; + atomic_t num_writers; + refcount_t use_count; + long unsigned int flags; + enum btrfs_trans_state state; + int aborted; + struct list_head list; + struct extent_io_tree dirty_pages; + time64_t start_time; + wait_queue_head_t writer_wait; + wait_queue_head_t commit_wait; + struct list_head pending_snapshots; + struct list_head dev_update_list; + struct list_head switch_commits; + struct list_head dirty_bgs; + struct list_head io_bgs; + struct list_head dropped_roots; + struct extent_io_tree pinned_extents; + struct mutex cache_write_mutex; + spinlock_t dirty_bgs_lock; + struct list_head deleted_bgs; + spinlock_t dropped_roots_lock; + struct btrfs_delayed_ref_root delayed_refs; + struct btrfs_fs_info *fs_info; + atomic_t pending_ordered; + wait_queue_head_t pending_wait; + long: 32; +}; + +struct btrfs_delayed_root { + spinlock_t lock; + struct list_head node_list; + struct list_head prepare_list; + atomic_t items; + atomic_t items_seq; + int nodes; + wait_queue_head_t wait; +}; + +struct btrfs_path { + struct extent_buffer *nodes[8]; + int slots[8]; + u8 locks[8]; + u8 reada; + u8 lowest_level; + unsigned int search_for_split: 1; + unsigned int keep_locks: 1; + unsigned int skip_locking: 1; + unsigned int search_commit_root: 1; + unsigned int need_commit_sem: 1; + unsigned int skip_release_on_error: 1; + unsigned int search_for_extension: 1; + unsigned int nowait: 1; +}; + +enum { + BTRFS_ROOT_IN_TRANS_SETUP = 0, + BTRFS_ROOT_SHAREABLE = 1, + BTRFS_ROOT_TRACK_DIRTY = 2, + BTRFS_ROOT_IN_RADIX = 3, + BTRFS_ROOT_ORPHAN_ITEM_INSERTED = 4, + BTRFS_ROOT_DEFRAG_RUNNING = 5, + BTRFS_ROOT_FORCE_COW = 6, + BTRFS_ROOT_MULTI_LOG_TASKS = 7, + BTRFS_ROOT_DIRTY = 8, + BTRFS_ROOT_DELETING = 9, + BTRFS_ROOT_DEAD_RELOC_TREE = 10, + BTRFS_ROOT_DEAD_TREE = 11, + BTRFS_ROOT_HAS_LOG_TREE = 12, + BTRFS_ROOT_QGROUP_FLUSHING = 13, + BTRFS_ROOT_ORPHAN_CLEANUP = 14, + BTRFS_ROOT_UNFINISHED_DROP = 15, + BTRFS_ROOT_RESET_LOCKDEP_CLASS = 16, +}; + +enum { + __EXTENT_FLAG_PINNED_BIT = 0, + EXTENT_FLAG_PINNED = 1, + __EXTENT_FLAG_PINNED_SEQ = 0, + __EXTENT_FLAG_COMPRESS_ZLIB_BIT = 1, + EXTENT_FLAG_COMPRESS_ZLIB = 2, + __EXTENT_FLAG_COMPRESS_ZLIB_SEQ = 1, + __EXTENT_FLAG_COMPRESS_LZO_BIT = 2, + EXTENT_FLAG_COMPRESS_LZO = 4, + __EXTENT_FLAG_COMPRESS_LZO_SEQ = 2, + __EXTENT_FLAG_COMPRESS_ZSTD_BIT = 3, + EXTENT_FLAG_COMPRESS_ZSTD = 8, + __EXTENT_FLAG_COMPRESS_ZSTD_SEQ = 3, + __EXTENT_FLAG_PREALLOC_BIT = 4, + EXTENT_FLAG_PREALLOC = 16, + __EXTENT_FLAG_PREALLOC_SEQ = 4, + __EXTENT_FLAG_LOGGING_BIT = 5, + EXTENT_FLAG_LOGGING = 32, + __EXTENT_FLAG_LOGGING_SEQ = 5, + __EXTENT_FLAG_MERGED_BIT = 6, + EXTENT_FLAG_MERGED = 64, + __EXTENT_FLAG_MERGED_SEQ = 6, +}; + +struct extent_map { + struct rb_node rb_node; + long: 32; + u64 start; + u64 len; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 offset; + u64 ram_bytes; + u64 generation; + u32 flags; + refcount_t refs; + struct list_head list; +}; + +struct btrfs_delayed_node { + u64 inode_id; + u64 bytes_reserved; + struct btrfs_root *root; + struct list_head n_list; + struct list_head p_list; + struct rb_root_cached ins_root; + struct rb_root_cached del_root; + struct mutex mutex; + struct btrfs_inode_item inode_item; + refcount_t refs; + int count; + long: 32; + u64 
index_cnt; + long unsigned int flags; + u32 curr_index_batch_size; + u32 index_item_leaves; + long: 32; +}; + +enum { + BTRFS_INODE_FLUSH_ON_CLOSE = 0, + BTRFS_INODE_DUMMY = 1, + BTRFS_INODE_IN_DEFRAG = 2, + BTRFS_INODE_HAS_ASYNC_EXTENT = 3, + BTRFS_INODE_NEEDS_FULL_SYNC = 4, + BTRFS_INODE_COPY_EVERYTHING = 5, + BTRFS_INODE_HAS_PROPS = 6, + BTRFS_INODE_SNAPSHOT_FLUSH = 7, + BTRFS_INODE_NO_XATTRS = 8, + BTRFS_INODE_NO_DELALLOC_FLUSH = 9, + BTRFS_INODE_VERITY_IN_PROGRESS = 10, + BTRFS_INODE_FREE_SPACE_INODE = 11, + BTRFS_INODE_NO_CAP_XATTR = 12, + BTRFS_INODE_COW_WRITE_ERROR = 13, + BTRFS_INODE_ROOT_STUB = 14, +}; + +struct btrfs_pending_snapshot; + +struct btrfs_trans_handle { + u64 transid; + u64 bytes_reserved; + u64 delayed_refs_bytes_reserved; + u64 chunk_bytes_reserved; + long unsigned int delayed_ref_updates; + long unsigned int delayed_ref_csum_deletions; + struct btrfs_transaction *transaction; + struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *orig_rsv; + struct btrfs_pending_snapshot *pending_snapshot; + refcount_t use_count; + unsigned int type; + short int aborted; + bool adding_csums; + bool allocating_chunk; + bool removing_chunk; + bool reloc_reserved; + bool in_fsync; + struct btrfs_fs_info *fs_info; + struct list_head new_bgs; + long: 32; + struct btrfs_block_rsv delayed_rsv; +}; + +struct btrfs_pending_snapshot { + struct dentry *dentry; + struct btrfs_inode *dir; + struct btrfs_root *root; + struct btrfs_root_item *root_item; + struct btrfs_root *snap; + struct btrfs_qgroup_inherit *inherit; + struct btrfs_path *path; + long: 32; + struct btrfs_block_rsv block_rsv; + int error; + dev_t anon_dev; + bool readonly; + struct list_head list; + long: 32; +}; + +struct inode_defrag { + struct rb_node rb_node; + long: 32; + u64 ino; + u64 transid; + u64 root; + u32 extent_thresh; + long: 32; +}; + +struct defrag_target_range { + struct list_head list; + u64 start; + u64 len; +}; + +enum { + btrfs_bitmap_nr_uptodate = 0, + btrfs_bitmap_nr_dirty = 1, + btrfs_bitmap_nr_writeback = 2, + btrfs_bitmap_nr_ordered = 3, + btrfs_bitmap_nr_checked = 4, + btrfs_bitmap_nr_locked = 5, + btrfs_bitmap_nr_max = 6, +}; + +struct btrfs_subpage { + spinlock_t lock; + union { + atomic_t eb_refs; + atomic_t nr_locked; + }; + long unsigned int bitmaps[0]; +}; + +enum btrfs_subpage_type { + BTRFS_SUBPAGE_METADATA = 0, + BTRFS_SUBPAGE_DATA = 1, +}; + +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED = 0, + BPF_CGROUP_STORAGE_PERCPU = 1, + __BPF_CGROUP_STORAGE_MAX = 2, +}; + +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_NR_TABLES = 2, + NEIGH_LINK_TABLE = 2, +}; + +enum { + NAPI_STATE_SCHED = 0, + NAPI_STATE_MISSED = 1, + NAPI_STATE_DISABLE = 2, + NAPI_STATE_NPSVC = 3, + NAPI_STATE_LISTED = 4, + NAPI_STATE_NO_BUSY_POLL = 5, + NAPI_STATE_IN_BUSY_POLL = 6, + NAPI_STATE_PREFER_BUSY_POLL = 7, + NAPI_STATE_THREADED = 8, + NAPI_STATE_SCHED_THREADED = 9, +}; + +enum xps_map_type { + XPS_CPUS = 0, + XPS_RXQS = 1, + XPS_MAPS_MAX = 2, +}; + +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE = 3, +}; + +enum { + NETIF_MSG_DRV_BIT = 0, + NETIF_MSG_PROBE_BIT = 1, + NETIF_MSG_LINK_BIT = 2, + NETIF_MSG_TIMER_BIT = 3, + NETIF_MSG_IFDOWN_BIT = 4, + NETIF_MSG_IFUP_BIT = 5, + NETIF_MSG_RX_ERR_BIT = 6, + NETIF_MSG_TX_ERR_BIT = 7, + NETIF_MSG_TX_QUEUED_BIT = 8, + NETIF_MSG_INTR_BIT = 9, + NETIF_MSG_TX_DONE_BIT = 10, + NETIF_MSG_RX_STATUS_BIT = 11, + NETIF_MSG_PKTDATA_BIT = 12, + NETIF_MSG_HW_BIT = 13, + NETIF_MSG_WOL_BIT = 14, + 
NETIF_MSG_CLASS_COUNT = 15, +}; + +enum { + RTAX_UNSPEC = 0, + RTAX_LOCK = 1, + RTAX_MTU = 2, + RTAX_WINDOW = 3, + RTAX_RTT = 4, + RTAX_RTTVAR = 5, + RTAX_SSTHRESH = 6, + RTAX_CWND = 7, + RTAX_ADVMSS = 8, + RTAX_REORDERING = 9, + RTAX_HOPLIMIT = 10, + RTAX_INITCWND = 11, + RTAX_FEATURES = 12, + RTAX_RTO_MIN = 13, + RTAX_INITRWND = 14, + RTAX_QUICKACK = 15, + RTAX_CC_ALGO = 16, + RTAX_FASTOPEN_NO_COOKIE = 17, + __RTAX_MAX = 18, +}; + +enum { + NEIGH_VAR_MCAST_PROBES = 0, + NEIGH_VAR_UCAST_PROBES = 1, + NEIGH_VAR_APP_PROBES = 2, + NEIGH_VAR_MCAST_REPROBES = 3, + NEIGH_VAR_RETRANS_TIME = 4, + NEIGH_VAR_BASE_REACHABLE_TIME = 5, + NEIGH_VAR_DELAY_PROBE_TIME = 6, + NEIGH_VAR_INTERVAL_PROBE_TIME_MS = 7, + NEIGH_VAR_GC_STALETIME = 8, + NEIGH_VAR_QUEUE_LEN_BYTES = 9, + NEIGH_VAR_PROXY_QLEN = 10, + NEIGH_VAR_ANYCAST_DELAY = 11, + NEIGH_VAR_PROXY_DELAY = 12, + NEIGH_VAR_LOCKTIME = 13, + NEIGH_VAR_QUEUE_LEN = 14, + NEIGH_VAR_RETRANS_TIME_MS = 15, + NEIGH_VAR_BASE_REACHABLE_TIME_MS = 16, + NEIGH_VAR_GC_INTERVAL = 17, + NEIGH_VAR_GC_THRESH1 = 18, + NEIGH_VAR_GC_THRESH2 = 19, + NEIGH_VAR_GC_THRESH3 = 20, + NEIGH_VAR_MAX = 21, +}; + +enum { + TCP_ESTABLISHED = 1, + TCP_SYN_SENT = 2, + TCP_SYN_RECV = 3, + TCP_FIN_WAIT1 = 4, + TCP_FIN_WAIT2 = 5, + TCP_TIME_WAIT = 6, + TCP_CLOSE = 7, + TCP_CLOSE_WAIT = 8, + TCP_LAST_ACK = 9, + TCP_LISTEN = 10, + TCP_CLOSING = 11, + TCP_NEW_SYN_RECV = 12, + TCP_BOUND_INACTIVE = 13, + TCP_MAX_STATES = 14, +}; + +enum devcg_behavior { + DEVCG_DEFAULT_NONE = 0, + DEVCG_DEFAULT_ALLOW = 1, + DEVCG_DEFAULT_DENY = 2, +}; + +struct dev_exception_item { + u32 major; + u32 minor; + short int type; + short int access; + struct list_head list; + struct callback_head rcu; +}; + +struct dev_cgroup { + struct cgroup_subsys_state css; + struct list_head exceptions; + enum devcg_behavior behavior; + long: 32; +}; + +typedef void (*exitcall_t)(); + +typedef void (*crypto_completion_t)(void *, int); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + u32 flags; +}; + +struct scatterlist; + +struct aead_request { + struct crypto_async_request base; + unsigned int assoclen; + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct scatterlist { + long unsigned int page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; +}; + +struct crypto_aead; + +struct aead_alg { + int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); + int (*setauthsize)(struct crypto_aead *, unsigned int); + int (*encrypt)(struct aead_request *); + int (*decrypt)(struct aead_request *); + int (*init)(struct crypto_aead *); + void (*exit)(struct crypto_aead *); + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; 
+ long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct crypto_template; + +struct crypto_spawn; + +struct crypto_instance { + struct crypto_alg alg; + struct crypto_template *tmpl; + union { + struct hlist_node list; + struct crypto_spawn *spawns; + }; + struct work_struct free_work; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + union { + struct crypto_instance *inst; + struct crypto_spawn *next; + }; + const struct crypto_type *frontend; + u32 mask; + bool dead; + bool registered; +}; + +struct rtattr; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + int (*create)(struct crypto_template *, struct rtattr **); + char name[128]; +}; + +struct aead_instance { + void (*free)(struct aead_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct ahash_request { + struct crypto_async_request base; + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + void *priv; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_ahash { + bool using_shash; + unsigned int statesize; + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct shash_desc { + struct crypto_shash *tfm; + long: 32; + void *__ctx[0]; +}; + +struct shash_alg { + int (*init)(struct shash_desc *); + int 
(*update)(struct shash_desc *, const u8 *, unsigned int); + int (*final)(struct shash_desc *, u8 *); + int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*export)(struct shash_desc *, void *); + int (*import)(struct shash_desc *, const void *); + int (*setkey)(struct crypto_shash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_shash *); + void (*exit_tfm)(struct crypto_shash *); + int (*clone_tfm)(struct crypto_shash *, struct crypto_shash *); + unsigned int descsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + unsigned int digestsize; + unsigned int statesize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; + }; + struct hash_alg_common halg; + }; +}; + +struct shash_instance { + void (*free)(struct shash_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[256]; + struct crypto_instance base; + } s; + struct shash_alg alg; + }; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct skcipher_request { + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + struct crypto_async_request base; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_skcipher { + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct skcipher_alg_common { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int statesize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct ccm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn mac; +}; + +struct crypto_ccm_ctx { + struct 
crypto_ahash *mac; + struct crypto_skcipher *ctr; +}; + +struct crypto_rfc4309_ctx { + struct crypto_aead *child; + u8 nonce[3]; +}; + +struct crypto_rfc4309_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct aead_request subreq; +}; + +struct crypto_ccm_req_priv_ctx { + u8 odata[16]; + u8 idata[16]; + u8 auth_tag[16]; + u32 flags; + struct scatterlist src[3]; + struct scatterlist dst[3]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + }; +}; + +struct cbcmac_tfm_ctx { + struct crypto_cipher *child; +}; + +struct cbcmac_desc_ctx { + unsigned int len; + u8 dg[0]; +}; + +typedef u64 uint64_t; + +struct sbitmap_word { + long unsigned int word; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int cleared; + raw_spinlock_t swap_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sbitmap { + unsigned int depth; + unsigned int shift; + unsigned int map_nr; + bool round_robin; + struct sbitmap_word *map; + unsigned int *alloc_hint; +}; + +struct sbq_wait_state { + wait_queue_head_t wait; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sbitmap_queue { + struct sbitmap sb; + unsigned int wake_batch; + atomic_t wake_index; + struct sbq_wait_state *ws; + atomic_t ws_active; + unsigned int min_shallow_depth; + atomic_t completion_cnt; + atomic_t wakeup_cnt; +}; + +typedef __u32 req_flags_t; + +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +enum rq_end_io_ret { + RQ_END_IO_NONE = 0, + RQ_END_IO_FREE = 1, +}; + +typedef enum rq_end_io_ret rq_end_io_fn(struct request *, blk_status_t); + +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + int tag; + int internal_tag; + unsigned int timeout; + unsigned int __data_len; + long: 32; + sector_t __sector; + struct bio *bio; + struct bio *biotail; + union { + struct list_head queuelist; + struct request *rq_next; + }; + struct block_device *part; + long: 32; + u64 alloc_time_ns; + u64 start_time_ns; + u64 io_start_time_ns; + short unsigned int wbt_flags; + short unsigned int stats_sectors; + 
short unsigned int nr_phys_segments; + short unsigned int nr_integrity_segments; + enum mq_rq_state state; + atomic_t ref; + long unsigned int deadline; + union { + struct hlist_node hash; + struct llist_node ipi_list; + }; + union { + struct rb_node rb_node; + struct bio_vec special_vec; + }; + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + struct { + unsigned int seq; + rq_end_io_fn *saved_end_io; + } flush; + long: 32; + u64 fifo_time; + rq_end_io_fn *end_io; + void *end_io_data; +}; + +enum rq_qos_id { + RQ_QOS_WBT = 0, + RQ_QOS_LATENCY = 1, + RQ_QOS_COST = 2, +}; + +struct rq_qos_ops; + +struct rq_qos { + const struct rq_qos_ops *ops; + struct gendisk *disk; + enum rq_qos_id id; + struct rq_qos *next; + struct dentry *debugfs_dir; +}; + +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + unsigned int active_queues; + struct sbitmap_queue bitmap_tags; + struct sbitmap_queue breserved_tags; + struct request **rqs; + struct request **static_rqs; + struct list_head page_list; + spinlock_t lock; +}; + +struct blk_flush_queue { + spinlock_t mq_flush_lock; + unsigned int flush_pending_idx: 1; + unsigned int flush_running_idx: 1; + blk_status_t rq_status; + long unsigned int flush_pending_since; + struct list_head flush_queue[2]; + long unsigned int flush_data_in_flight; + struct request *flush_rq; +}; + +struct blk_mq_queue_map { + unsigned int *mq_map; + unsigned int nr_queues; + unsigned int queue_offset; +}; + +struct blk_mq_tag_set { + const struct blk_mq_ops *ops; + struct blk_mq_queue_map map[3]; + unsigned int nr_maps; + unsigned int nr_hw_queues; + unsigned int queue_depth; + unsigned int reserved_tags; + unsigned int cmd_size; + int numa_node; + unsigned int timeout; + unsigned int flags; + void *driver_data; + struct blk_mq_tags **tags; + struct blk_mq_tags *shared_tags; + struct mutex tag_list_lock; + struct list_head tag_list; + struct srcu_struct *srcu; +}; + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 4294967295, +}; + +struct blk_mq_hw_ctx { + struct { + spinlock_t lock; + struct list_head dispatch; + long unsigned int state; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct delayed_work run_work; + cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; + long unsigned int flags; + void *sched_data; + struct request_queue *queue; + struct blk_flush_queue *fq; + void *driver_data; + struct sbitmap ctx_map; + struct blk_mq_ctx *dispatch_from; + unsigned int dispatch_busy; + short unsigned int type; + short unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + spinlock_t dispatch_wait_lock; + wait_queue_entry_t dispatch_wait; + atomic_t wait_index; + struct blk_mq_tags *tags; + struct blk_mq_tags *sched_tags; + unsigned int numa_node; + unsigned int queue_num; + atomic_t nr_active; + struct hlist_node cpuhp_online; + struct hlist_node cpuhp_dead; + struct kobject kobj; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct list_head hctx_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct blk_mq_queue_data { + struct request *rq; + bool last; +}; + +enum blk_integrity_flags { + 
BLK_INTEGRITY_NOVERIFY = 1, + BLK_INTEGRITY_NOGENERATE = 2, + BLK_INTEGRITY_DEVICE_CAPABLE = 4, + BLK_INTEGRITY_REF_TAG = 8, + BLK_INTEGRITY_STACKED = 16, +}; + +struct blk_mq_debugfs_attr { + const char *name; + umode_t mode; + int (*show)(void *, struct seq_file *); + ssize_t (*write)(void *, const char *, size_t, loff_t *); + const struct seq_operations *seq_ops; +}; + +struct rq_qos_ops { + void (*throttle)(struct rq_qos *, struct bio *); + void (*track)(struct rq_qos *, struct request *, struct bio *); + void (*merge)(struct rq_qos *, struct request *, struct bio *); + void (*issue)(struct rq_qos *, struct request *); + void (*requeue)(struct rq_qos *, struct request *); + void (*done)(struct rq_qos *, struct request *); + void (*done_bio)(struct rq_qos *, struct bio *); + void (*cleanup)(struct rq_qos *, struct bio *); + void (*queue_depth_changed)(struct rq_qos *); + void (*exit)(struct rq_qos *); + const struct blk_mq_debugfs_attr *debugfs_attrs; +}; + +enum { + IOPRIO_CLASS_NONE = 0, + IOPRIO_CLASS_RT = 1, + IOPRIO_CLASS_BE = 2, + IOPRIO_CLASS_IDLE = 3, + IOPRIO_CLASS_INVALID = 7, +}; + +enum { + IOPRIO_HINT_NONE = 0, + IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1, + IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2, + IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3, + IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4, + IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5, + IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6, + IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7, +}; + +struct blkg_iostat { + u64 bytes[3]; + u64 ios[3]; +}; + +struct blkg_iostat_set { + struct u64_stats_sync sync; + struct blkcg_gq *blkg; + struct llist_node lnode; + int lqueued; + struct blkg_iostat cur; + struct blkg_iostat last; +}; + +struct blkcg; + +struct blkg_policy_data; + +struct blkcg_gq { + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + struct blkcg_gq *parent; + struct percpu_ref refcnt; + bool online; + struct blkg_iostat_set *iostat_cpu; + long: 32; + struct blkg_iostat_set iostat; + struct blkg_policy_data *pd[6]; + spinlock_t async_bio_lock; + struct bio_list async_bios; + union { + struct work_struct async_bio_work; + struct work_struct free_work; + }; + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; + struct callback_head callback_head; + long: 32; +}; + +enum blkg_iostat_type { + BLKG_IOSTAT_READ = 0, + BLKG_IOSTAT_WRITE = 1, + BLKG_IOSTAT_DISCARD = 2, + BLKG_IOSTAT_NR = 3, +}; + +struct blkcg_policy_data; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + refcount_t online_pin; + atomic_t congestion_count; + struct xarray blkg_tree; + struct blkcg_gq *blkg_hint; + struct hlist_head blkg_list; + struct blkcg_policy_data *cpd[6]; + struct list_head all_blkcgs_node; + struct llist_head *lhead; + struct list_head cgwb_list; + long: 32; +}; + +struct blkg_policy_data { + struct blkcg_gq *blkg; + int plid; + bool online; +}; + +struct blkcg_policy_data { + struct blkcg *blkcg; + int plid; +}; + +typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); + +typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); + +typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(struct gendisk *, struct blkcg *, gfp_t); + +typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); + +typedef void 
blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_stat_pd_fn(struct blkg_policy_data *, struct seq_file *); + +struct blkcg_policy { + int plid; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +enum prio_policy { + POLICY_NO_CHANGE = 0, + POLICY_PROMOTE_TO_RT = 1, + POLICY_RESTRICT_TO_BE = 2, + POLICY_ALL_TO_IDLE = 3, + POLICY_NONE_TO_RT = 4, +}; + +struct ioprio_blkcg { + struct blkcg_policy_data cpd; + enum prio_policy prio_policy; +}; + +enum tk_offsets { + TK_OFFS_REAL = 0, + TK_OFFS_BOOT = 1, + TK_OFFS_TAI = 2, + TK_OFFS_MAX = 3, +}; + +struct io_ring_ctx; + +struct io_wq; + +struct io_uring_task { + int cached_refs; + const struct io_ring_ctx *last; + struct task_struct *task; + struct io_wq *io_wq; + struct file *registered_rings[16]; + struct xarray xa; + struct wait_queue_head wait; + atomic_t in_cancel; + atomic_t inflight_tracked; + struct percpu_counter inflight; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct { + struct llist_head task_list; + struct callback_head task_work; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; +}; + +struct io_uring_sqe { + __u8 opcode; + __u8 flags; + __u16 ioprio; + __s32 fd; + union { + __u64 off; + __u64 addr2; + struct { + __u32 cmd_op; + __u32 __pad1; + }; + }; + union { + __u64 addr; + __u64 splice_off_in; + struct { + __u32 level; + __u32 optname; + }; + }; + __u32 len; + union { + __kernel_rwf_t rw_flags; + __u32 fsync_flags; + __u16 poll_events; + __u32 poll32_events; + __u32 sync_range_flags; + __u32 msg_flags; + __u32 timeout_flags; + __u32 accept_flags; + __u32 cancel_flags; + __u32 open_flags; + __u32 statx_flags; + __u32 fadvise_advice; + __u32 splice_flags; + __u32 rename_flags; + __u32 unlink_flags; + __u32 hardlink_flags; + __u32 xattr_flags; + __u32 msg_ring_flags; + __u32 uring_cmd_flags; + __u32 waitid_flags; + __u32 futex_flags; + __u32 install_fd_flags; + __u32 nop_flags; + }; + __u64 user_data; + union { + __u16 buf_index; + __u16 buf_group; + }; + __u16 personality; + union { + __s32 splice_fd_in; + __u32 file_index; + __u32 optlen; + struct { + __u16 addr_len; + __u16 __pad3[1]; + }; + }; + union { + struct { + __u64 addr3; + __u64 __pad2[1]; + }; + __u64 optval; + __u8 cmd[0]; + }; +}; + +struct io_uring_cqe { + __u64 user_data; + __s32 res; + __u32 flags; + __u64 big_cqe[0]; +}; + +enum task_work_notify_mode { + TWA_NONE = 0, + TWA_RESUME = 1, + TWA_SIGNAL = 2, + TWA_SIGNAL_NO_IPI = 3, + TWA_NMI_CURRENT = 4, + TWA_FLAGS = 65280, + TWAF_NO_ALLOC = 256, +}; + +enum io_uring_cmd_flags { + 
IO_URING_F_COMPLETE_DEFER = 1, + IO_URING_F_UNLOCKED = 2, + IO_URING_F_MULTISHOT = 4, + IO_URING_F_IOWQ = 8, + IO_URING_F_NONBLOCK = -2147483648, + IO_URING_F_SQE128 = 256, + IO_URING_F_CQE32 = 512, + IO_URING_F_IOPOLL = 1024, + IO_URING_F_CANCEL = 2048, + IO_URING_F_COMPAT = 4096, + IO_URING_F_TASK_DEAD = 8192, +}; + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +struct io_wq_work { + struct io_wq_work_node list; + atomic_t flags; + int cancel_seq; +}; + +struct io_rsrc_node; + +struct io_rsrc_data { + unsigned int nr; + struct io_rsrc_node **nodes; +}; + +struct io_mapped_ubuf; + +struct io_rsrc_node { + unsigned char type; + int refs; + u64 tag; + union { + long unsigned int file_ptr; + struct io_mapped_ubuf *buf; + }; + long: 32; +}; + +struct io_file_table { + struct io_rsrc_data data; + long unsigned int *bitmap; + unsigned int alloc_hint; +}; + +struct io_hash_bucket { + struct hlist_head list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct io_hash_table { + struct io_hash_bucket *hbs; + unsigned int hash_bits; +}; + +struct io_mapped_region { + struct page **pages; + void *vmap_ptr; + size_t nr_pages; +}; + +struct io_kiocb; + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + struct io_wq_work_node free_list; + struct io_wq_work_list compl_reqs; + struct io_submit_link link; + bool plug_started; + bool need_plug; + bool cq_flush; + short unsigned int submit_nr; + long: 32; + struct blk_plug plug; +}; + +struct io_alloc_cache { + void **entries; + unsigned int nr_cached; + unsigned int max_cached; + size_t elem_size; +}; + +struct io_restriction { + long unsigned int register_op[2]; + long unsigned int sqe_op[2]; + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_rings; + +struct io_ev_fd; + +struct io_sq_data; + +struct io_wq_hash; + +struct io_ring_ctx { + struct { + unsigned int flags; + unsigned int drain_next: 1; + unsigned int restricted: 1; + unsigned int off_timeout_used: 1; + unsigned int drain_active: 1; + unsigned int has_evfd: 1; + unsigned int task_complete: 1; + unsigned int lockless_cq: 1; + unsigned int syscall_iopoll: 1; + unsigned int poll_activated: 1; + unsigned int drain_disabled: 1; + unsigned int compat: 1; + unsigned int iowq_limits_set: 1; + struct task_struct *submitter_task; + struct io_rings *rings; + struct percpu_ref refs; + clockid_t clockid; + enum tk_offsets clock_offset; + enum task_work_notify_mode notify_method; + unsigned int sq_thread_idle; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + struct mutex uring_lock; + u32 *sq_array; + struct io_uring_sqe *sq_sqes; + unsigned int cached_sq_head; + unsigned int sq_entries; + atomic_t cancel_seq; + bool poll_multi_queue; + struct io_wq_work_list iopoll_list; + struct io_file_table file_table; + struct io_rsrc_data buf_table; + struct 
io_submit_state submit_state; + struct xarray io_bl_xa; + struct io_hash_table cancel_table; + struct io_alloc_cache apoll_cache; + struct io_alloc_cache netmsg_cache; + struct io_alloc_cache rw_cache; + struct io_alloc_cache uring_cache; + struct hlist_head cancelable_uring_cmd; + u64 hybrid_poll_time; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + struct io_uring_cqe *cqe_cached; + struct io_uring_cqe *cqe_sentinel; + unsigned int cached_cq_tail; + unsigned int cq_entries; + struct io_ev_fd *io_ev_fd; + unsigned int cq_extra; + void *cq_wait_arg; + size_t cq_wait_size; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + struct llist_head work_llist; + struct llist_head retry_llist; + long unsigned int check_cq; + atomic_t cq_wait_nr; + atomic_t cq_timeouts; + struct wait_queue_head cq_wait; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + spinlock_t timeout_lock; + struct list_head timeout_list; + struct list_head ltimeout_list; + unsigned int cq_last_tm_flush; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + spinlock_t completion_lock; + struct list_head io_buffers_comp; + struct list_head cq_overflow_list; + struct hlist_head waitid_list; + struct hlist_head futex_list; + struct io_alloc_cache futex_cache; + const struct cred *sq_creds; + struct io_sq_data *sq_data; + struct wait_queue_head sqo_sq_wait; + struct list_head sqd_list; + unsigned int file_alloc_start; + unsigned int file_alloc_end; + struct list_head io_buffers_cache; + struct wait_queue_head poll_wq; + struct io_restriction restrictions; + u32 pers_next; + struct xarray personalities; + struct io_wq_hash *hash_map; + struct user_struct *user; + struct mm_struct *mm_account; + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + u32 iowq_limits[2]; + struct callback_head poll_wq_task_work; + struct list_head defer_list; + struct io_alloc_cache msg_cache; + spinlock_t msg_lock; + struct list_head napi_list; + spinlock_t napi_lock; + long: 32; + ktime_t napi_busy_poll_dt; + bool napi_prefer_busy_poll; + u8 napi_track_mode; + struct hlist_head napi_ht[16]; + unsigned int evfd_last_cq_tail; + struct mutex resize_lock; + short unsigned int n_ring_pages; + short unsigned int n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; + struct io_mapped_region param_region; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct io_uring { + u32 head; + u32 tail; +}; + +struct io_rings 
{ + struct io_uring sq; + struct io_uring cq; + u32 sq_ring_mask; + u32 cq_ring_mask; + u32 sq_ring_entries; + u32 cq_ring_entries; + u32 sq_dropped; + atomic_t sq_flags; + u32 cq_flags; + u32 cq_overflow; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct io_uring_cqe cqes[0]; +}; + +struct io_cmd_data { + struct file *file; + __u8 data[56]; +}; + +typedef u64 io_req_flags_t; + +struct io_cqe { + __u64 user_data; + __s32 res; + union { + __u32 flags; + int fd; + }; +}; + +struct io_tw_state; + +typedef void (*io_req_tw_func_t)(struct io_kiocb *, struct io_tw_state *); + +struct io_task_work { + struct llist_node node; + io_req_tw_func_t func; +}; + +struct io_buffer; + +struct io_buffer_list; + +struct async_poll; + +struct io_kiocb { + union { + struct file *file; + struct io_cmd_data cmd; + }; + u8 opcode; + u8 iopoll_completed; + u16 buf_index; + unsigned int nr_tw; + long: 32; + io_req_flags_t flags; + struct io_cqe cqe; + struct io_ring_ctx *ctx; + struct io_uring_task *tctx; + union { + struct io_buffer *kbuf; + struct io_buffer_list *buf_list; + struct io_rsrc_node *buf_node; + }; + union { + struct io_wq_work_node comp_list; + __poll_t apoll_events; + }; + struct io_rsrc_node *file_node; + atomic_t refs; + bool cancel_seq_set; + struct io_task_work io_task_work; + long: 32; + union { + struct hlist_node hash_node; + u64 iopoll_start; + }; + struct async_poll *apoll; + void *async_data; + atomic_t poll_refs; + struct io_kiocb *link; + const struct cred *creds; + struct io_wq_work work; + struct { + u64 extra1; + u64 extra2; + } big_cqe; +}; + +struct io_wq_hash { + refcount_t refs; + long unsigned int map; + struct wait_queue_head wait; +}; + +struct io_tw_state {}; + +enum { + REQ_F_FIXED_FILE = 1ULL, + REQ_F_IO_DRAIN = 2ULL, + REQ_F_LINK = 4ULL, + REQ_F_HARDLINK = 8ULL, + REQ_F_FORCE_ASYNC = 16ULL, + REQ_F_BUFFER_SELECT = 32ULL, + REQ_F_CQE_SKIP = 64ULL, + REQ_F_FAIL = 256ULL, + REQ_F_INFLIGHT = 512ULL, + REQ_F_CUR_POS = 1024ULL, + REQ_F_NOWAIT = 2048ULL, + REQ_F_LINK_TIMEOUT = 4096ULL, + REQ_F_NEED_CLEANUP = 8192ULL, + REQ_F_POLLED = 16384ULL, + REQ_F_IOPOLL_STATE = 32768ULL, + REQ_F_BUFFER_SELECTED = 65536ULL, + REQ_F_BUFFER_RING = 131072ULL, + REQ_F_REISSUE = 262144ULL, + REQ_F_SUPPORT_NOWAIT = 268435456ULL, + REQ_F_ISREG = 536870912ULL, + REQ_F_CREDS = 524288ULL, + REQ_F_REFCOUNT = 1048576ULL, + REQ_F_ARM_LTIMEOUT = 2097152ULL, + REQ_F_ASYNC_DATA = 4194304ULL, + REQ_F_SKIP_LINK_CQES = 8388608ULL, + REQ_F_SINGLE_POLL = 16777216ULL, + REQ_F_DOUBLE_POLL = 33554432ULL, + REQ_F_APOLL_MULTISHOT = 67108864ULL, + REQ_F_CLEAR_POLLIN = 134217728ULL, + REQ_F_POLL_NO_LAZY = 1073741824ULL, + REQ_F_CAN_POLL = 2147483648ULL, + REQ_F_BL_EMPTY = 4294967296ULL, + REQ_F_BL_NO_RECYCLE = 8589934592ULL, + REQ_F_BUFFERS_COMMIT = 17179869184ULL, + REQ_F_BUF_NODE = 34359738368ULL, +}; + +struct io_mapped_ubuf { + u64 ubuf; + unsigned int len; + unsigned int nr_bvecs; + unsigned int folio_shift; + refcount_t refs; + long unsigned int acct_pages; + struct bio_vec bvec[0]; + long: 32; +}; + +enum { + IOU_OK = 0, + IOU_ISSUE_SKIP_COMPLETE = -529, + IOU_REQUEUE = -3072, + IOU_STOP_MULTISHOT = -158, +}; + +struct io_fadvise { + struct file *file; + long: 32; + u64 offset; + u64 len; + u32 advice; + long: 32; +}; + +struct io_madvise { + struct file *file; + long: 32; + u64 addr; + u64 len; + u32 advice; + long: 
32; +}; + +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + long unsigned int flags; + long unsigned int desc; + struct resource *parent; + struct resource *sibling; + struct resource *child; +}; + +typedef void (*dr_release_t)(struct device *, void *); + +typedef int (*dr_match_t)(struct device *, void *, void *); + +enum devm_ioremap_type { + DEVM_IOREMAP = 0, + DEVM_IOREMAP_UC = 1, + DEVM_IOREMAP_WC = 2, + DEVM_IOREMAP_NP = 3, +}; + +struct arch_io_reserve_memtype_wc_devres { + resource_size_t start; + resource_size_t size; +}; + +enum cpuhp_state { + CPUHP_INVALID = -1, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS = 1, + CPUHP_PERF_PREPARE = 2, + CPUHP_PERF_X86_PREPARE = 3, + CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, + CPUHP_PERF_POWER = 5, + CPUHP_PERF_SUPERH = 6, + CPUHP_X86_HPET_DEAD = 7, + CPUHP_X86_MCE_DEAD = 8, + CPUHP_VIRT_NET_DEAD = 9, + CPUHP_IBMVNIC_DEAD = 10, + CPUHP_SLUB_DEAD = 11, + CPUHP_DEBUG_OBJ_DEAD = 12, + CPUHP_MM_WRITEBACK_DEAD = 13, + CPUHP_MM_VMSTAT_DEAD = 14, + CPUHP_SOFTIRQ_DEAD = 15, + CPUHP_NET_MVNETA_DEAD = 16, + CPUHP_CPUIDLE_DEAD = 17, + CPUHP_ARM64_FPSIMD_DEAD = 18, + CPUHP_ARM_OMAP_WAKE_DEAD = 19, + CPUHP_IRQ_POLL_DEAD = 20, + CPUHP_BLOCK_SOFTIRQ_DEAD = 21, + CPUHP_BIO_DEAD = 22, + CPUHP_ACPI_CPUDRV_DEAD = 23, + CPUHP_S390_PFAULT_DEAD = 24, + CPUHP_BLK_MQ_DEAD = 25, + CPUHP_FS_BUFF_DEAD = 26, + CPUHP_PRINTK_DEAD = 27, + CPUHP_MM_MEMCQ_DEAD = 28, + CPUHP_PERCPU_CNT_DEAD = 29, + CPUHP_RADIX_DEAD = 30, + CPUHP_PAGE_ALLOC = 31, + CPUHP_NET_DEV_DEAD = 32, + CPUHP_PCI_XGENE_DEAD = 33, + CPUHP_IOMMU_IOVA_DEAD = 34, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 35, + CPUHP_PADATA_DEAD = 36, + CPUHP_AP_DTPM_CPU_DEAD = 37, + CPUHP_RANDOM_PREPARE = 38, + CPUHP_WORKQUEUE_PREP = 39, + CPUHP_POWER_NUMA_PREPARE = 40, + CPUHP_HRTIMERS_PREPARE = 41, + CPUHP_X2APIC_PREPARE = 42, + CPUHP_SMPCFD_PREPARE = 43, + CPUHP_RELAY_PREPARE = 44, + CPUHP_MD_RAID5_PREPARE = 45, + CPUHP_RCUTREE_PREP = 46, + CPUHP_CPUIDLE_COUPLED_PREPARE = 47, + CPUHP_POWERPC_PMAC_PREPARE = 48, + CPUHP_POWERPC_MMU_CTX_PREPARE = 49, + CPUHP_XEN_PREPARE = 50, + CPUHP_XEN_EVTCHN_PREPARE = 51, + CPUHP_ARM_SHMOBILE_SCU_PREPARE = 52, + CPUHP_SH_SH3X_PREPARE = 53, + CPUHP_TOPOLOGY_PREPARE = 54, + CPUHP_NET_IUCV_PREPARE = 55, + CPUHP_ARM_BL_PREPARE = 56, + CPUHP_TRACE_RB_PREPARE = 57, + CPUHP_MM_ZS_PREPARE = 58, + CPUHP_MM_ZSWP_POOL_PREPARE = 59, + CPUHP_KVM_PPC_BOOK3S_PREPARE = 60, + CPUHP_ZCOMP_PREPARE = 61, + CPUHP_TIMERS_PREPARE = 62, + CPUHP_TMIGR_PREPARE = 63, + CPUHP_MIPS_SOC_PREPARE = 64, + CPUHP_BP_PREPARE_DYN = 65, + CPUHP_BP_PREPARE_DYN_END = 85, + CPUHP_BP_KICK_AP = 86, + CPUHP_BRINGUP_CPU = 87, + CPUHP_AP_IDLE_DEAD = 88, + CPUHP_AP_OFFLINE = 89, + CPUHP_AP_CACHECTRL_STARTING = 90, + CPUHP_AP_SCHED_STARTING = 91, + CPUHP_AP_RCUTREE_DYING = 92, + CPUHP_AP_CPU_PM_STARTING = 93, + CPUHP_AP_IRQ_GIC_STARTING = 94, + CPUHP_AP_IRQ_HIP04_STARTING = 95, + CPUHP_AP_IRQ_APPLE_AIC_STARTING = 96, + CPUHP_AP_IRQ_ARMADA_XP_STARTING = 97, + CPUHP_AP_IRQ_BCM2836_STARTING = 98, + CPUHP_AP_IRQ_MIPS_GIC_STARTING = 99, + CPUHP_AP_IRQ_EIOINTC_STARTING = 100, + CPUHP_AP_IRQ_AVECINTC_STARTING = 101, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 102, + CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING = 103, + CPUHP_AP_IRQ_RISCV_IMSIC_STARTING = 104, + CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING = 105, + CPUHP_AP_ARM_MVEBU_COHERENCY = 106, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 107, + CPUHP_AP_PERF_X86_STARTING = 108, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 109, + CPUHP_AP_PERF_XTENSA_STARTING = 110, + 
CPUHP_AP_ARM_VFP_STARTING = 111, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 112, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 113, + CPUHP_AP_PERF_ARM_ACPI_STARTING = 114, + CPUHP_AP_PERF_ARM_STARTING = 115, + CPUHP_AP_PERF_RISCV_STARTING = 116, + CPUHP_AP_ARM_L2X0_STARTING = 117, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 118, + CPUHP_AP_ARM_ARCH_TIMER_STARTING = 119, + CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING = 120, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 121, + CPUHP_AP_JCORE_TIMER_STARTING = 122, + CPUHP_AP_ARM_TWD_STARTING = 123, + CPUHP_AP_QCOM_TIMER_STARTING = 124, + CPUHP_AP_TEGRA_TIMER_STARTING = 125, + CPUHP_AP_ARMADA_TIMER_STARTING = 126, + CPUHP_AP_MIPS_GIC_TIMER_STARTING = 127, + CPUHP_AP_ARC_TIMER_STARTING = 128, + CPUHP_AP_REALTEK_TIMER_STARTING = 129, + CPUHP_AP_RISCV_TIMER_STARTING = 130, + CPUHP_AP_CLINT_TIMER_STARTING = 131, + CPUHP_AP_CSKY_TIMER_STARTING = 132, + CPUHP_AP_TI_GP_TIMER_STARTING = 133, + CPUHP_AP_HYPERV_TIMER_STARTING = 134, + CPUHP_AP_DUMMY_TIMER_STARTING = 135, + CPUHP_AP_ARM_XEN_STARTING = 136, + CPUHP_AP_ARM_XEN_RUNSTATE_STARTING = 137, + CPUHP_AP_ARM_CORESIGHT_STARTING = 138, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 139, + CPUHP_AP_ARM64_ISNDEP_STARTING = 140, + CPUHP_AP_SMPCFD_DYING = 141, + CPUHP_AP_HRTIMERS_DYING = 142, + CPUHP_AP_TICK_DYING = 143, + CPUHP_AP_X86_TBOOT_DYING = 144, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 145, + CPUHP_AP_ONLINE = 146, + CPUHP_TEARDOWN_CPU = 147, + CPUHP_AP_ONLINE_IDLE = 148, + CPUHP_AP_HYPERV_ONLINE = 149, + CPUHP_AP_KVM_ONLINE = 150, + CPUHP_AP_SCHED_WAIT_EMPTY = 151, + CPUHP_AP_SMPBOOT_THREADS = 152, + CPUHP_AP_IRQ_AFFINITY_ONLINE = 153, + CPUHP_AP_BLK_MQ_ONLINE = 154, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 155, + CPUHP_AP_X86_INTEL_EPB_ONLINE = 156, + CPUHP_AP_PERF_ONLINE = 157, + CPUHP_AP_PERF_X86_ONLINE = 158, + CPUHP_AP_PERF_X86_UNCORE_ONLINE = 159, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 160, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 161, + CPUHP_AP_PERF_S390_CF_ONLINE = 162, + CPUHP_AP_PERF_S390_SF_ONLINE = 163, + CPUHP_AP_PERF_ARM_CCI_ONLINE = 164, + CPUHP_AP_PERF_ARM_CCN_ONLINE = 165, + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE = 166, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 167, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 168, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 169, + CPUHP_AP_PERF_ARM_HISI_PA_ONLINE = 170, + CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE = 171, + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE = 172, + CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE = 173, + CPUHP_AP_PERF_ARM_L2X0_ONLINE = 174, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 175, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 176, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 177, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 178, + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE = 179, + CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE = 180, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 181, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 182, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 183, + CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 184, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 185, + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 186, + CPUHP_AP_PERF_CSKY_ONLINE = 187, + CPUHP_AP_TMIGR_ONLINE = 188, + CPUHP_AP_WATCHDOG_ONLINE = 189, + CPUHP_AP_WORKQUEUE_ONLINE = 190, + CPUHP_AP_RANDOM_ONLINE = 191, + CPUHP_AP_RCUTREE_ONLINE = 192, + CPUHP_AP_BASE_CACHEINFO_ONLINE = 193, + CPUHP_AP_ONLINE_DYN = 194, + CPUHP_AP_ONLINE_DYN_END = 234, + CPUHP_AP_X86_HPET_ONLINE = 235, + CPUHP_AP_X86_KVM_CLK_ONLINE = 236, + CPUHP_AP_ACTIVE = 237, + CPUHP_ONLINE = 238, +}; + +union ieee754dp { + struct { + unsigned int sign: 1; + unsigned int bexp: 11; + u64 mant: 
52; + }; + u64 bits; +}; + +enum { + IEEE754_CLASS_NORM = 0, + IEEE754_CLASS_ZERO = 1, + IEEE754_CLASS_DNORM = 2, + IEEE754_CLASS_INF = 3, + IEEE754_CLASS_SNAN = 4, + IEEE754_CLASS_QNAN = 5, +}; + +struct _ieee754_csr { + unsigned int fcc: 7; + unsigned int nod: 1; + unsigned int c: 1; + unsigned int pad0: 3; + unsigned int abs2008: 1; + unsigned int nan2008: 1; + unsigned int cx: 6; + unsigned int mx: 5; + unsigned int sx: 5; + unsigned int rm: 2; +}; + +union ieee754sp { + struct { + unsigned int sign: 1; + unsigned int bexp: 8; + unsigned int mant: 23; + }; + u32 bits; +}; + +enum { + LOGIC_PIO_INDIRECT = 0, + LOGIC_PIO_CPU_MMIO = 1, +}; + +struct logic_pio_host_ops; + +struct logic_pio_hwaddr { + struct list_head list; + const struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; + long unsigned int flags; + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *, long unsigned int, size_t); + void (*out)(void *, long unsigned int, u32, size_t); + u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int); + void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int); +}; + +enum work_flags { + WORK_STRUCT_PENDING = 1, + WORK_STRUCT_INACTIVE = 2, + WORK_STRUCT_PWQ = 4, + WORK_STRUCT_LINKED = 8, + WORK_STRUCT_STATIC = 0, +}; + +typedef unsigned int xa_mark_t; + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; + +struct ida { + struct xarray xa; +}; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(const struct bus_type *, char *); + ssize_t (*store)(const struct bus_type *, const char *, size_t); +}; + +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *, struct device_attribute *, char *); + ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); +}; + +struct pci_device_id { + __u32 vendor; + __u32 device; + __u32 subvendor; + __u32 subdevice; + __u32 class; + __u32 class_mask; + kernel_ulong_t driver_data; + __u32 override_only; +}; + +struct dmi_strmatch { + unsigned char slot: 7; + unsigned char exact_match: 1; + char substr[79]; +}; + +struct dmi_system_id { + int (*callback)(const struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +}; + +struct pci_bus; + +struct hotplug_slot; + +struct pci_slot { + struct pci_bus *bus; + struct list_head list; + struct hotplug_slot *hotplug; + unsigned char number; + struct kobject kobj; +}; + +typedef short unsigned int pci_bus_flags_t; + +struct pci_dev; + +struct pci_ops; + +struct pci_bus { + struct list_head node; + struct pci_bus *parent; + struct list_head children; + struct list_head devices; + struct pci_dev *self; + struct list_head slots; + struct resource *resource[4]; + struct list_head resources; + struct resource busn_res; + struct pci_ops *ops; + void *sysdata; + struct proc_dir_entry *procdir; + unsigned char number; + unsigned char primary; + unsigned char max_bus_speed; + unsigned char cur_bus_speed; + int domain_nr; + char name[48]; + short unsigned int bridge_ctl; + pci_bus_flags_t bus_flags; + struct device *bridge; + long: 32; + struct device dev; + struct bin_attribute *legacy_io; + struct bin_attribute *legacy_mem; + unsigned int is_added: 1; + unsigned int unsafe_warn: 1; + long: 32; +}; + +struct hotplug_slot_ops; + +struct hotplug_slot { + const struct hotplug_slot_ops *ops; + struct list_head slot_list; + struct pci_slot *pci_slot; + struct 
module *owner; + const char *mod_name; +}; + +enum { + PCI_STD_RESOURCES = 0, + PCI_STD_RESOURCE_END = 5, + PCI_ROM_RESOURCE = 6, + PCI_BRIDGE_RESOURCES = 7, + PCI_BRIDGE_RESOURCE_END = 10, + PCI_NUM_RESOURCES = 11, + DEVICE_COUNT_RESOURCE = 11, +}; + +typedef int pci_power_t; + +typedef unsigned int pci_channel_state_t; + +enum { + pci_channel_io_normal = 1, + pci_channel_io_frozen = 2, + pci_channel_io_perm_failure = 3, +}; + +enum pcie_reset_state { + pcie_deassert_reset = 1, + pcie_warm_reset = 2, + pcie_hot_reset = 3, +}; + +typedef short unsigned int pci_dev_flags_t; + +enum pci_dev_flags { + PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, + PCI_DEV_FLAGS_NO_D3 = 2, + PCI_DEV_FLAGS_ASSIGNED = 4, + PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, + PCI_DEV_FLAGS_NO_BUS_RESET = 64, + PCI_DEV_FLAGS_NO_PM_RESET = 128, + PCI_DEV_FLAGS_VPD_REF_F0 = 256, + PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, + PCI_DEV_FLAGS_NO_FLR_RESET = 1024, + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, + PCI_DEV_FLAGS_HAS_MSI_MASKING = 4096, +}; + +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = 1, + PCI_BUS_FLAGS_NO_MMRBC = 2, + PCI_BUS_FLAGS_NO_AERSID = 4, + PCI_BUS_FLAGS_NO_EXTCFG = 8, +}; + +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0, + PCIE_LNK_X1 = 1, + PCIE_LNK_X2 = 2, + PCIE_LNK_X4 = 4, + PCIE_LNK_X8 = 8, + PCIE_LNK_X12 = 12, + PCIE_LNK_X16 = 16, + PCIE_LNK_X32 = 32, + PCIE_LNK_WIDTH_UNKNOWN = 255, +}; + +enum pci_bus_speed { + PCI_SPEED_33MHz = 0, + PCI_SPEED_66MHz = 1, + PCI_SPEED_66MHz_PCIX = 2, + PCI_SPEED_100MHz_PCIX = 3, + PCI_SPEED_133MHz_PCIX = 4, + PCI_SPEED_66MHz_PCIX_ECC = 5, + PCI_SPEED_100MHz_PCIX_ECC = 6, + PCI_SPEED_133MHz_PCIX_ECC = 7, + PCI_SPEED_66MHz_PCIX_266 = 9, + PCI_SPEED_100MHz_PCIX_266 = 10, + PCI_SPEED_133MHz_PCIX_266 = 11, + AGP_UNKNOWN = 12, + AGP_1X = 13, + AGP_2X = 14, + AGP_4X = 15, + AGP_8X = 16, + PCI_SPEED_66MHz_PCIX_533 = 17, + PCI_SPEED_100MHz_PCIX_533 = 18, + PCI_SPEED_133MHz_PCIX_533 = 19, + PCIE_SPEED_2_5GT = 20, + PCIE_SPEED_5_0GT = 21, + PCIE_SPEED_8_0GT = 22, + PCIE_SPEED_16_0GT = 23, + PCIE_SPEED_32_0GT = 24, + PCIE_SPEED_64_0GT = 25, + PCI_SPEED_UNKNOWN = 255, +}; + +struct pci_vpd { + struct mutex lock; + unsigned int len; + u8 cap; +}; + +struct pcie_bwctrl_data; + +struct pci_driver; + +struct pcie_link_state; + +struct pci_dev { + struct list_head bus_list; + struct pci_bus *bus; + struct pci_bus *subordinate; + void *sysdata; + struct proc_dir_entry *procent; + struct pci_slot *slot; + unsigned int devfn; + short unsigned int vendor; + short unsigned int device; + short unsigned int subsystem_vendor; + short unsigned int subsystem_device; + unsigned int class; + u8 revision; + u8 hdr_type; + u32 devcap; + u8 pcie_cap; + u8 msi_cap; + u8 msix_cap; + u8 pcie_mpss: 3; + u8 rom_base_reg; + u8 pin; + u16 pcie_flags_reg; + long unsigned int *dma_alias_mask; + struct pci_driver *driver; + long: 32; + u64 dma_mask; + struct device_dma_parameters dma_parms; + pci_power_t current_state; + u8 pm_cap; + unsigned int pme_support: 5; + unsigned int pme_poll: 1; + unsigned int pinned: 1; + unsigned int config_rrs_sv: 1; + unsigned int imm_ready: 1; + unsigned int d1_support: 1; + unsigned int d2_support: 1; + unsigned int no_d1d2: 1; + unsigned int no_d3cold: 1; + unsigned int bridge_d3: 1; + unsigned int d3cold_allowed: 1; + unsigned int mmio_always_on: 1; + unsigned int wakeup_prepared: 1; + unsigned int skip_bus_pm: 1; + unsigned int ignore_hotplug: 1; + unsigned int hotplug_user_indicators: 1; + unsigned int clear_retrain_link: 1; + unsigned int 
d3hot_delay; + unsigned int d3cold_delay; + u16 l1ss; + struct pcie_link_state *link_state; + unsigned int ltr_path: 1; + unsigned int pasid_no_tlp: 1; + unsigned int eetlp_prefix_path: 1; + pci_channel_state_t error_state; + long: 32; + struct device dev; + int cfg_size; + unsigned int irq; + struct resource resource[11]; + struct resource driver_exclusive_resource; + bool match_driver; + unsigned int transparent: 1; + unsigned int io_window: 1; + unsigned int pref_window: 1; + unsigned int pref_64_window: 1; + unsigned int multifunction: 1; + unsigned int is_busmaster: 1; + unsigned int no_msi: 1; + unsigned int no_64bit_msi: 1; + unsigned int block_cfg_access: 1; + unsigned int broken_parity_status: 1; + unsigned int irq_reroute_variant: 2; + unsigned int msi_enabled: 1; + unsigned int msix_enabled: 1; + unsigned int ari_enabled: 1; + unsigned int ats_enabled: 1; + unsigned int pasid_enabled: 1; + unsigned int pri_enabled: 1; + unsigned int tph_enabled: 1; + unsigned int is_managed: 1; + unsigned int is_msi_managed: 1; + unsigned int needs_freset: 1; + unsigned int state_saved: 1; + unsigned int is_physfn: 1; + unsigned int is_virtfn: 1; + unsigned int is_hotplug_bridge: 1; + unsigned int shpc_managed: 1; + unsigned int is_thunderbolt: 1; + unsigned int untrusted: 1; + unsigned int external_facing: 1; + unsigned int broken_intx_masking: 1; + unsigned int io_window_1k: 1; + unsigned int irq_managed: 1; + unsigned int non_compliant_bars: 1; + unsigned int is_probed: 1; + unsigned int link_active_reporting: 1; + unsigned int no_vf_scan: 1; + unsigned int no_command_memory: 1; + unsigned int rom_bar_overlap: 1; + unsigned int rom_attr_enabled: 1; + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; + spinlock_t pcie_cap_lock; + u32 saved_config_space[16]; + struct hlist_head saved_cap_space; + struct bin_attribute *res_attr[11]; + struct bin_attribute *res_attr_wc[11]; + void *msix_base; + raw_spinlock_t msi_lock; + struct pci_vpd vpd; + struct pcie_bwctrl_data *link_bwctrl; + u16 acs_cap; + u8 supported_speeds; + phys_addr_t rom; + size_t romlen; + const char *driver_override; + long unsigned int priv_flags; + u8 reset_methods[8]; + long: 32; +}; + +struct pci_dynids { + spinlock_t lock; + struct list_head list; +}; + +struct pci_error_handlers; + +struct pci_driver { + const char *name; + const struct pci_device_id *id_table; + int (*probe)(struct pci_dev *, const struct pci_device_id *); + void (*remove)(struct pci_dev *); + int (*suspend)(struct pci_dev *, pm_message_t); + int (*resume)(struct pci_dev *); + void (*shutdown)(struct pci_dev *); + int (*sriov_configure)(struct pci_dev *, int); + int (*sriov_set_msix_vec_count)(struct pci_dev *, int); + u32 (*sriov_get_vf_total_msix)(struct pci_dev *); + const struct pci_error_handlers *err_handler; + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + struct device_driver driver; + struct pci_dynids dynids; + bool driver_managed_dma; +}; + +struct pci_host_bridge { + struct device dev; + struct pci_bus *bus; + struct pci_ops *ops; + struct pci_ops *child_ops; + void *sysdata; + int busnr; + int domain_nr; + struct list_head windows; + struct list_head dma_ranges; + u8 (*swizzle_irq)(struct pci_dev *, u8 *); + int (*map_irq)(const struct pci_dev *, u8, u8); + void (*release_fn)(struct pci_host_bridge *); + void *release_data; + unsigned int ignore_reset_delay: 1; + unsigned int no_ext_tags: 1; + unsigned int no_inc_mrrs: 1; + unsigned int native_aer: 1; + unsigned int native_pcie_hotplug: 1; + unsigned int 
native_shpc_hotplug: 1; + unsigned int native_pme: 1; + unsigned int native_ltr: 1; + unsigned int native_dpc: 1; + unsigned int native_cxl_error: 1; + unsigned int preserve_config: 1; + unsigned int size_windows: 1; + unsigned int msi_domain: 1; + resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int private[0]; +}; + +struct pci_ops { + int (*add_bus)(struct pci_bus *); + void (*remove_bus)(struct pci_bus *); + void * (*map_bus)(struct pci_bus *, unsigned int, int); + int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); + int (*write)(struct pci_bus *, unsigned int, int, int, u32); +}; + +typedef unsigned int pci_ers_result_t; + +struct pci_error_handlers { + pci_ers_result_t (*error_detected)(struct pci_dev *, pci_channel_state_t); + pci_ers_result_t (*mmio_enabled)(struct pci_dev *); + pci_ers_result_t (*slot_reset)(struct pci_dev *); + void (*reset_prepare)(struct pci_dev *); + void (*reset_done)(struct pci_dev *); + void (*resume)(struct pci_dev *); + void (*cor_error_detected)(struct pci_dev *); +}; + +enum { + PCI_REASSIGN_ALL_RSRC = 1, + PCI_REASSIGN_ALL_BUS = 2, + PCI_PROBE_ONLY = 4, + PCI_CAN_SKIP_ISA_ALIGN = 8, + PCI_ENABLE_PROC_DOMAINS = 16, + PCI_COMPAT_DOMAIN_0 = 32, + PCI_SCAN_ALL_PCIE_DEVS = 64, +}; + +enum pcie_bus_config_types { + PCIE_BUS_TUNE_OFF = 0, + PCIE_BUS_DEFAULT = 1, + PCIE_BUS_SAFE = 2, + PCIE_BUS_PERFORMANCE = 3, + PCIE_BUS_PEER2PEER = 4, +}; + +typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); + +enum pci_fixup_pass { + pci_fixup_early = 0, + pci_fixup_header = 1, + pci_fixup_final = 2, + pci_fixup_enable = 3, + pci_fixup_resume = 4, + pci_fixup_suspend = 5, + pci_fixup_resume_early = 6, + pci_fixup_suspend_late = 7, +}; + +struct hotplug_slot_ops { + int (*enable_slot)(struct hotplug_slot *); + int (*disable_slot)(struct hotplug_slot *); + int (*set_attention_status)(struct hotplug_slot *, u8); + int (*hardware_test)(struct hotplug_slot *, u32); + int (*get_power_status)(struct hotplug_slot *, u8 *); + int (*get_attention_status)(struct hotplug_slot *, u8 *); + int (*get_latch_status)(struct hotplug_slot *, u8 *); + int (*get_adapter_status)(struct hotplug_slot *, u8 *); + int (*reset_slot)(struct hotplug_slot *, bool); +}; + +struct pcie_tlp_log { + u32 dw[4]; +}; + +struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +struct pci_reset_fn_method { + int (*reset_fn)(struct pci_dev *, bool); + char *name; +}; + +struct pci_pme_device { + struct list_head list; + struct pci_dev *dev; +}; + +struct pci_acs { + u16 cap; + u16 ctrl; + u16 fw_ctrl; +}; + +struct pci_saved_state { + u32 config_space[16]; + struct pci_cap_saved_data cap[0]; +}; + +struct u32_fract { + __u32 numerator; + __u32 denominator; +}; + +struct clk_core; + +struct clk_hw; + +struct clk_rate_request { + struct clk_core *core; + long unsigned int rate; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int best_parent_rate; + struct clk_hw *best_parent_hw; +}; + +struct clk; + +struct clk_init_data; + +struct clk_hw { + struct clk_core *core; + struct clk *clk; + const struct clk_init_data *init; +}; + +struct clk_duty { + unsigned int num; + unsigned int den; +}; + +struct clk_ops { + int (*prepare)(struct clk_hw *); + 
void (*unprepare)(struct clk_hw *); + int (*is_prepared)(struct clk_hw *); + void (*unprepare_unused)(struct clk_hw *); + int (*enable)(struct clk_hw *); + void (*disable)(struct clk_hw *); + int (*is_enabled)(struct clk_hw *); + void (*disable_unused)(struct clk_hw *); + int (*save_context)(struct clk_hw *); + void (*restore_context)(struct clk_hw *); + long unsigned int (*recalc_rate)(struct clk_hw *, long unsigned int); + long int (*round_rate)(struct clk_hw *, long unsigned int, long unsigned int *); + int (*determine_rate)(struct clk_hw *, struct clk_rate_request *); + int (*set_parent)(struct clk_hw *, u8); + u8 (*get_parent)(struct clk_hw *); + int (*set_rate)(struct clk_hw *, long unsigned int, long unsigned int); + int (*set_rate_and_parent)(struct clk_hw *, long unsigned int, long unsigned int, u8); + long unsigned int (*recalc_accuracy)(struct clk_hw *, long unsigned int); + int (*get_phase)(struct clk_hw *); + int (*set_phase)(struct clk_hw *, int); + int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*init)(struct clk_hw *); + void (*terminate)(struct clk_hw *); + void (*debug_init)(struct clk_hw *, struct dentry *); +}; + +struct clk_parent_data { + const struct clk_hw *hw; + const char *fw_name; + const char *name; + int index; +}; + +struct clk_init_data { + const char *name; + const struct clk_ops *ops; + const char * const *parent_names; + const struct clk_parent_data *parent_data; + const struct clk_hw **parent_hws; + u8 num_parents; + long unsigned int flags; +}; + +struct clk_fractional_divider { + struct clk_hw hw; + void *reg; + u8 mshift; + u8 mwidth; + u8 nshift; + u8 nwidth; + u8 flags; + void (*approximation)(struct clk_hw *, long unsigned int, long unsigned int *, long unsigned int *, long unsigned int *); + spinlock_t *lock; +}; + +struct va_format { + const char *fmt; + va_list *va; +}; + +struct plist_head { + struct list_head node_list; +}; + +enum kobject_action { + KOBJ_ADD = 0, + KOBJ_REMOVE = 1, + KOBJ_CHANGE = 2, + KOBJ_MOVE = 3, + KOBJ_ONLINE = 4, + KOBJ_OFFLINE = 5, + KOBJ_BIND = 6, + KOBJ_UNBIND = 7, +}; + +struct klist_node; + +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +}; + +struct klist_node { + void *n_klist; + struct list_head n_node; + struct kref n_ref; +}; + +struct wake_irq { + struct device *dev; + unsigned int status; + int irq; + const char *name; +}; + +enum pm_qos_type { + PM_QOS_UNITIALIZED = 0, + PM_QOS_MAX = 1, + PM_QOS_MIN = 2, +}; + +struct pm_qos_constraints { + struct plist_head list; + s32 target_value; + s32 default_value; + s32 no_constraint_value; + enum pm_qos_type type; + struct blocking_notifier_head *notifiers; +}; + +struct freq_constraints { + struct pm_qos_constraints min_freq; + struct blocking_notifier_head min_freq_notifiers; + struct pm_qos_constraints max_freq; + struct blocking_notifier_head max_freq_notifiers; +}; + +struct pm_qos_flags { + struct list_head list; + s32 effective_flags; +}; + +struct dev_pm_qos_request; + +struct dev_pm_qos { + struct pm_qos_constraints resume_latency; + struct pm_qos_constraints latency_tolerance; + struct freq_constraints freq; + struct pm_qos_flags flags; + struct dev_pm_qos_request *resume_latency_req; + struct dev_pm_qos_request *latency_tolerance_req; + struct dev_pm_qos_request *flags_req; +}; + +enum bus_notifier_event { + BUS_NOTIFY_ADD_DEVICE = 0, + BUS_NOTIFY_DEL_DEVICE = 1, + BUS_NOTIFY_REMOVED_DEVICE 
= 2, + BUS_NOTIFY_BIND_DRIVER = 3, + BUS_NOTIFY_BOUND_DRIVER = 4, + BUS_NOTIFY_UNBIND_DRIVER = 5, + BUS_NOTIFY_UNBOUND_DRIVER = 6, + BUS_NOTIFY_DRIVER_NOT_BOUND = 7, +}; + +struct driver_private { + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + struct module_kobject *mkobj; + struct device_driver *driver; +}; + +struct device_private { + struct klist klist_children; + struct klist_node knode_parent; + struct klist_node knode_driver; + struct klist_node knode_bus; + struct klist_node knode_class; + struct list_head deferred_probe; + const struct device_driver *async_driver; + char *deferred_probe_reason; + struct device *device; + u8 dead: 1; +}; + +typedef u64 async_cookie_t; + +typedef void (*async_func_t)(void *, async_cookie_t); + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; +}; + +enum freq_qos_req_type { + FREQ_QOS_MIN = 1, + FREQ_QOS_MAX = 2, +}; + +struct freq_qos_request { + enum freq_qos_req_type type; + struct plist_node pnode; + struct freq_constraints *qos; +}; + +enum dev_pm_qos_req_type { + DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE = 2, + DEV_PM_QOS_MIN_FREQUENCY = 3, + DEV_PM_QOS_MAX_FREQUENCY = 4, + DEV_PM_QOS_FLAGS = 5, +}; + +struct dev_pm_qos_request { + enum dev_pm_qos_req_type type; + union { + struct plist_node pnode; + struct pm_qos_flags_request flr; + struct freq_qos_request freq; + } data; + struct device *dev; +}; + +struct device_attach_data { + struct device *dev; + bool check_async; + bool want_async; + bool have_async; +}; + +struct msdos_partition { + u8 boot_ind; + u8 head; + u8 sector; + u8 cyl; + u8 sys_ind; + u8 end_head; + u8 end_sector; + u8 end_cyl; + __le32 start_sect; + __le32 nr_sects; +}; + +enum nvme_opcode { + nvme_cmd_flush = 0, + nvme_cmd_write = 1, + nvme_cmd_read = 2, + nvme_cmd_write_uncor = 4, + nvme_cmd_compare = 5, + nvme_cmd_write_zeroes = 8, + nvme_cmd_dsm = 9, + nvme_cmd_verify = 12, + nvme_cmd_resv_register = 13, + nvme_cmd_resv_report = 14, + nvme_cmd_resv_acquire = 17, + nvme_cmd_resv_release = 21, + nvme_cmd_zone_mgmt_send = 121, + nvme_cmd_zone_mgmt_recv = 122, + nvme_cmd_zone_append = 125, + nvme_cmd_vendor_start = 128, +}; + +enum nvme_admin_opcode { + nvme_admin_delete_sq = 0, + nvme_admin_create_sq = 1, + nvme_admin_get_log_page = 2, + nvme_admin_delete_cq = 4, + nvme_admin_create_cq = 5, + nvme_admin_identify = 6, + nvme_admin_abort_cmd = 8, + nvme_admin_set_features = 9, + nvme_admin_get_features = 10, + nvme_admin_async_event = 12, + nvme_admin_ns_mgmt = 13, + nvme_admin_activate_fw = 16, + nvme_admin_download_fw = 17, + nvme_admin_dev_self_test = 20, + nvme_admin_ns_attach = 21, + nvme_admin_keep_alive = 24, + nvme_admin_directive_send = 25, + nvme_admin_directive_recv = 26, + nvme_admin_virtual_mgmt = 28, + nvme_admin_nvme_mi_send = 29, + nvme_admin_nvme_mi_recv = 30, + nvme_admin_dbbuf = 124, + nvme_admin_format_nvm = 128, + nvme_admin_security_send = 129, + nvme_admin_security_recv = 130, + nvme_admin_sanitize_nvm = 132, + nvme_admin_get_lba_status = 134, + nvme_admin_vendor_start = 192, +}; + +enum nvmf_capsule_command { + nvme_fabrics_type_property_set = 0, + nvme_fabrics_type_connect = 1, + nvme_fabrics_type_property_get = 4, + nvme_fabrics_type_auth_send = 5, + nvme_fabrics_type_auth_receive = 6, +}; + +typedef __u32 blk_mq_req_flags_t; + +struct rchan; + +struct blk_trace { + int trace_state; + struct rchan *rchan; + long unsigned int *sequence; + unsigned char *msg_data; + u16 act_mask; + long: 32; + u64 start_lba; + u64 end_lba; + 
u32 pid; + u32 dev; + struct dentry *dir; + struct list_head running_list; + atomic_t dropped; +}; + +enum hctx_type { + HCTX_TYPE_DEFAULT = 0, + HCTX_TYPE_READ = 1, + HCTX_TYPE_POLL = 2, + HCTX_MAX_TYPES = 3, +}; + +struct rchan_buf { + void *start; + void *data; + size_t offset; + size_t subbufs_produced; + size_t subbufs_consumed; + struct rchan *chan; + wait_queue_head_t read_wait; + struct irq_work wakeup_work; + struct dentry *dentry; + struct kref kref; + struct page **page_array; + unsigned int page_count; + unsigned int finalized; + size_t *padding; + size_t prev_padding; + size_t bytes_consumed; + size_t early_bytes; + unsigned int cpu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct rchan_callbacks; + +struct rchan { + u32 version; + size_t subbuf_size; + size_t n_subbufs; + size_t alloc_size; + const struct rchan_callbacks *cb; + struct kref kref; + void *private_data; + size_t last_toobig; + struct rchan_buf **buf; + int is_global; + struct list_head list; + struct dentry *parent; + int has_base_filename; + char base_filename[255]; +}; + +struct rchan_callbacks { + int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t); + struct dentry * (*create_buf_file)(const char *, struct dentry *, umode_t, struct rchan_buf *, int *); + int (*remove_buf_file)(struct dentry *); +}; + +enum blktrace_act { + __BLK_TA_QUEUE = 1, + __BLK_TA_BACKMERGE = 2, + __BLK_TA_FRONTMERGE = 3, + __BLK_TA_GETRQ = 4, + __BLK_TA_SLEEPRQ = 5, + __BLK_TA_REQUEUE = 6, + __BLK_TA_ISSUE = 7, + __BLK_TA_COMPLETE = 8, + __BLK_TA_PLUG = 9, + __BLK_TA_UNPLUG_IO = 10, + __BLK_TA_UNPLUG_TIMER = 11, + __BLK_TA_INSERT = 12, + __BLK_TA_SPLIT = 13, + __BLK_TA_BOUNCE = 14, + __BLK_TA_REMAP = 15, + __BLK_TA_ABORT = 16, + __BLK_TA_DRV_DATA = 17, + __BLK_TA_CGROUP = 256, +}; + +typedef __u32 nvme_submit_flags_t; + +enum device_link_state { + DL_STATE_NONE = -1, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE = 1, + DL_STATE_CONSUMER_PROBE = 2, + DL_STATE_ACTIVE = 3, + DL_STATE_SUPPLIER_UNBIND = 4, +}; + +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + struct device link_dev; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; + struct work_struct rm_work; + bool supplier_preactivated; + long: 32; +}; + +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[6]; +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; + +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +struct ethtool_stats { + __u32 cmd; + __u32 n_stats; + __u64 data[0]; +}; + +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 
15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, + ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + 
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, + ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, + ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, + ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93, + ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94, + ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95, + ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, + ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, + ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, + ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, + ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, + ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, + ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102, + __ETHTOOL_LINK_MODE_MASK_NBITS = 103, +}; + +enum hwtstamp_tx_types { + HWTSTAMP_TX_OFF = 0, + HWTSTAMP_TX_ON = 1, + HWTSTAMP_TX_ONESTEP_SYNC = 2, + HWTSTAMP_TX_ONESTEP_P2P = 3, + __HWTSTAMP_TX_CNT = 4, +}; + +enum hwtstamp_rx_filters { + HWTSTAMP_FILTER_NONE = 0, + HWTSTAMP_FILTER_ALL = 1, + HWTSTAMP_FILTER_SOME = 2, + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, + HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, + HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, + HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, + HWTSTAMP_FILTER_PTP_V2_EVENT = 12, + HWTSTAMP_FILTER_PTP_V2_SYNC = 13, + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, + HWTSTAMP_FILTER_NTP_ALL = 15, + __HWTSTAMP_FILTER_CNT = 16, +}; + +struct kernel_ethtool_ts_info { + u32 cmd; + u32 so_timestamping; + int phc_index; + enum hwtstamp_tx_types tx_types; + enum hwtstamp_rx_filters rx_filters; +}; + +struct gpio_desc; + +struct reset_control; + +struct mii_bus; + +struct mdio_device { + struct device dev; + struct mii_bus *bus; + char modalias[32]; + int (*bus_match)(struct device *, const struct device_driver *); + void (*device_free)(struct mdio_device *); + void (*device_remove)(struct mdio_device *); + int addr; + int flags; + int reset_state; + struct gpio_desc *reset_gpio; + struct reset_control *reset_ctrl; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; + long: 32; +}; + +struct phy_c45_device_ids { + u32 devices_in_package; + u32 mmds_present; + u32 device_ids[32]; +}; + +enum phy_state { + PHY_DOWN = 0, + PHY_READY = 1, + PHY_HALTED = 2, + PHY_ERROR = 3, + PHY_UP = 4, + PHY_RUNNING = 5, + PHY_NOLINK = 6, + PHY_CABLETEST = 7, +}; + +typedef enum { + PHY_INTERFACE_MODE_NA = 0, + PHY_INTERFACE_MODE_INTERNAL = 1, + PHY_INTERFACE_MODE_MII = 2, + PHY_INTERFACE_MODE_GMII = 3, + PHY_INTERFACE_MODE_SGMII = 4, + PHY_INTERFACE_MODE_TBI = 5, + PHY_INTERFACE_MODE_REVMII = 6, + PHY_INTERFACE_MODE_RMII = 7, + PHY_INTERFACE_MODE_REVRMII = 8, + PHY_INTERFACE_MODE_RGMII = 9, + PHY_INTERFACE_MODE_RGMII_ID = 10, + PHY_INTERFACE_MODE_RGMII_RXID = 11, + PHY_INTERFACE_MODE_RGMII_TXID = 12, + PHY_INTERFACE_MODE_RTBI = 13, + PHY_INTERFACE_MODE_SMII = 14, + PHY_INTERFACE_MODE_XGMII = 15, + PHY_INTERFACE_MODE_XLGMII = 16, + PHY_INTERFACE_MODE_MOCA = 17, + PHY_INTERFACE_MODE_PSGMII = 18, + PHY_INTERFACE_MODE_QSGMII = 19, + PHY_INTERFACE_MODE_TRGMII = 20, + PHY_INTERFACE_MODE_100BASEX = 21, + PHY_INTERFACE_MODE_1000BASEX = 22, + PHY_INTERFACE_MODE_2500BASEX = 23, + PHY_INTERFACE_MODE_5GBASER = 24, + PHY_INTERFACE_MODE_RXAUI = 25, + PHY_INTERFACE_MODE_XAUI = 26, + PHY_INTERFACE_MODE_10GBASER = 27, + PHY_INTERFACE_MODE_25GBASER = 28, + PHY_INTERFACE_MODE_USXGMII = 29, + PHY_INTERFACE_MODE_10GKR = 30, + 
PHY_INTERFACE_MODE_QUSGMII = 31, + PHY_INTERFACE_MODE_1000BASEKX = 32, + PHY_INTERFACE_MODE_10G_QXGMII = 33, + PHY_INTERFACE_MODE_MAX = 34, +} phy_interface_t; + +struct eee_config { + u32 tx_lpi_timer; + bool tx_lpi_enabled; + bool eee_enabled; +}; + +struct phylink; + +struct pse_control; + +struct phy_driver; + +struct phy_package_shared; + +struct mii_timestamper; + +struct phy_device { + struct mdio_device mdio; + const struct phy_driver *drv; + struct device_link *devlink; + u32 phyindex; + u32 phy_id; + struct phy_c45_device_ids c45_ids; + unsigned int is_c45: 1; + unsigned int is_internal: 1; + unsigned int is_pseudo_fixed_link: 1; + unsigned int is_gigabit_capable: 1; + unsigned int has_fixups: 1; + unsigned int suspended: 1; + unsigned int suspended_by_mdio_bus: 1; + unsigned int sysfs_links: 1; + unsigned int loopback_enabled: 1; + unsigned int downshifted_rate: 1; + unsigned int is_on_sfp_module: 1; + unsigned int mac_managed_pm: 1; + unsigned int wol_enabled: 1; + unsigned int autoneg: 1; + unsigned int link: 1; + unsigned int autoneg_complete: 1; + unsigned int interrupts: 1; + unsigned int irq_suspended: 1; + unsigned int irq_rerun: 1; + unsigned int default_timestamp: 1; + int rate_matching; + enum phy_state state; + u32 dev_flags; + phy_interface_t interface; + long unsigned int possible_interfaces[2]; + int speed; + int duplex; + int port; + int pause; + int asym_pause; + u8 master_slave_get; + u8 master_slave_set; + u8 master_slave_state; + long unsigned int supported[4]; + long unsigned int advertising[4]; + long unsigned int lp_advertising[4]; + long unsigned int adv_old[4]; + long unsigned int supported_eee[4]; + long unsigned int advertising_eee[4]; + long unsigned int eee_broken_modes[4]; + bool enable_tx_lpi; + bool eee_active; + struct eee_config eee_cfg; + long unsigned int host_interfaces[2]; + struct list_head leds; + int irq; + void *priv; + struct phy_package_shared *shared; + struct sk_buff *skb; + void *ehdr; + struct nlattr *nest; + struct delayed_work state_queue; + struct mutex lock; + bool sfp_bus_attached; + struct sfp_bus *sfp_bus; + struct phylink *phylink; + struct net_device *attached_dev; + struct mii_timestamper *mii_ts; + struct pse_control *psec; + u8 mdix; + u8 mdix_ctrl; + int pma_extable; + unsigned int link_down_events; + void (*phy_link_change)(struct phy_device *, bool); + void (*adjust_link)(struct net_device *); +}; + +struct phy_plca_cfg { + int version; + int enabled; + int node_id; + int node_cnt; + int to_tmr; + int burst_cnt; + int burst_tmr; +}; + +struct phy_plca_status { + bool pst; +}; + +struct phy_tdr_config { + u32 first; + u32 last; + u32 step; + s8 pair; +}; + +enum led_brightness { + LED_OFF = 0, + LED_ON = 1, + LED_HALF = 127, + LED_FULL = 255, +}; + +struct mdio_bus_stats { + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t writes; + u64_stats_t reads; + struct u64_stats_sync syncp; + long: 32; +}; + +struct mii_bus { + struct module *owner; + const char *name; + char id[61]; + void *priv; + int (*read)(struct mii_bus *, int, int); + int (*write)(struct mii_bus *, int, int, u16); + int (*read_c45)(struct mii_bus *, int, int, int); + int (*write_c45)(struct mii_bus *, int, int, int, u16); + int (*reset)(struct mii_bus *); + struct mdio_bus_stats stats[32]; + struct mutex mdio_lock; + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED = 2, + MDIOBUS_UNREGISTERED = 3, + MDIOBUS_RELEASED = 4, + } state; + struct device dev; + struct mdio_device *mdio_map[32]; + u32 phy_mask; + u32 
phy_ignore_ta_mask; + int irq[32]; + int reset_delay_us; + int reset_post_delay_us; + struct gpio_desc *reset_gpiod; + struct mutex shared_lock; + struct phy_package_shared *shared[32]; + long: 32; +}; + +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; + +struct mii_timestamper { + bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int); + void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int); + int (*hwtstamp)(struct mii_timestamper *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); + void (*link_state)(struct mii_timestamper *, struct phy_device *); + int (*ts_info)(struct mii_timestamper *, struct kernel_ethtool_ts_info *); + struct device *device; +}; + +struct phy_package_shared { + u8 base_addr; + struct device_node *np; + refcount_t refcnt; + long unsigned int flags; + size_t priv_size; + void *priv; +}; + +struct phy_driver { + struct mdio_driver_common mdiodrv; + u32 phy_id; + char *name; + u32 phy_id_mask; + const long unsigned int * const features; + u32 flags; + const void *driver_data; + int (*soft_reset)(struct phy_device *); + int (*config_init)(struct phy_device *); + int (*probe)(struct phy_device *); + int (*get_features)(struct phy_device *); + int (*get_rate_matching)(struct phy_device *, phy_interface_t); + int (*suspend)(struct phy_device *); + int (*resume)(struct phy_device *); + int (*config_aneg)(struct phy_device *); + int (*aneg_done)(struct phy_device *); + int (*read_status)(struct phy_device *); + int (*config_intr)(struct phy_device *); + irqreturn_t (*handle_interrupt)(struct phy_device *); + void (*remove)(struct phy_device *); + int (*match_phy_device)(struct phy_device *); + int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*link_change_notify)(struct phy_device *); + int (*read_mmd)(struct phy_device *, int, u16); + int (*write_mmd)(struct phy_device *, int, u16, u16); + int (*read_page)(struct phy_device *); + int (*write_page)(struct phy_device *, int); + int (*module_info)(struct phy_device *, struct ethtool_modinfo *); + int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); + int (*cable_test_start)(struct phy_device *); + int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *); + int (*cable_test_get_status)(struct phy_device *, bool *); + int (*get_sset_count)(struct phy_device *); + void (*get_strings)(struct phy_device *, u8 *); + void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); + int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); + int (*set_loopback)(struct phy_device *, bool); + int (*get_sqi)(struct phy_device *); + int (*get_sqi_max)(struct phy_device *); + int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *); + int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); + int (*led_brightness_set)(struct phy_device *, u8, enum led_brightness); + int (*led_blink_set)(struct phy_device *, u8, long unsigned int *, long unsigned int *); + int (*led_hw_is_supported)(struct phy_device *, u8, long unsigned int); + int (*led_hw_control_set)(struct phy_device *, u8, long unsigned int); + int (*led_hw_control_get)(struct phy_device *, u8, long unsigned int *); + int (*led_polarity_set)(struct phy_device *, int, long unsigned int); +}; + 
+struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[16]; +}; + +struct firmware { + size_t size; + const u8 *data; + void *priv; +}; + +struct rtl8169_private; + +typedef void (*rtl_fw_write_t)(struct rtl8169_private *, int, int); + +typedef int (*rtl_fw_read_t)(struct rtl8169_private *, int); + +struct rtl_fw_phy_action { + __le32 *code; + size_t size; +}; + +struct rtl_fw { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + char version[32]; + struct rtl_fw_phy_action phy_action; +}; + +enum rtl_fw_opcode { + PHY_READ = 0, + PHY_DATA_OR = 1, + PHY_DATA_AND = 2, + PHY_BJMPN = 3, + PHY_MDIO_CHG = 4, + PHY_CLEAR_READCOUNT = 7, + PHY_WRITE = 8, + PHY_READCOUNT_EQ_SKIP = 9, + PHY_COMP_EQ_SKIPN = 10, + PHY_COMP_NEQ_SKIPN = 11, + PHY_WRITE_PREVIOUS = 12, + PHY_SKIPN = 13, + PHY_DELAY_MS = 14, +}; + +struct fw_info { + u32 magic; + char version[32]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __attribute__((packed)); + +struct usb_interface_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bInterfaceNumber; + __u8 bAlternateSetting; + __u8 bNumEndpoints; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 iInterface; +}; + +struct usb_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bEndpointAddress; + __u8 bmAttributes; + __le16 wMaxPacketSize; + __u8 bInterval; + __u8 bRefresh; + __u8 bSynchAddress; +} __attribute__((packed)); + +struct usb_ssp_isoc_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wReseved; + __le32 dwBytesPerInterval; +}; + +struct usb_ss_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bMaxBurst; + __u8 bmAttributes; + __le16 wBytesPerInterval; +}; + +struct usb_interface_assoc_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bFirstInterface; + __u8 bInterfaceCount; + __u8 bFunctionClass; + __u8 bFunctionSubClass; + __u8 bFunctionProtocol; + __u8 iFunction; +}; + +struct ep_device; + +struct usb_host_endpoint { + struct usb_endpoint_descriptor desc; + struct usb_ss_ep_comp_descriptor ss_ep_comp; + struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; + long: 0; + struct list_head urb_list; + void *hcpriv; + struct ep_device *ep_dev; + unsigned char *extra; + int extralen; + int enabled; + int streams; +} __attribute__((packed)); + +struct usb_host_interface { + struct usb_interface_descriptor desc; + int extralen; + unsigned char *extra; + struct usb_host_endpoint *endpoint; + char *string; +}; + +enum usb_interface_condition { + USB_INTERFACE_UNBOUND = 0, + USB_INTERFACE_BINDING = 1, + USB_INTERFACE_BOUND = 2, + USB_INTERFACE_UNBINDING = 3, +}; + +enum usb_wireless_status { + USB_WIRELESS_STATUS_NA = 0, + USB_WIRELESS_STATUS_DISCONNECTED = 1, + USB_WIRELESS_STATUS_CONNECTED = 2, +}; + +struct usb_interface { + struct usb_host_interface *altsetting; + struct usb_host_interface *cur_altsetting; + unsigned int num_altsetting; + struct usb_interface_assoc_descriptor *intf_assoc; + int minor; + enum usb_interface_condition condition; + unsigned int sysfs_files_created: 1; + unsigned int ep_devs_created: 1; + unsigned int unregistering: 1; + unsigned int needs_remote_wakeup: 1; + unsigned int needs_altsetting0: 1; + unsigned int needs_binding: 1; + unsigned int resetting_device: 1; + unsigned int authorized: 1; + enum usb_wireless_status wireless_status; + struct work_struct 
wireless_status_work; + struct device dev; + struct device *usb_dev; + struct work_struct reset_ws; + long: 32; +}; + +struct usb_class_driver { + char *name; + char * (*devnode)(const struct device *, umode_t *); + const struct file_operations *fops; + int minor_base; +}; + +enum sam_status { + SAM_STAT_GOOD = 0, + SAM_STAT_CHECK_CONDITION = 2, + SAM_STAT_CONDITION_MET = 4, + SAM_STAT_BUSY = 8, + SAM_STAT_INTERMEDIATE = 16, + SAM_STAT_INTERMEDIATE_CONDITION_MET = 20, + SAM_STAT_RESERVATION_CONFLICT = 24, + SAM_STAT_COMMAND_TERMINATED = 34, + SAM_STAT_TASK_SET_FULL = 40, + SAM_STAT_ACA_ACTIVE = 48, + SAM_STAT_TASK_ABORTED = 64, +}; + +struct scsi_sense_hdr { + u8 response_code; + u8 sense_key; + u8 asc; + u8 ascq; + u8 byte4; + u8 byte5; + u8 byte6; + u8 additional_length; +}; + +enum scsi_host_status { + DID_OK = 0, + DID_NO_CONNECT = 1, + DID_BUS_BUSY = 2, + DID_TIME_OUT = 3, + DID_BAD_TARGET = 4, + DID_ABORT = 5, + DID_PARITY = 6, + DID_ERROR = 7, + DID_RESET = 8, + DID_BAD_INTR = 9, + DID_PASSTHROUGH = 10, + DID_SOFT_ERROR = 11, + DID_IMM_RETRY = 12, + DID_REQUEUE = 13, + DID_TRANSPORT_DISRUPTED = 14, + DID_TRANSPORT_FAILFAST = 15, + DID_TRANSPORT_MARGINAL = 20, +}; + +struct sg_table { + struct scatterlist *sgl; + unsigned int nents; + unsigned int orig_nents; +}; + +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +typedef __u64 blist_flags_t; + +enum scsi_device_state { + SDEV_CREATED = 1, + SDEV_RUNNING = 2, + SDEV_CANCEL = 3, + SDEV_DEL = 4, + SDEV_QUIESCE = 5, + SDEV_OFFLINE = 6, + SDEV_TRANSPORT_OFFLINE = 7, + SDEV_BLOCK = 8, + SDEV_CREATED_BLOCK = 9, +}; + +struct scsi_vpd { + struct callback_head rcu; + int len; + unsigned char data[0]; +}; + +struct Scsi_Host; + +struct scsi_target; + +struct scsi_device_handler; + +struct bsg_device; + +struct scsi_device { + struct Scsi_Host *host; + struct request_queue *request_queue; + struct list_head siblings; + struct list_head same_target_siblings; + struct sbitmap budget_map; + atomic_t device_blocked; + atomic_t restarts; + spinlock_t list_lock; + struct list_head starved_entry; + short unsigned int queue_depth; + short unsigned int max_queue_depth; + short unsigned int last_queue_full_depth; + short unsigned int last_queue_full_count; + long unsigned int last_queue_full_time; + long unsigned int queue_ramp_up_period; + long unsigned int last_queue_ramp_up; + unsigned int id; + unsigned int channel; + u64 lun; + unsigned int manufacturer; + unsigned int sector_size; + void *hostdata; + unsigned char type; + char scsi_level; + char inq_periph_qual; + struct mutex inquiry_mutex; + unsigned char inquiry_len; + unsigned char *inquiry; + const char *vendor; + const char *model; + const char *rev; + struct scsi_vpd *vpd_pg0; + struct scsi_vpd *vpd_pg83; + struct scsi_vpd *vpd_pg80; + struct scsi_vpd *vpd_pg89; + struct scsi_vpd *vpd_pgb0; + struct scsi_vpd *vpd_pgb1; + struct scsi_vpd *vpd_pgb2; + struct scsi_vpd *vpd_pgb7; + struct scsi_target *sdev_target; + blist_flags_t sdev_bflags; + unsigned int eh_timeout; + unsigned int manage_system_start_stop: 1; + unsigned int manage_runtime_start_stop: 1; + unsigned int manage_shutdown: 1; + unsigned int force_runtime_start_on_system_start: 1; + unsigned int removable: 1; + unsigned int changed: 1; + unsigned int busy: 1; + unsigned int lockable: 1; + unsigned int locked: 1; + unsigned int borken: 1; + unsigned int disconnect: 1; + unsigned int soft_reset: 1; + unsigned int sdtr: 1; + unsigned int wdtr: 1; + unsigned int ppr: 
1; + unsigned int tagged_supported: 1; + unsigned int simple_tags: 1; + unsigned int was_reset: 1; + unsigned int expecting_cc_ua: 1; + unsigned int use_10_for_rw: 1; + unsigned int use_10_for_ms: 1; + unsigned int set_dbd_for_ms: 1; + unsigned int read_before_ms: 1; + unsigned int no_report_opcodes: 1; + unsigned int no_write_same: 1; + unsigned int use_16_for_rw: 1; + unsigned int use_16_for_sync: 1; + unsigned int skip_ms_page_8: 1; + unsigned int skip_ms_page_3f: 1; + unsigned int skip_vpd_pages: 1; + unsigned int try_vpd_pages: 1; + unsigned int use_192_bytes_for_3f: 1; + unsigned int no_start_on_add: 1; + unsigned int allow_restart: 1; + unsigned int start_stop_pwr_cond: 1; + unsigned int no_uld_attach: 1; + unsigned int select_no_atn: 1; + unsigned int fix_capacity: 1; + unsigned int guess_capacity: 1; + unsigned int retry_hwerror: 1; + unsigned int last_sector_bug: 1; + unsigned int no_read_disc_info: 1; + unsigned int no_read_capacity_16: 1; + unsigned int try_rc_10_first: 1; + unsigned int security_supported: 1; + unsigned int is_visible: 1; + unsigned int wce_default_on: 1; + unsigned int no_dif: 1; + unsigned int broken_fua: 1; + unsigned int lun_in_cdb: 1; + unsigned int unmap_limit_for_ws: 1; + unsigned int rpm_autosuspend: 1; + unsigned int ignore_media_change: 1; + unsigned int silence_suspend: 1; + unsigned int no_vpd_size: 1; + unsigned int cdl_supported: 1; + unsigned int cdl_enable: 1; + unsigned int queue_stopped; + bool offline_already; + atomic_t disk_events_disable_depth; + long unsigned int supported_events[1]; + long unsigned int pending_events[1]; + struct list_head event_list; + struct work_struct event_work; + unsigned int max_device_blocked; + atomic_t iorequest_cnt; + atomic_t iodone_cnt; + atomic_t ioerr_cnt; + atomic_t iotmo_cnt; + long: 32; + struct device sdev_gendev; + struct device sdev_dev; + struct work_struct requeue_work; + struct scsi_device_handler *handler; + void *handler_data; + size_t dma_drain_len; + void *dma_drain_buf; + unsigned int sg_timeout; + unsigned int sg_reserved_size; + struct bsg_device *bsg_dev; + unsigned char access_state; + struct mutex state_mutex; + enum scsi_device_state sdev_state; + struct task_struct *quiesced_by; + long unsigned int sdev_data[0]; +}; + +enum scsi_host_state { + SHOST_CREATED = 1, + SHOST_RUNNING = 2, + SHOST_CANCEL = 3, + SHOST_DEL = 4, + SHOST_RECOVERY = 5, + SHOST_CANCEL_RECOVERY = 6, + SHOST_DEL_RECOVERY = 7, +}; + +struct scsi_host_template; + +struct scsi_transport_template; + +struct Scsi_Host { + struct list_head __devices; + struct list_head __targets; + struct list_head starved_list; + spinlock_t default_lock; + spinlock_t *host_lock; + struct mutex scan_mutex; + struct list_head eh_abort_list; + struct list_head eh_cmd_q; + struct task_struct *ehandler; + struct completion *eh_action; + wait_queue_head_t host_wait; + const struct scsi_host_template *hostt; + struct scsi_transport_template *transportt; + struct kref tagset_refcnt; + struct completion tagset_freed; + struct blk_mq_tag_set tag_set; + atomic_t host_blocked; + unsigned int host_failed; + unsigned int host_eh_scheduled; + unsigned int host_no; + int eh_deadline; + long unsigned int last_reset; + unsigned int max_channel; + unsigned int max_id; + u64 max_lun; + unsigned int unique_id; + short unsigned int max_cmd_len; + int this_id; + int can_queue; + short int cmd_per_lun; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int opt_sectors; + unsigned int 
max_segment_size; + unsigned int dma_alignment; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + unsigned int nr_hw_queues; + unsigned int nr_maps; + unsigned int active_mode: 2; + unsigned int host_self_blocked: 1; + unsigned int reverse_ordering: 1; + unsigned int tmf_in_progress: 1; + unsigned int async_scan: 1; + unsigned int eh_noresume: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int queuecommand_may_block: 1; + unsigned int short_inquiry: 1; + unsigned int no_scsi2_lun_in_cdb: 1; + unsigned int no_highmem: 1; + struct workqueue_struct *work_q; + struct workqueue_struct *tmf_work_q; + unsigned int max_host_blocked; + unsigned int prot_capabilities; + unsigned char prot_guard_type; + long unsigned int base; + long unsigned int io_port; + unsigned char n_io_port; + unsigned char dma_channel; + unsigned int irq; + enum scsi_host_state shost_state; + long: 32; + struct device shost_gendev; + struct device shost_dev; + void *shost_data; + struct device *dma_dev; + int rpm_autosuspend_delay; + long unsigned int hostdata[0]; + long: 32; +}; + +enum scsi_target_state { + STARGET_CREATED = 1, + STARGET_RUNNING = 2, + STARGET_REMOVE = 3, + STARGET_CREATED_REMOVE = 4, + STARGET_DEL = 5, +}; + +struct scsi_target { + struct scsi_device *starget_sdev_user; + struct list_head siblings; + struct list_head devices; + long: 32; + struct device dev; + struct kref reap_ref; + unsigned int channel; + unsigned int id; + unsigned int create: 1; + unsigned int single_lun: 1; + unsigned int pdt_1f_for_no_lun: 1; + unsigned int no_report_luns: 1; + unsigned int expecting_lun_change: 1; + atomic_t target_busy; + atomic_t target_blocked; + unsigned int can_queue; + unsigned int max_target_blocked; + char scsi_level; + enum scsi_target_state state; + void *hostdata; + long unsigned int starget_data[0]; + long: 32; +}; + +struct scsi_data_buffer { + struct sg_table table; + unsigned int length; +}; + +enum scsi_cmnd_submitter { + SUBMITTED_BY_BLOCK_LAYER = 0, + SUBMITTED_BY_SCSI_ERROR_HANDLER = 1, + SUBMITTED_BY_SCSI_RESET_IOCTL = 2, +} __attribute__((mode(byte))); + +struct scsi_cmnd { + struct scsi_device *device; + struct list_head eh_entry; + struct delayed_work abort_work; + struct callback_head rcu; + int eh_eflags; + int budget_token; + long unsigned int jiffies_at_alloc; + int retries; + int allowed; + unsigned char prot_op; + unsigned char prot_type; + unsigned char prot_flags; + enum scsi_cmnd_submitter submitter; + short unsigned int cmd_len; + enum dma_data_direction sc_data_direction; + unsigned char cmnd[32]; + struct scsi_data_buffer sdb; + struct scsi_data_buffer *prot_sdb; + unsigned int underflow; + unsigned int transfersize; + unsigned int resid_len; + unsigned int sense_len; + unsigned char *sense_buffer; + int flags; + long unsigned int state; + unsigned int extra_len; + unsigned char *host_scribble; + int result; +}; + +struct scsi_eh_save { + int result; + unsigned int resid_len; + int eh_eflags; + enum dma_data_direction data_direction; + unsigned int underflow; + unsigned char cmd_len; + unsigned char prot_op; + unsigned char cmnd[32]; + struct scsi_data_buffer sdb; + struct scatterlist sense_sgl; +}; + +struct usb_ctrlrequest { + __u8 bRequestType; + __u8 bRequest; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +}; + +struct usb_device_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __le16 idVendor; + 
__le16 idProduct; + __le16 bcdDevice; + __u8 iManufacturer; + __u8 iProduct; + __u8 iSerialNumber; + __u8 bNumConfigurations; +}; + +struct usb_config_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumInterfaces; + __u8 bConfigurationValue; + __u8 iConfiguration; + __u8 bmAttributes; + __u8 bMaxPower; +} __attribute__((packed)); + +struct usb_bos_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumDeviceCaps; +} __attribute__((packed)); + +struct usb_ext_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __le32 bmAttributes; +} __attribute__((packed)); + +struct usb_ss_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bmAttributes; + __le16 wSpeedSupported; + __u8 bFunctionalitySupport; + __u8 bU1devExitLat; + __le16 bU2DevExitLat; +}; + +struct usb_ss_container_id_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __u8 ContainerID[16]; +}; + +struct usb_ssp_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __le32 bmAttributes; + __le16 wFunctionalitySupport; + __le16 wReserved; + union { + __le32 legacy_padding; + struct { + struct {} __empty_bmSublinkSpeedAttr; + __le32 bmSublinkSpeedAttr[0]; + }; + }; +}; + +struct usb_ptm_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +enum usb_device_speed { + USB_SPEED_UNKNOWN = 0, + USB_SPEED_LOW = 1, + USB_SPEED_FULL = 2, + USB_SPEED_HIGH = 3, + USB_SPEED_WIRELESS = 4, + USB_SPEED_SUPER = 5, + USB_SPEED_SUPER_PLUS = 6, +}; + +enum usb_device_state { + USB_STATE_NOTATTACHED = 0, + USB_STATE_ATTACHED = 1, + USB_STATE_POWERED = 2, + USB_STATE_RECONNECTING = 3, + USB_STATE_UNAUTHENTICATED = 4, + USB_STATE_DEFAULT = 5, + USB_STATE_ADDRESS = 6, + USB_STATE_CONFIGURED = 7, + USB_STATE_SUSPENDED = 8, +}; + +enum usb_ssp_rate { + USB_SSP_GEN_UNKNOWN = 0, + USB_SSP_GEN_2x1 = 1, + USB_SSP_GEN_1x2 = 2, + USB_SSP_GEN_2x2 = 3, +}; + +struct usb_interface_cache { + unsigned int num_altsetting; + struct kref ref; + struct usb_host_interface altsetting[0]; +}; + +struct usb_host_config { + struct usb_config_descriptor desc; + char *string; + struct usb_interface_assoc_descriptor *intf_assoc[16]; + struct usb_interface *interface[32]; + struct usb_interface_cache *intf_cache[32]; + unsigned char *extra; + int extralen; +}; + +struct usb_host_bos { + struct usb_bos_descriptor *desc; + struct usb_ext_cap_descriptor *ext_cap; + struct usb_ss_cap_descriptor *ss_cap; + struct usb_ssp_cap_descriptor *ssp_cap; + struct usb_ss_container_id_descriptor *ss_id; + struct usb_ptm_cap_descriptor *ptm_cap; +}; + +struct usb_device; + +struct usb_bus { + struct device *controller; + struct device *sysdev; + int busnum; + const char *bus_name; + u8 uses_pio_for_control; + u8 otg_port; + unsigned int is_b_host: 1; + unsigned int b_hnp_enable: 1; + unsigned int no_stop_on_short: 1; + unsigned int no_sg_constraint: 1; + unsigned int sg_tablesize; + int devnum_next; + struct mutex devnum_next_mutex; + long unsigned int devmap[4]; + struct usb_device *root_hub; + struct usb_bus *hs_companion; + int bandwidth_allocated; + int bandwidth_int_reqs; + int bandwidth_isoc_reqs; + unsigned int resuming_ports; +}; + +enum usb_link_tunnel_mode { + USB_LINK_UNKNOWN = 0, + USB_LINK_NATIVE = 1, + USB_LINK_TUNNELED = 2, +}; + +struct usb2_lpm_parameters { + unsigned int besl; + int timeout; +}; + +struct 
usb3_lpm_parameters { + unsigned int mel; + unsigned int pel; + unsigned int sel; + int timeout; +}; + +struct usb_tt; + +struct usb_device { + int devnum; + char devpath[16]; + u32 route; + enum usb_device_state state; + enum usb_device_speed speed; + unsigned int rx_lanes; + unsigned int tx_lanes; + enum usb_ssp_rate ssp_rate; + struct usb_tt *tt; + int ttport; + unsigned int toggle[2]; + struct usb_device *parent; + struct usb_bus *bus; + struct usb_host_endpoint ep0; + long: 32; + struct device dev; + struct usb_device_descriptor descriptor; + struct usb_host_bos *bos; + struct usb_host_config *config; + struct usb_host_config *actconfig; + struct usb_host_endpoint *ep_in[16]; + struct usb_host_endpoint *ep_out[16]; + char **rawdescriptors; + short unsigned int bus_mA; + u8 portnum; + u8 level; + u8 devaddr; + unsigned int can_submit: 1; + unsigned int persist_enabled: 1; + unsigned int reset_in_progress: 1; + unsigned int have_langid: 1; + unsigned int authorized: 1; + unsigned int authenticated: 1; + unsigned int lpm_capable: 1; + unsigned int lpm_devinit_allow: 1; + unsigned int usb2_hw_lpm_capable: 1; + unsigned int usb2_hw_lpm_besl_capable: 1; + unsigned int usb2_hw_lpm_enabled: 1; + unsigned int usb2_hw_lpm_allowed: 1; + unsigned int usb3_lpm_u1_enabled: 1; + unsigned int usb3_lpm_u2_enabled: 1; + int string_langid; + char *product; + char *manufacturer; + char *serial; + struct list_head filelist; + int maxchild; + u32 quirks; + atomic_t urbnum; + long unsigned int active_duration; + long unsigned int connect_time; + unsigned int do_remote_wakeup: 1; + unsigned int reset_resume: 1; + unsigned int port_is_suspended: 1; + enum usb_link_tunnel_mode tunnel_mode; + int slot_id; + struct usb2_lpm_parameters l1_params; + struct usb3_lpm_parameters u1_params; + struct usb3_lpm_parameters u2_params; + unsigned int lpm_disable_count; + u16 hub_delay; + unsigned int use_generic_driver: 1; + long: 32; +}; + +struct usb_iso_packet_descriptor { + unsigned int offset; + unsigned int length; + unsigned int actual_length; + int status; +}; + +struct usb_anchor { + struct list_head urb_list; + wait_queue_head_t wait; + spinlock_t lock; + atomic_t suspend_wakeups; + unsigned int poisoned: 1; +}; + +struct urb; + +typedef void (*usb_complete_t)(struct urb *); + +struct urb { + struct kref kref; + int unlinked; + void *hcpriv; + atomic_t use_count; + atomic_t reject; + struct list_head urb_list; + struct list_head anchor_list; + struct usb_anchor *anchor; + struct usb_device *dev; + struct usb_host_endpoint *ep; + unsigned int pipe; + unsigned int stream_id; + int status; + unsigned int transfer_flags; + void *transfer_buffer; + dma_addr_t transfer_dma; + struct scatterlist *sg; + int num_mapped_sgs; + int num_sgs; + u32 transfer_buffer_length; + u32 actual_length; + unsigned char *setup_packet; + dma_addr_t setup_dma; + int start_frame; + int number_of_packets; + int interval; + int error_count; + void *context; + usb_complete_t complete; + struct usb_iso_packet_descriptor iso_frame_desc[0]; +}; + +struct usb_sg_request { + int status; + size_t bytes; + spinlock_t lock; + struct usb_device *dev; + int pipe; + int entries; + struct urb **urbs; + int count; + struct completion complete; +}; + +enum { + US_FL_SINGLE_LUN = 1, + US_FL_NEED_OVERRIDE = 2, + US_FL_SCM_MULT_TARG = 4, + US_FL_FIX_INQUIRY = 8, + US_FL_FIX_CAPACITY = 16, + US_FL_IGNORE_RESIDUE = 32, + US_FL_BULK32 = 64, + US_FL_NOT_LOCKABLE = 128, + US_FL_GO_SLOW = 256, + US_FL_NO_WP_DETECT = 512, + US_FL_MAX_SECTORS_64 = 1024, + 
US_FL_IGNORE_DEVICE = 2048, + US_FL_CAPACITY_HEURISTICS = 4096, + US_FL_MAX_SECTORS_MIN = 8192, + US_FL_BULK_IGNORE_TAG = 16384, + US_FL_SANE_SENSE = 32768, + US_FL_CAPACITY_OK = 65536, + US_FL_BAD_SENSE = 131072, + US_FL_NO_READ_DISC_INFO = 262144, + US_FL_NO_READ_CAPACITY_16 = 524288, + US_FL_INITIAL_READ10 = 1048576, + US_FL_WRITE_CACHE = 2097152, + US_FL_NEEDS_CAP16 = 4194304, + US_FL_IGNORE_UAS = 8388608, + US_FL_BROKEN_FUA = 16777216, + US_FL_NO_ATA_1X = 33554432, + US_FL_NO_REPORT_OPCODES = 67108864, + US_FL_MAX_SECTORS_240 = 134217728, + US_FL_NO_REPORT_LUNS = 268435456, + US_FL_ALWAYS_SYNC = 536870912, + US_FL_NO_SAME = 1073741824, + US_FL_SENSE_AFTER_SYNC = 2147483648, +}; + +struct bulk_cb_wrap { + __le32 Signature; + __u32 Tag; + __le32 DataTransferLength; + __u8 Flags; + __u8 Lun; + __u8 Length; + __u8 CDB[16]; +}; + +struct bulk_cs_wrap { + __le32 Signature; + __u32 Tag; + __le32 Residue; + __u8 Status; +}; + +enum scsi_timeout_action { + SCSI_EH_DONE = 0, + SCSI_EH_RESET_TIMER = 1, + SCSI_EH_NOT_HANDLED = 2, +}; + +struct scsi_host_template { + unsigned int cmd_size; + int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); + void (*commit_rqs)(struct Scsi_Host *, u16); + struct module *module; + const char *name; + const char * (*info)(struct Scsi_Host *); + int (*ioctl)(struct scsi_device *, unsigned int, void *); + int (*init_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*exit_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*eh_abort_handler)(struct scsi_cmnd *); + int (*eh_device_reset_handler)(struct scsi_cmnd *); + int (*eh_target_reset_handler)(struct scsi_cmnd *); + int (*eh_bus_reset_handler)(struct scsi_cmnd *); + int (*eh_host_reset_handler)(struct scsi_cmnd *); + int (*slave_alloc)(struct scsi_device *); + int (*device_configure)(struct scsi_device *, struct queue_limits *); + int (*slave_configure)(struct scsi_device *); + void (*slave_destroy)(struct scsi_device *); + int (*target_alloc)(struct scsi_target *); + void (*target_destroy)(struct scsi_target *); + int (*scan_finished)(struct Scsi_Host *, long unsigned int); + void (*scan_start)(struct Scsi_Host *); + int (*change_queue_depth)(struct scsi_device *, int); + void (*map_queues)(struct Scsi_Host *); + int (*mq_poll)(struct Scsi_Host *, unsigned int); + bool (*dma_need_drain)(struct request *); + int (*bios_param)(struct scsi_device *, struct block_device *, sector_t, int *); + void (*unlock_native_capacity)(struct scsi_device *); + int (*show_info)(struct seq_file *, struct Scsi_Host *); + int (*write_info)(struct Scsi_Host *, char *, int); + enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *); + bool (*eh_should_retry_cmd)(struct scsi_cmnd *); + int (*host_reset)(struct Scsi_Host *, int); + const char *proc_name; + int can_queue; + int this_id; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int dma_alignment; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + short int cmd_per_lun; + int tag_alloc_policy; + unsigned int track_queue_depth: 1; + unsigned int supported_mode: 2; + unsigned int emulated: 1; + unsigned int skip_settle_delay: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int queuecommand_may_block: 1; + unsigned int max_host_blocked; + const struct attribute_group **shost_groups; + const struct attribute_group **sdev_groups; + u64 vendor_id; +}; + +struct us_data; + +struct us_unusual_dev { + const char 
*vendorName; + const char *productName; + __u8 useProtocol; + __u8 useTransport; + int (*initFunction)(struct us_data *); +}; + +typedef int (*trans_cmnd)(struct scsi_cmnd *, struct us_data *); + +typedef int (*trans_reset)(struct us_data *); + +typedef void (*proto_cmnd)(struct scsi_cmnd *, struct us_data *); + +typedef void (*extra_data_destructor)(void *); + +typedef void (*pm_hook)(struct us_data *, int); + +struct us_data { + struct mutex dev_mutex; + struct usb_device *pusb_dev; + struct usb_interface *pusb_intf; + const struct us_unusual_dev *unusual_dev; + long: 32; + u64 fflags; + long unsigned int dflags; + unsigned int send_bulk_pipe; + unsigned int recv_bulk_pipe; + unsigned int send_ctrl_pipe; + unsigned int recv_ctrl_pipe; + unsigned int recv_intr_pipe; + char *transport_name; + char *protocol_name; + __le32 bcs_signature; + u8 subclass; + u8 protocol; + u8 max_lun; + u8 ifnum; + u8 ep_bInterval; + trans_cmnd transport; + trans_reset transport_reset; + proto_cmnd proto_handler; + struct scsi_cmnd *srb; + unsigned int tag; + char scsi_name[32]; + struct urb *current_urb; + struct usb_ctrlrequest *cr; + struct usb_sg_request current_sg; + unsigned char *iobuf; + dma_addr_t iobuf_dma; + struct task_struct *ctl_thread; + struct completion cmnd_ready; + struct completion notify; + wait_queue_head_t delay_wait; + struct delayed_work scan_dwork; + void *extra; + extra_data_destructor extra_destructor; + pm_hook suspend_resume_hook; + int use_last_sector_hacks; + int last_sector_retries; +}; + +enum xfer_buf_dir { + TO_XFER_BUF = 0, + FROM_XFER_BUF = 1, +}; + +struct opal_dev; + +struct scsi_disk { + struct scsi_device *device; + long: 32; + struct device disk_dev; + struct gendisk *disk; + struct opal_dev *opal_dev; + atomic_t openers; + long: 32; + sector_t capacity; + int max_retries; + u32 min_xfer_blocks; + u32 max_xfer_blocks; + u32 opt_xfer_blocks; + u32 max_ws_blocks; + u32 max_unmap_blocks; + u32 unmap_granularity; + u32 unmap_alignment; + u32 max_atomic; + u32 atomic_alignment; + u32 atomic_granularity; + u32 max_atomic_with_boundary; + u32 max_atomic_boundary; + u32 index; + unsigned int physical_block_size; + unsigned int max_medium_access_timeouts; + unsigned int medium_access_timed_out; + u16 permanent_stream_count; + u8 media_present; + u8 write_prot; + u8 protection_type; + u8 provisioning_mode; + u8 zeroing_mode; + u8 nr_actuators; + bool suspended; + unsigned int ATO: 1; + unsigned int cache_override: 1; + unsigned int WCE: 1; + unsigned int RCD: 1; + unsigned int DPOFUA: 1; + unsigned int first_scan: 1; + unsigned int lbpme: 1; + unsigned int lbprz: 1; + unsigned int lbpu: 1; + unsigned int lbpws: 1; + unsigned int lbpws10: 1; + unsigned int lbpvpd: 1; + unsigned int ws10: 1; + unsigned int ws16: 1; + unsigned int rc_basis: 2; + unsigned int zoned: 2; + unsigned int urswrz: 1; + unsigned int security: 1; + unsigned int ignore_medium_access_errors: 1; + unsigned int rscs: 1; + unsigned int use_atomic_write_boundary: 1; +}; + +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO = 1, + INPUT_CLK_BOOT = 2, + INPUT_CLK_MAX = 3, +}; + +enum rc_proto { + RC_PROTO_UNKNOWN = 0, + RC_PROTO_OTHER = 1, + RC_PROTO_RC5 = 2, + RC_PROTO_RC5X_20 = 3, + RC_PROTO_RC5_SZ = 4, + RC_PROTO_JVC = 5, + RC_PROTO_SONY12 = 6, + RC_PROTO_SONY15 = 7, + RC_PROTO_SONY20 = 8, + RC_PROTO_NEC = 9, + RC_PROTO_NECX = 10, + RC_PROTO_NEC32 = 11, + RC_PROTO_SANYO = 12, + RC_PROTO_MCIR2_KBD = 13, + RC_PROTO_MCIR2_MSE = 14, + RC_PROTO_RC6_0 = 15, + RC_PROTO_RC6_6A_20 = 16, + RC_PROTO_RC6_6A_24 = 
17, + RC_PROTO_RC6_6A_32 = 18, + RC_PROTO_RC6_MCE = 19, + RC_PROTO_SHARP = 20, + RC_PROTO_XMP = 21, + RC_PROTO_CEC = 22, + RC_PROTO_IMON = 23, + RC_PROTO_RCMM12 = 24, + RC_PROTO_RCMM24 = 25, + RC_PROTO_RCMM32 = 26, + RC_PROTO_XBOX_DVD = 27, + RC_PROTO_MAX = 27, +}; + +struct rc_map_table { + u64 scancode; + u32 keycode; + long: 32; +}; + +struct rc_map { + struct rc_map_table *scan; + unsigned int size; + unsigned int len; + unsigned int alloc; + enum rc_proto rc_proto; + const char *name; + spinlock_t lock; +}; + +struct rc_map_list { + struct list_head list; + struct rc_map map; +}; + +struct input_id { + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; +}; + +struct input_absinfo { + __s32 value; + __s32 minimum; + __s32 maximum; + __s32 fuzz; + __s32 flat; + __s32 resolution; +}; + +struct input_keymap_entry { + __u8 flags; + __u8 len; + __u16 index; + __u32 keycode; + __u8 scancode[32]; +}; + +struct ff_replay { + __u16 length; + __u16 delay; +}; + +struct ff_trigger { + __u16 button; + __u16 interval; +}; + +struct ff_envelope { + __u16 attack_length; + __u16 attack_level; + __u16 fade_length; + __u16 fade_level; +}; + +struct ff_constant_effect { + __s16 level; + struct ff_envelope envelope; +}; + +struct ff_ramp_effect { + __s16 start_level; + __s16 end_level; + struct ff_envelope envelope; +}; + +struct ff_condition_effect { + __u16 right_saturation; + __u16 left_saturation; + __s16 right_coeff; + __s16 left_coeff; + __u16 deadband; + __s16 center; +}; + +struct ff_periodic_effect { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 custom_len; + __s16 *custom_data; +}; + +struct ff_rumble_effect { + __u16 strong_magnitude; + __u16 weak_magnitude; +}; + +struct ff_effect { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +struct input_device_id { + kernel_ulong_t flags; + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; + kernel_ulong_t evbit[1]; + kernel_ulong_t keybit[24]; + kernel_ulong_t relbit[1]; + kernel_ulong_t absbit[2]; + kernel_ulong_t mscbit[1]; + kernel_ulong_t ledbit[1]; + kernel_ulong_t sndbit[1]; + kernel_ulong_t ffbit[4]; + kernel_ulong_t swbit[1]; + kernel_ulong_t propbit[1]; + kernel_ulong_t driver_info; +}; + +struct input_value { + __u16 type; + __u16 code; + __s32 value; +}; + +struct ff_device; + +struct input_dev_poller; + +struct input_mt; + +struct input_handle; + +struct input_dev { + const char *name; + const char *phys; + const char *uniq; + struct input_id id; + long unsigned int propbit[1]; + long unsigned int evbit[1]; + long unsigned int keybit[24]; + long unsigned int relbit[1]; + long unsigned int absbit[2]; + long unsigned int mscbit[1]; + long unsigned int ledbit[1]; + long unsigned int sndbit[1]; + long unsigned int ffbit[4]; + long unsigned int swbit[1]; + unsigned int hint_events_per_packet; + unsigned int keycodemax; + unsigned int keycodesize; + void *keycode; + int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *); + int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); + struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int repeat_key; + struct timer_list timer; + int rep[2]; + struct input_mt *mt; + struct 
input_absinfo *absinfo; + long unsigned int key[24]; + long unsigned int led[1]; + long unsigned int snd[1]; + long unsigned int sw[1]; + int (*open)(struct input_dev *); + void (*close)(struct input_dev *); + int (*flush)(struct input_dev *, struct file *); + int (*event)(struct input_dev *, unsigned int, unsigned int, int); + struct input_handle *grab; + spinlock_t event_lock; + struct mutex mutex; + unsigned int users; + bool going_away; + long: 32; + struct device dev; + struct list_head h_list; + struct list_head node; + unsigned int num_vals; + unsigned int max_vals; + struct input_value *vals; + bool devres_managed; + ktime_t timestamp[3]; + bool inhibited; + long: 32; +}; + +struct ff_device { + int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); + int (*erase)(struct input_dev *, int); + int (*playback)(struct input_dev *, int, int); + void (*set_gain)(struct input_dev *, u16); + void (*set_autocenter)(struct input_dev *, u16); + void (*destroy)(struct ff_device *); + void *private; + long unsigned int ffbit[4]; + struct mutex mutex; + int max_effects; + struct ff_effect *effects; + struct file *effect_owners[0]; +}; + +struct input_handler; + +struct input_handle { + void *private; + int open; + const char *name; + struct input_dev *dev; + struct input_handler *handler; + unsigned int (*handle_events)(struct input_handle *, struct input_value *, unsigned int); + struct list_head d_node; + struct list_head h_node; +}; + +struct input_handler { + void *private; + void (*event)(struct input_handle *, unsigned int, unsigned int, int); + unsigned int (*events)(struct input_handle *, struct input_value *, unsigned int); + bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); + bool (*match)(struct input_handler *, struct input_dev *); + int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *); + void (*disconnect)(struct input_handle *); + void (*start)(struct input_handle *); + bool passive_observer; + bool legacy_minors; + int minor; + const char *name; + const struct input_device_id *id_table; + struct list_head h_list; + struct list_head node; +}; + +enum rc_driver_type { + RC_DRIVER_SCANCODE = 0, + RC_DRIVER_IR_RAW = 1, + RC_DRIVER_IR_RAW_TX = 2, +}; + +struct rc_scancode_filter { + u32 data; + u32 mask; +}; + +struct ir_raw_event_ctrl; + +struct rc_dev { + struct device dev; + bool managed_alloc; + const struct attribute_group *sysfs_groups[5]; + const char *device_name; + const char *input_phys; + struct input_id input_id; + const char *driver_name; + const char *map_name; + struct rc_map rc_map; + struct mutex lock; + unsigned int minor; + struct ir_raw_event_ctrl *raw; + struct input_dev *input_dev; + enum rc_driver_type driver_type; + bool idle; + bool encode_wakeup; + u64 allowed_protocols; + u64 enabled_protocols; + u64 allowed_wakeup_protocols; + enum rc_proto wakeup_protocol; + struct rc_scancode_filter scancode_filter; + struct rc_scancode_filter scancode_wakeup_filter; + u32 scancode_mask; + u32 users; + void *priv; + spinlock_t keylock; + bool keypressed; + long unsigned int keyup_jiffies; + struct timer_list timer_keyup; + struct timer_list timer_repeat; + u32 last_keycode; + enum rc_proto last_protocol; + long: 32; + u64 last_scancode; + u8 last_toggle; + u32 timeout; + u32 min_timeout; + u32 max_timeout; + u32 rx_resolution; + bool registered; + int (*change_protocol)(struct rc_dev *, u64 *); + int (*open)(struct rc_dev *); + void (*close)(struct rc_dev *); + int (*s_tx_mask)(struct rc_dev *, 
u32); + int (*s_tx_carrier)(struct rc_dev *, u32); + int (*s_tx_duty_cycle)(struct rc_dev *, u32); + int (*s_rx_carrier_range)(struct rc_dev *, u32, u32); + int (*tx_ir)(struct rc_dev *, unsigned int *, unsigned int); + void (*s_idle)(struct rc_dev *, bool); + int (*s_wideband_receiver)(struct rc_dev *, int); + int (*s_carrier_report)(struct rc_dev *, int); + int (*s_filter)(struct rc_dev *, struct rc_scancode_filter *); + int (*s_wakeup_filter)(struct rc_dev *, struct rc_scancode_filter *); + int (*s_timeout)(struct rc_dev *, unsigned int); +}; + +struct ir_raw_event { + union { + u32 duration; + u32 carrier; + }; + u8 duty_cycle; + unsigned int pulse: 1; + unsigned int overflow: 1; + unsigned int timeout: 1; + unsigned int carrier_report: 1; +}; + +struct nec_dec { + int state; + unsigned int count; + u32 bits; + bool is_nec_x; + bool necx_repeat; +}; + +struct rc5_dec { + int state; + u32 bits; + unsigned int count; + bool is_rc5x; +}; + +struct rc6_dec { + int state; + u8 header; + u32 body; + bool toggle; + unsigned int count; + unsigned int wanted_bits; +}; + +struct sony_dec { + int state; + u32 bits; + unsigned int count; +}; + +struct jvc_dec { + int state; + u16 bits; + u16 old_bits; + unsigned int count; + bool first; + bool toggle; +}; + +struct sanyo_dec { + int state; + unsigned int count; + u64 bits; +}; + +struct sharp_dec { + int state; + unsigned int count; + u32 bits; + unsigned int pulse_len; +}; + +struct mce_kbd_dec { + spinlock_t keylock; + struct timer_list rx_timeout; + int state; + u8 header; + u32 body; + unsigned int count; + unsigned int wanted_bits; +}; + +struct xmp_dec { + int state; + unsigned int count; + u32 durations[16]; +}; + +struct ir_raw_event_ctrl { + struct list_head list; + struct task_struct *thread; + struct { + union { + struct __kfifo kfifo; + struct ir_raw_event *type; + const struct ir_raw_event *const_type; + char (*rectype)[0]; + struct ir_raw_event *ptr; + const struct ir_raw_event *ptr_const; + }; + struct ir_raw_event buf[512]; + } kfifo; + ktime_t last_event; + struct rc_dev *dev; + spinlock_t edge_spinlock; + struct timer_list edge_handle; + struct ir_raw_event prev_ev; + struct ir_raw_event this_ev; + struct nec_dec nec; + struct rc5_dec rc5; + struct rc6_dec rc6; + struct sony_dec sony; + struct jvc_dec jvc; + struct sanyo_dec sanyo; + struct sharp_dec sharp; + struct mce_kbd_dec mce_kbd; + struct xmp_dec xmp; + long: 32; +}; + +struct ir_raw_handler { + struct list_head list; + u64 protocols; + int (*decode)(struct rc_dev *, struct ir_raw_event); + int (*encode)(enum rc_proto, u32, struct ir_raw_event *, unsigned int); + u32 carrier; + u32 min_timeout; + int (*raw_register)(struct rc_dev *); + int (*raw_unregister)(struct rc_dev *); +}; + +struct ir_raw_timings_manchester { + unsigned int leader_pulse; + unsigned int leader_space; + unsigned int clock; + unsigned int invert: 1; + unsigned int trailer_space; +}; + +enum rc5_state { + STATE_INACTIVE = 0, + STATE_BIT_START = 1, + STATE_BIT_END = 2, + STATE_CHECK_RC5X = 3, + STATE_FINISHED = 4, +}; + +typedef struct {} local_lock_t; + +enum { + BIOSET_NEED_BVECS = 1, + BIOSET_NEED_RESCUER = 2, + BIOSET_PERCPU_CACHE = 4, +}; + +struct badblocks { + struct device *dev; + int count; + int unacked_exist; + int shift; + u64 *page; + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; +}; + +struct r5l_payload_header { + __le16 type; + __le16 flags; +}; + +enum r5l_payload_type { + R5LOG_PAYLOAD_DATA = 0, + R5LOG_PAYLOAD_PARITY = 1, + R5LOG_PAYLOAD_FLUSH = 2, +}; + +struct 
r5l_payload_data_parity { + struct r5l_payload_header header; + __le32 size; + __le64 location; + __le32 checksum[0]; +}; + +struct r5l_payload_flush { + struct r5l_payload_header header; + __le32 size; + __le64 flush_stripes[0]; +}; + +struct r5l_meta_block { + __le32 magic; + __le32 checksum; + __u8 version; + __u8 __zero_pading_1; + __le16 __zero_pading_2; + __le32 meta_size; + __le64 seq; + __le64 position; + struct r5l_payload_header payloads[0]; +}; + +enum sync_action { + ACTION_RESYNC = 0, + ACTION_RECOVER = 1, + ACTION_CHECK = 2, + ACTION_REPAIR = 3, + ACTION_RESHAPE = 4, + ACTION_FROZEN = 5, + ACTION_IDLE = 6, + NR_SYNC_ACTIONS = 7, +}; + +struct md_cluster_info; + +struct md_personality; + +struct md_thread; + +struct bitmap_operations; + +struct md_rdev; + +struct mddev { + void *private; + struct md_personality *pers; + dev_t unit; + int md_minor; + struct list_head disks; + long unsigned int flags; + long unsigned int sb_flags; + int suspended; + struct mutex suspend_mutex; + struct percpu_ref active_io; + int ro; + int sysfs_active; + struct gendisk *gendisk; + struct kobject kobj; + int hold_active; + int major_version; + int minor_version; + int patch_version; + int persistent; + int external; + char metadata_type[17]; + int chunk_sectors; + long: 32; + time64_t ctime; + time64_t utime; + int level; + int layout; + char clevel[16]; + int raid_disks; + int max_disks; + sector_t dev_sectors; + sector_t array_sectors; + int external_size; + long: 32; + __u64 events; + int can_decrease_events; + char uuid[16]; + long: 32; + sector_t reshape_position; + int delta_disks; + int new_level; + int new_layout; + int new_chunk_sectors; + int reshape_backwards; + struct md_thread *thread; + struct md_thread *sync_thread; + enum sync_action last_sync_action; + sector_t curr_resync; + sector_t curr_resync_completed; + long unsigned int resync_mark; + long: 32; + sector_t resync_mark_cnt; + sector_t curr_mark_cnt; + sector_t resync_max_sectors; + atomic64_t resync_mismatches; + sector_t suspend_lo; + sector_t suspend_hi; + int sync_speed_min; + int sync_speed_max; + int parallel_resync; + int ok_start_degraded; + long unsigned int recovery; + int recovery_disabled; + int in_sync; + struct mutex open_mutex; + struct mutex reconfig_mutex; + atomic_t active; + atomic_t openers; + int changed; + int degraded; + atomic_t recovery_active; + wait_queue_head_t recovery_wait; + long: 32; + sector_t recovery_cp; + sector_t resync_min; + sector_t resync_max; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_action; + struct kernfs_node *sysfs_completed; + struct kernfs_node *sysfs_degraded; + struct kernfs_node *sysfs_level; + struct work_struct del_work; + struct work_struct sync_work; + spinlock_t lock; + wait_queue_head_t sb_wait; + atomic_t pending_writes; + unsigned int safemode; + unsigned int safemode_delay; + struct timer_list safemode_timer; + struct percpu_ref writes_pending; + int sync_checkers; + void *bitmap; + struct bitmap_operations *bitmap_ops; + struct { + struct file *file; + long: 32; + loff_t offset; + long unsigned int space; + long: 32; + loff_t default_offset; + long unsigned int default_space; + struct mutex mutex; + long unsigned int chunksize; + long unsigned int daemon_sleep; + long unsigned int max_write_behind; + int external; + int nodes; + char cluster_name[64]; + } bitmap_info; + atomic_t max_corr_read_errors; + struct list_head all_mddevs; + const struct attribute_group *to_remove; + struct bio_set bio_set; + struct bio_set sync_set; + struct bio_set 
io_clone_set; + struct work_struct event_work; + mempool_t *serial_info_pool; + void (*sync_super)(struct mddev *, struct md_rdev *); + struct md_cluster_info *cluster_info; + unsigned int good_device_nr; + unsigned int noio_flag; + struct list_head deleting; + atomic_t sync_seq; + bool has_superblocks: 1; + bool fail_last_dev: 1; + bool serialize_policy: 1; + long: 32; +}; + +struct serial_in_rdev; + +struct md_rdev { + struct list_head same_set; + sector_t sectors; + struct mddev *mddev; + int last_events; + struct block_device *meta_bdev; + struct block_device *bdev; + struct file *bdev_file; + struct page *sb_page; + struct page *bb_page; + int sb_loaded; + __u64 sb_events; + sector_t data_offset; + sector_t new_data_offset; + sector_t sb_start; + int sb_size; + int preferred_minor; + struct kobject kobj; + long unsigned int flags; + wait_queue_head_t blocked_wait; + int desc_nr; + int raid_disk; + int new_raid_disk; + int saved_raid_disk; + long: 32; + union { + sector_t recovery_offset; + sector_t journal_tail; + }; + atomic_t nr_pending; + atomic_t read_errors; + time64_t last_read_error; + atomic_t corrected_errors; + struct serial_in_rdev *serial; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_unack_badblocks; + struct kernfs_node *sysfs_badblocks; + long: 32; + struct badblocks badblocks; + struct { + short int offset; + unsigned int size; + sector_t sector; + } ppl; +}; + +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + +enum flag_bits { + Faulty = 0, + In_sync = 1, + Bitmap_sync = 2, + WriteMostly = 3, + AutoDetected = 4, + Blocked = 5, + WriteErrorSeen = 6, + FaultRecorded = 7, + BlockedBadBlocks = 8, + WantReplacement = 9, + Replacement = 10, + Candidate = 11, + Journal = 12, + ClusterRemove = 13, + ExternalBbl = 14, + FailFast = 15, + LastDev = 16, + CollisionCheck = 17, + Nonrot = 18, +}; + +enum mddev_flags { + MD_ARRAY_FIRST_USE = 0, + MD_CLOSING = 1, + MD_JOURNAL_CLEAN = 2, + MD_HAS_JOURNAL = 3, + MD_CLUSTER_RESYNC_LOCKED = 4, + MD_FAILFAST_SUPPORTED = 5, + MD_HAS_PPL = 6, + MD_HAS_MULTIPLE_PPLS = 7, + MD_NOT_READY = 8, + MD_BROKEN = 9, + MD_DELETED = 10, +}; + +enum mddev_sb_flags { + MD_SB_CHANGE_DEVS = 0, + MD_SB_CHANGE_CLEAN = 1, + MD_SB_CHANGE_PENDING = 2, + MD_SB_NEED_REWRITE = 3, +}; + +struct md_personality { + char *name; + int level; + struct list_head list; + struct module *owner; + bool (*make_request)(struct mddev *, struct bio *); + int (*run)(struct mddev *); + int (*start)(struct mddev *); + void (*free)(struct mddev *, void *); + void (*status)(struct seq_file *, struct mddev *); + void (*error_handler)(struct mddev *, struct md_rdev *); + int (*hot_add_disk)(struct mddev *, struct md_rdev *); + int (*hot_remove_disk)(struct mddev *, struct md_rdev *); + int (*spare_active)(struct mddev *); + sector_t (*sync_request)(struct mddev *, sector_t, sector_t, int *); + int (*resize)(struct mddev *, sector_t); + sector_t (*size)(struct mddev *, sector_t, int); + int (*check_reshape)(struct mddev *); + int (*start_reshape)(struct mddev *); + void (*finish_reshape)(struct mddev *); + void (*update_reshape_pos)(struct mddev *); + void (*prepare_suspend)(struct mddev *); + void (*quiesce)(struct mddev *, int); + void * (*takeover)(struct mddev *); + int (*change_consistency_policy)(struct mddev *, const char *); +}; + +struct md_thread { + void (*run)(struct md_thread *); + struct mddev *mddev; + wait_queue_head_t wqueue; + long unsigned int flags; + struct task_struct 
*tsk; + long unsigned int timeout; + void *private; +}; + +struct md_bitmap_stats; + +struct bitmap_operations { + bool (*enabled)(struct mddev *); + int (*create)(struct mddev *, int); + int (*resize)(struct mddev *, sector_t, int, bool); + int (*load)(struct mddev *); + void (*destroy)(struct mddev *); + void (*flush)(struct mddev *); + void (*write_all)(struct mddev *); + void (*dirty_bits)(struct mddev *, long unsigned int, long unsigned int); + void (*unplug)(struct mddev *, bool); + void (*daemon_work)(struct mddev *); + void (*wait_behind_writes)(struct mddev *); + int (*startwrite)(struct mddev *, sector_t, long unsigned int, bool); + void (*endwrite)(struct mddev *, sector_t, long unsigned int, bool, bool); + bool (*start_sync)(struct mddev *, sector_t, sector_t *, bool); + void (*end_sync)(struct mddev *, sector_t, sector_t *); + void (*cond_end_sync)(struct mddev *, sector_t, bool); + void (*close_sync)(struct mddev *); + void (*update_sb)(void *); + int (*get_stats)(void *, struct md_bitmap_stats *); + void (*sync_with_cluster)(struct mddev *, sector_t, sector_t, sector_t, sector_t); + void * (*get_from_slot)(struct mddev *, int); + int (*copy_from_slot)(struct mddev *, int, sector_t *, sector_t *, bool); + void (*set_pages)(void *, long unsigned int); + void (*free)(void *); +}; + +enum recovery_flags { + MD_RECOVERY_NEEDED = 0, + MD_RECOVERY_RUNNING = 1, + MD_RECOVERY_INTR = 2, + MD_RECOVERY_DONE = 3, + MD_RECOVERY_FROZEN = 4, + MD_RECOVERY_WAIT = 5, + MD_RECOVERY_ERROR = 6, + MD_RECOVERY_SYNC = 7, + MD_RECOVERY_REQUESTED = 8, + MD_RECOVERY_CHECK = 9, + MD_RECOVERY_RECOVER = 10, + MD_RECOVERY_RESHAPE = 11, + MD_RESYNCING_REMOTE = 12, +}; + +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct mddev *, char *); + ssize_t (*store)(struct mddev *, const char *, size_t); +}; + +enum dma_transaction_type { + DMA_MEMCPY = 0, + DMA_XOR = 1, + DMA_PQ = 2, + DMA_XOR_VAL = 3, + DMA_PQ_VAL = 4, + DMA_MEMSET = 5, + DMA_MEMSET_SG = 6, + DMA_INTERRUPT = 7, + DMA_PRIVATE = 8, + DMA_ASYNC_TX = 9, + DMA_SLAVE = 10, + DMA_CYCLIC = 11, + DMA_INTERLEAVE = 12, + DMA_COMPLETION_NO_ORDER = 13, + DMA_REPEAT = 14, + DMA_LOAD_EOT = 15, + DMA_TX_TYPE_END = 16, +}; + +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +enum sum_check_flags { + SUM_CHECK_P_RESULT = 1, + SUM_CHECK_Q_RESULT = 2, +}; + +enum check_states { + check_state_idle = 0, + check_state_run = 1, + check_state_run_q = 2, + check_state_run_pq = 3, + check_state_check_result = 4, + check_state_compute_run = 5, + check_state_compute_result = 6, +}; + +enum reconstruct_states { + reconstruct_state_idle = 0, + reconstruct_state_prexor_drain_run = 1, + reconstruct_state_drain_run = 2, + reconstruct_state_run = 3, + reconstruct_state_prexor_drain_result = 4, + reconstruct_state_drain_result = 5, + reconstruct_state_result = 6, +}; + +struct r5l_log; + +struct r5l_io_unit { + struct r5l_log *log; + struct page *meta_page; + int meta_offset; + struct bio *current_bio; + atomic_t pending_stripe; + long: 32; + u64 seq; + sector_t log_start; + sector_t log_end; + struct list_head log_sibling; + struct list_head stripe_list; + int state; + bool need_split_bio; + struct bio *split_bio; + unsigned int has_flush: 1; + unsigned int has_fua: 1; + unsigned int has_null_flush: 1; + unsigned int has_flush_payload: 1; + unsigned int io_deferred: 1; + struct bio_list flush_barriers; +}; + +struct stripe_operations { + int target; + int target2; + enum sum_check_flags zero_sum_result; +}; + +struct r5dev { + struct bio 
req; + struct bio rreq; + struct bio_vec vec; + struct bio_vec rvec; + struct page *page; + struct page *orig_page; + unsigned int offset; + struct bio *toread; + struct bio *read; + struct bio *towrite; + struct bio *written; + long: 32; + sector_t sector; + long unsigned int flags; + u32 log_checksum; + short unsigned int write_hint; + long: 32; +}; + +struct r5conf; + +struct r5worker_group; + +struct ppl_io_unit; + +struct stripe_head { + struct hlist_node hash; + struct list_head lru; + struct llist_node release_list; + struct r5conf *raid_conf; + short int generation; + long: 32; + sector_t sector; + short int pd_idx; + short int qd_idx; + short int ddf_layout; + short int hash_lock_index; + long unsigned int state; + atomic_t count; + int bm_seq; + int disks; + int overwrite_disks; + enum check_states check_state; + enum reconstruct_states reconstruct_state; + spinlock_t stripe_lock; + int cpu; + struct r5worker_group *group; + struct stripe_head *batch_head; + spinlock_t batch_lock; + struct list_head batch_list; + union { + struct r5l_io_unit *log_io; + struct ppl_io_unit *ppl_io; + }; + struct list_head log_list; + long: 32; + sector_t log_start; + struct list_head r5c; + struct page *ppl_page; + struct stripe_operations ops; + struct r5dev dev[0]; +}; + +struct raid5_percpu; + +struct disk_info; + +struct r5pending_data; + +struct r5conf { + struct hlist_head *stripe_hashtbl; + spinlock_t hash_locks[8]; + struct mddev *mddev; + int chunk_sectors; + int level; + int algorithm; + int rmw_level; + int max_degraded; + int raid_disks; + int max_nr_stripes; + int min_nr_stripes; + sector_t reshape_progress; + sector_t reshape_safe; + int previous_raid_disks; + int prev_chunk_sectors; + int prev_algo; + short int generation; + seqcount_spinlock_t gen_lock; + long unsigned int reshape_checkpoint; + long long int min_offset_diff; + struct list_head handle_list; + struct list_head loprio_list; + struct list_head hold_list; + struct list_head delayed_list; + struct list_head bitmap_list; + struct bio *retry_read_aligned; + unsigned int retry_read_offset; + struct bio *retry_read_aligned_list; + atomic_t preread_active_stripes; + atomic_t active_aligned_reads; + atomic_t pending_full_writes; + int bypass_count; + int bypass_threshold; + int skip_copy; + struct list_head *last_hold; + atomic_t reshape_stripes; + int active_name; + char cache_name[96]; + struct kmem_cache *slab_cache; + struct mutex cache_size_mutex; + int seq_flush; + int seq_write; + int quiesce; + int fullsync; + int recovery_disabled; + struct raid5_percpu *percpu; + int scribble_disks; + int scribble_sectors; + struct hlist_node node; + atomic_t active_stripes; + struct list_head inactive_list[8]; + atomic_t r5c_cached_full_stripes; + struct list_head r5c_full_stripe_list; + atomic_t r5c_cached_partial_stripes; + struct list_head r5c_partial_stripe_list; + atomic_t r5c_flushing_full_stripes; + atomic_t r5c_flushing_partial_stripes; + atomic_t empty_inactive_list_nr; + struct llist_head released_stripes; + wait_queue_head_t wait_for_quiescent; + wait_queue_head_t wait_for_stripe; + wait_queue_head_t wait_for_reshape; + long unsigned int cache_state; + struct shrinker *shrinker; + int pool_size; + spinlock_t device_lock; + struct disk_info *disks; + struct bio_set bio_split; + struct md_thread *thread; + struct list_head temp_inactive_list[8]; + struct r5worker_group *worker_groups; + int group_cnt; + int worker_cnt_per_group; + struct r5l_log *log; + void *log_private; + spinlock_t pending_bios_lock; + bool 
batch_bio_dispatch; + struct r5pending_data *pending_data; + struct list_head free_list; + struct list_head pending_list; + int pending_data_cnt; + struct r5pending_data *next_pending_data; + long: 32; +}; + +struct r5worker; + +struct r5worker_group { + struct list_head handle_list; + struct list_head loprio_list; + struct r5conf *conf; + struct r5worker *workers; + int stripes_cnt; +}; + +struct stripe_head_state { + int syncing; + int expanding; + int expanded; + int replacing; + int locked; + int uptodate; + int to_read; + int to_write; + int failed; + int written; + int to_fill; + int compute; + int req_compute; + int non_overwrite; + int injournal; + int just_cached; + int failed_num[2]; + int p_failed; + int q_failed; + int dec_preread_active; + long unsigned int ops_request; + struct md_rdev *blocked_rdev; + int handle_bad_blocks; + int log_failed; + int waiting_extra_page; +}; + +enum r5dev_flags { + R5_UPTODATE = 0, + R5_LOCKED = 1, + R5_DOUBLE_LOCKED = 2, + R5_OVERWRITE = 3, + R5_Insync = 4, + R5_Wantread = 5, + R5_Wantwrite = 6, + R5_Overlap = 7, + R5_ReadNoMerge = 8, + R5_ReadError = 9, + R5_ReWrite = 10, + R5_Expanded = 11, + R5_Wantcompute = 12, + R5_Wantfill = 13, + R5_Wantdrain = 14, + R5_WantFUA = 15, + R5_SyncIO = 16, + R5_WriteError = 17, + R5_MadeGood = 18, + R5_ReadRepl = 19, + R5_MadeGoodRepl = 20, + R5_NeedReplace = 21, + R5_WantReplace = 22, + R5_Discard = 23, + R5_SkipCopy = 24, + R5_InJournal = 25, + R5_OrigPageUPTDODATE = 26, +}; + +enum { + STRIPE_ACTIVE = 0, + STRIPE_HANDLE = 1, + STRIPE_SYNC_REQUESTED = 2, + STRIPE_SYNCING = 3, + STRIPE_INSYNC = 4, + STRIPE_REPLACED = 5, + STRIPE_PREREAD_ACTIVE = 6, + STRIPE_DELAYED = 7, + STRIPE_DEGRADED = 8, + STRIPE_BIT_DELAY = 9, + STRIPE_EXPANDING = 10, + STRIPE_EXPAND_SOURCE = 11, + STRIPE_EXPAND_READY = 12, + STRIPE_IO_STARTED = 13, + STRIPE_FULL_WRITE = 14, + STRIPE_BIOFILL_RUN = 15, + STRIPE_COMPUTE_RUN = 16, + STRIPE_ON_UNPLUG_LIST = 17, + STRIPE_DISCARD = 18, + STRIPE_ON_RELEASE_LIST = 19, + STRIPE_BATCH_READY = 20, + STRIPE_BATCH_ERR = 21, + STRIPE_BITMAP_PENDING = 22, + STRIPE_LOG_TRAPPED = 23, + STRIPE_R5C_CACHING = 24, + STRIPE_R5C_PARTIAL_STRIPE = 25, + STRIPE_R5C_FULL_STRIPE = 26, + STRIPE_R5C_PREFLUSH = 27, +}; + +enum { + STRIPE_OP_BIOFILL = 0, + STRIPE_OP_COMPUTE_BLK = 1, + STRIPE_OP_PREXOR = 2, + STRIPE_OP_BIODRAIN = 3, + STRIPE_OP_RECONSTRUCT = 4, + STRIPE_OP_CHECK = 5, + STRIPE_OP_PARTIAL_PARITY = 6, +}; + +struct disk_info { + struct md_rdev *rdev; + struct md_rdev *replacement; + struct page *extra_page; +}; + +struct r5worker { + struct work_struct work; + struct r5worker_group *group; + struct list_head temp_inactive_list[8]; + bool working; +}; + +enum r5c_journal_mode { + R5C_JOURNAL_MODE_WRITE_THROUGH = 0, + R5C_JOURNAL_MODE_WRITE_BACK = 1, +}; + +enum r5_cache_state { + R5_INACTIVE_BLOCKED = 0, + R5_ALLOC_MORE = 1, + R5_DID_ALLOC = 2, + R5C_LOG_TIGHT = 3, + R5C_LOG_CRITICAL = 4, + R5C_EXTRA_PAGE_IN_USE = 5, +}; + +struct r5pending_data { + struct list_head sibling; + sector_t sector; + struct bio_list bios; +}; + +struct raid5_percpu { + struct page *spare_page; + void *scribble; + int scribble_obj_size; + local_lock_t lock; +}; + +struct r5l_log { + struct md_rdev *rdev; + u32 uuid_checksum; + sector_t device_size; + sector_t max_free_space; + sector_t last_checkpoint; + u64 last_cp_seq; + sector_t log_start; + u64 seq; + sector_t next_checkpoint; + struct mutex io_mutex; + struct r5l_io_unit *current_io; + spinlock_t io_list_lock; + struct list_head running_ios; + struct list_head io_end_ios; 
+ struct list_head flushing_ios; + struct list_head finished_ios; + struct bio flush_bio; + struct list_head no_mem_stripes; + struct kmem_cache *io_kc; + mempool_t io_pool; + struct bio_set bs; + mempool_t meta_pool; + struct md_thread *reclaim_thread; + long unsigned int reclaim_target; + wait_queue_head_t iounit_wait; + struct list_head no_space_stripes; + spinlock_t no_space_stripes_lock; + bool need_cache_flush; + enum r5c_journal_mode r5c_journal_mode; + struct list_head stripe_in_journal_list; + spinlock_t stripe_in_journal_lock; + atomic_t stripe_in_journal_count; + struct work_struct deferred_io_work; + struct work_struct disable_writeback_work; + spinlock_t tree_lock; + struct xarray big_stripe_tree; + long: 32; +}; + +struct md_bitmap_stats { + u64 events_cleared; + int behind_writes; + bool behind_wait; + long unsigned int missing_pages; + long unsigned int file_pages; + long unsigned int sync_size; + long unsigned int pages; + struct file *file; + long: 32; +}; + +enum r5l_io_unit_state { + IO_UNIT_RUNNING = 0, + IO_UNIT_IO_START = 1, + IO_UNIT_IO_END = 2, + IO_UNIT_STRIPE_END = 3, +}; + +struct r5l_recovery_ctx { + struct page *meta_page; + long: 32; + sector_t meta_total_blocks; + sector_t pos; + u64 seq; + int data_parity_stripes; + int data_only_stripes; + struct list_head cached_list; + struct page *ra_pool[256]; + struct bio_vec ra_bvec[256]; + sector_t pool_offset; + int total_pages; + int valid_pages; +}; + +struct netdev_hw_addr { + struct list_head list; + struct rb_node node; + unsigned char addr[32]; + unsigned char type; + bool global_use; + int sync_cnt; + int refcount; + int synced; + struct callback_head callback_head; +}; + +enum netdev_reg_state { + NETREG_UNINITIALIZED = 0, + NETREG_REGISTERED = 1, + NETREG_UNREGISTERING = 2, + NETREG_UNREGISTERED = 3, + NETREG_RELEASED = 4, + NETREG_DUMMY = 5, +}; + +struct netdev_name_node { + struct hlist_node hlist; + struct list_head list; + struct net_device *dev; + const char *name; + struct callback_head rcu; +}; + +enum netdev_cmd { + NETDEV_UP = 1, + NETDEV_DOWN = 2, + NETDEV_REBOOT = 3, + NETDEV_CHANGE = 4, + NETDEV_REGISTER = 5, + NETDEV_UNREGISTER = 6, + NETDEV_CHANGEMTU = 7, + NETDEV_CHANGEADDR = 8, + NETDEV_PRE_CHANGEADDR = 9, + NETDEV_GOING_DOWN = 10, + NETDEV_CHANGENAME = 11, + NETDEV_FEAT_CHANGE = 12, + NETDEV_BONDING_FAILOVER = 13, + NETDEV_PRE_UP = 14, + NETDEV_PRE_TYPE_CHANGE = 15, + NETDEV_POST_TYPE_CHANGE = 16, + NETDEV_POST_INIT = 17, + NETDEV_PRE_UNINIT = 18, + NETDEV_RELEASE = 19, + NETDEV_NOTIFY_PEERS = 20, + NETDEV_JOIN = 21, + NETDEV_CHANGEUPPER = 22, + NETDEV_RESEND_IGMP = 23, + NETDEV_PRECHANGEMTU = 24, + NETDEV_CHANGEINFODATA = 25, + NETDEV_BONDING_INFO = 26, + NETDEV_PRECHANGEUPPER = 27, + NETDEV_CHANGELOWERSTATE = 28, + NETDEV_UDP_TUNNEL_PUSH_INFO = 29, + NETDEV_UDP_TUNNEL_DROP_INFO = 30, + NETDEV_CHANGE_TX_QUEUE_LEN = 31, + NETDEV_CVLAN_FILTER_PUSH_INFO = 32, + NETDEV_CVLAN_FILTER_DROP_INFO = 33, + NETDEV_SVLAN_FILTER_PUSH_INFO = 34, + NETDEV_SVLAN_FILTER_DROP_INFO = 35, + NETDEV_OFFLOAD_XSTATS_ENABLE = 36, + NETDEV_OFFLOAD_XSTATS_DISABLE = 37, + NETDEV_OFFLOAD_XSTATS_REPORT_USED = 38, + NETDEV_OFFLOAD_XSTATS_REPORT_DELTA = 39, + NETDEV_XDP_FEAT_CHANGE = 40, +}; + +enum netdev_state_t { + __LINK_STATE_START = 0, + __LINK_STATE_PRESENT = 1, + __LINK_STATE_NOCARRIER = 2, + __LINK_STATE_LINKWATCH_PENDING = 3, + __LINK_STATE_DORMANT = 4, + __LINK_STATE_TESTING = 5, +}; + +enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1, + IFF_EBRIDGE = 2, + IFF_BONDING = 4, + IFF_ISATAP = 8, + IFF_WAN_HDLC = 16, 
+ IFF_XMIT_DST_RELEASE = 32, + IFF_DONT_BRIDGE = 64, + IFF_DISABLE_NETPOLL = 128, + IFF_MACVLAN_PORT = 256, + IFF_BRIDGE_PORT = 512, + IFF_OVS_DATAPATH = 1024, + IFF_TX_SKB_SHARING = 2048, + IFF_UNICAST_FLT = 4096, + IFF_TEAM_PORT = 8192, + IFF_SUPP_NOFCS = 16384, + IFF_LIVE_ADDR_CHANGE = 32768, + IFF_MACVLAN = 65536, + IFF_XMIT_DST_RELEASE_PERM = 131072, + IFF_L3MDEV_MASTER = 262144, + IFF_NO_QUEUE = 524288, + IFF_OPENVSWITCH = 1048576, + IFF_L3MDEV_SLAVE = 2097152, + IFF_TEAM = 4194304, + IFF_RXFH_CONFIGURED = 8388608, + IFF_PHONY_HEADROOM = 16777216, + IFF_MACSEC = 33554432, + IFF_NO_RX_HANDLER = 67108864, + IFF_FAILOVER = 134217728, + IFF_FAILOVER_SLAVE = 268435456, + IFF_L3MDEV_RX_HANDLER = 536870912, + IFF_NO_ADDRCONF = 1073741824, + IFF_TX_SKB_NO_LINEAR = 2147483648, +}; + +enum netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN = 0, + NETDEV_LAG_TX_TYPE_RANDOM = 1, + NETDEV_LAG_TX_TYPE_BROADCAST = 2, + NETDEV_LAG_TX_TYPE_ROUNDROBIN = 3, + NETDEV_LAG_TX_TYPE_ACTIVEBACKUP = 4, + NETDEV_LAG_TX_TYPE_HASH = 5, +}; + +enum netdev_lag_hash { + NETDEV_LAG_HASH_NONE = 0, + NETDEV_LAG_HASH_L2 = 1, + NETDEV_LAG_HASH_L34 = 2, + NETDEV_LAG_HASH_L23 = 3, + NETDEV_LAG_HASH_E23 = 4, + NETDEV_LAG_HASH_E34 = 5, + NETDEV_LAG_HASH_VLAN_SRCMAC = 6, + NETDEV_LAG_HASH_UNKNOWN = 7, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; +}; + +struct netdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +struct failover_ops { + int (*slave_pre_register)(struct net_device *, struct net_device *); + int (*slave_register)(struct net_device *, struct net_device *); + int (*slave_pre_unregister)(struct net_device *, struct net_device *); + int (*slave_unregister)(struct net_device *, struct net_device *); + int (*slave_link_change)(struct net_device *, struct net_device *); + int (*slave_name_change)(struct net_device *, struct net_device *); + rx_handler_result_t (*slave_handle_frame)(struct sk_buff **); +}; + +struct failover { + struct list_head list; + struct net_device *failover_dev; + netdevice_tracker dev_tracker; + struct failover_ops *ops; +}; + +struct xa_limit { + u32 max; + u32 min; +}; + +enum { + NETIF_F_SG_BIT = 0, + NETIF_F_IP_CSUM_BIT = 1, + __UNUSED_NETIF_F_1 = 2, + NETIF_F_HW_CSUM_BIT = 3, + NETIF_F_IPV6_CSUM_BIT = 4, + NETIF_F_HIGHDMA_BIT = 5, + NETIF_F_FRAGLIST_BIT = 6, + NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, + NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, + NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, + NETIF_F_VLAN_CHALLENGED_BIT = 10, + NETIF_F_GSO_BIT = 11, + __UNUSED_NETIF_F_12 = 12, + __UNUSED_NETIF_F_13 = 13, + NETIF_F_GRO_BIT = 14, + NETIF_F_LRO_BIT = 15, + NETIF_F_GSO_SHIFT = 16, + NETIF_F_TSO_BIT = 16, + NETIF_F_GSO_ROBUST_BIT = 17, + NETIF_F_TSO_ECN_BIT = 18, + NETIF_F_TSO_MANGLEID_BIT = 19, + NETIF_F_TSO6_BIT = 20, + NETIF_F_FSO_BIT = 21, + NETIF_F_GSO_GRE_BIT = 22, + NETIF_F_GSO_GRE_CSUM_BIT = 23, + NETIF_F_GSO_IPXIP4_BIT = 24, + NETIF_F_GSO_IPXIP6_BIT = 25, + NETIF_F_GSO_UDP_TUNNEL_BIT = 26, + NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, + NETIF_F_GSO_PARTIAL_BIT = 28, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, + NETIF_F_GSO_SCTP_BIT = 30, + NETIF_F_GSO_ESP_BIT = 31, + NETIF_F_GSO_UDP_BIT = 32, + NETIF_F_GSO_UDP_L4_BIT = 33, + NETIF_F_GSO_FRAGLIST_BIT = 34, + NETIF_F_GSO_LAST = 34, + NETIF_F_FCOE_CRC_BIT = 35, + NETIF_F_SCTP_CRC_BIT = 36, + __UNUSED_NETIF_F_37 = 37, + NETIF_F_NTUPLE_BIT = 38, + NETIF_F_RXHASH_BIT = 39, + NETIF_F_RXCSUM_BIT = 40, + NETIF_F_NOCACHE_COPY_BIT = 41, + NETIF_F_LOOPBACK_BIT = 42, + NETIF_F_RXFCS_BIT = 
43, + NETIF_F_RXALL_BIT = 44, + NETIF_F_HW_VLAN_STAG_TX_BIT = 45, + NETIF_F_HW_VLAN_STAG_RX_BIT = 46, + NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47, + NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48, + NETIF_F_HW_TC_BIT = 49, + NETIF_F_HW_ESP_BIT = 50, + NETIF_F_HW_ESP_TX_CSUM_BIT = 51, + NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52, + NETIF_F_HW_TLS_TX_BIT = 53, + NETIF_F_HW_TLS_RX_BIT = 54, + NETIF_F_GRO_HW_BIT = 55, + NETIF_F_HW_TLS_RECORD_BIT = 56, + NETIF_F_GRO_FRAGLIST_BIT = 57, + NETIF_F_HW_MACSEC_BIT = 58, + NETIF_F_GRO_UDP_FWD_BIT = 59, + NETIF_F_HW_HSR_TAG_INS_BIT = 60, + NETIF_F_HW_HSR_TAG_RM_BIT = 61, + NETIF_F_HW_HSR_FWD_BIT = 62, + NETIF_F_HW_HSR_DUP_BIT = 63, + NETDEV_FEATURE_COUNT = 64, +}; + +struct ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_proto; +}; + +struct flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct flow_dissector_key_vlan { + union { + struct { + u16 vlan_id: 12; + u16 vlan_dei: 1; + u16 vlan_priority: 3; + }; + __be16 vlan_tci; + }; + __be16 vlan_tpid; + __be16 vlan_eth_type; + u16 padding; +}; + +struct flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +struct flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +struct flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +struct flow_dissector_key_eth_addrs { + unsigned char dst[6]; + unsigned char src[6]; +}; + +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + +struct flow_dissector { + long long unsigned int used_keys; + short unsigned int offset[33]; + long: 32; +}; + +struct icmpv6_mib_device { + atomic_long_t mibs[7]; +}; + +struct icmpv6msg_mib_device { + atomic_long_t mibs[512]; +}; + +struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; +}; + +struct ipv6_devconf { + __u8 __cacheline_group_begin__ipv6_devconf_read_txrx[0]; + __s32 disable_ipv6; + __s32 hop_limit; + __s32 mtu6; + __s32 forwarding; + __s32 disable_policy; + __s32 proxy_ndp; + __u8 __cacheline_group_end__ipv6_devconf_read_txrx[0]; + __s32 accept_ra; + __s32 accept_redirects; + __s32 autoconf; + __s32 dad_transmits; + __s32 rtr_solicits; + __s32 rtr_solicit_interval; + __s32 rtr_solicit_max_interval; + __s32 rtr_solicit_delay; + __s32 force_mld_version; + __s32 mldv1_unsolicited_report_interval; + __s32 mldv2_unsolicited_report_interval; + __s32 use_tempaddr; + __s32 temp_valid_lft; + __s32 temp_prefered_lft; + __s32 regen_min_advance; + __s32 regen_max_retry; + __s32 max_desync_factor; + __s32 max_addresses; + __s32 accept_ra_defrtr; + __u32 ra_defrtr_metric; + __s32 accept_ra_min_hop_limit; + __s32 accept_ra_min_lft; + __s32 accept_ra_pinfo; + __s32 ignore_routes_with_linkdown; + __s32 accept_source_route; + __s32 accept_ra_from_local; + __s32 drop_unicast_in_l2_multicast; + __s32 accept_dad; + __s32 force_tllao; + __s32 ndisc_notify; + __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; + __s32 drop_unsolicited_na; + __s32 accept_untracked_na; + struct ipv6_stable_secret stable_secret; + __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; + __s32 seg6_enabled; + __u32 enhanced_dad; + __u32 addr_gen_mode; + __s32 ndisc_tclass; + __s32 rpl_seg_enabled; + __u32 ioam6_id; + __u32 ioam6_id_wide; + __u8 ioam6_enabled; + __u8 ndisc_evict_nocarrier; + __u8 ra_honor_pio_life; + __u8 ra_honor_pio_pflag; + struct ctl_table_header *sysctl_header; +}; + +enum { + IPPROTO_IP = 0, + IPPROTO_ICMP = 1, + IPPROTO_IGMP = 2, + IPPROTO_IPIP = 4, + IPPROTO_TCP = 6, + IPPROTO_EGP = 8, 
+ IPPROTO_PUP = 12, + IPPROTO_UDP = 17, + IPPROTO_IDP = 22, + IPPROTO_TP = 29, + IPPROTO_DCCP = 33, + IPPROTO_IPV6 = 41, + IPPROTO_RSVP = 46, + IPPROTO_GRE = 47, + IPPROTO_ESP = 50, + IPPROTO_AH = 51, + IPPROTO_MTP = 92, + IPPROTO_BEETPH = 94, + IPPROTO_ENCAP = 98, + IPPROTO_PIM = 103, + IPPROTO_COMP = 108, + IPPROTO_L2TP = 115, + IPPROTO_SCTP = 132, + IPPROTO_UDPLITE = 136, + IPPROTO_MPLS = 137, + IPPROTO_ETHERNET = 143, + IPPROTO_RAW = 255, + IPPROTO_SMC = 256, + IPPROTO_MPTCP = 262, + IPPROTO_MAX = 263, +}; + +struct xdp_md { + __u32 data; + __u32 data_end; + __u32 data_meta; + __u32 ingress_ifindex; + __u32 rx_queue_index; + __u32 egress_ifindex; +}; + +struct xsk_queue; + +struct xdp_umem; + +struct xdp_buff_xsk; + +struct xdp_desc; + +struct xsk_buff_pool { + struct device *dev; + struct net_device *netdev; + struct list_head xsk_tx_list; + spinlock_t xsk_tx_list_lock; + refcount_t users; + struct xdp_umem *umem; + struct work_struct work; + struct list_head free_list; + struct list_head xskb_list; + u32 heads_cnt; + u16 queue_id; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xsk_queue *fq; + struct xsk_queue *cq; + dma_addr_t *dma_pages; + struct xdp_buff_xsk *heads; + struct xdp_desc *tx_descs; + long: 32; + u64 chunk_mask; + u64 addrs_cnt; + u32 free_list_cnt; + u32 dma_pages_cnt; + u32 free_heads_cnt; + u32 headroom; + u32 chunk_size; + u32 chunk_shift; + u32 frame_len; + u32 xdp_zc_max_segs; + u8 tx_metadata_len; + u8 cached_need_wakeup; + bool uses_need_wakeup; + bool unaligned; + bool tx_sw_csum; + void *addrs; + spinlock_t cq_lock; + struct xdp_buff_xsk *free_heads[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct xdp_mem_info { + u32 type; + u32 id; +}; + +struct xdp_frame { + void *data; + u16 len; + u16 headroom; + u32 metasize; + struct xdp_mem_info mem; + struct net_device *dev_rx; + u32 frame_sz; + u32 flags; +}; + +struct xdp_rxq_info; + +struct xdp_txq_info; + +struct xdp_buff { + void *data; + void *data_end; + void *data_meta; + void *data_hard_start; + struct xdp_rxq_info *rxq; + struct xdp_txq_info *txq; + u32 frame_sz; + u32 flags; +}; + +struct ipv6_devstat { + struct proc_dir_entry *proc_dir_entry; + struct ipstats_mib *ipv6; + struct icmpv6_mib_device *icmpv6dev; + struct icmpv6msg_mib_device *icmpv6msgdev; +}; + +struct ifmcaddr6; + +struct ifacaddr6; + +struct inet6_dev { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head addr_list; + struct ifmcaddr6 *mc_list; + struct ifmcaddr6 *mc_tomb; + unsigned char mc_qrv; + unsigned char mc_gq_running; + unsigned char mc_ifc_count; + unsigned char mc_dad_count; + long unsigned int mc_v1_seen; + long unsigned int mc_qi; + long unsigned int mc_qri; + long unsigned int mc_maxdelay; + struct delayed_work mc_gq_work; + struct delayed_work mc_ifc_work; + struct delayed_work mc_dad_work; + struct delayed_work mc_query_work; + struct delayed_work mc_report_work; + struct sk_buff_head mc_query_queue; + struct sk_buff_head mc_report_queue; + spinlock_t mc_query_lock; + spinlock_t mc_report_lock; + struct mutex mc_lock; + struct ifacaddr6 *ac_list; + rwlock_t lock; + refcount_t refcnt; + __u32 if_flags; + int dead; + u32 desync_factor; + struct list_head tempaddr_list; + struct in6_addr token; + struct neigh_parms *nd_parms; + struct ipv6_devconf cnf; + struct ipv6_devstat 
stats; + struct timer_list rs_timer; + __s32 rs_interval; + __u8 rs_probes; + long unsigned int tstamp; + struct callback_head rcu; + unsigned int ra_mtu; +}; + +struct ethtool_netdev_state { + struct xarray rss_ctx; + struct mutex rss_lock; + unsigned int wol_enabled: 1; + unsigned int module_fw_flash_in_progress: 1; +}; + +struct devlink; + +enum devlink_port_type { + DEVLINK_PORT_TYPE_NOTSET = 0, + DEVLINK_PORT_TYPE_AUTO = 1, + DEVLINK_PORT_TYPE_ETH = 2, + DEVLINK_PORT_TYPE_IB = 3, +}; + +struct ib_device; + +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0, + DEVLINK_PORT_FLAVOUR_CPU = 1, + DEVLINK_PORT_FLAVOUR_DSA = 2, + DEVLINK_PORT_FLAVOUR_PCI_PF = 3, + DEVLINK_PORT_FLAVOUR_PCI_VF = 4, + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5, + DEVLINK_PORT_FLAVOUR_UNUSED = 6, + DEVLINK_PORT_FLAVOUR_PCI_SF = 7, +}; + +struct devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct devlink_port_pci_pf_attrs { + u32 controller; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_pci_vf_attrs { + u32 controller; + u16 pf; + u16 vf; + u8 external: 1; +}; + +struct devlink_port_pci_sf_attrs { + u32 controller; + u32 sf; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_attrs { + u8 split: 1; + u8 splittable: 1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct devlink_port_phys_attrs phys; + struct devlink_port_pci_pf_attrs pci_pf; + struct devlink_port_pci_vf_attrs pci_vf; + struct devlink_port_pci_sf_attrs pci_sf; + }; +}; + +struct devlink_linecard; + +struct devlink_port_ops; + +struct devlink_rate; + +struct devlink_port { + struct list_head list; + struct list_head region_list; + struct devlink *devlink; + const struct devlink_port_ops *ops; + unsigned int index; + spinlock_t type_lock; + enum devlink_port_type type; + enum devlink_port_type desired_type; + union { + struct { + struct net_device *netdev; + int ifindex; + char ifname[16]; + } type_eth; + struct { + struct ib_device *ibdev; + } type_ib; + }; + struct devlink_port_attrs attrs; + u8 attrs_set: 1; + u8 switch_port: 1; + u8 registered: 1; + u8 initialized: 1; + struct delayed_work type_warn_dw; + struct list_head reporter_list; + struct devlink_rate *devlink_rate; + struct devlink_linecard *linecard; + u32 rel_index; +}; + +struct ethtool_cmd { + __u32 cmd; + __u32 supported; + __u32 advertising; + __u16 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 transceiver; + __u8 autoneg; + __u8 mdio_support; + __u32 maxtxpkt; + __u32 maxrxpkt; + __u16 speed_hi; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __u32 lp_advertising; + __u32 reserved[2]; +}; + +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char erom_version[32]; + char reserved2[12]; + __u32 n_priv_flags; + __u32 n_stats; + __u32 testinfo_len; + __u32 eedump_len; + __u32 regdump_len; +}; + +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +enum tunable_id { + ETHTOOL_ID_UNSPEC = 0, + ETHTOOL_RX_COPYBREAK = 1, + ETHTOOL_TX_COPYBREAK = 2, + ETHTOOL_PFC_PREVENTION_TOUT = 3, + ETHTOOL_TX_COPYBREAK_BUF_SIZE = 4, + __ETHTOOL_TUNABLE_COUNT = 5, +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC = 0, + ETHTOOL_TUNABLE_U8 = 1, + ETHTOOL_TUNABLE_U16 = 2, + ETHTOOL_TUNABLE_U32 = 3, + ETHTOOL_TUNABLE_U64 = 4, + ETHTOOL_TUNABLE_STRING = 5, + ETHTOOL_TUNABLE_S8 = 6, + ETHTOOL_TUNABLE_S16 = 7, + ETHTOOL_TUNABLE_S32 = 8, + ETHTOOL_TUNABLE_S64 = 9, +}; + +enum phy_tunable_id { + 
ETHTOOL_PHY_ID_UNSPEC = 0, + ETHTOOL_PHY_DOWNSHIFT = 1, + ETHTOOL_PHY_FAST_LINK_DOWN = 2, + ETHTOOL_PHY_EDPD = 3, + __ETHTOOL_PHY_TUNABLE_COUNT = 4, +}; + +struct ethtool_regs { + __u32 cmd; + __u32 version; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; + +struct ethtool_ringparam { + __u32 cmd; + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +struct ethtool_pauseparam { + __u32 cmd; + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +enum ethtool_link_ext_state { + ETHTOOL_LINK_EXT_STATE_AUTONEG = 0, + ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE = 1, + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH = 2, + ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY = 3, + ETHTOOL_LINK_EXT_STATE_NO_CABLE = 4, + ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE = 5, + ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE = 6, + ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE = 7, + ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED = 8, + ETHTOOL_LINK_EXT_STATE_OVERHEAT = 9, + ETHTOOL_LINK_EXT_STATE_MODULE = 10, +}; + +enum ethtool_link_ext_substate_autoneg { + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED = 2, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED = 3, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE = 4, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE = 5, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD = 6, +}; + +enum ethtool_link_ext_substate_link_training { + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT = 4, +}; + +enum ethtool_link_ext_substate_link_logical_mismatch { + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED = 4, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED = 5, +}; + +enum ethtool_link_ext_substate_bad_signal_integrity { + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 2, + 
ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST = 3, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS = 4, +}; + +enum ethtool_link_ext_substate_cable_issue { + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 2, +}; + +enum ethtool_link_ext_substate_module { + ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1, +}; + +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS = 1, + ETH_SS_PRIV_FLAGS = 2, + ETH_SS_NTUPLE_FILTERS = 3, + ETH_SS_FEATURES = 4, + ETH_SS_RSS_HASH_FUNCS = 5, + ETH_SS_TUNABLES = 6, + ETH_SS_PHY_STATS = 7, + ETH_SS_PHY_TUNABLES = 8, + ETH_SS_LINK_MODES = 9, + ETH_SS_MSG_CLASSES = 10, + ETH_SS_WOL_MODES = 11, + ETH_SS_SOF_TIMESTAMPING = 12, + ETH_SS_TS_TX_TYPES = 13, + ETH_SS_TS_RX_FILTERS = 14, + ETH_SS_UDP_TUNNEL_TYPES = 15, + ETH_SS_STATS_STD = 16, + ETH_SS_STATS_ETH_PHY = 17, + ETH_SS_STATS_ETH_MAC = 18, + ETH_SS_STATS_ETH_CTRL = 19, + ETH_SS_STATS_RMON = 20, + ETH_SS_COUNT = 21, +}; + +enum ethtool_mac_stats_src { + ETHTOOL_MAC_STATS_SRC_AGGREGATE = 0, + ETHTOOL_MAC_STATS_SRC_EMAC = 1, + ETHTOOL_MAC_STATS_SRC_PMAC = 2, +}; + +enum ethtool_module_power_mode_policy { + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1, + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO = 2, +}; + +enum ethtool_module_power_mode { + ETHTOOL_MODULE_POWER_MODE_LOW = 1, + ETHTOOL_MODULE_POWER_MODE_HIGH = 2, +}; + +enum ethtool_mm_verify_status { + ETHTOOL_MM_VERIFY_STATUS_UNKNOWN = 0, + ETHTOOL_MM_VERIFY_STATUS_INITIAL = 1, + ETHTOOL_MM_VERIFY_STATUS_VERIFYING = 2, + ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED = 3, + ETHTOOL_MM_VERIFY_STATUS_FAILED = 4, + ETHTOOL_MM_VERIFY_STATUS_DISABLED = 5, +}; + +struct ethtool_gstrings { + __u32 cmd; + __u32 string_set; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; + __u32 reserved; + __u64 sset_mask; + __u32 data[0]; +}; + +struct ethtool_test { + __u32 cmd; + __u32 flags; + __u32 reserved; + __u32 len; + __u64 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; + __u32 size; + __u8 data[0]; +}; + +enum ethtool_flags { + ETH_FLAG_TXVLAN = 128, + ETH_FLAG_RXVLAN = 256, + ETH_FLAG_LRO = 32768, + ETH_FLAG_NTUPLE = 134217728, + ETH_FLAG_RXHASH = 268435456, +}; + +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; + +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; + +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; + +struct ethtool_flow_ext { + __u8 
padding[2]; + unsigned char h_dest[6]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + long: 32; + __u64 ring_cookie; + __u32 location; + long: 32; +}; + +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; + long: 32; +}; + +struct ethtool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u8 hfunc; + __u8 input_xfrm; + __u8 rsvd8[2]; + __u32 rsvd32; + __u32 rss_config[0]; +}; + +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[128]; +}; + +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT = 0, + ETHTOOL_F_WISH__BIT = 1, + ETHTOOL_F_COMPAT__BIT = 2, +}; + +struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[128]; + char data[0]; +}; + +struct ethtool_fecparam { + __u32 cmd; + __u32 active_fec; + __u32 fec; + __u32 reserved; +}; + +enum ethtool_fec_config_bits { + ETHTOOL_FEC_NONE_BIT = 0, + ETHTOOL_FEC_AUTO_BIT = 1, + ETHTOOL_FEC_OFF_BIT = 2, + ETHTOOL_FEC_RS_BIT = 3, + ETHTOOL_FEC_BASER_BIT = 4, + ETHTOOL_FEC_LLRS_BIT = 5, +}; + +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 link_mode_masks_nwords; + __u8 transceiver; + __u8 master_slave_cfg; + __u8 master_slave_state; + __u8 rate_matching; + __u32 reserved[7]; +}; + +enum { + SOF_TIMESTAMPING_TX_HARDWARE = 1, + SOF_TIMESTAMPING_TX_SOFTWARE = 2, + SOF_TIMESTAMPING_RX_HARDWARE = 4, + SOF_TIMESTAMPING_RX_SOFTWARE = 8, + SOF_TIMESTAMPING_SOFTWARE = 16, + SOF_TIMESTAMPING_SYS_HARDWARE = 32, + SOF_TIMESTAMPING_RAW_HARDWARE = 64, + SOF_TIMESTAMPING_OPT_ID = 128, + SOF_TIMESTAMPING_TX_SCHED = 256, + SOF_TIMESTAMPING_TX_ACK = 512, + SOF_TIMESTAMPING_OPT_CMSG = 1024, + SOF_TIMESTAMPING_OPT_TSONLY = 2048, + SOF_TIMESTAMPING_OPT_STATS = 4096, + SOF_TIMESTAMPING_OPT_PKTINFO = 8192, + SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, + SOF_TIMESTAMPING_BIND_PHC = 32768, + SOF_TIMESTAMPING_OPT_ID_TCP = 65536, + SOF_TIMESTAMPING_OPT_RX_FILTER = 131072, + SOF_TIMESTAMPING_LAST = 131072, + SOF_TIMESTAMPING_MASK = 262143, +}; + +enum { + ETH_RSS_HASH_TOP_BIT = 0, + ETH_RSS_HASH_XOR_BIT = 1, + ETH_RSS_HASH_CRC32_BIT = 2, + ETH_RSS_HASH_FUNCS_COUNT = 3, +}; + +struct kernel_ethtool_ringparam { + u32 rx_buf_len; + u8 tcp_data_split; + u8 tx_push; + u8 rx_push; + u32 cqe_size; + u32 tx_push_buf_len; + u32 tx_push_buf_max_len; +}; + +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum 
ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + enum ethtool_link_ext_substate_module module; + u32 __link_ext_substate; + }; +}; + +struct ethtool_link_ext_stats { + u64 link_down_events; +}; + +struct ethtool_rxfh_context { + u32 indir_size; + u32 key_size; + u16 priv_size; + u8 hfunc; + u8 input_xfrm; + u8 indir_configured: 1; + u8 key_configured: 1; + u32 key_off; + u8 data[0]; +}; + +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + long unsigned int supported[4]; + long unsigned int advertising[4]; + long unsigned int lp_advertising[4]; + } link_modes; + u32 lanes; +}; + +struct ethtool_keee { + long unsigned int supported[4]; + long unsigned int advertised[4]; + long unsigned int lp_advertised[4]; + u32 tx_lpi_timer; + bool tx_lpi_enabled; + bool eee_active; + bool eee_enabled; +}; + +struct kernel_ethtool_coalesce { + u8 use_cqe_mode_tx; + u8 use_cqe_mode_rx; + u32 tx_aggr_max_bytes; + u32 tx_aggr_max_frames; + u32 tx_aggr_time_usecs; +}; + +struct ethtool_eth_mac_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + }; + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + } stats; + }; +}; + +struct ethtool_eth_phy_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 SymbolErrorDuringCarrier; + }; + struct { + u64 SymbolErrorDuringCarrier; + } stats; + }; +}; + +struct ethtool_eth_ctrl_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + }; + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + } stats; + }; +}; + +struct ethtool_pause_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 tx_pause_frames; + u64 rx_pause_frames; + }; + struct { + u64 tx_pause_frames; + u64 rx_pause_frames; + } stats; + }; +}; + +struct ethtool_fec_stat { + u64 total; + u64 lanes[8]; +}; + +struct 
ethtool_fec_stats { + struct ethtool_fec_stat corrected_blocks; + struct ethtool_fec_stat uncorrectable_blocks; + struct ethtool_fec_stat corrected_bits; +}; + +struct ethtool_rmon_hist_range { + u16 low; + u16 high; +}; + +struct ethtool_rmon_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + }; + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + } stats; + }; +}; + +struct ethtool_ts_stats { + union { + struct { + u64 pkts; + u64 lost; + u64 err; + }; + struct { + u64 pkts; + u64 lost; + u64 err; + } tx_stats; + }; +}; + +struct ethtool_module_eeprom { + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; + u8 *data; +}; + +struct ethtool_module_power_mode_params { + enum ethtool_module_power_mode_policy policy; + enum ethtool_module_power_mode mode; +}; + +struct ethtool_mm_state { + u32 verify_time; + u32 max_verify_time; + enum ethtool_mm_verify_status verify_status; + bool tx_enabled; + bool tx_active; + bool pmac_enabled; + bool verify_enabled; + u32 tx_min_frag_size; + u32 rx_min_frag_size; +}; + +struct ethtool_mm_cfg { + u32 verify_time; + bool verify_enabled; + bool tx_enabled; + bool pmac_enabled; + u32 tx_min_frag_size; +}; + +struct ethtool_mm_stats { + u64 MACMergeFrameAssErrorCount; + u64 MACMergeFrameSmdErrorCount; + u64 MACMergeFrameAssOkCount; + u64 MACMergeFragCountRx; + u64 MACMergeFragCountTx; + u64 MACMergeHoldCount; +}; + +struct ethtool_rxfh_param { + u8 hfunc; + u32 indir_size; + u32 *indir; + u32 key_size; + u8 *key; + u32 rss_context; + u8 rss_delete; + u8 input_xfrm; +}; + +struct flow_rule; + +struct ethtool_rx_flow_rule { + struct flow_rule *rule; + long unsigned int priv[0]; +}; + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP = 1, + FLOW_ACTION_TRAP = 2, + FLOW_ACTION_GOTO = 3, + FLOW_ACTION_REDIRECT = 4, + FLOW_ACTION_MIRRED = 5, + FLOW_ACTION_REDIRECT_INGRESS = 6, + FLOW_ACTION_MIRRED_INGRESS = 7, + FLOW_ACTION_VLAN_PUSH = 8, + FLOW_ACTION_VLAN_POP = 9, + FLOW_ACTION_VLAN_MANGLE = 10, + FLOW_ACTION_TUNNEL_ENCAP = 11, + FLOW_ACTION_TUNNEL_DECAP = 12, + FLOW_ACTION_MANGLE = 13, + FLOW_ACTION_ADD = 14, + FLOW_ACTION_CSUM = 15, + FLOW_ACTION_MARK = 16, + FLOW_ACTION_PTYPE = 17, + FLOW_ACTION_PRIORITY = 18, + FLOW_ACTION_RX_QUEUE_MAPPING = 19, + FLOW_ACTION_WAKE = 20, + FLOW_ACTION_QUEUE = 21, + FLOW_ACTION_SAMPLE = 22, + FLOW_ACTION_POLICE = 23, + FLOW_ACTION_CT = 24, + FLOW_ACTION_CT_METADATA = 25, + FLOW_ACTION_MPLS_PUSH = 26, + FLOW_ACTION_MPLS_POP = 27, + FLOW_ACTION_MPLS_MANGLE = 28, + FLOW_ACTION_GATE = 29, + FLOW_ACTION_PPPOE_PUSH = 30, + FLOW_ACTION_JUMP = 31, + FLOW_ACTION_PIPE = 32, + FLOW_ACTION_VLAN_PUSH_ETH = 33, + FLOW_ACTION_VLAN_POP_ETH = 34, + FLOW_ACTION_CONTINUE = 35, + NUM_FLOW_ACTIONS = 36, +}; + +enum flow_action_hw_stats { + FLOW_ACTION_HW_STATS_IMMEDIATE = 1, + FLOW_ACTION_HW_STATS_DELAYED = 2, + FLOW_ACTION_HW_STATS_ANY = 3, + FLOW_ACTION_HW_STATS_DISABLED = 4, + FLOW_ACTION_HW_STATS_DONT_CARE = 7, +}; + +typedef void (*action_destr)(void *); + +enum flow_action_mangle_base { + FLOW_ACT_MANGLE_UNSPEC = 0, + FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1, + FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2, + FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3, + FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4, + FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5, +}; + +struct 
psample_group; + +struct action_gate_entry; + +struct ip_tunnel_info; + +struct nf_flowtable; + +struct flow_action_cookie; + +struct flow_action_entry { + enum flow_action_id id; + u32 hw_index; + long unsigned int cookie; + long: 32; + u64 miss_cookie; + enum flow_action_hw_stats hw_stats; + action_destr destructor; + void *destructor_priv; + long: 32; + union { + u32 chain_index; + struct net_device *dev; + struct { + u16 vid; + __be16 proto; + u8 prio; + } vlan; + struct { + unsigned char dst[6]; + unsigned char src[6]; + } vlan_push_eth; + struct { + enum flow_action_mangle_base htype; + u32 offset; + u32 mask; + u32 val; + } mangle; + struct ip_tunnel_info *tunnel; + u32 csum_flags; + u32 mark; + u16 ptype; + u16 rx_queue; + u32 priority; + struct { + u32 ctx; + u32 index; + u8 vf; + } queue; + struct { + struct psample_group *psample_group; + u32 rate; + u32 trunc_size; + bool truncate; + } sample; + struct { + u32 burst; + long: 32; + u64 rate_bytes_ps; + u64 peakrate_bytes_ps; + u32 avrate; + u16 overhead; + u64 burst_pkt; + u64 rate_pkt_ps; + u32 mtu; + struct { + enum flow_action_id act_id; + u32 extval; + } exceed; + struct { + enum flow_action_id act_id; + u32 extval; + } notexceed; + long: 32; + } police; + struct { + int action; + u16 zone; + struct nf_flowtable *flow_table; + } ct; + struct { + long unsigned int cookie; + u32 mark; + u32 labels[4]; + bool orig_dir; + } ct_metadata; + struct { + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { + __be16 proto; + } mpls_pop; + struct { + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; + struct { + s32 prio; + long: 32; + u64 basetime; + u64 cycletime; + u64 cycletimeext; + u32 num_entries; + struct action_gate_entry *entries; + } gate; + struct { + u16 sid; + } pppoe; + }; + struct flow_action_cookie *user_cookie; + long: 32; +}; + +struct flow_action { + unsigned int num_entries; + long: 32; + struct flow_action_entry entries[0]; +}; + +struct flow_rule { + struct flow_match match; + long: 32; + struct flow_action action; +}; + +struct ethtool_rx_flow_spec_input { + const struct ethtool_rx_flow_spec *fs; + u32 rss_ctx; +}; + +struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *); + int (*get_strings)(struct phy_device *, u8 *); + int (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *, struct netlink_ext_ack *); + int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); + int (*start_cable_test)(struct phy_device *, struct netlink_ext_ack *); + int (*start_cable_test_tdr)(struct phy_device *, struct netlink_ext_ack *, const struct phy_tdr_config *); +}; + +struct flow_action_cookie { + u32 cookie_len; + u8 cookie[0]; +}; + +enum devlink_rate_type { + DEVLINK_RATE_TYPE_LEAF = 0, + DEVLINK_RATE_TYPE_NODE = 1, +}; + +enum devlink_port_fn_state { + DEVLINK_PORT_FN_STATE_INACTIVE = 0, + DEVLINK_PORT_FN_STATE_ACTIVE = 1, +}; + +enum devlink_port_fn_opstate { + DEVLINK_PORT_FN_OPSTATE_DETACHED = 0, + DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1, +}; + +struct devlink_rate { + struct list_head list; + enum devlink_rate_type type; + struct devlink *devlink; + void *priv; + long: 32; + u64 tx_share; + u64 tx_max; + struct devlink_rate *parent; + union { + struct devlink_port *devlink_port; + struct { + char *name; + refcount_t refcnt; + }; + }; + u32 tx_priority; + u32 tx_weight; + long: 32; +}; + +struct devlink_port_ops { 
+ int (*port_split)(struct devlink *, struct devlink_port *, unsigned int, struct netlink_ext_ack *); + int (*port_unsplit)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_type_set)(struct devlink_port *, enum devlink_port_type); + int (*port_del)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_fn_hw_addr_get)(struct devlink_port *, u8 *, int *, struct netlink_ext_ack *); + int (*port_fn_hw_addr_set)(struct devlink_port *, const u8 *, int, struct netlink_ext_ack *); + int (*port_fn_roce_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_roce_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_migratable_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_migratable_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_state_get)(struct devlink_port *, enum devlink_port_fn_state *, enum devlink_port_fn_opstate *, struct netlink_ext_ack *); + int (*port_fn_state_set)(struct devlink_port *, enum devlink_port_fn_state, struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_max_io_eqs_get)(struct devlink_port *, u32 *, struct netlink_ext_ack *); + int (*port_fn_max_io_eqs_set)(struct devlink_port *, u32, struct netlink_ext_ack *); +}; + +enum sk_rst_reason { + SK_RST_REASON_NOT_SPECIFIED = 0, + SK_RST_REASON_NO_SOCKET = 1, + SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE = 2, + SK_RST_REASON_TCP_RFC7323_PAWS = 3, + SK_RST_REASON_TCP_TOO_OLD_ACK = 4, + SK_RST_REASON_TCP_ACK_UNSENT_DATA = 5, + SK_RST_REASON_TCP_FLAGS = 6, + SK_RST_REASON_TCP_OLD_ACK = 7, + SK_RST_REASON_TCP_ABORT_ON_DATA = 8, + SK_RST_REASON_TCP_TIMEWAIT_SOCKET = 9, + SK_RST_REASON_INVALID_SYN = 10, + SK_RST_REASON_TCP_ABORT_ON_CLOSE = 11, + SK_RST_REASON_TCP_ABORT_ON_LINGER = 12, + SK_RST_REASON_TCP_ABORT_ON_MEMORY = 13, + SK_RST_REASON_TCP_STATE = 14, + SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT = 15, + SK_RST_REASON_TCP_DISCONNECT_WITH_DATA = 16, + SK_RST_REASON_MPTCP_RST_EUNSPEC = 17, + SK_RST_REASON_MPTCP_RST_EMPTCP = 18, + SK_RST_REASON_MPTCP_RST_ERESOURCE = 19, + SK_RST_REASON_MPTCP_RST_EPROHIBIT = 20, + SK_RST_REASON_MPTCP_RST_EWQ2BIG = 21, + SK_RST_REASON_MPTCP_RST_EBADPERF = 22, + SK_RST_REASON_MPTCP_RST_EMIDDLEBOX = 23, + SK_RST_REASON_ERROR = 24, + SK_RST_REASON_MAX = 25, +}; + +struct request_sock; + +struct request_sock_ops { + int family; + unsigned int obj_size; + struct kmem_cache *slab; + char *slab_name; + int (*rtx_syn_ack)(const struct sock *, struct request_sock *); + void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); + void (*send_reset)(const struct sock *, struct sk_buff *, enum sk_rst_reason); + void (*destructor)(struct request_sock *); + void (*syn_ack_timeout)(const struct request_sock *); +}; + +struct timewait_sock_ops { + struct kmem_cache *twsk_slab; + char *twsk_slab_name; + unsigned int twsk_obj_size; + void (*twsk_destructor)(struct sock *); +}; + +struct saved_syn; + +struct request_sock { + struct sock_common __req_common; + struct request_sock *dl_next; + u16 mss; + u8 num_retrans; + u8 syncookie: 1; + u8 num_timeout: 7; + u32 ts_recent; + struct timer_list 
rsk_timer; + const struct request_sock_ops *rsk_ops; + struct sock *sk; + struct saved_syn *saved_syn; + u32 secid; + u32 peer_secid; + u32 timeout; +}; + +struct saved_syn { + u32 mac_hdrlen; + u32 network_hdrlen; + u32 tcp_hdrlen; + u8 data[0]; +}; + +enum tsq_enum { + TSQ_THROTTLED = 0, + TSQ_QUEUED = 1, + TCP_TSQ_DEFERRED = 2, + TCP_WRITE_TIMER_DEFERRED = 3, + TCP_DELACK_TIMER_DEFERRED = 4, + TCP_MTU_REDUCED_DEFERRED = 5, + TCP_ACK_DEFERRED = 6, +}; + +struct ip6_sf_list { + struct ip6_sf_list *sf_next; + struct in6_addr sf_addr; + long unsigned int sf_count[2]; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; + struct callback_head rcu; +}; + +struct ifmcaddr6 { + struct in6_addr mca_addr; + struct inet6_dev *idev; + struct ifmcaddr6 *next; + struct ip6_sf_list *mca_sources; + struct ip6_sf_list *mca_tomb; + unsigned int mca_sfmode; + unsigned char mca_crcount; + long unsigned int mca_sfcount[2]; + struct delayed_work mca_work; + unsigned int mca_flags; + int mca_users; + refcount_t mca_refcnt; + long unsigned int mca_cstamp; + long unsigned int mca_tstamp; + struct callback_head rcu; +}; + +struct ifacaddr6 { + struct in6_addr aca_addr; + struct fib6_info *aca_rt; + struct ifacaddr6 *aca_next; + struct hlist_node aca_addr_lst; + int aca_users; + refcount_t aca_refcnt; + long unsigned int aca_cstamp; + long unsigned int aca_tstamp; + struct callback_head rcu; +}; + +struct xdp_desc { + __u64 addr; + __u32 len; + __u32 options; +}; + +struct xdp_umem { + void *addrs; + long: 32; + u64 size; + u32 headroom; + u32 chunk_size; + u32 chunks; + u32 npgs; + struct user_struct *user; + refcount_t users; + u8 flags; + u8 tx_metadata_len; + bool zc; + struct page **pgs; + int id; + struct list_head xsk_dma_list; + struct work_struct work; + long: 32; +}; + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; + struct xdp_mem_info mem; + unsigned int napi_id; + u32 frag_size; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct xdp_txq_info { + struct net_device *dev; +}; + +struct xdp_buff_xsk { + struct xdp_buff xdp; + u8 cb[24]; + dma_addr_t dma; + dma_addr_t frame_dma; + struct xsk_buff_pool *pool; + struct list_head list_node; +}; + +enum { + ETHTOOL_MSG_KERNEL_NONE = 0, + ETHTOOL_MSG_STRSET_GET_REPLY = 1, + ETHTOOL_MSG_LINKINFO_GET_REPLY = 2, + ETHTOOL_MSG_LINKINFO_NTF = 3, + ETHTOOL_MSG_LINKMODES_GET_REPLY = 4, + ETHTOOL_MSG_LINKMODES_NTF = 5, + ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6, + ETHTOOL_MSG_DEBUG_GET_REPLY = 7, + ETHTOOL_MSG_DEBUG_NTF = 8, + ETHTOOL_MSG_WOL_GET_REPLY = 9, + ETHTOOL_MSG_WOL_NTF = 10, + ETHTOOL_MSG_FEATURES_GET_REPLY = 11, + ETHTOOL_MSG_FEATURES_SET_REPLY = 12, + ETHTOOL_MSG_FEATURES_NTF = 13, + ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14, + ETHTOOL_MSG_PRIVFLAGS_NTF = 15, + ETHTOOL_MSG_RINGS_GET_REPLY = 16, + ETHTOOL_MSG_RINGS_NTF = 17, + ETHTOOL_MSG_CHANNELS_GET_REPLY = 18, + ETHTOOL_MSG_CHANNELS_NTF = 19, + ETHTOOL_MSG_COALESCE_GET_REPLY = 20, + ETHTOOL_MSG_COALESCE_NTF = 21, + ETHTOOL_MSG_PAUSE_GET_REPLY = 22, + ETHTOOL_MSG_PAUSE_NTF = 23, + ETHTOOL_MSG_EEE_GET_REPLY = 24, + ETHTOOL_MSG_EEE_NTF = 25, + ETHTOOL_MSG_TSINFO_GET_REPLY = 26, + ETHTOOL_MSG_CABLE_TEST_NTF = 27, + ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 29, + 
ETHTOOL_MSG_FEC_GET_REPLY = 30, + ETHTOOL_MSG_FEC_NTF = 31, + ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 32, + ETHTOOL_MSG_STATS_GET_REPLY = 33, + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 34, + ETHTOOL_MSG_MODULE_GET_REPLY = 35, + ETHTOOL_MSG_MODULE_NTF = 36, + ETHTOOL_MSG_PSE_GET_REPLY = 37, + ETHTOOL_MSG_RSS_GET_REPLY = 38, + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 39, + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 40, + ETHTOOL_MSG_PLCA_NTF = 41, + ETHTOOL_MSG_MM_GET_REPLY = 42, + ETHTOOL_MSG_MM_NTF = 43, + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 44, + ETHTOOL_MSG_PHY_GET_REPLY = 45, + ETHTOOL_MSG_PHY_NTF = 46, + __ETHTOOL_MSG_KERNEL_CNT = 47, + ETHTOOL_MSG_KERNEL_MAX = 46, +}; + +struct ethtool_devlink_compat { + struct devlink *devlink; + union { + struct ethtool_flash efl; + struct ethtool_drvinfo info; + }; +}; + +struct ethtool_link_usettings { + struct ethtool_link_settings base; + struct { + __u32 supported[4]; + __u32 advertising[4]; + __u32 lp_advertising[4]; + } link_modes; +}; + +struct ethtool_rx_flow_key { + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_eth_addrs eth_addrs; +}; + +struct ethtool_rx_flow_match { + struct flow_dissector dissector; + struct ethtool_rx_flow_key key; + struct ethtool_rx_flow_key mask; +}; + +struct nf_hook_state; + +typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); + +struct nf_hook_entry { + nf_hookfn *hook; + void *priv; +}; + +struct nf_hook_entries { + u16 num_hook_entries; + struct nf_hook_entry hooks[0]; +}; + +struct net_generic { + union { + struct { + unsigned int len; + struct callback_head rcu; + } s; + struct { + struct {} __empty_ptr; + void *ptr[0]; + }; + }; +}; + +struct pernet_operations { + struct list_head list; + int (*init)(struct net *); + void (*pre_exit)(struct net *); + void (*exit)(struct net *); + void (*exit_batch)(struct list_head *); + void (*exit_batch_rtnl)(struct list_head *, struct list_head *); + unsigned int * const id; + const size_t size; +}; + +struct netlink_kernel_cfg { + unsigned int groups; + unsigned int flags; + void (*input)(struct sk_buff *); + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, long unsigned int *); +}; + +enum { + NLA_UNSPEC = 0, + NLA_U8 = 1, + NLA_U16 = 2, + NLA_U32 = 3, + NLA_U64 = 4, + NLA_STRING = 5, + NLA_FLAG = 6, + NLA_MSECS = 7, + NLA_NESTED = 8, + NLA_NESTED_ARRAY = 9, + NLA_NUL_STRING = 10, + NLA_BINARY = 11, + NLA_S8 = 12, + NLA_S16 = 13, + NLA_S32 = 14, + NLA_S64 = 15, + NLA_BITFIELD32 = 16, + NLA_REJECT = 17, + NLA_BE16 = 18, + NLA_BE32 = 19, + NLA_SINT = 20, + NLA_UINT = 21, + __NLA_TYPE_MAX = 22, +}; + +enum netlink_validation { + NL_VALIDATE_LIBERAL = 0, + NL_VALIDATE_TRAILING = 1, + NL_VALIDATE_MAXTYPE = 2, + NL_VALIDATE_UNSPEC = 4, + NL_VALIDATE_STRICT_ATTRS = 8, + NL_VALIDATE_NESTED = 16, +}; + +struct nf_hook_state { + u8 hook; + u8 pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + struct net *net; + int (*okfn)(struct net *, struct sock *, struct sk_buff *); +}; + +enum nfnetlink_groups { + NFNLGRP_NONE = 0, + NFNLGRP_CONNTRACK_NEW = 1, + NFNLGRP_CONNTRACK_UPDATE = 2, + NFNLGRP_CONNTRACK_DESTROY = 3, + NFNLGRP_CONNTRACK_EXP_NEW = 4, + NFNLGRP_CONNTRACK_EXP_UPDATE = 5, + NFNLGRP_CONNTRACK_EXP_DESTROY = 6, + NFNLGRP_NFTABLES = 7, + 
NFNLGRP_ACCT_QUOTA = 8, + NFNLGRP_NFTRACE = 9, + __NFNLGRP_MAX = 10, +}; + +struct nfgenmsg { + __u8 nfgen_family; + __u8 version; + __be16 res_id; +}; + +enum nfnl_batch_attributes { + NFNL_BATCH_UNSPEC = 0, + NFNL_BATCH_GENID = 1, + __NFNL_BATCH_MAX = 2, +}; + +struct nfnl_info { + struct net *net; + struct sock *sk; + const struct nlmsghdr *nlh; + const struct nfgenmsg *nfmsg; + struct netlink_ext_ack *extack; +}; + +enum nfnl_callback_type { + NFNL_CB_UNSPEC = 0, + NFNL_CB_MUTEX = 1, + NFNL_CB_RCU = 2, + NFNL_CB_BATCH = 3, +}; + +struct nfnl_callback { + int (*call)(struct sk_buff *, const struct nfnl_info *, const struct nlattr * const *); + const struct nla_policy *policy; + enum nfnl_callback_type type; + __u16 attr_count; +}; + +enum nfnl_abort_action { + NFNL_ABORT_NONE = 0, + NFNL_ABORT_AUTOLOAD = 1, + NFNL_ABORT_VALIDATE = 2, +}; + +struct nfnetlink_subsystem { + const char *name; + __u8 subsys_id; + __u8 cb_count; + const struct nfnl_callback *cb; + struct module *owner; + int (*commit)(struct net *, struct sk_buff *); + int (*abort)(struct net *, struct sk_buff *, enum nfnl_abort_action); + bool (*valid_genid)(struct net *, u32); +}; + +struct nfnl_net { + struct sock *nfnl; +}; + +struct nfnl_err { + struct list_head head; + struct nlmsghdr *nlh; + int err; + struct netlink_ext_ack extack; +}; + +enum { + NFNL_BATCH_FAILURE = 1, + NFNL_BATCH_DONE = 2, + NFNL_BATCH_REPLAY = 4, +}; + +typedef __u64 __be64; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head *next; +}; + +struct rhash_lock_head; + +struct bucket_table { + unsigned int size; + unsigned int nest; + u32 hash_rnd; + struct list_head walkers; + struct callback_head rcu; + struct bucket_table *future_tbl; + struct lockdep_map dep_map; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct rhash_lock_head *buckets[0]; +}; + +struct rhltable { + struct rhashtable ht; +}; + +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; + u32 flags; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; +}; + +struct flow_dissector_key_tcp { + __be16 flags; +}; + +struct flow_dissector_key_meta { + int ingress_ifindex; + u16 ingress_iftype; + u8 l2_miss; +}; + +struct in_addr { + __be32 s_addr; +}; + +enum ip_conntrack_dir { + IP_CT_DIR_ORIGINAL = 0, + IP_CT_DIR_REPLY = 1, + IP_CT_DIR_MAX = 2, +}; + +enum nft_registers { + NFT_REG_VERDICT = 0, + NFT_REG_1 = 1, + NFT_REG_2 = 2, + NFT_REG_3 = 3, + NFT_REG_4 = 4, + __NFT_REG_MAX = 5, + NFT_REG32_00 = 8, + NFT_REG32_01 = 9, + NFT_REG32_02 = 10, + NFT_REG32_03 = 11, + NFT_REG32_04 = 12, + NFT_REG32_05 = 13, + NFT_REG32_06 = 14, + NFT_REG32_07 = 15, + NFT_REG32_08 = 16, + NFT_REG32_09 = 17, + NFT_REG32_10 = 18, + NFT_REG32_11 = 19, + NFT_REG32_12 = 20, + NFT_REG32_13 = 21, + NFT_REG32_14 = 22, + NFT_REG32_15 = 23, +}; + +enum nft_verdicts { + NFT_CONTINUE = -1, + NFT_BREAK = -2, + NFT_JUMP = -3, + NFT_GOTO = -4, + NFT_RETURN = -5, +}; + +enum nft_data_types { + NFT_DATA_VALUE = 0, + NFT_DATA_VERDICT = 4294967040, +}; + +enum nft_cmp_ops { + NFT_CMP_EQ = 0, + NFT_CMP_NEQ = 1, + NFT_CMP_LT = 2, + NFT_CMP_LTE = 3, + NFT_CMP_GT = 4, + NFT_CMP_GTE = 5, +}; + +enum nft_cmp_attributes { + NFTA_CMP_UNSPEC = 0, + NFTA_CMP_SREG = 1, + NFTA_CMP_OP = 2, + NFTA_CMP_DATA = 3, + __NFTA_CMP_MAX = 4, +}; + +struct rhash_lock_head 
{}; + +struct nf_flowtable_type; + +struct nf_flowtable { + unsigned int flags; + int priority; + struct rhashtable rhashtable; + struct list_head list; + const struct nf_flowtable_type *type; + struct delayed_work gc_work; + struct flow_block flow_block; + struct rw_semaphore flow_block_lock; + possible_net_t net; +}; + +struct flow_stats { + u64 pkts; + u64 bytes; + u64 drops; + u64 lastused; + enum flow_action_hw_stats used_hw_stats; + bool used_hw_stats_valid; +}; + +enum flow_block_command { + FLOW_BLOCK_BIND = 0, + FLOW_BLOCK_UNBIND = 1, +}; + +enum l2tp_debug_flags { + L2TP_MSG_DEBUG = 1, + L2TP_MSG_CONTROL = 2, + L2TP_MSG_SEQ = 4, + L2TP_MSG_DATA = 8, +}; + +struct nf_flow_key { + struct flow_dissector_key_meta meta; + struct flow_dissector_key_control control; + struct flow_dissector_key_control enc_control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_keyid enc_key_id; + union { + struct flow_dissector_key_ipv4_addrs enc_ipv4; + struct flow_dissector_key_ipv6_addrs enc_ipv6; + }; + struct flow_dissector_key_tcp tcp; + struct flow_dissector_key_ports tp; +}; + +struct nf_flow_match { + struct flow_dissector dissector; + struct nf_flow_key key; + struct nf_flow_key mask; +}; + +struct nf_flow_rule { + struct nf_flow_match match; + struct flow_rule *rule; + long: 32; +}; + +enum flow_offload_tuple_dir { + FLOW_OFFLOAD_DIR_ORIGINAL = 0, + FLOW_OFFLOAD_DIR_REPLY = 1, +}; + +struct flow_offload; + +struct nf_flowtable_type { + struct list_head list; + int family; + int (*init)(struct nf_flowtable *); + bool (*gc)(const struct flow_offload *); + int (*setup)(struct nf_flowtable *, struct net_device *, enum flow_block_command); + int (*action)(struct net *, struct flow_offload *, enum flow_offload_tuple_dir, struct nf_flow_rule *); + void (*free)(struct nf_flowtable *); + void (*get)(struct nf_flowtable *); + void (*put)(struct nf_flowtable *); + nf_hookfn *hook; + struct module *owner; +}; + +struct flow_offload_tuple { + union { + struct in_addr src_v4; + struct in6_addr src_v6; + }; + union { + struct in_addr dst_v4; + struct in6_addr dst_v6; + }; + struct { + __be16 src_port; + __be16 dst_port; + }; + int iifidx; + u8 l3proto; + u8 l4proto; + struct { + u16 id; + __be16 proto; + } encap[2]; + struct {} __hash; + u8 dir: 2; + u8 xmit_type: 3; + u8 encap_num: 2; + char: 1; + u8 in_vlan_ingress: 2; + u16 mtu; + union { + struct { + struct dst_entry *dst_cache; + u32 dst_cookie; + }; + struct { + u32 ifidx; + u32 hw_ifidx; + u8 h_source[6]; + u8 h_dest[6]; + } out; + struct { + u32 iifidx; + } tc; + }; +}; + +struct flow_offload_tuple_rhash { + struct rhash_head node; + struct flow_offload_tuple tuple; +}; + +struct nf_conn; + +struct flow_offload { + struct flow_offload_tuple_rhash tuplehash[2]; + struct nf_conn *ct; + long unsigned int flags; + u16 type; + u32 timeout; + struct callback_head callback_head; +}; + +struct nft_pktinfo { + struct sk_buff *skb; + const struct nf_hook_state *state; + u8 flags; + u8 tprot; + u16 fragoff; + u16 thoff; + u16 inneroff; +}; + +struct nft_chain; + +struct nft_verdict { + u32 code; + struct nft_chain *chain; +}; + +struct nft_rule_blob; + +struct nft_table; + +struct nft_chain { + struct nft_rule_blob *blob_gen_0; + struct nft_rule_blob *blob_gen_1; + struct list_head rules; + struct list_head list; + struct rhlist_head rhlhead; + struct 
nft_table *table; + long: 32; + u64 handle; + u32 use; + u8 flags: 5; + u8 bound: 1; + u8 genmask: 2; + char *name; + u16 udlen; + u8 *udata; + struct callback_head callback_head; + struct nft_rule_blob *blob_next; +}; + +struct nft_data { + union { + u32 data[4]; + struct nft_verdict verdict; + }; +}; + +struct nft_regs { + union { + u32 data[20]; + struct nft_verdict verdict; + }; +}; + +struct nft_expr_ops; + +struct nft_expr { + const struct nft_expr_ops *ops; + long: 32; + unsigned char data[0]; +}; + +struct nft_regs_track { + struct { + const struct nft_expr *selector; + const struct nft_expr *bitwise; + u8 num_reg; + } regs[20]; + const struct nft_expr *cur; + const struct nft_expr *last; +}; + +struct nft_ctx { + struct net *net; + struct nft_table *table; + struct nft_chain *chain; + const struct nlattr * const *nla; + u32 portid; + u32 seq; + u16 flags; + u8 family; + u8 level; + bool report; + long unsigned int reg_inited[1]; +}; + +struct nft_table { + struct list_head list; + struct rhltable chains_ht; + struct list_head chains; + struct list_head sets; + struct list_head objects; + struct list_head flowtables; + possible_net_t net; + u64 hgenerator; + u64 handle; + u32 use; + u16 family: 6; + u16 flags: 8; + u16 genmask: 2; + u32 nlpid; + char *name; + u16 udlen; + u8 *udata; + u8 validate_state; + long: 32; +}; + +struct nft_data_desc { + enum nft_data_types type; + unsigned int size; + unsigned int len; + unsigned int flags; +}; + +enum nft_trans_phase { + NFT_TRANS_PREPARE = 0, + NFT_TRANS_PREPARE_ERROR = 1, + NFT_TRANS_ABORT = 2, + NFT_TRANS_COMMIT = 3, + NFT_TRANS_RELEASE = 4, +}; + +struct nft_offload_ctx; + +struct nft_flow_rule; + +struct nft_expr_type; + +struct nft_expr_ops { + void (*eval)(const struct nft_expr *, struct nft_regs *, const struct nft_pktinfo *); + int (*clone)(struct nft_expr *, const struct nft_expr *, gfp_t); + unsigned int size; + int (*init)(const struct nft_ctx *, const struct nft_expr *, const struct nlattr * const *); + void (*activate)(const struct nft_ctx *, const struct nft_expr *); + void (*deactivate)(const struct nft_ctx *, const struct nft_expr *, enum nft_trans_phase); + void (*destroy)(const struct nft_ctx *, const struct nft_expr *); + void (*destroy_clone)(const struct nft_ctx *, const struct nft_expr *); + int (*dump)(struct sk_buff *, const struct nft_expr *, bool); + int (*validate)(const struct nft_ctx *, const struct nft_expr *); + bool (*reduce)(struct nft_regs_track *, const struct nft_expr *); + bool (*gc)(struct net *, const struct nft_expr *); + int (*offload)(struct nft_offload_ctx *, struct nft_flow_rule *, const struct nft_expr *); + bool (*offload_action)(const struct nft_expr *); + void (*offload_stats)(struct nft_expr *, const struct flow_stats *); + const struct nft_expr_type *type; + void *data; +}; + +enum nft_set_extensions { + NFT_SET_EXT_KEY = 0, + NFT_SET_EXT_KEY_END = 1, + NFT_SET_EXT_DATA = 2, + NFT_SET_EXT_FLAGS = 3, + NFT_SET_EXT_TIMEOUT = 4, + NFT_SET_EXT_USERDATA = 5, + NFT_SET_EXT_EXPRESSIONS = 6, + NFT_SET_EXT_OBJREF = 7, + NFT_SET_EXT_NUM = 8, +}; + +struct nft_expr_type { + const struct nft_expr_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); + void (*release_ops)(const struct nft_expr_ops *); + const struct nft_expr_ops *ops; + const struct nft_expr_ops *inner_ops; + struct list_head list; + const char *name; + struct module *owner; + const struct nla_policy *policy; + unsigned int maxattr; + u8 family; + u8 flags; +}; + +enum nft_offload_dep_type { + 
NFT_OFFLOAD_DEP_UNSPEC = 0, + NFT_OFFLOAD_DEP_NETWORK = 1, + NFT_OFFLOAD_DEP_TRANSPORT = 2, +}; + +struct nft_offload_reg { + u32 key; + u32 len; + u32 base_offset; + u32 offset; + u32 flags; + long: 32; + struct nft_data data; + struct nft_data mask; +}; + +struct nft_offload_ctx { + struct { + enum nft_offload_dep_type type; + __be16 l3num; + u8 protonum; + } dep; + unsigned int num_actions; + struct net *net; + struct nft_offload_reg regs[24]; +}; + +struct nft_flow_key { + struct flow_dissector_key_basic basic; + struct flow_dissector_key_control control; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_eth_addrs eth_addrs; + struct flow_dissector_key_meta meta; +}; + +struct nft_flow_match { + struct flow_dissector dissector; + struct nft_flow_key key; + struct nft_flow_key mask; +}; + +struct nft_flow_rule { + __be16 proto; + long: 32; + struct nft_flow_match match; + struct flow_rule *rule; + long: 32; +}; + +struct nft_rule_blob { + long unsigned int size; + long: 32; + unsigned char data[0]; +}; + +struct nft_cmp_fast_expr { + u32 data; + u32 mask; + u8 sreg; + u8 len; + bool inv; +}; + +struct nft_cmp16_fast_expr { + struct nft_data data; + struct nft_data mask; + u8 sreg; + u8 len; + bool inv; + long: 32; +}; + +enum nft_offload_reg_flags { + NFT_OFFLOAD_F_NETWORK2HOST = 1, +}; + +struct nft_cmp_expr { + struct nft_data data; + u8 sreg; + u8 len; + enum nft_cmp_ops op: 8; + long: 32; +}; + +union nft_cmp_offload_data { + u16 val16; + u32 val32; + u64 val64; +}; + +enum { + CSS_NO_REF = 1, + CSS_ONLINE = 2, + CSS_RELEASED = 4, + CSS_VISIBLE = 8, + CSS_DYING = 16, +}; + +struct xt_action_param; + +struct xt_mtchk_param; + +struct xt_mtdtor_param; + +struct xt_match { + struct list_head list; + const char name[29]; + u_int8_t revision; + bool (*match)(const struct sk_buff *, struct xt_action_param *); + int (*checkentry)(const struct xt_mtchk_param *); + void (*destroy)(const struct xt_mtdtor_param *); + struct module *me; + const char *table; + unsigned int matchsize; + unsigned int usersize; + unsigned int hooks; + short unsigned int proto; + short unsigned int family; +}; + +struct xt_tgchk_param; + +struct xt_tgdtor_param; + +struct xt_target { + struct list_head list; + const char name[29]; + u_int8_t revision; + unsigned int (*target)(struct sk_buff *, const struct xt_action_param *); + int (*checkentry)(const struct xt_tgchk_param *); + void (*destroy)(const struct xt_tgdtor_param *); + struct module *me; + const char *table; + unsigned int targetsize; + unsigned int usersize; + unsigned int hooks; + short unsigned int proto; + short unsigned int family; +}; + +struct xt_action_param { + union { + const struct xt_match *match; + const struct xt_target *target; + }; + union { + const void *matchinfo; + const void *targinfo; + }; + const struct nf_hook_state *state; + unsigned int thoff; + u16 fragoff; + bool hotdrop; +}; + +struct xt_mtchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_match *match; + void *matchinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +struct xt_mtdtor_param { + struct net *net; + const struct xt_match *match; + void *matchinfo; + u_int8_t family; +}; + +struct xt_tgchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const 
struct xt_target *target; + void *targinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +struct xt_tgdtor_param { + struct net *net; + const struct xt_target *target; + void *targinfo; + u_int8_t family; +}; + +struct xt_cgroup_info_v0 { + __u32 id; + __u32 invert; +}; + +struct xt_cgroup_info_v1 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + char path[4096]; + __u32 classid; + void *priv; + long: 32; +}; + +struct xt_cgroup_info_v2 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + union { + char path[512]; + __u32 classid; + }; + long: 32; + void *priv; + long: 32; +}; + +enum { + TCPF_ESTABLISHED = 2, + TCPF_SYN_SENT = 4, + TCPF_SYN_RECV = 8, + TCPF_FIN_WAIT1 = 16, + TCPF_FIN_WAIT2 = 32, + TCPF_TIME_WAIT = 64, + TCPF_CLOSE = 128, + TCPF_CLOSE_WAIT = 256, + TCPF_LAST_ACK = 512, + TCPF_LISTEN = 1024, + TCPF_CLOSING = 2048, + TCPF_NEW_SYN_RECV = 4096, + TCPF_BOUND_INACTIVE = 8192, +}; + +typedef __u16 __sum16; + +typedef void (*rcu_callback_t)(struct callback_head *); + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct flowi_tunnel { + __be64 tun_id; +}; + +struct flowi_common { + int flowic_oif; + int flowic_iif; + int flowic_l3mdev; + __u32 flowic_mark; + __u8 flowic_tos; + __u8 flowic_scope; + __u8 flowic_proto; + __u8 flowic_flags; + __u32 flowic_secid; + kuid_t flowic_uid; + __u32 flowic_multipath_hash; + struct flowi_tunnel flowic_tun_key; +}; + +union flowi_uli { + struct { + __be16 dport; + __be16 sport; + } ports; + struct { + __u8 type; + __u8 code; + } icmpt; + __be32 gre_key; + struct { + __u8 type; + } mht; +}; + +struct flowi4 { + struct flowi_common __fl_common; + __be32 saddr; + __be32 daddr; + union flowi_uli uli; + long: 32; +}; + +struct flowi6 { + struct flowi_common __fl_common; + struct in6_addr daddr; + struct in6_addr saddr; + __be32 flowlabel; + union flowi_uli uli; + __u32 mp_hash; + long: 32; +}; + +struct flowi { + union { + struct flowi_common __fl_common; + struct flowi4 ip4; + struct flowi6 ip6; + } u; +}; + +typedef long unsigned int netmem_ref; + +struct skb_frag { + netmem_ref netmem; + unsigned int len; + unsigned int offset; +}; + +typedef struct skb_frag skb_frag_t; + +struct xsk_tx_metadata_compl { + __u64 *tx_timestamp; +}; + +struct skb_shared_info { + __u8 flags; + __u8 meta_len; + __u8 nr_frags; + __u8 tx_flags; + short unsigned int gso_size; + short unsigned int gso_segs; + struct sk_buff *frag_list; + long: 32; + union { + struct skb_shared_hwtstamps hwtstamps; + struct xsk_tx_metadata_compl xsk_meta; + }; + unsigned int gso_type; + u32 tskey; + atomic_t dataref; + unsigned int xdp_frags_size; + void *destructor_arg; + skb_frag_t frags[17]; +}; + +struct minmax_sample { + u32 t; + u32 v; +}; + +struct minmax { + struct minmax_sample s[3]; +}; + +struct inet_ehash_bucket; + +struct inet_bind_hashbucket; + +struct inet_listen_hashbucket; + +struct inet_hashinfo { + struct inet_ehash_bucket *ehash; + spinlock_t *ehash_locks; + unsigned int ehash_mask; + unsigned int ehash_locks_mask; + struct kmem_cache *bind_bucket_cachep; + struct inet_bind_hashbucket *bhash; + struct kmem_cache *bind2_bucket_cachep; + struct inet_bind_hashbucket *bhash2; + unsigned int bhash_size; + unsigned int lhash2_mask; + struct inet_listen_hashbucket *lhash2; + bool pernet; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ip_ra_chain { + struct ip_ra_chain *next; + struct sock *sk; + union { + void (*destructor)(struct sock *); + struct sock *saved_sk; + }; + struct callback_head rcu; +}; + +struct inet_peer_base { + struct rb_root rb_root; + seqlock_t lock; + int total; +}; + +struct tcp_fastopen_context { + siphash_key_t key[2]; + int num; + struct callback_head rcu; + long: 32; +}; + +enum { + BPF_SOCK_OPS_VOID = 0, + BPF_SOCK_OPS_TIMEOUT_INIT = 1, + BPF_SOCK_OPS_RWND_INIT = 2, + BPF_SOCK_OPS_TCP_CONNECT_CB = 3, + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, + BPF_SOCK_OPS_NEEDS_ECN = 6, + BPF_SOCK_OPS_BASE_RTT = 7, + BPF_SOCK_OPS_RTO_CB = 8, + BPF_SOCK_OPS_RETRANS_CB = 9, + BPF_SOCK_OPS_STATE_CB = 10, + BPF_SOCK_OPS_TCP_LISTEN_CB = 11, + BPF_SOCK_OPS_RTT_CB = 12, + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13, + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15, +}; + +enum net_device_flags { + IFF_UP = 1, + IFF_BROADCAST = 2, + IFF_DEBUG = 4, + IFF_LOOPBACK = 8, + IFF_POINTOPOINT = 16, + IFF_NOTRAILERS = 32, + IFF_RUNNING = 64, + IFF_NOARP = 128, + IFF_PROMISC = 256, + IFF_ALLMULTI = 512, + IFF_MASTER = 1024, + IFF_SLAVE = 2048, + IFF_MULTICAST = 4096, + IFF_PORTSEL = 8192, + IFF_AUTOMEDIA = 16384, + IFF_DYNAMIC = 32768, + IFF_LOWER_UP = 65536, + IFF_DORMANT = 131072, + IFF_ECHO = 262144, +}; + +struct lwtunnel_state { + __u16 type; + __u16 flags; + __u16 headroom; + atomic_t refcnt; + int (*orig_output)(struct net *, struct sock *, struct sk_buff *); + int (*orig_input)(struct sk_buff *); + struct callback_head rcu; + __u8 data[0]; +}; + +struct sock_reuseport { + struct callback_head rcu; + u16 max_socks; + u16 num_socks; + u16 num_closed_socks; + u16 incoming_cpu; + unsigned int synq_overflow_ts; + unsigned int reuseport_id; + unsigned int bind_inany: 1; + unsigned int has_conns: 1; + struct bpf_prog *prog; + struct sock *socks[0]; +}; + +struct fastopen_queue { + struct request_sock *rskq_rst_head; + struct request_sock *rskq_rst_tail; + spinlock_t lock; + int qlen; + int max_qlen; + struct tcp_fastopen_context *ctx; +}; + +struct request_sock_queue { + spinlock_t rskq_lock; + u8 rskq_defer_accept; + u32 synflood_warned; + atomic_t qlen; + atomic_t young; + struct request_sock *rskq_accept_head; + struct request_sock *rskq_accept_tail; + struct fastopen_queue fastopenq; +}; + +struct ip_options { + __be32 faddr; + __be32 nexthop; + unsigned char optlen; + unsigned char srr; + unsigned char rr; + unsigned char ts; + unsigned char is_strictroute: 1; + unsigned char srr_is_hit: 1; + unsigned char is_changed: 1; + unsigned char rr_needaddr: 1; + unsigned char ts_needtime: 1; + unsigned char ts_needaddr: 1; + unsigned char router_alert; + unsigned char cipso; + unsigned char __pad2; + unsigned char __data[0]; +}; + +struct ip_options_rcu { + struct callback_head rcu; + struct ip_options opt; +}; + +struct ipv6_opt_hdr; + +struct ipv6_rt_hdr; + +struct ipv6_txoptions { + refcount_t refcnt; + int tot_len; + __u16 opt_flen; + __u16 opt_nflen; + struct ipv6_opt_hdr *hopopt; + struct ipv6_opt_hdr *dst0opt; + struct ipv6_rt_hdr *srcrt; + struct ipv6_opt_hdr *dst1opt; + struct callback_head rcu; +}; + +struct inet_request_sock { + struct request_sock req; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u16 tstamp_ok: 1; + u16 sack_ok: 1; + u16 wscale_ok: 1; + u16 ecn_ok: 1; + u16 acked: 1; + u16 no_srccheck: 1; + u16 smc_ok: 1; + u32 ir_mark; + union { + struct ip_options_rcu *ireq_opt; + struct 
{ + struct ipv6_txoptions *ipv6_opt; + struct sk_buff *pktopts; + }; + }; +}; + +struct inet_cork { + unsigned int flags; + __be32 addr; + struct ip_options *opt; + unsigned int fragsize; + int length; + struct dst_entry *dst; + u8 tx_flags; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + u32 ts_opt_id; + long: 32; + u64 transmit_time; + u32 mark; + long: 32; +}; + +struct inet_cork_full { + struct inet_cork base; + struct flowi fl; +}; + +struct ipv6_pinfo; + +struct ip_mc_socklist; + +struct inet_sock { + struct sock sk; + struct ipv6_pinfo *pinet6; + long unsigned int inet_flags; + __be32 inet_saddr; + __s16 uc_ttl; + __be16 inet_sport; + struct ip_options_rcu *inet_opt; + atomic_t inet_id; + __u8 tos; + __u8 min_ttl; + __u8 mc_ttl; + __u8 pmtudisc; + __u8 rcv_tos; + __u8 convert_csum; + int uc_index; + int mc_index; + __be32 mc_addr; + u32 local_port_range; + struct ip_mc_socklist *mc_list; + long: 32; + struct inet_cork_full cork; +}; + +struct in6_pktinfo { + struct in6_addr ipi6_addr; + int ipi6_ifindex; +}; + +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + +struct ipv6_mc_socklist; + +struct ipv6_ac_socklist; + +struct ipv6_fl_socklist; + +struct ipv6_pinfo { + struct in6_addr saddr; + struct in6_pktinfo sticky_pktinfo; + const struct in6_addr *daddr_cache; + __be32 flow_label; + __u32 frag_size; + s16 hop_limit; + u8 mcast_hops; + int ucast_oif; + int mcast_oif; + union { + struct { + __u16 srcrt: 1; + __u16 osrcrt: 1; + __u16 rxinfo: 1; + __u16 rxoinfo: 1; + __u16 rxhlim: 1; + __u16 rxohlim: 1; + __u16 hopopts: 1; + __u16 ohopopts: 1; + __u16 dstopts: 1; + __u16 odstopts: 1; + __u16 rxflow: 1; + __u16 rxtclass: 1; + __u16 rxpmtu: 1; + __u16 rxorigdstaddr: 1; + __u16 recvfragsize: 1; + } bits; + __u16 all; + } rxopt; + __u8 srcprefs; + __u8 pmtudisc; + __u8 min_hopcount; + __u8 tclass; + __be32 rcv_flowinfo; + __u32 dst_cookie; + struct ipv6_mc_socklist *ipv6_mc_list; + struct ipv6_ac_socklist *ipv6_ac_list; + struct ipv6_fl_socklist *ipv6_fl_list; + struct ipv6_txoptions *opt; + struct sk_buff *pktoptions; + struct sk_buff *rxpmtu; + struct inet6_cork cork; +}; + +enum { + INET_FLAGS_PKTINFO = 0, + INET_FLAGS_TTL = 1, + INET_FLAGS_TOS = 2, + INET_FLAGS_RECVOPTS = 3, + INET_FLAGS_RETOPTS = 4, + INET_FLAGS_PASSSEC = 5, + INET_FLAGS_ORIGDSTADDR = 6, + INET_FLAGS_CHECKSUM = 7, + INET_FLAGS_RECVFRAGSIZE = 8, + INET_FLAGS_RECVERR = 9, + INET_FLAGS_RECVERR_RFC4884 = 10, + INET_FLAGS_FREEBIND = 11, + INET_FLAGS_HDRINCL = 12, + INET_FLAGS_MC_LOOP = 13, + INET_FLAGS_MC_ALL = 14, + INET_FLAGS_TRANSPARENT = 15, + INET_FLAGS_IS_ICSK = 16, + INET_FLAGS_NODEFRAG = 17, + INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, + INET_FLAGS_DEFER_CONNECT = 19, + INET_FLAGS_MC6_LOOP = 20, + INET_FLAGS_RECVERR6_RFC4884 = 21, + INET_FLAGS_MC6_ALL = 22, + INET_FLAGS_AUTOFLOWLABEL_SET = 23, + INET_FLAGS_AUTOFLOWLABEL = 24, + INET_FLAGS_DONTFRAG = 25, + INET_FLAGS_RECVERR6 = 26, + INET_FLAGS_REPFLOW = 27, + INET_FLAGS_RTALERT_ISOLATE = 28, + INET_FLAGS_SNDFLOW = 29, + INET_FLAGS_RTALERT = 30, +}; + +struct inet_connection_sock_af_ops { + int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); + void (*send_check)(struct sock *, struct sk_buff *); + int (*rebuild_header)(struct sock *); + void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); + int (*conn_request)(struct sock *, struct sk_buff *); + struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *); + u16 
net_header_len; + u16 sockaddr_len; + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*addr2sockaddr)(struct sock *, struct sockaddr *); + void (*mtu_reduced)(struct sock *); +}; + +struct inet_bind_bucket; + +struct inet_bind2_bucket; + +struct tcp_ulp_ops; + +struct inet_connection_sock { + struct inet_sock icsk_inet; + struct request_sock_queue icsk_accept_queue; + struct inet_bind_bucket *icsk_bind_hash; + struct inet_bind2_bucket *icsk_bind2_hash; + long unsigned int icsk_timeout; + struct timer_list icsk_retransmit_timer; + struct timer_list icsk_delack_timer; + __u32 icsk_rto; + __u32 icsk_rto_min; + __u32 icsk_delack_max; + __u32 icsk_pmtu_cookie; + const struct tcp_congestion_ops *icsk_ca_ops; + const struct inet_connection_sock_af_ops *icsk_af_ops; + const struct tcp_ulp_ops *icsk_ulp_ops; + void *icsk_ulp_data; + void (*icsk_clean_acked)(struct sock *, u32); + unsigned int (*icsk_sync_mss)(struct sock *, u32); + __u8 icsk_ca_state: 5; + __u8 icsk_ca_initialized: 1; + __u8 icsk_ca_setsockopt: 1; + __u8 icsk_ca_dst_locked: 1; + __u8 icsk_retransmits; + __u8 icsk_pending; + __u8 icsk_backoff; + __u8 icsk_syn_retries; + __u8 icsk_probes_out; + __u16 icsk_ext_hdr_len; + struct { + __u8 pending; + __u8 quick; + __u8 pingpong; + __u8 retry; + __u32 ato: 8; + __u32 lrcv_flowlabel: 20; + __u32 unused: 4; + long unsigned int timeout; + __u32 lrcvtime; + __u16 last_seg_size; + __u16 rcv_mss; + } icsk_ack; + struct { + int search_high; + int search_low; + u32 probe_size: 31; + u32 enabled: 1; + u32 probe_timestamp; + } icsk_mtup; + u32 icsk_probes_tstamp; + u32 icsk_user_timeout; + long: 32; + u64 icsk_ca_priv[13]; +}; + +struct inet_bind_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + struct in6_addr fast_v6_rcv_saddr; + __be32 fast_rcv_saddr; + short unsigned int fast_sk_family; + bool fast_ipv6_only; + struct hlist_node node; + struct hlist_head bhash2; +}; + +struct inet_bind2_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + short unsigned int addr_type; + struct in6_addr v6_rcv_saddr; + struct hlist_node node; + struct hlist_node bhash_node; + struct hlist_head owners; +}; + +struct tcp_ulp_ops { + struct list_head list; + int (*init)(struct sock *); + void (*update)(struct sock *, struct proto *, void (*)(struct sock *)); + void (*release)(struct sock *); + int (*get_info)(struct sock *, struct sk_buff *); + size_t (*get_info_size)(const struct sock *); + void (*clone)(const struct request_sock *, struct sock *, const gfp_t); + char name[16]; + struct module *owner; +}; + +enum inet_csk_ack_state_t { + ICSK_ACK_SCHED = 1, + ICSK_ACK_TIMER = 2, + ICSK_ACK_PUSHED = 4, + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW = 16, + ICSK_ACK_NOMEM = 32, +}; + +struct tcphdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __u16 doff: 4; + __u16 res1: 4; + __u16 cwr: 1; + __u16 ece: 1; + __u16 urg: 1; + __u16 ack: 1; + __u16 psh: 1; + __u16 rst: 1; + __u16 syn: 1; + __u16 fin: 1; + __be16 window; + __sum16 check; + __be16 urg_ptr; +}; + +enum tcp_fastopen_client_fail { + TFO_STATUS_UNSPEC = 0, + TFO_COOKIE_UNAVAILABLE = 1, + TFO_DATA_NOT_ACKED = 2, + TFO_SYN_RETRANSMITTED = 3, +}; + +struct tcp_fastopen_cookie { + __le64 val[2]; + s8 len; + bool exp; + long: 32; +}; + +struct tcp_sack_block { + u32 start_seq; + u32 end_seq; +}; + +struct tcp_options_received { + int 
ts_recent_stamp; + u32 ts_recent; + u32 rcv_tsval; + u32 rcv_tsecr; + u16 saw_tstamp: 1; + u16 tstamp_ok: 1; + u16 dsack: 1; + u16 wscale_ok: 1; + u16 sack_ok: 3; + u16 smc_ok: 1; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u8 saw_unknown: 1; + u8 unused: 7; + u8 num_sacks; + u16 user_mss; + u16 mss_clamp; +}; + +struct tcp_request_sock_ops; + +struct tcp_request_sock { + struct inet_request_sock req; + const struct tcp_request_sock_ops *af_specific; + long: 32; + u64 snt_synack; + bool tfo_listener; + bool is_mptcp; + bool req_usec_ts; + u32 txhash; + u32 rcv_isn; + u32 snt_isn; + u32 ts_off; + u32 last_oow_ack_time; + u32 rcv_nxt; + u8 syn_tos; +}; + +enum tcp_synack_type { + TCP_SYNACK_NORMAL = 0, + TCP_SYNACK_FASTOPEN = 1, + TCP_SYNACK_COOKIE = 2, +}; + +struct tcp_request_sock_ops { + u16 mss_clamp; + __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *); + struct dst_entry * (*route_req)(const struct sock *, struct sk_buff *, struct flowi *, struct request_sock *, u32); + u32 (*init_seq)(const struct sk_buff *); + u32 (*init_ts_off)(const struct net *, const struct sk_buff *); + int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type, struct sk_buff *); +}; + +struct tcp_rack { + u64 mstamp; + u32 rtt_us; + u32 end_seq; + u32 last_delivered; + u8 reo_wnd_steps; + u8 reo_wnd_persist: 5; + u8 dsack_seen: 1; + u8 advanced: 1; +}; + +struct tcp_fastopen_request; + +struct tcp_sock { + struct inet_connection_sock inet_conn; + __u8 __cacheline_group_begin__tcp_sock_read_tx[0]; + u32 max_window; + u32 rcv_ssthresh; + u32 reordering; + u32 notsent_lowat; + u16 gso_segs; + struct sk_buff *lost_skb_hint; + struct sk_buff *retransmit_skb_hint; + __u8 __cacheline_group_end__tcp_sock_read_tx[0]; + __u8 __cacheline_group_begin__tcp_sock_read_txrx[0]; + u32 tsoffset; + u32 snd_wnd; + u32 mss_cache; + u32 snd_cwnd; + u32 prr_out; + u32 lost_out; + u32 sacked_out; + u16 tcp_header_len; + u8 scaling_ratio; + u8 chrono_type: 2; + u8 repair: 1; + u8 tcp_usec_ts: 1; + u8 is_sack_reneg: 1; + u8 is_cwnd_limited: 1; + __u8 __cacheline_group_end__tcp_sock_read_txrx[0]; + __u8 __cacheline_group_begin__tcp_sock_read_rx[0]; + u32 copied_seq; + u32 rcv_tstamp; + u32 snd_wl1; + u32 tlp_high_seq; + u32 rttvar_us; + u32 retrans_out; + u16 advmss; + u16 urg_data; + u32 lost; + struct minmax rtt_min; + struct rb_root out_of_order_queue; + u32 snd_ssthresh; + u8 recvmsg_inq: 1; + __u8 __cacheline_group_end__tcp_sock_read_rx[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + __u8 __cacheline_group_begin__tcp_sock_write_tx[0]; + u32 segs_out; + u32 data_segs_out; + u64 bytes_sent; + u32 snd_sml; + u32 chrono_start; + u32 chrono_stat[3]; + u32 write_seq; + u32 pushed_seq; + u32 lsndtime; + u32 mdev_us; + u32 rtt_seq; + u64 tcp_wstamp_ns; + struct list_head tsorted_sent_queue; + struct sk_buff *highest_sack; + u8 ecn_flags; + __u8 __cacheline_group_end__tcp_sock_write_tx[0]; + __u8 __cacheline_group_begin__tcp_sock_write_txrx[0]; + __be32 pred_flags; + long: 32; + u64 tcp_clock_cache; + u64 tcp_mstamp; + u32 rcv_nxt; + u32 snd_nxt; + u32 snd_una; + u32 window_clamp; + u32 srtt_us; + u32 packets_out; + u32 snd_up; + u32 delivered; + u32 delivered_ce; + u32 
app_limited; + u32 rcv_wnd; + struct tcp_options_received rx_opt; + u8 nonagle: 4; + u8 rate_app_limited: 1; + __u8 __cacheline_group_end__tcp_sock_write_txrx[0]; + long: 0; + __u8 __cacheline_group_begin__tcp_sock_write_rx[0]; + u64 bytes_received; + u32 segs_in; + u32 data_segs_in; + u32 rcv_wup; + u32 max_packets_out; + u32 cwnd_usage_seq; + u32 rate_delivered; + u32 rate_interval_us; + u32 rcv_rtt_last_tsecr; + u64 first_tx_mstamp; + u64 delivered_mstamp; + u64 bytes_acked; + struct { + u32 rtt_us; + u32 seq; + u64 time; + } rcv_rtt_est; + struct { + u32 space; + u32 seq; + u64 time; + } rcvq_space; + __u8 __cacheline_group_end__tcp_sock_write_rx[0]; + u32 dsack_dups; + u32 compressed_ack_rcv_nxt; + struct list_head tsq_node; + struct tcp_rack rack; + u8 compressed_ack; + u8 dup_ack_counter: 2; + u8 tlp_retrans: 1; + u8 unused: 5; + u8 thin_lto: 1; + u8 fastopen_connect: 1; + u8 fastopen_no_cookie: 1; + u8 fastopen_client_fail: 2; + u8 frto: 1; + u8 repair_queue; + u8 save_syn: 2; + u8 syn_data: 1; + u8 syn_fastopen: 1; + u8 syn_fastopen_exp: 1; + u8 syn_fastopen_ch: 1; + u8 syn_data_acked: 1; + u8 keepalive_probes; + u32 tcp_tx_delay; + u32 mdev_max_us; + u32 reord_seen; + u32 snd_cwnd_cnt; + u32 snd_cwnd_clamp; + u32 snd_cwnd_used; + u32 snd_cwnd_stamp; + u32 prior_cwnd; + u32 prr_delivered; + u32 last_oow_ack_time; + struct hrtimer pacing_timer; + struct hrtimer compressed_ack_timer; + struct sk_buff *ooo_last_skb; + struct tcp_sack_block duplicate_sack[1]; + struct tcp_sack_block selective_acks[4]; + struct tcp_sack_block recv_sack_cache[4]; + int lost_cnt_hint; + u32 prior_ssthresh; + u32 high_seq; + u32 retrans_stamp; + u32 undo_marker; + int undo_retrans; + long: 32; + u64 bytes_retrans; + u32 total_retrans; + u32 rto_stamp; + u16 total_rto; + u16 total_rto_recoveries; + u32 total_rto_time; + u32 urg_seq; + unsigned int keepalive_time; + unsigned int keepalive_intvl; + int linger2; + u8 bpf_sock_ops_cb_flags; + u8 bpf_chg_cc_inprogress: 1; + u16 timeout_rehash; + u32 rcv_ooopack; + struct { + u32 probe_seq_start; + u32 probe_seq_end; + } mtu_probe; + u32 plb_rehash; + u32 mtu_info; + struct tcp_fastopen_request *fastopen_req; + struct request_sock *fastopen_rsk; + struct saved_syn *saved_syn; + long: 32; +}; + +struct tcp_fastopen_request { + struct tcp_fastopen_cookie cookie; + struct msghdr *data; + size_t size; + int copied; + struct ubuf_info *uarg; +}; + +struct iphdr { + __u8 version: 4; + __u8 ihl: 4; + __u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + __u8 ttl; + __u8 protocol; + __sum16 check; + union { + struct { + __be32 saddr; + __be32 daddr; + }; + struct { + __be32 saddr; + __be32 daddr; + } addrs; + }; +}; + +struct ipv6_rt_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; +}; + +struct ipv6_opt_hdr { + __u8 nexthdr; + __u8 hdrlen; +}; + +struct ipv6hdr { + __u8 version: 4; + __u8 priority: 4; + __u8 flow_lbl[3]; + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; + union { + struct { + struct in6_addr saddr; + struct in6_addr daddr; + }; + struct { + struct in6_addr saddr; + struct in6_addr daddr; + } addrs; + }; +}; + +struct inet6_skb_parm { + int iif; + __be16 ra; + __u16 dst0; + __u16 srcrt; + __u16 dst1; + __u16 lastopt; + __u16 nhoff; + __u16 flags; + __u16 frag_max_size; + __u16 srhoff; +}; + +struct ip6_sf_socklist; + +struct ipv6_mc_socklist { + struct in6_addr addr; + int ifindex; + unsigned int sfmode; + struct ipv6_mc_socklist *next; + struct ip6_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct 
ipv6_ac_socklist { + struct in6_addr acl_addr; + int acl_ifindex; + struct ipv6_ac_socklist *acl_next; +}; + +struct ip6_flowlabel; + +struct ipv6_fl_socklist { + struct ipv6_fl_socklist *next; + struct ip6_flowlabel *fl; + struct callback_head rcu; +}; + +struct ip6_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + struct in6_addr sl_addr[0]; +}; + +struct ip6_flowlabel { + struct ip6_flowlabel *next; + __be32 label; + atomic_t users; + struct in6_addr dst; + struct ipv6_txoptions *opt; + long unsigned int linger; + struct callback_head rcu; + u8 share; + union { + struct pid *pid; + kuid_t uid; + } owner; + long unsigned int lastuse; + long unsigned int expires; + struct net *fl_net; +}; + +struct nd_opt_hdr { + __u8 nd_opt_type; + __u8 nd_opt_len; +}; + +struct ndisc_options { + struct nd_opt_hdr *nd_opt_array[15]; + struct nd_opt_hdr *nd_useropts; + struct nd_opt_hdr *nd_useropts_end; +}; + +struct prefix_info { + __u8 type; + __u8 length; + __u8 prefix_len; + union { + __u8 flags; + struct { + __u8 onlink: 1; + __u8 autoconf: 1; + __u8 routeraddr: 1; + __u8 preferpd: 1; + __u8 reserved: 4; + }; + }; + __be32 valid; + __be32 prefered; + __be32 reserved2; + struct in6_addr prefix; +}; + +struct inet_skb_parm { + int iif; + struct ip_options opt; + u16 flags; + u16 frag_max_size; +}; + +struct inet_ehash_bucket { + struct hlist_nulls_head chain; +}; + +struct inet_bind_hashbucket { + spinlock_t lock; + struct hlist_head chain; +}; + +struct inet_listen_hashbucket { + spinlock_t lock; + struct hlist_nulls_head nulls_head; +}; + +struct bpf_sock_ops_kern { + struct sock *sk; + union { + u32 args[4]; + u32 reply; + u32 replylong[4]; + }; + struct sk_buff *syn_skb; + struct sk_buff *skb; + void *skb_data_end; + u8 op; + u8 is_fullsock; + u8 remaining_opt_len; + long: 32; + u64 temp; +}; + +struct tcp_skb_cb { + __u32 seq; + __u32 end_seq; + union { + struct { + u16 tcp_gso_segs; + u16 tcp_gso_size; + }; + }; + __u8 tcp_flags; + __u8 sacked; + __u8 ip_dsfield; + __u8 txstamp_ack: 1; + __u8 eor: 1; + __u8 has_rxtstamp: 1; + __u8 unused: 5; + __u32 ack_seq; + long: 32; + union { + struct { + __u32 is_app_limited: 1; + __u32 delivered_ce: 20; + __u32 unused: 11; + __u32 delivered; + u64 first_tx_mstamp; + u64 delivered_mstamp; + } tx; + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + }; +}; + +struct ack_sample { + u32 pkts_acked; + s32 rtt_us; + u32 in_flight; +}; + +struct rate_sample { + u64 prior_mstamp; + u32 prior_delivered; + u32 prior_delivered_ce; + s32 delivered; + s32 delivered_ce; + long int interval_us; + u32 snd_interval_us; + u32 rcv_interval_us; + long int rtt_us; + int losses; + u32 acked_sacked; + u32 prior_in_flight; + u32 last_end_seq; + bool is_app_limited; + bool is_retrans; + bool is_ack_delayed; + long: 32; +}; + +enum skb_drop_reason { + SKB_NOT_DROPPED_YET = 0, + SKB_CONSUMED = 1, + SKB_DROP_REASON_NOT_SPECIFIED = 2, + SKB_DROP_REASON_NO_SOCKET = 3, + SKB_DROP_REASON_PKT_TOO_SMALL = 4, + SKB_DROP_REASON_TCP_CSUM = 5, + SKB_DROP_REASON_SOCKET_FILTER = 6, + SKB_DROP_REASON_UDP_CSUM = 7, + SKB_DROP_REASON_NETFILTER_DROP = 8, + SKB_DROP_REASON_OTHERHOST = 9, + SKB_DROP_REASON_IP_CSUM = 10, + SKB_DROP_REASON_IP_INHDR = 11, + SKB_DROP_REASON_IP_RPFILTER = 12, + SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST = 13, + SKB_DROP_REASON_XFRM_POLICY = 14, + SKB_DROP_REASON_IP_NOPROTO = 15, + SKB_DROP_REASON_SOCKET_RCVBUFF = 16, + SKB_DROP_REASON_PROTO_MEM = 17, + SKB_DROP_REASON_TCP_AUTH_HDR = 18, + 
SKB_DROP_REASON_TCP_MD5NOTFOUND = 19, + SKB_DROP_REASON_TCP_MD5UNEXPECTED = 20, + SKB_DROP_REASON_TCP_MD5FAILURE = 21, + SKB_DROP_REASON_TCP_AONOTFOUND = 22, + SKB_DROP_REASON_TCP_AOUNEXPECTED = 23, + SKB_DROP_REASON_TCP_AOKEYNOTFOUND = 24, + SKB_DROP_REASON_TCP_AOFAILURE = 25, + SKB_DROP_REASON_SOCKET_BACKLOG = 26, + SKB_DROP_REASON_TCP_FLAGS = 27, + SKB_DROP_REASON_TCP_ABORT_ON_DATA = 28, + SKB_DROP_REASON_TCP_ZEROWINDOW = 29, + SKB_DROP_REASON_TCP_OLD_DATA = 30, + SKB_DROP_REASON_TCP_OVERWINDOW = 31, + SKB_DROP_REASON_TCP_OFOMERGE = 32, + SKB_DROP_REASON_TCP_RFC7323_PAWS = 33, + SKB_DROP_REASON_TCP_OLD_SEQUENCE = 34, + SKB_DROP_REASON_TCP_INVALID_SEQUENCE = 35, + SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE = 36, + SKB_DROP_REASON_TCP_RESET = 37, + SKB_DROP_REASON_TCP_INVALID_SYN = 38, + SKB_DROP_REASON_TCP_CLOSE = 39, + SKB_DROP_REASON_TCP_FASTOPEN = 40, + SKB_DROP_REASON_TCP_OLD_ACK = 41, + SKB_DROP_REASON_TCP_TOO_OLD_ACK = 42, + SKB_DROP_REASON_TCP_ACK_UNSENT_DATA = 43, + SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE = 44, + SKB_DROP_REASON_TCP_OFO_DROP = 45, + SKB_DROP_REASON_IP_OUTNOROUTES = 46, + SKB_DROP_REASON_BPF_CGROUP_EGRESS = 47, + SKB_DROP_REASON_IPV6DISABLED = 48, + SKB_DROP_REASON_NEIGH_CREATEFAIL = 49, + SKB_DROP_REASON_NEIGH_FAILED = 50, + SKB_DROP_REASON_NEIGH_QUEUEFULL = 51, + SKB_DROP_REASON_NEIGH_DEAD = 52, + SKB_DROP_REASON_TC_EGRESS = 53, + SKB_DROP_REASON_SECURITY_HOOK = 54, + SKB_DROP_REASON_QDISC_DROP = 55, + SKB_DROP_REASON_CPU_BACKLOG = 56, + SKB_DROP_REASON_XDP = 57, + SKB_DROP_REASON_TC_INGRESS = 58, + SKB_DROP_REASON_UNHANDLED_PROTO = 59, + SKB_DROP_REASON_SKB_CSUM = 60, + SKB_DROP_REASON_SKB_GSO_SEG = 61, + SKB_DROP_REASON_SKB_UCOPY_FAULT = 62, + SKB_DROP_REASON_DEV_HDR = 63, + SKB_DROP_REASON_DEV_READY = 64, + SKB_DROP_REASON_FULL_RING = 65, + SKB_DROP_REASON_NOMEM = 66, + SKB_DROP_REASON_HDR_TRUNC = 67, + SKB_DROP_REASON_TAP_FILTER = 68, + SKB_DROP_REASON_TAP_TXFILTER = 69, + SKB_DROP_REASON_ICMP_CSUM = 70, + SKB_DROP_REASON_INVALID_PROTO = 71, + SKB_DROP_REASON_IP_INADDRERRORS = 72, + SKB_DROP_REASON_IP_INNOROUTES = 73, + SKB_DROP_REASON_IP_LOCAL_SOURCE = 74, + SKB_DROP_REASON_IP_INVALID_SOURCE = 75, + SKB_DROP_REASON_IP_LOCALNET = 76, + SKB_DROP_REASON_IP_INVALID_DEST = 77, + SKB_DROP_REASON_PKT_TOO_BIG = 78, + SKB_DROP_REASON_DUP_FRAG = 79, + SKB_DROP_REASON_FRAG_REASM_TIMEOUT = 80, + SKB_DROP_REASON_FRAG_TOO_FAR = 81, + SKB_DROP_REASON_TCP_MINTTL = 82, + SKB_DROP_REASON_IPV6_BAD_EXTHDR = 83, + SKB_DROP_REASON_IPV6_NDISC_FRAG = 84, + SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT = 85, + SKB_DROP_REASON_IPV6_NDISC_BAD_CODE = 86, + SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS = 87, + SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST = 88, + SKB_DROP_REASON_QUEUE_PURGE = 89, + SKB_DROP_REASON_TC_COOKIE_ERROR = 90, + SKB_DROP_REASON_PACKET_SOCK_ERROR = 91, + SKB_DROP_REASON_TC_CHAIN_NOTFOUND = 92, + SKB_DROP_REASON_TC_RECLASSIFY_LOOP = 93, + SKB_DROP_REASON_VXLAN_INVALID_HDR = 94, + SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND = 95, + SKB_DROP_REASON_MAC_INVALID_SOURCE = 96, + SKB_DROP_REASON_VXLAN_ENTRY_EXISTS = 97, + SKB_DROP_REASON_VXLAN_NO_REMOTE = 98, + SKB_DROP_REASON_IP_TUNNEL_ECN = 99, + SKB_DROP_REASON_TUNNEL_TXINFO = 100, + SKB_DROP_REASON_LOCAL_MAC = 101, + SKB_DROP_REASON_ARP_PVLAN_DISABLE = 102, + SKB_DROP_REASON_MAX = 103, + SKB_DROP_REASON_SUBSYS_MASK = 4294901760, +}; + +struct rt6key { + struct in6_addr addr; + int plen; +}; + +struct rtable; + +struct fnhe_hash_bucket; + +struct fib_nh_common { + struct net_device *nhc_dev; + netdevice_tracker nhc_dev_tracker; + int nhc_oif; + 
unsigned char nhc_scope; + u8 nhc_family; + u8 nhc_gw_family; + unsigned char nhc_flags; + struct lwtunnel_state *nhc_lwtstate; + union { + __be32 ipv4; + struct in6_addr ipv6; + } nhc_gw; + int nhc_weight; + atomic_t nhc_upper_bound; + struct rtable **nhc_pcpu_rth_output; + struct rtable *nhc_rth_input; + struct fnhe_hash_bucket *nhc_exceptions; +}; + +struct rt6_exception_bucket; + +struct fib6_nh { + struct fib_nh_common nh_common; + struct rt6_info **rt6i_pcpu; + struct rt6_exception_bucket *rt6i_exception_bucket; +}; + +struct fib6_node; + +struct dst_metrics; + +struct nexthop; + +struct fib6_info { + struct fib6_table *fib6_table; + struct fib6_info *fib6_next; + struct fib6_node *fib6_node; + union { + struct list_head fib6_siblings; + struct list_head nh_list; + }; + unsigned int fib6_nsiblings; + refcount_t fib6_ref; + long unsigned int expires; + struct hlist_node gc_link; + struct dst_metrics *fib6_metrics; + struct rt6key fib6_dst; + u32 fib6_flags; + struct rt6key fib6_src; + struct rt6key fib6_prefsrc; + u32 fib6_metric; + u8 fib6_protocol; + u8 fib6_type; + u8 offload; + u8 trap; + u8 offload_failed; + u8 should_flush: 1; + u8 dst_nocount: 1; + u8 dst_nopolicy: 1; + u8 fib6_destroying: 1; + u8 unused: 4; + struct callback_head rcu; + struct nexthop *nh; + struct fib6_nh fib6_nh[0]; +}; + +struct rt6_info { + struct dst_entry dst; + struct fib6_info *from; + int sernum; + struct rt6key rt6i_dst; + struct rt6key rt6i_src; + struct in6_addr rt6i_gateway; + struct inet6_dev *rt6i_idev; + u32 rt6i_flags; + short unsigned int rt6i_nfheader_len; +}; + +struct rt6_statistics { + __u32 fib_nodes; + __u32 fib_route_nodes; + __u32 fib_rt_entries; + __u32 fib_rt_cache; + __u32 fib_discarded_routes; + atomic_t fib_rt_alloc; +}; + +struct fib6_node { + struct fib6_node *parent; + struct fib6_node *left; + struct fib6_node *right; + struct fib6_info *leaf; + __u16 fn_bit; + __u16 fn_flags; + int fn_sernum; + struct fib6_info *rr_ptr; + struct callback_head rcu; +}; + +struct fib6_table { + struct hlist_node tb6_hlist; + u32 tb6_id; + spinlock_t tb6_lock; + struct fib6_node tb6_root; + struct inet_peer_base tb6_peers; + unsigned int flags; + unsigned int fib_seq; + struct hlist_head tb6_gc_hlist; +}; + +enum { + XFRM_POLICY_TYPE_MAIN = 0, + XFRM_POLICY_TYPE_SUB = 1, + XFRM_POLICY_TYPE_MAX = 2, + XFRM_POLICY_TYPE_ANY = 255, +}; + +enum { + XFRM_MSG_BASE = 16, + XFRM_MSG_NEWSA = 16, + XFRM_MSG_DELSA = 17, + XFRM_MSG_GETSA = 18, + XFRM_MSG_NEWPOLICY = 19, + XFRM_MSG_DELPOLICY = 20, + XFRM_MSG_GETPOLICY = 21, + XFRM_MSG_ALLOCSPI = 22, + XFRM_MSG_ACQUIRE = 23, + XFRM_MSG_EXPIRE = 24, + XFRM_MSG_UPDPOLICY = 25, + XFRM_MSG_UPDSA = 26, + XFRM_MSG_POLEXPIRE = 27, + XFRM_MSG_FLUSHSA = 28, + XFRM_MSG_FLUSHPOLICY = 29, + XFRM_MSG_NEWAE = 30, + XFRM_MSG_GETAE = 31, + XFRM_MSG_REPORT = 32, + XFRM_MSG_MIGRATE = 33, + XFRM_MSG_NEWSADINFO = 34, + XFRM_MSG_GETSADINFO = 35, + XFRM_MSG_NEWSPDINFO = 36, + XFRM_MSG_GETSPDINFO = 37, + XFRM_MSG_MAPPING = 38, + XFRM_MSG_SETDEFAULT = 39, + XFRM_MSG_GETDEFAULT = 40, + __XFRM_MSG_MAX = 41, +}; + +enum xfrm_attr_type_t { + XFRMA_UNSPEC = 0, + XFRMA_ALG_AUTH = 1, + XFRMA_ALG_CRYPT = 2, + XFRMA_ALG_COMP = 3, + XFRMA_ENCAP = 4, + XFRMA_TMPL = 5, + XFRMA_SA = 6, + XFRMA_POLICY = 7, + XFRMA_SEC_CTX = 8, + XFRMA_LTIME_VAL = 9, + XFRMA_REPLAY_VAL = 10, + XFRMA_REPLAY_THRESH = 11, + XFRMA_ETIMER_THRESH = 12, + XFRMA_SRCADDR = 13, + XFRMA_COADDR = 14, + XFRMA_LASTUSED = 15, + XFRMA_POLICY_TYPE = 16, + XFRMA_MIGRATE = 17, + XFRMA_ALG_AEAD = 18, + XFRMA_KMADDRESS = 19, + 
XFRMA_ALG_AUTH_TRUNC = 20, + XFRMA_MARK = 21, + XFRMA_TFCPAD = 22, + XFRMA_REPLAY_ESN_VAL = 23, + XFRMA_SA_EXTRA_FLAGS = 24, + XFRMA_PROTO = 25, + XFRMA_ADDRESS_FILTER = 26, + XFRMA_PAD = 27, + XFRMA_OFFLOAD_DEV = 28, + XFRMA_SET_MARK = 29, + XFRMA_SET_MARK_MASK = 30, + XFRMA_IF_ID = 31, + XFRMA_MTIMER_THRESH = 32, + XFRMA_SA_DIR = 33, + XFRMA_NAT_KEEPALIVE_INTERVAL = 34, + XFRMA_SA_PCPU = 35, + __XFRMA_MAX = 36, +}; + +struct dst_metrics { + u32 metrics[17]; + refcount_t refcnt; +}; + +struct fib_nh_exception { + struct fib_nh_exception *fnhe_next; + int fnhe_genid; + __be32 fnhe_daddr; + u32 fnhe_pmtu; + bool fnhe_mtu_locked; + __be32 fnhe_gw; + long unsigned int fnhe_expires; + struct rtable *fnhe_rth_input; + struct rtable *fnhe_rth_output; + long unsigned int fnhe_stamp; + struct callback_head rcu; +}; + +struct rtable { + struct dst_entry dst; + int rt_genid; + unsigned int rt_flags; + __u16 rt_type; + __u8 rt_is_input; + __u8 rt_uses_gateway; + int rt_iif; + u8 rt_gw_family; + union { + __be32 rt_gw4; + struct in6_addr rt_gw6; + }; + u32 rt_mtu_locked: 1; + u32 rt_pmtu: 31; +}; + +struct fnhe_hash_bucket { + struct fib_nh_exception *chain; +}; + +enum { + __ND_OPT_PREFIX_INFO_END = 0, + ND_OPT_SOURCE_LL_ADDR = 1, + ND_OPT_TARGET_LL_ADDR = 2, + ND_OPT_PREFIX_INFO = 3, + ND_OPT_REDIRECT_HDR = 4, + ND_OPT_MTU = 5, + ND_OPT_NONCE = 14, + __ND_OPT_ARRAY_MAX = 15, + ND_OPT_ROUTE_INFO = 24, + ND_OPT_RDNSS = 25, + ND_OPT_DNSSL = 31, + ND_OPT_6CO = 34, + ND_OPT_CAPTIVE_PORTAL = 37, + ND_OPT_PREF64 = 38, + __ND_OPT_MAX = 39, +}; + +struct net_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, u32); + unsigned int no_policy: 1; + unsigned int icmp_strict_tag_validation: 1; + u32 secret; +}; + +struct rt6_exception_bucket { + struct hlist_head chain; + int depth; +}; + +struct xfrm_tunnel { + int (*handler)(struct sk_buff *); + int (*cb_handler)(struct sk_buff *, int); + int (*err_handler)(struct sk_buff *, u32); + struct xfrm_tunnel *next; + int priority; +}; + +struct ipv6_rpl_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u32 cmpri: 4; + __u32 cmpre: 4; + __u32 pad: 4; + __u32 reserved: 20; + union { + struct { + struct {} __empty_addr; + struct in6_addr addr[0]; + }; + struct { + struct {} __empty_data; + __u8 data[0]; + }; + } segments; +}; + +enum { + SKB_GSO_TCPV4 = 1, + SKB_GSO_DODGY = 2, + SKB_GSO_TCP_ECN = 4, + SKB_GSO_TCP_FIXEDID = 8, + SKB_GSO_TCPV6 = 16, + SKB_GSO_FCOE = 32, + SKB_GSO_GRE = 64, + SKB_GSO_GRE_CSUM = 128, + SKB_GSO_IPXIP4 = 256, + SKB_GSO_IPXIP6 = 512, + SKB_GSO_UDP_TUNNEL = 1024, + SKB_GSO_UDP_TUNNEL_CSUM = 2048, + SKB_GSO_PARTIAL = 4096, + SKB_GSO_TUNNEL_REMCSUM = 8192, + SKB_GSO_SCTP = 16384, + SKB_GSO_ESP = 32768, + SKB_GSO_UDP = 65536, + SKB_GSO_UDP_L4 = 131072, + SKB_GSO_FRAGLIST = 262144, +}; + +struct udp_hslot; + +struct udp_hslot_main; + +struct udp_table { + struct udp_hslot *hash; + struct udp_hslot_main *hash2; + struct udp_hslot *hash4; + unsigned int mask; + unsigned int log; +}; + +struct offload_callbacks { + struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t); + struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sk_buff *, int); +}; + +struct packet_offload { + __be16 type; + u16 priority; + struct offload_callbacks callbacks; + struct list_head list; +}; + +struct udp_hslot { + union { + struct hlist_head head; + struct hlist_nulls_head nulls_head; + }; + int count; + spinlock_t lock; + long: 32; +}; + +struct 
udp_hslot_main { + struct udp_hslot hslot; + u32 hash4_cnt; + long: 32; +}; + +struct inet6_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); + unsigned int flags; + u32 secret; +}; + +struct net_offload { + struct offload_callbacks callbacks; + unsigned int flags; + u32 secret; +}; + +struct rps_sock_flow_table; + +struct net_hotdata { + struct packet_offload ip_packet_offload; + struct net_offload tcpv4_offload; + struct net_protocol tcp_protocol; + struct net_offload udpv4_offload; + struct net_protocol udp_protocol; + struct packet_offload ipv6_packet_offload; + struct net_offload tcpv6_offload; + struct inet6_protocol tcpv6_protocol; + struct inet6_protocol udpv6_protocol; + struct net_offload udpv6_offload; + struct list_head offload_base; + struct list_head ptype_all; + struct kmem_cache *skbuff_cache; + struct kmem_cache *skbuff_fclone_cache; + struct kmem_cache *skb_small_head_cache; + struct rps_sock_flow_table *rps_sock_flow_table; + u32 rps_cpu_mask; + int gro_normal_batch; + int netdev_budget; + int netdev_budget_usecs; + int tstamp_prequeue; + int max_backlog; + int dev_tx_weight; + int dev_rx_weight; + int sysctl_max_skb_frags; + int sysctl_skb_defer_max; + int sysctl_mem_pcpu_rsv; +}; + +struct napi_gro_cb { + union { + struct { + void *frag0; + unsigned int frag0_len; + }; + struct { + struct sk_buff *last; + long unsigned int age; + }; + }; + int data_offset; + u16 flush; + u16 count; + u16 proto; + u16 pad; + union { + struct { + u16 gro_remcsum_start; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 ip_fixedid: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + }; + struct { + u16 gro_remcsum_start; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 ip_fixedid: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + } zeroed; + }; + __wsum csum; + union { + struct { + u16 network_offset; + u16 inner_network_offset; + }; + u16 network_offsets[2]; + }; +}; + +typedef u8 uint8_t; + +struct nf_bridge_info { + enum { + BRNF_PROTO_UNCHANGED = 0, + BRNF_PROTO_8021Q = 1, + BRNF_PROTO_PPPOE = 2, + } orig_proto: 8; + u8 pkt_otherhost: 1; + u8 in_prerouting: 1; + u8 bridged_dnat: 1; + u8 sabotage_in_done: 1; + __u16 frag_max_size; + int physinif; + struct net_device *physoutdev; + union { + __be32 ipv4_daddr; + struct in6_addr ipv6_daddr; + char neigh_header[8]; + }; +}; + +struct ipv4_devconf { + void *sysctl; + int data[33]; + long unsigned int state[2]; +}; + +struct in_ifaddr; + +struct ip_mc_list; + +struct in_device { + struct net_device *dev; + netdevice_tracker dev_tracker; + refcount_t refcnt; + int dead; + struct in_ifaddr *ifa_list; + struct ip_mc_list *mc_list; + struct ip_mc_list **mc_hash; + int mc_count; + spinlock_t mc_tomb_lock; + struct ip_mc_list *mc_tomb; + long unsigned int mr_v1_seen; + long unsigned int mr_v2_seen; + long unsigned int mr_maxdelay; + long unsigned int mr_qi; + long unsigned int mr_qri; + unsigned char mr_qrv; + unsigned char mr_gq_running; + u32 mr_ifc_count; + struct timer_list mr_gq_timer; + struct timer_list mr_ifc_timer; + struct neigh_parms *arp_parms; + struct ipv4_devconf cnf; + struct callback_head callback_head; +}; + +struct qdisc_walker { + int stop; + int skip; + int count; + int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *); +}; + +struct tcf_walker { + int stop; + int skip; + int 
count; + bool nonempty; + long unsigned int cookie; + int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); +}; + +struct tcf_exts { + int action; + int police; +}; + +struct nf_queue_entry; + +struct nf_ipv6_ops { + void (*route_input)(struct sk_buff *); + int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); +}; + +struct nf_queue_entry { + struct list_head list; + struct sk_buff *skb; + unsigned int id; + unsigned int hook_index; + struct net_device *physin; + struct net_device *physout; + struct nf_hook_state state; + u16 size; +}; + +struct in_ifaddr { + struct hlist_node addr_lst; + struct in_ifaddr *ifa_next; + struct in_device *ifa_dev; + struct callback_head callback_head; + __be32 ifa_local; + __be32 ifa_address; + __be32 ifa_mask; + __u32 ifa_rt_priority; + __be32 ifa_broadcast; + unsigned char ifa_scope; + unsigned char ifa_prefixlen; + unsigned char ifa_proto; + __u32 ifa_flags; + char ifa_label[16]; + __u32 ifa_valid_lft; + __u32 ifa_preferred_lft; + long unsigned int ifa_cstamp; + long unsigned int ifa_tstamp; +}; + +struct bridge_stp_xstats { + __u64 transition_blk; + __u64 transition_fwd; + __u64 rx_bpdu; + __u64 tx_bpdu; + __u64 rx_tcn; + __u64 tx_tcn; +}; + +struct br_mcast_stats { + __u64 igmp_v1queries[2]; + __u64 igmp_v2queries[2]; + __u64 igmp_v3queries[2]; + __u64 igmp_leaves[2]; + __u64 igmp_v1reports[2]; + __u64 igmp_v2reports[2]; + __u64 igmp_v3reports[2]; + __u64 igmp_parse_errors; + __u64 mld_v1queries[2]; + __u64 mld_v2queries[2]; + __u64 mld_leaves[2]; + __u64 mld_v1reports[2]; + __u64 mld_v2reports[2]; + __u64 mld_parse_errors; + __u64 mcast_bytes[2]; + __u64 mcast_packets[2]; +}; + +struct br_ip { + union { + __be32 ip4; + struct in6_addr ip6; + } src; + union { + __be32 ip4; + struct in6_addr ip6; + unsigned char mac_addr[6]; + } dst; + __be16 proto; + __u16 vid; +}; + +struct bridge_id { + unsigned char prio[2]; + unsigned char addr[6]; +}; + +typedef struct bridge_id bridge_id; + +typedef __u16 port_id; + +struct bridge_mcast_own_query { + struct timer_list timer; + u32 startup_sent; +}; + +struct bridge_mcast_other_query { + struct timer_list timer; + struct timer_list delay_timer; +}; + +struct bridge_mcast_querier { + struct br_ip addr; + int port_ifidx; + seqcount_spinlock_t seq; +}; + +struct bridge_mcast_stats { + struct br_mcast_stats mstats; + struct u64_stats_sync syncp; + long: 32; +}; + +struct net_bridge; + +struct net_bridge_vlan; + +struct net_bridge_mcast { + struct net_bridge *br; + struct net_bridge_vlan *vlan; + u32 multicast_last_member_count; + u32 multicast_startup_query_count; + u8 multicast_querier; + u8 multicast_igmp_version; + u8 multicast_router; + u8 multicast_mld_version; + long unsigned int multicast_last_member_interval; + long unsigned int multicast_membership_interval; + long unsigned int multicast_querier_interval; + long unsigned int multicast_query_interval; + long unsigned int multicast_query_response_interval; + long unsigned int multicast_startup_query_interval; + struct hlist_head ip4_mc_router_list; + struct timer_list ip4_mc_router_timer; + struct bridge_mcast_other_query ip4_other_query; + struct bridge_mcast_own_query ip4_own_query; + struct bridge_mcast_querier ip4_querier; + struct hlist_head ip6_mc_router_list; + struct timer_list ip6_mc_router_timer; + struct bridge_mcast_other_query ip6_other_query; + struct bridge_mcast_own_query ip6_own_query; + struct 
bridge_mcast_querier ip6_querier; +}; + +struct net_bridge { + spinlock_t lock; + spinlock_t hash_lock; + struct hlist_head frame_type_list; + struct net_device *dev; + long unsigned int options; + struct rhashtable fdb_hash_tbl; + struct list_head port_list; + union { + struct rtable fake_rtable; + struct rt6_info fake_rt6_info; + }; + u16 group_fwd_mask; + u16 group_fwd_mask_required; + bridge_id designated_root; + bridge_id bridge_id; + unsigned char topology_change; + unsigned char topology_change_detected; + u16 root_port; + long unsigned int max_age; + long unsigned int hello_time; + long unsigned int forward_delay; + long unsigned int ageing_time; + long unsigned int bridge_max_age; + long unsigned int bridge_hello_time; + long unsigned int bridge_forward_delay; + long unsigned int bridge_ageing_time; + u32 root_path_cost; + u8 group_addr[6]; + enum { + BR_NO_STP = 0, + BR_KERNEL_STP = 1, + BR_USER_STP = 2, + } stp_enabled; + struct net_bridge_mcast multicast_ctx; + struct bridge_mcast_stats *mcast_stats; + u32 hash_max; + spinlock_t multicast_lock; + struct rhashtable mdb_hash_tbl; + struct rhashtable sg_port_tbl; + struct hlist_head mcast_gc_list; + struct hlist_head mdb_list; + struct work_struct mcast_gc_work; + struct timer_list hello_timer; + struct timer_list tcn_timer; + struct timer_list topology_change_timer; + struct delayed_work gc_work; + struct kobject *ifobj; + u32 auto_cnt; + atomic_t fdb_n_learned; + u32 fdb_max_learned; + struct hlist_head fdb_list; +}; + +struct net_bridge_port; + +struct net_bridge_mcast_port { + struct net_bridge_port *port; + struct net_bridge_vlan *vlan; + struct bridge_mcast_own_query ip4_own_query; + struct timer_list ip4_mc_router_timer; + struct hlist_node ip4_rlist; + struct bridge_mcast_own_query ip6_own_query; + struct timer_list ip6_mc_router_timer; + struct hlist_node ip6_rlist; + unsigned char multicast_router; + u32 mdb_n_entries; + u32 mdb_max_entries; +}; + +struct net_bridge_port { + struct net_bridge *br; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + long unsigned int flags; + struct net_bridge_port *backup_port; + u32 backup_nhid; + u8 priority; + u8 state; + u16 port_no; + unsigned char topology_change_ack; + unsigned char config_pending; + port_id port_id; + port_id designated_port; + bridge_id designated_root; + bridge_id designated_bridge; + u32 path_cost; + u32 designated_cost; + long unsigned int designated_age; + struct timer_list forward_delay_timer; + struct timer_list hold_timer; + struct timer_list message_age_timer; + struct kobject kobj; + struct callback_head rcu; + struct net_bridge_mcast_port multicast_ctx; + struct bridge_mcast_stats *mcast_stats; + u32 multicast_eht_hosts_limit; + u32 multicast_eht_hosts_cnt; + struct hlist_head mglist; + char sysfs_name[16]; + u16 group_fwd_mask; + u16 backup_redirected_cnt; + long: 32; + struct bridge_stp_xstats stp_xstats; +}; + +struct metadata_dst; + +struct br_tunnel_info { + __be64 tunnel_id; + struct metadata_dst *tunnel_dst; + long: 32; +}; + +struct net_bridge_vlan { + struct rhash_head vnode; + struct rhash_head tnode; + u16 vid; + u16 flags; + u16 priv_flags; + u8 state; + struct pcpu_sw_netstats *stats; + union { + struct net_bridge *br; + struct net_bridge_port *port; + }; + union { + refcount_t refcnt; + struct net_bridge_vlan *brvlan; + }; + long: 32; + struct br_tunnel_info tinfo; + union { + struct net_bridge_mcast br_mcast_ctx; + struct net_bridge_mcast_port port_mcast_ctx; + }; + u16 msti; + struct list_head vlist; + 
struct callback_head rcu; +}; + +struct sched_param { + int sched_priority; +}; + +struct kthread_work; + +typedef void (*kthread_work_func_t)(struct kthread_work *); + +struct kthread_worker; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + struct kthread_worker *worker; + int canceling; +}; + +enum { + KTW_FREEZABLE = 1, +}; + +struct kthread_worker { + unsigned int flags; + raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; + struct kthread_work *current_work; +}; + +struct kthread_delayed_work { + struct kthread_work work; + struct timer_list timer; +}; + +struct binfmt_misc { + struct list_head entries; + rwlock_t entries_lock; + bool enabled; +}; + +enum hk_type { + HK_TYPE_TIMER = 0, + HK_TYPE_RCU = 1, + HK_TYPE_MISC = 2, + HK_TYPE_SCHED = 3, + HK_TYPE_TICK = 4, + HK_TYPE_DOMAIN = 5, + HK_TYPE_WQ = 6, + HK_TYPE_MANAGED_IRQ = 7, + HK_TYPE_KTHREAD = 8, + HK_TYPE_MAX = 9, +}; + +struct kthread_create_info { + char *full_name; + int (*threadfn)(void *); + void *data; + int node; + struct task_struct *result; + struct completion *done; + struct list_head list; +}; + +struct kthread { + long unsigned int flags; + unsigned int cpu; + int result; + int (*threadfn)(void *); + void *data; + struct completion parked; + struct completion exited; + struct cgroup_subsys_state *blkcg_css; + char *full_name; +}; + +enum KTHREAD_BITS { + KTHREAD_IS_PER_CPU = 0, + KTHREAD_SHOULD_STOP = 1, + KTHREAD_SHOULD_PARK = 2, +}; + +struct kthread_flush_work { + struct kthread_work work; + struct completion done; +}; + +struct smp_hotplug_thread { + struct task_struct **store; + struct list_head list; + int (*thread_should_run)(unsigned int); + void (*thread_fn)(unsigned int); + void (*create)(unsigned int); + void (*setup)(unsigned int); + void (*cleanup)(unsigned int, bool); + void (*park)(unsigned int); + void (*unpark)(unsigned int); + bool selfparking; + const char *thread_comm; +}; + +struct smpboot_thread_data { + unsigned int cpu; + unsigned int status; + struct smp_hotplug_thread *ht; +}; + +enum { + HP_THREAD_NONE = 0, + HP_THREAD_ACTIVE = 1, + HP_THREAD_PARKED = 2, +}; + +struct rcu_cblist { + struct callback_head *head; + struct callback_head **tail; + long int len; +}; + +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; + struct { + enum bpf_cgroup_iter_order order; + __u32 cgroup_fd; + __u64 cgroup_id; + } cgroup; + struct { + __u32 tid; + __u32 pid; + __u32 pid_fd; + } task; +}; + +enum bpf_func_id { + BPF_FUNC_unspec = 0, + BPF_FUNC_map_lookup_elem = 1, + BPF_FUNC_map_update_elem = 2, + BPF_FUNC_map_delete_elem = 3, + BPF_FUNC_probe_read = 4, + BPF_FUNC_ktime_get_ns = 5, + BPF_FUNC_trace_printk = 6, + BPF_FUNC_get_prandom_u32 = 7, + BPF_FUNC_get_smp_processor_id = 8, + BPF_FUNC_skb_store_bytes = 9, + BPF_FUNC_l3_csum_replace = 10, + BPF_FUNC_l4_csum_replace = 11, + BPF_FUNC_tail_call = 12, + BPF_FUNC_clone_redirect = 13, + BPF_FUNC_get_current_pid_tgid = 14, + BPF_FUNC_get_current_uid_gid = 15, + BPF_FUNC_get_current_comm = 16, + BPF_FUNC_get_cgroup_classid = 17, + BPF_FUNC_skb_vlan_push = 18, + BPF_FUNC_skb_vlan_pop = 19, + BPF_FUNC_skb_get_tunnel_key = 20, + BPF_FUNC_skb_set_tunnel_key = 21, + BPF_FUNC_perf_event_read = 22, + BPF_FUNC_redirect = 23, + BPF_FUNC_get_route_realm = 24, + BPF_FUNC_perf_event_output = 25, + BPF_FUNC_skb_load_bytes = 26, + BPF_FUNC_get_stackid = 27, + BPF_FUNC_csum_diff = 28, + BPF_FUNC_skb_get_tunnel_opt = 29, + BPF_FUNC_skb_set_tunnel_opt = 30, + 
BPF_FUNC_skb_change_proto = 31, + BPF_FUNC_skb_change_type = 32, + BPF_FUNC_skb_under_cgroup = 33, + BPF_FUNC_get_hash_recalc = 34, + BPF_FUNC_get_current_task = 35, + BPF_FUNC_probe_write_user = 36, + BPF_FUNC_current_task_under_cgroup = 37, + BPF_FUNC_skb_change_tail = 38, + BPF_FUNC_skb_pull_data = 39, + BPF_FUNC_csum_update = 40, + BPF_FUNC_set_hash_invalid = 41, + BPF_FUNC_get_numa_node_id = 42, + BPF_FUNC_skb_change_head = 43, + BPF_FUNC_xdp_adjust_head = 44, + BPF_FUNC_probe_read_str = 45, + BPF_FUNC_get_socket_cookie = 46, + BPF_FUNC_get_socket_uid = 47, + BPF_FUNC_set_hash = 48, + BPF_FUNC_setsockopt = 49, + BPF_FUNC_skb_adjust_room = 50, + BPF_FUNC_redirect_map = 51, + BPF_FUNC_sk_redirect_map = 52, + BPF_FUNC_sock_map_update = 53, + BPF_FUNC_xdp_adjust_meta = 54, + BPF_FUNC_perf_event_read_value = 55, + BPF_FUNC_perf_prog_read_value = 56, + BPF_FUNC_getsockopt = 57, + BPF_FUNC_override_return = 58, + BPF_FUNC_sock_ops_cb_flags_set = 59, + BPF_FUNC_msg_redirect_map = 60, + BPF_FUNC_msg_apply_bytes = 61, + BPF_FUNC_msg_cork_bytes = 62, + BPF_FUNC_msg_pull_data = 63, + BPF_FUNC_bind = 64, + BPF_FUNC_xdp_adjust_tail = 65, + BPF_FUNC_skb_get_xfrm_state = 66, + BPF_FUNC_get_stack = 67, + BPF_FUNC_skb_load_bytes_relative = 68, + BPF_FUNC_fib_lookup = 69, + BPF_FUNC_sock_hash_update = 70, + BPF_FUNC_msg_redirect_hash = 71, + BPF_FUNC_sk_redirect_hash = 72, + BPF_FUNC_lwt_push_encap = 73, + BPF_FUNC_lwt_seg6_store_bytes = 74, + BPF_FUNC_lwt_seg6_adjust_srh = 75, + BPF_FUNC_lwt_seg6_action = 76, + BPF_FUNC_rc_repeat = 77, + BPF_FUNC_rc_keydown = 78, + BPF_FUNC_skb_cgroup_id = 79, + BPF_FUNC_get_current_cgroup_id = 80, + BPF_FUNC_get_local_storage = 81, + BPF_FUNC_sk_select_reuseport = 82, + BPF_FUNC_skb_ancestor_cgroup_id = 83, + BPF_FUNC_sk_lookup_tcp = 84, + BPF_FUNC_sk_lookup_udp = 85, + BPF_FUNC_sk_release = 86, + BPF_FUNC_map_push_elem = 87, + BPF_FUNC_map_pop_elem = 88, + BPF_FUNC_map_peek_elem = 89, + BPF_FUNC_msg_push_data = 90, + BPF_FUNC_msg_pop_data = 91, + BPF_FUNC_rc_pointer_rel = 92, + BPF_FUNC_spin_lock = 93, + BPF_FUNC_spin_unlock = 94, + BPF_FUNC_sk_fullsock = 95, + BPF_FUNC_tcp_sock = 96, + BPF_FUNC_skb_ecn_set_ce = 97, + BPF_FUNC_get_listener_sock = 98, + BPF_FUNC_skc_lookup_tcp = 99, + BPF_FUNC_tcp_check_syncookie = 100, + BPF_FUNC_sysctl_get_name = 101, + BPF_FUNC_sysctl_get_current_value = 102, + BPF_FUNC_sysctl_get_new_value = 103, + BPF_FUNC_sysctl_set_new_value = 104, + BPF_FUNC_strtol = 105, + BPF_FUNC_strtoul = 106, + BPF_FUNC_sk_storage_get = 107, + BPF_FUNC_sk_storage_delete = 108, + BPF_FUNC_send_signal = 109, + BPF_FUNC_tcp_gen_syncookie = 110, + BPF_FUNC_skb_output = 111, + BPF_FUNC_probe_read_user = 112, + BPF_FUNC_probe_read_kernel = 113, + BPF_FUNC_probe_read_user_str = 114, + BPF_FUNC_probe_read_kernel_str = 115, + BPF_FUNC_tcp_send_ack = 116, + BPF_FUNC_send_signal_thread = 117, + BPF_FUNC_jiffies64 = 118, + BPF_FUNC_read_branch_records = 119, + BPF_FUNC_get_ns_current_pid_tgid = 120, + BPF_FUNC_xdp_output = 121, + BPF_FUNC_get_netns_cookie = 122, + BPF_FUNC_get_current_ancestor_cgroup_id = 123, + BPF_FUNC_sk_assign = 124, + BPF_FUNC_ktime_get_boot_ns = 125, + BPF_FUNC_seq_printf = 126, + BPF_FUNC_seq_write = 127, + BPF_FUNC_sk_cgroup_id = 128, + BPF_FUNC_sk_ancestor_cgroup_id = 129, + BPF_FUNC_ringbuf_output = 130, + BPF_FUNC_ringbuf_reserve = 131, + BPF_FUNC_ringbuf_submit = 132, + BPF_FUNC_ringbuf_discard = 133, + BPF_FUNC_ringbuf_query = 134, + BPF_FUNC_csum_level = 135, + BPF_FUNC_skc_to_tcp6_sock = 136, + BPF_FUNC_skc_to_tcp_sock = 137, + 
BPF_FUNC_skc_to_tcp_timewait_sock = 138, + BPF_FUNC_skc_to_tcp_request_sock = 139, + BPF_FUNC_skc_to_udp6_sock = 140, + BPF_FUNC_get_task_stack = 141, + BPF_FUNC_load_hdr_opt = 142, + BPF_FUNC_store_hdr_opt = 143, + BPF_FUNC_reserve_hdr_opt = 144, + BPF_FUNC_inode_storage_get = 145, + BPF_FUNC_inode_storage_delete = 146, + BPF_FUNC_d_path = 147, + BPF_FUNC_copy_from_user = 148, + BPF_FUNC_snprintf_btf = 149, + BPF_FUNC_seq_printf_btf = 150, + BPF_FUNC_skb_cgroup_classid = 151, + BPF_FUNC_redirect_neigh = 152, + BPF_FUNC_per_cpu_ptr = 153, + BPF_FUNC_this_cpu_ptr = 154, + BPF_FUNC_redirect_peer = 155, + BPF_FUNC_task_storage_get = 156, + BPF_FUNC_task_storage_delete = 157, + BPF_FUNC_get_current_task_btf = 158, + BPF_FUNC_bprm_opts_set = 159, + BPF_FUNC_ktime_get_coarse_ns = 160, + BPF_FUNC_ima_inode_hash = 161, + BPF_FUNC_sock_from_file = 162, + BPF_FUNC_check_mtu = 163, + BPF_FUNC_for_each_map_elem = 164, + BPF_FUNC_snprintf = 165, + BPF_FUNC_sys_bpf = 166, + BPF_FUNC_btf_find_by_name_kind = 167, + BPF_FUNC_sys_close = 168, + BPF_FUNC_timer_init = 169, + BPF_FUNC_timer_set_callback = 170, + BPF_FUNC_timer_start = 171, + BPF_FUNC_timer_cancel = 172, + BPF_FUNC_get_func_ip = 173, + BPF_FUNC_get_attach_cookie = 174, + BPF_FUNC_task_pt_regs = 175, + BPF_FUNC_get_branch_snapshot = 176, + BPF_FUNC_trace_vprintk = 177, + BPF_FUNC_skc_to_unix_sock = 178, + BPF_FUNC_kallsyms_lookup_name = 179, + BPF_FUNC_find_vma = 180, + BPF_FUNC_loop = 181, + BPF_FUNC_strncmp = 182, + BPF_FUNC_get_func_arg = 183, + BPF_FUNC_get_func_ret = 184, + BPF_FUNC_get_func_arg_cnt = 185, + BPF_FUNC_get_retval = 186, + BPF_FUNC_set_retval = 187, + BPF_FUNC_xdp_get_buff_len = 188, + BPF_FUNC_xdp_load_bytes = 189, + BPF_FUNC_xdp_store_bytes = 190, + BPF_FUNC_copy_from_user_task = 191, + BPF_FUNC_skb_set_tstamp = 192, + BPF_FUNC_ima_file_hash = 193, + BPF_FUNC_kptr_xchg = 194, + BPF_FUNC_map_lookup_percpu_elem = 195, + BPF_FUNC_skc_to_mptcp_sock = 196, + BPF_FUNC_dynptr_from_mem = 197, + BPF_FUNC_ringbuf_reserve_dynptr = 198, + BPF_FUNC_ringbuf_submit_dynptr = 199, + BPF_FUNC_ringbuf_discard_dynptr = 200, + BPF_FUNC_dynptr_read = 201, + BPF_FUNC_dynptr_write = 202, + BPF_FUNC_dynptr_data = 203, + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 = 204, + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 = 205, + BPF_FUNC_tcp_raw_check_syncookie_ipv4 = 206, + BPF_FUNC_tcp_raw_check_syncookie_ipv6 = 207, + BPF_FUNC_ktime_get_tai_ns = 208, + BPF_FUNC_user_ringbuf_drain = 209, + BPF_FUNC_cgrp_storage_get = 210, + BPF_FUNC_cgrp_storage_delete = 211, + __BPF_FUNC_MAX_ID = 212, +}; + +enum bpf_arg_type { + ARG_DONTCARE = 0, + ARG_CONST_MAP_PTR = 1, + ARG_PTR_TO_MAP_KEY = 2, + ARG_PTR_TO_MAP_VALUE = 3, + ARG_PTR_TO_MEM = 4, + ARG_PTR_TO_ARENA = 5, + ARG_CONST_SIZE = 6, + ARG_CONST_SIZE_OR_ZERO = 7, + ARG_PTR_TO_CTX = 8, + ARG_ANYTHING = 9, + ARG_PTR_TO_SPIN_LOCK = 10, + ARG_PTR_TO_SOCK_COMMON = 11, + ARG_PTR_TO_SOCKET = 12, + ARG_PTR_TO_BTF_ID = 13, + ARG_PTR_TO_RINGBUF_MEM = 14, + ARG_CONST_ALLOC_SIZE_OR_ZERO = 15, + ARG_PTR_TO_BTF_ID_SOCK_COMMON = 16, + ARG_PTR_TO_PERCPU_BTF_ID = 17, + ARG_PTR_TO_FUNC = 18, + ARG_PTR_TO_STACK = 19, + ARG_PTR_TO_CONST_STR = 20, + ARG_PTR_TO_TIMER = 21, + ARG_KPTR_XCHG_DEST = 22, + ARG_PTR_TO_DYNPTR = 23, + __BPF_ARG_TYPE_MAX = 24, + ARG_PTR_TO_MAP_VALUE_OR_NULL = 259, + ARG_PTR_TO_MEM_OR_NULL = 260, + ARG_PTR_TO_CTX_OR_NULL = 264, + ARG_PTR_TO_SOCKET_OR_NULL = 268, + ARG_PTR_TO_STACK_OR_NULL = 275, + ARG_PTR_TO_BTF_ID_OR_NULL = 269, + ARG_PTR_TO_UNINIT_MEM = 67141636, + ARG_PTR_TO_FIXED_SIZE_MEM = 262148, + 
__BPF_ARG_TYPE_LIMIT = 134217727, +}; + +enum bpf_return_type { + RET_INTEGER = 0, + RET_VOID = 1, + RET_PTR_TO_MAP_VALUE = 2, + RET_PTR_TO_SOCKET = 3, + RET_PTR_TO_TCP_SOCK = 4, + RET_PTR_TO_SOCK_COMMON = 5, + RET_PTR_TO_MEM = 6, + RET_PTR_TO_MEM_OR_BTF_ID = 7, + RET_PTR_TO_BTF_ID = 8, + __BPF_RET_TYPE_MAX = 9, + RET_PTR_TO_MAP_VALUE_OR_NULL = 258, + RET_PTR_TO_SOCKET_OR_NULL = 259, + RET_PTR_TO_TCP_SOCK_OR_NULL = 260, + RET_PTR_TO_SOCK_COMMON_OR_NULL = 261, + RET_PTR_TO_RINGBUF_MEM_OR_NULL = 1286, + RET_PTR_TO_DYNPTR_MEM_OR_NULL = 262, + RET_PTR_TO_BTF_ID_OR_NULL = 264, + RET_PTR_TO_BTF_ID_TRUSTED = 1048584, + __BPF_RET_TYPE_LIMIT = 134217727, +}; + +struct bpf_func_proto { + u64 (*func)(u64, u64, u64, u64, u64); + bool gpl_only; + bool pkt_access; + bool might_sleep; + bool allow_fastcall; + enum bpf_return_type ret_type; + union { + struct { + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; + }; + enum bpf_arg_type arg_type[5]; + }; + union { + struct { + u32 *arg1_btf_id; + u32 *arg2_btf_id; + u32 *arg3_btf_id; + u32 *arg4_btf_id; + u32 *arg5_btf_id; + }; + u32 *arg_btf_id[5]; + struct { + size_t arg1_size; + size_t arg2_size; + size_t arg3_size; + size_t arg4_size; + size_t arg5_size; + }; + size_t arg_size[5]; + }; + int *ret_btf_id; + bool (*allowed)(const struct bpf_prog *); +}; + +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *, union bpf_iter_link_info *, struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *, struct seq_file *); + +typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *, struct bpf_link_info *); + +typedef const struct bpf_func_proto * (*bpf_iter_get_func_proto_t)(enum bpf_func_id, const struct bpf_prog *); + +enum bpf_iter_feature { + BPF_ITER_RESCHED = 1, +}; + +struct bpf_iter_reg { + const char *target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; + bpf_iter_show_fdinfo_t show_fdinfo; + bpf_iter_fill_link_info_t fill_link_info; + bpf_iter_get_func_proto_t get_func_proto; + u32 ctx_arg_info_size; + u32 feature; + struct bpf_ctx_arg_aux ctx_arg_info[2]; + const struct bpf_iter_seq_info *seq_info; +}; + +struct bpf_iter_meta { + union { + struct seq_file *seq; + }; + u64 session_id; + u64 seq_num; +}; + +struct kallsym_iter { + loff_t pos; + loff_t pos_mod_end; + loff_t pos_ftrace_mod_end; + loff_t pos_bpf_end; + long unsigned int value; + unsigned int nameoff; + char type; + char name[512]; + char module_name[60]; + int exported; + int show_value; +}; + +struct bpf_iter__ksym { + union { + struct bpf_iter_meta *meta; + }; + union { + struct kallsym_iter *ksym; + }; +}; + +struct ftrace_event_field { + struct list_head link; + const char *name; + const char *type; + int filter_type; + int offset; + int size; + int is_signed; + int len; +}; + +struct filter_pred; + +struct prog_entry { + int target; + int when_to_branch; + struct filter_pred *pred; +}; + +struct regex; + +typedef int (*regex_match_func)(char *, struct regex *, int); + +struct regex { + char pattern[256]; + int len; + int field_len; + regex_match_func match; +}; + +enum regex_type { + MATCH_FULL = 0, + MATCH_FRONT_ONLY = 1, + MATCH_MIDDLE_ONLY = 2, + MATCH_END_ONLY = 3, + MATCH_GLOB = 4, + MATCH_INDEX = 5, +}; + +enum filter_op_ids { + OP_GLOB = 0, + OP_NE = 1, + OP_EQ = 2, + OP_LE = 3, + OP_LT = 4, + OP_GE 
= 5, + OP_GT = 6, + OP_BAND = 7, + OP_MAX = 8, +}; + +enum filter_pred_fn { + FILTER_PRED_FN_NOP = 0, + FILTER_PRED_FN_64 = 1, + FILTER_PRED_FN_64_CPUMASK = 2, + FILTER_PRED_FN_S64 = 3, + FILTER_PRED_FN_U64 = 4, + FILTER_PRED_FN_32 = 5, + FILTER_PRED_FN_32_CPUMASK = 6, + FILTER_PRED_FN_S32 = 7, + FILTER_PRED_FN_U32 = 8, + FILTER_PRED_FN_16 = 9, + FILTER_PRED_FN_16_CPUMASK = 10, + FILTER_PRED_FN_S16 = 11, + FILTER_PRED_FN_U16 = 12, + FILTER_PRED_FN_8 = 13, + FILTER_PRED_FN_8_CPUMASK = 14, + FILTER_PRED_FN_S8 = 15, + FILTER_PRED_FN_U8 = 16, + FILTER_PRED_FN_COMM = 17, + FILTER_PRED_FN_STRING = 18, + FILTER_PRED_FN_STRLOC = 19, + FILTER_PRED_FN_STRRELLOC = 20, + FILTER_PRED_FN_PCHAR_USER = 21, + FILTER_PRED_FN_PCHAR = 22, + FILTER_PRED_FN_CPU = 23, + FILTER_PRED_FN_CPU_CPUMASK = 24, + FILTER_PRED_FN_CPUMASK = 25, + FILTER_PRED_FN_CPUMASK_CPU = 26, + FILTER_PRED_FN_FUNCTION = 27, + FILTER_PRED_FN_ = 28, + FILTER_PRED_TEST_VISITED = 29, +}; + +struct filter_pred { + struct regex *regex; + struct cpumask *mask; + short unsigned int *ops; + struct ftrace_event_field *field; + u64 val; + u64 val2; + enum filter_pred_fn fn_num; + int offset; + int not; + int op; +}; + +enum { + FILT_ERR_NONE = 0, + FILT_ERR_INVALID_OP = 1, + FILT_ERR_TOO_MANY_OPEN = 2, + FILT_ERR_TOO_MANY_CLOSE = 3, + FILT_ERR_MISSING_QUOTE = 4, + FILT_ERR_MISSING_BRACE_OPEN = 5, + FILT_ERR_MISSING_BRACE_CLOSE = 6, + FILT_ERR_OPERAND_TOO_LONG = 7, + FILT_ERR_EXPECT_STRING = 8, + FILT_ERR_EXPECT_DIGIT = 9, + FILT_ERR_ILLEGAL_FIELD_OP = 10, + FILT_ERR_FIELD_NOT_FOUND = 11, + FILT_ERR_ILLEGAL_INTVAL = 12, + FILT_ERR_BAD_SUBSYS_FILTER = 13, + FILT_ERR_TOO_MANY_PREDS = 14, + FILT_ERR_INVALID_FILTER = 15, + FILT_ERR_INVALID_CPULIST = 16, + FILT_ERR_IP_FIELD_ONLY = 17, + FILT_ERR_INVALID_VALUE = 18, + FILT_ERR_NO_FUNCTION = 19, + FILT_ERR_ERRNO = 20, + FILT_ERR_NO_FILTER = 21, +}; + +struct filter_parse_error { + int lasterr; + int lasterr_pos; +}; + +typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **); + +enum { + INVERT = 1, + PROCESS_AND = 2, + PROCESS_OR = 4, +}; + +struct ustring_buffer { + char buffer[1024]; +}; + +enum { + TOO_MANY_CLOSE = -1, + TOO_MANY_OPEN = -2, + MISSING_QUOTE = -3, +}; + +struct filter_list { + struct list_head list; + struct event_filter *filter; +}; + +enum { + BPF_ANY = 0, + BPF_NOEXIST = 1, + BPF_EXIST = 2, + BPF_F_LOCK = 4, +}; + +enum { + BPF_F_NO_PREALLOC = 1, + BPF_F_NO_COMMON_LRU = 2, + BPF_F_NUMA_NODE = 4, + BPF_F_RDONLY = 8, + BPF_F_WRONLY = 16, + BPF_F_STACK_BUILD_ID = 32, + BPF_F_ZERO_SEED = 64, + BPF_F_RDONLY_PROG = 128, + BPF_F_WRONLY_PROG = 256, + BPF_F_CLONE = 512, + BPF_F_MMAPABLE = 1024, + BPF_F_PRESERVE_ELEMS = 2048, + BPF_F_INNER_MAP = 4096, + BPF_F_LINK = 8192, + BPF_F_PATH_FD = 16384, + BPF_F_VTYPE_BTF_OBJ_FD = 32768, + BPF_F_TOKEN_FD = 65536, + BPF_F_SEGV_ON_FAULT = 131072, + BPF_F_NO_USER_CONV = 262144, +}; + +struct bpf_local_storage_data; + +struct bpf_local_storage { + struct bpf_local_storage_data *cache[16]; + struct bpf_local_storage_map *smap; + struct hlist_head list; + void *owner; + struct callback_head rcu; + raw_spinlock_t lock; +}; + +enum { + BTF_KIND_UNKN = 0, + BTF_KIND_INT = 1, + BTF_KIND_PTR = 2, + BTF_KIND_ARRAY = 3, + BTF_KIND_STRUCT = 4, + BTF_KIND_UNION = 5, + BTF_KIND_ENUM = 6, + BTF_KIND_FWD = 7, + BTF_KIND_TYPEDEF = 8, + BTF_KIND_VOLATILE = 9, + BTF_KIND_CONST = 10, + BTF_KIND_RESTRICT = 11, + BTF_KIND_FUNC = 12, + BTF_KIND_FUNC_PROTO = 13, + BTF_KIND_VAR = 14, + BTF_KIND_DATASEC = 15, + BTF_KIND_FLOAT = 
16, + BTF_KIND_DECL_TAG = 17, + BTF_KIND_TYPE_TAG = 18, + BTF_KIND_ENUM64 = 19, + NR_BTF_KINDS = 20, + BTF_KIND_MAX = 19, +}; + +struct bpf_mem_caches; + +struct bpf_mem_cache; + +struct bpf_mem_alloc { + struct bpf_mem_caches *caches; + struct bpf_mem_cache *cache; + struct obj_cgroup *objcg; + bool percpu; + struct work_struct work; +}; + +struct bpf_local_storage_map_bucket; + +struct bpf_local_storage_map { + struct bpf_map map; + struct bpf_local_storage_map_bucket *buckets; + u32 bucket_log; + u16 elem_size; + u16 cache_idx; + struct bpf_mem_alloc selem_ma; + struct bpf_mem_alloc storage_ma; + bool bpf_ma; +}; + +struct bpf_local_storage_map_bucket { + struct hlist_head list; + raw_spinlock_t lock; +}; + +struct bpf_local_storage_data { + struct bpf_local_storage_map *smap; + long: 32; + u8 data[0]; +}; + +struct bpf_local_storage_elem { + struct hlist_node map_node; + struct hlist_node snode; + struct bpf_local_storage *local_storage; + union { + struct callback_head rcu; + struct hlist_node free_node; + }; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct bpf_local_storage_data sdata; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_local_storage_cache { + spinlock_t idx_lock; + long: 32; + u64 idx_usage_counts[16]; +}; + +struct seqcount_rwlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_rwlock seqcount_rwlock_t; + +typedef struct { + struct srcu_struct *lock; + int idx; +} class_srcu_t; + +typedef u32 uprobe_opcode_t; + +struct uprobe { + struct rb_node rb_node; + refcount_t ref; + struct rw_semaphore register_rwsem; + struct rw_semaphore consumer_rwsem; + struct list_head pending_list; + struct list_head consumers; + struct inode *inode; + union { + struct callback_head rcu; + struct work_struct work; + }; + long: 32; + loff_t offset; + loff_t ref_ctr_offset; + long unsigned int flags; + struct arch_uprobe arch; +}; + +enum rp_check { + RP_CHECK_CALL = 0, + RP_CHECK_CHAIN_CALL = 1, + RP_CHECK_RET = 2, +}; + +struct xol_area { + wait_queue_head_t wq; + long unsigned int *bitmap; + struct page *page; + long unsigned int vaddr; +}; + +struct vma_iterator { + struct ma_state mas; +}; + +struct vm_special_mapping { + const char *name; + struct page **pages; + vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); + int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); + void (*close)(const struct vm_special_mapping *, struct vm_area_struct *); +}; + +struct page_vma_mapped_walk { + long unsigned int pfn; + long unsigned int nr_pages; + long unsigned int pgoff; + struct vm_area_struct *vma; + long unsigned int address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; + +typedef void (*task_work_func_t)(struct callback_head *); + +struct delayed_uprobe { + struct list_head list; + struct uprobe *uprobe; + struct mm_struct *mm; +}; + +struct __uprobe_key { + struct inode *inode; + long: 32; + loff_t offset; 
+}; + +struct map_info { + struct map_info *next; + struct mm_struct *mm; + long unsigned int vaddr; +}; + +enum pageblock_bits { + PB_migrate = 0, + PB_migrate_end = 2, + PB_migrate_skip = 3, + NR_PAGEBLOCK_BITS = 4, +}; + +enum migratetype { + MIGRATE_UNMOVABLE = 0, + MIGRATE_MOVABLE = 1, + MIGRATE_RECLAIMABLE = 2, + MIGRATE_PCPTYPES = 3, + MIGRATE_HIGHATOMIC = 3, + MIGRATE_CMA = 4, + MIGRATE_ISOLATE = 5, + MIGRATE_TYPES = 6, +}; + +enum zone_watermarks { + WMARK_MIN = 0, + WMARK_LOW = 1, + WMARK_HIGH = 2, + WMARK_PROMO = 3, + NR_WMARK = 4, +}; + +enum zone_flags { + ZONE_BOOSTED_WATERMARK = 0, + ZONE_RECLAIM_ACTIVE = 1, + ZONE_BELOW_HIGH = 2, +}; + +enum { + ZONELIST_FALLBACK = 0, + MAX_ZONELISTS = 1, +}; + +typedef struct pglist_data pg_data_t; + +enum meminit_context { + MEMINIT_EARLY = 0, + MEMINIT_HOTPLUG = 1, +}; + +enum migrate_reason { + MR_COMPACTION = 0, + MR_MEMORY_FAILURE = 1, + MR_MEMORY_HOTPLUG = 2, + MR_SYSCALL = 3, + MR_MEMPOLICY_MBIND = 4, + MR_NUMA_MISPLACED = 5, + MR_CONTIG_RANGE = 6, + MR_LONGTERM_PIN = 7, + MR_DEMOTION = 8, + MR_DAMON = 9, + MR_TYPES = 10, +}; + +enum oom_constraint { + CONSTRAINT_NONE = 0, + CONSTRAINT_CPUSET = 1, + CONSTRAINT_MEMORY_POLICY = 2, + CONSTRAINT_MEMCG = 3, +}; + +struct oom_control { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct mem_cgroup *memcg; + const gfp_t gfp_mask; + const int order; + long unsigned int totalpages; + struct task_struct *chosen; + long int chosen_points; + enum oom_constraint constraint; +}; + +enum compact_priority { + COMPACT_PRIO_SYNC_FULL = 0, + MIN_COMPACT_PRIORITY = 0, + COMPACT_PRIO_SYNC_LIGHT = 1, + MIN_COMPACT_COSTLY_PRIORITY = 1, + DEF_COMPACT_PRIORITY = 1, + COMPACT_PRIO_ASYNC = 2, + INIT_COMPACT_PRIORITY = 2, +}; + +enum compact_result { + COMPACT_NOT_SUITABLE_ZONE = 0, + COMPACT_SKIPPED = 1, + COMPACT_DEFERRED = 2, + COMPACT_NO_SUITABLE_PAGE = 3, + COMPACT_CONTINUE = 4, + COMPACT_COMPLETE = 5, + COMPACT_PARTIAL_SKIPPED = 6, + COMPACT_CONTENDED = 7, + COMPACT_SUCCESS = 8, +}; + +typedef struct folio *new_folio_t(struct folio *, long unsigned int); + +typedef void free_folio_t(struct folio *, long unsigned int); + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = 1, + CACHE_TYPE_DATA = 2, + CACHE_TYPE_SEPARATE = 3, + CACHE_TYPE_UNIFIED = 4, +}; + +struct cacheinfo { + unsigned int id; + enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; + void *fw_token; + bool disable_sysfs; + void *priv; +}; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int per_cpu_data_slice_size; + unsigned int num_levels; + unsigned int num_leaves; + bool cpu_map_populated; + bool early_ci_levels; +}; + +struct alloc_context { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct zoneref *preferred_zoneref; + int migratetype; + enum zone_type highest_zoneidx; + bool spread_dirty_pages; +}; + +struct migration_target_control { + int nid; + nodemask_t *nmask; + gfp_t gfp_mask; + enum migrate_reason reason; +}; + +typedef int fpi_t; + +typedef struct fd class_fd_t; + +struct xattr_args { + __u64 value; + __u32 size; + __u32 flags; +}; + +struct simple_xattrs { + struct rb_root rb_root; + rwlock_t lock; +}; + +struct simple_xattr { + struct rb_node rb_node; + char *name; + size_t size; + char value[0]; +}; + +enum { + UNAME26 = 131072, + ADDR_NO_RANDOMIZE = 262144, + 
FDPIC_FUNCPTRS = 524288, + MMAP_PAGE_ZERO = 1048576, + ADDR_COMPAT_LAYOUT = 2097152, + READ_IMPLIES_EXEC = 4194304, + ADDR_LIMIT_32BIT = 8388608, + SHORT_INODE = 16777216, + WHOLE_SECONDS = 33554432, + STICKY_TIMEOUTS = 67108864, + ADDR_LIMIT_3GB = 134217728, +}; + +enum fsnotify_iter_type { + FSNOTIFY_ITER_TYPE_INODE = 0, + FSNOTIFY_ITER_TYPE_VFSMOUNT = 1, + FSNOTIFY_ITER_TYPE_SB = 2, + FSNOTIFY_ITER_TYPE_PARENT = 3, + FSNOTIFY_ITER_TYPE_INODE2 = 4, + FSNOTIFY_ITER_TYPE_COUNT = 5, +}; + +struct xattr_name { + char name[256]; +}; + +struct kernel_xattr_ctx { + union { + const void *cvalue; + void *value; + }; + void *kvalue; + size_t size; + struct xattr_name *kname; + unsigned int flags; +}; + +struct nsset { + unsigned int flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +union proc_op { + int (*proc_get_link)(struct dentry *, struct path *); + int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); + int lsmid; +}; + +struct proc_inode { + struct pid *pid; + unsigned int fd; + union proc_op op; + struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + const struct ctl_table *sysctl_entry; + struct hlist_node sibling_inodes; + const struct proc_ns_operations *ns_ops; + long: 32; + struct inode vfs_inode; +}; + +typedef struct poll_table_struct poll_table; + +struct proc_mounts { + struct mnt_namespace *ns; + struct path root; + int (*show)(struct seq_file *, struct vfsmount *); +}; + +struct proc_fs_opts { + int flag; + const char *str; +}; + +struct posix_acl_xattr_entry { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +}; + +struct posix_acl_xattr_header { + __le32 a_version; +}; + +typedef __kernel_ulong_t ino_t; + +struct kernfs_global_locks { + struct mutex open_file_mutex[1024]; +}; + +enum kernfs_node_type { + KERNFS_DIR = 1, + KERNFS_FILE = 2, + KERNFS_LINK = 4, +}; + +enum kernfs_node_flag { + KERNFS_ACTIVATED = 16, + KERNFS_NS = 32, + KERNFS_HAS_SEQ_SHOW = 64, + KERNFS_HAS_MMAP = 128, + KERNFS_LOCKDEP = 256, + KERNFS_HIDDEN = 512, + KERNFS_SUICIDAL = 1024, + KERNFS_SUICIDED = 2048, + KERNFS_EMPTY_DIR = 4096, + KERNFS_HAS_RELEASE = 8192, + KERNFS_REMOVING = 16384, +}; + +enum kernfs_root_flag { + KERNFS_ROOT_CREATE_DEACTIVATED = 1, + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, + KERNFS_ROOT_SUPPORT_EXPORTOP = 4, + KERNFS_ROOT_SUPPORT_USER_XATTR = 8, +}; + +struct kernfs_syscall_ops; + +struct kernfs_root { + struct kernfs_node *kn; + unsigned int flags; + struct idr ino_idr; + u32 last_id_lowbits; + u32 id_highbits; + struct kernfs_syscall_ops *syscall_ops; + struct list_head supers; + wait_queue_head_t deactivate_waitq; + struct rw_semaphore kernfs_rwsem; + struct rw_semaphore kernfs_iattr_rwsem; + struct rw_semaphore kernfs_supers_rwsem; + struct callback_head rcu; +}; + +struct kernfs_open_node { + struct callback_head callback_head; + atomic_t event; + wait_queue_head_t poll; + struct list_head files; + unsigned int nr_mmapped; + unsigned int nr_to_release; +}; + +struct kernfs_iattrs { + kuid_t ia_uid; + kgid_t ia_gid; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct simple_xattrs xattrs; + atomic_t nr_user_xattrs; + atomic_t user_xattr_size; + long: 32; +}; + +struct kernfs_syscall_ops { + int (*show_options)(struct seq_file *, struct kernfs_root *); + int (*mkdir)(struct kernfs_node *, const char *, umode_t); + int (*rmdir)(struct kernfs_node *); + int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); + int 
(*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); +}; + +struct kernfs_super_info { + struct super_block *sb; + struct kernfs_root *root; + const void *ns; + struct list_head node; +}; + +enum jbd_state_bits { + BH_JBD = 16, + BH_JWrite = 17, + BH_Freed = 18, + BH_Revoked = 19, + BH_RevokeValid = 20, + BH_JBDDirty = 21, + BH_JournalHead = 22, + BH_Shadow = 23, + BH_Verified = 24, + BH_JBDPrivateStart = 25, +}; + +struct ext4_io_end_vec { + struct list_head list; + loff_t offset; + ssize_t size; + long: 32; +}; + +struct ext4_io_end { + struct list_head list; + handle_t *handle; + struct inode *inode; + struct bio *bio; + unsigned int flag; + refcount_t count; + struct list_head list_vec; +}; + +typedef struct ext4_io_end ext4_io_end_t; + +struct ext4_io_submit { + struct writeback_control *io_wbc; + struct bio *io_bio; + ext4_io_end_t *io_end; + long: 32; + sector_t io_next_block; +}; + +enum { + Opt_direct = 0, + Opt_fd = 1, + Opt_gid = 2, + Opt_ignore = 3, + Opt_indirect = 4, + Opt_maxproto = 5, + Opt_minproto = 6, + Opt_offset = 7, + Opt_pgrp = 8, + Opt_strictexpire = 9, + Opt_uid = 10, +}; + +struct autofs_fs_context { + kuid_t uid; + kgid_t gid; + int pgrp; + bool pgrp_set; +}; + +struct btrfs_map_token { + struct extent_buffer *eb; + char *kaddr; + long unsigned int offset; +}; + +struct btrfs_data_container { + __u32 bytes_left; + __u32 bytes_missing; + __u32 elem_cnt; + __u32 elem_missed; + __u64 val[0]; +}; + +enum btrfs_dev_stat_values { + BTRFS_DEV_STAT_WRITE_ERRS = 0, + BTRFS_DEV_STAT_READ_ERRS = 1, + BTRFS_DEV_STAT_FLUSH_ERRS = 2, + BTRFS_DEV_STAT_CORRUPTION_ERRS = 3, + BTRFS_DEV_STAT_GENERATION_ERRS = 4, + BTRFS_DEV_STAT_VALUES_MAX = 5, +}; + +struct btrfs_extent_item { + __le64 refs; + __le64 generation; + __le64 flags; +}; + +struct btrfs_tree_block_info { + struct btrfs_disk_key key; + __u8 level; +}; + +struct btrfs_extent_data_ref { + __le64 root; + __le64 objectid; + __le64 offset; + __le32 count; +}; + +struct btrfs_shared_data_ref { + __le32 count; +}; + +struct btrfs_extent_inline_ref { + __u8 type; + __le64 offset; +} __attribute__((packed)); + +struct btrfs_inode_ref { + __le64 index; + __le16 name_len; +} __attribute__((packed)); + +struct btrfs_inode_extref { + __le64 parent_objectid; + __le64 index; + __le16 name_len; + __u8 name[0]; +} __attribute__((packed)); + +enum { + BTRFS_FILE_EXTENT_INLINE = 0, + BTRFS_FILE_EXTENT_REG = 1, + BTRFS_FILE_EXTENT_PREALLOC = 2, + BTRFS_NR_FILE_EXTENT_TYPES = 3, +}; + +struct btrfs_tree_parent_check { + u64 owner_root; + u64 transid; + struct btrfs_key first_key; + bool has_first_key; + u8 level; + long: 32; +}; + +struct ulist_iterator { + struct list_head *cur_list; +}; + +struct rb_simple_node { + struct rb_node rb_node; + long: 32; + u64 bytenr; +}; + +enum { + EXTENT_BUFFER_UPTODATE = 0, + EXTENT_BUFFER_DIRTY = 1, + EXTENT_BUFFER_CORRUPT = 2, + EXTENT_BUFFER_READAHEAD = 3, + EXTENT_BUFFER_TREE_REF = 4, + EXTENT_BUFFER_STALE = 5, + EXTENT_BUFFER_WRITEBACK = 6, + EXTENT_BUFFER_READ_ERR = 7, + EXTENT_BUFFER_UNMAPPED = 8, + EXTENT_BUFFER_IN_TREE = 9, + EXTENT_BUFFER_WRITE_ERR = 10, + EXTENT_BUFFER_ZONED_ZEROOUT = 11, + EXTENT_BUFFER_READING = 12, +}; + +enum btrfs_discard_state { + BTRFS_DISCARD_EXTENTS = 0, + BTRFS_DISCARD_BITMAPS = 1, + BTRFS_DISCARD_RESET_CURSOR = 2, +}; + +struct btrfs_io_ctl { + void *cur; + void *orig; + struct page *page; + struct page **pages; + struct btrfs_fs_info *fs_info; + struct inode *inode; + long unsigned int size; + int index; + int num_pages; + int entries; + int 
bitmaps; +}; + +enum btrfs_block_group_size_class { + BTRFS_BG_SZ_NONE = 0, + BTRFS_BG_SZ_SMALL = 1, + BTRFS_BG_SZ_MEDIUM = 2, + BTRFS_BG_SZ_LARGE = 3, +}; + +struct btrfs_caching_control; + +struct btrfs_free_space_ctl; + +struct btrfs_chunk_map; + +struct btrfs_block_group { + struct btrfs_fs_info *fs_info; + struct btrfs_inode *inode; + spinlock_t lock; + long: 32; + u64 start; + u64 length; + u64 pinned; + u64 reserved; + u64 used; + u64 delalloc_bytes; + u64 bytes_super; + u64 flags; + u64 cache_generation; + u64 global_root_id; + u64 commit_used; + u32 bitmap_high_thresh; + u32 bitmap_low_thresh; + struct rw_semaphore data_rwsem; + long unsigned int full_stripe_len; + long unsigned int runtime_flags; + unsigned int ro; + int disk_cache_state; + int cached; + struct btrfs_caching_control *caching_ctl; + struct btrfs_space_info *space_info; + struct btrfs_free_space_ctl *free_space_ctl; + struct rb_node cache_node; + struct list_head list; + refcount_t refs; + struct list_head cluster_list; + struct list_head bg_list; + struct list_head ro_list; + atomic_t frozen; + struct list_head discard_list; + int discard_index; + long: 32; + u64 discard_eligible_time; + u64 discard_cursor; + enum btrfs_discard_state discard_state; + struct list_head dirty_list; + struct list_head io_list; + struct btrfs_io_ctl io_ctl; + atomic_t reservations; + atomic_t nocow_writers; + struct mutex free_space_lock; + int swap_extents; + long: 32; + u64 alloc_offset; + u64 zone_unusable; + u64 zone_capacity; + u64 meta_write_pointer; + struct btrfs_chunk_map *physical_map; + struct list_head active_bg_list; + struct work_struct zone_finish_work; + struct extent_buffer *last_eb; + enum btrfs_block_group_size_class size_class; + long: 32; + u64 reclaim_mark; +}; + +typedef int iterate_extent_inodes_t(u64, u64, u64, u64, void *); + +struct btrfs_backref_walk_ctx { + u64 bytenr; + u64 extent_item_pos; + bool ignore_extent_item_pos; + bool skip_inode_ref_list; + struct btrfs_trans_handle *trans; + struct btrfs_fs_info *fs_info; + long: 32; + u64 time_seq; + struct ulist *refs; + struct ulist *roots; + bool (*cache_lookup)(u64, void *, const u64 **, int *); + void (*cache_store)(u64, const struct ulist *, void *); + iterate_extent_inodes_t *indirect_ref_iterator; + int (*check_extent_item)(u64, const struct btrfs_extent_item *, const struct extent_buffer *, void *); + bool (*skip_data_ref)(u64, u64, u64, void *); + void *user_ctx; +}; + +struct inode_fs_paths { + struct btrfs_path *btrfs_path; + struct btrfs_root *fs_root; + struct btrfs_data_container *fspath; +}; + +struct btrfs_backref_shared_cache_entry { + u64 bytenr; + u64 gen; + bool is_shared; + long: 32; +}; + +struct btrfs_backref_share_check_ctx { + struct ulist refs; + long: 32; + u64 curr_leaf_bytenr; + u64 prev_leaf_bytenr; + struct btrfs_backref_shared_cache_entry path_cache_entries[8]; + bool use_path_cache; + long: 32; + struct { + u64 bytenr; + bool is_shared; + long: 32; + } prev_extents_cache[8]; + int prev_extents_cache_slot; + long: 32; +}; + +struct extent_inode_elem; + +struct prelim_ref { + struct rb_node rbnode; + long: 32; + u64 root_id; + struct btrfs_key key_for_search; + u8 level; + int count; + struct extent_inode_elem *inode_list; + long: 32; + u64 parent; + u64 wanted_disk_byte; +}; + +struct extent_inode_elem { + u64 inum; + u64 offset; + u64 num_bytes; + struct extent_inode_elem *next; + long: 32; +}; + +struct btrfs_backref_iter { + u64 bytenr; + struct btrfs_path *path; + struct btrfs_fs_info *fs_info; + struct btrfs_key cur_key; + 
u32 item_ptr; + u32 cur_ptr; + u32 end_ptr; +}; + +struct btrfs_backref_node { + struct { + struct rb_node rb_node; + long: 32; + u64 bytenr; + }; + u64 new_bytenr; + u64 owner; + struct list_head list; + struct list_head upper; + struct list_head lower; + struct btrfs_root *root; + struct extent_buffer *eb; + unsigned int level: 8; + unsigned int cowonly: 1; + unsigned int lowest: 1; + unsigned int locked: 1; + unsigned int processed: 1; + unsigned int checked: 1; + unsigned int pending: 1; + unsigned int detached: 1; + unsigned int is_reloc_root: 1; + long: 32; +}; + +struct btrfs_backref_edge { + struct list_head list[2]; + struct btrfs_backref_node *node[2]; +}; + +struct btrfs_backref_cache { + struct rb_root rb_root; + struct btrfs_backref_node *path[8]; + struct list_head pending[8]; + struct list_head leaves; + struct list_head changed; + struct list_head detached; + long: 32; + u64 last_trans; + int nr_nodes; + int nr_edges; + struct list_head pending_edge; + struct list_head useless_node; + struct btrfs_fs_info *fs_info; + bool is_reloc; +}; + +enum btrfs_delayed_ref_action { + BTRFS_ADD_DELAYED_REF = 1, + BTRFS_DROP_DELAYED_REF = 2, + BTRFS_ADD_DELAYED_EXTENT = 3, + BTRFS_UPDATE_DELAYED_HEAD = 4, +} __attribute__((mode(byte))); + +struct btrfs_data_ref { + u64 objectid; + u64 offset; +}; + +struct btrfs_tree_ref { + int level; +}; + +struct btrfs_delayed_ref_node { + struct rb_node ref_node; + struct list_head add_list; + long: 32; + u64 bytenr; + u64 num_bytes; + u64 seq; + u64 ref_root; + u64 parent; + refcount_t refs; + int ref_mod; + unsigned int action: 8; + unsigned int type: 8; + long: 32; + union { + struct btrfs_tree_ref tree_ref; + struct btrfs_data_ref data_ref; + }; +}; + +struct btrfs_delayed_extent_op { + struct btrfs_disk_key key; + bool update_key; + bool update_flags; + long: 32; + u64 flags_to_set; +}; + +struct btrfs_delayed_ref_head { + u64 bytenr; + u64 num_bytes; + struct mutex mutex; + refcount_t refs; + spinlock_t lock; + struct rb_root_cached ref_tree; + struct list_head ref_add_list; + struct btrfs_delayed_extent_op *extent_op; + int total_ref_mod; + int ref_mod; + long: 32; + u64 owning_root; + u64 reserved_bytes; + u8 level; + bool must_insert_reserved; + bool is_data; + bool is_system; + bool processing; + bool tracked; +}; + +enum { + ____TRANS_FREEZABLE_BIT = 0, + __TRANS_FREEZABLE = 1, + ____TRANS_FREEZABLE_SEQ = 0, + ____TRANS_START_BIT = 1, + __TRANS_START = 2, + ____TRANS_START_SEQ = 1, + ____TRANS_ATTACH_BIT = 2, + __TRANS_ATTACH = 4, + ____TRANS_ATTACH_SEQ = 2, + ____TRANS_JOIN_BIT = 3, + __TRANS_JOIN = 8, + ____TRANS_JOIN_SEQ = 3, + ____TRANS_JOIN_NOLOCK_BIT = 4, + __TRANS_JOIN_NOLOCK = 16, + ____TRANS_JOIN_NOLOCK_SEQ = 4, + ____TRANS_DUMMY_BIT = 5, + __TRANS_DUMMY = 32, + ____TRANS_DUMMY_SEQ = 5, + ____TRANS_JOIN_NOSTART_BIT = 6, + __TRANS_JOIN_NOSTART = 64, + ____TRANS_JOIN_NOSTART_SEQ = 6, +}; + +struct btrfs_seq_list { + struct list_head list; + u64 seq; +}; + +enum btrfs_trim_state { + BTRFS_TRIM_STATE_UNTRIMMED = 0, + BTRFS_TRIM_STATE_TRIMMED = 1, + BTRFS_TRIM_STATE_TRIMMING = 2, +}; + +struct btrfs_free_space { + struct rb_node offset_index; + struct rb_node bytes_index; + u64 offset; + u64 bytes; + u64 max_extent_size; + long unsigned int *bitmap; + struct list_head list; + enum btrfs_trim_state trim_state; + s32 bitmap_extents; + long: 32; +}; + +enum { + BTRFS_STAT_CURR = 0, + BTRFS_STAT_PREV = 1, + BTRFS_STAT_NR_ENTRIES = 2, +}; + +struct btrfs_free_space_op; + +struct btrfs_free_space_ctl { + spinlock_t tree_lock; + struct 
rb_root free_space_offset; + struct rb_root_cached free_space_bytes; + u64 free_space; + int extents_thresh; + int free_extents; + int total_bitmaps; + int unit; + u64 start; + s32 discardable_extents[2]; + s64 discardable_bytes[2]; + const struct btrfs_free_space_op *op; + struct btrfs_block_group *block_group; + struct mutex cache_writeout_mutex; + struct list_head trimming_ranges; +}; + +struct btrfs_free_space_op { + bool (*use_bitmap)(struct btrfs_free_space_ctl *, struct btrfs_free_space *); +}; + +struct btrfs_caching_control { + struct list_head list; + struct mutex mutex; + wait_queue_head_t wait; + struct btrfs_work work; + struct btrfs_block_group *block_group; + atomic_t progress; + refcount_t count; +}; + +enum btrfs_inline_ref_type { + BTRFS_REF_TYPE_INVALID = 0, + BTRFS_REF_TYPE_BLOCK = 1, + BTRFS_REF_TYPE_DATA = 2, + BTRFS_REF_TYPE_ANY = 3, +}; + +struct preftree { + struct rb_root_cached root; + unsigned int count; +}; + +struct preftrees { + struct preftree direct; + struct preftree indirect; + struct preftree indirect_missing_keys; +}; + +struct share_check { + struct btrfs_backref_share_check_ctx *ctx; + struct btrfs_root *root; + u64 inum; + u64 data_bytenr; + u64 data_extent_gen; + int share_count; + int self_ref_count; + bool have_delayed_delete_refs; + long: 32; +}; + +struct kernel_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; +}; + +enum kernel_pkey_operation { + kernel_pkey_encrypt = 0, + kernel_pkey_decrypt = 1, + kernel_pkey_sign = 2, + kernel_pkey_verify = 3, +}; + +struct kernel_pkey_params { + struct key *key; + const char *encoding; + const char *hash_algo; + char *info; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + enum kernel_pkey_operation op: 8; +}; + +enum key_need_perm { + KEY_NEED_UNSPECIFIED = 0, + KEY_NEED_VIEW = 1, + KEY_NEED_READ = 2, + KEY_NEED_WRITE = 3, + KEY_NEED_SEARCH = 4, + KEY_NEED_LINK = 5, + KEY_NEED_SETATTR = 6, + KEY_NEED_UNLINK = 7, + KEY_SYSADMIN_OVERRIDE = 8, + KEY_AUTHTOKEN_OVERRIDE = 9, + KEY_DEFER_PERM_CHECK = 10, +}; + +enum key_lookup_flag { + KEY_LOOKUP_CREATE = 1, + KEY_LOOKUP_PARTIAL = 2, + KEY_LOOKUP_ALL = 3, +}; + +struct __key_reference_with_attributes; + +typedef struct __key_reference_with_attributes *key_ref_t; + +enum key_state { + KEY_IS_UNINSTANTIATED = 0, + KEY_IS_POSITIVE = 1, +}; + +struct key_user { + struct rb_node node; + struct mutex cons_lock; + spinlock_t lock; + refcount_t usage; + atomic_t nkeys; + atomic_t nikeys; + kuid_t uid; + int qnkeys; + int qnbytes; +}; + +struct request_key_auth { + struct callback_head rcu; + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +}; + +struct key_preparsed_payload { + const char *orig_description; + char *description; + union key_payload payload; + const void *data; + size_t datalen; + size_t quotalen; + long: 32; + time64_t expiry; +}; + +struct key_match_data { + bool (*cmp)(const struct key *, const struct key_match_data *); + const void *raw_data; + void *preparsed; + unsigned int lookup_type; +}; + +struct keyring_search_context { + struct keyring_index_key index_key; + const struct cred *cred; + struct key_match_data match_data; + unsigned int flags; + int (*iterator)(const void *, void *); + int skipped_ret; + bool possessed; + key_ref_t result; + long: 32; + time64_t now; +}; + +struct chksum_ctx { + u32 key; +}; + +struct chksum_desc_ctx { + 
u32 crc; +}; + +enum asn1_class { + ASN1_UNIV = 0, + ASN1_APPL = 1, + ASN1_CONT = 2, + ASN1_PRIV = 3, +}; + +enum asn1_method { + ASN1_PRIM = 0, + ASN1_CONS = 1, +}; + +enum asn1_tag { + ASN1_EOC = 0, + ASN1_BOOL = 1, + ASN1_INT = 2, + ASN1_BTS = 3, + ASN1_OTS = 4, + ASN1_NULL = 5, + ASN1_OID = 6, + ASN1_ODE = 7, + ASN1_EXT = 8, + ASN1_REAL = 9, + ASN1_ENUM = 10, + ASN1_EPDV = 11, + ASN1_UTF8STR = 12, + ASN1_RELOID = 13, + ASN1_SEQ = 16, + ASN1_SET = 17, + ASN1_NUMSTR = 18, + ASN1_PRNSTR = 19, + ASN1_TEXSTR = 20, + ASN1_VIDSTR = 21, + ASN1_IA5STR = 22, + ASN1_UNITIM = 23, + ASN1_GENTIM = 24, + ASN1_GRASTR = 25, + ASN1_VISSTR = 26, + ASN1_GENSTR = 27, + ASN1_UNISTR = 28, + ASN1_CHRSTR = 29, + ASN1_BMPSTR = 30, + ASN1_LONG_TAG = 31, +}; + +typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); + +struct asn1_decoder { + const unsigned char *machine; + size_t machlen; + const asn1_action_t *actions; +}; + +enum asn1_opcode { + ASN1_OP_MATCH = 0, + ASN1_OP_MATCH_OR_SKIP = 1, + ASN1_OP_MATCH_ACT = 2, + ASN1_OP_MATCH_ACT_OR_SKIP = 3, + ASN1_OP_MATCH_JUMP = 4, + ASN1_OP_MATCH_JUMP_OR_SKIP = 5, + ASN1_OP_MATCH_ANY = 8, + ASN1_OP_MATCH_ANY_OR_SKIP = 9, + ASN1_OP_MATCH_ANY_ACT = 10, + ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, + ASN1_OP_COND_MATCH_OR_SKIP = 17, + ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, + ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, + ASN1_OP_COND_MATCH_ANY = 24, + ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, + ASN1_OP_COND_MATCH_ANY_ACT = 26, + ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, + ASN1_OP_COND_FAIL = 28, + ASN1_OP_COMPLETE = 29, + ASN1_OP_ACT = 30, + ASN1_OP_MAYBE_ACT = 31, + ASN1_OP_END_SEQ = 32, + ASN1_OP_END_SET = 33, + ASN1_OP_END_SEQ_OF = 34, + ASN1_OP_END_SET_OF = 35, + ASN1_OP_END_SEQ_ACT = 36, + ASN1_OP_END_SET_ACT = 37, + ASN1_OP_END_SEQ_OF_ACT = 38, + ASN1_OP_END_SET_OF_ACT = 39, + ASN1_OP_RETURN = 40, + ASN1_OP__NR = 41, +}; + +enum pkcs7_actions { + ACT_pkcs7_check_content_type = 0, + ACT_pkcs7_extract_cert = 1, + ACT_pkcs7_note_OID = 2, + ACT_pkcs7_note_certificate_list = 3, + ACT_pkcs7_note_content = 4, + ACT_pkcs7_note_data = 5, + ACT_pkcs7_note_signed_info = 6, + ACT_pkcs7_note_signeddata_version = 7, + ACT_pkcs7_note_signerinfo_version = 8, + ACT_pkcs7_sig_note_authenticated_attr = 9, + ACT_pkcs7_sig_note_digest_algo = 10, + ACT_pkcs7_sig_note_issuer = 11, + ACT_pkcs7_sig_note_pkey_algo = 12, + ACT_pkcs7_sig_note_serial = 13, + ACT_pkcs7_sig_note_set_of_authattrs = 14, + ACT_pkcs7_sig_note_signature = 15, + ACT_pkcs7_sig_note_skid = 16, + NR__pkcs7_actions = 17, +}; + +enum wq_flags { + WQ_BH = 1, + WQ_UNBOUND = 2, + WQ_FREEZABLE = 4, + WQ_MEM_RECLAIM = 8, + WQ_HIGHPRI = 16, + WQ_CPU_INTENSIVE = 32, + WQ_SYSFS = 64, + WQ_POWER_EFFICIENT = 128, + __WQ_DESTROYING = 32768, + __WQ_DRAINING = 65536, + __WQ_ORDERED = 131072, + __WQ_LEGACY = 262144, + __WQ_BH_ALLOWS = 17, +}; + +struct bvec_iter_all { + struct bio_vec bv; + int idx; + unsigned int done; +}; + +typedef unsigned int iov_iter_extraction_t; + +enum iter_type { + ITER_UBUF = 0, + ITER_IOVEC = 1, + ITER_BVEC = 2, + ITER_KVEC = 3, + ITER_FOLIOQ = 4, + ITER_XARRAY = 5, + ITER_DISCARD = 6, +}; + +struct bio_alloc_cache { + struct bio *free_list; + struct bio *free_list_irq; + unsigned int nr; + unsigned int nr_irq; +}; + +struct bio_integrity_payload { + struct bio *bip_bio; + struct bvec_iter bip_iter; + short unsigned int bip_vcnt; + short unsigned int bip_max_vcnt; + short unsigned int bip_flags; + struct bvec_iter bio_iter; + struct work_struct bip_work; + struct bio_vec *bip_vec; + struct bio_vec 
bip_inline_vecs[0]; +}; + +enum xen_domain_type { + XEN_NATIVE = 0, + XEN_PV_DOMAIN = 1, + XEN_HVM_DOMAIN = 2, +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; + +struct io_uring_rsrc_update { + __u32 offset; + __u32 resv; + __u64 data; +}; + +typedef struct io_wq_work *free_work_fn(struct io_wq_work *); + +typedef void io_wq_work_fn(struct io_wq_work *); + +struct io_wq_data { + struct io_wq_hash *hash; + struct task_struct *task; + io_wq_work_fn *do_work; + free_work_fn *free_work; +}; + +struct io_tctx_node { + struct list_head ctx_node; + struct task_struct *task; + struct io_ring_ctx *ctx; +}; + +struct robust_list { + struct robust_list *next; +}; + +struct robust_list_head { + struct robust_list list; + long int futex_offset; + struct robust_list *list_op_pending; +}; + +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +union futex_key { + struct { + u64 i_seq; + long unsigned int pgoff; + unsigned int offset; + } shared; + struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; + long unsigned int address; + unsigned int offset; + } private; + struct { + u64 ptr; + long unsigned int word; + unsigned int offset; + } both; +}; + +struct futex_pi_state { + struct list_head list; + struct rt_mutex_base pi_mutex; + struct task_struct *owner; + refcount_t refcount; + union futex_key key; +}; + +enum io_uring_op { + IORING_OP_NOP = 0, + IORING_OP_READV = 1, + IORING_OP_WRITEV = 2, + IORING_OP_FSYNC = 3, + IORING_OP_READ_FIXED = 4, + IORING_OP_WRITE_FIXED = 5, + IORING_OP_POLL_ADD = 6, + IORING_OP_POLL_REMOVE = 7, + IORING_OP_SYNC_FILE_RANGE = 8, + IORING_OP_SENDMSG = 9, + IORING_OP_RECVMSG = 10, + IORING_OP_TIMEOUT = 11, + IORING_OP_TIMEOUT_REMOVE = 12, + IORING_OP_ACCEPT = 13, + IORING_OP_ASYNC_CANCEL = 14, + IORING_OP_LINK_TIMEOUT = 15, + IORING_OP_CONNECT = 16, + IORING_OP_FALLOCATE = 17, + IORING_OP_OPENAT = 18, + IORING_OP_CLOSE = 19, + IORING_OP_FILES_UPDATE = 20, + IORING_OP_STATX = 21, + IORING_OP_READ = 22, + IORING_OP_WRITE = 23, + IORING_OP_FADVISE = 24, + IORING_OP_MADVISE = 25, + IORING_OP_SEND = 26, + IORING_OP_RECV = 27, + IORING_OP_OPENAT2 = 28, + IORING_OP_EPOLL_CTL = 29, + IORING_OP_SPLICE = 30, + IORING_OP_PROVIDE_BUFFERS = 31, + IORING_OP_REMOVE_BUFFERS = 32, + IORING_OP_TEE = 33, + IORING_OP_SHUTDOWN = 34, + IORING_OP_RENAMEAT = 35, + IORING_OP_UNLINKAT = 36, + IORING_OP_MKDIRAT = 37, + IORING_OP_SYMLINKAT = 38, + IORING_OP_LINKAT = 39, + IORING_OP_MSG_RING = 40, + IORING_OP_FSETXATTR = 41, + IORING_OP_SETXATTR = 42, + IORING_OP_FGETXATTR = 43, + IORING_OP_GETXATTR = 44, + IORING_OP_SOCKET = 45, + IORING_OP_URING_CMD = 46, + IORING_OP_SEND_ZC = 47, + IORING_OP_SENDMSG_ZC = 48, + IORING_OP_READ_MULTISHOT = 49, + IORING_OP_WAITID = 50, + IORING_OP_FUTEX_WAIT = 51, + IORING_OP_FUTEX_WAKE = 52, + IORING_OP_FUTEX_WAITV = 53, + IORING_OP_FIXED_FD_INSTALL = 54, + IORING_OP_FTRUNCATE = 55, + IORING_OP_BIND = 56, + IORING_OP_LISTEN = 57, + IORING_OP_LAST = 58, +}; + +struct futex_waitv { + __u64 val; + __u64 uaddr; + __u32 flags; + __u32 __reserved; +}; + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +struct futex_hash_bucket { + atomic_t waiters; + spinlock_t lock; + struct plist_head chain; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct futex_q; + +typedef void futex_wake_fn(struct wake_q_head *, struct futex_q *); + +struct futex_q { + struct plist_node list; + struct task_struct *task; + spinlock_t *lock_ptr; + futex_wake_fn *wake; + void *wake_data; + long: 32; + union futex_key key; + struct futex_pi_state *pi_state; + struct rt_mutex_waiter *rt_waiter; + union futex_key *requeue_pi_key; + u32 bitset; + atomic_t requeue_state; + long: 32; +}; + +struct futex_vector { + struct futex_waitv w; + struct futex_q q; +}; + +struct io_cancel_data { + struct io_ring_ctx *ctx; + long: 32; + union { + u64 data; + struct file *file; + }; + u8 opcode; + u32 flags; + int seq; + long: 32; +}; + +struct io_futex { + struct file *file; + union { + u32 *uaddr; + struct futex_waitv *uwaitv; + }; + long unsigned int futex_val; + long unsigned int futex_mask; + long unsigned int futexv_owned; + u32 futex_flags; + unsigned int futex_nr; + bool futexv_unqueued; +}; + +struct io_futex_data { + struct futex_q q; + struct io_kiocb *req; + long: 32; +}; + +typedef long unsigned int mpi_limb_t; + +typedef mpi_limb_t *mpi_ptr_t; + +typedef int mpi_size_t; + +struct karatsuba_ctx { + struct karatsuba_ctx *next; + mpi_ptr_t tspace; + mpi_size_t tspace_size; + mpi_ptr_t tp; + mpi_size_t tp_size; +}; + +typedef s16 int16_t; + +typedef u16 uint16_t; + +typedef uint8_t BYTE; + +typedef uint8_t U8; + +typedef uint16_t U16; + +typedef int16_t S16; + +typedef uint32_t U32; + +typedef uint64_t U64; + +typedef enum { + ZSTD_error_no_error = 0, + ZSTD_error_GENERIC = 1, + ZSTD_error_prefix_unknown = 10, + ZSTD_error_version_unsupported = 12, + ZSTD_error_frameParameter_unsupported = 14, + ZSTD_error_frameParameter_windowTooLarge = 16, + ZSTD_error_corruption_detected = 20, + ZSTD_error_checksum_wrong = 22, + ZSTD_error_dictionary_corrupted = 30, + ZSTD_error_dictionary_wrong = 32, + ZSTD_error_dictionaryCreation_failed = 34, + ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_outOfBound = 42, + ZSTD_error_tableLog_tooLarge = 44, + ZSTD_error_maxSymbolValue_tooLarge = 46, + ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_stage_wrong = 60, + ZSTD_error_init_missing = 62, + ZSTD_error_memory_allocation = 64, + ZSTD_error_workSpace_tooSmall = 66, + ZSTD_error_dstSize_tooSmall = 70, + ZSTD_error_srcSize_wrong = 72, + ZSTD_error_dstBuffer_null = 74, + ZSTD_error_frameIndex_tooLarge = 100, + ZSTD_error_seekableIO = 102, + ZSTD_error_dstBuffer_wrong = 104, + ZSTD_error_srcBuffer_wrong = 105, + ZSTD_error_maxCode = 120, +} ZSTD_ErrorCode; + +typedef U32 HUF_DTable; + +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; + long: 32; +}; + +typedef struct { + U16 nextState; + BYTE nbAdditionalBits; + BYTE nbBits; + U32 baseValue; +} ZSTD_seqSymbol; + +typedef struct { + ZSTD_seqSymbol LLTable[513]; + ZSTD_seqSymbol OFTable[257]; + ZSTD_seqSymbol MLTable[513]; + HUF_DTable hufTable[4097]; + U32 rep[3]; + U32 workspace[157]; +} ZSTD_entropyDTables_t; + +typedef enum { + ZSTD_frame = 0, + ZSTD_skippableFrame = 1, +} ZSTD_frameType_e; + +typedef struct { + long long unsigned int frameContentSize; + long long unsigned int windowSize; + unsigned int blockSizeMax; + ZSTD_frameType_e frameType; + unsigned int headerSize; + unsigned int 
dictID; + unsigned int checksumFlag; + long: 32; +} ZSTD_frameHeader; + +typedef enum { + bt_raw = 0, + bt_rle = 1, + bt_compressed = 2, + bt_reserved = 3, +} blockType_e; + +typedef enum { + ZSTDds_getFrameHeaderSize = 0, + ZSTDds_decodeFrameHeader = 1, + ZSTDds_decodeBlockHeader = 2, + ZSTDds_decompressBlock = 3, + ZSTDds_decompressLastBlock = 4, + ZSTDds_checkChecksum = 5, + ZSTDds_decodeSkippableHeader = 6, + ZSTDds_skipFrame = 7, +} ZSTD_dStage; + +typedef enum { + ZSTD_f_zstd1 = 0, + ZSTD_f_zstd1_magicless = 1, +} ZSTD_format_e; + +typedef enum { + ZSTD_d_validateChecksum = 0, + ZSTD_d_ignoreChecksum = 1, +} ZSTD_forceIgnoreChecksum_e; + +typedef void * (*ZSTD_allocFunction)(void *, size_t); + +typedef void (*ZSTD_freeFunction)(void *, void *); + +typedef struct { + ZSTD_allocFunction customAlloc; + ZSTD_freeFunction customFree; + void *opaque; +} ZSTD_customMem; + +typedef enum { + ZSTD_use_indefinitely = -1, + ZSTD_dont_use = 0, + ZSTD_use_once = 1, +} ZSTD_dictUses_e; + +struct ZSTD_DDict_s; + +typedef struct ZSTD_DDict_s ZSTD_DDict; + +typedef struct { + const ZSTD_DDict **ddictPtrTable; + size_t ddictPtrTableSize; + size_t ddictPtrCount; +} ZSTD_DDictHashSet; + +typedef enum { + ZSTD_rmd_refSingleDDict = 0, + ZSTD_rmd_refMultipleDDicts = 1, +} ZSTD_refMultipleDDicts_e; + +typedef enum { + zdss_init = 0, + zdss_loadHeader = 1, + zdss_read = 2, + zdss_load = 3, + zdss_flush = 4, +} ZSTD_dStreamStage; + +typedef enum { + ZSTD_bm_buffered = 0, + ZSTD_bm_stable = 1, +} ZSTD_bufferMode_e; + +struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_outBuffer_s ZSTD_outBuffer; + +typedef enum { + ZSTD_not_in_dst = 0, + ZSTD_in_dst = 1, + ZSTD_split = 2, +} ZSTD_litLocation_e; + +struct ZSTD_DCtx_s { + const ZSTD_seqSymbol *LLTptr; + const ZSTD_seqSymbol *MLTptr; + const ZSTD_seqSymbol *OFTptr; + const HUF_DTable *HUFptr; + ZSTD_entropyDTables_t entropy; + U32 workspace[640]; + const void *previousDstEnd; + const void *prefixStart; + const void *virtualStart; + const void *dictEnd; + size_t expected; + ZSTD_frameHeader fParams; + U64 processedCSize; + U64 decodedSize; + blockType_e bType; + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + struct xxh64_state xxhState; + size_t headerSize; + ZSTD_format_e format; + ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; + U32 validateChecksum; + const BYTE *litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + size_t staticSize; + ZSTD_DDict *ddictLocal; + const ZSTD_DDict *ddict; + U32 dictID; + int ddictIsCold; + ZSTD_dictUses_e dictUses; + ZSTD_DDictHashSet *ddictSet; + ZSTD_refMultipleDDicts_e refMultipleDDicts; + ZSTD_dStreamStage streamStage; + char *inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char *outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t lhSize; + U32 hostageByte; + int noForwardProgress; + ZSTD_bufferMode_e outBufferMode; + ZSTD_outBuffer expectedOutBuffer; + BYTE *litBuffer; + const BYTE *litBufferEnd; + ZSTD_litLocation_e litBufferLocation; + BYTE litExtraBuffer[65568]; + BYTE headerBuffer[18]; + size_t oversizedDuration; + long: 32; +}; + +typedef struct ZSTD_DCtx_s ZSTD_DCtx; + +typedef struct { + size_t error; + int lowerBound; + int upperBound; +} ZSTD_bounds; + +typedef enum { + ZSTD_reset_session_only = 1, + ZSTD_reset_parameters = 2, + ZSTD_reset_session_and_parameters = 3, +} ZSTD_ResetDirective; + +typedef enum { + ZSTD_d_windowLogMax = 100, + ZSTD_d_experimentalParam1 = 1000, + 
ZSTD_d_experimentalParam2 = 1001, + ZSTD_d_experimentalParam3 = 1002, + ZSTD_d_experimentalParam4 = 1003, +} ZSTD_dParameter; + +struct ZSTD_inBuffer_s { + const void *src; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_inBuffer_s ZSTD_inBuffer; + +typedef ZSTD_DCtx ZSTD_DStream; + +struct ZSTD_DDict_s { + void *dictBuffer; + const void *dictContent; + size_t dictSize; + ZSTD_entropyDTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; + +typedef enum { + ZSTD_dct_auto = 0, + ZSTD_dct_rawContent = 1, + ZSTD_dct_fullDict = 2, +} ZSTD_dictContentType_e; + +typedef enum { + ZSTD_dlm_byCopy = 0, + ZSTD_dlm_byRef = 1, +} ZSTD_dictLoadMethod_e; + +typedef enum { + ZSTDnit_frameHeader = 0, + ZSTDnit_blockHeader = 1, + ZSTDnit_block = 2, + ZSTDnit_lastBlock = 3, + ZSTDnit_checksum = 4, + ZSTDnit_skippableFrame = 5, +} ZSTD_nextInputType_e; + +typedef struct { + size_t compressedSize; + long: 32; + long long unsigned int decompressedBound; +} ZSTD_frameSizeInfo; + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +typedef enum { + not_streaming = 0, + is_streaming = 1, +} streaming_operation; + +struct font_desc { + int idx; + const char *name; + unsigned int width; + unsigned int height; + unsigned int charcount; + const void *data; + int pref; +}; + +enum vesa_blank_mode { + VESA_NO_BLANKING = 0, + VESA_VSYNC_SUSPEND = 1, + VESA_HSYNC_SUSPEND = 2, + VESA_POWERDOWN = 3, + VESA_BLANK_MAX = 3, +}; + +struct fb_videomode { + const char *name; + u32 refresh; + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +struct fb_cvt_data { + u32 xres; + u32 yres; + u32 refresh; + u32 f_refresh; + u32 pixclock; + u32 hperiod; + u32 hblank; + u32 hfreq; + u32 htotal; + u32 vtotal; + u32 vsync; + u32 hsync; + u32 h_front_porch; + u32 h_back_porch; + u32 v_front_porch; + u32 v_back_porch; + u32 h_margin; + u32 v_margin; + u32 interlace; + u32 aspect_ratio; + u32 active_pixels; + u32 flags; + u32 status; +}; + +enum ftrace_dump_mode { + DUMP_NONE = 0, + DUMP_ALL = 1, + DUMP_ORIG = 2, + DUMP_PARAM = 3, +}; + +struct sysrq_key_op { + void (* const handler)(u8); + const char * const help_msg; + const char * const action_msg; + const int enable_mask; +}; + +enum vc_intensity { + VCI_HALF_BRIGHT = 0, + VCI_NORMAL = 1, + VCI_BOLD = 2, + VCI_MASK = 3, +}; + +struct vc_state { + unsigned int x; + unsigned int y; + unsigned char color; + unsigned char Gx_charset[2]; + unsigned int charset: 1; + enum vc_intensity intensity; + bool italic; + bool underline; + bool blink; + bool reverse; +}; + +struct console_font { + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct vt_mode { + char mode; + char waitv; + short int relsig; + short int acqsig; + short int frsig; +}; + +struct consw; + +struct uni_pagedict; + +struct vc_data { + struct tty_port port; + struct vc_state state; + struct vc_state saved_state; + short unsigned int vc_num; + unsigned int vc_cols; + unsigned int vc_rows; + unsigned int vc_size_row; + unsigned int vc_scan_lines; + unsigned int vc_cell_height; + long unsigned int vc_origin; + long unsigned int vc_scr_end; + long unsigned int vc_visible_origin; + unsigned int vc_top; + unsigned int vc_bottom; + const struct consw *vc_sw; + short unsigned int *vc_screenbuf; + unsigned int vc_screenbuf_size; + unsigned char vc_mode; + 
unsigned char vc_attr; + unsigned char vc_def_color; + unsigned char vc_ulcolor; + unsigned char vc_itcolor; + unsigned char vc_halfcolor; + unsigned int vc_cursor_type; + short unsigned int vc_complement_mask; + short unsigned int vc_s_complement_mask; + long unsigned int vc_pos; + short unsigned int vc_hi_font_mask; + struct console_font vc_font; + short unsigned int vc_video_erase_char; + unsigned int vc_state; + unsigned int vc_npar; + unsigned int vc_par[16]; + struct vt_mode vt_mode; + struct pid *vt_pid; + int vt_newvt; + wait_queue_head_t paste_wait; + unsigned int vc_disp_ctrl: 1; + unsigned int vc_toggle_meta: 1; + unsigned int vc_decscnm: 1; + unsigned int vc_decom: 1; + unsigned int vc_decawm: 1; + unsigned int vc_deccm: 1; + unsigned int vc_decim: 1; + unsigned int vc_priv: 3; + unsigned int vc_need_wrap: 1; + unsigned int vc_can_do_color: 1; + unsigned int vc_report_mouse: 2; + unsigned char vc_utf: 1; + unsigned char vc_utf_count; + int vc_utf_char; + long unsigned int vc_tab_stop[8]; + unsigned char vc_palette[48]; + short unsigned int *vc_translate; + unsigned int vc_bell_pitch; + unsigned int vc_bell_duration; + short unsigned int vc_cur_blink_ms; + struct vc_data **vc_display_fg; + struct uni_pagedict *uni_pagedict; + struct uni_pagedict **uni_pagedict_loc; + u32 **vc_uni_lines; +}; + +enum con_scroll { + SM_UP = 0, + SM_DOWN = 1, +}; + +struct consw { + struct module *owner; + const char * (*con_startup)(); + void (*con_init)(struct vc_data *, bool); + void (*con_deinit)(struct vc_data *); + void (*con_clear)(struct vc_data *, unsigned int, unsigned int, unsigned int); + void (*con_putc)(struct vc_data *, u16, unsigned int, unsigned int); + void (*con_putcs)(struct vc_data *, const u16 *, unsigned int, unsigned int, unsigned int); + void (*con_cursor)(struct vc_data *, bool); + bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int); + bool (*con_switch)(struct vc_data *); + bool (*con_blank)(struct vc_data *, enum vesa_blank_mode, bool); + int (*con_font_set)(struct vc_data *, const struct console_font *, unsigned int, unsigned int); + int (*con_font_get)(struct vc_data *, struct console_font *, unsigned int); + int (*con_font_default)(struct vc_data *, struct console_font *, const char *); + int (*con_resize)(struct vc_data *, unsigned int, unsigned int, bool); + void (*con_set_palette)(struct vc_data *, const unsigned char *); + void (*con_scrolldelta)(struct vc_data *, int); + bool (*con_set_origin)(struct vc_data *); + void (*con_save_screen)(struct vc_data *); + u8 (*con_build_attr)(struct vc_data *, u8, enum vc_intensity, bool, bool, bool, bool); + void (*con_invert_region)(struct vc_data *, u16 *, int); + void (*con_debug_enter)(struct vc_data *); + void (*con_debug_leave)(struct vc_data *); +}; + +struct vc { + struct vc_data *d; + struct work_struct SAK_work; +}; + +struct sysrq_state { + struct input_handle handle; + struct work_struct reinject_work; + long unsigned int key_down[24]; + unsigned int alt; + unsigned int alt_use; + unsigned int shift; + unsigned int shift_use; + bool active; + bool need_reinject; + bool reinjecting; + bool reset_canceled; + bool reset_requested; + long unsigned int reset_keybit[24]; + int reset_seq_len; + int reset_seq_cnt; + int reset_seq_version; + struct timer_list keyreset_timer; +}; + +struct iosys_map { + union { + void *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; +}; + +struct dma_fence_ops; + +struct dma_fence { + spinlock_t *lock; + const struct dma_fence_ops *ops; + union { 
+ struct list_head cb_list; + ktime_t timestamp; + struct callback_head rcu; + }; + u64 context; + u64 seqno; + long unsigned int flags; + struct kref refcount; + int error; + long: 32; +}; + +struct dma_fence_ops { + bool use_64bit_seqno; + const char * (*get_driver_name)(struct dma_fence *); + const char * (*get_timeline_name)(struct dma_fence *); + bool (*enable_signaling)(struct dma_fence *); + bool (*signaled)(struct dma_fence *); + long int (*wait)(struct dma_fence *, bool, long int); + void (*release)(struct dma_fence *); + void (*fence_value_str)(struct dma_fence *, char *, int); + void (*timeline_value_str)(struct dma_fence *, char *, int); + void (*set_deadline)(struct dma_fence *, ktime_t); +}; + +struct dma_fence_cb; + +typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *); + +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +struct dma_buf; + +struct dma_buf_attachment; + +struct dma_buf_ops { + bool cache_sgt_mapping; + int (*attach)(struct dma_buf *, struct dma_buf_attachment *); + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + int (*pin)(struct dma_buf_attachment *); + void (*unpin)(struct dma_buf_attachment *); + struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); + void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); + void (*release)(struct dma_buf *); + int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*mmap)(struct dma_buf *, struct vm_area_struct *); + int (*vmap)(struct dma_buf *, struct iosys_map *); + void (*vunmap)(struct dma_buf *, struct iosys_map *); +}; + +struct dma_buf_poll_cb_t { + struct dma_fence_cb cb; + wait_queue_head_t *poll; + __poll_t active; +}; + +struct dma_resv; + +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + unsigned int vmapping_counter; + struct iosys_map vmap_ptr; + const char *exp_name; + const char *name; + spinlock_t name_lock; + struct module *owner; + struct list_head list_node; + void *priv; + struct dma_resv *resv; + wait_queue_head_t poll; + struct dma_buf_poll_cb_t cb_in; + struct dma_buf_poll_cb_t cb_out; +}; + +struct dma_buf_attach_ops; + +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + struct sg_table *sgt; + enum dma_data_direction dir; + bool peer2peer; + const struct dma_buf_attach_ops *importer_ops; + void *importer_priv; + void *priv; +}; + +struct ww_acquire_ctx; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +}; + +struct dma_resv_list; + +struct dma_resv { + struct ww_mutex lock; + struct dma_resv_list *fences; +}; + +struct dma_buf_attach_ops { + bool allow_peer2peer; + void (*move_notify)(struct dma_buf_attachment *); +}; + +struct dma_buf_export_info { + const char *exp_name; + struct module *owner; + const struct dma_buf_ops *ops; + size_t size; + int flags; + struct dma_resv *resv; + void *priv; +}; + +struct dma_fence_unwrap { + struct dma_fence *chain; + struct dma_fence *array; + unsigned int index; +}; + +struct sync_file { + struct file *file; + char user_name[32]; + struct list_head sync_file_list; + wait_queue_head_t wq; + long unsigned int flags; + struct dma_fence *fence; + struct dma_fence_cb cb; +}; + +struct ww_acquire_ctx { + struct task_struct *task; + long unsigned int stamp; + unsigned int acquired; + short 
unsigned int wounded; + short unsigned int is_wait_die; +}; + +enum dma_resv_usage { + DMA_RESV_USAGE_KERNEL = 0, + DMA_RESV_USAGE_WRITE = 1, + DMA_RESV_USAGE_READ = 2, + DMA_RESV_USAGE_BOOKKEEP = 3, +}; + +struct dma_resv_iter { + struct dma_resv *obj; + enum dma_resv_usage usage; + struct dma_fence *fence; + enum dma_resv_usage fence_usage; + unsigned int index; + struct dma_resv_list *fences; + unsigned int num_fences; + bool is_restarted; +}; + +struct pseudo_fs_context { + const struct super_operations *ops; + const struct xattr_handler * const *xattr; + const struct dentry_operations *dops; + long unsigned int magic; +}; + +struct dma_buf_sync { + __u64 flags; +}; + +struct dma_buf_export_sync_file { + __u32 flags; + __s32 fd; +}; + +struct dma_buf_import_sync_file { + __u32 flags; + __s32 fd; +}; + +struct mii_ioctl_data { + __u16 phy_id; + __u16 reg_num; + __u16 val_in; + __u16 val_out; +}; + +struct mii_if_info { + int phy_id; + int advertising; + int phy_id_mask; + int reg_num_mask; + unsigned int full_duplex: 1; + unsigned int force_media: 1; + unsigned int supports_gmii: 1; + struct net_device *dev; + int (*mdio_read)(struct net_device *, int, int); + void (*mdio_write)(struct net_device *, int, int, int); +}; + +enum { + DUMP_PREFIX_NONE = 0, + DUMP_PREFIX_ADDRESS = 1, + DUMP_PREFIX_OFFSET = 2, +}; + +struct __call_single_data { + struct __call_single_node node; + smp_call_func_t func; + void *info; +}; + +typedef struct __call_single_data call_single_data_t; + +struct system_device_crosststamp { + ktime_t device; + ktime_t sys_realtime; + ktime_t sys_monoraw; +}; + +struct netdev_xmit { + u16 recursion; + u8 more; + u8 skip_txqueue; +}; + +enum { + SKBTX_HW_TSTAMP = 1, + SKBTX_SW_TSTAMP = 2, + SKBTX_IN_PROGRESS = 4, + SKBTX_HW_TSTAMP_USE_CYCLES = 8, + SKBTX_WIFI_STATUS = 16, + SKBTX_HW_TSTAMP_NETDEV = 32, + SKBTX_SCHED_TSTAMP = 64, +}; + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE = 0, + PKT_HASH_TYPE_L2 = 1, + PKT_HASH_TYPE_L3 = 2, + PKT_HASH_TYPE_L4 = 3, +}; + +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP = 1, + XDP_PASS = 2, + XDP_TX = 3, + XDP_REDIRECT = 4, +}; + +enum netdev_xdp_act { + NETDEV_XDP_ACT_BASIC = 1, + NETDEV_XDP_ACT_REDIRECT = 2, + NETDEV_XDP_ACT_NDO_XMIT = 4, + NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, + NETDEV_XDP_ACT_HW_OFFLOAD = 16, + NETDEV_XDP_ACT_RX_SG = 32, + NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + NETDEV_XDP_ACT_MASK = 127, +}; + +enum gro_result { + GRO_MERGED = 0, + GRO_MERGED_FREE = 1, + GRO_HELD = 2, + GRO_NORMAL = 3, + GRO_CONSUMED = 4, +}; + +typedef enum gro_result gro_result_t; + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF = 0, + __QUEUE_STATE_STACK_XOFF = 1, + __QUEUE_STATE_FROZEN = 2, +}; + +struct sd_flow_limit; + +struct softnet_data { + struct list_head poll_list; + struct sk_buff_head process_queue; + local_lock_t process_queue_bh_lock; + unsigned int processed; + unsigned int time_squeeze; + struct softnet_data *rps_ipi_list; + unsigned int received_rps; + bool in_net_rx_action; + bool in_napi_threaded_poll; + struct sd_flow_limit *flow_limit; + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; + struct netdev_xmit xmit; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + unsigned int input_queue_head; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + call_single_data_t csd; + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_tail; + struct sk_buff_head input_pkt_queue; + long: 32; + struct napi_struct backlog; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_t dropped; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t defer_lock; + int defer_count; + int defer_ipi_scheduled; + struct sk_buff *defer_list; + call_single_data_t defer_csd; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + +struct udphdr { + __be16 source; + __be16 dest; + __be16 len; + __sum16 check; +}; + +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key; + struct flow_dissector_key_basic *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key; + struct flow_dissector_key_control *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key; + struct flow_dissector_key_eth_addrs *mask; +}; + +struct flow_match_vlan { + struct flow_dissector_key_vlan *key; + struct flow_dissector_key_vlan *mask; +}; + +enum flow_block_binder_type { + FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP = 3, + FLOW_BLOCK_BINDER_TYPE_RED_MARK = 4, +}; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + bool block_shared; + bool unlocked_driver_cb; + struct net *net; + struct flow_block *block; + struct list_head cb_list; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; + struct Qdisc *sch; + struct list_head *cb_list_head; +}; + +enum flow_cls_command { + FLOW_CLS_REPLACE = 0, + FLOW_CLS_DESTROY = 1, + FLOW_CLS_STATS = 2, + FLOW_CLS_TMPLT_CREATE = 3, + FLOW_CLS_TMPLT_DESTROY = 4, +}; + +struct flow_cls_common_offload { + u32 chain_index; + __be16 protocol; + u32 prio; + bool skip_sw; + struct netlink_ext_ack *extack; +}; + +struct flow_cls_offload { + struct flow_cls_common_offload common; + enum flow_cls_command command; + bool use_act_stats; + long unsigned int cookie; + struct flow_rule *rule; + long: 32; + struct flow_stats stats; + u32 classid; + long: 32; +}; + +struct tc_query_caps_base { + enum tc_setup_type type; + void *caps; +}; + +struct tc_cbs_qopt_offload { + u8 enable; + s32 queue; + s32 hicredit; + s32 locredit; + s32 
idleslope; + s32 sendslope; +}; + +struct tc_etf_qopt_offload { + u8 enable; + s32 queue; +}; + +struct tc_taprio_caps { + bool supports_queue_max_sdu: 1; + bool gate_mask_per_txq: 1; + bool broken_mqprio: 1; +}; + +enum pci_ers_result { + PCI_ERS_RESULT_NONE = 1, + PCI_ERS_RESULT_CAN_RECOVER = 2, + PCI_ERS_RESULT_NEED_RESET = 3, + PCI_ERS_RESULT_DISCONNECT = 4, + PCI_ERS_RESULT_RECOVERED = 5, + PCI_ERS_RESULT_NO_AER_DRIVER = 6, +}; + +struct msix_entry { + u32 vector; + u16 entry; +}; + +enum sctp_msg_flags { + MSG_NOTIFICATION = 32768, +}; + +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, + MEM_TYPE_PAGE_ORDER0 = 1, + MEM_TYPE_PAGE_POOL = 2, + MEM_TYPE_XSK_BUFF_POOL = 3, + MEM_TYPE_MAX = 4, +}; + +enum xdp_buff_flags { + XDP_FLAGS_HAS_FRAGS = 1, + XDP_FLAGS_FRAGS_PF_MEMALLOC = 2, +}; + +struct regulator; + +struct rt_mutex { + struct rt_mutex_base rtmutex; +}; + +struct i2c_msg { + __u16 addr; + __u16 flags; + __u16 len; + __u8 *buf; +}; + +union i2c_smbus_data { + __u8 byte; + __u16 word; + __u8 block[34]; +}; + +struct i2c_adapter; + +struct i2c_client { + short unsigned int flags; + short unsigned int addr; + char name[20]; + struct i2c_adapter *adapter; + long: 32; + struct device dev; + int init_irq; + int irq; + struct list_head detected; + void *devres_group_id; + long: 32; +}; + +struct i2c_algorithm; + +struct i2c_lock_operations; + +struct i2c_bus_recovery_info; + +struct i2c_adapter_quirks; + +struct i2c_adapter { + struct module *owner; + unsigned int class; + const struct i2c_algorithm *algo; + void *algo_data; + const struct i2c_lock_operations *lock_ops; + struct rt_mutex bus_lock; + struct rt_mutex mux_lock; + int timeout; + int retries; + long: 32; + struct device dev; + long unsigned int locked_flags; + int nr; + char name[48]; + struct completion dev_released; + struct mutex userspace_clients_lock; + struct list_head userspace_clients; + struct i2c_bus_recovery_info *bus_recovery_info; + const struct i2c_adapter_quirks *quirks; + struct irq_domain *host_notify_domain; + struct regulator *bus_regulator; + struct dentry *debugfs; + long unsigned int addrs_in_instantiation[4]; + long: 32; +}; + +struct i2c_algorithm { + union { + int (*xfer)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); + }; + union { + int (*xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + }; + int (*smbus_xfer)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + int (*smbus_xfer_atomic)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + u32 (*functionality)(struct i2c_adapter *); +}; + +struct i2c_lock_operations { + void (*lock_bus)(struct i2c_adapter *, unsigned int); + int (*trylock_bus)(struct i2c_adapter *, unsigned int); + void (*unlock_bus)(struct i2c_adapter *, unsigned int); +}; + +struct pinctrl; + +struct pinctrl_state; + +struct i2c_bus_recovery_info { + int (*recover_bus)(struct i2c_adapter *); + int (*get_scl)(struct i2c_adapter *); + void (*set_scl)(struct i2c_adapter *, int); + int (*get_sda)(struct i2c_adapter *); + void (*set_sda)(struct i2c_adapter *, int); + int (*get_bus_free)(struct i2c_adapter *); + void (*prepare_recovery)(struct i2c_adapter *); + void (*unprepare_recovery)(struct i2c_adapter *); + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; 
+}; + +struct i2c_adapter_quirks { + u64 flags; + int max_num_msgs; + u16 max_write_len; + u16 max_read_len; + u16 max_comb_1st_msg_len; + u16 max_comb_2nd_msg_len; + long: 32; +}; + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575 = 1, + e1000_82576 = 2, + e1000_82580 = 3, + e1000_i350 = 4, + e1000_i354 = 5, + e1000_i210 = 6, + e1000_i211 = 7, + e1000_num_macs = 8, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types = 4, +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none = 1, + e1000_nvm_eeprom_spi = 2, + e1000_nvm_flash_hw = 3, + e1000_nvm_invm = 4, + e1000_nvm_flash_sw = 5, +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small = 1, + e1000_nvm_override_spi_large = 2, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none = 1, + e1000_phy_m88 = 2, + e1000_phy_igp = 3, + e1000_phy_igp_2 = 4, + e1000_phy_gg82563 = 5, + e1000_phy_igp_3 = 6, + e1000_phy_ife = 7, + e1000_phy_82580 = 8, + e1000_phy_i210 = 9, + e1000_phy_bcm54616 = 10, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci = 1, + e1000_bus_type_pcix = 2, + e1000_bus_type_pci_express = 3, + e1000_bus_type_reserved = 4, +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33 = 1, + e1000_bus_speed_66 = 2, + e1000_bus_speed_100 = 3, + e1000_bus_speed_120 = 4, + e1000_bus_speed_133 = 5, + e1000_bus_speed_2500 = 6, + e1000_bus_speed_5000 = 7, + e1000_bus_speed_reserved = 8, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1 = 1, + e1000_bus_width_pcie_x2 = 2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32 = 9, + e1000_bus_width_64 = 10, + e1000_bus_width_reserved = 11, +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok = 1, + e1000_1000t_rx_status_undefined = 255, +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed = 1, + e1000_rev_polarity_undefined = 255, +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause = 1, + e1000_fc_tx_pause = 2, + e1000_fc_full = 3, + e1000_fc_default = 255, +}; + +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 
reserved2; + u8 reserved3; + u8 checksum; +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master = 1, + e1000_ms_force_slave = 2, + e1000_ms_auto = 3, +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on = 1, + e1000_smart_speed_off = 2, +}; + +struct e1000_sfp_flags { + u8 e1000_base_sx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_t: 1; + u8 e100_base_lx: 1; + u8 e100_base_fx: 1; + u8 e10_base_bx10: 1; + u8 e10_base_px: 1; +}; + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + +struct e1000_hw; + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); +}; + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[3]; +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + enum e1000_mac_type type; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + u16 mta_reg_count; + u16 uta_reg_count; + u32 mta_shadow[128]; + u16 rar_entry_count; + u8 forced_speed_duplex; + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_fc_info { + u32 high_water; + u32 low_water; + u16 pause_time; + bool send_xon; + bool strict_ieee; + enum e1000_fc_mode current_mode; + enum e1000_fc_mode requested_mode; +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + enum e1000_phy_type type; + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + u32 
addr; + u32 id; + u32 reset_delay_us; + u32 revision; + enum e1000_media_type media_type; + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + u8 mdix; + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + u32 flash_bank_size; + u32 flash_base_addr; + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + u32 snoop; + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16, bool); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); + s32 (*unlock)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw { + void *back; + u8 *hw_addr; + u8 *flash_address; + long unsigned int io_base; + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + u8 revision_id; +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; + __le64 hdr_addr; + } read; + struct { + struct { + struct { + __le16 pkt_info; + __le16 hdr_info; + } lo_dword; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; + +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + 
+struct cyclecounter { + u64 (*read)(const struct cyclecounter *); + long: 32; + u64 mask; + u32 mult; + u32 shift; +}; + +struct timecounter { + const struct cyclecounter *cc; + long: 32; + u64 cycle_last; + u64 nsec; + u64 mask; + u64 frac; +}; + +struct pps_event_time { + struct timespec64 ts_real; +}; + +struct ptp_clock_time { + __s64 sec; + __u32 nsec; + __u32 reserved; +}; + +struct ptp_extts_request { + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct ptp_perout_request { + union { + struct ptp_clock_time start; + struct ptp_clock_time phase; + }; + struct ptp_clock_time period; + unsigned int index; + unsigned int flags; + union { + struct ptp_clock_time on; + unsigned int rsv[4]; + }; +}; + +enum ptp_pin_function { + PTP_PF_NONE = 0, + PTP_PF_EXTTS = 1, + PTP_PF_PEROUT = 2, + PTP_PF_PHYSYNC = 3, +}; + +struct ptp_pin_desc { + char name[64]; + unsigned int index; + unsigned int func; + unsigned int chan; + unsigned int rsv[5]; +}; + +struct ptp_clock_request { + enum { + PTP_CLK_REQ_EXTTS = 0, + PTP_CLK_REQ_PEROUT = 1, + PTP_CLK_REQ_PPS = 2, + } type; + long: 32; + union { + struct ptp_extts_request extts; + struct ptp_perout_request perout; + }; +}; + +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; + clockid_t clockid; + long: 32; +}; + +struct ptp_clock_info { + struct module *owner; + char name[32]; + s32 max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int n_pins; + int pps; + struct ptp_pin_desc *pin_config; + int (*adjfine)(struct ptp_clock_info *, long int); + int (*adjphase)(struct ptp_clock_info *, s32); + s32 (*getmaxphase)(struct ptp_clock_info *); + int (*adjtime)(struct ptp_clock_info *, s64); + int (*gettime64)(struct ptp_clock_info *, struct timespec64 *); + int (*gettimex64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*settime64)(struct ptp_clock_info *, const struct timespec64 *); + int (*getcycles64)(struct ptp_clock_info *, struct timespec64 *); + int (*getcyclesx64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosscycles)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*enable)(struct ptp_clock_info *, struct ptp_clock_request *, int); + int (*verify)(struct ptp_clock_info *, unsigned int, enum ptp_pin_function, unsigned int); + long int (*do_aux_work)(struct ptp_clock_info *); +}; + +enum ptp_clock_events { + PTP_CLOCK_ALARM = 0, + PTP_CLOCK_EXTTS = 1, + PTP_CLOCK_EXTOFF = 2, + PTP_CLOCK_PPS = 3, + PTP_CLOCK_PPSUSR = 4, +}; + +struct ptp_clock_event { + int type; + int index; + union { + u64 timestamp; + s64 offset; + struct pps_event_time pps_times; + }; +}; + +struct i2c_algo_bit_data { + void *data; + void (*setsda)(void *, int); + void (*setscl)(void *, int); + int (*getsda)(void *); + int (*getscl)(void *); + int (*pre_xfer)(struct i2c_adapter *); + void (*post_xfer)(struct i2c_adapter *); + int udelay; + int timeout; + bool can_do_atomic; +}; + +struct vf_data_storage { + unsigned char vf_mac_addresses[6]; + u16 vf_mc_hashes[30]; + u16 num_vf_mc_hashes; + u32 flags; + long unsigned int last_nack; + u16 pf_vlan; + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; + bool trusted; +}; + +struct vf_mac_filter { + struct list_head l; + int vf; + bool free; + u8 vf_mac[6]; +}; + +enum igb_tx_flags { + IGB_TX_FLAGS_VLAN = 1, + IGB_TX_FLAGS_TSO = 2, + IGB_TX_FLAGS_TSTAMP = 4, + 
IGB_TX_FLAGS_IPV4 = 16, + IGB_TX_FLAGS_CSUM = 32, +}; + +enum igb_tx_buf_type { + IGB_TYPE_SKB = 0, + IGB_TYPE_XDP = 1, +}; + +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + long unsigned int time_stamp; + enum igb_tx_buf_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + dma_addr_t dma; + __u32 len; + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct page *page; + __u16 page_offset; + __u16 pagecnt_bias; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring; + +struct igb_ring_container { + struct igb_ring *ring; + unsigned int total_bytes; + unsigned int total_packets; + u16 work_limit; + u8 count; + u8 itr; +}; + +struct igb_q_vector; + +struct igb_ring { + struct igb_q_vector *q_vector; + struct net_device *netdev; + struct bpf_prog *xdp_prog; + struct device *dev; + union { + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; + long unsigned int flags; + void *tail; + dma_addr_t dma; + unsigned int size; + u16 count; + u8 queue_index; + u8 reg_idx; + bool launchtime_enable; + bool cbs_enable; + s32 idleslope; + s32 sendslope; + s32 hicredit; + s32 locredit; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + union { + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + struct { + struct sk_buff *skb; + long: 32; + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + long: 32; + }; + }; + struct xdp_rxq_info xdp_rxq; +}; + +struct igb_adapter; + +struct igb_q_vector { + struct igb_adapter *adapter; + int cpu; + u32 eims_value; + u16 itr_val; + u8 set_itr; + void *itr_register; + struct igb_ring_container rx; + struct igb_ring_container tx; + long: 32; + struct napi_struct napi; + struct callback_head rcu; + char name[25]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct igb_ring ring[0]; +}; + +struct ptp_clock; + +struct hwmon_buff; + +struct igb_mac_addr; + +struct igb_adapter { + long unsigned int active_vlans[128]; + struct net_device *netdev; + struct bpf_prog *xdp_prog; + long unsigned int state; + unsigned int flags; + unsigned int num_q_vectors; + struct msix_entry msix_entries[10]; + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + int num_rx_queues; + struct igb_ring *rx_ring[16]; + u32 max_frame_size; + u32 min_frame_size; + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + u8 *io_addr; + struct work_struct reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + long unsigned int led_status; + struct pci_dev *pdev; + spinlock_t stats64_lock; + long: 32; + struct rtnl_link_stats64 stats64; + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + u32 test_icr; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + int msg_enable; + struct igb_q_vector *q_vector[8]; + u32 eims_enable_mask; + u32 eims_other; + u16 tx_ring_count; + u16 rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + long unsigned int ptp_tx_start; + long unsigned int last_rx_ptp_check; + long unsigned int last_rx_timestamp; + unsigned int ptp_flags; + spinlock_t tmreg_lock; + long: 32; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + bool pps_sys_wrap_on; + struct ptp_pin_desc sdp_config[4]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[2]; + char fw_version[48]; + struct hwmon_buff *igb_hwmon_buff; + bool ets; + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[128]; + long unsigned int link_check_timeout; + int copper_tries; + struct e1000_info ei; + u16 eee_advert; + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; + spinlock_t nfc_lock; + bool etype_bitmap[3]; + struct igb_mac_addr *mac_table; + struct vf_mac_filter vf_macs; + struct vf_mac_filter *vf_mac_list; + spinlock_t vfs_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_3K_BUFFER = 0, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED = 1, + IGB_RING_FLAG_RX_SCTP_CSUM = 2, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP = 3, + IGB_RING_FLAG_TX_CTX_IDX = 4, + IGB_RING_FLAG_TX_DETECT_HANG = 5, +}; + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[13]; + struct hwmon_attr hwmon_list[12]; + unsigned int n_hwmon; +}; + +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 1, + IGB_FILTER_FLAG_VLAN_TCI = 2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 8, +}; + +struct igb_nfc_input { + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[6]; + u8 dst_addr[6]; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + long unsigned int cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + +struct igb_mac_addr { + u8 addr[6]; + u8 queue; + u8 state; +}; + +enum e1000_state_t { + __IGB_TESTING = 0, + __IGB_RESETTING = 1, + __IGB_DOWN = 2, + __IGB_PTP_TX_IN_PROGRESS = 3, +}; + +enum igb_boards { + board_82575 = 0, +}; + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY = 0, + QUEUE_MODE_STREAM_RESERVATION = 1, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH = 0, + TX_QUEUE_PRIO_LOW = 1, +}; + +struct igb_reg_info { + u32 ofs; + char *name; +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255, +}; + +struct my_u0 { + __le64 a; + __le64 b; +}; + +struct lirc_scancode { + __u64 timestamp; + __u16 flags; + __u16 rc_proto; + __u32 keycode; 
+ __u64 scancode; +}; + +enum rc_filter_type { + RC_FILTER_NORMAL = 0, + RC_FILTER_WAKEUP = 1, + RC_FILTER_MAX = 2, +}; + +struct led_trigger {}; + +struct rc_filter_attribute { + struct device_attribute attr; + enum rc_filter_type type; + bool mask; +}; + +struct hid_device_id { + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + kernel_ulong_t driver_data; +}; + +enum hid_report_type { + HID_INPUT_REPORT = 0, + HID_OUTPUT_REPORT = 1, + HID_FEATURE_REPORT = 2, + HID_REPORT_TYPES = 3, +}; + +enum hid_class_request { + HID_REQ_GET_REPORT = 1, + HID_REQ_GET_IDLE = 2, + HID_REQ_GET_PROTOCOL = 3, + HID_REQ_SET_REPORT = 9, + HID_REQ_SET_IDLE = 10, + HID_REQ_SET_PROTOCOL = 11, +}; + +enum bpf_type_flag { + PTR_MAYBE_NULL = 256, + MEM_RDONLY = 512, + MEM_RINGBUF = 1024, + MEM_USER = 2048, + MEM_PERCPU = 4096, + OBJ_RELEASE = 8192, + PTR_UNTRUSTED = 16384, + MEM_UNINIT = 32768, + DYNPTR_TYPE_LOCAL = 65536, + DYNPTR_TYPE_RINGBUF = 131072, + MEM_FIXED_SIZE = 262144, + MEM_ALLOC = 524288, + PTR_TRUSTED = 1048576, + MEM_RCU = 2097152, + NON_OWN_REF = 4194304, + DYNPTR_TYPE_SKB = 8388608, + DYNPTR_TYPE_XDP = 16777216, + MEM_ALIGNED = 33554432, + MEM_WRITE = 67108864, + __BPF_TYPE_FLAG_MAX = 67108865, + __BPF_TYPE_LAST_FLAG = 67108864, +}; + +enum { + BPF_MAX_TRAMP_LINKS = 38, +}; + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY = 0, + BPF_TRAMP_FEXIT = 1, + BPF_TRAMP_MODIFY_RETURN = 2, + BPF_TRAMP_MAX = 3, + BPF_TRAMP_REPLACE = 4, +}; + +enum hid_type { + HID_TYPE_OTHER = 0, + HID_TYPE_USBMOUSE = 1, + HID_TYPE_USBNONE = 2, +}; + +struct hid_report; + +struct hid_report_enum { + unsigned int numbered; + struct list_head report_list; + struct hid_report *report_id_hash[256]; +}; + +struct hid_collection; + +struct hid_driver; + +struct hid_ll_driver; + +struct hid_field; + +struct hid_usage; + +struct hid_device { + const __u8 *dev_rdesc; + const __u8 *bpf_rdesc; + const __u8 *rdesc; + unsigned int dev_rsize; + unsigned int bpf_rsize; + unsigned int rsize; + unsigned int collection_size; + struct hid_collection *collection; + unsigned int maxcollection; + unsigned int maxapplication; + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + __u32 version; + enum hid_type type; + unsigned int country; + struct hid_report_enum report_enum[3]; + struct work_struct led_work; + struct semaphore driver_input_lock; + long: 32; + struct device dev; + struct hid_driver *driver; + void *devres_group_id; + const struct hid_ll_driver *ll_driver; + struct mutex ll_open_lock; + unsigned int ll_open_count; + long unsigned int status; + unsigned int claimed; + unsigned int quirks; + unsigned int initial_quirks; + bool io_started; + struct list_head inputs; + void *hiddev; + void *hidraw; + char name[128]; + char phys[64]; + char uniq[64]; + void *driver_data; + int (*ff_init)(struct hid_device *); + int (*hiddev_connect)(struct hid_device *, unsigned int); + void (*hiddev_disconnect)(struct hid_device *); + void (*hiddev_hid_event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); + void (*hiddev_report_event)(struct hid_device *, struct hid_report *); + short unsigned int debug; + struct dentry *debug_dir; + struct dentry *debug_rdesc; + struct dentry *debug_events; + struct list_head debug_list; + spinlock_t debug_list_lock; + wait_queue_head_t debug_wait; + struct kref ref; + unsigned int id; + long: 32; +}; + +struct hid_field_entry; + +struct hid_report { + struct list_head list; + struct list_head hidinput_list; + struct list_head field_entry_list; + unsigned int id; + enum 
hid_report_type type; + unsigned int application; + struct hid_field *field[256]; + struct hid_field_entry *field_entries; + unsigned int maxfield; + unsigned int size; + struct hid_device *device; + bool tool_active; + unsigned int tool; +}; + +struct hid_collection { + int parent_idx; + unsigned int type; + unsigned int usage; + unsigned int level; +}; + +struct hid_usage { + unsigned int hid; + unsigned int collection_index; + unsigned int usage_index; + __s8 resolution_multiplier; + __s8 wheel_factor; + __u16 code; + __u8 type; + __s16 hat_min; + __s16 hat_max; + __s16 hat_dir; + __s16 wheel_accumulated; +}; + +struct hid_input; + +struct hid_field { + unsigned int physical; + unsigned int logical; + unsigned int application; + struct hid_usage *usage; + unsigned int maxusage; + unsigned int flags; + unsigned int report_offset; + unsigned int report_size; + unsigned int report_count; + unsigned int report_type; + __s32 *value; + __s32 *new_value; + __s32 *usages_priorities; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned int unit; + bool ignored; + struct hid_report *report; + unsigned int index; + struct hid_input *hidinput; + __u16 dpad; + unsigned int slot_idx; +}; + +struct hid_input { + struct list_head list; + struct hid_report *report; + struct input_dev *input; + const char *name; + struct list_head reports; + unsigned int application; + bool registered; +}; + +struct hid_field_entry { + struct list_head list; + struct hid_field *field; + unsigned int index; + __s32 priority; +}; + +struct hid_report_id; + +struct hid_usage_id; + +struct hid_driver { + char *name; + const struct hid_device_id *id_table; + struct list_head dyn_list; + spinlock_t dyn_lock; + bool (*match)(struct hid_device *, bool); + int (*probe)(struct hid_device *, const struct hid_device_id *); + void (*remove)(struct hid_device *); + const struct hid_report_id *report_table; + int (*raw_event)(struct hid_device *, struct hid_report *, u8 *, int); + const struct hid_usage_id *usage_table; + int (*event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); + void (*report)(struct hid_device *, struct hid_report *); + const __u8 * (*report_fixup)(struct hid_device *, __u8 *, unsigned int *); + int (*input_mapping)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); + int (*input_mapped)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); + int (*input_configured)(struct hid_device *, struct hid_input *); + void (*feature_mapping)(struct hid_device *, struct hid_field *, struct hid_usage *); + int (*suspend)(struct hid_device *, pm_message_t); + int (*resume)(struct hid_device *); + int (*reset_resume)(struct hid_device *); + struct device_driver driver; +}; + +struct hid_ll_driver { + int (*start)(struct hid_device *); + void (*stop)(struct hid_device *); + int (*open)(struct hid_device *); + void (*close)(struct hid_device *); + int (*power)(struct hid_device *, int); + int (*parse)(struct hid_device *); + void (*request)(struct hid_device *, struct hid_report *, int); + int (*wait)(struct hid_device *); + int (*raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, unsigned char, int); + int (*output_report)(struct hid_device *, __u8 *, size_t); + int (*idle)(struct hid_device *, int, int, int); + bool (*may_wakeup)(struct hid_device *); + unsigned int max_buffer_size; +}; + 
+struct hid_report_id { + __u32 report_type; +}; + +struct hid_usage_id { + __u32 usage_hid; + __u32 usage_type; + __u32 usage_code; +}; + +struct hidraw_devinfo { + __u32 bustype; + __s16 vendor; + __s16 product; +}; + +struct hidraw { + unsigned int minor; + int exist; + int open; + wait_queue_head_t wait; + struct hid_device *hid; + struct device *dev; + spinlock_t list_lock; + struct list_head list; +}; + +struct hidraw_report { + __u8 *value; + int len; +}; + +struct hidraw_list { + struct hidraw_report buffer[64]; + int head; + int tail; + struct fasync_struct *fasync; + struct hidraw *hidraw; + struct list_head node; + struct mutex read_mutex; + bool revoked; +}; + +typedef __kernel_clock_t clock_t; + +enum { + IF_OPER_UNKNOWN = 0, + IF_OPER_NOTPRESENT = 1, + IF_OPER_DOWN = 2, + IF_OPER_LOWERLAYERDOWN = 3, + IF_OPER_TESTING = 4, + IF_OPER_DORMANT = 5, + IF_OPER_UP = 6, +}; + +struct scm_creds { + u32 pid; + kuid_t uid; + kgid_t gid; +}; + +struct netlink_skb_parms { + struct scm_creds creds; + __u32 portid; + __u32 dst_group; + __u32 flags; + struct sock *sk; + bool nsid_is_set; + int nsid; +}; + +struct netlink_dump_control { + int (*start)(struct netlink_callback *); + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + struct netlink_ext_ack *extack; + void *data; + struct module *module; + u32 min_dump_alloc; + int flags; +}; + +enum { + NDA_UNSPEC = 0, + NDA_DST = 1, + NDA_LLADDR = 2, + NDA_CACHEINFO = 3, + NDA_PROBES = 4, + NDA_VLAN = 5, + NDA_PORT = 6, + NDA_VNI = 7, + NDA_IFINDEX = 8, + NDA_MASTER = 9, + NDA_LINK_NETNSID = 10, + NDA_SRC_VNI = 11, + NDA_PROTOCOL = 12, + NDA_NH_ID = 13, + NDA_FDB_EXT_ATTRS = 14, + NDA_FLAGS_EXT = 15, + NDA_NDM_STATE_MASK = 16, + NDA_NDM_FLAGS_MASK = 17, + __NDA_MAX = 18, +}; + +struct rtnl_link_stats { + __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; + __u32 collisions; + __u32 rx_length_errors; + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; + __u32 tx_aborted_errors; + __u32 tx_carrier_errors; + __u32 tx_fifo_errors; + __u32 tx_heartbeat_errors; + __u32 tx_window_errors; + __u32 rx_compressed; + __u32 tx_compressed; + __u32 rx_nohandler; +}; + +struct rtnl_link_ifmap { + __u64 mem_start; + __u64 mem_end; + __u64 base_addr; + __u16 irq; + __u8 dma; + __u8 port; + long: 32; +}; + +enum { + IFLA_UNSPEC = 0, + IFLA_ADDRESS = 1, + IFLA_BROADCAST = 2, + IFLA_IFNAME = 3, + IFLA_MTU = 4, + IFLA_LINK = 5, + IFLA_QDISC = 6, + IFLA_STATS = 7, + IFLA_COST = 8, + IFLA_PRIORITY = 9, + IFLA_MASTER = 10, + IFLA_WIRELESS = 11, + IFLA_PROTINFO = 12, + IFLA_TXQLEN = 13, + IFLA_MAP = 14, + IFLA_WEIGHT = 15, + IFLA_OPERSTATE = 16, + IFLA_LINKMODE = 17, + IFLA_LINKINFO = 18, + IFLA_NET_NS_PID = 19, + IFLA_IFALIAS = 20, + IFLA_NUM_VF = 21, + IFLA_VFINFO_LIST = 22, + IFLA_STATS64 = 23, + IFLA_VF_PORTS = 24, + IFLA_PORT_SELF = 25, + IFLA_AF_SPEC = 26, + IFLA_GROUP = 27, + IFLA_NET_NS_FD = 28, + IFLA_EXT_MASK = 29, + IFLA_PROMISCUITY = 30, + IFLA_NUM_TX_QUEUES = 31, + IFLA_NUM_RX_QUEUES = 32, + IFLA_CARRIER = 33, + IFLA_PHYS_PORT_ID = 34, + IFLA_CARRIER_CHANGES = 35, + IFLA_PHYS_SWITCH_ID = 36, + IFLA_LINK_NETNSID = 37, + IFLA_PHYS_PORT_NAME = 38, + IFLA_PROTO_DOWN = 39, + IFLA_GSO_MAX_SEGS = 40, + IFLA_GSO_MAX_SIZE = 41, + IFLA_PAD = 42, + IFLA_XDP = 43, + IFLA_EVENT = 44, + IFLA_NEW_NETNSID = 45, + IFLA_IF_NETNSID = 46, + 
IFLA_TARGET_NETNSID = 46, + IFLA_CARRIER_UP_COUNT = 47, + IFLA_CARRIER_DOWN_COUNT = 48, + IFLA_NEW_IFINDEX = 49, + IFLA_MIN_MTU = 50, + IFLA_MAX_MTU = 51, + IFLA_PROP_LIST = 52, + IFLA_ALT_IFNAME = 53, + IFLA_PERM_ADDRESS = 54, + IFLA_PROTO_DOWN_REASON = 55, + IFLA_PARENT_DEV_NAME = 56, + IFLA_PARENT_DEV_BUS_NAME = 57, + IFLA_GRO_MAX_SIZE = 58, + IFLA_TSO_MAX_SIZE = 59, + IFLA_TSO_MAX_SEGS = 60, + IFLA_ALLMULTI = 61, + IFLA_DEVLINK_PORT = 62, + IFLA_GSO_IPV4_MAX_SIZE = 63, + IFLA_GRO_IPV4_MAX_SIZE = 64, + IFLA_DPLL_PIN = 65, + IFLA_MAX_PACING_OFFLOAD_HORIZON = 66, + __IFLA_MAX = 67, +}; + +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC = 0, + IFLA_PROTO_DOWN_REASON_MASK = 1, + IFLA_PROTO_DOWN_REASON_VALUE = 2, + __IFLA_PROTO_DOWN_REASON_CNT = 3, + IFLA_PROTO_DOWN_REASON_MAX = 2, +}; + +enum { + IFLA_BRPORT_UNSPEC = 0, + IFLA_BRPORT_STATE = 1, + IFLA_BRPORT_PRIORITY = 2, + IFLA_BRPORT_COST = 3, + IFLA_BRPORT_MODE = 4, + IFLA_BRPORT_GUARD = 5, + IFLA_BRPORT_PROTECT = 6, + IFLA_BRPORT_FAST_LEAVE = 7, + IFLA_BRPORT_LEARNING = 8, + IFLA_BRPORT_UNICAST_FLOOD = 9, + IFLA_BRPORT_PROXYARP = 10, + IFLA_BRPORT_LEARNING_SYNC = 11, + IFLA_BRPORT_PROXYARP_WIFI = 12, + IFLA_BRPORT_ROOT_ID = 13, + IFLA_BRPORT_BRIDGE_ID = 14, + IFLA_BRPORT_DESIGNATED_PORT = 15, + IFLA_BRPORT_DESIGNATED_COST = 16, + IFLA_BRPORT_ID = 17, + IFLA_BRPORT_NO = 18, + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, + IFLA_BRPORT_CONFIG_PENDING = 20, + IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, + IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, + IFLA_BRPORT_HOLD_TIMER = 23, + IFLA_BRPORT_FLUSH = 24, + IFLA_BRPORT_MULTICAST_ROUTER = 25, + IFLA_BRPORT_PAD = 26, + IFLA_BRPORT_MCAST_FLOOD = 27, + IFLA_BRPORT_MCAST_TO_UCAST = 28, + IFLA_BRPORT_VLAN_TUNNEL = 29, + IFLA_BRPORT_BCAST_FLOOD = 30, + IFLA_BRPORT_GROUP_FWD_MASK = 31, + IFLA_BRPORT_NEIGH_SUPPRESS = 32, + IFLA_BRPORT_ISOLATED = 33, + IFLA_BRPORT_BACKUP_PORT = 34, + IFLA_BRPORT_MRP_RING_OPEN = 35, + IFLA_BRPORT_MRP_IN_OPEN = 36, + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 37, + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 38, + IFLA_BRPORT_LOCKED = 39, + IFLA_BRPORT_MAB = 40, + IFLA_BRPORT_MCAST_N_GROUPS = 41, + IFLA_BRPORT_MCAST_MAX_GROUPS = 42, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 43, + IFLA_BRPORT_BACKUP_NHID = 44, + __IFLA_BRPORT_MAX = 45, +}; + +enum { + IFLA_INFO_UNSPEC = 0, + IFLA_INFO_KIND = 1, + IFLA_INFO_DATA = 2, + IFLA_INFO_XSTATS = 3, + IFLA_INFO_SLAVE_KIND = 4, + IFLA_INFO_SLAVE_DATA = 5, + __IFLA_INFO_MAX = 6, +}; + +enum { + IFLA_VF_INFO_UNSPEC = 0, + IFLA_VF_INFO = 1, + __IFLA_VF_INFO_MAX = 2, +}; + +enum { + IFLA_VF_UNSPEC = 0, + IFLA_VF_MAC = 1, + IFLA_VF_VLAN = 2, + IFLA_VF_TX_RATE = 3, + IFLA_VF_SPOOFCHK = 4, + IFLA_VF_LINK_STATE = 5, + IFLA_VF_RATE = 6, + IFLA_VF_RSS_QUERY_EN = 7, + IFLA_VF_STATS = 8, + IFLA_VF_TRUST = 9, + IFLA_VF_IB_NODE_GUID = 10, + IFLA_VF_IB_PORT_GUID = 11, + IFLA_VF_VLAN_LIST = 12, + IFLA_VF_BROADCAST = 13, + __IFLA_VF_MAX = 14, +}; + +struct ifla_vf_mac { + __u32 vf; + __u8 mac[32]; +}; + +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + +struct ifla_vf_vlan { + __u32 vf; + __u32 vlan; + __u32 qos; +}; + +enum { + IFLA_VF_VLAN_INFO_UNSPEC = 0, + IFLA_VF_VLAN_INFO = 1, + __IFLA_VF_VLAN_INFO_MAX = 2, +}; + +struct ifla_vf_vlan_info { + __u32 vf; + __u32 vlan; + __u32 qos; + __be16 vlan_proto; +}; + +struct ifla_vf_tx_rate { + __u32 vf; + __u32 rate; +}; + +struct ifla_vf_rate { + __u32 vf; + __u32 min_tx_rate; + __u32 max_tx_rate; +}; + +struct ifla_vf_spoofchk { + __u32 vf; + __u32 setting; +}; + +struct ifla_vf_link_state { + __u32 vf; + __u32 link_state; +}; + +struct 
ifla_vf_rss_query_en { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_STATS_RX_PACKETS = 0, + IFLA_VF_STATS_TX_PACKETS = 1, + IFLA_VF_STATS_RX_BYTES = 2, + IFLA_VF_STATS_TX_BYTES = 3, + IFLA_VF_STATS_BROADCAST = 4, + IFLA_VF_STATS_MULTICAST = 5, + IFLA_VF_STATS_PAD = 6, + IFLA_VF_STATS_RX_DROPPED = 7, + IFLA_VF_STATS_TX_DROPPED = 8, + __IFLA_VF_STATS_MAX = 9, +}; + +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_PORT_UNSPEC = 0, + IFLA_VF_PORT = 1, + __IFLA_VF_PORT_MAX = 2, +}; + +enum { + IFLA_PORT_UNSPEC = 0, + IFLA_PORT_VF = 1, + IFLA_PORT_PROFILE = 2, + IFLA_PORT_VSI_TYPE = 3, + IFLA_PORT_INSTANCE_UUID = 4, + IFLA_PORT_HOST_UUID = 5, + IFLA_PORT_REQUEST = 6, + IFLA_PORT_RESPONSE = 7, + __IFLA_PORT_MAX = 8, +}; + +struct if_stats_msg { + __u8 family; + __u8 pad1; + __u16 pad2; + __u32 ifindex; + __u32 filter_mask; +}; + +enum { + IFLA_STATS_UNSPEC = 0, + IFLA_STATS_LINK_64 = 1, + IFLA_STATS_LINK_XSTATS = 2, + IFLA_STATS_LINK_XSTATS_SLAVE = 3, + IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, + IFLA_STATS_AF_SPEC = 5, + __IFLA_STATS_MAX = 6, +}; + +enum { + IFLA_STATS_GETSET_UNSPEC = 0, + IFLA_STATS_GET_FILTERS = 1, + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 2, + __IFLA_STATS_GETSET_MAX = 3, +}; + +enum { + IFLA_OFFLOAD_XSTATS_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 2, + IFLA_OFFLOAD_XSTATS_L3_STATS = 3, + __IFLA_OFFLOAD_XSTATS_MAX = 4, +}; + +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 2, + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX = 3, +}; + +enum { + XDP_ATTACHED_NONE = 0, + XDP_ATTACHED_DRV = 1, + XDP_ATTACHED_SKB = 2, + XDP_ATTACHED_HW = 3, + XDP_ATTACHED_MULTI = 4, +}; + +enum { + IFLA_XDP_UNSPEC = 0, + IFLA_XDP_FD = 1, + IFLA_XDP_ATTACHED = 2, + IFLA_XDP_FLAGS = 3, + IFLA_XDP_PROG_ID = 4, + IFLA_XDP_DRV_PROG_ID = 5, + IFLA_XDP_SKB_PROG_ID = 6, + IFLA_XDP_HW_PROG_ID = 7, + IFLA_XDP_EXPECTED_FD = 8, + __IFLA_XDP_MAX = 9, +}; + +enum { + IFLA_EVENT_NONE = 0, + IFLA_EVENT_REBOOT = 1, + IFLA_EVENT_FEATURES = 2, + IFLA_EVENT_BONDING_FAILOVER = 3, + IFLA_EVENT_NOTIFY_PEERS = 4, + IFLA_EVENT_IGMP_RESEND = 5, + IFLA_EVENT_BONDING_OPTIONS = 6, +}; + +enum netdev_offload_xstats_type { + NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, +}; + +enum { + IFLA_BRIDGE_FLAGS = 0, + IFLA_BRIDGE_MODE = 1, + IFLA_BRIDGE_VLAN_INFO = 2, + IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, + IFLA_BRIDGE_MRP = 4, + IFLA_BRIDGE_CFM = 5, + IFLA_BRIDGE_MST = 6, + __IFLA_BRIDGE_MAX = 7, +}; + +struct br_port_msg { + __u8 family; + __u32 ifindex; +}; + +struct br_mdb_entry { + __u32 ifindex; + __u8 state; + __u8 flags; + __u16 vid; + struct { + union { + __be32 ip4; + struct in6_addr ip6; + unsigned char mac_addr[6]; + } u; + __be16 proto; + } addr; +}; + +enum { + MDBA_SET_ENTRY_UNSPEC = 0, + MDBA_SET_ENTRY = 1, + MDBA_SET_ENTRY_ATTRS = 2, + __MDBA_SET_ENTRY_MAX = 3, +}; + +enum { + MDBA_GET_ENTRY_UNSPEC = 0, + MDBA_GET_ENTRY = 1, + MDBA_GET_ENTRY_ATTRS = 2, + __MDBA_GET_ENTRY_MAX = 3, +}; + +enum { + BR_MCAST_DIR_RX = 0, + BR_MCAST_DIR_TX = 1, + BR_MCAST_DIR_SIZE = 2, +}; + +enum { + RTM_BASE = 16, + RTM_NEWLINK = 16, + RTM_DELLINK = 17, + RTM_GETLINK = 18, + RTM_SETLINK = 19, + RTM_NEWADDR = 20, + RTM_DELADDR = 21, + RTM_GETADDR = 22, + RTM_NEWROUTE = 24, + RTM_DELROUTE = 25, + RTM_GETROUTE = 26, + RTM_NEWNEIGH = 28, + RTM_DELNEIGH = 29, + RTM_GETNEIGH = 30, + RTM_NEWRULE = 32, + RTM_DELRULE = 33, + RTM_GETRULE = 34, + RTM_NEWQDISC = 36, + RTM_DELQDISC = 37, + RTM_GETQDISC = 38, + 
RTM_NEWTCLASS = 40, + RTM_DELTCLASS = 41, + RTM_GETTCLASS = 42, + RTM_NEWTFILTER = 44, + RTM_DELTFILTER = 45, + RTM_GETTFILTER = 46, + RTM_NEWACTION = 48, + RTM_DELACTION = 49, + RTM_GETACTION = 50, + RTM_NEWPREFIX = 52, + RTM_GETMULTICAST = 58, + RTM_GETANYCAST = 62, + RTM_NEWNEIGHTBL = 64, + RTM_GETNEIGHTBL = 66, + RTM_SETNEIGHTBL = 67, + RTM_NEWNDUSEROPT = 68, + RTM_NEWADDRLABEL = 72, + RTM_DELADDRLABEL = 73, + RTM_GETADDRLABEL = 74, + RTM_GETDCB = 78, + RTM_SETDCB = 79, + RTM_NEWNETCONF = 80, + RTM_DELNETCONF = 81, + RTM_GETNETCONF = 82, + RTM_NEWMDB = 84, + RTM_DELMDB = 85, + RTM_GETMDB = 86, + RTM_NEWNSID = 88, + RTM_DELNSID = 89, + RTM_GETNSID = 90, + RTM_NEWSTATS = 92, + RTM_GETSTATS = 94, + RTM_SETSTATS = 95, + RTM_NEWCACHEREPORT = 96, + RTM_NEWCHAIN = 100, + RTM_DELCHAIN = 101, + RTM_GETCHAIN = 102, + RTM_NEWNEXTHOP = 104, + RTM_DELNEXTHOP = 105, + RTM_GETNEXTHOP = 106, + RTM_NEWLINKPROP = 108, + RTM_DELLINKPROP = 109, + RTM_GETLINKPROP = 110, + RTM_NEWVLAN = 112, + RTM_DELVLAN = 113, + RTM_GETVLAN = 114, + RTM_NEWNEXTHOPBUCKET = 116, + RTM_DELNEXTHOPBUCKET = 117, + RTM_GETNEXTHOPBUCKET = 118, + RTM_NEWTUNNEL = 120, + RTM_DELTUNNEL = 121, + RTM_GETTUNNEL = 122, + __RTM_MAX = 123, +}; + +enum rtattr_type_t { + RTA_UNSPEC = 0, + RTA_DST = 1, + RTA_SRC = 2, + RTA_IIF = 3, + RTA_OIF = 4, + RTA_GATEWAY = 5, + RTA_PRIORITY = 6, + RTA_PREFSRC = 7, + RTA_METRICS = 8, + RTA_MULTIPATH = 9, + RTA_PROTOINFO = 10, + RTA_FLOW = 11, + RTA_CACHEINFO = 12, + RTA_SESSION = 13, + RTA_MP_ALGO = 14, + RTA_TABLE = 15, + RTA_MARK = 16, + RTA_MFC_STATS = 17, + RTA_VIA = 18, + RTA_NEWDST = 19, + RTA_PREF = 20, + RTA_ENCAP_TYPE = 21, + RTA_ENCAP = 22, + RTA_EXPIRES = 23, + RTA_PAD = 24, + RTA_UID = 25, + RTA_TTL_PROPAGATE = 26, + RTA_IP_PROTO = 27, + RTA_SPORT = 28, + RTA_DPORT = 29, + RTA_NH_ID = 30, + __RTA_MAX = 31, +}; + +struct rta_cacheinfo { + __u32 rta_clntref; + __u32 rta_lastuse; + __s32 rta_expires; + __u32 rta_error; + __u32 rta_used; + __u32 rta_id; + __u32 rta_ts; + __u32 rta_tsage; +}; + +struct rtgenmsg { + unsigned char rtgen_family; +}; + +struct ifinfomsg { + unsigned char ifi_family; + unsigned char __ifi_pad; + short unsigned int ifi_type; + int ifi_index; + unsigned int ifi_flags; + unsigned int ifi_change; +}; + +enum { + TCA_UNSPEC = 0, + TCA_KIND = 1, + TCA_OPTIONS = 2, + TCA_STATS = 3, + TCA_XSTATS = 4, + TCA_RATE = 5, + TCA_FCNT = 6, + TCA_STATS2 = 7, + TCA_STAB = 8, + TCA_PAD = 9, + TCA_DUMP_INVISIBLE = 10, + TCA_CHAIN = 11, + TCA_HW_OFFLOAD = 12, + TCA_INGRESS_BLOCK = 13, + TCA_EGRESS_BLOCK = 14, + TCA_DUMP_FLAGS = 15, + TCA_EXT_WARN_MSG = 16, + __TCA_MAX = 17, +}; + +enum rtnetlink_groups { + RTNLGRP_NONE = 0, + RTNLGRP_LINK = 1, + RTNLGRP_NOTIFY = 2, + RTNLGRP_NEIGH = 3, + RTNLGRP_TC = 4, + RTNLGRP_IPV4_IFADDR = 5, + RTNLGRP_IPV4_MROUTE = 6, + RTNLGRP_IPV4_ROUTE = 7, + RTNLGRP_IPV4_RULE = 8, + RTNLGRP_IPV6_IFADDR = 9, + RTNLGRP_IPV6_MROUTE = 10, + RTNLGRP_IPV6_ROUTE = 11, + RTNLGRP_IPV6_IFINFO = 12, + RTNLGRP_DECnet_IFADDR = 13, + RTNLGRP_NOP2 = 14, + RTNLGRP_DECnet_ROUTE = 15, + RTNLGRP_DECnet_RULE = 16, + RTNLGRP_NOP4 = 17, + RTNLGRP_IPV6_PREFIX = 18, + RTNLGRP_IPV6_RULE = 19, + RTNLGRP_ND_USEROPT = 20, + RTNLGRP_PHONET_IFADDR = 21, + RTNLGRP_PHONET_ROUTE = 22, + RTNLGRP_DCB = 23, + RTNLGRP_IPV4_NETCONF = 24, + RTNLGRP_IPV6_NETCONF = 25, + RTNLGRP_MDB = 26, + RTNLGRP_MPLS_ROUTE = 27, + RTNLGRP_NSID = 28, + RTNLGRP_MPLS_NETCONF = 29, + RTNLGRP_IPV4_MROUTE_R = 30, + RTNLGRP_IPV6_MROUTE_R = 31, + RTNLGRP_NEXTHOP = 32, + RTNLGRP_BRVLAN = 33, + RTNLGRP_MCTP_IFADDR = 34, + 
RTNLGRP_TUNNEL = 35, + RTNLGRP_STATS = 36, + __RTNLGRP_MAX = 37, +}; + +enum nla_policy_validation { + NLA_VALIDATE_NONE = 0, + NLA_VALIDATE_RANGE = 1, + NLA_VALIDATE_RANGE_WARN_TOO_LONG = 2, + NLA_VALIDATE_MIN = 3, + NLA_VALIDATE_MAX = 4, + NLA_VALIDATE_MASK = 5, + NLA_VALIDATE_RANGE_PTR = 6, + NLA_VALIDATE_FUNCTION = 7, +}; + +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); + +typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); + +enum rtnl_link_flags { + RTNL_FLAG_DOIT_UNLOCKED = 1, + RTNL_FLAG_BULK_DEL_SUPPORTED = 2, + RTNL_FLAG_DUMP_UNLOCKED = 4, + RTNL_FLAG_DUMP_SPLIT_NLM_DONE = 8, +}; + +enum rtnl_kinds { + RTNL_KIND_NEW = 0, + RTNL_KIND_DEL = 1, + RTNL_KIND_GET = 2, + RTNL_KIND_SET = 3, +}; + +struct rtnl_msg_handler { + struct module *owner; + int protocol; + int msgtype; + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + int flags; +}; + +struct rtnl_af_ops { + struct list_head list; + struct srcu_struct srcu; + int family; + int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); + size_t (*get_link_af_size)(const struct net_device *, u32); + int (*validate_link_af)(const struct net_device *, const struct nlattr *, struct netlink_ext_ack *); + int (*set_link_af)(struct net_device *, const struct nlattr *, struct netlink_ext_ack *); + int (*fill_stats_af)(struct sk_buff *, const struct net_device *); + size_t (*get_stats_af_size)(const struct net_device *); +}; + +struct rtnl_link { + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + struct module *owner; + unsigned int flags; + struct callback_head rcu; +}; + +struct rtnl_nets { + struct net *net[3]; + unsigned char len; +}; + +struct rtnl_newlink_tbs { + struct nlattr *tb[67]; + struct nlattr *linkinfo[6]; + struct nlattr *attr[51]; + struct nlattr *slave_attr[45]; +}; + +struct rtnl_offload_xstats_request_used { + bool request; + bool used; +}; + +struct rtnl_stats_dump_filters { + u32 mask[6]; +}; + +struct rtnl_mdb_dump_ctx { + long int idx; +}; + +enum { + BPF_REG_0 = 0, + BPF_REG_1 = 1, + BPF_REG_2 = 2, + BPF_REG_3 = 3, + BPF_REG_4 = 4, + BPF_REG_5 = 5, + BPF_REG_6 = 6, + BPF_REG_7 = 7, + BPF_REG_8 = 8, + BPF_REG_9 = 9, + BPF_REG_10 = 10, + __MAX_BPF_REG = 11, +}; + +typedef sockptr_t bpfptr_t; + +struct btf_member { + __u32 name_off; + __u32 type; + __u32 offset; +}; + +struct btf_struct_meta { + u32 btf_id; + struct btf_record *record; +}; + +struct bpf_verifier_log { + u64 start_pos; + u64 end_pos; + char *ubuf; + u32 level; + u32 len_total; + u32 len_max; + char kbuf[1024]; +}; + +enum priv_stack_mode { + PRIV_STACK_UNKNOWN = 0, + NO_PRIV_STACK = 1, + PRIV_STACK_ADAPTIVE = 2, +}; + +struct bpf_subprog_arg_info { + enum bpf_arg_type arg_type; + union { + u32 mem_size; + u32 btf_id; + }; +}; + +struct bpf_subprog_info { + u32 start; + u32 linfo_idx; + u16 stack_depth; + u16 stack_extra; + s16 fastcall_stack_off; + bool has_tail_call: 1; + bool tail_call_reachable: 1; + bool has_ld_abs: 1; + bool is_cb: 1; + bool is_async_cb: 1; + bool is_exception_cb: 1; + bool args_cached: 1; + bool keep_fastcall_stack: 1; + enum priv_stack_mode priv_stack_mode; + u8 arg_cnt; + struct bpf_subprog_arg_info args[5]; +}; + +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +struct bpf_idmap { + u32 tmp_id_gen; + struct bpf_id_pair map[600]; +}; + +struct bpf_idset { + u32 count; + u32 ids[600]; +}; + +struct backtrack_state { + struct bpf_verifier_env *env; + u32 frame; + u32 reg_masks[8]; + u64 stack_masks[8]; +}; + +enum bpf_dynptr_type { + 
BPF_DYNPTR_TYPE_INVALID = 0, + BPF_DYNPTR_TYPE_LOCAL = 1, + BPF_DYNPTR_TYPE_RINGBUF = 2, + BPF_DYNPTR_TYPE_SKB = 3, + BPF_DYNPTR_TYPE_XDP = 4, +}; + +enum bpf_iter_state { + BPF_ITER_STATE_INVALID = 0, + BPF_ITER_STATE_ACTIVE = 1, + BPF_ITER_STATE_DRAINED = 2, +}; + +struct tnum { + u64 value; + u64 mask; +}; + +enum bpf_reg_liveness { + REG_LIVE_NONE = 0, + REG_LIVE_READ32 = 1, + REG_LIVE_READ64 = 2, + REG_LIVE_READ = 3, + REG_LIVE_WRITTEN = 4, + REG_LIVE_DONE = 8, +}; + +struct bpf_reg_state { + enum bpf_reg_type type; + s32 off; + union { + int range; + struct { + struct bpf_map *map_ptr; + u32 map_uid; + }; + struct { + struct btf *btf; + u32 btf_id; + }; + struct { + u32 mem_size; + u32 dynptr_id; + }; + struct { + enum bpf_dynptr_type type; + bool first_slot; + } dynptr; + struct { + struct btf *btf; + u32 btf_id; + enum bpf_iter_state state: 2; + int depth: 30; + } iter; + struct { + long unsigned int raw1; + long unsigned int raw2; + } raw; + u32 subprogno; + }; + long: 32; + struct tnum var_off; + s64 smin_value; + s64 smax_value; + u64 umin_value; + u64 umax_value; + s32 s32_min_value; + s32 s32_max_value; + u32 u32_min_value; + u32 u32_max_value; + u32 id; + u32 ref_obj_id; + struct bpf_reg_state *parent; + u32 frameno; + s32 subreg_def; + enum bpf_reg_liveness live; + bool precise; + long: 32; +}; + +struct bpf_verifier_ops; + +struct bpf_verifier_stack_elem; + +struct bpf_verifier_state; + +struct bpf_verifier_state_list; + +struct bpf_insn_aux_data; + +struct bpf_insn_hist_entry; + +struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; + struct bpf_prog *prog; + const struct bpf_verifier_ops *ops; + struct module *attach_btf_mod; + struct bpf_verifier_stack_elem *head; + int stack_size; + bool strict_alignment; + bool test_state_freq; + bool test_reg_invariants; + struct bpf_verifier_state *cur_state; + struct bpf_verifier_state_list **explored_states; + struct bpf_verifier_state_list *free_list; + struct bpf_map *used_maps[64]; + struct btf_mod_pair used_btfs[64]; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 id_gen; + u32 hidden_subprog_cnt; + int exception_callback_subprog; + bool explore_alu_limits; + bool allow_ptr_leaks; + bool allow_uninit_stack; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; + bool seen_direct_write; + bool seen_exception; + struct bpf_insn_aux_data *insn_aux_data; + const struct bpf_line_info *prev_linfo; + struct bpf_verifier_log log; + struct bpf_subprog_info subprog_info[258]; + union { + struct bpf_idmap idmap_scratch; + struct bpf_idset idset_scratch; + }; + struct { + int *insn_state; + int *insn_stack; + int cur_stack; + } cfg; + struct backtrack_state bt; + struct bpf_insn_hist_entry *insn_hist; + struct bpf_insn_hist_entry *cur_hist_ent; + u32 insn_hist_cap; + u32 pass_cnt; + u32 subprog_cnt; + u32 prev_insn_processed; + u32 insn_processed; + u32 prev_jmps_processed; + u32 jmps_processed; + long: 32; + u64 verification_time; + u32 max_states_per_insn; + u32 total_states; + u32 peak_states; + u32 longest_mark_read_walk; + bpfptr_t fd_array; + u32 scratched_regs; + long: 32; + u64 scratched_stack_slots; + u64 prev_log_pos; + u64 prev_insn_print_pos; + struct bpf_reg_state fake_reg[2]; + char tmp_str_buf[320]; + struct bpf_insn insn_buf[32]; + struct bpf_insn epilogue_buf[32]; +}; + +struct bpf_retval_range { + s32 minval; + s32 maxval; +}; + +struct bpf_reference_state; + +struct bpf_stack_state; + +struct bpf_func_state { + struct bpf_reg_state regs[11]; + int callsite; + u32 frameno; + u32 subprogno; + u32 
async_entry_cnt; + struct bpf_retval_range callback_ret_range; + bool in_callback_fn; + bool in_async_callback_fn; + bool in_exception_callback_fn; + u32 callback_depth; + int acquired_refs; + int active_locks; + struct bpf_reference_state *refs; + struct bpf_stack_state *stack; + int allocated_stack; + long: 32; +}; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2, +}; + +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + bool is_ldsx; + union { + int ctx_field_size; + struct { + struct btf *btf; + u32 btf_id; + }; + }; + struct bpf_verifier_log *log; + bool is_retval; +}; + +struct bpf_verifier_ops { + const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *); + bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *); + int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); + int (*gen_epilogue)(struct bpf_insn *, const struct bpf_prog *, s16); + int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); + u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + int (*btf_struct_access)(struct bpf_verifier_log *, const struct bpf_reg_state *, int, int); +}; + +struct bpf_tramp_link; + +struct bpf_tramp_links { + struct bpf_tramp_link *links[38]; + int nr_links; +}; + +struct bpf_tramp_link { + struct bpf_link link; + struct hlist_node tramp_hlist; + u64 cookie; +}; + +struct bpf_struct_ops { + const struct bpf_verifier_ops *verifier_ops; + int (*init)(struct btf *); + int (*check_member)(const struct btf_type *, const struct btf_member *, const struct bpf_prog *); + int (*init_member)(const struct btf_type *, const struct btf_member *, void *, const void *); + int (*reg)(void *, struct bpf_link *); + void (*unreg)(void *, struct bpf_link *); + int (*update)(void *, void *, struct bpf_link *); + int (*validate)(void *); + void *cfi_stubs; + struct module *owner; + const char *name; + struct btf_func_model func_models[64]; +}; + +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT = 0, + BPF_STRUCT_OPS_STATE_INUSE = 1, + BPF_STRUCT_OPS_STATE_TOBEFREE = 2, + BPF_STRUCT_OPS_STATE_READY = 3, +}; + +struct bpf_struct_ops_common_value { + refcount_t refcnt; + enum bpf_struct_ops_state state; +}; + +struct bpf_dummy_ops_state { + int val; +}; + +struct bpf_dummy_ops { + int (*test_1)(struct bpf_dummy_ops_state *); + int (*test_2)(struct bpf_dummy_ops_state *, int, short unsigned int, char, long unsigned int); + int (*test_sleepable)(struct bpf_dummy_ops_state *); +}; + +struct bpf_stack_state { + struct bpf_reg_state spilled_ptr; + u8 slot_type[8]; +}; + +enum ref_state_type { + REF_TYPE_PTR = 0, + REF_TYPE_LOCK = 1, +}; + +struct bpf_reference_state { + enum ref_state_type type; + int id; + int insn_idx; + void *ptr; +}; + +enum { + INSN_F_FRAMENO_MASK = 7, + INSN_F_SPI_MASK = 63, + INSN_F_SPI_SHIFT = 3, + INSN_F_STACK_ACCESS = 512, +}; + +struct bpf_insn_hist_entry { + u32 idx; + u32 prev_idx: 22; + u32 flags: 10; + u64 linked_regs; +}; + +struct bpf_verifier_state { + struct bpf_func_state *frame[8]; + struct bpf_verifier_state *parent; + u32 branches; + u32 insn_idx; + u32 curframe; + bool speculative; + bool active_rcu_lock; + u32 active_preempt_lock; + bool used_as_loop_entry; + bool in_sleepable; + u32 first_insn_idx; + u32 last_insn_idx; + struct bpf_verifier_state *loop_entry; + u32 insn_hist_start; + u32 insn_hist_end; + u32 dfs_depth; + u32 callback_unroll_depth; + u32 may_goto_depth; +}; + 
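
The kernel-side `struct bpf_struct_ops` and the `bpf_dummy_ops`/`bpf_dummy_ops_state` definitions above are the BTF types a struct_ops BPF program is written against. As an illustrative sketch only, not part of this patch and independent of the selftest's own main.bpf.c, a BPF-side struct_ops implementation is conventionally declared as below; the section names follow the usual libbpf conventions, the `dummy_example_ops` name is hypothetical, and whether a given ops type can actually be registered depends on the kernel subsystem that owns it.

/* Illustrative sketch, assuming a vmlinux.h that carries bpf_dummy_ops
 * (as the generated headers in this patch do). Not part of the diff. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* Body for one member of the kernel-defined ops struct; the verifier
 * checks it against the btf_func_model recorded in bpf_struct_ops. */
SEC("struct_ops/test_1")
int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
{
	return 0;
}

/* Placing an initialized instance in ".struct_ops" makes libbpf create a
 * BPF_MAP_TYPE_STRUCT_OPS map for it; user space then registers it via
 * bpf_map__attach_struct_ops() (hypothetical map name). */
SEC(".struct_ops")
struct bpf_dummy_ops dummy_example_ops = {
	.test_1 = (void *)test_1,
};
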
+struct bpf_verifier_state_list { + struct bpf_verifier_state state; + struct bpf_verifier_state_list *next; + int miss_cnt; + int hit_cnt; +}; + +struct bpf_loop_inline_state { + unsigned int initialized: 1; + unsigned int fit_for_inline: 1; + u32 callback_subprogno; +}; + +struct bpf_map_ptr_state { + struct bpf_map *map_ptr; + bool poison; + bool unpriv; +}; + +struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; + struct bpf_map_ptr_state map_ptr_state; + s32 call_imm; + u32 alu_limit; + struct { + u32 map_index; + u32 map_off; + }; + struct { + enum bpf_reg_type reg_type; + union { + struct { + struct btf *btf; + u32 btf_id; + }; + u32 mem_size; + }; + } btf_var; + struct bpf_loop_inline_state loop_inline_state; + }; + long: 32; + union { + u64 obj_new_size; + u64 insert_off; + }; + struct btf_struct_meta *kptr_struct_meta; + long: 32; + u64 map_key_state; + int ctx_field_size; + u32 seen; + bool sanitize_stack_spill; + bool zext_dst; + bool needs_zext; + bool storage_get_func_atomic; + bool is_iter_next; + bool call_with_percpu_alloc_ptr; + u8 alu_state; + u8 fastcall_pattern: 1; + u8 fastcall_spills_num: 3; + unsigned int orig_idx; + bool jmp_point; + bool prune_point; + bool force_checkpoint; + bool calls_callback; +}; + +typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *, ...); + +struct bpf_dummy_ops_test_args { + u64 args[12]; + struct bpf_dummy_ops_state state; + long: 32; +}; + +struct bpf_struct_ops_bpf_dummy_ops { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct bpf_dummy_ops data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum { + ETHTOOL_MSG_USER_NONE = 0, + ETHTOOL_MSG_STRSET_GET = 1, + ETHTOOL_MSG_LINKINFO_GET = 2, + ETHTOOL_MSG_LINKINFO_SET = 3, + ETHTOOL_MSG_LINKMODES_GET = 4, + ETHTOOL_MSG_LINKMODES_SET = 5, + ETHTOOL_MSG_LINKSTATE_GET = 6, + ETHTOOL_MSG_DEBUG_GET = 7, + ETHTOOL_MSG_DEBUG_SET = 8, + ETHTOOL_MSG_WOL_GET = 9, + ETHTOOL_MSG_WOL_SET = 10, + ETHTOOL_MSG_FEATURES_GET = 11, + ETHTOOL_MSG_FEATURES_SET = 12, + ETHTOOL_MSG_PRIVFLAGS_GET = 13, + ETHTOOL_MSG_PRIVFLAGS_SET = 14, + ETHTOOL_MSG_RINGS_GET = 15, + ETHTOOL_MSG_RINGS_SET = 16, + ETHTOOL_MSG_CHANNELS_GET = 17, + ETHTOOL_MSG_CHANNELS_SET = 18, + ETHTOOL_MSG_COALESCE_GET = 19, + ETHTOOL_MSG_COALESCE_SET = 20, + ETHTOOL_MSG_PAUSE_GET = 21, + ETHTOOL_MSG_PAUSE_SET = 22, + ETHTOOL_MSG_EEE_GET = 23, + ETHTOOL_MSG_EEE_SET = 24, + ETHTOOL_MSG_TSINFO_GET = 25, + ETHTOOL_MSG_CABLE_TEST_ACT = 26, + ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 27, + ETHTOOL_MSG_TUNNEL_INFO_GET = 28, + ETHTOOL_MSG_FEC_GET = 29, + ETHTOOL_MSG_FEC_SET = 30, + ETHTOOL_MSG_MODULE_EEPROM_GET = 31, + ETHTOOL_MSG_STATS_GET = 32, + ETHTOOL_MSG_PHC_VCLOCKS_GET = 33, + ETHTOOL_MSG_MODULE_GET = 34, + ETHTOOL_MSG_MODULE_SET = 35, + ETHTOOL_MSG_PSE_GET = 36, + ETHTOOL_MSG_PSE_SET = 37, + ETHTOOL_MSG_RSS_GET = 38, + ETHTOOL_MSG_PLCA_GET_CFG = 39, + 
ETHTOOL_MSG_PLCA_SET_CFG = 40, + ETHTOOL_MSG_PLCA_GET_STATUS = 41, + ETHTOOL_MSG_MM_GET = 42, + ETHTOOL_MSG_MM_SET = 43, + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 44, + ETHTOOL_MSG_PHY_GET = 45, + __ETHTOOL_MSG_USER_CNT = 46, + ETHTOOL_MSG_USER_MAX = 45, +}; + +enum { + ETHTOOL_A_HEADER_UNSPEC = 0, + ETHTOOL_A_HEADER_DEV_INDEX = 1, + ETHTOOL_A_HEADER_DEV_NAME = 2, + ETHTOOL_A_HEADER_FLAGS = 3, + ETHTOOL_A_HEADER_PHY_INDEX = 4, + __ETHTOOL_A_HEADER_CNT = 5, + ETHTOOL_A_HEADER_MAX = 4, +}; + +enum { + ETHTOOL_A_STRSET_UNSPEC = 0, + ETHTOOL_A_STRSET_HEADER = 1, + ETHTOOL_A_STRSET_STRINGSETS = 2, + ETHTOOL_A_STRSET_COUNTS_ONLY = 3, + __ETHTOOL_A_STRSET_CNT = 4, + ETHTOOL_A_STRSET_MAX = 3, +}; + +enum { + ETHTOOL_A_LINKINFO_UNSPEC = 0, + ETHTOOL_A_LINKINFO_HEADER = 1, + ETHTOOL_A_LINKINFO_PORT = 2, + ETHTOOL_A_LINKINFO_PHYADDR = 3, + ETHTOOL_A_LINKINFO_TP_MDIX = 4, + ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5, + ETHTOOL_A_LINKINFO_TRANSCEIVER = 6, + __ETHTOOL_A_LINKINFO_CNT = 7, + ETHTOOL_A_LINKINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_LINKMODES_UNSPEC = 0, + ETHTOOL_A_LINKMODES_HEADER = 1, + ETHTOOL_A_LINKMODES_AUTONEG = 2, + ETHTOOL_A_LINKMODES_OURS = 3, + ETHTOOL_A_LINKMODES_PEER = 4, + ETHTOOL_A_LINKMODES_SPEED = 5, + ETHTOOL_A_LINKMODES_DUPLEX = 6, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8, + ETHTOOL_A_LINKMODES_LANES = 9, + ETHTOOL_A_LINKMODES_RATE_MATCHING = 10, + __ETHTOOL_A_LINKMODES_CNT = 11, + ETHTOOL_A_LINKMODES_MAX = 10, +}; + +enum { + ETHTOOL_A_LINKSTATE_UNSPEC = 0, + ETHTOOL_A_LINKSTATE_HEADER = 1, + ETHTOOL_A_LINKSTATE_LINK = 2, + ETHTOOL_A_LINKSTATE_SQI = 3, + ETHTOOL_A_LINKSTATE_SQI_MAX = 4, + ETHTOOL_A_LINKSTATE_EXT_STATE = 5, + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 6, + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 7, + __ETHTOOL_A_LINKSTATE_CNT = 8, + ETHTOOL_A_LINKSTATE_MAX = 7, +}; + +enum { + ETHTOOL_A_DEBUG_UNSPEC = 0, + ETHTOOL_A_DEBUG_HEADER = 1, + ETHTOOL_A_DEBUG_MSGMASK = 2, + __ETHTOOL_A_DEBUG_CNT = 3, + ETHTOOL_A_DEBUG_MAX = 2, +}; + +enum { + ETHTOOL_A_WOL_UNSPEC = 0, + ETHTOOL_A_WOL_HEADER = 1, + ETHTOOL_A_WOL_MODES = 2, + ETHTOOL_A_WOL_SOPASS = 3, + __ETHTOOL_A_WOL_CNT = 4, + ETHTOOL_A_WOL_MAX = 3, +}; + +enum { + ETHTOOL_A_FEATURES_UNSPEC = 0, + ETHTOOL_A_FEATURES_HEADER = 1, + ETHTOOL_A_FEATURES_HW = 2, + ETHTOOL_A_FEATURES_WANTED = 3, + ETHTOOL_A_FEATURES_ACTIVE = 4, + ETHTOOL_A_FEATURES_NOCHANGE = 5, + __ETHTOOL_A_FEATURES_CNT = 6, + ETHTOOL_A_FEATURES_MAX = 5, +}; + +enum { + ETHTOOL_A_PRIVFLAGS_UNSPEC = 0, + ETHTOOL_A_PRIVFLAGS_HEADER = 1, + ETHTOOL_A_PRIVFLAGS_FLAGS = 2, + __ETHTOOL_A_PRIVFLAGS_CNT = 3, + ETHTOOL_A_PRIVFLAGS_MAX = 2, +}; + +enum { + ETHTOOL_A_RINGS_UNSPEC = 0, + ETHTOOL_A_RINGS_HEADER = 1, + ETHTOOL_A_RINGS_RX_MAX = 2, + ETHTOOL_A_RINGS_RX_MINI_MAX = 3, + ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4, + ETHTOOL_A_RINGS_TX_MAX = 5, + ETHTOOL_A_RINGS_RX = 6, + ETHTOOL_A_RINGS_RX_MINI = 7, + ETHTOOL_A_RINGS_RX_JUMBO = 8, + ETHTOOL_A_RINGS_TX = 9, + ETHTOOL_A_RINGS_RX_BUF_LEN = 10, + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 11, + ETHTOOL_A_RINGS_CQE_SIZE = 12, + ETHTOOL_A_RINGS_TX_PUSH = 13, + ETHTOOL_A_RINGS_RX_PUSH = 14, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 15, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 16, + __ETHTOOL_A_RINGS_CNT = 17, + ETHTOOL_A_RINGS_MAX = 16, +}; + +enum { + ETHTOOL_A_CHANNELS_UNSPEC = 0, + ETHTOOL_A_CHANNELS_HEADER = 1, + ETHTOOL_A_CHANNELS_RX_MAX = 2, + ETHTOOL_A_CHANNELS_TX_MAX = 3, + ETHTOOL_A_CHANNELS_OTHER_MAX = 4, + ETHTOOL_A_CHANNELS_COMBINED_MAX = 5, + ETHTOOL_A_CHANNELS_RX_COUNT = 6, + ETHTOOL_A_CHANNELS_TX_COUNT 
= 7, + ETHTOOL_A_CHANNELS_OTHER_COUNT = 8, + ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9, + __ETHTOOL_A_CHANNELS_CNT = 10, + ETHTOOL_A_CHANNELS_MAX = 9, +}; + +enum { + ETHTOOL_A_COALESCE_UNSPEC = 0, + ETHTOOL_A_COALESCE_HEADER = 1, + ETHTOOL_A_COALESCE_RX_USECS = 2, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3, + ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5, + ETHTOOL_A_COALESCE_TX_USECS = 6, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7, + ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9, + ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12, + ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13, + ETHTOOL_A_COALESCE_RX_USECS_LOW = 14, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15, + ETHTOOL_A_COALESCE_TX_USECS_LOW = 16, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17, + ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18, + ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20, + ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22, + ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23, + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 24, + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 25, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES = 26, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES = 27, + ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS = 28, + ETHTOOL_A_COALESCE_RX_PROFILE = 29, + ETHTOOL_A_COALESCE_TX_PROFILE = 30, + __ETHTOOL_A_COALESCE_CNT = 31, + ETHTOOL_A_COALESCE_MAX = 30, +}; + +enum { + ETHTOOL_A_PAUSE_UNSPEC = 0, + ETHTOOL_A_PAUSE_HEADER = 1, + ETHTOOL_A_PAUSE_AUTONEG = 2, + ETHTOOL_A_PAUSE_RX = 3, + ETHTOOL_A_PAUSE_TX = 4, + ETHTOOL_A_PAUSE_STATS = 5, + ETHTOOL_A_PAUSE_STATS_SRC = 6, + __ETHTOOL_A_PAUSE_CNT = 7, + ETHTOOL_A_PAUSE_MAX = 6, +}; + +enum { + ETHTOOL_A_EEE_UNSPEC = 0, + ETHTOOL_A_EEE_HEADER = 1, + ETHTOOL_A_EEE_MODES_OURS = 2, + ETHTOOL_A_EEE_MODES_PEER = 3, + ETHTOOL_A_EEE_ACTIVE = 4, + ETHTOOL_A_EEE_ENABLED = 5, + ETHTOOL_A_EEE_TX_LPI_ENABLED = 6, + ETHTOOL_A_EEE_TX_LPI_TIMER = 7, + __ETHTOOL_A_EEE_CNT = 8, + ETHTOOL_A_EEE_MAX = 7, +}; + +enum { + ETHTOOL_A_TSINFO_UNSPEC = 0, + ETHTOOL_A_TSINFO_HEADER = 1, + ETHTOOL_A_TSINFO_TIMESTAMPING = 2, + ETHTOOL_A_TSINFO_TX_TYPES = 3, + ETHTOOL_A_TSINFO_RX_FILTERS = 4, + ETHTOOL_A_TSINFO_PHC_INDEX = 5, + ETHTOOL_A_TSINFO_STATS = 6, + __ETHTOOL_A_TSINFO_CNT = 7, + ETHTOOL_A_TSINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC = 0, + ETHTOOL_A_PHC_VCLOCKS_HEADER = 1, + ETHTOOL_A_PHC_VCLOCKS_NUM = 2, + ETHTOOL_A_PHC_VCLOCKS_INDEX = 3, + __ETHTOOL_A_PHC_VCLOCKS_CNT = 4, + ETHTOOL_A_PHC_VCLOCKS_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_HEADER = 1, + __ETHTOOL_A_CABLE_TEST_CNT = 2, + ETHTOOL_A_CABLE_TEST_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG = 2, + __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3, + ETHTOOL_A_CABLE_TEST_TDR_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC = 0, + ETHTOOL_A_TUNNEL_INFO_HEADER = 1, + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 2, + __ETHTOOL_A_TUNNEL_INFO_CNT = 3, + ETHTOOL_A_TUNNEL_INFO_MAX = 2, +}; + +enum { + ETHTOOL_A_FEC_UNSPEC = 0, + ETHTOOL_A_FEC_HEADER = 1, + ETHTOOL_A_FEC_MODES = 2, + ETHTOOL_A_FEC_AUTO = 3, + ETHTOOL_A_FEC_ACTIVE = 4, + ETHTOOL_A_FEC_STATS = 5, + __ETHTOOL_A_FEC_CNT = 6, + ETHTOOL_A_FEC_MAX = 5, +}; + +enum { + ETHTOOL_A_MODULE_EEPROM_UNSPEC = 0, + ETHTOOL_A_MODULE_EEPROM_HEADER = 1, + ETHTOOL_A_MODULE_EEPROM_OFFSET = 2, + 
ETHTOOL_A_MODULE_EEPROM_LENGTH = 3, + ETHTOOL_A_MODULE_EEPROM_PAGE = 4, + ETHTOOL_A_MODULE_EEPROM_BANK = 5, + ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS = 6, + ETHTOOL_A_MODULE_EEPROM_DATA = 7, + __ETHTOOL_A_MODULE_EEPROM_CNT = 8, + ETHTOOL_A_MODULE_EEPROM_MAX = 7, +}; + +enum { + ETHTOOL_A_STATS_UNSPEC = 0, + ETHTOOL_A_STATS_PAD = 1, + ETHTOOL_A_STATS_HEADER = 2, + ETHTOOL_A_STATS_GROUPS = 3, + ETHTOOL_A_STATS_GRP = 4, + ETHTOOL_A_STATS_SRC = 5, + __ETHTOOL_A_STATS_CNT = 6, + ETHTOOL_A_STATS_MAX = 5, +}; + +enum { + ETHTOOL_STATS_ETH_PHY = 0, + ETHTOOL_STATS_ETH_MAC = 1, + ETHTOOL_STATS_ETH_CTRL = 2, + ETHTOOL_STATS_RMON = 3, + __ETHTOOL_STATS_CNT = 4, +}; + +enum { + ETHTOOL_A_STATS_GRP_UNSPEC = 0, + ETHTOOL_A_STATS_GRP_PAD = 1, + ETHTOOL_A_STATS_GRP_ID = 2, + ETHTOOL_A_STATS_GRP_SS_ID = 3, + ETHTOOL_A_STATS_GRP_STAT = 4, + ETHTOOL_A_STATS_GRP_HIST_RX = 5, + ETHTOOL_A_STATS_GRP_HIST_TX = 6, + ETHTOOL_A_STATS_GRP_HIST_BKT_LOW = 7, + ETHTOOL_A_STATS_GRP_HIST_BKT_HI = 8, + ETHTOOL_A_STATS_GRP_HIST_VAL = 9, + __ETHTOOL_A_STATS_GRP_CNT = 10, + ETHTOOL_A_STATS_GRP_MAX = 9, +}; + +enum { + ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR = 0, + __ETHTOOL_A_STATS_ETH_PHY_CNT = 1, + ETHTOOL_A_STATS_ETH_PHY_MAX = 0, +}; + +enum { + ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT = 0, + ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL = 1, + ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL = 2, + ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT = 3, + ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR = 4, + ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR = 5, + ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES = 6, + ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER = 7, + ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL = 8, + ETHTOOL_A_STATS_ETH_MAC_11_XS_COL = 9, + ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR = 10, + ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR = 11, + ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES = 12, + ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR = 13, + ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST = 14, + ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST = 15, + ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER = 16, + ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST = 17, + ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST = 18, + ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR = 19, + ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN = 20, + ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR = 21, + __ETHTOOL_A_STATS_ETH_MAC_CNT = 22, + ETHTOOL_A_STATS_ETH_MAC_MAX = 21, +}; + +enum { + ETHTOOL_A_STATS_ETH_CTRL_3_TX = 0, + ETHTOOL_A_STATS_ETH_CTRL_4_RX = 1, + ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP = 2, + __ETHTOOL_A_STATS_ETH_CTRL_CNT = 3, + ETHTOOL_A_STATS_ETH_CTRL_MAX = 2, +}; + +enum { + ETHTOOL_A_STATS_RMON_UNDERSIZE = 0, + ETHTOOL_A_STATS_RMON_OVERSIZE = 1, + ETHTOOL_A_STATS_RMON_FRAG = 2, + ETHTOOL_A_STATS_RMON_JABBER = 3, + __ETHTOOL_A_STATS_RMON_CNT = 4, + ETHTOOL_A_STATS_RMON_MAX = 3, +}; + +enum { + ETHTOOL_A_MODULE_UNSPEC = 0, + ETHTOOL_A_MODULE_HEADER = 1, + ETHTOOL_A_MODULE_POWER_MODE_POLICY = 2, + ETHTOOL_A_MODULE_POWER_MODE = 3, + __ETHTOOL_A_MODULE_CNT = 4, + ETHTOOL_A_MODULE_MAX = 3, +}; + +enum { + ETHTOOL_A_PSE_UNSPEC = 0, + ETHTOOL_A_PSE_HEADER = 1, + ETHTOOL_A_PODL_PSE_ADMIN_STATE = 2, + ETHTOOL_A_PODL_PSE_ADMIN_CONTROL = 3, + ETHTOOL_A_PODL_PSE_PW_D_STATUS = 4, + ETHTOOL_A_C33_PSE_ADMIN_STATE = 5, + ETHTOOL_A_C33_PSE_ADMIN_CONTROL = 6, + ETHTOOL_A_C33_PSE_PW_D_STATUS = 7, + ETHTOOL_A_C33_PSE_PW_CLASS = 8, + ETHTOOL_A_C33_PSE_ACTUAL_PW = 9, + ETHTOOL_A_C33_PSE_EXT_STATE = 10, + ETHTOOL_A_C33_PSE_EXT_SUBSTATE = 11, + ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT = 12, + ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES = 13, + __ETHTOOL_A_PSE_CNT = 14, + ETHTOOL_A_PSE_MAX = 13, +}; + +enum { + ETHTOOL_A_RSS_UNSPEC = 0, + ETHTOOL_A_RSS_HEADER = 1, + 
ETHTOOL_A_RSS_CONTEXT = 2, + ETHTOOL_A_RSS_HFUNC = 3, + ETHTOOL_A_RSS_INDIR = 4, + ETHTOOL_A_RSS_HKEY = 5, + ETHTOOL_A_RSS_INPUT_XFRM = 6, + ETHTOOL_A_RSS_START_CONTEXT = 7, + __ETHTOOL_A_RSS_CNT = 8, + ETHTOOL_A_RSS_MAX = 7, +}; + +enum { + ETHTOOL_A_PLCA_UNSPEC = 0, + ETHTOOL_A_PLCA_HEADER = 1, + ETHTOOL_A_PLCA_VERSION = 2, + ETHTOOL_A_PLCA_ENABLED = 3, + ETHTOOL_A_PLCA_STATUS = 4, + ETHTOOL_A_PLCA_NODE_CNT = 5, + ETHTOOL_A_PLCA_NODE_ID = 6, + ETHTOOL_A_PLCA_TO_TMR = 7, + ETHTOOL_A_PLCA_BURST_CNT = 8, + ETHTOOL_A_PLCA_BURST_TMR = 9, + __ETHTOOL_A_PLCA_CNT = 10, + ETHTOOL_A_PLCA_MAX = 9, +}; + +enum { + ETHTOOL_A_MM_UNSPEC = 0, + ETHTOOL_A_MM_HEADER = 1, + ETHTOOL_A_MM_PMAC_ENABLED = 2, + ETHTOOL_A_MM_TX_ENABLED = 3, + ETHTOOL_A_MM_TX_ACTIVE = 4, + ETHTOOL_A_MM_TX_MIN_FRAG_SIZE = 5, + ETHTOOL_A_MM_RX_MIN_FRAG_SIZE = 6, + ETHTOOL_A_MM_VERIFY_ENABLED = 7, + ETHTOOL_A_MM_VERIFY_STATUS = 8, + ETHTOOL_A_MM_VERIFY_TIME = 9, + ETHTOOL_A_MM_MAX_VERIFY_TIME = 10, + ETHTOOL_A_MM_STATS = 11, + __ETHTOOL_A_MM_CNT = 12, + ETHTOOL_A_MM_MAX = 11, +}; + +enum { + ETHTOOL_A_MODULE_FW_FLASH_UNSPEC = 0, + ETHTOOL_A_MODULE_FW_FLASH_HEADER = 1, + ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME = 2, + ETHTOOL_A_MODULE_FW_FLASH_PASSWORD = 3, + ETHTOOL_A_MODULE_FW_FLASH_STATUS = 4, + ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG = 5, + ETHTOOL_A_MODULE_FW_FLASH_DONE = 6, + ETHTOOL_A_MODULE_FW_FLASH_TOTAL = 7, + __ETHTOOL_A_MODULE_FW_FLASH_CNT = 8, + ETHTOOL_A_MODULE_FW_FLASH_MAX = 7, +}; + +enum { + ETHTOOL_A_PHY_UNSPEC = 0, + ETHTOOL_A_PHY_HEADER = 1, + ETHTOOL_A_PHY_INDEX = 2, + ETHTOOL_A_PHY_DRVNAME = 3, + ETHTOOL_A_PHY_NAME = 4, + ETHTOOL_A_PHY_UPSTREAM_TYPE = 5, + ETHTOOL_A_PHY_UPSTREAM_INDEX = 6, + ETHTOOL_A_PHY_UPSTREAM_SFP_NAME = 7, + ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME = 8, + __ETHTOOL_A_PHY_CNT = 9, + ETHTOOL_A_PHY_MAX = 8, +}; + +struct genlmsghdr { + __u8 cmd; + __u8 version; + __u16 reserved; +}; + +struct genl_multicast_group { + char name[16]; + u8 flags; +}; + +struct genl_split_ops; + +struct genl_info; + +struct genl_ops; + +struct genl_small_ops; + +struct genl_family { + unsigned int hdrsize; + char name[16]; + unsigned int version; + unsigned int maxattr; + u8 netnsok: 1; + u8 parallel_ops: 1; + u8 n_ops; + u8 n_small_ops; + u8 n_split_ops; + u8 n_mcgrps; + u8 resv_start_op; + const struct nla_policy *policy; + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + int (*bind)(int); + void (*unbind)(int); + const struct genl_ops *ops; + const struct genl_small_ops *small_ops; + const struct genl_split_ops *split_ops; + const struct genl_multicast_group *mcgrps; + struct module *module; + size_t sock_priv_size; + void (*sock_priv_init)(void *); + void (*sock_priv_destroy)(void *); + int id; + unsigned int mcgrp_offset; + struct xarray *sock_privs; +}; + +struct genl_split_ops { + union { + struct { + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + int (*doit)(struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + }; + struct { + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + }; + }; + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_info { + u32 snd_seq; + u32 snd_portid; + const struct genl_family 
*family; + const struct nlmsghdr *nlhdr; + struct genlmsghdr *genlhdr; + struct nlattr **attrs; + possible_net_t _net; + union { + u8 ctx[48]; + void *user_ptr[2]; + }; + struct netlink_ext_ack *extack; +}; + +struct genl_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_small_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct ethnl_req_info { + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; + u32 phy_index; +}; + +struct ethnl_reply_data { + struct net_device *dev; +}; + +struct ethnl_request_ops { + u8 request_cmd; + u8 reply_cmd; + u16 hdr_attr; + unsigned int req_info_size; + unsigned int reply_data_size; + bool allow_nodev_do; + u8 set_ntf_cmd; + int (*parse_request)(struct ethnl_req_info *, struct nlattr **, struct netlink_ext_ack *); + int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, const struct genl_info *); + int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *); + int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, const struct ethnl_reply_data *); + void (*cleanup_data)(struct ethnl_reply_data *); + int (*set_validate)(struct ethnl_req_info *, struct genl_info *); + int (*set)(struct ethnl_req_info *, struct genl_info *); +}; + +typedef const char (* const ethnl_string_array_t)[32]; + +struct stats_req_info { + struct ethnl_req_info base; + long unsigned int stat_mask[1]; + enum ethtool_mac_stats_src src; +}; + +struct stats_reply_data { + struct ethnl_reply_data base; + long: 32; + union { + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + }; + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + } stats; + }; + const struct ethtool_rmon_hist_range *rmon_ranges; + long: 32; +}; + +typedef u16 u_int16_t; + +typedef u32 u_int32_t; + +typedef u64 u_int64_t; + +enum ip_conntrack_info { + IP_CT_ESTABLISHED = 0, + IP_CT_RELATED = 1, + IP_CT_NEW = 2, + IP_CT_IS_REPLY = 3, + IP_CT_ESTABLISHED_REPLY = 3, + IP_CT_RELATED_REPLY = 4, + IP_CT_NUMBER = 5, + IP_CT_UNTRACKED = 7, +}; + +struct nf_conntrack { + refcount_t use; +}; + +union nf_inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +struct ip_ct_tcp_state { + u_int32_t td_end; + u_int32_t td_maxend; + u_int32_t td_maxwin; + u_int32_t td_maxack; + u_int8_t td_scale; + u_int8_t flags; +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; + u_int8_t state; + u_int8_t last_dir; + u_int8_t retrans; + u_int8_t last_index; + u_int32_t last_seq; + u_int32_t last_ack; + u_int32_t last_end; + u_int16_t last_win; + u_int8_t last_wscale; + u_int8_t last_flags; +}; + +struct ip_tunnel_parm_kern { + char name[16]; + long unsigned int i_flags[1]; + long unsigned int o_flags[1]; + __be32 i_key; + __be32 o_key; + int link; + struct iphdr iph; +}; + +union nf_conntrack_man_proto { + __be16 all; + struct { + __be16 
port; + } tcp; + struct { + __be16 port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; +}; + +struct nf_ct_dccp { + u_int8_t role[2]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; + +enum sctp_conntrack { + SCTP_CONNTRACK_NONE = 0, + SCTP_CONNTRACK_CLOSED = 1, + SCTP_CONNTRACK_COOKIE_WAIT = 2, + SCTP_CONNTRACK_COOKIE_ECHOED = 3, + SCTP_CONNTRACK_ESTABLISHED = 4, + SCTP_CONNTRACK_SHUTDOWN_SENT = 5, + SCTP_CONNTRACK_SHUTDOWN_RECD = 6, + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, + SCTP_CONNTRACK_HEARTBEAT_SENT = 8, + SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, + SCTP_CONNTRACK_MAX = 10, +}; + +struct ip_ct_sctp { + enum sctp_conntrack state; + __be32 vtag[2]; + u8 init[2]; + u8 last_dir; + u8 flags; +}; + +enum { + IP_TUNNEL_CSUM_BIT = 0, + IP_TUNNEL_ROUTING_BIT = 1, + IP_TUNNEL_KEY_BIT = 2, + IP_TUNNEL_SEQ_BIT = 3, + IP_TUNNEL_STRICT_BIT = 4, + IP_TUNNEL_REC_BIT = 5, + IP_TUNNEL_VERSION_BIT = 6, + IP_TUNNEL_NO_KEY_BIT = 7, + IP_TUNNEL_DONT_FRAGMENT_BIT = 8, + IP_TUNNEL_OAM_BIT = 9, + IP_TUNNEL_CRIT_OPT_BIT = 10, + IP_TUNNEL_GENEVE_OPT_BIT = 11, + IP_TUNNEL_VXLAN_OPT_BIT = 12, + IP_TUNNEL_NOCACHE_BIT = 13, + IP_TUNNEL_ERSPAN_OPT_BIT = 14, + IP_TUNNEL_GTP_OPT_BIT = 15, + IP_TUNNEL_VTI_BIT = 16, + IP_TUNNEL_SIT_ISATAP_BIT = 16, + IP_TUNNEL_PFCP_OPT_BIT = 17, + __IP_TUNNEL_FLAG_NUM = 18, +}; + +struct fib_info; + +struct fib_nh { + struct fib_nh_common nh_common; + struct hlist_node nh_hash; + struct fib_info *nh_parent; + __be32 nh_saddr; + int nh_saddr_genid; +}; + +struct fib_info { + struct hlist_node fib_hash; + struct hlist_node fib_lhash; + struct list_head nh_list; + struct net *fib_net; + refcount_t fib_treeref; + refcount_t fib_clntref; + unsigned int fib_flags; + unsigned char fib_dead; + unsigned char fib_protocol; + unsigned char fib_scope; + unsigned char fib_type; + __be32 fib_prefsrc; + u32 fib_tb_id; + u32 fib_priority; + struct dst_metrics *fib_metrics; + int fib_nhs; + bool fib_nh_is_v6; + bool nh_updated; + bool pfsrc_removed; + struct nexthop *nh; + struct callback_head rcu; + struct fib_nh fib_nh[0]; +}; + +struct nh_info; + +struct nh_group; + +struct nexthop { + struct rb_node rb_node; + struct list_head fi_list; + struct list_head f6i_list; + struct list_head fdb_list; + struct list_head grp_list; + struct net *net; + u32 id; + u8 protocol; + u8 nh_flags; + bool is_group; + refcount_t refcnt; + struct callback_head rcu; + union { + struct nh_info *nh_info; + struct nh_group *nh_grp; + }; +}; + +struct nh_info { + struct hlist_node dev_hash; + struct nexthop *nh_parent; + u8 family; + bool reject_nh; + bool fdb_nh; + union { + struct fib_nh_common fib_nhc; + struct fib_nh fib_nh; + struct fib6_nh fib6_nh; + }; +}; + +struct nh_grp_entry; + +struct nh_res_bucket { + struct nh_grp_entry *nh_entry; + atomic_long_t used_time; + long unsigned int migrated_time; + bool occupied; + u8 nh_flags; +}; + +struct nh_grp_entry_stats; + +struct nh_grp_entry { + struct nexthop *nh; + struct nh_grp_entry_stats *stats; + u16 weight; + union { + struct { + atomic_t upper_bound; + } hthr; + struct { + struct list_head uw_nh_entry; + u16 count_buckets; + u16 wants_buckets; + } res; + }; + struct list_head nh_list; + struct nexthop *nh_parent; + long: 32; + u64 packets_hw; +}; + +struct nh_res_table { + struct net *net; + u32 nhg_id; + struct delayed_work upkeep_dw; + struct list_head uw_nh_entries; + long unsigned int unbalanced_since; + u32 idle_timer; + 
u32 unbalanced_timer; + u16 num_nh_buckets; + struct nh_res_bucket nh_buckets[0]; +}; + +struct nh_grp_entry_stats { + u64_stats_t packets; + struct u64_stats_sync syncp; + long: 32; +}; + +struct nh_group { + struct nh_group *spare; + u16 num_nh; + bool is_multipath; + bool hash_threshold; + bool resilient; + bool fdb_nh; + bool has_v4; + bool hw_stats; + struct nh_res_table *res_table; + struct nh_grp_entry nh_entries[0]; +}; + +struct nf_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +struct nf_conntrack_man { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + u_int16_t l3num; +}; + +struct nf_conntrack_tuple { + struct nf_conntrack_man src; + struct { + union nf_inet_addr u3; + union { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + u_int8_t type; + u_int8_t code; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; + } u; + u_int8_t protonum; + struct {} __nfct_hash_offsetend; + u_int8_t dir; + } dst; +}; + +struct nf_conntrack_tuple_hash { + struct hlist_nulls_node hnnode; + struct nf_conntrack_tuple tuple; +}; + +struct nf_ct_udp { + long unsigned int stream_ts; +}; + +union nf_conntrack_proto { + struct nf_ct_dccp dccp; + struct ip_ct_sctp sctp; + struct ip_ct_tcp tcp; + struct nf_ct_udp udp; + struct nf_ct_gre gre; + unsigned int tmpl_padto; +}; + +struct nf_ct_ext; + +struct nf_conn { + struct nf_conntrack ct_general; + spinlock_t lock; + u32 timeout; + struct nf_conntrack_tuple_hash tuplehash[2]; + long unsigned int status; + possible_net_t ct_net; + struct hlist_node nat_bysource; + struct {} __nfct_init_offset; + struct nf_conn *master; + u_int32_t mark; + struct nf_ct_ext *ext; + union nf_conntrack_proto proto; +}; + +struct nf_ct_ext { + u8 offset[4]; + u8 len; + unsigned int gen_id; + long: 32; + char data[0]; +}; + +enum nf_ct_ext_id { + NF_CT_EXT_HELPER = 0, + NF_CT_EXT_NAT = 1, + NF_CT_EXT_SEQADJ = 2, + NF_CT_EXT_ACCT = 3, + NF_CT_EXT_NUM = 4, +}; + +struct nf_conntrack_expect_policy; + +struct nf_conntrack_helper { + struct hlist_node hnode; + char name[16]; + refcount_t refcnt; + struct module *me; + const struct nf_conntrack_expect_policy *expect_policy; + struct nf_conntrack_tuple tuple; + int (*help)(struct sk_buff *, unsigned int, struct nf_conn *, enum ip_conntrack_info); + void (*destroy)(struct nf_conn *); + int (*from_nlattr)(struct nlattr *, struct nf_conn *); + int (*to_nlattr)(struct sk_buff *, const struct nf_conn *); + unsigned int expect_class_max; + unsigned int flags; + unsigned int queue_num; + u16 data_len; + char nat_mod_name[16]; +}; + +struct nf_conntrack_expect_policy { + unsigned int max_expected; + unsigned int timeout; + char name[16]; +}; + +enum nf_ct_helper_flags { + NF_CT_HELPER_F_USERSPACE = 1, + NF_CT_HELPER_F_CONFIGURED = 2, +}; + +struct nf_conn_help { + struct nf_conntrack_helper *helper; + struct hlist_head expectations; + u8 expecting[4]; + long: 32; + char data[32]; +}; + +enum ctattr_help { + CTA_HELP_UNSPEC = 0, + CTA_HELP_NAME = 1, + CTA_HELP_INFO = 2, + __CTA_HELP_MAX = 3, +}; + +enum nfnl_cthelper_msg_types { + NFNL_MSG_CTHELPER_NEW = 0, + NFNL_MSG_CTHELPER_GET = 1, + NFNL_MSG_CTHELPER_DEL = 2, + NFNL_MSG_CTHELPER_MAX = 3, +}; + +enum nfnl_cthelper_type { + NFCTH_UNSPEC = 0, + NFCTH_NAME = 1, + NFCTH_TUPLE = 2, + NFCTH_QUEUE_NUM = 3, + NFCTH_POLICY = 4, + NFCTH_PRIV_DATA_LEN = 5, + NFCTH_STATUS = 6, + __NFCTH_MAX = 7, +}; + +enum nfnl_cthelper_policy_type { + NFCTH_POLICY_SET_UNSPEC = 
0, + NFCTH_POLICY_SET_NUM = 1, + NFCTH_POLICY_SET = 2, + NFCTH_POLICY_SET1 = 2, + NFCTH_POLICY_SET2 = 3, + NFCTH_POLICY_SET3 = 4, + NFCTH_POLICY_SET4 = 5, + __NFCTH_POLICY_SET_MAX = 6, +}; + +enum nfnl_cthelper_pol_type { + NFCTH_POLICY_UNSPEC = 0, + NFCTH_POLICY_NAME = 1, + NFCTH_POLICY_EXPECT_MAX = 2, + NFCTH_POLICY_EXPECT_TIMEOUT = 3, + __NFCTH_POLICY_MAX = 4, +}; + +enum nfnl_cthelper_tuple_type { + NFCTH_TUPLE_UNSPEC = 0, + NFCTH_TUPLE_L3PROTONUM = 1, + NFCTH_TUPLE_L4PROTONUM = 2, + __NFCTH_TUPLE_MAX = 3, +}; + +struct nfnl_cthelper { + struct list_head list; + struct nf_conntrack_helper helper; +}; + +enum nft_set_flags { + NFT_SET_ANONYMOUS = 1, + NFT_SET_CONSTANT = 2, + NFT_SET_INTERVAL = 4, + NFT_SET_MAP = 8, + NFT_SET_TIMEOUT = 16, + NFT_SET_EVAL = 32, + NFT_SET_OBJECT = 64, + NFT_SET_CONCAT = 128, + NFT_SET_EXPR = 256, +}; + +enum nft_objref_attributes { + NFTA_OBJREF_UNSPEC = 0, + NFTA_OBJREF_IMM_TYPE = 1, + NFTA_OBJREF_IMM_NAME = 2, + NFTA_OBJREF_SET_SREG = 3, + NFTA_OBJREF_SET_NAME = 4, + NFTA_OBJREF_SET_ID = 5, + __NFTA_OBJREF_MAX = 6, +}; + +struct nft_elem_priv {}; + +struct nft_set_elem { + union { + u32 buf[16]; + struct nft_data val; + } key; + union { + u32 buf[16]; + struct nft_data val; + } key_end; + union { + u32 buf[16]; + struct nft_data val; + } data; + struct nft_elem_priv *priv; + long: 32; +}; + +enum nft_iter_type { + NFT_ITER_UNSPEC = 0, + NFT_ITER_READ = 1, + NFT_ITER_UPDATE = 2, +}; + +struct nft_set; + +struct nft_set_iter { + u8 genmask; + enum nft_iter_type type: 8; + unsigned int count; + unsigned int skip; + int err; + int (*fn)(const struct nft_ctx *, struct nft_set *, const struct nft_set_iter *, struct nft_elem_priv *); +}; + +struct nft_set_ops; + +struct nft_set { + struct list_head list; + struct list_head bindings; + refcount_t refs; + struct nft_table *table; + possible_net_t net; + char *name; + u64 handle; + u32 ktype; + u32 dtype; + u32 objtype; + u32 size; + u8 field_len[16]; + u8 field_count; + u32 use; + atomic_t nelems; + u32 ndeact; + u64 timeout; + u32 gc_int; + u16 policy; + u16 udlen; + unsigned char *udata; + struct list_head pending_update; + long: 32; + long: 32; + long: 32; + const struct nft_set_ops *ops; + u16 flags: 13; + u16 dead: 1; + u16 genmask: 2; + u8 klen; + u8 dlen; + u8 num_exprs; + struct nft_expr *exprs[2]; + struct list_head catchall_list; + long: 32; + unsigned char data[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nft_set_desc { + u32 ktype; + unsigned int klen; + u32 dtype; + unsigned int dlen; + u32 objtype; + unsigned int size; + u32 policy; + u32 gc_int; + u64 timeout; + u8 field_len[16]; + u8 field_count; + bool expr; + long: 32; +}; + +enum nft_set_class { + NFT_SET_CLASS_O_1 = 0, + NFT_SET_CLASS_O_LOG_N = 1, + NFT_SET_CLASS_O_N = 2, +}; + +struct nft_set_estimate { + u64 size; + enum nft_set_class lookup; + enum nft_set_class space; +}; + +struct nft_set_ext; + +struct nft_set_ops { + bool (*lookup)(const struct net *, const struct nft_set *, const u32 *, const struct nft_set_ext **); + bool (*update)(struct nft_set *, const u32 *, struct nft_elem_priv * (*)(struct nft_set *, const struct nft_expr *, struct nft_regs *), const struct nft_expr *, struct nft_regs *, const struct nft_set_ext **); + bool (*delete)(const struct nft_set *, const u32 *); + 
int (*insert)(const struct net *, const struct nft_set *, const struct nft_set_elem *, struct nft_elem_priv **); + void (*activate)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + struct nft_elem_priv * (*deactivate)(const struct net *, const struct nft_set *, const struct nft_set_elem *); + void (*flush)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + void (*remove)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + void (*walk)(const struct nft_ctx *, struct nft_set *, struct nft_set_iter *); + struct nft_elem_priv * (*get)(const struct net *, const struct nft_set *, const struct nft_set_elem *, unsigned int); + void (*commit)(struct nft_set *); + void (*abort)(const struct nft_set *); + u64 (*privsize)(const struct nlattr * const *, const struct nft_set_desc *); + bool (*estimate)(const struct nft_set_desc *, u32, struct nft_set_estimate *); + int (*init)(const struct nft_set *, const struct nft_set_desc *, const struct nlattr * const *); + void (*destroy)(const struct nft_ctx *, const struct nft_set *); + void (*gc_init)(const struct nft_set *); + unsigned int elemsize; +}; + +struct nft_set_ext { + u8 genmask; + u8 offset[8]; + char data[0]; +}; + +struct nft_set_binding { + struct list_head list; + const struct nft_chain *chain; + u32 flags; +}; + +struct nft_object_hash_key { + const char *name; + const struct nft_table *table; +}; + +struct nft_object_ops; + +struct nft_object { + struct list_head list; + struct rhlist_head rhlhead; + struct nft_object_hash_key key; + u32 genmask: 2; + u32 use; + u64 handle; + u16 udlen; + u8 *udata; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + const struct nft_object_ops *ops; + long: 32; + unsigned char data[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nft_object_type; + +struct nft_object_ops { + void (*eval)(struct nft_object *, struct nft_regs *, const struct nft_pktinfo *); + unsigned int size; + int (*init)(const struct nft_ctx *, const struct nlattr * const *, struct nft_object *); + void (*destroy)(const struct nft_ctx *, struct nft_object *); + int (*dump)(struct sk_buff *, struct nft_object *, bool); + void (*update)(struct nft_object *, struct nft_object *); + const struct nft_object_type *type; +}; + +struct nft_object_type { + const struct nft_object_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); + const struct nft_object_ops *ops; + struct list_head list; + u32 type; + unsigned int maxattr; + u8 family; + struct module *owner; + const struct nla_policy *policy; +}; + +struct nft_objref_map { + struct nft_set *set; + u8 sreg; + struct nft_set_binding binding; +}; + +typedef union { + __be32 a4; + __be32 a6[4]; + struct in6_addr in6; +} xfrm_address_t; + +struct xfrm_id { + xfrm_address_t daddr; + __be32 spi; + __u8 proto; +}; + +struct xfrm_sec_ctx { + __u8 ctx_doi; + __u8 ctx_alg; + __u16 ctx_len; + __u32 ctx_sid; + char ctx_str[0]; +}; + +struct xfrm_selector { + xfrm_address_t daddr; + xfrm_address_t saddr; + __be16 
dport; + __be16 dport_mask; + __be16 sport; + __be16 sport_mask; + __u16 family; + __u8 prefixlen_d; + __u8 prefixlen_s; + __u8 proto; + int ifindex; + __kernel_uid32_t user; +}; + +struct xfrm_lifetime_cfg { + __u64 soft_byte_limit; + __u64 hard_byte_limit; + __u64 soft_packet_limit; + __u64 hard_packet_limit; + __u64 soft_add_expires_seconds; + __u64 hard_add_expires_seconds; + __u64 soft_use_expires_seconds; + __u64 hard_use_expires_seconds; +}; + +struct xfrm_lifetime_cur { + __u64 bytes; + __u64 packets; + __u64 add_time; + __u64 use_time; +}; + +struct xfrm_replay_state { + __u32 oseq; + __u32 seq; + __u32 bitmap; +}; + +struct xfrm_replay_state_esn { + unsigned int bmp_len; + __u32 oseq; + __u32 seq; + __u32 oseq_hi; + __u32 seq_hi; + __u32 replay_window; + __u32 bmp[0]; +}; + +struct xfrm_algo { + char alg_name[64]; + unsigned int alg_key_len; + char alg_key[0]; +}; + +struct xfrm_algo_auth { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_trunc_len; + char alg_key[0]; +}; + +struct xfrm_algo_aead { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_icv_len; + char alg_key[0]; +}; + +struct xfrm_stats { + __u32 replay_window; + __u32 replay; + __u32 integrity_failed; +}; + +struct xfrm_encap_tmpl { + __u16 encap_type; + __be16 encap_sport; + __be16 encap_dport; + xfrm_address_t encap_oa; +}; + +struct xfrm_mark { + __u32 v; + __u32 m; +}; + +struct xfrm_address_filter { + xfrm_address_t saddr; + xfrm_address_t daddr; + __u16 family; + __u8 splen; + __u8 dplen; +}; + +struct xfrm_state_walk { + struct list_head all; + u8 state; + u8 dying; + u8 proto; + u32 seq; + struct xfrm_address_filter *filter; +}; + +struct xfrm_dev_offload { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net_device *real_dev; + long unsigned int offload_handle; + u8 dir: 2; + u8 type: 2; + u8 flags: 2; +}; + +struct xfrm_mode { + u8 encap; + u8 family; + u8 flags; +}; + +enum xfrm_replay_mode { + XFRM_REPLAY_MODE_LEGACY = 0, + XFRM_REPLAY_MODE_BMP = 1, + XFRM_REPLAY_MODE_ESN = 2, +}; + +struct xfrm_type; + +struct xfrm_type_offload; + +struct xfrm_state { + possible_net_t xs_net; + union { + struct hlist_node gclist; + struct hlist_node bydst; + }; + union { + struct hlist_node dev_gclist; + struct hlist_node bysrc; + }; + struct hlist_node byspi; + struct hlist_node byseq; + struct hlist_node state_cache; + struct hlist_node state_cache_input; + refcount_t refcnt; + spinlock_t lock; + u32 pcpu_num; + struct xfrm_id id; + struct xfrm_selector sel; + struct xfrm_mark mark; + u32 if_id; + u32 tfcpad; + u32 genid; + struct xfrm_state_walk km; + struct { + u32 reqid; + u8 mode; + u8 replay_window; + u8 aalgo; + u8 ealgo; + u8 calgo; + u8 flags; + u16 family; + xfrm_address_t saddr; + int header_len; + int trailer_len; + u32 extra_flags; + struct xfrm_mark smark; + } props; + struct xfrm_lifetime_cfg lft; + struct xfrm_algo_auth *aalg; + struct xfrm_algo *ealg; + struct xfrm_algo *calg; + struct xfrm_algo_aead *aead; + const char *geniv; + __be16 new_mapping_sport; + u32 new_mapping; + u32 mapping_maxage; + struct xfrm_encap_tmpl *encap; + struct sock *encap_sk; + u32 nat_keepalive_interval; + long: 32; + time64_t nat_keepalive_expiration; + xfrm_address_t *coaddr; + struct xfrm_state *tunnel; + atomic_t tunnel_users; + struct xfrm_replay_state replay; + struct xfrm_replay_state_esn *replay_esn; + struct xfrm_replay_state preplay; + struct xfrm_replay_state_esn *preplay_esn; + enum xfrm_replay_mode repl_mode; + u32 xflags; + u32 replay_maxage; + u32 
replay_maxdiff; + struct timer_list rtimer; + struct xfrm_stats stats; + long: 32; + struct xfrm_lifetime_cur curlft; + struct hrtimer mtimer; + struct xfrm_dev_offload xso; + long int saved_tmo; + long: 32; + time64_t lastused; + struct page_frag xfrag; + const struct xfrm_type *type; + struct xfrm_mode inner_mode; + struct xfrm_mode inner_mode_iaf; + struct xfrm_mode outer_mode; + const struct xfrm_type_offload *type_offload; + struct xfrm_sec_ctx *security; + void *data; + u8 dir; +}; + +struct xfrm_type { + struct module *owner; + u8 proto; + u8 flags; + int (*init_state)(struct xfrm_state *, struct netlink_ext_ack *); + void (*destructor)(struct xfrm_state *); + int (*input)(struct xfrm_state *, struct sk_buff *); + int (*output)(struct xfrm_state *, struct sk_buff *); + int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); +}; + +struct xfrm_type_offload { + struct module *owner; + u8 proto; + void (*encap)(struct xfrm_state *, struct sk_buff *); + int (*input_tail)(struct xfrm_state *, struct sk_buff *); + int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t); +}; + +struct xfrm_offload { + struct { + __u32 low; + __u32 hi; + } seq; + __u32 flags; + __u32 status; + __u32 orig_mac_len; + __u8 proto; + __u8 inner_ipproto; +}; + +struct sec_path { + int len; + int olen; + int verified_cnt; + struct xfrm_state *xvec[6]; + struct xfrm_offload ovec[1]; +}; + +enum sock_type { + SOCK_DGRAM = 1, + SOCK_STREAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +struct __kernel_sockaddr_storage { + union { + struct { + __kernel_sa_family_t ss_family; + char __data[126]; + }; + void *__align; + }; +}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; + struct in_addr imr_address; + int imr_ifindex; +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + union { + __be32 imsf_slist[1]; + struct { + struct {} __empty_imsf_slist_flex; + __be32 imsf_slist_flex[0]; + }; + }; +}; + +struct group_filter { + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + }; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + }; + }; +}; + +struct sockaddr_in { + __kernel_sa_family_t sin_family; + __be16 sin_port; + struct in_addr sin_addr; + unsigned char __pad[8]; +}; + +struct seq_net_private { + struct net *net; + netns_tracker ns_tracker; +}; + +enum { + IPV4_DEVCONF_FORWARDING = 1, + IPV4_DEVCONF_MC_FORWARDING = 2, + IPV4_DEVCONF_PROXY_ARP = 3, + IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, + IPV4_DEVCONF_SECURE_REDIRECTS = 5, + IPV4_DEVCONF_SEND_REDIRECTS = 6, + IPV4_DEVCONF_SHARED_MEDIA = 7, + IPV4_DEVCONF_RP_FILTER = 8, + IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, + IPV4_DEVCONF_BOOTP_RELAY = 10, + IPV4_DEVCONF_LOG_MARTIANS = 11, + IPV4_DEVCONF_TAG = 12, + IPV4_DEVCONF_ARPFILTER = 13, + IPV4_DEVCONF_MEDIUM_ID = 14, + IPV4_DEVCONF_NOXFRM = 15, + IPV4_DEVCONF_NOPOLICY = 16, + IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, + IPV4_DEVCONF_ARP_ANNOUNCE = 18, + IPV4_DEVCONF_ARP_IGNORE = 19, + IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, + IPV4_DEVCONF_ARP_ACCEPT = 21, + IPV4_DEVCONF_ARP_NOTIFY = 22, + IPV4_DEVCONF_ACCEPT_LOCAL = 23, + 
IPV4_DEVCONF_SRC_VMARK = 24, + IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, + IPV4_DEVCONF_ROUTE_LOCALNET = 26, + IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, + IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, + IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, + IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, + IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, + IPV4_DEVCONF_BC_FORWARDING = 32, + IPV4_DEVCONF_ARP_EVICT_NOCARRIER = 33, + __IPV4_DEVCONF_MAX = 34, +}; + +enum rt_scope_t { + RT_SCOPE_UNIVERSE = 0, + RT_SCOPE_SITE = 200, + RT_SCOPE_LINK = 253, + RT_SCOPE_HOST = 254, + RT_SCOPE_NOWHERE = 255, +}; + +struct ip_sf_list; + +struct ip_mc_list { + struct in_device *interface; + __be32 multiaddr; + unsigned int sfmode; + struct ip_sf_list *sources; + struct ip_sf_list *tomb; + long unsigned int sfcount[2]; + union { + struct ip_mc_list *next; + struct ip_mc_list *next_rcu; + }; + struct ip_mc_list *next_hash; + struct timer_list timer; + int users; + refcount_t refcnt; + spinlock_t lock; + char tm_running; + char reporter; + char unsolicit_count; + char loaded; + unsigned char gsquery; + unsigned char crcount; + struct callback_head rcu; +}; + +struct igmphdr { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; +}; + +struct ip_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + __be32 sl_addr[0]; +}; + +struct ip_mc_socklist { + struct ip_mc_socklist *next_rcu; + struct ip_mreqn multi; + unsigned int sfmode; + struct ip_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ip_sf_list { + struct ip_sf_list *sf_next; + long unsigned int sf_count[2]; + __be32 sf_inaddr; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; +}; + +typedef u8 dscp_t; + +struct igmp_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *in_dev; +}; + +struct igmp_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *idev; + struct ip_mc_list *im; +}; + +enum nf_hook_ops_type { + NF_HOOK_OP_UNDEFINED = 0, + NF_HOOK_OP_NF_TABLES = 1, + NF_HOOK_OP_BPF = 2, +}; + +struct nf_hook_ops { + nf_hookfn *hook; + struct net_device *dev; + void *priv; + u8 pf; + enum nf_hook_ops_type hook_ops_type: 8; + unsigned int hooknum; + int priority; +}; + +enum nf_ip_hook_priorities { + NF_IP_PRI_FIRST = -2147483648, + NF_IP_PRI_RAW_BEFORE_DEFRAG = -450, + NF_IP_PRI_CONNTRACK_DEFRAG = -400, + NF_IP_PRI_RAW = -300, + NF_IP_PRI_SELINUX_FIRST = -225, + NF_IP_PRI_CONNTRACK = -200, + NF_IP_PRI_MANGLE = -150, + NF_IP_PRI_NAT_DST = -100, + NF_IP_PRI_FILTER = 0, + NF_IP_PRI_SECURITY = 50, + NF_IP_PRI_NAT_SRC = 100, + NF_IP_PRI_SELINUX_LAST = 225, + NF_IP_PRI_CONNTRACK_HELPER = 300, + NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, + NF_IP_PRI_LAST = 2147483647, +}; + +struct xt_entry_target { + union { + struct { + __u16 target_size; + char name[29]; + __u8 revision; + } user; + struct { + __u16 target_size; + struct xt_target *target; + } kernel; + __u16 target_size; + } u; + unsigned char data[0]; +}; + +struct xt_standard_target { + struct xt_entry_target target; + int verdict; +}; + +struct xt_counters { + __u64 pcnt; + __u64 bcnt; +}; + +struct xt_table_info; + +struct xt_table { + struct list_head list; + unsigned int valid_hooks; + struct xt_table_info *private; + struct nf_hook_ops *ops; + struct module *me; + u_int8_t af; + int priority; + const char name[32]; +}; + +struct xt_table_info { + unsigned int size; + unsigned int number; + unsigned int initial_entries; + unsigned int 
hook_entry[5]; + unsigned int underflow[5]; + unsigned int stacksize; + void ***jumpstack; + long: 32; + unsigned char entries[0]; +}; + +struct ipt_ip { + struct in_addr src; + struct in_addr dst; + struct in_addr smsk; + struct in_addr dmsk; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u16 proto; + __u8 flags; + __u8 invflags; +}; + +struct ipt_entry { + struct ipt_ip ip; + unsigned int nfcache; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct ipt_replace { + char name[32]; + unsigned int valid_hooks; + unsigned int num_entries; + unsigned int size; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int num_counters; + struct xt_counters *counters; + long: 32; + struct ipt_entry entries[0]; +}; + +struct ipt_standard { + struct ipt_entry entry; + struct xt_standard_target target; + long: 32; +}; + +enum { + IP6_FH_F_FRAG = 1, + IP6_FH_F_AUTH = 2, + IP6_FH_F_SKIP_RH = 4, +}; + +typedef initcall_t initcall_entry_t; + +typedef int (*parse_unknown_fn)(char *, char *, const char *, void *); + +struct trace_event_raw_initcall_level { + struct trace_entry ent; + u32 __data_loc_level; + char __data[0]; +}; + +struct trace_event_raw_initcall_start { + struct trace_entry ent; + initcall_t func; + char __data[0]; +}; + +struct trace_event_raw_initcall_finish { + struct trace_entry ent; + initcall_t func; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_initcall_level { + u32 level; + const void *level_ptr_; +}; + +struct trace_event_data_offsets_initcall_start {}; + +struct trace_event_data_offsets_initcall_finish {}; + +typedef void (*btf_trace_initcall_level)(void *, const char *); + +typedef void (*btf_trace_initcall_start)(void *, initcall_t); + +typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int); + +struct blacklist_entry { + struct list_head next; + char *buf; +}; + +enum ucount_type { + UCOUNT_USER_NAMESPACES = 0, + UCOUNT_PID_NAMESPACES = 1, + UCOUNT_UTS_NAMESPACES = 2, + UCOUNT_IPC_NAMESPACES = 3, + UCOUNT_NET_NAMESPACES = 4, + UCOUNT_MNT_NAMESPACES = 5, + UCOUNT_CGROUP_NAMESPACES = 6, + UCOUNT_TIME_NAMESPACES = 7, + UCOUNT_INOTIFY_INSTANCES = 8, + UCOUNT_INOTIFY_WATCHES = 9, + UCOUNT_COUNTS = 10, +}; + +enum rlimit_type { + UCOUNT_RLIMIT_NPROC = 0, + UCOUNT_RLIMIT_MSGQUEUE = 1, + UCOUNT_RLIMIT_SIGPENDING = 2, + UCOUNT_RLIMIT_MEMLOCK = 3, + UCOUNT_RLIMIT_COUNTS = 4, +}; + +enum { + PROC_ROOT_INO = 1, + PROC_IPC_INIT_INO = 4026531839, + PROC_UTS_INIT_INO = 4026531838, + PROC_USER_INIT_INO = 4026531837, + PROC_PID_INIT_INO = 4026531836, + PROC_CGROUP_INIT_INO = 4026531835, + PROC_TIME_INIT_INO = 4026531834, +}; + +struct ipc_ids { + int in_use; + short unsigned int seq; + struct rw_semaphore rwsem; + struct idr ipcs_idr; + int max_idx; + int last_idx; + struct rhashtable key_ht; +}; + +struct ipc_namespace { + struct ipc_ids ids[3]; + int sem_ctls[4]; + int used_sems; + unsigned int msg_ctlmax; + unsigned int msg_ctlmnb; + unsigned int msg_ctlmni; + long: 32; + struct percpu_counter percpu_msg_bytes; + struct percpu_counter percpu_msg_hdrs; + size_t shm_ctlmax; + size_t shm_ctlall; + long unsigned int shm_tot; + int shm_ctlmni; + int shm_rmid_forced; + struct notifier_block ipcns_nb; + struct vfsmount *mq_mnt; + unsigned int mq_queues_count; + unsigned int mq_queues_max; + unsigned int mq_msg_max; + unsigned int mq_msgsize_max; + unsigned int mq_msg_default; + unsigned int 
mq_msgsize_default; + struct ctl_table_set mq_set; + struct ctl_table_header *mq_sysctls; + struct ctl_table_set ipc_set; + struct ctl_table_header *ipc_sysctls; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct llist_node mnt_llist; + struct ns_common ns; +}; + +struct timens_offsets { + struct timespec64 monotonic; + struct timespec64 boottime; +}; + +struct time_namespace { + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; + struct timens_offsets offsets; + struct page *vvar_page; + bool frozen_offsets; +}; + +struct trace_event_raw_contention_begin { + struct trace_entry ent; + void *lock_addr; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_contention_end { + struct trace_entry ent; + void *lock_addr; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_contention_begin {}; + +struct trace_event_data_offsets_contention_end {}; + +typedef void (*btf_trace_contention_begin)(void *, void *, unsigned int); + +typedef void (*btf_trace_contention_end)(void *, void *, int); + +struct mutex_waiter { + struct list_head list; + struct task_struct *task; + struct ww_acquire_ctx *ww_ctx; +}; + +enum vmscan_throttle_state { + VMSCAN_THROTTLE_WRITEBACK = 0, + VMSCAN_THROTTLE_ISOLATED = 1, + VMSCAN_THROTTLE_NOPROGRESS = 2, + VMSCAN_THROTTLE_CONGESTED = 3, + NR_VMSCAN_THROTTLE = 4, +}; + +enum { + IRQ_DOMAIN_FLAG_HIERARCHY = 1, + IRQ_DOMAIN_NAME_ALLOCATED = 2, + IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, + IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, + IRQ_DOMAIN_FLAG_MSI = 16, + IRQ_DOMAIN_FLAG_ISOLATED_MSI = 32, + IRQ_DOMAIN_FLAG_NO_MAP = 64, + IRQ_DOMAIN_FLAG_MSI_PARENT = 256, + IRQ_DOMAIN_FLAG_MSI_DEVICE = 512, + IRQ_DOMAIN_FLAG_DESTROY_GC = 1024, + IRQ_DOMAIN_FLAG_NONCORE = 65536, +}; + +typedef long unsigned int ulong; + +typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); + +enum { + CSD_FLAG_LOCK = 1, + IRQ_WORK_PENDING = 1, + IRQ_WORK_BUSY = 2, + IRQ_WORK_LAZY = 4, + IRQ_WORK_HARD_IRQ = 8, + IRQ_WORK_CLAIMED = 3, + CSD_TYPE_ASYNC = 0, + CSD_TYPE_SYNC = 16, + CSD_TYPE_IRQ_WORK = 32, + CSD_TYPE_TTWU = 48, + CSD_FLAG_TYPE_MASK = 240, +}; + +struct rcu_gp_oldstate { + long unsigned int rgos_norm; + long unsigned int rgos_exp; +}; + +typedef int (*task_call_f)(struct task_struct *, void *); + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; + +struct rcu_exp_work { + long unsigned int rew_s; + struct kthread_work rew_work; +}; + +struct rcu_node { + raw_spinlock_t lock; + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + long unsigned int completedqs; + long unsigned int qsmask; + long unsigned int rcu_gp_init_mask; + long unsigned int qsmaskinit; + long unsigned int qsmaskinitnext; + long unsigned int expmask; + long unsigned int expmaskinit; + long unsigned int expmaskinitnext; + struct kthread_worker *exp_kworker; + long unsigned int cbovldmask; + long unsigned int ffmask; + long unsigned int grpmask; + int grplo; + int grphi; + u8 grpnum; + u8 level; + bool wait_blkd_tasks; + struct rcu_node *parent; + struct list_head blkd_tasks; + struct list_head *gp_tasks; + struct list_head *exp_tasks; + struct list_head *boost_tasks; + struct rt_mutex boost_mtx; + long unsigned int boost_time; + struct mutex kthread_mutex; + struct task_struct *boost_kthread_task; + unsigned int boost_kthread_status; + long unsigned int n_boosts; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + raw_spinlock_t fqslock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t exp_lock; + long unsigned int exp_seq_rq; + wait_queue_head_t exp_wq[4]; + struct rcu_exp_work rew; + bool exp_need_flush; + raw_spinlock_t exp_poll_lock; + long unsigned int exp_seq_poll_rq; + struct work_struct exp_poll_wq; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct rt_waiter_node { + struct rb_node entry; + int prio; + u64 deadline; +}; + +struct rt_mutex_waiter { + struct rt_waiter_node tree; + struct rt_waiter_node pi_tree; + struct task_struct *task; + struct rt_mutex_base *lock; + unsigned int wake_state; + struct ww_acquire_ctx *ww_ctx; +}; + +struct rcu_synchronize { + struct callback_head head; + struct completion completion; +}; + +enum ctx_state { + CT_STATE_DISABLED = -1, + CT_STATE_KERNEL = 0, + CT_STATE_IDLE = 1, + CT_STATE_USER = 2, + CT_STATE_GUEST = 3, + CT_STATE_MAX = 4, +}; + +struct context_tracking { + atomic_t state; + long int nesting; + long int nmi_nesting; +}; + +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block *head; +}; + +struct kernel_cpustat { + u64 cpustat[10]; +}; + +enum tick_dep_bits { + TICK_DEP_BIT_POSIX_TIMER = 0, + TICK_DEP_BIT_PERF_EVENTS = 1, + TICK_DEP_BIT_SCHED = 2, + TICK_DEP_BIT_CLOCK_UNSTABLE = 3, + TICK_DEP_BIT_RCU = 4, + TICK_DEP_BIT_RCU_EXP = 5, +}; + +union rcu_noqs { + struct { + u8 norm; + u8 exp; + } b; + u16 s; +}; + +struct rcu_snap_record { + long unsigned int gp_seq; + long: 32; + u64 cputime_irq; + u64 cputime_softirq; + u64 cputime_system; + long unsigned int nr_hardirqs; + unsigned int nr_softirqs; + long long unsigned int nr_csw; + long unsigned int jiffies; + long: 32; +}; + +struct rcu_data { + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + union rcu_noqs cpu_no_qs; + bool core_needs_qs; + bool beenonline; + bool gpwrap; + bool cpu_started; + struct rcu_node *mynode; + long unsigned int grpmask; + long unsigned int ticks_this_gp; + struct irq_work defer_qs_iw; + bool defer_qs_iw_pending; + struct work_struct strict_work; + struct rcu_segcblist cblist; + long int qlen_last_fqs_check; + long unsigned int n_cbs_invoked; + long unsigned int n_force_qs_snap; + long int blimit; + int watching_snap; + bool rcu_need_heavy_qs; + bool rcu_urgent_qs; + bool rcu_forced_tick; + bool rcu_forced_tick_exp; + long unsigned int barrier_seq_snap; + struct callback_head barrier_head; + int exp_watching_snap; + struct task_struct *rcu_cpu_kthread_task; + unsigned int rcu_cpu_kthread_status; + char rcu_cpu_has_work; + long unsigned int rcuc_activity; + unsigned int softirq_snap; + struct irq_work rcu_iw; + bool rcu_iw_pending; + long unsigned int rcu_iw_gp_seq; + long unsigned int rcu_ofl_gp_seq; + short int rcu_ofl_gp_state; + long unsigned int rcu_onl_gp_seq; + short int rcu_onl_gp_state; + long unsigned int last_fqs_resched; + long unsigned int last_sched_clock; + struct rcu_snap_record snap_record; + long int lazy_len; + int cpu; +}; + +struct sr_wait_node { + atomic_t inuse; + 
struct llist_node node; +}; + +struct rcu_state { + struct rcu_node node[3]; + struct rcu_node *level[3]; + int ncpus; + int n_online_cpus; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int gp_seq; + long unsigned int gp_max; + struct task_struct *gp_kthread; + struct swait_queue_head gp_wq; + short int gp_flags; + short int gp_state; + long unsigned int gp_wake_time; + long unsigned int gp_wake_seq; + long unsigned int gp_seq_polled; + long unsigned int gp_seq_polled_snap; + long unsigned int gp_seq_polled_exp_snap; + struct mutex barrier_mutex; + atomic_t barrier_cpu_count; + struct completion barrier_completion; + long unsigned int barrier_sequence; + raw_spinlock_t barrier_lock; + struct mutex exp_mutex; + struct mutex exp_wake_mutex; + long unsigned int expedited_sequence; + atomic_t expedited_need_qs; + struct swait_queue_head expedited_wq; + int ncpus_snap; + u8 cbovld; + u8 cbovldnext; + long unsigned int jiffies_force_qs; + long unsigned int jiffies_kick_kthreads; + long unsigned int n_force_qs; + long unsigned int gp_start; + long unsigned int gp_end; + long unsigned int gp_activity; + long unsigned int gp_req_activity; + long unsigned int jiffies_stall; + int nr_fqs_jiffies_stall; + long unsigned int jiffies_resched; + long unsigned int n_force_qs_gpstart; + const char *name; + char abbr; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + arch_spinlock_t ofl_lock; + struct llist_head srs_next; + struct llist_node *srs_wait_tail; + struct llist_node *srs_done_tail; + struct sr_wait_node srs_wait_nodes[5]; + struct work_struct srs_cleanup_work; + atomic_t srs_cleanups_pending; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct kvfree_rcu_bulk_data { + struct list_head list; + struct rcu_gp_oldstate gp_snap; + long unsigned int nr_records; + void *records[0]; +}; + +struct kfree_rcu_cpu; + +struct kfree_rcu_cpu_work { + struct rcu_work rcu_work; + struct callback_head *head_free; + struct rcu_gp_oldstate head_free_gp_snap; + struct list_head bulk_head_free[2]; + struct kfree_rcu_cpu *krcp; +}; + +struct kfree_rcu_cpu { + struct callback_head *head; + long unsigned int head_gp_snap; + atomic_t head_count; + struct list_head bulk_head[2]; + atomic_t bulk_count[2]; + struct kfree_rcu_cpu_work krw_arr[2]; + raw_spinlock_t lock; + struct delayed_work monitor_work; + bool initialized; + struct delayed_work page_cache_work; + atomic_t backoff_page_cache_fill; + atomic_t work_in_progress; + long: 32; + struct hrtimer hrtimer; + struct llist_head bkvcache; + int nr_bkv_objs; +}; + +struct rcu_stall_chk_rdr { + int nesting; + union rcu_special rs; + bool on_blkd_list; +}; + +struct trace_buffer_meta { + __u32 meta_page_size; + __u32 meta_struct_len; + __u32 subbuf_size; + __u32 nr_subbufs; + struct { + __u64 lost_events; + __u32 id; + __u32 read; + } reader; + __u64 flags; + __u64 entries; + __u64 overrun; + __u64 read; + __u64 Reserved1; + __u64 Reserved2; +}; + +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING = 29, + RINGBUF_TYPE_TIME_EXTEND 
= 30, + RINGBUF_TYPE_TIME_STAMP = 31, +}; + +typedef bool (*ring_buffer_cond_fn)(void *); + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1, +}; + +struct ring_buffer_per_cpu; + +struct buffer_page; + +struct ring_buffer_iter { + struct ring_buffer_per_cpu *cpu_buffer; + long unsigned int head; + long unsigned int next_event; + struct buffer_page *head_page; + struct buffer_page *cache_reader_page; + long unsigned int cache_read; + long unsigned int cache_pages_removed; + long: 32; + u64 read_stamp; + u64 page_stamp; + struct ring_buffer_event *event; + size_t event_size; + int missed_events; + long: 32; +}; + +struct rb_irq_work { + struct irq_work work; + wait_queue_head_t waiters; + wait_queue_head_t full_waiters; + atomic_t seq; + bool waiters_pending; + bool full_waiters_pending; + bool wakeup_full; +}; + +struct trace_buffer { + unsigned int flags; + int cpus; + atomic_t record_disabled; + atomic_t resizing; + cpumask_var_t cpumask; + struct lock_class_key *reader_lock_key; + struct mutex mutex; + struct ring_buffer_per_cpu **buffers; + struct hlist_node node; + u64 (*clock)(); + struct rb_irq_work irq_work; + bool time_stamp_abs; + long unsigned int range_addr_start; + long unsigned int range_addr_end; + long int last_text_delta; + long int last_data_delta; + unsigned int subbuf_size; + unsigned int subbuf_order; + unsigned int max_data_size; +}; + +struct ring_buffer_meta { + int magic; + int struct_size; + long unsigned int text_addr; + long unsigned int data_addr; + long unsigned int first_buffer; + long unsigned int head_buffer; + long unsigned int commit_buffer; + __u32 subbuf_size; + __u32 nr_subbufs; + int buffers[0]; +}; + +enum { + RB_LEN_TIME_EXTEND = 8, + RB_LEN_TIME_STAMP = 8, +}; + +struct buffer_data_page { + u64 time_stamp; + local_t commit; + unsigned char data[0]; + long: 32; +}; + +struct buffer_data_read_page { + unsigned int order; + struct buffer_data_page *data; +}; + +struct buffer_page { + struct list_head list; + local_t write; + unsigned int read; + local_t entries; + long unsigned int real_end; + unsigned int order; + u32 id: 30; + u32 range: 1; + struct buffer_data_page *page; +}; + +struct rb_event_info { + u64 ts; + u64 delta; + u64 before; + u64 after; + long unsigned int length; + struct buffer_page *tail_page; + int add_timestamp; + long: 32; +}; + +enum { + RB_ADD_STAMP_NONE = 0, + RB_ADD_STAMP_EXTEND = 2, + RB_ADD_STAMP_ABSOLUTE = 4, + RB_ADD_STAMP_FORCE = 8, +}; + +enum { + RB_CTX_TRANSITION = 0, + RB_CTX_NMI = 1, + RB_CTX_IRQ = 2, + RB_CTX_SOFTIRQ = 3, + RB_CTX_NORMAL = 4, + RB_CTX_MAX = 5, +}; + +struct rb_time_struct { + local64_t time; +}; + +typedef struct rb_time_struct rb_time_t; + +struct ring_buffer_per_cpu { + int cpu; + atomic_t record_disabled; + atomic_t resize_disabled; + struct trace_buffer *buffer; + raw_spinlock_t reader_lock; + arch_spinlock_t lock; + struct lock_class_key lock_key; + struct buffer_data_page *free_page; + long unsigned int nr_pages; + unsigned int current_context; + struct list_head *pages; + long unsigned int cnt; + struct buffer_page *head_page; + struct buffer_page *tail_page; + struct buffer_page *commit_page; + struct buffer_page *reader_page; + long unsigned int lost_events; + long unsigned int last_overrun; + long unsigned int nest; + local_t entries_bytes; + local_t entries; + local_t overrun; + local_t commit_overrun; + local_t dropped_events; + local_t committing; + local_t commits; + local_t pages_touched; + local_t pages_lost; + local_t pages_read; + long int last_pages_touch; + size_t shortest_full; 
+ long unsigned int read; + long unsigned int read_bytes; + rb_time_t write_stamp; + rb_time_t before_stamp; + u64 event_stamp[5]; + u64 read_stamp; + long unsigned int pages_removed; + unsigned int mapped; + unsigned int user_mapped; + struct mutex mapping_lock; + long unsigned int *subbuf_ids; + struct trace_buffer_meta *meta_page; + struct ring_buffer_meta *ring_meta; + long int nr_pages_to_update; + struct list_head new_pages; + struct work_struct update_pages_work; + struct completion update_done; + struct rb_irq_work irq_work; + long: 32; +}; + +struct rb_wait_data { + struct rb_irq_work *irq_work; + int seq; +}; + +enum { + BPF_RB_NO_WAKEUP = 1, + BPF_RB_FORCE_WAKEUP = 2, +}; + +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + +enum { + BPF_RINGBUF_BUSY_BIT = 2147483648, + BPF_RINGBUF_DISCARD_BIT = 1073741824, + BPF_RINGBUF_HDR_SZ = 8, +}; + +struct bpf_dynptr_kern { + void *data; + u32 size; + u32 offset; + long: 32; +}; + +struct bpf_ringbuf { + wait_queue_head_t waitq; + struct irq_work work; + long: 32; + u64 mask; + struct page **pages; + int nr_pages; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + raw_spinlock_t spinlock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_t busy; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int consumer_pos; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int producer_pos; + long unsigned int pending_pos; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + char data[0]; +}; + +struct bpf_ringbuf_map { + struct bpf_map map; + struct bpf_ringbuf *rb; + long: 32; +}; + +struct bpf_ringbuf_hdr { + u32 len; + u32 pg_off; +}; + +typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_ringbuf_reserve_dynptr)(struct bpf_map *, u32, u64, struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_ringbuf_submit_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_user_ringbuf_drain)(struct bpf_map *, void *, void *, u64); + +typedef struct kmem_cache *kmem_buckets[14]; + +struct reciprocal_value { + u32 m; + u8 sh1; + u8 sh2; +}; + +struct kmem_cache_order_objects { + unsigned int x; +}; + +struct kmem_cache_cpu; + +struct kmem_cache_node; + +struct kmem_cache { + struct kmem_cache_cpu *cpu_slab; + 
slab_flags_t flags; + long unsigned int min_partial; + unsigned int size; + unsigned int object_size; + struct reciprocal_value reciprocal_size; + unsigned int offset; + unsigned int cpu_partial; + unsigned int cpu_partial_slabs; + struct kmem_cache_order_objects oo; + struct kmem_cache_order_objects min; + gfp_t allocflags; + int refcount; + void (*ctor)(void *); + unsigned int inuse; + unsigned int align; + unsigned int red_left_pad; + const char *name; + struct list_head list; + struct kobject kobj; + struct kmem_cache_node *node[1]; +}; + +enum { + PROC_ENTRY_PERMANENT = 1, +}; + +struct slab { + long unsigned int __page_flags; + struct kmem_cache *slab_cache; + union { + struct { + union { + struct list_head slab_list; + struct { + struct slab *next; + int slabs; + }; + }; + union { + struct { + void *freelist; + union { + long unsigned int counters; + struct { + unsigned int inuse: 16; + unsigned int objects: 15; + unsigned int frozen: 1; + }; + }; + }; + }; + }; + struct callback_head callback_head; + }; + unsigned int __page_type; + atomic_t __page_refcount; + long unsigned int obj_exts; +}; + +enum slab_state { + DOWN = 0, + PARTIAL = 1, + UP = 2, + FULL = 3, +}; + +struct kmalloc_info_struct { + const char *name[3]; + unsigned int size; +}; + +struct slabinfo { + long unsigned int active_objs; + long unsigned int num_objs; + long unsigned int active_slabs; + long unsigned int num_slabs; + long unsigned int shared_avail; + unsigned int limit; + unsigned int batchcount; + unsigned int shared; + unsigned int objects_per_slab; + unsigned int cache_order; +}; + +struct kmem_obj_info { + void *kp_ptr; + struct slab *kp_slab; + void *kp_objp; + long unsigned int kp_data_offset; + struct kmem_cache *kp_slab_cache; + void *kp_ret; + void *kp_stack[16]; + void *kp_free_stack[16]; +}; + +struct trace_event_raw_kmem_cache_alloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + long unsigned int gfp_flags; + int node; + bool accounted; + char __data[0]; +}; + +struct trace_event_raw_kmalloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + long unsigned int gfp_flags; + int node; + char __data[0]; +}; + +struct trace_event_raw_kfree { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + char __data[0]; +}; + +struct trace_event_raw_kmem_cache_free { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free_batched { + struct trace_entry ent; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + long unsigned int gfp_flags; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + int percpu_refill; + char __data[0]; +}; + +struct trace_event_raw_mm_page_pcpu_drain { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc_extfrag { + struct trace_entry ent; + long unsigned int pfn; + int alloc_order; + int fallback_order; + int alloc_migratetype; + int fallback_migratetype; + int 
change_ownership; + char __data[0]; +}; + +struct trace_event_raw_mm_alloc_contig_migrate_range_info { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + long unsigned int nr_migrated; + long unsigned int nr_reclaimed; + long unsigned int nr_mapped; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_rss_stat { + struct trace_entry ent; + unsigned int mm_id; + unsigned int curr; + int member; + long int size; + char __data[0]; +}; + +struct trace_event_data_offsets_kmem_cache_alloc {}; + +struct trace_event_data_offsets_kmalloc {}; + +struct trace_event_data_offsets_kfree {}; + +struct trace_event_data_offsets_kmem_cache_free { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_mm_page_free {}; + +struct trace_event_data_offsets_mm_page_free_batched {}; + +struct trace_event_data_offsets_mm_page_alloc {}; + +struct trace_event_data_offsets_mm_page {}; + +struct trace_event_data_offsets_mm_page_pcpu_drain {}; + +struct trace_event_data_offsets_mm_page_alloc_extfrag {}; + +struct trace_event_data_offsets_mm_alloc_contig_migrate_range_info {}; + +struct trace_event_data_offsets_rss_stat {}; + +typedef void (*btf_trace_kmem_cache_alloc)(void *, long unsigned int, const void *, struct kmem_cache *, gfp_t, int); + +typedef void (*btf_trace_kmalloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int); + +typedef void (*btf_trace_kfree)(void *, long unsigned int, const void *); + +typedef void (*btf_trace_kmem_cache_free)(void *, long unsigned int, const void *, const struct kmem_cache *); + +typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int); + +typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *); + +typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int); + +typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, int, int); + +typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int); + +typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int); + +typedef void (*btf_trace_mm_alloc_contig_migrate_range_info)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int); + +struct page_frag_cache { + long unsigned int encoded_page; + __u16 offset; + __u16 pagecnt_bias; +}; + +struct trace_event_raw_cma_release { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int pfn; + const struct page *page; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_cma_alloc_start { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int count; + unsigned int align; + char __data[0]; +}; + +struct trace_event_raw_cma_alloc_finish { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int pfn; + const struct page *page; + long unsigned int count; + unsigned int align; + int errorno; + char __data[0]; +}; + +struct trace_event_raw_cma_alloc_busy_retry { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int pfn; + const struct page *page; + long unsigned int count; + unsigned int align; + char __data[0]; +}; + +struct trace_event_data_offsets_cma_release { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cma_alloc_start { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cma_alloc_finish { + u32 name; + const void 
*name_ptr_; +}; + +struct trace_event_data_offsets_cma_alloc_busy_retry { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_cma_release)(void *, const char *, long unsigned int, const struct page *, long unsigned int); + +typedef void (*btf_trace_cma_alloc_start)(void *, const char *, long unsigned int, unsigned int); + +typedef void (*btf_trace_cma_alloc_finish)(void *, const char *, long unsigned int, const struct page *, long unsigned int, unsigned int, int); + +typedef void (*btf_trace_cma_alloc_busy_retry)(void *, const char *, long unsigned int, const struct page *, long unsigned int, unsigned int); + +struct cma { + long unsigned int base_pfn; + long unsigned int count; + long unsigned int *bitmap; + unsigned int order_per_bit; + spinlock_t lock; + char name[64]; + bool reserve_pages_on_error; +}; + +struct constant_table { + const char *name; + int value; +}; + +enum legacy_fs_param { + LEGACY_FS_UNSET_PARAMS = 0, + LEGACY_FS_MONOLITHIC_PARAMS = 1, + LEGACY_FS_INDIVIDUAL_PARAMS = 2, +}; + +struct legacy_fs_context { + char *legacy_data; + size_t data_size; + enum legacy_fs_param param_type; +}; + +struct iomap_folio_ops; + +struct iomap { + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + struct block_device *bdev; + struct dax_device *dax_dev; + void *inline_data; + void *private; + const struct iomap_folio_ops *folio_ops; + u64 validity_cookie; +}; + +struct iomap_iter; + +struct iomap_folio_ops { + struct folio * (*get_folio)(struct iomap_iter *, loff_t, unsigned int); + void (*put_folio)(struct inode *, loff_t, unsigned int, struct folio *); + bool (*iomap_valid)(struct inode *, const struct iomap *); +}; + +struct iomap_iter { + struct inode *inode; + long: 32; + loff_t pos; + u64 len; + s64 processed; + unsigned int flags; + long: 32; + struct iomap iomap; + struct iomap srcmap; + void *private; + long: 32; +}; + +struct trace_event_raw_iomap_readpage_class { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + int nr_pages; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iomap_range_class { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + loff_t size; + loff_t offset; + u64 length; + char __data[0]; +}; + +struct trace_event_raw_iomap_class { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_writepage_map { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + u64 pos; + u64 dirty_len; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_iter { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + loff_t pos; + u64 length; + s64 processed; + unsigned int flags; + const void *ops; + long unsigned int caller; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iomap_dio_rw_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + size_t count; + size_t done_before; + int ki_flags; + unsigned int dio_flags; + bool aio; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iomap_dio_complete { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + int ki_flags; + bool aio; + int error; + ssize_t ret; + char __data[0]; +}; + +struct trace_event_data_offsets_iomap_readpage_class {}; + +struct trace_event_data_offsets_iomap_range_class {}; + +struct trace_event_data_offsets_iomap_class 
{}; + +struct trace_event_data_offsets_iomap_writepage_map {}; + +struct trace_event_data_offsets_iomap_iter {}; + +struct trace_event_data_offsets_iomap_dio_rw_begin {}; + +struct trace_event_data_offsets_iomap_dio_complete {}; + +typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_release_folio)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_invalidate_folio)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_invalidate_fail)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_rw_queued)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_iter_dstmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_iter_srcmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_writepage_map)(void *, struct inode *, u64, unsigned int, struct iomap *); + +typedef void (*btf_trace_iomap_iter)(void *, struct iomap_iter *, const void *, long unsigned int); + +typedef void (*btf_trace_iomap_dio_rw_begin)(void *, struct kiocb *, struct iov_iter *, unsigned int, size_t); + +typedef void (*btf_trace_iomap_dio_complete)(void *, struct kiocb *, int, ssize_t); + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +struct kstatfs { + long int f_type; + long int f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + __kernel_fsid_t f_fsid; + long int f_namelen; + long int f_frsize; + long int f_flags; + long int f_spare[4]; + long: 32; +}; + +struct kernfs_fs_context { + struct kernfs_root *root; + void *ns_tag; + long unsigned int magic; + bool new_sb_created; +}; + +enum fid_type { + FILEID_ROOT = 0, + FILEID_INO32_GEN = 1, + FILEID_INO32_GEN_PARENT = 2, + FILEID_BTRFS_WITHOUT_PARENT = 77, + FILEID_BTRFS_WITH_PARENT = 78, + FILEID_BTRFS_WITH_PARENT_ROOT = 79, + FILEID_UDF_WITHOUT_PARENT = 81, + FILEID_UDF_WITH_PARENT = 82, + FILEID_NILFS_WITHOUT_PARENT = 97, + FILEID_NILFS_WITH_PARENT = 98, + FILEID_FAT_WITHOUT_PARENT = 113, + FILEID_FAT_WITH_PARENT = 114, + FILEID_INO64_GEN = 129, + FILEID_INO64_GEN_PARENT = 130, + FILEID_LUSTRE = 151, + FILEID_BCACHEFS_WITHOUT_PARENT = 177, + FILEID_BCACHEFS_WITH_PARENT = 178, + FILEID_KERNFS = 254, + FILEID_INVALID = 255, +}; + +struct fid { + union { + struct { + u32 ino; + u32 gen; + u32 parent_ino; + u32 parent_gen; + } i32; + struct { + u64 ino; + u32 gen; + } i64; + struct { + u32 block; + u16 partref; + u16 parent_partref; + u32 generation; + u32 parent_block; + u32 parent_generation; + } udf; + struct { + struct {} __empty_raw; + __u32 raw[0]; + }; + }; +}; + +struct fscrypt_str { + unsigned char *name; + u32 len; +}; + +struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; + bool is_nokey_name; +}; + +struct ext4_map_blocks { + ext4_fsblk_t m_pblk; + ext4_lblk_t m_lblk; + unsigned int m_len; + unsigned int m_flags; + long: 32; +}; + +enum { + EXT4_FC_REASON_XATTR = 0, + EXT4_FC_REASON_CROSS_RENAME = 1, + EXT4_FC_REASON_JOURNAL_FLAG_CHANGE = 2, + EXT4_FC_REASON_NOMEM = 3, + EXT4_FC_REASON_SWAP_BOOT = 4, + EXT4_FC_REASON_RESIZE = 5, + EXT4_FC_REASON_RENAME_DIR = 6, + EXT4_FC_REASON_FALLOC_RANGE = 7, + EXT4_FC_REASON_INODE_JOURNAL_DATA = 8, + EXT4_FC_REASON_ENCRYPTED_FILENAME = 9, + 
EXT4_FC_REASON_MAX = 10, +}; + +enum { + EXT4_MF_MNTDIR_SAMPLED = 0, + EXT4_MF_FC_INELIGIBLE = 1, +}; + +enum { + EXT4_STATE_NEW = 0, + EXT4_STATE_XATTR = 1, + EXT4_STATE_NO_EXPAND = 2, + EXT4_STATE_DA_ALLOC_CLOSE = 3, + EXT4_STATE_EXT_MIGRATE = 4, + EXT4_STATE_NEWENTRY = 5, + EXT4_STATE_MAY_INLINE_DATA = 6, + EXT4_STATE_EXT_PRECACHED = 7, + EXT4_STATE_LUSTRE_EA_INODE = 8, + EXT4_STATE_VERITY_IN_PROGRESS = 9, + EXT4_STATE_FC_COMMITTING = 10, + EXT4_STATE_ORPHAN_FILE = 11, +}; + +struct ext4_dir_entry { + __le32 inode; + __le16 rec_len; + __le16 name_len; + char name[255]; +}; + +struct ext4_dir_entry_hash { + __le32 hash; + __le32 minor_hash; +}; + +struct ext4_dir_entry_2 { + __le32 inode; + __le16 rec_len; + __u8 name_len; + __u8 file_type; + char name[255]; +}; + +struct ext4_dir_entry_tail { + __le32 det_reserved_zero1; + __le16 det_rec_len; + __u8 det_reserved_zero2; + __u8 det_reserved_ft; + __le32 det_checksum; +}; + +struct dx_hash_info { + u32 hash; + u32 minor_hash; + int hash_version; + u32 *seed; +}; + +struct ext4_filename { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + struct dx_hash_info hinfo; +}; + +typedef enum { + EXT4_IGET_NORMAL = 0, + EXT4_IGET_SPECIAL = 1, + EXT4_IGET_HANDLE = 2, + EXT4_IGET_BAD = 4, + EXT4_IGET_EA_INODE = 8, +} ext4_iget_flags; + +typedef enum { + EITHER = 0, + INDEX = 1, + DIRENT = 2, + DIRENT_HTREE = 3, +} dirblock_type_t; + +struct fake_dirent { + __le32 inode; + __le16 rec_len; + u8 name_len; + u8 file_type; +}; + +struct dx_countlimit { + __le16 limit; + __le16 count; +}; + +struct dx_entry { + __le32 hash; + __le32 block; +}; + +struct dx_root_info { + __le32 reserved_zero; + u8 hash_version; + u8 info_length; + u8 indirect_levels; + u8 unused_flags; +}; + +struct dx_root { + struct fake_dirent dot; + char dot_name[4]; + struct fake_dirent dotdot; + char dotdot_name[4]; + struct dx_root_info info; + struct dx_entry entries[0]; +}; + +struct dx_node { + struct fake_dirent fake; + struct dx_entry entries[0]; +}; + +struct dx_frame { + struct buffer_head *bh; + struct dx_entry *entries; + struct dx_entry *at; +}; + +struct dx_map_entry { + u32 hash; + u16 offs; + u16 size; +}; + +struct dx_tail { + u32 dt_reserved; + __le32 dt_checksum; +}; + +struct ext4_renament { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool is_dir; + int dir_nlink_delta; + struct buffer_head *bh; + struct ext4_dir_entry_2 *de; + int inlined; + struct buffer_head *dir_bh; + struct ext4_dir_entry_2 *parent_de; + int dir_inlined; +}; + +struct fdtable { + unsigned int max_fds; + struct file **fd; + long unsigned int *close_on_exec; + long unsigned int *open_fds; + long unsigned int *full_fds_bits; + struct callback_head rcu; +}; + +struct files_struct { + atomic_t count; + bool resize_in_progress; + wait_queue_head_t resize_wait; + struct fdtable *fdt; + struct fdtable fdtab; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t file_lock; + unsigned int next_fd; + long unsigned int close_on_exec_init[1]; + long unsigned int open_fds_init[1]; + long unsigned int full_fds_bits_init[1]; + struct file *fd_array[32]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct miscdevice { + int minor; + const char *name; + const struct file_operations *fops; + struct list_head list; + struct device *parent; + struct device *this_device; + const struct attribute_group **groups; + const char *nodename; + umode_t mode; +}; + +struct args_protover { + __u32 version; +}; + +struct args_protosubver { + __u32 sub_version; +}; + +struct args_openmount { + __u32 devid; +}; + +struct args_ready { + __u32 token; +}; + +struct args_fail { + __u32 token; + __s32 status; +}; + +struct args_setpipefd { + __s32 pipefd; +}; + +struct args_timeout { + __u64 timeout; +}; + +struct args_requester { + __u32 uid; + __u32 gid; +}; + +struct args_expire { + __u32 how; +}; + +struct args_askumount { + __u32 may_umount; +}; + +struct args_in { + __u32 type; +}; + +struct args_out { + __u32 devid; + __u32 magic; +}; + +struct args_ismountpoint { + union { + struct args_in in; + struct args_out out; + }; +}; + +struct autofs_dev_ioctl { + __u32 ver_major; + __u32 ver_minor; + __u32 size; + __s32 ioctlfd; + union { + struct args_protover protover; + struct args_protosubver protosubver; + struct args_openmount openmount; + struct args_ready ready; + struct args_fail fail; + struct args_setpipefd setpipefd; + struct args_timeout timeout; + struct args_requester requester; + struct args_expire expire; + struct args_askumount askumount; + struct args_ismountpoint ismountpoint; + }; + char path[0]; +}; + +enum { + AUTOFS_DEV_IOCTL_VERSION_CMD = 113, + AUTOFS_DEV_IOCTL_PROTOVER_CMD = 114, + AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD = 115, + AUTOFS_DEV_IOCTL_OPENMOUNT_CMD = 116, + AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD = 117, + AUTOFS_DEV_IOCTL_READY_CMD = 118, + AUTOFS_DEV_IOCTL_FAIL_CMD = 119, + AUTOFS_DEV_IOCTL_SETPIPEFD_CMD = 120, + AUTOFS_DEV_IOCTL_CATATONIC_CMD = 121, + AUTOFS_DEV_IOCTL_TIMEOUT_CMD = 122, + AUTOFS_DEV_IOCTL_REQUESTER_CMD = 123, + AUTOFS_DEV_IOCTL_EXPIRE_CMD = 124, + AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD = 125, + AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD = 126, +}; + +typedef int (*ioctl_fn)(struct file *, struct autofs_sb_info *, struct autofs_dev_ioctl *); + +struct btrfs_ioctl_encoded_io_args { + const struct iovec *iov; + long unsigned int iovcnt; + __s64 offset; + __u64 flags; + __u64 len; + __u64 unencoded_len; + __u64 unencoded_offset; + __u32 compression; + __u32 encryption; + __u8 reserved[64]; +}; + +enum { + READA_NONE = 0, + READA_BACK = 1, + READA_FORWARD = 2, + READA_FORWARD_ALWAYS = 3, +}; + +struct btrfs_replace_extent_info { + u64 disk_offset; + u64 disk_len; + u64 data_offset; + u64 data_len; + u64 file_offset; + char *extent_buf; + bool is_new_extent; + bool update_times; + int qgroup_reserved; + int insertions; +}; + +struct btrfs_drop_extents_args { + struct btrfs_path *path; + long: 32; + u64 start; + u64 end; + bool drop_cache; + bool replace_extent; + u32 extent_item_size; + u64 drop_end; + u64 bytes_found; + bool extent_inserted; + long: 32; +}; + +struct btrfs_file_private { + void *filldir_buf; + long: 32; + u64 last_index; + struct extent_state *llseek_cached_state; + struct task_struct *owner_task; +}; + +struct btrfs_item_batch { + const struct btrfs_key *keys; + const u32 *data_sizes; + u32 total_data_size; + int nr; +}; + +struct btrfs_file_extent { + u64 disk_bytenr; + u64 disk_num_bytes; + u64 num_bytes; + u64 ram_bytes; + u64 offset; + u8 compression; + long: 32; +}; + +enum btrfs_ilock_type { + __BTRFS_ILOCK_SHARED_BIT = 0, + BTRFS_ILOCK_SHARED = 1, + 
__BTRFS_ILOCK_SHARED_SEQ = 0, + __BTRFS_ILOCK_TRY_BIT = 1, + BTRFS_ILOCK_TRY = 2, + __BTRFS_ILOCK_TRY_SEQ = 1, + __BTRFS_ILOCK_MMAP_BIT = 2, + BTRFS_ILOCK_MMAP = 4, + __BTRFS_ILOCK_MMAP_SEQ = 2, +}; + +enum btrfs_ref_type { + BTRFS_REF_NOT_SET = 0, + BTRFS_REF_DATA = 1, + BTRFS_REF_METADATA = 2, + BTRFS_REF_LAST = 3, +} __attribute__((mode(byte))); + +struct btrfs_ref { + enum btrfs_ref_type type; + enum btrfs_delayed_ref_action action; + bool skip_qgroup; + long: 32; + u64 bytenr; + u64 num_bytes; + u64 owning_root; + u64 ref_root; + u64 parent; + union { + struct btrfs_data_ref data_ref; + struct btrfs_tree_ref tree_ref; + }; +}; + +struct btrfs_log_ctx { + int log_ret; + int log_transid; + bool log_new_dentries; + bool logging_new_name; + bool logging_new_delayed_dentries; + bool logged_before; + struct btrfs_inode *inode; + struct list_head list; + struct list_head ordered_extents; + struct list_head conflict_inodes; + int num_conflict_inodes; + bool logging_conflict_inodes; + struct extent_buffer *scratch_eb; +}; + +enum btrfs_qgroup_rsv_type { + BTRFS_QGROUP_RSV_DATA = 0, + BTRFS_QGROUP_RSV_META_PERTRANS = 1, + BTRFS_QGROUP_RSV_META_PREALLOC = 2, + BTRFS_QGROUP_RSV_LAST = 3, +}; + +struct falloc_range { + struct list_head list; + u64 start; + u64 len; +}; + +enum { + RANGE_BOUNDARY_WRITTEN_EXTENT = 0, + RANGE_BOUNDARY_PREALLOC_EXTENT = 1, + RANGE_BOUNDARY_HOLE = 2, +}; + +struct btrfs_balance_args { + __u64 profiles; + union { + __u64 usage; + struct { + __u32 usage_min; + __u32 usage_max; + }; + }; + __u64 devid; + __u64 pstart; + __u64 pend; + __u64 vstart; + __u64 vend; + __u64 target; + __u64 flags; + union { + __u64 limit; + struct { + __u32 limit_min; + __u32 limit_max; + }; + }; + __u32 stripes_min; + __u32 stripes_max; + __u64 unused[6]; +}; + +struct btrfs_balance_progress { + __u64 expected; + __u64 considered; + __u64 completed; +}; + +struct btrfs_dir_item { + struct btrfs_disk_key location; + __le64 transid; + __le16 data_len; + __le16 name_len; + __u8 type; +} __attribute__((packed)); + +struct btrfs_space_info { + struct btrfs_fs_info *fs_info; + spinlock_t lock; + u64 total_bytes; + u64 bytes_used; + u64 bytes_pinned; + u64 bytes_reserved; + u64 bytes_may_use; + u64 bytes_readonly; + u64 bytes_zone_unusable; + u64 max_extent_size; + u64 chunk_size; + int bg_reclaim_threshold; + int clamp; + unsigned int full: 1; + unsigned int chunk_alloc: 1; + unsigned int flush: 1; + unsigned int force_alloc; + u64 disk_used; + u64 disk_total; + u64 flags; + struct list_head list; + struct list_head ro_bgs; + struct list_head priority_tickets; + struct list_head tickets; + u64 reclaim_size; + u64 tickets_id; + struct rw_semaphore groups_sem; + struct list_head block_groups[9]; + struct kobject kobj; + struct kobject *block_group_kobjs[9]; + long: 32; + u64 reclaim_count; + u64 reclaim_bytes; + u64 reclaim_errors; + bool dynamic_reclaim; + bool periodic_reclaim; + bool periodic_reclaim_ready; + long: 32; + s64 reclaimable_bytes; +}; + +struct rcu_string; + +struct btrfs_zoned_device_info; + +struct scrub_ctx; + +struct btrfs_device { + struct list_head dev_list; + struct list_head dev_alloc_list; + struct list_head post_commit_list; + struct btrfs_fs_devices *fs_devices; + struct btrfs_fs_info *fs_info; + struct rcu_string *name; + long: 32; + u64 generation; + struct file *bdev_file; + struct block_device *bdev; + struct btrfs_zoned_device_info *zone_info; + dev_t devt; + long unsigned int dev_state; + blk_status_t last_flush_error; + seqcount_t data_seqcount; + long: 32; + u64 
devid; + u64 total_bytes; + u64 disk_total_bytes; + u64 bytes_used; + u32 io_align; + u32 io_width; + u64 type; + atomic_t sb_write_errors; + u32 sector_size; + u8 uuid[16]; + u64 commit_total_bytes; + u64 commit_bytes_used; + struct bio flush_bio; + struct completion flush_wait; + struct scrub_ctx *scrub_ctx; + int dev_stats_valid; + atomic_t dev_stats_ccnt; + atomic_t dev_stat_values[5]; + struct extent_io_tree alloc_state; + struct completion kobj_unregister; + struct kobject devid_kobj; + long: 32; + u64 scrub_speed_max; +}; + +enum btrfs_chunk_allocation_policy { + BTRFS_CHUNK_ALLOC_REGULAR = 0, + BTRFS_CHUNK_ALLOC_ZONED = 1, +}; + +enum btrfs_read_policy { + BTRFS_READ_POLICY_PID = 0, + BTRFS_NR_READ_POLICY = 1, +}; + +struct btrfs_fs_devices { + u8 fsid[16]; + u8 metadata_uuid[16]; + struct list_head fs_list; + u64 num_devices; + u64 open_devices; + u64 rw_devices; + u64 missing_devices; + u64 total_rw_bytes; + u64 total_devices; + u64 latest_generation; + struct btrfs_device *latest_dev; + struct mutex device_list_mutex; + struct list_head devices; + struct list_head alloc_list; + struct list_head seed_list; + int opened; + bool rotating; + bool discardable; + bool seeding; + bool temp_fsid; + struct btrfs_fs_info *fs_info; + struct kobject fsid_kobj; + struct kobject *devices_kobj; + struct kobject *devinfo_kobj; + struct completion kobj_unregister; + enum btrfs_chunk_allocation_policy chunk_alloc_policy; + enum btrfs_read_policy read_policy; + long: 32; +}; + +struct btrfs_balance_control { + struct btrfs_balance_args data; + struct btrfs_balance_args meta; + struct btrfs_balance_args sys; + u64 flags; + struct btrfs_balance_progress stat; +}; + +struct rcu_string { + struct callback_head rcu; + char str[0]; +}; + +enum btrfs_raid_types { + BTRFS_RAID_SINGLE = 0, + BTRFS_RAID_RAID0 = 1, + BTRFS_RAID_RAID1 = 2, + BTRFS_RAID_DUP = 3, + BTRFS_RAID_RAID10 = 4, + BTRFS_RAID_RAID5 = 5, + BTRFS_RAID_RAID6 = 6, + BTRFS_RAID_RAID1C3 = 7, + BTRFS_RAID_RAID1C4 = 8, + BTRFS_NR_RAID_TYPES = 9, +}; + +enum btrfs_reserve_flush_enum { + BTRFS_RESERVE_NO_FLUSH = 0, + BTRFS_RESERVE_FLUSH_LIMIT = 1, + BTRFS_RESERVE_FLUSH_EVICT = 2, + BTRFS_RESERVE_FLUSH_DATA = 3, + BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE = 4, + BTRFS_RESERVE_FLUSH_ALL = 5, + BTRFS_RESERVE_FLUSH_ALL_STEAL = 6, + BTRFS_RESERVE_FLUSH_EMERGENCY = 7, +}; + +struct prop_handler { + struct hlist_node node; + const char *xattr_name; + int (*validate)(const struct btrfs_inode *, const char *, size_t); + int (*apply)(struct inode *, const char *, size_t); + const char * (*extract)(const struct inode *); + bool (*ignore)(const struct btrfs_inode *); + int inheritable; +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +struct crypto_lskcipher { + struct crypto_tfm base; +}; + +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); + int (*encrypt)(struct skcipher_request *); + int (*decrypt)(struct skcipher_request *); + int (*export)(struct skcipher_request *, void *); + int (*import)(struct skcipher_request *, const void *); + int (*init)(struct crypto_skcipher *); + void (*exit)(struct crypto_skcipher *); + unsigned int walksize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + union { + struct { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int statesize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; + }; + struct skcipher_alg_common co; + }; +}; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[256]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } src; + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } dst; + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + unsigned int ivsize; + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; +}; + +enum { + SKCIPHER_WALK_PHYS = 1, + SKCIPHER_WALK_SLOW = 2, + SKCIPHER_WALK_COPY = 4, + SKCIPHER_WALK_DIFF = 8, + SKCIPHER_WALK_SLEEP = 16, +}; + +struct skcipher_walk_buffer { + struct list_head entry; + struct scatter_walk dst; + unsigned int len; + u8 *data; + u8 buffer[0]; +}; + +typedef s32 dma_cookie_t; + +enum dma_status { + DMA_COMPLETE = 0, + DMA_IN_PROGRESS = 1, + DMA_PAUSED = 2, + DMA_ERROR = 3, + DMA_OUT_OF_ORDER = 4, +}; + +enum dma_transfer_direction { + DMA_MEM_TO_MEM = 0, + DMA_MEM_TO_DEV = 1, + DMA_DEV_TO_MEM = 2, + DMA_DEV_TO_DEV = 3, + DMA_TRANS_NONE = 4, +}; + +struct data_chunk { + size_t size; + size_t icg; + size_t dst_icg; + size_t src_icg; +}; + +struct dma_interleaved_template { + dma_addr_t src_start; + dma_addr_t dst_start; + enum dma_transfer_direction dir; + bool src_inc; + bool dst_inc; + bool src_sgl; + bool dst_sgl; + size_t numf; + size_t frame_size; + struct data_chunk sgl[0]; +}; + +struct dma_vec { + dma_addr_t addr; + size_t len; +}; + +enum dma_ctrl_flags { + DMA_PREP_INTERRUPT = 1, + DMA_CTRL_ACK = 2, + DMA_PREP_PQ_DISABLE_P = 4, + DMA_PREP_PQ_DISABLE_Q = 8, + DMA_PREP_CONTINUE = 16, + DMA_PREP_FENCE = 32, + DMA_CTRL_REUSE = 64, + DMA_PREP_CMD = 128, + DMA_PREP_REPEAT = 256, + DMA_PREP_LOAD_EOT = 512, +}; + +typedef struct { + long unsigned int bits[1]; +} dma_cap_mask_t; + +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = 1, + DESC_METADATA_ENGINE = 2, +}; + +struct dma_chan_percpu { + long unsigned int memcpy_count; + long unsigned int bytes_transferred; +}; + +struct dma_router { + struct device *dev; + void (*route_free)(struct device *, void *); +}; + +struct dma_device; + +struct dma_chan_dev; + +struct dma_chan { + struct dma_device *device; + struct device *slave; + 
dma_cookie_t cookie; + dma_cookie_t completed_cookie; + int chan_id; + struct dma_chan_dev *dev; + const char *name; + char *dbg_client_name; + struct list_head device_node; + struct dma_chan_percpu *local; + int client_count; + int table_count; + struct dma_router *router; + void *route_data; + void *private; +}; + +typedef bool (*dma_filter_fn)(struct dma_chan *, void *); + +struct dma_slave_map; + +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +enum dmaengine_alignment { + DMAENGINE_ALIGN_1_BYTE = 0, + DMAENGINE_ALIGN_2_BYTES = 1, + DMAENGINE_ALIGN_4_BYTES = 2, + DMAENGINE_ALIGN_8_BYTES = 3, + DMAENGINE_ALIGN_16_BYTES = 4, + DMAENGINE_ALIGN_32_BYTES = 5, + DMAENGINE_ALIGN_64_BYTES = 6, + DMAENGINE_ALIGN_128_BYTES = 7, + DMAENGINE_ALIGN_256_BYTES = 8, +}; + +enum dma_residue_granularity { + DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, + DMA_RESIDUE_GRANULARITY_SEGMENT = 1, + DMA_RESIDUE_GRANULARITY_BURST = 2, +}; + +struct dma_async_tx_descriptor; + +struct dma_slave_caps; + +struct dma_slave_config; + +struct dma_tx_state; + +struct dma_device { + struct kref ref; + unsigned int chancnt; + unsigned int privatecnt; + struct list_head channels; + struct list_head global_node; + struct dma_filter filter; + dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; + short unsigned int max_xor; + short unsigned int max_pq; + enum dmaengine_alignment copy_align; + enum dmaengine_alignment xor_align; + enum dmaengine_alignment pq_align; + enum dmaengine_alignment fill_align; + int dev_id; + struct device *dev; + struct module *owner; + struct ida chan_ida; + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool descriptor_reuse; + enum dma_residue_granularity residue_granularity; + int (*device_alloc_chan_resources)(struct dma_chan *); + int (*device_router_config)(struct dma_chan *); + void (*device_free_chan_resources)(struct dma_chan *); + struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan *, dma_addr_t, dma_addr_t, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan *, dma_addr_t, int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan *, struct scatterlist *, unsigned int, int, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_peripheral_dma_vec)(struct dma_chan *, const struct dma_vec *, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan *, struct scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *); + struct dma_async_tx_descriptor * 
(*device_prep_dma_cyclic)(struct dma_chan *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan *, struct dma_interleaved_template *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan *, dma_addr_t, u64, long unsigned int); + void (*device_caps)(struct dma_chan *, struct dma_slave_caps *); + int (*device_config)(struct dma_chan *, struct dma_slave_config *); + int (*device_pause)(struct dma_chan *); + int (*device_resume)(struct dma_chan *); + int (*device_terminate_all)(struct dma_chan *); + void (*device_synchronize)(struct dma_chan *); + enum dma_status (*device_tx_status)(struct dma_chan *, dma_cookie_t, struct dma_tx_state *); + void (*device_issue_pending)(struct dma_chan *); + void (*device_release)(struct dma_device *); + void (*dbg_summary_show)(struct seq_file *, struct dma_device *); + struct dentry *dbg_dev_root; +}; + +struct dma_chan_dev { + struct dma_chan *chan; + long: 32; + struct device device; + int dev_id; + bool chan_dma_dev; +}; + +enum dma_slave_buswidth { + DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, + DMA_SLAVE_BUSWIDTH_1_BYTE = 1, + DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_3_BYTES = 3, + DMA_SLAVE_BUSWIDTH_4_BYTES = 4, + DMA_SLAVE_BUSWIDTH_8_BYTES = 8, + DMA_SLAVE_BUSWIDTH_16_BYTES = 16, + DMA_SLAVE_BUSWIDTH_32_BYTES = 32, + DMA_SLAVE_BUSWIDTH_64_BYTES = 64, + DMA_SLAVE_BUSWIDTH_128_BYTES = 128, +}; + +struct dma_slave_config { + enum dma_transfer_direction direction; + phys_addr_t src_addr; + phys_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + u32 src_maxburst; + u32 dst_maxburst; + u32 src_port_window_size; + u32 dst_port_window_size; + bool device_fc; + void *peripheral_config; + size_t peripheral_size; +}; + +struct dma_slave_caps { + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool cmd_pause; + bool cmd_resume; + bool cmd_terminate; + enum dma_residue_granularity residue_granularity; + bool descriptor_reuse; +}; + +typedef void (*dma_async_tx_callback)(void *); + +enum dmaengine_tx_result { + DMA_TRANS_NOERROR = 0, + DMA_TRANS_READ_FAILED = 1, + DMA_TRANS_WRITE_FAILED = 2, + DMA_TRANS_ABORTED = 3, +}; + +struct dmaengine_result { + enum dmaengine_tx_result result; + u32 residue; +}; + +typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); + +struct dmaengine_unmap_data { + u8 map_cnt; + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *, void *, size_t); + void * (*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *); + int (*set_len)(struct dma_async_tx_descriptor *, size_t); +}; + +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + enum dma_ctrl_flags flags; + dma_addr_t phys; + struct dma_chan *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); + int (*desc_free)(struct dma_async_tx_descriptor *); + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; + struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; +}; + +struct dma_tx_state { + dma_cookie_t last; + dma_cookie_t used; + u32 residue; + u32 in_flight_bytes; 
+}; + +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +enum async_tx_flags { + ASYNC_TX_XOR_ZERO_DST = 1, + ASYNC_TX_XOR_DROP_DST = 2, + ASYNC_TX_ACK = 4, + ASYNC_TX_FENCE = 8, + ASYNC_TX_PQ_XOR_DST = 16, +}; + +struct async_submit_ctl { + enum async_tx_flags flags; + struct dma_async_tx_descriptor *depend_tx; + dma_async_tx_callback cb_fn; + void *cb_param; + void *scribble; +}; + +typedef struct { + __u8 b[16]; +} guid_t; + +struct parsed_partitions { + struct gendisk *disk; + char name[32]; + struct { + sector_t from; + sector_t size; + int flags; + bool has_info; + struct partition_meta_info info; + long: 32; + } *parts; + int next; + int limit; + bool access_beyond_eod; + char *pp_buf; +}; + +typedef struct { + struct folio *v; +} Sector; + +typedef guid_t efi_guid_t; + +struct _gpt_header { + __le64 signature; + __le32 revision; + __le32 header_size; + __le32 header_crc32; + __le32 reserved1; + __le64 my_lba; + __le64 alternate_lba; + __le64 first_usable_lba; + __le64 last_usable_lba; + efi_guid_t disk_guid; + __le64 partition_entry_lba; + __le32 num_partition_entries; + __le32 sizeof_partition_entry; + __le32 partition_entry_array_crc32; +}; + +typedef struct _gpt_header gpt_header; + +struct _gpt_entry_attributes { + u64 required_to_function: 1; + u64 reserved: 47; + u64 type_guid_specific: 16; +}; + +typedef struct _gpt_entry_attributes gpt_entry_attributes; + +struct _gpt_entry { + efi_guid_t partition_type_guid; + efi_guid_t unique_partition_guid; + __le64 starting_lba; + __le64 ending_lba; + gpt_entry_attributes attributes; + __le16 partition_name[36]; +}; + +typedef struct _gpt_entry gpt_entry; + +struct _gpt_mbr_record { + u8 boot_indicator; + u8 start_head; + u8 start_sector; + u8 start_track; + u8 os_type; + u8 end_head; + u8 end_sector; + u8 end_track; + __le32 starting_lba; + __le32 size_in_lba; +}; + +typedef struct _gpt_mbr_record gpt_mbr_record; + +struct _legacy_mbr { + u8 boot_code[440]; + __le32 unique_mbr_signature; + __le16 unknown; + gpt_mbr_record partition_record[4]; + __le16 signature; +} __attribute__((packed)); + +typedef struct _legacy_mbr legacy_mbr; + +struct io_uring_buf { + __u64 addr; + __u32 len; + __u16 bid; + __u16 resv; +}; + +struct io_uring_buf_ring { + union { + struct { + __u64 resv1; + __u32 resv2; + __u16 resv3; + __u16 tail; + }; + struct { + struct {} __empty_bufs; + struct io_uring_buf bufs[0]; + }; + }; +}; + +struct io_sq_data { + refcount_t refs; + atomic_t park_pending; + struct mutex lock; + struct list_head ctx_list; + struct task_struct *thread; + struct wait_queue_head wait; + unsigned int sq_thread_idle; + int sq_cpu; + pid_t task_pid; + pid_t task_tgid; + u64 work_time; + long unsigned int state; + struct completion exited; + long: 32; +}; + +struct io_buffer { + struct list_head list; + __u64 addr; + __u32 len; + __u16 bid; + __u16 bgid; +}; + +struct io_buffer_list { + union { + struct list_head buf_list; + struct { + struct page **buf_pages; + struct io_uring_buf_ring *buf_ring; + }; + struct callback_head rcu; + }; + __u16 bgid; + __u16 buf_nr_pages; + __u16 nr_entries; + __u16 head; + __u16 mask; + __u16 flags; + atomic_t refs; +}; + +struct io_poll { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + int retries; + struct wait_queue_entry wait; +}; + +struct async_poll { + struct io_poll poll; + struct io_poll *double_poll; +}; + +struct io_issue_def { + unsigned int needs_file: 1; + unsigned int plug: 1; + unsigned int hash_reg_file: 1; + unsigned int 
unbound_nonreg_file: 1; + unsigned int pollin: 1; + unsigned int pollout: 1; + unsigned int poll_exclusive: 1; + unsigned int buffer_select: 1; + unsigned int audit_skip: 1; + unsigned int ioprio: 1; + unsigned int iopoll: 1; + unsigned int iopoll_queue: 1; + unsigned int vectored: 1; + short unsigned int async_size; + int (*issue)(struct io_kiocb *, unsigned int); + int (*prep)(struct io_kiocb *, const struct io_uring_sqe *); +}; + +struct io_cold_def { + const char *name; + void (*cleanup)(struct io_kiocb *); + void (*fail)(struct io_kiocb *); +}; + +struct statx_timestamp { + __s64 tv_sec; + __u32 tv_nsec; + __s32 __reserved; +}; + +struct statx { + __u32 stx_mask; + __u32 stx_blksize; + __u64 stx_attributes; + __u32 stx_nlink; + __u32 stx_uid; + __u32 stx_gid; + __u16 stx_mode; + __u16 __spare0[1]; + __u64 stx_ino; + __u64 stx_size; + __u64 stx_blocks; + __u64 stx_attributes_mask; + struct statx_timestamp stx_atime; + struct statx_timestamp stx_btime; + struct statx_timestamp stx_ctime; + struct statx_timestamp stx_mtime; + __u32 stx_rdev_major; + __u32 stx_rdev_minor; + __u32 stx_dev_major; + __u32 stx_dev_minor; + __u64 stx_mnt_id; + __u32 stx_dio_mem_align; + __u32 stx_dio_offset_align; + __u64 stx_subvol; + __u32 stx_atomic_write_unit_min; + __u32 stx_atomic_write_unit_max; + __u32 stx_atomic_write_segments_max; + __u32 __spare1[1]; + __u64 __spare3[9]; +}; + +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + struct filename *filename; + struct statx *buffer; +}; + +typedef long long unsigned int UDItype; + +struct gen_pool; + +typedef long unsigned int (*genpool_algo_t)(long unsigned int *, long unsigned int, long unsigned int, unsigned int, void *, struct gen_pool *, long unsigned int); + +struct gen_pool { + spinlock_t lock; + struct list_head chunks; + int min_alloc_order; + genpool_algo_t algo; + void *data; + const char *name; +}; + +struct gen_pool_chunk { + struct list_head next_chunk; + atomic_long_t avail; + phys_addr_t phys_addr; + void *owner; + long unsigned int start_addr; + long unsigned int end_addr; + long unsigned int bits[0]; +}; + +struct genpool_data_align { + int align; +}; + +struct genpool_data_fixed { + long unsigned int offset; +}; + +struct platform_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct pdev_archdata {}; + +struct mfd_cell; + +struct platform_device { + const char *name; + int id; + bool id_auto; + long: 32; + struct device dev; + u64 platform_dma_mask; + struct device_dma_parameters dma_parms; + u32 num_resources; + struct resource *resource; + const struct platform_device_id *id_entry; + const char *driver_override; + struct mfd_cell *mfd_cell; + struct pdev_archdata archdata; +}; + +typedef struct { + raw_spinlock_t *lock; + long unsigned int flags; +} class_raw_spinlock_irqsave_t; + +struct sbq_wait { + struct sbitmap_queue *sbq; + struct wait_queue_entry wait; +}; + +struct pcim_iomap_devres { + void *table[6]; +}; + +struct pcim_intx_devres { + int orig_intx; +}; + +enum pcim_addr_devres_type { + PCIM_ADDR_DEVRES_TYPE_INVALID = 0, + PCIM_ADDR_DEVRES_TYPE_REGION = 1, + PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING = 2, + PCIM_ADDR_DEVRES_TYPE_MAPPING = 3, +}; + +struct pcim_addr_devres { + enum pcim_addr_devres_type type; + void *baseaddr; + long unsigned int offset; + long unsigned int len; + int bar; +}; + +struct fb_fix_screeninfo { + char id[16]; + long unsigned int smem_start; + __u32 smem_len; + __u32 type; + __u32 type_aux; + __u32 visual; + __u16 xpanstep; + __u16 
ypanstep; + __u16 ywrapstep; + __u32 line_length; + long unsigned int mmio_start; + __u32 mmio_len; + __u32 accel; + __u16 capabilities; + __u16 reserved[2]; +}; + +struct fb_bitfield { + __u32 offset; + __u32 length; + __u32 msb_right; +}; + +struct fb_var_screeninfo { + __u32 xres; + __u32 yres; + __u32 xres_virtual; + __u32 yres_virtual; + __u32 xoffset; + __u32 yoffset; + __u32 bits_per_pixel; + __u32 grayscale; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + __u32 nonstd; + __u32 activate; + __u32 height; + __u32 width; + __u32 accel_flags; + __u32 pixclock; + __u32 left_margin; + __u32 right_margin; + __u32 upper_margin; + __u32 lower_margin; + __u32 hsync_len; + __u32 vsync_len; + __u32 sync; + __u32 vmode; + __u32 rotate; + __u32 colorspace; + __u32 reserved[4]; +}; + +struct fb_cmap { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_fillrect { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_image { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 fg_color; + __u32 bg_color; + __u8 depth; + const char *data; + struct fb_cmap cmap; +}; + +struct fbcurpos { + __u16 x; + __u16 y; +}; + +struct fb_cursor { + __u16 set; + __u16 enable; + __u16 rop; + const char *mask; + struct fbcurpos hot; + struct fb_image image; +}; + +struct fb_chroma { + __u32 redx; + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_monspecs { + struct fb_chroma chroma; + struct fb_videomode *modedb; + __u8 manufacturer[4]; + __u8 monitor[14]; + __u8 serial_no[14]; + __u8 ascii[14]; + __u32 modedb_len; + __u32 model; + __u32 serial; + __u32 year; + __u32 week; + __u32 hfmin; + __u32 hfmax; + __u32 dclkmin; + __u32 dclkmax; + __u16 input; + __u16 dpms; + __u16 signal; + __u16 vfmin; + __u16 vfmax; + __u16 gamma; + __u16 gtf: 1; + __u16 misc; + __u8 version; + __u8 revision; + __u8 max_x; + __u8 max_y; +}; + +struct fb_info; + +struct fb_pixmap { + u8 *addr; + u32 size; + u32 offset; + u32 buf_align; + u32 scan_align; + u32 access_align; + u32 flags; + long unsigned int blit_x[2]; + long unsigned int blit_y[4]; + void (*writeio)(struct fb_info *, void *, void *, unsigned int); + void (*readio)(struct fb_info *, void *, void *, unsigned int); +}; + +struct lcd_device; + +struct fb_ops; + +struct fb_info { + refcount_t count; + int node; + int flags; + int fbcon_rotate_hint; + struct mutex lock; + struct mutex mm_lock; + struct fb_var_screeninfo var; + struct fb_fix_screeninfo fix; + struct fb_monspecs monspecs; + struct fb_pixmap pixmap; + struct fb_pixmap sprite; + struct fb_cmap cmap; + struct list_head modelist; + struct fb_videomode *mode; + struct lcd_device *lcd_dev; + const struct fb_ops *fbops; + struct device *device; + struct device *dev; + int class_flag; + union { + char *screen_base; + char *screen_buffer; + }; + long unsigned int screen_size; + void *pseudo_palette; + u32 state; + void *fbcon_par; + void *par; + bool skip_vt_switch; + bool skip_panic; +}; + +struct fb_blit_caps { + long unsigned int x[2]; + long unsigned int y[4]; + u32 len; + u32 flags; +}; + +struct fb_ops { + struct module *owner; + int (*fb_open)(struct fb_info *, int); + int (*fb_release)(struct fb_info *, int); + ssize_t (*fb_read)(struct fb_info *, char *, size_t, 
loff_t *); + ssize_t (*fb_write)(struct fb_info *, const char *, size_t, loff_t *); + int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); + int (*fb_set_par)(struct fb_info *); + int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *); + int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); + int (*fb_blank)(int, struct fb_info *); + int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); + void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); + void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); + void (*fb_imageblit)(struct fb_info *, const struct fb_image *); + int (*fb_cursor)(struct fb_info *, struct fb_cursor *); + int (*fb_sync)(struct fb_info *); + int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); + void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *); + void (*fb_destroy)(struct fb_info *); + int (*fb_debug_enter)(struct fb_info *); + int (*fb_debug_leave)(struct fb_info *); +}; + +struct termios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[23]; +}; + +struct termios2 { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[23]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct sgttyb { + char sg_ispeed; + char sg_ospeed; + char sg_erase; + char sg_kill; + int sg_flags; +}; + +struct ltchars { + char t_suspc; + char t_dsuspc; + char t_rprntc; + char t_flushc; + char t_werasc; + char t_lnextc; +}; + +struct termio { + short unsigned int c_iflag; + short unsigned int c_oflag; + short unsigned int c_cflag; + short unsigned int c_lflag; + char c_line; + unsigned char c_cc[23]; +}; + +enum tty_flow_change { + TTY_FLOW_NO_CHANGE = 0, + TTY_THROTTLE_SAFE = 1, + TTY_UNTHROTTLE_SAFE = 2, +}; + +typedef int (*device_match_t)(struct device *, const void *); + +struct iommu_group {}; + +struct property_entry; + +struct platform_device_info { + struct device *parent; + struct fwnode_handle *fwnode; + bool of_node_reused; + const char *name; + int id; + const struct resource *res; + unsigned int num_res; + const void *data; + size_t size_data; + long: 32; + u64 dma_mask; + const struct property_entry *properties; + long: 32; +}; + +enum dev_prop_type { + DEV_PROP_U8 = 0, + DEV_PROP_U16 = 1, + DEV_PROP_U32 = 2, + DEV_PROP_U64 = 3, + DEV_PROP_STRING = 4, + DEV_PROP_REF = 5, +}; + +struct property_entry { + const char *name; + size_t length; + bool is_inline; + enum dev_prop_type type; + union { + const void *pointer; + union { + u8 u8_data[8]; + u16 u16_data[4]; + u32 u32_data[2]; + u64 u64_data[1]; + const char *str[2]; + } value; + }; +}; + +struct platform_driver { + int (*probe)(struct platform_device *); + union { + void (*remove)(struct platform_device *); + void (*remove_new)(struct platform_device *); + }; + void (*shutdown)(struct platform_device *); + int (*suspend)(struct platform_device *, pm_message_t); + int (*resume)(struct platform_device *); + struct device_driver driver; + const struct platform_device_id *id_table; + bool prevent_deferred_probe; + bool driver_managed_dma; +}; + +struct software_node { + const char *name; + const struct software_node *parent; + const struct property_entry *properties; +}; + +struct irq_affinity { + unsigned int pre_vectors; + 
unsigned int post_vectors; + unsigned int nr_sets; + unsigned int set_size[4]; + void (*calc_sets)(struct irq_affinity *, unsigned int); + void *priv; +}; + +typedef void *acpi_handle; + +struct irq_affinity_devres { + unsigned int count; + unsigned int irq[0]; +}; + +struct platform_object { + struct platform_device pdev; + char name[0]; +}; + +struct acpi_device; + +typedef unsigned int uint; + +struct class_interface { + struct list_head node; + const struct class *class; + int (*add_dev)(struct device *); + void (*remove_dev)(struct device *); +}; + +enum scsi_disposition { + NEEDS_RETRY = 8193, + SUCCESS = 8194, + FAILED = 8195, + QUEUED = 8196, + SOFT_ERROR = 8197, + ADD_TO_MLQUEUE = 8198, + TIMEOUT_ERROR = 8199, + SCSI_RETURN_NOT_HANDLED = 8200, + FAST_IO_FAIL = 8201, +}; + +enum scsi_scan_mode { + SCSI_SCAN_INITIAL = 0, + SCSI_SCAN_RESCAN = 1, + SCSI_SCAN_MANUAL = 2, +}; + +enum scsi_device_event { + SDEV_EVT_MEDIA_CHANGE = 1, + SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2, + SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3, + SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4, + SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5, + SDEV_EVT_LUN_CHANGE_REPORTED = 6, + SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7, + SDEV_EVT_POWER_ON_RESET_OCCURRED = 8, + SDEV_EVT_FIRST = 1, + SDEV_EVT_LAST = 8, + SDEV_EVT_MAXBITS = 9, +}; + +struct scsi_event { + enum scsi_device_event evt_type; + struct list_head node; +}; + +typedef void (*activate_complete)(void *, int); + +struct scsi_device_handler { + struct list_head list; + struct module *module; + const char *name; + enum scsi_disposition (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); + int (*attach)(struct scsi_device *); + void (*detach)(struct scsi_device *); + int (*activate)(struct scsi_device *, activate_complete, void *); + blk_status_t (*prep_fn)(struct scsi_device *, struct request *); + int (*set_params)(struct scsi_device *, const char *); + void (*rescan)(struct scsi_device *); +}; + +struct attribute_container { + struct list_head node; + struct klist containers; + struct class *class; + const struct attribute_group *grp; + struct device_attribute **attrs; + int (*match)(struct attribute_container *, struct device *); + long unsigned int flags; +}; + +struct transport_container { + struct attribute_container ac; + const struct attribute_group *statistics; +}; + +struct scsi_transport_template { + struct transport_container host_attrs; + struct transport_container target_attrs; + struct transport_container device_attrs; + int (*user_scan)(struct Scsi_Host *, uint, uint, u64); + int device_size; + int device_private_offset; + int target_size; + int target_private_offset; + int host_size; + unsigned int create_work_queue: 1; + void (*eh_strategy_handler)(struct Scsi_Host *); +}; + +enum phy_upstream { + PHY_UPSTREAM_MAC = 0, + PHY_UPSTREAM_PHY = 1, +}; + +struct phy_link_topology { + struct xarray phys; + u32 next_phy_index; +}; + +struct phy_device_node { + enum phy_upstream upstream_type; + union { + struct net_device *netdev; + struct phy_device *phydev; + } upstream; + struct sfp_bus *parent_sfp_bus; + struct phy_device *phy; +}; + +enum pm_qos_flags_status { + PM_QOS_FLAGS_UNDEFINED = -1, + PM_QOS_FLAGS_NONE = 0, + PM_QOS_FLAGS_SOME = 1, + PM_QOS_FLAGS_ALL = 2, +}; + +struct component_ops { + int (*bind)(struct device *, struct device *, void *); + void (*unbind)(struct device *, struct device *, void *); +}; + +enum usb3_link_state { + USB3_LPM_U0 = 0, + USB3_LPM_U1 = 1, + USB3_LPM_U2 = 2, + USB3_LPM_U3 = 3, +}; + +enum usb_port_connect_type { 
+ USB_PORT_CONNECT_TYPE_UNKNOWN = 0, + USB_PORT_CONNECT_TYPE_HOT_PLUG = 1, + USB_PORT_CONNECT_TYPE_HARD_WIRED = 2, + USB_PORT_NOT_USED = 3, +}; + +struct usb_tt { + struct usb_device *hub; + int multi; + unsigned int think_time; + void *hcpriv; + spinlock_t lock; + struct list_head clear_list; + struct work_struct clear_work; +}; + +enum { + TASKSTATS_CMD_UNSPEC = 0, + TASKSTATS_CMD_GET = 1, + TASKSTATS_CMD_NEW = 2, + __TASKSTATS_CMD_MAX = 3, +}; + +struct phy_configure_opts_dp { + unsigned int link_rate; + unsigned int lanes; + unsigned int voltage[4]; + unsigned int pre[4]; + u8 ssc: 1; + u8 set_rate: 1; + u8 set_lanes: 1; + u8 set_voltages: 1; +}; + +struct phy_configure_opts_lvds { + unsigned int bits_per_lane_and_dclk_cycle; + long unsigned int differential_clk_rate; + unsigned int lanes; + bool is_slave; +}; + +struct phy_configure_opts_mipi_dphy { + unsigned int clk_miss; + unsigned int clk_post; + unsigned int clk_pre; + unsigned int clk_prepare; + unsigned int clk_settle; + unsigned int clk_term_en; + unsigned int clk_trail; + unsigned int clk_zero; + unsigned int d_term_en; + unsigned int eot; + unsigned int hs_exit; + unsigned int hs_prepare; + unsigned int hs_settle; + unsigned int hs_skip; + unsigned int hs_trail; + unsigned int hs_zero; + unsigned int init; + unsigned int lpx; + unsigned int ta_get; + unsigned int ta_go; + unsigned int ta_sure; + unsigned int wakeup; + long unsigned int hs_clk_rate; + long unsigned int lp_clk_rate; + unsigned char lanes; +}; + +enum phy_mode { + PHY_MODE_INVALID = 0, + PHY_MODE_USB_HOST = 1, + PHY_MODE_USB_HOST_LS = 2, + PHY_MODE_USB_HOST_FS = 3, + PHY_MODE_USB_HOST_HS = 4, + PHY_MODE_USB_HOST_SS = 5, + PHY_MODE_USB_DEVICE = 6, + PHY_MODE_USB_DEVICE_LS = 7, + PHY_MODE_USB_DEVICE_FS = 8, + PHY_MODE_USB_DEVICE_HS = 9, + PHY_MODE_USB_DEVICE_SS = 10, + PHY_MODE_USB_OTG = 11, + PHY_MODE_UFS_HS_A = 12, + PHY_MODE_UFS_HS_B = 13, + PHY_MODE_PCIE = 14, + PHY_MODE_ETHERNET = 15, + PHY_MODE_MIPI_DPHY = 16, + PHY_MODE_SATA = 17, + PHY_MODE_LVDS = 18, + PHY_MODE_DP = 19, +}; + +enum phy_media { + PHY_MEDIA_DEFAULT = 0, + PHY_MEDIA_SR = 1, + PHY_MEDIA_DAC = 2, +}; + +union phy_configure_opts { + struct phy_configure_opts_mipi_dphy mipi_dphy; + struct phy_configure_opts_dp dp; + struct phy_configure_opts_lvds lvds; +}; + +struct phy; + +struct phy_ops { + int (*init)(struct phy *); + int (*exit)(struct phy *); + int (*power_on)(struct phy *); + int (*power_off)(struct phy *); + int (*set_mode)(struct phy *, enum phy_mode, int); + int (*set_media)(struct phy *, enum phy_media); + int (*set_speed)(struct phy *, int); + int (*configure)(struct phy *, union phy_configure_opts *); + int (*validate)(struct phy *, enum phy_mode, int, union phy_configure_opts *); + int (*reset)(struct phy *); + int (*calibrate)(struct phy *); + int (*connect)(struct phy *, int); + int (*disconnect)(struct phy *, int); + void (*release)(struct phy *); + struct module *owner; +}; + +struct phy_attrs { + u32 bus_width; + u32 max_link_rate; + enum phy_mode mode; +}; + +struct phy { + struct device dev; + int id; + const struct phy_ops *ops; + struct mutex mutex; + int init_count; + int power_count; + struct phy_attrs attrs; + struct regulator *pwr; + struct dentry *debugfs; + long: 32; +}; + +struct extcon_dev; + +enum usb_charger_type { + UNKNOWN_TYPE = 0, + SDP_TYPE = 1, + DCP_TYPE = 2, + CDP_TYPE = 3, + ACA_TYPE = 4, +}; + +enum usb_charger_state { + USB_CHARGER_DEFAULT = 0, + USB_CHARGER_PRESENT = 1, + USB_CHARGER_ABSENT = 2, +}; + +enum usb_phy_events { + USB_EVENT_NONE = 0, + 
USB_EVENT_VBUS = 1, + USB_EVENT_ID = 2, + USB_EVENT_CHARGER = 3, + USB_EVENT_ENUMERATED = 4, +}; + +enum usb_phy_type { + USB_PHY_TYPE_UNDEFINED = 0, + USB_PHY_TYPE_USB2 = 1, + USB_PHY_TYPE_USB3 = 2, +}; + +enum usb_otg_state { + OTG_STATE_UNDEFINED = 0, + OTG_STATE_B_IDLE = 1, + OTG_STATE_B_SRP_INIT = 2, + OTG_STATE_B_PERIPHERAL = 3, + OTG_STATE_B_WAIT_ACON = 4, + OTG_STATE_B_HOST = 5, + OTG_STATE_A_IDLE = 6, + OTG_STATE_A_WAIT_VRISE = 7, + OTG_STATE_A_WAIT_BCON = 8, + OTG_STATE_A_HOST = 9, + OTG_STATE_A_SUSPEND = 10, + OTG_STATE_A_PERIPHERAL = 11, + OTG_STATE_A_WAIT_VFALL = 12, + OTG_STATE_A_VBUS_ERR = 13, +}; + +struct usb_phy; + +struct usb_phy_io_ops { + int (*read)(struct usb_phy *, u32); + int (*write)(struct usb_phy *, u32, u32); +}; + +struct usb_charger_current { + unsigned int sdp_min; + unsigned int sdp_max; + unsigned int dcp_min; + unsigned int dcp_max; + unsigned int cdp_min; + unsigned int cdp_max; + unsigned int aca_min; + unsigned int aca_max; +}; + +struct usb_otg; + +struct usb_phy { + struct device *dev; + const char *label; + unsigned int flags; + enum usb_phy_type type; + enum usb_phy_events last_event; + struct usb_otg *otg; + struct device *io_dev; + struct usb_phy_io_ops *io_ops; + void *io_priv; + struct extcon_dev *edev; + struct extcon_dev *id_edev; + struct notifier_block vbus_nb; + struct notifier_block id_nb; + struct notifier_block type_nb; + enum usb_charger_type chg_type; + enum usb_charger_state chg_state; + struct usb_charger_current chg_cur; + struct work_struct chg_work; + struct atomic_notifier_head notifier; + u16 port_status; + u16 port_change; + struct list_head head; + int (*init)(struct usb_phy *); + void (*shutdown)(struct usb_phy *); + int (*set_vbus)(struct usb_phy *, int); + int (*set_power)(struct usb_phy *, unsigned int); + int (*set_suspend)(struct usb_phy *, int); + int (*set_wakeup)(struct usb_phy *, bool); + int (*notify_connect)(struct usb_phy *, enum usb_device_speed); + int (*notify_disconnect)(struct usb_phy *, enum usb_device_speed); + enum usb_charger_type (*charger_detect)(struct usb_phy *); +}; + +struct usb_gadget; + +struct usb_otg { + u8 default_a; + struct phy *phy; + struct usb_phy *usb_phy; + struct usb_bus *host; + struct usb_gadget *gadget; + enum usb_otg_state state; + int (*set_host)(struct usb_otg *, struct usb_bus *); + int (*set_peripheral)(struct usb_otg *, struct usb_gadget *); + int (*set_vbus)(struct usb_otg *, bool); + int (*start_srp)(struct usb_otg *); + int (*start_hnp)(struct usb_otg *); +}; + +struct usb_port_status { + __le16 wPortStatus; + __le16 wPortChange; + __le32 dwExtPortStatus; +}; + +struct usb_hub_status { + __le16 wHubStatus; + __le16 wHubChange; +}; + +struct usb_hub_descriptor { + __u8 bDescLength; + __u8 bDescriptorType; + __u8 bNbrPorts; + __le16 wHubCharacteristics; + __u8 bPwrOn2PwrGood; + __u8 bHubContrCurrent; + union { + struct { + __u8 DeviceRemovable[4]; + __u8 PortPwrCtrlMask[4]; + } hs; + struct { + __u8 bHubHdrDecLat; + __le16 wHubDelay; + __le16 DeviceRemovable; + } __attribute__((packed)) ss; + } u; +} __attribute__((packed)); + +struct giveback_urb_bh { + bool running; + bool high_prio; + spinlock_t lock; + struct list_head head; + struct work_struct bh; + struct usb_host_endpoint *completing_ep; +}; + +enum usb_dev_authorize_policy { + USB_DEVICE_AUTHORIZE_NONE = 0, + USB_DEVICE_AUTHORIZE_ALL = 1, + USB_DEVICE_AUTHORIZE_INTERNAL = 2, +}; + +struct hc_driver; + +struct usb_phy_roothub; + +struct dma_pool; + +struct usb_hcd { + struct usb_bus self; + struct kref kref; + const 
char *product_desc; + int speed; + char irq_descr[24]; + struct timer_list rh_timer; + struct urb *status_urb; + struct work_struct wakeup_work; + struct work_struct died_work; + const struct hc_driver *driver; + struct usb_phy *usb_phy; + struct usb_phy_roothub *phy_roothub; + long unsigned int flags; + enum usb_dev_authorize_policy dev_policy; + unsigned int rh_registered: 1; + unsigned int rh_pollable: 1; + unsigned int msix_enabled: 1; + unsigned int msi_enabled: 1; + unsigned int skip_phy_initialization: 1; + unsigned int uses_new_polling: 1; + unsigned int has_tt: 1; + unsigned int amd_resume_bug: 1; + unsigned int can_do_streams: 1; + unsigned int tpl_support: 1; + unsigned int cant_recv_wakeups: 1; + unsigned int irq; + void *regs; + resource_size_t rsrc_start; + resource_size_t rsrc_len; + unsigned int power_budget; + struct giveback_urb_bh high_prio_bh; + struct giveback_urb_bh low_prio_bh; + struct mutex *address0_mutex; + struct mutex *bandwidth_mutex; + struct usb_hcd *shared_hcd; + struct usb_hcd *primary_hcd; + struct dma_pool *pool[4]; + int state; + struct gen_pool *localmem_pool; + long: 32; + long unsigned int hcd_priv[0]; +}; + +struct hc_driver { + const char *description; + const char *product_desc; + size_t hcd_priv_size; + irqreturn_t (*irq)(struct usb_hcd *); + int flags; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*pci_suspend)(struct usb_hcd *, bool); + int (*pci_resume)(struct usb_hcd *, pm_message_t); + int (*pci_poweroff_late)(struct usb_hcd *, bool); + void (*stop)(struct usb_hcd *); + void (*shutdown)(struct usb_hcd *); + int (*get_frame_number)(struct usb_hcd *); + int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t); + int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); + int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t); + void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); + void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); + void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); + int (*hub_status_data)(struct usb_hcd *, char *); + int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); + int (*bus_suspend)(struct usb_hcd *); + int (*bus_resume)(struct usb_hcd *); + int (*start_port_reset)(struct usb_hcd *, unsigned int); + long unsigned int (*get_resuming_ports)(struct usb_hcd *); + void (*relinquish_port)(struct usb_hcd *, int); + int (*port_handed_over)(struct usb_hcd *, int); + void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); + int (*alloc_dev)(struct usb_hcd *, struct usb_device *); + void (*free_dev)(struct usb_hcd *, struct usb_device *); + int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t); + int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*address_device)(struct usb_hcd *, struct usb_device *, unsigned int); + int (*enable_device)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int (*reset_device)(struct usb_hcd *, struct usb_device *); + int (*update_device)(struct usb_hcd *, 
struct usb_device *); + int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); + int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*find_raw_port_number)(struct usb_hcd *, int); + int (*port_power)(struct usb_hcd *, int, bool); + int (*submit_single_step_set_feature)(struct usb_hcd *, struct urb *, int); +}; + +struct typec_connector { + void (*attach)(struct typec_connector *, struct device *); + void (*deattach)(struct typec_connector *, struct device *); +}; + +typedef u32 usb_port_location_t; + +struct usb_port; + +struct usb_hub { + struct device *intfdev; + struct usb_device *hdev; + struct kref kref; + struct urb *urb; + u8 (*buffer)[8]; + union { + struct usb_hub_status hub; + struct usb_port_status port; + } *status; + struct mutex status_mutex; + int error; + int nerrors; + long unsigned int event_bits[1]; + long unsigned int change_bits[1]; + long unsigned int removed_bits[1]; + long unsigned int wakeup_bits[1]; + long unsigned int power_bits[1]; + long unsigned int child_usage_bits[1]; + long unsigned int warm_reset_bits[1]; + struct usb_hub_descriptor *descriptor; + struct usb_tt tt; + unsigned int mA_per_port; + unsigned int wakeup_enabled_descendants; + unsigned int limited_power: 1; + unsigned int quiescing: 1; + unsigned int disconnected: 1; + unsigned int in_reset: 1; + unsigned int quirk_disable_autosuspend: 1; + unsigned int quirk_check_port_auto_suspend: 1; + unsigned int has_indicators: 1; + u8 indicator[31]; + struct delayed_work leds; + struct delayed_work init_work; + struct work_struct events; + spinlock_t irq_urb_lock; + struct timer_list irq_urb_retry; + struct usb_port **ports; + struct list_head onboard_devs; +}; + +struct usb_dev_state; + +struct usb_port { + struct usb_device *child; + long: 32; + struct device dev; + struct usb_dev_state *port_owner; + struct usb_port *peer; + struct typec_connector *connector; + struct dev_pm_qos_request *req; + enum usb_port_connect_type connect_type; + enum usb_device_state state; + struct kernfs_node *state_kn; + usb_port_location_t location; + struct mutex status_lock; + u32 over_current_count; + u8 portnum; + u32 quirks; + unsigned int early_stop: 1; + unsigned int ignore_event: 1; + unsigned int is_superspeed: 1; + unsigned int usb3_lpm_u1_permit: 1; + unsigned int usb3_lpm_u2_permit: 1; +}; + +struct ir_raw_timings_pd { + unsigned int header_pulse; + unsigned int header_space; + unsigned int bit_pulse; + unsigned int bit_space[2]; + unsigned int trailer_pulse; + unsigned int trailer_space; + unsigned int msb_first: 1; +}; + +enum nec_state { + STATE_INACTIVE___2 = 0, + STATE_HEADER_SPACE = 1, + STATE_BIT_PULSE = 2, + STATE_BIT_SPACE = 3, + STATE_TRAILER_PULSE = 4, + STATE_TRAILER_SPACE = 5, +}; + +struct blk_plug_cb; + +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); + +struct blk_plug_cb { + struct list_head list; + blk_plug_cb_fn callback; + void *data; +}; + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *, struct rb_node *); + void (*copy)(struct rb_node *, struct rb_node *); + void (*rotate)(struct rb_node *, struct rb_node *); +}; + +struct md_cluster_operations { + int (*join)(struct mddev *, int); + int (*leave)(struct mddev *); + int (*slot_number)(struct mddev *); + int (*resync_info_update)(struct mddev *, sector_t, sector_t); + int (*resync_start_notify)(struct mddev *); + int (*resync_status_get)(struct mddev *); + 
void (*resync_info_get)(struct mddev *, sector_t *, sector_t *); + int (*metadata_update_start)(struct mddev *); + int (*metadata_update_finish)(struct mddev *); + void (*metadata_update_cancel)(struct mddev *); + int (*resync_start)(struct mddev *); + int (*resync_finish)(struct mddev *); + int (*area_resyncing)(struct mddev *, int, sector_t, sector_t); + int (*add_new_disk)(struct mddev *, struct md_rdev *); + void (*add_new_disk_cancel)(struct mddev *); + int (*new_disk_ack)(struct mddev *, bool); + int (*remove_disk)(struct mddev *, struct md_rdev *); + void (*load_bitmaps)(struct mddev *, int); + int (*gather_bitmaps)(struct md_rdev *); + int (*resize_bitmaps)(struct mddev *, sector_t, sector_t); + int (*lock_all_bitmaps)(struct mddev *); + void (*unlock_all_bitmaps)(struct mddev *); + void (*update_size)(struct mddev *, sector_t); +}; + +struct serial_info { + struct rb_node node; + long: 32; + sector_t start; + sector_t last; + sector_t _subtree_last; +}; + +struct raid1_info { + struct md_rdev *rdev; + long: 32; + sector_t head_position; + sector_t next_seq_sect; + sector_t seq_start; +}; + +struct pool_info { + struct mddev *mddev; + int raid_disks; +}; + +struct r1conf { + struct mddev *mddev; + struct raid1_info *mirrors; + int raid_disks; + int nonrot_disks; + spinlock_t device_lock; + struct list_head retry_list; + struct list_head bio_end_io_list; + struct bio_list pending_bio_list; + wait_queue_head_t wait_barrier; + spinlock_t resync_lock; + atomic_t nr_sync_pending; + atomic_t *nr_pending; + atomic_t *nr_waiting; + atomic_t *nr_queued; + atomic_t *barrier; + int array_frozen; + int fullsync; + int recovery_disabled; + struct pool_info *poolinfo; + mempool_t r1bio_pool; + mempool_t r1buf_pool; + struct bio_set bio_split; + struct page *tmppage; + struct md_thread *thread; + sector_t cluster_sync_low; + sector_t cluster_sync_high; +}; + +struct r1bio { + atomic_t remaining; + atomic_t behind_remaining; + sector_t sector; + int sectors; + long unsigned int state; + struct mddev *mddev; + struct bio *master_bio; + int read_disk; + struct list_head retry_list; + struct bio *behind_master_bio; + struct bio *bios[0]; +}; + +enum r1bio_state { + R1BIO_Uptodate = 0, + R1BIO_IsSync = 1, + R1BIO_Degraded = 2, + R1BIO_BehindIO = 3, + R1BIO_ReadError = 4, + R1BIO_Returned = 5, + R1BIO_MadeGood = 6, + R1BIO_WriteError = 7, + R1BIO_FailFast = 8, +}; + +struct resync_pages { + void *raid_bio; + struct page *pages[16]; +}; + +struct raid1_plug_cb { + struct blk_plug_cb cb; + struct bio_list pending; + unsigned int count; +}; + +struct read_balance_ctl { + sector_t closest_dist; + int closest_dist_disk; + int min_pending; + int min_pending_disk; + int sequential_disk; + int readable_disks; + long: 32; +}; + +typedef int (*netlink_filter_fn)(struct sock *, struct sk_buff *, void *); + +struct pcpu_gen_cookie { + local_t nesting; + long: 32; + u64 last; +}; + +struct gen_cookie { + struct pcpu_gen_cookie *local; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic64_t forward_last; + atomic64_t reverse_last; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum { + INET_DIAG_REQ_NONE = 0, + INET_DIAG_REQ_BYTECODE = 1, + INET_DIAG_REQ_SK_BPF_STORAGES = 2, + INET_DIAG_REQ_PROTOCOL = 3, + __INET_DIAG_REQ_MAX = 4, +}; + +struct sock_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; +}; + +enum { + SK_MEMINFO_RMEM_ALLOC = 0, + SK_MEMINFO_RCVBUF = 1, + SK_MEMINFO_WMEM_ALLOC = 2, + SK_MEMINFO_SNDBUF = 3, + SK_MEMINFO_FWD_ALLOC = 4, + SK_MEMINFO_WMEM_QUEUED = 5, + SK_MEMINFO_OPTMEM = 6, + SK_MEMINFO_BACKLOG = 7, + SK_MEMINFO_DROPS = 8, + SK_MEMINFO_VARS = 9, +}; + +enum sknetlink_groups { + SKNLGRP_NONE = 0, + SKNLGRP_INET_TCP_DESTROY = 1, + SKNLGRP_INET_UDP_DESTROY = 2, + SKNLGRP_INET6_TCP_DESTROY = 3, + SKNLGRP_INET6_UDP_DESTROY = 4, + __SKNLGRP_MAX = 5, +}; + +struct sock_diag_handler { + struct module *owner; + __u8 family; + int (*dump)(struct sk_buff *, struct nlmsghdr *); + int (*get_info)(struct sk_buff *, struct sock *); + int (*destroy)(struct sk_buff *, struct nlmsghdr *); +}; + +struct sock_diag_inet_compat { + struct module *owner; + int (*fn)(struct sk_buff *, struct nlmsghdr *); +}; + +struct broadcast_sk { + struct sock *sk; + struct work_struct work; +}; + +enum { + BPF_LOCAL_STORAGE_GET_F_CREATE = 1, + BPF_SK_STORAGE_GET_F_CREATE = 1, +}; + +enum { + BTF_SOCK_TYPE_INET = 0, + BTF_SOCK_TYPE_INET_CONN = 1, + BTF_SOCK_TYPE_INET_REQ = 2, + BTF_SOCK_TYPE_INET_TW = 3, + BTF_SOCK_TYPE_REQ = 4, + BTF_SOCK_TYPE_SOCK = 5, + BTF_SOCK_TYPE_SOCK_COMMON = 6, + BTF_SOCK_TYPE_TCP = 7, + BTF_SOCK_TYPE_TCP_REQ = 8, + BTF_SOCK_TYPE_TCP_TW = 9, + BTF_SOCK_TYPE_TCP6 = 10, + BTF_SOCK_TYPE_UDP = 11, + BTF_SOCK_TYPE_UDP6 = 12, + BTF_SOCK_TYPE_UNIX = 13, + BTF_SOCK_TYPE_MPTCP = 14, + BTF_SOCK_TYPE_SOCKET = 15, + MAX_BTF_SOCK_TYPE = 16, +}; + +enum { + SK_DIAG_BPF_STORAGE_REQ_NONE = 0, + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1, + __SK_DIAG_BPF_STORAGE_REQ_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_REP_NONE = 0, + SK_DIAG_BPF_STORAGE = 1, + __SK_DIAG_BPF_STORAGE_REP_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_NONE = 0, + SK_DIAG_BPF_STORAGE_PAD = 1, + SK_DIAG_BPF_STORAGE_MAP_ID = 2, + SK_DIAG_BPF_STORAGE_MAP_VALUE = 3, + __SK_DIAG_BPF_STORAGE_MAX = 4, +}; + +typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *); + +typedef u64 (*btf_bpf_sk_storage_get_tracing)(struct bpf_map *, struct sock *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete_tracing)(struct bpf_map *, struct sock *); + +struct bpf_sk_storage_diag { + u32 nr_maps; + struct bpf_map *maps[0]; +}; + +struct bpf_iter_seq_sk_storage_map_info { + struct bpf_map *map; + unsigned int bucket_id; + unsigned int skip_elems; +}; + +struct bpf_iter__bpf_sk_storage_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + struct sock *sk; + }; + union { + void *value; + }; +}; + +enum ethtool_header_flags { + ETHTOOL_FLAG_COMPACT_BITSETS = 1, + ETHTOOL_FLAG_OMIT_REPLY = 2, + ETHTOOL_FLAG_STATS = 4, +}; + +enum { + ETHTOOL_A_FEC_STAT_UNSPEC = 0, + ETHTOOL_A_FEC_STAT_PAD = 1, + ETHTOOL_A_FEC_STAT_CORRECTED = 2, + ETHTOOL_A_FEC_STAT_UNCORR = 3, + ETHTOOL_A_FEC_STAT_CORR_BITS = 4, + __ETHTOOL_A_FEC_STAT_CNT = 5, + ETHTOOL_A_FEC_STAT_MAX = 4, +}; + +struct fec_stat_grp { + u64 stats[9]; + u8 cnt; + long: 32; +}; + +struct fec_reply_data { + struct ethnl_reply_data 
base; + long unsigned int fec_link_modes[4]; + u32 active_fec; + u8 fec_auto; + long: 32; + struct fec_stat_grp corr; + struct fec_stat_grp uncorr; + struct fec_stat_grp corr_bits; +}; + +struct icmpv6_echo { + __be16 identifier; + __be16 sequence; +}; + +struct icmpv6_nd_advt { + __u32 router: 1; + __u32 solicited: 1; + __u32 override: 1; + __u32 reserved: 29; +}; + +struct icmpv6_nd_ra { + __u8 hop_limit; + __u8 managed: 1; + __u8 other: 1; + __u8 home_agent: 1; + __u8 router_pref: 2; + __u8 reserved: 3; + __be16 rt_lifetime; +}; + +struct icmp6hdr { + __u8 icmp6_type; + __u8 icmp6_code; + __sum16 icmp6_cksum; + union { + __be32 un_data32[1]; + __be16 un_data16[2]; + __u8 un_data8[4]; + struct icmpv6_echo u_echo; + struct icmpv6_nd_advt u_nd_advt; + struct icmpv6_nd_ra u_nd_ra; + } icmp6_dataun; +}; + +enum ip_conntrack_status { + IPS_EXPECTED_BIT = 0, + IPS_EXPECTED = 1, + IPS_SEEN_REPLY_BIT = 1, + IPS_SEEN_REPLY = 2, + IPS_ASSURED_BIT = 2, + IPS_ASSURED = 4, + IPS_CONFIRMED_BIT = 3, + IPS_CONFIRMED = 8, + IPS_SRC_NAT_BIT = 4, + IPS_SRC_NAT = 16, + IPS_DST_NAT_BIT = 5, + IPS_DST_NAT = 32, + IPS_NAT_MASK = 48, + IPS_SEQ_ADJUST_BIT = 6, + IPS_SEQ_ADJUST = 64, + IPS_SRC_NAT_DONE_BIT = 7, + IPS_SRC_NAT_DONE = 128, + IPS_DST_NAT_DONE_BIT = 8, + IPS_DST_NAT_DONE = 256, + IPS_NAT_DONE_MASK = 384, + IPS_DYING_BIT = 9, + IPS_DYING = 512, + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = 1024, + IPS_TEMPLATE_BIT = 11, + IPS_TEMPLATE = 2048, + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = 4096, + IPS_NAT_CLASH_BIT = 12, + IPS_NAT_CLASH = 4096, + IPS_HELPER_BIT = 13, + IPS_HELPER = 8192, + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = 16384, + IPS_HW_OFFLOAD_BIT = 15, + IPS_HW_OFFLOAD = 32768, + IPS_UNCHANGEABLE_MASK = 56313, + __IPS_MAX_BIT = 16, +}; + +struct skb_checksum_ops { + __wsum (*update)(const void *, int, __wsum); + __wsum (*combine)(__wsum, __wsum, int, int); +}; + +enum { + RTN_UNSPEC = 0, + RTN_UNICAST = 1, + RTN_LOCAL = 2, + RTN_BROADCAST = 3, + RTN_ANYCAST = 4, + RTN_MULTICAST = 5, + RTN_BLACKHOLE = 6, + RTN_UNREACHABLE = 7, + RTN_PROHIBIT = 8, + RTN_THROW = 9, + RTN_NAT = 10, + RTN_XRESOLVE = 11, + __RTN_MAX = 12, +}; + +struct inet_timewait_sock { + struct sock_common __tw_common; + __u32 tw_mark; + unsigned char tw_substate; + unsigned char tw_rcv_wscale; + __be16 tw_sport; + unsigned int tw_transparent: 1; + unsigned int tw_flowlabel: 20; + unsigned int tw_usec_ts: 1; + unsigned int tw_pad: 2; + unsigned int tw_tos: 8; + u32 tw_txhash; + u32 tw_priority; + struct timer_list tw_timer; + struct inet_bind_bucket *tw_tb; + struct inet_bind2_bucket *tw_tb2; +}; + +struct icmphdr { + __u8 type; + __u8 code; + __sum16 checksum; + union { + struct { + __be16 id; + __be16 sequence; + } echo; + __be32 gateway; + struct { + __be16 __unused; + __be16 mtu; + } frag; + __u8 reserved[4]; + } un; +}; + +enum nf_nat_manip_type { + NF_NAT_MANIP_SRC = 0, + NF_NAT_MANIP_DST = 1, +}; + +enum nf_ip6_hook_priorities { + NF_IP6_PRI_FIRST = -2147483648, + NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450, + NF_IP6_PRI_CONNTRACK_DEFRAG = -400, + NF_IP6_PRI_RAW = -300, + NF_IP6_PRI_SELINUX_FIRST = -225, + NF_IP6_PRI_CONNTRACK = -200, + NF_IP6_PRI_MANGLE = -150, + NF_IP6_PRI_NAT_DST = -100, + NF_IP6_PRI_FILTER = 0, + NF_IP6_PRI_SECURITY = 50, + NF_IP6_PRI_NAT_SRC = 100, + NF_IP6_PRI_SELINUX_LAST = 225, + NF_IP6_PRI_CONNTRACK_HELPER = 300, + NF_IP6_PRI_LAST = 2147483647, +}; + +struct xt_tcp { + __u16 spts[2]; + __u16 dpts[2]; + __u8 option; + __u8 flg_mask; + __u8 flg_cmp; + __u8 invflags; +}; + +struct xt_udp { + __u16 spts[2]; 
+ __u16 dpts[2]; + __u8 invflags; +}; + +struct ipt_icmp { + __u8 type; + __u8 code[2]; + __u8 invflags; +}; + +struct ip6t_icmp { + __u8 type; + __u8 code[2]; + __u8 invflags; +}; + +enum lwtunnel_encap_types { + LWTUNNEL_ENCAP_NONE = 0, + LWTUNNEL_ENCAP_MPLS = 1, + LWTUNNEL_ENCAP_IP = 2, + LWTUNNEL_ENCAP_ILA = 3, + LWTUNNEL_ENCAP_IP6 = 4, + LWTUNNEL_ENCAP_SEG6 = 5, + LWTUNNEL_ENCAP_BPF = 6, + LWTUNNEL_ENCAP_SEG6_LOCAL = 7, + LWTUNNEL_ENCAP_RPL = 8, + LWTUNNEL_ENCAP_IOAM6 = 9, + LWTUNNEL_ENCAP_XFRM = 10, + __LWTUNNEL_ENCAP_MAX = 11, +}; + +enum lwtunnel_ip_t { + LWTUNNEL_IP_UNSPEC = 0, + LWTUNNEL_IP_ID = 1, + LWTUNNEL_IP_DST = 2, + LWTUNNEL_IP_SRC = 3, + LWTUNNEL_IP_TTL = 4, + LWTUNNEL_IP_TOS = 5, + LWTUNNEL_IP_FLAGS = 6, + LWTUNNEL_IP_PAD = 7, + LWTUNNEL_IP_OPTS = 8, + __LWTUNNEL_IP_MAX = 9, +}; + +enum lwtunnel_ip6_t { + LWTUNNEL_IP6_UNSPEC = 0, + LWTUNNEL_IP6_ID = 1, + LWTUNNEL_IP6_DST = 2, + LWTUNNEL_IP6_SRC = 3, + LWTUNNEL_IP6_HOPLIMIT = 4, + LWTUNNEL_IP6_TC = 5, + LWTUNNEL_IP6_FLAGS = 6, + LWTUNNEL_IP6_PAD = 7, + LWTUNNEL_IP6_OPTS = 8, + __LWTUNNEL_IP6_MAX = 9, +}; + +enum { + LWTUNNEL_IP_OPTS_UNSPEC = 0, + LWTUNNEL_IP_OPTS_GENEVE = 1, + LWTUNNEL_IP_OPTS_VXLAN = 2, + LWTUNNEL_IP_OPTS_ERSPAN = 3, + __LWTUNNEL_IP_OPTS_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0, + LWTUNNEL_IP_OPT_GENEVE_CLASS = 1, + LWTUNNEL_IP_OPT_GENEVE_TYPE = 2, + LWTUNNEL_IP_OPT_GENEVE_DATA = 3, + __LWTUNNEL_IP_OPT_GENEVE_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_VXLAN_GBP = 1, + __LWTUNNEL_IP_OPT_VXLAN_MAX = 2, +}; + +enum { + LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_ERSPAN_VER = 1, + LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2, + LWTUNNEL_IP_OPT_ERSPAN_DIR = 3, + LWTUNNEL_IP_OPT_ERSPAN_HWID = 4, + __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5, +}; + +struct lwtunnel_encap_ops { + int (*build_state)(struct net *, struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *); + void (*destroy_state)(struct lwtunnel_state *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + int (*input)(struct sk_buff *); + int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); + int (*get_encap_size)(struct lwtunnel_state *); + int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); + int (*xmit)(struct sk_buff *); + struct module *owner; +}; + +enum { + IFLA_IPTUN_UNSPEC = 0, + IFLA_IPTUN_LINK = 1, + IFLA_IPTUN_LOCAL = 2, + IFLA_IPTUN_REMOTE = 3, + IFLA_IPTUN_TTL = 4, + IFLA_IPTUN_TOS = 5, + IFLA_IPTUN_ENCAP_LIMIT = 6, + IFLA_IPTUN_FLOWINFO = 7, + IFLA_IPTUN_FLAGS = 8, + IFLA_IPTUN_PROTO = 9, + IFLA_IPTUN_PMTUDISC = 10, + IFLA_IPTUN_6RD_PREFIX = 11, + IFLA_IPTUN_6RD_RELAY_PREFIX = 12, + IFLA_IPTUN_6RD_PREFIXLEN = 13, + IFLA_IPTUN_6RD_RELAY_PREFIXLEN = 14, + IFLA_IPTUN_ENCAP_TYPE = 15, + IFLA_IPTUN_ENCAP_FLAGS = 16, + IFLA_IPTUN_ENCAP_SPORT = 17, + IFLA_IPTUN_ENCAP_DPORT = 18, + IFLA_IPTUN_COLLECT_METADATA = 19, + IFLA_IPTUN_FWMARK = 20, + __IFLA_IPTUN_MAX = 21, +}; + +struct dst_cache_pcpu; + +struct dst_cache { + struct dst_cache_pcpu *cache; + long unsigned int reset_ts; +}; + +struct ip_tunnel_key { + __be64 tun_id; + union { + struct { + __be32 src; + __be32 dst; + } ipv4; + struct { + struct in6_addr src; + struct in6_addr dst; + } ipv6; + } u; + long unsigned int tun_flags[1]; + __be32 label; + u32 nhid; + u8 tos; + u8 ttl; + __be16 tp_src; + __be16 tp_dst; + __u8 flow_flags; + long: 32; +}; + +struct ip_tunnel_encap { + u16 type; + u16 flags; + __be16 sport; + __be16 dport; +}; + +struct ip_tunnel_info { + struct 
ip_tunnel_key key; + struct ip_tunnel_encap encap; + struct dst_cache dst_cache; + u8 options_len; + u8 mode; + long: 32; +}; + +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); + int (*err_handler)(struct sk_buff *, u32); +}; + +struct ip6_tnl_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); +}; + +typedef u64 sci_t; + +enum metadata_type { + METADATA_IP_TUNNEL = 0, + METADATA_HW_PORT_MUX = 1, + METADATA_MACSEC = 2, + METADATA_XFRM = 3, +}; + +struct hw_port_info { + struct net_device *lower_dev; + u32 port_id; +}; + +struct macsec_info { + sci_t sci; +}; + +struct xfrm_md_info { + u32 if_id; + int link; + struct dst_entry *dst_orig; +}; + +struct metadata_dst { + struct dst_entry dst; + enum metadata_type type; + long: 32; + union { + struct ip_tunnel_info tun_info; + struct hw_port_info port_info; + struct macsec_info macsec_info; + struct xfrm_md_info xfrm_info; + } u; +}; + +struct udp_tunnel_info { + short unsigned int type; + sa_family_t sa_family; + __be16 port; + u8 hw_priv; +}; + +struct udp_tunnel_nic_shared { + struct udp_tunnel_nic *udp_tunnel_nic_info; + struct list_head devices; +}; + +struct geneve_opt { + __be16 opt_class; + u8 type; + u8 r1: 1; + u8 r2: 1; + u8 r3: 1; + u8 length: 5; + u8 opt_data[0]; +}; + +struct vxlan_metadata { + u32 gbp; +}; + +struct erspan_md2 { + __be32 timestamp; + __be16 sgt; + __u8 p: 1; + __u8 ft: 5; + __u8 hwid_upper: 2; + __u8 hwid: 4; + __u8 dir: 1; + __u8 gra: 2; + __u8 o: 1; +}; + +struct erspan_metadata { + int version; + union { + __be32 index; + struct erspan_md2 md2; + } u; +}; + +enum sock_flags { + SOCK_DEAD = 0, + SOCK_DONE = 1, + SOCK_URGINLINE = 2, + SOCK_KEEPOPEN = 3, + SOCK_LINGER = 4, + SOCK_DESTROY = 5, + SOCK_BROADCAST = 6, + SOCK_TIMESTAMP = 7, + SOCK_ZAPPED = 8, + SOCK_USE_WRITE_QUEUE = 9, + SOCK_DBG = 10, + SOCK_RCVTSTAMP = 11, + SOCK_RCVTSTAMPNS = 12, + SOCK_LOCALROUTE = 13, + SOCK_MEMALLOC = 14, + SOCK_TIMESTAMPING_RX_SOFTWARE = 15, + SOCK_FASYNC = 16, + SOCK_RXQ_OVFL = 17, + SOCK_ZEROCOPY = 18, + SOCK_WIFI_STATUS = 19, + SOCK_NOFCS = 20, + SOCK_FILTER_LOCKED = 21, + SOCK_SELECT_ERR_QUEUE = 22, + SOCK_RCU_FREE = 23, + SOCK_TXTIME = 24, + SOCK_XDP = 25, + SOCK_TSTAMP_NEW = 26, + SOCK_RCVMARK = 27, +}; + +struct sk_psock_progs { + struct bpf_prog *msg_parser; + struct bpf_prog *stream_parser; + struct bpf_prog *stream_verdict; + struct bpf_prog *skb_verdict; + struct bpf_link *msg_parser_link; + struct bpf_link *stream_parser_link; + struct bpf_link *stream_verdict_link; + struct bpf_link *skb_verdict_link; +}; + +struct sk_psock_work_state { + u32 len; + u32 off; +}; + +struct sk_msg; + +struct sk_psock { + struct sock *sk; + struct sock *sk_redir; + u32 apply_bytes; + u32 cork_bytes; + u32 eval; + bool redir_ingress; + struct sk_msg *cork; + struct sk_psock_progs progs; + struct sk_buff_head ingress_skb; + struct list_head ingress_msg; + spinlock_t ingress_lock; + long unsigned int state; + struct list_head link; + spinlock_t link_lock; + refcount_t refcnt; + void (*saved_unhash)(struct sock *); + void (*saved_destroy)(struct sock *); + void (*saved_close)(struct sock *, long int); + void (*saved_write_space)(struct sock *); + void (*saved_data_ready)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, 
struct sk_psock *, bool); + struct proto *sk_proto; + struct mutex work_mutex; + struct sk_psock_work_state work_state; + struct delayed_work work; + struct sock *sk_pair; + struct rcu_work rwork; +}; + +struct udp_sock { + struct inet_sock inet; + long unsigned int udp_flags; + int pending; + __u8 encap_type; + __u16 udp_lrpa_hash; + struct hlist_nulls_node udp_lrpa_node; + __u16 len; + __u16 gso_size; + __u16 pcslen; + __u16 pcrlen; + int (*encap_rcv)(struct sock *, struct sk_buff *); + void (*encap_err_rcv)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*encap_err_lookup)(struct sock *, struct sk_buff *); + void (*encap_destroy)(struct sock *); + struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sock *, struct sk_buff *, int); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sk_buff_head reader_queue; + int forward_deficit; + int forward_threshold; + bool peeking_with_offset; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sk_msg_sg { + u32 start; + u32 curr; + u32 end; + u32 size; + u32 copybreak; + long unsigned int copy[1]; + struct scatterlist data[19]; +}; + +struct sk_msg { + struct sk_msg_sg sg; + void *data; + void *data_end; + u32 apply_bytes; + u32 cork_bytes; + u32 flags; + struct sk_buff *skb; + struct sock *sk_redir; + struct sock *sk; + struct list_head list; +}; + +enum { + UDP_BPF_IPV4 = 0, + UDP_BPF_IPV6 = 1, + UDP_BPF_NUM_PROTS = 2, +}; + +struct cmsghdr { + __kernel_size_t cmsg_len; + int cmsg_level; + int cmsg_type; +}; + +struct sockaddr_in6 { + short unsigned int sin6_family; + __be16 sin6_port; + __be32 sin6_flowinfo; + struct in6_addr sin6_addr; + __u32 sin6_scope_id; +}; + +struct ip6_mtuinfo { + struct sockaddr_in6 ip6m_addr; + __u32 ip6m_mtu; +}; + +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + struct in_addr ipi_addr; +}; + +struct sockcm_cookie { + u64 transmit_time; + u32 mark; + u32 tsflags; + u32 ts_opt_id; + long: 32; +}; + +struct sock_ee_data_rfc4884 { + __u16 len; + __u8 flags; + __u8 reserved; +}; + +struct sock_extended_err { + __u32 ee_errno; + __u8 ee_origin; + __u8 ee_type; + __u8 ee_code; + __u8 ee_pad; + __u32 ee_info; + union { + __u32 ee_data; + struct sock_ee_data_rfc4884 ee_rfc4884; + }; +}; + +struct static_key_false_deferred { + struct static_key_false key; + long unsigned int timeout; + struct delayed_work work; +}; + +struct ipcm6_cookie { + struct sockcm_cookie sockc; + __s16 hlimit; + __s16 tclass; + __u16 gso_size; + __s8 dontfrag; + struct ipv6_txoptions *opt; + long: 32; +}; + +struct sock_exterr_skb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + struct sock_extended_err ee; + u16 addr_offset; + __be16 port; + u8 opt_stats: 1; + u8 unused: 7; +}; + +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING = 1, + UMH_DISABLED = 2, +}; + +struct msi_dev_domain { + struct xarray store; + struct irq_domain *domain; +}; + +struct msi_device_data { + long unsigned int properties; 
+ struct mutex mutex; + struct msi_dev_domain __domains[1]; + long unsigned int __iter_idx; +}; + +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY = 1, + IRQ_SET_MASK_OK_DONE = 2, +}; + +struct arch_msi_msg_addr_lo { + u32 address_lo; +}; + +typedef struct arch_msi_msg_addr_lo arch_msi_msg_addr_lo_t; + +struct arch_msi_msg_addr_hi { + u32 address_hi; +}; + +typedef struct arch_msi_msg_addr_hi arch_msi_msg_addr_hi_t; + +struct arch_msi_msg_data { + u32 data; +}; + +typedef struct arch_msi_msg_data arch_msi_msg_data_t; + +struct msi_msg { + union { + u32 address_lo; + arch_msi_msg_addr_lo_t arch_addr_lo; + }; + union { + u32 address_hi; + arch_msi_msg_addr_hi_t arch_addr_hi; + }; + union { + u32 data; + arch_msi_msg_data_t arch_data; + }; +}; + +struct pci_msi_desc { + union { + u32 msi_mask; + u32 msix_ctrl; + }; + struct { + u8 is_msix: 1; + u8 multiple: 3; + u8 multi_cap: 3; + u8 can_mask: 1; + u8 is_64: 1; + u8 is_virtual: 1; + unsigned int default_irq; + } msi_attrib; + union { + u8 mask_pos; + void *mask_base; + }; +}; + +union msi_domain_cookie { + u64 value; + void *ptr; + void *iobase; +}; + +union msi_instance_cookie { + u64 value; + void *ptr; +}; + +struct msi_desc_data { + union msi_domain_cookie dcookie; + union msi_instance_cookie icookie; +}; + +struct msi_desc { + unsigned int irq; + unsigned int nvec_used; + struct device *dev; + struct msi_msg msg; + struct irq_affinity_desc *affinity; + struct device_attribute *sysfs_attrs; + void (*write_msi_msg)(struct msi_desc *, void *); + void *write_msi_msg_data; + u16 msi_index; + long: 32; + union { + struct pci_msi_desc pci; + struct msi_desc_data data; + }; +}; + +enum { + IRQCHIP_SET_TYPE_MASKED = 1, + IRQCHIP_EOI_IF_HANDLED = 2, + IRQCHIP_MASK_ON_SUSPEND = 4, + IRQCHIP_ONOFFLINE_ENABLED = 8, + IRQCHIP_SKIP_SET_WAKE = 16, + IRQCHIP_ONESHOT_SAFE = 32, + IRQCHIP_EOI_THREADED = 64, + IRQCHIP_SUPPORTS_LEVEL_MSI = 128, + IRQCHIP_SUPPORTS_NMI = 256, + IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = 512, + IRQCHIP_AFFINITY_PRE_STARTUP = 1024, + IRQCHIP_IMMUTABLE = 2048, +}; + +enum { + IRQCHIP_FWNODE_REAL = 0, + IRQCHIP_FWNODE_NAMED = 1, + IRQCHIP_FWNODE_NAMED_ID = 2, +}; + +enum msi_domain_ids { + MSI_DEFAULT_DOMAIN = 0, + MSI_MAX_DEVICE_IRQDOMAINS = 1, +}; + +struct msi_map { + int index; + int virq; +}; + +struct msi_alloc_info { + struct msi_desc *desc; + irq_hw_number_t hwirq; + long unsigned int flags; + union { + long unsigned int ul; + void *ptr; + } scratchpad[2]; +}; + +typedef struct msi_alloc_info msi_alloc_info_t; + +enum msi_desc_filter { + MSI_DESC_ALL = 0, + MSI_DESC_NOTASSOCIATED = 1, + MSI_DESC_ASSOCIATED = 2, +}; + +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *); + int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *); + void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); + int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); + void (*prepare_desc)(struct irq_domain *, msi_alloc_info_t *, struct msi_desc *); + void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); + int (*domain_alloc_irqs)(struct irq_domain *, struct device *, int); + void (*domain_free_irqs)(struct irq_domain *, struct device *); + void (*msi_post_free)(struct irq_domain *, struct device *); + int (*msi_translate)(struct irq_domain *, struct irq_fwspec *, irq_hw_number_t *, unsigned int *); +}; + +struct msi_domain_info { + u32 flags; + enum irq_domain_bus_token bus_token; + unsigned 
int hwsize; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +struct msi_domain_template { + char name[48]; + struct irq_chip chip; + struct msi_domain_ops ops; + struct msi_domain_info info; +}; + +enum { + MSI_FLAG_USE_DEF_DOM_OPS = 1, + MSI_FLAG_USE_DEF_CHIP_OPS = 2, + MSI_FLAG_ACTIVATE_EARLY = 4, + MSI_FLAG_MUST_REACTIVATE = 8, + MSI_FLAG_DEV_SYSFS = 16, + MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = 32, + MSI_FLAG_FREE_MSI_DESCS = 64, + MSI_FLAG_USE_DEV_FWNODE = 128, + MSI_FLAG_PARENT_PM_DEV = 256, + MSI_FLAG_PCI_MSI_MASK_PARENT = 512, + MSI_GENERIC_FLAGS_MASK = 65535, + MSI_DOMAIN_FLAGS_MASK = 4294901760, + MSI_FLAG_MULTI_PCI_MSI = 65536, + MSI_FLAG_PCI_MSIX = 131072, + MSI_FLAG_LEVEL_CAPABLE = 262144, + MSI_FLAG_MSIX_CONTIGUOUS = 524288, + MSI_FLAG_PCI_MSIX_ALLOC_DYN = 1048576, + MSI_FLAG_NO_AFFINITY = 2097152, +}; + +struct msi_ctrl { + unsigned int domid; + unsigned int first; + unsigned int last; + unsigned int nirqs; +}; + +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; + long: 32; +}; + +struct process_timer { + struct timer_list timer; + struct task_struct *task; +}; + +struct cgroupstats { + __u64 nr_sleeping; + __u64 nr_running; + __u64 nr_stopped; + __u64 nr_uninterruptible; + __u64 nr_io_wait; +}; + +enum { + CGRP_NOTIFY_ON_RELEASE = 0, + CGRP_CPUSET_CLONE_CHILDREN = 1, + CGRP_FREEZE = 2, + CGRP_FROZEN = 3, + CGRP_KILL = 4, +}; + +enum { + CGRP_ROOT_NOPREFIX = 2, + CGRP_ROOT_XATTR = 4, + CGRP_ROOT_NS_DELEGATE = 8, + CGRP_ROOT_FAVOR_DYNMODS = 16, + CGRP_ROOT_CPUSET_V2_MODE = 65536, + CGRP_ROOT_MEMORY_LOCAL_EVENTS = 131072, + CGRP_ROOT_MEMORY_RECURSIVE_PROT = 262144, + CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = 524288, + CGRP_ROOT_PIDS_LOCAL_EVENTS = 1048576, +}; + +enum { + CFTYPE_ONLY_ON_ROOT = 1, + CFTYPE_NOT_ON_ROOT = 2, + CFTYPE_NS_DELEGATABLE = 4, + CFTYPE_NO_PREFIX = 8, + CFTYPE_WORLD_WRITABLE = 16, + CFTYPE_DEBUG = 32, + __CFTYPE_ONLY_ON_DFL = 65536, + __CFTYPE_NOT_ON_DFL = 131072, + __CFTYPE_ADDED = 262144, +}; + +struct cgroup_taskset { + struct list_head src_csets; + struct list_head dst_csets; + int nr_tasks; + int ssid; + struct list_head *csets; + struct css_set *cur_cset; + struct task_struct *cur_task; +}; + +struct cgroup_of_peak { + long unsigned int value; + struct list_head list; +}; + +struct css_task_iter { + struct cgroup_subsys *ss; + unsigned int flags; + struct list_head *cset_pos; + struct list_head *cset_head; + struct list_head *tcset_pos; + struct list_head *tcset_head; + struct list_head *task_pos; + struct list_head *cur_tasks_head; + struct css_set *cur_cset; + struct css_set *cur_dcset; + struct task_struct *cur_task; + struct list_head iters_node; +}; + +struct cgroup_fs_context { + struct kernfs_fs_context kfc; + struct cgroup_root *root; + struct cgroup_namespace *ns; + unsigned int flags; + bool cpuset_clone_children; + bool none; + bool all_ss; + u16 subsys_mask; + char *name; + char *release_agent; +}; + +enum cgroup_filetype { + CGROUP_FILE_PROCS = 0, + CGROUP_FILE_TASKS = 1, +}; + +struct cgroup_pidlist { + struct { + enum cgroup_filetype type; + struct pid_namespace *ns; + } key; + pid_t *list; + int length; + struct list_head links; + struct cgroup *owner; + struct delayed_work destroy_dwork; +}; + +struct cgroup_file_ctx { + struct cgroup_namespace *ns; + struct { + void *trigger; + } psi; + struct { + bool started; + struct css_task_iter iter; + } procs; + struct { + struct cgroup_pidlist 
*pidlist; + } procs1; + struct cgroup_of_peak peak; +}; + +struct cgrp_cset_link { + struct cgroup *cgrp; + struct css_set *cset; + struct list_head cset_link; + struct list_head cgrp_link; +}; + +struct cgroup_mgctx { + struct list_head preloaded_src_csets; + struct list_head preloaded_dst_csets; + struct cgroup_taskset tset; + u16 ss_mask; +}; + +enum cgroup1_param { + Opt_all = 0, + Opt_clone_children = 1, + Opt_cpuset_v2_mode = 2, + Opt_name = 3, + Opt_none = 4, + Opt_noprefix = 5, + Opt_release_agent = 6, + Opt_xattr = 7, + Opt_favordynmods = 8, + Opt_nofavordynmods = 9, +}; + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +struct bpf_iter__bpf_prog { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_prog *prog; + }; +}; + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[8]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __u64 jited_prog_insns; + __u64 xlated_prog_insns; + __u64 load_time; + __u32 created_by_uid; + __u32 nr_map_ids; + __u64 map_ids; + char name[16]; + __u32 ifindex; + __u32 gpl_compatible: 1; + __u64 netns_dev; + __u64 netns_ino; + __u32 nr_jited_ksyms; + __u32 nr_jited_func_lens; + __u64 jited_ksyms; + __u64 jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __u64 func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __u64 line_info; + __u64 jited_line_info; + __u32 nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __u64 prog_tags; + __u64 run_time_ns; + __u64 run_cnt; + __u64 recursion_misses; + __u32 verified_insns; + __u32 attach_btf_obj_id; + __u32 attach_btf_id; + long: 32; +}; + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[16]; + __u32 ifindex; + __u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_id; + __u64 map_extra; +}; + +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *, int, int); + int (*finalize)(struct bpf_verifier_env *); + int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *); + int (*remove_insns)(struct bpf_verifier_env *, u32, u32); + int (*prepare)(struct bpf_prog *); + int (*translate)(struct bpf_prog *); + void (*destroy)(struct bpf_prog *); +}; + +struct bpf_offload_dev { + const struct bpf_prog_offload_ops *ops; + struct list_head netdevs; + void *priv; +}; + +struct __una_u32 { + u32 x; +}; + +typedef struct ns_common *ns_get_path_helper_t(void *); + +enum xdp_rx_metadata { + XDP_METADATA_KFUNC_RX_TIMESTAMP = 0, + XDP_METADATA_KFUNC_RX_HASH = 1, + XDP_METADATA_KFUNC_RX_VLAN_TAG = 2, + MAX_XDP_METADATA_KFUNC = 3, +}; + +struct bpf_offload_netdev { + struct rhash_head l; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + struct list_head progs; + struct list_head maps; + struct list_head offdev_netdevs; +}; + +struct ns_get_path_bpf_prog_args { + struct bpf_prog *prog; + struct bpf_prog_info *info; +}; + +struct ns_get_path_bpf_map_args { + struct bpf_offloaded_map *offmap; + struct bpf_map_info *info; +}; + +enum tlb_flush_reason { + TLB_FLUSH_ON_TASK_SWITCH = 0, + TLB_REMOTE_SHOOTDOWN = 1, + TLB_LOCAL_SHOOTDOWN = 2, + TLB_LOCAL_MM_SHOOTDOWN = 3, + TLB_REMOTE_SEND_IPI = 4, + NR_TLB_FLUSH_REASONS = 5, +}; + +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; + struct rb_node rb; + long unsigned int rb_subtree_last; +}; + 
+enum ttu_flags { + TTU_SPLIT_HUGE_PMD = 4, + TTU_IGNORE_MLOCK = 8, + TTU_SYNC = 16, + TTU_HWPOISON = 32, + TTU_BATCH_FLUSH = 64, + TTU_RMAP_LOCKED = 128, +}; + +struct rmap_walk_control { + void *arg; + bool try_lock; + bool contended; + bool (*rmap_one)(struct folio *, struct vm_area_struct *, long unsigned int, void *); + int (*done)(struct folio *); + struct anon_vma * (*anon_lock)(const struct folio *, struct rmap_walk_control *); + bool (*invalid_vma)(struct vm_area_struct *, void *); +}; + +struct hstate {}; + +struct trace_event_raw_tlb_flush { + struct trace_entry ent; + int reason; + long unsigned int pages; + char __data[0]; +}; + +struct trace_event_data_offsets_tlb_flush {}; + +typedef void (*btf_trace_tlb_flush)(void *, int, long unsigned int); + +struct trace_event_raw_mm_migrate_pages { + struct trace_entry ent; + long unsigned int succeeded; + long unsigned int failed; + long unsigned int thp_succeeded; + long unsigned int thp_failed; + long unsigned int thp_split; + long unsigned int large_folio_split; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_mm_migrate_pages_start { + struct trace_entry ent; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_migration_pte { + struct trace_entry ent; + long unsigned int addr; + long unsigned int pte; + int order; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_migrate_pages {}; + +struct trace_event_data_offsets_mm_migrate_pages_start {}; + +struct trace_event_data_offsets_migration_pte {}; + +typedef void (*btf_trace_mm_migrate_pages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, enum migrate_mode, int); + +typedef void (*btf_trace_mm_migrate_pages_start)(void *, enum migrate_mode, int); + +typedef void (*btf_trace_set_migration_pte)(void *, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_remove_migration_pte)(void *, long unsigned int, long unsigned int, int); + +struct folio_referenced_arg { + int mapcount; + int referenced; + long unsigned int vm_flags; + struct mem_cgroup *memcg; +}; + +typedef __kernel_long_t __kernel_old_time_t; + +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_long_t tv_usec; +}; + +struct utimbuf { + __kernel_old_time_t actime; + __kernel_old_time_t modtime; +}; + +struct sysinfo { + __kernel_long_t uptime; + __kernel_ulong_t loads[3]; + __kernel_ulong_t totalram; + __kernel_ulong_t freeram; + __kernel_ulong_t sharedram; + __kernel_ulong_t bufferram; + __kernel_ulong_t totalswap; + __kernel_ulong_t freeswap; + __u16 procs; + __u16 pad; + __kernel_ulong_t totalhigh; + __kernel_ulong_t freehigh; + __u32 mem_unit; + char _f[8]; +}; + +struct radix_tree_preload { + local_lock_t lock; + unsigned int nr; + struct xa_node *nodes; +}; + +struct fsnotify_group; + +struct fsnotify_iter_info; + +struct fsnotify_mark; + +struct fsnotify_event; + +struct fsnotify_ops { + int (*handle_event)(struct fsnotify_group *, u32, const void *, int, struct inode *, const struct qstr *, u32, struct fsnotify_iter_info *); + int (*handle_inode_event)(struct fsnotify_mark *, u32, struct inode *, struct inode *, const struct qstr *, u32); + void (*free_group_priv)(struct fsnotify_group *); + void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); + void (*free_event)(struct fsnotify_group *, struct fsnotify_event *); + void (*free_mark)(struct fsnotify_mark *); +}; + +struct inotify_group_private_data { + 
spinlock_t idr_lock; + struct idr idr; + struct ucounts *ucounts; +}; + +struct fsnotify_group { + const struct fsnotify_ops *ops; + refcount_t refcnt; + spinlock_t notification_lock; + struct list_head notification_list; + wait_queue_head_t notification_waitq; + unsigned int q_len; + unsigned int max_events; + enum fsnotify_group_prio priority; + bool shutdown; + int flags; + unsigned int owner_flags; + struct mutex mark_mutex; + atomic_t user_waits; + struct list_head marks_list; + struct fasync_struct *fsn_fa; + struct fsnotify_event *overflow_event; + struct mem_cgroup *memcg; + union { + void *private; + struct inotify_group_private_data inotify_data; + }; +}; + +struct fsnotify_iter_info { + struct fsnotify_mark *marks[5]; + struct fsnotify_group *current_group; + unsigned int report_mask; + int srcu_idx; +}; + +struct fsnotify_mark { + __u32 mask; + refcount_t refcnt; + struct fsnotify_group *group; + struct list_head g_list; + spinlock_t lock; + struct hlist_node obj_list; + struct fsnotify_mark_connector *connector; + __u32 ignore_mask; + unsigned int flags; +}; + +struct fsnotify_event { + struct list_head list; +}; + +enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_ANY = -1, + FSNOTIFY_OBJ_TYPE_INODE = 0, + FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1, + FSNOTIFY_OBJ_TYPE_SB = 2, + FSNOTIFY_OBJ_TYPE_COUNT = 3, + FSNOTIFY_OBJ_TYPE_DETACHED = 3, +}; + +struct inotify_event { + __s32 wd; + __u32 mask; + __u32 cookie; + __u32 len; + char name[0]; +}; + +struct inotify_event_info { + struct fsnotify_event fse; + u32 mask; + int wd; + u32 sync_cookie; + int name_len; + char name[0]; +}; + +struct inotify_inode_mark { + struct fsnotify_mark fsn_mark; + int wd; +}; + +enum proc_hidepid { + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, + HIDEPID_NOT_PTRACEABLE = 4, +}; + +enum proc_pidonly { + PROC_PIDONLY_OFF = 0, + PROC_PIDONLY_ON = 1, +}; + +struct proc_fs_info { + struct pid_namespace *pid_ns; + struct dentry *proc_self; + struct dentry *proc_thread_self; + kgid_t pid_gid; + enum proc_hidepid hide_pid; + enum proc_pidonly pidonly; + struct callback_head rcu; +}; + +typedef u16 wchar_t; + +struct nls_table { + const char *charset; + const char *alias; + int (*uni2char)(wchar_t, unsigned char *, int); + int (*char2uni)(const unsigned char *, int, wchar_t *); + const unsigned char *charset2lower; + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +}; + +struct fat_mount_options { + kuid_t fs_uid; + kgid_t fs_gid; + short unsigned int fs_fmask; + short unsigned int fs_dmask; + short unsigned int codepage; + int time_offset; + char *iocharset; + short unsigned int shortname; + unsigned char name_check; + unsigned char errors; + unsigned char nfs; + short unsigned int allow_utime; + unsigned int quiet: 1; + unsigned int showexec: 1; + unsigned int sys_immutable: 1; + unsigned int dotsOK: 1; + unsigned int isvfat: 1; + unsigned int utf8: 1; + unsigned int unicode_xlate: 1; + unsigned int numtail: 1; + unsigned int flush: 1; + unsigned int nocase: 1; + unsigned int usefree: 1; + unsigned int tz_set: 1; + unsigned int rodir: 1; + unsigned int discard: 1; + unsigned int dos1xfloppy: 1; + unsigned int debug: 1; +}; + +struct fatent_operations; + +struct msdos_sb_info { + short unsigned int sec_per_clus; + short unsigned int cluster_bits; + unsigned int cluster_size; + unsigned char fats; + unsigned char fat_bits; + short unsigned int fat_start; + long unsigned int fat_length; + long unsigned int dir_start; + short unsigned int dir_entries; + long 
unsigned int data_start; + long unsigned int max_cluster; + long unsigned int root_cluster; + long unsigned int fsinfo_sector; + struct mutex fat_lock; + struct mutex nfs_build_inode_lock; + struct mutex s_lock; + unsigned int prev_free; + unsigned int free_clusters; + unsigned int free_clus_valid; + struct fat_mount_options options; + struct nls_table *nls_disk; + struct nls_table *nls_io; + const void *dir_ops; + int dir_per_block; + int dir_per_block_bits; + unsigned int vol_id; + int fatent_shift; + const struct fatent_operations *fatent_ops; + struct inode *fat_inode; + struct inode *fsinfo_inode; + struct ratelimit_state ratelimit; + spinlock_t inode_hash_lock; + struct hlist_head inode_hashtable[256]; + spinlock_t dir_hash_lock; + struct hlist_head dir_hashtable[256]; + unsigned int dirty; + struct callback_head rcu; +}; + +struct fat_entry; + +struct fatent_operations { + void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); + void (*ent_set_ptr)(struct fat_entry *, int); + int (*ent_bread)(struct super_block *, struct fat_entry *, int, sector_t); + int (*ent_get)(struct fat_entry *); + void (*ent_put)(struct fat_entry *, int); + int (*ent_next)(struct fat_entry *); +}; + +struct msdos_inode_info { + spinlock_t cache_lru_lock; + struct list_head cache_lru; + int nr_caches; + unsigned int cache_valid_id; + long: 32; + loff_t mmu_private; + int i_start; + int i_logstart; + int i_attrs; + long: 32; + loff_t i_pos; + struct hlist_node i_fat_hash; + struct hlist_node i_dir_hash; + struct rw_semaphore truncate_lock; + long: 32; + struct timespec64 i_crtime; + struct inode vfs_inode; +}; + +struct fat_entry { + int entry; + union { + u8 *ent12_p[2]; + __le16 *ent16_p; + __le32 *ent32_p; + } u; + int nr_bhs; + struct buffer_head *bhs[2]; + struct inode *fat_inode; +}; + +struct fat_cache { + struct list_head cache_list; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct fat_cache_id { + unsigned int id; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct tree_descr { + const char *name; + const struct file_operations *ops; + int mode; +}; + +typedef int (*eventfs_callback)(const char *, umode_t *, void **, const struct file_operations **); + +typedef void (*eventfs_release)(const char *, void *); + +struct eventfs_entry { + const char *name; + eventfs_callback callback; + eventfs_release release; +}; + +enum { + TRACEFS_EVENT_INODE = 2, + TRACEFS_GID_PERM_SET = 4, + TRACEFS_UID_PERM_SET = 8, + TRACEFS_INSTANCE_INODE = 16, +}; + +struct tracefs_inode { + struct inode vfs_inode; + struct list_head list; + long unsigned int flags; + void *private; +}; + +struct eventfs_attr { + int mode; + kuid_t uid; + kgid_t gid; +}; + +struct eventfs_inode { + union { + struct list_head list; + struct callback_head rcu; + }; + struct list_head children; + const struct eventfs_entry *entries; + const char *name; + struct eventfs_attr *entry_attrs; + void *data; + struct eventfs_attr attr; + struct kref kref; + unsigned int is_freed: 1; + unsigned int is_events: 1; + unsigned int nr_entries: 30; + unsigned int ino; +}; + +struct tracefs_dir_ops { + int (*mkdir)(const char *); + int (*rmdir)(const char *); +}; + +struct tracefs_fs_info { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +enum { + Opt_uid___2 = 0, + Opt_gid___2 = 1, + Opt_mode___2 = 2, +}; + +typedef int (*list_cmp_func_t)(void *, const struct list_head *, const struct list_head *); + +struct btrfs_dir_log_item { + __le64 end; +}; + +struct btrfs_ordered_sum { + u64 logical; + u32 len; + 
struct list_head list; + u8 sums[0]; + long: 32; +}; + +enum { + BTRFS_ORDERED_REGULAR = 0, + BTRFS_ORDERED_NOCOW = 1, + BTRFS_ORDERED_PREALLOC = 2, + BTRFS_ORDERED_COMPRESSED = 3, + BTRFS_ORDERED_DIRECT = 4, + BTRFS_ORDERED_IO_DONE = 5, + BTRFS_ORDERED_COMPLETE = 6, + BTRFS_ORDERED_IOERR = 7, + BTRFS_ORDERED_TRUNCATED = 8, + BTRFS_ORDERED_LOGGED = 9, + BTRFS_ORDERED_LOGGED_CSUM = 10, + BTRFS_ORDERED_PENDING = 11, + BTRFS_ORDERED_ENCODED = 12, +}; + +enum btrfs_delayed_item_type { + BTRFS_DELAYED_INSERTION_ITEM = 0, + BTRFS_DELAYED_DELETION_ITEM = 1, +}; + +struct btrfs_delayed_item { + struct rb_node rb_node; + long: 32; + u64 index; + struct list_head tree_list; + struct list_head readdir_list; + struct list_head log_list; + u64 bytes_reserved; + struct btrfs_delayed_node *delayed_node; + refcount_t refs; + enum btrfs_delayed_item_type type: 8; + bool logged; + u16 data_len; + char data[0]; + long: 32; +}; + +struct btrfs_io_context; + +struct btrfs_io_stripe { + struct btrfs_device *dev; + long: 32; + u64 physical; + u64 length; + bool rst_search_commit_root; + struct btrfs_io_context *bioc; +}; + +struct btrfs_chunk_map { + struct rb_node rb_node; + int verified_stripes; + refcount_t refs; + long: 32; + u64 start; + u64 chunk_len; + u64 stripe_size; + u64 type; + int io_align; + int io_width; + int num_stripes; + int sub_stripes; + struct btrfs_io_stripe stripes[0]; +}; + +struct btrfs_io_context { + refcount_t refs; + struct btrfs_fs_info *fs_info; + u64 map_type; + struct bio *orig_bio; + atomic_t error; + u16 max_errors; + long: 32; + u64 logical; + u64 size; + struct list_head rst_ordered_entry; + u16 num_stripes; + u16 mirror_num; + u16 replace_nr_stripes; + s16 replace_stripe_src; + u64 full_stripe_logical; + struct btrfs_io_stripe stripes[0]; +}; + +struct btrfs_truncate_control { + struct btrfs_inode *inode; + long: 32; + u64 new_size; + u64 extents_found; + u64 last_size; + u64 sub_bytes; + u64 ino; + u32 min_type; + bool skip_ref_updates; + bool clear_extent_range; +}; + +enum { + LOG_INODE_ALL = 0, + LOG_INODE_EXISTS = 1, +}; + +enum { + LOG_WALK_PIN_ONLY = 0, + LOG_WALK_REPLAY_INODES = 1, + LOG_WALK_REPLAY_DIR_INDEX = 2, + LOG_WALK_REPLAY_ALL = 3, +}; + +struct walk_control { + int free; + int pin; + int stage; + bool ignore_cur_inode; + struct btrfs_root *replay_dest; + struct btrfs_trans_handle *trans; + int (*process_func)(struct btrfs_root *, struct extent_buffer *, struct walk_control *, u64, int); +}; + +struct btrfs_dir_list { + u64 ino; + struct list_head list; +}; + +struct btrfs_ino_list { + u64 ino; + u64 parent; + struct list_head list; +}; + +typedef int __kernel_mqd_t; + +typedef __kernel_mqd_t mqd_t; + +struct sigevent { + sigval_t sigev_value; + int sigev_signo; + int sigev_notify; + union { + int _pad[13]; + int _tid; + struct { + void (*_function)(sigval_t); + void *_attribute; + } _sigev_thread; + } _sigev_un; +}; + +struct mq_attr { + __kernel_long_t mq_flags; + __kernel_long_t mq_maxmsg; + __kernel_long_t mq_msgsize; + __kernel_long_t mq_curmsgs; + __kernel_long_t __reserved[4]; +}; + +struct msg_msgseg; + +struct msg_msg { + struct list_head m_list; + long int m_type; + size_t m_ts; + struct msg_msgseg *next; + void *security; +}; + +struct mqueue_fs_context { + struct ipc_namespace *ipc_ns; + bool newns; +}; + +struct posix_msg_tree_node { + struct rb_node rb_node; + struct list_head msg_list; + int priority; +}; + +struct ext_wait_queue { + struct task_struct *task; + struct list_head list; + struct msg_msg *msg; + int state; +}; + +struct 
mqueue_inode_info { + spinlock_t lock; + long: 32; + struct inode vfs_inode; + wait_queue_head_t wait_q; + struct rb_root msg_tree; + struct rb_node *msg_tree_rightmost; + struct posix_msg_tree_node *node_cache; + struct mq_attr attr; + struct sigevent notify; + struct pid *notify_owner; + u32 notify_self_exec_id; + struct user_namespace *notify_user_ns; + struct ucounts *ucounts; + struct sock *notify_sock; + struct sk_buff *notify_cookie; + struct ext_wait_queue e_wait_q[2]; + long unsigned int qsize; + long: 32; +}; + +typedef struct { + __le64 b; + __le64 a; +} le128; + +struct xts_tfm_ctx { + struct crypto_skcipher *child; + struct crypto_cipher *tweak; +}; + +struct xts_instance_ctx { + struct crypto_skcipher_spawn spawn; + struct crypto_cipher_spawn tweak_spawn; +}; + +struct xts_request_ctx { + le128 t; + struct scatterlist *tail; + struct scatterlist sg[2]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct skcipher_request subreq; +}; + +struct io_uring_cmd { + struct file *file; + const struct io_uring_sqe *sqe; + void (*task_work_cb)(struct io_uring_cmd *, unsigned int); + u32 cmd_op; + u32 flags; + u8 pdu[32]; +}; + +struct iomap_ops { + int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap *, struct iomap *); + int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap *); +}; + +enum { + DIO_SHOULD_DIRTY = 1, + DIO_IS_SYNC = 2, +}; + +struct blkdev_dio { + union { + struct kiocb *iocb; + struct task_struct *waiter; + }; + size_t size; + atomic_t ref; + unsigned int flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct bio bio; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct elevator_type; + +struct elevator_queue { + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + long unsigned int flags; + struct hlist_head hash[64]; +}; + +struct blk_mq_ctxs; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[3]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + unsigned int cpu; + short unsigned int index_hw[3]; + struct blk_mq_hw_ctx *hctxs[3]; + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx *queue_ctx; +}; + +typedef unsigned int blk_insert_t; + +struct blk_mq_alloc_data { + struct request_queue *q; + blk_mq_req_flags_t flags; + unsigned int shallow_depth; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + unsigned int nr_tags; + struct rq_list *cached_rqs; + struct blk_mq_ctx *ctx; + 
struct blk_mq_hw_ctx *hctx; +}; + +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE = 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); + int (*request_merge)(struct request_queue *, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *); + void (*prepare_request)(struct request *); + void (*finish_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, blk_insert_t); + struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct request *, u64); + void (*requeue_request)(struct request *); + struct request * (*former_request)(struct request_queue *, struct request *); + struct request * (*next_request)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq *); + void (*exit_icq)(struct io_cq *); +}; + +struct elv_fs_entry; + +struct elevator_type { + struct kmem_cache *icq_cache; + struct elevator_mq_ops ops; + size_t icq_size; + size_t icq_align; + struct elv_fs_entry *elevator_attrs; + const char *elevator_name; + const char *elevator_alias; + struct module *elevator_owner; + const struct blk_mq_debugfs_attr *queue_debugfs_attrs; + const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; + char icq_cache_name[22]; + struct list_head list; +}; + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + +struct blkg_conf_ctx { + char *input; + char *body; + struct block_device *bdev; + struct blkcg_gq *blkg; +}; + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ = 0, + BLKG_RWSTAT_WRITE = 1, + BLKG_RWSTAT_SYNC = 2, + BLKG_RWSTAT_ASYNC = 3, + BLKG_RWSTAT_DISCARD = 4, + BLKG_RWSTAT_NR = 5, + BLKG_RWSTAT_TOTAL = 5, +}; + +struct blkg_rwstat { + struct percpu_counter cpu_cnt[5]; + atomic64_t aux_cnt[5]; +}; + +struct blkg_rwstat_sample { + u64 cnt[5]; +}; + +struct bfq_entity; + +struct bfq_service_tree { + struct rb_root active; + struct rb_root idle; + struct bfq_entity *first_idle; + struct bfq_entity *last_idle; + u64 vtime; + long unsigned int wsum; + long: 32; +}; + +struct bfq_sched_data; + +struct bfq_queue; + +struct bfq_entity { + struct rb_node rb_node; + bool on_st_or_in_serv; + u64 start; + u64 finish; + struct rb_root *tree; + long: 32; + u64 min_start; + int service; + int budget; + int allocated; + int dev_weight; + int weight; + int new_weight; + int orig_weight; + struct bfq_entity *parent; + struct bfq_sched_data *my_sched_data; + struct bfq_sched_data *sched_data; + int prio_changed; + bool in_groups_with_pending_reqs; + struct bfq_queue *last_bfqq_created; + long: 32; +}; + +struct bfq_sched_data { + struct bfq_entity *in_service_entity; + struct bfq_entity *next_in_service; + struct bfq_service_tree service_tree[3]; + long unsigned int 
bfq_class_idle_last_service; + long: 32; +}; + +struct bfq_weight_counter { + unsigned int weight; + unsigned int num_active; + struct rb_node weights_node; +}; + +struct bfq_ttime { + u64 last_end_request; + u64 ttime_total; + long unsigned int ttime_samples; + long: 32; + u64 ttime_mean; +}; + +struct bfq_data; + +struct bfq_io_cq; + +struct bfq_queue { + int ref; + int stable_ref; + struct bfq_data *bfqd; + short unsigned int ioprio; + short unsigned int ioprio_class; + short unsigned int new_ioprio; + short unsigned int new_ioprio_class; + long: 32; + u64 last_serv_time_ns; + unsigned int inject_limit; + long unsigned int decrease_time_jif; + struct bfq_queue *new_bfqq; + struct rb_node pos_node; + struct rb_root *pos_root; + struct rb_root sort_list; + struct request *next_rq; + int queued[2]; + int meta_pending; + struct list_head fifo; + struct bfq_entity entity; + struct bfq_weight_counter *weight_counter; + int max_budget; + long unsigned int budget_timeout; + int dispatched; + long unsigned int flags; + struct list_head bfqq_list; + long: 32; + struct bfq_ttime ttime; + u64 io_start_time; + u64 tot_idle_time; + u32 seek_history; + struct hlist_node burst_list_node; + long: 32; + sector_t last_request_pos; + unsigned int requests_within_timer; + pid_t pid; + struct bfq_io_cq *bic; + long unsigned int wr_cur_max_time; + long unsigned int soft_rt_next_start; + long unsigned int last_wr_start_finish; + unsigned int wr_coeff; + long unsigned int last_idle_bklogged; + long unsigned int service_from_backlogged; + long unsigned int service_from_wr; + long unsigned int wr_start_at_switch_to_srt; + long unsigned int split_time; + long unsigned int first_IO_time; + long unsigned int creation_time; + struct bfq_queue *waker_bfqq; + struct bfq_queue *tentative_waker_bfqq; + unsigned int num_waker_detections; + long: 32; + u64 waker_detection_started; + struct hlist_node woken_list_node; + struct hlist_head woken_list; + unsigned int actuator_idx; +}; + +struct bfq_group; + +struct bfq_data { + struct request_queue *queue; + struct list_head dispatch; + struct bfq_group *root_group; + struct rb_root_cached queue_weights_tree; + unsigned int num_groups_with_pending_reqs; + unsigned int busy_queues[3]; + int wr_busy_queues; + int queued; + int tot_rq_in_driver; + int rq_in_driver[8]; + bool nonrot_with_queueing; + int max_rq_in_driver; + int hw_tag_samples; + int hw_tag; + int budgets_assigned; + struct hrtimer idle_slice_timer; + struct bfq_queue *in_service_queue; + long: 32; + sector_t last_position; + sector_t in_serv_last_pos; + u64 last_completion; + struct bfq_queue *last_completed_rq_bfqq; + struct bfq_queue *last_bfqq_created; + u64 last_empty_occupied_ns; + bool wait_dispatch; + struct request *waited_rq; + bool rqs_injected; + long: 32; + u64 first_dispatch; + u64 last_dispatch; + ktime_t last_budget_start; + ktime_t last_idling_start; + long unsigned int last_idling_start_jiffies; + int peak_rate_samples; + u32 sequential_samples; + long: 32; + u64 tot_sectors_dispatched; + u32 last_rq_max_size; + long: 32; + u64 delta_from_first; + u32 peak_rate; + int bfq_max_budget; + struct list_head active_list[8]; + struct list_head idle_list; + u64 bfq_fifo_expire[2]; + unsigned int bfq_back_penalty; + unsigned int bfq_back_max; + u32 bfq_slice_idle; + int bfq_user_max_budget; + unsigned int bfq_timeout; + bool strict_guarantees; + long unsigned int last_ins_in_burst; + long unsigned int bfq_burst_interval; + int burst_size; + struct bfq_entity *burst_parent_entity; + long unsigned int 
bfq_large_burst_thresh; + bool large_burst; + struct hlist_head burst_list; + bool low_latency; + unsigned int bfq_wr_coeff; + unsigned int bfq_wr_rt_max_time; + unsigned int bfq_wr_min_idle_time; + long unsigned int bfq_wr_min_inter_arr_async; + unsigned int bfq_wr_max_softrt_rate; + long: 32; + u64 rate_dur_prod; + struct bfq_queue oom_bfqq; + spinlock_t lock; + struct bfq_io_cq *bio_bic; + struct bfq_queue *bio_bfqq; + unsigned int word_depths[4]; + unsigned int full_depth_shift; + unsigned int num_actuators; + long: 32; + sector_t sector[8]; + sector_t nr_sectors[8]; + struct blk_independent_access_range ia_ranges[8]; + unsigned int actuator_load_threshold; + long: 32; +}; + +struct bfq_iocq_bfqq_data { + bool saved_has_short_ttime; + bool saved_IO_bound; + long: 32; + u64 saved_io_start_time; + u64 saved_tot_idle_time; + bool saved_in_large_burst; + bool was_in_burst_list; + unsigned int saved_weight; + long unsigned int saved_wr_coeff; + long unsigned int saved_last_wr_start_finish; + long unsigned int saved_service_from_wr; + long unsigned int saved_wr_start_at_switch_to_srt; + unsigned int saved_wr_cur_max_time; + long: 32; + struct bfq_ttime saved_ttime; + u64 saved_last_serv_time_ns; + unsigned int saved_inject_limit; + long unsigned int saved_decrease_time_jif; + struct bfq_queue *stable_merge_bfqq; + bool stably_merged; +}; + +struct bfq_io_cq { + struct io_cq icq; + struct bfq_queue *bfqq[16]; + int ioprio; + uint64_t blkcg_serial_nr; + struct bfq_iocq_bfqq_data bfqq_data[8]; + unsigned int requests; + long: 32; +}; + +struct bfqg_stats { + struct blkg_rwstat bytes; + struct blkg_rwstat ios; +}; + +struct bfq_group { + struct blkg_policy_data pd; + refcount_t ref; + struct bfq_entity entity; + struct bfq_sched_data sched_data; + struct bfq_data *bfqd; + struct bfq_queue *async_bfqq[128]; + struct bfq_queue *async_idle_bfqq[8]; + struct bfq_entity *my_entity; + int active_entities; + int num_queues_with_pending_reqs; + struct rb_root rq_pos_tree; + long: 32; + struct bfqg_stats stats; +}; + +enum bfqq_expiration { + BFQQE_TOO_IDLE = 0, + BFQQE_BUDGET_TIMEOUT = 1, + BFQQE_BUDGET_EXHAUSTED = 2, + BFQQE_NO_MORE_REQUESTS = 3, + BFQQE_PREEMPTED = 4, +}; + +struct bfq_group_data { + struct blkcg_policy_data pd; + unsigned int weight; +}; + +struct epoll_event { + __poll_t events; + long: 32; + __u64 data; +}; + +struct io_epoll { + struct file *file; + int epfd; + int op; + int fd; + struct epoll_event event; +}; + +typedef int __kernel_ptrdiff_t; + +typedef __kernel_ptrdiff_t ptrdiff_t; + +struct region { + unsigned int start; + unsigned int off; + unsigned int group_len; + unsigned int end; + unsigned int nbits; +}; + +typedef long int mpi_limb_signed_t; + +struct gcry_mpi { + int alloced; + int nlimbs; + int nbits; + int sign; + unsigned int flags; + mpi_limb_t *d; +}; + +typedef struct gcry_mpi *MPI; + +typedef resource_size_t (*resource_alignf)(void *, const struct resource *, resource_size_t, resource_size_t); + +struct resource_constraint { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_alignf alignf; + void *alignf_data; +}; + +typedef u32 pci_bus_addr_t; + +struct pci_bus_region { + pci_bus_addr_t start; + pci_bus_addr_t end; +}; + +struct pci_dev_resource { + struct list_head list; + struct resource *res; + struct pci_dev *dev; + resource_size_t start; + resource_size_t end; + resource_size_t add_size; + resource_size_t min_align; + long unsigned int flags; +}; + +enum release_type { + leaf_only = 0, + whole_subtree = 1, +}; + +enum 
enable_type { + undefined = -1, + user_disabled = 0, + auto_disabled = 1, + user_enabled = 2, + auto_enabled = 3, +}; + +typedef unsigned char u_char; + +typedef short unsigned int u_short; + +struct fbcon_display { + const u_char *fontdata; + int userfont; + u_short inverse; + short int yscroll; + int vrows; + int cursor_shape; + int con_rotate; + u32 xres_virtual; + u32 yres_virtual; + u32 height; + u32 width; + u32 bits_per_pixel; + u32 grayscale; + u32 nonstd; + u32 accel_flags; + u32 rotate; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + const struct fb_videomode *mode; +}; + +struct fbcon_ops { + void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int); + void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int); + void (*putcs)(struct vc_data *, struct fb_info *, const short unsigned int *, int, int, int, int, int); + void (*clear_margins)(struct vc_data *, struct fb_info *, int, int); + void (*cursor)(struct vc_data *, struct fb_info *, bool, int, int); + int (*update_start)(struct fb_info *); + int (*rotate_font)(struct fb_info *, struct vc_data *); + struct fb_var_screeninfo var; + struct delayed_work cursor_work; + struct fb_cursor cursor_state; + struct fbcon_display *p; + struct fb_info *info; + int currcon; + int cur_blink_jiffies; + int cursor_flash; + int cursor_reset; + int blank_state; + int graphics; + int save_graphics; + bool initialized; + int rotate; + int cur_rotate; + char *cursor_data; + u8 *fontbuffer; + u8 *fontdata; + u8 *cursor_src; + u32 cursor_size; + u32 fd_size; +}; + +struct devres_node { + struct list_head entry; + dr_release_t release; + const char *name; + size_t size; +}; + +struct devres { + struct devres_node node; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u8 data[0]; +}; + +struct devres_group { + struct devres_node node[2]; + void *id; + int color; +}; + +struct action_devres { + void *data; + void (*action)(void *); +}; + +struct pages_devres { + long unsigned int addr; + unsigned int order; +}; + +enum { + __RQF_STARTED = 0, + __RQF_FLUSH_SEQ = 1, + __RQF_MIXED_MERGE = 2, + __RQF_DONTPREP = 3, + __RQF_SCHED_TAGS = 4, + __RQF_USE_SCHED = 5, + __RQF_FAILED = 6, + __RQF_QUIET = 7, + __RQF_IO_STAT = 8, + __RQF_PM = 9, + __RQF_HASHED = 10, + __RQF_STATS = 11, + __RQF_SPECIAL_PAYLOAD = 12, + __RQF_ZONE_WRITE_PLUGGING = 13, + __RQF_TIMED_OUT = 14, + __RQF_RESV = 15, + __RQF_BITS = 16, +}; + +enum scsi_prot_operations { + SCSI_PROT_NORMAL = 0, + SCSI_PROT_READ_INSERT = 1, + SCSI_PROT_WRITE_STRIP = 2, + SCSI_PROT_READ_STRIP = 3, + SCSI_PROT_WRITE_INSERT = 4, + SCSI_PROT_READ_PASS = 5, + SCSI_PROT_WRITE_PASS = 6, +}; + +struct scsi_driver { + struct device_driver gendrv; + int (*resume)(struct device *); + void (*rescan)(struct device *); + blk_status_t (*init_command)(struct scsi_cmnd *); + void (*uninit_command)(struct scsi_cmnd *); + int (*done)(struct scsi_cmnd *); + int (*eh_action)(struct scsi_cmnd *, int); + void (*eh_reset)(struct scsi_cmnd *); +}; + +enum scsi_ml_status { + SCSIML_STAT_OK = 0, + SCSIML_STAT_RESV_CONFLICT = 1, + SCSIML_STAT_NOSPC = 2, + SCSIML_STAT_MED_ERROR = 3, + SCSIML_STAT_TGT_FAILURE = 4, + SCSIML_STAT_DL_TIMEOUT = 5, +}; + +struct mdio_driver { + 
struct mdio_driver_common mdiodrv; + int (*probe)(struct mdio_device *); + void (*remove)(struct mdio_device *); + void (*shutdown)(struct mdio_device *); +}; + +struct pm_qos_request { + struct plist_node node; + struct pm_qos_constraints *qos; +}; + +enum e1000_mac_type___2 { + e1000_82571 = 0, + e1000_82572 = 1, + e1000_82573 = 2, + e1000_82574 = 3, + e1000_82583 = 4, + e1000_80003es2lan = 5, + e1000_ich8lan = 6, + e1000_ich9lan = 7, + e1000_ich10lan = 8, + e1000_pchlan = 9, + e1000_pch2lan = 10, + e1000_pch_lpt = 11, + e1000_pch_spt = 12, + e1000_pch_cnp = 13, + e1000_pch_tgp = 14, + e1000_pch_adp = 15, + e1000_pch_mtp = 16, + e1000_pch_lnp = 17, + e1000_pch_ptp = 18, + e1000_pch_nvp = 19, +}; + +enum e1000_nvm_type___2 { + e1000_nvm_unknown___2 = 0, + e1000_nvm_none___2 = 1, + e1000_nvm_eeprom_spi___2 = 2, + e1000_nvm_flash_hw___2 = 3, + e1000_nvm_flash_sw___2 = 4, +}; + +enum e1000_phy_type___2 { + e1000_phy_unknown___2 = 0, + e1000_phy_none___2 = 1, + e1000_phy_m88___2 = 2, + e1000_phy_igp___2 = 3, + e1000_phy_igp_2___2 = 4, + e1000_phy_gg82563___2 = 5, + e1000_phy_igp_3___2 = 6, + e1000_phy_ife___2 = 7, + e1000_phy_bm = 8, + e1000_phy_82578 = 9, + e1000_phy_82577 = 10, + e1000_phy_82579 = 11, + e1000_phy_i217 = 12, +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress = 1, + e1000_serdes_link_autoneg_complete = 2, + e1000_serdes_link_forced_up = 3, +}; + +struct e1000_hw_stats___2 { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_hw___2; + +struct e1000_mac_operations___2 { + s32 (*id_led_init)(struct e1000_hw___2 *); + s32 (*blink_led)(struct e1000_hw___2 *); + bool (*check_mng_mode)(struct e1000_hw___2 *); + s32 (*check_for_link)(struct e1000_hw___2 *); + s32 (*cleanup_led)(struct e1000_hw___2 *); + void (*clear_hw_cntrs)(struct e1000_hw___2 *); + void (*clear_vfta)(struct e1000_hw___2 *); + s32 (*get_bus_info)(struct e1000_hw___2 *); + void (*set_lan_id)(struct e1000_hw___2 *); + s32 (*get_link_up_info)(struct e1000_hw___2 *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw___2 *); + s32 (*led_off)(struct e1000_hw___2 *); + void (*update_mc_addr_list)(struct e1000_hw___2 *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw___2 *); + s32 (*init_hw)(struct e1000_hw___2 *); + s32 (*setup_link)(struct e1000_hw___2 *); + s32 (*setup_physical_interface)(struct e1000_hw___2 *); + s32 (*setup_led)(struct e1000_hw___2 *); + void (*write_vfta)(struct e1000_hw___2 *, u32, u32); + void (*config_collision_dist)(struct e1000_hw___2 *); + int (*rar_set)(struct e1000_hw___2 *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw___2 *); + u32 (*rar_get_count)(struct e1000_hw___2 *); +}; + +struct 
e1000_mac_info___2 { + struct e1000_mac_operations___2 ops; + u8 addr[6]; + u8 perm_addr[6]; + enum e1000_mac_type___2 type; + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u32 mta_shadow[128]; + u16 rar_entry_count; + u8 forced_speed_duplex; + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; + enum e1000_serdes_link_state serdes_link_state; +}; + +struct e1000_fc_info___2 { + u32 high_water; + u32 low_water; + u16 pause_time; + u16 refresh_time; + bool send_xon; + bool strict_ieee; + enum e1000_fc_mode current_mode; + enum e1000_fc_mode requested_mode; +}; + +struct e1000_phy_operations___2 { + s32 (*acquire)(struct e1000_hw___2 *); + s32 (*cfg_on_link_up)(struct e1000_hw___2 *); + s32 (*check_polarity)(struct e1000_hw___2 *); + s32 (*check_reset_block)(struct e1000_hw___2 *); + s32 (*commit)(struct e1000_hw___2 *); + s32 (*force_speed_duplex)(struct e1000_hw___2 *); + s32 (*get_cfg_done)(struct e1000_hw___2 *); + s32 (*get_cable_length)(struct e1000_hw___2 *); + s32 (*get_info)(struct e1000_hw___2 *); + s32 (*set_page)(struct e1000_hw___2 *, u16); + s32 (*read_reg)(struct e1000_hw___2 *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw___2 *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw___2 *, u32, u16 *); + void (*release)(struct e1000_hw___2 *); + s32 (*reset)(struct e1000_hw___2 *); + s32 (*set_d0_lplu_state)(struct e1000_hw___2 *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw___2 *, bool); + s32 (*write_reg)(struct e1000_hw___2 *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw___2 *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw___2 *, u32, u16); + void (*power_up)(struct e1000_hw___2 *); + void (*power_down)(struct e1000_hw___2 *); +}; + +struct e1000_phy_info___2 { + struct e1000_phy_operations___2 ops; + enum e1000_phy_type___2 type; + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + u32 addr; + u32 id; + u32 reset_delay_us; + u32 revision; + u32 retry_count; + enum e1000_media_type media_type; + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u8 mdix; + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; + bool retry_enabled; +}; + +struct e1000_nvm_operations___2 { + s32 (*acquire)(struct e1000_hw___2 *); + s32 (*read)(struct e1000_hw___2 *, u16, u16, u16 *); + void (*release)(struct e1000_hw___2 *); + void (*reload)(struct e1000_hw___2 *); + s32 (*update)(struct e1000_hw___2 *); + s32 (*valid_led_default)(struct e1000_hw___2 *, u16 *); + s32 (*validate)(struct e1000_hw___2 *); + s32 (*write)(struct e1000_hw___2 *, u16, u16, u16 *); +}; + +struct e1000_nvm_info___2 { + struct e1000_nvm_operations___2 ops; + enum e1000_nvm_type___2 type; + enum e1000_nvm_override override; + u32 flash_bank_size; + u32 flash_base_addr; + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info___2 { + enum e1000_bus_width width; + 
u16 func; +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +enum e1000_ulp_state { + e1000_ulp_state_unknown = 0, + e1000_ulp_state_off = 1, + e1000_ulp_state_on = 2, +}; + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[2048]; + bool nvm_k1_enabled; + bool eee_disable; + u16 eee_lp_ability; + enum e1000_ulp_state ulp_state; +}; + +struct e1000_adapter; + +struct e1000_hw___2 { + struct e1000_adapter *adapter; + void *hw_addr; + void *flash_address; + struct e1000_mac_info___2 mac; + struct e1000_fc_info___2 fc; + struct e1000_phy_info___2 phy; + struct e1000_nvm_info___2 nvm; + struct e1000_bus_info___2 bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_80003es2lan e80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; +}; + +struct e1000_phy_regs { + u16 bmcr; + u16 bmsr; + u16 advertise; + u16 lpa; + u16 expansion; + u16 ctrl1000; + u16 stat1000; + u16 estatus; +}; + +struct e1000_buffer; + +struct e1000_ring { + struct e1000_adapter *adapter; + void *desc; + dma_addr_t dma; + unsigned int size; + unsigned int count; + u16 next_to_use; + u16 next_to_clean; + void *head; + void *tail; + struct e1000_buffer *buffer_info; + char name[21]; + u32 ims_val; + u32 itr_val; + void *itr_register; + int set_itr; + struct sk_buff *rx_skb_top; +}; + +struct e1000_info___2; + +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + struct work_struct reset_task; + struct work_struct watchdog_task; + const struct e1000_info___2 *ei; + long unsigned int active_vlans[128]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + long unsigned int state; + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + struct e1000_ring *tx_ring; + u32 tx_fifo_limit; + struct napi_struct napi; + unsigned int uncorr_errors; + unsigned int corr_errors; + unsigned int restart_queue; + u32 txd_cmd; + bool detect_tx_hung; + bool tx_hang_recheck; + u8 tx_timeout_factor; + u32 tx_int_delay; + u32 tx_abs_int_delay; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + long: 32; + u64 tpt_old; + u64 colc_old; + u32 gotc; + long: 32; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + bool (*clean_rx)(struct e1000_ring *, int *, int); + void (*alloc_rx_buf)(struct e1000_ring *, int, gfp_t); + struct e1000_ring *rx_ring; + u32 rx_int_delay; + u32 rx_abs_int_delay; + long: 32; + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + long: 32; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 rx_hwtstamp_cleared; + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + struct net_device *netdev; + struct pci_dev *pdev; + struct e1000_hw___2 hw; + spinlock_t stats64_lock; + struct e1000_hw_stats___2 stats; + struct e1000_phy_info___2 phy_info; + struct e1000_phy_stats phy_stats; + struct e1000_phy_regs phy_regs; + struct e1000_ring test_tx_ring; + struct 
e1000_ring test_rx_ring; + u32 test_icr; + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + bool fc_autoneg; + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + int phy_hang_count; + u16 tx_ring_count; + u16 rx_ring_count; + struct hwtstamp_config hwtstamp_config; + struct delayed_work systim_overflow_work; + struct sk_buff *tx_hwtstamp_skb; + long unsigned int tx_hwtstamp_start; + struct work_struct tx_hwtstamp_work; + spinlock_t systim_lock; + struct cyclecounter cc; + struct timecounter tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct pm_qos_request pm_qos_req; + long int ptp_delta; + u16 eee_advert; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct e1000_ps_page { + struct page *page; + long: 32; + u64 dma; +}; + +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + struct { + long unsigned int time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + struct { + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_info___2 { + enum e1000_mac_type___2 mac; + unsigned int flags; + unsigned int flags2; + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); + const struct e1000_mac_operations___2 *mac_ops; + const struct e1000_phy_operations___2 *phy_ops; + const struct e1000_nvm_operations___2 *nvm_ops; +}; + +struct xhci_cap_regs { + __le32 hc_capbase; + __le32 hcs_params1; + __le32 hcs_params2; + __le32 hcs_params3; + __le32 hcc_params; + __le32 db_off; + __le32 run_regs_off; + __le32 hcc_params2; +}; + +struct xhci_op_regs { + __le32 command; + __le32 status; + __le32 page_size; + __le32 reserved1; + __le32 reserved2; + __le32 dev_notification; + __le64 cmd_ring; + __le32 reserved3[4]; + __le64 dcbaa_ptr; + __le32 config_reg; + __le32 reserved4[241]; + __le32 port_status_base; + __le32 port_power_base; + __le32 port_link_base; + __le32 reserved5; + __le32 reserved6[1016]; +}; + +struct xhci_intr_reg { + __le32 irq_pending; + __le32 irq_control; + __le32 erst_size; + __le32 rsvd; + __le64 erst_base; + __le64 erst_dequeue; +}; + +struct xhci_run_regs { + __le32 microframe_index; + __le32 rsvd[7]; + struct xhci_intr_reg ir_set[128]; +}; + +struct xhci_doorbell_array { + __le32 doorbell[256]; +}; + +struct xhci_container_ctx { + unsigned int type; + int size; + u8 *bytes; + dma_addr_t dma; +}; + +struct xhci_slot_ctx { + __le32 dev_info; + __le32 dev_info2; + __le32 tt_info; + __le32 dev_state; + __le32 reserved[4]; +}; + +struct xhci_ep_ctx { + __le32 ep_info; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + __le32 reserved[3]; +}; + +struct xhci_input_control_ctx { + __le32 drop_flags; + __le32 add_flags; + __le32 rsvd2[6]; +}; + +union xhci_trb; + +struct xhci_command { + struct xhci_container_ctx *in_ctx; + u32 status; + int slot_id; + struct completion *completion; + union xhci_trb *command_trb; + struct list_head cmd_list; + unsigned int timeout_ms; +}; + +struct xhci_link_trb { + __le64 segment_ptr; + __le32 
intr_target; + __le32 control; +}; + +struct xhci_transfer_event { + __le64 buffer; + __le32 transfer_len; + __le32 flags; +}; + +struct xhci_event_cmd { + __le64 cmd_trb; + __le32 status; + __le32 flags; +}; + +struct xhci_generic_trb { + __le32 field[4]; +}; + +union xhci_trb { + struct xhci_link_trb link; + struct xhci_transfer_event trans_event; + struct xhci_event_cmd event_cmd; + struct xhci_generic_trb generic; +}; + +struct xhci_stream_ctx { + __le64 stream_ring; + __le32 reserved[2]; +}; + +struct xhci_ring; + +struct xhci_stream_info { + struct xhci_ring **stream_rings; + unsigned int num_streams; + struct xhci_stream_ctx *stream_ctx_array; + unsigned int num_stream_ctxs; + dma_addr_t ctx_array_dma; + struct xarray trb_address_map; + struct xhci_command *free_streams_command; +}; + +enum xhci_ring_type { + TYPE_CTRL = 0, + TYPE_ISOC = 1, + TYPE_BULK = 2, + TYPE_INTR = 3, + TYPE_STREAM = 4, + TYPE_COMMAND = 5, + TYPE_EVENT = 6, +}; + +struct xhci_segment; + +struct xhci_ring { + struct xhci_segment *first_seg; + struct xhci_segment *last_seg; + union xhci_trb *enqueue; + struct xhci_segment *enq_seg; + union xhci_trb *dequeue; + struct xhci_segment *deq_seg; + struct list_head td_list; + u32 cycle_state; + unsigned int stream_id; + unsigned int num_segs; + unsigned int num_trbs_free; + unsigned int bounce_buf_len; + enum xhci_ring_type type; + bool last_td_was_short; + struct xarray *trb_address_map; +}; + +struct xhci_bw_info { + unsigned int ep_interval; + unsigned int mult; + unsigned int num_packets; + unsigned int max_packet_size; + unsigned int max_esit_payload; + unsigned int type; +}; + +struct xhci_virt_device; + +struct xhci_hcd; + +struct xhci_virt_ep { + struct xhci_virt_device *vdev; + unsigned int ep_index; + struct xhci_ring *ring; + struct xhci_stream_info *stream_info; + struct xhci_ring *new_ring; + unsigned int err_count; + unsigned int ep_state; + struct list_head cancelled_td_list; + struct xhci_hcd *xhci; + struct xhci_segment *queued_deq_seg; + union xhci_trb *queued_deq_ptr; + bool skip; + struct xhci_bw_info bw_info; + struct list_head bw_endpoint_list; + long unsigned int stop_time; + int next_frame_id; + bool use_extended_tbc; +}; + +struct xhci_port; + +struct xhci_interval_bw_table; + +struct xhci_tt_bw_info; + +struct xhci_virt_device { + int slot_id; + struct usb_device *udev; + struct xhci_container_ctx *out_ctx; + struct xhci_container_ctx *in_ctx; + struct xhci_virt_ep eps[31]; + struct xhci_port *rhub_port; + struct xhci_interval_bw_table *bw_table; + struct xhci_tt_bw_info *tt_info; + long unsigned int flags; + u16 current_mel; + void *debugfs_private; +}; + +struct s3_save { + u32 command; + u32 dev_nt; + u64 dcbaa_ptr; + u32 config_reg; + long: 32; +}; + +struct xhci_bus_state { + long unsigned int bus_suspended; + long unsigned int next_statechange; + u32 port_c_suspend; + u32 suspended_ports; + u32 port_remote_wakeup; + long unsigned int resuming_ports; +}; + +struct xhci_hub { + struct xhci_port **ports; + unsigned int num_ports; + struct usb_hcd *hcd; + struct xhci_bus_state bus_state; + u8 maj_rev; + u8 min_rev; +}; + +struct xhci_device_context_array; + +struct xhci_interrupter; + +struct xhci_scratchpad; + +struct xhci_root_port_bw_info; + +struct xhci_port_cap; + +struct xhci_hcd { + struct usb_hcd *main_hcd; + struct usb_hcd *shared_hcd; + struct xhci_cap_regs *cap_regs; + struct xhci_op_regs *op_regs; + struct xhci_run_regs *run_regs; + struct xhci_doorbell_array *dba; + __u32 hcs_params1; + __u32 hcs_params2; + __u32 hcs_params3; + 
__u32 hcc_params; + __u32 hcc_params2; + spinlock_t lock; + u16 hci_version; + u16 max_interrupters; + u32 imod_interval; + int page_size; + int page_shift; + int nvecs; + struct clk *clk; + struct clk *reg_clk; + struct reset_control *reset; + struct xhci_device_context_array *dcbaa; + struct xhci_interrupter **interrupters; + struct xhci_ring *cmd_ring; + unsigned int cmd_ring_state; + struct list_head cmd_list; + unsigned int cmd_ring_reserved_trbs; + struct delayed_work cmd_timer; + struct completion cmd_ring_stop_completion; + struct xhci_command *current_cmd; + struct xhci_scratchpad *scratchpad; + struct mutex mutex; + struct xhci_virt_device *devs[256]; + struct xhci_root_port_bw_info *rh_bw; + struct dma_pool *device_pool; + struct dma_pool *segment_pool; + struct dma_pool *small_streams_pool; + struct dma_pool *medium_streams_pool; + unsigned int xhc_state; + long unsigned int run_graceperiod; + long: 32; + struct s3_save s3; + long long unsigned int quirks; + unsigned int num_active_eps; + unsigned int limit_active_eps; + struct xhci_port *hw_ports; + struct xhci_hub usb2_rhub; + struct xhci_hub usb3_rhub; + unsigned int hw_lpm_support: 1; + unsigned int broken_suspend: 1; + unsigned int allow_single_roothub: 1; + struct xhci_port_cap *port_caps; + unsigned int num_port_caps; + struct timer_list comp_mode_recovery_timer; + u32 port_status_u0; + u16 test_mode; + struct dentry *debugfs_root; + struct dentry *debugfs_slots; + struct list_head regset_list; + void *dbc; + long unsigned int priv[0]; +}; + +struct xhci_segment { + union xhci_trb *trbs; + struct xhci_segment *next; + unsigned int num; + dma_addr_t dma; + dma_addr_t bounce_dma; + void *bounce_buf; + unsigned int bounce_offs; + unsigned int bounce_len; +}; + +struct xhci_interval_bw { + unsigned int num_packets; + struct list_head endpoints; + unsigned int overhead[3]; +}; + +struct xhci_interval_bw_table { + unsigned int interval0_esit_payload; + struct xhci_interval_bw interval_bw[16]; + unsigned int bw_used; + unsigned int ss_bw_in; + unsigned int ss_bw_out; +}; + +struct xhci_port { + __le32 *addr; + int hw_portnum; + int hcd_portnum; + struct xhci_hub *rhub; + struct xhci_port_cap *port_cap; + unsigned int lpm_incapable: 1; + long unsigned int resume_timestamp; + bool rexit_active; + int slot_id; + struct completion rexit_done; + struct completion u3exit_done; +}; + +struct xhci_tt_bw_info { + struct list_head tt_list; + int slot_id; + int ttport; + struct xhci_interval_bw_table bw_table; + int active_eps; +}; + +struct xhci_root_port_bw_info { + struct list_head tts; + unsigned int num_active_tts; + struct xhci_interval_bw_table bw_table; +}; + +struct xhci_device_context_array { + __le64 dev_context_ptrs[256]; + dma_addr_t dma; + long: 32; +}; + +enum xhci_cancelled_td_status { + TD_DIRTY = 0, + TD_HALTED = 1, + TD_CLEARING_CACHE = 2, + TD_CLEARING_CACHE_DEFERRED = 3, + TD_CLEARED = 4, +}; + +struct xhci_td { + struct list_head td_list; + struct list_head cancelled_td_list; + int status; + enum xhci_cancelled_td_status cancel_status; + struct urb *urb; + struct xhci_segment *start_seg; + union xhci_trb *start_trb; + struct xhci_segment *end_seg; + union xhci_trb *end_trb; + struct xhci_segment *bounce_seg; + bool urb_length_set; + bool error_mid_td; +}; + +struct xhci_erst_entry { + __le64 seg_addr; + __le32 seg_size; + __le32 rsvd; +}; + +struct xhci_erst { + struct xhci_erst_entry *entries; + unsigned int num_entries; + dma_addr_t erst_dma_addr; +}; + +struct xhci_scratchpad { + u64 *sp_array; + dma_addr_t 
sp_dma; + void **sp_buffers; +}; + +struct urb_priv { + int num_tds; + int num_tds_done; + struct xhci_td td[0]; +}; + +struct xhci_interrupter { + struct xhci_ring *event_ring; + struct xhci_erst erst; + struct xhci_intr_reg *ir_set; + unsigned int intr_num; + bool ip_autoclear; + u32 isoc_bei_interval; + u32 s3_irq_pending; + u32 s3_irq_control; + u32 s3_erst_size; + long: 32; + u64 s3_erst_base; + u64 s3_erst_dequeue; +}; + +struct xhci_port_cap { + u32 *psi; + u8 psi_count; + u8 psi_uid_count; + u8 maj_rev; + u8 min_rev; + u32 protocol_caps; +}; + +struct i2c_board_info { + char type[20]; + short unsigned int flags; + short unsigned int addr; + const char *dev_name; + void *platform_data; + struct device_node *of_node; + struct fwnode_handle *fwnode; + const struct software_node *swnode; + const struct resource *resources; + unsigned int num_resources; + int irq; +}; + +struct i2c_smbus_alert_setup { + int irq; +}; + +struct trace_event_raw_smbus_write { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_read { + struct trace_entry ent; + int adapter_nr; + __u16 flags; + __u16 addr; + __u8 command; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_reply { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_result { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 read_write; + __u8 command; + __s16 res; + __u32 protocol; + char __data[0]; +}; + +struct trace_event_data_offsets_smbus_write {}; + +struct trace_event_data_offsets_smbus_read {}; + +struct trace_event_data_offsets_smbus_reply {}; + +struct trace_event_data_offsets_smbus_result {}; + +typedef void (*btf_trace_smbus_write)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *); + +typedef void (*btf_trace_smbus_read)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int); + +typedef void (*btf_trace_smbus_reply)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *, int); + +typedef void (*btf_trace_smbus_result)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, int); + +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED = 1, +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE = 1, + THERMAL_TRIP_HOT = 2, + THERMAL_TRIP_CRITICAL = 3, +}; + +enum thermal_trend { + THERMAL_TREND_STABLE = 0, + THERMAL_TREND_RAISING = 1, + THERMAL_TREND_DROPPING = 2, +}; + +enum thermal_notify_event { + THERMAL_EVENT_UNSPECIFIED = 0, + THERMAL_EVENT_TEMP_SAMPLE = 1, + THERMAL_TRIP_VIOLATED = 2, + THERMAL_TRIP_CHANGED = 3, + THERMAL_DEVICE_DOWN = 4, + THERMAL_DEVICE_UP = 5, + THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, + THERMAL_TABLE_CHANGED = 7, + THERMAL_EVENT_KEEP_ALIVE = 8, + THERMAL_TZ_BIND_CDEV = 9, + THERMAL_TZ_UNBIND_CDEV = 10, + THERMAL_INSTANCE_WEIGHT_CHANGED = 11, + THERMAL_TZ_RESUME = 12, + THERMAL_TZ_ADD_THRESHOLD = 13, + THERMAL_TZ_DEL_THRESHOLD = 14, + THERMAL_TZ_FLUSH_THRESHOLDS = 15, +}; + +struct thermal_trip { + int temperature; + int hysteresis; + enum thermal_trip_type type; + u8 flags; + void *priv; +}; + +struct cooling_spec { + long unsigned int upper; + long unsigned int 
lower; + unsigned int weight; +}; + +struct thermal_zone_device; + +struct thermal_cooling_device; + +struct thermal_zone_device_ops { + bool (*should_bind)(struct thermal_zone_device *, const struct thermal_trip *, struct thermal_cooling_device *, struct cooling_spec *); + int (*get_temp)(struct thermal_zone_device *, int *); + int (*set_trips)(struct thermal_zone_device *, int, int); + int (*change_mode)(struct thermal_zone_device *, enum thermal_device_mode); + int (*set_trip_temp)(struct thermal_zone_device *, const struct thermal_trip *, int); + int (*get_crit_temp)(struct thermal_zone_device *, int *); + int (*set_emul_temp)(struct thermal_zone_device *, int); + int (*get_trend)(struct thermal_zone_device *, const struct thermal_trip *, enum thermal_trend *); + void (*hot)(struct thermal_zone_device *); + void (*critical)(struct thermal_zone_device *); +}; + +struct thermal_attr { + struct device_attribute attr; + char name[20]; +}; + +struct thermal_trip_attrs { + struct thermal_attr type; + struct thermal_attr temp; + struct thermal_attr hyst; +}; + +struct thermal_trip_desc { + struct thermal_trip trip; + struct thermal_trip_attrs trip_attrs; + struct list_head list_node; + struct list_head thermal_instances; + int threshold; +}; + +struct thermal_zone_params; + +struct thermal_governor; + +struct thermal_zone_device { + int id; + char type[20]; + struct device device; + struct completion removal; + struct completion resume; + struct attribute_group trips_attribute_group; + struct list_head trips_high; + struct list_head trips_reached; + struct list_head trips_invalid; + enum thermal_device_mode mode; + void *devdata; + int num_trips; + long unsigned int passive_delay_jiffies; + long unsigned int polling_delay_jiffies; + long unsigned int recheck_delay_jiffies; + int temperature; + int last_temperature; + int emul_temperature; + int passive; + int prev_low_trip; + int prev_high_trip; + struct thermal_zone_device_ops ops; + struct thermal_zone_params *tzp; + struct thermal_governor *governor; + void *governor_data; + struct ida ida; + struct mutex lock; + struct list_head node; + struct delayed_work poll_queue; + enum thermal_notify_event notify_event; + u8 state; + struct list_head user_thresholds; + struct thermal_trip_desc trips[0]; + long: 32; +}; + +struct thermal_cooling_device_ops; + +struct thermal_cooling_device { + int id; + const char *type; + long unsigned int max_state; + long: 32; + struct device device; + struct device_node *np; + void *devdata; + void *stats; + const struct thermal_cooling_device_ops *ops; + bool updated; + struct mutex lock; + struct list_head thermal_instances; + struct list_head node; + long: 32; +}; + +struct thermal_cooling_device_ops { + int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *); + int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *); + int (*set_cur_state)(struct thermal_cooling_device *, long unsigned int); + int (*get_requested_power)(struct thermal_cooling_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, long unsigned int, u32 *); + int (*power2state)(struct thermal_cooling_device *, u32, long unsigned int *); +}; + +typedef struct thermal_cooling_device *class_cooling_dev_t; + +struct thermal_zone_params { + const char *governor_name; + bool no_hwmon; + u32 sustainable_power; + s32 k_po; + s32 k_pu; + s32 k_i; + s32 k_d; + s32 integral_cutoff; + int slope; + int offset; +}; + +struct thermal_governor { + const char *name; + int (*bind_to_tz)(struct 
thermal_zone_device *); + void (*unbind_from_tz)(struct thermal_zone_device *); + void (*trip_crossed)(struct thermal_zone_device *, const struct thermal_trip *, bool); + void (*manage)(struct thermal_zone_device *); + void (*update_tz)(struct thermal_zone_device *, enum thermal_notify_event); + struct list_head governor_list; +}; + +typedef struct thermal_zone_device *class_thermal_zone_t; + +struct thermal_instance { + int id; + char name[20]; + struct thermal_cooling_device *cdev; + const struct thermal_trip *trip; + bool initialized; + long unsigned int upper; + long unsigned int lower; + long unsigned int target; + char attr_name[20]; + struct device_attribute attr; + char weight_attr_name[20]; + struct device_attribute weight_attr; + struct list_head trip_node; + struct list_head cdev_node; + unsigned int weight; + bool upper_no_limit; +}; + +struct quirks_list_struct { + struct hid_device_id hid_bl_item; + struct list_head node; +}; + +struct gnet_stats_rate_est64 { + __u64 bps; + __u64 pps; +}; + +struct gnet_estimator { + signed char interval; + unsigned char ewma_log; +}; + +struct net_rate_estimator { + struct gnet_stats_basic_sync *bstats; + spinlock_t *stats_lock; + bool running; + struct gnet_stats_basic_sync *cpu_bstats; + u8 ewma_log; + u8 intvl_log; + seqcount_t seq; + u64 last_packets; + u64 last_bytes; + u64 avpps; + u64 avbps; + long unsigned int next_jiffies; + struct timer_list timer; + struct callback_head rcu; +}; + +struct page_pool_params_fast { + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; +}; + +struct page_pool_alloc_stats { + u64 fast; + u64 slow; + u64 slow_high_order; + u64 empty; + u64 refill; + u64 waive; +}; + +struct pp_alloc_cache { + u32 count; + netmem_ref cache[128]; +}; + +struct ptr_ring { + int producer; + spinlock_t producer_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + int consumer_head; + int consumer_tail; + spinlock_t consumer_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + int size; + int batch; + void **queue; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct page_pool_params_slow { + struct net_device *netdev; + unsigned int queue_idx; + unsigned int flags; + void (*init_callback)(netmem_ref, void *); + void *init_arg; +}; + +struct page_pool_recycle_stats; + +struct page_pool { + struct page_pool_params_fast p; + int cpuid; + u32 pages_state_hold_cnt; + bool has_init_callback: 1; + bool dma_map: 1; + bool dma_sync: 1; + bool system: 1; + long: 32; + __u8 
__cacheline_group_begin__frag[0]; + long int frag_users; + netmem_ref frag_page; + unsigned int frag_offset; + __u8 __cacheline_group_end__frag[0]; + long: 32; + struct {} __cacheline_group_pad__frag; + struct delayed_work release_dw; + void (*disconnect)(void *); + long unsigned int defer_start; + long unsigned int defer_warn; + struct page_pool_alloc_stats alloc_stats; + u32 xdp_mem_id; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct pp_alloc_cache alloc; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ptr_ring ring; + void *mp_priv; + struct page_pool_recycle_stats *recycle_stats; + atomic_t pages_state_release_cnt; + refcount_t user_cnt; + u64 destroy_cnt; + struct page_pool_params_slow slow; + long: 32; + struct { + struct hlist_node list; + u64 detach_time; + u32 napi_id; + u32 id; + } user; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct dmabuf_genpool_chunk_owner; + +struct net_iov { + long unsigned int __unused_padding; + long unsigned int pp_magic; + struct page_pool *pp; + struct dmabuf_genpool_chunk_owner *owner; + long unsigned int dma_addr; + atomic_long_t pp_ref_count; +}; + +struct pp_memory_provider_params { + void *mp_priv; +}; + +struct rps_map; + +struct rps_dev_flow_table; + +struct netdev_rx_queue { + struct xdp_rxq_info xdp_rxq; + struct rps_map *rps_map; + struct rps_dev_flow_table *rps_flow_table; + struct kobject kobj; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct napi_struct *napi; + struct pp_memory_provider_params mp_params; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct page_pool_params { + union { + struct { + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; + }; + struct page_pool_params_fast fast; + }; + union { + struct { + struct net_device *netdev; + unsigned int queue_idx; + unsigned int flags; + void (*init_callback)(netmem_ref, void *); + void *init_arg; + }; + struct page_pool_params_slow slow; + }; +}; + +struct page_pool_recycle_stats { + u64 cached; + u64 cache_full; + u64 ring; + u64 ring_full; + u64 released_refcnt; +}; + +struct page_pool_stats { + struct page_pool_alloc_stats alloc_stats; + struct page_pool_recycle_stats recycle_stats; +}; + +enum { + ETHTOOL_A_BITSET_BIT_UNSPEC = 0, + ETHTOOL_A_BITSET_BIT_INDEX = 1, + ETHTOOL_A_BITSET_BIT_NAME = 2, + ETHTOOL_A_BITSET_BIT_VALUE = 3, + __ETHTOOL_A_BITSET_BIT_CNT = 4, + ETHTOOL_A_BITSET_BIT_MAX = 3, +}; + +enum { + ETHTOOL_A_BITSET_BITS_UNSPEC = 0, + ETHTOOL_A_BITSET_BITS_BIT = 1, + __ETHTOOL_A_BITSET_BITS_CNT = 2, + ETHTOOL_A_BITSET_BITS_MAX = 1, +}; + +enum { + ETHTOOL_A_BITSET_UNSPEC = 
0, + ETHTOOL_A_BITSET_NOMASK = 1, + ETHTOOL_A_BITSET_SIZE = 2, + ETHTOOL_A_BITSET_BITS = 3, + ETHTOOL_A_BITSET_VALUE = 4, + ETHTOOL_A_BITSET_MASK = 5, + __ETHTOOL_A_BITSET_CNT = 6, + ETHTOOL_A_BITSET_MAX = 5, +}; + +struct plca_reply_data { + struct ethnl_reply_data base; + struct phy_plca_cfg plca_cfg; + struct phy_plca_status plca_st; +}; + +enum nft_chain_flags { + NFT_CHAIN_BASE = 1, + NFT_CHAIN_HW_OFFLOAD = 2, + NFT_CHAIN_BINDING = 4, +}; + +enum nft_immediate_attributes { + NFTA_IMMEDIATE_UNSPEC = 0, + NFTA_IMMEDIATE_DREG = 1, + NFTA_IMMEDIATE_DATA = 2, + __NFTA_IMMEDIATE_MAX = 3, +}; + +struct nft_rule { + struct list_head list; + u64 handle: 42; + u64 genmask: 2; + u64 dlen: 12; + u64 udata: 1; + unsigned char data[0]; +}; + +struct nft_immediate_expr { + struct nft_data data; + u8 dreg; + u8 dlen; + long: 32; +}; + +struct flow_dissector_key_tags { + u32 flow_label; +}; + +struct flow_dissector_key_tipc { + __be32 key; +}; + +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc tipckey; + }; +}; + +struct flow_dissector_key_icmp { + struct { + u8 type; + u8 code; + }; + u16 id; +}; + +struct flow_keys { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_icmp icmp; + struct flow_dissector_key_addrs addrs; + long: 32; +}; + +struct rtmsg { + unsigned char rtm_family; + unsigned char rtm_dst_len; + unsigned char rtm_src_len; + unsigned char rtm_tos; + unsigned char rtm_table; + unsigned char rtm_protocol; + unsigned char rtm_scope; + unsigned char rtm_type; + unsigned int rtm_flags; +}; + +enum rt_class_t { + RT_TABLE_UNSPEC = 0, + RT_TABLE_COMPAT = 252, + RT_TABLE_DEFAULT = 253, + RT_TABLE_MAIN = 254, + RT_TABLE_LOCAL = 255, + RT_TABLE_MAX = 4294967295, +}; + +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + +struct nl_info { + struct nlmsghdr *nlh; + struct net *nl_net; + u32 portid; + u8 skip_notify: 1; + u8 skip_notify_kernel: 1; +}; + +struct uncached_list { + spinlock_t lock; + struct list_head head; +}; + +struct ipv4_addr_key { + __be32 addr; + int vif; +}; + +struct inetpeer_addr { + union { + struct ipv4_addr_key a4; + struct in6_addr a6; + u32 key[4]; + }; + __u16 family; +}; + +struct inet_peer { + struct rb_node rb_node; + struct inetpeer_addr daddr; + u32 metrics[17]; + u32 rate_tokens; + u32 n_redirects; + long unsigned int rate_last; + union { + struct { + atomic_t rid; + }; + struct callback_head rcu; + }; + __u32 dtime; + refcount_t refcnt; +}; + +struct fib_table; + +struct fib_result { + __be32 prefix; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + u32 tclassid; + dscp_t dscp; + struct fib_nh_common *nhc; + struct fib_info *fi; + struct fib_table *table; + struct hlist_head *fa_head; +}; + +struct fib_table { + struct hlist_node tb_hlist; + u32 tb_id; + int tb_num_default; + struct callback_head rcu; + long unsigned int *tb_data; + long unsigned int __data[0]; +}; + +struct fib_rt_info { + struct fib_info *fi; + u32 tb_id; + __be32 dst; + int dst_len; + dscp_t dscp; + u8 type; + u8 offload: 1; + u8 trap: 1; + u8 offload_failed: 1; + u8 unused: 5; +}; + +struct fib6_result; + +struct fib6_config; + +struct 
ipv6_stub { + int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); + int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); + struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *); + int (*ipv6_route_input)(struct sk_buff *); + struct fib6_table * (*fib6_get_table)(struct net *, u32); + int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int); + int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, struct fib6_result *, int); + void (*fib6_select_path)(const struct net *, struct fib6_result *, struct flowi6 *, int, bool, const struct sk_buff *, int); + u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, const struct in6_addr *); + int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, struct netlink_ext_ack *); + void (*fib6_nh_release)(struct fib6_nh *); + void (*fib6_nh_release_dsts)(struct fib6_nh *); + void (*fib6_update_sernum)(struct net *, struct fib6_info *); + int (*ip6_del_rt)(struct net *, struct fib6_info *, bool); + void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *); + void (*udpv6_encap_enable)(); + void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool); + struct neigh_table *nd_tbl; + int (*ipv6_fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + struct net_device * (*ipv6_dev_find)(struct net *, const struct in6_addr *, struct net_device *); + int (*ip6_xmit)(const struct sock *, struct sk_buff *, struct flowi6 *, __u32, struct ipv6_txoptions *, int, u32); +}; + +struct fib6_result { + struct fib6_nh *nh; + struct fib6_info *f6i; + u32 fib6_flags; + u8 fib6_type; + struct rt6_info *rt6; +}; + +struct fib6_config { + u32 fc_table; + u32 fc_metric; + int fc_dst_len; + int fc_src_len; + int fc_ifindex; + u32 fc_flags; + u32 fc_protocol; + u16 fc_type; + u16 fc_delete_all_nh: 1; + u16 fc_ignore_dev_down: 1; + u16 __unused: 14; + u32 fc_nh_id; + struct in6_addr fc_dst; + struct in6_addr fc_src; + struct in6_addr fc_prefsrc; + struct in6_addr fc_gateway; + long unsigned int fc_expires; + struct nlattr *fc_mx; + int fc_mx_len; + int fc_mp_len; + struct nlattr *fc_mp; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; + bool fc_is_fdb; +}; + +struct rt_cache_stat { + unsigned int in_slow_tot; + unsigned int in_slow_mc; + unsigned int in_no_route; + unsigned int in_brd; + unsigned int in_martian_dst; + unsigned int in_martian_src; + unsigned int out_slow_tot; + unsigned int out_slow_mc; +}; + +enum netevent_notif_type { + NETEVENT_NEIGH_UPDATE = 1, + NETEVENT_REDIRECT = 2, + NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, + NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, + NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, + NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, +}; + +struct fib_alias { + struct hlist_node fa_list; + struct fib_info *fa_info; + dscp_t fa_dscp; + u8 fa_type; + u8 fa_state; + u8 fa_slen; + u32 tb_id; + s16 fa_default; + u8 offload; + u8 trap; + u8 offload_failed; + struct callback_head rcu; +}; + +struct fib_prop { + int error; + u8 scope; +}; + +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; + union { + u32 mtu; + } ext; +}; + +struct netdev_notifier_changeupper_info { + struct netdev_notifier_info info; + struct net_device *upper_dev; + bool master; + bool linking; + void *upper_info; +}; + 
+struct rtnexthop { + short unsigned int rtnh_len; + unsigned char rtnh_flags; + unsigned char rtnh_hops; + int rtnh_ifindex; +}; + +struct fib_config { + u8 fc_dst_len; + dscp_t fc_dscp; + u8 fc_protocol; + u8 fc_scope; + u8 fc_type; + u8 fc_gw_family; + u32 fc_table; + __be32 fc_dst; + union { + __be32 fc_gw4; + struct in6_addr fc_gw6; + }; + int fc_oif; + u32 fc_flags; + u32 fc_priority; + __be32 fc_prefsrc; + u32 fc_nh_id; + struct nlattr *fc_mx; + struct rtnexthop *fc_mp; + int fc_mx_len; + int fc_mp_len; + u32 fc_flow; + u32 fc_nlflags; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; +}; + +struct fib_result_nl { + __be32 fl_addr; + u32 fl_mark; + unsigned char fl_tos; + unsigned char fl_scope; + unsigned char tb_id_in; + unsigned char tb_id; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + int err; +}; + +struct fib_dump_filter { + u32 table_id; + bool filter_set; + bool dump_routes; + bool dump_exceptions; + bool rtnl_held; + unsigned char protocol; + unsigned char rt_type; + unsigned int flags; + struct net_device *dev; +}; + +struct rtentry { + long unsigned int rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + short unsigned int rt_flags; + short int rt_pad2; + long unsigned int rt_pad3; + void *rt_pad4; + short int rt_metric; + char *rt_dev; + long unsigned int rt_mtu; + long unsigned int rt_window; + short unsigned int rt_irtt; +}; + +enum { + IFLA_INET6_UNSPEC = 0, + IFLA_INET6_FLAGS = 1, + IFLA_INET6_CONF = 2, + IFLA_INET6_STATS = 3, + IFLA_INET6_MCAST = 4, + IFLA_INET6_CACHEINFO = 5, + IFLA_INET6_ICMP6STATS = 6, + IFLA_INET6_TOKEN = 7, + IFLA_INET6_ADDR_GEN_MODE = 8, + IFLA_INET6_RA_MTU = 9, + __IFLA_INET6_MAX = 10, +}; + +enum in6_addr_gen_mode { + IN6_ADDR_GEN_MODE_EUI64 = 0, + IN6_ADDR_GEN_MODE_NONE = 1, + IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, + IN6_ADDR_GEN_MODE_RANDOM = 3, +}; + +struct ifla_cacheinfo { + __u32 max_reasm_len; + __u32 tstamp; + __u32 reachable_time; + __u32 retrans_time; +}; + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; + unsigned int flags_changed; +}; + +struct ifaddrmsg { + __u8 ifa_family; + __u8 ifa_prefixlen; + __u8 ifa_flags; + __u8 ifa_scope; + __u32 ifa_index; +}; + +enum { + IFA_UNSPEC = 0, + IFA_ADDRESS = 1, + IFA_LOCAL = 2, + IFA_LABEL = 3, + IFA_BROADCAST = 4, + IFA_ANYCAST = 5, + IFA_CACHEINFO = 6, + IFA_MULTICAST = 7, + IFA_FLAGS = 8, + IFA_RT_PRIORITY = 9, + IFA_TARGET_NETNSID = 10, + IFA_PROTO = 11, + __IFA_MAX = 12, +}; + +struct ifa_cacheinfo { + __u32 ifa_prefered; + __u32 ifa_valid; + __u32 cstamp; + __u32 tstamp; +}; + +struct prefixmsg { + unsigned char prefix_family; + unsigned char prefix_pad1; + short unsigned int prefix_pad2; + int prefix_ifindex; + unsigned char prefix_type; + unsigned char prefix_len; + unsigned char prefix_flags; + unsigned char prefix_pad3; +}; + +enum { + PREFIX_UNSPEC = 0, + PREFIX_ADDRESS = 1, + PREFIX_CACHEINFO = 2, + __PREFIX_MAX = 3, +}; + +struct prefix_cacheinfo { + __u32 preferred_time; + __u32 valid_time; +}; + +struct in6_ifreq { + struct in6_addr ifr6_addr; + __u32 ifr6_prefixlen; + int ifr6_ifindex; +}; + +enum { + DEVCONF_FORWARDING = 0, + DEVCONF_HOPLIMIT = 1, + DEVCONF_MTU6 = 2, + DEVCONF_ACCEPT_RA = 3, + DEVCONF_ACCEPT_REDIRECTS = 4, + DEVCONF_AUTOCONF = 5, + DEVCONF_DAD_TRANSMITS = 6, + DEVCONF_RTR_SOLICITS = 7, + DEVCONF_RTR_SOLICIT_INTERVAL = 8, + DEVCONF_RTR_SOLICIT_DELAY = 9, + DEVCONF_USE_TEMPADDR = 10, + DEVCONF_TEMP_VALID_LFT = 
11, + DEVCONF_TEMP_PREFERED_LFT = 12, + DEVCONF_REGEN_MAX_RETRY = 13, + DEVCONF_MAX_DESYNC_FACTOR = 14, + DEVCONF_MAX_ADDRESSES = 15, + DEVCONF_FORCE_MLD_VERSION = 16, + DEVCONF_ACCEPT_RA_DEFRTR = 17, + DEVCONF_ACCEPT_RA_PINFO = 18, + DEVCONF_ACCEPT_RA_RTR_PREF = 19, + DEVCONF_RTR_PROBE_INTERVAL = 20, + DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, + DEVCONF_PROXY_NDP = 22, + DEVCONF_OPTIMISTIC_DAD = 23, + DEVCONF_ACCEPT_SOURCE_ROUTE = 24, + DEVCONF_MC_FORWARDING = 25, + DEVCONF_DISABLE_IPV6 = 26, + DEVCONF_ACCEPT_DAD = 27, + DEVCONF_FORCE_TLLAO = 28, + DEVCONF_NDISC_NOTIFY = 29, + DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30, + DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, + DEVCONF_SUPPRESS_FRAG_NDISC = 32, + DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, + DEVCONF_USE_OPTIMISTIC = 34, + DEVCONF_ACCEPT_RA_MTU = 35, + DEVCONF_STABLE_SECRET = 36, + DEVCONF_USE_OIF_ADDRS_ONLY = 37, + DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, + DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, + DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, + DEVCONF_DROP_UNSOLICITED_NA = 41, + DEVCONF_KEEP_ADDR_ON_DOWN = 42, + DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, + DEVCONF_SEG6_ENABLED = 44, + DEVCONF_SEG6_REQUIRE_HMAC = 45, + DEVCONF_ENHANCED_DAD = 46, + DEVCONF_ADDR_GEN_MODE = 47, + DEVCONF_DISABLE_POLICY = 48, + DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, + DEVCONF_NDISC_TCLASS = 50, + DEVCONF_RPL_SEG_ENABLED = 51, + DEVCONF_RA_DEFRTR_METRIC = 52, + DEVCONF_IOAM6_ENABLED = 53, + DEVCONF_IOAM6_ID = 54, + DEVCONF_IOAM6_ID_WIDE = 55, + DEVCONF_NDISC_EVICT_NOCARRIER = 56, + DEVCONF_ACCEPT_UNTRACKED_NA = 57, + DEVCONF_ACCEPT_RA_MIN_LFT = 58, + DEVCONF_MAX = 59, +}; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; + +enum { + INET6_IFADDR_STATE_PREDAD = 0, + INET6_IFADDR_STATE_DAD = 1, + INET6_IFADDR_STATE_POSTDAD = 2, + INET6_IFADDR_STATE_ERRDAD = 3, + INET6_IFADDR_STATE_DEAD = 4, +}; + +struct inet6_ifaddr { + struct in6_addr addr; + __u32 prefix_len; + __u32 rt_priority; + __u32 valid_lft; + __u32 prefered_lft; + refcount_t refcnt; + spinlock_t lock; + int state; + __u32 flags; + __u8 dad_probes; + __u8 stable_privacy_retry; + __u16 scope; + long: 32; + __u64 dad_nonce; + long unsigned int cstamp; + long unsigned int tstamp; + struct delayed_work dad_work; + struct inet6_dev *idev; + struct fib6_info *rt; + struct hlist_node addr_lst; + struct list_head if_list; + struct list_head if_list_aux; + struct list_head tmp_list; + struct inet6_ifaddr *ifpub; + int regen_count; + bool tokenized; + u8 ifa_proto; + struct callback_head rcu; + struct in6_addr peer_addr; +}; + +struct in6_validator_info { + struct in6_addr i6vi_addr; + struct inet6_dev *i6vi_dev; + struct netlink_ext_ack *extack; +}; + +struct ifa6_config { + const struct in6_addr *pfx; + unsigned int plen; + u8 ifa_proto; + const struct in6_addr *peer_pfx; + u32 rt_priority; + u32 ifa_flags; + u32 preferred_lft; + u32 valid_lft; + u16 scope; +}; + +union fwnet_hwaddr { + u8 u[16]; + struct { + __be64 uniq_id; + u8 max_rec; + u8 sspd; + u8 fifo[6]; + } uc; +}; + +struct netconfmsg { + __u8 ncm_family; +}; + +enum { + NETCONFA_UNSPEC = 0, + NETCONFA_IFINDEX = 1, + NETCONFA_FORWARDING = 2, + NETCONFA_RP_FILTER = 3, + NETCONFA_MC_FORWARDING = 4, + NETCONFA_PROXY_NEIGH = 5, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, + NETCONFA_INPUT = 7, + NETCONFA_BC_FORWARDING = 8, + __NETCONFA_MAX = 9, +}; + +enum cleanup_prefix_rt_t { + CLEANUP_PREFIX_RT_NOP = 0, + CLEANUP_PREFIX_RT_DEL = 1, + CLEANUP_PREFIX_RT_EXPIRE = 2, +}; + +enum { + IPV6_SADDR_RULE_INIT = 0, + IPV6_SADDR_RULE_LOCAL = 1, + 
IPV6_SADDR_RULE_SCOPE = 2, + IPV6_SADDR_RULE_PREFERRED = 3, + IPV6_SADDR_RULE_OIF = 4, + IPV6_SADDR_RULE_LABEL = 5, + IPV6_SADDR_RULE_PRIVACY = 6, + IPV6_SADDR_RULE_ORCHID = 7, + IPV6_SADDR_RULE_PREFIX = 8, + IPV6_SADDR_RULE_MAX = 9, +}; + +struct ipv6_saddr_score { + int rule; + int addr_type; + struct inet6_ifaddr *ifa; + long unsigned int scorebits[1]; + int scopedist; + int matchlen; +}; + +struct ipv6_saddr_dst { + const struct in6_addr *addr; + int ifindex; + int scope; + int label; + unsigned int prefs; +}; + +struct if6_iter_state { + struct seq_net_private p; + int bucket; + int offset; +}; + +enum addr_type_t { + UNICAST_ADDR = 0, + MULTICAST_ADDR = 1, + ANYCAST_ADDR = 2, +}; + +struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; + enum addr_type_t type; +}; + +enum { + DAD_PROCESS = 0, + DAD_BEGIN = 1, + DAD_ABORT = 2, +}; + +enum pcpu_fc { + PCPU_FC_AUTO = 0, + PCPU_FC_EMBED = 1, + PCPU_FC_PAGE = 2, + PCPU_FC_NR = 3, +}; + +struct jit_context { + struct bpf_prog *program; + u32 *descriptors; + u32 *target; + u32 bpf_index; + u32 jit_index; + u32 changes; + u32 accessed; + u32 clobbered; + u32 stack_size; + u32 saved_size; + u32 stack_used; +}; + +enum pci_p2pdma_map_type { + PCI_P2PDMA_MAP_UNKNOWN = 0, + PCI_P2PDMA_MAP_NOT_SUPPORTED = 1, + PCI_P2PDMA_MAP_BUS_ADDR = 2, + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE = 3, +}; + +struct pci_p2pdma_map_state { + struct dev_pagemap *pgmap; + int map; + u64 bus_off; +}; + +struct io_tlb_pool; + +enum clocksource_ids { + CSID_GENERIC = 0, + CSID_ARM_ARCH_COUNTER = 1, + CSID_S390_TOD = 2, + CSID_X86_TSC_EARLY = 3, + CSID_X86_TSC = 4, + CSID_X86_KVM_CLK = 5, + CSID_X86_ART = 6, + CSID_MAX = 7, +}; + +enum vdso_clock_mode { + VDSO_CLOCKMODE_NONE = 0, + VDSO_CLOCKMODE_R4K = 1, + VDSO_CLOCKMODE_GIC = 2, + VDSO_CLOCKMODE_MAX = 3, + VDSO_CLOCKMODE_TIMENS = 2147483647, +}; + +struct clocksource_base; + +struct clocksource { + u64 (*read)(struct clocksource *); + long: 32; + u64 mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; + u32 uncertainty_margin; + u64 max_cycles; + const char *name; + struct list_head list; + u32 freq_khz; + int rating; + enum clocksource_ids id; + enum vdso_clock_mode vdso_clock_mode; + long unsigned int flags; + struct clocksource_base *base; + int (*enable)(struct clocksource *); + void (*disable)(struct clocksource *); + void (*suspend)(struct clocksource *); + void (*resume)(struct clocksource *); + void (*mark_unstable)(struct clocksource *); + void (*tick_stable)(struct clocksource *); + struct list_head wd_list; + long: 32; + u64 cs_last; + u64 wd_last; + struct module *owner; + long: 32; +}; + +struct clocksource_base { + enum clocksource_ids id; + u32 freq_khz; + u64 offset; + u32 numerator; + u32 denominator; +}; + +enum { + FUTEX_STATE_OK = 0, + FUTEX_STATE_EXITING = 1, + FUTEX_STATE_DEAD = 2, +}; + +enum futex_access { + FUTEX_READ = 0, + FUTEX_WRITE = 1, +}; + +struct rt_wake_q_head { + struct wake_q_head head; + struct task_struct *rtlock_task; +}; + +struct kernel_clone_args { + u64 flags; + int *pidfd; + int *child_tid; + int *parent_tid; + const char *name; + int exit_signal; + u32 kthread: 1; + u32 io_thread: 1; + u32 user_worker: 1; + u32 no_files: 1; + long unsigned int stack; + long unsigned int stack_size; + long unsigned int tls; + pid_t *set_tid; + size_t set_tid_size; + int cgroup; + int idle; + int (*fn)(void *); + void *fn_arg; + struct cgroup *cgrp; + struct css_set *cset; + long: 32; +}; + +struct kobj_attribute { + struct 
attribute attr; + ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); + ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t); +}; + +struct psi_window { + u64 size; + u64 start_time; + u64 start_value; + u64 prev_growth; +}; + +struct psi_trigger { + enum psi_states state; + long: 32; + u64 threshold; + struct list_head node; + struct psi_group *group; + wait_queue_head_t event_wait; + struct kernfs_open_file *of; + int event; + struct psi_window win; + u64 last_event_time; + bool pending_event; + enum psi_aggregators aggregator; +}; + +enum { + CSS_TASK_ITER_PROCS = 1, + CSS_TASK_ITER_THREADED = 2, + CSS_TASK_ITER_SKIPPED = 65536, +}; + +struct trace_event_raw_cgroup_root { + struct trace_entry ent; + int root; + u16 ss_mask; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_cgroup { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cgroup_migrate { + struct trace_entry ent; + int dst_root; + int dst_level; + u64 dst_id; + int pid; + u32 __data_loc_dst_path; + u32 __data_loc_comm; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cgroup_event { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + int val; + char __data[0]; +}; + +struct trace_event_raw_cgroup_rstat { + struct trace_entry ent; + int root; + int level; + u64 id; + int cpu; + bool contended; + char __data[0]; +}; + +struct trace_event_data_offsets_cgroup_root { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cgroup { + u32 path; + const void *path_ptr_; +}; + +struct trace_event_data_offsets_cgroup_migrate { + u32 dst_path; + const void *dst_path_ptr_; + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_cgroup_event { + u32 path; + const void *path_ptr_; +}; + +struct trace_event_data_offsets_cgroup_rstat {}; + +typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_rstat_lock_contended)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_locked)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_unlock)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_lock_contended)(void *, struct cgroup *, int, bool); + +typedef void 
(*btf_trace_cgroup_rstat_cpu_lock_contended_fastpath)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_locked)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_locked_fastpath)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_unlock)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_unlock_fastpath)(void *, struct cgroup *, int, bool); + +enum cgroup_opt_features { + OPT_FEATURE_PRESSURE = 0, + OPT_FEATURE_COUNT = 1, +}; + +enum cgroup2_param { + Opt_nsdelegate = 0, + Opt_favordynmods___2 = 1, + Opt_memory_localevents = 2, + Opt_memory_recursiveprot = 3, + Opt_memory_hugetlb_accounting = 4, + Opt_pids_localevents = 5, + nr__cgroup2_params = 6, +}; + +enum { + BPF_F_INGRESS = 1, + BPF_F_BROADCAST = 8, + BPF_F_EXCLUDE_INGRESS = 16, +}; + +struct bpf_devmap_val { + __u32 ifindex; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct xdp_dev_bulk_queue { + struct xdp_frame *q[16]; + struct list_head flush_node; + struct net_device *dev; + struct net_device *dev_rx; + struct bpf_prog *xdp_prog; + unsigned int count; +}; + +struct bpf_dtab_netdev { + struct net_device *dev; + struct hlist_node index_hlist; + struct bpf_prog *xdp_prog; + struct callback_head rcu; + unsigned int idx; + struct bpf_devmap_val val; +}; + +struct bpf_dtab { + struct bpf_map map; + struct bpf_dtab_netdev **netdev_map; + struct list_head list; + struct hlist_head *dev_index_head; + spinlock_t index_lock; + unsigned int items; + u32 n_buckets; + long: 32; +}; + +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE = 0, + VERIFYING_FIRMWARE_SIGNATURE = 1, + VERIFYING_KEXEC_PE_SIGNATURE = 2, + VERIFYING_KEY_SIGNATURE = 3, + VERIFYING_KEY_SELF_SIGNATURE = 4, + VERIFYING_UNSPECIFIED_SIGNATURE = 5, + NR__KEY_BEING_USED_FOR = 6, +}; + +enum page_walk_lock { + PGWALK_RDLOCK = 0, + PGWALK_WRLOCK = 1, + PGWALK_WRLOCK_VERIFY = 2, +}; + +struct mm_walk; + +struct mm_walk_ops { + int (*pgd_entry)(pgd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*p4d_entry)(p4d_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pud_entry)(pud_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_hole)(long unsigned int, long unsigned int, int, struct mm_walk *); + int (*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *); + int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *); + int (*pre_vma)(long unsigned int, long unsigned int, struct mm_walk *); + void (*post_vma)(struct mm_walk *); + int (*install_pte)(long unsigned int, long unsigned int, pte_t *, struct mm_walk *); + enum page_walk_lock walk_lock; +}; + +enum page_walk_action { + ACTION_SUBTREE = 0, + ACTION_CONTINUE = 1, + ACTION_AGAIN = 2, +}; + +struct mm_walk { + const struct mm_walk_ops *ops; + struct mm_struct *mm; + pgd_t *pgd; + struct vm_area_struct *vma; + enum page_walk_action action; + bool no_vma; + void *private; +}; + +typedef struct { + long unsigned int fds_bits[32]; +} __kernel_fd_set; + +typedef __kernel_fd_set fd_set; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +struct poll_table_entry { + struct file *filp; + __poll_t key; + wait_queue_entry_t wait; + wait_queue_head_t *wait_address; +}; + +struct poll_table_page; + 
+struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[18]; +}; + +struct poll_table_page { + struct poll_table_page *next; + struct poll_table_entry *entry; + struct poll_table_entry entries[0]; +}; + +enum poll_time_type { + PT_TIMEVAL = 0, + PT_OLD_TIMEVAL = 1, + PT_TIMESPEC = 2, + PT_OLD_TIMESPEC = 3, +}; + +typedef struct { + long unsigned int *in; + long unsigned int *out; + long unsigned int *ex; + long unsigned int *res_in; + long unsigned int *res_out; + long unsigned int *res_ex; +} fd_set_bits; + +struct sigset_argpack { + sigset_t *p; + size_t size; +}; + +struct poll_list { + struct poll_list *next; + unsigned int len; + struct pollfd entries[0]; +}; + +struct iomap_dio_ops { + int (*end_io)(struct kiocb *, ssize_t, int, unsigned int); + void (*submit_io)(const struct iomap_iter *, struct bio *, loff_t); + struct bio_set *bio_set; +}; + +struct fname; + +struct dir_private_info { + struct rb_root root; + struct rb_node *curr_node; + struct fname *extra_fname; + long: 32; + loff_t last_pos; + __u32 curr_hash; + __u32 curr_minor_hash; + __u32 next_hash; + long: 32; + u64 cookie; + bool initialized; + long: 32; +}; + +typedef s64 int64_t; + +struct fuse_attr { + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint32_t rdev; + uint32_t blksize; + uint32_t flags; +}; + +struct fuse_sx_time { + int64_t tv_sec; + uint32_t tv_nsec; + int32_t __reserved; +}; + +struct fuse_statx { + uint32_t mask; + uint32_t blksize; + uint64_t attributes; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint16_t mode; + uint16_t __spare0[1]; + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t attributes_mask; + struct fuse_sx_time atime; + struct fuse_sx_time btime; + struct fuse_sx_time ctime; + struct fuse_sx_time mtime; + uint32_t rdev_major; + uint32_t rdev_minor; + uint32_t dev_major; + uint32_t dev_minor; + uint64_t __spare2[14]; +}; + +enum fuse_ext_type { + FUSE_MAX_NR_SECCTX = 31, + FUSE_EXT_GROUPS = 32, +}; + +enum fuse_opcode { + FUSE_LOOKUP = 1, + FUSE_FORGET = 2, + FUSE_GETATTR = 3, + FUSE_SETATTR = 4, + FUSE_READLINK = 5, + FUSE_SYMLINK = 6, + FUSE_MKNOD = 8, + FUSE_MKDIR = 9, + FUSE_UNLINK = 10, + FUSE_RMDIR = 11, + FUSE_RENAME = 12, + FUSE_LINK = 13, + FUSE_OPEN = 14, + FUSE_READ = 15, + FUSE_WRITE = 16, + FUSE_STATFS = 17, + FUSE_RELEASE = 18, + FUSE_FSYNC = 20, + FUSE_SETXATTR = 21, + FUSE_GETXATTR = 22, + FUSE_LISTXATTR = 23, + FUSE_REMOVEXATTR = 24, + FUSE_FLUSH = 25, + FUSE_INIT = 26, + FUSE_OPENDIR = 27, + FUSE_READDIR = 28, + FUSE_RELEASEDIR = 29, + FUSE_FSYNCDIR = 30, + FUSE_GETLK = 31, + FUSE_SETLK = 32, + FUSE_SETLKW = 33, + FUSE_ACCESS = 34, + FUSE_CREATE = 35, + FUSE_INTERRUPT = 36, + FUSE_BMAP = 37, + FUSE_DESTROY = 38, + FUSE_IOCTL = 39, + FUSE_POLL = 40, + FUSE_NOTIFY_REPLY = 41, + FUSE_BATCH_FORGET = 42, + FUSE_FALLOCATE = 43, + FUSE_READDIRPLUS = 44, + FUSE_RENAME2 = 45, + FUSE_LSEEK = 46, + FUSE_COPY_FILE_RANGE = 47, + FUSE_SETUPMAPPING = 48, + FUSE_REMOVEMAPPING = 49, + FUSE_SYNCFS = 50, + FUSE_TMPFILE = 51, + FUSE_STATX = 52, + CUSE_INIT = 4096, + CUSE_INIT_BSWAP_RESERVED = 1048576, + FUSE_INIT_BSWAP_RESERVED = 436207616, +}; + +struct fuse_entry_out { + uint64_t nodeid; + uint64_t generation; + uint64_t entry_valid; + 
uint64_t attr_valid; + uint32_t entry_valid_nsec; + uint32_t attr_valid_nsec; + struct fuse_attr attr; +}; + +struct fuse_forget_one { + uint64_t nodeid; + uint64_t nlookup; +}; + +struct fuse_getattr_in { + uint32_t getattr_flags; + uint32_t dummy; + uint64_t fh; +}; + +struct fuse_attr_out { + uint64_t attr_valid; + uint32_t attr_valid_nsec; + uint32_t dummy; + struct fuse_attr attr; +}; + +struct fuse_statx_in { + uint32_t getattr_flags; + uint32_t reserved; + uint64_t fh; + uint32_t sx_flags; + uint32_t sx_mask; +}; + +struct fuse_statx_out { + uint64_t attr_valid; + uint32_t attr_valid_nsec; + uint32_t flags; + uint64_t spare[2]; + struct fuse_statx stat; +}; + +struct fuse_mknod_in { + uint32_t mode; + uint32_t rdev; + uint32_t umask; + uint32_t padding; +}; + +struct fuse_mkdir_in { + uint32_t mode; + uint32_t umask; +}; + +struct fuse_rename2_in { + uint64_t newdir; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_link_in { + uint64_t oldnodeid; +}; + +struct fuse_setattr_in { + uint32_t valid; + uint32_t padding; + uint64_t fh; + uint64_t size; + uint64_t lock_owner; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t unused4; + uint32_t uid; + uint32_t gid; + uint32_t unused5; +}; + +struct fuse_create_in { + uint32_t flags; + uint32_t mode; + uint32_t umask; + uint32_t open_flags; +}; + +struct fuse_open_out { + uint64_t fh; + uint32_t open_flags; + int32_t backing_id; +}; + +struct fuse_release_in { + uint64_t fh; + uint32_t flags; + uint32_t release_flags; + uint64_t lock_owner; +}; + +struct fuse_access_in { + uint32_t mask; + uint32_t padding; +}; + +struct fuse_in_header { + uint32_t len; + uint32_t opcode; + uint64_t unique; + uint64_t nodeid; + uint32_t uid; + uint32_t gid; + uint32_t pid; + uint16_t total_extlen; + uint16_t padding; +}; + +struct fuse_out_header { + uint32_t len; + int32_t error; + uint64_t unique; +}; + +struct fuse_secctx { + uint32_t size; + uint32_t padding; +}; + +struct fuse_secctx_header { + uint32_t size; + uint32_t nr_secctx; +}; + +struct fuse_ext_header { + uint32_t size; + uint32_t type; +}; + +struct fuse_supp_groups { + uint32_t nr_groups; + uint32_t groups[0]; +}; + +struct fuse_forget_link { + struct fuse_forget_one forget_one; + struct fuse_forget_link *next; + long: 32; +}; + +struct fuse_submount_lookup { + refcount_t count; + long: 32; + u64 nodeid; + struct fuse_forget_link *forget; + long: 32; +}; + +struct fuse_backing { + struct file *file; + struct cred *cred; + refcount_t count; + struct callback_head rcu; +}; + +struct fuse_inode { + struct inode inode; + u64 nodeid; + u64 nlookup; + struct fuse_forget_link *forget; + long: 32; + u64 i_time; + u32 inval_mask; + umode_t orig_i_mode; + struct timespec64 i_btime; + u64 orig_ino; + u64 attr_version; + union { + struct { + struct list_head write_files; + struct list_head queued_writes; + int writectr; + int iocachectr; + wait_queue_head_t page_waitq; + wait_queue_head_t direct_io_waitq; + struct rb_root writepages; + }; + struct { + bool cached; + long: 32; + loff_t size; + loff_t pos; + u64 version; + struct timespec64 mtime; + u64 iversion; + spinlock_t lock; + long: 32; + } rdc; + }; + long unsigned int state; + struct mutex mutex; + spinlock_t lock; + struct fuse_submount_lookup *submount_lookup; + struct fuse_backing *fb; +}; + +enum { + FUSE_I_ADVISE_RDPLUS = 0, + FUSE_I_INIT_RDPLUS = 1, + FUSE_I_SIZE_UNSTABLE = 2, + FUSE_I_BAD = 3, + FUSE_I_BTIME = 4, + FUSE_I_CACHE_IO_MODE 
= 5, +}; + +struct fuse_mount; + +union fuse_file_args; + +struct fuse_file { + struct fuse_mount *fm; + union fuse_file_args *args; + u64 kh; + u64 fh; + u64 nodeid; + refcount_t count; + u32 open_flags; + struct list_head write_entry; + struct { + loff_t pos; + loff_t cache_off; + u64 version; + } readdir; + struct rb_node polled_node; + wait_queue_head_t poll_wait; + enum { + IOM_NONE = 0, + IOM_CACHED = 1, + IOM_UNCACHED = 2, + } iomode; + struct file *passthrough; + const struct cred *cred; + bool flock: 1; +}; + +struct fuse_conn; + +struct fuse_mount { + struct fuse_conn *fc; + struct super_block *sb; + struct list_head fc_entry; + struct callback_head rcu; +}; + +struct fuse_in_arg { + unsigned int size; + const void *value; +}; + +struct fuse_arg { + unsigned int size; + void *value; +}; + +struct fuse_args { + uint64_t nodeid; + uint32_t opcode; + uint8_t in_numargs; + uint8_t out_numargs; + uint8_t ext_idx; + bool force: 1; + bool noreply: 1; + bool nocreds: 1; + bool in_pages: 1; + bool out_pages: 1; + bool user_pages: 1; + bool out_argvar: 1; + bool page_zeroing: 1; + bool page_replace: 1; + bool may_block: 1; + bool is_ext: 1; + bool is_pinned: 1; + bool invalidate_vmap: 1; + struct fuse_in_arg in_args[3]; + struct fuse_arg out_args[2]; + void (*end)(struct fuse_mount *, struct fuse_args *, int); + void *vmap_base; + long: 32; +}; + +struct fuse_release_args { + struct fuse_args args; + struct fuse_release_in inarg; + struct inode *inode; + long: 32; +}; + +union fuse_file_args { + struct fuse_open_out open_outarg; + struct fuse_release_args release_args; +}; + +struct fuse_folio_desc { + unsigned int length; + unsigned int offset; +}; + +struct fuse_args_pages { + struct fuse_args args; + struct folio **folios; + struct fuse_folio_desc *descs; + unsigned int num_folios; + long: 32; +}; + +struct fuse_req { + struct list_head list; + struct list_head intr_entry; + struct fuse_args *args; + refcount_t count; + long unsigned int flags; + long: 32; + struct { + struct fuse_in_header h; + } in; + struct { + struct fuse_out_header h; + } out; + wait_queue_head_t waitq; + struct fuse_mount *fm; +}; + +struct fuse_iqueue; + +struct fuse_iqueue_ops { + void (*send_forget)(struct fuse_iqueue *, struct fuse_forget_link *); + void (*send_interrupt)(struct fuse_iqueue *, struct fuse_req *); + void (*send_req)(struct fuse_iqueue *, struct fuse_req *); + void (*release)(struct fuse_iqueue *); +}; + +struct fuse_iqueue { + unsigned int connected; + spinlock_t lock; + wait_queue_head_t waitq; + long: 32; + u64 reqctr; + struct list_head pending; + struct list_head interrupts; + struct fuse_forget_link forget_list_head; + struct fuse_forget_link *forget_list_tail; + int forget_batch; + struct fasync_struct *fasync; + const struct fuse_iqueue_ops *ops; + void *priv; + long: 32; +}; + +struct fuse_sync_bucket; + +struct fuse_conn { + spinlock_t lock; + refcount_t count; + atomic_t dev_count; + struct callback_head rcu; + kuid_t user_id; + kgid_t group_id; + struct pid_namespace *pid_ns; + struct user_namespace *user_ns; + unsigned int max_read; + unsigned int max_write; + unsigned int max_pages; + unsigned int max_pages_limit; + long: 32; + struct fuse_iqueue iq; + atomic64_t khctr; + struct rb_root polled_files; + unsigned int max_background; + unsigned int congestion_threshold; + unsigned int num_background; + unsigned int active_background; + struct list_head bg_queue; + spinlock_t bg_lock; + int initialized; + int blocked; + wait_queue_head_t blocked_waitq; + unsigned int connected; + bool 
aborted; + unsigned int conn_error: 1; + unsigned int conn_init: 1; + unsigned int async_read: 1; + unsigned int abort_err: 1; + unsigned int atomic_o_trunc: 1; + unsigned int export_support: 1; + unsigned int writeback_cache: 1; + unsigned int parallel_dirops: 1; + unsigned int handle_killpriv: 1; + unsigned int cache_symlinks: 1; + unsigned int legacy_opts_show: 1; + unsigned int handle_killpriv_v2: 1; + unsigned int no_open: 1; + unsigned int no_opendir: 1; + unsigned int no_fsync: 1; + unsigned int no_fsyncdir: 1; + unsigned int no_flush: 1; + unsigned int no_setxattr: 1; + unsigned int setxattr_ext: 1; + unsigned int no_getxattr: 1; + unsigned int no_listxattr: 1; + unsigned int no_removexattr: 1; + unsigned int no_lock: 1; + unsigned int no_access: 1; + unsigned int no_create: 1; + unsigned int no_interrupt: 1; + unsigned int no_bmap: 1; + unsigned int no_poll: 1; + unsigned int big_writes: 1; + unsigned int dont_mask: 1; + unsigned int no_flock: 1; + unsigned int no_fallocate: 1; + unsigned int no_rename2: 1; + unsigned int auto_inval_data: 1; + unsigned int explicit_inval_data: 1; + unsigned int do_readdirplus: 1; + unsigned int readdirplus_auto: 1; + unsigned int async_dio: 1; + unsigned int no_lseek: 1; + unsigned int posix_acl: 1; + unsigned int default_permissions: 1; + unsigned int allow_other: 1; + unsigned int no_copy_file_range: 1; + unsigned int destroy: 1; + unsigned int delete_stale: 1; + unsigned int no_control: 1; + unsigned int no_force_umount: 1; + unsigned int auto_submounts: 1; + unsigned int sync_fs: 1; + unsigned int init_security: 1; + unsigned int create_supp_group: 1; + unsigned int inode_dax: 1; + unsigned int no_tmpfile: 1; + unsigned int direct_io_allow_mmap: 1; + unsigned int no_statx: 1; + unsigned int passthrough: 1; + unsigned int use_pages_for_kvec_io: 1; + int max_stack_depth; + atomic_t num_waiting; + unsigned int minor; + struct list_head entry; + dev_t dev; + struct dentry *ctl_dentry[5]; + int ctl_ndents; + u32 scramble_key[4]; + long: 32; + atomic64_t attr_version; + atomic64_t evict_ctr; + void (*release)(struct fuse_conn *); + struct rw_semaphore killsb; + struct list_head devices; + struct list_head mounts; + struct fuse_sync_bucket *curr_bucket; + struct idr backing_files_map; +}; + +struct fuse_sync_bucket { + atomic_t count; + wait_queue_head_t waitq; + struct callback_head rcu; +}; + +union fuse_dentry { + u64 time; + struct callback_head rcu; +}; + +enum key_notification_subtype { + NOTIFY_KEY_INSTANTIATED = 0, + NOTIFY_KEY_UPDATED = 1, + NOTIFY_KEY_LINKED = 2, + NOTIFY_KEY_UNLINKED = 3, + NOTIFY_KEY_CLEARED = 4, + NOTIFY_KEY_REVOKED = 5, + NOTIFY_KEY_INVALIDATED = 6, + NOTIFY_KEY_SETATTR = 7, +}; + +enum rsapubkey_actions { + ACT_rsa_get_e = 0, + ACT_rsa_get_n = 1, + NR__rsapubkey_actions = 2, +}; + +enum rsaprivkey_actions { + ACT_rsa_get_d = 0, + ACT_rsa_get_dp = 1, + ACT_rsa_get_dq = 2, + ACT_rsa_get_e___2 = 3, + ACT_rsa_get_n___2 = 4, + ACT_rsa_get_p = 5, + ACT_rsa_get_q = 6, + ACT_rsa_get_qinv = 7, + NR__rsaprivkey_actions = 8, +}; + +struct crypto_wait { + struct completion completion; + int err; +}; + +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void 
*__ctx[0]; +}; + +struct crypto_akcipher { + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct akcipher_alg { + int (*encrypt)(struct akcipher_request *); + int (*decrypt)(struct akcipher_request *); + int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int); + unsigned int (*max_size)(struct crypto_akcipher *); + int (*init)(struct crypto_akcipher *); + void (*exit)(struct crypto_akcipher *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct crypto_sig { + struct crypto_tfm base; +}; + +struct sig_alg { + int (*sign)(struct crypto_sig *, const void *, unsigned int, void *, unsigned int); + int (*verify)(struct crypto_sig *, const void *, unsigned int, const void *, unsigned int); + int (*set_pub_key)(struct crypto_sig *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_sig *, const void *, unsigned int); + unsigned int (*key_size)(struct crypto_sig *); + unsigned int (*digest_size)(struct crypto_sig *); + unsigned int (*max_size)(struct crypto_sig *); + int (*init)(struct crypto_sig *); + void (*exit)(struct crypto_sig *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +struct sig_instance { + void (*free)(struct sig_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[128]; + struct crypto_instance base; + }; + struct sig_alg alg; + }; +}; + +struct hash_prefix { + const char *name; + const u8 *data; + size_t size; +}; + +struct rsassa_pkcs1_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct rsassa_pkcs1_inst_ctx { + struct crypto_akcipher_spawn spawn; + const struct hash_prefix *hash_prefix; +}; + +typedef struct { + __be64 a; + __be64 b; +} be128; + +struct gcm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn ghash; +}; + +struct crypto_gcm_ctx { + struct crypto_skcipher *ctr; + struct crypto_ahash *ghash; +}; + +struct crypto_rfc4106_ctx { + struct crypto_aead *child; + u8 nonce[4]; +}; + +struct crypto_rfc4106_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct 
aead_request subreq; +}; + +struct crypto_rfc4543_instance_ctx { + struct crypto_aead_spawn aead; +}; + +struct crypto_rfc4543_ctx { + struct crypto_aead *child; + struct crypto_sync_skcipher *null; + u8 nonce[4]; +}; + +struct crypto_rfc4543_req_ctx { + struct aead_request subreq; +}; + +struct crypto_gcm_ghash_ctx { + unsigned int cryptlen; + struct scatterlist *src; + int (*complete)(struct aead_request *, u32); +}; + +struct crypto_gcm_req_priv_ctx { + u8 iv[16]; + u8 auth_tag[16]; + u8 iauth_tag[16]; + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct scatterlist sg; + struct crypto_gcm_ghash_ctx ghash_ctx; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + } u; +}; + +enum { + QUEUE_FLAG_DYING = 0, + QUEUE_FLAG_NOMERGES = 1, + QUEUE_FLAG_SAME_COMP = 2, + QUEUE_FLAG_FAIL_IO = 3, + QUEUE_FLAG_NOXMERGES = 4, + QUEUE_FLAG_SAME_FORCE = 5, + QUEUE_FLAG_INIT_DONE = 6, + QUEUE_FLAG_STATS = 7, + QUEUE_FLAG_REGISTERED = 8, + QUEUE_FLAG_QUIESCED = 9, + QUEUE_FLAG_RQ_ALLOC_TIME = 10, + QUEUE_FLAG_HCTX_ACTIVE = 11, + QUEUE_FLAG_SQ_SCHED = 12, + QUEUE_FLAG_MAX = 13, +}; + +struct queue_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct gendisk *, char *); + ssize_t (*store)(struct gendisk *, const char *, size_t); + void (*load_module)(struct gendisk *, const char *, size_t); +}; + +enum blktrace_cat { + BLK_TC_READ = 1, + BLK_TC_WRITE = 2, + BLK_TC_FLUSH = 4, + BLK_TC_SYNC = 8, + BLK_TC_SYNCIO = 8, + BLK_TC_QUEUE = 16, + BLK_TC_REQUEUE = 32, + BLK_TC_ISSUE = 64, + BLK_TC_COMPLETE = 128, + BLK_TC_FS = 256, + BLK_TC_PC = 512, + BLK_TC_NOTIFY = 1024, + BLK_TC_AHEAD = 2048, + BLK_TC_META = 4096, + BLK_TC_DISCARD = 8192, + BLK_TC_DRV_DATA = 16384, + BLK_TC_FUA = 32768, + BLK_TC_END = 32768, +}; + +struct io_ftrunc { + struct file *file; + long: 32; + loff_t len; +}; + +typedef unsigned char Byte; + +typedef unsigned int uInt; + +typedef long unsigned int uLong; + +struct internal_state; + +struct z_stream_s { + const Byte *next_in; + uLong avail_in; + uLong total_in; + Byte *next_out; + uLong avail_out; + uLong total_out; + char *msg; + struct internal_state *state; + void *workspace; + int data_type; + uLong adler; + uLong reserved; +}; + +typedef struct z_stream_s z_stream; + +typedef z_stream *z_streamp; + +typedef struct { + unsigned char op; + unsigned char bits; + short unsigned int val; +} code; + +typedef enum { + CODES = 0, + LENS = 1, + DISTS = 2, +} codetype; + +typedef enum { + HEAD = 0, + FLAGS = 1, + TIME = 2, + OS = 3, + EXLEN = 4, + EXTRA = 5, + NAME = 6, + COMMENT = 7, + HCRC = 8, + DICTID = 9, + DICT = 10, + TYPE = 11, + TYPEDO = 12, + STORED = 13, + COPY = 14, + TABLE = 15, + LENLENS = 16, + CODELENS = 17, + LEN = 18, + LENEXT = 19, + DIST = 20, + DISTEXT = 21, + MATCH = 22, + LIT = 23, + CHECK = 24, + LENGTH = 25, + DONE = 26, + BAD = 27, + MEM = 28, + SYNC = 29, +} inflate_mode; + +struct inflate_state { + inflate_mode mode; + int last; + int wrap; + int havedict; + int flags; + unsigned int dmax; + long unsigned int check; + long unsigned int total; + unsigned int wbits; + unsigned int wsize; + unsigned int whave; + unsigned int write; + unsigned char *window; + long unsigned int hold; + unsigned int bits; + unsigned int length; + unsigned int offset; + unsigned int extra; + const 
code *lencode; + const code *distcode; + unsigned int lenbits; + unsigned int distbits; + unsigned int ncode; + unsigned int nlen; + unsigned int ndist; + unsigned int have; + code *next; + short unsigned int lens[320]; + short unsigned int work[288]; + code codes[2048]; +}; + +struct inflate_workspace { + struct inflate_state inflate_state; + unsigned char working_window[32768]; +}; + +typedef unsigned char uch; + +typedef short unsigned int ush; + +typedef long unsigned int ulg; + +struct ct_data_s { + union { + ush freq; + ush code; + } fc; + union { + ush dad; + ush len; + } dl; +}; + +typedef struct ct_data_s ct_data; + +struct static_tree_desc_s { + const ct_data *static_tree; + const int *extra_bits; + int extra_base; + int elems; + int max_length; +}; + +typedef struct static_tree_desc_s static_tree_desc; + +struct tree_desc_s { + ct_data *dyn_tree; + int max_code; + static_tree_desc *stat_desc; +}; + +typedef ush Pos; + +typedef unsigned int IPos; + +struct deflate_state { + z_streamp strm; + int status; + Byte *pending_buf; + ulg pending_buf_size; + Byte *pending_out; + int pending; + int noheader; + Byte data_type; + Byte method; + int last_flush; + uInt w_size; + uInt w_bits; + uInt w_mask; + Byte *window; + ulg window_size; + Pos *prev; + Pos *head; + uInt ins_h; + uInt hash_size; + uInt hash_bits; + uInt hash_mask; + uInt hash_shift; + long int block_start; + uInt match_length; + IPos prev_match; + int match_available; + uInt strstart; + uInt match_start; + uInt lookahead; + uInt prev_length; + uInt max_chain_length; + uInt max_lazy_match; + int level; + int strategy; + uInt good_match; + int nice_match; + struct ct_data_s dyn_ltree[573]; + struct ct_data_s dyn_dtree[61]; + struct ct_data_s bl_tree[39]; + struct tree_desc_s l_desc; + struct tree_desc_s d_desc; + struct tree_desc_s bl_desc; + ush bl_count[16]; + int heap[573]; + int heap_len; + int heap_max; + uch depth[573]; + uch *l_buf; + uInt lit_bufsize; + uInt last_lit; + ush *d_buf; + ulg opt_len; + ulg static_len; + ulg compressed_len; + uInt matches; + int last_eob_len; + ush bi_buf; + int bi_valid; +}; + +typedef struct deflate_state deflate_state; + +typedef enum { + need_more = 0, + block_done = 1, + finish_started = 2, + finish_done = 3, +} block_state; + +typedef block_state (*compress_func)(deflate_state *, int); + +struct deflate_workspace { + deflate_state deflate_memory; + Byte *window_memory; + Pos *prev_memory; + Pos *head_memory; + char *overlay_memory; +}; + +typedef struct deflate_workspace deflate_workspace; + +struct config_s { + ush good_length; + ush max_lazy; + ush nice_length; + ush max_chain; + compress_func func; +}; + +typedef struct config_s config; + +struct font_data { + unsigned int extra[4]; + const unsigned char data[0]; +}; + +typedef int word_type; + +struct DWstruct { + int high; + int low; +}; + +typedef union { + struct DWstruct s; + long long int ll; +} DWunion; + +struct resource_entry { + struct list_head node; + struct resource *res; + resource_size_t offset; + struct resource __res; +}; + +struct pci_bus_resource { + struct list_head list; + struct resource *res; +}; + +struct dmt_videomode { + u32 dmt_id; + u32 std_2byte_code; + u32 cvt_3byte_code; + const struct fb_videomode *mode; +}; + +struct fb_modelist { + struct list_head list; + struct fb_videomode mode; +}; + +struct vt_notifier_param { + struct vc_data *vc; + unsigned int c; +}; + +struct vcs_poll_data { + struct notifier_block notifier; + unsigned int cons_num; + int event; + wait_queue_head_t waitq; + struct 
fasync_struct *fasync; +}; + +struct req { + struct req *next; + struct completion done; + int err; + const char *name; + umode_t mode; + kuid_t uid; + kgid_t gid; + struct device *dev; +}; + +struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +}; + +struct async_domain { + struct list_head pending; + unsigned int registered: 1; +}; + +enum { + GENHD_FL_REMOVABLE = 1, + GENHD_FL_HIDDEN = 2, + GENHD_FL_NO_PART = 4, +}; + +struct hd_geometry { + unsigned char heads; + unsigned char sectors; + short unsigned int cylinders; + long unsigned int start; +}; + +typedef bool busy_tag_iter_fn(struct request *, void *); + +enum { + BLK_MQ_F_SHOULD_MERGE = 1, + BLK_MQ_F_TAG_QUEUE_SHARED = 2, + BLK_MQ_F_STACKING = 4, + BLK_MQ_F_TAG_HCTX_SHARED = 8, + BLK_MQ_F_BLOCKING = 16, + BLK_MQ_F_NO_SCHED = 32, + BLK_MQ_F_NO_SCHED_BY_DEFAULT = 64, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 7, + BLK_MQ_F_ALLOC_POLICY_BITS = 1, +}; + +enum { + BLK_MQ_REQ_NOWAIT = 1, + BLK_MQ_REQ_RESERVED = 2, + BLK_MQ_REQ_PM = 4, +}; + +struct pr_keys { + u32 generation; + u32 num_keys; + u64 keys[0]; +}; + +struct pr_held_reservation { + u64 key; + u32 generation; + enum pr_type type; +}; + +enum nvme_subsys_type { + NVME_NQN_DISC = 1, + NVME_NQN_NVME = 2, + NVME_NQN_CURR = 3, +}; + +enum nvme_ctrl_type { + NVME_CTRL_IO = 1, + NVME_CTRL_DISC = 2, + NVME_CTRL_ADMIN = 3, +}; + +enum nvme_dctype { + NVME_DCTYPE_NOT_REPORTED = 0, + NVME_DCTYPE_DDC = 1, + NVME_DCTYPE_CDC = 2, +}; + +enum { + NVME_REG_CAP = 0, + NVME_REG_VS = 8, + NVME_REG_INTMS = 12, + NVME_REG_INTMC = 16, + NVME_REG_CC = 20, + NVME_REG_CSTS = 28, + NVME_REG_NSSR = 32, + NVME_REG_AQA = 36, + NVME_REG_ASQ = 40, + NVME_REG_ACQ = 48, + NVME_REG_CMBLOC = 56, + NVME_REG_CMBSZ = 60, + NVME_REG_BPINFO = 64, + NVME_REG_BPRSEL = 68, + NVME_REG_BPMBL = 72, + NVME_REG_CMBMSC = 80, + NVME_REG_CRTO = 104, + NVME_REG_PMRCAP = 3584, + NVME_REG_PMRCTL = 3588, + NVME_REG_PMRSTS = 3592, + NVME_REG_PMREBS = 3596, + NVME_REG_PMRSWTP = 3600, + NVME_REG_DBS = 4096, +}; + +enum { + NVME_CC_ENABLE = 1, + NVME_CC_EN_SHIFT = 0, + NVME_CC_CSS_SHIFT = 4, + NVME_CC_MPS_SHIFT = 7, + NVME_CC_AMS_SHIFT = 11, + NVME_CC_SHN_SHIFT = 14, + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_NVM = 0, + NVME_CC_CSS_CSI = 96, + NVME_CC_CSS_MASK = 112, + NVME_CC_AMS_RR = 0, + NVME_CC_AMS_WRRU = 2048, + NVME_CC_AMS_VS = 14336, + NVME_CC_SHN_NONE = 0, + NVME_CC_SHN_NORMAL = 16384, + NVME_CC_SHN_ABRUPT = 32768, + NVME_CC_SHN_MASK = 49152, + NVME_CC_IOSQES = 393216, + NVME_CC_IOCQES = 4194304, + NVME_CC_CRIME = 16777216, +}; + +enum { + NVME_CSTS_RDY = 1, + NVME_CSTS_CFS = 2, + NVME_CSTS_NSSRO = 16, + NVME_CSTS_PP = 32, + NVME_CSTS_SHST_NORMAL = 0, + NVME_CSTS_SHST_OCCUR = 4, + NVME_CSTS_SHST_CMPLT = 8, + NVME_CSTS_SHST_MASK = 12, +}; + +enum { + NVME_CAP_CSS_NVM = 1, + NVME_CAP_CSS_CSI = 64, +}; + +enum { + NVME_CAP_CRMS_CRWMS = 576460752303423488ULL, + NVME_CAP_CRMS_CRIMS = 1152921504606846976ULL, +}; + +struct nvme_id_power_state { + __le16 max_power; + __u8 rsvd2; + __u8 flags; + __le32 entry_lat; + __le32 exit_lat; + __u8 read_tput; + __u8 read_lat; + __u8 write_tput; + __u8 write_lat; + __le16 idle_power; + __u8 idle_scale; + __u8 rsvd19; + __le16 active_power; + __u8 active_work_scale; + __u8 rsvd23[9]; +}; + +enum { + NVME_PS_FLAGS_MAX_POWER_SCALE = 1, + NVME_PS_FLAGS_NON_OP_STATE = 2, +}; + +enum nvme_ctrl_attr { + NVME_CTRL_ATTR_HID_128_BIT = 1, + NVME_CTRL_ATTR_TBKAS = 64, + NVME_CTRL_ATTR_ELBAS = 32768, +}; + +struct 
nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 cmic; + __u8 mdts; + __le16 cntlid; + __le32 ver; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __u8 rsvd100[11]; + __u8 cntrltype; + __u8 fguid[16]; + __le16 crdt1; + __le16 crdt2; + __le16 crdt3; + __u8 rsvd134[122]; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 avscc; + __u8 apsta; + __le16 wctemp; + __le16 cctemp; + __le16 mtfa; + __le32 hmpre; + __le32 hmmin; + __u8 tnvmcap[16]; + __u8 unvmcap[16]; + __le32 rpmbs; + __le16 edstt; + __u8 dsto; + __u8 fwug; + __le16 kas; + __le16 hctma; + __le16 mntmt; + __le16 mxtmt; + __le32 sanicap; + __le32 hmminds; + __le16 hmmaxd; + __le16 nvmsetidmax; + __le16 endgidmax; + __u8 anatt; + __u8 anacap; + __le32 anagrpmax; + __le32 nanagrpid; + __u8 rsvd352[160]; + __u8 sqes; + __u8 cqes; + __le16 maxcmd; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 nvscc; + __u8 nwpc; + __le16 acwu; + __u8 rsvd534[2]; + __le32 sgls; + __le32 mnan; + __u8 rsvd544[224]; + char subnqn[256]; + __u8 rsvd1024[768]; + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 ctrattr; + __u8 msdbd; + __u8 rsvd1804[2]; + __u8 dctype; + __u8 rsvd1807[241]; + struct nvme_id_power_state psd[32]; + __u8 vs[1024]; +}; + +enum { + NVME_CTRL_CMIC_MULTI_PORT = 1, + NVME_CTRL_CMIC_MULTI_CTRL = 2, + NVME_CTRL_CMIC_ANA = 8, + NVME_CTRL_ONCS_COMPARE = 1, + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 2, + NVME_CTRL_ONCS_DSM = 4, + NVME_CTRL_ONCS_WRITE_ZEROES = 8, + NVME_CTRL_ONCS_RESERVATIONS = 32, + NVME_CTRL_ONCS_TIMESTAMP = 64, + NVME_CTRL_VWC_PRESENT = 1, + NVME_CTRL_OACS_SEC_SUPP = 1, + NVME_CTRL_OACS_NS_MNGT_SUPP = 8, + NVME_CTRL_OACS_DIRECTIVES = 32, + NVME_CTRL_OACS_DBBUF_SUPP = 256, + NVME_CTRL_LPA_CMD_EFFECTS_LOG = 2, + NVME_CTRL_CTRATT_128_ID = 1, + NVME_CTRL_CTRATT_NON_OP_PSP = 2, + NVME_CTRL_CTRATT_NVM_SETS = 4, + NVME_CTRL_CTRATT_READ_RECV_LVLS = 8, + NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 16, + NVME_CTRL_CTRATT_PREDICTABLE_LAT = 32, + NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 128, + NVME_CTRL_CTRATT_UUID_LIST = 512, + NVME_CTRL_SGLS_BYTE_ALIGNED = 1, + NVME_CTRL_SGLS_DWORD_ALIGNED = 2, + NVME_CTRL_SGLS_KSDBDS = 4, + NVME_CTRL_SGLS_MSDS = 524288, + NVME_CTRL_SGLS_SAOS = 1048576, +}; + +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; +}; + +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __u8 dlfeat; + __le16 nawun; + __le16 nawupf; + __le16 nacwu; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __le16 noiob; + __u8 nvmcap[16]; + __le16 npwg; + __le16 npwa; + __le16 npdg; + __le16 npda; + __le16 nows; + __u8 rsvd74[18]; + __le32 anagrpid; + __u8 rsvd96[3]; + __u8 nsattr; + __le16 nvmsetid; + __le16 endgid; + __u8 nguid[16]; + __u8 eui64[8]; + struct nvme_lbaf lbaf[64]; + __u8 vs[3712]; +}; + +struct nvme_id_ns_cs_indep { + __u8 nsfeat; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __le32 anagrpid; + __u8 nsattr; + __u8 rsvd9; + __le16 nvmsetid; + __le16 endgid; + __u8 nstat; + __u8 rsvd15[4081]; +}; + +struct nvme_id_ns_nvm { + __le64 lbstm; + __u8 pic; + __u8 rsvd9[3]; + __le32 elbaf[64]; + __u8 rsvd268[3828]; +}; + +enum { + NVME_ID_NS_NVM_STS_MASK = 127, + NVME_ID_NS_NVM_GUARD_SHIFT = 7, + NVME_ID_NS_NVM_GUARD_MASK = 3, + NVME_ID_NS_NVM_QPIF_SHIFT = 9, + NVME_ID_NS_NVM_QPIF_MASK = 15, + 
NVME_ID_NS_NVM_QPIFS = 8, +}; + +struct nvme_id_ctrl_nvm { + __u8 vsl; + __u8 wzsl; + __u8 wusl; + __u8 dmrl; + __le32 dmrsl; + __le64 dmsl; + __u8 rsvd16[4080]; +}; + +enum { + NVME_ID_CNS_NS = 0, + NVME_ID_CNS_CTRL = 1, + NVME_ID_CNS_NS_ACTIVE_LIST = 2, + NVME_ID_CNS_NS_DESC_LIST = 3, + NVME_ID_CNS_CS_NS = 5, + NVME_ID_CNS_CS_CTRL = 6, + NVME_ID_CNS_NS_ACTIVE_LIST_CS = 7, + NVME_ID_CNS_NS_CS_INDEP = 8, + NVME_ID_CNS_NS_PRESENT_LIST = 16, + NVME_ID_CNS_NS_PRESENT = 17, + NVME_ID_CNS_CTRL_NS_LIST = 18, + NVME_ID_CNS_CTRL_LIST = 19, + NVME_ID_CNS_SCNDRY_CTRL_LIST = 21, + NVME_ID_CNS_NS_GRANULARITY = 22, + NVME_ID_CNS_UUID_LIST = 23, + NVME_ID_CNS_ENDGRP_LIST = 25, +}; + +enum { + NVME_CSI_NVM = 0, + NVME_CSI_ZNS = 2, +}; + +enum { + NVME_NS_FEAT_THIN = 1, + NVME_NS_FEAT_ATOMICS = 2, + NVME_NS_FEAT_IO_OPT = 16, + NVME_NS_ATTR_RO = 1, + NVME_NS_FLBAS_LBA_MASK = 15, + NVME_NS_FLBAS_LBA_UMASK = 96, + NVME_NS_FLBAS_LBA_SHIFT = 1, + NVME_NS_FLBAS_META_EXT = 16, + NVME_NS_NMIC_SHARED = 1, + NVME_NS_ROTATIONAL = 16, + NVME_NS_VWC_NOT_PRESENT = 32, + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, + NVME_NS_DPC_PI_LAST = 16, + NVME_NS_DPC_PI_FIRST = 8, + NVME_NS_DPC_PI_TYPE3 = 4, + NVME_NS_DPC_PI_TYPE2 = 2, + NVME_NS_DPC_PI_TYPE1 = 1, + NVME_NS_DPS_PI_FIRST = 8, + NVME_NS_DPS_PI_MASK = 7, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, +}; + +enum { + NVME_NSTAT_NRDY = 1, +}; + +enum { + NVME_NVM_NS_16B_GUARD = 0, + NVME_NVM_NS_32B_GUARD = 1, + NVME_NVM_NS_64B_GUARD = 2, + NVME_NVM_NS_QTYPE_GUARD = 3, +}; + +struct nvme_ns_id_desc { + __u8 nidt; + __u8 nidl; + __le16 reserved; +}; + +enum { + NVME_NIDT_EUI64 = 1, + NVME_NIDT_NGUID = 2, + NVME_NIDT_UUID = 3, + NVME_NIDT_CSI = 4, +}; + +struct nvme_fw_slot_info_log { + __u8 afi; + __u8 rsvd1[7]; + __le64 frs[7]; + __u8 rsvd64[448]; +}; + +enum { + NVME_CMD_EFFECTS_CSUPP = 1, + NVME_CMD_EFFECTS_LBCC = 2, + NVME_CMD_EFFECTS_NCC = 4, + NVME_CMD_EFFECTS_NIC = 8, + NVME_CMD_EFFECTS_CCC = 16, + NVME_CMD_EFFECTS_CSER_MASK = 49152, + NVME_CMD_EFFECTS_CSE_MASK = 458752, + NVME_CMD_EFFECTS_UUID_SEL = 524288, + NVME_CMD_EFFECTS_SCOPE_MASK = 4293918720, +}; + +struct nvme_effects_log { + __le32 acs[256]; + __le32 iocs[256]; + __u8 resv[2048]; +}; + +enum { + NVME_AER_ERROR = 0, + NVME_AER_SMART = 1, + NVME_AER_NOTICE = 2, + NVME_AER_CSS = 6, + NVME_AER_VS = 7, +}; + +enum { + NVME_AER_ERROR_PERSIST_INT_ERR = 3, +}; + +enum { + NVME_AER_NOTICE_NS_CHANGED = 0, + NVME_AER_NOTICE_FW_ACT_STARTING = 1, + NVME_AER_NOTICE_ANA = 3, + NVME_AER_NOTICE_DISC_CHANGED = 240, +}; + +enum { + NVME_AEN_CFG_NS_ATTR = 256, + NVME_AEN_CFG_FW_ACT = 512, + NVME_AEN_CFG_ANA_CHANGE = 2048, + NVME_AEN_CFG_DISC_CHANGE = -2147483648, +}; + +struct nvme_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +struct nvme_keyed_sgl_desc { + __le64 addr; + __u8 length[3]; + __u8 key[4]; + __u8 type; +}; + +union nvme_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct nvme_sgl_desc sgl; + struct nvme_keyed_sgl_desc ksgl; +}; + +enum { + NVME_CMD_FUSE_FIRST = 1, + NVME_CMD_FUSE_SECOND = 2, + NVME_CMD_SGL_METABUF = 64, + NVME_CMD_SGL_METASEG = 128, + NVME_CMD_SGL_ALL = 192, +}; + +struct nvme_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + union { + struct { + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; + }; + struct { + 
__le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; + } cdws; + }; +}; + +struct nvme_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2; + __le32 cdw3; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 lbat; + __le16 lbatm; +}; + +enum { + NVME_RW_LR = 32768, + NVME_RW_FUA = 16384, + NVME_RW_APPEND_PIREMAP = 512, + NVME_RW_DSM_FREQ_UNSPEC = 0, + NVME_RW_DSM_FREQ_TYPICAL = 1, + NVME_RW_DSM_FREQ_RARE = 2, + NVME_RW_DSM_FREQ_READS = 3, + NVME_RW_DSM_FREQ_WRITES = 4, + NVME_RW_DSM_FREQ_RW = 5, + NVME_RW_DSM_FREQ_ONCE = 6, + NVME_RW_DSM_FREQ_PREFETCH = 7, + NVME_RW_DSM_FREQ_TEMP = 8, + NVME_RW_DSM_LATENCY_NONE = 0, + NVME_RW_DSM_LATENCY_IDLE = 16, + NVME_RW_DSM_LATENCY_NORM = 32, + NVME_RW_DSM_LATENCY_LOW = 48, + NVME_RW_DSM_SEQ_REQ = 64, + NVME_RW_DSM_COMPRESSED = 128, + NVME_RW_PRINFO_PRCHK_REF = 1024, + NVME_RW_PRINFO_PRCHK_APP = 2048, + NVME_RW_PRINFO_PRCHK_GUARD = 4096, + NVME_RW_PRINFO_PRACT = 8192, + NVME_RW_DTYPE_STREAMS = 16, + NVME_WZ_DEAC = 512, +}; + +struct nvme_dsm_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 nr; + __le32 attributes; + __u32 rsvd12[4]; +}; + +enum { + NVME_DSMGMT_IDR = 1, + NVME_DSMGMT_IDW = 2, + NVME_DSMGMT_AD = 4, +}; + +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +struct nvme_write_zeroes_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 lbat; + __le16 lbatm; +}; + +enum nvme_zone_mgmt_action { + NVME_ZONE_CLOSE = 1, + NVME_ZONE_FINISH = 2, + NVME_ZONE_OPEN = 3, + NVME_ZONE_RESET = 4, + NVME_ZONE_OFFLINE = 5, + NVME_ZONE_SET_DESC_EXT = 16, +}; + +struct nvme_zone_mgmt_send_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le32 cdw12; + __u8 zsa; + __u8 select_all; + __u8 rsvd13[2]; + __le32 cdw14[2]; +}; + +struct nvme_zone_mgmt_recv_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd2[2]; + union nvme_data_ptr dptr; + __le64 slba; + __le32 numd; + __u8 zra; + __u8 zrasf; + __u8 pr; + __u8 rsvd13; + __le32 cdw14[2]; +}; + +struct nvme_feat_auto_pst { + __le64 entries[32]; +}; + +struct nvme_feat_host_behavior { + __u8 acre; + __u8 etdas; + __u8 lbafee; + __u8 resv1[509]; +}; + +enum { + NVME_ENABLE_ACRE = 1, + NVME_ENABLE_LBAFEE = 1, +}; + +enum { + NVME_QUEUE_PHYS_CONTIG = 1, + NVME_CQ_IRQ_ENABLED = 2, + NVME_SQ_PRIO_URGENT = 0, + NVME_SQ_PRIO_HIGH = 2, + NVME_SQ_PRIO_MEDIUM = 4, + NVME_SQ_PRIO_LOW = 6, + NVME_FEAT_ARBITRATION = 1, + NVME_FEAT_POWER_MGMT = 2, + NVME_FEAT_LBA_RANGE = 3, + NVME_FEAT_TEMP_THRESH = 4, + NVME_FEAT_ERR_RECOVERY = 5, + NVME_FEAT_VOLATILE_WC = 6, + NVME_FEAT_NUM_QUEUES = 7, + NVME_FEAT_IRQ_COALESCE = 8, + NVME_FEAT_IRQ_CONFIG = 9, + NVME_FEAT_WRITE_ATOMIC = 10, + NVME_FEAT_ASYNC_EVENT = 11, + NVME_FEAT_AUTO_PST = 12, + NVME_FEAT_HOST_MEM_BUF = 13, + NVME_FEAT_TIMESTAMP = 14, + NVME_FEAT_KATO = 15, + NVME_FEAT_HCTM = 16, + NVME_FEAT_NOPSC = 17, + NVME_FEAT_RRL = 18, + NVME_FEAT_PLM_CONFIG = 19, + NVME_FEAT_PLM_WINDOW = 20, + NVME_FEAT_HOST_BEHAVIOR = 22, + NVME_FEAT_SANITIZE = 23, + NVME_FEAT_SW_PROGRESS = 128, + NVME_FEAT_HOST_ID = 129, + 
NVME_FEAT_RESV_MASK = 130, + NVME_FEAT_RESV_PERSIST = 131, + NVME_FEAT_WRITE_PROTECT = 132, + NVME_FEAT_VENDOR_START = 192, + NVME_FEAT_VENDOR_END = 255, + NVME_LOG_SUPPORTED = 0, + NVME_LOG_ERROR = 1, + NVME_LOG_SMART = 2, + NVME_LOG_FW_SLOT = 3, + NVME_LOG_CHANGED_NS = 4, + NVME_LOG_CMD_EFFECTS = 5, + NVME_LOG_DEVICE_SELF_TEST = 6, + NVME_LOG_TELEMETRY_HOST = 7, + NVME_LOG_TELEMETRY_CTRL = 8, + NVME_LOG_ENDURANCE_GROUP = 9, + NVME_LOG_ANA = 12, + NVME_LOG_FEATURES = 18, + NVME_LOG_RMI = 22, + NVME_LOG_DISC = 112, + NVME_LOG_RESERVATION = 128, + NVME_FWACT_REPL = 0, + NVME_FWACT_REPL_ACTV = 8, + NVME_FWACT_ACTV = 16, +}; + +struct nvme_identify { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 cns; + __u8 rsvd3; + __le16 ctrlid; + __le16 cnssid; + __u8 rsvd11; + __u8 csi; + __u32 rsvd12[4]; +}; + +struct nvme_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 fid; + __le32 dword11; + __le32 dword12; + __le32 dword13; + __le32 dword14; + __le32 dword15; +}; + +struct nvme_create_cq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct nvme_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct nvme_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct nvme_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 sqid; + __u16 cid; + __u32 rsvd11[5]; +}; + +struct nvme_download_firmware { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + union nvme_data_ptr dptr; + __le32 numd; + __le32 offset; + __u32 rsvd12[4]; +}; + +struct nvme_format_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[4]; + __le32 cdw10; + __u32 rsvd11[5]; +}; + +struct nvme_get_log_page_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 lid; + __u8 lsp; + __le16 numdl; + __le16 numdu; + __le16 lsi; + union { + struct { + __le32 lpol; + __le32 lpou; + }; + __le64 lpo; + }; + __u8 rsvd14[3]; + __u8 csi; + __u32 rsvd15; +}; + +struct nvme_directive_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 numd; + __u8 doper; + __u8 dtype; + __le16 dspec; + __u8 endir; + __u8 tdtype; + __u16 rsvd15; + __u32 rsvd16[3]; +}; + +enum nvmf_fabrics_opcode { + nvme_fabrics_command = 127, +}; + +struct nvmf_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 ts[24]; +}; + +struct nvmf_connect_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __le16 recfmt; + __le16 qid; + __le16 sqsize; + __u8 cattr; + __u8 resv3; + __le32 kato; + __u8 resv4[12]; +}; + +struct nvmf_property_set_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __le64 value; + __u8 resv4[8]; +}; + +struct nvmf_property_get_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + 
__u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __u8 resv4[16]; +}; + +struct nvmf_auth_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al_tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_send_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_receive_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al; + __u8 resv4[16]; +}; + +struct nvme_dbbuf { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __le64 prp2; + __u32 rsvd12[6]; +}; + +struct nvme_command { + union { + struct nvme_common_command common; + struct nvme_rw_command rw; + struct nvme_identify identify; + struct nvme_features features; + struct nvme_create_cq create_cq; + struct nvme_create_sq create_sq; + struct nvme_delete_queue delete_queue; + struct nvme_download_firmware dlfw; + struct nvme_format_cmd format; + struct nvme_dsm_cmd dsm; + struct nvme_write_zeroes_cmd write_zeroes; + struct nvme_zone_mgmt_send_cmd zms; + struct nvme_zone_mgmt_recv_cmd zmr; + struct nvme_abort_cmd abort; + struct nvme_get_log_page_command get_log_page; + struct nvmf_common_command fabrics; + struct nvmf_connect_command connect; + struct nvmf_property_set_command prop_set; + struct nvmf_property_get_command prop_get; + struct nvmf_auth_common_command auth_common; + struct nvmf_auth_send_command auth_send; + struct nvmf_auth_receive_command auth_receive; + struct nvme_dbbuf dbbuf; + struct nvme_directive_cmd directive; + }; +}; + +enum { + NVME_SCT_GENERIC = 0, + NVME_SC_SUCCESS = 0, + NVME_SC_INVALID_OPCODE = 1, + NVME_SC_INVALID_FIELD = 2, + NVME_SC_CMDID_CONFLICT = 3, + NVME_SC_DATA_XFER_ERROR = 4, + NVME_SC_POWER_LOSS = 5, + NVME_SC_INTERNAL = 6, + NVME_SC_ABORT_REQ = 7, + NVME_SC_ABORT_QUEUE = 8, + NVME_SC_FUSED_FAIL = 9, + NVME_SC_FUSED_MISSING = 10, + NVME_SC_INVALID_NS = 11, + NVME_SC_CMD_SEQ_ERROR = 12, + NVME_SC_SGL_INVALID_LAST = 13, + NVME_SC_SGL_INVALID_COUNT = 14, + NVME_SC_SGL_INVALID_DATA = 15, + NVME_SC_SGL_INVALID_METADATA = 16, + NVME_SC_SGL_INVALID_TYPE = 17, + NVME_SC_CMB_INVALID_USE = 18, + NVME_SC_PRP_INVALID_OFFSET = 19, + NVME_SC_ATOMIC_WU_EXCEEDED = 20, + NVME_SC_OP_DENIED = 21, + NVME_SC_SGL_INVALID_OFFSET = 22, + NVME_SC_RESERVED = 23, + NVME_SC_HOST_ID_INCONSIST = 24, + NVME_SC_KA_TIMEOUT_EXPIRED = 25, + NVME_SC_KA_TIMEOUT_INVALID = 26, + NVME_SC_ABORTED_PREEMPT_ABORT = 27, + NVME_SC_SANITIZE_FAILED = 28, + NVME_SC_SANITIZE_IN_PROGRESS = 29, + NVME_SC_SGL_INVALID_GRANULARITY = 30, + NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 31, + NVME_SC_NS_WRITE_PROTECTED = 32, + NVME_SC_CMD_INTERRUPTED = 33, + NVME_SC_TRANSIENT_TR_ERR = 34, + NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 36, + NVME_SC_INVALID_IO_CMD_SET = 44, + NVME_SC_LBA_RANGE = 128, + NVME_SC_CAP_EXCEEDED = 129, + NVME_SC_NS_NOT_READY = 130, + NVME_SC_RESERVATION_CONFLICT = 131, + NVME_SC_FORMAT_IN_PROGRESS = 132, + NVME_SCT_COMMAND_SPECIFIC = 256, + NVME_SC_CQ_INVALID = 256, + NVME_SC_QID_INVALID = 257, + NVME_SC_QUEUE_SIZE = 258, + NVME_SC_ABORT_LIMIT = 259, + NVME_SC_ABORT_MISSING = 260, + NVME_SC_ASYNC_LIMIT = 261, + NVME_SC_FIRMWARE_SLOT = 262, + NVME_SC_FIRMWARE_IMAGE = 263, 
+ NVME_SC_INVALID_VECTOR = 264, + NVME_SC_INVALID_LOG_PAGE = 265, + NVME_SC_INVALID_FORMAT = 266, + NVME_SC_FW_NEEDS_CONV_RESET = 267, + NVME_SC_INVALID_QUEUE = 268, + NVME_SC_FEATURE_NOT_SAVEABLE = 269, + NVME_SC_FEATURE_NOT_CHANGEABLE = 270, + NVME_SC_FEATURE_NOT_PER_NS = 271, + NVME_SC_FW_NEEDS_SUBSYS_RESET = 272, + NVME_SC_FW_NEEDS_RESET = 273, + NVME_SC_FW_NEEDS_MAX_TIME = 274, + NVME_SC_FW_ACTIVATE_PROHIBITED = 275, + NVME_SC_OVERLAPPING_RANGE = 276, + NVME_SC_NS_INSUFFICIENT_CAP = 277, + NVME_SC_NS_ID_UNAVAILABLE = 278, + NVME_SC_NS_ALREADY_ATTACHED = 280, + NVME_SC_NS_IS_PRIVATE = 281, + NVME_SC_NS_NOT_ATTACHED = 282, + NVME_SC_THIN_PROV_NOT_SUPP = 283, + NVME_SC_CTRL_LIST_INVALID = 284, + NVME_SC_SELT_TEST_IN_PROGRESS = 285, + NVME_SC_BP_WRITE_PROHIBITED = 286, + NVME_SC_CTRL_ID_INVALID = 287, + NVME_SC_SEC_CTRL_STATE_INVALID = 288, + NVME_SC_CTRL_RES_NUM_INVALID = 289, + NVME_SC_RES_ID_INVALID = 290, + NVME_SC_PMR_SAN_PROHIBITED = 291, + NVME_SC_ANA_GROUP_ID_INVALID = 292, + NVME_SC_ANA_ATTACH_FAILED = 293, + NVME_SC_BAD_ATTRIBUTES = 384, + NVME_SC_INVALID_PI = 385, + NVME_SC_READ_ONLY = 386, + NVME_SC_ONCS_NOT_SUPPORTED = 387, + NVME_SC_CONNECT_FORMAT = 384, + NVME_SC_CONNECT_CTRL_BUSY = 385, + NVME_SC_CONNECT_INVALID_PARAM = 386, + NVME_SC_CONNECT_RESTART_DISC = 387, + NVME_SC_CONNECT_INVALID_HOST = 388, + NVME_SC_DISCOVERY_RESTART = 400, + NVME_SC_AUTH_REQUIRED = 401, + NVME_SC_ZONE_BOUNDARY_ERROR = 440, + NVME_SC_ZONE_FULL = 441, + NVME_SC_ZONE_READ_ONLY = 442, + NVME_SC_ZONE_OFFLINE = 443, + NVME_SC_ZONE_INVALID_WRITE = 444, + NVME_SC_ZONE_TOO_MANY_ACTIVE = 445, + NVME_SC_ZONE_TOO_MANY_OPEN = 446, + NVME_SC_ZONE_INVALID_TRANSITION = 447, + NVME_SCT_MEDIA_ERROR = 512, + NVME_SC_WRITE_FAULT = 640, + NVME_SC_READ_ERROR = 641, + NVME_SC_GUARD_CHECK = 642, + NVME_SC_APPTAG_CHECK = 643, + NVME_SC_REFTAG_CHECK = 644, + NVME_SC_COMPARE_FAILED = 645, + NVME_SC_ACCESS_DENIED = 646, + NVME_SC_UNWRITTEN_BLOCK = 647, + NVME_SCT_PATH = 768, + NVME_SC_INTERNAL_PATH_ERROR = 768, + NVME_SC_ANA_PERSISTENT_LOSS = 769, + NVME_SC_ANA_INACCESSIBLE = 770, + NVME_SC_ANA_TRANSITION = 771, + NVME_SC_CTRL_PATH_ERROR = 864, + NVME_SC_HOST_PATH_ERROR = 880, + NVME_SC_HOST_ABORTED_CMD = 881, + NVME_SC_MASK = 255, + NVME_SCT_MASK = 1792, + NVME_SCT_SC_MASK = 2047, + NVME_STATUS_CRD = 6144, + NVME_STATUS_MORE = 8192, + NVME_STATUS_DNR = 16384, +}; + +union nvme_result { + __le16 u16; + __le32 u32; + __le64 u64; +}; + +enum nvme_quirks { + NVME_QUIRK_STRIPE_SIZE = 1, + NVME_QUIRK_IDENTIFY_CNS = 2, + NVME_QUIRK_DEALLOCATE_ZEROES = 4, + NVME_QUIRK_DELAY_BEFORE_CHK_RDY = 8, + NVME_QUIRK_NO_APST = 16, + NVME_QUIRK_NO_DEEPEST_PS = 32, + NVME_QUIRK_QDEPTH_ONE = 64, + NVME_QUIRK_MEDIUM_PRIO_SQ = 128, + NVME_QUIRK_IGNORE_DEV_SUBNQN = 256, + NVME_QUIRK_DISABLE_WRITE_ZEROES = 512, + NVME_QUIRK_SIMPLE_SUSPEND = 1024, + NVME_QUIRK_SINGLE_VECTOR = 2048, + NVME_QUIRK_128_BYTES_SQES = 4096, + NVME_QUIRK_SHARED_TAGS = 8192, + NVME_QUIRK_NO_TEMP_THRESH_CHANGE = 16384, + NVME_QUIRK_NO_NS_DESC_LIST = 32768, + NVME_QUIRK_DMA_ADDRESS_BITS_48 = 65536, + NVME_QUIRK_SKIP_CID_GEN = 131072, + NVME_QUIRK_BOGUS_NID = 262144, + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = 524288, + NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = 1048576, + NVME_QUIRK_BROKEN_MSI = 2097152, +}; + +struct nvme_ctrl; + +struct nvme_request { + struct nvme_command *cmd; + long: 32; + union nvme_result result; + u8 genctr; + u8 retries; + u8 flags; + u16 status; + struct nvme_ctrl *ctrl; + long: 32; +}; + +enum nvme_ctrl_state { + NVME_CTRL_NEW = 0, + NVME_CTRL_LIVE = 1, + 
NVME_CTRL_RESETTING = 2, + NVME_CTRL_CONNECTING = 3, + NVME_CTRL_DELETING = 4, + NVME_CTRL_DELETING_NOIO = 5, + NVME_CTRL_DEAD = 6, +}; + +struct nvme_fault_inject {}; + +struct nvme_ctrl_ops; + +struct nvme_subsystem; + +struct nvmf_ctrl_options; + +struct nvme_ctrl { + bool comp_seen; + bool identified; + bool passthru_err_log_enabled; + enum nvme_ctrl_state state; + spinlock_t lock; + struct mutex scan_lock; + const struct nvme_ctrl_ops *ops; + struct request_queue *admin_q; + struct request_queue *connect_q; + struct request_queue *fabrics_q; + struct device *dev; + int instance; + int numa_node; + struct blk_mq_tag_set *tagset; + struct blk_mq_tag_set *admin_tagset; + struct list_head namespaces; + struct mutex namespaces_lock; + struct srcu_struct srcu; + long: 32; + struct device ctrl_device; + struct device *device; + struct cdev cdev; + struct work_struct reset_work; + struct work_struct delete_work; + wait_queue_head_t state_wq; + struct nvme_subsystem *subsys; + struct list_head subsys_entry; + struct opal_dev *opal_dev; + u16 cntlid; + u16 mtfa; + u32 ctrl_config; + u32 queue_count; + u64 cap; + u32 max_hw_sectors; + u32 max_segments; + u32 max_integrity_segments; + u32 max_zeroes_sectors; + u16 crdt[3]; + u16 oncs; + u8 dmrl; + u32 dmrsl; + u16 oacs; + u16 sqsize; + u32 max_namespaces; + atomic_t abort_limit; + u8 vwc; + u32 vs; + u32 sgls; + u16 kas; + u8 npss; + u8 apsta; + u16 wctemp; + u16 cctemp; + u32 oaes; + u32 aen_result; + u32 ctratt; + unsigned int shutdown_timeout; + unsigned int kato; + bool subsystem; + long unsigned int quirks; + struct nvme_id_power_state psd[32]; + struct nvme_effects_log *effects; + struct xarray cels; + struct work_struct scan_work; + struct work_struct async_event_work; + struct delayed_work ka_work; + struct delayed_work failfast_work; + long: 32; + struct nvme_command ka_cmd; + long unsigned int ka_last_check_time; + struct work_struct fw_act_work; + long unsigned int events; + key_serial_t tls_pskid; + long: 32; + u64 ps_max_latency_us; + bool apst_enabled; + u16 hmmaxd; + u32 hmpre; + u32 hmmin; + u32 hmminds; + u32 ioccsz; + u32 iorcsz; + u16 icdoff; + u16 maxcmd; + int nr_reconnects; + long unsigned int flags; + struct nvmf_ctrl_options *opts; + struct page *discard_page; + long unsigned int discard_page_busy; + struct nvme_fault_inject fault_inject; + enum nvme_ctrl_type cntrltype; + enum nvme_dctype dctype; +}; + +enum { + NVME_REQ_CANCELLED = 1, + NVME_REQ_USERCMD = 2, + NVME_MPATH_IO_STATS = 4, + NVME_MPATH_CNT_ACTIVE = 8, +}; + +enum nvme_ctrl_flags { + NVME_CTRL_FAILFAST_EXPIRED = 0, + NVME_CTRL_ADMIN_Q_STOPPED = 1, + NVME_CTRL_STARTED_ONCE = 2, + NVME_CTRL_STOPPED = 3, + NVME_CTRL_SKIP_ID_CNS_CS = 4, + NVME_CTRL_DIRTY_CAPABILITY = 5, + NVME_CTRL_FROZEN = 6, +}; + +struct nvme_ctrl_ops { + const char *name; + struct module *module; + unsigned int flags; + const struct attribute_group **dev_attr_groups; + int (*reg_read32)(struct nvme_ctrl *, u32, u32 *); + int (*reg_write32)(struct nvme_ctrl *, u32, u32); + int (*reg_read64)(struct nvme_ctrl *, u32, u64 *); + void (*free_ctrl)(struct nvme_ctrl *); + void (*submit_async_event)(struct nvme_ctrl *); + int (*subsystem_reset)(struct nvme_ctrl *); + void (*delete_ctrl)(struct nvme_ctrl *); + void (*stop_ctrl)(struct nvme_ctrl *); + int (*get_address)(struct nvme_ctrl *, char *, int); + void (*print_device_info)(struct nvme_ctrl *); + bool (*supports_pci_p2pdma)(struct nvme_ctrl *); +}; + +struct nvme_subsystem { + int instance; + long: 32; + struct device dev; + struct kref ref; + 
struct list_head entry; + struct mutex lock; + struct list_head ctrls; + struct list_head nsheads; + char subnqn[223]; + char serial[20]; + char model[40]; + char firmware_rev[8]; + u8 cmic; + enum nvme_subsys_type subtype; + u16 vendor_id; + u16 awupf; + struct ida ns_ida; + long: 32; +}; + +struct nvmf_host; + +struct nvmf_ctrl_options { + unsigned int mask; + int max_reconnects; + char *transport; + char *subsysnqn; + char *traddr; + char *trsvcid; + char *host_traddr; + char *host_iface; + size_t queue_size; + unsigned int nr_io_queues; + unsigned int reconnect_delay; + bool discovery_nqn; + bool duplicate_connect; + unsigned int kato; + struct nvmf_host *host; + char *dhchap_secret; + char *dhchap_ctrl_secret; + struct key *keyring; + struct key *tls_key; + bool tls; + bool disable_sqflow; + bool hdr_digest; + bool data_digest; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; + int tos; + int fast_io_fail_tmo; +}; + +struct nvme_ns_ids { + u8 eui64[8]; + u8 nguid[16]; + uuid_t uuid; + u8 csi; +}; + +struct nvme_ns_head { + struct list_head list; + struct srcu_struct srcu; + struct nvme_subsystem *subsys; + struct nvme_ns_ids ids; + u8 lba_shift; + u16 ms; + u16 pi_size; + u8 pi_type; + u8 guard_type; + struct list_head entry; + struct kref ref; + bool shared; + bool rotational; + bool passthru_err_log_enabled; + struct nvme_effects_log *effects; + long: 32; + u64 nuse; + unsigned int ns_id; + int instance; + long unsigned int features; + struct ratelimit_state rs_nuse; + struct cdev cdev; + long: 32; + struct device cdev_device; + struct gendisk *disk; + long: 32; +}; + +enum nvme_ns_features { + NVME_NS_EXT_LBAS = 1, + NVME_NS_METADATA_SUPPORTED = 2, + NVME_NS_DEAC = 4, +}; + +struct nvme_ns { + struct list_head list; + struct nvme_ctrl *ctrl; + struct request_queue *queue; + struct gendisk *disk; + struct list_head siblings; + struct kref kref; + struct nvme_ns_head *head; + long unsigned int flags; + struct cdev cdev; + long: 32; + struct device cdev_device; + struct nvme_fault_inject fault_inject; +}; + +enum { + NVME_SUBMIT_AT_HEAD = 1, + NVME_SUBMIT_NOWAIT = 2, + NVME_SUBMIT_RESERVED = 4, + NVME_SUBMIT_RETRY = 8, +}; + +struct nvme_zone_info { + u64 zone_size; + unsigned int max_open_zones; + unsigned int max_active_zones; +}; + +struct nvmf_host { + struct kref ref; + struct list_head list; + char nqn[223]; + uuid_t id; +}; + +struct trace_event_raw_nvme_setup_cmd { + struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + u8 opcode; + u8 flags; + u8 fctype; + u16 cid; + u32 nsid; + bool metadata; + u8 cdw10[24]; + char __data[0]; +}; + +struct trace_event_raw_nvme_complete_rq { + struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + int cid; + long: 32; + u64 result; + u8 retries; + u8 flags; + u16 status; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_nvme_async_event { + struct trace_entry ent; + int ctrl_id; + u32 result; + char __data[0]; +}; + +struct trace_event_raw_nvme_sq { + struct trace_entry ent; + int ctrl_id; + char disk[32]; + int qid; + u16 sq_head; + u16 sq_tail; + char __data[0]; +}; + +struct trace_event_data_offsets_nvme_setup_cmd {}; + +struct trace_event_data_offsets_nvme_complete_rq {}; + +struct trace_event_data_offsets_nvme_async_event {}; + +struct trace_event_data_offsets_nvme_sq {}; + +typedef void (*btf_trace_nvme_setup_cmd)(void *, struct request *, struct nvme_command *); + +typedef void (*btf_trace_nvme_complete_rq)(void *, struct request *); + +typedef void (*btf_trace_nvme_async_event)(void 
*, struct nvme_ctrl *, u32); + +typedef void (*btf_trace_nvme_sq)(void *, struct request *, __le16, int); + +struct nvme_ns_info { + struct nvme_ns_ids ids; + u32 nsid; + __le32 anagrpid; + u8 pi_offset; + bool is_shared; + bool is_readonly; + bool is_ready; + bool is_removed; + bool is_rotational; + bool no_vwc; +}; + +enum nvme_disposition { + COMPLETE = 0, + RETRY = 1, + FAILOVER = 2, + AUTHENTICATE = 3, +}; + +struct nvme_core_quirk_entry { + u16 vid; + const char *mn; + const char *fr; + long unsigned int quirks; +}; + +struct async_scan_info { + struct nvme_ctrl *ctrl; + atomic_t next_nsid; + __le32 *ns_list; +}; + +enum mac_version { + RTL_GIGA_MAC_VER_02 = 0, + RTL_GIGA_MAC_VER_03 = 1, + RTL_GIGA_MAC_VER_04 = 2, + RTL_GIGA_MAC_VER_05 = 3, + RTL_GIGA_MAC_VER_06 = 4, + RTL_GIGA_MAC_VER_07 = 5, + RTL_GIGA_MAC_VER_08 = 6, + RTL_GIGA_MAC_VER_09 = 7, + RTL_GIGA_MAC_VER_10 = 8, + RTL_GIGA_MAC_VER_11 = 9, + RTL_GIGA_MAC_VER_14 = 10, + RTL_GIGA_MAC_VER_17 = 11, + RTL_GIGA_MAC_VER_18 = 12, + RTL_GIGA_MAC_VER_19 = 13, + RTL_GIGA_MAC_VER_20 = 14, + RTL_GIGA_MAC_VER_21 = 15, + RTL_GIGA_MAC_VER_22 = 16, + RTL_GIGA_MAC_VER_23 = 17, + RTL_GIGA_MAC_VER_24 = 18, + RTL_GIGA_MAC_VER_25 = 19, + RTL_GIGA_MAC_VER_26 = 20, + RTL_GIGA_MAC_VER_28 = 21, + RTL_GIGA_MAC_VER_29 = 22, + RTL_GIGA_MAC_VER_30 = 23, + RTL_GIGA_MAC_VER_31 = 24, + RTL_GIGA_MAC_VER_32 = 25, + RTL_GIGA_MAC_VER_33 = 26, + RTL_GIGA_MAC_VER_34 = 27, + RTL_GIGA_MAC_VER_35 = 28, + RTL_GIGA_MAC_VER_36 = 29, + RTL_GIGA_MAC_VER_37 = 30, + RTL_GIGA_MAC_VER_38 = 31, + RTL_GIGA_MAC_VER_39 = 32, + RTL_GIGA_MAC_VER_40 = 33, + RTL_GIGA_MAC_VER_42 = 34, + RTL_GIGA_MAC_VER_43 = 35, + RTL_GIGA_MAC_VER_44 = 36, + RTL_GIGA_MAC_VER_46 = 37, + RTL_GIGA_MAC_VER_48 = 38, + RTL_GIGA_MAC_VER_51 = 39, + RTL_GIGA_MAC_VER_52 = 40, + RTL_GIGA_MAC_VER_53 = 41, + RTL_GIGA_MAC_VER_61 = 42, + RTL_GIGA_MAC_VER_63 = 43, + RTL_GIGA_MAC_VER_64 = 44, + RTL_GIGA_MAC_VER_65 = 45, + RTL_GIGA_MAC_VER_66 = 46, + RTL_GIGA_MAC_NONE = 47, +}; + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *, struct phy_device *); + +struct phy_reg { + u16 reg; + u16 val; +}; + +enum reset_control_flags { + RESET_CONTROL_EXCLUSIVE = 4, + RESET_CONTROL_EXCLUSIVE_DEASSERTED = 12, + RESET_CONTROL_EXCLUSIVE_RELEASED = 0, + RESET_CONTROL_SHARED = 1, + RESET_CONTROL_SHARED_DEASSERTED = 9, + RESET_CONTROL_OPTIONAL_EXCLUSIVE = 6, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED = 14, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED = 2, + RESET_CONTROL_OPTIONAL_SHARED = 3, + RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED = 11, +}; + +struct xhci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); +}; + +typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); + +struct rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +enum mce_kbd_mode { + MCIR2_MODE_KEYBOARD = 0, + MCIR2_MODE_MOUSE = 1, + MCIR2_MODE_UNKNOWN = 2, +}; + +enum mce_kbd_state { + STATE_INACTIVE___3 = 0, + 
STATE_HEADER_BIT_START = 1, + STATE_HEADER_BIT_END = 2, + STATE_BODY_BIT_START = 3, + STATE_BODY_BIT_END = 4, + STATE_FINISHED___2 = 5, +}; + +struct mdp_superblock_1 { + __le32 magic; + __le32 major_version; + __le32 feature_map; + __le32 pad0; + __u8 set_uuid[16]; + char set_name[32]; + __le64 ctime; + __le32 level; + __le32 layout; + __le64 size; + __le32 chunksize; + __le32 raid_disks; + union { + __le32 bitmap_offset; + struct { + __le16 offset; + __le16 size; + } ppl; + }; + __le32 new_level; + __le64 reshape_position; + __le32 delta_disks; + __le32 new_layout; + __le32 new_chunk; + __le32 new_offset; + __le64 data_offset; + __le64 data_size; + __le64 super_offset; + union { + __le64 recovery_offset; + __le64 journal_tail; + }; + __le32 dev_number; + __le32 cnt_corrected_read; + __u8 device_uuid[16]; + __u8 devflags; + __u8 bblog_shift; + __le16 bblog_size; + __le32 bblog_offset; + __le64 utime; + __le64 events; + __le64 resync_offset; + __le32 sb_csum; + __le32 max_dev; + __u8 pad3[32]; + __le16 dev_roles[0]; +}; + +struct raid10_info { + struct md_rdev *rdev; + struct md_rdev *replacement; + sector_t head_position; + int recovery_disabled; + long: 32; +}; + +struct geom { + int raid_disks; + int near_copies; + int far_copies; + int far_offset; + sector_t stride; + int far_set_size; + int chunk_shift; + sector_t chunk_mask; +}; + +struct r10conf { + struct mddev *mddev; + struct raid10_info *mirrors; + struct raid10_info *mirrors_new; + struct raid10_info *mirrors_old; + spinlock_t device_lock; + long: 32; + struct geom prev; + struct geom geo; + int copies; + long: 32; + sector_t dev_sectors; + sector_t reshape_progress; + sector_t reshape_safe; + long unsigned int reshape_checkpoint; + long: 32; + sector_t offset_diff; + struct list_head retry_list; + struct list_head bio_end_io_list; + struct bio_list pending_bio_list; + seqlock_t resync_lock; + atomic_t nr_pending; + int nr_waiting; + int nr_queued; + int barrier; + int array_freeze_pending; + long: 32; + sector_t next_resync; + int fullsync; + int have_replacement; + wait_queue_head_t wait_barrier; + mempool_t r10bio_pool; + mempool_t r10buf_pool; + struct page *tmppage; + struct bio_set bio_split; + struct md_thread *thread; + long: 32; + sector_t cluster_sync_low; + sector_t cluster_sync_high; +}; + +struct r10dev { + struct bio *bio; + union { + struct bio *repl_bio; + struct md_rdev *rdev; + }; + sector_t addr; + int devnum; + long: 32; +}; + +struct r10bio { + atomic_t remaining; + long: 32; + sector_t sector; + int sectors; + long unsigned int state; + struct mddev *mddev; + struct bio *master_bio; + int read_slot; + struct list_head retry_list; + long: 32; + struct r10dev devs[0]; +}; + +enum r10bio_state { + R10BIO_Uptodate = 0, + R10BIO_IsSync = 1, + R10BIO_IsRecover = 2, + R10BIO_IsReshape = 3, + R10BIO_Degraded = 4, + R10BIO_ReadError = 5, + R10BIO_MadeGood = 6, + R10BIO_WriteError = 7, + R10BIO_Previous = 8, + R10BIO_FailFast = 9, + R10BIO_Discard = 10, +}; + +struct strip_zone { + sector_t zone_end; + sector_t dev_start; + int nb_dev; + int disk_shift; +}; + +enum r0layout { + RAID0_ORIG_LAYOUT = 1, + RAID0_ALT_MULTIZONE_LAYOUT = 2, +}; + +struct r0conf { + struct strip_zone *strip_zone; + struct md_rdev **devlist; + int nr_strip_zones; + enum r0layout layout; +}; + +enum geo_type { + geo_new = 0, + geo_old = 1, + geo_start = 2, +}; + +struct rsync_pages; + +enum { + NETNSA_NONE = 0, + NETNSA_NSID = 1, + NETNSA_PID = 2, + NETNSA_FD = 3, + NETNSA_TARGET_NSID = 4, + NETNSA_CURRENT_NSID = 5, + __NETNSA_MAX = 6, +}; + 
+struct net_fill_args { + u32 portid; + u32 seq; + int flags; + int cmd; + int nsid; + bool add_ref; + int ref_nsid; +}; + +struct rtnl_net_dump_cb { + struct net *tgt_net; + struct net *ref_net; + struct sk_buff *skb; + struct net_fill_args fillargs; + int idx; + int s_idx; +}; + +struct trace_event_raw_kfree_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + void *rx_sk; + short unsigned int protocol; + enum skb_drop_reason reason; + char __data[0]; +}; + +struct trace_event_raw_consume_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + char __data[0]; +}; + +struct trace_event_raw_skb_copy_datagram_iovec { + struct trace_entry ent; + const void *skbaddr; + int len; + char __data[0]; +}; + +struct trace_event_data_offsets_kfree_skb {}; + +struct trace_event_data_offsets_consume_skb {}; + +struct trace_event_data_offsets_skb_copy_datagram_iovec {}; + +typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *, enum skb_drop_reason, struct sock *); + +typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *, void *); + +typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int); + +struct trace_event_raw_net_dev_start_xmit { + struct trace_entry ent; + u32 __data_loc_name; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + unsigned int len; + unsigned int data_len; + int network_offset; + bool transport_offset_valid; + int transport_offset; + u8 tx_flags; + u16 gso_size; + u16 gso_segs; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + int rc; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit_timeout { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_driver; + int queue_index; + char __data[0]; +}; + +struct trace_event_raw_net_dev_template { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_verbose_template { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int napi_id; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + u32 hash; + bool l4_hash; + unsigned int len; + unsigned int data_len; + unsigned int truesize; + bool mac_header_valid; + int mac_header; + unsigned char nr_frags; + u16 gso_size; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_exit_template { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_net_dev_start_xmit { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_xmit { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_xmit_timeout { + u32 name; + const void *name_ptr_; + u32 driver; + const void *driver_ptr_; +}; + +struct trace_event_data_offsets_net_dev_template { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_rx_verbose_template { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_rx_exit_template {}; + +typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, const struct net_device *); + +typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, unsigned int); + +typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct 
net_device *, int); + +typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_rx)(void *, struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_exit)(void *, int); + +typedef void (*btf_trace_napi_gro_receive_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_exit)(void *, int); + +typedef void (*btf_trace_netif_rx_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int); + +struct trace_event_raw_napi_poll { + struct trace_entry ent; + struct napi_struct *napi; + u32 __data_loc_dev_name; + int work; + int budget; + char __data[0]; +}; + +struct trace_event_raw_dql_stall_detected { + struct trace_entry ent; + short unsigned int thrs; + unsigned int len; + long unsigned int last_reap; + long unsigned int hist_head; + long unsigned int now; + long unsigned int hist[4]; + char __data[0]; +}; + +struct trace_event_data_offsets_napi_poll { + u32 dev_name; + const void *dev_name_ptr_; +}; + +struct trace_event_data_offsets_dql_stall_detected {}; + +typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int); + +typedef void (*btf_trace_dql_stall_detected)(void *, short unsigned int, unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int *); + +enum tcp_ca_state { + TCP_CA_Open = 0, + TCP_CA_Disorder = 1, + TCP_CA_CWR = 2, + TCP_CA_Recovery = 3, + TCP_CA_Loss = 4, +}; + +struct trace_event_raw_sock_rcvqueue_full { + struct trace_entry ent; + int rmem_alloc; + unsigned int truesize; + int sk_rcvbuf; + char __data[0]; +}; + +struct trace_event_raw_sock_exceed_buf_limit { + struct trace_entry ent; + char name[32]; + long int sysctl_mem[3]; + long int allocated; + int sysctl_rmem; + int rmem_alloc; + int sysctl_wmem; + int wmem_alloc; + int wmem_queued; + int kind; + char __data[0]; +}; + +struct trace_event_raw_inet_sock_set_state { + struct trace_entry ent; + const void *skaddr; + int oldstate; + int newstate; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_inet_sk_error_report { + struct trace_entry ent; + int error; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_sk_data_ready { + struct trace_entry ent; + const void *skaddr; + __u16 family; + __u16 protocol; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_sock_msg_length { + struct trace_entry ent; + void *sk; + __u16 family; + __u16 protocol; + int ret; + int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_sock_rcvqueue_full {}; + +struct trace_event_data_offsets_sock_exceed_buf_limit {}; + +struct trace_event_data_offsets_inet_sock_set_state {}; + +struct trace_event_data_offsets_inet_sk_error_report {}; + +struct trace_event_data_offsets_sk_data_ready {}; + +struct 
trace_event_data_offsets_sock_msg_length {}; + +typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_sock_exceed_buf_limit)(void *, struct sock *, struct proto *, long int, int); + +typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, const int); + +typedef void (*btf_trace_inet_sk_error_report)(void *, const struct sock *); + +typedef void (*btf_trace_sk_data_ready)(void *, const struct sock *); + +typedef void (*btf_trace_sock_send_length)(void *, struct sock *, int, int); + +typedef void (*btf_trace_sock_recv_length)(void *, struct sock *, int, int); + +struct trace_event_raw_udp_fail_queue_rcv_skb { + struct trace_entry ent; + int rc; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; + +typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *, struct sk_buff *); + +struct trace_event_raw_tcp_event_sk_skb { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_send_reset { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + enum sk_rst_reason reason; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_sk { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + long: 32; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_raw_tcp_retransmit_synack { + struct trace_entry ent; + const void *skaddr; + const void *req; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_probe { + struct trace_entry ent; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u32 mark; + __u16 data_len; + __u32 snd_nxt; + __u32 snd_una; + __u32 snd_cwnd; + __u32 ssthresh; + __u32 snd_wnd; + __u32 srtt; + __u32 rcv_wnd; + long: 32; + __u64 sock_cookie; + const void *skbaddr; + const void *skaddr; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_skb { + struct trace_entry ent; + const void *skbaddr; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_raw_tcp_cong_state_set { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + __u8 cong_state; + char __data[0]; +}; + +struct trace_event_raw_tcp_hash_event { + struct trace_entry ent; + __u64 net_cookie; + const void *skbaddr; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + int l3index; + __u16 sport; + __u16 dport; + __u16 family; + bool fin; + bool syn; + bool rst; + bool psh; + bool ack; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_tcp_ao_event { + struct trace_entry ent; + __u64 net_cookie; + const void *skbaddr; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + int l3index; + __u16 sport; + __u16 dport; + __u16 family; + bool fin; + bool syn; + bool rst; + bool psh; + bool ack; + __u8 keyid; + __u8 rnext; + __u8 maclen; + char __data[0]; +}; + +struct 
trace_event_raw_tcp_ao_event_sk { + struct trace_entry ent; + __u64 net_cookie; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u8 keyid; + __u8 rnext; + char __data[0]; +}; + +struct trace_event_raw_tcp_ao_event_sne { + struct trace_entry ent; + __u64 net_cookie; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u32 new_sne; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_tcp_event_sk_skb {}; + +struct trace_event_data_offsets_tcp_send_reset {}; + +struct trace_event_data_offsets_tcp_event_sk {}; + +struct trace_event_data_offsets_tcp_retransmit_synack {}; + +struct trace_event_data_offsets_tcp_probe {}; + +struct trace_event_data_offsets_tcp_event_skb {}; + +struct trace_event_data_offsets_tcp_cong_state_set {}; + +struct trace_event_data_offsets_tcp_hash_event {}; + +struct trace_event_data_offsets_tcp_ao_event {}; + +struct trace_event_data_offsets_tcp_ao_event_sk {}; + +struct trace_event_data_offsets_tcp_ao_event_sne {}; + +typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *, const enum sk_rst_reason); + +typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *); + +typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *); + +typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *); + +typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, const struct request_sock *); + +typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_tcp_bad_csum)(void *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_cong_state_set)(void *, struct sock *, const u8); + +typedef void (*btf_trace_tcp_hash_bad_header)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_required)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_unexpected)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_mismatch)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_ao_required)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_ao_handshake_failure)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_wrong_maclen)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_mismatch)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_key_not_found)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_rnext_request)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_synack_no_key)(void *, const struct sock *, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_snd_sne_update)(void *, const struct sock *, __u32); + +typedef void (*btf_trace_tcp_ao_rcv_sne_update)(void *, const struct sock *, __u32); + +struct trace_event_raw_fib_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + u8 proto; + __u8 tos; + __u8 scope; + 
__u8 flags; + __u8 src[4]; + __u8 dst[4]; + __u8 gw4[4]; + __u8 gw6[16]; + u16 sport; + u16 dport; + char name[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib_table_lookup {}; + +typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, const struct fib_nh_common *, int); + +struct trace_event_raw_qdisc_dequeue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + int packets; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + long unsigned int txq_state; + char __data[0]; +}; + +struct trace_event_raw_qdisc_enqueue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + char __data[0]; +}; + +struct trace_event_raw_qdisc_reset { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_destroy { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_create { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + char __data[0]; +}; + +struct trace_event_data_offsets_qdisc_dequeue {}; + +struct trace_event_data_offsets_qdisc_enqueue {}; + +struct trace_event_data_offsets_qdisc_reset { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +struct trace_event_data_offsets_qdisc_destroy { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +struct trace_event_data_offsets_qdisc_create { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, const struct netdev_queue *, int, struct sk_buff *); + +typedef void (*btf_trace_qdisc_enqueue)(void *, struct Qdisc *, const struct netdev_queue *, struct sk_buff *); + +typedef void (*btf_trace_qdisc_reset)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, struct net_device *, u32); + +struct mac_addr { + unsigned char addr[6]; +}; + +typedef struct mac_addr mac_addr; + +struct net_bridge_fdb_key { + mac_addr addr; + u16 vlan_id; +}; + +struct net_bridge_fdb_entry { + struct rhash_head rhnode; + struct net_bridge_port *dst; + struct net_bridge_fdb_key key; + struct hlist_node fdb_node; + long unsigned int flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int updated; + long unsigned int used; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct trace_event_raw_br_fdb_add { + struct trace_entry ent; + u8 ndm_flags; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + u16 nlh_flags; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_external_learn_add { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 
__data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_fdb_delete { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_update { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_br_mdb_full { + struct trace_entry ent; + u32 __data_loc_dev; + int af; + u16 vid; + __u8 src[16]; + __u8 grp[16]; + __u8 grpmac[6]; + char __data[0]; +}; + +struct trace_event_data_offsets_br_fdb_add { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_fdb_external_learn_add { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_fdb_delete { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_fdb_update { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_mdb_full { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_br_fdb_add)(void *, struct ndmsg *, struct net_device *, const unsigned char *, u16, u16); + +typedef void (*btf_trace_br_fdb_external_learn_add)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16); + +typedef void (*btf_trace_fdb_delete)(void *, struct net_bridge *, struct net_bridge_fdb_entry *); + +typedef void (*btf_trace_br_fdb_update)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16, long unsigned int); + +typedef void (*btf_trace_br_mdb_full)(void *, const struct net_device *, const struct br_ip *); + +struct trace_event_raw_page_pool_release { + struct trace_entry ent; + const struct page_pool *pool; + s32 inflight; + u32 hold; + u32 release; + u64 cnt; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_release { + struct trace_entry ent; + const struct page_pool *pool; + long unsigned int netmem; + u32 release; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_hold { + struct trace_entry ent; + const struct page_pool *pool; + long unsigned int netmem; + u32 hold; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_update_nid { + struct trace_entry ent; + const struct page_pool *pool; + int pool_nid; + int new_nid; + char __data[0]; +}; + +struct trace_event_data_offsets_page_pool_release {}; + +struct trace_event_data_offsets_page_pool_state_release {}; + +struct trace_event_data_offsets_page_pool_state_hold {}; + +struct trace_event_data_offsets_page_pool_update_nid {}; + +typedef void (*btf_trace_page_pool_release)(void *, const struct page_pool *, s32, u32, u32); + +typedef void (*btf_trace_page_pool_state_release)(void *, const struct page_pool *, netmem_ref, u32); + +typedef void (*btf_trace_page_pool_state_hold)(void *, const struct page_pool *, netmem_ref, u32); + +typedef void (*btf_trace_page_pool_update_nid)(void *, const struct page_pool *, int); + +struct trace_event_raw_neigh_create { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + int entries; + u8 created; + u8 gc_exempt; + u8 primary_key4[4]; + u8 primary_key6[16]; + char __data[0]; +}; + +struct trace_event_raw_neigh_update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; 
+ u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u8 new_lladdr[32]; + u8 new_state; + u32 update_flags; + u32 pid; + char __data[0]; +}; + +struct trace_event_raw_neigh__update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u32 err; + char __data[0]; +}; + +struct trace_event_data_offsets_neigh_create { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_neigh_update { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_neigh__update { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, const void *, const struct neighbour *, bool); + +typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32); + +typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int); + +enum nfnl_acct_msg_types { + NFNL_MSG_ACCT_NEW = 0, + NFNL_MSG_ACCT_GET = 1, + NFNL_MSG_ACCT_GET_CTRZERO = 2, + NFNL_MSG_ACCT_DEL = 3, + NFNL_MSG_ACCT_OVERQUOTA = 4, + NFNL_MSG_ACCT_MAX = 5, +}; + +enum nfnl_acct_flags { + NFACCT_F_QUOTA_PKTS = 1, + NFACCT_F_QUOTA_BYTES = 2, + NFACCT_F_OVERQUOTA = 4, +}; + +enum nfnl_acct_type { + NFACCT_UNSPEC = 0, + NFACCT_NAME = 1, + NFACCT_PKTS = 2, + NFACCT_BYTES = 3, + NFACCT_USE = 4, + NFACCT_FLAGS = 5, + NFACCT_QUOTA = 6, + NFACCT_FILTER = 7, + NFACCT_PAD = 8, + __NFACCT_MAX = 9, +}; + +enum nfnl_attr_filter_type { + NFACCT_FILTER_UNSPEC = 0, + NFACCT_FILTER_MASK = 1, + NFACCT_FILTER_VALUE = 2, + __NFACCT_FILTER_MAX = 3, +}; + +enum { + NFACCT_NO_QUOTA = -1, + NFACCT_UNDERQUOTA = 0, + NFACCT_OVERQUOTA = 1, +}; + +struct nf_acct { + atomic64_t pkts; + atomic64_t bytes; + long unsigned int flags; + struct list_head head; + refcount_t refcnt; + char name[32]; + struct callback_head callback_head; + char data[0]; +}; + +struct nfacct_filter { + u32 value; + u32 mask; +}; + +struct nfnl_acct_net { + struct list_head nfnl_acct_list; +}; + +struct vlan_ethhdr { + union { + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + }; + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + } addrs; + }; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +enum nf_tables_msg_types { + NFT_MSG_NEWTABLE = 0, + NFT_MSG_GETTABLE = 1, + NFT_MSG_DELTABLE = 2, + NFT_MSG_NEWCHAIN = 3, + NFT_MSG_GETCHAIN = 4, + NFT_MSG_DELCHAIN = 5, + NFT_MSG_NEWRULE = 6, + NFT_MSG_GETRULE = 7, + NFT_MSG_DELRULE = 8, + NFT_MSG_NEWSET = 9, + NFT_MSG_GETSET = 10, + NFT_MSG_DELSET = 11, + NFT_MSG_NEWSETELEM = 12, + NFT_MSG_GETSETELEM = 13, + NFT_MSG_DELSETELEM = 14, + NFT_MSG_NEWGEN = 15, + NFT_MSG_GETGEN = 16, + NFT_MSG_TRACE = 17, + NFT_MSG_NEWOBJ = 18, + NFT_MSG_GETOBJ = 19, + NFT_MSG_DELOBJ = 20, + NFT_MSG_GETOBJ_RESET = 21, + NFT_MSG_NEWFLOWTABLE = 22, + NFT_MSG_GETFLOWTABLE = 23, + 
NFT_MSG_DELFLOWTABLE = 24, + NFT_MSG_GETRULE_RESET = 25, + NFT_MSG_DESTROYTABLE = 26, + NFT_MSG_DESTROYCHAIN = 27, + NFT_MSG_DESTROYRULE = 28, + NFT_MSG_DESTROYSET = 29, + NFT_MSG_DESTROYSETELEM = 30, + NFT_MSG_DESTROYOBJ = 31, + NFT_MSG_DESTROYFLOWTABLE = 32, + NFT_MSG_GETSETELEM_RESET = 33, + NFT_MSG_MAX = 34, +}; + +enum nft_trace_attributes { + NFTA_TRACE_UNSPEC = 0, + NFTA_TRACE_TABLE = 1, + NFTA_TRACE_CHAIN = 2, + NFTA_TRACE_RULE_HANDLE = 3, + NFTA_TRACE_TYPE = 4, + NFTA_TRACE_VERDICT = 5, + NFTA_TRACE_ID = 6, + NFTA_TRACE_LL_HEADER = 7, + NFTA_TRACE_NETWORK_HEADER = 8, + NFTA_TRACE_TRANSPORT_HEADER = 9, + NFTA_TRACE_IIF = 10, + NFTA_TRACE_IIFTYPE = 11, + NFTA_TRACE_OIF = 12, + NFTA_TRACE_OIFTYPE = 13, + NFTA_TRACE_MARK = 14, + NFTA_TRACE_NFPROTO = 15, + NFTA_TRACE_POLICY = 16, + NFTA_TRACE_PAD = 17, + __NFTA_TRACE_MAX = 18, +}; + +enum nft_trace_types { + NFT_TRACETYPE_UNSPEC = 0, + NFT_TRACETYPE_POLICY = 1, + NFT_TRACETYPE_RETURN = 2, + NFT_TRACETYPE_RULE = 3, + __NFT_TRACETYPE_MAX = 4, +}; + +enum { + NFT_PKTINFO_L4PROTO = 1, + NFT_PKTINFO_INNER = 2, + NFT_PKTINFO_INNER_FULL = 4, +}; + +struct nft_rule_dp { + u64 is_last: 1; + u64 dlen: 12; + u64 handle: 42; + long: 0; + unsigned char data[0]; +}; + +struct nft_rule_dp_last { + struct nft_rule_dp end; + struct callback_head h; + struct nft_rule_blob *blob; + const struct nft_chain *chain; +}; + +enum nft_chain_types { + NFT_CHAIN_T_DEFAULT = 0, + NFT_CHAIN_T_ROUTE = 1, + NFT_CHAIN_T_NAT = 2, + NFT_CHAIN_T_MAX = 3, +}; + +struct nft_chain_type { + const char *name; + enum nft_chain_types type; + int family; + struct module *owner; + unsigned int hook_mask; + nf_hookfn *hooks[6]; + int (*ops_register)(struct net *, const struct nf_hook_ops *); + void (*ops_unregister)(struct net *, const struct nf_hook_ops *); +}; + +struct nft_stats { + u64 bytes; + u64 pkts; + struct u64_stats_sync syncp; + long: 32; +}; + +struct nft_base_chain { + struct nf_hook_ops ops; + struct list_head hook_list; + const struct nft_chain_type *type; + u8 policy; + u8 flags; + struct nft_stats *stats; + long: 32; + struct nft_chain chain; + struct flow_block flow_block; +}; + +struct nft_traceinfo { + bool trace; + bool nf_trace; + bool packet_dumped; + enum nft_trace_types type: 8; + u32 skbid; + const struct nft_base_chain *basechain; +}; + +struct xt_log_info { + unsigned char level; + unsigned char logflags; + char prefix[30]; +}; + +struct nf_loginfo { + u_int8_t type; + union { + struct { + u_int32_t copy_len; + u_int16_t group; + u_int16_t qthreshold; + u_int16_t flags; + } ulog; + struct { + u_int8_t level; + u_int8_t logflags; + } log; + } u; +}; + +struct tcp_plb_state { + u8 consec_cong_rounds: 5; + u8 unused: 3; + u32 pause_until; +}; + +struct udp_tunnel_nic_ops { + void (*get_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + void (*set_port_priv)(struct net_device *, unsigned int, unsigned int, u8); + void (*add_port)(struct net_device *, struct udp_tunnel_info *); + void (*del_port)(struct net_device *, struct udp_tunnel_info *); + void (*reset_ntf)(struct net_device *); + size_t (*dump_size)(struct net_device *, unsigned int); + int (*dump_write)(struct net_device *, unsigned int, struct sk_buff *); +}; + +struct btf_id_set8 { + u32 cnt; + u32 flags; + struct { + u32 id; + u32 flags; + } pairs[0]; +}; + +typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *, u32); + +struct btf_kfunc_id_set { + struct module *owner; + struct btf_id_set8 *set; + btf_kfunc_filter_t filter; +}; + +typedef u64 
(*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32); + +struct bpf_struct_ops_tcp_congestion_ops { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct tcp_congestion_ops data; +}; + +enum br_boolopt_id { + BR_BOOLOPT_NO_LL_LEARN = 0, + BR_BOOLOPT_MCAST_VLAN_SNOOPING = 1, + BR_BOOLOPT_MST_ENABLE = 2, + BR_BOOLOPT_MAX = 3, +}; + +enum { + BR_GROUPFWD_STP = 1, + BR_GROUPFWD_MACPAUSE = 2, + BR_GROUPFWD_LACP = 4, +}; + +enum { + BR_FDB_LOCAL = 0, + BR_FDB_STATIC = 1, + BR_FDB_STICKY = 2, + BR_FDB_ADDED_BY_USER = 3, + BR_FDB_ADDED_BY_EXT_LEARN = 4, + BR_FDB_OFFLOADED = 5, + BR_FDB_NOTIFY = 6, + BR_FDB_NOTIFY_INACTIVE = 7, + BR_FDB_LOCKED = 8, + BR_FDB_DYNAMIC_LEARNED = 9, +}; + +struct net_bridge_fdb_flush_desc { + long unsigned int flags; + long unsigned int flags_mask; + int port_ifindex; + u16 vlan_id; +}; + +enum net_bridge_opts { + BROPT_VLAN_ENABLED = 0, + BROPT_VLAN_STATS_ENABLED = 1, + BROPT_NF_CALL_IPTABLES = 2, + BROPT_NF_CALL_IP6TABLES = 3, + BROPT_NF_CALL_ARPTABLES = 4, + BROPT_GROUP_ADDR_SET = 5, + BROPT_MULTICAST_ENABLED = 6, + BROPT_MULTICAST_QUERY_USE_IFADDR = 7, + BROPT_MULTICAST_STATS_ENABLED = 8, + BROPT_HAS_IPV6_ADDR = 9, + BROPT_NEIGH_SUPPRESS_ENABLED = 10, + BROPT_MTU_SET_BY_USER = 11, + BROPT_VLAN_STATS_PER_PORT = 12, + BROPT_NO_LL_LEARN = 13, + BROPT_VLAN_BRIDGE_BINDING = 14, + BROPT_MCAST_VLAN_SNOOPING_ENABLED = 15, + BROPT_MST_ENABLED = 16, +}; + +enum { + Root_NFS = 255, + Root_CIFS = 254, + Root_Generic = 253, + Root_RAM0 = 1048576, +}; + +enum rpc_display_format_t { + RPC_DISPLAY_ADDR = 0, + RPC_DISPLAY_PORT = 1, + RPC_DISPLAY_PROTO = 2, + RPC_DISPLAY_HEX_ADDR = 3, + RPC_DISPLAY_HEX_PORT = 4, + RPC_DISPLAY_NETID = 5, + RPC_DISPLAY_MAX = 6, +}; + +enum nfs_opnum4 { + OP_ACCESS = 3, + OP_CLOSE = 4, + OP_COMMIT = 5, + OP_CREATE = 6, + OP_DELEGPURGE = 7, + OP_DELEGRETURN = 8, + OP_GETATTR = 9, + OP_GETFH = 10, + OP_LINK = 11, + OP_LOCK = 12, + OP_LOCKT = 13, + OP_LOCKU = 14, + OP_LOOKUP = 15, + OP_LOOKUPP = 16, + OP_NVERIFY = 17, + OP_OPEN = 18, + OP_OPENATTR = 19, + OP_OPEN_CONFIRM = 20, + OP_OPEN_DOWNGRADE = 21, + OP_PUTFH = 22, + OP_PUTPUBFH = 23, + OP_PUTROOTFH = 24, + OP_READ = 25, + OP_READDIR = 26, + OP_READLINK = 27, + OP_REMOVE = 28, + OP_RENAME = 29, + OP_RENEW = 30, + OP_RESTOREFH = 31, + OP_SAVEFH = 32, + OP_SECINFO = 33, + OP_SETATTR = 34, + OP_SETCLIENTID = 35, + OP_SETCLIENTID_CONFIRM = 36, + OP_VERIFY = 37, + OP_WRITE = 38, + OP_RELEASE_LOCKOWNER = 39, + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + OP_ALLOCATE = 59, + OP_COPY = 60, + OP_COPY_NOTIFY = 61, + OP_DEALLOCATE = 62, + OP_IO_ADVISE = 63, + OP_LAYOUTERROR = 64, + OP_LAYOUTSTATS = 65, + OP_OFFLOAD_CANCEL = 66, + OP_OFFLOAD_STATUS = 67, + OP_READ_PLUS = 68, + OP_SEEK = 69, + OP_WRITE_SAME = 70, + OP_CLONE = 71, + 
OP_GETXATTR = 72, + OP_SETXATTR = 73, + OP_LISTXATTRS = 74, + OP_REMOVEXATTR = 75, + OP_ILLEGAL = 10044, +}; + +typedef struct { + seqcount_t seqcount; +} seqcount_latch_t; + +struct dev_printk_info { + char subsystem[16]; + char device[48]; +}; + +enum cons_flags { + CON_PRINTBUFFER = 1, + CON_CONSDEV = 2, + CON_ENABLED = 4, + CON_BOOT = 8, + CON_ANYTIME = 16, + CON_BRL = 32, + CON_EXTENDED = 64, + CON_SUSPENDED = 128, + CON_NBCON = 256, +}; + +enum nbcon_prio { + NBCON_PRIO_NONE = 0, + NBCON_PRIO_NORMAL = 1, + NBCON_PRIO_EMERGENCY = 2, + NBCON_PRIO_PANIC = 3, + NBCON_PRIO_MAX = 4, +}; + +struct console; + +struct printk_buffers; + +struct nbcon_context { + struct console *console; + unsigned int spinwait_max_us; + enum nbcon_prio prio; + unsigned int allow_unsafe_takeover: 1; + unsigned int backlog: 1; + struct printk_buffers *pbufs; + long: 32; + u64 seq; +}; + +struct nbcon_write_context; + +struct console { + char name[16]; + void (*write)(struct console *, const char *, unsigned int); + int (*read)(struct console *, char *, unsigned int); + struct tty_driver * (*device)(struct console *, int *); + void (*unblank)(); + int (*setup)(struct console *, char *); + int (*exit)(struct console *); + int (*match)(struct console *, char *, int, char *); + short int flags; + short int index; + int cflag; + uint ispeed; + uint ospeed; + long: 32; + u64 seq; + long unsigned int dropped; + void *data; + struct hlist_node node; + void (*write_atomic)(struct console *, struct nbcon_write_context *); + void (*write_thread)(struct console *, struct nbcon_write_context *); + void (*device_lock)(struct console *, long unsigned int *); + void (*device_unlock)(struct console *, long unsigned int); + atomic_t nbcon_state; + atomic_long_t nbcon_seq; + struct nbcon_context nbcon_device_ctxt; + atomic_long_t nbcon_prev_seq; + struct printk_buffers *pbufs; + struct task_struct *kthread; + struct rcuwait rcuwait; + struct irq_work irq_work; +}; + +struct printk_buffers { + char outbuf[2048]; + char scratchbuf[1024]; +}; + +struct nbcon_write_context { + struct nbcon_context ctxt; + char *outbuf; + unsigned int len; + bool unsafe_takeover; + long: 32; +}; + +enum con_flush_mode { + CONSOLE_FLUSH_PENDING = 0, + CONSOLE_REPLAY_ALL = 1, +}; + +struct syscore_ops { + struct list_head node; + int (*suspend)(); + void (*resume)(); + void (*shutdown)(); +}; + +enum kmsg_dump_reason { + KMSG_DUMP_UNDEF = 0, + KMSG_DUMP_PANIC = 1, + KMSG_DUMP_OOPS = 2, + KMSG_DUMP_EMERG = 3, + KMSG_DUMP_SHUTDOWN = 4, + KMSG_DUMP_MAX = 5, +}; + +struct kmsg_dump_iter { + u64 cur_seq; + u64 next_seq; +}; + +struct kmsg_dump_detail { + enum kmsg_dump_reason reason; + const char *description; +}; + +struct kmsg_dumper { + struct list_head list; + void (*dump)(struct kmsg_dumper *, struct kmsg_dump_detail *); + enum kmsg_dump_reason max_reason; + bool registered; +}; + +struct trace_event_raw_console { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_console { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_console)(void *, const char *, size_t); + +struct printk_info { + u64 seq; + u64 ts_nsec; + u16 text_len; + u8 facility; + u8 flags: 5; + u8 level: 3; + u32 caller_id; + struct dev_printk_info dev_info; +}; + +struct printk_record { + struct printk_info *info; + char *text_buf; + unsigned int text_buf_size; +}; + +struct prb_data_blk_lpos { + long unsigned int begin; + long unsigned int next; +}; + +struct prb_desc { + atomic_long_t state_var; + struct 
prb_data_blk_lpos text_blk_lpos; +}; + +struct prb_data_ring { + unsigned int size_bits; + char *data; + atomic_long_t head_lpos; + atomic_long_t tail_lpos; +}; + +struct prb_desc_ring { + unsigned int count_bits; + struct prb_desc *descs; + struct printk_info *infos; + atomic_long_t head_id; + atomic_long_t tail_id; + atomic_long_t last_finalized_seq; +}; + +struct printk_ringbuffer { + struct prb_desc_ring desc_ring; + struct prb_data_ring text_data_ring; + atomic_long_t fail; +}; + +struct prb_reserved_entry { + struct printk_ringbuffer *rb; + long unsigned int irqflags; + long unsigned int id; + unsigned int text_space; +}; + +enum desc_state { + desc_miss = -1, + desc_reserved = 0, + desc_committed = 1, + desc_finalized = 2, + desc_reusable = 3, +}; + +struct console_cmdline { + char name[16]; + int index; + char devname[32]; + bool user_specified; + char *options; +}; + +enum printk_info_flags { + LOG_FORCE_CON = 1, + LOG_NEWLINE = 2, + LOG_CONT = 8, +}; + +struct console_flush_type { + bool nbcon_atomic; + bool nbcon_offload; + bool legacy_direct; + bool legacy_offload; +}; + +struct printk_message { + struct printk_buffers *pbufs; + unsigned int outbuf_len; + u64 seq; + long unsigned int dropped; + long: 32; +}; + +enum devkmsg_log_bits { + __DEVKMSG_LOG_BIT_ON = 0, + __DEVKMSG_LOG_BIT_OFF = 1, + __DEVKMSG_LOG_BIT_LOCK = 2, +}; + +enum devkmsg_log_masks { + DEVKMSG_LOG_MASK_ON = 1, + DEVKMSG_LOG_MASK_OFF = 2, + DEVKMSG_LOG_MASK_LOCK = 4, +}; + +enum con_msg_format_flags { + MSG_FORMAT_DEFAULT = 0, + MSG_FORMAT_SYSLOG = 1, +}; + +struct latched_seq { + seqcount_latch_t latch; + long: 32; + u64 val[2]; +}; + +struct devkmsg_user { + atomic64_t seq; + struct ratelimit_state rs; + struct mutex lock; + struct printk_buffers pbufs; + long: 32; +}; + +struct sched_domain_attr { + int relax_domain_level; +}; + +struct uf_node { + struct uf_node *parent; + unsigned int rank; +}; + +struct fmeter { + int cnt; + int val; + time64_t time; + spinlock_t lock; + long: 32; +}; + +enum prs_errcode { + PERR_NONE = 0, + PERR_INVCPUS = 1, + PERR_INVPARENT = 2, + PERR_NOTPART = 3, + PERR_NOTEXCL = 4, + PERR_NOCPUS = 5, + PERR_HOTPLUG = 6, + PERR_CPUSEMPTY = 7, + PERR_HKEEPING = 8, + PERR_ACCESS = 9, +}; + +typedef enum { + CS_ONLINE = 0, + CS_CPU_EXCLUSIVE = 1, + CS_MEM_EXCLUSIVE = 2, + CS_MEM_HARDWALL = 3, + CS_MEMORY_MIGRATE = 4, + CS_SCHED_LOAD_BALANCE = 5, + CS_SPREAD_PAGE = 6, + CS_SPREAD_SLAB = 7, +} cpuset_flagbits_t; + +typedef enum { + FILE_MEMORY_MIGRATE = 0, + FILE_CPULIST = 1, + FILE_MEMLIST = 2, + FILE_EFFECTIVE_CPULIST = 3, + FILE_EFFECTIVE_MEMLIST = 4, + FILE_SUBPARTS_CPULIST = 5, + FILE_EXCLUSIVE_CPULIST = 6, + FILE_EFFECTIVE_XCPULIST = 7, + FILE_ISOLATED_CPULIST = 8, + FILE_CPU_EXCLUSIVE = 9, + FILE_MEM_EXCLUSIVE = 10, + FILE_MEM_HARDWALL = 11, + FILE_SCHED_LOAD_BALANCE = 12, + FILE_PARTITION_ROOT = 13, + FILE_SCHED_RELAX_DOMAIN_LEVEL = 14, + FILE_MEMORY_PRESSURE_ENABLED = 15, + FILE_MEMORY_PRESSURE = 16, + FILE_SPREAD_PAGE = 17, + FILE_SPREAD_SLAB = 18, +} cpuset_filetype_t; + +struct cpuset { + struct cgroup_subsys_state css; + long unsigned int flags; + cpumask_var_t cpus_allowed; + nodemask_t mems_allowed; + cpumask_var_t effective_cpus; + nodemask_t effective_mems; + cpumask_var_t effective_xcpus; + cpumask_var_t exclusive_cpus; + nodemask_t old_mems_allowed; + struct fmeter fmeter; + int attach_in_progress; + int relax_domain_level; + int nr_subparts; + int partition_root_state; + int nr_deadline_tasks; + int nr_migrate_dl_tasks; + u64 sum_migrate_dl_bw; + enum prs_errcode 
prs_err; + struct cgroup_file partition_file; + struct list_head remote_sibling; + struct uf_node node; +}; + +struct tmpmasks { + cpumask_var_t addmask; + cpumask_var_t delmask; + cpumask_var_t new_cpus; +}; + +enum partition_cmd { + partcmd_enable = 0, + partcmd_enablei = 1, + partcmd_disable = 2, + partcmd_update = 3, + partcmd_invalidate = 4, +}; + +struct cpuset_migrate_mm_work { + struct work_struct work; + struct mm_struct *mm; + nodemask_t from; + nodemask_t to; +}; + +struct trace_probe_log { + const char *subsystem; + const char **argv; + int argc; + int index; +}; + +struct bpf_mem_cache { + struct llist_head free_llist; + local_t active; + struct llist_head free_llist_extra; + struct irq_work refill_work; + struct obj_cgroup *objcg; + int unit_size; + int free_cnt; + int low_watermark; + int high_watermark; + int batch; + int percpu_size; + bool draining; + struct bpf_mem_cache *tgt; + struct llist_head free_by_rcu; + struct llist_node *free_by_rcu_tail; + struct llist_head waiting_for_gp; + struct llist_node *waiting_for_gp_tail; + struct callback_head rcu; + atomic_t call_rcu_in_progress; + struct llist_head free_llist_extra_rcu; + struct llist_head free_by_rcu_ttrace; + struct llist_head waiting_for_gp_ttrace; + struct callback_head rcu_ttrace; + atomic_t call_rcu_ttrace_in_progress; +}; + +struct bpf_mem_caches { + struct bpf_mem_cache cache[11]; +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + +struct bpf_struct_ops_arg_info { + struct bpf_ctx_arg_aux *info; + u32 cnt; +}; + +struct bpf_struct_ops_desc { + struct bpf_struct_ops *st_ops; + const struct btf_type *type; + const struct btf_type *value_type; + u32 type_id; + u32 value_id; + struct bpf_struct_ops_arg_info *arg_info; +}; + +struct bpf_struct_ops_value { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + char data[0]; +}; + +struct bpf_struct_ops_map { + struct bpf_map map; + const struct bpf_struct_ops_desc *st_ops_desc; + struct mutex lock; + struct bpf_link **links; + struct bpf_ksym **ksyms; + u32 funcs_cnt; + u32 image_pages_cnt; + void *image_pages[8]; + struct btf *btf; + struct bpf_struct_ops_value *uvalue; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct bpf_struct_ops_value kvalue; +}; + +struct bpf_struct_ops_link { + struct bpf_link link; + struct bpf_map *map; + wait_queue_head_t wait_hup; +}; + +enum { + IDX_MODULE_ID = 0, + IDX_ST_OPS_COMMON_VALUE_ID = 1, +}; + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 524288, + AOP_TRUNCATED_PAGE = 524289, +}; + +struct swap_iocb { + struct kiocb iocb; + struct bio_vec bvec[32]; + int pages; + int len; +}; + +enum objext_flags { + OBJEXTS_ALLOC_FAIL = 4, + __NR_OBJEXTS_FLAGS = 8, +}; + +typedef __kernel_ulong_t __kernel_ino_t; + +typedef unsigned int __kernel_mode_t; + +struct stat { + unsigned int st_dev; + long int 
st_pad1[3]; + __kernel_ino_t st_ino; + __kernel_mode_t st_mode; + __u32 st_nlink; + __kernel_uid32_t st_uid; + __kernel_gid32_t st_gid; + unsigned int st_rdev; + long int st_pad2[2]; + long int st_size; + long int st_pad3; + long int st_atime; + long int st_atime_nsec; + long int st_mtime; + long int st_mtime_nsec; + long int st_ctime; + long int st_ctime_nsec; + long int st_blksize; + long int st_blocks; + long int st_pad4[14]; +}; + +struct stat64 { + long unsigned int st_dev; + long unsigned int st_pad0[3]; + long long unsigned int st_ino; + __kernel_mode_t st_mode; + __u32 st_nlink; + __kernel_uid32_t st_uid; + __kernel_gid32_t st_gid; + long unsigned int st_rdev; + long unsigned int st_pad1[3]; + long long int st_size; + long int st_atime; + long unsigned int st_atime_nsec; + long int st_mtime; + long unsigned int st_mtime_nsec; + long int st_ctime; + long unsigned int st_ctime_nsec; + long unsigned int st_blksize; + long unsigned int st_pad2; + long long int st_blocks; +}; + +typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *); + +enum { + DIO_LOCKING = 1, + DIO_SKIP_HOLES = 2, +}; + +struct dio_submit { + struct bio *bio; + unsigned int blkbits; + unsigned int blkfactor; + unsigned int start_zero_done; + int pages_in_io; + long: 32; + sector_t block_in_file; + unsigned int blocks_available; + int reap_counter; + sector_t final_block_in_request; + int boundary; + get_block_t *get_block; + loff_t logical_offset_in_bio; + sector_t final_block_in_bio; + sector_t next_block_for_io; + struct page *cur_page; + unsigned int cur_page_offset; + unsigned int cur_page_len; + long: 32; + sector_t cur_page_block; + loff_t cur_page_fs_offset; + struct iov_iter *iter; + unsigned int head; + unsigned int tail; + size_t from; + size_t to; + long: 32; +}; + +struct dio { + int flags; + blk_opf_t opf; + struct gendisk *bio_disk; + struct inode *inode; + loff_t i_size; + dio_iodone_t *end_io; + bool is_pinned; + void *private; + spinlock_t bio_lock; + int page_errors; + int is_async; + bool defer_completion; + bool should_dirty; + int io_error; + long unsigned int refcount; + struct bio *bio_list; + struct task_struct *waiter; + struct kiocb *iocb; + ssize_t result; + union { + struct page *pages[64]; + struct work_struct complete_work; + }; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef __kernel_long_t __kernel_off_t; + +typedef __kernel_off_t off_t; + +struct disk_stats { + u64 nsecs[4]; + long unsigned int sectors[4]; + long unsigned int ios[4]; + long unsigned int merges[4]; + long unsigned int io_ticks; + local_t in_flight[2]; + long: 32; +}; + +enum stat_group { + STAT_READ = 0, + STAT_WRITE = 1, + STAT_DISCARD = 2, + STAT_FLUSH = 3, + NR_STAT_GROUPS = 4, +}; + +enum { + attr_noop = 0, + attr_delayed_allocation_blocks = 1, + attr_session_write_kbytes = 2, + attr_lifetime_write_kbytes = 3, + attr_reserved_clusters = 4, + attr_sra_exceeded_retry_limit = 5, + attr_inode_readahead = 6, + attr_trigger_test_error = 7, + attr_first_error_time = 8, + attr_last_error_time = 9, + attr_clusters_in_group = 10, + attr_mb_order = 11, + attr_feature = 12, + attr_pointer_pi = 13, + attr_pointer_ui = 14, + attr_pointer_ul = 15, + attr_pointer_u64 = 16, + attr_pointer_u8 = 17, + attr_pointer_string = 18, + attr_pointer_atomic = 19, + attr_journal_task = 20, +}; + +enum { + ptr_explicit = 0, + ptr_ext4_sb_info_offset = 1, + ptr_ext4_super_block_offset = 2, +}; + +struct ext4_attr 
{ + struct attribute attr; + short int attr_id; + short int attr_ptr; + short unsigned int attr_size; + union { + int offset; + void *explicit_ptr; + } u; +}; + +struct timezone { + int tz_minuteswest; + int tz_dsttime; +}; + +struct tm { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long int tm_year; + int tm_wday; + int tm_yday; +}; + +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + S_VERSION = 8, +}; + +struct fat_boot_fsinfo { + __le32 signature1; + __le32 reserved1[120]; + __le32 signature2; + __le32 free_clusters; + __le32 next_cluster; + __le32 reserved2[4]; +}; + +typedef long long unsigned int llu; + +struct debugfs_blob_wrapper { + void *data; + long unsigned int size; +}; + +struct debugfs_reg32 { + char *name; + long unsigned int offset; +}; + +struct debugfs_regset32 { + const struct debugfs_reg32 *regs; + int nregs; + void *base; + struct device *dev; +}; + +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + +typedef struct vfsmount * (*debugfs_automount_t)(struct dentry *, void *); + +struct debugfs_short_fops { + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + loff_t (*llseek)(struct file *, loff_t, int); +}; + +struct debugfs_cancellation { + struct list_head list; + void (*cancel)(struct dentry *, void *); + void *cancel_data; +}; + +struct debugfs_fsdata { + const struct file_operations *real_fops; + const struct debugfs_short_fops *short_fops; + union { + debugfs_automount_t automount; + struct { + refcount_t active_users; + struct completion active_users_drained; + struct mutex cancellations_mtx; + struct list_head cancellations; + }; + }; +}; + +struct debugfs_devm_entry { + int (*read)(struct seq_file *, void *); + struct device *dev; +}; + +struct btrfs_free_space_entry { + __le64 offset; + __le64 bytes; + __u8 type; +} __attribute__((packed)); + +struct btrfs_free_space_header { + struct btrfs_disk_key location; + __le64 generation; + __le64 num_entries; + __le64 num_bitmaps; +} __attribute__((packed)); + +enum btrfs_disk_cache_state { + BTRFS_DC_WRITTEN = 0, + BTRFS_DC_ERROR = 1, + BTRFS_DC_CLEAR = 2, + BTRFS_DC_SETUP = 3, +}; + +enum btrfs_block_group_flags { + BLOCK_GROUP_FLAG_IREF = 0, + BLOCK_GROUP_FLAG_REMOVED = 1, + BLOCK_GROUP_FLAG_TO_COPY = 2, + BLOCK_GROUP_FLAG_RELOCATING_REPAIR = 3, + BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED = 4, + BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE = 5, + BLOCK_GROUP_FLAG_ZONED_DATA_RELOC = 6, + BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE = 7, + BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE = 8, + BLOCK_GROUP_FLAG_NEW = 9, +}; + +struct btrfs_trim_range { + u64 start; + u64 bytes; + struct list_head list; +}; + +typedef int __kernel_key_t; + +typedef int __kernel_ipc_pid_t; + +typedef unsigned int __kernel_uid_t; + +typedef unsigned int __kernel_gid_t; + +typedef __kernel_key_t key_t; + +enum { + HUGETLB_SHMFS_INODE = 1, + HUGETLB_ANONHUGE_INODE = 2, +}; + +struct ipc_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + short unsigned int seq; +}; + +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned char __pad1[0]; + short unsigned int seq; + short unsigned int __pad2; + __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; +}; + +struct kern_ipc_perm { + spinlock_t lock; + bool deleted; + int id; + key_t 
key; + kuid_t uid; + kgid_t gid; + kuid_t cuid; + kgid_t cgid; + umode_t mode; + long unsigned int seq; + void *security; + struct rhash_head khtnode; + struct callback_head rcu; + refcount_t refcount; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct shmid_ds { + struct ipc_perm shm_perm; + int shm_segsz; + __kernel_old_time_t shm_atime; + __kernel_old_time_t shm_dtime; + __kernel_old_time_t shm_ctime; + __kernel_ipc_pid_t shm_cpid; + __kernel_ipc_pid_t shm_lpid; + short unsigned int shm_nattch; + short unsigned int shm_unused; + void *shm_unused2; + void *shm_unused3; +}; + +struct shmid64_ds { + struct ipc64_perm shm_perm; + __kernel_size_t shm_segsz; + long unsigned int shm_atime; + long unsigned int shm_dtime; + long unsigned int shm_ctime; + __kernel_pid_t shm_cpid; + __kernel_pid_t shm_lpid; + long unsigned int shm_nattch; + short unsigned int shm_atime_high; + short unsigned int shm_dtime_high; + short unsigned int shm_ctime_high; + short unsigned int __unused1; +}; + +struct shminfo64 { + long unsigned int shmmax; + long unsigned int shmmin; + long unsigned int shmmni; + long unsigned int shmseg; + long unsigned int shmall; + long unsigned int __unused1; + long unsigned int __unused2; + long unsigned int __unused3; + long unsigned int __unused4; +}; + +struct shminfo { + int shmmax; + int shmmin; + int shmmni; + int shmseg; + int shmall; +}; + +struct shm_info { + int used_ids; + __kernel_ulong_t shm_tot; + __kernel_ulong_t shm_rss; + __kernel_ulong_t shm_swp; + __kernel_ulong_t swap_attempts; + __kernel_ulong_t swap_successes; +}; + +struct shared_policy {}; + +struct shmem_inode_info { + spinlock_t lock; + unsigned int seals; + long unsigned int flags; + long unsigned int alloced; + long unsigned int swapped; + union { + struct offset_ctx dir_offsets; + struct { + struct list_head shrinklist; + struct list_head swaplist; + }; + }; + long: 32; + struct timespec64 i_crtime; + struct shared_policy policy; + struct simple_xattrs xattrs; + long unsigned int fallocend; + unsigned int fsflags; + atomic_t stop_eviction; + struct inode vfs_inode; +}; + +struct ipc_params { + key_t key; + int flg; + union { + size_t size; + int nsems; + } u; +}; + +struct ipc_ops { + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); +}; + +struct shmid_kernel { + struct kern_ipc_perm shm_perm; + struct file *shm_file; + long unsigned int shm_nattch; + long unsigned int shm_segsz; + long: 32; + time64_t shm_atim; + time64_t shm_dtim; + time64_t shm_ctim; + struct pid *shm_cprid; + struct pid *shm_lprid; + struct ucounts *mlock_ucounts; + struct task_struct *shm_creator; + struct list_head shm_clist; + struct ipc_namespace *ns; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct shm_file_data { + int id; + struct ipc_namespace *ns; + struct file *file; + const struct vm_operations_struct *vm_ops; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +struct hmac_ctx { + struct crypto_shash *hash; + u8 pads[0]; +}; + +struct xor_block_template { + struct xor_block_template *next; + const char *name; + int speed; + void (*do_2)(long unsigned int, long unsigned int * 
restrict, const long unsigned int * restrict); + void (*do_3)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); + void (*do_4)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); + void (*do_5)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; + struct list_head queued[2]; + unsigned int nr_queued[2]; + struct rb_root_cached pending_tree; + unsigned int nr_pending; + long unsigned int first_pending_disptime; + struct timer_list pending_timer; +}; + +struct throtl_data { + struct throtl_service_queue service_queue; + struct request_queue *queue; + unsigned int nr_queued[2]; + unsigned int throtl_slice; + struct work_struct dispatch_work; + bool track_bio_latency; +}; + +struct throtl_grp; + +struct throtl_qnode { + struct list_head node; + struct bio_list bios; + struct throtl_grp *tg; +}; + +struct throtl_grp { + struct blkg_policy_data pd; + struct rb_node rb_node; + struct throtl_data *td; + struct throtl_service_queue service_queue; + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + long unsigned int disptime; + unsigned int flags; + bool has_rules_bps[2]; + bool has_rules_iops[2]; + uint64_t bps[2]; + unsigned int iops[2]; + uint64_t bytes_disp[2]; + unsigned int io_disp[2]; + uint64_t last_bytes_disp[2]; + unsigned int last_io_disp[2]; + long long int carryover_bytes[2]; + int carryover_ios[2]; + long unsigned int last_check_time; + long unsigned int slice_start[2]; + long unsigned int slice_end[2]; + long: 32; + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; +}; + +enum tg_state_flags { + THROTL_TG_PENDING = 1, + THROTL_TG_WAS_EMPTY = 2, + THROTL_TG_CANCELING = 4, +}; + +enum io_uring_msg_ring_flags { + IORING_MSG_DATA = 0, + IORING_MSG_SEND_FD = 1, +}; + +enum { + IOU_F_TWQ_LAZY_WAKE = 1, +}; + +struct io_msg { + struct file *file; + struct file *src_file; + struct callback_head tw; + u64 user_data; + u32 len; + u32 cmd; + u32 src_fd; + union { + u32 dst_fd; + u32 cqe_flags; + }; + u32 flags; + long: 32; +}; + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32, + BLAKE2S_128_HASH_SIZE = 16, + BLAKE2S_160_HASH_SIZE = 20, + BLAKE2S_224_HASH_SIZE = 28, + BLAKE2S_256_HASH_SIZE = 32, +}; + +struct blake2s_state { + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[64]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2s_iv { + BLAKE2S_IV0 = 1779033703, + BLAKE2S_IV1 = 3144134277, + BLAKE2S_IV2 = 1013904242, + BLAKE2S_IV3 = 2773480762, + BLAKE2S_IV4 = 1359893119, + BLAKE2S_IV5 = 2600822924, + BLAKE2S_IV6 = 528734635, + BLAKE2S_IV7 = 1541459225, +}; + +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +struct node_groups { + unsigned int id; + union { + unsigned int ngroups; + unsigned int ncpus; + }; +}; + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *, char *); + ssize_t (*store)(struct device_driver *, const char *, size_t); +}; + +struct pci_dynid { + struct list_head node; + struct pci_device_id id; +}; + +struct 
drv_dev_and_id { + struct pci_driver *drv; + struct pci_dev *dev; + const struct pci_device_id *id; +}; + +struct n_tty_data { + size_t read_head; + size_t commit_head; + size_t canon_head; + size_t echo_head; + size_t echo_commit; + size_t echo_mark; + long unsigned int char_map[8]; + long unsigned int overrun_time; + unsigned int num_overrun; + bool no_room; + unsigned char lnext: 1; + unsigned char erasing: 1; + unsigned char raw: 1; + unsigned char real_raw: 1; + unsigned char icanon: 1; + unsigned char push: 1; + u8 read_buf[4096]; + long unsigned int read_flags[128]; + u8 echo_buf[4096]; + size_t read_tail; + size_t line_start; + size_t lookahead_count; + unsigned int column; + unsigned int canon_column; + size_t echo_tail; + struct mutex atomic_read_lock; + struct mutex output_lock; +}; + +enum { + ERASE = 0, + WERASE = 1, + KILL = 2, +}; + +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 args[8]; +}; + +struct swnode { + struct kobject kobj; + struct fwnode_handle fwnode; + const struct software_node *node; + int id; + struct ida child_ids; + struct list_head entry; + struct list_head children; + struct swnode *parent; + unsigned int allocated: 1; + unsigned int managed: 1; +}; + +struct request_sense; + +struct cdrom_generic_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct request_sense *sense; + unsigned char data_direction; + int quiet; + int timeout; + union { + void *reserved[1]; + void *unused; + }; +}; + +struct request_sense { + __u8 valid: 1; + __u8 error_code: 7; + __u8 segment_number; + __u8 reserved1: 2; + __u8 ili: 1; + __u8 reserved2: 1; + __u8 sense_key: 4; + __u8 information[4]; + __u8 add_sense_len; + __u8 command_info[4]; + __u8 asc; + __u8 ascq; + __u8 fruc; + __u8 sks[3]; + __u8 asb[46]; +}; + +enum scsi_msg_byte { + COMMAND_COMPLETE = 0, + EXTENDED_MESSAGE = 1, + SAVE_POINTERS = 2, + RESTORE_POINTERS = 3, + DISCONNECT = 4, + INITIATOR_ERROR = 5, + ABORT_TASK_SET = 6, + MESSAGE_REJECT = 7, + NOP = 8, + MSG_PARITY_ERROR = 9, + LINKED_CMD_COMPLETE = 10, + LINKED_FLG_CMD_COMPLETE = 11, + TARGET_RESET = 12, + ABORT_TASK = 13, + CLEAR_TASK_SET = 14, + INITIATE_RECOVERY = 15, + RELEASE_RECOVERY = 16, + TERMINATE_IO_PROC = 17, + CLEAR_ACA = 22, + LOGICAL_UNIT_RESET = 23, + SIMPLE_QUEUE_TAG = 32, + HEAD_OF_QUEUE_TAG = 33, + ORDERED_QUEUE_TAG = 34, + IGNORE_WIDE_RESIDUE = 35, + ACA = 36, + QAS_REQUEST = 85, + BUS_DEVICE_RESET = 12, + ABORT = 6, +}; + +struct rq_map_data { + struct page **pages; + long unsigned int offset; + short unsigned int page_order; + short unsigned int nr_entries; + bool null_mapped; + bool from_user; +}; + +struct scsi_failure { + int result; + u8 sense; + u8 asc; + u8 ascq; + s8 allowed; + s8 retries; +}; + +struct scsi_failures { + int total_allowed; + int total_retries; + struct scsi_failure *failure_definitions; +}; + +struct scsi_exec_args { + unsigned char *sense; + unsigned int sense_len; + struct scsi_sense_hdr *sshdr; + blk_mq_req_flags_t req_flags; + int scmd_flags; + int *resid; + struct scsi_failures *failures; +}; + +struct scsi_ioctl_command { + unsigned int inlen; + unsigned int outlen; + unsigned char data[0]; +}; + +struct scsi_idlun { + __u32 dev_id; + __u32 host_unique_id; +}; + +struct sg_io_hdr { + int interface_id; + int dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + unsigned int dxfer_len; + void *dxferp; + unsigned char *cmdp; + void *sbp; + unsigned int timeout; + 
unsigned int flags; + int pack_id; + void *usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + int resid; + unsigned int duration; + unsigned int info; +}; + +struct usb_device_id { + __u16 match_flags; + __u16 idVendor; + __u16 idProduct; + __u16 bcdDevice_lo; + __u16 bcdDevice_hi; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 bInterfaceNumber; + kernel_ulong_t driver_info; +}; + +struct quirk_entry { + u16 vid; + u16 pid; + u32 flags; +}; + +typedef struct { + spinlock_t *lock; + long unsigned int flags; +} class_spinlock_irqsave_t; + +typedef class_mutex_t class_mutex_intr_t; + +struct serio_device_id { + __u8 type; + __u8 extra; + __u8 id; + __u8 proto; +}; + +struct serio_driver; + +struct serio { + void *port_data; + char name[32]; + char phys[32]; + char firmware_id[128]; + bool manual_bind; + struct serio_device_id id; + spinlock_t lock; + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); + void (*close)(struct serio *); + int (*start)(struct serio *); + void (*stop)(struct serio *); + struct serio *parent; + struct list_head child_node; + struct list_head children; + unsigned int depth; + struct serio_driver *drv; + struct mutex drv_mutex; + struct device dev; + struct list_head node; + struct mutex *ps2_cmd_mutex; + long: 32; +}; + +struct serio_driver { + const char *description; + const struct serio_device_id *id_table; + bool manual_bind; + void (*write_wakeup)(struct serio *); + irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); + int (*connect)(struct serio *, struct serio_driver *); + int (*reconnect)(struct serio *); + int (*fast_reconnect)(struct serio *); + void (*disconnect)(struct serio *); + void (*cleanup)(struct serio *); + struct device_driver driver; +}; + +typedef struct serio *class_serio_pause_rx_t; + +enum serio_event_type { + SERIO_RESCAN_PORT = 0, + SERIO_RECONNECT_PORT = 1, + SERIO_RECONNECT_SUBTREE = 2, + SERIO_REGISTER_PORT = 3, + SERIO_ATTACH_DRIVER = 4, +}; + +struct serio_event { + enum serio_event_type type; + void *object; + struct module *owner; + struct list_head node; +}; + +struct pps_ktime { + __s64 sec; + __s32 nsec; + __u32 flags; +}; + +struct pps_kparams { + int api_version; + int mode; + struct pps_ktime assert_off_tu; + struct pps_ktime clear_off_tu; +}; + +struct pps_device; + +struct pps_source_info { + char name[32]; + char path[32]; + int mode; + void (*echo)(struct pps_device *, int, void *); + struct module *owner; + struct device *dev; +}; + +struct pps_device { + struct pps_source_info info; + struct pps_kparams params; + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; + unsigned int last_ev; + wait_queue_head_t queue; + unsigned int id; + const void *lookup_cookie; + struct cdev cdev; + struct device *dev; + struct fasync_struct *async_queue; + spinlock_t lock; + long: 32; +}; + +enum { + PERCPU_REF_INIT_ATOMIC = 1, + PERCPU_REF_INIT_DEAD = 2, + PERCPU_REF_ALLOW_REINIT = 4, +}; + +enum { + DISK_EVENT_MEDIA_CHANGE = 1, + DISK_EVENT_EJECT_REQUEST = 2, +}; + +struct mdp_device_descriptor_s { + __u32 number; + __u32 major; + __u32 minor; + __u32 raid_disk; + __u32 state; + __u32 reserved[27]; +}; + +typedef struct mdp_device_descriptor_s mdp_disk_t; + +struct 
mdp_superblock_s { + __u32 md_magic; + __u32 major_version; + __u32 minor_version; + __u32 patch_version; + __u32 gvalid_words; + __u32 set_uuid0; + __u32 ctime; + __u32 level; + __u32 size; + __u32 nr_disks; + __u32 raid_disks; + __u32 md_minor; + __u32 not_persistent; + __u32 set_uuid1; + __u32 set_uuid2; + __u32 set_uuid3; + __u32 gstate_creserved[16]; + __u32 utime; + __u32 state; + __u32 active_disks; + __u32 working_disks; + __u32 failed_disks; + __u32 spare_disks; + __u32 sb_csum; + __u32 events_hi; + __u32 events_lo; + __u32 cp_events_hi; + __u32 cp_events_lo; + __u32 recovery_cp; + __u64 reshape_position; + __u32 new_level; + __u32 delta_disks; + __u32 new_layout; + __u32 new_chunk; + __u32 gstate_sreserved[14]; + __u32 layout; + __u32 chunk_size; + __u32 root_pv; + __u32 root_block; + __u32 pstate_reserved[60]; + mdp_disk_t disks[27]; + __u32 reserved[0]; + mdp_disk_t this_disk; +}; + +typedef struct mdp_superblock_s mdp_super_t; + +struct mdu_version_s { + int major; + int minor; + int patchlevel; +}; + +typedef struct mdu_version_s mdu_version_t; + +struct mdu_array_info_s { + int major_version; + int minor_version; + int patch_version; + unsigned int ctime; + int level; + int size; + int nr_disks; + int raid_disks; + int md_minor; + int not_persistent; + unsigned int utime; + int state; + int active_disks; + int working_disks; + int failed_disks; + int spare_disks; + int layout; + int chunk_size; +}; + +typedef struct mdu_array_info_s mdu_array_info_t; + +struct mdu_disk_info_s { + int number; + int major; + int minor; + int raid_disk; + int state; +}; + +typedef struct mdu_disk_info_s mdu_disk_info_t; + +struct mdu_bitmap_file_s { + char pathname[4096]; +}; + +typedef struct mdu_bitmap_file_s mdu_bitmap_file_t; + +enum { + MD_RESYNC_NONE = 0, + MD_RESYNC_YIELDED = 1, + MD_RESYNC_DELAYED = 2, + MD_RESYNC_ACTIVE = 3, +}; + +enum md_ro_state { + MD_RDWR = 0, + MD_RDONLY = 1, + MD_AUTO_READ = 2, + MD_MAX_STATE = 3, +}; + +struct md_io_clone { + struct mddev *mddev; + struct bio *orig_bio; + long unsigned int start_time; + long: 32; + struct bio bio_clone; +}; + +struct super_type { + char *name; + struct module *owner; + int (*load_super)(struct md_rdev *, struct md_rdev *, int); + int (*validate_super)(struct mddev *, struct md_rdev *, struct md_rdev *); + void (*sync_super)(struct mddev *, struct md_rdev *); + long long unsigned int (*rdev_size_change)(struct md_rdev *, sector_t); + int (*allow_new_offset)(struct md_rdev *, long long unsigned int); +}; + +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct md_rdev *, char *); + ssize_t (*store)(struct md_rdev *, const char *, size_t); +}; + +enum array_state { + clear = 0, + inactive = 1, + suspended = 2, + readonly = 3, + read_auto = 4, + clean = 5, + active = 6, + write_pending = 7, + active_idle = 8, + broken = 9, + bad_word = 10, +}; + +struct detected_devices_node { + struct list_head list; + dev_t dev; +}; + +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +struct xdp_frame_bulk { + int count; + void *xa; + void *q[16]; +}; + +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +struct xdp_mem_allocator { + struct xdp_mem_info mem; + union { + void *allocator; + struct page_pool *page_pool; + }; + struct rhash_head node; + struct 
callback_head rcu; +}; + +struct packet_type { + __be16 type; + bool ignore_outgoing; + struct net_device *dev; + netdevice_tracker dev_tracker; + int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); + bool (*id_match)(struct packet_type *, struct sock *); + struct net *af_packet_net; + void *af_packet_priv; + struct list_head list; +}; + +struct llc_addr { + unsigned char lsap; + unsigned char mac[6]; +}; + +struct llc_sap { + unsigned char state; + unsigned char p_bit; + unsigned char f_bit; + refcount_t refcnt; + int (*rcv_func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + struct llc_addr laddr; + struct list_head node; + spinlock_t sk_lock; + int sk_count; + struct hlist_nulls_head sk_laddr_hash[64]; + struct hlist_head sk_dev_hash[64]; + struct callback_head rcu; +}; + +struct llc_pdu_sn { + u8 dsap; + u8 ssap; + u8 ctrl_1; + u8 ctrl_2; +}; + +struct llc_pdu_un { + u8 dsap; + u8 ssap; + u8 ctrl_1; +}; + +struct debug_reply_data { + struct ethnl_reply_data base; + u32 msg_mask; +}; + +struct nf_defrag_hook { + struct module *owner; + int (*enable)(struct net *); + void (*disable)(struct net *); +}; + +struct bpf_nf_ctx { + const struct nf_hook_state *state; + struct sk_buff *skb; +}; + +struct bpf_nf_link { + struct bpf_link link; + struct nf_hook_ops hook_ops; + netns_tracker ns_tracker; + struct net *net; + u32 dead; + const struct nf_defrag_hook *defrag_hook; + long: 32; +}; + +enum { + IPT_TTL_EQ = 0, + IPT_TTL_NE = 1, + IPT_TTL_LT = 2, + IPT_TTL_GT = 3, +}; + +struct ipt_ttl_info { + __u8 mode; + __u8 ttl; +}; + +enum { + IP6T_HL_EQ = 0, + IP6T_HL_NE = 1, + IP6T_HL_LT = 2, + IP6T_HL_GT = 3, +}; + +struct ip6t_hl_info { + __u8 mode; + __u8 hop_limit; +}; + +enum tcp_metric_index { + TCP_METRIC_RTT = 0, + TCP_METRIC_RTTVAR = 1, + TCP_METRIC_SSTHRESH = 2, + TCP_METRIC_CWND = 3, + TCP_METRIC_REORDERING = 4, + TCP_METRIC_RTT_US = 5, + TCP_METRIC_RTTVAR_US = 6, + __TCP_METRIC_MAX = 7, +}; + +enum { + TCP_METRICS_ATTR_UNSPEC = 0, + TCP_METRICS_ATTR_ADDR_IPV4 = 1, + TCP_METRICS_ATTR_ADDR_IPV6 = 2, + TCP_METRICS_ATTR_AGE = 3, + TCP_METRICS_ATTR_TW_TSVAL = 4, + TCP_METRICS_ATTR_TW_TS_STAMP = 5, + TCP_METRICS_ATTR_VALS = 6, + TCP_METRICS_ATTR_FOPEN_MSS = 7, + TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, + TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, + TCP_METRICS_ATTR_FOPEN_COOKIE = 10, + TCP_METRICS_ATTR_SADDR_IPV4 = 11, + TCP_METRICS_ATTR_SADDR_IPV6 = 12, + TCP_METRICS_ATTR_PAD = 13, + __TCP_METRICS_ATTR_MAX = 14, +}; + +enum { + TCP_METRICS_CMD_UNSPEC = 0, + TCP_METRICS_CMD_GET = 1, + TCP_METRICS_CMD_DEL = 2, + __TCP_METRICS_CMD_MAX = 3, +}; + +enum genl_validate_flags { + GENL_DONT_VALIDATE_STRICT = 1, + GENL_DONT_VALIDATE_DUMP = 2, + GENL_DONT_VALIDATE_DUMP_STRICT = 4, +}; + +struct tcp_fastopen_metrics { + u16 mss; + u16 syn_loss: 10; + u16 try_exp: 2; + long unsigned int last_syn_loss; + struct tcp_fastopen_cookie cookie; +}; + +struct tcp_metrics_block { + struct tcp_metrics_block *tcpm_next; + struct net *tcpm_net; + struct inetpeer_addr tcpm_saddr; + struct inetpeer_addr tcpm_daddr; + long unsigned int tcpm_stamp; + u32 tcpm_lock; + u32 tcpm_vals[5]; + long: 32; + struct tcp_fastopen_metrics tcpm_fastopen; + struct callback_head callback_head; +}; + +struct tcpm_hash_bucket { + struct tcp_metrics_block *chain; +}; + +struct ac6_iter_state { + struct seq_net_private p; + struct net_device *dev; +}; + +struct snmp_mib { + const char 
*name; + int entry; +}; + +enum { + IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC = 0, + IFLA_BRIDGE_VLAN_TUNNEL_ID = 1, + IFLA_BRIDGE_VLAN_TUNNEL_VID = 2, + IFLA_BRIDGE_VLAN_TUNNEL_FLAGS = 3, + __IFLA_BRIDGE_VLAN_TUNNEL_MAX = 4, +}; + +struct net_bridge_vlan_group { + struct rhashtable vlan_hash; + struct rhashtable tunnel_hash; + struct list_head vlan_list; + u16 num_vlans; + u16 pvid; + u8 pvid_state; +}; + +struct vtunnel_info { + u32 tunid; + u16 vid; + u16 flags; +}; + +enum scx_public_consts { + SCX_OPS_NAME_LEN = 128ULL, + SCX_SLICE_DFL = 20000000ULL, + SCX_SLICE_INF = 18446744073709551615ULL, +}; + +enum scx_dsq_id_flags { + SCX_DSQ_FLAG_BUILTIN = 9223372036854775808ULL, + SCX_DSQ_FLAG_LOCAL_ON = 4611686018427387904ULL, + SCX_DSQ_INVALID = 9223372036854775808ULL, + SCX_DSQ_GLOBAL = 9223372036854775809ULL, + SCX_DSQ_LOCAL = 9223372036854775810ULL, + SCX_DSQ_LOCAL_ON = 13835058055282163712ULL, + SCX_DSQ_LOCAL_CPU_MASK = 4294967295ULL, +}; + +struct wq_flusher; + +struct worker; + +struct workqueue_attrs; + +struct pool_workqueue; + +struct wq_device; + +struct wq_node_nr_active; + +struct workqueue_struct { + struct list_head pwqs; + struct list_head list; + struct mutex mutex; + int work_color; + int flush_color; + atomic_t nr_pwqs_to_flush; + struct wq_flusher *first_flusher; + struct list_head flusher_queue; + struct list_head flusher_overflow; + struct list_head maydays; + struct worker *rescuer; + int nr_drainers; + int max_active; + int min_active; + int saved_max_active; + int saved_min_active; + struct workqueue_attrs *unbound_attrs; + struct pool_workqueue *dfl_pwq; + struct wq_device *wq_dev; + char name[32]; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + unsigned int flags; + struct pool_workqueue **cpu_pwq; + struct wq_node_nr_active *node_nr_active[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum wq_affn_scope { + WQ_AFFN_DFL = 0, + WQ_AFFN_CPU = 1, + WQ_AFFN_SMT = 2, + WQ_AFFN_CACHE = 3, + WQ_AFFN_NUMA = 4, + WQ_AFFN_SYSTEM = 5, + WQ_AFFN_NR_TYPES = 6, +}; + +struct workqueue_attrs { + int nice; + cpumask_var_t cpumask; + cpumask_var_t __pod_cpumask; + bool affn_strict; + enum wq_affn_scope affn_scope; + bool ordered; +}; + +struct execute_work { + struct work_struct work; +}; + +enum wq_consts { + WQ_MAX_ACTIVE = 2048, + WQ_UNBOUND_MAX_ACTIVE = 2048, + WQ_DFL_ACTIVE = 1024, + WQ_DFL_MIN_ACTIVE = 8, +}; + +struct worker_pool; + +struct worker { + union { + struct list_head entry; + struct hlist_node hentry; + }; + struct work_struct *current_work; + work_func_t current_func; + struct pool_workqueue *current_pwq; + long: 32; + u64 current_at; + unsigned int current_color; + int sleeping; + work_func_t last_func; + struct list_head scheduled; + struct task_struct *task; + struct worker_pool *pool; + struct list_head node; + long unsigned int last_active; + unsigned int flags; + int id; + char desc[32]; + struct workqueue_struct *rescue_wq; + long: 32; +}; + 
+struct pool_workqueue { + struct worker_pool *pool; + struct workqueue_struct *wq; + int work_color; + int flush_color; + int refcnt; + int nr_in_flight[16]; + bool plugged; + int nr_active; + struct list_head inactive_works; + struct list_head pending_node; + struct list_head pwqs_node; + struct list_head mayday_node; + long: 32; + u64 stats[8]; + struct kthread_work release_work; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct worker_pool { + raw_spinlock_t lock; + int cpu; + int node; + int id; + unsigned int flags; + long unsigned int watchdog_ts; + bool cpu_stall; + int nr_running; + struct list_head worklist; + int nr_workers; + int nr_idle; + struct list_head idle_list; + struct timer_list idle_timer; + struct work_struct idle_cull_work; + struct timer_list mayday_timer; + struct hlist_head busy_hash[64]; + struct worker *manager; + struct list_head workers; + struct ida worker_ida; + struct workqueue_attrs *attrs; + struct hlist_node hash_node; + int refcnt; + struct callback_head rcu; +}; + +enum worker_pool_flags { + POOL_BH = 1, + POOL_MANAGER_ACTIVE = 2, + POOL_DISASSOCIATED = 4, + POOL_BH_DRAINING = 8, +}; + +enum worker_flags { + WORKER_DIE = 2, + WORKER_IDLE = 4, + WORKER_PREP = 8, + WORKER_CPU_INTENSIVE = 64, + WORKER_UNBOUND = 128, + WORKER_REBOUND = 256, + WORKER_NOT_RUNNING = 456, +}; + +enum work_cancel_flags { + WORK_CANCEL_DELAYED = 1, + WORK_CANCEL_DISABLE = 2, +}; + +enum wq_internal_consts { + NR_STD_WORKER_POOLS = 2, + UNBOUND_POOL_HASH_ORDER = 6, + BUSY_WORKER_HASH_ORDER = 6, + MAX_IDLE_WORKERS_RATIO = 4, + IDLE_WORKER_TIMEOUT = 300000, + MAYDAY_INITIAL_TIMEOUT = 10, + MAYDAY_INTERVAL = 100, + CREATE_COOLDOWN = 1000, + RESCUER_NICE_LEVEL = -20, + HIGHPRI_NICE_LEVEL = -20, + WQ_NAME_LEN = 32, + WORKER_ID_LEN = 42, +}; + +enum pool_workqueue_stats { + PWQ_STAT_STARTED = 0, + PWQ_STAT_COMPLETED = 1, + PWQ_STAT_CPU_TIME = 2, + PWQ_STAT_CPU_INTENSIVE = 3, + PWQ_STAT_CM_WAKEUP = 4, + PWQ_STAT_REPATRIATED = 5, + PWQ_STAT_MAYDAY = 6, + PWQ_STAT_RESCUED = 7, + PWQ_NR_STATS = 8, +}; + +struct wq_flusher { + struct list_head list; + int flush_color; + struct completion done; +}; + +struct wq_node_nr_active { + int max; + atomic_t nr; + raw_spinlock_t lock; + struct list_head pending_pwqs; +}; + +struct wq_device { + struct workqueue_struct *wq; + long: 32; + struct device dev; +}; + +struct wq_pod_type { + int nr_pods; + cpumask_var_t *pod_cpus; + int *pod_node; + int *cpu_pod; +}; + +struct work_offq_data { + u32 pool_id; + u32 disable; + u32 flags; +}; + +struct trace_event_raw_workqueue_queue_work { + struct trace_entry ent; + void *work; + void *function; + u32 __data_loc_workqueue; + int req_cpu; + int cpu; + char __data[0]; +}; + +struct trace_event_raw_workqueue_activate_work { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_data_offsets_workqueue_queue_work { + u32 workqueue; + const void *workqueue_ptr_; +}; + +struct trace_event_data_offsets_workqueue_activate_work {}; + +struct trace_event_data_offsets_workqueue_execute_start {}; + +struct trace_event_data_offsets_workqueue_execute_end {}; + +typedef void (*btf_trace_workqueue_queue_work)(void *, int, 
struct pool_workqueue *, struct work_struct *); + +typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t); + +struct wq_drain_dead_softirq_work { + struct work_struct work; + struct worker_pool *pool; + struct completion done; +}; + +struct wq_barrier { + struct work_struct work; + struct completion done; + struct task_struct *task; +}; + +struct apply_wqattrs_ctx { + struct workqueue_struct *wq; + struct workqueue_attrs *attrs; + struct list_head list; + struct pool_workqueue *dfl_pwq; + struct pool_workqueue *pwq_tbl[0]; +}; + +struct pr_cont_work_struct { + bool comma; + work_func_t func; + long int ctr; +}; + +struct work_for_cpu { + struct work_struct work; + long int (*fn)(void *); + void *arg; + long int ret; +}; + +enum pidcg_event { + PIDCG_MAX = 0, + PIDCG_FORKFAIL = 1, + NR_PIDCG_EVENTS = 2, +}; + +struct pids_cgroup { + struct cgroup_subsys_state css; + atomic64_t counter; + atomic64_t limit; + int64_t watermark; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + atomic64_t events[2]; + atomic64_t events_local[2]; +}; + +struct j_format { + unsigned int opcode: 6; + unsigned int target: 26; +}; + +struct i_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int rt: 5; + int simmediate: 16; +}; + +struct u_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int rt: 5; + unsigned int uimmediate: 16; +}; + +struct c_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int c_op: 3; + unsigned int cache: 2; + unsigned int simmediate: 16; +}; + +struct r_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int rt: 5; + unsigned int rd: 5; + unsigned int re: 5; + unsigned int func: 6; +}; + +struct c0r_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int rt: 5; + unsigned int rd: 5; + unsigned int z: 8; + unsigned int sel: 3; +}; + +struct mfmc0_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int rt: 5; + unsigned int rd: 5; + unsigned int re: 5; + unsigned int sc: 1; + char: 2; + unsigned int sel: 3; +}; + +struct co_format { + unsigned int opcode: 6; + unsigned int co: 1; + unsigned int code: 19; + unsigned int func: 6; +}; + +struct p_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int rt: 5; + unsigned int rd: 5; + unsigned int re: 5; + unsigned int func: 6; +}; + +struct f_format { + unsigned int opcode: 6; + char: 1; + unsigned int fmt: 4; + unsigned int rt: 5; + unsigned int rd: 5; + unsigned int re: 5; + unsigned int func: 6; +}; + +struct ma_format { + unsigned int opcode: 6; + unsigned int fr: 5; + unsigned int ft: 5; + unsigned int fs: 5; + unsigned int fd: 5; + unsigned int func: 4; + unsigned int fmt: 2; +}; + +struct b_format { + unsigned int opcode: 6; + unsigned int code: 20; + unsigned int func: 6; +}; + +struct ps_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int ft: 5; + unsigned int fs: 5; + unsigned int fd: 5; + unsigned int func: 6; +}; + +struct v_format { + unsigned int opcode: 6; + unsigned int sel: 4; + unsigned int fmt: 1; + unsigned int vt: 5; + unsigned int vs: 5; + unsigned int vd: 5; + unsigned int func: 6; +}; + +struct msa_mi10_format { + unsigned int opcode: 6; + int s10: 10; + unsigned int rs: 5; + unsigned int wd: 5; + unsigned int func: 4; + unsigned int df: 2; +}; + +struct 
dsp_format { + unsigned int opcode: 6; + unsigned int base: 5; + unsigned int index: 5; + unsigned int rd: 5; + unsigned int op: 5; + unsigned int func: 6; +}; + +struct mxu_lx_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int rt: 5; + unsigned int rd: 5; + unsigned int strd: 2; + unsigned int op: 3; + unsigned int func: 6; +}; + +struct spec3_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int rt: 5; + int simmediate: 9; + unsigned int func: 7; +}; + +struct fb_format { + unsigned int opcode: 6; + unsigned int bc: 5; + unsigned int cc: 3; + unsigned int flag: 2; + int simmediate: 16; +}; + +struct fp0_format { + unsigned int opcode: 6; + unsigned int fmt: 5; + unsigned int ft: 5; + unsigned int fs: 5; + unsigned int fd: 5; + unsigned int func: 6; +}; + +struct mm_fp0_format { + unsigned int opcode: 6; + unsigned int ft: 5; + unsigned int fs: 5; + unsigned int fd: 5; + unsigned int fmt: 3; + unsigned int op: 2; + unsigned int func: 6; +}; + +struct fp1_format { + unsigned int opcode: 6; + unsigned int op: 5; + unsigned int rt: 5; + unsigned int fs: 5; + unsigned int fd: 5; + unsigned int func: 6; +}; + +struct mm_fp1_format { + unsigned int opcode: 6; + unsigned int rt: 5; + unsigned int fs: 5; + unsigned int fmt: 2; + unsigned int op: 8; + unsigned int func: 6; +}; + +struct mm_fp2_format { + unsigned int opcode: 6; + unsigned int fd: 5; + unsigned int fs: 5; + unsigned int cc: 3; + unsigned int zero: 2; + unsigned int fmt: 2; + unsigned int op: 3; + unsigned int func: 6; +}; + +struct mm_fp3_format { + unsigned int opcode: 6; + unsigned int rt: 5; + unsigned int fs: 5; + unsigned int fmt: 3; + unsigned int op: 7; + unsigned int func: 6; +}; + +struct mm_fp4_format { + unsigned int opcode: 6; + unsigned int rt: 5; + unsigned int fs: 5; + unsigned int cc: 3; + unsigned int fmt: 3; + unsigned int cond: 4; + unsigned int func: 6; +}; + +struct mm_fp5_format { + unsigned int opcode: 6; + unsigned int index: 5; + unsigned int base: 5; + unsigned int fd: 5; + unsigned int op: 5; + unsigned int func: 6; +}; + +struct fp6_format { + unsigned int opcode: 6; + unsigned int fr: 5; + unsigned int ft: 5; + unsigned int fs: 5; + unsigned int fd: 5; + unsigned int func: 6; +}; + +struct mm_fp6_format { + unsigned int opcode: 6; + unsigned int ft: 5; + unsigned int fs: 5; + unsigned int fd: 5; + unsigned int fr: 5; + unsigned int func: 6; +}; + +struct mm_i_format { + unsigned int opcode: 6; + unsigned int rt: 5; + unsigned int rs: 5; + int simmediate: 16; +}; + +struct mm_m_format { + unsigned int opcode: 6; + unsigned int rd: 5; + unsigned int base: 5; + unsigned int func: 4; + int simmediate: 12; +}; + +struct mm_x_format { + unsigned int opcode: 6; + unsigned int index: 5; + unsigned int base: 5; + unsigned int rd: 5; + unsigned int func: 11; +}; + +struct mm_a_format { + unsigned int opcode: 6; + unsigned int rs: 3; + int simmediate: 23; +}; + +struct mm_b0_format { + unsigned int opcode: 6; + int simmediate: 10; +}; + +struct mm_b1_format { + unsigned int opcode: 6; + unsigned int rs: 3; + int simmediate: 7; +}; + +struct mm16_m_format { + unsigned int opcode: 6; + unsigned int func: 4; + unsigned int rlist: 2; + unsigned int imm: 4; +}; + +struct mm16_rb_format { + unsigned int opcode: 6; + unsigned int rt: 3; + unsigned int base: 3; + int simmediate: 4; +}; + +struct mm16_r3_format { + unsigned int opcode: 6; + unsigned int rt: 3; + int simmediate: 7; +}; + +struct mm16_r5_format { + unsigned int opcode: 6; + unsigned int rt: 5; + unsigned int imm: 5; +}; 
+ +struct loongson3_lswc2_format { + unsigned int opcode: 6; + unsigned int base: 5; + unsigned int rt: 5; + unsigned int fr: 1; + unsigned int offset: 9; + unsigned int ls: 1; + unsigned int rq: 5; +}; + +struct loongson3_lsdc2_format { + unsigned int opcode: 6; + unsigned int base: 5; + unsigned int rt: 5; + unsigned int index: 5; + unsigned int offset: 8; + unsigned int opcode1: 3; +}; + +struct loongson3_lscsr_format { + unsigned int opcode: 6; + unsigned int rs: 5; + unsigned int fr: 5; + unsigned int rd: 5; + unsigned int fd: 5; + unsigned int func: 6; +}; + +union mips_instruction { + unsigned int word; + short unsigned int halfword[2]; + unsigned char byte[4]; + struct j_format j_format; + struct i_format i_format; + struct u_format u_format; + struct c_format c_format; + struct r_format r_format; + struct c0r_format c0r_format; + struct mfmc0_format mfmc0_format; + struct co_format co_format; + struct p_format p_format; + struct f_format f_format; + struct ma_format ma_format; + struct msa_mi10_format msa_mi10_format; + struct b_format b_format; + struct ps_format ps_format; + struct v_format v_format; + struct dsp_format dsp_format; + struct spec3_format spec3_format; + struct fb_format fb_format; + struct fp0_format fp0_format; + struct mm_fp0_format mm_fp0_format; + struct fp1_format fp1_format; + struct mm_fp1_format mm_fp1_format; + struct mm_fp2_format mm_fp2_format; + struct mm_fp3_format mm_fp3_format; + struct mm_fp4_format mm_fp4_format; + struct mm_fp5_format mm_fp5_format; + struct fp6_format fp6_format; + struct mm_fp6_format mm_fp6_format; + struct mm_i_format mm_i_format; + struct mm_m_format mm_m_format; + struct mm_x_format mm_x_format; + struct mm_a_format mm_a_format; + struct mm_b0_format mm_b0_format; + struct mm_b1_format mm_b1_format; + struct mm16_m_format mm16_m_format; + struct mm16_rb_format mm16_rb_format; + struct mm16_r3_format mm16_r3_format; + struct mm16_r5_format mm16_r5_format; + struct loongson3_lswc2_format loongson3_lswc2_format; + struct loongson3_lsdc2_format loongson3_lsdc2_format; + struct loongson3_lscsr_format loongson3_lscsr_format; + struct mxu_lx_format mxu_lx_format; +}; + +struct objpool_slot { + uint32_t head; + uint32_t tail; + uint32_t last; + uint32_t mask; + void *entries[0]; +}; + +typedef int (*objpool_init_obj_cb)(void *, void *); + +struct objpool_head; + +typedef int (*objpool_fini_cb)(struct objpool_head *, void *); + +struct objpool_head { + int obj_size; + int nr_objs; + int nr_possible_cpus; + int capacity; + gfp_t gfp; + refcount_t ref; + long unsigned int flags; + struct objpool_slot **cpu_slots; + objpool_fini_cb release; + void *context; +}; + +typedef union mips_instruction kprobe_opcode_t; + +struct arch_specific_insn { + kprobe_opcode_t *insn; +}; + +struct kprobe; + +struct prev_kprobe { + struct kprobe *kp; + long unsigned int status; + long unsigned int old_SR; + long unsigned int saved_SR; + long unsigned int saved_epc; +}; + +typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); + +typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int); + +struct kprobe { + struct hlist_node hlist; + struct list_head list; + long unsigned int nmissed; + kprobe_opcode_t *addr; + const char *symbol_name; + unsigned int offset; + kprobe_pre_handler_t pre_handler; + kprobe_post_handler_t post_handler; + kprobe_opcode_t opcode; + struct arch_specific_insn ainsn; + u32 flags; +}; + +struct kprobe_ctlblk { + long unsigned int kprobe_status; + long unsigned int kprobe_old_SR; + 
long unsigned int kprobe_saved_SR; + long unsigned int kprobe_saved_epc; + long unsigned int flags; + long unsigned int target_epc; + struct prev_kprobe prev_kprobe; +}; + +struct kretprobe_instance; + +typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); + +struct kretprobe_holder; + +struct kretprobe_instance { + struct callback_head rcu; + struct llist_node llist; + struct kretprobe_holder *rph; + kprobe_opcode_t *ret_addr; + void *fp; + char data[0]; +}; + +struct kretprobe; + +struct kretprobe_holder { + struct kretprobe *rp; + struct objpool_head pool; +}; + +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + kretprobe_handler_t entry_handler; + int maxactive; + int nmissed; + size_t data_size; + struct kretprobe_holder *rph; +}; + +struct kprobe_blacklist_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; +}; + +struct kprobe_insn_cache { + struct mutex mutex; + void * (*alloc)(); + void (*free)(void *); + const char *sym; + struct list_head pages; + size_t insn_size; + int nr_garbage; +}; + +enum perf_record_ksymbol_type { + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, + PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + PERF_RECORD_KSYMBOL_TYPE_OOL = 2, + PERF_RECORD_KSYMBOL_TYPE_MAX = 3, +}; + +enum execmem_type { + EXECMEM_DEFAULT = 0, + EXECMEM_MODULE_TEXT = 0, + EXECMEM_KPROBES = 1, + EXECMEM_FTRACE = 2, + EXECMEM_BPF = 3, + EXECMEM_MODULE_DATA = 4, + EXECMEM_TYPE_MAX = 5, +}; + +struct kprobe_insn_page { + struct list_head list; + kprobe_opcode_t *insns; + struct kprobe_insn_cache *cache; + int nused; + int ngarbage; + char slot_used[0]; +}; + +enum kprobe_slot_state { + SLOT_CLEAN = 0, + SLOT_DIRTY = 1, + SLOT_USED = 2, +}; + +struct btf_array { + __u32 type; + __u32 index_type; + __u32 nelems; +}; + +enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + BTF_FUNC_EXTERN = 2, +}; + +struct btf_var_secinfo { + __u32 type; + __u32 offset; + __u32 size; +}; + +enum bpf_cond_pseudo_jmp { + BPF_MAY_GOTO = 0, +}; + +enum bpf_addr_space_cast { + BPF_ADDR_SPACE_CAST = 1, +}; + +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; + __u32 ipv6_dst[4]; + }; + }; + __u32 flags; + __be32 flow_label; +}; + +struct bpf_sock { + __u32 bound_dev_if; + __u32 family; + __u32 type; + __u32 protocol; + __u32 mark; + __u32 priority; + __u32 src_ip4; + __u32 src_ip6[4]; + __u32 src_port; + __be16 dst_port; + __u32 dst_ip4; + __u32 dst_ip6[4]; + __u32 state; + __s32 rx_queue_mapping; +}; + +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + __u32 vlan_tci; + __u32 vlan_proto; + __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; + __u32 hash; + __u32 tc_classid; + __u32 data; + __u32 data_end; + __u32 napi_id; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 data_meta; + union { + struct bpf_flow_keys *flow_keys; + }; + __u64 tstamp; + __u32 wire_len; + __u32 gso_segs; + union { + struct bpf_sock *sk; + }; + __u32 gso_size; + __u8 tstamp_type; + __u64 hwtstamp; +}; + +enum sk_action { + SK_DROP = 0, + SK_PASS = 1, +}; + +struct bpf_dynptr { + __u64 __opaque[2]; 
+}; + +enum bpf_core_relo_kind { + BPF_CORE_FIELD_BYTE_OFFSET = 0, + BPF_CORE_FIELD_BYTE_SIZE = 1, + BPF_CORE_FIELD_EXISTS = 2, + BPF_CORE_FIELD_SIGNED = 3, + BPF_CORE_FIELD_LSHIFT_U64 = 4, + BPF_CORE_FIELD_RSHIFT_U64 = 5, + BPF_CORE_TYPE_ID_LOCAL = 6, + BPF_CORE_TYPE_ID_TARGET = 7, + BPF_CORE_TYPE_EXISTS = 8, + BPF_CORE_TYPE_SIZE = 9, + BPF_CORE_ENUMVAL_EXISTS = 10, + BPF_CORE_ENUMVAL_VALUE = 11, + BPF_CORE_TYPE_MATCHES = 12, +}; + +struct bpf_core_relo { + __u32 insn_off; + __u32 type_id; + __u32 access_str_off; + enum bpf_core_relo_kind kind; +}; + +struct btf_id_set { + u32 cnt; + u32 ids[0]; +}; + +enum { + BTF_TRACING_TYPE_TASK = 0, + BTF_TRACING_TYPE_FILE = 1, + BTF_TRACING_TYPE_VMA = 2, + MAX_BTF_TRACING_TYPE = 3, +}; + +struct bpf_attach_target_info { + struct btf_func_model fmodel; + long int tgt_addr; + struct module *tgt_mod; + const char *tgt_name; + const struct btf_type *tgt_type; +}; + +enum bpf_jit_poke_reason { + BPF_POKE_REASON_TAIL_CALL = 0, +}; + +struct bpf_kfunc_desc { + struct btf_func_model func_model; + u32 func_id; + s32 imm; + u16 offset; + long unsigned int addr; +}; + +struct bpf_kfunc_desc_tab { + struct bpf_kfunc_desc descs[256]; + u32 nr_descs; +}; + +struct bpf_kfunc_btf { + struct btf *btf; + struct module *module; + u16 offset; +}; + +struct bpf_kfunc_btf_tab { + struct bpf_kfunc_btf descs[256]; + u32 nr_descs; +}; + +struct bpf_array_aux { + struct list_head poke_progs; + struct bpf_map *map; + struct mutex poke_mutex; + struct work_struct work; +}; + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + struct bpf_array_aux *aux; + long: 32; + union { + struct { + struct {} __empty_value; + char value[0]; + }; + struct { + struct {} __empty_ptrs; + void *ptrs[0]; + }; + struct { + struct {} __empty_pptrs; + void *pptrs[0]; + }; + }; +}; + +enum { + BPF_MAX_LOOPS = 8388608, +}; + +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + +struct bpf_core_ctx { + struct bpf_verifier_log *log; + const struct btf *btf; +}; + +struct bpf_bprintf_data { + u32 *bin_args; + char *buf; + bool get_bin_args; + bool get_buf; +}; + +struct vm_struct { + struct vm_struct *next; + void *addr; + long unsigned int size; + long unsigned int flags; + struct page **pages; + unsigned int nr_pages; + phys_addr_t phys_addr; + const void *caller; +}; + +enum bpf_stack_slot_type { + STACK_INVALID = 0, + STACK_SPILL = 1, + STACK_MISC = 2, + STACK_ZERO = 3, + STACK_DYNPTR = 4, + STACK_ITER = 5, +}; + +struct bpf_verifier_stack_elem { + struct bpf_verifier_state st; + int insn_idx; + int prev_insn_idx; + struct bpf_verifier_stack_elem *next; + u32 log_pos; +}; + +typedef void (*bpf_insn_print_t)(void *, const char *, ...); + +typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); + +typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); + +struct bpf_insn_cbs { + bpf_insn_print_t cb_print; + bpf_insn_revmap_call_t cb_call; + bpf_insn_print_imm_t cb_imm; + void *private_data; +}; + +struct bpf_call_arg_meta { + struct bpf_map *map_ptr; + bool raw_mode; + bool pkt_access; + u8 release_regno; + int regno; + int access_size; + int mem_size; + long: 32; + u64 msize_max_value; + int ref_obj_id; + int dynptr_id; + int map_uid; + int func_id; + struct btf *btf; + u32 btf_id; + struct btf *ret_btf; + u32 ret_btf_id; + u32 subprogno; + struct btf_field *kptr_field; +}; + +struct bpf_kfunc_call_arg_meta { + struct btf *btf; + u32 
func_id; + u32 kfunc_flags; + const struct btf_type *func_proto; + const char *func_name; + u32 ref_obj_id; + u8 release_regno; + bool r0_rdonly; + u32 ret_btf_id; + u64 r0_size; + u32 subprogno; + long: 32; + struct { + u64 value; + bool found; + long: 32; + } arg_constant; + struct btf *arg_btf; + u32 arg_btf_id; + bool arg_owning_ref; + struct { + struct btf_field *field; + } arg_list_head; + struct { + struct btf_field *field; + } arg_rbtree_root; + struct { + enum bpf_dynptr_type type; + u32 id; + u32 ref_obj_id; + } initialized_dynptr; + struct { + u8 spi; + u8 frameno; + } iter; + struct { + struct bpf_map *ptr; + int uid; + } map; + long: 32; + u64 mem_size; +}; + +enum reg_arg_type { + SRC_OP = 0, + DST_OP = 1, + DST_OP_NO_MARK = 2, +}; + +struct linked_reg { + u8 frameno; + union { + u8 spi; + u8 regno; + }; + bool is_reg; +}; + +struct linked_regs { + int cnt; + struct linked_reg entries[6]; +}; + +enum bpf_access_src { + ACCESS_DIRECT = 1, + ACCESS_HELPER = 2, +}; + +struct task_struct__safe_rcu { + const cpumask_t *cpus_ptr; + struct css_set *cgroups; + struct task_struct *real_parent; + struct task_struct *group_leader; +}; + +struct cgroup__safe_rcu { + struct kernfs_node *kn; +}; + +struct css_set__safe_rcu { + struct cgroup *dfl_cgrp; +}; + +struct mm_struct__safe_rcu_or_null { + struct file *exe_file; +}; + +struct sk_buff__safe_rcu_or_null { + struct sock *sk; +}; + +struct request_sock__safe_rcu_or_null { + struct sock *sk; +}; + +struct bpf_iter_meta__safe_trusted { + struct seq_file *seq; +}; + +struct bpf_iter__task__safe_trusted { + struct bpf_iter_meta *meta; + struct task_struct *task; +}; + +struct linux_binprm__safe_trusted { + struct file *file; +}; + +struct file__safe_trusted { + struct inode *f_inode; +}; + +struct dentry__safe_trusted { + struct inode *d_inode; +}; + +struct socket__safe_trusted_or_null { + struct sock *sk; +}; + +struct bpf_reg_types { + const enum bpf_reg_type types[10]; + u32 *btf_id; +}; + +enum { + AT_PKT_END = -1, + BEYOND_PKT_END = -2, +}; + +typedef int (*set_callee_state_fn)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *, int); + +enum { + KF_ARG_DYNPTR_ID = 0, + KF_ARG_LIST_HEAD_ID = 1, + KF_ARG_LIST_NODE_ID = 2, + KF_ARG_RB_ROOT_ID = 3, + KF_ARG_RB_NODE_ID = 4, + KF_ARG_WORKQUEUE_ID = 5, +}; + +enum kfunc_ptr_arg_type { + KF_ARG_PTR_TO_CTX = 0, + KF_ARG_PTR_TO_ALLOC_BTF_ID = 1, + KF_ARG_PTR_TO_REFCOUNTED_KPTR = 2, + KF_ARG_PTR_TO_DYNPTR = 3, + KF_ARG_PTR_TO_ITER = 4, + KF_ARG_PTR_TO_LIST_HEAD = 5, + KF_ARG_PTR_TO_LIST_NODE = 6, + KF_ARG_PTR_TO_BTF_ID = 7, + KF_ARG_PTR_TO_MEM = 8, + KF_ARG_PTR_TO_MEM_SIZE = 9, + KF_ARG_PTR_TO_CALLBACK = 10, + KF_ARG_PTR_TO_RB_ROOT = 11, + KF_ARG_PTR_TO_RB_NODE = 12, + KF_ARG_PTR_TO_NULL = 13, + KF_ARG_PTR_TO_CONST_STR = 14, + KF_ARG_PTR_TO_MAP = 15, + KF_ARG_PTR_TO_WORKQUEUE = 16, +}; + +enum special_kfunc_type { + KF_bpf_obj_new_impl = 0, + KF_bpf_obj_drop_impl = 1, + KF_bpf_refcount_acquire_impl = 2, + KF_bpf_list_push_front_impl = 3, + KF_bpf_list_push_back_impl = 4, + KF_bpf_list_pop_front = 5, + KF_bpf_list_pop_back = 6, + KF_bpf_cast_to_kern_ctx = 7, + KF_bpf_rdonly_cast = 8, + KF_bpf_rcu_read_lock = 9, + KF_bpf_rcu_read_unlock = 10, + KF_bpf_rbtree_remove = 11, + KF_bpf_rbtree_add_impl = 12, + KF_bpf_rbtree_first = 13, + KF_bpf_dynptr_from_skb = 14, + KF_bpf_dynptr_from_xdp = 15, + KF_bpf_dynptr_slice = 16, + KF_bpf_dynptr_slice_rdwr = 17, + KF_bpf_dynptr_clone = 18, + KF_bpf_percpu_obj_new_impl = 19, + KF_bpf_percpu_obj_drop_impl = 20, + KF_bpf_throw = 21, + 
KF_bpf_wq_set_callback_impl = 22, + KF_bpf_preempt_disable = 23, + KF_bpf_preempt_enable = 24, + KF_bpf_iter_css_task_new = 25, + KF_bpf_session_cookie = 26, + KF_bpf_get_kmem_cache = 27, +}; + +enum { + REASON_BOUNDS = -1, + REASON_TYPE = -2, + REASON_PATHS = -3, + REASON_LIMIT = -4, + REASON_STACK = -5, +}; + +struct bpf_sanitize_info { + struct bpf_insn_aux_data aux; + bool mask_to_left; + long: 32; +}; + +enum { + DISCOVERED = 16, + EXPLORED = 32, + FALLTHROUGH = 1, + BRANCH = 2, +}; + +enum { + DONE_EXPLORING = 0, + KEEP_EXPLORING = 1, +}; + +enum exact_level { + NOT_EXACT = 0, + EXACT = 1, + RANGE_WITHIN = 2, +}; + +struct bpf_iter; + +struct radix_tree_iter { + long unsigned int index; + long unsigned int next_index; + long unsigned int tags; + struct xa_node *node; +}; + +enum { + RADIX_TREE_ITER_TAG_MASK = 15, + RADIX_TREE_ITER_TAGGED = 16, + RADIX_TREE_ITER_CONTIG = 32, +}; + +enum wb_state { + WB_registered = 0, + WB_writeback_running = 1, + WB_has_dirty_io = 2, + WB_start_all = 3, +}; + +struct wb_stats { + long unsigned int nr_dirty; + long unsigned int nr_io; + long unsigned int nr_more_io; + long unsigned int nr_dirty_time; + long unsigned int nr_writeback; + long unsigned int nr_reclaimable; + long unsigned int nr_dirtied; + long unsigned int nr_written; + long unsigned int dirty_thresh; + long unsigned int wb_thresh; +}; + +enum pgt_entry { + NORMAL_PMD = 0, + HPAGE_PMD = 1, + NORMAL_PUD = 2, + HPAGE_PUD = 3, +}; + +enum vmpressure_levels { + VMPRESSURE_LOW = 0, + VMPRESSURE_MEDIUM = 1, + VMPRESSURE_CRITICAL = 2, + VMPRESSURE_NUM_LEVELS = 3, +}; + +enum vmpressure_modes { + VMPRESSURE_NO_PASSTHROUGH = 0, + VMPRESSURE_HIERARCHY = 1, + VMPRESSURE_LOCAL = 2, + VMPRESSURE_NUM_MODES = 3, +}; + +struct eventfd_ctx; + +struct vmpressure_event { + struct eventfd_ctx *efd; + enum vmpressure_levels level; + enum vmpressure_modes mode; + struct list_head node; +}; + +typedef long int intptr_t; + +struct genradix_root; + +struct __genradix { + struct genradix_root *root; +}; + +struct genradix_node { + union { + struct genradix_node *children[128]; + u8 data[512]; + }; +}; + +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct syscall_info { + __u64 sp; + struct seccomp_data data; +}; + +enum resctrl_conf_type { + CDP_NONE = 0, + CDP_CODE = 1, + CDP_DATA = 2, +}; + +typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); + +enum proc_mem_force { + PROC_MEM_FORCE_ALWAYS = 0, + PROC_MEM_FORCE_PTRACE = 1, + PROC_MEM_FORCE_NEVER = 2, +}; + +struct pid_entry { + const char *name; + unsigned int len; + umode_t mode; + const struct inode_operations *iop; + const struct file_operations *fop; + union proc_op op; +}; + +struct limit_names { + const char *name; + const char *unit; +}; + +struct map_files_info { + long unsigned int start; + long unsigned int end; + fmode_t mode; +}; + +struct tgid_iter { + unsigned int tgid; + struct task_struct *task; +}; + +struct iso_directory_record { + __u8 length[1]; + __u8 ext_attr_length[1]; + __u8 extent[8]; + __u8 size[8]; + __u8 date[7]; + __u8 flags[1]; + __u8 file_unit_size[1]; + __u8 interleave[1]; + __u8 volume_sequence_number[4]; + __u8 name_len[1]; + char name[0]; +}; + +enum isofs_file_format { + isofs_file_normal = 0, + isofs_file_sparse = 1, + isofs_file_compressed = 2, +}; + +struct iso_inode_info { + long unsigned int i_iget5_block; + long unsigned int i_iget5_offset; + unsigned int i_first_extent; + unsigned char i_file_format; + unsigned char 
i_format_parm[3]; + long unsigned int i_next_section_block; + long unsigned int i_next_section_offset; + off_t i_section_size; + long: 32; + struct inode vfs_inode; +}; + +struct isofs_sb_info { + long unsigned int s_ninodes; + long unsigned int s_nzones; + long unsigned int s_firstdatazone; + long unsigned int s_log_zone_size; + long unsigned int s_max_size; + int s_rock_offset; + s32 s_sbsector; + unsigned char s_joliet_level; + unsigned char s_mapping; + unsigned char s_check; + unsigned char s_session; + unsigned int s_high_sierra: 1; + unsigned int s_rock: 2; + unsigned int s_cruft: 1; + unsigned int s_nocompress: 1; + unsigned int s_hide: 1; + unsigned int s_showassoc: 1; + unsigned int s_overriderockperm: 1; + unsigned int s_uid_set: 1; + unsigned int s_gid_set: 1; + umode_t s_fmode; + umode_t s_dmode; + kgid_t s_gid; + kuid_t s_uid; + struct nls_table *s_nls_iocharset; +}; + +struct SU_SP_s { + __u8 magic[2]; + __u8 skip; +}; + +struct SU_CE_s { + __u8 extent[8]; + __u8 offset[8]; + __u8 size[8]; +}; + +struct SU_ER_s { + __u8 len_id; + __u8 len_des; + __u8 len_src; + __u8 ext_ver; + __u8 data[0]; +}; + +struct RR_RR_s { + __u8 flags[1]; +}; + +struct RR_PX_s { + __u8 mode[8]; + __u8 n_links[8]; + __u8 uid[8]; + __u8 gid[8]; +}; + +struct RR_PN_s { + __u8 dev_high[8]; + __u8 dev_low[8]; +}; + +struct SL_component { + __u8 flags; + __u8 len; + __u8 text[0]; +}; + +struct RR_SL_s { + __u8 flags; + struct SL_component link; +}; + +struct RR_NM_s { + __u8 flags; + char name[0]; +}; + +struct RR_CL_s { + __u8 location[8]; +}; + +struct RR_PL_s { + __u8 location[8]; +}; + +struct stamp { + __u8 time[7]; +}; + +struct RR_TF_s { + __u8 flags; + struct stamp times[0]; +}; + +struct RR_ZF_s { + __u8 algorithm[2]; + __u8 parms[2]; + __u8 real_size[8]; +}; + +struct rock_ridge { + __u8 signature[2]; + __u8 len; + __u8 version; + union { + struct SU_SP_s SP; + struct SU_CE_s CE; + struct SU_ER_s ER; + struct RR_RR_s RR; + struct RR_PX_s PX; + struct RR_PN_s PN; + struct RR_SL_s SL; + struct RR_NM_s NM; + struct RR_CL_s CL; + struct RR_PL_s PL; + struct RR_TF_s TF; + struct RR_ZF_s ZF; + } u; +}; + +struct rock_state { + void *buffer; + unsigned char *chr; + int len; + int cont_size; + int cont_extent; + int cont_offset; + int cont_loops; + struct inode *inode; +}; + +struct wait_bit_key { + long unsigned int *flags; + int bit_nr; + long unsigned int timeout; +}; + +typedef int wait_bit_action_f(struct wait_bit_key *, int); + +struct trace_print_flags_u64 { + long long unsigned int mask; + const char *name; + long: 32; +}; + +struct btrfs_ioctl_vol_args { + __s64 fd; + char name[4088]; +}; + +struct btrfs_root_ref { + __le64 dirid; + __le64 sequence; + __le16 name_len; +} __attribute__((packed)); + +enum { + IO_TREE_FS_PINNED_EXTENTS = 0, + IO_TREE_FS_EXCLUDED_EXTENTS = 1, + IO_TREE_BTREE_INODE_IO = 2, + IO_TREE_INODE_IO = 3, + IO_TREE_RELOC_BLOCKS = 4, + IO_TREE_TRANS_DIRTY_PAGES = 5, + IO_TREE_ROOT_DIRTY_LOG_PAGES = 6, + IO_TREE_INODE_FILE_EXTENT = 7, + IO_TREE_LOG_CSUM_RANGE = 8, + IO_TREE_SELFTEST = 9, + IO_TREE_DEVICE_ALLOC_STATE = 10, +}; + +struct btrfs_workqueue { + struct workqueue_struct *normal_wq; + struct btrfs_fs_info *fs_info; + struct list_head ordered_list; + spinlock_t list_lock; + atomic_t pending; + int limit_active; + int current_active; + int thresh; + unsigned int count; + spinlock_t thres_lock; +}; + +struct btrfs_zoned_device_info { + u64 zone_size; + u8 zone_size_shift; + u32 nr_zones; + unsigned int max_active_zones; + int reserved_active_zones; + atomic_t 
active_zones_left; + long unsigned int *seq_zones; + long unsigned int *empty_zones; + long unsigned int *active_zones; + struct blk_zone *zone_cache; + long: 32; + struct blk_zone sb_zones[6]; +}; + +struct btrfs_device_info { + struct btrfs_device *dev; + long: 32; + u64 dev_offset; + u64 max_avail; + u64 total_avail; +}; + +struct btrfs_raid_attr { + u8 sub_stripes; + u8 dev_stripes; + u8 devs_max; + u8 devs_min; + u8 tolerated_failures; + u8 devs_increment; + u8 ncopies; + u8 nparity; + u8 mindev_error; + const char raid_name[8]; + long: 32; + u64 bg_flag; +}; + +enum btrfs_flush_state { + FLUSH_DELAYED_ITEMS_NR = 1, + FLUSH_DELAYED_ITEMS = 2, + FLUSH_DELAYED_REFS_NR = 3, + FLUSH_DELAYED_REFS = 4, + FLUSH_DELALLOC = 5, + FLUSH_DELALLOC_WAIT = 6, + FLUSH_DELALLOC_FULL = 7, + ALLOC_CHUNK = 8, + ALLOC_CHUNK_FORCE = 9, + RUN_DELAYED_IPUTS = 10, + COMMIT_TRANS = 11, +}; + +struct btrfs_qgroup_extent_record { + u64 num_bytes; + u32 data_rsv; + long: 32; + u64 data_rsv_refroot; + struct ulist *old_roots; + long: 32; +}; + +struct btrfs_qgroup_rsv { + u64 values[3]; +}; + +struct btrfs_qgroup { + u64 qgroupid; + u64 rfer; + u64 rfer_cmpr; + u64 excl; + u64 excl_cmpr; + u64 lim_flags; + u64 max_rfer; + u64 max_excl; + u64 rsv_rfer; + u64 rsv_excl; + struct btrfs_qgroup_rsv rsv; + struct list_head groups; + struct list_head members; + struct list_head dirty; + struct list_head iterator; + struct list_head nested_iterator; + struct rb_node node; + long: 32; + u64 old_refcnt; + u64 new_refcnt; + struct kobject kobj; + long: 32; +}; + +enum { + __QGROUP_RESERVE_BIT = 0, + QGROUP_RESERVE = 1, + __QGROUP_RESERVE_SEQ = 0, + __QGROUP_RELEASE_BIT = 1, + QGROUP_RELEASE = 2, + __QGROUP_RELEASE_SEQ = 1, + __QGROUP_FREE_BIT = 2, + QGROUP_FREE = 4, + __QGROUP_FREE_SEQ = 2, +}; + +enum btrfs_rbio_ops { + BTRFS_RBIO_WRITE = 0, + BTRFS_RBIO_READ_REBUILD = 1, + BTRFS_RBIO_PARITY_SCRUB = 2, +}; + +struct sector_ptr; + +struct btrfs_raid_bio { + struct btrfs_io_context *bioc; + struct list_head hash_list; + struct list_head stripe_cache; + struct work_struct work; + struct bio_list bio_list; + spinlock_t bio_list_lock; + struct list_head plug_list; + long unsigned int flags; + enum btrfs_rbio_ops operation; + u16 nr_pages; + u16 nr_sectors; + u8 nr_data; + u8 real_stripes; + u8 stripe_npages; + u8 stripe_nsectors; + u8 scrubp; + int bio_list_bytes; + refcount_t refs; + atomic_t stripes_pending; + wait_queue_head_t io_wait; + long unsigned int dbitmap; + long unsigned int finish_pbitmap; + struct page **stripe_pages; + struct sector_ptr *bio_sectors; + struct sector_ptr *stripe_sectors; + void **finish_pointers; + long unsigned int *error_bitmap; + u8 *csum_buf; + long unsigned int *csum_bitmap; +}; + +struct raid56_bio_trace_info { + u64 devid; + u32 offset; + u8 stripe_nr; +}; + +enum btrfs_extent_allocation_policy { + BTRFS_EXTENT_ALLOC_CLUSTERED = 0, + BTRFS_EXTENT_ALLOC_ZONED = 1, +}; + +struct find_free_extent_ctl { + u64 ram_bytes; + u64 num_bytes; + u64 min_alloc_size; + u64 empty_size; + u64 flags; + int delalloc; + long: 32; + u64 search_start; + u64 empty_cluster; + struct btrfs_free_cluster *last_ptr; + bool use_cluster; + bool have_caching_bg; + bool orig_have_caching_bg; + bool for_treelog; + bool for_data_reloc; + int index; + int loop; + bool retry_uncached; + int cached; + long: 32; + u64 max_extent_size; + u64 total_free_space; + u64 found_offset; + u64 hint_byte; + enum btrfs_extent_allocation_policy policy; + bool hinted; + enum btrfs_block_group_size_class size_class; + long: 32; +}; + +struct 
trace_event_raw_btrfs_transaction_commit { + struct trace_entry ent; + u8 fsid[16]; + u64 generation; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs__inode { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 blocks; + u64 disk_i_size; + u64 generation; + u64 last_trans; + u64 logged_trans; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_get_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 ino; + u64 start; + u64 len; + u32 flags; + int refs; + char __data[0]; +}; + +struct trace_event_raw_btrfs_handle_em_exist { + struct trace_entry ent; + u8 fsid[16]; + u64 e_start; + u64 e_len; + u64 map_start; + u64 map_len; + u64 start; + u64 len; + char __data[0]; +}; + +struct trace_event_raw_btrfs__file_extent_item_regular { + struct trace_entry ent; + u8 fsid[16]; + u64 root_obj; + u64 ino; + loff_t isize; + u64 disk_isize; + u64 num_bytes; + u64 ram_bytes; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 extent_offset; + u8 extent_type; + u8 compression; + long: 32; + u64 extent_start; + u64 extent_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs__file_extent_item_inline { + struct trace_entry ent; + u8 fsid[16]; + u64 root_obj; + u64 ino; + loff_t isize; + u64 disk_isize; + u8 extent_type; + u8 compression; + long: 32; + u64 extent_start; + u64 extent_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs__ordered_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 file_offset; + u64 start; + u64 len; + u64 disk_len; + u64 bytes_left; + long unsigned int flags; + int compress_type; + int refs; + long: 32; + u64 root_objectid; + u64 truncated_len; + char __data[0]; +}; + +struct trace_event_raw_btrfs_finish_ordered_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 start; + u64 len; + bool uptodate; + long: 32; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs__writepage { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + long unsigned int index; + long int nr_to_write; + long int pages_skipped; + long: 32; + loff_t range_start; + loff_t range_end; + char for_kupdate; + char for_reclaim; + char range_cyclic; + long unsigned int writeback_index; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_writepage_end_io_hook { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 start; + u64 end; + int uptodate; + long: 32; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sync_file { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 parent; + int datasync; + long: 32; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sync_fs { + struct trace_entry ent; + u8 fsid[16]; + int wait; + char __data[0]; +}; + +struct trace_event_raw_btrfs_add_block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 offset; + u64 size; + u64 flags; + u64 bytes_used; + u64 bytes_super; + int create; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_delayed_tree_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + long: 32; + u64 parent; + u64 ref_root; + int level; + int type; + u64 seq; + char __data[0]; +}; + +struct trace_event_raw_btrfs_delayed_data_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + long: 32; + u64 parent; + u64 ref_root; + u64 owner; + u64 offset; + int type; + long: 32; + u64 seq; + char __data[0]; +}; + +struct trace_event_raw_btrfs_delayed_ref_head { + struct 
trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + int is_data; + char __data[0]; +}; + +struct trace_event_raw_btrfs__chunk { + struct trace_entry ent; + u8 fsid[16]; + int num_stripes; + long: 32; + u64 type; + int sub_stripes; + long: 32; + u64 offset; + u64 size; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_cow_block { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 buf_start; + int refs; + long: 32; + u64 cow_start; + int buf_level; + int cow_level; + char __data[0]; +}; + +struct trace_event_raw_btrfs_space_reservation { + struct trace_entry ent; + u8 fsid[16]; + u32 __data_loc_type; + long: 32; + u64 val; + u64 bytes; + int reserve; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_trigger_flush { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 bytes; + int flush; + u32 __data_loc_reason; + char __data[0]; +}; + +struct trace_event_raw_btrfs_flush_space { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 num_bytes; + int state; + int ret; + bool for_preempt; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs__reserved_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 start; + u64 len; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent_search_loop { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + u64 loop; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent_have_block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + u64 loop; + bool hinted; + long: 32; + u64 bg_start; + u64 bg_flags; + char __data[0]; +}; + +struct trace_event_raw_btrfs__reserve_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + int bg_size_class; + long: 32; + u64 start; + u64 len; + u64 loop; + bool hinted; + int size_class; + char __data[0]; +}; + +struct trace_event_raw_btrfs_find_cluster { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + u64 start; + u64 bytes; + u64 empty_size; + u64 min_bytes; + char __data[0]; +}; + +struct trace_event_raw_btrfs_failed_cluster_setup { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_setup_cluster { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + u64 start; + u64 max_size; + u64 size; + int bitmap; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_alloc_extent_state { + struct trace_entry ent; + const struct extent_state *state; + long unsigned int mask; + const void *ip; + char __data[0]; +}; + +struct trace_event_raw_free_extent_state { + struct trace_entry ent; + const struct extent_state *state; + const void *ip; + char __data[0]; +}; + +struct trace_event_raw_btrfs__work { + struct trace_entry ent; + u8 fsid[16]; + const void *work; + const void *wq; + const void *func; + const void *ordered_func; + const void *normal_work; + char __data[0]; +}; + +struct trace_event_raw_btrfs__work__done { + struct trace_entry ent; + u8 fsid[16]; + const void *wtag; + char __data[0]; +}; + +struct trace_event_raw_btrfs_workqueue { + struct trace_entry ent; + u8 fsid[16]; + const void *wq; + u32 __data_loc_name; + char __data[0]; +}; + +struct 
trace_event_raw_btrfs_workqueue_done { + struct trace_entry ent; + u8 fsid[16]; + const void *wq; + char __data[0]; +}; + +struct trace_event_raw_btrfs__qgroup_rsv_data { + struct trace_entry ent; + u8 fsid[16]; + u64 rootid; + u64 ino; + u64 start; + u64 len; + u64 reserved; + int op; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_qgroup_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + char __data[0]; +}; + +struct trace_event_raw_qgroup_num_dirty_extents { + struct trace_entry ent; + u8 fsid[16]; + u64 transid; + u64 num_dirty_extents; + char __data[0]; +}; + +struct trace_event_raw_btrfs_qgroup_account_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 transid; + u64 bytenr; + u64 num_bytes; + u64 nr_old_roots; + u64 nr_new_roots; + char __data[0]; +}; + +struct trace_event_raw_qgroup_update_counters { + struct trace_entry ent; + u8 fsid[16]; + u64 qgid; + u64 old_rfer; + u64 old_excl; + u64 cur_old_count; + u64 cur_new_count; + char __data[0]; +}; + +struct trace_event_raw_qgroup_update_reserve { + struct trace_entry ent; + u8 fsid[16]; + u64 qgid; + u64 cur_reserved; + s64 diff; + int type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_qgroup_meta_reserve { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + int type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_qgroup_meta_convert { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + char __data[0]; +}; + +struct trace_event_raw_qgroup_meta_free_all_pertrans { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + int type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs__prelim_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 root_id; + u64 objectid; + u8 type; + long: 32; + u64 offset; + int level; + int old_count; + u64 parent; + u64 bytenr; + int mod_count; + long: 32; + u64 tree_size; + char __data[0]; +}; + +struct trace_event_raw_btrfs_inode_mod_outstanding_extents { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 ino; + int mod; + unsigned int outstanding; + char __data[0]; +}; + +struct trace_event_raw_btrfs__block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 len; + u64 used; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_btrfs_set_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + long: 32; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int set_bits; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_clear_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + long: 32; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int clear_bits; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_convert_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + long: 32; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int set_bits; + unsigned int clear_bits; + char __data[0]; +}; + +struct trace_event_raw_btrfs_dump_space_info { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 total_bytes; + u64 bytes_used; + u64 bytes_pinned; + u64 bytes_reserved; + u64 bytes_may_use; + u64 bytes_readonly; + u64 reclaim_size; + int clamp; + long: 32; + u64 global_reserved; + u64 trans_reserved; + u64 delayed_refs_reserved; + u64 delayed_reserved; + u64 free_chunk_space; + u64 delalloc_bytes; + u64 ordered_bytes; + char __data[0]; +}; + +struct trace_event_raw_btrfs_reserve_ticket { + struct 
trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 bytes; + u64 start_ns; + int flush; + int error; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sleep_tree_lock { + struct trace_entry ent; + u8 fsid[16]; + u64 block; + u64 generation; + u64 start_ns; + u64 end_ns; + u64 diff_ns; + u64 owner; + int is_log_tree; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_locking_events { + struct trace_entry ent; + u8 fsid[16]; + u64 block; + u64 generation; + u64 owner; + int is_log_tree; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs__space_info_update { + struct trace_entry ent; + u8 fsid[16]; + u64 type; + u64 old; + s64 diff; + char __data[0]; +}; + +struct trace_event_raw_btrfs_raid56_bio { + struct trace_entry ent; + u8 fsid[16]; + u64 full_stripe; + u64 physical; + u64 devid; + u32 offset; + u32 len; + u8 opf; + u8 total_stripes; + u8 real_stripes; + u8 nr_data; + u8 stripe_nr; + char __data[0]; +}; + +struct trace_event_raw_btrfs_insert_one_raid_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 logical; + u64 length; + int num_stripes; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_raid_extent_delete { + struct trace_entry ent; + u8 fsid[16]; + u64 start; + u64 end; + u64 found_start; + u64 found_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs_get_raid_extent_offset { + struct trace_entry ent; + u8 fsid[16]; + u64 logical; + u64 length; + u64 physical; + u64 devid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_count { + struct trace_entry ent; + u8 fsid[16]; + long int nr; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_scan_enter { + struct trace_entry ent; + u8 fsid[16]; + long int nr_to_scan; + long int nr; + u64 last_root_id; + u64 last_ino; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_scan_exit { + struct trace_entry ent; + u8 fsid[16]; + long int nr_dropped; + long int nr; + u64 last_root_id; + u64 last_ino; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_remove_em { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 root_id; + u64 start; + u64 len; + u32 flags; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_btrfs_transaction_commit {}; + +struct trace_event_data_offsets_btrfs__inode {}; + +struct trace_event_data_offsets_btrfs_get_extent {}; + +struct trace_event_data_offsets_btrfs_handle_em_exist {}; + +struct trace_event_data_offsets_btrfs__file_extent_item_regular {}; + +struct trace_event_data_offsets_btrfs__file_extent_item_inline {}; + +struct trace_event_data_offsets_btrfs__ordered_extent {}; + +struct trace_event_data_offsets_btrfs_finish_ordered_extent {}; + +struct trace_event_data_offsets_btrfs__writepage {}; + +struct trace_event_data_offsets_btrfs_writepage_end_io_hook {}; + +struct trace_event_data_offsets_btrfs_sync_file {}; + +struct trace_event_data_offsets_btrfs_sync_fs {}; + +struct trace_event_data_offsets_btrfs_add_block_group {}; + +struct trace_event_data_offsets_btrfs_delayed_tree_ref {}; + +struct trace_event_data_offsets_btrfs_delayed_data_ref {}; + +struct trace_event_data_offsets_btrfs_delayed_ref_head {}; + +struct trace_event_data_offsets_btrfs__chunk {}; + +struct trace_event_data_offsets_btrfs_cow_block {}; + +struct trace_event_data_offsets_btrfs_space_reservation { + u32 type; + const void *type_ptr_; +}; + +struct trace_event_data_offsets_btrfs_trigger_flush { + u32 reason; + const void *reason_ptr_; +}; + +struct 
trace_event_data_offsets_btrfs_flush_space {}; + +struct trace_event_data_offsets_btrfs__reserved_extent {}; + +struct trace_event_data_offsets_find_free_extent {}; + +struct trace_event_data_offsets_find_free_extent_search_loop {}; + +struct trace_event_data_offsets_find_free_extent_have_block_group {}; + +struct trace_event_data_offsets_btrfs__reserve_extent {}; + +struct trace_event_data_offsets_btrfs_find_cluster {}; + +struct trace_event_data_offsets_btrfs_failed_cluster_setup {}; + +struct trace_event_data_offsets_btrfs_setup_cluster {}; + +struct trace_event_data_offsets_alloc_extent_state {}; + +struct trace_event_data_offsets_free_extent_state {}; + +struct trace_event_data_offsets_btrfs__work {}; + +struct trace_event_data_offsets_btrfs__work__done {}; + +struct trace_event_data_offsets_btrfs_workqueue { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_btrfs_workqueue_done {}; + +struct trace_event_data_offsets_btrfs__qgroup_rsv_data {}; + +struct trace_event_data_offsets_btrfs_qgroup_extent {}; + +struct trace_event_data_offsets_qgroup_num_dirty_extents {}; + +struct trace_event_data_offsets_btrfs_qgroup_account_extent {}; + +struct trace_event_data_offsets_qgroup_update_counters {}; + +struct trace_event_data_offsets_qgroup_update_reserve {}; + +struct trace_event_data_offsets_qgroup_meta_reserve {}; + +struct trace_event_data_offsets_qgroup_meta_convert {}; + +struct trace_event_data_offsets_qgroup_meta_free_all_pertrans {}; + +struct trace_event_data_offsets_btrfs__prelim_ref {}; + +struct trace_event_data_offsets_btrfs_inode_mod_outstanding_extents {}; + +struct trace_event_data_offsets_btrfs__block_group {}; + +struct trace_event_data_offsets_btrfs_set_extent_bit {}; + +struct trace_event_data_offsets_btrfs_clear_extent_bit {}; + +struct trace_event_data_offsets_btrfs_convert_extent_bit {}; + +struct trace_event_data_offsets_btrfs_dump_space_info {}; + +struct trace_event_data_offsets_btrfs_reserve_ticket {}; + +struct trace_event_data_offsets_btrfs_sleep_tree_lock {}; + +struct trace_event_data_offsets_btrfs_locking_events {}; + +struct trace_event_data_offsets_btrfs__space_info_update {}; + +struct trace_event_data_offsets_btrfs_raid56_bio {}; + +struct trace_event_data_offsets_btrfs_insert_one_raid_extent {}; + +struct trace_event_data_offsets_btrfs_raid_extent_delete {}; + +struct trace_event_data_offsets_btrfs_get_raid_extent_offset {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_count {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_scan_enter {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_scan_exit {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_remove_em {}; + +typedef void (*btf_trace_btrfs_transaction_commit)(void *, const struct btrfs_fs_info *); + +typedef void (*btf_trace_btrfs_inode_new)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_inode_request)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_inode_evict)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_get_extent)(void *, const struct btrfs_root *, const struct btrfs_inode *, const struct extent_map *); + +typedef void (*btf_trace_btrfs_handle_em_exist)(void *, const struct btrfs_fs_info *, const struct extent_map *, const struct extent_map *, u64, u64); + +typedef void (*btf_trace_btrfs_get_extent_show_fi_regular)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, u64); + +typedef void 
(*btf_trace_btrfs_truncate_show_fi_regular)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, u64); + +typedef void (*btf_trace_btrfs_get_extent_show_fi_inline)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, int, u64); + +typedef void (*btf_trace_btrfs_truncate_show_fi_inline)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, int, u64); + +typedef void (*btf_trace_btrfs_ordered_extent_add)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_remove)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_start)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_put)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_range)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_first_range)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_for_logging)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_first)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_split)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_dec_test_pending)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_mark_finished)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_finish_ordered_extent)(void *, const struct btrfs_inode *, u64, u64, bool); + +typedef void (*btf_trace_extent_writepage)(void *, const struct folio *, const struct inode *, const struct writeback_control *); + +typedef void (*btf_trace_btrfs_writepage_end_io_hook)(void *, const struct btrfs_inode *, u64, u64, int); + +typedef void (*btf_trace_btrfs_sync_file)(void *, const struct file *, int); + +typedef void (*btf_trace_btrfs_sync_fs)(void *, const struct btrfs_fs_info *, int); + +typedef void (*btf_trace_btrfs_add_block_group)(void *, const struct btrfs_fs_info *, const struct btrfs_block_group *, int); + +typedef void (*btf_trace_add_delayed_tree_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_run_delayed_tree_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_add_delayed_data_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_run_delayed_data_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_add_delayed_ref_head)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_head *, int); + +typedef void (*btf_trace_run_delayed_ref_head)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_head *, int); + 
+typedef void (*btf_trace_btrfs_chunk_alloc)(void *, const struct btrfs_fs_info *, const struct btrfs_chunk_map *, u64, u64); + +typedef void (*btf_trace_btrfs_chunk_free)(void *, const struct btrfs_fs_info *, const struct btrfs_chunk_map *, u64, u64); + +typedef void (*btf_trace_btrfs_cow_block)(void *, const struct btrfs_root *, const struct extent_buffer *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_space_reservation)(void *, const struct btrfs_fs_info *, const char *, u64, u64, int); + +typedef void (*btf_trace_btrfs_trigger_flush)(void *, const struct btrfs_fs_info *, u64, u64, int, const char *); + +typedef void (*btf_trace_btrfs_flush_space)(void *, const struct btrfs_fs_info *, u64, u64, int, int, bool); + +typedef void (*btf_trace_btrfs_reserved_extent_alloc)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_btrfs_reserved_extent_free)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_find_free_extent)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_find_free_extent_search_loop)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_find_free_extent_have_block_group)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_reserve_extent)(void *, const struct btrfs_block_group *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_btrfs_reserve_extent_cluster)(void *, const struct btrfs_block_group *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_btrfs_find_cluster)(void *, const struct btrfs_block_group *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_failed_cluster_setup)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_setup_cluster)(void *, const struct btrfs_block_group *, const struct btrfs_free_cluster *, u64, int); + +typedef void (*btf_trace_alloc_extent_state)(void *, const struct extent_state *, gfp_t, long unsigned int); + +typedef void (*btf_trace_free_extent_state)(void *, const struct extent_state *, long unsigned int); + +typedef void (*btf_trace_btrfs_work_queued)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_work_sched)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_all_work_done)(void *, const struct btrfs_fs_info *, const void *); + +typedef void (*btf_trace_btrfs_ordered_sched)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_workqueue_alloc)(void *, const struct btrfs_workqueue *, const char *); + +typedef void (*btf_trace_btrfs_workqueue_destroy)(void *, const struct btrfs_workqueue *); + +typedef void (*btf_trace_btrfs_qgroup_reserve_data)(void *, const struct inode *, u64, u64, u64, int); + +typedef void (*btf_trace_btrfs_qgroup_release_data)(void *, const struct inode *, u64, u64, u64, int); + +typedef void (*btf_trace_btrfs_qgroup_account_extents)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup_extent_record *, u64); + +typedef void (*btf_trace_btrfs_qgroup_trace_extent)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup_extent_record *, u64); + +typedef void (*btf_trace_qgroup_num_dirty_extents)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_btrfs_qgroup_account_extent)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64, u64); + +typedef void (*btf_trace_qgroup_update_counters)(void *, const struct 
btrfs_fs_info *, const struct btrfs_qgroup *, u64, u64); + +typedef void (*btf_trace_qgroup_update_reserve)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup *, s64, int); + +typedef void (*btf_trace_qgroup_meta_reserve)(void *, const struct btrfs_root *, s64, int); + +typedef void (*btf_trace_qgroup_meta_convert)(void *, const struct btrfs_root *, s64); + +typedef void (*btf_trace_qgroup_meta_free_all_pertrans)(void *, struct btrfs_root *); + +typedef void (*btf_trace_btrfs_prelim_ref_merge)(void *, const struct btrfs_fs_info *, const struct prelim_ref *, const struct prelim_ref *, u64); + +typedef void (*btf_trace_btrfs_prelim_ref_insert)(void *, const struct btrfs_fs_info *, const struct prelim_ref *, const struct prelim_ref *, u64); + +typedef void (*btf_trace_btrfs_inode_mod_outstanding_extents)(void *, const struct btrfs_root *, u64, int, unsigned int); + +typedef void (*btf_trace_btrfs_remove_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_add_unused_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_add_reclaim_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_reclaim_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_skip_unused_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_set_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int); + +typedef void (*btf_trace_btrfs_clear_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int); + +typedef void (*btf_trace_btrfs_convert_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int, unsigned int); + +typedef void (*btf_trace_btrfs_done_preemptive_reclaim)(void *, struct btrfs_fs_info *, const struct btrfs_space_info *); + +typedef void (*btf_trace_btrfs_fail_all_tickets)(void *, struct btrfs_fs_info *, const struct btrfs_space_info *); + +typedef void (*btf_trace_btrfs_reserve_ticket)(void *, const struct btrfs_fs_info *, u64, u64, u64, int, int); + +typedef void (*btf_trace_btrfs_tree_read_lock)(void *, const struct extent_buffer *, u64); + +typedef void (*btf_trace_btrfs_tree_lock)(void *, const struct extent_buffer *, u64); + +typedef void (*btf_trace_btrfs_tree_unlock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_unlock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_unlock_blocking)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_set_lock_blocking_read)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_set_lock_blocking_write)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_try_tree_read_lock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_lock_atomic)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_update_bytes_may_use)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_update_bytes_pinned)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_update_bytes_zone_unusable)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_raid56_read)(void *, const struct btrfs_raid_bio *, const struct bio *, const struct raid56_bio_trace_info *); + +typedef void (*btf_trace_raid56_write)(void *, const struct 
btrfs_raid_bio *, const struct bio *, const struct raid56_bio_trace_info *); + +typedef void (*btf_trace_btrfs_insert_one_raid_extent)(void *, const struct btrfs_fs_info *, u64, u64, int); + +typedef void (*btf_trace_btrfs_raid_extent_delete)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_get_raid_extent_offset)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_count)(void *, const struct btrfs_fs_info *, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_scan_enter)(void *, const struct btrfs_fs_info *, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_scan_exit)(void *, const struct btrfs_fs_info *, long int, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_remove_em)(void *, const struct btrfs_inode *, const struct extent_map *); + +struct btrfs_fs_context { + char *subvol_name; + long: 32; + u64 subvol_objectid; + u64 max_inline; + u32 commit_interval; + u32 metadata_ratio; + u32 thread_pool_size; + long: 32; + long long unsigned int mount_opt; + long unsigned int compress_type: 4; + unsigned int compress_level; + refcount_t refs; + long: 32; +}; + +enum { + Opt_acl = 0, + Opt_clear_cache = 1, + Opt_commit_interval = 2, + Opt_compress = 3, + Opt_compress_force = 4, + Opt_compress_force_type = 5, + Opt_compress_type = 6, + Opt_degraded = 7, + Opt_device = 8, + Opt_fatal_errors = 9, + Opt_flushoncommit = 10, + Opt_max_inline = 11, + Opt_barrier = 12, + Opt_datacow = 13, + Opt_datasum = 14, + Opt_defrag = 15, + Opt_discard = 16, + Opt_discard_mode = 17, + Opt_ratio = 18, + Opt_rescan_uuid_tree = 19, + Opt_skip_balance = 20, + Opt_space_cache = 21, + Opt_space_cache_version = 22, + Opt_ssd = 23, + Opt_ssd_spread = 24, + Opt_subvol = 25, + Opt_subvol_empty = 26, + Opt_subvolid = 27, + Opt_thread_pool = 28, + Opt_treelog = 29, + Opt_user_subvol_rm_allowed = 30, + Opt_norecovery = 31, + Opt_rescue = 32, + Opt_usebackuproot = 33, + Opt_nologreplay = 34, + Opt_enospc_debug = 35, + Opt_err = 36, +}; + +enum { + Opt_fatal_errors_panic = 0, + Opt_fatal_errors_bug = 1, +}; + +enum { + Opt_discard_sync = 0, + Opt_discard_async = 1, +}; + +enum { + Opt_space_cache_v1 = 0, + Opt_space_cache_v2 = 1, +}; + +enum { + Opt_rescue_usebackuproot = 0, + Opt_rescue_nologreplay = 1, + Opt_rescue_ignorebadroots = 2, + Opt_rescue_ignoredatacsums = 3, + Opt_rescue_ignoremetacsums = 4, + Opt_rescue_ignoresuperflags = 5, + Opt_rescue_parameter_all = 6, +}; + +struct init_sequence { + int (*init_func)(); + void (*exit_func)(); +}; + +enum OID { + OID_id_dsa_with_sha1 = 0, + OID_id_dsa = 1, + OID_id_ecPublicKey = 2, + OID_id_prime192v1 = 3, + OID_id_prime256v1 = 4, + OID_id_ecdsa_with_sha1 = 5, + OID_id_ecdsa_with_sha224 = 6, + OID_id_ecdsa_with_sha256 = 7, + OID_id_ecdsa_with_sha384 = 8, + OID_id_ecdsa_with_sha512 = 9, + OID_rsaEncryption = 10, + OID_sha1WithRSAEncryption = 11, + OID_sha256WithRSAEncryption = 12, + OID_sha384WithRSAEncryption = 13, + OID_sha512WithRSAEncryption = 14, + OID_sha224WithRSAEncryption = 15, + OID_data = 16, + OID_signed_data = 17, + OID_email_address = 18, + OID_contentType = 19, + OID_messageDigest = 20, + OID_signingTime = 21, + OID_smimeCapabilites = 22, + OID_smimeAuthenticatedAttrs = 23, + OID_mskrb5 = 24, + OID_krb5 = 25, + OID_krb5u2u = 26, + OID_msIndirectData = 27, + OID_msStatementType = 28, + OID_msSpOpusInfo = 29, + OID_msPeImageDataObjId = 30, + OID_msIndividualSPKeyPurpose = 31, + OID_msOutlookExpress = 32, + OID_ntlmssp = 
33, + OID_negoex = 34, + OID_spnego = 35, + OID_IAKerb = 36, + OID_PKU2U = 37, + OID_Scram = 38, + OID_certAuthInfoAccess = 39, + OID_sha1 = 40, + OID_id_ansip384r1 = 41, + OID_id_ansip521r1 = 42, + OID_sha256 = 43, + OID_sha384 = 44, + OID_sha512 = 45, + OID_sha224 = 46, + OID_commonName = 47, + OID_surname = 48, + OID_countryName = 49, + OID_locality = 50, + OID_stateOrProvinceName = 51, + OID_organizationName = 52, + OID_organizationUnitName = 53, + OID_title = 54, + OID_description = 55, + OID_name = 56, + OID_givenName = 57, + OID_initials = 58, + OID_generationalQualifier = 59, + OID_subjectKeyIdentifier = 60, + OID_keyUsage = 61, + OID_subjectAltName = 62, + OID_issuerAltName = 63, + OID_basicConstraints = 64, + OID_crlDistributionPoints = 65, + OID_certPolicies = 66, + OID_authorityKeyIdentifier = 67, + OID_extKeyUsage = 68, + OID_NetlogonMechanism = 69, + OID_appleLocalKdcSupported = 70, + OID_gostCPSignA = 71, + OID_gostCPSignB = 72, + OID_gostCPSignC = 73, + OID_gost2012PKey256 = 74, + OID_gost2012PKey512 = 75, + OID_gost2012Digest256 = 76, + OID_gost2012Digest512 = 77, + OID_gost2012Signature256 = 78, + OID_gost2012Signature512 = 79, + OID_gostTC26Sign256A = 80, + OID_gostTC26Sign256B = 81, + OID_gostTC26Sign256C = 82, + OID_gostTC26Sign256D = 83, + OID_gostTC26Sign512A = 84, + OID_gostTC26Sign512B = 85, + OID_gostTC26Sign512C = 86, + OID_sm2 = 87, + OID_sm3 = 88, + OID_SM2_with_SM3 = 89, + OID_sm3WithRSAEncryption = 90, + OID_TPMLoadableKey = 91, + OID_TPMImportableKey = 92, + OID_TPMSealedData = 93, + OID_sha3_256 = 94, + OID_sha3_384 = 95, + OID_sha3_512 = 96, + OID_id_ecdsa_with_sha3_256 = 97, + OID_id_ecdsa_with_sha3_384 = 98, + OID_id_ecdsa_with_sha3_512 = 99, + OID_id_rsassa_pkcs1_v1_5_with_sha3_256 = 100, + OID_id_rsassa_pkcs1_v1_5_with_sha3_384 = 101, + OID_id_rsassa_pkcs1_v1_5_with_sha3_512 = 102, + OID__NR = 103, +}; + +struct public_key { + void *key; + u32 keylen; + enum OID algo; + void *params; + u32 paramlen; + bool key_is_private; + const char *id_type; + const char *pkey_algo; + long unsigned int key_eflags; +}; + +struct asymmetric_key_id; + +struct public_key_signature { + struct asymmetric_key_id *auth_ids[3]; + u8 *s; + u8 *digest; + u32 s_size; + u32 digest_size; + const char *pkey_algo; + const char *hash_algo; + const char *encoding; +}; + +struct asymmetric_key_id { + short unsigned int len; + unsigned char data[0]; +}; + +struct asymmetric_key_subtype { + struct module *owner; + const char *name; + short unsigned int name_len; + void (*describe)(const struct key *, struct seq_file *); + void (*destroy)(void *, void *); + int (*query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*verify_signature)(const struct key *, const struct public_key_signature *); +}; + +enum asymmetric_payload_bits { + asym_crypto = 0, + asym_subtype = 1, + asym_key_ids = 2, + asym_auth = 3, +}; + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + long: 32; + u64 batch; +}; + +struct blk_stat_callback { + struct list_head list; + struct timer_list timer; + struct blk_rq_stat *cpu_stat; + int (*bucket_fn)(const struct request *); + unsigned int buckets; + struct blk_rq_stat *stat; + void (*timer_fn)(struct blk_stat_callback *); + void *data; + struct callback_head rcu; +}; + +struct rq_wait { + wait_queue_head_t wait; + atomic_t inflight; +}; + +struct rq_depth { + unsigned int max_depth; + int scale_step; + bool scaled_max; + unsigned int queue_depth; + 
unsigned int default_depth; +}; + +typedef bool acquire_inflight_cb_t(struct rq_wait *, void *); + +typedef void cleanup_cb_t(struct rq_wait *, void *); + +struct trace_event_raw_wbt_stat { + struct trace_entry ent; + char name[32]; + s64 rmean; + u64 rmin; + u64 rmax; + s64 rnr_samples; + s64 rtime; + s64 wmean; + u64 wmin; + u64 wmax; + s64 wnr_samples; + s64 wtime; + char __data[0]; +}; + +struct trace_event_raw_wbt_lat { + struct trace_entry ent; + char name[32]; + long unsigned int lat; + char __data[0]; +}; + +struct trace_event_raw_wbt_step { + struct trace_entry ent; + char name[32]; + const char *msg; + int step; + long unsigned int window; + unsigned int bg; + unsigned int normal; + unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_wbt_timer { + struct trace_entry ent; + char name[32]; + unsigned int status; + int step; + unsigned int inflight; + char __data[0]; +}; + +struct trace_event_data_offsets_wbt_stat {}; + +struct trace_event_data_offsets_wbt_lat {}; + +struct trace_event_data_offsets_wbt_step {}; + +struct trace_event_data_offsets_wbt_timer {}; + +typedef void (*btf_trace_wbt_stat)(void *, struct backing_dev_info *, struct blk_rq_stat *); + +typedef void (*btf_trace_wbt_lat)(void *, struct backing_dev_info *, long unsigned int); + +typedef void (*btf_trace_wbt_step)(void *, struct backing_dev_info *, const char *, int, long unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_wbt_timer)(void *, struct backing_dev_info *, unsigned int, int, unsigned int); + +enum wbt_flags { + WBT_TRACKED = 1, + WBT_READ = 2, + WBT_SWAP = 4, + WBT_DISCARD = 8, + WBT_NR_BITS = 4, +}; + +enum { + WBT_RWQ_BG = 0, + WBT_RWQ_SWAP = 1, + WBT_RWQ_DISCARD = 2, + WBT_NUM_RWQ = 3, +}; + +enum { + WBT_STATE_ON_DEFAULT = 1, + WBT_STATE_ON_MANUAL = 2, + WBT_STATE_OFF_DEFAULT = 3, + WBT_STATE_OFF_MANUAL = 4, +}; + +struct rq_wb { + unsigned int wb_background; + unsigned int wb_normal; + short int enable_state; + unsigned int unknown_cnt; + u64 win_nsec; + u64 cur_win_nsec; + struct blk_stat_callback *cb; + long: 32; + u64 sync_issue; + void *sync_cookie; + long unsigned int last_issue; + long unsigned int last_comp; + long unsigned int min_lat_nsec; + struct rq_qos rqos; + struct rq_wait rq_wait[3]; + struct rq_depth rq_depth; +}; + +enum { + RWB_DEF_DEPTH = 16, + RWB_WINDOW_NSEC = 100000000, + RWB_MIN_WRITE_SAMPLES = 3, + RWB_UNKNOWN_BUMP = 5, +}; + +enum { + LAT_OK = 1, + LAT_UNKNOWN = 2, + LAT_UNKNOWN_WRITES = 3, + LAT_EXCEEDED = 4, +}; + +struct wbt_wait_data { + struct rq_wb *rwb; + enum wbt_flags wb_acct; + blk_opf_t opf; +}; + +union uu { + short unsigned int us; + unsigned char b[2]; +}; + +struct internal_state { + int dummy; +}; + +enum maddf_flags { + MADDF_NEGATE_PRODUCT = 1, + MADDF_NEGATE_ADDITION = 2, +}; + +enum support_mode { + ALLOW_LEGACY = 0, + DENY_LEGACY = 1, +}; + +enum { + FB_BLANK_UNBLANK = 0, + FB_BLANK_NORMAL = 1, + FB_BLANK_VSYNC_SUSPEND = 2, + FB_BLANK_HSYNC_SUSPEND = 3, + FB_BLANK_POWERDOWN = 4, +}; + +struct fb_cmap_user { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +typedef short unsigned int ushort; + +struct unipair { + short unsigned int unicode; + short unsigned int fontpos; +}; + +enum translation_map { + LAT1_MAP = 0, + GRAF_MAP = 1, + IBMPC_MAP = 2, + USER_MAP = 3, + FIRST_MAP = 0, + LAST_MAP = 3, +}; + +struct uni_pagedict { + u16 **uni_pgdir[32]; + long unsigned int refcount; + long unsigned int sum; + unsigned char *inverse_translations[4]; + u16 
*inverse_trans_unicode; +}; + +typedef int (*pm_callback_t)(struct device *); + +enum scsi_devinfo_key { + SCSI_DEVINFO_GLOBAL = 0, + SCSI_DEVINFO_SPI = 1, +}; + +struct transport_class { + struct class class; + int (*setup)(struct transport_container *, struct device *, struct device *); + int (*configure)(struct transport_container *, struct device *, struct device *); + int (*remove)(struct transport_container *, struct device *, struct device *); +}; + +struct anon_transport_class { + struct transport_class tclass; + struct attribute_container container; +}; + +struct spi_transport_attrs { + int period; + int min_period; + int offset; + int max_offset; + unsigned int width: 1; + unsigned int max_width: 1; + unsigned int iu: 1; + unsigned int max_iu: 1; + unsigned int dt: 1; + unsigned int qas: 1; + unsigned int max_qas: 1; + unsigned int wr_flow: 1; + unsigned int rd_strm: 1; + unsigned int rti: 1; + unsigned int pcomp_en: 1; + unsigned int hold_mcs: 1; + unsigned int initial_dv: 1; + long unsigned int flags; + unsigned int support_sync: 1; + unsigned int support_wide: 1; + unsigned int support_dt: 1; + unsigned int support_dt_only; + unsigned int support_ius; + unsigned int support_qas; + unsigned int dv_pending: 1; + unsigned int dv_in_progress: 1; + struct mutex dv_mutex; +}; + +enum spi_signal_type { + SPI_SIGNAL_UNKNOWN = 1, + SPI_SIGNAL_SE = 2, + SPI_SIGNAL_LVD = 3, + SPI_SIGNAL_HVD = 4, +}; + +struct spi_host_attrs { + enum spi_signal_type signalling; +}; + +struct spi_function_template { + void (*get_period)(struct scsi_target *); + void (*set_period)(struct scsi_target *, int); + void (*get_offset)(struct scsi_target *); + void (*set_offset)(struct scsi_target *, int); + void (*get_width)(struct scsi_target *); + void (*set_width)(struct scsi_target *, int); + void (*get_iu)(struct scsi_target *); + void (*set_iu)(struct scsi_target *, int); + void (*get_dt)(struct scsi_target *); + void (*set_dt)(struct scsi_target *, int); + void (*get_qas)(struct scsi_target *); + void (*set_qas)(struct scsi_target *, int); + void (*get_wr_flow)(struct scsi_target *); + void (*set_wr_flow)(struct scsi_target *, int); + void (*get_rd_strm)(struct scsi_target *); + void (*set_rd_strm)(struct scsi_target *, int); + void (*get_rti)(struct scsi_target *); + void (*set_rti)(struct scsi_target *, int); + void (*get_pcomp_en)(struct scsi_target *); + void (*set_pcomp_en)(struct scsi_target *, int); + void (*get_hold_mcs)(struct scsi_target *); + void (*set_hold_mcs)(struct scsi_target *, int); + void (*get_signalling)(struct Scsi_Host *); + void (*set_signalling)(struct Scsi_Host *, enum spi_signal_type); + int (*deny_binding)(struct scsi_target *); + long unsigned int show_period: 1; + long unsigned int show_offset: 1; + long unsigned int show_width: 1; + long unsigned int show_iu: 1; + long unsigned int show_dt: 1; + long unsigned int show_qas: 1; + long unsigned int show_wr_flow: 1; + long unsigned int show_rd_strm: 1; + long unsigned int show_rti: 1; + long unsigned int show_pcomp_en: 1; + long unsigned int show_hold_mcs: 1; +}; + +enum { + SPI_BLIST_NOIUS = 1, +}; + +struct spi_internal { + struct scsi_transport_template t; + struct spi_function_template *f; +}; + +enum spi_compare_returns { + SPI_COMPARE_SUCCESS = 0, + SPI_COMPARE_FAILURE = 1, + SPI_COMPARE_SKIP_TEST = 2, +}; + +struct work_queue_wrapper { + struct work_struct work; + struct scsi_device *sdev; +}; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +enum gpiod_flags { + GPIOD_ASIS = 0, + GPIOD_IN 
= 1, + GPIOD_OUT_LOW = 3, + GPIOD_OUT_HIGH = 7, + GPIOD_OUT_LOW_OPEN_DRAIN = 11, + GPIOD_OUT_HIGH_OPEN_DRAIN = 15, +}; + +struct mdio_board_info { + const char *bus_id; + char modalias[32]; + int mdio_addr; + const void *platform_data; +}; + +struct trace_event_raw_mdio_access { + struct trace_entry ent; + char busid[61]; + char read; + u8 addr; + u16 val; + unsigned int regnum; + char __data[0]; +}; + +struct trace_event_data_offsets_mdio_access {}; + +typedef void (*btf_trace_mdio_access)(void *, struct mii_bus *, char, u8, unsigned int, u16, int); + +struct mdio_bus_stat_attr { + int addr; + unsigned int field_offset; +}; + +struct tasklet_struct { + struct tasklet_struct *next; + long unsigned int state; + atomic_t count; + bool use_callback; + union { + void (*func)(long unsigned int); + void (*callback)(struct tasklet_struct *); + }; + long unsigned int data; +}; + +enum { + TASKLET_STATE_SCHED = 0, + TASKLET_STATE_RUN = 1, +}; + +struct usb_dynids { + struct list_head list; +}; + +struct usb_driver { + const char *name; + int (*probe)(struct usb_interface *, const struct usb_device_id *); + void (*disconnect)(struct usb_interface *); + int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *); + int (*suspend)(struct usb_interface *, pm_message_t); + int (*resume)(struct usb_interface *); + int (*reset_resume)(struct usb_interface *); + int (*pre_reset)(struct usb_interface *); + int (*post_reset)(struct usb_interface *); + void (*shutdown)(struct usb_interface *); + const struct usb_device_id *id_table; + const struct attribute_group **dev_groups; + struct usb_dynids dynids; + struct device_driver driver; + unsigned int no_dynamic_id: 1; + unsigned int supports_autosuspend: 1; + unsigned int disable_hub_initiated_lpm: 1; + unsigned int soft_unbind: 1; +}; + +struct usb_device_driver { + const char *name; + bool (*match)(struct usb_device *); + int (*probe)(struct usb_device *); + void (*disconnect)(struct usb_device *); + int (*suspend)(struct usb_device *, pm_message_t); + int (*resume)(struct usb_device *, pm_message_t); + int (*choose_configuration)(struct usb_device *); + const struct attribute_group **dev_groups; + struct device_driver driver; + const struct usb_device_id *id_table; + unsigned int supports_autosuspend: 1; + unsigned int generic_subclass: 1; +}; + +typedef u32 acpi_size; + +typedef u64 acpi_io_address; + +typedef u32 acpi_status; + +typedef char *acpi_string; + +typedef u32 acpi_object_type; + +union acpi_object { + acpi_object_type type; + struct { + acpi_object_type type; + long: 32; + u64 value; + } integer; + struct { + acpi_object_type type; + u32 length; + char *pointer; + } string; + struct { + acpi_object_type type; + u32 length; + u8 *pointer; + } buffer; + struct { + acpi_object_type type; + u32 count; + union acpi_object *elements; + } package; + struct { + acpi_object_type type; + acpi_object_type actual_type; + acpi_handle handle; + } reference; + struct { + acpi_object_type type; + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + long: 32; + } processor; + struct { + acpi_object_type type; + u32 system_level; + u32 resource_order; + } power_resource; +}; + +struct acpi_object_list { + u32 count; + union acpi_object *pointer; +}; + +struct acpi_buffer { + acpi_size length; + void *pointer; +}; + +enum spd_duplex { + NWAY_10M_HALF = 0, + NWAY_10M_FULL = 1, + NWAY_100M_HALF = 2, + NWAY_100M_FULL = 3, + NWAY_1000M_FULL = 4, + FORCE_10M_HALF = 5, + FORCE_10M_FULL = 6, + FORCE_100M_HALF = 7, + FORCE_100M_FULL = 8, + 
FORCE_1000M_FULL = 9, + NWAY_2500M_FULL = 10, +}; + +enum rtl_register_content { + _2500bps = 1024, + _1250bps = 512, + _500bps = 256, + _tx_flow = 64, + _rx_flow = 32, + _1000bps = 16, + _100bps = 8, + _10bps = 4, + LINK_STATUS = 2, + FULL_DUP = 1, +}; + +enum rtl8152_flags { + RTL8152_INACCESSIBLE = 0, + RTL8152_SET_RX_MODE = 1, + WORK_ENABLE = 2, + RTL8152_LINK_CHG = 3, + SELECTIVE_SUSPEND = 4, + PHY_RESET = 5, + SCHEDULE_TASKLET = 6, + GREEN_ETHERNET = 7, + RX_EPROTO = 8, + IN_PRE_RESET = 9, + PROBED_WITH_NO_ERRORS = 10, + PROBE_SHOULD_RETRY = 11, +}; + +struct tally_counter { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underrun; +}; + +struct rx_desc { + __le32 opts1; + __le32 opts2; + __le32 opts3; + __le32 opts4; + __le32 opts5; + __le32 opts6; +}; + +struct tx_desc { + __le32 opts1; + __le32 opts2; +}; + +struct r8152; + +struct rx_agg { + struct list_head list; + struct list_head info_list; + struct urb *urb; + struct r8152 *context; + struct page *page; + void *buffer; +}; + +struct tx_agg { + struct list_head list; + struct urb *urb; + struct r8152 *context; + void *buffer; + void *head; + u32 skb_num; + u32 skb_len; +}; + +struct rtl_ops { + void (*init)(struct r8152 *); + int (*enable)(struct r8152 *); + void (*disable)(struct r8152 *); + void (*up)(struct r8152 *); + void (*down)(struct r8152 *); + void (*unload)(struct r8152 *); + int (*eee_get)(struct r8152 *, struct ethtool_keee *); + int (*eee_set)(struct r8152 *, struct ethtool_keee *); + bool (*in_nway)(struct r8152 *); + void (*hw_phy_cfg)(struct r8152 *); + void (*autosuspend_en)(struct r8152 *, bool); + void (*change_mtu)(struct r8152 *); +}; + +struct ups_info { + u32 r_tune: 1; + u32 _10m_ckdiv: 1; + u32 _250m_ckdiv: 1; + u32 aldps: 1; + u32 lite_mode: 2; + u32 speed_duplex: 4; + u32 eee: 1; + u32 eee_lite: 1; + u32 eee_ckdiv: 1; + u32 eee_plloff_100: 1; + u32 eee_plloff_giga: 1; + u32 eee_cmod_lv: 1; + u32 green: 1; + u32 flow_control: 1; + u32 ctap_short_off: 1; +}; + +struct rtl_fw___2 { + const char *fw_name; + const struct firmware *fw; + char version[32]; + int (*pre_fw)(struct r8152 *); + int (*post_fw)(struct r8152 *); + bool retry; +}; + +struct r8152 { + long unsigned int flags; + struct usb_device *udev; + struct napi_struct napi; + struct usb_interface *intf; + struct net_device *netdev; + struct urb *intr_urb; + struct tx_agg tx_info[4]; + struct list_head rx_info; + struct list_head rx_used; + struct list_head rx_done; + struct list_head tx_free; + struct sk_buff_head tx_queue; + struct sk_buff_head rx_queue; + spinlock_t rx_lock; + spinlock_t tx_lock; + struct delayed_work schedule; + struct delayed_work hw_phy_work; + struct mii_if_info mii; + struct mutex control; + struct tasklet_struct tx_tl; + struct rtl_ops rtl_ops; + struct ups_info ups_info; + struct rtl_fw___2 rtl_fw; + atomic_t rx_count; + bool eee_en; + int intr_interval; + u32 saved_wolopts; + u32 msg_enable; + u32 tx_qlen; + u32 coalesce; + u32 advertising; + u32 rx_buf_sz; + u32 rx_copybreak; + u32 rx_pending; + u32 fc_pause_on; + u32 fc_pause_off; + unsigned int pipe_in; + unsigned int pipe_out; + unsigned int pipe_intr; + unsigned int pipe_ctrl_in; + unsigned int pipe_ctrl_out; + u32 support_2500full: 1; + u32 lenovo_macpassthru: 1; + u32 dell_tb_rx_agg_bug: 1; + u16 ocp_base; + u16 speed; + u16 
eee_adv; + u8 *intr_buff; + u8 version; + u8 duplex; + u8 autoneg; + unsigned int reg_access_reset_count; +}; + +struct fw_block { + __le32 type; + __le32 length; +}; + +struct fw_header { + u8 checksum[32]; + char version[32]; + struct fw_block blocks[0]; +}; + +enum rtl8152_fw_flags { + FW_FLAGS_USB = 0, + FW_FLAGS_PLA = 1, + FW_FLAGS_START = 2, + FW_FLAGS_STOP = 3, + FW_FLAGS_NC = 4, + FW_FLAGS_NC1 = 5, + FW_FLAGS_NC2 = 6, + FW_FLAGS_UC2 = 7, + FW_FLAGS_UC = 8, + FW_FLAGS_SPEED_UP = 9, + FW_FLAGS_VER = 10, +}; + +enum rtl8152_fw_fixup_cmd { + FW_FIXUP_AND = 0, + FW_FIXUP_OR = 1, + FW_FIXUP_NOT = 2, + FW_FIXUP_XOR = 3, +}; + +struct fw_phy_set { + __le16 addr; + __le16 data; +}; + +struct fw_phy_speed_up { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 version; + __le16 fw_reg; + __le16 reserved; + char info[0]; +}; + +struct fw_phy_ver { + struct fw_block blk_hdr; + struct fw_phy_set ver; + __le32 reserved; +}; + +struct fw_phy_fixup { + struct fw_block blk_hdr; + struct fw_phy_set setting; + __le16 bit_cmd; + __le16 reserved; +}; + +struct fw_phy_union { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + struct fw_phy_set pre_set[2]; + struct fw_phy_set bp[8]; + struct fw_phy_set bp_en; + u8 pre_num; + u8 bp_num; + char info[0]; +} __attribute__((packed)); + +struct fw_mac { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + __le16 bp_ba_addr; + __le16 bp_ba_value; + __le16 bp_en_addr; + __le16 bp_en_value; + __le16 bp_start; + __le16 bp_num; + __le16 bp[16]; + __le32 reserved; + __le16 fw_ver_reg; + u8 fw_ver_data; + char info[0]; +} __attribute__((packed)); + +struct fw_phy_patch_key { + struct fw_block blk_hdr; + __le16 key_reg; + __le16 key_data; + __le32 reserved; +}; + +struct fw_phy_nc { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + __le16 ba_reg; + __le16 ba_data; + __le16 patch_en_addr; + __le16 patch_en_value; + __le16 mode_reg; + __le16 mode_pre; + __le16 mode_post; + __le16 reserved; + __le16 bp_start; + __le16 bp_num; + __le16 bp[4]; + char info[0]; +}; + +enum rtl_fw_type { + RTL_FW_END = 0, + RTL_FW_PLA = 1, + RTL_FW_USB = 2, + RTL_FW_PHY_START = 3, + RTL_FW_PHY_STOP = 4, + RTL_FW_PHY_NC = 5, + RTL_FW_PHY_FIXUP = 6, + RTL_FW_PHY_UNION_NC = 7, + RTL_FW_PHY_UNION_NC1 = 8, + RTL_FW_PHY_UNION_NC2 = 9, + RTL_FW_PHY_UNION_UC2 = 10, + RTL_FW_PHY_UNION_UC = 11, + RTL_FW_PHY_UNION_MISC = 12, + RTL_FW_PHY_SPEED_UP = 13, + RTL_FW_PHY_VER = 14, +}; + +enum rtl_version { + RTL_VER_UNKNOWN = 0, + RTL_VER_01 = 1, + RTL_VER_02 = 2, + RTL_VER_03 = 3, + RTL_VER_04 = 4, + RTL_VER_05 = 5, + RTL_VER_06 = 6, + RTL_VER_07 = 7, + RTL_VER_08 = 8, + RTL_VER_09 = 9, + RTL_TEST_01 = 10, + RTL_VER_10 = 11, + RTL_VER_11 = 12, + RTL_VER_12 = 13, + RTL_VER_13 = 14, + RTL_VER_14 = 15, + RTL_VER_15 = 16, + RTL_VER_MAX = 17, +}; + +enum tx_csum_stat { + TX_CSUM_SUCCESS = 0, + TX_CSUM_TSO = 1, + TX_CSUM_NONE = 2, +}; + +enum memblock_flags { + MEMBLOCK_NONE = 0, + MEMBLOCK_HOTPLUG = 1, + MEMBLOCK_MIRROR = 2, + MEMBLOCK_NOMAP = 4, + MEMBLOCK_DRIVER_MANAGED = 8, + MEMBLOCK_RSRV_NOINIT = 16, +}; + +struct memblock_region { + phys_addr_t base; + phys_addr_t size; + enum memblock_flags flags; +}; + +typedef __be32 fdt32_t; + +struct fdt_header { + fdt32_t magic; + fdt32_t totalsize; + fdt32_t off_dt_struct; + fdt32_t off_dt_strings; + fdt32_t off_mem_rsvmap; + fdt32_t version; + fdt32_t last_comp_version; + fdt32_t boot_cpuid_phys; + fdt32_t size_dt_strings; + fdt32_t size_dt_struct; +}; + +struct serial_icounter_struct { + int cts; + int dsr; + int 
rng; + int dcd; + int rx; + int tx; + int frame; + int overrun; + int parity; + int brk; + int buf_overrun; + int reserved[9]; +}; + +struct serial_struct { + int type; + int line; + unsigned int port; + int irq; + int flags; + int xmit_fifo_size; + int custom_divisor; + int baud_base; + short unsigned int close_delay; + char io_type; + char reserved_char[1]; + int hub6; + short unsigned int closing_wait; + short unsigned int closing_wait2; + unsigned char *iomem_base; + short unsigned int iomem_reg_shift; + unsigned int port_high; + long unsigned int iomap_base; +}; + +enum { + BPF_F_RECOMPUTE_CSUM = 1, + BPF_F_INVALIDATE_HASH = 2, +}; + +enum { + BPF_F_HDR_FIELD_MASK = 15, +}; + +enum { + BPF_F_PSEUDO_HDR = 16, + BPF_F_MARK_MANGLED_0 = 32, + BPF_F_MARK_ENFORCE = 64, +}; + +enum { + BPF_F_TUNINFO_IPV6 = 1, +}; + +enum { + BPF_F_ZERO_CSUM_TX = 2, + BPF_F_DONT_FRAGMENT = 4, + BPF_F_SEQ_NUMBER = 8, + BPF_F_NO_TUNNEL_KEY = 16, +}; + +enum { + BPF_F_TUNINFO_FLAGS = 16, +}; + +enum { + BPF_F_INDEX_MASK = 4294967295ULL, + BPF_F_CURRENT_CPU = 4294967295ULL, + BPF_F_CTXLEN_MASK = 4503595332403200ULL, +}; + +enum { + BPF_CSUM_LEVEL_QUERY = 0, + BPF_CSUM_LEVEL_INC = 1, + BPF_CSUM_LEVEL_DEC = 2, + BPF_CSUM_LEVEL_RESET = 3, +}; + +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = 1, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4, + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8, + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16, + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32, + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64, + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128, + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256, +}; + +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; + +enum { + BPF_SK_LOOKUP_F_REPLACE = 1, + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2, +}; + +enum bpf_adj_room_mode { + BPF_ADJ_ROOM_NET = 0, + BPF_ADJ_ROOM_MAC = 1, +}; + +enum bpf_hdr_start_off { + BPF_HDR_START_MAC = 0, + BPF_HDR_START_NET = 1, +}; + +enum { + BPF_SKB_TSTAMP_UNSPEC = 0, + BPF_SKB_TSTAMP_DELIVERY_MONO = 1, + BPF_SKB_CLOCK_REALTIME = 0, + BPF_SKB_CLOCK_MONOTONIC = 1, + BPF_SKB_CLOCK_TAI = 2, +}; + +struct bpf_tunnel_key { + __u32 tunnel_id; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; + __u8 tunnel_tos; + __u8 tunnel_ttl; + union { + __u16 tunnel_ext; + __be16 tunnel_flags; + }; + __u32 tunnel_label; + union { + __u32 local_ipv4; + __u32 local_ipv6[4]; + }; +}; + +struct bpf_tcp_sock { + __u32 snd_cwnd; + __u32 srtt_us; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u64 bytes_received; + __u64 bytes_acked; + __u32 dsack_dups; + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + +struct bpf_xdp_sock { + __u32 queue_id; +}; + +struct sk_msg_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 size; + union { + struct bpf_sock *sk; + }; +}; + +struct sk_reuseport_md { + union { + void *data; + }; + union { + void *data_end; + }; + 
__u32 len; + __u32 eth_protocol; + __u32 ip_protocol; + __u32 bind_inany; + __u32 hash; + long: 32; + union { + struct bpf_sock *sk; + }; + union { + struct bpf_sock *migrating_sk; + }; +}; + +struct bpf_sock_addr { + __u32 user_family; + __u32 user_ip4; + __u32 user_ip6[4]; + __u32 user_port; + __u32 family; + __u32 type; + __u32 protocol; + __u32 msg_src_ip4; + __u32 msg_src_ip6[4]; + long: 32; + union { + struct bpf_sock *sk; + }; +}; + +struct bpf_sock_ops { + __u32 op; + union { + __u32 args[4]; + __u32 reply; + __u32 replylong[4]; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 is_fullsock; + __u32 snd_cwnd; + __u32 srtt_us; + __u32 bpf_sock_ops_cb_flags; + __u32 state; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u32 sk_txhash; + __u64 bytes_received; + __u64 bytes_acked; + union { + struct bpf_sock *sk; + }; + union { + void *skb_data; + }; + union { + void *skb_data_end; + }; + __u32 skb_len; + __u32 skb_tcp_flags; + __u64 skb_hwtstamp; +}; + +enum { + BPF_SOCK_OPS_RTO_CB_FLAG = 1, + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2, + BPF_SOCK_OPS_STATE_CB_FLAG = 4, + BPF_SOCK_OPS_RTT_CB_FLAG = 8, + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16, + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64, + BPF_SOCK_OPS_ALL_CB_FLAGS = 127, +}; + +enum { + TCP_BPF_IW = 1001, + TCP_BPF_SNDCWND_CLAMP = 1002, + TCP_BPF_DELACK_MAX = 1003, + TCP_BPF_RTO_MIN = 1004, + TCP_BPF_SYN = 1005, + TCP_BPF_SYN_IP = 1006, + TCP_BPF_SYN_MAC = 1007, + TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, +}; + +enum { + BPF_LOAD_HDR_OPT_TCP_SYN = 1, +}; + +enum { + BPF_FIB_LOOKUP_DIRECT = 1, + BPF_FIB_LOOKUP_OUTPUT = 2, + BPF_FIB_LOOKUP_SKIP_NEIGH = 4, + BPF_FIB_LOOKUP_TBID = 8, + BPF_FIB_LOOKUP_SRC = 16, + BPF_FIB_LOOKUP_MARK = 32, +}; + +enum { + BPF_FIB_LKUP_RET_SUCCESS = 0, + BPF_FIB_LKUP_RET_BLACKHOLE = 1, + BPF_FIB_LKUP_RET_UNREACHABLE = 2, + BPF_FIB_LKUP_RET_PROHIBIT = 3, + BPF_FIB_LKUP_RET_NOT_FWDED = 4, + BPF_FIB_LKUP_RET_FWD_DISABLED = 5, + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, + BPF_FIB_LKUP_RET_NO_NEIGH = 7, + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, + BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9, +}; + +struct bpf_fib_lookup { + __u8 family; + __u8 l4_protocol; + __be16 sport; + __be16 dport; + union { + __u16 tot_len; + __u16 mtu_result; + }; + __u32 ifindex; + union { + __u8 tos; + __be32 flowinfo; + __u32 rt_metric; + }; + union { + __be32 ipv4_src; + __u32 ipv6_src[4]; + }; + union { + __be32 ipv4_dst; + __u32 ipv6_dst[4]; + }; + union { + struct { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + }; + __u32 tbid; + }; + union { + struct { + __u32 mark; + }; + struct { + __u8 smac[6]; + __u8 dmac[6]; + }; + }; +}; + +struct bpf_redir_neigh { + __u32 nh_family; + union { + __be32 ipv4_nh; + __u32 ipv6_nh[4]; + }; +}; + +enum bpf_check_mtu_flags { + BPF_MTU_CHK_SEGS = 1, +}; + +enum bpf_check_mtu_ret { + BPF_MTU_CHK_RET_SUCCESS = 0, + BPF_MTU_CHK_RET_FRAG_NEEDED = 1, + BPF_MTU_CHK_RET_SEGS_TOOBIG = 2, +}; + +struct bpf_sk_lookup { + union { + union { + struct bpf_sock *sk; + }; + __u64 cookie; + }; + __u32 family; + __u32 protocol; + __u32 remote_ip4; + __u32 remote_ip6[4]; + __be16 remote_port; + 
__u32 local_ip4; + __u32 local_ip6[4]; + __u32 local_port; + __u32 ingress_ifindex; + long: 32; +}; + +struct sock_fprog { + short unsigned int len; + struct sock_filter *filter; +}; + +struct bpf_dispatcher_prog { + struct bpf_prog *prog; + refcount_t users; +}; + +struct bpf_dispatcher { + struct mutex mutex; + void *func; + struct bpf_dispatcher_prog progs[48]; + int num_progs; + void *image; + void *rw_image; + u32 image_off; + struct bpf_ksym ksym; +}; + +typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int); + +struct bpf_empty_prog_array { + struct bpf_prog_array hdr; + struct bpf_prog *null_prog; + long: 32; +}; + +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + struct sock *migrating_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; + +typedef u32 compat_uptr_t; + +struct bpf_flow_dissector { + struct bpf_flow_keys *flow_keys; + const struct sk_buff *skb; + const void *data; + const void *data_end; +}; + +enum { + SKBFL_ZEROCOPY_ENABLE = 1, + SKBFL_SHARED_FRAG = 2, + SKBFL_PURE_ZEROCOPY = 4, + SKBFL_DONT_ORPHAN = 8, + SKBFL_MANAGED_FRAG_REFS = 16, +}; + +enum skb_tstamp_type { + SKB_CLOCK_REALTIME = 0, + SKB_CLOCK_MONOTONIC = 1, + SKB_CLOCK_TAI = 2, + __SKB_CLOCK_MAX = 2, +}; + +struct seg6_pernet_data { + struct mutex lock; + struct in6_addr *tun_src; +}; + +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[128]; + u8 buckets[0]; +}; + +struct qdisc_skb_cb { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + unsigned char data[20]; +}; + +struct compat_sock_fprog { + u16 len; + compat_uptr_t filter; +}; + +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_meta; + void *data_end; +}; + +typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); + +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + u64 tmp_reg; + void *t_ctx; + u32 uaddrlen; +}; + +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + __be16 sport; + u16 dport; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + struct sock *selected_sk; + u32 ingress_ifindex; + bool no_reuseport; +}; + +struct tcp_timewait_sock { + struct inet_timewait_sock tw_sk; + u32 tw_rcv_wnd; + u32 tw_ts_offset; + u32 tw_ts_recent; + u32 tw_last_oow_ack_time; + int tw_ts_recent_stamp; + u32 tw_tx_delay; +}; + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ipv6_bpf_stub { + int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32); + struct sock * (*udp6_lib_lookup)(const struct net *, const struct in6_addr *, __be16, const struct in6_addr *, __be16, int, int, struct udp_table *, struct sk_buff *); + int (*ipv6_setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*ipv6_getsockopt)(struct sock *, int, int, sockptr_t, sockptr_t); + int (*ipv6_dev_get_saddr)(struct net *, const struct net_device *, const struct in6_addr *, unsigned int, struct in6_addr *); +}; + +enum { + INET_ECN_NOT_ECT = 0, + INET_ECN_ECT_1 = 1, + INET_ECN_ECT_0 = 2, + INET_ECN_CE = 3, + INET_ECN_MASK = 3, +}; + +struct mptcp_sock {}; + +struct 
bpf_tcp_req_attrs { + u32 rcv_tsval; + u32 rcv_tsecr; + u16 mss; + u8 rcv_wscale; + u8 snd_wscale; + u8 ecn_ok; + u8 wscale_ok; + u8 sack_ok; + u8 tstamp_ok; + u8 usec_ts_ok; + u8 reserved[3]; +}; + +struct strp_msg { + int full_len; + int offset; +}; + +struct _strp_msg { + struct strp_msg strp; + int accum_len; +}; + +struct tls_msg { + u8 control; +}; + +struct sk_skb_cb { + unsigned char data[20]; + unsigned char pad[4]; + struct _strp_msg strp; + struct tls_msg tls; + u64 temp_reg; +}; + +struct cgroup_cls_state { + struct cgroup_subsys_state css; + u32 classid; + long: 32; +}; + +struct xdp_sock { + struct sock sk; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xsk_queue *rx; + struct net_device *dev; + struct xdp_umem *umem; + struct list_head flush_node; + struct xsk_buff_pool *pool; + u16 queue_id; + bool zc; + bool sg; + enum { + XSK_READY = 0, + XSK_BOUND = 1, + XSK_UNBOUND = 2, + } state; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xsk_queue *tx; + struct list_head tx_list; + u32 tx_budget_spent; + spinlock_t rx_lock; + long: 32; + u64 rx_dropped; + u64 rx_queue_full; + struct sk_buff *skb; + struct list_head map_list; + spinlock_t map_list_lock; + struct mutex mutex; + struct xsk_queue *fq_tmp; + struct xsk_queue *cq_tmp; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct tls_crypto_info { + __u16 version; + __u16 cipher_type; +}; + +struct tls12_crypto_info_aes_gcm_128 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_aes_gcm_256 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[32]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_chacha20_poly1305 { + struct tls_crypto_info info; + unsigned char iv[12]; + unsigned char key[32]; + unsigned char salt[0]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_gcm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_ccm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls_strparser { + struct sock *sk; + u32 mark: 8; + u32 stopped: 1; + u32 copy_mode: 1; + u32 mixed_decrypted: 1; + bool msg_ready; + struct strp_msg stm; + struct sk_buff *anchor; + struct work_struct work; +}; + +struct tls_sw_context_rx { + struct crypto_aead *aead_recv; + struct crypto_wait async_wait; + struct sk_buff_head rx_list; + void (*saved_data_ready)(struct sock *); + u8 reader_present; + u8 async_capable: 1; + u8 zc_capable: 1; + u8 reader_contended: 1; + struct tls_strparser strp; + atomic_t decrypt_pending; + struct sk_buff_head async_hold; + struct wait_queue_head wq; +}; + +struct tls_prot_info { + u16 version; + u16 cipher_type; + u16 prepend_size; + 
u16 tag_size; + u16 overhead_size; + u16 iv_size; + u16 salt_size; + u16 rec_seq_size; + u16 aad_size; + u16 tail_size; +}; + +struct cipher_context { + char iv[20]; + char rec_seq[8]; +}; + +union tls_crypto_context { + struct tls_crypto_info info; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; + struct tls12_crypto_info_sm4_gcm sm4_gcm; + struct tls12_crypto_info_sm4_ccm sm4_ccm; + }; +}; + +struct tls_context { + struct tls_prot_info prot_info; + u8 tx_conf: 3; + u8 rx_conf: 3; + u8 zerocopy_sendfile: 1; + u8 rx_no_pad: 1; + int (*push_pending_record)(struct sock *, int); + void (*sk_write_space)(struct sock *); + void *priv_ctx_tx; + void *priv_ctx_rx; + struct net_device *netdev; + struct cipher_context tx; + struct cipher_context rx; + struct scatterlist *partially_sent_record; + u16 partially_sent_offset; + bool splicing_pages; + bool pending_open_record_frags; + struct mutex tx_lock; + long unsigned int flags; + struct proto *sk_proto; + struct sock *sk; + void (*sk_destruct)(struct sock *); + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; + struct list_head list; + refcount_t refcount; + struct callback_head rcu; +}; + +struct sockaddr_un { + __kernel_sa_family_t sun_family; + char sun_path[108]; +}; + +typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64); + +typedef u64 (*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, void *, u32); + +typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_sk_fullsock)(struct sock *); + +typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum); + +typedef u64 (*btf_bpf_csum_update)(struct sk_buff *, __wsum); + +typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64); + +enum { + BPF_F_NEIGH = 65536, + BPF_F_PEER = 131072, + BPF_F_NEXTHOP = 262144, +}; + +typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_redirect)(u32, u64); + +typedef u64 (*btf_bpf_redirect_peer)(u32, u64); + +typedef u64 (*btf_bpf_redirect_neigh)(u32, struct bpf_redir_neigh *, int, u64); + +typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32); + +typedef u64 
(*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_get_cgroup_classid_curr)(); + +typedef u64 (*btf_bpf_skb_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_hash_recalc)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16); + +typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64); + +typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32); + +typedef u64 (*btf_sk_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_xdp_get_buff_len)(struct xdp_buff *); + +typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_load_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_store_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_redirect)(u32, u64); + +typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct sk_buff *, u8 *, u32); + +typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct sk_buff *, const struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32); + +typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32); + +typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *); + +typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *); + +typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int); + +typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_ptr_cookie)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sk_msg)(struct sk_msg *); + +typedef u64 
(*btf_bpf_get_socket_uid)(struct sk_buff *); + +typedef u64 (*btf_bpf_sk_setsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_unlocked_sk_setsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_unlocked_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int); + +typedef u64 (*btf_bpf_bind)(struct bpf_sock_addr_kern *, struct sockaddr *, int); + +typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_check_mtu)(struct sk_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_xdp_check_mtu)(struct xdp_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_release)(struct sock *); + +typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_listener_sock)(struct sock *); + +typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *); + +typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64); + +typedef u64 (*btf_bpf_sock_ops_load_hdr_opt)(struct bpf_sock_ops_kern *, void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_store_hdr_opt)(struct bpf_sock_ops_kern *, const void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_reserve_hdr_opt)(struct bpf_sock_ops_kern *, u32, u64); + +typedef u64 
(*btf_bpf_skb_set_tstamp)(struct sk_buff *, u64, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *, struct tcphdr *); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *); + +typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, u32, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_sk_lookup_assign)(struct bpf_sk_lookup_kern *, struct sock *, u64); + +typedef u64 (*btf_bpf_skc_to_tcp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_timewait_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_request_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_udp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_unix_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_mptcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_sock_from_file)(struct file *); + +struct eeprom_req_info { + struct ethnl_req_info base; + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; +}; + +struct eeprom_reply_data { + struct ethnl_reply_data base; + u32 length; + u8 *data; +}; + +struct rd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + struct in6_addr dest; + __u8 opt[0]; +}; + +struct nf_conntrack_l4proto { + u_int8_t l4proto; + bool allow_clash; + u16 nlattr_size; + bool (*can_early_drop)(const struct nf_conn *); + int (*to_nlattr)(struct sk_buff *, struct nlattr *, struct nf_conn *, bool); + int (*from_nlattr)(struct nlattr **, struct nf_conn *); + int (*tuple_to_nlattr)(struct sk_buff *, const struct nf_conntrack_tuple *); + unsigned int (*nlattr_tuple_size)(); + int (*nlattr_to_tuple)(struct nlattr **, struct nf_conntrack_tuple *, u_int32_t); + const struct nla_policy *nla_policy; + struct { + int (*nlattr_to_obj)(struct nlattr **, struct net *, void *); + int (*obj_to_nlattr)(struct sk_buff *, const void *); + u16 obj_size; + u16 nlattr_max; + const struct nla_policy *nla_policy; + } ctnl_timeout; + void (*print_conntrack)(struct seq_file *, struct nf_conn *); +}; + +enum ctattr_l4proto { + CTA_PROTO_UNSPEC = 0, + CTA_PROTO_NUM = 1, + CTA_PROTO_SRC_PORT = 2, + CTA_PROTO_DST_PORT = 3, + CTA_PROTO_ICMP_ID = 4, + CTA_PROTO_ICMP_TYPE = 5, + CTA_PROTO_ICMP_CODE = 6, + CTA_PROTO_ICMPV6_ID = 7, + CTA_PROTO_ICMPV6_TYPE = 8, + CTA_PROTO_ICMPV6_CODE = 9, + __CTA_PROTO_MAX = 10, +}; + +enum nft_exthdr_flags { + NFT_EXTHDR_F_PRESENT = 1, +}; + +enum nft_exthdr_op { + NFT_EXTHDR_OP_IPV6 = 0, + NFT_EXTHDR_OP_TCPOPT = 1, + NFT_EXTHDR_OP_IPV4 = 2, + NFT_EXTHDR_OP_SCTP = 3, + NFT_EXTHDR_OP_DCCP = 4, + __NFT_EXTHDR_OP_MAX = 5, +}; + +enum nft_exthdr_attributes { + NFTA_EXTHDR_UNSPEC = 0, + NFTA_EXTHDR_DREG = 1, + NFTA_EXTHDR_TYPE = 2, + NFTA_EXTHDR_OFFSET = 3, + NFTA_EXTHDR_LEN = 4, + NFTA_EXTHDR_FLAGS = 5, + NFTA_EXTHDR_OP = 6, + NFTA_EXTHDR_SREG = 7, + __NFTA_EXTHDR_MAX = 8, +}; + +struct dccp_hdr { + __be16 dccph_sport; + __be16 dccph_dport; + __u8 dccph_doff; + __u8 dccph_ccval: 4; + __u8 dccph_cscov: 4; + __sum16 dccph_checksum; + __u8 dccph_reserved: 3; + __u8 dccph_type: 4; + __u8 dccph_x: 1; + __u8 dccph_seq2; + __be16 dccph_seq; +}; + +enum 
dccp_pkt_type { + DCCP_PKT_REQUEST = 0, + DCCP_PKT_RESPONSE = 1, + DCCP_PKT_DATA = 2, + DCCP_PKT_ACK = 3, + DCCP_PKT_DATAACK = 4, + DCCP_PKT_CLOSEREQ = 5, + DCCP_PKT_CLOSE = 6, + DCCP_PKT_RESET = 7, + DCCP_PKT_SYNC = 8, + DCCP_PKT_SYNCACK = 9, + DCCP_PKT_INVALID = 10, +}; + +enum { + DCCPO_PADDING = 0, + DCCPO_MANDATORY = 1, + DCCPO_MIN_RESERVED = 3, + DCCPO_MAX_RESERVED = 31, + DCCPO_CHANGE_L = 32, + DCCPO_CONFIRM_L = 33, + DCCPO_CHANGE_R = 34, + DCCPO_CONFIRM_R = 35, + DCCPO_NDP_COUNT = 37, + DCCPO_ACK_VECTOR_0 = 38, + DCCPO_ACK_VECTOR_1 = 39, + DCCPO_TIMESTAMP = 41, + DCCPO_TIMESTAMP_ECHO = 42, + DCCPO_ELAPSED_TIME = 43, + DCCPO_MAX = 45, + DCCPO_MIN_RX_CCID_SPECIFIC = 128, + DCCPO_MAX_RX_CCID_SPECIFIC = 191, + DCCPO_MIN_TX_CCID_SPECIFIC = 192, + DCCPO_MAX_TX_CCID_SPECIFIC = 255, +}; + +struct sctp_chunkhdr { + __u8 type; + __u8 flags; + __be16 length; +}; + +struct nft_exthdr { + u8 type; + u8 offset; + u8 len; + u8 op; + u8 dreg; + u8 sreg; + u8 flags; +}; + +struct group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct compat_group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct compat_group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct compat_group_filter { + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + }; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + }; + }; +}; + +struct ipcm_cookie { + struct sockcm_cookie sockc; + __be32 addr; + int oif; + struct ip_options_rcu *opt; + __u8 protocol; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + long: 32; +}; + +struct raw_hashinfo { + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct hlist_head ht[256]; +}; + +struct in6_flowlabel_req { + struct in6_addr flr_dst; + __be32 flr_label; + __u8 flr_action; + __u8 flr_share; + __u16 flr_flags; + __u16 flr_expires; + __u16 flr_linger; + __u32 __flr_pad; +}; + +struct ip6fl_iter_state { + struct seq_net_private p; + struct pid_namespace *pid_ns; + int bucket; +}; + +enum { + MDBA_UNSPEC = 0, + MDBA_MDB = 1, + MDBA_ROUTER = 2, + __MDBA_MAX = 3, +}; + +enum { + MDBA_MDB_UNSPEC = 0, + MDBA_MDB_ENTRY = 1, + __MDBA_MDB_MAX = 2, +}; + +enum { + MDBA_MDB_ENTRY_UNSPEC = 0, + MDBA_MDB_ENTRY_INFO = 1, + __MDBA_MDB_ENTRY_MAX = 2, +}; + +enum { + MDBA_MDB_EATTR_UNSPEC = 0, + MDBA_MDB_EATTR_TIMER = 1, + MDBA_MDB_EATTR_SRC_LIST = 2, + MDBA_MDB_EATTR_GROUP_MODE = 3, + MDBA_MDB_EATTR_SOURCE = 4, + MDBA_MDB_EATTR_RTPROT = 5, + MDBA_MDB_EATTR_DST = 6, + MDBA_MDB_EATTR_DST_PORT = 7, + MDBA_MDB_EATTR_VNI = 8, + MDBA_MDB_EATTR_IFINDEX = 9, + MDBA_MDB_EATTR_SRC_VNI = 10, + __MDBA_MDB_EATTR_MAX = 11, +}; + +enum { + MDBA_MDB_SRCLIST_UNSPEC = 0, + MDBA_MDB_SRCLIST_ENTRY = 1, + 
__MDBA_MDB_SRCLIST_MAX = 2, +}; + +enum { + MDBA_MDB_SRCATTR_UNSPEC = 0, + MDBA_MDB_SRCATTR_ADDRESS = 1, + MDBA_MDB_SRCATTR_TIMER = 2, + __MDBA_MDB_SRCATTR_MAX = 3, +}; + +enum { + MDBA_ROUTER_UNSPEC = 0, + MDBA_ROUTER_PORT = 1, + __MDBA_ROUTER_MAX = 2, +}; + +enum { + MDBA_ROUTER_PATTR_UNSPEC = 0, + MDBA_ROUTER_PATTR_TIMER = 1, + MDBA_ROUTER_PATTR_TYPE = 2, + MDBA_ROUTER_PATTR_INET_TIMER = 3, + MDBA_ROUTER_PATTR_INET6_TIMER = 4, + MDBA_ROUTER_PATTR_VID = 5, + __MDBA_ROUTER_PATTR_MAX = 6, +}; + +enum { + MDBE_ATTR_UNSPEC = 0, + MDBE_ATTR_SOURCE = 1, + MDBE_ATTR_SRC_LIST = 2, + MDBE_ATTR_GROUP_MODE = 3, + MDBE_ATTR_RTPROT = 4, + MDBE_ATTR_DST = 5, + MDBE_ATTR_DST_PORT = 6, + MDBE_ATTR_VNI = 7, + MDBE_ATTR_IFINDEX = 8, + MDBE_ATTR_SRC_VNI = 9, + MDBE_ATTR_STATE_MASK = 10, + __MDBE_ATTR_MAX = 11, +}; + +enum { + MDBE_SRC_LIST_UNSPEC = 0, + MDBE_SRC_LIST_ENTRY = 1, + __MDBE_SRC_LIST_MAX = 2, +}; + +enum { + MDBE_SRCATTR_UNSPEC = 0, + MDBE_SRCATTR_ADDRESS = 1, + __MDBE_SRCATTR_MAX = 2, +}; + +struct br_mdb_src_entry { + struct br_ip addr; +}; + +struct br_mdb_config { + struct net_bridge *br; + struct net_bridge_port *p; + struct br_mdb_entry *entry; + struct br_ip group; + bool src_entry; + u8 filter_mode; + u16 nlflags; + struct br_mdb_src_entry *src_entries; + int num_src_entries; + u8 rt_protocol; +}; + +enum { + BR_VLFLAG_PER_PORT_STATS = 1, + BR_VLFLAG_ADDED_BY_SWITCHDEV = 2, + BR_VLFLAG_MCAST_ENABLED = 4, + BR_VLFLAG_GLOBAL_MCAST_ENABLED = 8, + BR_VLFLAG_NEIGH_SUPPRESS_ENABLED = 16, +}; + +struct net_bridge_mcast_gc { + struct hlist_node gc_node; + void (*destroy)(struct net_bridge_mcast_gc *); +}; + +struct net_bridge_port_group; + +struct net_bridge_group_src { + struct hlist_node node; + struct br_ip addr; + struct net_bridge_port_group *pg; + u8 flags; + u8 src_query_rexmit_cnt; + struct timer_list timer; + struct net_bridge *br; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct net_bridge_port_group_sg_key { + struct net_bridge_port *port; + struct br_ip addr; +}; + +struct net_bridge_port_group { + struct net_bridge_port_group *next; + struct net_bridge_port_group_sg_key key; + unsigned char eth_addr[6]; + unsigned char flags; + unsigned char filter_mode; + unsigned char grp_query_rexmit_cnt; + unsigned char rt_protocol; + struct hlist_head src_list; + unsigned int src_ents; + struct timer_list timer; + struct timer_list rexmit_timer; + struct hlist_node mglist; + struct rb_root eht_set_tree; + struct rb_root eht_host_tree; + struct rhash_head rhnode; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct net_bridge_mdb_entry { + struct rhash_head rhnode; + struct net_bridge *br; + struct net_bridge_port_group *ports; + struct br_ip addr; + bool host_joined; + struct timer_list timer; + struct hlist_node mdb_node; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct br_mdb_flush_desc { + u32 port_ifindex; + u16 vid; + u8 rt_protocol; + u8 state; + u8 state_mask; +}; + +struct plat_smp_ops { + void (*send_ipi_single)(int, unsigned int); + void (*send_ipi_mask)(const struct cpumask *, unsigned int); + void (*init_secondary)(); + void (*smp_finish)(); + int (*boot_secondary)(int, struct task_struct *); + void (*smp_setup)(); + void (*prepare_cpus)(unsigned int); + void (*prepare_boot_cpu)(); + void (*kexec_nonboot_cpu)(); +}; + +struct mips_machine { + const struct of_device_id *matches; + const void *fdt; + bool (*detect)(); + const void * (*fixup_fdt)(const void *, const void *); + unsigned int 
(*measure_hpt_freq)(); +}; + +struct mips_fdt_fixup { + int (*apply)(void *); + const char *description; +}; + +enum proc_cn_event { + PROC_EVENT_NONE = 0, + PROC_EVENT_FORK = 1, + PROC_EVENT_EXEC = 2, + PROC_EVENT_UID = 4, + PROC_EVENT_GID = 64, + PROC_EVENT_SID = 128, + PROC_EVENT_PTRACE = 256, + PROC_EVENT_COMM = 512, + PROC_EVENT_NONZERO_EXIT = 536870912, + PROC_EVENT_COREDUMP = 1073741824, + PROC_EVENT_EXIT = 2147483648, +}; + +typedef int suspend_state_t; + +enum { + GP_IDLE = 0, + GP_ENTER = 1, + GP_PASSED = 2, + GP_EXIT = 3, + GP_REPLAY = 4, +}; + +typedef long unsigned int kimage_entry_t; + +struct kexec_segment { + union { + void *buf; + void *kbuf; + }; + size_t bufsz; + long unsigned int mem; + size_t memsz; +}; + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + long unsigned int start; + struct page *control_code_page; + struct page *swap_page; + void *vmcoreinfo_data_copy; + long unsigned int nr_segments; + struct kexec_segment segment[16]; + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unusable_pages; + long unsigned int control_page; + unsigned int type: 1; + unsigned int preserve_context: 1; + unsigned int file_mode: 1; + void *elf_headers; + long unsigned int elf_headers_sz; + long unsigned int elf_load_addr; +}; + +struct kexec_load_limit { + struct mutex mutex; + int limit; +}; + +enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + BPF_PROG_BIND_MAP = 35, + BPF_TOKEN_CREATE = 36, + __MAX_BPF_CMD = 37, +}; + +enum bpf_perf_event_type { + BPF_PERF_EVENT_UNSPEC = 0, + BPF_PERF_EVENT_UPROBE = 1, + BPF_PERF_EVENT_URETPROBE = 2, + BPF_PERF_EVENT_KPROBE = 3, + BPF_PERF_EVENT_KRETPROBE = 4, + BPF_PERF_EVENT_TRACEPOINT = 5, + BPF_PERF_EVENT_EVENT = 6, +}; + +enum bpf_stats_type { + BPF_STATS_RUN_TIME = 0, +}; + +struct bpf_btf_info { + __u64 btf; + __u32 btf_size; + __u32 id; + __u64 name; + __u32 name_len; + __u32 kernel_btf; +}; + +struct bpf_spin_lock { + __u32 val; +}; + +enum fixed_addresses { + FIX_CMAP_BEGIN = 0, + FIX_CMAP_END = 16, + __end_of_fixed_addresses = 17, +}; + +struct bpf_tramp_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + struct bpf_run_ctx *saved_run_ctx; + long: 32; +}; + +struct bpf_tracing_link { + struct bpf_tramp_link link; + enum bpf_attach_type attach_type; + struct bpf_trampoline *trampoline; + struct bpf_prog *tgt_prog; + long: 32; +}; + +struct bpf_mprog_fp { + struct bpf_prog *prog; +}; + +struct bpf_mprog_bundle; + +struct bpf_mprog_entry { + struct bpf_mprog_fp fp_items[64]; + struct bpf_mprog_bundle *parent; +}; + +enum perf_bpf_event_type { 
+ PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX = 3, +}; + +struct bpf_mprog_cp { + struct bpf_link *link; +}; + +struct bpf_mprog_bundle { + struct bpf_mprog_entry a; + struct bpf_mprog_entry b; + struct bpf_mprog_cp cp_items[64]; + struct bpf_prog *ref; + long: 32; + atomic64_t revision; + u32 count; + long: 32; +}; + +enum bpf_audit { + BPF_AUDIT_LOAD = 0, + BPF_AUDIT_UNLOAD = 1, + BPF_AUDIT_MAX = 2, +}; + +struct bpf_prog_kstats { + u64 nsecs; + u64 cnt; + u64 misses; +}; + +struct bpf_perf_link { + struct bpf_link link; + struct file *perf_file; + long: 32; +}; + +typedef u64 (*btf_bpf_sys_bpf)(int, union bpf_attr *, u32); + +typedef u64 (*btf_bpf_sys_close)(u32); + +typedef u64 (*btf_bpf_kallsyms_lookup_name)(const char *, int, int, u64 *); + +struct audit_context; + +struct audit_buffer; + +enum { + XA_CHECK_SCHED = 4096, +}; + +struct wb_lock_cookie { + bool locked; + long unsigned int flags; +}; + +struct dirty_throttle_control { + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + long unsigned int avail; + long unsigned int dirty; + long unsigned int thresh; + long unsigned int bg_thresh; + long unsigned int wb_dirty; + long unsigned int wb_thresh; + long unsigned int wb_bg_thresh; + long unsigned int pos_ratio; + bool freerun; + bool dirty_exceeded; +}; + +struct vmap_area { + long unsigned int va_start; + long unsigned int va_end; + struct rb_node rb_node; + struct list_head list; + union { + long unsigned int subtree_max_size; + struct vm_struct *vm; + }; + long unsigned int flags; +}; + +typedef unsigned int kasan_vmalloc_flags_t; + +enum memcg_stat_item { + MEMCG_SWAP = 45, + MEMCG_SOCK = 46, + MEMCG_PERCPU_B = 47, + MEMCG_VMALLOC = 48, + MEMCG_KMEM = 49, + MEMCG_ZSWAP_B = 50, + MEMCG_ZSWAPPED = 51, + MEMCG_NR_STAT = 52, +}; + +struct trace_event_raw_alloc_vmap_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int size; + long unsigned int align; + long unsigned int vstart; + long unsigned int vend; + int failed; + char __data[0]; +}; + +struct trace_event_raw_purge_vmap_area_lazy { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + unsigned int npurged; + char __data[0]; +}; + +struct trace_event_raw_free_vmap_area_noflush { + struct trace_entry ent; + long unsigned int va_start; + long unsigned int nr_lazy; + long unsigned int nr_lazy_max; + char __data[0]; +}; + +struct trace_event_data_offsets_alloc_vmap_area {}; + +struct trace_event_data_offsets_purge_vmap_area_lazy {}; + +struct trace_event_data_offsets_free_vmap_area_noflush {}; + +typedef void (*btf_trace_alloc_vmap_area)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_purge_vmap_area_lazy)(void *, long unsigned int, long unsigned int, unsigned int); + +typedef void (*btf_trace_free_vmap_area_noflush)(void *, long unsigned int, long unsigned int, long unsigned int); + +struct vfree_deferred { + struct llist_head list; + struct work_struct wq; +}; + +struct rb_list { + struct rb_root root; + struct list_head head; + spinlock_t lock; +}; + +struct vmap_pool { + struct list_head head; + long unsigned int len; +}; + +struct vmap_node { + struct vmap_pool pool[256]; + spinlock_t pool_lock; + bool skip_populate; + struct rb_list busy; + struct rb_list lazy; + struct list_head purge_list; + struct work_struct purge_work; + long 
unsigned int nr_purged; +}; + +enum fit_type { + NOTHING_FIT = 0, + FL_FIT_TYPE = 1, + LE_FIT_TYPE = 2, + RE_FIT_TYPE = 3, + NE_FIT_TYPE = 4, +}; + +struct vmap_block_queue { + spinlock_t lock; + struct list_head free; + struct xarray vmap_blocks; +}; + +struct vmap_block { + spinlock_t lock; + struct vmap_area *va; + long unsigned int free; + long unsigned int dirty; + long unsigned int used_map[2]; + long unsigned int dirty_min; + long unsigned int dirty_max; + struct list_head free_list; + struct callback_head callback_head; + struct list_head purge; + unsigned int cpu; +}; + +typedef bool (*smp_cond_func_t)(int, void *); + +enum hash_algo { + HASH_ALGO_MD4 = 0, + HASH_ALGO_MD5 = 1, + HASH_ALGO_SHA1 = 2, + HASH_ALGO_RIPE_MD_160 = 3, + HASH_ALGO_SHA256 = 4, + HASH_ALGO_SHA384 = 5, + HASH_ALGO_SHA512 = 6, + HASH_ALGO_SHA224 = 7, + HASH_ALGO_RIPE_MD_128 = 8, + HASH_ALGO_RIPE_MD_256 = 9, + HASH_ALGO_RIPE_MD_320 = 10, + HASH_ALGO_WP_256 = 11, + HASH_ALGO_WP_384 = 12, + HASH_ALGO_WP_512 = 13, + HASH_ALGO_TGR_128 = 14, + HASH_ALGO_TGR_160 = 15, + HASH_ALGO_TGR_192 = 16, + HASH_ALGO_SM3_256 = 17, + HASH_ALGO_STREEBOG_256 = 18, + HASH_ALGO_STREEBOG_512 = 19, + HASH_ALGO_SHA3_256 = 20, + HASH_ALGO_SHA3_384 = 21, + HASH_ALGO_SHA3_512 = 22, + HASH_ALGO__LAST = 23, +}; + +struct postprocess_bh_ctx { + struct work_struct work; + struct buffer_head *bh; +}; + +struct bh_lru { + struct buffer_head *bhs[16]; +}; + +struct bh_accounting { + int nr; + int ratelimit; +}; + +struct fsverity_info; + +struct fscrypt_inode_info; + +struct commit_header { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; + unsigned char h_chksum_type; + unsigned char h_chksum_size; + unsigned char h_padding[2]; + __be32 h_chksum[8]; + __be64 h_commit_sec; + __be32 h_commit_nsec; + long: 32; +}; + +struct journal_block_tag3_s { + __be32 t_blocknr; + __be32 t_flags; + __be32 t_blocknr_high; + __be32 t_checksum; +}; + +typedef struct journal_block_tag3_s journal_block_tag3_t; + +struct journal_block_tag_s { + __be32 t_blocknr; + __be16 t_checksum; + __be16 t_flags; + __be32 t_blocknr_high; +}; + +typedef struct journal_block_tag_s journal_block_tag_t; + +struct jbd2_journal_block_tail { + __be32 t_checksum; +}; + +struct jbd2_journal_revoke_header_s { + journal_header_t r_header; + __be32 r_count; +}; + +typedef struct jbd2_journal_revoke_header_s jbd2_journal_revoke_header_t; + +struct recovery_info { + tid_t start_transaction; + tid_t end_transaction; + long unsigned int head_block; + int nr_replays; + int nr_revokes; + int nr_revoke_hits; +}; + +struct autofs_packet_missing { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[256]; +}; + +struct autofs_packet_expire_multi { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[256]; +}; + +union autofs_packet_union { + struct autofs_packet_hdr hdr; + struct autofs_packet_missing missing; + struct autofs_packet_expire expire; + struct autofs_packet_expire_multi expire_multi; +}; + +struct autofs_v5_packet { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + __u32 dev; + __u64 ino; + __u32 uid; + __u32 gid; + __u32 pid; + __u32 tgid; + __u32 len; + char name[256]; + long: 32; +}; + +typedef struct autofs_v5_packet autofs_packet_missing_indirect_t; + +typedef struct autofs_v5_packet autofs_packet_expire_indirect_t; + +typedef struct autofs_v5_packet autofs_packet_missing_direct_t; + +typedef struct autofs_v5_packet autofs_packet_expire_direct_t; + +union autofs_v5_packet_union { + 
struct autofs_packet_hdr hdr; + struct autofs_v5_packet v5_packet; + autofs_packet_missing_indirect_t missing_indirect; + autofs_packet_expire_indirect_t expire_indirect; + autofs_packet_missing_direct_t missing_direct; + autofs_packet_expire_direct_t expire_direct; +}; + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + +struct fileattr { + u32 flags; + u32 fsx_xflags; + u32 fsx_extsize; + u32 fsx_nextents; + u32 fsx_projid; + u32 fsx_cowextsize; + bool flags_valid: 1; + bool fsx_valid: 1; +}; + +struct btrfs_ioctl_qgroup_limit_args { + __u64 qgroupid; + struct btrfs_qgroup_limit lim; +}; + +struct btrfs_ioctl_vol_args_v2 { + __s64 fd; + __u64 transid; + __u64 flags; + union { + struct { + __u64 size; + struct btrfs_qgroup_inherit *qgroup_inherit; + long: 32; + }; + __u64 unused[4]; + }; + union { + char name[4040]; + __u64 devid; + __u64 subvolid; + }; +}; + +struct btrfs_ioctl_scrub_args { + __u64 devid; + __u64 start; + __u64 end; + __u64 flags; + struct btrfs_scrub_progress progress; + __u64 unused[109]; +}; + +struct btrfs_ioctl_dev_replace_start_params { + __u64 srcdevid; + __u64 cont_reading_from_srcdev_mode; + __u8 srcdev_name[1025]; + __u8 tgtdev_name[1025]; + long: 32; +}; + +struct btrfs_ioctl_dev_replace_status_params { + __u64 replace_state; + __u64 progress_1000; + __u64 time_started; + __u64 time_stopped; + __u64 num_write_errors; + __u64 num_uncorrectable_read_errors; +}; + +struct btrfs_ioctl_dev_replace_args { + __u64 cmd; + __u64 result; + union { + struct btrfs_ioctl_dev_replace_start_params start; + struct btrfs_ioctl_dev_replace_status_params status; + }; + __u64 spare[64]; +}; + +struct btrfs_ioctl_dev_info_args { + __u64 devid; + __u8 uuid[16]; + __u64 bytes_used; + __u64 total_bytes; + __u8 fsid[16]; + __u64 unused[377]; + __u8 path[1024]; +}; + +struct btrfs_ioctl_fs_info_args { + __u64 max_id; + __u64 num_devices; + __u8 fsid[16]; + __u32 nodesize; + __u32 sectorsize; + __u32 clone_alignment; + __u16 csum_type; + __u16 csum_size; + __u64 flags; + __u64 generation; + __u8 metadata_uuid[16]; + __u8 reserved[944]; +}; + +struct btrfs_ioctl_feature_flags { + __u64 compat_flags; + __u64 compat_ro_flags; + __u64 incompat_flags; +}; + +struct btrfs_ioctl_balance_args { + __u64 flags; + __u64 state; + struct btrfs_balance_args data; + struct btrfs_balance_args meta; + struct btrfs_balance_args sys; + struct btrfs_balance_progress stat; + __u64 unused[72]; +}; + +struct btrfs_ioctl_ino_lookup_args { + __u64 treeid; + __u64 objectid; + char name[4080]; +}; + +struct btrfs_ioctl_ino_lookup_user_args { + __u64 dirid; + __u64 treeid; + char name[256]; + char path[3824]; +}; + +struct btrfs_ioctl_search_key { + __u64 tree_id; + __u64 min_objectid; + __u64 max_objectid; + __u64 min_offset; + __u64 max_offset; + __u64 min_transid; + __u64 max_transid; + __u32 min_type; + __u32 max_type; + __u32 nr_items; + __u32 unused; + __u64 unused1; + __u64 unused2; + __u64 unused3; + __u64 unused4; +}; + +struct btrfs_ioctl_search_header { + __u64 transid; + __u64 objectid; + __u64 offset; + __u32 type; + __u32 len; +}; + +struct btrfs_ioctl_search_args { + struct btrfs_ioctl_search_key key; + char buf[3992]; +}; + +struct btrfs_ioctl_search_args_v2 { + struct btrfs_ioctl_search_key key; + __u64 buf_size; + __u64 buf[0]; +}; + +struct btrfs_ioctl_space_info { + __u64 flags; + __u64 total_bytes; + __u64 used_bytes; +}; + +struct btrfs_ioctl_space_args { + __u64 space_slots; + __u64 total_spaces; + struct btrfs_ioctl_space_info spaces[0]; +}; + +struct 
btrfs_ioctl_ino_path_args { + __u64 inum; + __u64 size; + __u64 reserved[4]; + __u64 fspath; +}; + +struct btrfs_ioctl_logical_ino_args { + __u64 logical; + __u64 size; + __u64 reserved[3]; + __u64 flags; + __u64 inodes; +}; + +struct btrfs_ioctl_get_dev_stats { + __u64 devid; + __u64 nr_items; + __u64 flags; + __u64 values[5]; + __u64 unused[121]; +}; + +struct btrfs_ioctl_quota_ctl_args { + __u64 cmd; + __u64 status; +}; + +struct btrfs_ioctl_quota_rescan_args { + __u64 flags; + __u64 progress; + __u64 reserved[6]; +}; + +struct btrfs_ioctl_qgroup_assign_args { + __u64 assign; + __u64 src; + __u64 dst; +}; + +struct btrfs_ioctl_qgroup_create_args { + __u64 create; + __u64 qgroupid; +}; + +struct btrfs_ioctl_timespec { + __u64 sec; + __u32 nsec; + long: 32; +}; + +struct btrfs_ioctl_received_subvol_args { + char uuid[16]; + __u64 stransid; + __u64 rtransid; + struct btrfs_ioctl_timespec stime; + struct btrfs_ioctl_timespec rtime; + __u64 flags; + __u64 reserved[16]; +}; + +struct btrfs_ioctl_send_args { + __s64 send_fd; + __u64 clone_sources_count; + __u64 *clone_sources; + long: 32; + __u64 parent_root; + __u64 flags; + __u32 version; + __u8 reserved[28]; +}; + +struct btrfs_ioctl_get_subvol_info_args { + __u64 treeid; + char name[256]; + __u64 parent_id; + __u64 dirid; + __u64 generation; + __u64 flags; + __u8 uuid[16]; + __u8 parent_uuid[16]; + __u8 received_uuid[16]; + __u64 ctransid; + __u64 otransid; + __u64 stransid; + __u64 rtransid; + struct btrfs_ioctl_timespec ctime; + struct btrfs_ioctl_timespec otime; + struct btrfs_ioctl_timespec stime; + struct btrfs_ioctl_timespec rtime; + __u64 reserved[8]; +}; + +struct btrfs_ioctl_get_subvol_rootref_args { + __u64 min_treeid; + struct { + __u64 treeid; + __u64 dirid; + } rootref[255]; + __u8 num_items; + __u8 align[7]; +}; + +struct btrfs_ioctl_subvol_wait { + __u64 subvolid; + __u32 mode; + __u32 count; +}; + +enum btrfs_err_code { + BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1, + BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET = 2, + BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET = 3, + BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET = 4, + BTRFS_ERROR_DEV_TGT_REPLACE = 5, + BTRFS_ERROR_DEV_MISSING_NOT_FOUND = 6, + BTRFS_ERROR_DEV_ONLY_WRITABLE = 7, + BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS = 8, + BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET = 9, + BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET = 10, +}; + +struct btrfs_new_inode_args { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool orphan; + bool subvol; + struct posix_acl *default_acl; + struct posix_acl *acl; + struct fscrypt_name fname; +}; + +struct btrfs_dev_lookup_args { + u64 devid; + u8 *uuid; + u8 *fsid; + bool missing; + long: 32; +}; + +enum btrfs_feature_set { + FEAT_COMPAT = 0, + FEAT_COMPAT_RO = 1, + FEAT_INCOMPAT = 2, + FEAT_MAX = 3, +}; + +struct btrfs_qgroup_list { + struct list_head next_group; + struct list_head next_member; + struct btrfs_qgroup *group; + struct btrfs_qgroup *member; +}; + +struct btrfs_uring_priv { + struct io_uring_cmd *cmd; + struct page **pages; + long unsigned int nr_pages; + long: 32; + struct kiocb iocb; + struct iovec *iov; + long: 32; + struct iov_iter iter; + struct extent_state *cached_state; + long: 32; + u64 count; + u64 start; + u64 lockend; + int err; + bool compressed; +}; + +struct io_btrfs_cmd { + struct btrfs_uring_priv *priv; +}; + +struct vfs_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; +}; + +struct vfs_ns_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; + __le32 
rootid; +}; + +struct cpu_vfs_cap_data { + __u32 magic_etc; + kuid_t rootid; + kernel_cap_t permitted; + kernel_cap_t inheritable; +}; + +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_acomp { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct comp_alg_common { + struct crypto_alg base; +}; + +struct crypto_scomp { + struct crypto_tfm base; +}; + +struct scomp_alg { + void * (*alloc_ctx)(struct crypto_scomp *); + void (*free_ctx)(struct crypto_scomp *, void *); + int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + struct crypto_alg base; + }; + struct comp_alg_common calg; + }; +}; + +struct scomp_scratch { + spinlock_t lock; + void *src; + void *dst; +}; + +enum x509_actions { + ACT_x509_extract_key_data = 0, + ACT_x509_extract_name_segment = 1, + ACT_x509_note_OID = 2, + ACT_x509_note_issuer = 3, + ACT_x509_note_not_after = 4, + ACT_x509_note_not_before = 5, + ACT_x509_note_params = 6, + ACT_x509_note_serial = 7, + ACT_x509_note_sig_algo = 8, + ACT_x509_note_signature = 9, + ACT_x509_note_subject = 10, + ACT_x509_note_tbs_certificate = 11, + ACT_x509_process_extension = 12, + NR__x509_actions = 13, +}; + +struct asymmetric_key_parser { + struct list_head link; + struct module *owner; + const char *name; + int (*parse)(struct key_preparsed_payload *); +}; + +struct asymmetric_key_ids { + void *id[3]; +}; + +enum blacklist_hash_type { + BLACKLIST_HASH_X509_TBS = 1, + BLACKLIST_HASH_BINARY = 2, +}; + +struct x509_certificate { + struct x509_certificate *next; + struct x509_certificate *signer; + struct public_key *pub; + struct public_key_signature *sig; + char *issuer; + char *subject; + struct asymmetric_key_id *id; + struct asymmetric_key_id *skid; + time64_t valid_from; + time64_t valid_to; + const void *tbs; + unsigned int tbs_size; + unsigned int raw_sig_size; + const void *raw_sig; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_subject; + unsigned int raw_subject_size; + unsigned int raw_skid_size; + const void *raw_skid; + unsigned int index; + bool seen; + bool verified; + bool self_signed; + bool unsupported_sig; + bool blacklisted; + long: 
32; +}; + +struct rq_qos_wait_data { + struct wait_queue_entry wq; + struct task_struct *task; + struct rq_wait *rqw; + acquire_inflight_cb_t *cb; + void *private_data; + bool got_token; +}; + +struct ksignal { + struct k_sigaction ka; + kernel_siginfo_t info; + int sig; +}; + +enum io_uring_sqe_flags_bit { + IOSQE_FIXED_FILE_BIT = 0, + IOSQE_IO_DRAIN_BIT = 1, + IOSQE_IO_LINK_BIT = 2, + IOSQE_IO_HARDLINK_BIT = 3, + IOSQE_ASYNC_BIT = 4, + IOSQE_BUFFER_SELECT_BIT = 5, + IOSQE_CQE_SKIP_SUCCESS_BIT = 6, +}; + +struct io_sqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 flags; + __u32 dropped; + __u32 array; + __u32 resv1; + __u64 user_addr; +}; + +struct io_cqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 overflow; + __u32 cqes; + __u32 flags; + __u32 resv1; + __u64 user_addr; +}; + +struct io_uring_params { + __u32 sq_entries; + __u32 cq_entries; + __u32 flags; + __u32 sq_thread_cpu; + __u32 sq_thread_idle; + __u32 features; + __u32 wq_fd; + __u32 resv[3]; + struct io_sqring_offsets sq_off; + struct io_cqring_offsets cq_off; +}; + +enum io_uring_register_op { + IORING_REGISTER_BUFFERS = 0, + IORING_UNREGISTER_BUFFERS = 1, + IORING_REGISTER_FILES = 2, + IORING_UNREGISTER_FILES = 3, + IORING_REGISTER_EVENTFD = 4, + IORING_UNREGISTER_EVENTFD = 5, + IORING_REGISTER_FILES_UPDATE = 6, + IORING_REGISTER_EVENTFD_ASYNC = 7, + IORING_REGISTER_PROBE = 8, + IORING_REGISTER_PERSONALITY = 9, + IORING_UNREGISTER_PERSONALITY = 10, + IORING_REGISTER_RESTRICTIONS = 11, + IORING_REGISTER_ENABLE_RINGS = 12, + IORING_REGISTER_FILES2 = 13, + IORING_REGISTER_FILES_UPDATE2 = 14, + IORING_REGISTER_BUFFERS2 = 15, + IORING_REGISTER_BUFFERS_UPDATE = 16, + IORING_REGISTER_IOWQ_AFF = 17, + IORING_UNREGISTER_IOWQ_AFF = 18, + IORING_REGISTER_IOWQ_MAX_WORKERS = 19, + IORING_REGISTER_RING_FDS = 20, + IORING_UNREGISTER_RING_FDS = 21, + IORING_REGISTER_PBUF_RING = 22, + IORING_UNREGISTER_PBUF_RING = 23, + IORING_REGISTER_SYNC_CANCEL = 24, + IORING_REGISTER_FILE_ALLOC_RANGE = 25, + IORING_REGISTER_PBUF_STATUS = 26, + IORING_REGISTER_NAPI = 27, + IORING_UNREGISTER_NAPI = 28, + IORING_REGISTER_CLOCK = 29, + IORING_REGISTER_CLONE_BUFFERS = 30, + IORING_REGISTER_SEND_MSG_RING = 31, + IORING_REGISTER_RESIZE_RINGS = 33, + IORING_REGISTER_MEM_REGION = 34, + IORING_REGISTER_LAST = 35, + IORING_REGISTER_USE_REGISTERED_RING = 2147483648, +}; + +enum { + IORING_REG_WAIT_TS = 1, +}; + +struct io_uring_reg_wait { + struct __kernel_timespec ts; + __u32 min_wait_usec; + __u32 flags; + __u64 sigmask; + __u32 sigmask_sz; + __u32 pad[3]; + __u64 pad2[2]; +}; + +struct io_uring_getevents_arg { + __u64 sigmask; + __u32 sigmask_sz; + __u32 min_wait_usec; + __u64 ts; +}; + +enum { + REQ_F_FIXED_FILE_BIT = 0, + REQ_F_IO_DRAIN_BIT = 1, + REQ_F_LINK_BIT = 2, + REQ_F_HARDLINK_BIT = 3, + REQ_F_FORCE_ASYNC_BIT = 4, + REQ_F_BUFFER_SELECT_BIT = 5, + REQ_F_CQE_SKIP_BIT = 6, + REQ_F_FAIL_BIT = 8, + REQ_F_INFLIGHT_BIT = 9, + REQ_F_CUR_POS_BIT = 10, + REQ_F_NOWAIT_BIT = 11, + REQ_F_LINK_TIMEOUT_BIT = 12, + REQ_F_NEED_CLEANUP_BIT = 13, + REQ_F_POLLED_BIT = 14, + REQ_F_HYBRID_IOPOLL_STATE_BIT = 15, + REQ_F_BUFFER_SELECTED_BIT = 16, + REQ_F_BUFFER_RING_BIT = 17, + REQ_F_REISSUE_BIT = 18, + REQ_F_CREDS_BIT = 19, + REQ_F_REFCOUNT_BIT = 20, + REQ_F_ARM_LTIMEOUT_BIT = 21, + REQ_F_ASYNC_DATA_BIT = 22, + REQ_F_SKIP_LINK_CQES_BIT = 23, + REQ_F_SINGLE_POLL_BIT = 24, + REQ_F_DOUBLE_POLL_BIT = 25, + REQ_F_APOLL_MULTISHOT_BIT = 26, + REQ_F_CLEAR_POLLIN_BIT = 27, + REQ_F_SUPPORT_NOWAIT_BIT 
= 28, + REQ_F_ISREG_BIT = 29, + REQ_F_POLL_NO_LAZY_BIT = 30, + REQ_F_CAN_POLL_BIT = 31, + REQ_F_BL_EMPTY_BIT = 32, + REQ_F_BL_NO_RECYCLE_BIT = 33, + REQ_F_BUFFERS_COMMIT_BIT = 34, + REQ_F_BUF_NODE_BIT = 35, + __REQ_F_LAST_BIT = 36, +}; + +struct io_overflow_cqe { + struct list_head list; + struct io_uring_cqe cqe; +}; + +struct trace_event_raw_io_uring_create { + struct trace_entry ent; + int fd; + void *ctx; + u32 sq_entries; + u32 cq_entries; + u32 flags; + char __data[0]; +}; + +struct trace_event_raw_io_uring_register { + struct trace_entry ent; + void *ctx; + unsigned int opcode; + unsigned int nr_files; + unsigned int nr_bufs; + long int ret; + char __data[0]; +}; + +struct trace_event_raw_io_uring_file_get { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int fd; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_queue_async_work { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + u8 opcode; + long: 32; + long long unsigned int flags; + struct io_wq_work *work; + int rw; + u32 __data_loc_op_str; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_defer { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int data; + u8 opcode; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_link { + struct trace_entry ent; + void *ctx; + void *req; + void *target_req; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqring_wait { + struct trace_entry ent; + void *ctx; + int min_events; + char __data[0]; +}; + +struct trace_event_raw_io_uring_fail_link { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + void *link; + u32 __data_loc_op_str; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_complete { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int res; + unsigned int cflags; + u64 extra1; + u64 extra2; + char __data[0]; +}; + +struct trace_event_raw_io_uring_submit_req { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + long: 32; + long long unsigned int flags; + bool sq_thread; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_poll_arm { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + int mask; + int events; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_add { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + int mask; + u32 __data_loc_op_str; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_req_failed { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + u8 flags; + u8 ioprio; + long: 32; + u64 off; + u64 addr; + u32 len; + u32 op_flags; + u16 buf_index; + u16 personality; + u32 file_index; + u64 pad1; + u64 addr3; + int error; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqe_overflow { + struct trace_entry ent; + void *ctx; + long: 32; + long long unsigned int user_data; + s32 res; + u32 cflags; + void *ocqe; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_task_work_run { + struct trace_entry ent; + void *tctx; + unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_io_uring_short_write { + struct trace_entry ent; + void *ctx; + long: 32; + u64 fpos; + u64 wanted; + u64 got; + char __data[0]; 
+}; + +struct trace_event_raw_io_uring_local_work_run { + struct trace_entry ent; + void *ctx; + int count; + unsigned int loops; + char __data[0]; +}; + +struct trace_event_data_offsets_io_uring_create {}; + +struct trace_event_data_offsets_io_uring_register {}; + +struct trace_event_data_offsets_io_uring_file_get {}; + +struct trace_event_data_offsets_io_uring_queue_async_work { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_defer { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_link {}; + +struct trace_event_data_offsets_io_uring_cqring_wait {}; + +struct trace_event_data_offsets_io_uring_fail_link { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_complete {}; + +struct trace_event_data_offsets_io_uring_submit_req { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_poll_arm { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_task_add { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_req_failed { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_cqe_overflow {}; + +struct trace_event_data_offsets_io_uring_task_work_run {}; + +struct trace_event_data_offsets_io_uring_short_write {}; + +struct trace_event_data_offsets_io_uring_local_work_run {}; + +typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32); + +typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, unsigned int, long int); + +typedef void (*btf_trace_io_uring_file_get)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_queue_async_work)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_defer)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int); + +typedef void (*btf_trace_io_uring_fail_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_complete)(void *, struct io_ring_ctx *, void *, struct io_uring_cqe *); + +typedef void (*btf_trace_io_uring_submit_req)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_poll_arm)(void *, struct io_kiocb *, int, int); + +typedef void (*btf_trace_io_uring_task_add)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_req_failed)(void *, const struct io_uring_sqe *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_cqe_overflow)(void *, void *, long long unsigned int, s32, u32, void *); + +typedef void (*btf_trace_io_uring_task_work_run)(void *, void *, unsigned int); + +typedef void (*btf_trace_io_uring_short_write)(void *, void *, u64, u64, u64); + +typedef void (*btf_trace_io_uring_local_work_run)(void *, void *, int, unsigned int); + +enum { + IO_WQ_WORK_CANCEL = 1, + IO_WQ_WORK_HASHED = 2, + IO_WQ_WORK_UNBOUND = 4, + IO_WQ_WORK_CONCURRENT = 16, + IO_WQ_HASH_SHIFT = 24, +}; + +enum io_wq_cancel { + IO_WQ_CANCEL_OK = 0, + IO_WQ_CANCEL_RUNNING = 1, + IO_WQ_CANCEL_NOTFOUND = 2, +}; + +typedef bool work_cancel_fn(struct io_wq_work *, void *); + +struct io_wait_queue { + struct wait_queue_entry wq; + struct io_ring_ctx *ctx; + unsigned int cq_tail; + unsigned int cq_min_tail; + unsigned int nr_timeouts; + int hit_timeout; + ktime_t min_timeout; + ktime_t timeout; + struct hrtimer t; + ktime_t napi_busy_poll_dt; 
+ bool napi_prefer_busy_poll; + long: 32; +}; + +enum { + IO_CHECK_CQ_OVERFLOW_BIT = 0, + IO_CHECK_CQ_DROPPED_BIT = 1, +}; + +enum { + IOBL_BUF_RING = 1, + IOBL_MMAP = 2, + IOBL_INC = 4, +}; + +enum { + IO_APOLL_OK = 0, + IO_APOLL_ABORTED = 1, + IO_APOLL_READY = 2, +}; + +struct io_defer_entry { + struct list_head list; + struct io_kiocb *req; + u32 seq; +}; + +struct ext_arg { + size_t argsz; + long: 32; + struct timespec64 ts; + const sigset_t *sig; + long: 32; + ktime_t min_time; + bool ts_set; + long: 32; +}; + +struct io_tctx_exit { + struct callback_head task_work; + struct completion completion; + struct io_ring_ctx *ctx; +}; + +struct io_task_cancel { + struct io_uring_task *tctx; + bool all; +}; + +struct creds; + +enum pci_bar_type { + pci_bar_unknown = 0, + pci_bar_io = 1, + pci_bar_mem32 = 2, + pci_bar_mem64 = 3, +}; + +struct pci_domain_busn_res { + struct list_head list; + struct resource res; + int domain_nr; +}; + +typedef void (*of_init_fn_1)(struct device_node *); + +struct clk_fixed_rate { + struct clk_hw hw; + long unsigned int fixed_rate; + long unsigned int fixed_accuracy; + long unsigned int flags; +}; + +struct container_dev { + struct device dev; + int (*offline)(struct container_dev *); + long: 32; +}; + +struct eeprom_93cx6 { + void *data; + void (*register_read)(struct eeprom_93cx6 *); + void (*register_write)(struct eeprom_93cx6 *); + int width; + unsigned int quirks; + char drive_data; + char reg_data_in; + char reg_data_out; + char reg_data_clock; + char reg_chip_select; +}; + +struct scsi_lun { + __u8 scsi_lun[8]; +}; + +enum scsi_pr_type { + SCSI_PR_WRITE_EXCLUSIVE = 1, + SCSI_PR_EXCLUSIVE_ACCESS = 3, + SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 5, + SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 6, + SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 7, + SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 8, +}; + +enum { + ATA_MAX_DEVICES = 2, + ATA_MAX_PRD = 256, + ATA_SECT_SIZE = 512, + ATA_MAX_SECTORS_128 = 128, + ATA_MAX_SECTORS = 256, + ATA_MAX_SECTORS_1024 = 1024, + ATA_MAX_SECTORS_LBA48 = 65535, + ATA_MAX_SECTORS_TAPE = 65535, + ATA_MAX_TRIM_RNUM = 64, + ATA_ID_WORDS = 256, + ATA_ID_CONFIG = 0, + ATA_ID_CYLS = 1, + ATA_ID_HEADS = 3, + ATA_ID_SECTORS = 6, + ATA_ID_SERNO = 10, + ATA_ID_BUF_SIZE = 21, + ATA_ID_FW_REV = 23, + ATA_ID_PROD = 27, + ATA_ID_MAX_MULTSECT = 47, + ATA_ID_DWORD_IO = 48, + ATA_ID_TRUSTED = 48, + ATA_ID_CAPABILITY = 49, + ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_OLD_DMA_MODES = 52, + ATA_ID_FIELD_VALID = 53, + ATA_ID_CUR_CYLS = 54, + ATA_ID_CUR_HEADS = 55, + ATA_ID_CUR_SECTORS = 56, + ATA_ID_MULTSECT = 59, + ATA_ID_LBA_CAPACITY = 60, + ATA_ID_SWDMA_MODES = 62, + ATA_ID_MWDMA_MODES = 63, + ATA_ID_PIO_MODES = 64, + ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_DMA_TIME = 66, + ATA_ID_EIDE_PIO = 67, + ATA_ID_EIDE_PIO_IORDY = 68, + ATA_ID_ADDITIONAL_SUPP = 69, + ATA_ID_QUEUE_DEPTH = 75, + ATA_ID_SATA_CAPABILITY = 76, + ATA_ID_SATA_CAPABILITY_2 = 77, + ATA_ID_FEATURE_SUPP = 78, + ATA_ID_MAJOR_VER = 80, + ATA_ID_COMMAND_SET_1 = 82, + ATA_ID_COMMAND_SET_2 = 83, + ATA_ID_CFSSE = 84, + ATA_ID_CFS_ENABLE_1 = 85, + ATA_ID_CFS_ENABLE_2 = 86, + ATA_ID_CSF_DEFAULT = 87, + ATA_ID_UDMA_MODES = 88, + ATA_ID_HW_CONFIG = 93, + ATA_ID_SPG = 98, + ATA_ID_LBA_CAPACITY_2 = 100, + ATA_ID_SECTOR_SIZE = 106, + ATA_ID_WWN = 108, + ATA_ID_LOGICAL_SECTOR_SIZE = 117, + ATA_ID_COMMAND_SET_3 = 119, + ATA_ID_COMMAND_SET_4 = 120, + ATA_ID_LAST_LUN = 126, + ATA_ID_DLF = 128, + ATA_ID_CSFO = 129, + ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, + ATA_ID_DATA_SET_MGMT = 169, + 
ATA_ID_SCT_CMD_XPORT = 206, + ATA_ID_ROT_SPEED = 217, + ATA_ID_PIO4 = 2, + ATA_ID_SERNO_LEN = 20, + ATA_ID_FW_REV_LEN = 8, + ATA_ID_PROD_LEN = 40, + ATA_ID_WWN_LEN = 8, + ATA_PCI_CTL_OFS = 2, + ATA_PIO0 = 1, + ATA_PIO1 = 3, + ATA_PIO2 = 7, + ATA_PIO3 = 15, + ATA_PIO4 = 31, + ATA_PIO5 = 63, + ATA_PIO6 = 127, + ATA_PIO4_ONLY = 16, + ATA_SWDMA0 = 1, + ATA_SWDMA1 = 3, + ATA_SWDMA2 = 7, + ATA_SWDMA2_ONLY = 4, + ATA_MWDMA0 = 1, + ATA_MWDMA1 = 3, + ATA_MWDMA2 = 7, + ATA_MWDMA3 = 15, + ATA_MWDMA4 = 31, + ATA_MWDMA12_ONLY = 6, + ATA_MWDMA2_ONLY = 4, + ATA_UDMA0 = 1, + ATA_UDMA1 = 3, + ATA_UDMA2 = 7, + ATA_UDMA3 = 15, + ATA_UDMA4 = 31, + ATA_UDMA5 = 63, + ATA_UDMA6 = 127, + ATA_UDMA7 = 255, + ATA_UDMA24_ONLY = 20, + ATA_UDMA_MASK_40C = 7, + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = 2048, + ATA_PRD_EOT = -2147483648, + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = 8, + ATA_DMA_START = 1, + ATA_DMA_INTR = 4, + ATA_DMA_ERR = 2, + ATA_DMA_ACTIVE = 1, + ATA_HOB = 128, + ATA_NIEN = 2, + ATA_LBA = 64, + ATA_DEV1 = 16, + ATA_DEVICE_OBS = 160, + ATA_DEVCTL_OBS = 8, + ATA_BUSY = 128, + ATA_DRDY = 64, + ATA_DF = 32, + ATA_DSC = 16, + ATA_DRQ = 8, + ATA_CORR = 4, + ATA_SENSE = 2, + ATA_ERR = 1, + ATA_SRST = 4, + ATA_ICRC = 128, + ATA_BBK = 128, + ATA_UNC = 64, + ATA_MC = 32, + ATA_IDNF = 16, + ATA_MCR = 8, + ATA_ABORTED = 4, + ATA_TRK0NF = 2, + ATA_AMNF = 1, + ATAPI_LFS = 240, + ATAPI_EOM = 2, + ATAPI_ILI = 1, + ATAPI_IO = 2, + ATAPI_COD = 1, + ATA_REG_DATA = 0, + ATA_REG_ERR = 1, + ATA_REG_NSECT = 2, + ATA_REG_LBAL = 3, + ATA_REG_LBAM = 4, + ATA_REG_LBAH = 5, + ATA_REG_DEVICE = 6, + ATA_REG_STATUS = 7, + ATA_REG_FEATURE = 1, + ATA_REG_CMD = 7, + ATA_REG_BYTEL = 4, + ATA_REG_BYTEH = 5, + ATA_REG_DEVSEL = 6, + ATA_REG_IRQ = 2, + ATA_CMD_DEV_RESET = 8, + ATA_CMD_CHK_POWER = 229, + ATA_CMD_STANDBY = 226, + ATA_CMD_IDLE = 227, + ATA_CMD_EDD = 144, + ATA_CMD_DOWNLOAD_MICRO = 146, + ATA_CMD_DOWNLOAD_MICRO_DMA = 147, + ATA_CMD_NOP = 0, + ATA_CMD_FLUSH = 231, + ATA_CMD_FLUSH_EXT = 234, + ATA_CMD_ID_ATA = 236, + ATA_CMD_ID_ATAPI = 161, + ATA_CMD_SERVICE = 162, + ATA_CMD_READ = 200, + ATA_CMD_READ_EXT = 37, + ATA_CMD_READ_QUEUED = 38, + ATA_CMD_READ_STREAM_EXT = 43, + ATA_CMD_READ_STREAM_DMA_EXT = 42, + ATA_CMD_WRITE = 202, + ATA_CMD_WRITE_EXT = 53, + ATA_CMD_WRITE_QUEUED = 54, + ATA_CMD_WRITE_STREAM_EXT = 59, + ATA_CMD_WRITE_STREAM_DMA_EXT = 58, + ATA_CMD_WRITE_FUA_EXT = 61, + ATA_CMD_WRITE_QUEUED_FUA_EXT = 62, + ATA_CMD_FPDMA_READ = 96, + ATA_CMD_FPDMA_WRITE = 97, + ATA_CMD_NCQ_NON_DATA = 99, + ATA_CMD_FPDMA_SEND = 100, + ATA_CMD_FPDMA_RECV = 101, + ATA_CMD_PIO_READ = 32, + ATA_CMD_PIO_READ_EXT = 36, + ATA_CMD_PIO_WRITE = 48, + ATA_CMD_PIO_WRITE_EXT = 52, + ATA_CMD_READ_MULTI = 196, + ATA_CMD_READ_MULTI_EXT = 41, + ATA_CMD_WRITE_MULTI = 197, + ATA_CMD_WRITE_MULTI_EXT = 57, + ATA_CMD_WRITE_MULTI_FUA_EXT = 206, + ATA_CMD_SET_FEATURES = 239, + ATA_CMD_SET_MULTI = 198, + ATA_CMD_PACKET = 160, + ATA_CMD_VERIFY = 64, + ATA_CMD_VERIFY_EXT = 66, + ATA_CMD_WRITE_UNCORR_EXT = 69, + ATA_CMD_STANDBYNOW1 = 224, + ATA_CMD_IDLEIMMEDIATE = 225, + ATA_CMD_SLEEP = 230, + ATA_CMD_INIT_DEV_PARAMS = 145, + ATA_CMD_READ_NATIVE_MAX = 248, + ATA_CMD_READ_NATIVE_MAX_EXT = 39, + ATA_CMD_SET_MAX = 249, + ATA_CMD_SET_MAX_EXT = 55, + ATA_CMD_READ_LOG_EXT = 47, + ATA_CMD_WRITE_LOG_EXT = 63, + ATA_CMD_READ_LOG_DMA_EXT = 71, + ATA_CMD_WRITE_LOG_DMA_EXT = 87, + ATA_CMD_TRUSTED_NONDATA = 91, + ATA_CMD_TRUSTED_RCV = 92, + ATA_CMD_TRUSTED_RCV_DMA = 93, + ATA_CMD_TRUSTED_SND = 94, + ATA_CMD_TRUSTED_SND_DMA = 95, + 
ATA_CMD_PMP_READ = 228, + ATA_CMD_PMP_READ_DMA = 233, + ATA_CMD_PMP_WRITE = 232, + ATA_CMD_PMP_WRITE_DMA = 235, + ATA_CMD_CONF_OVERLAY = 177, + ATA_CMD_SEC_SET_PASS = 241, + ATA_CMD_SEC_UNLOCK = 242, + ATA_CMD_SEC_ERASE_PREP = 243, + ATA_CMD_SEC_ERASE_UNIT = 244, + ATA_CMD_SEC_FREEZE_LOCK = 245, + ATA_CMD_SEC_DISABLE_PASS = 246, + ATA_CMD_CONFIG_STREAM = 81, + ATA_CMD_SMART = 176, + ATA_CMD_MEDIA_LOCK = 222, + ATA_CMD_MEDIA_UNLOCK = 223, + ATA_CMD_DSM = 6, + ATA_CMD_CHK_MED_CRD_TYP = 209, + ATA_CMD_CFA_REQ_EXT_ERR = 3, + ATA_CMD_CFA_WRITE_NE = 56, + ATA_CMD_CFA_TRANS_SECT = 135, + ATA_CMD_CFA_ERASE = 192, + ATA_CMD_CFA_WRITE_MULT_NE = 205, + ATA_CMD_REQ_SENSE_DATA = 11, + ATA_CMD_SANITIZE_DEVICE = 180, + ATA_CMD_ZAC_MGMT_IN = 74, + ATA_CMD_ZAC_MGMT_OUT = 159, + ATA_CMD_RESTORE = 16, + ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 1, + ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 2, + ATA_SUBCMD_FPDMA_SEND_DSM = 0, + ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 2, + ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0, + ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 5, + ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 6, + ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 7, + ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0, + ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 1, + ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 2, + ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 3, + ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 4, + ATA_LOG_DIRECTORY = 0, + ATA_LOG_SATA_NCQ = 16, + ATA_LOG_NCQ_NON_DATA = 18, + ATA_LOG_NCQ_SEND_RECV = 19, + ATA_LOG_CDL = 24, + ATA_LOG_CDL_SIZE = 512, + ATA_LOG_IDENTIFY_DEVICE = 48, + ATA_LOG_SENSE_NCQ = 15, + ATA_LOG_SENSE_NCQ_SIZE = 1024, + ATA_LOG_CONCURRENT_POSITIONING_RANGES = 71, + ATA_LOG_SUPPORTED_CAPABILITIES = 3, + ATA_LOG_CURRENT_SETTINGS = 4, + ATA_LOG_SECURITY = 6, + ATA_LOG_SATA_SETTINGS = 8, + ATA_LOG_ZONED_INFORMATION = 9, + ATA_LOG_DEVSLP_OFFSET = 48, + ATA_LOG_DEVSLP_SIZE = 8, + ATA_LOG_DEVSLP_MDAT = 0, + ATA_LOG_DEVSLP_MDAT_MASK = 31, + ATA_LOG_DEVSLP_DETO = 1, + ATA_LOG_DEVSLP_VALID = 7, + ATA_LOG_DEVSLP_VALID_MASK = 128, + ATA_LOG_NCQ_PRIO_OFFSET = 9, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = 1, + ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 4, + ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = 1, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 8, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 12, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 16, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = 2, + ATA_LOG_NCQ_SEND_RECV_SIZE = 20, + ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = 1, + ATA_LOG_NCQ_NON_DATA_ABORT_ALL = 2, + ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = 4, + ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = 8, + ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = 16, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 28, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = 1, + ATA_LOG_NCQ_NON_DATA_SIZE = 64, + ATA_CMD_READ_LONG = 34, + ATA_CMD_READ_LONG_ONCE = 35, + ATA_CMD_WRITE_LONG = 50, + ATA_CMD_WRITE_LONG_ONCE = 51, + SETFEATURES_XFER = 3, + XFER_UDMA_7 = 71, + XFER_UDMA_6 = 70, + XFER_UDMA_5 = 69, + XFER_UDMA_4 = 68, + XFER_UDMA_3 = 67, + XFER_UDMA_2 = 66, + XFER_UDMA_1 = 65, + XFER_UDMA_0 = 64, + XFER_MW_DMA_4 = 36, + XFER_MW_DMA_3 = 35, + XFER_MW_DMA_2 = 34, + XFER_MW_DMA_1 = 33, + XFER_MW_DMA_0 = 32, + XFER_SW_DMA_2 = 18, + XFER_SW_DMA_1 = 17, + XFER_SW_DMA_0 = 16, + XFER_PIO_6 = 14, + XFER_PIO_5 = 13, + XFER_PIO_4 = 12, + XFER_PIO_3 = 11, + XFER_PIO_2 = 10, + 
XFER_PIO_1 = 9, + XFER_PIO_0 = 8, + XFER_PIO_SLOW = 0, + SETFEATURES_WC_ON = 2, + SETFEATURES_WC_OFF = 130, + SETFEATURES_RA_ON = 170, + SETFEATURES_RA_OFF = 85, + SETFEATURES_AAM_ON = 66, + SETFEATURES_AAM_OFF = 194, + SETFEATURES_SPINUP = 7, + SETFEATURES_SPINUP_TIMEOUT = 30000, + SETFEATURES_SATA_ENABLE = 16, + SETFEATURES_SATA_DISABLE = 144, + SETFEATURES_CDL = 13, + SATA_FPDMA_OFFSET = 1, + SATA_FPDMA_AA = 2, + SATA_DIPM = 3, + SATA_FPDMA_IN_ORDER = 4, + SATA_AN = 5, + SATA_SSP = 6, + SATA_DEVSLP = 9, + SETFEATURE_SENSE_DATA = 195, + SETFEATURE_SENSE_DATA_SUCC_NCQ = 196, + ATA_SET_MAX_ADDR = 0, + ATA_SET_MAX_PASSWD = 1, + ATA_SET_MAX_LOCK = 2, + ATA_SET_MAX_UNLOCK = 3, + ATA_SET_MAX_FREEZE_LOCK = 4, + ATA_SET_MAX_PASSWD_DMA = 5, + ATA_SET_MAX_UNLOCK_DMA = 6, + ATA_DCO_RESTORE = 192, + ATA_DCO_FREEZE_LOCK = 193, + ATA_DCO_IDENTIFY = 194, + ATA_DCO_SET = 195, + ATA_SMART_ENABLE = 216, + ATA_SMART_READ_VALUES = 208, + ATA_SMART_READ_THRESHOLDS = 209, + ATA_DSM_TRIM = 1, + ATA_SMART_LBAM_PASS = 79, + ATA_SMART_LBAH_PASS = 194, + ATAPI_PKT_DMA = 1, + ATAPI_DMADIR = 4, + ATAPI_CDB_LEN = 16, + SATA_PMP_MAX_PORTS = 15, + SATA_PMP_CTRL_PORT = 15, + SATA_PMP_GSCR_DWORDS = 128, + SATA_PMP_GSCR_PROD_ID = 0, + SATA_PMP_GSCR_REV = 1, + SATA_PMP_GSCR_PORT_INFO = 2, + SATA_PMP_GSCR_ERROR = 32, + SATA_PMP_GSCR_ERROR_EN = 33, + SATA_PMP_GSCR_FEAT = 64, + SATA_PMP_GSCR_FEAT_EN = 96, + SATA_PMP_PSCR_STATUS = 0, + SATA_PMP_PSCR_ERROR = 1, + SATA_PMP_PSCR_CONTROL = 2, + SATA_PMP_FEAT_BIST = 1, + SATA_PMP_FEAT_PMREQ = 2, + SATA_PMP_FEAT_DYNSSC = 4, + SATA_PMP_FEAT_NOTIFY = 8, + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA40_SHORT = 3, + ATA_CBL_PATA_UNK = 4, + ATA_CBL_PATA_IGN = 5, + ATA_CBL_SATA = 6, + SCR_STATUS = 0, + SCR_ERROR = 1, + SCR_CONTROL = 2, + SCR_ACTIVE = 3, + SCR_NOTIFICATION = 4, + SERR_DATA_RECOVERED = 1, + SERR_COMM_RECOVERED = 2, + SERR_DATA = 256, + SERR_PERSISTENT = 512, + SERR_PROTOCOL = 1024, + SERR_INTERNAL = 2048, + SERR_PHYRDY_CHG = 65536, + SERR_PHY_INT_ERR = 131072, + SERR_COMM_WAKE = 262144, + SERR_10B_8B_ERR = 524288, + SERR_DISPARITY = 1048576, + SERR_CRC = 2097152, + SERR_HANDSHAKE = 4194304, + SERR_LINK_SEQ_ERR = 8388608, + SERR_TRANS_ST_ERROR = 16777216, + SERR_UNRECOG_FIS = 33554432, + SERR_DEV_XCHG = 67108864, +}; + +struct ata_bmdma_prd { + __le32 addr; + __le32 flags_len; +}; + +enum ata_quirks { + __ATA_QUIRK_DIAGNOSTIC = 0, + __ATA_QUIRK_NODMA = 1, + __ATA_QUIRK_NONCQ = 2, + __ATA_QUIRK_MAX_SEC_128 = 3, + __ATA_QUIRK_BROKEN_HPA = 4, + __ATA_QUIRK_DISABLE = 5, + __ATA_QUIRK_HPA_SIZE = 6, + __ATA_QUIRK_IVB = 7, + __ATA_QUIRK_STUCK_ERR = 8, + __ATA_QUIRK_BRIDGE_OK = 9, + __ATA_QUIRK_ATAPI_MOD16_DMA = 10, + __ATA_QUIRK_FIRMWARE_WARN = 11, + __ATA_QUIRK_1_5_GBPS = 12, + __ATA_QUIRK_NOSETXFER = 13, + __ATA_QUIRK_BROKEN_FPDMA_AA = 14, + __ATA_QUIRK_DUMP_ID = 15, + __ATA_QUIRK_MAX_SEC_LBA48 = 16, + __ATA_QUIRK_ATAPI_DMADIR = 17, + __ATA_QUIRK_NO_NCQ_TRIM = 18, + __ATA_QUIRK_NOLPM = 19, + __ATA_QUIRK_WD_BROKEN_LPM = 20, + __ATA_QUIRK_ZERO_AFTER_TRIM = 21, + __ATA_QUIRK_NO_DMA_LOG = 22, + __ATA_QUIRK_NOTRIM = 23, + __ATA_QUIRK_MAX_SEC_1024 = 24, + __ATA_QUIRK_MAX_TRIM_128M = 25, + __ATA_QUIRK_NO_NCQ_ON_ATI = 26, + __ATA_QUIRK_NO_ID_DEV_LOG = 27, + __ATA_QUIRK_NO_LOG_DIR = 28, + __ATA_QUIRK_NO_FUA = 29, + __ATA_QUIRK_MAX = 30, +}; + +enum { + LIBATA_MAX_PRD = 128, + LIBATA_DUMB_MAX_PRD = 64, + ATA_DEF_QUEUE = 1, + ATA_MAX_QUEUE = 32, + ATA_TAG_INTERNAL = 32, + ATA_SHORT_PAUSE = 16, + ATAPI_MAX_DRAIN = 16384, + ATA_ALL_DEVICES = 3, + 
ATA_SHT_EMULATED = 1, + ATA_SHT_THIS_ID = -1, + ATA_TFLAG_LBA48 = 1, + ATA_TFLAG_ISADDR = 2, + ATA_TFLAG_DEVICE = 4, + ATA_TFLAG_WRITE = 8, + ATA_TFLAG_LBA = 16, + ATA_TFLAG_FUA = 32, + ATA_TFLAG_POLLING = 64, + ATA_DFLAG_LBA = 1, + ATA_DFLAG_LBA48 = 2, + ATA_DFLAG_CDB_INTR = 4, + ATA_DFLAG_NCQ = 8, + ATA_DFLAG_FLUSH_EXT = 16, + ATA_DFLAG_ACPI_PENDING = 32, + ATA_DFLAG_ACPI_FAILED = 64, + ATA_DFLAG_AN = 128, + ATA_DFLAG_TRUSTED = 256, + ATA_DFLAG_FUA = 512, + ATA_DFLAG_DMADIR = 1024, + ATA_DFLAG_NCQ_SEND_RECV = 2048, + ATA_DFLAG_NCQ_PRIO = 4096, + ATA_DFLAG_CDL = 8192, + ATA_DFLAG_CFG_MASK = 16383, + ATA_DFLAG_PIO = 16384, + ATA_DFLAG_NCQ_OFF = 32768, + ATA_DFLAG_SLEEPING = 65536, + ATA_DFLAG_DUBIOUS_XFER = 131072, + ATA_DFLAG_NO_UNLOAD = 262144, + ATA_DFLAG_UNLOCK_HPA = 524288, + ATA_DFLAG_INIT_MASK = 1048575, + ATA_DFLAG_NCQ_PRIO_ENABLED = 1048576, + ATA_DFLAG_CDL_ENABLED = 2097152, + ATA_DFLAG_RESUMING = 4194304, + ATA_DFLAG_DETACH = 16777216, + ATA_DFLAG_DETACHED = 33554432, + ATA_DFLAG_DA = 67108864, + ATA_DFLAG_DEVSLP = 134217728, + ATA_DFLAG_ACPI_DISABLED = 268435456, + ATA_DFLAG_D_SENSE = 536870912, + ATA_DFLAG_ZAC = 1073741824, + ATA_DFLAG_FEATURES_MASK = 201341696, + ATA_DEV_UNKNOWN = 0, + ATA_DEV_ATA = 1, + ATA_DEV_ATA_UNSUP = 2, + ATA_DEV_ATAPI = 3, + ATA_DEV_ATAPI_UNSUP = 4, + ATA_DEV_PMP = 5, + ATA_DEV_PMP_UNSUP = 6, + ATA_DEV_SEMB = 7, + ATA_DEV_SEMB_UNSUP = 8, + ATA_DEV_ZAC = 9, + ATA_DEV_ZAC_UNSUP = 10, + ATA_DEV_NONE = 11, + ATA_LFLAG_NO_HRST = 2, + ATA_LFLAG_NO_SRST = 4, + ATA_LFLAG_ASSUME_ATA = 8, + ATA_LFLAG_ASSUME_SEMB = 16, + ATA_LFLAG_ASSUME_CLASS = 24, + ATA_LFLAG_NO_RETRY = 32, + ATA_LFLAG_DISABLED = 64, + ATA_LFLAG_SW_ACTIVITY = 128, + ATA_LFLAG_NO_LPM = 256, + ATA_LFLAG_RST_ONCE = 512, + ATA_LFLAG_CHANGED = 1024, + ATA_LFLAG_NO_DEBOUNCE_DELAY = 2048, + ATA_FLAG_SLAVE_POSS = 1, + ATA_FLAG_SATA = 2, + ATA_FLAG_NO_LPM = 4, + ATA_FLAG_NO_LOG_PAGE = 32, + ATA_FLAG_NO_ATAPI = 64, + ATA_FLAG_PIO_DMA = 128, + ATA_FLAG_PIO_LBA48 = 256, + ATA_FLAG_PIO_POLLING = 512, + ATA_FLAG_NCQ = 1024, + ATA_FLAG_NO_POWEROFF_SPINDOWN = 2048, + ATA_FLAG_NO_HIBERNATE_SPINDOWN = 4096, + ATA_FLAG_DEBUGMSG = 8192, + ATA_FLAG_FPDMA_AA = 16384, + ATA_FLAG_IGN_SIMPLEX = 32768, + ATA_FLAG_NO_IORDY = 65536, + ATA_FLAG_ACPI_SATA = 131072, + ATA_FLAG_AN = 262144, + ATA_FLAG_PMP = 524288, + ATA_FLAG_FPDMA_AUX = 1048576, + ATA_FLAG_EM = 2097152, + ATA_FLAG_SW_ACTIVITY = 4194304, + ATA_FLAG_NO_DIPM = 8388608, + ATA_FLAG_SAS_HOST = 16777216, + ATA_PFLAG_EH_PENDING = 1, + ATA_PFLAG_EH_IN_PROGRESS = 2, + ATA_PFLAG_FROZEN = 4, + ATA_PFLAG_RECOVERED = 8, + ATA_PFLAG_LOADING = 16, + ATA_PFLAG_SCSI_HOTPLUG = 64, + ATA_PFLAG_INITIALIZING = 128, + ATA_PFLAG_RESETTING = 256, + ATA_PFLAG_UNLOADING = 512, + ATA_PFLAG_UNLOADED = 1024, + ATA_PFLAG_RESUMING = 65536, + ATA_PFLAG_SUSPENDED = 131072, + ATA_PFLAG_PM_PENDING = 262144, + ATA_PFLAG_INIT_GTM_VALID = 524288, + ATA_PFLAG_PIO32 = 1048576, + ATA_PFLAG_PIO32CHANGE = 2097152, + ATA_PFLAG_EXTERNAL = 4194304, + ATA_QCFLAG_ACTIVE = 1, + ATA_QCFLAG_DMAMAP = 2, + ATA_QCFLAG_RTF_FILLED = 4, + ATA_QCFLAG_IO = 8, + ATA_QCFLAG_RESULT_TF = 16, + ATA_QCFLAG_CLEAR_EXCL = 32, + ATA_QCFLAG_QUIET = 64, + ATA_QCFLAG_RETRY = 128, + ATA_QCFLAG_HAS_CDL = 256, + ATA_QCFLAG_EH = 65536, + ATA_QCFLAG_SENSE_VALID = 131072, + ATA_QCFLAG_EH_SCHEDULED = 262144, + ATA_QCFLAG_EH_SUCCESS_CMD = 524288, + ATA_HOST_SIMPLEX = 1, + ATA_HOST_STARTED = 2, + ATA_HOST_PARALLEL_SCAN = 4, + ATA_HOST_IGNORE_ATA = 8, + ATA_HOST_NO_PART = 16, + ATA_HOST_NO_SSC = 32, + ATA_HOST_NO_DEVSLP = 64, + 
ATA_TMOUT_INTERNAL_QUICK = 5000, + ATA_TMOUT_MAX_PARK = 30000, + ATA_TMOUT_FF_WAIT_LONG = 2000, + ATA_TMOUT_FF_WAIT = 800, + ATA_WAIT_AFTER_RESET = 150, + ATA_TMOUT_PMP_SRST_WAIT = 10000, + ATA_TMOUT_SPURIOUS_PHY = 10000, + BUS_UNKNOWN = 0, + BUS_DMA = 1, + BUS_IDLE = 2, + BUS_NOINTR = 3, + BUS_NODATA = 4, + BUS_TIMER = 5, + BUS_PIO = 6, + BUS_EDD = 7, + BUS_IDENTIFY = 8, + BUS_PACKET = 9, + PORT_UNKNOWN = 0, + PORT_ENABLED = 1, + PORT_DISABLED = 2, + ATA_NR_PIO_MODES = 7, + ATA_NR_MWDMA_MODES = 5, + ATA_NR_UDMA_MODES = 8, + ATA_SHIFT_PIO = 0, + ATA_SHIFT_MWDMA = 7, + ATA_SHIFT_UDMA = 12, + ATA_SHIFT_PRIO = 6, + ATA_PRIO_HIGH = 2, + ATA_DMA_PAD_SZ = 4, + ATA_ERING_SIZE = 32, + ATA_DEFER_LINK = 1, + ATA_DEFER_PORT = 2, + ATA_EH_DESC_LEN = 80, + ATA_EH_REVALIDATE = 1, + ATA_EH_SOFTRESET = 2, + ATA_EH_HARDRESET = 4, + ATA_EH_RESET = 6, + ATA_EH_ENABLE_LINK = 8, + ATA_EH_PARK = 32, + ATA_EH_GET_SUCCESS_SENSE = 64, + ATA_EH_SET_ACTIVE = 128, + ATA_EH_PERDEV_MASK = 225, + ATA_EH_ALL_ACTIONS = 15, + ATA_EHI_HOTPLUGGED = 1, + ATA_EHI_NO_AUTOPSY = 4, + ATA_EHI_QUIET = 8, + ATA_EHI_NO_RECOVERY = 16, + ATA_EHI_DID_SOFTRESET = 65536, + ATA_EHI_DID_HARDRESET = 131072, + ATA_EHI_PRINTINFO = 262144, + ATA_EHI_SETMODE = 524288, + ATA_EHI_POST_SETMODE = 1048576, + ATA_EHI_DID_PRINT_QUIRKS = 2097152, + ATA_EHI_DID_RESET = 196608, + ATA_EHI_TO_SLAVE_MASK = 12, + ATA_EH_MAX_TRIES = 5, + ATA_LINK_RESUME_TRIES = 5, + ATA_EH_DEV_TRIES = 3, + ATA_EH_PMP_TRIES = 5, + ATA_EH_PMP_LINK_TRIES = 3, + SATA_PMP_RW_TIMEOUT = 3000, + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, + ATA_QUIRK_DIAGNOSTIC = 1, + ATA_QUIRK_NODMA = 2, + ATA_QUIRK_NONCQ = 4, + ATA_QUIRK_MAX_SEC_128 = 8, + ATA_QUIRK_BROKEN_HPA = 16, + ATA_QUIRK_DISABLE = 32, + ATA_QUIRK_HPA_SIZE = 64, + ATA_QUIRK_IVB = 128, + ATA_QUIRK_STUCK_ERR = 256, + ATA_QUIRK_BRIDGE_OK = 512, + ATA_QUIRK_ATAPI_MOD16_DMA = 1024, + ATA_QUIRK_FIRMWARE_WARN = 2048, + ATA_QUIRK_1_5_GBPS = 4096, + ATA_QUIRK_NOSETXFER = 8192, + ATA_QUIRK_BROKEN_FPDMA_AA = 16384, + ATA_QUIRK_DUMP_ID = 32768, + ATA_QUIRK_MAX_SEC_LBA48 = 65536, + ATA_QUIRK_ATAPI_DMADIR = 131072, + ATA_QUIRK_NO_NCQ_TRIM = 262144, + ATA_QUIRK_NOLPM = 524288, + ATA_QUIRK_WD_BROKEN_LPM = 1048576, + ATA_QUIRK_ZERO_AFTER_TRIM = 2097152, + ATA_QUIRK_NO_DMA_LOG = 4194304, + ATA_QUIRK_NOTRIM = 8388608, + ATA_QUIRK_MAX_SEC_1024 = 16777216, + ATA_QUIRK_MAX_TRIM_128M = 33554432, + ATA_QUIRK_NO_NCQ_ON_ATI = 67108864, + ATA_QUIRK_NO_ID_DEV_LOG = 134217728, + ATA_QUIRK_NO_LOG_DIR = 268435456, + ATA_QUIRK_NO_FUA = 536870912, + ATA_DMA_MASK_ATA = 1, + ATA_DMA_MASK_ATAPI = 2, + ATA_DMA_MASK_CFA = 4, + ATAPI_READ = 0, + ATAPI_WRITE = 1, + ATAPI_READ_CD = 2, + ATAPI_PASS_THRU = 3, + ATAPI_MISC = 4, + ATA_TIMING_SETUP = 1, + ATA_TIMING_ACT8B = 2, + ATA_TIMING_REC8B = 4, + ATA_TIMING_CYC8B = 8, + ATA_TIMING_8BIT = 14, + ATA_TIMING_ACTIVE = 16, + ATA_TIMING_RECOVER = 32, + ATA_TIMING_DMACK_HOLD = 64, + ATA_TIMING_CYCLE = 128, + ATA_TIMING_UDMA = 256, + ATA_TIMING_ALL = 511, + ATA_ACPI_FILTER_SETXFER = 1, + ATA_ACPI_FILTER_LOCK = 2, + ATA_ACPI_FILTER_DIPM = 4, + ATA_ACPI_FILTER_FPDMA_OFFSET = 8, + ATA_ACPI_FILTER_FPDMA_AA = 16, + ATA_ACPI_FILTER_DEFAULT = 7, +}; + +enum ata_completion_errors { + AC_ERR_OK = 0, + AC_ERR_DEV = 1, + AC_ERR_HSM = 2, + AC_ERR_TIMEOUT = 4, + AC_ERR_MEDIA = 8, + AC_ERR_ATA_BUS = 16, + AC_ERR_HOST_BUS = 32, + AC_ERR_SYSTEM = 64, + AC_ERR_INVALID = 128, + AC_ERR_OTHER = 256, + AC_ERR_NODEV_HINT = 512, + AC_ERR_NCQ = 1024, +}; + +enum ata_lpm_policy { + ATA_LPM_UNKNOWN = 0, + ATA_LPM_MAX_POWER = 1, + ATA_LPM_MED_POWER = 2, + 
ATA_LPM_MED_POWER_WITH_DIPM = 3, + ATA_LPM_MIN_POWER_WITH_PARTIAL = 4, + ATA_LPM_MIN_POWER = 5, +}; + +struct ata_queued_cmd; + +typedef void (*ata_qc_cb_t)(struct ata_queued_cmd *); + +struct ata_taskfile { + long unsigned int flags; + u8 protocol; + u8 ctl; + u8 hob_feature; + u8 hob_nsect; + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + union { + u8 error; + u8 feature; + }; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + union { + u8 status; + u8 command; + }; + u32 auxiliary; +}; + +struct ata_port; + +struct ata_device; + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + struct scsi_cmnd *scsicmd; + void (*scsidone)(struct scsi_cmnd *); + struct ata_taskfile tf; + u8 cdb[16]; + long unsigned int flags; + unsigned int tag; + unsigned int hw_tag; + unsigned int n_elem; + unsigned int orig_n_elem; + int dma_dir; + unsigned int sect_size; + unsigned int nbytes; + unsigned int extrabytes; + unsigned int curbytes; + struct scatterlist sgent; + struct scatterlist *sg; + struct scatterlist *cursg; + unsigned int cursg_ofs; + unsigned int err_mask; + struct ata_taskfile result_tf; + ata_qc_cb_t complete_fn; + void *private_data; + void *lldd_task; +}; + +struct ata_link; + +typedef int (*ata_prereset_fn_t)(struct ata_link *, long unsigned int); + +struct ata_eh_info { + struct ata_device *dev; + u32 serror; + unsigned int err_mask; + unsigned int action; + unsigned int dev_action[2]; + unsigned int flags; + unsigned int probe_mask; + char desc[80]; + int desc_len; +}; + +struct ata_eh_context { + struct ata_eh_info i; + int tries[2]; + int cmd_timeout_idx[16]; + unsigned int classes[2]; + unsigned int did_probe_mask; + unsigned int unloaded_mask; + unsigned int saved_ncq_enabled; + u8 saved_xfer_mode[2]; + long unsigned int last_reset; +}; + +struct ata_ering_entry { + unsigned int eflags; + unsigned int err_mask; + u64 timestamp; +}; + +struct ata_ering { + int cursor; + long: 32; + struct ata_ering_entry ring[32]; +}; + +struct ata_cpr_log; + +struct ata_cdl; + +struct ata_device { + struct ata_link *link; + unsigned int devno; + unsigned int quirks; + long unsigned int flags; + struct scsi_device *sdev; + void *private_data; + struct device tdev; + u64 n_sectors; + u64 n_native_sectors; + unsigned int class; + long unsigned int unpark_deadline; + u8 pio_mode; + u8 dma_mode; + u8 xfer_mode; + unsigned int xfer_shift; + unsigned int multi_count; + unsigned int max_sectors; + unsigned int cdb_len; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + u16 cylinders; + u16 heads; + u16 sectors; + union { + u16 id[256]; + u32 gscr[128]; + }; + u8 devslp_timing[8]; + u8 ncq_send_recv_cmds[20]; + u8 ncq_non_data_cmds[64]; + u32 zac_zoned_cap; + u32 zac_zones_optimal_open; + u32 zac_zones_optimal_nonseq; + u32 zac_zones_max_open; + struct ata_cpr_log *cpr_log; + struct ata_cdl *cdl; + int spdn_cnt; + struct ata_ering ering; + u8 sector_buf[512]; +}; + +struct ata_link { + struct ata_port *ap; + int pmp; + struct device tdev; + unsigned int active_tag; + u32 sactive; + unsigned int flags; + u32 saved_scontrol; + unsigned int hw_sata_spd_limit; + unsigned int sata_spd_limit; + unsigned int sata_spd; + enum ata_lpm_policy lpm_policy; + struct ata_eh_info eh_info; + struct ata_eh_context eh_context; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ata_device device[2]; + long unsigned int last_lpm_change; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef int (*ata_reset_fn_t)(struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*ata_postreset_fn_t)(struct ata_link *, unsigned int *); + +enum sw_activity { + OFF = 0, + BLINK_ON = 1, + BLINK_OFF = 2, +}; + +struct ata_ioports { + void *cmd_addr; + void *data_addr; + void *error_addr; + void *feature_addr; + void *nsect_addr; + void *lbal_addr; + void *lbam_addr; + void *lbah_addr; + void *device_addr; + void *status_addr; + void *command_addr; + void *altstatus_addr; + void *ctl_addr; + void *bmdma_addr; + void *scr_addr; +}; + +struct ata_port_operations; + +struct ata_host { + spinlock_t lock; + struct device *dev; + void * const *iomap; + unsigned int n_ports; + unsigned int n_tags; + void *private_data; + struct ata_port_operations *ops; + long unsigned int flags; + struct kref kref; + struct mutex eh_mutex; + struct task_struct *eh_owner; + struct ata_port *simplex_claimed; + struct ata_port *ports[0]; +}; + +struct ata_port_operations { + int (*qc_defer)(struct ata_queued_cmd *); + int (*check_atapi_dma)(struct ata_queued_cmd *); + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *); + unsigned int (*qc_issue)(struct ata_queued_cmd *); + void (*qc_fill_rtf)(struct ata_queued_cmd *); + void (*qc_ncq_fill_rtf)(struct ata_port *, u64); + int (*cable_detect)(struct ata_port *); + unsigned int (*mode_filter)(struct ata_device *, unsigned int); + void (*set_piomode)(struct ata_port *, struct ata_device *); + void (*set_dmamode)(struct ata_port *, struct ata_device *); + int (*set_mode)(struct ata_link *, struct ata_device **); + unsigned int (*read_id)(struct ata_device *, struct ata_taskfile *, __le16 *); + void (*dev_config)(struct ata_device *); + void (*freeze)(struct ata_port *); + void (*thaw)(struct ata_port *); + ata_prereset_fn_t prereset; + ata_reset_fn_t softreset; + ata_reset_fn_t hardreset; + ata_postreset_fn_t postreset; + ata_prereset_fn_t pmp_prereset; + ata_reset_fn_t pmp_softreset; + ata_reset_fn_t pmp_hardreset; + ata_postreset_fn_t pmp_postreset; + void (*error_handler)(struct ata_port *); + void (*lost_interrupt)(struct ata_port *); + void (*post_internal_cmd)(struct ata_queued_cmd *); + void (*sched_eh)(struct ata_port *); + void (*end_eh)(struct ata_port *); + int (*scr_read)(struct ata_link *, unsigned int, u32 *); + int (*scr_write)(struct ata_link *, unsigned int, u32); + void (*pmp_attach)(struct ata_port *); + void (*pmp_detach)(struct ata_port *); + int (*set_lpm)(struct ata_link *, enum ata_lpm_policy, unsigned int); + int (*port_suspend)(struct ata_port *, pm_message_t); + int (*port_resume)(struct ata_port *); + int (*port_start)(struct ata_port *); + void (*port_stop)(struct ata_port *); + void (*host_stop)(struct ata_host *); + void (*sff_dev_select)(struct ata_port *, unsigned int); + void (*sff_set_devctl)(struct ata_port *, u8); + u8 (*sff_check_status)(struct ata_port *); + u8 (*sff_check_altstatus)(struct ata_port *); + void (*sff_tf_load)(struct ata_port *, const struct ata_taskfile *); + void (*sff_tf_read)(struct 
ata_port *, struct ata_taskfile *); + void (*sff_exec_command)(struct ata_port *, const struct ata_taskfile *); + unsigned int (*sff_data_xfer)(struct ata_queued_cmd *, unsigned char *, unsigned int, int); + void (*sff_irq_on)(struct ata_port *); + bool (*sff_irq_check)(struct ata_port *); + void (*sff_irq_clear)(struct ata_port *); + void (*sff_drain_fifo)(struct ata_queued_cmd *); + void (*bmdma_setup)(struct ata_queued_cmd *); + void (*bmdma_start)(struct ata_queued_cmd *); + void (*bmdma_stop)(struct ata_queued_cmd *); + u8 (*bmdma_status)(struct ata_port *); + ssize_t (*em_show)(struct ata_port *, char *); + ssize_t (*em_store)(struct ata_port *, const char *, size_t); + ssize_t (*sw_activity_show)(struct ata_device *, char *); + ssize_t (*sw_activity_store)(struct ata_device *, enum sw_activity); + ssize_t (*transmit_led_message)(struct ata_port *, u32, ssize_t); + const struct ata_port_operations *inherits; +}; + +struct ata_port_stats { + long unsigned int unhandled_irq; + long unsigned int idle_irq; + long unsigned int rw_reqbuf; +}; + +struct ata_port { + struct Scsi_Host *scsi_host; + struct ata_port_operations *ops; + spinlock_t *lock; + long unsigned int flags; + unsigned int pflags; + unsigned int print_id; + unsigned int port_no; + struct ata_ioports ioaddr; + u8 ctl; + u8 last_ctl; + struct ata_link *sff_pio_task_link; + struct delayed_work sff_pio_task; + struct ata_bmdma_prd *bmdma_prd; + dma_addr_t bmdma_prd_dma; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + unsigned int cbl; + struct ata_queued_cmd qcmd[33]; + u64 qc_active; + int nr_active_links; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ata_link link; + struct ata_link *slave_link; + int nr_pmp_links; + struct ata_link *pmp_link; + struct ata_link *excl_link; + struct ata_port_stats stats; + struct ata_host *host; + struct device *dev; + long: 32; + struct device tdev; + struct mutex scsi_scan_mutex; + struct delayed_work hotplug_task; + struct delayed_work scsi_rescan_task; + unsigned int hsm_task_state; + struct list_head eh_done_q; + wait_queue_head_t eh_wait_q; + int eh_tries; + struct completion park_req_pending; + pm_message_t pm_mesg; + enum ata_lpm_policy target_lpm_policy; + struct timer_list fastdrain_timer; + unsigned int fastdrain_cnt; + long: 32; + async_cookie_t cookie; + int em_message_type; + void *private_data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ata_cpr { + u8 num; + u8 num_storage_elements; + long: 32; + u64 start_lba; + u64 num_lbas; +}; + +struct ata_cpr_log { + u8 nr_cpr; + long: 32; + struct ata_cpr cpr[0]; +}; + +struct ata_cdl { + u8 desc_log_buf[512]; + u8 ncq_sense_log_buf[1024]; +}; + +enum ata_dev_iter_mode { + ATA_DITER_ENABLED = 0, + ATA_DITER_ENABLED_REVERSE = 1, + ATA_DITER_ALL = 2, + ATA_DITER_ALL_REVERSE = 3, +}; + +struct ata_internal { + struct scsi_transport_template t; + struct device_attribute private_port_attrs[3]; + struct device_attribute private_link_attrs[3]; + struct device_attribute private_dev_attrs[9]; + struct transport_container link_attr_cont; + struct transport_container dev_attr_cont; + struct device_attribute *link_attrs[4]; + struct 
device_attribute *port_attrs[4]; + struct device_attribute *dev_attrs[10]; +}; + +struct ata_show_ering_arg { + char *buf; + int written; +}; + +struct fixed_phy_status { + int link; + int speed; + int duplex; + int pause; + int asym_pause; +}; + +enum hwmon_sensor_types { + hwmon_chip = 0, + hwmon_temp = 1, + hwmon_in = 2, + hwmon_curr = 3, + hwmon_power = 4, + hwmon_energy = 5, + hwmon_humidity = 6, + hwmon_fan = 7, + hwmon_pwm = 8, + hwmon_intrusion = 9, + hwmon_max = 10, +}; + +enum hwmon_temp_attributes { + hwmon_temp_enable = 0, + hwmon_temp_input = 1, + hwmon_temp_type = 2, + hwmon_temp_lcrit = 3, + hwmon_temp_lcrit_hyst = 4, + hwmon_temp_min = 5, + hwmon_temp_min_hyst = 6, + hwmon_temp_max = 7, + hwmon_temp_max_hyst = 8, + hwmon_temp_crit = 9, + hwmon_temp_crit_hyst = 10, + hwmon_temp_emergency = 11, + hwmon_temp_emergency_hyst = 12, + hwmon_temp_alarm = 13, + hwmon_temp_lcrit_alarm = 14, + hwmon_temp_min_alarm = 15, + hwmon_temp_max_alarm = 16, + hwmon_temp_crit_alarm = 17, + hwmon_temp_emergency_alarm = 18, + hwmon_temp_fault = 19, + hwmon_temp_offset = 20, + hwmon_temp_label = 21, + hwmon_temp_lowest = 22, + hwmon_temp_highest = 23, + hwmon_temp_reset_history = 24, + hwmon_temp_rated_min = 25, + hwmon_temp_rated_max = 26, + hwmon_temp_beep = 27, +}; + +struct hwmon_ops { + umode_t visible; + umode_t (*is_visible)(const void *, enum hwmon_sensor_types, u32, int); + int (*read)(struct device *, enum hwmon_sensor_types, u32, int, long int *); + int (*read_string)(struct device *, enum hwmon_sensor_types, u32, int, const char **); + int (*write)(struct device *, enum hwmon_sensor_types, u32, int, long int); +}; + +struct hwmon_channel_info { + enum hwmon_sensor_types type; + const u32 *config; +}; + +struct hwmon_chip_info { + const struct hwmon_ops *ops; + const struct hwmon_channel_info * const *info; +}; + +struct netdev_queue_stats_rx { + u64 bytes; + u64 packets; + u64 alloc_fail; + u64 hw_drops; + u64 hw_drop_overruns; + u64 csum_unnecessary; + u64 csum_none; + u64 csum_bad; + u64 hw_gro_packets; + u64 hw_gro_bytes; + u64 hw_gro_wire_packets; + u64 hw_gro_wire_bytes; + u64 hw_drop_ratelimits; +}; + +struct netdev_queue_stats_tx { + u64 bytes; + u64 packets; + u64 hw_drops; + u64 hw_drop_errors; + u64 csum_none; + u64 needs_csum; + u64 hw_gso_packets; + u64 hw_gso_bytes; + u64 hw_gso_wire_packets; + u64 hw_gso_wire_bytes; + u64 hw_drop_ratelimits; + u64 stop; + u64 wake; +}; + +enum rtl_dash_type { + RTL_DASH_NONE = 0, + RTL_DASH_DP = 1, + RTL_DASH_EP = 2, +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_tc_offsets { + bool inited; + long: 32; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +struct r8169_led_classdev; + +struct TxDesc; + +struct RxDesc; + +struct rtl8169_counters; + +struct rtl8169_private { + void *mmio_addr; + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; + u32 cur_tx; + u32 dirty_tx; + struct TxDesc *TxDescArray; + struct RxDesc *RxDescArray; + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[256]; + struct ring_info tx_skb[256]; + u16 cp_cmd; + u16 tx_lpi_timer; + u32 irq_mask; + int irq; + struct clk *clk; + struct { + long unsigned int flags[1]; + struct work_struct work; + } wk; + raw_spinlock_t mac_ocp_lock; + struct mutex led_lock; + unsigned int supports_gmii: 1; + unsigned int aspm_manageable: 1; + unsigned int 
dash_enabled: 1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + const char *fw_name; + struct rtl_fw *rtl_fw; + struct r8169_led_classdev *leds; + u32 ocp_base; + long: 32; +}; + +enum rtl_registers { + MAC0 = 0, + MAC4 = 4, + MAR0 = 8, + CounterAddrLow = 16, + CounterAddrHigh = 20, + TxDescStartAddrLow = 32, + TxDescStartAddrHigh = 36, + TxHDescStartAddrLow = 40, + TxHDescStartAddrHigh = 44, + FLASH = 48, + ERSR = 54, + ChipCmd = 55, + TxPoll = 56, + IntrMask = 60, + IntrStatus = 62, + TxConfig = 64, + RxConfig = 68, + Cfg9346 = 80, + Config0 = 81, + Config1 = 82, + Config2 = 83, + Config3 = 84, + Config4 = 85, + Config5 = 86, + PHYAR = 96, + PHYstatus = 108, + RxMaxSize = 218, + CPlusCmd = 224, + IntrMitigate = 226, + RxDescAddrLow = 228, + RxDescAddrHigh = 232, + EarlyTxThres = 236, + MaxTxPacketSize = 236, + FuncEvent = 240, + FuncEventMask = 244, + FuncPresetState = 248, + IBCR0 = 248, + IBCR2 = 249, + IBIMR0 = 250, + IBISR0 = 251, + FuncForceEvent = 252, +}; + +enum rtl8168_8101_registers { + CSIDR = 100, + CSIAR = 104, + PMCH = 111, + EPHYAR = 128, + DLLPR = 208, + DBG_REG = 209, + TWSI = 210, + MCU = 211, + EFUSEAR = 220, + MISC_1 = 242, +}; + +enum rtl8168_registers { + LED_CTRL = 24, + LED_FREQ = 26, + EEE_LED = 27, + ERIDR = 112, + ERIAR = 116, + EPHY_RXER_NUM = 124, + OCPDR = 176, + OCPAR = 180, + GPHY_OCP = 184, + RDSAR1 = 208, + MISC = 240, +}; + +enum rtl8125_registers { + LEDSEL0 = 24, + INT_CFG0_8125 = 52, + IntrMask_8125 = 56, + IntrStatus_8125 = 60, + INT_CFG1_8125 = 122, + LEDSEL2 = 132, + LEDSEL1 = 134, + TxPoll_8125 = 144, + LEDSEL3 = 150, + MAC0_BKP = 6624, + RSS_CTRL_8125 = 17664, + Q_NUM_CTRL_8125 = 18432, + EEE_TXIDLE_TIMER_8125 = 24648, +}; + +enum rtl_register_content___2 { + SYSErr = 32768, + PCSTimeout = 16384, + SWInt = 256, + TxDescUnavail = 128, + RxFIFOOver = 64, + LinkChg = 32, + RxOverflow = 16, + TxErr = 8, + TxOK = 4, + RxErr = 2, + RxOK = 1, + RxRWT = 4194304, + RxRES = 2097152, + RxRUNT = 1048576, + RxCRC = 524288, + StopReq = 128, + CmdReset = 16, + CmdRxEnb = 8, + CmdTxEnb = 4, + RxBufEmpty = 1, + HPQ = 128, + NPQ = 64, + FSWInt = 1, + Cfg9346_Lock = 0, + Cfg9346_Unlock = 192, + AcceptErr = 32, + AcceptRunt = 16, + AcceptBroadcast = 8, + AcceptMulticast = 4, + AcceptMyPhys = 2, + AcceptAllPhys = 1, + TxInterFrameGapShift = 24, + TxDMAShift = 8, + LEDS1 = 128, + LEDS0 = 64, + Speed_down = 16, + MEMMAP = 8, + IOMAP = 4, + VPD = 2, + PMEnable = 1, + ClkReqEn = 128, + MSIEnable = 32, + PCI_Clock_66MHz = 1, + PCI_Clock_33MHz = 0, + MagicPacket = 32, + LinkUp = 16, + Jumbo_En0 = 4, + Rdy_to_L23 = 2, + Beacon_en = 1, + Jumbo_En1 = 2, + BWF = 64, + MWF = 32, + UWF = 16, + Spi_en = 8, + LanWake = 2, + PMEStatus = 1, + ASPM_en = 1, + EnableBist = 32768, + Mac_dbgo_oe = 16384, + EnAnaPLL = 16384, + Normal_mode = 8192, + Force_half_dup = 4096, + Force_rxflow_en = 2048, + Force_txflow_en = 1024, + Cxpl_dbg_sel = 512, + ASF = 256, + PktCntrDisable = 128, + Mac_dbgo_sel = 28, + RxVlan = 64, + RxChkSum = 32, + PCIDAC = 16, + PCIMulRW = 8, + TBI_Enable = 128, + TxFlowCtrl = 64, + RxFlowCtrl = 32, + _1000bpsF = 16, + _100bps___2 = 8, + _10bps___2 = 4, + LinkStatus = 2, + FullDup = 1, + CounterReset = 1, + CounterDump = 8, + MagicPacket_v2 = 65536, +}; + +enum rtl_desc_bit { + DescOwn = -2147483648, + RingEnd = 1073741824, + FirstFrag = 536870912, + LastFrag = 268435456, +}; + +enum rtl_tx_desc_bit { + TD_LSO = 134217728, + TxVlanTag = 131072, +}; + +enum rtl_tx_desc_bit_0 { + 
TD0_TCP_CS = 65536, + TD0_UDP_CS = 131072, + TD0_IP_CS = 262144, +}; + +enum rtl_tx_desc_bit_1 { + TD1_GTSENV4 = 67108864, + TD1_GTSENV6 = 33554432, + TD1_IPv6_CS = 268435456, + TD1_IPv4_CS = 536870912, + TD1_TCP_CS = 1073741824, + TD1_UDP_CS = -2147483648, +}; + +enum rtl_rx_desc_bit { + PID1 = 262144, + PID0 = 131072, + IPFail = 65536, + UDPFail = 32768, + TCPFail = 16384, + RxVlanTag = 65536, +}; + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underrun; + __le64 tx_octets; + __le64 rx_octets; + __le64 rx_multicast64; + __le64 tx_unicast64; + __le64 tx_broadcast64; + __le64 tx_multicast64; + __le32 tx_pause_on; + __le32 tx_pause_off; + __le32 tx_pause_all; + __le32 tx_deferred; + __le32 tx_late_collision; + __le32 tx_all_collision; + __le32 tx_aborted32; + __le32 align_errors32; + __le32 rx_frame_too_long; + __le32 rx_runt; + __le32 rx_pause_on; + __le32 rx_pause_off; + __le32 rx_pause_all; + __le32 rx_unknown_opcode; + __le32 rx_mac_error; + __le32 tx_underrun32; + __le32 rx_mac_missed; + __le32 rx_tcam_dropped; + __le32 tdu; + __le32 rdu; +}; + +enum rtl_flag { + RTL_FLAG_TASK_RESET_PENDING = 0, + RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE = 1, + RTL_FLAG_TASK_TX_TIMEOUT = 2, + RTL_FLAG_MAX = 3, +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *); + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; +}; + +enum xmp_state { + STATE_INACTIVE___4 = 0, + STATE_LEADER_PULSE = 1, + STATE_NIBBLE_SPACE = 2, +}; + +struct clk_notifier_data { + struct clk *clk; + long unsigned int old_rate; + long unsigned int new_rate; +}; + +typedef int (*of_init_fn_1_ret)(struct device_node *); + +struct mbox_client { + struct device *dev; + bool tx_block; + long unsigned int tx_tout; + bool knows_txdone; + void (*rx_callback)(struct mbox_client *, void *); + void (*tx_prepare)(struct mbox_client *, void *); + void (*tx_done)(struct mbox_client *, void *, int); +}; + +struct mbox_chan; + +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *, void *); + int (*flush)(struct mbox_chan *, long unsigned int); + int (*startup)(struct mbox_chan *); + void (*shutdown)(struct mbox_chan *); + bool (*last_tx_done)(struct mbox_chan *); + bool (*peek_data)(struct mbox_chan *); +}; + +struct mbox_controller; + +struct mbox_chan { + struct mbox_controller *mbox; + unsigned int txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned int msg_count; + unsigned int msg_free; + void *msg_data[20]; + spinlock_t lock; + void *con_priv; +}; + +struct mbox_controller { + struct device *dev; + const struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned int txpoll_period; + struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); + long: 32; + struct hrtimer poll_hrt; + spinlock_t poll_hrt_lock; + struct list_head node; + long: 32; +}; + 
+struct rps_sock_flow_table { + u32 mask; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u32 ents[0]; +}; + +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = 1, + ETH_TEST_FL_FAILED = 2, + ETH_TEST_FL_EXTERNAL_LB = 4, + ETH_TEST_FL_EXTERNAL_LB_DONE = 8, +}; + +struct net_packet_attrs { + const unsigned char *src; + const unsigned char *dst; + u32 ip_src; + u32 ip_dst; + bool tcp; + u16 sport; + u16 dport; + int timeout; + int size; + int max_size; + u8 id; + u16 queue_mapping; +}; + +struct net_test_priv { + struct net_packet_attrs *packet; + struct packet_type pt; + struct completion comp; + int double_vlan; + int vlan_id; + int ok; +}; + +struct netsfhdr { + __be32 version; + __be64 magic; + u8 id; +} __attribute__((packed)); + +struct net_test { + char name[32]; + int (*fn)(struct net_device *); +}; + +struct link_mode_info { + int speed; + u8 lanes; + u8 duplex; +}; + +struct linkmodes_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; + bool peer_empty; +}; + +enum nft_payload_bases { + NFT_PAYLOAD_LL_HEADER = 0, + NFT_PAYLOAD_NETWORK_HEADER = 1, + NFT_PAYLOAD_TRANSPORT_HEADER = 2, + NFT_PAYLOAD_INNER_HEADER = 3, + NFT_PAYLOAD_TUN_HEADER = 4, +}; + +enum nft_payload_csum_types { + NFT_PAYLOAD_CSUM_NONE = 0, + NFT_PAYLOAD_CSUM_INET = 1, + NFT_PAYLOAD_CSUM_SCTP = 2, +}; + +enum nft_payload_csum_flags { + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 1, +}; + +enum nft_payload_attributes { + NFTA_PAYLOAD_UNSPEC = 0, + NFTA_PAYLOAD_DREG = 1, + NFTA_PAYLOAD_BASE = 2, + NFTA_PAYLOAD_OFFSET = 3, + NFTA_PAYLOAD_LEN = 4, + NFTA_PAYLOAD_SREG = 5, + NFTA_PAYLOAD_CSUM_TYPE = 6, + NFTA_PAYLOAD_CSUM_OFFSET = 7, + NFTA_PAYLOAD_CSUM_FLAGS = 8, + __NFTA_PAYLOAD_MAX = 9, +}; + +struct nft_payload { + enum nft_payload_bases base: 8; + u8 offset; + u8 len; + u8 dreg; +}; + +enum { + NFT_PAYLOAD_CTX_INNER_TUN = 1, + NFT_PAYLOAD_CTX_INNER_LL = 2, + NFT_PAYLOAD_CTX_INNER_NH = 4, + NFT_PAYLOAD_CTX_INNER_TH = 8, +}; + +struct nft_inner_tun_ctx { + u16 type; + u16 inner_tunoff; + u16 inner_lloff; + u16 inner_nhoff; + u16 inner_thoff; + __be16 llproto; + u8 l4proto; + u8 flags; +}; + +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +}; + +struct gre_full_hdr { + struct gre_base_hdr fixed_header; + __be16 csum; + __be16 reserved1; + __be32 key; + __be32 seq; +}; + +struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +}; + +struct nft_payload_set { + enum nft_payload_bases base: 8; + u8 offset; + u8 len; + u8 sreg; + u8 csum_type; + u8 csum_offset; + u8 csum_flags; +}; + +struct nft_payload_vlan_hdr { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; +}; + +struct net_proto_family { + int family; + int (*create)(struct net *, struct socket *, int, int); + struct module *owner; +}; + +struct socket_alloc { + struct socket socket; + struct inode vfs_inode; +}; + +struct inet_protosw { + struct list_head list; + short unsigned int type; + short unsigned int protocol; + struct proto *prot; + const struct proto_ops *ops; + unsigned char flags; +}; + +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; + +struct pingv6_ops 
{ + int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); + void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + int (*icmpv6_err_convert)(u8, u8, int *); + void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); +}; + +struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; + int ipv6mr_ifindex; +}; + +struct ip6_ra_chain { + struct ip6_ra_chain *next; + struct sock *sk; + int sel; + void (*destructor)(struct sock *); +}; + +struct ipv6_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u8 first_segment; + __u8 flags; + __u16 tag; + struct in6_addr segments[0]; +}; + +enum { + INET_FRAG_FIRST_IN = 1, + INET_FRAG_LAST_IN = 2, + INET_FRAG_COMPLETE = 4, + INET_FRAG_HASH_DEAD = 8, + INET_FRAG_DROP = 16, +}; + +struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; +}; + +struct frag_queue { + struct inet_frag_queue q; + int iif; + __u16 nhoffset; + u8 ecn; +}; + +struct nft_ct_frag6_pernet { + struct ctl_table_header *nf_frag_frags_hdr; + struct fqdir *fqdir; +}; + +struct brport_attribute { + struct attribute attr; + ssize_t (*show)(struct net_bridge_port *, char *); + int (*store)(struct net_bridge_port *, long unsigned int); + int (*store_raw)(struct net_bridge_port *, char *); +}; + +enum perf_sample_regs_abi { + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, +}; + +enum perf_event_mips_regs { + PERF_REG_MIPS_PC = 0, + PERF_REG_MIPS_R1 = 1, + PERF_REG_MIPS_R2 = 2, + PERF_REG_MIPS_R3 = 3, + PERF_REG_MIPS_R4 = 4, + PERF_REG_MIPS_R5 = 5, + PERF_REG_MIPS_R6 = 6, + PERF_REG_MIPS_R7 = 7, + PERF_REG_MIPS_R8 = 8, + PERF_REG_MIPS_R9 = 9, + PERF_REG_MIPS_R10 = 10, + PERF_REG_MIPS_R11 = 11, + PERF_REG_MIPS_R12 = 12, + PERF_REG_MIPS_R13 = 13, + PERF_REG_MIPS_R14 = 14, + PERF_REG_MIPS_R15 = 15, + PERF_REG_MIPS_R16 = 16, + PERF_REG_MIPS_R17 = 17, + PERF_REG_MIPS_R18 = 18, + PERF_REG_MIPS_R19 = 19, + PERF_REG_MIPS_R20 = 20, + PERF_REG_MIPS_R21 = 21, + PERF_REG_MIPS_R22 = 22, + PERF_REG_MIPS_R23 = 23, + PERF_REG_MIPS_R24 = 24, + PERF_REG_MIPS_R25 = 25, + PERF_REG_MIPS_R26 = 26, + PERF_REG_MIPS_R27 = 27, + PERF_REG_MIPS_R28 = 28, + PERF_REG_MIPS_R29 = 29, + PERF_REG_MIPS_R30 = 30, + PERF_REG_MIPS_R31 = 31, + PERF_REG_MIPS_MAX = 32, +}; + +enum reboot_mode { + REBOOT_UNDEFINED = -1, + REBOOT_COLD = 0, + REBOOT_WARM = 1, + REBOOT_HARD = 2, + REBOOT_SOFT = 3, + REBOOT_GPIO = 4, +}; + +enum reboot_type { + BOOT_TRIPLE = 116, + BOOT_KBD = 107, + BOOT_BIOS = 98, + BOOT_ACPI = 97, + BOOT_EFI = 101, + BOOT_CF9_FORCE = 112, + BOOT_CF9_SAFE = 113, +}; + +enum sys_off_mode { + SYS_OFF_MODE_POWER_OFF_PREPARE = 0, + SYS_OFF_MODE_POWER_OFF = 1, + SYS_OFF_MODE_RESTART_PREPARE = 2, + SYS_OFF_MODE_RESTART = 3, +}; + +struct sys_off_data { + int mode; + void *cb_data; + const char *cmd; + struct device *dev; +}; + +struct sys_off_handler { + struct notifier_block nb; + int (*sys_off_cb)(struct sys_off_data *); + void *cb_data; + enum sys_off_mode mode; + bool blocking; + void *list; + struct device *dev; +}; + +typedef __kernel_long_t __kernel_suseconds_t; + +typedef __kernel_suseconds_t suseconds_t; + +typedef __u64 timeu64_t; + +struct __kernel_itimerspec { + struct __kernel_timespec it_interval; + struct __kernel_timespec it_value; +}; + +struct 
old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 it_value; +}; + +struct tick_sched { + long unsigned int flags; + unsigned int stalled_jiffies; + long unsigned int last_tick_jiffies; + long: 32; + struct hrtimer sched_timer; + ktime_t last_tick; + ktime_t next_tick; + long unsigned int idle_jiffies; + long: 32; + ktime_t idle_waketime; + unsigned int got_idle_tick; + seqcount_t idle_sleeptime_seq; + ktime_t idle_entrytime; + long unsigned int last_jiffies; + long: 32; + u64 timer_expires_base; + u64 timer_expires; + u64 next_timer; + ktime_t idle_expires; + long unsigned int idle_calls; + long unsigned int idle_sleeps; + ktime_t idle_exittime; + ktime_t idle_sleeptime; + ktime_t iowait_sleeptime; + atomic_t tick_dep_mask; + long unsigned int check_clocks; +}; + +typedef __u16 comp_t; + +typedef __u32 comp2_t; + +struct acct { + char ac_flag; + char ac_version; + __u16 ac_uid16; + __u16 ac_gid16; + __u16 ac_tty; + __u32 ac_btime; + comp_t ac_utime; + comp_t ac_stime; + comp_t ac_etime; + comp_t ac_mem; + comp_t ac_io; + comp_t ac_rw; + comp_t ac_minflt; + comp_t ac_majflt; + comp_t ac_swaps; + __u16 ac_ahz; + __u32 ac_exitcode; + char ac_comm[17]; + __u8 ac_etime_hi; + __u16 ac_etime_lo; + __u32 ac_uid; + __u32 ac_gid; +}; + +typedef struct acct acct_t; + +struct bsd_acct_struct { + struct fs_pin pin; + atomic_long_t count; + struct callback_head rcu; + struct mutex lock; + int active; + long unsigned int needcheck; + struct file *file; + struct pid_namespace *ns; + struct work_struct work; + struct completion done; +}; + +struct tp_module { + struct list_head list; + struct module *mod; +}; + +enum tp_func_state { + TP_FUNC_0 = 0, + TP_FUNC_1 = 1, + TP_FUNC_2 = 2, + TP_FUNC_N = 3, +}; + +enum tp_transition_sync { + TP_TRANSITION_SYNC_1_0_1 = 0, + TP_TRANSITION_SYNC_N_2_1 = 1, + _NR_TP_TRANSITION_SYNC = 2, +}; + +struct tp_transition_snapshot { + long unsigned int rcu; + bool ongoing; +}; + +struct tp_probes { + struct callback_head rcu; + struct tracepoint_func probes[0]; +}; + +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_WEIGHT_STRUCT = 16777216, + PERF_SAMPLE_MAX = 33554432, +}; + +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 1, + TRACE_FLAG_NEED_RESCHED_LAZY = 2, + TRACE_FLAG_NEED_RESCHED = 4, + TRACE_FLAG_HARDIRQ = 8, + TRACE_FLAG_SOFTIRQ = 16, + TRACE_FLAG_PREEMPT_RESCHED = 32, + TRACE_FLAG_NMI = 64, + TRACE_FLAG_BH_OFF = 128, +}; + +typedef long unsigned int perf_trace_t[2048]; + +typedef void (*swap_r_func_t)(void *, void *, int, const void *); + +typedef int (*cmp_r_func_t)(const void *, const void *, const void *); + +struct btf_enum { + __u32 name_off; + __s32 val; +}; + +enum { + BTF_VAR_STATIC = 0, + BTF_VAR_GLOBAL_ALLOCATED = 1, + BTF_VAR_GLOBAL_EXTERN = 2, +}; + +struct btf_var { + __u32 linkage; +}; + +struct btf_decl_tag { + __s32 
component_idx; +}; + +struct btf_enum64 { + __u32 name_off; + __u32 val_lo32; + __u32 val_hi32; +}; + +struct bpf_cgroup_dev_ctx { + __u32 access_type; + __u32 major; + __u32 minor; +}; + +struct bpf_raw_tracepoint_args { + __u64 args[0]; +}; + +struct bpf_sysctl { + __u32 write; + __u32 file_pos; +}; + +struct bpf_sockopt { + union { + struct bpf_sock *sk; + }; + union { + void *optval; + }; + union { + void *optval_end; + }; + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + +enum { + BTF_F_COMPACT = 1, + BTF_F_NONAME = 2, + BTF_F_PTR_RAW = 4, + BTF_F_ZERO = 8, +}; + +typedef struct pt_regs bpf_user_pt_regs_t; + +struct bpf_perf_event_data { + bpf_user_pt_regs_t regs; + __u64 sample_period; + __u64 addr; +}; + +struct btf_id_dtor_kfunc { + u32 btf_id; + u32 kfunc_btf_id; +}; + +struct btf_struct_metas { + u32 cnt; + struct btf_struct_meta types[0]; +}; + +enum { + BTF_FIELDS_MAX = 11, +}; + +struct bpf_sysctl_kern { + struct ctl_table_header *head; + const struct ctl_table *table; + void *cur_val; + size_t cur_len; + void *new_val; + size_t new_len; + int new_updated; + int write; + loff_t *ppos; + long: 32; + u64 tmp_reg; +}; + +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + struct task_struct *current_task; + long: 32; + u64 tmp_reg; +}; + +struct bpf_perf_event_data_kern { + bpf_user_pt_regs_t *regs; + struct perf_sample_data *data; + struct perf_event *event; +}; + +struct bpf_core_cand { + const struct btf *btf; + __u32 id; +}; + +struct bpf_core_cand_list { + struct bpf_core_cand *cands; + int len; +}; + +struct bpf_core_accessor { + __u32 type_id; + __u32 idx; + const char *name; +}; + +struct bpf_core_spec { + const struct btf *btf; + struct bpf_core_accessor spec[64]; + __u32 root_type_id; + enum bpf_core_relo_kind relo_kind; + int len; + int raw_spec[64]; + int raw_len; + __u32 bit_offset; +}; + +struct bpf_core_relo_res { + __u64 orig_val; + __u64 new_val; + bool poison; + bool validate; + bool fail_memsz_adjust; + __u32 orig_sz; + __u32 orig_type_id; + __u32 new_sz; + __u32 new_type_id; + long: 32; +}; + +enum btf_kfunc_hook { + BTF_KFUNC_HOOK_COMMON = 0, + BTF_KFUNC_HOOK_XDP = 1, + BTF_KFUNC_HOOK_TC = 2, + BTF_KFUNC_HOOK_STRUCT_OPS = 3, + BTF_KFUNC_HOOK_TRACING = 4, + BTF_KFUNC_HOOK_SYSCALL = 5, + BTF_KFUNC_HOOK_FMODRET = 6, + BTF_KFUNC_HOOK_CGROUP = 7, + BTF_KFUNC_HOOK_SCHED_ACT = 8, + BTF_KFUNC_HOOK_SK_SKB = 9, + BTF_KFUNC_HOOK_SOCKET_FILTER = 10, + BTF_KFUNC_HOOK_LWT = 11, + BTF_KFUNC_HOOK_NETFILTER = 12, + BTF_KFUNC_HOOK_KPROBE = 13, + BTF_KFUNC_HOOK_MAX = 14, +}; + +enum { + BTF_KFUNC_SET_MAX_CNT = 256, + BTF_DTOR_KFUNC_MAX_CNT = 256, + BTF_KFUNC_FILTER_MAX_CNT = 16, +}; + +struct btf_kfunc_hook_filter { + btf_kfunc_filter_t filters[16]; + u32 nr_filters; +}; + +struct btf_kfunc_set_tab { + struct btf_id_set8 *sets[14]; + struct btf_kfunc_hook_filter hook_filters[14]; +}; + +struct btf_id_dtor_kfunc_tab { + u32 cnt; + struct btf_id_dtor_kfunc dtors[0]; +}; + +struct btf_struct_ops_tab { + u32 cnt; + u32 capacity; + struct bpf_struct_ops_desc ops[0]; +}; + +enum verifier_phase { + CHECK_META = 0, + CHECK_TYPE = 1, +}; + +struct resolve_vertex { + const struct btf_type *t; + u32 type_id; + u16 next_member; +}; + +enum visit_state { + NOT_VISITED = 0, + VISITED = 1, + RESOLVED = 2, +}; + +enum resolve_mode { + RESOLVE_TBD = 0, + RESOLVE_PTR = 1, + RESOLVE_STRUCT_OR_ARRAY = 2, +}; + +struct btf_sec_info { + u32 off; + u32 len; +}; + +struct btf_verifier_env { + struct btf *btf; + u8 
*visit_states; + struct resolve_vertex stack[32]; + struct bpf_verifier_log log; + u32 log_type_id; + u32 top_stack; + enum verifier_phase phase; + enum resolve_mode resolve_mode; +}; + +struct btf_show { + u64 flags; + void *target; + void (*showfn)(struct btf_show *, const char *, va_list); + const struct btf *btf; + struct { + u8 depth; + u8 depth_to_show; + u8 depth_check; + u8 array_member: 1; + u8 array_terminated: 1; + u16 array_encoding; + u32 type_id; + int status; + const struct btf_type *type; + const struct btf_member *member; + char name[80]; + } state; + struct { + u32 size; + void *head; + void *data; + u8 safe[32]; + } obj; +}; + +struct btf_kind_operations { + s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); + int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); + int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + void (*log_details)(struct btf_verifier_env *, const struct btf_type *); + void (*show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct btf_show *); +}; + +enum { + BTF_FIELD_IGNORE = 0, + BTF_FIELD_FOUND = 1, +}; + +struct btf_field_info { + enum btf_field_type type; + u32 off; + union { + struct { + u32 type_id; + } kptr; + struct { + const char *node_name; + u32 value_btf_id; + } graph_root; + }; +}; + +struct bpf_ctx_convert { + struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog; + struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern; + struct xdp_md BPF_PROG_TYPE_XDP_prog; + struct xdp_buff BPF_PROG_TYPE_XDP_kern; + struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog; + struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern; + struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog; + struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern; + struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog; + struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog; + struct sk_buff BPF_PROG_TYPE_LWT_IN_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog; + struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern; + struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog; + struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern; + struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog; + struct sk_buff BPF_PROG_TYPE_SK_SKB_kern; + struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog; + struct sk_msg BPF_PROG_TYPE_SK_MSG_kern; + struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog; + struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern; + bpf_user_pt_regs_t BPF_PROG_TYPE_KPROBE_prog; + struct pt_regs BPF_PROG_TYPE_KPROBE_kern; + __u64 BPF_PROG_TYPE_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_TRACEPOINT_kern; + struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog; + struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern; + long: 32; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog; + u64 
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern; + void *BPF_PROG_TYPE_TRACING_prog; + void *BPF_PROG_TYPE_TRACING_kern; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern; + struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog; + struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern; + struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog; + struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern; + struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog; + struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern; + struct bpf_sk_lookup BPF_PROG_TYPE_SK_LOOKUP_prog; + struct bpf_sk_lookup_kern BPF_PROG_TYPE_SK_LOOKUP_kern; + void *BPF_PROG_TYPE_STRUCT_OPS_prog; + void *BPF_PROG_TYPE_STRUCT_OPS_kern; + void *BPF_PROG_TYPE_EXT_prog; + void *BPF_PROG_TYPE_EXT_kern; + void *BPF_PROG_TYPE_SYSCALL_prog; + void *BPF_PROG_TYPE_SYSCALL_kern; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_prog; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_kern; + long: 32; +}; + +enum { + __ctx_convertBPF_PROG_TYPE_SOCKET_FILTER = 0, + __ctx_convertBPF_PROG_TYPE_SCHED_CLS = 1, + __ctx_convertBPF_PROG_TYPE_SCHED_ACT = 2, + __ctx_convertBPF_PROG_TYPE_XDP = 3, + __ctx_convertBPF_PROG_TYPE_CGROUP_SKB = 4, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK = 5, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK_ADDR = 6, + __ctx_convertBPF_PROG_TYPE_LWT_IN = 7, + __ctx_convertBPF_PROG_TYPE_LWT_OUT = 8, + __ctx_convertBPF_PROG_TYPE_LWT_XMIT = 9, + __ctx_convertBPF_PROG_TYPE_LWT_SEG6LOCAL = 10, + __ctx_convertBPF_PROG_TYPE_SOCK_OPS = 11, + __ctx_convertBPF_PROG_TYPE_SK_SKB = 12, + __ctx_convertBPF_PROG_TYPE_SK_MSG = 13, + __ctx_convertBPF_PROG_TYPE_FLOW_DISSECTOR = 14, + __ctx_convertBPF_PROG_TYPE_KPROBE = 15, + __ctx_convertBPF_PROG_TYPE_TRACEPOINT = 16, + __ctx_convertBPF_PROG_TYPE_PERF_EVENT = 17, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT = 18, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 19, + __ctx_convertBPF_PROG_TYPE_TRACING = 20, + __ctx_convertBPF_PROG_TYPE_CGROUP_DEVICE = 21, + __ctx_convertBPF_PROG_TYPE_CGROUP_SYSCTL = 22, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCKOPT = 23, + __ctx_convertBPF_PROG_TYPE_SK_REUSEPORT = 24, + __ctx_convertBPF_PROG_TYPE_SK_LOOKUP = 25, + __ctx_convertBPF_PROG_TYPE_STRUCT_OPS = 26, + __ctx_convertBPF_PROG_TYPE_EXT = 27, + __ctx_convertBPF_PROG_TYPE_SYSCALL = 28, + __ctx_convertBPF_PROG_TYPE_NETFILTER = 29, + __ctx_convert_unused = 30, +}; + +enum bpf_struct_walk_result { + WALK_SCALAR = 0, + WALK_PTR = 1, + WALK_STRUCT = 2, +}; + +struct bpf_cand_cache { + const char *name; + u32 name_len; + u16 kind; + u16 cnt; + struct { + const struct btf *btf; + u32 id; + } cands[0]; +}; + +enum btf_arg_tag { + ARG_TAG_CTX = 1, + ARG_TAG_NONNULL = 2, + ARG_TAG_TRUSTED = 4, + ARG_TAG_NULLABLE = 8, + ARG_TAG_ARENA = 16, +}; + +struct btf_show_snprintf { + struct btf_show show; + int len_left; + int len; +}; + +enum { + BTF_MODULE_F_LIVE = 1, +}; + +struct btf_module { + struct list_head list; + struct module *module; + struct btf *btf; + struct bin_attribute *sysfs_attr; + int flags; +}; + +typedef u64 (*btf_bpf_btf_find_by_name_kind)(char *, int, u32, int); + +typedef struct kobject *kobj_probe_t(dev_t, int *, void *); + +struct char_device_struct { + struct char_device_struct *next; + unsigned int major; + unsigned int baseminor; + int minorct; + char name[64]; + struct cdev *cdev; +}; + +struct wait_bit_queue_entry { + struct wait_bit_key key; + struct wait_queue_entry wq_entry; +}; + +enum lru_status { + LRU_REMOVED = 0, + LRU_REMOVED_RETRY 
= 1, + LRU_ROTATE = 2, + LRU_SKIP = 3, + LRU_RETRY = 4, + LRU_STOP = 5, +}; + +typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, void *); + +struct inodes_stat_t { + long int nr_inodes; + long int nr_unused; + long int dummy[5]; +}; + +struct trace_event_raw_ctime { + struct trace_entry ent; + dev_t dev; + ino_t ino; + time64_t ctime_s; + u32 ctime_ns; + u32 gen; + char __data[0]; +}; + +struct trace_event_raw_ctime_ns_xchg { + struct trace_entry ent; + dev_t dev; + ino_t ino; + u32 gen; + u32 old; + u32 new; + u32 cur; + char __data[0]; +}; + +struct trace_event_raw_fill_mg_cmtime { + struct trace_entry ent; + dev_t dev; + ino_t ino; + time64_t ctime_s; + time64_t mtime_s; + u32 ctime_ns; + u32 mtime_ns; + u32 gen; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_ctime {}; + +struct trace_event_data_offsets_ctime_ns_xchg {}; + +struct trace_event_data_offsets_fill_mg_cmtime {}; + +typedef void (*btf_trace_inode_set_ctime_to_ts)(void *, struct inode *, struct timespec64 *); + +typedef void (*btf_trace_ctime_xchg_skip)(void *, struct inode *, struct timespec64 *); + +typedef void (*btf_trace_ctime_ns_xchg)(void *, struct inode *, u32, u32, u32); + +typedef void (*btf_trace_fill_mg_cmtime)(void *, struct inode *, struct timespec64 *, struct timespec64 *); + +struct file_dedupe_range_info { + __s64 dest_fd; + __u64 dest_offset; + __u64 bytes_deduped; + __s32 status; + __u32 reserved; +}; + +struct file_dedupe_range { + __u64 src_offset; + __u64 src_length; + __u16 dest_count; + __u16 reserved1; + __u32 reserved2; + struct file_dedupe_range_info info[0]; +}; + +typedef void (*iomap_punch_t)(struct inode *, loff_t, loff_t, struct iomap *); + +struct iomap_ioend { + struct list_head io_list; + u16 io_type; + u16 io_flags; + struct inode *io_inode; + size_t io_size; + long: 32; + loff_t io_offset; + sector_t io_sector; + struct bio io_bio; +}; + +struct iomap_writepage_ctx; + +struct iomap_writeback_ops { + int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t, unsigned int); + int (*prepare_ioend)(struct iomap_ioend *, int); + void (*discard_folio)(struct folio *, loff_t); +}; + +struct iomap_writepage_ctx { + struct iomap iomap; + struct iomap_ioend *ioend; + const struct iomap_writeback_ops *ops; + u32 nr_folios; + long: 32; +}; + +struct iomap_folio_state { + spinlock_t state_lock; + unsigned int read_bytes_pending; + atomic_t write_bytes_pending; + long unsigned int state[0]; +}; + +struct iomap_readpage_ctx { + struct folio *cur_folio; + bool cur_folio_in_bio; + struct bio *bio; + struct readahead_control *rac; +}; + +struct isofs_fid { + u32 block; + u16 offset; + u16 parent_offset; + u32 generation; + u32 parent_block; + u32 parent_generation; +}; + +struct fuse_backing_map { + int32_t fd; + uint32_t flags; + uint64_t padding; +}; + +struct btrfs_em_shrink_ctx { + long int nr_to_scan; + long int scanned; +}; + +struct btrfs_bio; + +typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *); + +struct btrfs_bio { + struct btrfs_inode *inode; + long: 32; + u64 file_offset; + union { + struct { + u8 *csum; + u8 csum_inline[64]; + struct bvec_iter saved_iter; + }; + struct { + struct btrfs_ordered_extent *ordered; + struct btrfs_ordered_sum *sums; + u64 orig_physical; + }; + struct btrfs_tree_parent_check parent_check; + }; + btrfs_bio_end_io_t end_io; + void *private; + unsigned int mirror_num; + atomic_t pending_ios; + struct work_struct end_io_work; + struct btrfs_fs_info *fs_info; + blk_status_t status; + struct bio 
bio; +}; + +enum btrfs_map_op { + BTRFS_MAP_READ = 0, + BTRFS_MAP_WRITE = 1, + BTRFS_MAP_GET_READ_MIRRORS = 2, +}; + +struct btrfs_failed_bio { + struct btrfs_bio *bbio; + int num_copies; + atomic_t repair_count; +}; + +struct async_submit_bio { + struct btrfs_bio *bbio; + struct btrfs_io_context *bioc; + struct btrfs_io_stripe smap; + int mirror_num; + struct btrfs_work work; + long: 32; +}; + +struct sha512_state { + u64 state[8]; + u64 count[2]; + u8 buf[128]; +}; + +typedef void sha512_block_fn(struct sha512_state *, const u8 *, int); + +struct blk_mq_hw_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_hw_ctx *, char *); +}; + +enum bfqq_state_flags { + BFQQF_just_created = 0, + BFQQF_busy = 1, + BFQQF_wait_request = 2, + BFQQF_non_blocking_wait_rq = 3, + BFQQF_fifo_expire = 4, + BFQQF_has_short_ttime = 5, + BFQQF_sync = 6, + BFQQF_IO_bound = 7, + BFQQF_in_large_burst = 8, + BFQQF_softrt_update = 9, + BFQQF_coop = 10, + BFQQF_split_coop = 11, +}; + +struct iov_iter_state { + size_t iov_offset; + size_t count; + long unsigned int nr_segs; +}; + +typedef u32 compat_size_t; + +typedef s32 compat_ssize_t; + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +typedef size_t (*iov_step_f)(void *, size_t, size_t, void *, void *); + +typedef size_t (*iov_ustep_f)(void *, size_t, size_t, void *, void *); + +struct raid6_calls { + void (*gen_syndrome)(int, size_t, void **); + void (*xor_syndrome)(int, int, int, size_t, void **); + int (*valid)(); + const char *name; + int priority; +}; + +typedef u32 unative_t; + +enum pci_mmap_state { + pci_mmap_io = 0, + pci_mmap_mem = 1, +}; + +enum pci_mmap_api { + PCI_MMAP_SYSFS = 0, + PCI_MMAP_PROCFS = 1, +}; + +struct broken_edid { + u8 manufacturer[4]; + u32 model; + u32 fix; +}; + +struct __fb_timings { + u32 dclk; + u32 hfreq; + u32 vfreq; + u32 hactive; + u32 vactive; + u32 hblank; + u32 vblank; + u32 htotal; + u32 vtotal; +}; + +struct tty_file_private { + struct tty_struct *tty; + struct file *file; + struct list_head list; +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT = 0, + DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, + DMA_FENCE_FLAG_USER_BITS = 3, +}; + +struct sync_merge_data { + char name[32]; + __s32 fd2; + __s32 fence; + __u32 flags; + __u32 pad; +}; + +struct sync_fence_info { + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u32 flags; + __u64 timestamp_ns; +}; + +struct sync_file_info { + char name[32]; + __s32 status; + __u32 flags; + __u32 num_fences; + __u32 pad; + __u64 sync_fence_info; +}; + +struct sync_set_deadline { + __u64 deadline_ns; + __u64 pad; +}; + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 255, +}; + +enum phy___2 { + phy_100a = 992, + phy_100c = 55575208, + phy_82555_tx = 22020776, + phy_nsc_tx = 1543512064, + phy_82562_et = 53478056, + phy_82562_em = 52429480, + phy_82562_ek = 51380904, + phy_82562_eh = 24117928, + phy_82552_v = 3496017997, + phy_unknown = 4294967295, +}; + +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 8, + rus_ready = 16, + 
rus_mask = 60, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0, + stat_ack_sw_gen = 4, + stat_ack_rnr = 16, + stat_ack_cu_idle = 32, + stat_ack_frame_rx = 64, + stat_ack_cu_cmd_done = 128, + stat_ack_not_present = 255, + stat_ack_rx = 84, + stat_ack_tx = 160, +}; + +enum scb_cmd_hi { + irq_mask_none = 0, + irq_mask_all = 1, + irq_sw_gen = 2, +}; + +enum scb_cmd_lo { + cuc_nop = 0, + ruc_start = 1, + ruc_load_base = 6, + cuc_start = 16, + cuc_resume = 32, + cuc_dump_addr = 64, + cuc_dump_stats = 80, + cuc_load_base = 96, + cuc_dump_reset = 112, +}; + +enum cuc_dump { + cuc_dump_complete = 40965, + cuc_dump_reset_complete = 40967, +}; + +enum port { + software_reset = 0, + selftest = 1, + selective_reset = 2, +}; + +enum eeprom_ctrl_lo { + eesk = 1, + eecs = 2, + eedi = 4, + eedo = 8, +}; + +enum mdi_ctrl { + mdi_write = 67108864, + mdi_read = 134217728, + mdi_ready = 268435456, +}; + +enum eeprom_op { + op_write = 5, + op_read = 6, + op_ewds = 16, + op_ewen = 19, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 3, + eeprom_phy_iface = 6, + eeprom_id = 10, + eeprom_config_asf = 13, + eeprom_smbus_addr = 144, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 128, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB = 1, + I82553C = 2, + I82503 = 3, + DP83840 = 4, + S80C240 = 5, + S80C24 = 6, + I82555 = 7, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 32, +}; + +enum eeprom_config_asf { + eeprom_asf = 32768, + eeprom_gcl = 16384, +}; + +enum cb_status { + cb_complete = 32768, + cb_ok = 8192, +}; + +enum cb_command { + cb_nop = 0, + cb_iaaddr = 1, + cb_config = 2, + cb_multi = 3, + cb_tx = 4, + cb_ucode = 5, + cb_dump = 6, + cb_tx_sf = 8, + cb_tx_nc = 16, + cb_cid = 7936, + cb_i = 8192, + cb_s = 16384, + cb_el = 32768, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next; + struct rx *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +struct config { + u8 pad0: 2; + u8 byte_count: 6; + u8 pad1: 1; + u8 tx_fifo_limit: 3; + u8 rx_fifo_limit: 4; + u8 adaptive_ifs; + u8 pad3: 4; + u8 term_write_cache_line: 1; + u8 read_align_enable: 1; + u8 type_enable: 1; + u8 mwi_enable: 1; + u8 pad4: 1; + u8 rx_dma_max_count: 7; + u8 dma_max_count_enable: 1; + u8 tx_dma_max_count: 7; + u8 rx_save_bad_frames: 1; + u8 rx_save_overruns: 1; + u8 standard_stat_counter: 1; + u8 standard_tcb: 1; + u8 cna_intr: 1; + u8 tno_intr: 1; + u8 direct_rx_dma: 1; + u8 late_scb_update: 1; + u8 tx_dynamic_tbd: 1; + u8 tx_two_frames_in_fifo: 1; + u8 rx_extended_rfd: 1; + u8 pad7: 2; + u8 tx_underrun_retry: 2; + u8 rx_discard_short_frames: 1; + u8 csma_disabled: 1; + u8 pad8: 6; + u8 mii_mode: 1; + u8 mcmatch_wake: 1; + u8 arp_wake: 1; + u8 link_status_wake: 1; + u8 vlan_arp_tco: 1; + u8 pad9: 3; + u8 rx_tcpudp_checksum: 1; + u8 loopback: 2; + u8 preamble_length: 2; + u8 no_source_addr_insertion: 1; + u8 pad10: 3; + u8 pad11: 5; + u8 linear_priority: 3; + u8 ifs: 4; + u8 pad12: 3; + u8 linear_priority_mode: 1; + u8 ip_addr_lo; + u8 ip_addr_hi; + u8 crs_or_cdt: 1; + u8 pad15_2: 1; + u8 crc_16_bit: 1; + u8 ignore_ul_bit: 1; + u8 pad15_1: 1; + u8 wait_after_win: 1; + u8 broadcast_disabled: 1; + u8 promiscuous_mode: 1; + u8 fc_delay_lo; + u8 fc_delay_hi; + u8 pad18: 1; + u8 fc_priority_threshold: 3; + u8 rx_long_ok: 1; + u8 rx_crc_transfer: 1; + u8 tx_padding: 1; + u8 rx_stripping: 1; + u8 full_duplex_pin: 1; + u8 
full_duplex_force: 1; + u8 fc_reject: 1; + u8 fc_restart: 1; + u8 fc_restop: 1; + u8 fc_disable: 1; + u8 magic_packet_disable: 1; + u8 addr_wake: 1; + u8 pad20_2: 1; + u8 multi_ia: 1; + u8 fc_priority_location: 1; + u8 pad20_1: 5; + u8 pad21_2: 4; + u8 multicast_all: 1; + u8 pad21_1: 3; + u8 pad22: 6; + u8 rx_vlan_drop: 1; + u8 rx_d102_mode: 1; + u8 pad_d102[9]; +}; + +struct multi { + __le16 count; + u8 addr[386]; +}; + +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[6]; + __le32 ucode[134]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next; + struct cb *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, + lb_mac = 1, + lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames; + __le32 tx_max_collisions; + __le32 tx_late_collisions; + __le32 tx_underruns; + __le32 tx_lost_crs; + __le32 tx_deferred; + __le32 tx_single_collisions; + __le32 tx_multiple_collisions; + __le32 tx_total_collisions; + __le32 rx_good_frames; + __le32 rx_crc_errors; + __le32 rx_alignment_errors; + __le32 rx_resource_errors; + __le32 rx_overrun_errors; + __le32 rx_cdt_errors; + __le32 rx_short_frame_errors; + __le32 fc_xmt_pause; + __le32 fc_rcv_pause; + __le32 fc_rcv_unsupported; + __le16 xmt_tco_frames; + __le16 rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + u32 msg_enable; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *, u32, u32, u32, u16); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct rx *rxs; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t cb_lock; + spinlock_t cmd_lock; + struct csr *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + long: 32; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + enum { + ich = 1, + promiscuous = 2, + multicast_all = 4, + wol_magic = 8, + ich_10h_workaround = 16, + } flags; + enum mac mac; + enum phy___2 phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + struct mem *mem; + dma_addr_t dma_addr; + 
struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum led_state { + led_on = 1, + led_off = 4, + led_on_559 = 5, + led_on_557 = 7, +}; + +enum usb_phy_interface { + USBPHY_INTERFACE_MODE_UNKNOWN = 0, + USBPHY_INTERFACE_MODE_UTMI = 1, + USBPHY_INTERFACE_MODE_UTMIW = 2, + USBPHY_INTERFACE_MODE_ULPI = 3, + USBPHY_INTERFACE_MODE_SERIAL = 4, + USBPHY_INTERFACE_MODE_HSIC = 5, +}; + +struct input_mt_slot { + int abs[14]; + unsigned int frame; + unsigned int key; +}; + +struct input_mt { + int trkid; + int num_slots; + int slot; + unsigned int flags; + unsigned int frame; + int *red; + struct input_mt_slot slots[0]; +}; + +struct input_seq_state { + short unsigned int pos; + bool mutex_acquired; + int input_devices_state; +}; + +struct input_devres { + struct input_dev *input; +}; + +struct reserved_mem_ops; + +struct reserved_mem { + const char *name; + long unsigned int fdt_node; + const struct reserved_mem_ops *ops; + phys_addr_t base; + phys_addr_t size; + void *priv; +}; + +struct reserved_mem_ops { + int (*device_init)(struct reserved_mem *, struct device *); + void (*device_release)(struct reserved_mem *, struct device *); +}; + +typedef int (*reservedmem_of_init_fn)(struct reserved_mem *); + +struct memblock_type { + long unsigned int cnt; + long unsigned int max; + phys_addr_t total_size; + struct memblock_region *regions; + char *name; +}; + +struct memblock { + bool bottom_up; + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +}; + +struct rmem_assigned_device { + struct device *dev; + struct reserved_mem *rmem; + struct list_head list; +}; + +enum { + SKB_FCLONE_UNAVAILABLE = 0, + SKB_FCLONE_ORIG = 1, + SKB_FCLONE_CLONE = 2, +}; + +enum nf_dev_hooks { + NF_NETDEV_INGRESS = 0, + NF_NETDEV_EGRESS = 1, + NF_NETDEV_NUMHOOKS = 2, +}; + +enum tcx_action_base { + TCX_NEXT = -1, + TCX_PASS = 0, + TCX_DROP = 2, + TCX_REDIRECT = 7, +}; + +struct ifbond { + __s32 bond_mode; + __s32 num_slaves; + __s32 miimon; +}; + +typedef struct ifbond ifbond; + +struct ifslave { + __s32 slave_id; + char slave_name[16]; + __s8 link; + __s8 state; + __u32 link_failure_count; +}; + +typedef struct ifslave ifslave; + +enum netdev_queue_type { + NETDEV_QUEUE_TYPE_RX = 0, + NETDEV_QUEUE_TYPE_TX = 1, +}; + +enum { + NAPIF_STATE_SCHED = 1, + NAPIF_STATE_MISSED = 2, + NAPIF_STATE_DISABLE = 4, + NAPIF_STATE_NPSVC = 8, + NAPIF_STATE_LISTED = 16, + NAPIF_STATE_NO_BUSY_POLL = 32, + NAPIF_STATE_IN_BUSY_POLL = 64, + NAPIF_STATE_PREFER_BUSY_POLL = 128, + NAPIF_STATE_THREADED = 256, + NAPIF_STATE_SCHED_THREADED = 512, +}; + +struct net_device_path_stack { + int num_paths; + struct net_device_path path[5]; +}; + +struct bpf_xdp_link { + struct bpf_link link; + struct net_device *dev; + int flags; +}; + +struct netdev_net_notifier { + struct list_head list; + struct notifier_block *nb; +}; + +struct cpu_rmap { + struct kref refcount; + u16 size; + void **obj; + struct { + 
u16 index; + u16 dist; + } near[0]; +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; + void *lower_state_info; +}; + +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; + const unsigned char *dev_addr; +}; + +struct netdev_notifier_offload_xstats_rd { + struct rtnl_hw_stats64 stats; + bool used; + long: 32; +}; + +struct netdev_notifier_offload_xstats_ru { + bool used; +}; + +struct netdev_notifier_offload_xstats_info { + struct netdev_notifier_info info; + enum netdev_offload_xstats_type type; + union { + struct netdev_notifier_offload_xstats_rd *report_delta; + struct netdev_notifier_offload_xstats_ru *report_used; + }; +}; + +enum { + NESTED_SYNC_IMM_BIT = 0, + NESTED_SYNC_TODO_BIT = 1, +}; + +struct netdev_nested_priv { + unsigned char flags; + void *data; +}; + +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; + struct netdev_bonding_info bonding_info; +}; + +enum qdisc_state_t { + __QDISC_STATE_SCHED = 0, + __QDISC_STATE_DEACTIVATED = 1, + __QDISC_STATE_MISSED = 2, + __QDISC_STATE_DRAINING = 3, +}; + +enum qdisc_state2_t { + __QDISC_STATE2_RUNNING = 0, +}; + +struct tc_skb_cb { + struct qdisc_skb_cb qdisc_cb; + u32 drop_reason; + u16 zone; + u16 mru; + u8 post_ct: 1; + u8 post_ct_snat: 1; + u8 post_ct_dnat: 1; +}; + +struct mini_Qdisc { + struct tcf_proto *filter_list; + struct tcf_block *block; + struct gnet_stats_basic_sync *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + long unsigned int rcu_state; +}; + +struct tcx_entry { + struct mini_Qdisc *miniq; + long: 32; + struct bpf_mprog_bundle bundle; + u32 miniq_active; + struct callback_head rcu; + long: 32; +}; + +enum { + SCM_TSTAMP_SND = 0, + SCM_TSTAMP_SCHED = 1, + SCM_TSTAMP_ACK = 2, +}; + +struct rps_map { + unsigned int len; + struct callback_head rcu; + u16 cpus[0]; +}; + +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; + +struct rps_dev_flow_table { + unsigned int mask; + struct callback_head rcu; + struct rps_dev_flow flows[0]; +}; + +typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *); + +struct dev_kfree_skb_cb { + enum skb_drop_reason reason; +}; + +enum { + NAPI_F_PREFER_BUSY_POLL = 1, + NAPI_F_END_ON_RESCHED = 2, +}; + +struct netdev_adjacent { + struct net_device *dev; + netdevice_tracker dev_tracker; + bool master; + bool ignore; + u16 ref_nr; + void *private; + struct list_head list; + struct callback_head rcu; +}; + +enum ethtool_reset_flags { + ETH_RESET_MGMT = 1, + ETH_RESET_IRQ = 2, + ETH_RESET_DMA = 4, + ETH_RESET_FILTER = 8, + ETH_RESET_OFFLOAD = 16, + ETH_RESET_MAC = 32, + ETH_RESET_PHY = 64, + ETH_RESET_RAM = 128, + ETH_RESET_AP = 256, + ETH_RESET_DEDICATED = 65535, + ETH_RESET_ALL = 4294967295, +}; + +struct ethnl_module_fw_flash_ntf_params { + u32 portid; + u32 seq; + bool closed_sock; +}; + +struct ethtool_module_fw_flash_params { + __be32 password; + u8 password_valid: 1; +}; + +struct ethtool_cmis_fw_update_params { + struct net_device *dev; + struct ethtool_module_fw_flash_params params; + struct ethnl_module_fw_flash_ntf_params ntf_params; + const struct firmware *fw; +}; + +struct ethtool_cmis_cdb { + u8 cmis_rev; + u8 read_write_len_ext; + u16 max_completion_time; +}; + +enum ethtool_cmis_cdb_cmd_id { + ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS = 0, + ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES = 64, + ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 65, + ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 257, + 
ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 259, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL = 260, + ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 263, + ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 265, + ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 266, +}; + +struct ethtool_cmis_cdb_request { + __be16 id; + union { + struct { + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[120]; + }; + struct { + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[120]; + } body; + }; + u8 *epl; +}; + +struct ethtool_cmis_cdb_cmd_args { + struct ethtool_cmis_cdb_request req; + u16 max_duration; + u8 read_write_len_ext; + u8 msleep_pre_rpl; + u8 rpl_exp_len; + u8 flags; + char *err_msg; +}; + +struct cmis_fw_update_fw_mng_features { + u8 start_cmd_payload_size; + u8 write_mechanism; + u16 max_duration_start; + u16 max_duration_write; + u16 max_duration_complete; +}; + +struct cmis_cdb_fw_mng_features_rpl { + u8 resv1; + u8 resv2; + u8 start_cmd_payload_size; + u8 resv3; + u8 read_write_len_ext; + u8 write_mechanism; + u8 resv4; + u8 resv5; + __be16 max_duration_start; + __be16 resv6; + __be16 max_duration_write; + __be16 max_duration_complete; + __be16 resv7; +}; + +enum cmis_cdb_fw_write_mechanism { + CMIS_CDB_FW_WRITE_MECHANISM_NONE = 0, + CMIS_CDB_FW_WRITE_MECHANISM_LPL = 1, + CMIS_CDB_FW_WRITE_MECHANISM_EPL = 16, + CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 17, +}; + +struct cmis_cdb_start_fw_download_pl_h { + __be32 image_size; + __be32 resv1; +}; + +struct cmis_cdb_start_fw_download_pl { + union { + struct { + __be32 image_size; + __be32 resv1; + }; + struct cmis_cdb_start_fw_download_pl_h head; + }; + u8 vendor_data[112]; +}; + +struct cmis_cdb_write_fw_block_lpl_pl { + __be32 block_address; + u8 fw_block[116]; +}; + +struct cmis_cdb_write_fw_block_epl_pl { + u8 fw_block[2048]; +}; + +enum { + CMIS_MODULE_LOW_PWR = 1, + CMIS_MODULE_READY = 3, +}; + +struct cmis_cdb_run_fw_image_pl { + u8 resv1; + u8 image_to_run; + u16 delay_to_reset; +}; + +enum ip_conntrack_events { + IPCT_NEW = 0, + IPCT_RELATED = 1, + IPCT_DESTROY = 2, + IPCT_REPLY = 3, + IPCT_ASSURED = 4, + IPCT_PROTOINFO = 5, + IPCT_HELPER = 6, + IPCT_MARK = 7, + IPCT_SEQADJ = 8, + IPCT_NATSEQADJ = 8, + IPCT_SECMARK = 9, + IPCT_LABEL = 10, + IPCT_SYNPROXY = 11, + __IPCT_MAX = 12, +}; + +struct nf_nat_hook { + int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *); + void (*decode_session)(struct sk_buff *, struct flowi *); + void (*remove_nat_bysrc)(struct nf_conn *); +}; + +struct nf_conntrack_zone { + u16 id; + u8 flags; + u8 dir; +}; + +struct nfnl_ct_hook { + size_t (*build_size)(const struct nf_conn *); + int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, u_int16_t, u_int16_t); + int (*parse)(const struct nlattr *, struct nf_conn *); + int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32); + void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32); +}; + +struct nf_conntrack_tuple_mask { + struct { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + } src; +}; + +struct nf_ct_iter_data { + struct net *net; + void *data; + u32 portid; + int report; +}; + +struct nf_conntrack_expect { + struct hlist_node lnode; + struct hlist_node hnode; + struct nf_conntrack_tuple tuple; + struct nf_conntrack_tuple_mask mask; + refcount_t use; + unsigned int flags; + unsigned int class; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); + struct nf_conntrack_helper *helper; + struct nf_conn 
*master; + struct timer_list timeout; + union nf_inet_addr saved_addr; + union nf_conntrack_man_proto saved_proto; + enum ip_conntrack_dir dir; + struct callback_head rcu; +}; + +struct nf_ct_helper_expectfn { + struct list_head head; + const char *name; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); +}; + +struct nf_ct_seqadj { + u32 correction_pos; + s32 offset_before; + s32 offset_after; +}; + +struct nf_conn_seqadj { + struct nf_ct_seqadj seq[2]; +}; + +struct nf_conn_counter { + atomic64_t packets; + atomic64_t bytes; +}; + +struct nf_conn_acct { + struct nf_conn_counter counter[2]; +}; + +struct nf_conn_tstamp { + u_int64_t start; + u_int64_t stop; +}; + +struct nf_conn_labels { + long unsigned int bits[4]; +}; + +struct nf_conn_synproxy { + u32 isn; + u32 its; + u32 tsoff; +}; + +enum cntl_msg_types { + IPCTNL_MSG_CT_NEW = 0, + IPCTNL_MSG_CT_GET = 1, + IPCTNL_MSG_CT_DELETE = 2, + IPCTNL_MSG_CT_GET_CTRZERO = 3, + IPCTNL_MSG_CT_GET_STATS_CPU = 4, + IPCTNL_MSG_CT_GET_STATS = 5, + IPCTNL_MSG_CT_GET_DYING = 6, + IPCTNL_MSG_CT_GET_UNCONFIRMED = 7, + IPCTNL_MSG_MAX = 8, +}; + +enum ctnl_exp_msg_types { + IPCTNL_MSG_EXP_NEW = 0, + IPCTNL_MSG_EXP_GET = 1, + IPCTNL_MSG_EXP_DELETE = 2, + IPCTNL_MSG_EXP_GET_STATS_CPU = 3, + IPCTNL_MSG_EXP_MAX = 4, +}; + +enum ctattr_type { + CTA_UNSPEC = 0, + CTA_TUPLE_ORIG = 1, + CTA_TUPLE_REPLY = 2, + CTA_STATUS = 3, + CTA_PROTOINFO = 4, + CTA_HELP = 5, + CTA_NAT_SRC = 6, + CTA_TIMEOUT = 7, + CTA_MARK = 8, + CTA_COUNTERS_ORIG = 9, + CTA_COUNTERS_REPLY = 10, + CTA_USE = 11, + CTA_ID = 12, + CTA_NAT_DST = 13, + CTA_TUPLE_MASTER = 14, + CTA_SEQ_ADJ_ORIG = 15, + CTA_NAT_SEQ_ADJ_ORIG = 15, + CTA_SEQ_ADJ_REPLY = 16, + CTA_NAT_SEQ_ADJ_REPLY = 16, + CTA_SECMARK = 17, + CTA_ZONE = 18, + CTA_SECCTX = 19, + CTA_TIMESTAMP = 20, + CTA_MARK_MASK = 21, + CTA_LABELS = 22, + CTA_LABELS_MASK = 23, + CTA_SYNPROXY = 24, + CTA_FILTER = 25, + CTA_STATUS_MASK = 26, + __CTA_MAX = 27, +}; + +enum ctattr_tuple { + CTA_TUPLE_UNSPEC = 0, + CTA_TUPLE_IP = 1, + CTA_TUPLE_PROTO = 2, + CTA_TUPLE_ZONE = 3, + __CTA_TUPLE_MAX = 4, +}; + +enum ctattr_ip { + CTA_IP_UNSPEC = 0, + CTA_IP_V4_SRC = 1, + CTA_IP_V4_DST = 2, + CTA_IP_V6_SRC = 3, + CTA_IP_V6_DST = 4, + __CTA_IP_MAX = 5, +}; + +enum ctattr_protoinfo { + CTA_PROTOINFO_UNSPEC = 0, + CTA_PROTOINFO_TCP = 1, + CTA_PROTOINFO_DCCP = 2, + CTA_PROTOINFO_SCTP = 3, + __CTA_PROTOINFO_MAX = 4, +}; + +enum ctattr_counters { + CTA_COUNTERS_UNSPEC = 0, + CTA_COUNTERS_PACKETS = 1, + CTA_COUNTERS_BYTES = 2, + CTA_COUNTERS32_PACKETS = 3, + CTA_COUNTERS32_BYTES = 4, + CTA_COUNTERS_PAD = 5, + __CTA_COUNTERS_MAX = 6, +}; + +enum ctattr_tstamp { + CTA_TIMESTAMP_UNSPEC = 0, + CTA_TIMESTAMP_START = 1, + CTA_TIMESTAMP_STOP = 2, + CTA_TIMESTAMP_PAD = 3, + __CTA_TIMESTAMP_MAX = 4, +}; + +enum ctattr_seqadj { + CTA_SEQADJ_UNSPEC = 0, + CTA_SEQADJ_CORRECTION_POS = 1, + CTA_SEQADJ_OFFSET_BEFORE = 2, + CTA_SEQADJ_OFFSET_AFTER = 3, + __CTA_SEQADJ_MAX = 4, +}; + +enum ctattr_synproxy { + CTA_SYNPROXY_UNSPEC = 0, + CTA_SYNPROXY_ISN = 1, + CTA_SYNPROXY_ITS = 2, + CTA_SYNPROXY_TSOFF = 3, + __CTA_SYNPROXY_MAX = 4, +}; + +enum ctattr_expect { + CTA_EXPECT_UNSPEC = 0, + CTA_EXPECT_MASTER = 1, + CTA_EXPECT_TUPLE = 2, + CTA_EXPECT_MASK = 3, + CTA_EXPECT_TIMEOUT = 4, + CTA_EXPECT_ID = 5, + CTA_EXPECT_HELP_NAME = 6, + CTA_EXPECT_ZONE = 7, + CTA_EXPECT_FLAGS = 8, + CTA_EXPECT_CLASS = 9, + CTA_EXPECT_NAT = 10, + CTA_EXPECT_FN = 11, + __CTA_EXPECT_MAX = 12, +}; + +enum ctattr_expect_nat { + CTA_EXPECT_NAT_UNSPEC = 0, + CTA_EXPECT_NAT_DIR = 1, + 
CTA_EXPECT_NAT_TUPLE = 2, + __CTA_EXPECT_NAT_MAX = 3, +}; + +enum ctattr_stats_cpu { + CTA_STATS_UNSPEC = 0, + CTA_STATS_SEARCHED = 1, + CTA_STATS_FOUND = 2, + CTA_STATS_NEW = 3, + CTA_STATS_INVALID = 4, + CTA_STATS_IGNORE = 5, + CTA_STATS_DELETE = 6, + CTA_STATS_DELETE_LIST = 7, + CTA_STATS_INSERT = 8, + CTA_STATS_INSERT_FAILED = 9, + CTA_STATS_DROP = 10, + CTA_STATS_EARLY_DROP = 11, + CTA_STATS_ERROR = 12, + CTA_STATS_SEARCH_RESTART = 13, + CTA_STATS_CLASH_RESOLVE = 14, + CTA_STATS_CHAIN_TOOLONG = 15, + __CTA_STATS_MAX = 16, +}; + +enum ctattr_stats_global { + CTA_STATS_GLOBAL_UNSPEC = 0, + CTA_STATS_GLOBAL_ENTRIES = 1, + CTA_STATS_GLOBAL_MAX_ENTRIES = 2, + __CTA_STATS_GLOBAL_MAX = 3, +}; + +enum ctattr_expect_stats { + CTA_STATS_EXP_UNSPEC = 0, + CTA_STATS_EXP_NEW = 1, + CTA_STATS_EXP_CREATE = 2, + CTA_STATS_EXP_DELETE = 3, + __CTA_STATS_EXP_MAX = 4, +}; + +enum ctattr_filter { + CTA_FILTER_UNSPEC = 0, + CTA_FILTER_ORIG_FLAGS = 1, + CTA_FILTER_REPLY_FLAGS = 2, + __CTA_FILTER_MAX = 3, +}; + +struct ctnetlink_list_dump_ctx { + struct nf_conn *last; + unsigned int cpu; + bool done; +}; + +struct ctnetlink_filter_u32 { + u32 val; + u32 mask; +}; + +struct ctnetlink_filter { + u8 family; + bool zone_filter; + u_int32_t orig_flags; + u_int32_t reply_flags; + struct nf_conntrack_tuple orig; + struct nf_conntrack_tuple reply; + struct nf_conntrack_zone zone; + struct ctnetlink_filter_u32 mark; + struct ctnetlink_filter_u32 status; +}; + +struct xt_ecn_info { + __u8 operation; + __u8 invert; + __u8 ip_ect; + union { + struct { + __u8 ect; + } tcp; + } proto; +}; + +struct ip6t_ip6 { + struct in6_addr src; + struct in6_addr dst; + struct in6_addr smsk; + struct in6_addr dmsk; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u16 proto; + __u8 tos; + __u8 flags; + __u8 invflags; +}; + +enum tcp_queue { + TCP_FRAG_IN_WRITE_QUEUE = 0, + TCP_FRAG_IN_RTX_QUEUE = 1, +}; + +enum tcp_skb_cb_sacked_flags { + TCPCB_SACKED_ACKED = 1, + TCPCB_SACKED_RETRANS = 2, + TCPCB_LOST = 4, + TCPCB_TAGBITS = 7, + TCPCB_REPAIRED = 16, + TCPCB_EVER_RETRANS = 128, + TCPCB_RETRANS = 146, +}; + +struct nhmsg { + unsigned char nh_family; + unsigned char nh_scope; + unsigned char nh_protocol; + unsigned char resvd; + unsigned int nh_flags; +}; + +struct nexthop_grp { + __u32 id; + __u8 weight; + __u8 weight_high; + __u16 resvd2; +}; + +enum { + NEXTHOP_GRP_TYPE_MPATH = 0, + NEXTHOP_GRP_TYPE_RES = 1, + __NEXTHOP_GRP_TYPE_MAX = 2, +}; + +enum { + NHA_UNSPEC = 0, + NHA_ID = 1, + NHA_GROUP = 2, + NHA_GROUP_TYPE = 3, + NHA_BLACKHOLE = 4, + NHA_OIF = 5, + NHA_GATEWAY = 6, + NHA_ENCAP_TYPE = 7, + NHA_ENCAP = 8, + NHA_GROUPS = 9, + NHA_MASTER = 10, + NHA_FDB = 11, + NHA_RES_GROUP = 12, + NHA_RES_BUCKET = 13, + NHA_OP_FLAGS = 14, + NHA_GROUP_STATS = 15, + NHA_HW_STATS_ENABLE = 16, + NHA_HW_STATS_USED = 17, + __NHA_MAX = 18, +}; + +enum { + NHA_RES_GROUP_UNSPEC = 0, + NHA_RES_GROUP_PAD = 0, + NHA_RES_GROUP_BUCKETS = 1, + NHA_RES_GROUP_IDLE_TIMER = 2, + NHA_RES_GROUP_UNBALANCED_TIMER = 3, + NHA_RES_GROUP_UNBALANCED_TIME = 4, + __NHA_RES_GROUP_MAX = 5, +}; + +enum { + NHA_RES_BUCKET_UNSPEC = 0, + NHA_RES_BUCKET_PAD = 0, + NHA_RES_BUCKET_INDEX = 1, + NHA_RES_BUCKET_IDLE_TIME = 2, + NHA_RES_BUCKET_NH_ID = 3, + __NHA_RES_BUCKET_MAX = 4, +}; + +enum { + NHA_GROUP_STATS_UNSPEC = 0, + NHA_GROUP_STATS_ENTRY = 1, + __NHA_GROUP_STATS_MAX = 2, +}; + +enum { + NHA_GROUP_STATS_ENTRY_UNSPEC = 0, + NHA_GROUP_STATS_ENTRY_ID = 1, + NHA_GROUP_STATS_ENTRY_PACKETS = 2, + 
NHA_GROUP_STATS_ENTRY_PACKETS_HW = 3, + __NHA_GROUP_STATS_ENTRY_MAX = 4, +}; + +struct nh_config { + u32 nh_id; + u8 nh_family; + u8 nh_protocol; + u8 nh_blackhole; + u8 nh_fdb; + u32 nh_flags; + int nh_ifindex; + struct net_device *dev; + union { + __be32 ipv4; + struct in6_addr ipv6; + } gw; + struct nlattr *nh_grp; + u16 nh_grp_type; + u16 nh_grp_res_num_buckets; + long unsigned int nh_grp_res_idle_timer; + long unsigned int nh_grp_res_unbalanced_timer; + bool nh_grp_res_has_num_buckets; + bool nh_grp_res_has_idle_timer; + bool nh_grp_res_has_unbalanced_timer; + bool nh_hw_stats; + struct nlattr *nh_encap; + u16 nh_encap_type; + u32 nlflags; + struct nl_info nlinfo; +}; + +enum nexthop_event_type { + NEXTHOP_EVENT_DEL = 0, + NEXTHOP_EVENT_REPLACE = 1, + NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE = 2, + NEXTHOP_EVENT_BUCKET_REPLACE = 3, + NEXTHOP_EVENT_HW_STATS_REPORT_DELTA = 4, +}; + +enum nh_notifier_info_type { + NH_NOTIFIER_INFO_TYPE_SINGLE = 0, + NH_NOTIFIER_INFO_TYPE_GRP = 1, + NH_NOTIFIER_INFO_TYPE_RES_TABLE = 2, + NH_NOTIFIER_INFO_TYPE_RES_BUCKET = 3, + NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS = 4, +}; + +struct nh_notifier_single_info { + struct net_device *dev; + u8 gw_family; + union { + __be32 ipv4; + struct in6_addr ipv6; + }; + u32 id; + u8 is_reject: 1; + u8 is_fdb: 1; + u8 has_encap: 1; +}; + +struct nh_notifier_grp_entry_info { + u16 weight; + struct nh_notifier_single_info nh; +}; + +struct nh_notifier_grp_info { + u16 num_nh; + bool is_fdb; + bool hw_stats; + struct nh_notifier_grp_entry_info nh_entries[0]; +}; + +struct nh_notifier_res_bucket_info { + u16 bucket_index; + unsigned int idle_timer_ms; + bool force; + struct nh_notifier_single_info old_nh; + struct nh_notifier_single_info new_nh; +}; + +struct nh_notifier_res_table_info { + u16 num_nh_buckets; + bool hw_stats; + struct nh_notifier_single_info nhs[0]; +}; + +struct nh_notifier_grp_hw_stats_entry_info { + u32 id; + long: 32; + u64 packets; +}; + +struct nh_notifier_grp_hw_stats_info { + u16 num_nh; + bool hw_stats_used; + long: 32; + struct nh_notifier_grp_hw_stats_entry_info stats[0]; +}; + +struct nh_notifier_info { + struct net *net; + struct netlink_ext_ack *extack; + u32 id; + enum nh_notifier_info_type type; + union { + struct nh_notifier_single_info *nh; + struct nh_notifier_grp_info *nh_grp; + struct nh_notifier_res_table_info *nh_res_table; + struct nh_notifier_res_bucket_info *nh_res_bucket; + struct nh_notifier_grp_hw_stats_info *nh_grp_hw_stats; + }; +}; + +struct nh_dump_filter { + u32 nh_id; + int dev_idx; + int master_idx; + bool group_filter; + bool fdb_filter; + u32 res_bucket_nh_id; + u32 op_flags; +}; + +struct rtm_dump_nh_ctx { + u32 idx; +}; + +struct rtm_dump_res_bucket_ctx { + struct rtm_dump_nh_ctx nh; + u16 bucket_index; +}; + +struct rtm_dump_nexthop_bucket_data { + struct rtm_dump_res_bucket_ctx *ctx; + struct nh_dump_filter filter; +}; + +enum { + XFRM_LOOKUP_ICMP = 1, + XFRM_LOOKUP_QUEUE = 2, + XFRM_LOOKUP_KEEP_DST_REF = 4, +}; + +typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *, const struct inet6_skb_parm *); + +enum flowlabel_reflect { + FLOWLABEL_REFLECT_ESTABLISHED = 1, + FLOWLABEL_REFLECT_TCP_RESET = 2, + FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, +}; + +struct icmpv6_msg { + struct sk_buff *skb; + int offset; + uint8_t type; +}; + +struct icmp6_err { + int err; + int fatal; +}; + +enum switchdev_attr_id { + SWITCHDEV_ATTR_ID_UNDEFINED = 0, + SWITCHDEV_ATTR_ID_PORT_STP_STATE = 1, + SWITCHDEV_ATTR_ID_PORT_MST_STATE = 2, + 
SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 3, + SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS = 4, + SWITCHDEV_ATTR_ID_PORT_MROUTER = 5, + SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 6, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 7, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL = 8, + SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 9, + SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 10, + SWITCHDEV_ATTR_ID_BRIDGE_MST = 11, + SWITCHDEV_ATTR_ID_MRP_PORT_ROLE = 12, + SWITCHDEV_ATTR_ID_VLAN_MSTI = 13, +}; + +struct switchdev_mst_state { + u16 msti; + u8 state; +}; + +struct switchdev_brport_flags { + long unsigned int val; + long unsigned int mask; +}; + +struct switchdev_vlan_msti { + u16 vid; + u16 msti; +}; + +struct switchdev_attr { + struct net_device *orig_dev; + enum switchdev_attr_id id; + u32 flags; + void *complete_priv; + void (*complete)(struct net_device *, int, void *); + union { + u8 stp_state; + struct switchdev_mst_state mst_state; + struct switchdev_brport_flags brport_flags; + bool mrouter; + clock_t ageing_time; + bool vlan_filtering; + u16 vlan_protocol; + bool mst; + bool mc_disabled; + u8 mrp_port_role; + struct switchdev_vlan_msti vlan_msti; + } u; +}; + +struct br_config_bpdu { + unsigned int topology_change: 1; + unsigned int topology_change_ack: 1; + bridge_id root; + int root_path_cost; + bridge_id bridge_id; + port_id port_id; + int message_age; + int max_age; + int hello_time; + int forward_delay; +}; + +struct clk { + struct clk_core *core; + struct device *dev; + const char *dev_id; + const char *con_id; + long unsigned int min_rate; + long unsigned int max_rate; + unsigned int exclusive_count; + struct hlist_node clks_node; +}; + +enum format_type { + FORMAT_TYPE_NONE = 0, + FORMAT_TYPE_WIDTH = 1, + FORMAT_TYPE_PRECISION = 2, + FORMAT_TYPE_CHAR = 3, + FORMAT_TYPE_STR = 4, + FORMAT_TYPE_PTR = 5, + FORMAT_TYPE_PERCENT_CHAR = 6, + FORMAT_TYPE_INVALID = 7, + FORMAT_TYPE_LONG_LONG = 8, + FORMAT_TYPE_ULONG = 9, + FORMAT_TYPE_LONG = 10, + FORMAT_TYPE_UBYTE = 11, + FORMAT_TYPE_BYTE = 12, + FORMAT_TYPE_USHORT = 13, + FORMAT_TYPE_SHORT = 14, + FORMAT_TYPE_UINT = 15, + FORMAT_TYPE_INT = 16, + FORMAT_TYPE_SIZE_T = 17, + FORMAT_TYPE_PTRDIFF = 18, +}; + +struct printf_spec { + unsigned int type: 8; + int field_width: 24; + unsigned int flags: 8; + unsigned int base: 8; + int precision: 16; +}; + +struct page_flags_fields { + int width; + int shift; + int mask; + const struct printf_spec *spec; + const char *name; +}; + +struct trace_event_raw_rcu_utilization { + struct trace_entry ent; + const char *s; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_future_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + long int gp_seq_req; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period_init { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + u8 level; + int grplo; + int grphi; + long unsigned int qsmask; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gpseq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_funnel_lock { + struct trace_entry ent; + const char *rcuname; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_preempt_task { + struct trace_entry ent; + const 
char *rcuname; + long int gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_unlock_preempted_task { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_quiescent_state_report { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + long unsigned int mask; + long unsigned int qsmask; + u8 level; + int grplo; + int grphi; + u8 gp_tasks; + char __data[0]; +}; + +struct trace_event_raw_rcu_fqs { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int cpu; + const char *qsevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_stall_warning { + struct trace_entry ent; + const char *rcuname; + const char *msg; + char __data[0]; +}; + +struct trace_event_raw_rcu_watching { + struct trace_entry ent; + const char *polarity; + long int oldnesting; + long int newnesting; + int counter; + char __data[0]; +}; + +struct trace_event_raw_rcu_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + long int qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_segcb_stats { + struct trace_entry ent; + const char *ctx; + long unsigned int gp_seq[4]; + long int seglen[4]; + char __data[0]; +}; + +struct trace_event_raw_rcu_kvfree_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + long unsigned int offset; + long int qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_start { + struct trace_entry ent; + const char *rcuname; + long int qlen; + long int blimit; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kvfree_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + long unsigned int offset; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kfree_bulk_callback { + struct trace_entry ent; + const char *rcuname; + long unsigned int nr_records; + void **p; + char __data[0]; +}; + +struct trace_event_raw_rcu_sr_normal { + struct trace_entry ent; + const char *rcuname; + void *rhp; + const char *srevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_end { + struct trace_entry ent; + const char *rcuname; + int callbacks_invoked; + char cb; + char nr; + char iit; + char risk; + char __data[0]; +}; + +struct trace_event_raw_rcu_torture_read { + struct trace_entry ent; + char rcutorturename[8]; + struct callback_head *rhp; + long unsigned int secs; + long unsigned int c_old; + long unsigned int c; + char __data[0]; +}; + +struct trace_event_raw_rcu_barrier { + struct trace_entry ent; + const char *rcuname; + const char *s; + int cpu; + int cnt; + long unsigned int done; + char __data[0]; +}; + +struct trace_event_data_offsets_rcu_utilization {}; + +struct trace_event_data_offsets_rcu_grace_period {}; + +struct trace_event_data_offsets_rcu_future_grace_period {}; + +struct trace_event_data_offsets_rcu_grace_period_init {}; + +struct trace_event_data_offsets_rcu_exp_grace_period {}; + +struct trace_event_data_offsets_rcu_exp_funnel_lock {}; + +struct trace_event_data_offsets_rcu_preempt_task {}; + +struct trace_event_data_offsets_rcu_unlock_preempted_task {}; + +struct trace_event_data_offsets_rcu_quiescent_state_report {}; + +struct trace_event_data_offsets_rcu_fqs {}; + +struct trace_event_data_offsets_rcu_stall_warning {}; + +struct trace_event_data_offsets_rcu_watching {}; + +struct 
trace_event_data_offsets_rcu_callback {}; + +struct trace_event_data_offsets_rcu_segcb_stats {}; + +struct trace_event_data_offsets_rcu_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_batch_start {}; + +struct trace_event_data_offsets_rcu_invoke_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kfree_bulk_callback {}; + +struct trace_event_data_offsets_rcu_sr_normal {}; + +struct trace_event_data_offsets_rcu_batch_end {}; + +struct trace_event_data_offsets_rcu_torture_read {}; + +struct trace_event_data_offsets_rcu_barrier {}; + +typedef void (*btf_trace_rcu_utilization)(void *, const char *); + +typedef void (*btf_trace_rcu_grace_period)(void *, const char *, long unsigned int, const char *); + +typedef void (*btf_trace_rcu_future_grace_period)(void *, const char *, long unsigned int, long unsigned int, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_grace_period_init)(void *, const char *, long unsigned int, u8, int, int, long unsigned int); + +typedef void (*btf_trace_rcu_exp_grace_period)(void *, const char *, long unsigned int, const char *); + +typedef void (*btf_trace_rcu_exp_funnel_lock)(void *, const char *, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_preempt_task)(void *, const char *, int, long unsigned int); + +typedef void (*btf_trace_rcu_unlock_preempted_task)(void *, const char *, long unsigned int, int); + +typedef void (*btf_trace_rcu_quiescent_state_report)(void *, const char *, long unsigned int, long unsigned int, long unsigned int, u8, int, int, int); + +typedef void (*btf_trace_rcu_fqs)(void *, const char *, long unsigned int, int, const char *); + +typedef void (*btf_trace_rcu_stall_warning)(void *, const char *, const char *); + +typedef void (*btf_trace_rcu_watching)(void *, const char *, long int, long int, int); + +typedef void (*btf_trace_rcu_callback)(void *, const char *, struct callback_head *, long int); + +typedef void (*btf_trace_rcu_segcb_stats)(void *, struct rcu_segcblist *, const char *); + +typedef void (*btf_trace_rcu_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int, long int); + +typedef void (*btf_trace_rcu_batch_start)(void *, const char *, long int, long int); + +typedef void (*btf_trace_rcu_invoke_callback)(void *, const char *, struct callback_head *); + +typedef void (*btf_trace_rcu_invoke_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int); + +typedef void (*btf_trace_rcu_invoke_kfree_bulk_callback)(void *, const char *, long unsigned int, void **); + +typedef void (*btf_trace_rcu_sr_normal)(void *, const char *, struct callback_head *, const char *); + +typedef void (*btf_trace_rcu_batch_end)(void *, const char *, int, char, char, char, char); + +typedef void (*btf_trace_rcu_torture_read)(void *, const char *, struct callback_head *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_rcu_barrier)(void *, const char *, const char *, int, int, long unsigned int); + +struct rcu_tasks; + +typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *); + +typedef void (*pregp_func_t)(struct list_head *); + +typedef void (*pertask_func_t)(struct task_struct *, struct list_head *); + +typedef void (*postscan_func_t)(struct list_head *); + +typedef void (*holdouts_func_t)(struct list_head *, bool, bool *); + +typedef void (*postgp_func_t)(struct rcu_tasks *); + +struct rcu_tasks_percpu; + +struct rcu_tasks { + struct rcuwait cbs_wait; + 
raw_spinlock_t cbs_gbl_lock; + struct mutex tasks_gp_mutex; + int gp_state; + int gp_sleep; + int init_fract; + long unsigned int gp_jiffies; + long unsigned int gp_start; + long unsigned int tasks_gp_seq; + long unsigned int n_ipis; + long unsigned int n_ipis_fails; + struct task_struct *kthread_ptr; + long unsigned int lazy_jiffies; + rcu_tasks_gp_func_t gp_func; + pregp_func_t pregp_func; + pertask_func_t pertask_func; + postscan_func_t postscan_func; + holdouts_func_t holdouts_func; + postgp_func_t postgp_func; + call_rcu_func_t call_func; + unsigned int wait_state; + struct rcu_tasks_percpu *rtpcpu; + struct rcu_tasks_percpu **rtpcp_array; + int percpu_enqueue_shift; + int percpu_enqueue_lim; + int percpu_dequeue_lim; + long unsigned int percpu_dequeue_gpseq; + struct mutex barrier_q_mutex; + atomic_t barrier_q_count; + struct completion barrier_q_completion; + long unsigned int barrier_q_seq; + long unsigned int barrier_q_start; + char *name; + char *kname; +}; + +struct rcu_tasks_percpu { + struct rcu_segcblist cblist; + raw_spinlock_t lock; + long unsigned int rtp_jiffies; + long unsigned int rtp_n_lock_retries; + struct timer_list lazy_timer; + unsigned int urgent_gp; + struct work_struct rtp_work; + struct irq_work rtp_irq_work; + struct callback_head barrier_q_head; + struct list_head rtp_blkd_tasks; + struct list_head rtp_exit_list; + int cpu; + int index; + struct rcu_tasks *rtpp; +}; + +struct trc_stall_chk_rdr { + int nesting; + int ipi_to_cpu; + u8 needqs; +}; + +struct trace_event_raw_benchmark_event { + struct trace_entry ent; + char str[128]; + u64 delta; + char __data[0]; +}; + +struct trace_event_data_offsets_benchmark_event {}; + +typedef void (*btf_trace_benchmark_event)(void *, const char *, u64); + +enum { + BPF_F_SYSCTL_BASE_NAME = 1, +}; + +struct bpf_cg_run_ctx { + struct bpf_run_ctx run_ctx; + const struct bpf_prog_array_item *prog_item; + int retval; +}; + +struct bpf_sockopt_buf { + u8 data[32]; +}; + +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum bpf_attach_type type; +}; + +struct bpf_prog_list { + struct hlist_node node; + struct bpf_prog *prog; + struct bpf_cgroup_link *link; + struct bpf_cgroup_storage *storage[2]; +}; + +typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_retval)(); + +typedef u64 (*btf_bpf_set_retval)(int); + +typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64); + +typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t); + +typedef u64 (*btf_bpf_get_netns_cookie_sockopt)(struct bpf_sockopt_kern *); + +enum vm_stat_item { + NR_DIRTY_THRESHOLD = 0, + NR_DIRTY_BG_THRESHOLD = 1, + NR_MEMMAP_PAGES = 2, + NR_MEMMAP_BOOT_PAGES = 3, + NR_VM_STAT_ITEMS = 4, +}; + +struct mem_cgroup_reclaim_cookie { + pg_data_t *pgdat; + int generation; +}; + +struct lruvec_stats_percpu { + long int state[30]; + long int state_prev[30]; +}; + +struct lruvec_stats { + long int state[30]; + long int state_local[30]; + long int state_pending[30]; +}; + +struct memcg_vmstats { + long int state[37]; + long unsigned int events[20]; + long int state_local[37]; + long unsigned int events_local[20]; + long int state_pending[37]; + long unsigned int events_pending[20]; + long: 32; + atomic64_t stats_updates; +}; + +struct memcg_vmstats_percpu { + 
unsigned int stats_updates; + struct memcg_vmstats_percpu *parent; + struct memcg_vmstats *vmstats; + long int state[37]; + long unsigned int events[20]; + long int state_prev[37]; + long unsigned int events_prev[20]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct slabobj_ext { + struct obj_cgroup *objcg; + long: 32; +}; + +struct match_token { + int token; + const char *pattern; +}; + +enum { + MAX_OPT_ARGS = 3, +}; + +typedef struct { + char *from; + char *to; +} substring_t; + +struct trace_event_raw_memcg_rstat_stats { + struct trace_entry ent; + u64 id; + int item; + int val; + char __data[0]; +}; + +struct trace_event_raw_memcg_rstat_events { + struct trace_entry ent; + u64 id; + int item; + long unsigned int val; + char __data[0]; +}; + +struct trace_event_raw_memcg_flush_stats { + struct trace_entry ent; + u64 id; + s64 stats_updates; + bool force; + bool needs_flush; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_memcg_rstat_stats {}; + +struct trace_event_data_offsets_memcg_rstat_events {}; + +struct trace_event_data_offsets_memcg_flush_stats {}; + +typedef void (*btf_trace_mod_memcg_state)(void *, struct mem_cgroup *, int, int); + +typedef void (*btf_trace_mod_memcg_lruvec_state)(void *, struct mem_cgroup *, int, int); + +typedef void (*btf_trace_count_memcg_events)(void *, struct mem_cgroup *, int, long unsigned int); + +typedef void (*btf_trace_memcg_flush_stats)(void *, struct mem_cgroup *, s64, bool, bool); + +struct memory_stat { + const char *name; + unsigned int idx; +}; + +struct memcg_stock_pcp { + local_lock_t stock_lock; + struct mem_cgroup *cached; + unsigned int nr_pages; + struct obj_cgroup *cached_objcg; + struct pglist_data *cached_pgdat; + unsigned int nr_bytes; + int nr_slab_reclaimable_b; + int nr_slab_unreclaimable_b; + struct work_struct work; + long unsigned int flags; +}; + +struct aggregate_control { + long int *aggregate; + long int *local; + long int *pending; + long int *ppending; + long int *cstat; + long int *cstat_prev; + int size; +}; + +enum { + MEMORY_RECLAIM_SWAPPINESS = 0, + MEMORY_RECLAIM_NULL = 1, +}; + +struct uncharge_gather { + struct mem_cgroup *memcg; + long unsigned int nr_memory; + long unsigned int pgpgout; + long unsigned int nr_kmem; + int nid; +}; + +enum siginfo_layout { + SIL_KILL = 0, + SIL_TIMER = 1, + SIL_POLL = 2, + SIL_FAULT = 3, + SIL_FAULT_TRAPNO = 4, + SIL_FAULT_MCEERR = 5, + SIL_FAULT_BNDERR = 6, + SIL_FAULT_PKUERR = 7, + SIL_FAULT_PERF_EVENT = 8, + SIL_CHLD = 9, + SIL_RT = 10, + SIL_SYS = 11, +}; + +struct signalfd_siginfo { + __u32 ssi_signo; + __s32 ssi_errno; + __s32 ssi_code; + __u32 ssi_pid; + __u32 ssi_uid; + __s32 ssi_fd; + __u32 ssi_tid; + __u32 ssi_band; + __u32 ssi_overrun; + __u32 ssi_trapno; + __s32 ssi_status; + __s32 ssi_int; + __u64 ssi_ptr; + __u64 ssi_utime; + __u64 ssi_stime; + __u64 ssi_addr; + __u16 ssi_addr_lsb; + __u16 __pad2; + __s32 ssi_syscall; + __u64 ssi_call_addr; + __u32 ssi_arch; + __u8 __pad[28]; +}; + +struct signalfd_ctx { + sigset_t sigmask; +}; + +typedef short unsigned int __kernel_uid16_t; + +typedef short unsigned int __kernel_gid16_t; + +typedef __kernel_uid16_t uid16_t; + +typedef __kernel_gid16_t gid16_t; + +struct ext4_group_desc { + __le32 bg_block_bitmap_lo; + __le32 bg_inode_bitmap_lo; + __le32 bg_inode_table_lo; + __le16 bg_free_blocks_count_lo; + __le16 bg_free_inodes_count_lo; + __le16 bg_used_dirs_count_lo; + __le16 bg_flags; + __le32 
bg_exclude_bitmap_lo; + __le16 bg_block_bitmap_csum_lo; + __le16 bg_inode_bitmap_csum_lo; + __le16 bg_itable_unused_lo; + __le16 bg_checksum; + __le32 bg_block_bitmap_hi; + __le32 bg_inode_bitmap_hi; + __le32 bg_inode_table_hi; + __le16 bg_free_blocks_count_hi; + __le16 bg_free_inodes_count_hi; + __le16 bg_used_dirs_count_hi; + __le16 bg_itable_unused_hi; + __le32 bg_exclude_bitmap_hi; + __le16 bg_block_bitmap_csum_hi; + __le16 bg_inode_bitmap_csum_hi; + __u32 bg_reserved; +}; + +struct ext4_inode { + __le16 i_mode; + __le16 i_uid; + __le32 i_size_lo; + __le32 i_atime; + __le32 i_ctime; + __le32 i_mtime; + __le32 i_dtime; + __le16 i_gid; + __le16 i_links_count; + __le32 i_blocks_lo; + __le32 i_flags; + union { + struct { + __le32 l_i_version; + } linux1; + struct { + __u32 h_i_translator; + } hurd1; + struct { + __u32 m_i_reserved1; + } masix1; + } osd1; + __le32 i_block[15]; + __le32 i_generation; + __le32 i_file_acl_lo; + __le32 i_size_high; + __le32 i_obso_faddr; + union { + struct { + __le16 l_i_blocks_high; + __le16 l_i_file_acl_high; + __le16 l_i_uid_high; + __le16 l_i_gid_high; + __le16 l_i_checksum_lo; + __le16 l_i_reserved; + } linux2; + struct { + __le16 h_i_reserved1; + __u16 h_i_mode_high; + __u16 h_i_uid_high; + __u16 h_i_gid_high; + __u32 h_i_author; + } hurd2; + struct { + __le16 h_i_reserved1; + __le16 m_i_file_acl_high; + __u32 m_i_reserved2[2]; + } masix2; + } osd2; + __le16 i_extra_isize; + __le16 i_checksum_hi; + __le32 i_ctime_extra; + __le32 i_mtime_extra; + __le32 i_atime_extra; + __le32 i_crtime; + __le32 i_crtime_extra; + __le32 i_version_hi; + __le32 i_projid; +}; + +enum { + ES_WRITTEN_B = 0, + ES_UNWRITTEN_B = 1, + ES_DELAYED_B = 2, + ES_HOLE_B = 3, + ES_REFERENCED_B = 4, + ES_FLAGS = 5, +}; + +struct ext4_iloc { + struct buffer_head *bh; + long unsigned int offset; + ext4_group_t block_group; +}; + +struct ext4_xattr_ibody_header { + __le32 h_magic; +}; + +struct ext4_xattr_inode_array { + unsigned int count; + struct inode *inodes[0]; +}; + +struct mpage_da_data { + struct inode *inode; + struct writeback_control *wbc; + unsigned int can_map: 1; + long unsigned int first_page; + long unsigned int next_page; + long unsigned int last_page; + struct ext4_map_blocks map; + struct ext4_io_submit io_submit; + unsigned int do_map: 1; + unsigned int scanned_until_end: 1; + unsigned int journalled_more_data: 1; + long: 32; +}; + +struct fuse_ioctl_in { + uint64_t fh; + uint32_t flags; + uint32_t cmd; + uint64_t arg; + uint32_t in_size; + uint32_t out_size; +}; + +struct fuse_ioctl_iovec { + uint64_t base; + uint64_t len; +}; + +struct fuse_ioctl_out { + int32_t result; + uint32_t flags; + uint32_t in_iovs; + uint32_t out_iovs; +}; + +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + __u32 fsx_cowextsize; + unsigned char fsx_pad[8]; +}; + +struct fsverity_enable_arg { + __u32 version; + __u32 hash_algorithm; + __u32 block_size; + __u32 salt_size; + __u64 salt_ptr; + __u32 sig_size; + __u32 __reserved1; + __u64 sig_ptr; + __u64 __reserved2[11]; +}; + +struct fsverity_digest { + __u16 digest_algorithm; + __u16 digest_size; + __u8 digest[0]; +}; + +enum btrfs_qgroup_mode { + BTRFS_QGROUP_MODE_DISABLED = 0, + BTRFS_QGROUP_MODE_FULL = 1, + BTRFS_QGROUP_MODE_SIMPLE = 2, +}; + +struct btrfs_feature_attr { + struct kobj_attribute kobj_attr; + enum btrfs_feature_set feature_set; + long: 32; + u64 feature_bit; +}; + +struct raid_kobject { + u64 flags; + struct kobject kobj; + long: 32; +}; + +struct sem; + +struct sem_queue; + 
+struct sem_undo; + +struct semid_ds { + struct ipc_perm sem_perm; + __kernel_old_time_t sem_otime; + __kernel_old_time_t sem_ctime; + struct sem *sem_base; + struct sem_queue *sem_pending; + struct sem_queue **sem_pending_last; + struct sem_undo *undo; + short unsigned int sem_nsems; +}; + +struct sem { + int semval; + struct pid *sempid; + spinlock_t lock; + struct list_head pending_alter; + struct list_head pending_const; + long: 32; + time64_t sem_otime; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sembuf; + +struct sem_queue { + struct list_head list; + struct task_struct *sleeper; + struct sem_undo *undo; + struct pid *pid; + int status; + struct sembuf *sops; + struct sembuf *blocking; + int nsops; + bool alter; + bool dupsop; +}; + +struct sem_undo { + struct list_head list_proc; + struct callback_head rcu; + struct sem_undo_list *ulp; + struct list_head list_id; + int semid; + short int semadj[0]; +}; + +struct semid64_ds { + struct ipc64_perm sem_perm; + long unsigned int sem_otime; + long unsigned int sem_ctime; + long unsigned int sem_nsems; + long unsigned int sem_otime_high; + long unsigned int sem_ctime_high; +}; + +struct sembuf { + short unsigned int sem_num; + short int sem_op; + short int sem_flg; +}; + +struct seminfo { + int semmap; + int semmni; + int semmns; + int semmnu; + int semmsl; + int semopm; + int semume; + int semusz; + int semvmx; + int semaem; +}; + +struct sem_undo_list { + refcount_t refcnt; + spinlock_t lock; + struct list_head list_proc; +}; + +struct sem_array { + struct kern_ipc_perm sem_perm; + time64_t sem_ctime; + struct list_head pending_alter; + struct list_head pending_const; + struct list_head list_id; + int sem_nsems; + int complex_count; + unsigned int use_global_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sem sems[0]; +}; + +typedef unsigned char u8___2; + +struct rand_data { + void *hash_state; + long: 32; + __u64 prev_time; + __u64 last_delta; + __s64 last_delta2; + unsigned int flags; + unsigned int osr; + unsigned char *mem; + unsigned int memlocation; + unsigned int memblocks; + unsigned int memblocksize; + unsigned int memaccessloops; + unsigned int rct_count; + unsigned int apt_cutoff; + unsigned int apt_cutoff_permanent; + unsigned int apt_observations; + unsigned int apt_count; + unsigned int apt_base; + unsigned int health_failure; + unsigned int apt_base_set: 1; + long: 32; +}; + +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + unsigned int authkeylen; + unsigned int enckeylen; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +struct essiv_instance_ctx { + union { + struct crypto_skcipher_spawn skcipher_spawn; + struct crypto_aead_spawn aead_spawn; + } u; + char essiv_cipher_name[128]; + char shash_driver_name[128]; +}; + +struct essiv_tfm_ctx { + union { + struct crypto_skcipher *skcipher; + struct crypto_aead *aead; + } u; + struct crypto_cipher *essiv_cipher; + struct crypto_shash *hash; + int ivoffset; +}; + +struct essiv_aead_request_ctx { + struct scatterlist sg[4]; + u8 *assoc; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct aead_request aead_req; +}; + +struct wait_page_key { + struct folio *folio; + int bit_nr; + int page_match; +}; + +struct io_async_rw { + size_t bytes_done; + long: 32; + struct iov_iter iter; + struct iov_iter_state iter_state; + struct iovec fast_iov; + struct iovec *free_iovec; + int free_iov_nr; + struct wait_page_queue wpq; +}; + +struct io_rw { + struct kiocb kiocb; + u64 addr; + u32 len; + rwf_t flags; +}; + +struct lwq { + spinlock_t lock; + struct llist_node *ready; + struct llist_head new; +}; + +typedef enum { + ZSTD_fast = 1, + ZSTD_dfast = 2, + ZSTD_greedy = 3, + ZSTD_lazy = 4, + ZSTD_lazy2 = 5, + ZSTD_btlazy2 = 6, + ZSTD_btopt = 7, + ZSTD_btultra = 8, + ZSTD_btultra2 = 9, +} ZSTD_strategy; + +typedef struct { + unsigned int windowLog; + unsigned int chainLog; + unsigned int hashLog; + unsigned int searchLog; + unsigned int minMatch; + unsigned int targetLength; + ZSTD_strategy strategy; +} ZSTD_compressionParameters; + +typedef enum { + ZSTD_ps_auto = 0, + ZSTD_ps_enable = 1, + ZSTD_ps_disable = 2, +} ZSTD_paramSwitch_e; + +typedef unsigned int FSE_CTable; + +typedef enum { + FSE_repeat_none = 0, + FSE_repeat_check = 1, + FSE_repeat_valid = 2, +} FSE_repeat; + +typedef size_t HUF_CElt; + +typedef enum { + HUF_repeat_none = 0, + HUF_repeat_check = 1, + HUF_repeat_valid = 2, +} HUF_repeat; + +typedef enum { + ZSTD_no_overlap = 0, + ZSTD_overlap_src_before_dst = 1, +} ZSTD_overlap_e; + +struct seqDef_s { + U32 offBase; + U16 litLength; + U16 mlBase; +}; + +typedef struct seqDef_s seqDef; + +typedef enum { + ZSTD_llt_none = 0, + ZSTD_llt_literalLength = 1, + ZSTD_llt_matchLength = 2, +} ZSTD_longLengthType_e; + +typedef struct { + seqDef *sequencesStart; + seqDef *sequences; + BYTE *litStart; + BYTE *lit; + BYTE *llCode; + BYTE *mlCode; + BYTE *ofCode; + size_t maxNbSeq; + size_t maxNbLit; + ZSTD_longLengthType_e longLengthType; + U32 longLengthPos; +} seqStore_t; + +typedef struct { + HUF_CElt CTable[257]; + HUF_repeat repeatMode; +} ZSTD_hufCTables_t; + +typedef struct { + FSE_CTable offcodeCTable[193]; + FSE_CTable matchlengthCTable[363]; + FSE_CTable litlengthCTable[329]; + FSE_repeat offcode_repeatMode; + FSE_repeat matchlength_repeatMode; + FSE_repeat litlength_repeatMode; +} ZSTD_fseCTables_t; + +typedef struct { + ZSTD_hufCTables_t huf; + ZSTD_fseCTables_t fse; +} ZSTD_entropyCTables_t; + +typedef struct { + U32 off; + U32 len; +} ZSTD_match_t; + +typedef struct { + U32 offset; + U32 litLength; + U32 matchLength; +} rawSeq; + +typedef struct { + rawSeq *seq; + size_t pos; + size_t posInSequence; + size_t size; + size_t capacity; +} rawSeqStore_t; + +typedef struct { + int price; + U32 off; + U32 mlen; + U32 litlen; + U32 rep[3]; +} ZSTD_optimal_t; + +typedef enum { + zop_dynamic = 0, + zop_predef = 1, +} ZSTD_OptPrice_e; + +typedef struct { + unsigned int *litFreq; + unsigned int *litLengthFreq; + unsigned int *matchLengthFreq; + unsigned int *offCodeFreq; + ZSTD_match_t *matchTable; + ZSTD_optimal_t *priceTable; + U32 litSum; + U32 litLengthSum; + U32 matchLengthSum; + U32 offCodeSum; + U32 litSumBasePrice; + U32 litLengthSumBasePrice; + U32 matchLengthSumBasePrice; + U32 offCodeSumBasePrice; + ZSTD_OptPrice_e priceType; + const ZSTD_entropyCTables_t *symbolCosts; + ZSTD_paramSwitch_e literalCompressionMode; +} optState_t; + +typedef struct { + const BYTE *nextSrc; + const BYTE *base; + const BYTE *dictBase; + U32 dictLimit; + U32 lowLimit; + U32 
nbOverflowCorrections; +} ZSTD_window_t; + +struct ZSTD_matchState_t; + +typedef struct ZSTD_matchState_t ZSTD_matchState_t; + +struct ZSTD_matchState_t { + ZSTD_window_t window; + U32 loadedDictEnd; + U32 nextToUpdate; + U32 hashLog3; + U32 rowHashLog; + U16 *tagTable; + U32 hashCache[8]; + U32 *hashTable; + U32 *hashTable3; + U32 *chainTable; + U32 forceNonContiguous; + int dedicatedDictSearch; + optState_t opt; + const ZSTD_matchState_t *dictMatchState; + ZSTD_compressionParameters cParams; + const rawSeqStore_t *ldmSeqStore; +}; + +typedef enum { + ZSTD_dtlm_fast = 0, + ZSTD_dtlm_full = 1, +} ZSTD_dictTableLoadMethod_e; + +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +struct dim_stats { + int ppms; + int bpms; + int epms; + int cpms; + int cpe_ratio; +}; + +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; + long: 32; +}; + +enum dim_tune_state { + DIM_PARKING_ON_TOP = 0, + DIM_PARKING_TIRED = 1, + DIM_GOING_RIGHT = 2, + DIM_GOING_LEFT = 3, +}; + +struct pci_fixup { + u16 vendor; + u16 device; + u32 class; + unsigned int class_shift; + void (*hook)(struct pci_dev *); +}; + +typedef unsigned int __kernel_old_dev_t; + +enum { + DISK_EVENT_FLAG_POLL = 1, + DISK_EVENT_FLAG_UEVENT = 2, + DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 4, +}; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +enum { + LO_FLAGS_READ_ONLY = 1, + LO_FLAGS_AUTOCLEAR = 4, + LO_FLAGS_PARTSCAN = 8, + LO_FLAGS_DIRECT_IO = 16, +}; + +struct loop_info { + int lo_number; + __kernel_old_dev_t lo_device; + long unsigned int lo_inode; + __kernel_old_dev_t lo_rdevice; + int lo_offset; + int lo_encrypt_type; + int lo_encrypt_key_size; + int lo_flags; + char lo_name[64]; + unsigned char lo_encrypt_key[32]; + long unsigned int lo_init[2]; + char reserved[4]; +}; + +struct loop_info64 { + __u64 lo_device; + __u64 lo_inode; + __u64 lo_rdevice; + __u64 lo_offset; + __u64 lo_sizelimit; + __u32 lo_number; + __u32 lo_encrypt_type; + __u32 lo_encrypt_key_size; + __u32 lo_flags; + __u8 lo_file_name[64]; + __u8 lo_crypt_name[64]; + __u8 lo_encrypt_key[32]; + __u64 lo_init[2]; +}; + +struct loop_config { + __u32 fd; + __u32 block_size; + struct loop_info64 info; + __u64 __reserved[8]; +}; + +enum { + Lo_unbound = 0, + Lo_bound = 1, + Lo_rundown = 2, + Lo_deleting = 3, +}; + +struct loop_device { + int lo_number; + long: 32; + loff_t lo_offset; + loff_t lo_sizelimit; + int lo_flags; + char lo_file_name[64]; + struct file *lo_backing_file; + struct block_device *lo_device; + gfp_t old_gfp_mask; + spinlock_t lo_lock; + int lo_state; + spinlock_t lo_work_lock; + struct workqueue_struct *workqueue; + struct work_struct rootcg_work; + struct list_head rootcg_cmd_list; + struct list_head idle_worker_list; + struct rb_root worker_tree; + struct timer_list timer; + bool use_dio; + bool sysfs_inited; + struct request_queue *lo_queue; + struct blk_mq_tag_set tag_set; + struct gendisk *lo_disk; + struct mutex lo_mutex; + bool idr_visible; +}; + +struct loop_cmd { + struct list_head list_entry; + bool use_aio; + atomic_t ref; + long int ret; + long: 32; + struct kiocb iocb; + struct bio_vec *bvec; + struct cgroup_subsys_state *blkcg_css; + struct cgroup_subsys_state *memcg_css; + long: 32; +}; + +struct loop_worker { + struct rb_node rb_node; + struct work_struct 
work; + struct list_head cmd_list; + struct list_head idle_list; + struct loop_device *lo; + struct cgroup_subsys_state *blkcg_css; + long unsigned int last_ran_at; +}; + +enum dmi_field { + DMI_NONE = 0, + DMI_BIOS_VENDOR = 1, + DMI_BIOS_VERSION = 2, + DMI_BIOS_DATE = 3, + DMI_BIOS_RELEASE = 4, + DMI_EC_FIRMWARE_RELEASE = 5, + DMI_SYS_VENDOR = 6, + DMI_PRODUCT_NAME = 7, + DMI_PRODUCT_VERSION = 8, + DMI_PRODUCT_SERIAL = 9, + DMI_PRODUCT_UUID = 10, + DMI_PRODUCT_SKU = 11, + DMI_PRODUCT_FAMILY = 12, + DMI_BOARD_VENDOR = 13, + DMI_BOARD_NAME = 14, + DMI_BOARD_VERSION = 15, + DMI_BOARD_SERIAL = 16, + DMI_BOARD_ASSET_TAG = 17, + DMI_CHASSIS_VENDOR = 18, + DMI_CHASSIS_TYPE = 19, + DMI_CHASSIS_VERSION = 20, + DMI_CHASSIS_SERIAL = 21, + DMI_CHASSIS_ASSET_TAG = 22, + DMI_STRING_MAX = 23, + DMI_OEM_STRING = 24, +}; + +enum { + NVME_CMBSZ_SQS = 1, + NVME_CMBSZ_CQS = 2, + NVME_CMBSZ_LISTS = 4, + NVME_CMBSZ_RDS = 8, + NVME_CMBSZ_WDS = 16, + NVME_CMBSZ_SZ_SHIFT = 12, + NVME_CMBSZ_SZ_MASK = 1048575, + NVME_CMBSZ_SZU_SHIFT = 8, + NVME_CMBSZ_SZU_MASK = 15, +}; + +enum { + NVME_CMBMSC_CRE = 1, + NVME_CMBMSC_CMSE = 2, +}; + +enum { + NVME_SGL_FMT_DATA_DESC = 0, + NVME_SGL_FMT_SEG_DESC = 2, + NVME_SGL_FMT_LAST_SEG_DESC = 3, + NVME_KEY_SGL_FMT_DATA_DESC = 4, + NVME_TRANSPORT_SGL_DATA_DESC = 5, +}; + +enum { + NVME_HOST_MEM_ENABLE = 1, + NVME_HOST_MEM_RETURN = 2, +}; + +struct nvme_host_mem_buf_desc { + __le64 addr; + __le32 size; + __u32 rsvd; +}; + +struct nvme_completion { + union nvme_result result; + __le16 sq_head; + __le16 sq_id; + __u16 command_id; + __le16 status; +}; + +struct nvme_queue; + +struct nvme_dev { + struct nvme_queue *queues; + struct blk_mq_tag_set tagset; + struct blk_mq_tag_set admin_tagset; + u32 *dbs; + struct device *dev; + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool; + unsigned int online_queues; + unsigned int max_qid; + unsigned int io_queues[3]; + unsigned int num_vecs; + u32 q_depth; + int io_sqes; + u32 db_stride; + void *bar; + long unsigned int bar_mapped_size; + struct mutex shutdown_lock; + bool subsystem; + long: 32; + u64 cmb_size; + bool cmb_use_sqes; + u32 cmbsz; + u32 cmbloc; + long: 32; + struct nvme_ctrl ctrl; + u32 last_ps; + bool hmb; + struct sg_table *hmb_sgt; + mempool_t *iod_mempool; + mempool_t *iod_meta_mempool; + __le32 *dbbuf_dbs; + dma_addr_t dbbuf_dbs_dma_addr; + __le32 *dbbuf_eis; + dma_addr_t dbbuf_eis_dma_addr; + long: 32; + u64 host_mem_size; + u32 nr_host_mem_descs; + u32 host_mem_descs_size; + dma_addr_t host_mem_descs_dma; + struct nvme_host_mem_buf_desc *host_mem_descs; + void **host_mem_desc_bufs; + unsigned int nr_allocated_queues; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; +}; + +struct nvme_queue { + struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t cq_poll_lock; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 *q_db; + u32 q_depth; + u16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + long unsigned int flags; + __le32 *dbbuf_sq_db; + __le32 *dbbuf_cq_db; + __le32 *dbbuf_sq_ei; + __le32 *dbbuf_cq_ei; + struct completion delete_done; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +union nvme_descriptor { + struct nvme_sgl_desc *sg_list; + __le64 *prp_list; +}; + +struct nvme_iod { + struct nvme_request req; + struct nvme_command cmd; + bool aborted; + s8 nr_allocations; + unsigned int dma_len; + dma_addr_t first_dma; + dma_addr_t meta_dma; + struct sg_table sgt; + struct sg_table meta_sgt; + union nvme_descriptor meta_list; + union nvme_descriptor list[5]; +}; + +typedef __u32 __hc32; + +struct ehci_stats { + long unsigned int normal; + long unsigned int error; + long unsigned int iaa; + long unsigned int lost_iaa; + long unsigned int complete; + long unsigned int unlink; +}; + +struct ehci_per_sched { + struct usb_device *udev; + struct usb_host_endpoint *ep; + struct list_head ps_list; + u16 tt_usecs; + u16 cs_mask; + u16 period; + u16 phase; + u8 bw_phase; + u8 phase_uf; + u8 usecs; + u8 c_usecs; + u8 bw_uperiod; + u8 bw_period; +}; + +enum ehci_rh_state { + EHCI_RH_HALTED = 0, + EHCI_RH_SUSPENDED = 1, + EHCI_RH_RUNNING = 2, + EHCI_RH_STOPPING = 3, +}; + +enum ehci_hrtimer_event { + EHCI_HRTIMER_POLL_ASS = 0, + EHCI_HRTIMER_POLL_PSS = 1, + EHCI_HRTIMER_POLL_DEAD = 2, + EHCI_HRTIMER_UNLINK_INTR = 3, + EHCI_HRTIMER_FREE_ITDS = 4, + EHCI_HRTIMER_ACTIVE_UNLINK = 5, + EHCI_HRTIMER_START_UNLINK_INTR = 6, + EHCI_HRTIMER_ASYNC_UNLINKS = 7, + EHCI_HRTIMER_IAA_WATCHDOG = 8, + EHCI_HRTIMER_DISABLE_PERIODIC = 9, + EHCI_HRTIMER_DISABLE_ASYNC = 10, + EHCI_HRTIMER_IO_WATCHDOG = 11, + EHCI_HRTIMER_NUM_EVENTS = 12, +}; + +struct ehci_caps; + +struct ehci_regs; + +struct ehci_dbg_port; + +struct ehci_qh; + +union ehci_shadow; + +struct ehci_itd; + +struct ehci_sitd; + +struct ehci_hcd { + enum ehci_hrtimer_event next_hrtimer_event; + unsigned int enabled_hrtimer_events; + ktime_t hr_timeouts[12]; + struct hrtimer hrtimer; + int PSS_poll_count; + int ASS_poll_count; + int died_poll_count; + struct ehci_caps *caps; + struct ehci_regs *regs; + struct ehci_dbg_port *debug; + __u32 hcs_params; + spinlock_t lock; + enum ehci_rh_state rh_state; + bool scanning: 1; + bool need_rescan: 1; + bool intr_unlinking: 1; + bool iaa_in_progress: 1; + bool async_unlinking: 1; + bool shutdown: 1; + struct ehci_qh *qh_scan_next; + struct ehci_qh *async; + struct ehci_qh *dummy; + struct list_head async_unlink; + struct list_head async_idle; + unsigned int async_unlink_cycle; + unsigned int async_count; + __hc32 old_current; + __hc32 old_token; + unsigned int periodic_size; + __hc32 *periodic; + dma_addr_t periodic_dma; + struct list_head intr_qh_list; + unsigned int i_thresh; + union ehci_shadow *pshadow; + struct list_head intr_unlink_wait; + struct list_head intr_unlink; + unsigned int intr_unlink_wait_cycle; + unsigned int intr_unlink_cycle; + unsigned int now_frame; + unsigned int last_iso_frame; + unsigned int intr_count; + unsigned int isoc_count; + unsigned int periodic_count; + unsigned int uframe_periodic_max; + struct list_head cached_itd_list; + struct ehci_itd *last_itd_to_free; + struct list_head cached_sitd_list; + struct ehci_sitd *last_sitd_to_free; + long unsigned int reset_done[15]; + long unsigned int bus_suspended; + long unsigned int companion_ports; + long unsigned int owned_ports; + long unsigned int port_c_suspend; + long unsigned int suspended_ports; + long unsigned int resuming_ports; + struct dma_pool *qh_pool; + struct dma_pool *qtd_pool; + struct dma_pool *itd_pool; + struct dma_pool *sitd_pool; + unsigned int 
random_frame; + long unsigned int next_statechange; + long: 32; + ktime_t last_periodic_enable; + u32 command; + unsigned int no_selective_suspend: 1; + unsigned int has_fsl_port_bug: 1; + unsigned int has_fsl_hs_errata: 1; + unsigned int has_fsl_susp_errata: 1; + unsigned int has_ci_pec_bug: 1; + unsigned int big_endian_mmio: 1; + unsigned int big_endian_desc: 1; + unsigned int big_endian_capbase: 1; + unsigned int has_amcc_usb23: 1; + unsigned int need_io_watchdog: 1; + unsigned int amd_pll_fix: 1; + unsigned int use_dummy_qh: 1; + unsigned int has_synopsys_hc_bug: 1; + unsigned int frame_index_bug: 1; + unsigned int need_oc_pp_cycle: 1; + unsigned int imx28_write_fix: 1; + unsigned int spurious_oc: 1; + unsigned int is_aspeed: 1; + unsigned int zx_wakeup_clear_needed: 1; + __hc32 *ohci_hcctrl_reg; + unsigned int has_hostpc: 1; + unsigned int has_tdi_phy_lpm: 1; + unsigned int has_ppcd: 1; + u8 sbrn; + struct ehci_stats stats; + struct dentry *debug_dir; + u8 bandwidth[64]; + u8 tt_budget[64]; + struct list_head tt_list; + long: 32; + long unsigned int priv[0]; +}; + +struct ehci_caps { + u32 hc_capbase; + u32 hcs_params; + u32 hcc_params; + u8 portroute[8]; +}; + +struct ehci_regs { + u32 command; + u32 status; + u32 intr_enable; + u32 frame_index; + u32 segment; + u32 frame_list; + u32 async_next; + u32 reserved1[2]; + u32 txfill_tuning; + u32 reserved2[6]; + u32 configured_flag; + union { + u32 port_status[15]; + struct { + u32 reserved3[9]; + u32 usbmode; + }; + }; + union { + struct { + u32 reserved4; + u32 hostpc[15]; + }; + u32 brcm_insnreg[4]; + }; + u32 reserved5[2]; + u32 usbmode_ex; +}; + +struct ehci_dbg_port { + u32 control; + u32 pids; + u32 data03; + u32 data47; + u32 address; +}; + +struct ehci_fstn; + +union ehci_shadow { + struct ehci_qh *qh; + struct ehci_itd *itd; + struct ehci_sitd *sitd; + struct ehci_fstn *fstn; + __hc32 *hw_next; + void *ptr; +}; + +struct ehci_qh_hw; + +struct ehci_qtd; + +struct ehci_qh { + struct ehci_qh_hw *hw; + dma_addr_t qh_dma; + union ehci_shadow qh_next; + struct list_head qtd_list; + struct list_head intr_node; + struct ehci_qtd *dummy; + struct list_head unlink_node; + struct ehci_per_sched ps; + unsigned int unlink_cycle; + u8 qh_state; + u8 xacterrs; + u8 unlink_reason; + u8 gap_uf; + unsigned int is_out: 1; + unsigned int clearing_tt: 1; + unsigned int dequeue_during_giveback: 1; + unsigned int should_be_inactive: 1; +}; + +struct ehci_iso_stream; + +struct ehci_itd { + __hc32 hw_next; + __hc32 hw_transaction[8]; + __hc32 hw_bufp[7]; + __hc32 hw_bufp_hi[7]; + dma_addr_t itd_dma; + union ehci_shadow itd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head itd_list; + unsigned int frame; + unsigned int pg; + unsigned int index[8]; + long: 32; +}; + +struct ehci_sitd { + __hc32 hw_next; + __hc32 hw_fullspeed_ep; + __hc32 hw_uframe; + __hc32 hw_results; + __hc32 hw_buf[2]; + __hc32 hw_backpointer; + __hc32 hw_buf_hi[2]; + dma_addr_t sitd_dma; + union ehci_shadow sitd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head sitd_list; + unsigned int frame; + unsigned int index; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_qtd { + __hc32 hw_next; + __hc32 hw_alt_next; + __hc32 hw_token; + __hc32 hw_buf[5]; + __hc32 hw_buf_hi[5]; + dma_addr_t qtd_dma; + struct list_head qtd_list; + struct urb *urb; + size_t length; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_fstn { + __hc32 hw_next; + __hc32 
hw_prev; + dma_addr_t fstn_dma; + union ehci_shadow fstn_next; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_qh_hw { + __hc32 hw_next; + __hc32 hw_info1; + __hc32 hw_info2; + __hc32 hw_current; + __hc32 hw_qtd_next; + __hc32 hw_alt_next; + __hc32 hw_token; + __hc32 hw_buf[5]; + __hc32 hw_buf_hi[5]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_iso_packet { + u64 bufp; + __hc32 transaction; + u8 cross; + u32 buf1; + long: 32; +}; + +struct ehci_iso_sched { + struct list_head td_list; + unsigned int span; + unsigned int first_packet; + struct ehci_iso_packet packet[0]; +}; + +struct ehci_iso_stream { + struct ehci_qh_hw *hw; + u8 bEndpointAddress; + u8 highspeed; + struct list_head td_list; + struct list_head free_list; + struct ehci_per_sched ps; + unsigned int next_uframe; + __hc32 splits; + u16 uperiod; + u16 maxp; + unsigned int bandwidth; + __hc32 buf0; + __hc32 buf1; + __hc32 buf2; + __hc32 address; +}; + +struct ehci_tt { + u16 bandwidth[8]; + struct list_head tt_list; + struct list_head ps_list; + struct usb_tt *usb_tt; + int tt_port; +}; + +struct ehci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*port_power)(struct usb_hcd *, int, bool); +}; + +struct debug_buffer { + ssize_t (*fill_func)(struct debug_buffer *); + struct usb_bus *bus; + struct mutex mutex; + size_t count; + char *output_buf; + size_t alloc_size; +}; + +struct hid_control_fifo { + unsigned char dir; + struct hid_report *report; + char *raw_report; +}; + +struct hid_output_fifo { + struct hid_report *report; + char *raw_report; +}; + +struct hid_class_descriptor { + __u8 bDescriptorType; + __le16 wDescriptorLength; +} __attribute__((packed)); + +struct hid_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdHID; + __u8 bCountryCode; + __u8 bNumDescriptors; + struct hid_class_descriptor desc[1]; +} __attribute__((packed)); + +struct usbhid_device { + struct hid_device *hid; + struct usb_interface *intf; + int ifnum; + unsigned int bufsize; + struct urb *urbin; + char *inbuf; + dma_addr_t inbuf_dma; + struct urb *urbctrl; + struct usb_ctrlrequest *cr; + struct hid_control_fifo ctrl[256]; + unsigned char ctrlhead; + unsigned char ctrltail; + char *ctrlbuf; + dma_addr_t ctrlbuf_dma; + long unsigned int last_ctrl; + struct urb *urbout; + struct hid_output_fifo out[256]; + unsigned char outhead; + unsigned char outtail; + char *outbuf; + dma_addr_t outbuf_dma; + long unsigned int last_out; + struct mutex mutex; + spinlock_t lock; + long unsigned int iofl; + struct timer_list io_retry; + long unsigned int stop_retry; + unsigned int retry_delay; + struct work_struct reset_work; + wait_queue_head_t wait; +}; + +struct dst_cache_pcpu { + long unsigned int refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; + +enum ethtool_multicast_groups { + ETHNL_MCGRP_MONITOR = 0, +}; + +struct genl_dumpit_info { + struct genl_split_ops op; + struct genl_info info; +}; + +enum ethnl_sock_type { + ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH = 0, +}; + +struct ethnl_sock_priv { + struct net_device *dev; + u32 portid; + enum ethnl_sock_type type; +}; + +struct ethnl_dump_ctx { + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct ethnl_reply_data *reply_data; + long unsigned int pos_ifindex; +}; + +typedef void (*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *); + +enum ethtool_c33_pse_ext_state { + 
ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION = 1, + ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID = 2, + ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE = 3, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED = 4, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM = 5, + ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED = 6, + ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE = 7, + ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE = 8, + ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED = 9, +}; + +enum ethtool_c33_pse_ext_substate_error_condition { + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT = 3, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON = 4, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS = 5, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF = 6, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN = 7, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE = 8, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP = 9, +}; + +enum ethtool_c33_pse_ext_substate_mr_pse_enable { + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE = 1, +}; + +enum ethtool_c33_pse_ext_substate_option_detect_ted { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR = 2, +}; + +enum ethtool_c33_pse_ext_substate_option_vport_lim { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION = 3, +}; + +enum ethtool_c33_pse_ext_substate_ovld_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD = 1, +}; + +enum ethtool_c33_pse_ext_substate_power_not_available { + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT = 3, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT = 4, +}; + +enum ethtool_c33_pse_ext_substate_short_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION = 1, +}; + +enum ethtool_c33_pse_admin_state { + ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED = 2, + ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED = 3, +}; + +enum ethtool_c33_pse_pw_d_status { + ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED = 2, + ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING = 3, + ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING = 4, + ETHTOOL_C33_PSE_PW_D_STATUS_TEST = 5, + ETHTOOL_C33_PSE_PW_D_STATUS_FAULT = 6, + ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT = 7, +}; + +enum ethtool_podl_pse_admin_state { + ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED = 2, + ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED = 3, +}; + +enum ethtool_podl_pse_pw_d_status { + ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED = 2, + ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING = 3, + ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING = 4, + ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP = 5, + ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE = 6, + ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR = 7, +}; + +struct ethtool_c33_pse_ext_state_info { + enum ethtool_c33_pse_ext_state c33_pse_ext_state; + union { + enum 
ethtool_c33_pse_ext_substate_error_condition error_condition; + enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable; + enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted; + enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim; + enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected; + enum ethtool_c33_pse_ext_substate_power_not_available power_not_available; + enum ethtool_c33_pse_ext_substate_short_detected short_detected; + u32 __c33_pse_ext_substate; + }; +}; + +struct ethtool_c33_pse_pw_limit_range { + u32 min; + u32 max; +}; + +struct pse_control_config { + enum ethtool_podl_pse_admin_state podl_admin_control; + enum ethtool_c33_pse_admin_state c33_admin_control; +}; + +struct pse_control_status { + enum ethtool_podl_pse_admin_state podl_admin_state; + enum ethtool_podl_pse_pw_d_status podl_pw_status; + enum ethtool_c33_pse_admin_state c33_admin_state; + enum ethtool_c33_pse_pw_d_status c33_pw_status; + u32 c33_pw_class; + u32 c33_actual_pw; + struct ethtool_c33_pse_ext_state_info c33_ext_state_info; + u32 c33_avail_pw_limit; + struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges; + u32 c33_pw_limit_nb_ranges; +}; + +enum { + ETHTOOL_A_C33_PSE_PW_LIMIT_UNSPEC = 0, + ETHTOOL_A_C33_PSE_PW_LIMIT_MIN = 1, + ETHTOOL_A_C33_PSE_PW_LIMIT_MAX = 2, +}; + +struct pse_reply_data { + struct ethnl_reply_data base; + struct pse_control_status status; +}; + +struct nf_nat_range2 { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; + union nf_conntrack_man_proto base_proto; +}; + +union nf_conntrack_nat_help {}; + +struct nf_conn_nat { + union nf_conntrack_nat_help help; + int masq_index; +}; + +struct nf_nat_lookup_hook_priv { + struct nf_hook_entries *entries; + struct callback_head callback_head; +}; + +struct nf_nat_hooks_net { + struct nf_hook_ops *nat_hook_ops; + unsigned int users; +}; + +struct nat_net { + struct nf_nat_hooks_net nat_proto_net[11]; +}; + +struct nf_nat_proto_clean { + u8 l3proto; + u8 l4proto; +}; + +enum ctattr_nat { + CTA_NAT_UNSPEC = 0, + CTA_NAT_V4_MINIP = 1, + CTA_NAT_V4_MAXIP = 2, + CTA_NAT_PROTO = 3, + CTA_NAT_V6_MINIP = 4, + CTA_NAT_V6_MAXIP = 5, + __CTA_NAT_MAX = 6, +}; + +enum ctattr_protonat { + CTA_PROTONAT_UNSPEC = 0, + CTA_PROTONAT_PORT_MIN = 1, + CTA_PROTONAT_PORT_MAX = 2, + __CTA_PROTONAT_MAX = 3, +}; + +enum nft_set_elem_flags { + NFT_SET_ELEM_INTERVAL_END = 1, + NFT_SET_ELEM_CATCHALL = 2, +}; + +struct nft_set_type { + const struct nft_set_ops ops; + u32 features; +}; + +struct nft_timeout { + u64 timeout; + u64 expiration; +}; + +struct nft_trans_gc { + struct list_head list; + struct net *net; + struct nft_set *set; + u32 seq; + u16 count; + struct nft_elem_priv *priv[256]; + struct callback_head rcu; +}; + +struct nftables_pernet { + struct list_head tables; + struct list_head commit_list; + struct list_head commit_set_list; + struct list_head binding_list; + struct list_head module_list; + struct list_head notify_list; + struct mutex commit_mutex; + u64 table_handle; + u64 tstamp; + unsigned int base_seq; + unsigned int gc_seq; + u8 validate_state; + long: 32; +}; + +struct nft_rbtree { + struct rb_root root; + rwlock_t lock; + seqcount_rwlock_t count; + long unsigned int last_gc; +}; + +struct nft_rbtree_elem { + struct nft_elem_priv priv; + struct rb_node node; + struct nft_set_ext ext; +}; + +union tcp_word_hdr { + struct tcphdr hdr; + __be32 words[5]; +}; + +enum { + TCP_FLAG_CWR 
= 8388608, + TCP_FLAG_ECE = 4194304, + TCP_FLAG_URG = 2097152, + TCP_FLAG_ACK = 1048576, + TCP_FLAG_PSH = 524288, + TCP_FLAG_RST = 262144, + TCP_FLAG_SYN = 131072, + TCP_FLAG_FIN = 65536, + TCP_RESERVED_BITS = 251658240, + TCP_DATA_OFFSET = 4026531840, +}; + +enum tcp_tw_status { + TCP_TW_SUCCESS = 0, + TCP_TW_RST = 1, + TCP_TW_ACK = 2, + TCP_TW_SYN = 3, +}; + +struct ip_tunnel_parm { + char name[16]; + int link; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + struct iphdr iph; +}; + +enum tunnel_encap_types { + TUNNEL_ENCAP_NONE = 0, + TUNNEL_ENCAP_FOU = 1, + TUNNEL_ENCAP_GUE = 2, + TUNNEL_ENCAP_MPLS = 3, +}; + +struct gro_cell; + +struct gro_cells { + struct gro_cell *cells; +}; + +struct ip_tunnel_prl_entry { + struct ip_tunnel_prl_entry *next; + __be32 addr; + u16 flags; + struct callback_head callback_head; +}; + +struct ip_tunnel { + struct ip_tunnel *next; + struct hlist_node hash_node; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net *net; + long unsigned int err_time; + int err_count; + u32 i_seqno; + atomic_t o_seqno; + int tun_hlen; + u32 index; + u8 erspan_ver; + u8 dir; + u16 hwid; + struct dst_cache dst_cache; + struct ip_tunnel_parm_kern parms; + int mlink; + int encap_hlen; + int hlen; + struct ip_tunnel_encap encap; + struct ip_tunnel_prl_entry *prl; + unsigned int prl_count; + unsigned int ip_tnl_net_id; + struct gro_cells gro_cells; + __u32 fwmark; + bool collect_md; + bool ignore_df; +}; + +struct tnl_ptk_info { + long unsigned int flags[1]; + __be16 proto; + __be32 key; + __be32 seq; + int hdr_len; +}; + +struct ip_tunnel_net { + struct net_device *fb_tunnel_dev; + struct rtnl_link_ops *rtnl_link_ops; + struct hlist_head tunnels[128]; + struct ip_tunnel *collect_md_tun; + int type; +}; + +union tcp_ao_addr { + struct in_addr a4; + struct in6_addr a6; +}; + +struct tcp_ao_hdr { + u8 kind; + u8 length; + u8 keyid; + u8 rnext_keyid; +}; + +struct tcp_ao_key { + struct hlist_node node; + union tcp_ao_addr addr; + u8 key[80]; + unsigned int tcp_sigpool_id; + unsigned int digest_size; + int l3index; + u8 prefixlen; + u8 family; + u8 keylen; + u8 keyflags; + u8 sndid; + u8 rcvid; + u8 maclen; + struct callback_head rcu; + long: 32; + atomic64_t pkt_good; + atomic64_t pkt_bad; + u8 traffic_keys[0]; +}; + +struct tcp_md5sig_key { + struct hlist_node node; + u8 keylen; + u8 family; + u8 prefixlen; + u8 flags; + union tcp_ao_addr addr; + int l3index; + u8 key[80]; + struct callback_head rcu; +}; + +enum tcp_seq_states { + TCP_SEQ_STATE_LISTENING = 0, + TCP_SEQ_STATE_ESTABLISHED = 1, +}; + +struct tcp_seq_afinfo { + sa_family_t family; +}; + +struct tcp_iter_state { + struct seq_net_private p; + enum tcp_seq_states state; + struct sock *syn_wait_sk; + int bucket; + int offset; + int sbucket; + int num; + long: 32; + loff_t last_pos; +}; + +struct tcp_key { + union { + struct { + struct tcp_ao_key *ao_key; + char *traffic_key; + u32 sne; + u8 rcv_next; + }; + struct tcp_md5sig_key *md5_key; + }; + enum { + TCP_KEY_NONE = 0, + TCP_KEY_MD5 = 1, + TCP_KEY_AO = 2, + } type; +}; + +typedef u32 inet6_ehashfn_t(const struct net *, const struct in6_addr *, const u16, const struct in6_addr *, const __be16); + +struct flow_keys_basic { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; +}; + +struct sockaddr_pkt { + short unsigned int spkt_family; + unsigned char spkt_device[14]; + __be16 spkt_protocol; +}; + +struct sockaddr_ll { + short unsigned int sll_family; + __be16 sll_protocol; + int sll_ifindex; + short 
unsigned int sll_hatype; + unsigned char sll_pkttype; + unsigned char sll_halen; + unsigned char sll_addr[8]; +}; + +struct tpacket_stats { + unsigned int tp_packets; + unsigned int tp_drops; +}; + +struct tpacket_stats_v3 { + unsigned int tp_packets; + unsigned int tp_drops; + unsigned int tp_freeze_q_cnt; +}; + +struct tpacket_rollover_stats { + __u64 tp_all; + __u64 tp_huge; + __u64 tp_failed; +}; + +union tpacket_stats_u { + struct tpacket_stats stats1; + struct tpacket_stats_v3 stats3; +}; + +struct tpacket_auxdata { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; +}; + +struct tpacket_hdr { + long unsigned int tp_status; + unsigned int tp_len; + unsigned int tp_snaplen; + short unsigned int tp_mac; + short unsigned int tp_net; + unsigned int tp_sec; + unsigned int tp_usec; +}; + +struct tpacket2_hdr { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u32 tp_sec; + __u32 tp_nsec; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u8 tp_padding[4]; +}; + +struct tpacket_hdr_variant1 { + __u32 tp_rxhash; + __u32 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u16 tp_padding; +}; + +struct tpacket3_hdr { + __u32 tp_next_offset; + __u32 tp_sec; + __u32 tp_nsec; + __u32 tp_snaplen; + __u32 tp_len; + __u32 tp_status; + __u16 tp_mac; + __u16 tp_net; + union { + struct tpacket_hdr_variant1 hv1; + }; + __u8 tp_padding[8]; +}; + +struct tpacket_bd_ts { + unsigned int ts_sec; + union { + unsigned int ts_usec; + unsigned int ts_nsec; + }; +}; + +struct tpacket_hdr_v1 { + __u32 block_status; + __u32 num_pkts; + __u32 offset_to_first_pkt; + __u32 blk_len; + __u64 seq_num; + struct tpacket_bd_ts ts_first_pkt; + struct tpacket_bd_ts ts_last_pkt; +}; + +union tpacket_bd_header_u { + struct tpacket_hdr_v1 bh1; +}; + +struct tpacket_block_desc { + __u32 version; + __u32 offset_to_priv; + union tpacket_bd_header_u hdr; +}; + +enum tpacket_versions { + TPACKET_V1 = 0, + TPACKET_V2 = 1, + TPACKET_V3 = 2, +}; + +struct tpacket_req { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; +}; + +struct tpacket_req3 { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; + unsigned int tp_retire_blk_tov; + unsigned int tp_sizeof_priv; + unsigned int tp_feature_req_word; +}; + +union tpacket_req_u { + struct tpacket_req req; + struct tpacket_req3 req3; +}; + +struct fanout_args { + __u16 type_flags; + __u16 id; + __u32 max_num_members; +}; + +struct sock_skb_cb { + u32 dropcount; +}; + +enum ip_defrag_users { + IP_DEFRAG_LOCAL_DELIVER = 0, + IP_DEFRAG_CALL_RA_CHAIN = 1, + IP_DEFRAG_CONNTRACK_IN = 2, + __IP_DEFRAG_CONNTRACK_IN_END = 65537, + IP_DEFRAG_CONNTRACK_OUT = 65538, + __IP_DEFRAG_CONNTRACK_OUT_END = 131073, + IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, + __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, + IP_DEFRAG_VS_IN = 196610, + IP_DEFRAG_VS_OUT = 196611, + IP_DEFRAG_VS_FWD = 196612, + IP_DEFRAG_AF_PACKET = 196613, + IP_DEFRAG_MACVLAN = 196614, +}; + +typedef __u16 __virtio16; + +struct virtio_net_hdr { + __u8 flags; + __u8 gso_type; + __virtio16 hdr_len; + __virtio16 gso_size; + __virtio16 csum_start; + __virtio16 csum_offset; +}; + +struct virtio_net_hdr_mrg_rxbuf { + struct virtio_net_hdr hdr; + __virtio16 num_buffers; +}; + +struct packet_mclist { + struct packet_mclist *next; + int ifindex; + int count; + short unsigned int type; + short unsigned int alen; + unsigned char 
addr[32]; +}; + +struct pgv; + +struct tpacket_kbdq_core { + struct pgv *pkbdq; + unsigned int feature_req_word; + unsigned int hdrlen; + unsigned char reset_pending_on_curr_blk; + unsigned char delete_blk_timer; + short unsigned int kactive_blk_num; + short unsigned int blk_sizeof_priv; + short unsigned int last_kactive_blk_num; + char *pkblk_start; + char *pkblk_end; + int kblk_size; + unsigned int max_frame_len; + unsigned int knum_blocks; + uint64_t knxt_seq_num; + char *prev; + char *nxt_offset; + struct sk_buff *skb; + rwlock_t blk_fill_in_prog_lock; + short unsigned int retire_blk_tov; + short unsigned int version; + long unsigned int tov_in_jiffies; + struct timer_list retire_blk_timer; +}; + +struct pgv { + char *buffer; +}; + +struct packet_ring_buffer { + struct pgv *pg_vec; + unsigned int head; + unsigned int frames_per_block; + unsigned int frame_size; + unsigned int frame_max; + unsigned int pg_vec_order; + unsigned int pg_vec_pages; + unsigned int pg_vec_len; + unsigned int *pending_refcnt; + long: 32; + union { + long unsigned int *rx_owner_map; + struct tpacket_kbdq_core prb_bdqc; + }; +}; + +struct packet_fanout { + possible_net_t net; + unsigned int num_members; + u32 max_num_members; + u16 id; + u8 type; + u8 flags; + union { + atomic_t rr_cur; + struct bpf_prog *bpf_prog; + }; + struct list_head list; + spinlock_t lock; + refcount_t sk_ref; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct packet_type prot_hook; + struct sock *arr[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct packet_rollover { + int sock; + atomic_long_t num; + atomic_long_t num_huge; + atomic_long_t num_failed; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u32 history[32]; +}; + +struct packet_sock { + struct sock sk; + struct packet_fanout *fanout; + union tpacket_stats_u stats; + struct packet_ring_buffer rx_ring; + struct packet_ring_buffer tx_ring; + int copy_thresh; + spinlock_t bind_lock; + struct mutex pg_vec_lock; + long unsigned int flags; + int ifindex; + u8 vnet_hdr_sz; + __be16 num; + struct packet_rollover *rollover; + struct packet_mclist *mclist; + atomic_long_t mapped; + enum tpacket_versions tp_version; + unsigned int tp_hdrlen; + unsigned int tp_reserve; + unsigned int tp_tstamp; + struct completion skb_completion; + struct net_device *cached_dev; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct packet_type prot_hook; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_t tp_drops; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum packet_sock_flags { + PACKET_SOCK_ORIGDEV = 0, + PACKET_SOCK_AUXDATA = 1, + PACKET_SOCK_TX_HAS_OFF = 2, + PACKET_SOCK_TP_LOSS = 3, + PACKET_SOCK_RUNNING = 4, + PACKET_SOCK_PRESSURE = 5, + PACKET_SOCK_QDISC_BYPASS = 6, +}; + +struct packet_mreq_max { + int mr_ifindex; + short unsigned int mr_type; + short unsigned int mr_alen; + unsigned char mr_address[32]; +}; + +union tpacket_uhdr { + struct tpacket_hdr *h1; + struct tpacket2_hdr *h2; + struct tpacket3_hdr *h3; + void *raw; +}; + +struct packet_skb_cb { + union { + struct sockaddr_pkt pkt; + union { + unsigned int origlen; + struct sockaddr_ll ll; + }; + } sa; +}; + +enum mm_major_op { + mm_pool32a_op = 0, + mm_pool16a_op = 1, + mm_lbu16_op = 2, + mm_move16_op = 3, + mm_addi32_op = 4, + mm_lbu32_op = 5, + mm_sb32_op = 6, + mm_lb32_op = 7, + mm_pool32b_op = 8, + mm_pool16b_op = 9, + mm_lhu16_op = 10, + mm_andi16_op = 11, + mm_addiu32_op = 12, + mm_lhu32_op = 13, + mm_sh32_op = 14, + mm_lh32_op = 15, + mm_pool32i_op = 16, + mm_pool16c_op = 17, + mm_lwsp16_op = 18, + mm_pool16d_op = 19, + mm_ori32_op = 20, + mm_pool32f_op = 21, + mm_pool32s_op = 22, + mm_reserved2_op = 23, + mm_pool32c_op = 24, + mm_lwgp16_op = 25, + mm_lw16_op = 26, + mm_pool16e_op = 27, + mm_xori32_op = 28, + mm_jals32_op = 29, + mm_addiupc_op = 30, + mm_reserved3_op = 31, + mm_reserved4_op = 32, + mm_pool16f_op = 33, + mm_sb16_op = 34, + mm_beqz16_op = 35, + mm_slti32_op = 36, + mm_beq32_op = 37, + mm_swc132_op = 38, + mm_lwc132_op = 39, + mm_reserved5_op = 40, + mm_reserved6_op = 41, + mm_sh16_op = 42, + mm_bnez16_op = 43, + mm_sltiu32_op = 44, + mm_bne32_op = 45, + mm_sdc132_op = 46, + mm_ldc132_op = 47, + mm_reserved7_op = 48, + mm_reserved8_op = 49, + mm_swsp16_op = 50, + mm_b16_op = 51, + mm_andi32_op = 52, + mm_j32_op = 53, + mm_sd32_op = 54, + mm_ld32_op = 55, + mm_reserved11_op = 56, + mm_reserved12_op = 57, + mm_sw16_op = 58, + mm_li16_op = 59, + mm_jalx32_op = 60, + mm_jal32_op = 61, + mm_sw32_op = 62, + mm_lw32_op = 63, +}; + +enum mm_32i_minor_op { + mm_bltz_op = 0, + mm_bltzal_op = 1, + mm_bgez_op = 2, + mm_bgezal_op = 3, + mm_blez_op = 4, + mm_bnezc_op = 5, + mm_bgtz_op = 6, + mm_beqzc_op = 7, + mm_tlti_op = 8, + mm_tgei_op = 9, + mm_tltiu_op = 10, + mm_tgeiu_op = 11, + mm_tnei_op = 12, + mm_lui_op = 13, + mm_teqi_op = 14, + mm_reserved13_op = 15, + mm_synci_op = 16, + mm_bltzals_op = 17, + mm_reserved14_op = 18, + mm_bgezals_op = 19, + mm_bc2f_op = 20, + mm_bc2t_op = 21, + mm_reserved15_op = 22, + mm_reserved16_op = 23, + mm_reserved17_op = 24, + mm_reserved18_op = 25, + mm_bposge64_op = 26, + mm_bposge32_op = 27, + mm_bc1f_op = 28, + mm_bc1t_op = 29, + mm_reserved19_op = 30, + mm_reserved20_op = 31, + mm_bc1any2f_op = 32, + mm_bc1any2t_op = 33, + mm_bc1any4f_op = 34, + mm_bc1any4t_op = 35, +}; + +enum mm_32a_minor_op { + mm_sll32_op = 0, + mm_ins_op = 12, + mm_sllv32_op = 16, + mm_ext_op = 44, + mm_pool32axf_op = 60, + mm_srl32_op = 64, + mm_srlv32_op = 80, + mm_sra_op = 128, + mm_srav_op = 144, + mm_rotr_op = 
192, + mm_lwxs_op = 280, + mm_addu32_op = 336, + mm_subu32_op = 464, + mm_wsbh_op = 492, + mm_mul_op = 528, + mm_and_op = 592, + mm_or32_op = 656, + mm_xor32_op = 784, + mm_slt_op = 848, + mm_sltu_op = 912, +}; + +enum mm_32axf_minor_op { + mm_mfc0_op = 3, + mm_mtc0_op = 11, + mm_tlbp_op = 13, + mm_mfhi32_op = 53, + mm_jalr_op = 60, + mm_tlbr_op = 77, + mm_mflo32_op = 117, + mm_jalrhb_op = 124, + mm_tlbwi_op = 141, + mm_mthi32_op = 181, + mm_tlbwr_op = 205, + mm_mtlo32_op = 245, + mm_di_op = 285, + mm_jalrs_op = 316, + mm_jalrshb_op = 380, + mm_sync_op = 429, + mm_syscall_op = 557, + mm_wait_op = 589, + mm_eret_op = 973, + mm_divu_op = 1500, +}; + +enum mm_16c_minor_op { + mm_lwm16_op = 4, + mm_swm16_op = 5, + mm_jr16_op = 12, + mm_jrc_op = 13, + mm_jalr16_op = 14, + mm_jalrs16_op = 15, + mm_jraddiusp_op = 24, +}; + +enum MIPS16e_ops { + MIPS16e_jal_op = 3, + MIPS16e_ld_op = 7, + MIPS16e_i8_op = 12, + MIPS16e_sd_op = 15, + MIPS16e_lb_op = 16, + MIPS16e_lh_op = 17, + MIPS16e_lwsp_op = 18, + MIPS16e_lw_op = 19, + MIPS16e_lbu_op = 20, + MIPS16e_lhu_op = 21, + MIPS16e_lwpc_op = 22, + MIPS16e_lwu_op = 23, + MIPS16e_sb_op = 24, + MIPS16e_sh_op = 25, + MIPS16e_swsp_op = 26, + MIPS16e_sw_op = 27, + MIPS16e_rr_op = 29, + MIPS16e_extend_op = 30, + MIPS16e_i64_op = 31, +}; + +enum MIPS16e_rr_func { + MIPS16e_jr_func = 0, +}; + +struct m16e_rr { + unsigned int opcode: 5; + unsigned int rx: 3; + unsigned int nd: 1; + unsigned int l: 1; + unsigned int ra: 1; + unsigned int func: 5; +}; + +struct m16e_jal { + unsigned int opcode: 5; + unsigned int x: 1; + unsigned int imm20_16: 5; + int imm25_21: 5; +}; + +struct m16e_i64 { + unsigned int opcode: 5; + unsigned int func: 3; + unsigned int imm: 8; +}; + +struct m16e_ri64 { + unsigned int opcode: 5; + unsigned int func: 3; + unsigned int ry: 3; + unsigned int imm: 5; +}; + +struct m16e_ri { + unsigned int opcode: 5; + unsigned int rx: 3; + unsigned int imm: 8; +}; + +struct m16e_rri { + unsigned int opcode: 5; + unsigned int rx: 3; + unsigned int ry: 3; + unsigned int imm: 5; +}; + +struct m16e_i8 { + unsigned int opcode: 5; + unsigned int func: 3; + unsigned int imm: 8; +}; + +union mips16e_instruction { + unsigned int full: 16; + struct m16e_rr rr; + struct m16e_jal jal; + struct m16e_i64 i64; + struct m16e_ri64 ri64; + struct m16e_ri ri; + struct m16e_rri rri; + struct m16e_i8 i8; +}; + +typedef unsigned int mips_instruction; + +struct mm_decoded_insn { + mips_instruction insn; + mips_instruction next_insn; + int pc_inc; + int next_pc_inc; + int micro_mips_mode; +}; + +enum fpu_mode { + FPU_32BIT = 0, + FPU_64BIT = 1, + FPU_AS_IS = 2, + FPU_HYBRID = 3, +}; + +struct stack_trace { + unsigned int nr_entries; + unsigned int max_entries; + long unsigned int *entries; + unsigned int skip; +}; + +typedef __u32 Elf32_Off; + +typedef __s32 Elf32_Sword; + +struct elf32_rel { + Elf32_Addr r_offset; + Elf32_Word r_info; +}; + +typedef struct elf32_rel Elf32_Rel; + +struct elf32_rela { + Elf32_Addr r_offset; + Elf32_Word r_info; + Elf32_Sword r_addend; +}; + +typedef struct elf32_rela Elf32_Rela; + +struct elf32_hdr { + unsigned char e_ident[16]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; + Elf32_Off e_phoff; + Elf32_Off e_shoff; + Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; +}; + +typedef struct elf32_hdr Elf32_Ehdr; + +struct elf32_shdr { + Elf32_Word sh_name; + Elf32_Word sh_type; + Elf32_Word 
sh_flags; + Elf32_Addr sh_addr; + Elf32_Off sh_offset; + Elf32_Word sh_size; + Elf32_Word sh_link; + Elf32_Word sh_info; + Elf32_Word sh_addralign; + Elf32_Word sh_entsize; +}; + +typedef struct elf32_shdr Elf32_Shdr; + +struct mips_hi16 { + struct mips_hi16 *next; + Elf32_Addr *addr; + Elf32_Addr value; +}; + +struct flush_tlb_data { + struct vm_area_struct *vma; + long unsigned int addr1; + long unsigned int addr2; +}; + +struct proc_cpuinfo_notifier_args { + struct seq_file *m; + long unsigned int n; +}; + +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short int contexts; + bool contexts_maxed; +}; + +struct rusage { + struct __kernel_old_timeval ru_utime; + struct __kernel_old_timeval ru_stime; + __kernel_long_t ru_maxrss; + __kernel_long_t ru_ixrss; + __kernel_long_t ru_idrss; + __kernel_long_t ru_isrss; + __kernel_long_t ru_minflt; + __kernel_long_t ru_majflt; + __kernel_long_t ru_nswap; + __kernel_long_t ru_inblock; + __kernel_long_t ru_oublock; + __kernel_long_t ru_msgsnd; + __kernel_long_t ru_msgrcv; + __kernel_long_t ru_nsignals; + __kernel_long_t ru_nvcsw; + __kernel_long_t ru_nivcsw; +}; + +struct rlimit64 { + __u64 rlim_cur; + __u64 rlim_max; +}; + +typedef int (*proc_visitor)(struct task_struct *, void *); + +struct oldold_utsname { + char sysname[9]; + char nodename[9]; + char release[9]; + char version[9]; + char machine[9]; +}; + +struct old_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; +}; + +enum uts_proc { + UTS_PROC_ARCH = 0, + UTS_PROC_OSTYPE = 1, + UTS_PROC_OSRELEASE = 2, + UTS_PROC_VERSION = 3, + UTS_PROC_HOSTNAME = 4, + UTS_PROC_DOMAINNAME = 5, +}; + +struct prctl_mm_map { + __u64 start_code; + __u64 end_code; + __u64 start_data; + __u64 end_data; + __u64 start_brk; + __u64 brk; + __u64 start_stack; + __u64 arg_start; + __u64 arg_end; + __u64 env_start; + __u64 env_end; + __u64 *auxv; + __u32 auxv_size; + __u32 exe_fd; + long: 32; +}; + +struct tms { + __kernel_clock_t tms_utime; + __kernel_clock_t tms_stime; + __kernel_clock_t tms_cutime; + __kernel_clock_t tms_cstime; +}; + +struct getcpu_cache { + long unsigned int blob[32]; +}; + +enum { + PER_LINUX = 0, + PER_LINUX_32BIT = 8388608, + PER_LINUX_FDPIC = 524288, + PER_SVR4 = 68157441, + PER_SVR3 = 83886082, + PER_SCOSVR3 = 117440515, + PER_OSR5 = 100663299, + PER_WYSEV386 = 83886084, + PER_ISCR4 = 67108869, + PER_BSD = 6, + PER_SUNOS = 67108870, + PER_XENIX = 83886087, + PER_LINUX32 = 8, + PER_LINUX32_3GB = 134217736, + PER_IRIX32 = 67108873, + PER_IRIXN32 = 67108874, + PER_IRIX64 = 67108875, + PER_RISCOS = 12, + PER_SOLARIS = 67108877, + PER_UW7 = 68157454, + PER_OSF4 = 15, + PER_HPUX = 16, + PER_MASK = 255, +}; + +struct __kernel_timex_timeval { + __kernel_time64_t tv_sec; + long long int tv_usec; +}; + +struct __kernel_timex { + unsigned int modes; + long: 32; + long long int offset; + long long int freq; + long long int maxerror; + long long int esterror; + int status; + long: 32; + long long int constant; + long long int precision; + long long int tolerance; + struct __kernel_timex_timeval time; + long long int tick; + long long int ppsfreq; + long long int jitter; + int shift; + long: 32; + long long int stabil; + long long int jitcnt; + long long int calcnt; + long long int errcnt; + long long int stbcnt; + int tai; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum audit_ntp_type { + 
AUDIT_NTP_OFFSET = 0, + AUDIT_NTP_FREQ = 1, + AUDIT_NTP_STATUS = 2, + AUDIT_NTP_TAI = 3, + AUDIT_NTP_TICK = 4, + AUDIT_NTP_ADJUST = 5, + AUDIT_NTP_NVALS = 6, +}; + +struct audit_ntp_data {}; + +struct ntp_data { + long unsigned int tick_usec; + long: 32; + u64 tick_length; + u64 tick_length_base; + int time_state; + int time_status; + s64 time_offset; + long int time_constant; + long int time_maxerror; + long int time_esterror; + long: 32; + s64 time_freq; + time64_t time_reftime; + long int time_adjust; + long: 32; + s64 ntp_tick_adj; + time64_t ntp_next_leap_sec; +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + long unsigned int private; +}; + +struct splice_pipe_desc { + struct page **pages; + struct partial_page *partial; + int nr_pages; + unsigned int nr_pages_max; + const struct pipe_buf_operations *ops; + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +enum die_val { + DIE_OOPS = 1, + DIE_FP = 2, + DIE_TRAP = 3, + DIE_RI = 4, + DIE_PAGE_FAULT = 5, + DIE_BREAK = 6, + DIE_SSTEPBP = 7, + DIE_MSAFP = 8, + DIE_UPROBE = 9, + DIE_UPROBE_XOL = 10, +}; + +struct trace_export { + struct trace_export *next; + void (*write)(struct trace_export *, const void *, unsigned int); + int flags; +}; + +enum trace_iter_flags { + TRACE_FILE_LAT_FMT = 1, + TRACE_FILE_ANNOTATE = 2, + TRACE_FILE_TIME_IN_NS = 4, +}; + +enum event_trigger_type { + ETT_NONE = 0, + ETT_TRACE_ONOFF = 1, + ETT_SNAPSHOT = 2, + ETT_STACKTRACE = 4, + ETT_EVENT_ENABLE = 8, + ETT_EVENT_HIST = 16, + ETT_HIST_ENABLE = 32, + ETT_EVENT_EPROBE = 64, +}; + +enum trace_type { + __TRACE_FIRST_TYPE = 0, + TRACE_FN = 1, + TRACE_CTX = 2, + TRACE_WAKE = 3, + TRACE_STACK = 4, + TRACE_PRINT = 5, + TRACE_BPRINT = 6, + TRACE_MMIO_RW = 7, + TRACE_MMIO_MAP = 8, + TRACE_BRANCH = 9, + TRACE_GRAPH_RET = 10, + TRACE_GRAPH_ENT = 11, + TRACE_GRAPH_RETADDR_ENT = 12, + TRACE_USER_STACK = 13, + TRACE_BLK = 14, + TRACE_BPUTS = 15, + TRACE_HWLAT = 16, + TRACE_OSNOISE = 17, + TRACE_TIMERLAT = 18, + TRACE_RAW_DATA = 19, + TRACE_FUNC_REPEATS = 20, + __TRACE_LAST_TYPE = 21, +}; + +struct ftrace_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; +}; + +struct stack_entry { + struct trace_entry ent; + int size; + long unsigned int caller[0]; +}; + +struct bprint_entry { + struct trace_entry ent; + long unsigned int ip; + const char *fmt; + u32 buf[0]; +}; + +struct print_entry { + struct trace_entry ent; + long unsigned int ip; + char buf[0]; +}; + +struct raw_data_entry { + struct trace_entry ent; + unsigned int id; + char buf[0]; +}; + +struct bputs_entry { + struct trace_entry ent; + long unsigned int ip; + const char *str; +}; + +struct func_repeats_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; + u16 count; + u16 top_delta_ts; + u32 bottom_delta_ts; +}; + +typedef bool (*cond_update_fn_t)(struct trace_array *, void *); + +enum { + TRACE_ARRAY_FL_GLOBAL = 1, + TRACE_ARRAY_FL_BOOT = 2, +}; + +struct trace_parser { + bool cont; + char *buffer; + unsigned int idx; + unsigned int size; +}; + +enum trace_iterator_flags { + TRACE_ITER_PRINT_PARENT = 1, + TRACE_ITER_SYM_OFFSET = 2, + TRACE_ITER_SYM_ADDR = 4, + TRACE_ITER_VERBOSE = 8, + TRACE_ITER_RAW = 16, + TRACE_ITER_HEX = 32, + TRACE_ITER_BIN = 64, + TRACE_ITER_BLOCK = 128, + TRACE_ITER_FIELDS = 256, + TRACE_ITER_PRINTK = 512, + TRACE_ITER_ANNOTATE = 1024, + TRACE_ITER_USERSTACKTRACE = 2048, + TRACE_ITER_SYM_USEROBJ = 4096, + TRACE_ITER_PRINTK_MSGONLY = 8192, + TRACE_ITER_CONTEXT_INFO = 16384, + 
TRACE_ITER_LATENCY_FMT = 32768, + TRACE_ITER_RECORD_CMD = 65536, + TRACE_ITER_RECORD_TGID = 131072, + TRACE_ITER_OVERWRITE = 262144, + TRACE_ITER_STOP_ON_FREE = 524288, + TRACE_ITER_IRQ_INFO = 1048576, + TRACE_ITER_MARKERS = 2097152, + TRACE_ITER_EVENT_FORK = 4194304, + TRACE_ITER_TRACE_PRINTK = 8388608, + TRACE_ITER_PAUSE_ON_TRACE = 16777216, + TRACE_ITER_HASH_PTR = 33554432, + TRACE_ITER_STACKTRACE = 67108864, +}; + +struct trace_min_max_param { + struct mutex *lock; + u64 *val; + u64 *min; + u64 *max; +}; + +struct pipe_wait { + struct trace_iterator *iter; + int wait_index; +}; + +struct ftrace_stack { + long unsigned int calls[1024]; +}; + +struct ftrace_stacks { + struct ftrace_stack stacks[4]; +}; + +struct trace_buffer_struct { + int nesting; + char buffer[4096]; +}; + +struct ftrace_buffer_info { + struct trace_iterator iter; + void *spare; + unsigned int spare_cpu; + unsigned int spare_size; + unsigned int read; +}; + +struct err_info { + const char **errs; + u8 type; + u16 pos; + u64 ts; +}; + +struct tracing_log_err { + struct list_head list; + struct err_info info; + char loc[128]; + char *cmd; + long: 32; +}; + +struct buffer_ref { + struct trace_buffer *buffer; + void *page; + int cpu; + refcount_t refcount; +}; + +enum btf_field_iter_kind { + BTF_FIELD_ITER_IDS = 0, + BTF_FIELD_ITER_STRS = 1, +}; + +struct btf_field_desc { + int t_off_cnt; + int t_offs[2]; + int m_sz; + int m_off_cnt; + int m_offs[1]; +}; + +struct btf_field_iter { + struct btf_field_desc desc; + void *p; + int m_idx; + int off_idx; + int vlen; +}; + +struct btf_relocate { + struct btf *btf; + const struct btf *base_btf; + const struct btf *dist_base_btf; + unsigned int nr_base_types; + unsigned int nr_split_types; + unsigned int nr_dist_base_types; + int dist_str_len; + int base_str_len; + __u32 *id_map; + __u32 *str_map; +}; + +struct btf_name_info { + const char *name; + bool needs_size: 1; + unsigned int size: 31; + __u32 id; +}; + +enum { + FOLL_TOUCH = 65536, + FOLL_TRIED = 131072, + FOLL_REMOTE = 262144, + FOLL_PIN = 524288, + FOLL_FAST_ONLY = 1048576, + FOLL_UNLOCKABLE = 2097152, + FOLL_MADV_POPULATE = 4194304, +}; + +struct follow_page_context { + struct dev_pagemap *pgmap; + unsigned int page_mask; +}; + +struct pages_or_folios { + union { + struct page **pages; + struct folio **folios; + void **entries; + }; + bool has_folios; + long int nr_entries; +}; + +union swap_header { + struct { + char reserved[4086]; + char magic[10]; + } magic; + struct { + char bootbits[1024]; + __u32 version; + __u32 last_page; + __u32 nr_badpages; + unsigned char sws_uuid[16]; + unsigned char sws_volume[16]; + __u32 padding[117]; + __u32 badpages[1]; + } info; +}; + +struct swap_extent { + struct rb_node rb_node; + long unsigned int start_page; + long unsigned int nr_pages; + long: 32; + sector_t start_block; +}; + +typedef int class_get_unused_fd_t; + +struct mnt_ns_info { + __u32 size; + __u32 nr_mounts; + __u64 mnt_ns_id; +}; + +struct stashed_operations { + void (*put_data)(void *); + int (*init_inode)(struct inode *, void *); +}; + +struct ns_get_path_task_args { + const struct proc_ns_operations *ns_ops; + struct task_struct *task; +}; + +struct kioctx; + +struct kioctx_table { + struct callback_head rcu; + unsigned int nr; + struct kioctx *table[0]; +}; + +typedef __kernel_ulong_t aio_context_t; + +enum { + IOCB_CMD_PREAD = 0, + IOCB_CMD_PWRITE = 1, + IOCB_CMD_FSYNC = 2, + IOCB_CMD_FDSYNC = 3, + IOCB_CMD_POLL = 5, + IOCB_CMD_NOOP = 6, + IOCB_CMD_PREADV = 7, + IOCB_CMD_PWRITEV = 8, +}; + +struct io_event { + 
__u64 data; + __u64 obj; + __s64 res; + __s64 res2; +}; + +struct iocb { + __u64 aio_data; + __kernel_rwf_t aio_rw_flags; + __u32 aio_key; + __u16 aio_lio_opcode; + __s16 aio_reqprio; + __u32 aio_fildes; + __u64 aio_buf; + __u64 aio_nbytes; + __s64 aio_offset; + __u64 aio_reserved2; + __u32 aio_flags; + __u32 aio_resfd; +}; + +typedef int kiocb_cancel_fn(struct kiocb *); + +struct aio_ring { + unsigned int id; + unsigned int nr; + unsigned int head; + unsigned int tail; + unsigned int magic; + unsigned int compat_features; + unsigned int incompat_features; + unsigned int header_length; + struct io_event io_events[0]; +}; + +struct kioctx_cpu; + +struct ctx_rq_wait; + +struct kioctx { + struct percpu_ref users; + atomic_t dead; + struct percpu_ref reqs; + long unsigned int user_id; + struct kioctx_cpu *cpu; + unsigned int req_batch; + unsigned int max_reqs; + unsigned int nr_events; + long unsigned int mmap_base; + long unsigned int mmap_size; + struct folio **ring_folios; + long int nr_pages; + struct rcu_work free_rwork; + struct ctx_rq_wait *rq_wait; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct { + atomic_t reqs_available; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + spinlock_t ctx_lock; + struct list_head active_reqs; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + struct mutex ring_lock; + wait_queue_head_t wait; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + unsigned int tail; + unsigned int completed_events; + spinlock_t completion_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct folio *internal_folios[8]; + struct file *aio_ring_file; + unsigned int id; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct kioctx_cpu { + unsigned int reqs_available; +}; + +struct ctx_rq_wait { + struct completion comp; + atomic_t count; +}; + +struct fsync_iocb { + struct file *file; + struct work_struct work; + bool datasync; + struct cred *creds; +}; + +struct poll_iocb { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + bool cancelled; + bool 
work_scheduled; + bool work_need_resched; + struct wait_queue_entry wait; + struct work_struct work; +}; + +struct aio_kiocb { + union { + struct file *ki_filp; + struct kiocb rw; + struct fsync_iocb fsync; + struct poll_iocb poll; + }; + struct kioctx *ki_ctx; + kiocb_cancel_fn *ki_cancel; + struct io_event ki_res; + struct list_head ki_list; + refcount_t ki_refcnt; + struct eventfd_ctx *ki_eventfd; +}; + +struct aio_waiter { + struct wait_queue_entry w; + size_t min_nr; +}; + +struct aio_poll_table { + struct poll_table_struct pt; + struct aio_kiocb *iocb; + bool queued; + int error; +}; + +struct __aio_sigset { + const sigset_t *sigmask; + size_t sigsetsize; +}; + +struct pts_mount_opts { + int setuid; + int setgid; + kuid_t uid; + kgid_t gid; + umode_t mode; + umode_t ptmxmode; + int reserve; + int max; +}; + +enum { + Opt_uid___3 = 0, + Opt_gid___3 = 1, + Opt_mode___3 = 2, + Opt_ptmxmode = 3, + Opt_newinstance = 4, + Opt_max = 5, + Opt_err___2 = 6, +}; + +struct pts_fs_info { + struct ida allocated_ptys; + struct pts_mount_opts mount_opts; + struct super_block *sb; + struct dentry *ptmx_dentry; +}; + +struct ext4_fc_tl { + __le16 fc_tag; + __le16 fc_len; +}; + +struct ext4_fc_head { + __le32 fc_features; + __le32 fc_tid; +}; + +struct ext4_fc_add_range { + __le32 fc_ino; + __u8 fc_ex[12]; +}; + +struct ext4_fc_del_range { + __le32 fc_ino; + __le32 fc_lblk; + __le32 fc_len; +}; + +struct ext4_fc_dentry_info { + __le32 fc_parent_ino; + __le32 fc_ino; + __u8 fc_dname[0]; +}; + +struct ext4_fc_inode { + __le32 fc_ino; + __u8 fc_raw_inode[0]; +}; + +struct ext4_fc_tail { + __le32 fc_tid; + __le32 fc_crc; +}; + +enum { + EXT4_FC_STATUS_OK = 0, + EXT4_FC_STATUS_INELIGIBLE = 1, + EXT4_FC_STATUS_SKIPPED = 2, + EXT4_FC_STATUS_FAILED = 3, +}; + +struct ext4_fc_dentry_update { + int fcd_op; + int fcd_parent; + int fcd_ino; + long: 32; + struct qstr fcd_name; + unsigned char fcd_iname[36]; + struct list_head fcd_list; + struct list_head fcd_dilist; + long: 32; +}; + +struct ext4_locality_group { + struct mutex lg_mutex; + struct list_head lg_prealloc_list[10]; + spinlock_t lg_prealloc_lock; +}; + +struct ext4_extent { + __le32 ee_block; + __le16 ee_len; + __le16 ee_start_hi; + __le32 ee_start_lo; +}; + +struct ext4_extent_idx { + __le32 ei_block; + __le32 ei_leaf_lo; + __le16 ei_leaf_hi; + __u16 ei_unused; +}; + +struct ext4_extent_header { + __le16 eh_magic; + __le16 eh_entries; + __le16 eh_max; + __le16 eh_depth; + __le32 eh_generation; +}; + +struct ext4_ext_path { + ext4_fsblk_t p_block; + __u16 p_depth; + __u16 p_maxdepth; + struct ext4_extent *p_ext; + struct ext4_extent_idx *p_idx; + struct ext4_extent_header *p_hdr; + struct buffer_head *p_bh; + long: 32; +}; + +struct __track_dentry_update_args { + struct dentry *dentry; + int op; +}; + +struct __track_range_args { + ext4_lblk_t start; + ext4_lblk_t end; +}; + +struct dentry_info_args { + int parent_ino; + int dname_len; + int ino; + int inode_len; + char *dname; +}; + +struct ext4_fc_tl_mem { + u16 fc_tag; + u16 fc_len; +}; + +struct btrfs_extent_owner_ref { + __le64 root_id; +}; + +enum btrfs_chunk_alloc_enum { + CHUNK_ALLOC_NO_FORCE = 0, + CHUNK_ALLOC_LIMITED = 1, + CHUNK_ALLOC_FORCE = 2, + CHUNK_ALLOC_FORCE_FOR_EXTENT = 3, +}; + +enum btrfs_caching_type { + BTRFS_CACHE_NO = 0, + BTRFS_CACHE_STARTED = 1, + BTRFS_CACHE_FINISHED = 2, + BTRFS_CACHE_ERROR = 3, +}; + +struct btrfs_discard_stripe { + struct btrfs_device *dev; + long: 32; + u64 physical; + u64 length; +}; + +struct btrfs_squota_delta { + u64 root; + u64 num_bytes; + u64 
generation; + bool is_inc; + bool is_data; + long: 32; +}; + +enum btrfs_loop_type { + LOOP_CACHING_NOWAIT = 0, + LOOP_CACHING_WAIT = 1, + LOOP_UNSET_SIZE_CLASS = 2, + LOOP_ALLOC_CHUNK = 3, + LOOP_WRONG_SIZE_CLASS = 4, + LOOP_NO_EMPTY_SIZE = 5, +}; + +struct walk_control___2 { + u64 refs[8]; + u64 flags[8]; + struct btrfs_key update_progress; + struct btrfs_key drop_progress; + int drop_level; + int stage; + int level; + int shared_level; + int update_ref; + int keep_locks; + int reada_slot; + int reada_count; + int restarted; + int lookup_info; + long: 32; +}; + +struct btrfs_dev_extent { + __le64 chunk_tree; + __le64 chunk_objectid; + __le64 chunk_offset; + __le64 length; + __u8 chunk_tree_uuid[16]; +}; + +struct btrfs_block_group_item { + __le64 used; + __le64 chunk_objectid; + __le64 flags; +}; + +typedef union { + long unsigned int addr; + struct page *page; + dma_addr_t dma; +} addr_conv_t; + +struct blk_ia_range_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_independent_access_range *, char *); +}; + +struct io_uring_file_index_range { + __u32 off; + __u32 len; + __u64 resv; +}; + +enum { + IORING_RSRC_FILE = 0, + IORING_RSRC_BUFFER = 1, +}; + +enum io_uring_napi_tracking_strategy { + IO_URING_NAPI_TRACKING_DYNAMIC = 0, + IO_URING_NAPI_TRACKING_STATIC = 1, + IO_URING_NAPI_TRACKING_INACTIVE = 255, +}; + +typedef struct { + ptrdiff_t value; + const void *stateTable; + const void *symbolTT; + unsigned int stateLog; +} FSE_CState_t; + +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; + +typedef enum { + ZSTD_noDict = 0, + ZSTD_extDict = 1, + ZSTD_dictMatchState = 2, + ZSTD_dedicatedDictSearch = 3, +} ZSTD_dictMode_e; + +struct repcodes_s { + U32 rep[3]; +}; + +typedef struct repcodes_s repcodes_t; + +typedef U32 (*ZSTD_getAllMatchesFn)(ZSTD_match_t *, ZSTD_matchState_t *, U32 *, const BYTE *, const BYTE *, const U32 *, const U32, const U32); + +typedef struct { + rawSeqStore_t seqStore; + U32 startPosInBlock; + U32 endPosInBlock; + U32 offset; +} ZSTD_optLdm_t; + +enum bcop_op { + bcf_op = 0, + bct_op = 1, + bcfl_op = 2, + bctl_op = 3, +}; + +enum cop1_fmt { + s_fmt = 0, + d_fmt = 1, + e_fmt = 2, + q_fmt = 3, + w_fmt = 4, + l_fmt = 5, +}; + +enum cop1_sdw_func { + fadd_op = 0, + fsub_op = 1, + fmul_op = 2, + fdiv_op = 3, + fsqrt_op = 4, + fabs_op = 5, + fmov_op = 6, + fneg_op = 7, + froundl_op = 8, + ftruncl_op = 9, + fceill_op = 10, + ffloorl_op = 11, + fround_op = 12, + ftrunc_op = 13, + fceil_op = 14, + ffloor_op = 15, + fsel_op = 16, + fmovc_op = 17, + fmovz_op = 18, + fmovn_op = 19, + fseleqz_op = 20, + frecip_op = 21, + frsqrt_op = 22, + fselnez_op = 23, + fmaddf_op = 24, + fmsubf_op = 25, + frint_op = 26, + fclass_op = 27, + fmin_op = 28, + fmina_op = 29, + fmax_op = 30, + fmaxa_op = 31, + fcvts_op = 32, + fcvtd_op = 33, + fcvte_op = 34, + fcvtw_op = 36, + fcvtl_op = 37, + fcmp_op = 48, +}; + +enum cop1x_func { + lwxc1_op = 0, + ldxc1_op = 1, + swxc1_op = 8, + sdxc1_op = 9, + pfetch_op = 15, + madd_s_op = 32, + madd_d_op = 33, + madd_e_op = 34, + msub_s_op = 40, + msub_d_op = 41, + msub_e_op = 42, + nmadd_s_op = 48, + nmadd_d_op = 49, + nmadd_e_op = 50, + nmsub_s_op = 56, + nmsub_d_op = 57, + nmsub_e_op = 58, +}; + +enum mm_32f_minor_op { + mm_32f_00_op = 0, + mm_32f_01_op = 1, + mm_32f_02_op = 2, + mm_32f_10_op = 8, + mm_32f_11_op = 9, + mm_32f_12_op = 10, + mm_32f_20_op = 16, + mm_32f_30_op = 24, + mm_32f_40_op = 32, + mm_32f_41_op = 33, + mm_32f_42_op = 34, + mm_32f_50_op = 40, + mm_32f_51_op = 41, + mm_32f_52_op 
= 42, + mm_32f_60_op = 48, + mm_32f_70_op = 56, + mm_32f_73_op = 59, + mm_32f_74_op = 60, +}; + +enum mm_32f_10_minor_op { + mm_lwxc1_op = 1, + mm_swxc1_op = 2, + mm_ldxc1_op = 3, + mm_sdxc1_op = 4, + mm_luxc1_op = 5, + mm_suxc1_op = 6, +}; + +enum mm_32f_40_minor_op { + mm_fmovf_op = 0, + mm_fmovt_op = 1, +}; + +enum mm_32f_60_minor_op { + mm_fadd_op = 0, + mm_fsub_op = 1, + mm_fmul_op = 2, + mm_fdiv_op = 3, +}; + +enum mm_32f_70_minor_op { + mm_fmovn_op = 0, + mm_fmovz_op = 1, +}; + +enum mm_32f_73_minor_op { + mm_fmov0_op = 1, + mm_fcvtl_op = 4, + mm_movf0_op = 5, + mm_frsqrt_op = 8, + mm_ffloorl_op = 12, + mm_fabs0_op = 13, + mm_fcvtw_op = 36, + mm_movt0_op = 37, + mm_fsqrt_op = 40, + mm_ffloorw_op = 44, + mm_fneg0_op = 45, + mm_cfc1_op = 64, + mm_frecip_op = 72, + mm_fceill_op = 76, + mm_fcvtd0_op = 77, + mm_ctc1_op = 96, + mm_fceilw_op = 108, + mm_fcvts0_op = 109, + mm_mfc1_op = 128, + mm_fmov1_op = 129, + mm_movf1_op = 133, + mm_ftruncl_op = 140, + mm_fabs1_op = 141, + mm_mtc1_op = 160, + mm_movt1_op = 165, + mm_ftruncw_op = 172, + mm_fneg1_op = 173, + mm_mfhc1_op = 192, + mm_froundl_op = 204, + mm_fcvtd1_op = 205, + mm_mthc1_op = 224, + mm_froundw_op = 236, + mm_fcvts1_op = 237, +}; + +struct mips_fpu_emulator_stats { + long unsigned int emulated; + long unsigned int loads; + long unsigned int stores; + long unsigned int branches; + long unsigned int cp1ops; + long unsigned int cp1xops; + long unsigned int errors; + long unsigned int ieee754_inexact; + long unsigned int ieee754_underflow; + long unsigned int ieee754_overflow; + long unsigned int ieee754_zerodiv; + long unsigned int ieee754_invalidop; + long unsigned int ds_emul; + long unsigned int abs_s; + long unsigned int abs_d; + long unsigned int add_s; + long unsigned int add_d; + long unsigned int bc1eqz; + long unsigned int bc1nez; + long unsigned int ceil_w_s; + long unsigned int ceil_w_d; + long unsigned int ceil_l_s; + long unsigned int ceil_l_d; + long unsigned int class_s; + long unsigned int class_d; + long unsigned int cmp_af_s; + long unsigned int cmp_af_d; + long unsigned int cmp_eq_s; + long unsigned int cmp_eq_d; + long unsigned int cmp_le_s; + long unsigned int cmp_le_d; + long unsigned int cmp_lt_s; + long unsigned int cmp_lt_d; + long unsigned int cmp_ne_s; + long unsigned int cmp_ne_d; + long unsigned int cmp_or_s; + long unsigned int cmp_or_d; + long unsigned int cmp_ueq_s; + long unsigned int cmp_ueq_d; + long unsigned int cmp_ule_s; + long unsigned int cmp_ule_d; + long unsigned int cmp_ult_s; + long unsigned int cmp_ult_d; + long unsigned int cmp_un_s; + long unsigned int cmp_un_d; + long unsigned int cmp_une_s; + long unsigned int cmp_une_d; + long unsigned int cmp_saf_s; + long unsigned int cmp_saf_d; + long unsigned int cmp_seq_s; + long unsigned int cmp_seq_d; + long unsigned int cmp_sle_s; + long unsigned int cmp_sle_d; + long unsigned int cmp_slt_s; + long unsigned int cmp_slt_d; + long unsigned int cmp_sne_s; + long unsigned int cmp_sne_d; + long unsigned int cmp_sor_s; + long unsigned int cmp_sor_d; + long unsigned int cmp_sueq_s; + long unsigned int cmp_sueq_d; + long unsigned int cmp_sule_s; + long unsigned int cmp_sule_d; + long unsigned int cmp_sult_s; + long unsigned int cmp_sult_d; + long unsigned int cmp_sun_s; + long unsigned int cmp_sun_d; + long unsigned int cmp_sune_s; + long unsigned int cmp_sune_d; + long unsigned int cvt_d_l; + long unsigned int cvt_d_s; + long unsigned int cvt_d_w; + long unsigned int cvt_l_s; + long unsigned int cvt_l_d; + long unsigned int cvt_s_d; + long unsigned 
int cvt_s_l; + long unsigned int cvt_s_w; + long unsigned int cvt_w_s; + long unsigned int cvt_w_d; + long unsigned int div_s; + long unsigned int div_d; + long unsigned int floor_w_s; + long unsigned int floor_w_d; + long unsigned int floor_l_s; + long unsigned int floor_l_d; + long unsigned int maddf_s; + long unsigned int maddf_d; + long unsigned int max_s; + long unsigned int max_d; + long unsigned int maxa_s; + long unsigned int maxa_d; + long unsigned int min_s; + long unsigned int min_d; + long unsigned int mina_s; + long unsigned int mina_d; + long unsigned int mov_s; + long unsigned int mov_d; + long unsigned int msubf_s; + long unsigned int msubf_d; + long unsigned int mul_s; + long unsigned int mul_d; + long unsigned int neg_s; + long unsigned int neg_d; + long unsigned int recip_s; + long unsigned int recip_d; + long unsigned int rint_s; + long unsigned int rint_d; + long unsigned int round_w_s; + long unsigned int round_w_d; + long unsigned int round_l_s; + long unsigned int round_l_d; + long unsigned int rsqrt_s; + long unsigned int rsqrt_d; + long unsigned int sel_s; + long unsigned int sel_d; + long unsigned int seleqz_s; + long unsigned int seleqz_d; + long unsigned int selnez_s; + long unsigned int selnez_d; + long unsigned int sqrt_s; + long unsigned int sqrt_d; + long unsigned int sub_s; + long unsigned int sub_d; + long unsigned int trunc_w_s; + long unsigned int trunc_w_d; + long unsigned int trunc_l_s; + long unsigned int trunc_l_d; +}; + +struct clk_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct clk *clk; + struct clk_hw *clk_hw; +}; + +struct clk_lookup_alloc { + struct clk_lookup cl; + char dev_id[24]; + char con_id[16]; +}; + +struct cpu { + int node_id; + int hotpluggable; + struct device dev; +}; + +typedef u32 note_buf_t[72]; + +struct cpu_attr { + struct device_attribute attr; + const struct cpumask * const map; +}; + +struct dma_fence_chain { + struct dma_fence base; + struct dma_fence *prev; + long: 32; + u64 prev_seqno; + struct dma_fence *fence; + union { + struct dma_fence_cb cb; + struct irq_work work; + }; + spinlock_t lock; +}; + +struct scsi_varlen_cdb_hdr { + __u8 opcode; + __u8 control; + __u8 misc[5]; + __u8 additional_cdb_length; + __be16 service_action; +}; + +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; + +struct e1000_tx_desc { + __le64 buffer_addr; + union { + __le32 data; + struct { + __le16 length; + u8 cso; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; + u8 css; + __le16 special; + } fields; + } upper; +}; + +enum e1000_state_t___2 { + __E1000_TESTING = 0, + __E1000_RESETTING = 1, + __E1000_ACCESS_SHARED_RESOURCE = 2, + __E1000_DOWN = 3, +}; + +enum { + NETDEV_STATS = 0, + E1000_STATS = 1, +}; + +struct e1000_stats { + char stat_string[32]; + int type; + int sizeof_stat; + int stat_offset; +}; + +struct dbc_regs { + __le32 capability; + __le32 doorbell; + __le32 ersts; + __le32 __reserved_0; + __le64 erstba; + __le64 erdp; + __le32 control; + __le32 status; + __le32 portsc; + __le32 __reserved_1; + __le64 dccp; + __le32 devinfo1; + __le32 devinfo2; +}; + +struct dbc_str_descs { + char string0[64]; + char manufacturer[64]; + char product[64]; + char serial[64]; +}; + +enum dbc_state { + 
DS_DISABLED = 0, + DS_INITIALIZED = 1, + DS_ENABLED = 2, + DS_CONNECTED = 3, + DS_CONFIGURED = 4, + DS_MAX = 5, +}; + +struct xhci_dbc; + +struct dbc_ep { + struct xhci_dbc *dbc; + struct list_head list_pending; + struct xhci_ring *ring; + unsigned int direction: 1; + unsigned int halted: 1; +}; + +struct dbc_driver; + +struct xhci_dbc { + spinlock_t lock; + struct device *dev; + struct xhci_hcd *xhci; + struct dbc_regs *regs; + struct xhci_ring *ring_evt; + struct xhci_ring *ring_in; + struct xhci_ring *ring_out; + struct xhci_erst erst; + struct xhci_container_ctx *ctx; + struct dbc_str_descs *string; + dma_addr_t string_dma; + size_t string_size; + u16 idVendor; + u16 idProduct; + u16 bcdDevice; + u8 bInterfaceProtocol; + enum dbc_state state; + struct delayed_work event_work; + unsigned int poll_interval; + unsigned int resume_required: 1; + struct dbc_ep eps[2]; + const struct dbc_driver *driver; + void *priv; +}; + +struct dbc_driver { + int (*configure)(struct xhci_dbc *); + void (*disconnect)(struct xhci_dbc *); +}; + +struct dbc_request { + void *buf; + unsigned int length; + dma_addr_t dma; + void (*complete)(struct xhci_dbc *, struct dbc_request *); + struct list_head list_pool; + int status; + unsigned int actual; + struct xhci_dbc *dbc; + struct list_head list_pending; + dma_addr_t trb_dma; + union xhci_trb *trb; + unsigned int direction: 1; +}; + +struct trace_event_raw_xhci_log_msg { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctx { + struct trace_entry ent; + int ctx_64; + unsigned int ctx_type; + dma_addr_t ctx_dma; + u8 *ctx_va; + unsigned int ctx_ep_num; + u32 __data_loc_ctx_data; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_trb { + struct trace_entry ent; + dma_addr_t dma; + u32 type; + u32 field0; + u32 field1; + u32 field2; + u32 field3; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_free_virt_dev { + struct trace_entry ent; + void *vdev; + long: 32; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + int slot_id; + u16 current_mel; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_virt_dev { + struct trace_entry ent; + void *vdev; + long: 32; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + int devnum; + int state; + int speed; + u8 portnum; + u8 level; + int slot_id; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_xhci_log_urb { + struct trace_entry ent; + u32 __data_loc_devname; + void *urb; + unsigned int pipe; + unsigned int stream; + int status; + unsigned int flags; + int num_mapped_sgs; + int num_sgs; + int length; + int actual; + int epnum; + int dir_in; + int type; + int slot_id; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_stream_ctx { + struct trace_entry ent; + unsigned int stream_id; + long: 32; + u64 stream_ring; + dma_addr_t ctx_array_dma; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_xhci_log_ep_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u64 deq; + u32 tx_info; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_xhci_log_slot_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u32 tt_info; + u32 state; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctrl_ctx { + struct trace_entry ent; + u32 drop; + u32 add; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ring { + struct trace_entry ent; + u32 type; + void *ring; + dma_addr_t enq; + dma_addr_t deq; + unsigned int num_segs; + unsigned int stream_id; + unsigned int cycle_state; + unsigned int 
bounce_buf_len; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_portsc { + struct trace_entry ent; + u32 busnum; + u32 portnum; + u32 portsc; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_doorbell { + struct trace_entry ent; + u32 slot; + u32 doorbell; + char __data[0]; +}; + +struct trace_event_raw_xhci_dbc_log_request { + struct trace_entry ent; + struct dbc_request *req; + bool dir; + unsigned int actual; + unsigned int length; + int status; + char __data[0]; +}; + +struct trace_event_data_offsets_xhci_log_msg { + u32 msg; + const void *msg_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_ctx { + u32 ctx_data; + const void *ctx_data_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_trb {}; + +struct trace_event_data_offsets_xhci_log_free_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_urb { + u32 devname; + const void *devname_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_stream_ctx {}; + +struct trace_event_data_offsets_xhci_log_ep_ctx {}; + +struct trace_event_data_offsets_xhci_log_slot_ctx {}; + +struct trace_event_data_offsets_xhci_log_ctrl_ctx {}; + +struct trace_event_data_offsets_xhci_log_ring {}; + +struct trace_event_data_offsets_xhci_log_portsc {}; + +struct trace_event_data_offsets_xhci_log_doorbell {}; + +struct trace_event_data_offsets_xhci_dbc_log_request {}; + +typedef void (*btf_trace_xhci_dbg_address)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_context_change)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_quirks)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_reset_ep)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_cancel_urb)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_init)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_ring_expansion)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_address_ctx)(void *, struct xhci_hcd *, struct xhci_container_ctx *, unsigned int); + +typedef void (*btf_trace_xhci_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_handle_command)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_queue_trb)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_gadget_ep_queue)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_free_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_alloc_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_addressable_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_stop_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_urb_enqueue)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_giveback)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_dequeue)(void *, struct urb *); + +typedef void 
(*btf_trace_xhci_alloc_stream_info_ctx)(void *, struct xhci_stream_info *, unsigned int); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq_stream)(void *, struct xhci_stream_info *, unsigned int); + +typedef void (*btf_trace_xhci_handle_cmd_stop_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_config_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_add_endpoint)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_alloc_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_free_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_disable_slot)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_discover_or_reset_device)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_setup_device_slot)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_addr_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_address_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_ring_alloc)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_free)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_expansion)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_inc_enq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_inc_deq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_handle_port_status)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_get_port_status)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_hub_status_data)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_ring_ep_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_ring_host_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_dbc_alloc_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_free_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_queue_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_giveback_request)(void *, struct dbc_request *); + +struct posix_clock; + +struct posix_clock_context; + +struct posix_clock_operations { + struct module *owner; + int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *); + int (*clock_gettime)(struct posix_clock *, struct timespec64 *); + int (*clock_getres)(struct posix_clock *, struct timespec64 *); + int (*clock_settime)(struct posix_clock *, const struct timespec64 *); + long int (*ioctl)(struct posix_clock_context *, unsigned int, long unsigned int); + int (*open)(struct posix_clock_context *, fmode_t); + __poll_t (*poll)(struct posix_clock_context *, struct file *, poll_table *); + int (*release)(struct posix_clock_context *); + ssize_t (*read)(struct posix_clock_context *, uint, char *, size_t); +}; + +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct device *dev; + struct rw_semaphore rwsem; + bool 
zombie; +}; + +struct posix_clock_context { + struct posix_clock *clk; + void *private_clkdata; +}; + +struct ptp_extts_event { + struct ptp_clock_time t; + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct timestamp_event_queue { + struct ptp_extts_event buf[128]; + int head; + int tail; + spinlock_t lock; + struct list_head qlist; + long unsigned int *mask; + struct dentry *debugfs_instance; + struct debugfs_u32_array dfs_bitmap; + long: 32; +}; + +struct ptp_clock { + struct posix_clock clock; + struct device dev; + struct ptp_clock_info *info; + dev_t devid; + int index; + struct pps_device *pps_source; + long int dialed_frequency; + struct list_head tsevqs; + spinlock_t tsevqs_lock; + struct mutex pincfg_mux; + wait_queue_head_t tsev_wq; + int defunct; + struct device_attribute *pin_dev_attr; + struct attribute **pin_attr; + struct attribute_group pin_attr_group; + const struct attribute_group *pin_attr_groups[2]; + struct kthread_worker *kworker; + struct kthread_delayed_work aux_work; + unsigned int max_vclocks; + unsigned int n_vclocks; + int *vclock_index; + struct mutex n_vclocks_mux; + bool is_virtual_clock; + bool has_cycles; + struct dentry *debugfs_root; +}; + +struct ptp_vclock { + struct ptp_clock *pclock; + struct ptp_clock_info info; + struct ptp_clock *clock; + struct hlist_node vclock_hash_node; + struct cyclecounter cc; + struct timecounter tc; + struct mutex lock; +}; + +struct usage_priority { + __u32 usage; + bool global; + unsigned int slot_overwrite; +}; + +typedef bool (*hid_usage_cmp_t)(struct hid_usage *, unsigned int, unsigned int); + +struct gro_cell { + struct sk_buff_head napi_skbs; + struct napi_struct napi; +}; + +struct percpu_free_defer { + struct callback_head rcu; + void *ptr; +}; + +struct linkinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; +}; + +struct nf_log_buf { + unsigned int count; + char buf[1020]; +}; + +struct nf_conn___init { + struct nf_conn ct; +}; + +struct nf_nat_ipv4_range { + unsigned int flags; + __be32 min_ip; + __be32 max_ip; + union nf_conntrack_man_proto min; + union nf_conntrack_man_proto max; +}; + +struct nf_nat_ipv4_multi_range_compat { + unsigned int rangesize; + struct nf_nat_ipv4_range range[1]; +}; + +struct ip_options_data { + struct ip_options_rcu opt; + char data[40]; +}; + +struct icmp_filter { + __u32 data; +}; + +struct icmp_err { + int errno; + unsigned int fatal: 1; +}; + +struct raw_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct raw_sock { + struct inet_sock inet; + struct icmp_filter filter; + u32 ipmr_table; +}; + +struct raw_frag_vec { + struct msghdr *msg; + union { + struct icmphdr icmph; + char c[1]; + } hdr; + int hlen; +}; + +struct iptable_nat_pernet { + struct nf_hook_ops *nf_nat_ops; +}; + +struct sr6_tlv { + __u8 type; + __u8 len; + __u8 data[0]; +}; + +enum { + SEG6_ATTR_UNSPEC = 0, + SEG6_ATTR_DST = 1, + SEG6_ATTR_DSTLEN = 2, + SEG6_ATTR_HMACKEYID = 3, + SEG6_ATTR_SECRET = 4, + SEG6_ATTR_SECRETLEN = 5, + SEG6_ATTR_ALGID = 6, + SEG6_ATTR_HMACINFO = 7, + __SEG6_ATTR_MAX = 8, +}; + +enum { + SEG6_CMD_UNSPEC = 0, + SEG6_CMD_SETHMAC = 1, + SEG6_CMD_DUMPHMAC = 2, + SEG6_CMD_SET_TUNSRC = 3, + SEG6_CMD_GET_TUNSRC = 4, + __SEG6_CMD_MAX = 5, +}; + +typedef struct { + long unsigned int key[2]; +} hsiphash_key_t; + +enum { + AFFINITY = 0, + AFFINITY_LIST = 1, + EFFECTIVE = 2, + EFFECTIVE_LIST = 3, +}; + +struct __kernel_old_itimerval { + struct __kernel_old_timeval 
it_interval; + struct __kernel_old_timeval it_value; +}; + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE = 0, + RDMACG_RESOURCE_HCA_OBJECT = 1, + RDMACG_RESOURCE_MAX = 2, +}; + +struct rdma_cgroup { + struct cgroup_subsys_state css; + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +enum rdmacg_file_type { + RDMACG_RESOURCE_TYPE_MAX = 0, + RDMACG_RESOURCE_TYPE_STAT = 1, +}; + +struct rdmacg_resource { + int max; + int usage; +}; + +struct rdmacg_resource_pool { + struct rdmacg_device *device; + struct rdmacg_resource resources[2]; + struct list_head cg_node; + struct list_head dev_node; + long: 32; + u64 usage_sum; + int num_max_cnt; + long: 32; +}; + +struct action_cache {}; + +struct notification; + +struct seccomp_filter { + refcount_t refs; + refcount_t users; + bool log; + bool wait_killable_recv; + struct action_cache cache; + struct seccomp_filter *prev; + struct bpf_prog *prog; + struct notification *notif; + struct mutex notify_lock; + wait_queue_head_t wqh; +}; + +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + __u16 seccomp_data; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +struct seccomp_notif_addfd { + __u64 id; + __u32 flags; + __u32 srcfd; + __u32 newfd; + __u32 newfd_flags; +}; + +enum notify_state { + SECCOMP_NOTIFY_INIT = 0, + SECCOMP_NOTIFY_SENT = 1, + SECCOMP_NOTIFY_REPLIED = 2, +}; + +struct seccomp_knotif { + struct task_struct *task; + long: 32; + u64 id; + const struct seccomp_data *data; + enum notify_state state; + int error; + long int val; + u32 flags; + struct completion ready; + struct list_head list; + struct list_head addfd; + long: 32; +}; + +struct seccomp_kaddfd { + struct file *file; + int fd; + unsigned int flags; + __u32 ioctl_flags; + union { + bool setfd; + int ret; + }; + struct completion completion; + struct list_head list; +}; + +struct notification { + atomic_t requests; + u32 flags; + u64 next_id; + struct list_head notifications; +}; + +struct seccomp_log_name { + u32 log; + const char *name; +}; + +enum { + BPF_F_UPROBE_MULTI_RETURN = 1, +}; + +enum { + BPF_F_GET_BRANCH_RECORDS_SIZE = 1, +}; + +struct bpf_perf_event_value { + __u64 counter; + __u64 enabled; + __u64 running; +}; + +struct btf_ptr { + void *ptr; + __u32 type_id; + __u32 flags; +}; + +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct callback_head rcu; +}; + +struct bpf_key { + struct key *key; + bool has_ref; +}; + +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + PERF_TYPE_MAX = 6, +}; + +struct perf_event_query_bpf { + __u32 ids_len; + __u32 prog_cnt; + __u32 ids[0]; +}; + +typedef int perf_snapshot_branch_stack_t(struct perf_branch_entry *, unsigned int); + +struct trace_event_raw_bpf_trace_printk { + struct trace_entry ent; + u32 __data_loc_bpf_string; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trace_printk { + u32 bpf_string; + const void *bpf_string_ptr_; +}; + +typedef void (*btf_trace_bpf_trace_printk)(void *, const char *); + +struct bpf_trace_module { + struct module *module; + struct list_head list; +}; + +typedef u64 (*btf_bpf_probe_read_user)(void *, u32, const void *); + +typedef u64 
(*btf_bpf_probe_read_user_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_write_user)(void *, const void *, u32); + +typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_trace_vprintk)(char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf_btf)(struct seq_file *, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, struct bpf_perf_event_value *, u32); + +struct bpf_trace_sample_data { + struct perf_sample_data sds[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64); + +struct bpf_nested_pt_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_get_current_task)(); + +typedef u64 (*btf_bpf_get_current_task_btf)(); + +typedef u64 (*btf_bpf_task_pt_regs)(struct task_struct *); + +struct send_signal_irq_work { + struct irq_work irq_work; + struct task_struct *task; + u32 sig; + enum pid_type type; + bool has_siginfo; + struct kernel_siginfo info; +}; + +typedef u64 (*btf_bpf_send_signal)(u32); + +typedef u64 (*btf_bpf_send_signal_thread)(u32); + +typedef u64 (*btf_bpf_d_path)(struct path *, char *, u32); + +typedef u64 (*btf_bpf_snprintf_btf)(char *, u32, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_get_func_ip_tracing)(void *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_trace)(void *); + +typedef u64 (*btf_bpf_get_attach_cookie_pe)(struct bpf_perf_event_data_kern *); + +typedef u64 (*btf_bpf_get_attach_cookie_tracing)(void *); + +typedef u64 (*btf_bpf_get_branch_snapshot)(void *, u32, u64); + +typedef u64 (*btf_get_func_arg)(void *, u32, u64 *); + +typedef u64 (*btf_get_func_ret)(void *, u64 *); + +typedef u64 (*btf_get_func_arg_cnt)(void *); + +typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64); + +typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, struct bpf_perf_event_value *, u32); + +typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct bpf_raw_tp_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64); + +struct bpf_session_run_ctx { + struct bpf_run_ctx run_ctx; + bool is_return; + void *data; 
+}; + +struct bpf_uprobe_multi_link; + +struct bpf_uprobe { + struct bpf_uprobe_multi_link *link; + long: 32; + loff_t offset; + long unsigned int ref_ctr_offset; + long: 32; + u64 cookie; + struct uprobe *uprobe; + long: 32; + struct uprobe_consumer consumer; + bool session; + long: 32; +}; + +struct bpf_uprobe_multi_link { + struct path path; + struct bpf_link link; + u32 cnt; + u32 flags; + struct bpf_uprobe *uprobes; + struct task_struct *task; +}; + +struct bpf_uprobe_multi_run_ctx { + struct bpf_session_run_ctx session_ctx; + long unsigned int entry_ip; + struct bpf_uprobe *uprobe; +}; + +struct mempolicy {}; + +enum zpool_mapmode { + ZPOOL_MM_RW = 0, + ZPOOL_MM_RO = 1, + ZPOOL_MM_WO = 2, + ZPOOL_MM_DEFAULT = 0, +}; + +struct crypto_acomp_ctx { + struct crypto_acomp *acomp; + struct acomp_req *req; + struct crypto_wait wait; + u8 *buffer; + struct mutex mutex; + bool is_sleepable; +}; + +struct zpool; + +struct zswap_pool { + struct zpool *zpool; + struct crypto_acomp_ctx *acomp_ctx; + struct percpu_ref ref; + struct list_head list; + struct work_struct release_work; + struct hlist_node node; + char tfm_name[128]; +}; + +struct zswap_entry { + swp_entry_t swpentry; + unsigned int length; + bool referenced; + struct zswap_pool *pool; + long unsigned int handle; + struct obj_cgroup *objcg; + struct list_head lru; +}; + +enum zswap_init_type { + ZSWAP_UNINIT = 0, + ZSWAP_INIT_SUCCEED = 1, + ZSWAP_INIT_FAILED = 2, +}; + +struct fd_range { + unsigned int from; + unsigned int to; +}; + +struct btrfs_stripe { + __le64 devid; + __le64 offset; + __u8 dev_uuid[16]; +}; + +struct btrfs_chunk { + __le64 length; + __le64 owner; + __le64 stripe_len; + __le64 type; + __le32 io_align; + __le32 io_width; + __le32 sector_size; + __le16 num_stripes; + __le16 sub_stripes; + struct btrfs_stripe stripe; +}; + +struct btrfs_disk_balance_args { + __le64 profiles; + union { + __le64 usage; + struct { + __le32 usage_min; + __le32 usage_max; + }; + }; + __le64 devid; + __le64 pstart; + __le64 pend; + __le64 vstart; + __le64 vend; + __le64 target; + __le64 flags; + union { + __le64 limit; + struct { + __le32 limit_min; + __le32 limit_max; + }; + }; + __le32 stripes_min; + __le32 stripes_max; + __le64 unused[6]; +}; + +struct btrfs_balance_item { + __le64 flags; + struct btrfs_disk_balance_args data; + struct btrfs_disk_balance_args meta; + struct btrfs_disk_balance_args sys; + __le64 unused[4]; +}; + +struct btrfs_dev_stats_item { + __le64 values[5]; +}; + +struct btrfs_swapfile_pin { + struct rb_node node; + void *ptr; + struct inode *inode; + bool is_block_group; + int bg_extent_count; +}; + +struct btrfs_io_geometry { + u32 stripe_index; + u32 stripe_nr; + int mirror_num; + int num_stripes; + u64 stripe_offset; + u64 raid56_full_stripe_start; + int max_errors; + enum btrfs_map_op op; +}; + +struct alloc_chunk_ctl { + u64 start; + u64 type; + int num_stripes; + int sub_stripes; + int dev_stripes; + int devs_max; + int devs_min; + int devs_increment; + int ncopies; + int nparity; + u64 max_stripe_size; + u64 max_chunk_size; + u64 dev_extent_min; + u64 stripe_size; + u64 chunk_size; + int ndevs; + long: 32; +}; + +struct btrfs_raid_stride { + __le64 devid; + __le64 physical; +}; + +struct btrfs_stripe_extent { + struct { + struct {} __empty_strides; + struct btrfs_raid_stride strides[0]; + }; +}; + +struct akcipher_instance { + void (*free)(struct akcipher_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; 
+ long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_sync_data { + struct crypto_akcipher *tfm; + const void *src; + void *dst; + unsigned int slen; + unsigned int dlen; + struct akcipher_request *req; + struct crypto_wait cwait; + struct scatterlist sg; + u8 *buf; +}; + +struct pkcs7_signed_info { + struct pkcs7_signed_info *next; + struct x509_certificate *signer; + unsigned int index; + bool unsupported_crypto; + bool blacklisted; + const void *msgdigest; + unsigned int msgdigest_len; + unsigned int authattrs_len; + const void *authattrs; + long unsigned int aa_set; + long: 32; + time64_t signing_time; + struct public_key_signature *sig; + long: 32; +}; + +struct pkcs7_message { + struct x509_certificate *certs; + struct x509_certificate *crl; + struct pkcs7_signed_info *signed_infos; + u8 version; + bool have_authattrs; + enum OID data_type; + size_t data_len; + size_t data_hdrlen; + const void *data; +}; + +enum { + BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_INACTIVE = 3, + BLK_MQ_S_MAX = 4, +}; + +enum { + BLK_MQ_NO_TAG = 4294967295, + BLK_MQ_TAG_MIN = 1, + BLK_MQ_TAG_MAX = 4294967294, +}; + +struct virtio_device_id { + __u32 device; + __u32 vendor; +}; + +struct virtio_device; + +struct virtqueue { + struct list_head list; + void (*callback)(struct virtqueue *); + const char *name; + struct virtio_device *vdev; + unsigned int index; + unsigned int num_free; + unsigned int num_max; + bool reset; + void *priv; +}; + +struct vringh_config_ops; + +struct virtio_config_ops; + +struct virtio_device { + int index; + bool failed; + bool config_core_enabled; + bool config_driver_disabled; + bool config_change_pending; + spinlock_t config_lock; + spinlock_t vqs_list_lock; + struct device dev; + struct virtio_device_id id; + const struct virtio_config_ops *config; + const struct vringh_config_ops *vringh_config; + struct list_head vqs; + u64 features; + void *priv; + long: 32; +}; + +struct virtqueue_info; + +struct virtio_shm_region; + +struct virtio_config_ops { + void (*get)(struct virtio_device *, unsigned int, void *, unsigned int); + void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int); + u32 (*generation)(struct virtio_device *); + u8 (*get_status)(struct virtio_device *); + void (*set_status)(struct virtio_device *, u8); + void (*reset)(struct virtio_device *); + int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, struct virtqueue_info *, struct irq_affinity *); + void (*del_vqs)(struct virtio_device *); + void (*synchronize_cbs)(struct virtio_device *); + u64 (*get_features)(struct virtio_device *); + int (*finalize_features)(struct virtio_device *); + const char * (*bus_name)(struct virtio_device *); + int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *); + const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int); + bool (*get_shm_region)(struct virtio_device *, struct virtio_shm_region *, u8); + int (*disable_vq_and_reset)(struct virtqueue *); + int (*enable_vq_after_reset)(struct virtqueue *); +}; + +struct virtio_shm_region { + u64 addr; + u64 len; +}; + +typedef void vq_callback_t(struct virtqueue *); + +struct virtqueue_info { + const char *name; + 
vq_callback_t *callback; + bool ctx; +}; + +struct io_nop { + struct file *file; + int result; + int fd; + int buffer; + unsigned int flags; +}; + +struct sg_page_iter { + struct scatterlist *sg; + unsigned int sg_pgoffset; + unsigned int __nents; + int __pg_advance; +}; + +struct sg_mapping_iter { + struct page *page; + void *addr; + size_t length; + size_t consumed; + struct sg_page_iter piter; + unsigned int __offset; + unsigned int __remaining; + unsigned int __flags; +}; + +typedef ZSTD_ErrorCode ERR_enum; + +struct raid6_recov_calls { + void (*data2)(int, size_t, int, int, void **); + void (*datap)(int, size_t, int, void **); + int (*valid)(); + const char *name; + int priority; +}; + +typedef unsigned int u_int; + +struct virtio_pci_legacy_device { + struct pci_dev *pci_dev; + u8 *isr; + void *ioaddr; + struct virtio_device_id id; +}; + +struct component_master_ops { + int (*bind)(struct device *); + void (*unbind)(struct device *); +}; + +struct component; + +struct component_match_array { + void *data; + int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + +struct aggregate_device; + +struct component { + struct list_head node; + struct aggregate_device *adev; + bool bound; + const struct component_ops *ops; + int subcomponent; + struct device *dev; +}; + +struct component_match { + size_t alloc; + size_t num; + struct component_match_array *compare; +}; + +struct aggregate_device { + struct list_head node; + bool bound; + const struct component_master_ops *ops; + struct device *parent; + struct component_match *match; +}; + +struct builtin_fw { + char *name; + void *data; + long unsigned int size; +}; + +struct trace_event_raw_dma_fence { + struct trace_entry ent; + u32 __data_loc_driver; + u32 __data_loc_timeline; + unsigned int context; + unsigned int seqno; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_fence { + u32 driver; + const void *driver_ptr_; + u32 timeline; + const void *timeline_ptr_; +}; + +typedef void (*btf_trace_dma_fence_emit)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_init)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_destroy)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_enable_signal)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_signaled)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_start)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_end)(void *, struct dma_fence *); + +struct default_wait_cb { + struct dma_fence_cb base; + struct task_struct *task; +}; + +enum { + BLK_TAG_ALLOC_FIFO = 0, + BLK_TAG_ALLOC_RR = 1, + BLK_TAG_ALLOC_MAX = 2, +}; + +struct ata_port_info { + long unsigned int flags; + long unsigned int link_flags; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + struct ata_port_operations *port_ops; + void *private_data; +}; + +struct pci_bits { + unsigned int reg; + unsigned int width; + long unsigned int mask; + long unsigned int val; +}; + +enum { + PIIX_IOCFG = 84, + ICH5_PMR = 144, + ICH5_PCS = 146, + PIIX_SIDPR_BAR = 5, + PIIX_SIDPR_LEN = 16, + PIIX_SIDPR_IDX = 0, + PIIX_SIDPR_DATA = 4, + PIIX_FLAG_CHECKINTR = 268435456, + PIIX_FLAG_SIDPR = 536870912, + PIIX_PATA_FLAGS = 1, + PIIX_SATA_FLAGS = 268435458, + PIIX_FLAG_PIO16 = 1073741824, + PIIX_80C_PRI = 48, + PIIX_80C_SEC = 192, + P0 = 0, + P1 = 1, + P2 = 2, + P3 = 3, + 
IDE = -1, + NA = -2, + RV = -3, + PIIX_AHCI_DEVICE = 6, + PIIX_HOST_BROKEN_SUSPEND = 16777216, +}; + +enum piix_controller_ids { + piix_pata_mwdma = 0, + piix_pata_33 = 1, + ich_pata_33 = 2, + ich_pata_66 = 3, + ich_pata_100 = 4, + ich_pata_100_nomwdma1 = 5, + ich5_sata = 6, + ich6_sata = 7, + ich6m_sata = 8, + ich8_sata = 9, + ich8_2port_sata = 10, + ich8m_apple_sata = 11, + tolapai_sata = 12, + piix_pata_vmw = 13, + ich8_sata_snb = 14, + ich8_2port_sata_snb = 15, + ich8_2port_sata_byt = 16, +}; + +struct piix_map_db { + const u32 mask; + const u16 port_enable; + const int map[0]; +}; + +struct piix_host_priv { + const int *map; + u32 saved_iocfg; + void *sidpr; +}; + +struct ich_laptop { + u16 device; + u16 subvendor; + u16 subdevice; +}; + +struct ich8_hsfsts { + u16 flcdone: 1; + u16 flcerr: 1; + u16 dael: 1; + u16 berasesz: 2; + u16 flcinprog: 1; + u16 reserved1: 2; + u16 reserved2: 6; + u16 fldesvalid: 1; + u16 flockdn: 1; +}; + +union ich8_hws_flash_status { + struct ich8_hsfsts hsf_status; + u16 regval; +}; + +struct ich8_hsflctl { + u16 flcgo: 1; + u16 flcycle: 2; + u16 reserved: 5; + u16 fldbcount: 2; + u16 flockdn: 6; +}; + +union ich8_hws_flash_ctrl { + struct ich8_hsflctl hsf_ctrl; + u16 regval; +}; + +struct ich8_pr { + u32 base: 13; + u32 reserved1: 2; + u32 rpe: 1; + u32 limit: 13; + u32 reserved2: 2; + u32 wpe: 1; +}; + +union ich8_flash_protected_range { + struct ich8_pr range; + u32 regval; +}; + +typedef void (*companion_fn)(struct pci_dev *, struct usb_hcd *, struct pci_dev *, struct usb_hcd *); + +struct serport { + struct tty_struct *tty; + wait_queue_head_t wait; + struct serio *serio; + struct serio_device_id id; + spinlock_t lock; + long unsigned int flags; +}; + +struct ir_raw_timings_pl { + unsigned int header_pulse; + unsigned int bit_space; + unsigned int bit_pulse[2]; + unsigned int trailer_space; + unsigned int msb_first: 1; +}; + +struct of_bus; + +struct of_pci_range_parser { + struct device_node *node; + const struct of_bus *bus; + const __be32 *range; + const __be32 *end; + int na; + int ns; + int pna; + bool dma; +}; + +struct of_bus { + const char *name; + const char *addresses; + int (*match)(struct device_node *); + void (*count_cells)(struct device_node *, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int, int); + int (*translate)(__be32 *, u64, int); + int flag_cells; + unsigned int (*get_flags)(const __be32 *); +}; + +struct of_pci_range { + union { + u64 pci_addr; + u64 bus_addr; + }; + u64 cpu_addr; + u64 size; + u32 flags; + long: 32; +}; + +struct fib_notifier_info { + int family; + struct netlink_ext_ack *extack; +}; + +enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE = 0, + FIB_EVENT_ENTRY_APPEND = 1, + FIB_EVENT_ENTRY_ADD = 2, + FIB_EVENT_ENTRY_DEL = 3, + FIB_EVENT_RULE_ADD = 4, + FIB_EVENT_RULE_DEL = 5, + FIB_EVENT_NH_ADD = 6, + FIB_EVENT_NH_DEL = 7, + FIB_EVENT_VIF_ADD = 8, + FIB_EVENT_VIF_DEL = 9, +}; + +struct fib_notifier_net { + struct list_head fib_notifier_ops; + struct atomic_notifier_head fib_chain; +}; + +struct sk_psock_link { + struct list_head list; + struct bpf_map *map; + void *link_raw; +}; + +struct bpf_stab { + struct bpf_map map; + struct sock **sks; + struct sk_psock_progs progs; + spinlock_t lock; +}; + +typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_map)(struct sk_buff *, struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64); + +struct 
sock_map_seq_info { + struct bpf_map *map; + struct sock *sk; + u32 index; +}; + +struct bpf_iter__sockmap { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + struct sock *sk; + }; +}; + +struct bpf_shtab_elem { + struct callback_head rcu; + u32 hash; + struct sock *sk; + struct hlist_node node; + u8 key[0]; +}; + +struct bpf_shtab_bucket { + struct hlist_head head; + spinlock_t lock; +}; + +struct bpf_shtab { + struct bpf_map map; + struct bpf_shtab_bucket *buckets; + u32 buckets_num; + u32 elem_size; + struct sk_psock_progs progs; + atomic_t count; +}; + +typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64); + +struct sock_hash_seq_info { + struct bpf_map *map; + struct bpf_shtab *htab; + u32 bucket_id; +}; + +struct sockmap_link { + struct bpf_link link; + struct bpf_map *map; + enum bpf_attach_type attach_type; +}; + +enum { + ETHTOOL_A_MM_STAT_UNSPEC = 0, + ETHTOOL_A_MM_STAT_PAD = 1, + ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS = 2, + ETHTOOL_A_MM_STAT_SMD_ERRORS = 3, + ETHTOOL_A_MM_STAT_REASSEMBLY_OK = 4, + ETHTOOL_A_MM_STAT_RX_FRAG_COUNT = 5, + ETHTOOL_A_MM_STAT_TX_FRAG_COUNT = 6, + ETHTOOL_A_MM_STAT_HOLD_COUNT = 7, + __ETHTOOL_A_MM_STAT_CNT = 8, + ETHTOOL_A_MM_STAT_MAX = 7, +}; + +struct mm_reply_data { + struct ethnl_reply_data base; + struct ethtool_mm_state state; + long: 32; + struct ethtool_mm_stats stats; +}; + +struct arphdr { + __be16 ar_hrd; + __be16 ar_pro; + unsigned char ar_hln; + unsigned char ar_pln; + __be16 ar_op; +}; + +struct ip_auth_hdr { + __u8 nexthdr; + __u8 hdrlen; + __be16 reserved; + __be32 spi; + __be32 seq_no; + __u8 auth_data[0]; +}; + +struct ip_esp_hdr { + __be32 spi; + __be32 seq_no; + __u8 enc_data[0]; +}; + +struct arppayload { + unsigned char mac_src[6]; + unsigned char ip_src[4]; + unsigned char mac_dst[6]; + unsigned char ip_dst[4]; +}; + +struct nft_pipapo_elem; + +union nft_pipapo_map_bucket { + struct { + long unsigned int to: 24; + long unsigned int n: 8; + }; + struct nft_pipapo_elem *e; +}; + +struct nft_pipapo_elem { + struct nft_elem_priv priv; + struct nft_set_ext ext; +}; + +struct nft_pipapo_field { + unsigned int rules; + unsigned int bsize; + unsigned int rules_alloc; + u8 groups; + u8 bb; + long unsigned int *lt; + union nft_pipapo_map_bucket *mt; +}; + +struct nft_pipapo_scratch { + u8 map_index; + u32 align_off; + long unsigned int map[0]; +}; + +struct nft_pipapo_match { + u8 field_count; + unsigned int bsize_max; + struct nft_pipapo_scratch **scratch; + struct callback_head rcu; + struct nft_pipapo_field f[0]; +}; + +struct nft_pipapo { + struct nft_pipapo_match *match; + struct nft_pipapo_match *clone; + int width; + long unsigned int last_gc; +}; + +typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *); + +struct mmpin { + struct user_struct *user; + unsigned int num_pg; +}; + +struct ubuf_info_msgzc { + struct ubuf_info ubuf; + union { + struct { + long unsigned int desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy: 1; + u32 bytelen; + }; + }; + struct mmpin mmp; +}; + +struct hop_jumbo_hdr { + u8 nexthdr; + u8 hdrlen; + u8 tlv_type; + u8 tlv_len; + __be32 jumbo_payload_len; +}; + +struct ip6_fraglist_iter { + struct ipv6hdr *tmp_hdr; + struct sk_buff *frag; + int offset; + unsigned int 
hlen; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_frag_state { + u8 *prevhdr; + unsigned int hlen; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + int hroom; + int troom; + __be32 frag_id; + u8 nexthdr; +}; + +enum { + LWTUNNEL_XMIT_DONE = 0, + LWTUNNEL_XMIT_CONTINUE = 256, +}; + +struct __bridge_info { + __u64 designated_root; + __u64 bridge_id; + __u32 root_path_cost; + __u32 max_age; + __u32 hello_time; + __u32 forward_delay; + __u32 bridge_max_age; + __u32 bridge_hello_time; + __u32 bridge_forward_delay; + __u8 topology_change; + __u8 topology_change_detected; + __u8 root_port; + __u8 stp_enabled; + __u32 ageing_time; + __u32 gc_interval; + __u32 hello_timer_value; + __u32 tcn_timer_value; + __u32 topology_change_timer_value; + __u32 gc_timer_value; +}; + +struct __port_info { + __u64 designated_root; + __u64 designated_bridge; + __u16 port_id; + __u16 designated_port; + __u32 path_cost; + __u32 designated_cost; + __u8 state; + __u8 top_change_ack; + __u8 config_pending; + __u8 unused0; + __u32 message_age_timer_value; + __u32 forward_delay_timer_value; + __u32 hold_timer_value; + long: 32; +}; + +typedef __u64 Elf64_Addr; + +typedef __u16 Elf64_Half; + +typedef __u64 Elf64_Off; + +typedef __u32 Elf64_Word; + +typedef __u64 Elf64_Xword; + +struct elf64_hdr { + unsigned char e_ident[16]; + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; + Elf64_Off e_phoff; + Elf64_Off e_shoff; + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +}; + +struct elf32_phdr { + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; +}; + +struct elf64_phdr { + Elf64_Word p_type; + Elf64_Word p_flags; + Elf64_Off p_offset; + Elf64_Addr p_vaddr; + Elf64_Addr p_paddr; + Elf64_Xword p_filesz; + Elf64_Xword p_memsz; + Elf64_Xword p_align; +}; + +struct mips_elf_abiflags_v0 { + uint16_t version; + uint8_t isa_level; + uint8_t isa_rev; + uint8_t gpr_size; + uint8_t cpr1_size; + uint8_t cpr2_size; + uint8_t fp_abi; + uint32_t isa_ext; + uint32_t ases; + uint32_t flags1; + uint32_t flags2; +}; + +struct arch_elf_state { + int nan_2008; + int fp_abi; + int interp_fp_abi; + int overall_fp_mode; +}; + +enum { + FP_FRE = 0, + FP_FR0 = 1, + FP_FR1 = 2, +}; + +struct mode_req { + bool single; + bool soft; + bool fr1; + bool frdefault; + bool fre; +}; + +struct pin_cookie {}; + +typedef struct { + void *lock; + long unsigned int flags; +} class_irqsave_t; + +typedef struct { + void *lock; +} class_preempt_t; + +typedef struct { + raw_spinlock_t *lock; +} class_raw_spinlock_irq_t; + +enum rseq_cs_flags_bit { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +struct dl_bw { + raw_spinlock_t lock; + long: 32; + u64 bw; + u64 total_bw; +}; + +struct cpudl_item; + +struct cpudl { + raw_spinlock_t lock; + int size; + cpumask_var_t free_cpus; + struct cpudl_item *elements; +}; + +struct cpupri_vec { + atomic_t count; + cpumask_var_t mask; +}; + +struct cpupri { + struct cpupri_vec pri_to_cpu[101]; + int *cpu_to_pri; +}; + +struct perf_domain; + +struct root_domain { + atomic_t refcount; + atomic_t rto_count; + struct callback_head rcu; + cpumask_var_t span; + cpumask_var_t online; + bool overloaded; + bool overutilized; + cpumask_var_t 
dlo_mask; + atomic_t dlo_count; + long: 32; + struct dl_bw dl_bw; + struct cpudl cpudl; + u64 visit_gen; + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; + int rto_loop; + int rto_cpu; + atomic_t rto_loop_next; + atomic_t rto_loop_start; + cpumask_var_t rto_mask; + struct cpupri cpupri; + struct perf_domain *pd; +}; + +struct cfs_rq { + struct load_weight load; + unsigned int nr_running; + unsigned int h_nr_running; + unsigned int idle_nr_running; + unsigned int idle_h_nr_running; + s64 avg_vruntime; + u64 avg_load; + u64 min_vruntime; + struct rb_root_cached tasks_timeline; + struct sched_entity *curr; + struct sched_entity *next; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_avg avg; + u64 last_update_time_copy; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct { + raw_spinlock_t lock; + int nr; + long unsigned int load_avg; + long unsigned int util_avg; + long unsigned int runnable_avg; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + } removed; + u64 last_update_tg_load_avg; + long unsigned int tg_load_avg_contrib; + long int propagate; + long int prop_runnable_sum; + long unsigned int h_load; + u64 last_h_load_update; + struct sched_entity *h_load_next; + struct rq *rq; + int on_list; + struct list_head leaf_cfs_rq_list; + struct task_group *tg; + int idle; + int runtime_enabled; + s64 runtime_remaining; + u64 throttled_pelt_idle; + u64 throttled_pelt_idle_copy; + u64 throttled_clock; + u64 throttled_clock_pelt; + u64 throttled_clock_pelt_time; + u64 throttled_clock_self; + u64 throttled_clock_self_time; + int throttled; + int throttle_count; + struct list_head throttled_list; + struct list_head throttled_csd_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct rt_prio_array { + long unsigned int bitmap[4]; + struct list_head queue[100]; +}; + +struct rt_rq { + struct rt_prio_array active; + unsigned int rt_nr_running; + unsigned int rr_nr_running; + struct { + int curr; + int next; + } highest_prio; + bool overloaded; + struct plist_head pushable_tasks; + int rt_queued; +}; + +struct dl_rq { + struct rb_root_cached root; + unsigned int dl_nr_running; + long: 32; + struct { + u64 curr; + u64 next; + } earliest_dl; + bool overloaded; + struct rb_root_cached pushable_dl_tasks_root; + long: 32; + u64 running_bw; + u64 this_bw; + u64 extra_bw; + u64 max_bw; + u64 bw_ratio; +}; + +struct balance_callback { + struct balance_callback *next; + void (*func)(struct rq *); +}; + +struct scx_rq { + struct scx_dispatch_q local_dsq; + struct list_head runnable_list; + 
struct list_head ddsp_deferred_locals; + long unsigned int ops_qseq; + long: 32; + u64 extra_enq_flags; + u32 nr_running; + u32 flags; + u32 cpuperf_target; + bool cpu_released; + cpumask_var_t cpus_to_kick; + cpumask_var_t cpus_to_kick_if_idle; + cpumask_var_t cpus_to_preempt; + cpumask_var_t cpus_to_wait; + long unsigned int pnt_seq; + struct balance_callback deferred_bal_cb; + struct irq_work deferred_irq_work; + struct irq_work kick_cpus_irq_work; + long: 32; +}; + +typedef int (*cpu_stop_fn_t)(void *); + +struct cpu_stop_done; + +struct cpu_stop_work { + struct list_head list; + cpu_stop_fn_t fn; + long unsigned int caller; + void *arg; + struct cpu_stop_done *done; +}; + +struct sched_domain; + +struct rq { + raw_spinlock_t __lock; + unsigned int nr_running; + unsigned int ttwu_pending; + long: 32; + u64 nr_switches; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cfs_rq cfs; + struct rt_rq rt; + struct dl_rq dl; + struct scx_rq scx; + struct sched_dl_entity fair_server; + struct list_head leaf_cfs_rq_list; + struct list_head *tmp_alone_branch; + unsigned int nr_uninterruptible; + union { + struct task_struct *donor; + struct task_struct *curr; + }; + struct sched_dl_entity *dl_server; + struct task_struct *idle; + struct task_struct *stop; + long unsigned int next_balance; + struct mm_struct *prev_mm; + unsigned int clock_update_flags; + long: 32; + u64 clock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u64 clock_task; + u64 clock_pelt; + long unsigned int lost_idle_time; + long: 32; + u64 clock_pelt_idle; + u64 clock_idle; + u64 clock_pelt_idle_copy; + u64 clock_idle_copy; + atomic_t nr_iowait; + long: 32; + u64 last_seen_need_resched_ns; + int ticks_without_resched; + int membarrier_state; + struct root_domain *rd; + struct sched_domain *sd; + long unsigned int cpu_capacity; + struct balance_callback *balance_callback; + unsigned char nohz_idle_balance; + unsigned char idle_balance; + long unsigned int misfit_task_load; + int active_balance; + int push_cpu; + struct cpu_stop_work active_balance_work; + int cpu; + int online; + struct list_head cfs_tasks; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_avg avg_rt; + struct sched_avg avg_dl; + u64 idle_stamp; + u64 avg_idle; + u64 max_idle_balance_cost; + long unsigned int calc_load_update; + long int calc_load_active; + call_single_data_t hrtick_csd; + struct hrtimer hrtick_timer; + ktime_t hrtick_time; + unsigned int nr_pinned; + unsigned int push_busy; + struct cpu_stop_work push_work; + cpumask_var_t scratch_mask; + long: 32; + call_single_data_t cfsb_csd; + struct list_head cfsb_csd_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; +}; + +struct cfs_bandwidth { + raw_spinlock_t lock; + long: 32; + ktime_t period; + u64 quota; + u64 runtime; + u64 burst; + u64 runtime_snap; + s64 hierarchical_quota; + u8 idle; + u8 period_active; + u8 slack_started; + long: 32; + struct hrtimer period_timer; + struct hrtimer slack_timer; + struct list_head throttled_cfs_rq; + int nr_periods; + int nr_throttled; + int nr_burst; + long: 32; + u64 throttled_time; + u64 burst_time; +}; + +struct task_group { + struct cgroup_subsys_state css; + int idle; + struct sched_entity **se; + struct cfs_rq **cfs_rq; + long unsigned int shares; + atomic_long_t load_avg; + u32 scx_flags; + u32 scx_weight; + struct callback_head rcu; + struct list_head list; + struct task_group *parent; + struct list_head siblings; + struct list_head children; + struct cfs_bandwidth cfs_bandwidth; + long: 32; + long: 32; +}; + +enum mm_cid_state { + MM_CID_UNSET = 4294967295, + MM_CID_LAZY_PUT = 2147483648, +}; + +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, + MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, + MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, +}; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; + int nr_idle_scan; +}; + +struct sched_group; + +struct sched_domain { + struct sched_domain *parent; + struct sched_domain *child; + struct sched_group *groups; + long unsigned int min_interval; + long unsigned int max_interval; + unsigned int busy_factor; + unsigned int imbalance_pct; + unsigned int cache_nice_tries; + unsigned int imb_numa_nr; + int nohz_idle; + int flags; + int level; + long unsigned int last_balance; + unsigned int balance_interval; + unsigned int nr_balance_failed; + long: 32; + u64 max_newidle_lb_cost; + long unsigned int last_decay_max_lb_cost; + char *name; + union { + void *private; + struct callback_head rcu; + }; + struct sched_domain_shared *shared; + unsigned int span_weight; + long unsigned int span[0]; +}; + +struct sched_group_capacity; + +struct sched_group { + struct sched_group *next; + atomic_t ref; + unsigned int group_weight; + unsigned int cores; + struct sched_group_capacity *sgc; + int asym_prefer_cpu; + int flags; + long unsigned int cpumask[0]; +}; + +struct sched_group_capacity { + atomic_t ref; + long unsigned int capacity; + long unsigned int min_capacity; + long unsigned int max_capacity; + long unsigned int next_update; + int imbalance; + int id; + long unsigned int cpumask[0]; +}; + +struct em_perf_state { + long unsigned int performance; + long unsigned int frequency; + long unsigned int power; + long unsigned int cost; + long unsigned int flags; +}; + +struct em_perf_table { + struct callback_head rcu; + struct kref kref; + struct em_perf_state state[0]; +}; + +struct em_perf_domain { + struct em_perf_table *em_table; + int nr_perf_states; + int min_perf_state; + int max_perf_state; + long unsigned int flags; + long unsigned int cpus[0]; +}; + +typedef struct { + void *lock; +} class_cpus_read_lock_t; + +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = 0, + RSEQ_EVENT_SIGNAL_BIT = 1, + RSEQ_EVENT_MIGRATE_BIT = 2, +}; + +struct sched_attr { + __u32 size; + __u32 sched_policy; + __u64 sched_flags; + __s32 sched_nice; + __u32 sched_priority; + __u64 sched_runtime; + __u64 sched_deadline; + __u64 
sched_period; + __u32 sched_util_min; + __u32 sched_util_max; +}; + +struct trace_event_raw_sched_kthread_stop { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_stop_ret { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_queue_work { + struct trace_entry ent; + void *work; + void *function; + void *worker; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_wakeup_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int target_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_switch { + struct trace_entry ent; + char prev_comm[16]; + pid_t prev_pid; + int prev_prio; + long int prev_state; + char next_comm[16]; + pid_t next_pid; + int next_prio; + char __data[0]; +}; + +struct trace_event_raw_sched_migrate_task { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int orig_cpu; + int dest_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_process_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_wait { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_fork { + struct trace_entry ent; + char parent_comm[16]; + pid_t parent_pid; + char child_comm[16]; + pid_t child_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_process_exec { + struct trace_entry ent; + u32 __data_loc_filename; + pid_t pid; + pid_t old_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_prepare_exec { + struct trace_entry ent; + u32 __data_loc_interp; + u32 __data_loc_filename; + pid_t pid; + u32 __data_loc_comm; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_runtime { + struct trace_entry ent; + char comm[16]; + pid_t pid; + long: 32; + u64 runtime; + char __data[0]; +}; + +struct trace_event_raw_sched_pi_setprio { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int oldprio; + int newprio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_hang { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_move_numa { + struct trace_entry ent; + pid_t pid; + pid_t tgid; + pid_t ngid; + int src_cpu; + int src_nid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_numa_pair_template { + struct trace_entry ent; + pid_t src_pid; + pid_t src_tgid; + pid_t src_ngid; + int src_cpu; + int src_nid; + pid_t dst_pid; + pid_t dst_tgid; + pid_t dst_ngid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_wake_idle_without_ipi { + struct trace_entry ent; + int cpu; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_kthread_stop {}; + +struct trace_event_data_offsets_sched_kthread_stop_ret {}; + +struct trace_event_data_offsets_sched_kthread_work_queue_work {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_start {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_end {}; + +struct trace_event_data_offsets_sched_wakeup_template {}; + +struct trace_event_data_offsets_sched_switch {}; + +struct 
trace_event_data_offsets_sched_migrate_task {}; + +struct trace_event_data_offsets_sched_process_template {}; + +struct trace_event_data_offsets_sched_process_wait {}; + +struct trace_event_data_offsets_sched_process_fork {}; + +struct trace_event_data_offsets_sched_process_exec { + u32 filename; + const void *filename_ptr_; +}; + +struct trace_event_data_offsets_sched_prepare_exec { + u32 interp; + const void *interp_ptr_; + u32 filename; + const void *filename_ptr_; + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_sched_stat_runtime {}; + +struct trace_event_data_offsets_sched_pi_setprio {}; + +struct trace_event_data_offsets_sched_process_hang {}; + +struct trace_event_data_offsets_sched_move_numa {}; + +struct trace_event_data_offsets_sched_numa_pair_template {}; + +struct trace_event_data_offsets_sched_wake_idle_without_ipi {}; + +typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int); + +typedef void (*btf_trace_sched_kthread_work_queue_work)(void *, struct kthread_worker *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_start)(void *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_end)(void *, struct kthread_work *, kthread_work_func_t); + +typedef void (*btf_trace_sched_waking)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, struct task_struct *, unsigned int); + +typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int); + +typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_wait)(void *, struct pid *); + +typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, struct linux_binprm *); + +typedef void (*btf_trace_sched_prepare_exec)(void *, struct task_struct *, struct linux_binprm *); + +typedef void (*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_hang)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int); + +typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int); + +typedef void (*btf_trace_pelt_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_pelt_rt_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_dl_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_hw_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_irq_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_cpu_capacity_tp)(void *, struct rq *); + +typedef void (*btf_trace_sched_overutilized_tp)(void *, struct root_domain 
*, bool); + +typedef void (*btf_trace_sched_util_est_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_sched_util_est_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_update_nr_running_tp)(void *, struct rq *, int); + +typedef void (*btf_trace_sched_compute_energy_tp)(void *, struct task_struct *, int, long unsigned int, long unsigned int, long unsigned int); + +struct trace_event_raw_ipi_raise { + struct trace_entry ent; + u32 __data_loc_target_cpus; + const char *reason; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *callback; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpumask { + struct trace_entry ent; + u32 __data_loc_cpumask; + void *callsite; + void *callback; + char __data[0]; +}; + +struct trace_event_raw_ipi_handler { + struct trace_entry ent; + const char *reason; + char __data[0]; +}; + +struct trace_event_data_offsets_ipi_raise { + u32 target_cpus; + const void *target_cpus_ptr_; +}; + +struct trace_event_data_offsets_ipi_send_cpu {}; + +struct trace_event_data_offsets_ipi_send_cpumask { + u32 cpumask; + const void *cpumask_ptr_; +}; + +struct trace_event_data_offsets_ipi_handler {}; + +typedef void (*btf_trace_ipi_raise)(void *, const struct cpumask *, const char *); + +typedef void (*btf_trace_ipi_send_cpu)(void *, const unsigned int, long unsigned int, void *); + +typedef void (*btf_trace_ipi_send_cpumask)(void *, const struct cpumask *, long unsigned int, void *); + +typedef void (*btf_trace_ipi_entry)(void *, const char *); + +typedef void (*btf_trace_ipi_exit)(void *, const char *); + +struct cpudl_item { + u64 dl; + int cpu; + int idx; +}; + +typedef int (*tg_visitor)(struct task_group *, void *); + +enum scx_rq_flags { + SCX_RQ_ONLINE = 1, + SCX_RQ_CAN_STOP_TICK = 2, + SCX_RQ_BAL_PENDING = 4, + SCX_RQ_BAL_KEEP = 8, + SCX_RQ_BYPASSING = 16, + SCX_RQ_IN_WAKEUP = 65536, + SCX_RQ_IN_BALANCE = 131072, +}; + +struct perf_domain { + struct em_perf_domain *em_pd; + struct perf_domain *next; + struct callback_head rcu; +}; + +struct rq_flags { + long unsigned int flags; + struct pin_cookie cookie; + unsigned int clock_update_flags; +}; + +typedef struct { + struct task_struct *lock; + struct rq *rq; + struct rq_flags rf; +} class_task_rq_lock_t; + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_irq_t; + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_irqsave_t; + +enum { + __SCHED_FEAT_PLACE_LAG = 0, + __SCHED_FEAT_PLACE_DEADLINE_INITIAL = 1, + __SCHED_FEAT_PLACE_REL_DEADLINE = 2, + __SCHED_FEAT_RUN_TO_PARITY = 3, + __SCHED_FEAT_PREEMPT_SHORT = 4, + __SCHED_FEAT_NEXT_BUDDY = 5, + __SCHED_FEAT_CACHE_HOT_BUDDY = 6, + __SCHED_FEAT_DELAY_DEQUEUE = 7, + __SCHED_FEAT_DELAY_ZERO = 8, + __SCHED_FEAT_WAKEUP_PREEMPTION = 9, + __SCHED_FEAT_HRTICK = 10, + __SCHED_FEAT_HRTICK_DL = 11, + __SCHED_FEAT_NONTASK_CAPACITY = 12, + __SCHED_FEAT_TTWU_QUEUE = 13, + __SCHED_FEAT_SIS_UTIL = 14, + __SCHED_FEAT_WARN_DOUBLE_CLOCK = 15, + __SCHED_FEAT_RT_PUSH_IPI = 16, + __SCHED_FEAT_RT_RUNTIME_SHARE = 17, + __SCHED_FEAT_LB_MIN = 18, + __SCHED_FEAT_ATTACH_AGE_LOAD = 19, + __SCHED_FEAT_WA_IDLE = 20, + __SCHED_FEAT_WA_WEIGHT = 21, + __SCHED_FEAT_WA_BIAS = 22, + __SCHED_FEAT_UTIL_EST = 23, + __SCHED_FEAT_LATENCY_WARN = 24, + __SCHED_FEAT_NR = 25, +}; + +struct affinity_context { + const struct cpumask *new_mask; + struct cpumask *user_mask; + unsigned int flags; +}; + +struct sched_enq_and_set_ctx { + struct 
task_struct *p; + int queue_flags; + bool queued; + bool running; +}; + +struct set_affinity_pending; + +struct migration_arg { + struct task_struct *task; + int dest_cpu; + struct set_affinity_pending *pending; +}; + +struct set_affinity_pending { + refcount_t refs; + unsigned int stop_pending; + struct completion done; + struct cpu_stop_work stop_work; + struct migration_arg arg; +}; + +struct cfs_schedulable_data { + struct task_group *tg; + long: 32; + u64 period; + u64 quota; +}; + +enum { + cpuset = 0, + possible = 1, + fail = 2, +}; + +union cpumask_rcuhead { + cpumask_t cpumask; + struct callback_head rcu; +}; + +typedef struct elf64_hdr Elf64_Ehdr; + +typedef struct elf64_phdr Elf64_Phdr; + +typedef long unsigned int elf_greg_t; + +typedef elf_greg_t elf_gregset_t[45]; + +struct elf_siginfo { + int si_signo; + int si_code; + int si_errno; +}; + +struct elf_prstatus_common { + struct elf_siginfo pr_info; + short int pr_cursig; + long unsigned int pr_sigpend; + long unsigned int pr_sighold; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; + struct __kernel_old_timeval pr_stime; + struct __kernel_old_timeval pr_cutime; + struct __kernel_old_timeval pr_cstime; +}; + +struct elf_prstatus { + struct elf_prstatus_common common; + elf_gregset_t pr_reg; + int pr_fpvalid; +}; + +struct crash_mem { + unsigned int max_nr_ranges; + unsigned int nr_ranges; + struct range ranges[0]; +}; + +struct trace_bprintk_fmt { + struct list_head list; + const char *fmt; +}; + +struct bpf_iter_seq_map_info { + u32 map_id; +}; + +struct bpf_iter__bpf_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; +}; + +typedef u64 (*btf_bpf_cgrp_storage_get)(struct bpf_map *, struct cgroup *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_cgrp_storage_delete)(struct bpf_map *, struct cgroup *); + +enum mminit_level { + MMINIT_WARNING = 0, + MMINIT_VERIFY = 1, + MMINIT_TRACE = 2, +}; + +typedef int folio_walk_flags_t; + +enum folio_walk_level { + FW_LEVEL_PTE = 0, + FW_LEVEL_PMD = 1, + FW_LEVEL_PUD = 2, +}; + +struct folio_walk { + struct page *page; + enum folio_walk_level level; + union { + pte_t *ptep; + pud_t *pudp; + pmd_t *pmdp; + }; + union { + pte_t pte; + pud_t pud; + pmd_t pmd; + }; + struct vm_area_struct *vma; + spinlock_t *ptl; +}; + +typedef unsigned int isolate_mode_t; + +struct movable_operations { + bool (*isolate_page)(struct page *, isolate_mode_t); + int (*migrate_page)(struct page *, struct page *, enum migrate_mode); + void (*putback_page)(struct page *); +}; + +enum rmp_flags { + RMP_LOCKED = 1, + RMP_USE_SHARED_ZEROPAGE = 2, +}; + +struct rmap_walk_arg { + struct folio *folio; + bool map_unused_to_zeropage; +}; + +enum { + PAGE_WAS_MAPPED = 1, + PAGE_WAS_MLOCKED = 2, + PAGE_OLD_STATES = 3, +}; + +struct migrate_pages_stats { + int nr_succeeded; + int nr_failed_pages; + int nr_thp_succeeded; + int nr_thp_failed; + int nr_thp_split; + int nr_split; +}; + +struct hugepage_subpool; + +struct mnt_idmap { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + refcount_t count; +}; + +enum { + VERBOSE_STATUS = 1, +}; + +enum { + Enabled = 0, + Magic = 1, +}; + +typedef struct { + struct list_head list; + long unsigned int flags; + int offset; + int size; + char *magic; + char *mask; + const char *interpreter; + char *name; + struct dentry *dentry; + struct file *interp_file; + refcount_t users; +} Node; + +struct ext4_allocation_request { + struct inode *inode; + unsigned int len; + ext4_lblk_t logical; + 
ext4_lblk_t lleft; + ext4_lblk_t lright; + long: 32; + ext4_fsblk_t goal; + ext4_fsblk_t pleft; + ext4_fsblk_t pright; + unsigned int flags; + long: 32; +}; + +typedef struct { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +} ext4_acl_entry; + +typedef struct { + __le32 a_version; +} ext4_acl_header; + +enum { + WORK_DONE_BIT = 0, + WORK_ORDER_DONE_BIT = 1, +}; + +struct btrfs_qgroup_status_item { + __le64 version; + __le64 generation; + __le64 flags; + __le64 rescan; + __le64 enable_gen; +}; + +struct btrfs_qgroup_info_item { + __le64 generation; + __le64 rfer; + __le64 rfer_cmpr; + __le64 excl; + __le64 excl_cmpr; +}; + +struct btrfs_qgroup_limit_item { + __le64 flags; + __le64 max_rfer; + __le64 max_excl; + __le64 rsv_rfer; + __le64 rsv_excl; +}; + +struct btrfs_qgroup_swapped_block { + struct rb_node node; + int level; + bool trace_leaf; + long: 32; + u64 subvol_bytenr; + u64 subvol_generation; + u64 reloc_bytenr; + u64 reloc_generation; + u64 last_snapshot; + struct btrfs_key first_key; + long: 32; +}; + +struct assoc_array_ops { + long unsigned int (*get_key_chunk)(const void *, int); + long unsigned int (*get_object_key_chunk)(const void *, int); + bool (*compare_object)(const void *, const void *); + int (*diff_objects)(const void *, const void *); + void (*free_object)(void *); +}; + +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[16]; + long unsigned int nr_leaves_on_branch; +}; + +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + long unsigned int index_key[0]; +}; + +struct assoc_array_edit { + struct callback_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[16]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long int adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[17]; +}; + +struct keyring_read_iterator_context { + size_t buflen; + size_t count; + key_serial_t *buffer; +}; + +struct crypto_rng; + +struct rng_alg { + int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int); + int (*seed)(struct crypto_rng *, const u8 *, unsigned int); + void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); + unsigned int seedsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +struct jitterentropy { + spinlock_t jent_lock; + struct rand_data *entropy_collector; + struct crypto_shash *tfm; + struct shash_desc *sdesc; +}; + +enum bio_merge_status { + BIO_MERGE_OK = 0, + BIO_MERGE_NONE = 1, + BIO_MERGE_FAILED = 2, +}; + +struct io_notif_data { + struct file *file; + struct ubuf_info uarg; + struct 
io_notif_data *next; + struct io_notif_data *head; + unsigned int account_pages; + bool zc_report; + bool zc_used; + bool zc_copied; +}; + +union nested_table { + union nested_table *table; + struct rhash_lock_head *bucket; +}; + +typedef mpi_limb_t UWtype; + +typedef struct { + size_t bitContainer; + unsigned int bitsConsumed; + const char *ptr; + const char *start; + const char *limitPtr; +} BIT_DStream_t; + +typedef enum { + BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3, +} BIT_DStream_status; + +typedef enum { + set_basic = 0, + set_rle = 1, + set_compressed = 2, + set_repeat = 3, +} symbolEncodingType_e; + +typedef struct { + U32 fastMode; + U32 tableLog; +} ZSTD_seqSymbol_header; + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; +} seq_t; + +typedef struct { + size_t state; + const ZSTD_seqSymbol *table; +} ZSTD_fseState; + +typedef struct { + BIT_DStream_t DStream; + ZSTD_fseState stateLL; + ZSTD_fseState stateOffb; + ZSTD_fseState stateML; + size_t prevOffset[3]; +} seqState_t; + +typedef enum { + ZSTD_lo_isRegularOffset = 0, + ZSTD_lo_isLongOffset = 1, +} ZSTD_longOffset_e; + +struct fb_event { + struct fb_info *info; + void *data; +}; + +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY = 0, + BACKLIGHT_UPDATE_SYSFS = 1, +}; + +enum backlight_type { + BACKLIGHT_RAW = 1, + BACKLIGHT_PLATFORM = 2, + BACKLIGHT_FIRMWARE = 3, + BACKLIGHT_TYPE_MAX = 4, +}; + +enum backlight_scale { + BACKLIGHT_SCALE_UNKNOWN = 0, + BACKLIGHT_SCALE_LINEAR = 1, + BACKLIGHT_SCALE_NON_LINEAR = 2, +}; + +struct backlight_device; + +struct backlight_ops { + unsigned int options; + int (*update_status)(struct backlight_device *); + int (*get_brightness)(struct backlight_device *); + bool (*controls_device)(struct backlight_device *, struct device *); +}; + +struct backlight_properties { + int brightness; + int max_brightness; + int power; + enum backlight_type type; + unsigned int state; + enum backlight_scale scale; +}; + +struct backlight_device { + struct backlight_properties props; + struct mutex update_lock; + struct mutex ops_lock; + const struct backlight_ops *ops; + struct notifier_block fb_notif; + struct list_head entry; + struct device dev; + bool fb_bl_on[32]; + int use_count; + long: 32; +}; + +struct kbentry { + unsigned char kb_table; + unsigned char kb_index; + short unsigned int kb_value; +}; + +struct kbsentry { + unsigned char kb_func; + unsigned char kb_string[512]; +}; + +struct kbdiacr { + unsigned char diacr; + unsigned char base; + unsigned char result; +}; + +struct kbdiacrs { + unsigned int kb_cnt; + struct kbdiacr kbdiacr[256]; +}; + +struct kbdiacruc { + unsigned int diacr; + unsigned int base; + unsigned int result; +}; + +struct kbdiacrsuc { + unsigned int kb_cnt; + struct kbdiacruc kbdiacruc[256]; +}; + +struct kbkeycode { + unsigned int scancode; + unsigned int keycode; +}; + +struct kbd_repeat { + int delay; + int period; +}; + +struct keyboard_notifier_param { + struct vc_data *vc; + int down; + int shift; + int ledstate; + unsigned int value; +}; + +struct kbd_struct { + unsigned char lockstate; + unsigned char slockstate; + unsigned char ledmode: 1; + unsigned char ledflagstate: 4; + char: 3; + unsigned char default_ledflagstate: 4; + unsigned char kbdmode: 3; + long: 1; + unsigned char modeflags: 5; +}; + +struct vt_spawn_console { + spinlock_t lock; + struct pid *pid; + int sig; +}; + +typedef void k_handler_fn(struct vc_data *, unsigned char, char); + +typedef void 
fn_handler_fn(struct vc_data *); + +struct getset_keycode_data { + struct input_keymap_entry ke; + int error; +}; + +enum kernel_read_file_id { + READING_UNKNOWN = 0, + READING_FIRMWARE = 1, + READING_MODULE = 2, + READING_KEXEC_IMAGE = 3, + READING_KEXEC_INITRAMFS = 4, + READING_POLICY = 5, + READING_X509_CERTIFICATE = 6, + READING_MAX_ID = 7, +}; + +enum fw_opt { + FW_OPT_UEVENT = 1, + FW_OPT_NOWAIT = 2, + FW_OPT_USERHELPER = 4, + FW_OPT_NO_WARN = 8, + FW_OPT_NOCACHE = 16, + FW_OPT_NOFALLBACK_SYSFS = 32, + FW_OPT_FALLBACK_PLATFORM = 64, + FW_OPT_PARTIAL = 128, +}; + +enum fw_status { + FW_STATUS_UNKNOWN = 0, + FW_STATUS_LOADING = 1, + FW_STATUS_DONE = 2, + FW_STATUS_ABORTED = 3, +}; + +struct fw_state { + struct completion completion; + enum fw_status status; +}; + +struct firmware_cache; + +struct fw_priv { + struct kref ref; + struct list_head list; + struct firmware_cache *fwc; + struct fw_state fw_st; + void *data; + size_t size; + size_t allocated_size; + size_t offset; + u32 opt_flags; + const char *fw_name; +}; + +struct firmware_cache { + spinlock_t lock; + struct list_head head; + int state; +}; + +struct firmware_work { + struct work_struct work; + struct module *module; + const char *name; + struct device *device; + void *context; + void (*cont)(const struct firmware *, void *); + u32 opt_flags; +}; + +enum pr_status { + PR_STS_SUCCESS = 0, + PR_STS_IOERR = 2, + PR_STS_RESERVATION_CONFLICT = 24, + PR_STS_RETRY_PATH_FAILURE = 917504, + PR_STS_PATH_FAST_FAILED = 983040, + PR_STS_PATH_FAILED = 65536, +}; + +enum nvme_pr_type { + NVME_PR_WRITE_EXCLUSIVE = 1, + NVME_PR_EXCLUSIVE_ACCESS = 2, + NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +enum nvme_eds { + NVME_EXTENDED_DATA_STRUCT = 1, +}; + +struct nvme_registered_ctrl { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 hostid; + __le64 rkey; +}; + +struct nvme_reservation_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[14]; + struct nvme_registered_ctrl regctl_ds[0]; +}; + +struct nvme_registered_ctrl_ext { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 rkey; + __u8 hostid[16]; + __u8 rsvd32[32]; +}; + +struct nvme_reservation_status_ext { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[14]; + __u8 rsvd24[40]; + struct nvme_registered_ctrl_ext regctl_eds[0]; +}; + +struct nvmet_pr_register_data { + __le64 crkey; + __le64 nrkey; +}; + +struct nvmet_pr_acquire_data { + __le64 crkey; + __le64 prkey; +}; + +struct nvmet_pr_release_data { + __le64 crkey; +}; + +enum nvme_pr_register_action { + NVME_PR_REGISTER_ACT_REG = 0, + NVME_PR_REGISTER_ACT_UNREG = 1, + NVME_PR_REGISTER_ACT_REPLACE = 2, +}; + +enum nvme_pr_acquire_action { + NVME_PR_ACQUIRE_ACT_ACQUIRE = 0, + NVME_PR_ACQUIRE_ACT_PREEMPT = 1, + NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 2, +}; + +enum nvme_pr_release_action { + NVME_PR_RELEASE_ACT_RELEASE = 0, + NVME_PR_RELEASE_ACT_CLEAR = 1, +}; + +enum nvme_pr_change_ptpl { + NVME_PR_CPTPL_NO_CHANGE = 0, + NVME_PR_CPTPL_RESV = 1073741824, + NVME_PR_CPTPL_CLEARED = -2147483648, + NVME_PR_CPTPL_PERSIST = -1073741824, +}; + +struct phy_setting { + u32 speed; + u8 duplex; + u8 bit; +}; + +struct igb_stats { + char stat_string[32]; + int sizeof_stat; + int stat_offset; +}; + +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP = 1, + TEST_IRQ = 2, + TEST_LOOP = 3, + TEST_LINK = 4, +}; + +struct 
igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +struct input_event { + __kernel_ulong_t __sec; + __kernel_ulong_t __usec; + __u16 type; + __u16 code; + __s32 value; +}; + +struct pps_kinfo { + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; + long: 32; +}; + +struct pps_fdata { + struct pps_kinfo info; + struct pps_ktime timeout; +}; + +struct pps_bind_args { + int tsformat; + int edge; + int consumer; +}; + +struct firmware_map_entry { + u64 start; + u64 end; + const char *type; + struct list_head list; + struct kobject kobj; +}; + +struct memmap_attribute { + struct attribute attr; + ssize_t (*show)(struct firmware_map_entry *, char *); +}; + +enum { + IORES_DESC_NONE = 0, + IORES_DESC_CRASH_KERNEL = 1, + IORES_DESC_ACPI_TABLES = 2, + IORES_DESC_ACPI_NV_STORAGE = 3, + IORES_DESC_PERSISTENT_MEMORY = 4, + IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, + IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, + IORES_DESC_RESERVED = 7, + IORES_DESC_SOFT_RESERVED = 8, + IORES_DESC_CXL = 9, +}; + +struct of_phandle_iterator { + const char *cells_name; + int cell_count; + const struct device_node *parent; + const __be32 *list_end; + const __be32 *phandle_end; + const __be32 *cur; + uint32_t cur_count; + phandle phandle; + struct device_node *node; +}; + +typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); + +struct of_intc_desc { + struct list_head list; + of_irq_init_cb_t irq_init_cb; + struct device_node *dev; + struct device_node *interrupt_parent; +}; + +enum netdev_xdp_rx_metadata { + NETDEV_XDP_RX_METADATA_TIMESTAMP = 1, + NETDEV_XDP_RX_METADATA_HASH = 2, + NETDEV_XDP_RX_METADATA_VLAN_TAG = 4, +}; + +enum netdev_xsk_flags { + NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, + NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, +}; + +enum netdev_qstats_scope { + NETDEV_QSTATS_SCOPE_QUEUE = 1, +}; + +enum { + NETDEV_A_DEV_IFINDEX = 1, + NETDEV_A_DEV_PAD = 2, + NETDEV_A_DEV_XDP_FEATURES = 3, + NETDEV_A_DEV_XDP_ZC_MAX_SEGS = 4, + NETDEV_A_DEV_XDP_RX_METADATA_FEATURES = 5, + NETDEV_A_DEV_XSK_FEATURES = 6, + __NETDEV_A_DEV_MAX = 7, + NETDEV_A_DEV_MAX = 6, +}; + +enum { + NETDEV_A_PAGE_POOL_ID = 1, + NETDEV_A_PAGE_POOL_IFINDEX = 2, + NETDEV_A_PAGE_POOL_NAPI_ID = 3, + NETDEV_A_PAGE_POOL_INFLIGHT = 4, + NETDEV_A_PAGE_POOL_INFLIGHT_MEM = 5, + NETDEV_A_PAGE_POOL_DETACH_TIME = 6, + NETDEV_A_PAGE_POOL_DMABUF = 7, + __NETDEV_A_PAGE_POOL_MAX = 8, + NETDEV_A_PAGE_POOL_MAX = 7, +}; + +enum { + NETDEV_A_NAPI_IFINDEX = 1, + NETDEV_A_NAPI_ID = 2, + NETDEV_A_NAPI_IRQ = 3, + NETDEV_A_NAPI_PID = 4, + NETDEV_A_NAPI_DEFER_HARD_IRQS = 5, + NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT = 6, + NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT = 7, + __NETDEV_A_NAPI_MAX = 8, + NETDEV_A_NAPI_MAX = 7, +}; + +enum { + NETDEV_A_QUEUE_ID = 1, + NETDEV_A_QUEUE_IFINDEX = 2, + NETDEV_A_QUEUE_TYPE = 3, + NETDEV_A_QUEUE_NAPI_ID = 4, + NETDEV_A_QUEUE_DMABUF = 5, + __NETDEV_A_QUEUE_MAX = 6, + NETDEV_A_QUEUE_MAX = 5, +}; + +enum { + NETDEV_A_QSTATS_IFINDEX = 1, + NETDEV_A_QSTATS_QUEUE_TYPE = 2, + NETDEV_A_QSTATS_QUEUE_ID = 3, + NETDEV_A_QSTATS_SCOPE = 4, + NETDEV_A_QSTATS_RX_PACKETS = 8, + NETDEV_A_QSTATS_RX_BYTES = 9, + NETDEV_A_QSTATS_TX_PACKETS = 10, + NETDEV_A_QSTATS_TX_BYTES = 11, + NETDEV_A_QSTATS_RX_ALLOC_FAIL = 12, + NETDEV_A_QSTATS_RX_HW_DROPS = 13, + NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS = 14, + NETDEV_A_QSTATS_RX_CSUM_COMPLETE = 15, + NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY = 16, + NETDEV_A_QSTATS_RX_CSUM_NONE = 17, + NETDEV_A_QSTATS_RX_CSUM_BAD 
= 18, + NETDEV_A_QSTATS_RX_HW_GRO_PACKETS = 19, + NETDEV_A_QSTATS_RX_HW_GRO_BYTES = 20, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS = 21, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES = 22, + NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS = 23, + NETDEV_A_QSTATS_TX_HW_DROPS = 24, + NETDEV_A_QSTATS_TX_HW_DROP_ERRORS = 25, + NETDEV_A_QSTATS_TX_CSUM_NONE = 26, + NETDEV_A_QSTATS_TX_NEEDS_CSUM = 27, + NETDEV_A_QSTATS_TX_HW_GSO_PACKETS = 28, + NETDEV_A_QSTATS_TX_HW_GSO_BYTES = 29, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS = 30, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES = 31, + NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS = 32, + NETDEV_A_QSTATS_TX_STOP = 33, + NETDEV_A_QSTATS_TX_WAKE = 34, + __NETDEV_A_QSTATS_MAX = 35, + NETDEV_A_QSTATS_MAX = 34, +}; + +enum { + NETDEV_A_DMABUF_IFINDEX = 1, + NETDEV_A_DMABUF_QUEUES = 2, + NETDEV_A_DMABUF_FD = 3, + NETDEV_A_DMABUF_ID = 4, + __NETDEV_A_DMABUF_MAX = 5, + NETDEV_A_DMABUF_MAX = 4, +}; + +enum { + NETDEV_CMD_DEV_GET = 1, + NETDEV_CMD_DEV_ADD_NTF = 2, + NETDEV_CMD_DEV_DEL_NTF = 3, + NETDEV_CMD_DEV_CHANGE_NTF = 4, + NETDEV_CMD_PAGE_POOL_GET = 5, + NETDEV_CMD_PAGE_POOL_ADD_NTF = 6, + NETDEV_CMD_PAGE_POOL_DEL_NTF = 7, + NETDEV_CMD_PAGE_POOL_CHANGE_NTF = 8, + NETDEV_CMD_PAGE_POOL_STATS_GET = 9, + NETDEV_CMD_QUEUE_GET = 10, + NETDEV_CMD_NAPI_GET = 11, + NETDEV_CMD_QSTATS_GET = 12, + NETDEV_CMD_BIND_RX = 13, + NETDEV_CMD_NAPI_SET = 14, + __NETDEV_CMD_MAX = 15, + NETDEV_CMD_MAX = 14, +}; + +struct net_devmem_dmabuf_binding { + struct dma_buf *dmabuf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + struct net_device *dev; + struct gen_pool *chunk_pool; + refcount_t ref; + struct list_head list; + struct xarray bound_rxqs; + u32 id; +}; + +enum { + NETDEV_NLGRP_MGMT = 0, + NETDEV_NLGRP_PAGE_POOL = 1, +}; + +struct netdev_nl_dump_ctx { + long unsigned int ifindex; + unsigned int rxq_idx; + unsigned int txq_idx; + unsigned int napi_id; +}; + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; + +struct unix_edge; + +struct scm_fp_list { + short int count; + short int count_unix; + short int max; + bool inflight; + bool dead; + struct list_head vertices; + struct unix_edge *edges; + struct user_struct *user; + struct file *fp[253]; +}; + +struct scm_cookie { + struct pid *pid; + struct scm_fp_list *fp; + struct scm_creds creds; +}; + +struct sockaddr_nl { + __kernel_sa_family_t nl_family; + short unsigned int nl_pad; + __u32 nl_pid; + __u32 nl_groups; +}; + +struct nlmsgerr { + int error; + struct nlmsghdr msg; +}; + +enum nlmsgerr_attrs { + NLMSGERR_ATTR_UNUSED = 0, + NLMSGERR_ATTR_MSG = 1, + NLMSGERR_ATTR_OFFS = 2, + NLMSGERR_ATTR_COOKIE = 3, + NLMSGERR_ATTR_POLICY = 4, + NLMSGERR_ATTR_MISS_TYPE = 5, + NLMSGERR_ATTR_MISS_NEST = 6, + __NLMSGERR_ATTR_MAX = 7, + NLMSGERR_ATTR_MAX = 6, +}; + +struct nl_pktinfo { + __u32 group; +}; + +enum { + NETLINK_UNCONNECTED = 0, + NETLINK_CONNECTED = 1, +}; + +enum netlink_skb_flags { + NETLINK_SKB_DST = 8, +}; + +struct netlink_notify { + struct net *net; + u32 portid; + int protocol; +}; + +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +struct trace_event_raw_netlink_extack { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_netlink_extack { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_netlink_extack)(void *, const char *); + +enum { + NETLINK_F_KERNEL_SOCKET = 0, + NETLINK_F_RECV_PKTINFO = 1, + NETLINK_F_BROADCAST_SEND_ERROR = 2, + NETLINK_F_RECV_NO_ENOBUFS = 3, + NETLINK_F_LISTEN_ALL_NSID = 4, + 
NETLINK_F_CAP_ACK = 5, + NETLINK_F_EXT_ACK = 6, + NETLINK_F_STRICT_CHK = 7, +}; + +struct netlink_sock { + struct sock sk; + long unsigned int flags; + u32 portid; + u32 dst_portid; + u32 dst_group; + u32 subscriptions; + u32 ngroups; + long unsigned int *groups; + long unsigned int state; + size_t max_recvmsg_len; + wait_queue_head_t wait; + bool bound; + bool cb_running; + int dump_done_errno; + struct netlink_callback cb; + struct mutex nl_cb_mutex; + void (*netlink_rcv)(struct sk_buff *); + int (*netlink_bind)(struct net *, int); + void (*netlink_unbind)(struct net *, int); + void (*netlink_release)(struct sock *, long unsigned int *); + struct module *module; + struct rhash_head node; + struct callback_head rcu; + long: 32; +}; + +struct listeners; + +struct netlink_table { + struct rhashtable hash; + struct hlist_head mc_list; + struct listeners *listeners; + unsigned int flags; + unsigned int groups; + struct mutex *cb_mutex; + struct module *module; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, long unsigned int *); + int registered; +}; + +struct listeners { + struct callback_head rcu; + long unsigned int masks[0]; +}; + +struct netlink_tap_net { + struct list_head netlink_tap_all; + struct mutex netlink_tap_lock; +}; + +struct netlink_compare_arg { + possible_net_t pnet; + u32 portid; +}; + +struct netlink_broadcast_data { + struct sock *exclude_sk; + struct net *net; + u32 portid; + u32 group; + int failure; + int delivery_failure; + int congested; + int delivered; + gfp_t allocation; + struct sk_buff *skb; + struct sk_buff *skb2; + int (*tx_filter)(struct sock *, struct sk_buff *, void *); + void *tx_data; +}; + +struct netlink_set_err_data { + struct sock *exclude_sk; + u32 portid; + u32 group; + int code; +}; + +struct nl_seq_iter { + struct seq_net_private p; + struct rhashtable_iter hti; + int link; +}; + +struct bpf_iter__netlink { + union { + struct bpf_iter_meta *meta; + }; + union { + struct netlink_sock *sk; + }; +}; + +enum nfulnl_msg_types { + NFULNL_MSG_PACKET = 0, + NFULNL_MSG_CONFIG = 1, + NFULNL_MSG_MAX = 2, +}; + +struct nfulnl_msg_packet_hdr { + __be16 hw_protocol; + __u8 hook; + __u8 _pad; +}; + +struct nfulnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfulnl_msg_packet_timestamp { + __be64 sec; + __be64 usec; +}; + +enum nfulnl_vlan_attr { + NFULA_VLAN_UNSPEC = 0, + NFULA_VLAN_PROTO = 1, + NFULA_VLAN_TCI = 2, + __NFULA_VLAN_MAX = 3, +}; + +enum nfulnl_attr_type { + NFULA_UNSPEC = 0, + NFULA_PACKET_HDR = 1, + NFULA_MARK = 2, + NFULA_TIMESTAMP = 3, + NFULA_IFINDEX_INDEV = 4, + NFULA_IFINDEX_OUTDEV = 5, + NFULA_IFINDEX_PHYSINDEV = 6, + NFULA_IFINDEX_PHYSOUTDEV = 7, + NFULA_HWADDR = 8, + NFULA_PAYLOAD = 9, + NFULA_PREFIX = 10, + NFULA_UID = 11, + NFULA_SEQ = 12, + NFULA_SEQ_GLOBAL = 13, + NFULA_GID = 14, + NFULA_HWTYPE = 15, + NFULA_HWHEADER = 16, + NFULA_HWLEN = 17, + NFULA_CT = 18, + NFULA_CT_INFO = 19, + NFULA_VLAN = 20, + NFULA_L2HDR = 21, + __NFULA_MAX = 22, +}; + +enum nfulnl_msg_config_cmds { + NFULNL_CFG_CMD_NONE = 0, + NFULNL_CFG_CMD_BIND = 1, + NFULNL_CFG_CMD_UNBIND = 2, + NFULNL_CFG_CMD_PF_BIND = 3, + NFULNL_CFG_CMD_PF_UNBIND = 4, +}; + +struct nfulnl_msg_config_cmd { + __u8 command; +}; + +struct nfulnl_msg_config_mode { + __be32 copy_range; + __u8 copy_mode; + __u8 _pad; +} __attribute__((packed)); + +enum nfulnl_attr_config { + NFULA_CFG_UNSPEC = 0, + NFULA_CFG_CMD = 1, + NFULA_CFG_MODE = 2, + NFULA_CFG_NLBUFSIZ = 3, + NFULA_CFG_TIMEOUT = 4, + 
NFULA_CFG_QTHRESH = 5, + NFULA_CFG_FLAGS = 6, + __NFULA_CFG_MAX = 7, +}; + +struct nfulnl_instance { + struct hlist_node hlist; + spinlock_t lock; + refcount_t use; + unsigned int qlen; + struct sk_buff *skb; + struct timer_list timer; + struct net *net; + netns_tracker ns_tracker; + struct user_namespace *peer_user_ns; + u32 peer_portid; + unsigned int flushtimeout; + unsigned int nlbufsiz; + unsigned int qthreshold; + u_int32_t copy_range; + u_int32_t seq; + u_int16_t group_num; + u_int16_t flags; + u_int8_t copy_mode; + struct callback_head rcu; +}; + +struct nfnl_log_net { + spinlock_t instances_lock; + struct hlist_head instance_table[16]; + atomic_t global_seq; +}; + +struct iter_state { + struct seq_net_private p; + unsigned int bucket; +}; + +enum nft_list_attributes { + NFTA_LIST_UNSPEC = 0, + NFTA_LIST_ELEM = 1, + __NFTA_LIST_MAX = 2, +}; + +enum nft_dynset_ops { + NFT_DYNSET_OP_ADD = 0, + NFT_DYNSET_OP_UPDATE = 1, + NFT_DYNSET_OP_DELETE = 2, +}; + +enum nft_dynset_flags { + NFT_DYNSET_F_INV = 1, + NFT_DYNSET_F_EXPR = 2, +}; + +enum nft_dynset_attributes { + NFTA_DYNSET_UNSPEC = 0, + NFTA_DYNSET_SET_NAME = 1, + NFTA_DYNSET_SET_ID = 2, + NFTA_DYNSET_OP = 3, + NFTA_DYNSET_SREG_KEY = 4, + NFTA_DYNSET_SREG_DATA = 5, + NFTA_DYNSET_TIMEOUT = 6, + NFTA_DYNSET_EXPR = 7, + NFTA_DYNSET_PAD = 8, + NFTA_DYNSET_FLAGS = 9, + NFTA_DYNSET_EXPRESSIONS = 10, + __NFTA_DYNSET_MAX = 11, +}; + +struct nft_set_elem_expr { + u8 size; + long: 32; + unsigned char data[0]; +}; + +struct nft_set_ext_type { + u8 len; + u8 align; +}; + +struct nft_set_ext_tmpl { + u16 len; + u8 offset[8]; + u8 ext_len[8]; +}; + +struct nft_dynset { + struct nft_set *set; + struct nft_set_ext_tmpl tmpl; + enum nft_dynset_ops op: 8; + u8 sreg_key; + u8 sreg_data; + bool invert; + bool expr; + u8 num_exprs; + long: 32; + u64 timeout; + struct nft_expr *expr_array[2]; + struct nft_set_binding binding; +}; + +struct arpreq { + struct sockaddr arp_pa; + struct sockaddr arp_ha; + int arp_flags; + struct sockaddr arp_netmask; + char arp_dev[16]; +}; + +struct neigh_seq_state { + struct seq_net_private p; + struct neigh_table *tbl; + struct neigh_hash_table *nht; + void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); + unsigned int bucket; + unsigned int flags; +}; + +struct neighbour_cb { + long unsigned int sched_next; + unsigned int flags; +}; + +enum { + AX25_VALUES_IPDEFMODE = 0, + AX25_VALUES_AXDEFMODE = 1, + AX25_VALUES_BACKOFF = 2, + AX25_VALUES_CONMODE = 3, + AX25_VALUES_WINDOW = 4, + AX25_VALUES_EWINDOW = 5, + AX25_VALUES_T1 = 6, + AX25_VALUES_T2 = 7, + AX25_VALUES_T3 = 8, + AX25_VALUES_IDLE = 9, + AX25_VALUES_N2 = 10, + AX25_VALUES_PACLEN = 11, + AX25_VALUES_PROTOCOL = 12, + AX25_MAX_VALUES = 13, +}; + +enum ipt_reject_with { + IPT_ICMP_NET_UNREACHABLE = 0, + IPT_ICMP_HOST_UNREACHABLE = 1, + IPT_ICMP_PROT_UNREACHABLE = 2, + IPT_ICMP_PORT_UNREACHABLE = 3, + IPT_ICMP_ECHOREPLY = 4, + IPT_ICMP_NET_PROHIBITED = 5, + IPT_ICMP_HOST_PROHIBITED = 6, + IPT_TCP_RESET = 7, + IPT_ICMP_ADMIN_PROHIBITED = 8, +}; + +struct ipt_reject_info { + enum ipt_reject_with with; +}; + +struct ioam6_pernet_data { + struct mutex lock; + struct rhashtable namespaces; + struct rhashtable schemas; +}; + +struct rt0_hdr { + struct ipv6_rt_hdr rt_hdr; + __u32 reserved; + struct in6_addr addr[0]; +}; + +struct ioam6_hdr { + __u8 opt_type; + __u8 opt_len; + char: 8; + __u8 type; +}; + +struct ioam6_trace_hdr { + __be16 namespace_id; + __u8 nodelen: 5; + __u8 overflow: 1; + char: 2; + char: 1; + __u8 remlen: 7; + union { + __be32 
type_be32; + struct { + __u32 bit0: 1; + __u32 bit1: 1; + __u32 bit2: 1; + __u32 bit3: 1; + __u32 bit4: 1; + __u32 bit5: 1; + __u32 bit6: 1; + __u32 bit7: 1; + __u32 bit8: 1; + __u32 bit9: 1; + __u32 bit10: 1; + __u32 bit11: 1; + __u32 bit12: 1; + __u32 bit13: 1; + __u32 bit14: 1; + __u32 bit15: 1; + __u32 bit16: 1; + __u32 bit17: 1; + __u32 bit18: 1; + __u32 bit19: 1; + __u32 bit20: 1; + __u32 bit21: 1; + __u32 bit22: 1; + __u32 bit23: 1; + } type; + }; + __u8 data[0]; +}; + +enum ioam6_event_type { + IOAM6_EVENT_UNSPEC = 0, + IOAM6_EVENT_TRACE = 1, +}; + +struct ioam6_schema; + +struct ioam6_namespace { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_schema *schema; + __be16 id; + __be32 data; + __be64 data_wide; +}; + +struct ioam6_schema { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_namespace *ns; + u32 id; + int len; + __be32 hdr; + u8 data[0]; +}; + +struct igmpv3_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + __be32 grec_mca; + __be32 grec_src[0]; +}; + +struct igmpv3_report { + __u8 type; + __u8 resv1; + __sum16 csum; + __be16 resv2; + __be16 ngrec; + struct igmpv3_grec grec[0]; +}; + +struct igmpv3_query { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; + __u8 resv: 4; + __u8 suppress: 1; + __u8 qrv: 3; + __u8 qqic; + __be16 nsrcs; + __be32 srcs[0]; +}; + +enum { + PIM_TYPE_HELLO = 0, + PIM_TYPE_REGISTER = 1, + PIM_TYPE_REGISTER_STOP = 2, + PIM_TYPE_JOIN_PRUNE = 3, + PIM_TYPE_BOOTSTRAP = 4, + PIM_TYPE_ASSERT = 5, + PIM_TYPE_GRAFT = 6, + PIM_TYPE_GRAFT_ACK = 7, + PIM_TYPE_CANDIDATE_RP_ADV = 8, +}; + +struct pimhdr { + __u8 type; + __u8 reserved; + __be16 csum; +}; + +struct mld_msg { + struct icmp6hdr mld_hdr; + struct in6_addr mld_mca; +}; + +struct mld2_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + struct in6_addr grec_mca; + struct in6_addr grec_src[0]; +}; + +struct mld2_report { + struct icmp6hdr mld2r_hdr; + struct mld2_grec mld2r_grec[0]; +}; + +struct mld2_query { + struct icmp6hdr mld2q_hdr; + struct in6_addr mld2q_mca; + __u8 mld2q_resv2: 4; + __u8 mld2q_suppress: 1; + __u8 mld2q_qrv: 3; + __u8 mld2q_qqic; + __be16 mld2q_nsrcs; + struct in6_addr mld2q_srcs[0]; +}; + +enum { + MDB_RTR_TYPE_DISABLED = 0, + MDB_RTR_TYPE_TEMP_QUERY = 1, + MDB_RTR_TYPE_PERM = 2, + MDB_RTR_TYPE_TEMP = 3, +}; + +enum { + BRIDGE_QUERIER_UNSPEC = 0, + BRIDGE_QUERIER_IP_ADDRESS = 1, + BRIDGE_QUERIER_IP_PORT = 2, + BRIDGE_QUERIER_IP_OTHER_TIMER = 3, + BRIDGE_QUERIER_PAD = 4, + BRIDGE_QUERIER_IPV6_ADDRESS = 5, + BRIDGE_QUERIER_IPV6_PORT = 6, + BRIDGE_QUERIER_IPV6_OTHER_TIMER = 7, + __BRIDGE_QUERIER_MAX = 8, +}; + +struct br_ip_list { + struct list_head list; + struct br_ip addr; +}; + +struct br_input_skb_cb { + struct net_device *brdev; + u16 frag_max_size; + u8 igmp; + u8 mrouters_only: 1; + u8 proxyarp_replied: 1; + u8 src_port_isolated: 1; + u8 promisc: 1; + u8 br_netfilter_broute: 1; + u32 backup_nhid; +}; + +struct srcu_notifier_head { + struct mutex mutex; + struct srcu_usage srcuu; + struct srcu_struct srcu; + struct notifier_block *head; +}; + +struct die_args { + struct pt_regs *regs; + const char *str; + long int err; + int trapnr; + int signr; +}; + +struct trace_event_raw_notifier_info { + struct trace_entry ent; + void *cb; + char __data[0]; +}; + +struct trace_event_data_offsets_notifier_info {}; + +typedef void (*btf_trace_notifier_register)(void *, void *); + +typedef void (*btf_trace_notifier_unregister)(void *, void *); + +typedef void (*btf_trace_notifier_run)(void *, void *); + 
+struct mcs_spinlock { + struct mcs_spinlock *next; + int locked; + int count; +}; + +struct qnode { + struct mcs_spinlock mcs; +}; + +struct irq_domain_chip_generic_info { + const char *name; + irq_flow_handler_t handler; + unsigned int irqs_per_chip; + unsigned int num_ct; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + int (*init)(struct irq_chip_generic *); + void (*exit)(struct irq_chip_generic *); +}; + +struct irq_domain_info { + struct fwnode_handle *fwnode; + unsigned int domain_flags; + unsigned int size; + irq_hw_number_t hwirq_max; + int direct_max; + unsigned int hwirq_base; + unsigned int virq_base; + enum irq_domain_bus_token bus_token; + const char *name_suffix; + const struct irq_domain_ops *ops; + void *host_data; + struct irq_domain *parent; + struct irq_domain_chip_generic_info *dgc_info; + int (*init)(struct irq_domain *); + void (*exit)(struct irq_domain *); +}; + +struct irqchip_fwid { + struct fwnode_handle fwnode; + unsigned int type; + char *name; + phys_addr_t *pa; +}; + +enum tick_device_mode { + TICKDEV_MODE_PERIODIC = 0, + TICKDEV_MODE_ONESHOT = 1, +}; + +struct tick_device { + struct clock_event_device *evtdev; + enum tick_device_mode mode; +}; + +enum tick_broadcast_state { + TICK_BROADCAST_EXIT = 0, + TICK_BROADCAST_ENTER = 1, +}; + +enum misc_res_type { + MISC_CG_RES_TYPES = 0, +}; + +struct misc_res { + u64 max; + atomic64_t watermark; + atomic64_t usage; + atomic64_t events; + atomic64_t events_local; +}; + +struct misc_cg { + struct cgroup_subsys_state css; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct misc_res res[0]; +}; + +enum pm_qos_req_action { + PM_QOS_ADD_REQ = 0, + PM_QOS_UPDATE_REQ = 1, + PM_QOS_REMOVE_REQ = 2, +}; + +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED = 0, + CPUFREQ_TABLE_SORTED_ASCENDING = 1, + CPUFREQ_TABLE_SORTED_DESCENDING = 2, +}; + +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + unsigned int transition_latency; +}; + +struct cpufreq_stats; + +struct cpufreq_governor; + +struct cpufreq_frequency_table; + +struct cpufreq_policy { + cpumask_var_t cpus; + cpumask_var_t related_cpus; + cpumask_var_t real_cpus; + unsigned int shared_type; + unsigned int cpu; + struct clk *clk; + struct cpufreq_cpuinfo cpuinfo; + unsigned int min; + unsigned int max; + unsigned int cur; + unsigned int suspend_freq; + unsigned int policy; + unsigned int last_policy; + struct cpufreq_governor *governor; + void *governor_data; + char last_governor[16]; + struct work_struct update; + struct freq_constraints constraints; + struct freq_qos_request *min_freq_req; + struct freq_qos_request *max_freq_req; + struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; + struct list_head policy_list; + struct kobject kobj; + struct completion kobj_unregister; + struct rw_semaphore rwsem; + bool fast_switch_possible; + bool fast_switch_enabled; + bool strict_target; + bool efficiencies_available; + unsigned int transition_delay_us; + bool dvfs_possible_from_any_cpu; + bool boost_enabled; + unsigned int cached_target_freq; + unsigned int cached_resolved_idx; + bool transition_ongoing; + spinlock_t transition_lock; + wait_queue_head_t transition_wait; + struct task_struct *transition_task; + struct cpufreq_stats *stats; + void *driver_data; + struct thermal_cooling_device *cdev; + struct notifier_block nb_min; + struct notifier_block nb_max; +}; + +struct cpufreq_governor { + char name[16]; + int 
(*init)(struct cpufreq_policy *); + void (*exit)(struct cpufreq_policy *); + int (*start)(struct cpufreq_policy *); + void (*stop)(struct cpufreq_policy *); + void (*limits)(struct cpufreq_policy *); + ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); + int (*store_setspeed)(struct cpufreq_policy *, unsigned int); + struct list_head governor_list; + struct module *owner; + u8 flags; +}; + +struct cpufreq_frequency_table { + unsigned int flags; + unsigned int driver_data; + unsigned int frequency; +}; + +struct trace_event_raw_cpu { + struct trace_entry ent; + u32 state; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_idle_miss { + struct trace_entry ent; + u32 cpu_id; + u32 state; + bool below; + char __data[0]; +}; + +struct trace_event_raw_powernv_throttle { + struct trace_entry ent; + int chip_id; + u32 __data_loc_reason; + int pmax; + char __data[0]; +}; + +struct trace_event_raw_pstate_sample { + struct trace_entry ent; + u32 core_busy; + u32 scaled_busy; + u32 from; + u32 to; + u64 mperf; + u64 aperf; + u64 tsc; + u32 freq; + u32 io_boost; + char __data[0]; +}; + +struct trace_event_raw_cpu_frequency_limits { + struct trace_entry ent; + u32 min_freq; + u32 max_freq; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_start { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u32 __data_loc_parent; + u32 __data_loc_pm_ops; + int event; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_end { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + int error; + char __data[0]; +}; + +struct trace_event_raw_suspend_resume { + struct trace_entry ent; + const char *action; + int val; + bool start; + char __data[0]; +}; + +struct trace_event_raw_wakeup_source { + struct trace_entry ent; + u32 __data_loc_name; + long: 32; + u64 state; + char __data[0]; +}; + +struct trace_event_raw_clock { + struct trace_entry ent; + u32 __data_loc_name; + long: 32; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_power_domain { + struct trace_entry ent; + u32 __data_loc_name; + long: 32; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_latency_qos_request { + struct trace_entry ent; + s32 value; + char __data[0]; +}; + +struct trace_event_raw_pm_qos_update { + struct trace_entry ent; + enum pm_qos_req_action action; + int prev_value; + int curr_value; + char __data[0]; +}; + +struct trace_event_raw_dev_pm_qos_request { + struct trace_entry ent; + u32 __data_loc_name; + enum dev_pm_qos_req_type type; + s32 new_value; + char __data[0]; +}; + +struct trace_event_raw_guest_halt_poll_ns { + struct trace_entry ent; + bool grow; + unsigned int new; + unsigned int old; + char __data[0]; +}; + +struct trace_event_data_offsets_cpu {}; + +struct trace_event_data_offsets_cpu_idle_miss {}; + +struct trace_event_data_offsets_powernv_throttle { + u32 reason; + const void *reason_ptr_; +}; + +struct trace_event_data_offsets_pstate_sample {}; + +struct trace_event_data_offsets_cpu_frequency_limits {}; + +struct trace_event_data_offsets_device_pm_callback_start { + u32 device; + const void *device_ptr_; + u32 driver; + const void *driver_ptr_; + u32 parent; + const void *parent_ptr_; + u32 pm_ops; + const void *pm_ops_ptr_; +}; + +struct trace_event_data_offsets_device_pm_callback_end { + u32 device; + const void *device_ptr_; + u32 driver; + const void *driver_ptr_; +}; + +struct trace_event_data_offsets_suspend_resume {}; + +struct 
trace_event_data_offsets_wakeup_source { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clock { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_power_domain { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cpu_latency_qos_request {}; + +struct trace_event_data_offsets_pm_qos_update {}; + +struct trace_event_data_offsets_dev_pm_qos_request { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_guest_halt_poll_ns {}; + +typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_idle_miss)(void *, unsigned int, unsigned int, bool); + +typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int); + +typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32); + +typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *); + +typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int); + +typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int); + +typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool); + +typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_pm_qos_add_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_remove_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_guest_halt_poll_ns)(void *, bool, unsigned int, unsigned int); + +struct bpf_tuple { + struct bpf_prog *prog; + struct bpf_link *link; +}; + +struct reuseport_array { + struct bpf_map map; + struct sock *ptrs[0]; +}; + +struct dma_block { + struct dma_block *next_block; + dma_addr_t dma; +}; + +struct dma_pool { + struct list_head page_list; + spinlock_t lock; + struct dma_block *next_block; + size_t nr_blocks; + size_t nr_active; + size_t nr_pages; + struct device *dev; + unsigned int size; + unsigned int allocation; + unsigned int boundary; + char name[32]; + struct list_head pools; +}; + +struct dma_page { + struct list_head page_list; + void *vaddr; + dma_addr_t dma; +}; + +struct core_vma_metadata; + +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct file *file; + long unsigned int limit; + long unsigned int mm_flags; + int cpu; + long: 32; + loff_t written; + loff_t pos; + loff_t to_skip; + int vma_count; + size_t vma_data_size; + 
struct core_vma_metadata *vma_meta; + long: 32; +}; + +struct core_vma_metadata { + long unsigned int start; + long unsigned int end; + long unsigned int flags; + long unsigned int dump_size; + long unsigned int pgoff; + struct file *file; +}; + +struct user_arg_ptr { + union { + const char * const *native; + } ptr; +}; + +struct pde_opener { + struct list_head lh; + struct file *file; + bool closing; + struct completion *c; +}; + +enum { + BIAS = 2147483648, +}; + +struct vmcore { + struct list_head list; + long long unsigned int paddr; + long long unsigned int size; + loff_t offset; +}; + +typedef struct elf32_phdr Elf32_Phdr; + +struct elf32_note { + Elf32_Word n_namesz; + Elf32_Word n_descsz; + Elf32_Word n_type; +}; + +typedef struct elf32_note Elf32_Nhdr; + +struct elf64_note { + Elf64_Word n_namesz; + Elf64_Word n_descsz; + Elf64_Word n_type; +}; + +typedef struct elf64_note Elf64_Nhdr; + +struct vmcore_cb { + bool (*pfn_is_ram)(struct vmcore_cb *, long unsigned int); + struct list_head next; +}; + +enum cc_attr { + CC_ATTR_MEM_ENCRYPT = 0, + CC_ATTR_HOST_MEM_ENCRYPT = 1, + CC_ATTR_GUEST_MEM_ENCRYPT = 2, + CC_ATTR_GUEST_STATE_ENCRYPT = 3, + CC_ATTR_GUEST_UNROLL_STRING_IO = 4, + CC_ATTR_GUEST_SEV_SNP = 5, + CC_ATTR_HOST_SEV_SNP = 6, +}; + +struct migrate_struct { + ext4_lblk_t first_block; + ext4_lblk_t last_block; + ext4_lblk_t curr_block; + long: 32; + ext4_fsblk_t first_pblock; + ext4_fsblk_t last_pblock; +}; + +enum fuse_notify_code { + FUSE_NOTIFY_POLL = 1, + FUSE_NOTIFY_INVAL_INODE = 2, + FUSE_NOTIFY_INVAL_ENTRY = 3, + FUSE_NOTIFY_STORE = 4, + FUSE_NOTIFY_RETRIEVE = 5, + FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_RESEND = 7, + FUSE_NOTIFY_CODE_MAX = 8, +}; + +struct fuse_forget_in { + uint64_t nlookup; +}; + +struct fuse_batch_forget_in { + uint32_t count; + uint32_t dummy; +}; + +struct fuse_interrupt_in { + uint64_t unique; +}; + +struct fuse_notify_poll_wakeup_out { + uint64_t kh; +}; + +struct fuse_notify_inval_inode_out { + uint64_t ino; + int64_t off; + int64_t len; +}; + +struct fuse_notify_inval_entry_out { + uint64_t parent; + uint32_t namelen; + uint32_t flags; +}; + +struct fuse_notify_delete_out { + uint64_t parent; + uint64_t child; + uint32_t namelen; + uint32_t padding; +}; + +struct fuse_notify_store_out { + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_out { + uint64_t notify_unique; + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_in { + uint64_t dummy1; + uint64_t offset; + uint32_t size; + uint32_t dummy2; + uint64_t dummy3; + uint64_t dummy4; +}; + +typedef union { +} release_pages_arg; + +enum fuse_req_flag { + FR_ISREPLY = 0, + FR_FORCE = 1, + FR_BACKGROUND = 2, + FR_WAITING = 3, + FR_ABORTED = 4, + FR_INTERRUPTED = 5, + FR_LOCKED = 6, + FR_PENDING = 7, + FR_SENT = 8, + FR_FINISHED = 9, + FR_PRIVATE = 10, + FR_ASYNC = 11, +}; + +struct fuse_pqueue { + unsigned int connected; + spinlock_t lock; + struct list_head *processing; + struct list_head io; +}; + +struct fuse_dev { + struct fuse_conn *fc; + struct fuse_pqueue pq; + struct list_head entry; +}; + +struct trace_event_raw_fuse_request_send { + struct trace_entry ent; + dev_t connection; + long: 32; + uint64_t unique; + enum fuse_opcode opcode; + uint32_t len; + char __data[0]; +}; + +struct trace_event_raw_fuse_request_end { + struct trace_entry ent; + dev_t connection; + long: 32; + uint64_t unique; + uint32_t len; + int32_t error; + char __data[0]; +}; + +struct 
trace_event_data_offsets_fuse_request_send {}; + +struct trace_event_data_offsets_fuse_request_end {}; + +typedef void (*btf_trace_fuse_request_send)(void *, const struct fuse_req *); + +typedef void (*btf_trace_fuse_request_end)(void *, const struct fuse_req *); + +struct fuse_copy_state { + int write; + struct fuse_req *req; + struct iov_iter *iter; + struct pipe_buffer *pipebufs; + struct pipe_buffer *currbuf; + struct pipe_inode_info *pipe; + long unsigned int nr_segs; + struct page *pg; + unsigned int len; + unsigned int offset; + unsigned int move_pages: 1; +}; + +struct fuse_retrieve_args { + struct fuse_args_pages ap; + struct fuse_notify_retrieve_in inarg; +}; + +struct btrfs_async_delayed_work { + struct btrfs_delayed_root *delayed_root; + int nr; + struct btrfs_work work; +}; + +struct msg_msgseg { + struct msg_msgseg *next; +}; + +struct rtattr { + short unsigned int rta_len; + short unsigned int rta_type; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + unsigned int qlen; + unsigned int max_qlen; +}; + +struct crypto_attr_alg { + char name[128]; +}; + +enum { + CRYPTO_MSG_ALG_REQUEST = 0, + CRYPTO_MSG_ALG_REGISTER = 1, + CRYPTO_MSG_ALG_LOADED = 2, +}; + +struct crypto_larval { + struct crypto_alg alg; + struct crypto_alg *adult; + struct completion completion; + u32 mask; + bool test_started; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum { + CRYPTOA_UNSPEC = 0, + CRYPTOA_ALG = 1, + CRYPTOA_TYPE = 2, + __CRYPTOA_MAX = 3, +}; + +enum submit_disposition { + ASYNC_TX_SUBMITTED = 0, + ASYNC_TX_CHANNEL_SWITCH = 1, + ASYNC_TX_DIRECT_SUBMIT = 2, +}; + +struct blkpg_ioctl_arg { + int op; + int flags; + int datalen; + void *data; +}; + +struct blkpg_partition { + long long int start; + long long int length; + int pno; + char devname[64]; + char volname[64]; + long: 32; +}; + +struct pr_reservation { + __u64 key; + __u32 type; + __u32 flags; +}; + +struct pr_registration { + __u64 old_key; + __u64 new_key; + __u32 flags; + __u32 __pad; +}; + +struct pr_preempt { + __u64 old_key; + __u64 new_key; + __u32 type; + __u32 flags; +}; + +struct pr_clear { + __u64 key; + __u32 flags; + __u32 __pad; +}; + +struct blk_iou_cmd { + int res; + bool nowait; +}; + +struct open_how { + __u64 flags; + __u64 mode; + __u64 resolve; +}; + +struct io_open { + struct file *file; + int dfd; + u32 file_slot; + struct filename *filename; + struct open_how how; + long unsigned int nofile; + long: 32; +}; + +struct io_close { + struct file *file; + int fd; + u32 file_slot; +}; + +struct io_fixed_install { + struct file *file; + unsigned int o_flags; +}; + +struct wrapper { + cmp_func_t cmp; + swap_func_t swap; +}; + +struct reciprocal_value_adv { + u32 m; + u8 sh; + u8 exp; + bool is_wide_m; +}; + +struct sha256_state { + u32 state[8]; + u64 count; + u8 buf[64]; +}; + +typedef void sha256_block_fn(struct sha256_state *, const u8 *, int); + +struct nla_bitfield32 { + __u32 value; + __u32 selector; +}; + +struct pcie_link_state { + struct pci_dev *pdev; + struct pci_dev *downstream; + struct pcie_link_state *root; + struct pcie_link_state *parent; + struct list_head sibling; + u32 aspm_support: 7; + u32 aspm_enabled: 7; + u32 aspm_capable: 7; + u32 aspm_default: 7; + long: 4; + u32 aspm_disable: 7; + u32 clkpm_capable: 
1; + u32 clkpm_enabled: 1; + u32 clkpm_default: 1; + u32 clkpm_disable: 1; +}; + +struct clk_multiplier { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +struct virtio_driver { + struct device_driver driver; + const struct virtio_device_id *id_table; + const unsigned int *feature_table; + unsigned int feature_table_size; + const unsigned int *feature_table_legacy; + unsigned int feature_table_size_legacy; + int (*validate)(struct virtio_device *); + int (*probe)(struct virtio_device *); + void (*scan)(struct virtio_device *); + void (*remove)(struct virtio_device *); + void (*config_changed)(struct virtio_device *); + int (*freeze)(struct virtio_device *); + int (*restore)(struct virtio_device *); +}; + +struct dma_fence_array; + +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +struct dma_fence_array { + struct dma_fence base; + spinlock_t lock; + unsigned int num_fences; + atomic_t num_pending; + struct dma_fence **fences; + struct irq_work work; + struct dma_fence_array_cb callbacks[0]; +}; + +enum ata_link_iter_mode { + ATA_LITER_EDGE = 0, + ATA_LITER_HOST_FIRST = 1, + ATA_LITER_PMP_FIRST = 2, +}; + +enum { + VETH_INFO_UNSPEC = 0, + VETH_INFO_PEER = 1, + __VETH_INFO_MAX = 2, +}; + +struct veth_stats { + u64 rx_drops; + u64 xdp_packets; + u64 xdp_bytes; + u64 xdp_redirect; + u64 xdp_drops; + u64 xdp_tx; + u64 xdp_tx_err; + u64 peer_tq_xdp_xmit; + u64 peer_tq_xdp_xmit_err; +}; + +struct veth_rq_stats { + struct veth_stats vs; + struct u64_stats_sync syncp; + long: 32; +}; + +struct veth_rq { + struct napi_struct xdp_napi; + struct napi_struct *napi; + struct net_device *dev; + struct bpf_prog *xdp_prog; + struct xdp_mem_info xdp_mem; + long: 32; + struct veth_rq_stats stats; + bool rx_notify_masked; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ptr_ring xdp_ring; + struct xdp_rxq_info xdp_rxq; + struct page_pool *page_pool; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct veth_priv { + struct net_device *peer; + long: 32; + atomic64_t dropped; + struct bpf_prog *_xdp_prog; + struct veth_rq *rq; + unsigned int requested_headroom; + long: 32; +}; + +struct veth_xdp_tx_bq { + struct xdp_frame *q[16]; + unsigned int count; +}; + +struct veth_q_stat_desc { + char desc[32]; + size_t offset; +}; + +struct veth_xdp_buff { + struct xdp_buff xdp; + struct sk_buff *skb; +}; + +enum utf16_endian { + UTF16_HOST_ENDIAN = 0, + UTF16_LITTLE_ENDIAN = 1, + UTF16_BIG_ENDIAN = 2, +}; + +struct usb_cdc_header_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdCDC; +} __attribute__((packed)); + +struct usb_cdc_call_mgmt_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; + __u8 bDataInterface; +}; + +struct usb_cdc_acm_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; +}; + +struct usb_cdc_union_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bMasterInterface0; + __u8 bSlaveInterface0; +}; + +struct usb_cdc_country_functional_desc { + __u8 bLength; + __u8 bDescriptorType; 
+ __u8 bDescriptorSubType; + __u8 iCountryCodeRelDate; + __le16 wCountyCode0; +}; + +struct usb_cdc_network_terminal_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bEntityId; + __u8 iName; + __u8 bChannelIndex; + __u8 bPhysicalInterface; +}; + +struct usb_cdc_ether_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 iMACAddress; + __le32 bmEthernetStatistics; + __le16 wMaxSegmentSize; + __le16 wNumberMCFilters; + __u8 bNumberPowerFilters; +} __attribute__((packed)); + +struct usb_cdc_dmm_desc { + __u8 bFunctionLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u16 bcdVersion; + __le16 wMaxCommand; +} __attribute__((packed)); + +struct usb_cdc_mdlm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; + __u8 bGUID[16]; +} __attribute__((packed)); + +struct usb_cdc_mdlm_detail_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bGuidDescriptorType; + __u8 bDetailData[0]; +}; + +struct usb_cdc_obex_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; +} __attribute__((packed)); + +struct usb_cdc_ncm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdNcmVersion; + __u8 bmNetworkCapabilities; +} __attribute__((packed)); + +struct usb_cdc_mbim_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMVersion; + __le16 wMaxControlMessage; + __u8 bNumberFilters; + __u8 bMaxFilterSize; + __le16 wMaxSegmentSize; + __u8 bmNetworkCapabilities; +} __attribute__((packed)); + +struct usb_cdc_mbim_extended_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMExtendedVersion; + __u8 bMaxOutstandingCommandMessages; + __le16 wMTU; +} __attribute__((packed)); + +struct usb_cdc_parsed_header { + struct usb_cdc_union_desc *usb_cdc_union_desc; + struct usb_cdc_header_desc *usb_cdc_header_desc; + struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; + struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; + struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; + struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; + struct usb_cdc_ether_desc *usb_cdc_ether_desc; + struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; + struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; + struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; + struct usb_cdc_obex_desc *usb_cdc_obex_desc; + struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; + struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; + struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; + bool phonet_magic_present; +}; + +struct api_context { + struct completion done; + int status; +}; + +struct set_config_request { + struct usb_device *udev; + int config; + struct work_struct work; + struct list_head node; +}; + +struct ignore_entry { + u16 vid; + u16 pid; + u16 bcdmin; + u16 bcdmax; +}; + +enum jvc_state { + STATE_INACTIVE___5 = 0, + STATE_HEADER_SPACE___2 = 1, + STATE_BIT_PULSE___2 = 2, + STATE_BIT_SPACE___2 = 3, + STATE_TRAILER_PULSE___2 = 4, + STATE_TRAILER_SPACE___2 = 5, + STATE_CHECK_REPEAT = 6, +}; + +enum { + PARITY_DISABLE_RMW = 0, + PARITY_ENABLE_RMW = 1, + PARITY_PREFER_RMW = 2, +}; + +enum { + SYNDROME_SRC_ALL = 0, + SYNDROME_SRC_WANT_DRAIN = 1, + SYNDROME_SRC_WRITTEN = 2, +}; + +enum stripe_result { + STRIPE_SUCCESS = 0, + STRIPE_RETRY = 1, + STRIPE_SCHEDULE_AND_RETRY = 2, + STRIPE_FAIL = 3, + STRIPE_WAIT_RESHAPE = 4, +}; + +struct 
stripe_request_ctx { + struct stripe_head *batch_last; + long: 32; + sector_t first_sector; + sector_t last_sector; + long unsigned int sectors_to_do[9]; + bool do_flush; +}; + +struct raid5_plug_cb { + struct blk_plug_cb cb; + struct list_head list; + struct list_head temp_inactive_list[8]; +}; + +enum reshape_loc { + LOC_NO_RESHAPE = 0, + LOC_AHEAD_OF_RESHAPE = 1, + LOC_INSIDE_RESHAPE = 2, + LOC_BEHIND_RESHAPE = 3, +}; + +enum { + CTRL_CMD_UNSPEC = 0, + CTRL_CMD_NEWFAMILY = 1, + CTRL_CMD_DELFAMILY = 2, + CTRL_CMD_GETFAMILY = 3, + CTRL_CMD_NEWOPS = 4, + CTRL_CMD_DELOPS = 5, + CTRL_CMD_GETOPS = 6, + CTRL_CMD_NEWMCAST_GRP = 7, + CTRL_CMD_DELMCAST_GRP = 8, + CTRL_CMD_GETMCAST_GRP = 9, + CTRL_CMD_GETPOLICY = 10, + __CTRL_CMD_MAX = 11, +}; + +enum { + CTRL_ATTR_UNSPEC = 0, + CTRL_ATTR_FAMILY_ID = 1, + CTRL_ATTR_FAMILY_NAME = 2, + CTRL_ATTR_VERSION = 3, + CTRL_ATTR_HDRSIZE = 4, + CTRL_ATTR_MAXATTR = 5, + CTRL_ATTR_OPS = 6, + CTRL_ATTR_MCAST_GROUPS = 7, + CTRL_ATTR_POLICY = 8, + CTRL_ATTR_OP_POLICY = 9, + CTRL_ATTR_OP = 10, + __CTRL_ATTR_MAX = 11, +}; + +enum { + CTRL_ATTR_OP_UNSPEC = 0, + CTRL_ATTR_OP_ID = 1, + CTRL_ATTR_OP_FLAGS = 2, + __CTRL_ATTR_OP_MAX = 3, +}; + +enum { + CTRL_ATTR_MCAST_GRP_UNSPEC = 0, + CTRL_ATTR_MCAST_GRP_NAME = 1, + CTRL_ATTR_MCAST_GRP_ID = 2, + __CTRL_ATTR_MCAST_GRP_MAX = 3, +}; + +enum { + CTRL_ATTR_POLICY_UNSPEC = 0, + CTRL_ATTR_POLICY_DO = 1, + CTRL_ATTR_POLICY_DUMP = 2, + __CTRL_ATTR_POLICY_DUMP_MAX = 3, + CTRL_ATTR_POLICY_DUMP_MAX = 2, +}; + +struct genl_op_iter { + const struct genl_family *family; + struct genl_split_ops doit; + struct genl_split_ops dumpit; + int cmd_idx; + int entry_idx; + u32 cmd; + u8 flags; +}; + +struct genl_start_context { + const struct genl_family *family; + struct nlmsghdr *nlh; + struct netlink_ext_ack *extack; + const struct genl_split_ops *ops; + int hdrlen; +}; + +struct netlink_policy_dump_state; + +struct ctrl_dump_policy_ctx { + struct netlink_policy_dump_state *state; + const struct genl_family *rt; + struct genl_op_iter *op_iter; + u32 op; + u16 fam_id; + u8 dump_map: 1; + u8 single_op: 1; +}; + +struct phc_vclocks_reply_data { + struct ethnl_reply_data base; + int num; + int *index; +}; + +enum { + BPF_F_CURRENT_NETNS = -1, +}; + +struct bpf_ct_opts { + s32 netns_id; + s32 error; + u8 l4proto; + u8 dir; + u16 ct_zone_id; + u8 ct_zone_dir; + u8 reserved[3]; +}; + +enum { + NF_BPF_CT_OPTS_SZ = 16, +}; + +struct fib_entry_notifier_info { + struct fib_notifier_info info; + u32 dst; + int dst_len; + struct fib_info *fi; + dscp_t dscp; + u8 type; + u32 tb_id; +}; + +typedef unsigned int t_key; + +struct key_vector { + t_key key; + unsigned char pos; + unsigned char bits; + unsigned char slen; + union { + struct hlist_head leaf; + struct { + struct {} __empty_tnode; + struct key_vector *tnode[0]; + }; + }; +}; + +struct tnode { + struct callback_head rcu; + t_key empty_children; + t_key full_children; + struct key_vector *parent; + struct key_vector kv[1]; +}; + +struct trie_stat { + unsigned int totdepth; + unsigned int maxdepth; + unsigned int tnodes; + unsigned int leaves; + unsigned int nullpointers; + unsigned int prefixes; + unsigned int nodesizes[32]; +}; + +struct trie { + struct key_vector kv[1]; +}; + +struct fib_trie_iter { + struct seq_net_private p; + struct fib_table *tb; + struct key_vector *tnode; + unsigned int index; + unsigned int depth; +}; + +struct fib_route_iter { + struct seq_net_private p; + struct fib_table *main_tb; + struct key_vector *tnode; + long: 32; + loff_t pos; + t_key key; + long: 32; +}; + 
+struct unix_vertex { + struct list_head edges; + struct list_head entry; + struct list_head scc_entry; + long unsigned int out_degree; + long unsigned int index; + long unsigned int scc_index; +}; + +struct scm_stat { + atomic_t nr_fds; + long unsigned int nr_unix_fds; +}; + +struct unix_address; + +struct unix_sock { + struct sock sk; + struct unix_address *addr; + struct path path; + struct mutex iolock; + struct mutex bindlock; + struct sock *peer; + struct sock *listener; + struct unix_vertex *vertex; + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct socket_wq peer_wq; + wait_queue_entry_t peer_wake; + struct scm_stat scm_stat; + struct sk_buff *oob_skb; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct unix_address { + refcount_t refcnt; + int len; + struct sockaddr_un name[0]; +}; + +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_bridge_frag_data; + +struct nd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + __u8 opt[0]; +}; + +enum br_pkt_type { + BR_PKT_UNICAST = 0, + BR_PKT_MULTICAST = 1, + BR_PKT_BROADCAST = 2, +}; + +struct nf_br_ops { + int (*br_dev_xmit_hook)(struct sk_buff *); +}; + +struct vm_unmapped_area_info { + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; + long unsigned int start_gap; +}; + +enum mmap_allocation_direction { + UP___2 = 0, + DOWN___2 = 1, +}; + +enum tlb_write_entry { + tlb_random = 0, + tlb_indexed = 1, +}; + +struct work_registers { + int r1; + int r2; + int r3; +}; + +struct tlb_reg_save { + long unsigned int a; + long unsigned int b; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum label_id___2 { + label_second_part = 1, + label_leave = 2, + label_vmalloc = 3, + label_vmalloc_done = 4, + label_tlbw_hazard_0 = 5, + label_split = 13, + label_tlbl_goaround1 = 14, + label_tlbl_goaround2 = 15, + label_nopage_tlbl = 16, + label_nopage_tlbs = 17, + label_nopage_tlbm = 18, + label_smp_pgtable_change = 19, + label_r3000_write_probe_fail = 20, + label_large_segbits_fault = 21, +}; + +enum vmalloc64_mode { + not_refill = 0, + refill_scratch = 1, + refill_noscratch = 2, +}; + +struct mips_huge_tlb_info { + int huge_pte; + int restore_scratch; + bool need_reload_pte; +}; + +enum ingenic_machine_type { + MACH_INGENIC_UNKNOWN = 0, + MACH_INGENIC_JZ4720 = 1, + MACH_INGENIC_JZ4725 = 2, + MACH_INGENIC_JZ4725B = 3, + MACH_INGENIC_JZ4730 = 4, + MACH_INGENIC_JZ4740 = 5, + MACH_INGENIC_JZ4750 = 6, + MACH_INGENIC_JZ4755 = 7, + MACH_INGENIC_JZ4760 = 8, + MACH_INGENIC_JZ4760B = 9, + MACH_INGENIC_JZ4770 = 10, + MACH_INGENIC_JZ4775 = 11, + MACH_INGENIC_JZ4780 = 12, + MACH_INGENIC_X1000 = 13, + MACH_INGENIC_X1000E = 14, + MACH_INGENIC_X1830 = 15, + MACH_INGENIC_X2000 = 16, + MACH_INGENIC_X2000E = 17, + MACH_INGENIC_X2000H = 18, 
+ MACH_INGENIC_X2100 = 19, +}; + +struct taint_flag { + char c_true; + char c_false; + bool module; + const char *desc; +}; + +enum error_detector { + ERROR_DETECTOR_KFENCE = 0, + ERROR_DETECTOR_KASAN = 1, + ERROR_DETECTOR_WARN = 2, +}; + +struct warn_args { + const char *fmt; + va_list args; +}; + +typedef __kernel_timer_t timer_t; + +typedef long unsigned int old_sigset_t; + +struct sigaltstack { + void *ss_sp; + __kernel_size_t ss_size; + int ss_flags; +}; + +typedef struct sigaltstack stack_t; + +struct siginfo { + union { + struct { + int si_signo; + int si_code; + int si_errno; + union __sifields _sifields; + }; + int _si_pad[32]; + }; +}; + +typedef struct siginfo siginfo_t; + +struct sigqueue { + struct list_head list; + int flags; + kernel_siginfo_t info; + struct ucounts *ucounts; +}; + +enum alarmtimer_type { + ALARM_REALTIME = 0, + ALARM_BOOTTIME = 1, + ALARM_NUMTYPE = 2, + ALARM_REALTIME_FREEZER = 3, + ALARM_BOOTTIME_FREEZER = 4, +}; + +struct alarm { + struct timerqueue_node node; + struct hrtimer timer; + void (*function)(struct alarm *, ktime_t); + enum alarmtimer_type type; + int state; + void *data; +}; + +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct pid *pid; + struct list_head elist; + bool firing; + bool nanosleep; + struct task_struct *handling; +}; + +struct k_clock; + +struct k_itimer { + struct hlist_node list; + struct hlist_node ignored_list; + struct hlist_node t_hash; + spinlock_t it_lock; + const struct k_clock *kclock; + clockid_t it_clock; + timer_t it_id; + int it_status; + bool it_sig_periodic; + s64 it_overrun; + s64 it_overrun_last; + unsigned int it_signal_seq; + unsigned int it_sigqueue_seq; + int it_sigev_notify; + enum pid_type it_pid_type; + ktime_t it_interval; + struct signal_struct *it_signal; + union { + struct pid *it_pid; + struct task_struct *it_process; + }; + struct sigqueue sigq; + rcuref_t rcuref; + long: 32; + union { + struct { + struct hrtimer timer; + } real; + struct cpu_timer cpu; + struct { + struct alarm alarmtimer; + } alarm; + } it; + struct callback_head rcu; +}; + +struct k_clock { + int (*clock_getres)(const clockid_t, struct timespec64 *); + int (*clock_set)(const clockid_t, const struct timespec64 *); + int (*clock_get_timespec)(const clockid_t, struct timespec64 *); + ktime_t (*clock_get_ktime)(const clockid_t); + int (*clock_adj)(const clockid_t, struct __kernel_timex *); + int (*timer_create)(struct k_itimer *); + int (*nsleep)(const clockid_t, int, const struct timespec64 *); + int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); + int (*timer_del)(struct k_itimer *); + void (*timer_get)(struct k_itimer *, struct itimerspec64 *); + void (*timer_rearm)(struct k_itimer *); + s64 (*timer_forward)(struct k_itimer *, ktime_t); + ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); + int (*timer_try_to_cancel)(struct k_itimer *); + void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); + void (*timer_wait_running)(struct k_itimer *); +}; + +struct multiprocess_signals { + sigset_t signal; + struct hlist_node node; +}; + +enum { + TRACE_SIGNAL_DELIVERED = 0, + TRACE_SIGNAL_IGNORED = 1, + TRACE_SIGNAL_ALREADY_PENDING = 2, + TRACE_SIGNAL_OVERFLOW_FAIL = 3, + TRACE_SIGNAL_LOSE_INFO = 4, +}; + +struct trace_event_raw_signal_generate { + struct trace_entry ent; + int sig; + int errno; + int code; + char comm[16]; + pid_t pid; + int group; + int result; + char __data[0]; +}; + +struct trace_event_raw_signal_deliver { + struct trace_entry ent; + 
int sig; + int errno; + int code; + long unsigned int sa_handler; + long unsigned int sa_flags; + char __data[0]; +}; + +struct trace_event_data_offsets_signal_generate {}; + +struct trace_event_data_offsets_signal_deliver {}; + +typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, struct task_struct *, int, int); + +typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, struct k_sigaction *); + +enum posix_timer_state { + POSIX_TIMER_DISARMED = 0, + POSIX_TIMER_ARMED = 1, + POSIX_TIMER_REQUEUE_PENDING = 2, +}; + +enum sig_handler { + HANDLER_CURRENT = 0, + HANDLER_SIG_DFL = 1, + HANDLER_EXIT = 2, +}; + +struct ktime_timestamps { + u64 mono; + u64 boot; + u64 real; +}; + +struct system_time_snapshot { + u64 cycles; + ktime_t real; + ktime_t boot; + ktime_t raw; + enum clocksource_ids cs_id; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + long: 32; +}; + +struct system_counterval_t { + u64 cycles; + enum clocksource_ids cs_id; + bool use_nsecs; +}; + +struct tk_read_base { + struct clocksource *clock; + long: 32; + u64 mask; + u64 cycle_last; + u32 mult; + u32 shift; + u64 xtime_nsec; + ktime_t base; + u64 base_real; +}; + +struct timekeeper { + struct tk_read_base tkr_mono; + u64 xtime_sec; + long unsigned int ktime_sec; + long: 32; + struct timespec64 wall_to_monotonic; + ktime_t offs_real; + ktime_t offs_boot; + ktime_t offs_tai; + s32 tai_offset; + long: 32; + struct tk_read_base tkr_raw; + u64 raw_sec; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + struct timespec64 monotonic_to_boot; + u64 cycle_interval; + u64 xtime_interval; + s64 xtime_remainder; + u64 raw_interval; + ktime_t next_leap_ktime; + u64 ntp_tick; + s64 ntp_error; + u32 ntp_error_shift; + u32 ntp_err_mult; + u32 skip_second_overflow; + long: 32; +}; + +enum timekeeping_adv_mode { + TK_ADV_TICK = 0, + TK_ADV_FREQ = 1, +}; + +struct tk_data { + seqcount_raw_spinlock_t seq; + long: 32; + struct timekeeper timekeeper; + struct timekeeper shadow_timekeeper; + raw_spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct tk_fast { + seqcount_latch_t seq; + long: 32; + struct tk_read_base base[2]; +}; + +enum { + EVENT_TRIGGER_FL_PROBE = 1, +}; + +struct event_trigger_ops; + +struct event_command; + +struct event_trigger_data { + long unsigned int count; + int ref; + int flags; + struct event_trigger_ops *ops; + struct event_command *cmd_ops; + struct event_filter *filter; + char *filter_str; + void *private_data; + bool paused; + bool paused_tmp; + struct list_head list; + char *name; + struct list_head named_list; + struct event_trigger_data *named_data; +}; + +struct event_trigger_ops { + void (*trigger)(struct event_trigger_data *, struct trace_buffer *, void *, struct ring_buffer_event *); + int (*init)(struct event_trigger_data *); + void (*free)(struct event_trigger_data *); + int (*print)(struct seq_file *, struct event_trigger_data *); +}; + +struct event_command { + struct list_head list; + char *name; + enum event_trigger_type trigger_type; + int flags; + int (*parse)(struct event_command *, struct trace_event_file *, char *, char *, char *); + int (*reg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg_all)(struct trace_event_file *); + int 
(*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *); + struct event_trigger_ops * (*get_trigger_ops)(char *, char *); +}; + +struct enable_trigger_data { + struct trace_event_file *file; + bool enable; + bool hist; +}; + +enum event_command_flags { + EVENT_CMD_FL_POST_TRIGGER = 1, + EVENT_CMD_FL_NEEDS_REC = 2, +}; + +typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *, struct bpf_tramp_run_ctx *); + +typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *, u64, struct bpf_tramp_run_ctx *); + +enum bpf_text_poke_type { + BPF_MOD_CALL = 0, + BPF_MOD_JUMP = 1, +}; + +struct trace_event_raw_mm_lru_insertion { + struct trace_entry ent; + struct folio *folio; + long unsigned int pfn; + enum lru_list lru; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_mm_lru_activate { + struct trace_entry ent; + struct folio *folio; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_lru_insertion {}; + +struct trace_event_data_offsets_mm_lru_activate {}; + +typedef void (*btf_trace_mm_lru_insertion)(void *, struct folio *); + +typedef void (*btf_trace_mm_lru_activate)(void *, struct folio *); + +struct cpu_fbatches { + local_lock_t lock; + struct folio_batch lru_add; + struct folio_batch lru_deactivate_file; + struct folio_batch lru_deactivate; + struct folio_batch lru_lazyfree; + struct folio_batch lru_activate; + local_lock_t lock_irq; + struct folio_batch lru_move_tail; +}; + +typedef void (*move_fn_t)(struct lruvec *, struct folio *); + +struct zpool_driver { + char *type; + struct module *owner; + atomic_t refcount; + struct list_head list; + void * (*create)(const char *, gfp_t); + void (*destroy)(void *); + bool malloc_support_movable; + int (*malloc)(void *, size_t, gfp_t, long unsigned int *); + void (*free)(void *, long unsigned int); + bool sleep_mapped; + void * (*map)(void *, long unsigned int, enum zpool_mapmode); + void (*unmap)(void *, long unsigned int); + u64 (*total_pages)(void *); +}; + +struct zpool { + struct zpool_driver *driver; + void *pool; +}; + +struct fs_error_report { + int error; + struct inode *inode; + struct super_block *sb; +}; + +typedef struct fsnotify_mark_connector *fsnotify_connp_t; + +struct mmp_struct { + __le32 mmp_magic; + __le32 mmp_seq; + __le64 mmp_time; + char mmp_nodename[64]; + char mmp_bdevname[32]; + __le16 mmp_check_interval; + __le16 mmp_pad1; + __le32 mmp_pad2[226]; + __le32 mmp_checksum; +}; + +enum jbd2_shrink_type { + JBD2_SHRINK_DESTROY = 0, + JBD2_SHRINK_BUSY_STOP = 1, + JBD2_SHRINK_BUSY_SKIP = 2, +}; + +struct getdents_callback { + struct dir_context ctx; + char *name; + long: 32; + u64 ino; + int found; + int sequence; +}; + +enum { + __PAGE_UNLOCK_BIT = 0, + PAGE_UNLOCK = 1, + __PAGE_UNLOCK_SEQ = 0, + __PAGE_START_WRITEBACK_BIT = 1, + PAGE_START_WRITEBACK = 2, + __PAGE_START_WRITEBACK_SEQ = 1, + __PAGE_END_WRITEBACK_BIT = 2, + PAGE_END_WRITEBACK = 4, + __PAGE_END_WRITEBACK_SEQ = 2, + __PAGE_SET_ORDERED_BIT = 3, + PAGE_SET_ORDERED = 8, + __PAGE_SET_ORDERED_SEQ = 3, +}; + +struct btrfs_iget_args { + u64 ino; + struct btrfs_root *root; + long: 32; +}; + +struct btrfs_rename_ctx { + u64 index; +}; + +struct data_reloc_warn { + struct btrfs_path path; + struct btrfs_fs_info *fs_info; + u64 extent_item_size; + u64 logical; + int mirror_num; + long: 32; +}; + +struct async_extent { + u64 start; + u64 ram_size; + u64 compressed_size; + struct folio **folios; + long unsigned int nr_folios; + int compress_type; + struct list_head list; + long: 32; +}; + +struct 
async_cow; + +struct async_chunk { + struct btrfs_inode *inode; + struct folio *locked_folio; + u64 start; + u64 end; + blk_opf_t write_flags; + struct list_head extents; + struct cgroup_subsys_state *blkcg_css; + struct btrfs_work work; + struct async_cow *async_cow; + long: 32; +}; + +struct async_cow { + atomic_t num_chunks; + long: 32; + struct async_chunk chunks[0]; +}; + +struct can_nocow_file_extent_args { + u64 start; + u64 end; + bool writeback_path; + bool strict; + bool free_path; + long: 32; + struct btrfs_file_extent file_extent; +}; + +struct btrfs_writepage_fixup { + struct folio *folio; + struct btrfs_inode *inode; + struct btrfs_work work; +}; + +struct dir_entry { + u64 ino; + u64 offset; + unsigned int type; + int name_len; +}; + +struct btrfs_delalloc_work { + struct inode *inode; + struct completion completion; + struct list_head list; + struct btrfs_work work; +}; + +struct btrfs_encoded_read_private { + wait_queue_head_t wait; + void *uring_ctx; + atomic_t pending; + blk_status_t status; +}; + +struct btrfs_swap_info { + u64 start; + u64 block_start; + u64 block_len; + u64 lowest_ppage; + u64 highest_ppage; + long unsigned int nr_pages; + int nr_extents; +}; + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[0]; +}; + +enum x509_akid_actions { + ACT_x509_akid_note_kid = 0, + ACT_x509_akid_note_name = 1, + ACT_x509_akid_note_serial = 2, + ACT_x509_extract_name_segment___2 = 3, + ACT_x509_note_OID___2 = 4, + NR__x509_akid_actions = 5, +}; + +typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); + +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = 65535, +}; + +struct mq_inflight { + struct block_device *part; + unsigned int inflight[2]; +}; + +struct blk_rq_wait { + struct completion done; + blk_status_t ret; +}; + +struct blk_expired_data { + bool has_timedout_rq; + long unsigned int next; + long unsigned int timeout_start; +}; + +struct flush_busy_ctx_data { + struct blk_mq_hw_ctx *hctx; + struct list_head *list; +}; + +struct dispatch_rq_data { + struct blk_mq_hw_ctx *hctx; + struct request *rq; +}; + +enum prep_dispatch { + PREP_DISPATCH_OK = 0, + PREP_DISPATCH_NO_TAG = 1, + PREP_DISPATCH_NO_BUDGET = 2, +}; + +struct rq_iter_data { + struct blk_mq_hw_ctx *hctx; + bool has_rq; +}; + +struct blk_mq_qe_pair { + struct list_head node; + struct request_queue *q; + struct elevator_type *type; +}; + +struct rnd_state { + __u32 s1; + __u32 s2; + __u32 s3; + __u32 s4; +}; + +typedef struct { + FSE_CTable CTable[59]; + U32 scratchBuffer[41]; + unsigned int count[13]; + S16 norm[13]; +} HUF_CompressWeightsWksp; + +typedef struct { + HUF_CompressWeightsWksp wksp; + BYTE bitsToWeight[13]; + BYTE huffWeight[255]; +} HUF_WriteCTableWksp; + +struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +}; + +typedef struct nodeElt_s nodeElt; + +typedef struct { + U16 base; + U16 curr; +} rankPos; + +typedef nodeElt huffNodeTable[512]; + +typedef struct { + huffNodeTable huffNodeTbl; + rankPos rankPosition[192]; +} HUF_buildCTable_wksp_tables; + +typedef struct { + size_t bitContainer[2]; + size_t bitPos[2]; + BYTE *startPtr; + BYTE *ptr; + BYTE *endPtr; +} HUF_CStream_t; + +typedef enum { + HUF_singleStream = 0, + HUF_fourStreams = 1, +} HUF_nbStreams_e; + +typedef struct { + unsigned int count[256]; + HUF_CElt CTable[257]; + union { + HUF_buildCTable_wksp_tables buildCTable_wksp; + HUF_WriteCTableWksp writeCTable_wksp; + U32 hist_wksp[1024]; + } wksps; +} 
HUF_compress_tables_t; + +struct vga_device { + struct list_head list; + struct pci_dev *pdev; + unsigned int decodes; + unsigned int owns; + unsigned int locks; + unsigned int io_lock_cnt; + unsigned int mem_lock_cnt; + unsigned int io_norm_cnt; + unsigned int mem_norm_cnt; + bool bridge_has_one_vga; + bool is_firmware_default; + unsigned int (*set_decode)(struct pci_dev *, bool); +}; + +struct vga_arb_user_card { + struct pci_dev *pdev; + unsigned int mem_cnt; + unsigned int io_cnt; +}; + +struct vga_arb_private { + struct list_head list; + struct pci_dev *target; + struct vga_arb_user_card cards[16]; + spinlock_t lock; +}; + +struct clk_gate { + struct clk_hw hw; + void *reg; + u8 bit_idx; + u8 flags; + spinlock_t *lock; +}; + +struct tiocl_selection { + short unsigned int xs; + short unsigned int ys; + short unsigned int xe; + short unsigned int ye; + short unsigned int sel_mode; +}; + +struct vc_selection { + struct mutex lock; + struct vc_data *cons; + char *buffer; + unsigned int buf_len; + volatile int start; + int end; +}; + +struct phylib_stubs { + int (*hwtstamp_get)(struct phy_device *, struct kernel_hwtstamp_config *); + int (*hwtstamp_set)(struct phy_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); +}; + +struct ep_device { + struct usb_endpoint_descriptor *desc; + struct usb_device *udev; + struct device dev; +}; + +enum ps2_disposition { + PS2_PROCESS = 0, + PS2_IGNORE = 1, + PS2_ERROR = 2, +}; + +struct ps2dev; + +typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8, unsigned int); + +typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8); + +struct ps2dev { + struct serio *serio; + struct mutex cmd_mutex; + wait_queue_head_t wait; + long unsigned int flags; + u8 cmdbuf[8]; + u8 cmdcnt; + u8 nak; + ps2_pre_receive_handler_t pre_receive_handler; + ps2_receive_handler_t receive_handler; +}; + +struct hid_debug_list { + struct { + union { + struct __kfifo kfifo; + char *type; + const char *const_type; + char (*rectype)[0]; + char *ptr; + const char *ptr_const; + }; + char buf[0]; + } hid_debug_fifo; + struct fasync_struct *fasync; + struct hid_device *hdev; + struct list_head node; + struct mutex read_mutex; +}; + +struct hid_usage_entry { + unsigned int page; + unsigned int usage; + const char *description; +}; + +enum { + TCA_STATS_UNSPEC = 0, + TCA_STATS_BASIC = 1, + TCA_STATS_RATE_EST = 2, + TCA_STATS_QUEUE = 3, + TCA_STATS_APP = 4, + TCA_STATS_RATE_EST64 = 5, + TCA_STATS_PAD = 6, + TCA_STATS_BASIC_HW = 7, + TCA_STATS_PKT64 = 8, + __TCA_STATS_MAX = 9, +}; + +struct gnet_stats_basic { + __u64 bytes; + __u32 packets; + long: 32; +}; + +struct gnet_stats_rate_est { + __u32 bps; + __u32 pps; +}; + +enum { + NETDEV_A_PAGE_POOL_STATS_INFO = 1, + NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW = 9, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER = 10, + NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY = 11, + NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL = 12, + NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE = 13, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED = 14, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL = 15, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING = 16, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL = 17, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT = 18, + __NETDEV_A_PAGE_POOL_STATS_MAX = 19, + NETDEV_A_PAGE_POOL_STATS_MAX = 18, +}; + +enum { + ETHTOOL_A_TS_STAT_UNSPEC = 0, + ETHTOOL_A_TS_STAT_TX_PKTS = 1, + ETHTOOL_A_TS_STAT_TX_LOST = 2, + ETHTOOL_A_TS_STAT_TX_ERR = 3, + __ETHTOOL_A_TS_STAT_CNT = 4, + 
ETHTOOL_A_TS_STAT_MAX = 3, +}; + +struct tsinfo_reply_data { + struct ethnl_reply_data base; + struct kernel_ethtool_ts_info ts_info; + struct ethtool_ts_stats stats; +}; + +struct nf_ct_tcp_flags { + __u8 flags; + __u8 mask; +}; + +enum nf_ct_tcp_action { + NFCT_TCP_IGNORE = 0, + NFCT_TCP_INVALID = 1, + NFCT_TCP_ACCEPT = 2, +}; + +enum tcp_bit_set { + TCP_SYN_SET = 0, + TCP_SYNACK_SET = 1, + TCP_FIN_SET = 2, + TCP_ACK_SET = 3, + TCP_RST_SET = 4, + TCP_NONE_SET = 5, +}; + +enum ctattr_protoinfo_tcp { + CTA_PROTOINFO_TCP_UNSPEC = 0, + CTA_PROTOINFO_TCP_STATE = 1, + CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2, + CTA_PROTOINFO_TCP_WSCALE_REPLY = 3, + CTA_PROTOINFO_TCP_FLAGS_ORIGINAL = 4, + CTA_PROTOINFO_TCP_FLAGS_REPLY = 5, + __CTA_PROTOINFO_TCP_MAX = 6, +}; + +enum nft_inner_type { + NFT_INNER_UNSPEC = 0, + NFT_INNER_VXLAN = 1, + NFT_INNER_GENEVE = 2, +}; + +enum nft_inner_flags { + NFT_INNER_HDRSIZE = 1, + NFT_INNER_LL = 2, + NFT_INNER_NH = 4, + NFT_INNER_TH = 8, +}; + +enum nft_inner_attributes { + NFTA_INNER_UNSPEC = 0, + NFTA_INNER_NUM = 1, + NFTA_INNER_TYPE = 2, + NFTA_INNER_FLAGS = 3, + NFTA_INNER_HDRSIZE = 4, + NFTA_INNER_EXPR = 5, + __NFTA_INNER_MAX = 6, +}; + +enum nft_meta_keys { + NFT_META_LEN = 0, + NFT_META_PROTOCOL = 1, + NFT_META_PRIORITY = 2, + NFT_META_MARK = 3, + NFT_META_IIF = 4, + NFT_META_OIF = 5, + NFT_META_IIFNAME = 6, + NFT_META_OIFNAME = 7, + NFT_META_IFTYPE = 8, + NFT_META_OIFTYPE = 9, + NFT_META_SKUID = 10, + NFT_META_SKGID = 11, + NFT_META_NFTRACE = 12, + NFT_META_RTCLASSID = 13, + NFT_META_SECMARK = 14, + NFT_META_NFPROTO = 15, + NFT_META_L4PROTO = 16, + NFT_META_BRI_IIFNAME = 17, + NFT_META_BRI_OIFNAME = 18, + NFT_META_PKTTYPE = 19, + NFT_META_CPU = 20, + NFT_META_IIFGROUP = 21, + NFT_META_OIFGROUP = 22, + NFT_META_CGROUP = 23, + NFT_META_PRANDOM = 24, + NFT_META_SECPATH = 25, + NFT_META_IIFKIND = 26, + NFT_META_OIFKIND = 27, + NFT_META_BRI_IIFPVID = 28, + NFT_META_BRI_IIFVPROTO = 29, + NFT_META_TIME_NS = 30, + NFT_META_TIME_DAY = 31, + NFT_META_TIME_HOUR = 32, + NFT_META_SDIF = 33, + NFT_META_SDIFNAME = 34, + NFT_META_BRI_BROUTE = 35, + __NFT_META_IIFTYPE = 36, +}; + +struct nft_meta { + enum nft_meta_keys key: 8; + u8 len; + union { + u8 dreg; + u8 sreg; + }; +}; + +struct genevehdr { + u8 ver: 2; + u8 opt_len: 6; + u8 oam: 1; + u8 critical: 1; + u8 rsvd1: 6; + __be16 proto_type; + u8 vni[3]; + u8 rsvd2; + u8 options[0]; +}; + +struct __nft_expr { + const struct nft_expr_ops *ops; + long: 32; + union { + struct nft_payload payload; + struct nft_meta meta; + }; +}; + +enum { + NFT_INNER_EXPR_PAYLOAD = 0, + NFT_INNER_EXPR_META = 1, +}; + +struct nft_inner { + u8 flags; + u8 hdrsize; + u8 type; + u8 expr_type; + long: 32; + struct __nft_expr expr; +}; + +struct nft_expr_info { + const struct nft_expr_ops *ops; + const struct nlattr *attr; + struct nlattr *tb[17]; +}; + +struct sock_bh_locked { + struct sock *sock; + local_lock_t bh_lock; +}; + +struct ip_reply_arg { + struct kvec iov[1]; + int flags; + __wsum csum; + int csumoffset; + int bound_dev_if; + u8 tos; + kuid_t uid; +}; + +typedef u32 inet_ehashfn_t(const struct net *, const __be32, const __u16, const __be32, const __be16); + +struct bpf_tcp_iter_state { + struct tcp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; + long: 32; +}; + +struct bpf_iter__tcp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct sock_common *sk_common; + }; + uid_t uid; + long: 32; +}; + +enum sk_pacing { + SK_PACING_NONE = 0, + 
SK_PACING_NEEDED = 1, + SK_PACING_FQ = 2, +}; + +struct bictcp { + u32 cnt; + u32 last_max_cwnd; + u32 last_cwnd; + u32 last_time; + u32 bic_origin_point; + u32 bic_K; + u32 delay_min; + u32 epoch_start; + u32 ack_cnt; + u32 tcp_cwnd; + u16 unused; + u8 sample_cnt; + u8 found; + u32 round_start; + u32 end_seq; + u32 last_ack; + u32 curr_rtt; +}; + +typedef __be64 fdt64_t; + +struct fdt_reserve_entry { + fdt64_t address; + fdt64_t size; +}; + +struct fdt_node_header { + fdt32_t tag; + char name[0]; +}; + +struct fdt_property { + fdt32_t tag; + fdt32_t len; + fdt32_t nameoff; + char data[0]; +}; + +enum { + ASSUME_PERFECT = 255, + ASSUME_VALID_DTB = 1, + ASSUME_VALID_INPUT = 2, + ASSUME_LATEST = 4, + ASSUME_NO_ROLLBACK = 8, + ASSUME_LIBFDT_ORDER = 16, + ASSUME_LIBFDT_FLAWLESS = 32, +}; + +struct cpu_stop_done { + atomic_t nr_todo; + int ret; + struct completion completion; +}; + +struct cpu_stopper { + struct task_struct *thread; + raw_spinlock_t lock; + bool enabled; + struct list_head works; + struct cpu_stop_work stop_work; + long unsigned int caller; + cpu_stop_fn_t fn; +}; + +enum multi_stop_state { + MULTI_STOP_NONE = 0, + MULTI_STOP_PREPARE = 1, + MULTI_STOP_DISABLE_IRQ = 2, + MULTI_STOP_RUN = 3, + MULTI_STOP_EXIT = 4, +}; + +struct multi_stop_data { + cpu_stop_fn_t fn; + void *data; + unsigned int num_threads; + const struct cpumask *active_cpus; + enum multi_stop_state state; + atomic_t thread_ack; +}; + +struct eprobe_trace_entry_head { + struct trace_entry ent; +}; + +struct trace_eprobe { + const char *event_system; + const char *event_name; + char *filter_str; + struct trace_event_call *event; + struct dyn_event devent; + struct trace_probe tp; +}; + +struct eprobe_data { + struct trace_event_file *file; + struct trace_eprobe *ep; +}; + +struct bpf_iter_num { + __u64 __opaque[1]; +}; + +struct bpf_iter_target_info { + struct list_head list; + const struct bpf_iter_reg *reg_info; + u32 btf_id; +}; + +struct bpf_iter_link { + struct bpf_link link; + struct bpf_iter_aux_info aux; + struct bpf_iter_target_info *tinfo; +}; + +struct bpf_iter_priv_data { + struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; + struct bpf_prog *prog; + long: 32; + u64 session_id; + u64 seq_num; + bool done_stop; + long: 32; + u8 target_private[0]; +}; + +typedef u64 (*btf_bpf_for_each_map_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_loop)(u32, void *, void *, u64); + +struct bpf_iter_num_kern { + int cur; + int end; +}; + +struct perf_event_mmap_page { + __u32 version; + __u32 compat_version; + __u32 lock; + __u32 index; + __s64 offset; + __u64 time_enabled; + __u64 time_running; + union { + __u64 capabilities; + struct { + __u64 cap_bit0: 1; + __u64 cap_bit0_is_deprecated: 1; + __u64 cap_user_rdpmc: 1; + __u64 cap_user_time: 1; + __u64 cap_user_time_zero: 1; + __u64 cap_user_time_short: 1; + __u64 cap_____res: 58; + }; + }; + __u16 pmc_width; + __u16 time_shift; + __u32 time_mult; + __u64 time_offset; + __u64 time_zero; + __u32 size; + __u32 __reserved_1; + __u64 time_cycles; + __u64 time_mask; + __u8 __reserved[928]; + __u64 data_head; + __u64 data_tail; + __u64 data_offset; + __u64 data_size; + __u64 aux_head; + __u64 aux_tail; + __u64 aux_offset; + __u64 aux_size; +}; + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + 
PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_AUX_OUTPUT_HW_ID = 21, + PERF_RECORD_MAX = 22, +}; + +struct perf_buffer { + refcount_t refcount; + struct callback_head callback_head; + struct work_struct work; + int page_order; + int nr_pages; + int overwrite; + int paused; + atomic_t poll; + local_t head; + unsigned int nest; + local_t events; + local_t wakeup; + local_t lost; + long int watermark; + long int aux_watermark; + spinlock_t event_lock; + struct list_head event_list; + atomic_t mmap_count; + long unsigned int mmap_locked; + struct user_struct *mmap_user; + struct mutex aux_mutex; + long int aux_head; + unsigned int aux_nest; + long int aux_wakeup; + long unsigned int aux_pgoff; + int aux_nr_pages; + int aux_overwrite; + atomic_t aux_mmap_count; + long unsigned int aux_mmap_locked; + void (*free_aux)(void *); + refcount_t aux_refcount; + int aux_in_sampling; + int aux_in_pause_resume; + void **aux_pages; + void *aux_priv; + struct perf_event_mmap_page *user_page; + void *data_pages[0]; +}; + +struct list_lru_memcg { + struct callback_head rcu; + struct list_lru_one node[0]; +}; + +struct swap_slots_cache { + bool lock_initialized; + struct mutex alloc_lock; + swp_entry_t *slots; + int nr; + int cur; + spinlock_t free_lock; + swp_entry_t *slots_ret; + int n_ret; +}; + +struct dentry_stat_t { + long int nr_dentry; + long int nr_unused; + long int age_limit; + long int want_pages; + long int nr_negative; + long int dummy; +}; + +struct external_name { + union { + atomic_t count; + struct callback_head head; + } u; + unsigned char name[0]; +}; + +enum d_walk_ret { + D_WALK_CONTINUE = 0, + D_WALK_QUIT = 1, + D_WALK_NORETRY = 2, + D_WALK_SKIP = 3, +}; + +struct check_mount { + struct vfsmount *mnt; + unsigned int mounted; +}; + +struct select_data { + struct dentry *start; + union { + long int found; + struct dentry *victim; + }; + struct list_head dispose; +}; + +struct elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + long unsigned int pr_flag; + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +struct memelfnote { + const char *name; + int type; + unsigned int datasz; + void *data; +}; + +struct membuf { + void *p; + size_t left; +}; + +struct user_regset; + +typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); + +typedef int user_regset_get2_fn(struct task_struct *, const struct user_regset *, struct membuf); + +typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *); + +typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); + +struct user_regset { + user_regset_get2_fn *regset_get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + 
+struct elf_thread_core_info { + struct elf_thread_core_info *next; + struct task_struct *task; + struct elf_prstatus prstatus; + struct memelfnote notes[0]; +}; + +struct elf_note_info { + struct elf_thread_core_info *thread; + struct memelfnote psinfo; + struct memelfnote signote; + struct memelfnote auxv; + struct memelfnote files; + siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +struct __fat_dirent { + long int d_ino; + __kernel_off_t d_off; + short unsigned int d_reclen; + char d_name[256]; +}; + +struct msdos_dir_entry { + __u8 name[11]; + __u8 attr; + __u8 lcase; + __u8 ctime_cs; + __le16 ctime; + __le16 cdate; + __le16 adate; + __le16 starthi; + __le16 time; + __le16 date; + __le16 start; + __le32 size; +}; + +struct msdos_dir_slot { + __u8 id; + __u8 name0_4[10]; + __u8 attr; + __u8 reserved; + __u8 alias_checksum; + __u8 name5_10[12]; + __le16 start; + __u8 name11_12[4]; +}; + +struct fat_slot_info { + loff_t i_pos; + loff_t slot_off; + int nr_slots; + struct msdos_dir_entry *de; + struct buffer_head *bh; + long: 32; +}; + +enum { + PARSE_INVALID = 1, + PARSE_NOT_LONGNAME = 2, + PARSE_EOF = 3, +}; + +struct fat_ioctl_filldir_callback { + struct dir_context ctx; + void *dirent; + int result; + const char *longname; + int long_len; + const char *shortname; + int short_len; +}; + +struct btrfs_eb_write_context { + struct writeback_control *wbc; + struct extent_buffer *eb; + struct btrfs_block_group *zoned_bg; +}; + +struct btrfs_bio_ctrl { + struct btrfs_bio *bbio; + enum btrfs_compression_type compress_type; + u32 len_to_oe_boundary; + blk_opf_t opf; + btrfs_bio_end_io_t end_io_func; + struct writeback_control *wbc; + long unsigned int submit_bitmap; +}; + +enum blake2b_lengths { + BLAKE2B_BLOCK_SIZE = 128, + BLAKE2B_HASH_SIZE = 64, + BLAKE2B_KEY_SIZE = 64, + BLAKE2B_160_HASH_SIZE = 20, + BLAKE2B_256_HASH_SIZE = 32, + BLAKE2B_384_HASH_SIZE = 48, + BLAKE2B_512_HASH_SIZE = 64, +}; + +struct blake2b_state { + u64 h[8]; + u64 t[2]; + u64 f[2]; + u8 buf[128]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2b_iv { + BLAKE2B_IV0 = 7640891576956012808ULL, + BLAKE2B_IV1 = 13503953896175478587ULL, + BLAKE2B_IV2 = 4354685564936845355ULL, + BLAKE2B_IV3 = 11912009170470909681ULL, + BLAKE2B_IV4 = 5840696475078001361ULL, + BLAKE2B_IV5 = 11170449401992604703ULL, + BLAKE2B_IV6 = 2270897969802886507ULL, + BLAKE2B_IV7 = 6620516959819538809ULL, +}; + +typedef void (*blake2b_compress_t)(struct blake2b_state *, const u8 *, size_t, u32); + +struct blake2b_tfm_ctx { + u8 key[64]; + unsigned int keylen; +}; + +struct fat_boot_sector { + __u8 ignored[3]; + __u8 system_id[8]; + __u8 sector_size[2]; + __u8 sec_per_clus; + __le16 reserved; + __u8 fats; + __u8 dir_entries[2]; + __u8 sectors[2]; + __u8 media; + __le16 fat_length; + __le16 secs_track; + __le16 heads; + __le32 hidden; + __le32 total_sect; + union { + struct { + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat16; + struct { + __le32 length; + __le16 flags; + __u8 version[2]; + __le32 root_cluster; + __le16 info_sector; + __le16 backup_boot; + __le16 reserved2[6]; + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat32; + }; +}; + +enum msdos_sys_ind { + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 133, + WIN98_EXTENDED_PARTITION = 15, + LINUX_DATA_PARTITION = 131, + LINUX_LVM_PARTITION = 142, + LINUX_RAID_PARTITION = 253, + SOLARIS_X86_PARTITION = 130, + 
NEW_SOLARIS_X86_PARTITION = 191, + DM6_AUX1PARTITION = 81, + DM6_AUX3PARTITION = 83, + DM6_PARTITION = 84, + EZD_PARTITION = 85, + FREEBSD_PARTITION = 165, + OPENBSD_PARTITION = 166, + NETBSD_PARTITION = 169, + BSDI_PARTITION = 183, + MINIX_PARTITION = 129, + UNIXWARE_PARTITION = 99, +}; + +struct bd_holder_disk { + struct list_head list; + struct kobject *holder_dir; + int refcnt; +}; + +struct io_xattr { + struct file *file; + struct kernel_xattr_ctx ctx; + struct filename *filename; +}; + +struct sg_append_table { + struct sg_table sgt; + struct scatterlist *prv; + unsigned int total_nents; +}; + +typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); + +typedef void sg_free_fn(struct scatterlist *, unsigned int); + +struct sg_dma_page_iter { + struct sg_page_iter base; +}; + +struct timer_rand_state { + long unsigned int last_time; + long int last_delta; + long int last_delta2; +}; + +enum chacha_constants { + CHACHA_CONSTANT_EXPA = 1634760805, + CHACHA_CONSTANT_ND_3 = 857760878, + CHACHA_CONSTANT_2_BY = 2036477234, + CHACHA_CONSTANT_TE_K = 1797285236, +}; + +enum { + CRNG_EMPTY = 0, + CRNG_EARLY = 1, + CRNG_READY = 2, +}; + +enum { + CRNG_RESEED_START_INTERVAL = 1000, + CRNG_RESEED_INTERVAL = 60000, +}; + +struct crng { + u8 key[32]; + long unsigned int generation; + local_lock_t lock; +}; + +struct batch_u8 { + u8 entropy[96]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u16 { + u16 entropy[48]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u32 { + u32 entropy[24]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u64 { + u64 entropy[12]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +enum { + POOL_BITS = 256, + POOL_READY_BITS = 256, + POOL_EARLY_BITS = 128, +}; + +struct fast_pool { + long unsigned int pool[4]; + long unsigned int last; + unsigned int count; + struct timer_list mix; +}; + +struct entropy_timer_state { + long unsigned int entropy; + struct timer_list timer; + atomic_t samples; + unsigned int samples_per_bit; +}; + +enum { + NUM_TRIAL_SAMPLES = 8192, + MAX_SAMPLES_PER_BIT = 66, +}; + +enum { + MIX_INFLIGHT = 2147483648, +}; + +enum scsi_timeouts { + SCSI_DEFAULT_EH_TIMEOUT = 10000, +}; + +struct async_scan_data { + struct list_head list; + struct Scsi_Host *shost; + struct completion prev_finished; +}; + +struct clk_bulk_data { + const char *id; + struct clk *clk; +}; + +enum { + AHCI_MAX_PORTS = 32, + AHCI_MAX_SG = 168, + AHCI_DMA_BOUNDARY = 4294967295, + AHCI_MAX_CMDS = 32, + AHCI_CMD_SZ = 32, + AHCI_CMD_SLOT_SZ = 1024, + AHCI_RX_FIS_SZ = 256, + AHCI_CMD_TBL_CDB = 64, + AHCI_CMD_TBL_HDR_SZ = 128, + AHCI_CMD_TBL_SZ = 2816, + AHCI_CMD_TBL_AR_SZ = 90112, + AHCI_PORT_PRIV_DMA_SZ = 91392, + AHCI_PORT_PRIV_FBS_DMA_SZ = 95232, + AHCI_IRQ_ON_SG = 2147483648, + AHCI_CMD_ATAPI = 32, + AHCI_CMD_WRITE = 64, + AHCI_CMD_PREFETCH = 128, + AHCI_CMD_RESET = 256, + AHCI_CMD_CLR_BUSY = 1024, + RX_FIS_PIO_SETUP = 32, + RX_FIS_D2H_REG = 64, + RX_FIS_SDB = 88, + RX_FIS_UNK = 96, + HOST_CAP = 0, + HOST_CTL = 4, + HOST_IRQ_STAT = 8, + HOST_PORTS_IMPL = 12, + HOST_VERSION = 16, + HOST_EM_LOC = 28, + HOST_EM_CTL = 32, + HOST_CAP2 = 36, + HOST_RESET = 1, + HOST_IRQ_EN = 2, + HOST_MRSM = 4, + HOST_AHCI_EN = 2147483648, + HOST_CAP_SXS = 32, + HOST_CAP_EMS = 64, + HOST_CAP_CCC = 128, + HOST_CAP_PART = 8192, + HOST_CAP_SSC = 16384, + HOST_CAP_PIO_MULTI = 32768, + HOST_CAP_FBS = 65536, + HOST_CAP_PMP = 
131072, + HOST_CAP_ONLY = 262144, + HOST_CAP_CLO = 16777216, + HOST_CAP_LED = 33554432, + HOST_CAP_ALPM = 67108864, + HOST_CAP_SSS = 134217728, + HOST_CAP_MPS = 268435456, + HOST_CAP_SNTF = 536870912, + HOST_CAP_NCQ = 1073741824, + HOST_CAP_64 = 2147483648, + HOST_CAP2_BOH = 1, + HOST_CAP2_NVMHCI = 2, + HOST_CAP2_APST = 4, + HOST_CAP2_SDS = 8, + HOST_CAP2_SADM = 16, + HOST_CAP2_DESO = 32, + PORT_LST_ADDR = 0, + PORT_LST_ADDR_HI = 4, + PORT_FIS_ADDR = 8, + PORT_FIS_ADDR_HI = 12, + PORT_IRQ_STAT = 16, + PORT_IRQ_MASK = 20, + PORT_CMD = 24, + PORT_TFDATA = 32, + PORT_SIG = 36, + PORT_CMD_ISSUE = 56, + PORT_SCR_STAT = 40, + PORT_SCR_CTL = 44, + PORT_SCR_ERR = 48, + PORT_SCR_ACT = 52, + PORT_SCR_NTF = 60, + PORT_FBS = 64, + PORT_DEVSLP = 68, + PORT_IRQ_COLD_PRES = 2147483648, + PORT_IRQ_TF_ERR = 1073741824, + PORT_IRQ_HBUS_ERR = 536870912, + PORT_IRQ_HBUS_DATA_ERR = 268435456, + PORT_IRQ_IF_ERR = 134217728, + PORT_IRQ_IF_NONFATAL = 67108864, + PORT_IRQ_OVERFLOW = 16777216, + PORT_IRQ_BAD_PMP = 8388608, + PORT_IRQ_PHYRDY = 4194304, + PORT_IRQ_DMPS = 128, + PORT_IRQ_CONNECT = 64, + PORT_IRQ_SG_DONE = 32, + PORT_IRQ_UNK_FIS = 16, + PORT_IRQ_SDB_FIS = 8, + PORT_IRQ_DMAS_FIS = 4, + PORT_IRQ_PIOS_FIS = 2, + PORT_IRQ_D2H_REG_FIS = 1, + PORT_IRQ_FREEZE = 683671632, + PORT_IRQ_ERROR = 2025848912, + DEF_PORT_IRQ = 2025848959, + PORT_CMD_ASP = 134217728, + PORT_CMD_ALPE = 67108864, + PORT_CMD_ATAPI = 16777216, + PORT_CMD_FBSCP = 4194304, + PORT_CMD_ESP = 2097152, + PORT_CMD_CPD = 1048576, + PORT_CMD_MPSP = 524288, + PORT_CMD_HPCP = 262144, + PORT_CMD_PMP = 131072, + PORT_CMD_LIST_ON = 32768, + PORT_CMD_FIS_ON = 16384, + PORT_CMD_FIS_RX = 16, + PORT_CMD_CLO = 8, + PORT_CMD_POWER_ON = 4, + PORT_CMD_SPIN_UP = 2, + PORT_CMD_START = 1, + PORT_CMD_ICC_MASK = 4026531840, + PORT_CMD_ICC_ACTIVE = 268435456, + PORT_CMD_ICC_PARTIAL = 536870912, + PORT_CMD_ICC_SLUMBER = 1610612736, + PORT_CMD_CAP = 8126464, + PORT_FBS_DWE_OFFSET = 16, + PORT_FBS_ADO_OFFSET = 12, + PORT_FBS_DEV_OFFSET = 8, + PORT_FBS_DEV_MASK = 3840, + PORT_FBS_SDE = 4, + PORT_FBS_DEC = 2, + PORT_FBS_EN = 1, + PORT_DEVSLP_DM_OFFSET = 25, + PORT_DEVSLP_DM_MASK = 503316480, + PORT_DEVSLP_DITO_OFFSET = 15, + PORT_DEVSLP_MDAT_OFFSET = 10, + PORT_DEVSLP_DETO_OFFSET = 2, + PORT_DEVSLP_DSP = 2, + PORT_DEVSLP_ADSE = 1, + AHCI_HFLAG_NO_NCQ = 1, + AHCI_HFLAG_IGN_IRQ_IF_ERR = 2, + AHCI_HFLAG_IGN_SERR_INTERNAL = 4, + AHCI_HFLAG_32BIT_ONLY = 8, + AHCI_HFLAG_MV_PATA = 16, + AHCI_HFLAG_NO_MSI = 32, + AHCI_HFLAG_NO_PMP = 64, + AHCI_HFLAG_SECT255 = 256, + AHCI_HFLAG_YES_NCQ = 512, + AHCI_HFLAG_NO_SUSPEND = 1024, + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = 2048, + AHCI_HFLAG_NO_SNTF = 4096, + AHCI_HFLAG_NO_FPDMA_AA = 8192, + AHCI_HFLAG_YES_FBS = 16384, + AHCI_HFLAG_DELAY_ENGINE = 32768, + AHCI_HFLAG_NO_DEVSLP = 131072, + AHCI_HFLAG_NO_FBS = 262144, + AHCI_HFLAG_MULTI_MSI = 1048576, + AHCI_HFLAG_WAKE_BEFORE_STOP = 4194304, + AHCI_HFLAG_YES_ALPM = 8388608, + AHCI_HFLAG_NO_WRITE_TO_RO = 16777216, + AHCI_HFLAG_SUSPEND_PHYS = 33554432, + AHCI_HFLAG_NO_SXS = 67108864, + AHCI_HFLAG_43BIT_ONLY = 134217728, + AHCI_HFLAG_INTEL_PCS_QUIRK = 268435456, + AHCI_FLAG_COMMON = 393346, + ICH_MAP = 144, + PCS_6 = 146, + PCS_7 = 148, + EM_MAX_SLOTS = 15, + EM_MAX_RETRY = 5, + EM_CTL_RST = 512, + EM_CTL_TM = 256, + EM_CTL_MR = 1, + EM_CTL_ALHD = 67108864, + EM_CTL_XMT = 33554432, + EM_CTL_SMB = 16777216, + EM_CTL_SGPIO = 524288, + EM_CTL_SES = 262144, + EM_CTL_SAFTE = 131072, + EM_CTL_LED = 65536, + EM_MSG_TYPE_LED = 1, + EM_MSG_TYPE_SAFTE = 2, + EM_MSG_TYPE_SES2 = 4, + EM_MSG_TYPE_SGPIO = 8, +}; 
+ +struct ahci_cmd_hdr { + __le32 opts; + __le32 status; + __le32 tbl_addr; + __le32 tbl_addr_hi; + __le32 reserved[4]; +}; + +struct ahci_em_priv { + enum sw_activity blink_policy; + struct timer_list timer; + long unsigned int saved_activity; + long unsigned int activity; + long unsigned int led_state; + struct ata_link *link; +}; + +struct ahci_port_priv { + struct ata_link *active_link; + struct ahci_cmd_hdr *cmd_slot; + dma_addr_t cmd_slot_dma; + void *cmd_tbl; + dma_addr_t cmd_tbl_dma; + void *rx_fis; + dma_addr_t rx_fis_dma; + unsigned int ncq_saw_d2h: 1; + unsigned int ncq_saw_dmas: 1; + unsigned int ncq_saw_sdb: 1; + spinlock_t lock; + u32 intr_mask; + bool fbs_supported; + bool fbs_enabled; + int fbs_last_dev; + struct ahci_em_priv em_priv[15]; + char *irq_desc; +}; + +struct ahci_host_priv { + unsigned int flags; + u32 mask_port_map; + void *mmio; + u32 cap; + u32 cap2; + u32 version; + u32 port_map; + u32 saved_cap; + u32 saved_cap2; + u32 saved_port_map; + u32 saved_port_cap[32]; + u32 em_loc; + u32 em_buf_sz; + u32 em_msg_type; + u32 remapped_nvme; + bool got_runtime_pm; + unsigned int n_clks; + struct clk_bulk_data *clks; + unsigned int f_rsts; + struct reset_control *rsts; + struct regulator **target_pwrs; + struct regulator *ahci_regulator; + struct regulator *phy_regulator; + struct phy **phys; + unsigned int nports; + void *plat_data; + unsigned int irq; + void (*start_engine)(struct ata_port *); + int (*stop_engine)(struct ata_port *); + irqreturn_t (*irq_handler)(int, void *); + int (*get_irq_vector)(struct ata_host *, int); +}; + +enum { + AHCI_PCI_BAR_STA2X11 = 0, + AHCI_PCI_BAR_CAVIUM = 0, + AHCI_PCI_BAR_LOONGSON = 0, + AHCI_PCI_BAR_ENMOTUS = 2, + AHCI_PCI_BAR_CAVIUM_GEN5 = 4, + AHCI_PCI_BAR_STANDARD = 5, +}; + +enum board_ids { + board_ahci = 0, + board_ahci_43bit_dma = 1, + board_ahci_ign_iferr = 2, + board_ahci_no_debounce_delay = 3, + board_ahci_no_msi = 4, + board_ahci_pcs_quirk = 5, + board_ahci_pcs_quirk_no_devslp = 6, + board_ahci_pcs_quirk_no_sntf = 7, + board_ahci_yes_fbs = 8, + board_ahci_al = 9, + board_ahci_avn = 10, + board_ahci_mcp65 = 11, + board_ahci_mcp77 = 12, + board_ahci_mcp89 = 13, + board_ahci_mv = 14, + board_ahci_sb600 = 15, + board_ahci_sb700 = 16, + board_ahci_vt8251 = 17, + board_ahci_mcp_linux = 11, + board_ahci_mcp67 = 11, + board_ahci_mcp73 = 11, + board_ahci_mcp79 = 12, +}; + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf = 1, + e1000_mng_mode_pt = 2, + e1000_mng_mode_ipmi = 3, + e1000_mng_mode_host_if_only = 4, +}; + +struct usb_phy_roothub { + struct phy *phy; + struct list_head list; +}; + +struct swoc_info { + __u8 rev; + __u8 reserved[8]; + __u16 LinuxSKU; + __u16 LinuxVer; + __u8 reserved2[47]; +} __attribute__((packed)); + +struct alias_prop { + struct list_head link; + const char *alias; + struct device_node *np; + int id; + char stem[0]; +}; + +struct sk_buff_fclones { + struct sk_buff skb1; + struct sk_buff skb2; + refcount_t fclone_ref; + long: 32; +}; + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; + __u32 frag_off; +}; + +struct mpls_shim_hdr { + __be32 label_stack_entry; +}; + +enum skb_drop_reason_subsys { + SKB_DROP_REASON_SUBSYS_CORE = 0, + SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE = 1, + SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR = 2, + SKB_DROP_REASON_SUBSYS_OPENVSWITCH = 3, + SKB_DROP_REASON_SUBSYS_NUM = 4, +}; + +struct drop_reason_list { + const char * const 
*reasons; + size_t n_reasons; +}; + +struct ts_state { + unsigned int offset; + char cb[48]; +}; + +struct ts_config; + +struct ts_ops { + const char *name; + struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); + unsigned int (*find)(struct ts_config *, struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +struct ts_config { + struct ts_ops *ops; + int flags; + unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *); + void (*finish)(struct ts_config *, struct ts_state *); +}; + +struct page_frag_1k { + void *va; + u16 offset; + bool pfmemalloc; +}; + +struct napi_alloc_cache { + local_lock_t bh_lock; + struct page_frag_cache page; + struct page_frag_1k page_small; + unsigned int skb_count; + void *skb_cache[64]; +}; + +struct skb_free_array { + unsigned int skb_count; + void *skb_array[16]; +}; + +typedef int (*sendmsg_func)(struct sock *, struct msghdr *); + +struct sch_frag_data { + long unsigned int dst; + struct qdisc_skb_cb cb; + __be16 inner_protocol; + u16 vlan_tci; + __be16 vlan_proto; + unsigned int l2_len; + u8 l2_data[18]; + int (*xmit)(struct sk_buff *); +}; + +enum ethtool_module_fw_flash_status { + ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED = 1, + ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS = 2, + ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED = 3, + ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR = 4, +}; + +enum { + SFF8024_ID_UNK = 0, + SFF8024_ID_SFF_8472 = 2, + SFF8024_ID_SFP = 3, + SFF8024_ID_DWDM_SFP = 11, + SFF8024_ID_QSFP_8438 = 12, + SFF8024_ID_QSFP_8436_8636 = 13, + SFF8024_ID_QSFP28_8636 = 17, + SFF8024_ID_QSFP_DD = 24, + SFF8024_ID_OSFP = 25, + SFF8024_ID_DSFP = 27, + SFF8024_ID_QSFP_PLUS_CMIS = 30, + SFF8024_ID_SFP_DD_CMIS = 31, + SFF8024_ID_SFP_PLUS_CMIS = 32, + SFF8024_ENCODING_UNSPEC = 0, + SFF8024_ENCODING_8B10B = 1, + SFF8024_ENCODING_4B5B = 2, + SFF8024_ENCODING_NRZ = 3, + SFF8024_ENCODING_8472_MANCHESTER = 4, + SFF8024_ENCODING_8472_SONET = 5, + SFF8024_ENCODING_8472_64B66B = 6, + SFF8024_ENCODING_8436_MANCHESTER = 6, + SFF8024_ENCODING_8436_SONET = 4, + SFF8024_ENCODING_8436_64B66B = 5, + SFF8024_ENCODING_256B257B = 7, + SFF8024_ENCODING_PAM4 = 8, + SFF8024_CONNECTOR_UNSPEC = 0, + SFF8024_CONNECTOR_SC = 1, + SFF8024_CONNECTOR_FIBERJACK = 6, + SFF8024_CONNECTOR_LC = 7, + SFF8024_CONNECTOR_MT_RJ = 8, + SFF8024_CONNECTOR_MU = 9, + SFF8024_CONNECTOR_SG = 10, + SFF8024_CONNECTOR_OPTICAL_PIGTAIL = 11, + SFF8024_CONNECTOR_MPO_1X12 = 12, + SFF8024_CONNECTOR_MPO_2X16 = 13, + SFF8024_CONNECTOR_HSSDC_II = 32, + SFF8024_CONNECTOR_COPPER_PIGTAIL = 33, + SFF8024_CONNECTOR_RJ45 = 34, + SFF8024_CONNECTOR_NOSEPARATE = 35, + SFF8024_CONNECTOR_MXC_2X16 = 36, + SFF8024_ECC_UNSPEC = 0, + SFF8024_ECC_100G_25GAUI_C2M_AOC = 1, + SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 2, + SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 3, + SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 4, + SFF8024_ECC_100GBASE_SR10 = 5, + SFF8024_ECC_100GBASE_CR4 = 11, + SFF8024_ECC_25GBASE_CR_S = 12, + SFF8024_ECC_25GBASE_CR_N = 13, + SFF8024_ECC_10GBASE_T_SFI = 22, + SFF8024_ECC_10GBASE_T_SR = 28, + SFF8024_ECC_5GBASE_T = 29, + SFF8024_ECC_2_5GBASE_T = 30, +}; + +enum { + SFP_PHYS_ID = 0, + SFP_PHYS_EXT_ID = 1, + SFP_PHYS_EXT_ID_SFP = 4, + SFP_CONNECTOR = 2, + SFP_COMPLIANCE = 3, + SFP_ENCODING = 11, + SFP_BR_NOMINAL = 12, + SFP_RATE_ID = 13, + SFF_RID_8079 = 1, + SFF_RID_8431_RX_ONLY = 2, + SFF_RID_8431_TX_ONLY = 4, + SFF_RID_8431 = 
6, + SFF_RID_10G8G = 14, + SFP_LINK_LEN_SM_KM = 14, + SFP_LINK_LEN_SM_100M = 15, + SFP_LINK_LEN_50UM_OM2_10M = 16, + SFP_LINK_LEN_62_5UM_OM1_10M = 17, + SFP_LINK_LEN_COPPER_1M = 18, + SFP_LINK_LEN_50UM_OM4_10M = 18, + SFP_LINK_LEN_50UM_OM3_10M = 19, + SFP_VENDOR_NAME = 20, + SFP_VENDOR_OUI = 37, + SFP_VENDOR_PN = 40, + SFP_VENDOR_REV = 56, + SFP_OPTICAL_WAVELENGTH_MSB = 60, + SFP_OPTICAL_WAVELENGTH_LSB = 61, + SFP_CABLE_SPEC = 60, + SFP_CC_BASE = 63, + SFP_OPTIONS = 64, + SFP_OPTIONS_HIGH_POWER_LEVEL = 8192, + SFP_OPTIONS_PAGING_A2 = 4096, + SFP_OPTIONS_RETIMER = 2048, + SFP_OPTIONS_COOLED_XCVR = 1024, + SFP_OPTIONS_POWER_DECL = 512, + SFP_OPTIONS_RX_LINEAR_OUT = 256, + SFP_OPTIONS_RX_DECISION_THRESH = 128, + SFP_OPTIONS_TUNABLE_TX = 64, + SFP_OPTIONS_RATE_SELECT = 32, + SFP_OPTIONS_TX_DISABLE = 16, + SFP_OPTIONS_TX_FAULT = 8, + SFP_OPTIONS_LOS_INVERTED = 4, + SFP_OPTIONS_LOS_NORMAL = 2, + SFP_BR_MAX = 66, + SFP_BR_MIN = 67, + SFP_VENDOR_SN = 68, + SFP_DATECODE = 84, + SFP_DIAGMON = 92, + SFP_DIAGMON_DDM = 64, + SFP_DIAGMON_INT_CAL = 32, + SFP_DIAGMON_EXT_CAL = 16, + SFP_DIAGMON_RXPWR_AVG = 8, + SFP_DIAGMON_ADDRMODE = 4, + SFP_ENHOPTS = 93, + SFP_ENHOPTS_ALARMWARN = 128, + SFP_ENHOPTS_SOFT_TX_DISABLE = 64, + SFP_ENHOPTS_SOFT_TX_FAULT = 32, + SFP_ENHOPTS_SOFT_RX_LOS = 16, + SFP_ENHOPTS_SOFT_RATE_SELECT = 8, + SFP_ENHOPTS_APP_SELECT_SFF8079 = 4, + SFP_ENHOPTS_SOFT_RATE_SFF8431 = 2, + SFP_SFF8472_COMPLIANCE = 94, + SFP_SFF8472_COMPLIANCE_NONE = 0, + SFP_SFF8472_COMPLIANCE_REV9_3 = 1, + SFP_SFF8472_COMPLIANCE_REV9_5 = 2, + SFP_SFF8472_COMPLIANCE_REV10_2 = 3, + SFP_SFF8472_COMPLIANCE_REV10_4 = 4, + SFP_SFF8472_COMPLIANCE_REV11_0 = 5, + SFP_SFF8472_COMPLIANCE_REV11_3 = 6, + SFP_SFF8472_COMPLIANCE_REV11_4 = 7, + SFP_SFF8472_COMPLIANCE_REV12_0 = 8, + SFP_CC_EXT = 95, +}; + +struct ethtool_module_fw_flash { + struct list_head list; + netdevice_tracker dev_tracker; + struct work_struct work; + struct ethtool_cmis_fw_update_params fw_update; +}; + +struct module_reply_data { + struct ethnl_reply_data base; + struct ethtool_module_power_mode_params power; +}; + +struct masq_dev_work { + struct work_struct work; + struct net *net; + netns_tracker ns_tracker; + union nf_inet_addr addr; + int ifindex; + int (*iter)(struct nf_conn *, void *); +}; + +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER = 0, + AUDIT_XT_OP_REPLACE = 1, + AUDIT_XT_OP_UNREGISTER = 2, + AUDIT_NFT_OP_TABLE_REGISTER = 3, + AUDIT_NFT_OP_TABLE_UNREGISTER = 4, + AUDIT_NFT_OP_CHAIN_REGISTER = 5, + AUDIT_NFT_OP_CHAIN_UNREGISTER = 6, + AUDIT_NFT_OP_RULE_REGISTER = 7, + AUDIT_NFT_OP_RULE_UNREGISTER = 8, + AUDIT_NFT_OP_SET_REGISTER = 9, + AUDIT_NFT_OP_SET_UNREGISTER = 10, + AUDIT_NFT_OP_SETELEM_REGISTER = 11, + AUDIT_NFT_OP_SETELEM_UNREGISTER = 12, + AUDIT_NFT_OP_GEN_REGISTER = 13, + AUDIT_NFT_OP_OBJ_REGISTER = 14, + AUDIT_NFT_OP_OBJ_UNREGISTER = 15, + AUDIT_NFT_OP_OBJ_RESET = 16, + AUDIT_NFT_OP_FLOWTABLE_REGISTER = 17, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER = 18, + AUDIT_NFT_OP_SETELEM_RESET = 19, + AUDIT_NFT_OP_RULE_RESET = 20, + AUDIT_NFT_OP_INVALID = 21, +}; + +struct xt_entry_match { + union { + struct { + __u16 match_size; + char name[29]; + __u8 revision; + } user; + struct { + __u16 match_size; + struct xt_match *match; + } kernel; + __u16 match_size; + } u; + unsigned char data[0]; +}; + +struct xt_error_target { + struct xt_entry_target target; + char errorname[30]; +}; + +struct xt_counters_info { + char name[32]; + unsigned int num_counters; + long: 32; + struct xt_counters counters[0]; +}; + +struct xt_percpu_counter_alloc_state { + 
unsigned int off; + const char *mem; +}; + +struct xt_template { + struct list_head list; + int (*table_init)(struct net *); + struct module *me; + char name[32]; +}; + +struct xt_pernet { + struct list_head tables[11]; +}; + +struct xt_af { + struct mutex mutex; + struct list_head match; + struct list_head target; +}; + +struct nf_mttg_trav { + struct list_head *head; + struct list_head *curr; + uint8_t class; +}; + +enum { + MTTG_TRAV_INIT = 0, + MTTG_TRAV_NFP_UNSPEC = 1, + MTTG_TRAV_NFP_SPEC = 2, + MTTG_TRAV_DONE = 3, +}; + +struct fib6_gc_args { + int timeout; + int more; +}; + +enum fib6_walk_state { + FWS_L = 0, + FWS_R = 1, + FWS_C = 2, + FWS_U = 3, +}; + +struct fib6_walker { + struct list_head lh; + struct fib6_node *root; + struct fib6_node *node; + struct fib6_info *leaf; + enum fib6_walk_state state; + unsigned int skip; + unsigned int count; + unsigned int skip_in_node; + int (*func)(struct fib6_walker *); + void *args; +}; + +typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); + +struct fib6_entry_notifier_info { + struct fib_notifier_info info; + struct fib6_info *rt; + unsigned int nsiblings; +}; + +struct ipv6_route_iter { + struct seq_net_private p; + struct fib6_walker w; + loff_t skip; + struct fib6_table *tbl; + int sernum; +}; + +struct bpf_iter__ipv6_route { + union { + struct bpf_iter_meta *meta; + }; + union { + struct fib6_info *rt; + }; +}; + +struct rt6_rtnl_dump_arg { + struct sk_buff *skb; + struct netlink_callback *cb; + struct net *net; + struct fib_dump_filter filter; +}; + +struct fib6_cleaner { + struct fib6_walker w; + struct net *net; + int (*func)(struct fib6_info *, void *); + int sernum; + void *arg; + bool skip_notify; +}; + +enum { + FIB6_NO_SERNUM_CHANGE = 0, +}; + +struct fib6_dump_arg { + struct net *net; + struct notifier_block *nb; + struct netlink_ext_ack *extack; +}; + +struct fib6_nh_pcpu_arg { + struct fib6_info *from; + const struct fib6_table *table; +}; + +struct lookup_args { + int offset; + const struct in6_addr *addr; +}; + +struct word_at_a_time { + const long unsigned int high_bits; + const long unsigned int low_bits; +}; + +enum { + STRICT = 0, + EMULATED = 1, + LEGACY = 2, + STD2008 = 3, + RELAXED = 4, +}; + +struct clone_args { + __u64 flags; + __u64 pidfd; + __u64 child_tid; + __u64 parent_tid; + __u64 exit_signal; + __u64 stack; + __u64 stack_size; + __u64 tls; + __u64 set_tid; + __u64 set_tid_size; + __u64 cgroup; +}; + +struct trace_event_raw_task_newtask { + struct trace_entry ent; + pid_t pid; + char comm[16]; + long unsigned int clone_flags; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_task_rename { + struct trace_entry ent; + pid_t pid; + char oldcomm[16]; + char newcomm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_data_offsets_task_newtask {}; + +struct trace_event_data_offsets_task_rename {}; + +typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, long unsigned int); + +typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *); + +enum { + IRQTF_RUNTHREAD = 0, + IRQTF_WARNED = 1, + IRQTF_AFFINITY = 2, + IRQTF_FORCED_THREAD = 3, + IRQTF_READY = 4, +}; + +struct posix_clock_desc { + struct file *fp; + struct posix_clock *clk; +}; + +struct ctx_switch_entry { + struct trace_entry ent; + unsigned int prev_pid; + unsigned int next_pid; + unsigned int next_cpu; + unsigned char prev_prio; + unsigned char prev_state; + unsigned char next_prio; + unsigned 
char next_state; +}; + +struct userstack_entry { + struct trace_entry ent; + unsigned int tgid; + long unsigned int caller[8]; +}; + +struct hwlat_entry { + struct trace_entry ent; + u64 duration; + u64 outer_duration; + u64 nmi_total_ts; + struct timespec64 timestamp; + unsigned int nmi_count; + unsigned int seqnum; + unsigned int count; + long: 32; +}; + +struct osnoise_entry { + struct trace_entry ent; + u64 noise; + u64 runtime; + u64 max_sample; + unsigned int hw_count; + unsigned int nmi_count; + unsigned int irq_count; + unsigned int softirq_count; + unsigned int thread_count; + long: 32; +}; + +struct timerlat_entry { + struct trace_entry ent; + unsigned int seqnum; + int context; + u64 timer_latency; +}; + +struct trace_mark { + long long unsigned int val; + char sym; + long: 32; +}; + +struct latch_tree_root { + seqcount_latch_t seq; + struct rb_root tree[2]; +}; + +struct latch_tree_ops { + bool (*less)(struct latch_tree_node *, struct latch_tree_node *); + int (*comp)(void *, struct latch_tree_node *); +}; + +struct bpf_binary_header { + u32 size; + long: 32; + u8 image[0]; +}; + +typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); + +enum page_size_enum { + __PAGE_SIZE = 4096, +}; + +struct bpf_prog_pack { + struct list_head list; + void *ptr; + long unsigned int bitmap[0]; +}; + +struct bpf_prog_dummy { + struct bpf_prog prog; +}; + +typedef u64 (*btf_bpf_user_rnd_u32)(); + +typedef u64 (*btf_bpf_get_raw_cpu_id)(); + +struct xdp_cpumap_stats { + unsigned int redirect; + unsigned int pass; + unsigned int drop; +}; + +struct _bpf_dtab_netdev { + struct net_device *dev; +}; + +struct trace_event_raw_xdp_exception { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_xdp_bulk_tx { + struct trace_entry ent; + int ifindex; + u32 act; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_xdp_redirect_template { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + int err; + int to_ifindex; + u32 map_id; + int map_index; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_kthread { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int sched; + unsigned int xdp_pass; + unsigned int xdp_drop; + unsigned int xdp_redirect; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_enqueue { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int to_cpu; + char __data[0]; +}; + +struct trace_event_raw_xdp_devmap_xmit { + struct trace_entry ent; + int from_ifindex; + u32 act; + int to_ifindex; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_mem_disconnect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + char __data[0]; +}; + +struct trace_event_raw_mem_connect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + const struct xdp_rxq_info *rxq; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_mem_return_failed { + struct trace_entry ent; + const struct page *page; + u32 mem_id; + u32 mem_type; + char __data[0]; +}; + +struct trace_event_raw_bpf_xdp_link_attach_failed { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_xdp_exception {}; + +struct trace_event_data_offsets_xdp_bulk_tx {}; + +struct 
trace_event_data_offsets_xdp_redirect_template {}; + +struct trace_event_data_offsets_xdp_cpumap_kthread {}; + +struct trace_event_data_offsets_xdp_cpumap_enqueue {}; + +struct trace_event_data_offsets_xdp_devmap_xmit {}; + +struct trace_event_data_offsets_mem_disconnect {}; + +struct trace_event_data_offsets_mem_connect {}; + +struct trace_event_data_offsets_mem_return_failed {}; + +struct trace_event_data_offsets_bpf_xdp_link_attach_failed { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, const struct bpf_prog *, u32); + +typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_cpumap_kthread)(void *, int, unsigned int, unsigned int, int, struct xdp_cpumap_stats *); + +typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_mem_disconnect)(void *, const struct xdp_mem_allocator *); + +typedef void (*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, const struct xdp_rxq_info *); + +typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, const struct page *); + +typedef void (*btf_trace_bpf_xdp_link_attach_failed)(void *, const char *); + +enum rseq_cpu_id_state { + RSEQ_CPU_ID_UNINITIALIZED = -1, + RSEQ_CPU_ID_REGISTRATION_FAILED = -2, +}; + +enum rseq_flags { + RSEQ_FLAG_UNREGISTER = 1, +}; + +enum rseq_cs_flags { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4, +}; + +struct rseq_cs { + __u32 version; + __u32 flags; + __u64 start_ip; + __u64 post_commit_offset; + __u64 abort_ip; +}; + +struct trace_event_raw_rseq_update { + struct trace_entry ent; + s32 cpu_id; + s32 node_id; + s32 mm_cid; + char __data[0]; +}; + +struct trace_event_raw_rseq_ip_fixup { + struct trace_entry ent; + long unsigned int regs_ip; + long unsigned int start_ip; + long unsigned int post_commit_offset; + long unsigned int abort_ip; + char __data[0]; +}; + +struct trace_event_data_offsets_rseq_update {}; + +struct trace_event_data_offsets_rseq_ip_fixup {}; + +typedef void (*btf_trace_rseq_update)(void *, struct task_struct *); + +typedef void (*btf_trace_rseq_ip_fixup)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +struct reserve_mem_table { + char name[16]; + phys_addr_t start; + phys_addr_t size; +}; + +typedef struct { + spinlock_t *lock; +} class_spinlock_t; + +typedef struct fd class_fd_pos_t; + +struct anon_vma_name { + struct kref kref; + char name[0]; +}; + +struct page_region { + __u64 start; + __u64 end; + __u64 categories; +}; + +struct pm_scan_arg { + __u64 size; + __u64 flags; + __u64 start; + __u64 end; + __u64 walk_end; + 
__u64 vec; + __u64 vec_len; + __u64 max_pages; + __u64 category_inverted; + __u64 category_mask; + __u64 category_anyof_mask; + __u64 return_mask; +}; + +enum procmap_query_flags { + PROCMAP_QUERY_VMA_READABLE = 1, + PROCMAP_QUERY_VMA_WRITABLE = 2, + PROCMAP_QUERY_VMA_EXECUTABLE = 4, + PROCMAP_QUERY_VMA_SHARED = 8, + PROCMAP_QUERY_COVERING_OR_NEXT_VMA = 16, + PROCMAP_QUERY_FILE_BACKED_VMA = 32, +}; + +struct procmap_query { + __u64 size; + __u64 query_flags; + __u64 query_addr; + __u64 vma_start; + __u64 vma_end; + __u64 vma_flags; + __u64 vma_page_size; + __u64 vma_offset; + __u64 inode; + __u32 dev_major; + __u32 dev_minor; + __u32 vma_name_size; + __u32 build_id_size; + __u64 vma_name_addr; + __u64 build_id_addr; +}; + +struct proc_maps_private { + struct inode *inode; + struct task_struct *task; + struct mm_struct *mm; + struct vma_iterator iter; +}; + +struct mem_size_stats { + long unsigned int resident; + long unsigned int shared_clean; + long unsigned int shared_dirty; + long unsigned int private_clean; + long unsigned int private_dirty; + long unsigned int referenced; + long unsigned int anonymous; + long unsigned int lazyfree; + long unsigned int anonymous_thp; + long unsigned int shmem_thp; + long unsigned int file_thp; + long unsigned int swap; + long unsigned int shared_hugetlb; + long unsigned int private_hugetlb; + long unsigned int ksm; + long: 32; + u64 pss; + u64 pss_anon; + u64 pss_file; + u64 pss_shmem; + u64 pss_dirty; + u64 pss_locked; + u64 swap_pss; +}; + +enum clear_refs_types { + CLEAR_REFS_ALL = 1, + CLEAR_REFS_ANON = 2, + CLEAR_REFS_MAPPED = 3, + CLEAR_REFS_SOFT_DIRTY = 4, + CLEAR_REFS_MM_HIWATER_RSS = 5, + CLEAR_REFS_LAST = 6, +}; + +struct clear_refs_private { + enum clear_refs_types type; +}; + +typedef struct { + u64 pme; +} pagemap_entry_t; + +struct pagemapread { + int pos; + int len; + pagemap_entry_t *buffer; + bool show_pfn; +}; + +struct pagemap_scan_private { + struct pm_scan_arg arg; + long unsigned int masks_of_interest; + long unsigned int cur_vma_category; + struct page_region *vec_buf; + long unsigned int vec_buf_len; + long unsigned int vec_buf_index; + long unsigned int found_pages; + struct page_region *vec_out; + long: 32; +}; + +typedef unsigned int cycles_t; + +enum criteria { + CR_POWER2_ALIGNED = 0, + CR_GOAL_LEN_FAST = 1, + CR_BEST_AVAIL_LEN = 2, + CR_GOAL_LEN_SLOW = 3, + CR_ANY_FREE = 4, + EXT4_MB_NUM_CRS = 5, +}; + +struct ext4_free_data { + struct list_head efd_list; + struct rb_node efd_node; + ext4_group_t efd_group; + ext4_grpblk_t efd_start_cluster; + ext4_grpblk_t efd_count; + tid_t efd_tid; +}; + +struct ext4_prealloc_space { + union { + struct rb_node inode_node; + struct list_head lg_list; + } pa_node; + struct list_head pa_group_list; + union { + struct list_head pa_tmp_list; + struct callback_head pa_rcu; + } u; + spinlock_t pa_lock; + atomic_t pa_count; + unsigned int pa_deleted; + ext4_fsblk_t pa_pstart; + ext4_lblk_t pa_lstart; + ext4_grpblk_t pa_len; + ext4_grpblk_t pa_free; + short unsigned int pa_type; + union { + rwlock_t *inode_lock; + spinlock_t *lg_lock; + } pa_node_lock; + struct inode *pa_inode; +}; + +enum { + MB_INODE_PA = 0, + MB_GROUP_PA = 1, +}; + +struct ext4_free_extent { + ext4_lblk_t fe_logical; + ext4_grpblk_t fe_start; + ext4_group_t fe_group; + ext4_grpblk_t fe_len; +}; + +struct ext4_allocation_context { + struct inode *ac_inode; + struct super_block *ac_sb; + struct ext4_free_extent ac_o_ex; + struct ext4_free_extent ac_g_ex; + struct ext4_free_extent ac_b_ex; + struct ext4_free_extent ac_f_ex; + 
ext4_grpblk_t ac_orig_goal_len; + __u32 ac_flags; + __u32 ac_groups_linear_remaining; + __u16 ac_groups_scanned; + __u16 ac_found; + __u16 ac_cX_found[5]; + __u16 ac_tail; + __u16 ac_buddy; + __u8 ac_status; + __u8 ac_criteria; + __u8 ac_2order; + __u8 ac_op; + struct folio *ac_bitmap_folio; + struct folio *ac_buddy_folio; + struct ext4_prealloc_space *ac_pa; + struct ext4_locality_group *ac_lg; +}; + +struct ext4_buddy { + struct folio *bd_buddy_folio; + void *bd_buddy; + struct folio *bd_bitmap_folio; + void *bd_bitmap; + struct ext4_group_info *bd_info; + struct super_block *bd_sb; + __u16 bd_blkbits; + ext4_group_t bd_group; +}; + +typedef int (*ext4_mballoc_query_range_fn)(struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t, void *); + +struct sg { + struct ext4_group_info info; + ext4_grpblk_t counters[18]; +}; + +struct fuse_kstatfs { + uint64_t blocks; + uint64_t bfree; + uint64_t bavail; + uint64_t files; + uint64_t ffree; + uint32_t bsize; + uint32_t namelen; + uint32_t frsize; + uint32_t padding; + uint32_t spare[6]; +}; + +struct fuse_statfs_out { + struct fuse_kstatfs st; +}; + +struct fuse_init_in { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint32_t flags2; + uint32_t unused[11]; +}; + +struct fuse_init_out { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint16_t max_background; + uint16_t congestion_threshold; + uint32_t max_write; + uint32_t time_gran; + uint16_t max_pages; + uint16_t map_alignment; + uint32_t flags2; + uint32_t max_stack_depth; + uint32_t unused[6]; +}; + +struct fuse_syncfs_in { + uint64_t padding; +}; + +enum fuse_dax_mode { + FUSE_DAX_INODE_DEFAULT = 0, + FUSE_DAX_ALWAYS = 1, + FUSE_DAX_NEVER = 2, + FUSE_DAX_INODE_USER = 3, +}; + +struct fuse_fs_context { + int fd; + struct file *file; + unsigned int rootmode; + kuid_t user_id; + kgid_t group_id; + bool is_bdev: 1; + bool fd_present: 1; + bool rootmode_present: 1; + bool user_id_present: 1; + bool group_id_present: 1; + bool default_permissions: 1; + bool allow_other: 1; + bool destroy: 1; + bool no_control: 1; + bool no_force_umount: 1; + bool legacy_opts_show: 1; + enum fuse_dax_mode dax_mode; + unsigned int max_read; + unsigned int blksize; + const char *subtype; + struct dax_device *dax_dev; + void **fudptr; +}; + +enum { + OPT_SOURCE = 0, + OPT_SUBTYPE = 1, + OPT_FD = 2, + OPT_ROOTMODE = 3, + OPT_USER_ID = 4, + OPT_GROUP_ID = 5, + OPT_DEFAULT_PERMISSIONS = 6, + OPT_ALLOW_OTHER = 7, + OPT_MAX_READ = 8, + OPT_BLKSIZE = 9, + OPT_ERR = 10, +}; + +struct fuse_inode_handle { + u64 nodeid; + u32 generation; + long: 32; +}; + +struct fuse_init_args { + struct fuse_args args; + struct fuse_init_in in; + struct fuse_init_out out; +}; + +struct scrub_sector_verification; + +struct scrub_stripe { + struct scrub_ctx *sctx; + struct btrfs_block_group *bg; + struct page *pages[16]; + struct scrub_sector_verification *sectors; + struct btrfs_device *dev; + u64 logical; + u64 physical; + u16 mirror_num; + u16 nr_sectors; + u16 nr_data_extents; + u16 nr_meta_extents; + atomic_t pending_io; + wait_queue_head_t io_wait; + wait_queue_head_t repair_wait; + long unsigned int state; + long unsigned int extent_sector_bitmap; + long unsigned int init_error_bitmap; + unsigned int init_nr_io_errors; + unsigned int init_nr_csum_errors; + unsigned int init_nr_meta_errors; + long unsigned int error_bitmap; + long unsigned int io_error_bitmap; + long unsigned int csum_error_bitmap; + long unsigned int meta_error_bitmap; + long unsigned int 
write_error_bitmap; + spinlock_t write_error_lock; + u8 *csums; + struct work_struct work; +}; + +struct scrub_ctx { + struct scrub_stripe stripes[128]; + struct scrub_stripe *raid56_data_stripes; + struct btrfs_fs_info *fs_info; + struct btrfs_path extent_path; + struct btrfs_path csum_path; + int first_free; + int cur_stripe; + atomic_t cancel_req; + int readonly; + ktime_t throttle_deadline; + u64 throttle_sent; + int is_dev_replace; + long: 32; + u64 write_pointer; + struct mutex wr_lock; + struct btrfs_device *wr_tgtdev; + long: 32; + struct btrfs_scrub_progress stat; + spinlock_t stat_lock; + refcount_t refs; +}; + +struct scrub_sector_verification { + bool is_metadata; + long: 32; + union { + u8 *csum; + u64 generation; + }; +}; + +enum scrub_stripe_flags { + SCRUB_STRIPE_FLAG_INITIALIZED = 0, + SCRUB_STRIPE_FLAG_REPAIR_DONE = 1, + SCRUB_STRIPE_FLAG_NO_REPORT = 2, +}; + +struct scrub_warning { + struct btrfs_path *path; + long: 32; + u64 extent_item_size; + const char *errstr; + long: 32; + u64 physical; + u64 logical; + struct btrfs_device *dev; + long: 32; +}; + +struct sha3_state { + u64 st[25]; + unsigned int rsiz; + unsigned int rsizw; + unsigned int partial; + u8 buf[144]; + long: 32; +}; + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + +struct subsys_private; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; + struct subsys_private *sp; +}; + +struct uuidcmp { + const char *uuid; + int len; +}; + +enum io_uring_register_pbuf_ring_flags { + IOU_PBUF_RING_MMAP = 1, + IOU_PBUF_RING_INC = 2, +}; + +struct io_uring_buf_reg { + __u64 ring_addr; + __u32 ring_entries; + __u16 bgid; + __u16 flags; + __u64 resv[3]; +}; + +struct io_uring_buf_status { + __u32 buf_group; + __u32 head; + __u32 resv[8]; +}; + +enum { + KBUF_MODE_EXPAND = 1, + KBUF_MODE_FREE = 2, +}; + +struct buf_sel_arg { + struct iovec *iovs; + size_t out_len; + size_t max_len; + short unsigned int nr_iovs; + short unsigned int mode; +}; + +struct io_provide_buf { + struct file *file; + long: 32; + __u64 addr; + __u32 len; + __u32 bgid; + __u32 nbufs; + __u16 bid; +}; + +struct io_timeout_data { + struct io_kiocb *req; + long: 32; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; + u32 flags; +}; + +struct io_timeout { + struct file *file; + u32 off; + u32 target_seq; + u32 repeats; + struct list_head list; + struct io_kiocb *head; + struct io_kiocb *prev; +}; + +struct io_timeout_rem { + struct file *file; + long: 32; + u64 addr; + struct timespec64 ts; + u32 flags; + bool ltimeout; +}; + +enum string_size_units { + STRING_UNITS_10 = 0, + STRING_UNITS_2 = 1, + STRING_UNITS_MASK = 1, + STRING_UNITS_NO_SPACE = 1073741824, + STRING_UNITS_NO_BYTES = 2147483648, +}; + +struct strarray { + char **array; + size_t n; +}; + +enum assoc_array_walk_status { + assoc_array_walk_tree_empty = 0, + assoc_array_walk_found_terminal_node = 1, + assoc_array_walk_found_wrong_shortcut = 2, +}; + +struct assoc_array_walk_result { + struct { + struct assoc_array_node *node; + int level; + int slot; + } terminal_node; + struct { + struct assoc_array_shortcut *shortcut; + int level; + int sc_level; + long unsigned int sc_segments; + long unsigned int dissimilarity; + } wrong_shortcut; +}; + +struct assoc_array_delete_collapse_context { + struct assoc_array_node *node; + const void *skip_leaf; + int slot; +}; + +typedef struct { + size_t bitContainer; + unsigned int bitPos; + char *startPtr; + char *ptr; + char *endPtr; +} BIT_CStream_t; + +typedef struct 
{ + U32 offset; + U32 checksum; +} ldmEntry_t; + +typedef struct { + const BYTE *split; + U32 hash; + U32 checksum; + ldmEntry_t *bucket; +} ldmMatchCandidate_t; + +typedef struct { + ZSTD_window_t window; + ldmEntry_t *hashTable; + U32 loadedDictEnd; + BYTE *bucketOffsets; + size_t splitIndices[64]; + ldmMatchCandidate_t matchCandidates[64]; +} ldmState_t; + +typedef struct { + ZSTD_paramSwitch_e enableLdm; + U32 hashLog; + U32 bucketSizeLog; + U32 minMatchLength; + U32 hashRateLog; + U32 windowLog; +} ldmParams_t; + +typedef size_t (*ZSTD_blockCompressor)(ZSTD_matchState_t *, seqStore_t *, U32 *, const void *, size_t); + +typedef struct { + U64 rolling; + U64 stopMask; +} ldmRollingHashState_t; + +typedef unsigned int FSE_DTable; + +typedef struct { + size_t state; + const void *table; +} FSE_DState_t; + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; + +typedef struct { + short unsigned int newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; + +typedef struct { + short int ncount[256]; + FSE_DTable dtable[0]; +} FSE_DecompressWksp; + +struct irq_glue { + struct irq_affinity_notify notify; + struct cpu_rmap *rmap; + u16 index; +}; + +struct probe { + struct probe *next; + dev_t dev; + long unsigned int range; + struct module *owner; + kobj_probe_t *get; + int (*lock)(dev_t, void *); + void *data; +}; + +struct kobj_map { + struct probe *probes[255]; + struct mutex *lock; +}; + +typedef __u32 __virtio32; + +typedef __u64 __virtio64; + +struct virtio_blk_geometry { + __virtio16 cylinders; + __u8 heads; + __u8 sectors; +}; + +struct virtio_blk_zoned_characteristics { + __virtio32 zone_sectors; + __virtio32 max_open_zones; + __virtio32 max_active_zones; + __virtio32 max_append_sectors; + __virtio32 write_granularity; + __u8 model; + __u8 unused2[3]; +}; + +struct virtio_blk_config { + __virtio64 capacity; + __virtio32 size_max; + __virtio32 seg_max; + struct virtio_blk_geometry geometry; + __virtio32 blk_size; + __u8 physical_block_exp; + __u8 alignment_offset; + __virtio16 min_io_size; + __virtio32 opt_io_size; + __u8 wce; + __u8 unused; + __virtio16 num_queues; + __virtio32 max_discard_sectors; + __virtio32 max_discard_seg; + __virtio32 discard_sector_alignment; + __virtio32 max_write_zeroes_sectors; + __virtio32 max_write_zeroes_seg; + __u8 write_zeroes_may_unmap; + __u8 unused1[3]; + __virtio32 max_secure_erase_sectors; + __virtio32 max_secure_erase_seg; + __virtio32 secure_erase_sector_alignment; + struct virtio_blk_zoned_characteristics zoned; +}; + +struct virtio_blk_outhdr { + __virtio32 type; + __virtio32 ioprio; + __virtio64 sector; +}; + +struct virtio_blk_discard_write_zeroes { + __le64 sector; + __le32 num_sectors; + __le32 flags; +}; + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[16]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct virtio_blk { + struct mutex vdev_mutex; + struct virtio_device *vdev; + struct gendisk *disk; + struct blk_mq_tag_set tag_set; + struct work_struct config_work; + int index; + int num_vqs; + int io_queues[3]; + struct virtio_blk_vq *vqs; + unsigned int zone_sectors; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + union { + u8 status; + struct { + __virtio64 sector; + u8 status; + 
long: 32; + } zone_append; + } in_hdr; + size_t in_hdr_len; + struct sg_table sg_table; + struct scatterlist sg[0]; +}; + +enum ata_prot_flags { + ATA_PROT_FLAG_PIO = 1, + ATA_PROT_FLAG_DMA = 2, + ATA_PROT_FLAG_NCQ = 4, + ATA_PROT_FLAG_ATAPI = 8, + ATA_PROT_UNKNOWN = 255, + ATA_PROT_NODATA = 0, + ATA_PROT_PIO = 1, + ATA_PROT_DMA = 2, + ATA_PROT_NCQ_NODATA = 4, + ATA_PROT_NCQ = 6, + ATAPI_PROT_NODATA = 8, + ATAPI_PROT_PIO = 9, + ATAPI_PROT_DMA = 10, +}; + +enum ata_lpm_hints { + ATA_LPM_EMPTY = 1, + ATA_LPM_HIPM = 2, + ATA_LPM_WAKE_ONLY = 4, +}; + +enum { + ATA_READID_POSTRESET = 1, + ATA_DNXFER_PIO = 0, + ATA_DNXFER_DMA = 1, + ATA_DNXFER_40C = 2, + ATA_DNXFER_FORCE_PIO = 3, + ATA_DNXFER_FORCE_PIO0 = 4, + ATA_DNXFER_QUIET = -2147483648, +}; + +enum { + ATA_EH_SPDN_NCQ_OFF = 1, + ATA_EH_SPDN_SPEED_DOWN = 2, + ATA_EH_SPDN_FALLBACK_TO_PIO = 4, + ATA_EH_SPDN_KEEP_ERRORS = 8, + ATA_EFLAG_IS_IO = 1, + ATA_EFLAG_DUBIOUS_XFER = 2, + ATA_EFLAG_OLD_ER = -2147483648, + ATA_ECAT_NONE = 0, + ATA_ECAT_ATA_BUS = 1, + ATA_ECAT_TOUT_HSM = 2, + ATA_ECAT_UNK_DEV = 3, + ATA_ECAT_DUBIOUS_NONE = 4, + ATA_ECAT_DUBIOUS_ATA_BUS = 5, + ATA_ECAT_DUBIOUS_TOUT_HSM = 6, + ATA_ECAT_DUBIOUS_UNK_DEV = 7, + ATA_ECAT_NR = 8, + ATA_EH_CMD_DFL_TIMEOUT = 5000, + ATA_EH_RESET_COOL_DOWN = 5000, + ATA_EH_PRERESET_TIMEOUT = 10000, + ATA_EH_FASTDRAIN_INTERVAL = 3000, + ATA_EH_UA_TRIES = 5, + ATA_EH_PROBE_TRIAL_INTERVAL = 60000, + ATA_EH_PROBE_TRIALS = 2, +}; + +struct ata_eh_cmd_timeout_ent { + const u8 *commands; + const unsigned int *timeouts; +}; + +struct speed_down_verdict_arg { + u64 since; + int xfer_ok; + int nr_errors[8]; + long: 32; +}; + +enum rc6_mode { + RC6_MODE_0 = 0, + RC6_MODE_6A = 1, + RC6_MODE_UNKNOWN = 2, +}; + +enum rc6_state { + STATE_INACTIVE___6 = 0, + STATE_PREFIX_SPACE = 1, + STATE_HEADER_BIT_START___2 = 2, + STATE_HEADER_BIT_END___2 = 3, + STATE_TOGGLE_START = 4, + STATE_TOGGLE_END = 5, + STATE_BODY_BIT_START___2 = 6, + STATE_BODY_BIT_END___2 = 7, + STATE_FINISHED___3 = 8, +}; + +struct md_setup_args { + int minor; + int partitioned; + int level; + int chunk; + char *device_names; +}; + +struct __kernel_sock_timeval { + __s64 tv_sec; + __s64 tv_usec; +}; + +struct dmabuf_token { + __u32 token_start; + __u32 token_count; +}; + +struct linger { + int l_onoff; + int l_linger; +}; + +enum { + SOCK_WAKE_IO = 0, + SOCK_WAKE_WAITD = 1, + SOCK_WAKE_SPACE = 2, + SOCK_WAKE_URG = 3, +}; + +struct so_timestamping { + int flags; + int bind_phc; +}; + +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = 1, + SOF_TXTIME_REPORT_ERRORS = 2, + SOF_TXTIME_FLAGS_LAST = 2, + SOF_TXTIME_FLAGS_MASK = 3, +}; + +struct sock_txtime { + __kernel_clockid_t clockid; + __u32 flags; +}; + +enum { + ETHTOOL_A_PAUSE_STAT_UNSPEC = 0, + ETHTOOL_A_PAUSE_STAT_PAD = 1, + ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 2, + ETHTOOL_A_PAUSE_STAT_RX_FRAMES = 3, + __ETHTOOL_A_PAUSE_STAT_CNT = 4, + ETHTOOL_A_PAUSE_STAT_MAX = 3, +}; + +struct pause_req_info { + struct ethnl_req_info base; + enum ethtool_mac_stats_src src; +}; + +struct pause_reply_data { + struct ethnl_reply_data base; + struct ethtool_pauseparam pauseparam; + long: 32; + struct ethtool_pause_stats pausestat; +}; + +enum nft_last_attributes { + NFTA_LAST_UNSPEC = 0, + NFTA_LAST_SET = 1, + NFTA_LAST_MSECS = 2, + NFTA_LAST_PAD = 3, + __NFTA_LAST_MAX = 4, +}; + +struct nft_last { + long unsigned int jiffies; + unsigned int set; +}; + +struct nft_last_priv { + struct nft_last *last; +}; + +struct ip_fraglist_iter { + struct sk_buff *frag; + struct iphdr *iph; + int offset; + unsigned int hlen; +}; + 
+struct ip_frag_state { + bool DF; + unsigned int hlen; + unsigned int ll_rs; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + __be16 not_last_frag; +}; + +enum sock_shutdown_cmd { + SHUT_RD = 0, + SHUT_WR = 1, + SHUT_RDWR = 2, +}; + +struct unix_edge { + struct unix_sock *predecessor; + struct unix_sock *successor; + struct list_head vertex_entry; + struct list_head stack_entry; +}; + +struct unix_skb_parms { + struct pid *pid; + kuid_t uid; + kgid_t gid; + struct scm_fp_list *fp; + u32 consumed; +}; + +struct unix_stream_read_state { + int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); + struct socket *socket; + struct msghdr *msg; + struct pipe_inode_info *pipe; + size_t size; + int flags; + unsigned int splice_flags; +}; + +struct bpf_unix_iter_state { + struct seq_net_private p; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; +}; + +struct bpf_iter__unix { + union { + struct bpf_iter_meta *meta; + }; + union { + struct unix_sock *unix_sk; + }; + uid_t uid; + long: 32; +}; + +struct uevent_sock { + struct list_head list; + struct sock *sk; +}; + +struct mips_vdso_image; + +struct mips_abi { + int (* const setup_frame)(void *, struct ksignal *, struct pt_regs *, sigset_t *); + int (* const setup_rt_frame)(void *, struct ksignal *, struct pt_regs *, sigset_t *); + const long unsigned int restart; + unsigned int off_sc_fpregs; + unsigned int off_sc_fpc_csr; + unsigned int off_sc_used_math; + struct mips_vdso_image *vdso; +}; + +struct mips_vdso_image { + void *data; + long unsigned int size; + long unsigned int off_sigreturn; + long unsigned int off_rt_sigreturn; + struct vm_special_mapping mapping; +}; + +struct mips_frame_info { + void *func; + long unsigned int func_size; + int frame_size; + int pc_offset; +}; + +enum sysctl_writes_mode { + SYSCTL_WRITES_LEGACY = -1, + SYSCTL_WRITES_WARN = 0, + SYSCTL_WRITES_STRICT = 1, +}; + +struct do_proc_dointvec_minmax_conv_param { + int *min; + int *max; +}; + +struct do_proc_douintvec_minmax_conv_param { + unsigned int *min; + unsigned int *max; +}; + +enum { + IRQ_STARTUP_NORMAL = 0, + IRQ_STARTUP_MANAGED = 1, + IRQ_STARTUP_ABORT = 2, +}; + +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source; + struct module *target; +}; + +enum kernel_load_data_id { + LOADING_UNKNOWN = 0, + LOADING_FIRMWARE = 1, + LOADING_MODULE = 2, + LOADING_KEXEC_IMAGE = 3, + LOADING_KEXEC_INITRAMFS = 4, + LOADING_POLICY = 5, + LOADING_X509_CERTIFICATE = 6, + LOADING_MAX_ID = 7, +}; + +struct load_info { + const char *name; + struct module *mod; + Elf32_Ehdr *hdr; + long unsigned int len; + Elf32_Shdr *sechdrs; + char *secstrings; + char *strtab; + long unsigned int symoffs; + long unsigned int stroffs; + long unsigned int init_typeoffs; + long unsigned int core_typeoffs; + bool sig_ok; + long unsigned int mod_kallsyms_init_off; + struct { + unsigned int sym; + unsigned int str; + unsigned int mod; + unsigned int vers; + unsigned int info; + unsigned int pcpu; + } index; +}; + +enum mod_license { + NOT_GPL_ONLY = 0, + GPL_ONLY = 1, +}; + +struct find_symbol_arg { + const char *name; + bool gplok; + bool warn; + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + +enum fail_dup_mod_reason { + FAIL_DUP_MOD_BECOMING = 0, + FAIL_DUP_MOD_LOAD = 1, +}; + +struct mod_tree_root { + struct latch_tree_root root; + long unsigned int addr_min; + long unsigned 
int addr_max; +}; + +struct trace_event_raw_module_load { + struct trace_entry ent; + unsigned int taints; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_free { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_refcnt { + struct trace_entry ent; + long unsigned int ip; + int refcnt; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_request { + struct trace_entry ent; + long unsigned int ip; + bool wait; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_data_offsets_module_load { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_free { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_refcnt { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_request { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_module_load)(void *, struct module *); + +typedef void (*btf_trace_module_free)(void *, struct module *); + +typedef void (*btf_trace_module_get)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_put)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_request)(void *, char *, bool, long unsigned int); + +struct symsearch { + const struct kernel_symbol *start; + const struct kernel_symbol *stop; + const s32 *crcs; + enum mod_license license; +}; + +struct mod_initfree { + struct llist_node node; + void *init_text; + void *init_data; + void *init_rodata; +}; + +struct idempotent { + const void *cookie; + struct hlist_node entry; + struct completion complete; + int ret; +}; + +struct trace_dynamic_info { + u16 len; + u16 offset; +}; + +enum dynevent_type { + DYNEVENT_TYPE_SYNTH = 1, + DYNEVENT_TYPE_KPROBE = 2, + DYNEVENT_TYPE_NONE = 3, +}; + +struct dynevent_cmd; + +typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *); + +struct dynevent_cmd { + struct seq_buf seq; + const char *event_name; + unsigned int n_fields; + enum dynevent_type type; + dynevent_create_fn_t run_command; + void *private_data; +}; + +struct synth_field_desc { + const char *type; + const char *name; +}; + +struct synth_trace_event; + +struct synth_event; + +struct synth_event_trace_state { + struct trace_event_buffer fbuffer; + struct synth_trace_event *entry; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int cur_field; + unsigned int n_u64; + bool disabled; + bool add_next; + bool add_name; +}; + +union trace_synth_field { + u8 as_u8; + u16 as_u16; + u32 as_u32; + u64 as_u64; + struct trace_dynamic_info as_dynamic; +}; + +struct synth_trace_event { + struct trace_entry ent; + union trace_synth_field fields[0]; +}; + +struct synth_field; + +struct synth_event { + struct dyn_event devent; + int ref; + char *name; + struct synth_field **fields; + unsigned int n_fields; + struct synth_field **dynamic_fields; + unsigned int n_dynamic_fields; + unsigned int n_u64; + struct trace_event_class class; + struct trace_event_call call; + struct tracepoint *tp; + struct module *mod; +}; + +typedef int (*dynevent_check_arg_fn_t)(void *); + +struct dynevent_arg { + const char *str; + char separator; +}; + +struct dynevent_arg_pair { + const char *lhs; + const char *rhs; + char operator; + char separator; +}; + +struct synth_field { + char *type; + char *name; + size_t size; + unsigned int offset; + unsigned int field_pos; + bool is_signed; + bool is_string; + bool is_dynamic; + bool is_stack; +}; + +enum { + 
SYNTH_ERR_BAD_NAME = 0, + SYNTH_ERR_INVALID_CMD = 1, + SYNTH_ERR_INVALID_DYN_CMD = 2, + SYNTH_ERR_EVENT_EXISTS = 3, + SYNTH_ERR_TOO_MANY_FIELDS = 4, + SYNTH_ERR_INCOMPLETE_TYPE = 5, + SYNTH_ERR_INVALID_TYPE = 6, + SYNTH_ERR_INVALID_FIELD = 7, + SYNTH_ERR_INVALID_ARRAY_SPEC = 8, +}; + +struct bpf_iter__bpf_map_elem { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + void *value; + }; +}; + +struct bpf_iter_seq_array_map_info { + struct bpf_map *map; + void *percpu_value_buf; + u32 index; +}; + +struct prog_poke_elem { + struct list_head list; + struct bpf_prog_aux *aux; +}; + +struct pcpu_group_info { + int nr_units; + long unsigned int base_offset; + unsigned int *cpu_map; +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; + int nr_groups; + struct pcpu_group_info groups[0]; +}; + +typedef int pcpu_fc_cpu_to_node_fn_t(int); + +typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); + +struct trace_event_raw_percpu_alloc_percpu { + struct trace_entry ent; + long unsigned int call_site; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + void *base_addr; + int off; + void *ptr; + size_t bytes_alloc; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_percpu_free_percpu { + struct trace_entry ent; + void *base_addr; + int off; + void *ptr; + char __data[0]; +}; + +struct trace_event_raw_percpu_alloc_percpu_fail { + struct trace_entry ent; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + char __data[0]; +}; + +struct trace_event_raw_percpu_create_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_raw_percpu_destroy_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_data_offsets_percpu_alloc_percpu {}; + +struct trace_event_data_offsets_percpu_free_percpu {}; + +struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; + +struct trace_event_data_offsets_percpu_create_chunk {}; + +struct trace_event_data_offsets_percpu_destroy_chunk {}; + +typedef void (*btf_trace_percpu_alloc_percpu)(void *, long unsigned int, bool, bool, size_t, size_t, void *, int, void *, size_t, gfp_t); + +typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, void *); + +typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, size_t); + +typedef void (*btf_trace_percpu_create_chunk)(void *, void *); + +typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *); + +struct pcpu_block_md { + int scan_hint; + int scan_hint_start; + int contig_hint; + int contig_hint_start; + int left_free; + int right_free; + int first_free; + int nr_bits; +}; + +struct pcpuobj_ext { + struct obj_cgroup *cgroup; +}; + +struct pcpu_chunk { + struct list_head list; + int free_bytes; + struct pcpu_block_md chunk_md; + long unsigned int *bound_map; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *base_addr; + long unsigned int *alloc_map; + struct pcpu_block_md *md_blocks; + void *data; + bool immutable; + bool isolated; + int start_offset; + int end_offset; + struct pcpuobj_ext *obj_exts; + int nr_pages; + int nr_populated; + int nr_empty_pop_pages; + 
long unsigned int populated[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef u64 freelist_full_t; + +typedef union { + struct { + void *freelist; + long unsigned int counter; + }; + freelist_full_t full; +} freelist_aba_t; + +struct kmem_cache_cpu { + union { + struct { + void **freelist; + long unsigned int tid; + }; + freelist_aba_t freelist_tid; + }; + struct slab *slab; + struct slab *partial; + local_lock_t lock; +}; + +struct kmem_cache_node { + spinlock_t list_lock; + long unsigned int nr_partial; + struct list_head partial; + atomic_long_t nr_slabs; + atomic_long_t total_objects; + struct list_head full; +}; + +typedef u32 depot_stack_handle_t; + +struct memory_notify { + long unsigned int altmap_start_pfn; + long unsigned int altmap_nr_pages; + long unsigned int start_pfn; + long unsigned int nr_pages; + int status_change_nid_normal; + int status_change_nid; +}; + +struct partial_context { + gfp_t flags; + unsigned int orig_size; + void *object; +}; + +struct track { + long unsigned int addr; + depot_stack_handle_t handle; + int cpu; + int pid; + long unsigned int when; +}; + +enum track_item { + TRACK_ALLOC = 0, + TRACK_FREE = 1, +}; + +enum stat_item { + ALLOC_FASTPATH = 0, + ALLOC_SLOWPATH = 1, + FREE_FASTPATH = 2, + FREE_SLOWPATH = 3, + FREE_FROZEN = 4, + FREE_ADD_PARTIAL = 5, + FREE_REMOVE_PARTIAL = 6, + ALLOC_FROM_PARTIAL = 7, + ALLOC_SLAB = 8, + ALLOC_REFILL = 9, + ALLOC_NODE_MISMATCH = 10, + FREE_SLAB = 11, + CPUSLAB_FLUSH = 12, + DEACTIVATE_FULL = 13, + DEACTIVATE_EMPTY = 14, + DEACTIVATE_TO_HEAD = 15, + DEACTIVATE_TO_TAIL = 16, + DEACTIVATE_REMOTE_FREES = 17, + DEACTIVATE_BYPASS = 18, + ORDER_FALLBACK = 19, + CMPXCHG_DOUBLE_CPU_FAIL = 20, + CMPXCHG_DOUBLE_FAIL = 21, + CPU_PARTIAL_ALLOC = 22, + CPU_PARTIAL_FREE = 23, + CPU_PARTIAL_NODE = 24, + CPU_PARTIAL_DRAIN = 25, + NR_SLUB_STAT_ITEMS = 26, +}; + +struct slub_flush_work { + struct work_struct work; + struct kmem_cache *s; + bool skip; +}; + +struct detached_freelist { + struct slab *slab; + void *tail; + void *freelist; + int cnt; + struct kmem_cache *s; +}; + +struct location { + depot_stack_handle_t handle; + long unsigned int count; + long unsigned int addr; + long unsigned int waste; + long long int sum_time; + long int min_time; + long int max_time; + long int min_pid; + long int max_pid; + long unsigned int cpus[1]; + nodemask_t nodes; +}; + +struct loc_track { + long unsigned int max; + long unsigned int count; + struct location *loc; + long: 32; + loff_t idx; +}; + +enum slab_stat_type { + SL_ALL = 0, + SL_PARTIAL = 1, + SL_CPU = 2, + SL_OBJECTS = 3, + SL_TOTAL = 4, +}; + +struct slab_attribute { + struct attribute attr; + ssize_t (*show)(struct kmem_cache *, char *); + ssize_t (*store)(struct kmem_cache *, const char *, size_t); +}; + +struct saved_alias { + struct kmem_cache *s; + const char *name; + struct saved_alias *next; +}; + +typedef struct task_struct *class_task_lock_t; + +struct pidfd_info { + __u64 mask; + __u64 cgroupid; + __u32 pid; + __u32 tgid; + __u32 ppid; + __u32 ruid; + __u32 rgid; + __u32 euid; + __u32 egid; + __u32 suid; + __u32 sgid; + __u32 fsuid; + __u32 fsgid; + __u32 spare0[1]; +}; + +struct fiemap { + __u64 fm_start; + __u64 fm_length; + __u32 fm_flags; + __u32 fm_mapped_extents; + __u32 fm_extent_count; + __u32 fm_reserved; + struct fiemap_extent fm_extents[0]; 
+}; + +struct fsuuid { + __u32 fsu_len; + __u32 fsu_flags; + __u8 fsu_uuid[0]; +}; + +struct move_extent { + __u32 reserved; + __u32 donor_fd; + __u64 orig_start; + __u64 donor_start; + __u64 len; + __u64 moved_len; +}; + +struct ext4_new_group_input { + __u32 group; + long: 32; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 unused; +}; + +struct ext4_new_group_data { + __u32 group; + long: 32; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 mdata_blocks; + __u32 free_clusters_count; + long: 32; +}; + +struct fsmap { + __u32 fmr_device; + __u32 fmr_flags; + __u64 fmr_physical; + __u64 fmr_owner; + __u64 fmr_offset; + __u64 fmr_length; + __u64 fmr_reserved[3]; +}; + +struct fsmap_head { + __u32 fmh_iflags; + __u32 fmh_oflags; + __u32 fmh_count; + __u32 fmh_entries; + __u64 fmh_reserved[6]; + struct fsmap fmh_keys[2]; + struct fsmap fmh_recs[0]; +}; + +struct ext4_fsmap { + struct list_head fmr_list; + dev_t fmr_device; + uint32_t fmr_flags; + uint64_t fmr_physical; + uint64_t fmr_owner; + uint64_t fmr_length; +}; + +struct ext4_fsmap_head { + uint32_t fmh_iflags; + uint32_t fmh_oflags; + unsigned int fmh_count; + unsigned int fmh_entries; + struct ext4_fsmap fmh_keys[2]; +}; + +typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *); + +typedef void ext4_update_sb_callback(struct ext4_super_block *, const void *); + +struct getfsmap_info { + struct super_block *gi_sb; + struct fsmap_head *gi_data; + unsigned int gi_idx; + __u32 gi_last_flags; +}; + +struct fat_fid { + u32 i_gen; + u32 i_pos_low; + u16 i_pos_hi; + u16 parent_i_pos_hi; + u32 parent_i_pos_low; + u32 parent_i_gen; +}; + +struct compressed_bio { + unsigned int nr_folios; + struct folio **compressed_folios; + u64 start; + unsigned int len; + unsigned int compressed_len; + u8 compress_type; + bool writeback; + union { + struct btrfs_bio *orig_bbio; + struct work_struct write_end_work; + }; + long: 32; + struct btrfs_bio bbio; +}; + +struct workspace_manager { + struct list_head idle_ws; + spinlock_t ws_lock; + int free_ws; + atomic_t total_ws; + wait_queue_head_t ws_wait; +}; + +struct btrfs_compress_op { + struct workspace_manager *workspace_manager; + unsigned int max_level; + unsigned int default_level; +}; + +struct workspace { + void *mem; + void *buf; + void *cbuf; + struct list_head list; +}; + +struct ahash_alg { + int (*init)(struct ahash_request *); + int (*update)(struct ahash_request *); + int (*final)(struct ahash_request *); + int (*finup)(struct ahash_request *); + int (*digest)(struct ahash_request *); + int (*export)(struct ahash_request *, void *); + int (*import)(struct ahash_request *, const void *); + int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_ahash *); + void (*exit_tfm)(struct crypto_ahash *); + int (*clone_tfm)(struct crypto_ahash *, struct crypto_ahash *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct hash_alg_common halg; +}; + +struct crypto_hash_walk { + char *data; + unsigned int offset; + unsigned int flags; + struct page *pg; + unsigned int entrylen; + unsigned int total; + struct scatterlist *sg; +}; + +struct ahash_instance { + void (*free)(struct ahash_instance *); + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[256]; + struct crypto_instance base; + } s; + struct ahash_alg alg; + }; +}; + +struct bdev_inode { + struct block_device bdev; + struct inode vfs_inode; +}; + +struct io_uring_rsrc_register { + __u32 nr; + __u32 flags; + __u64 resv2; + __u64 data; + __u64 tags; +}; + +struct io_uring_rsrc_update2 { + __u32 offset; + __u32 resv; + __u64 data; + __u64 tags; + __u32 nr; + __u32 resv2; +}; + +enum { + IORING_REGISTER_SRC_REGISTERED = 1, + IORING_REGISTER_DST_REPLACE = 2, +}; + +struct io_uring_clone_buffers { + __u32 src_fd; + __u32 flags; + __u32 src_off; + __u32 dst_off; + __u32 nr; + __u32 pad[3]; +}; + +struct io_imu_folio_data { + unsigned int nr_pages_head; + unsigned int nr_pages_mid; + unsigned int folio_shift; +}; + +struct io_rsrc_update { + struct file *file; + long: 32; + u64 arg; + u32 nr_args; + u32 offset; +}; + +struct waitid_info { + pid_t pid; + uid_t uid; + int status; + int cause; +}; + +struct wait_opts { + enum pid_type wo_type; + int wo_flags; + struct pid *wo_pid; + struct waitid_info *wo_info; + int wo_stat; + struct rusage *wo_rusage; + wait_queue_entry_t child_wait; + int notask_error; +}; + +struct io_waitid_async { + struct io_kiocb *req; + struct wait_opts wo; +}; + +struct io_waitid { + struct file *file; + int which; + pid_t upid; + int options; + atomic_t refs; + struct wait_queue_head *head; + struct siginfo *infop; + struct waitid_info info; +}; + +struct genradix_iter { + size_t offset; + size_t pos; +}; + +struct vring_desc { + __virtio64 addr; + __virtio32 len; + __virtio16 flags; + __virtio16 next; +}; + +struct vring_avail { + __virtio16 flags; + __virtio16 idx; + __virtio16 ring[0]; +}; + +struct vring_used_elem { + __virtio32 id; + __virtio32 len; +}; + +typedef struct vring_used_elem vring_used_elem_t; + +struct vring_used { + __virtio16 flags; + __virtio16 idx; + vring_used_elem_t ring[0]; +}; + +typedef struct vring_desc vring_desc_t; + +typedef struct vring_avail vring_avail_t; + +typedef struct vring_used vring_used_t; + +struct vring { + unsigned int num; + vring_desc_t *desc; + vring_avail_t *avail; + vring_used_t *used; +}; + +struct vring_packed_desc_event { + __le16 off_wrap; + __le16 flags; +}; + +struct vring_packed_desc { + __le64 addr; + __le32 len; + __le16 id; + __le16 flags; +}; + +struct vring_desc_state_split { + void *data; + struct vring_desc *indir_desc; +}; + +struct vring_desc_state_packed { + void *data; + struct vring_packed_desc *indir_desc; + u16 num; + u16 last; +}; + +struct vring_desc_extra { + dma_addr_t addr; + u32 len; + u16 flags; + u16 next; +}; + +struct vring_virtqueue_split { + struct vring vring; + u16 avail_flags_shadow; + u16 avail_idx_shadow; + struct vring_desc_state_split *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + u32 vring_align; + bool may_reduce_num; +}; + +struct vring_virtqueue_packed { + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + bool avail_wrap_counter; + u16 avail_used_flags; + u16 next_avail_idx; + u16 event_flags_shadow; + struct 
vring_desc_state_packed *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; +}; + +struct vring_virtqueue { + struct virtqueue vq; + bool packed_ring; + bool use_dma_api; + bool weak_barriers; + bool broken; + bool indirect; + bool event; + unsigned int free_head; + unsigned int num_added; + u16 last_used_idx; + bool event_triggered; + union { + struct vring_virtqueue_split split; + struct vring_virtqueue_packed packed; + }; + bool (*notify)(struct virtqueue *); + bool we_own_ring; + struct device *dma_dev; +}; + +typedef void * (*devcon_match_fn_t)(const struct fwnode_handle *, const char *, void *); + +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1, + BIP_MAPPED_INTEGRITY = 2, + BIP_CTRL_NOCHECK = 4, + BIP_DISK_NOCHECK = 8, + BIP_IP_CHECKSUM = 16, + BIP_COPY_USER = 32, +}; + +enum t10_dif_type { + T10_PI_TYPE0_PROTECTION = 0, + T10_PI_TYPE1_PROTECTION = 1, + T10_PI_TYPE2_PROTECTION = 2, + T10_PI_TYPE3_PROTECTION = 3, +}; + +struct scsi_io_group_descriptor { + u8 io_advice_hints_mode: 2; + u8 reserved1: 3; + u8 st_enble: 1; + u8 cs_enble: 1; + u8 ic_enable: 1; + u8 reserved2[3]; + u8 acdlu: 1; + u8 reserved3: 1; + u8 rlbsr: 2; + u8 lbm_descriptor_type: 4; + u8 params[2]; + u8 reserved4; + u8 reserved5[8]; +}; + +struct scsi_stream_status { + u8 perm: 1; + u8 reserved1: 7; + u8 reserved2; + __be16 stream_identifier; + u8 reserved3: 2; + u8 rel_lifetime: 6; + u8 reserved4[3]; +}; + +struct scsi_stream_status_header { + __be32 len; + u16 reserved; + __be16 number_of_open_streams; + struct { + struct {} __empty_stream_status; + struct scsi_stream_status stream_status[0]; + }; +}; + +struct scsi_mode_data { + __u32 length; + __u16 block_descriptor_length; + __u8 medium_type; + __u8 device_specific; + __u8 header_length; + __u8 longlba: 1; +}; + +enum scsi_prot_flags { + SCSI_PROT_TRANSFER_PI = 1, + SCSI_PROT_GUARD_CHECK = 2, + SCSI_PROT_REF_CHECK = 4, + SCSI_PROT_REF_INCREMENT = 8, + SCSI_PROT_IP_CHECKSUM = 16, +}; + +enum scsi_host_prot_capabilities { + SHOST_DIF_TYPE1_PROTECTION = 1, + SHOST_DIF_TYPE2_PROTECTION = 2, + SHOST_DIF_TYPE3_PROTECTION = 4, + SHOST_DIX_TYPE0_PROTECTION = 8, + SHOST_DIX_TYPE1_PROTECTION = 16, + SHOST_DIX_TYPE2_PROTECTION = 32, + SHOST_DIX_TYPE3_PROTECTION = 64, +}; + +enum { + SD_EXT_CDB_SIZE = 32, + SD_MEMPOOL_SIZE = 2, +}; + +enum { + SD_DEF_XFER_BLOCKS = 65535, + SD_MAX_XFER_BLOCKS = 4294967295, + SD_MAX_WS10_BLOCKS = 65535, + SD_MAX_WS16_BLOCKS = 8388607, +}; + +enum { + SD_LBP_FULL = 0, + SD_LBP_UNMAP = 1, + SD_LBP_WS16 = 2, + SD_LBP_WS10 = 3, + SD_LBP_ZERO = 4, + SD_LBP_DISABLE = 5, +}; + +enum { + SD_ZERO_WRITE = 0, + SD_ZERO_WS = 1, + SD_ZERO_WS16_UNMAP = 2, + SD_ZERO_WS10_UNMAP = 3, +}; + +union e1000_rx_desc_packet_split { + struct { + __le64 buffer_addr[4]; + } read; + struct { + struct { + __le32 mrq; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length0; + __le16 vlan; + } middle; + struct { + __le16 header_status; + __le16 length[3]; + } upper; + __le64 reserved; + } wb; +}; + +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; + u8 ipcso; + __le16 ipcse; + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; + u8 tucso; + __le16 tucse; + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; 
+ u8 hdr_len; + __le16 mss; + } fields; + } tcp_seg_setup; +}; + +enum e1000_boards { + board_82571 = 0, + board_82572 = 1, + board_82573 = 2, + board_82574 = 3, + board_82583 = 4, + board_80003es2lan = 5, + board_ich8lan = 6, + board_ich9lan = 7, + board_ich10lan = 8, + board_pchlan = 9, + board_pch2lan = 10, + board_pch_lpt = 11, + board_pch_spt = 12, + board_pch_cnp = 13, + board_pch_tgp = 14, + board_pch_adp = 15, + board_pch_mtp = 16, +}; + +struct trace_event_raw_e1000e_trace_mac_register { + struct trace_entry ent; + uint32_t reg; + char __data[0]; +}; + +struct trace_event_data_offsets_e1000e_trace_mac_register {}; + +typedef void (*btf_trace_e1000e_trace_mac_register)(void *, uint32_t); + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + +struct my_u1 { + __le64 a; + __le64 b; + __le64 c; + __le64 d; +}; + +struct xhci_regset { + char name[32]; + struct debugfs_regset32 regset; + size_t nregs; + struct list_head list; +}; + +struct xhci_file_map { + const char *name; + int (*show)(struct seq_file *, void *); +}; + +struct xhci_ep_priv { + char name[32]; + struct dentry *root; + struct xhci_stream_info *stream_info; + struct xhci_ring *show_ring; + unsigned int stream_id; +}; + +struct xhci_slot_priv { + char name[32]; + struct dentry *root; + struct xhci_ep_priv *eps[31]; + struct xhci_virt_device *dev; +}; + +struct input_mt_pos { + s16 x; + s16 y; +}; + +struct touchscreen_properties { + unsigned int max_x; + unsigned int max_y; + bool invert_x; + bool invert_y; + bool swap_x_y; +}; + +enum hwmon_chip_attributes { + hwmon_chip_temp_reset_history = 0, + hwmon_chip_in_reset_history = 1, + hwmon_chip_curr_reset_history = 2, + hwmon_chip_power_reset_history = 3, + hwmon_chip_register_tz = 4, + hwmon_chip_update_interval = 5, + hwmon_chip_alarms = 6, + hwmon_chip_samples = 7, + hwmon_chip_curr_samples = 8, + hwmon_chip_in_samples = 9, + hwmon_chip_power_samples = 10, + hwmon_chip_temp_samples = 11, + hwmon_chip_beep_enable = 12, + hwmon_chip_pec = 13, +}; + +enum hwmon_in_attributes { + hwmon_in_enable = 0, + hwmon_in_input = 1, + hwmon_in_min = 2, + hwmon_in_max = 3, + hwmon_in_lcrit = 4, + hwmon_in_crit = 5, + hwmon_in_average = 6, + hwmon_in_lowest = 7, + hwmon_in_highest = 8, + hwmon_in_reset_history = 9, + hwmon_in_label = 10, + hwmon_in_alarm = 11, + hwmon_in_min_alarm = 12, + hwmon_in_max_alarm = 13, + hwmon_in_lcrit_alarm = 14, + hwmon_in_crit_alarm = 15, + hwmon_in_rated_min = 16, + hwmon_in_rated_max = 17, + hwmon_in_beep = 18, + hwmon_in_fault = 19, +}; + +enum hwmon_curr_attributes { + hwmon_curr_enable = 0, + hwmon_curr_input = 1, + hwmon_curr_min = 2, + hwmon_curr_max = 3, + hwmon_curr_lcrit = 4, + hwmon_curr_crit = 5, + hwmon_curr_average = 6, + hwmon_curr_lowest = 7, + hwmon_curr_highest = 8, + hwmon_curr_reset_history = 9, + hwmon_curr_label = 10, + hwmon_curr_alarm = 11, + hwmon_curr_min_alarm = 12, + hwmon_curr_max_alarm = 13, + hwmon_curr_lcrit_alarm = 14, + hwmon_curr_crit_alarm = 15, + hwmon_curr_rated_min = 16, + hwmon_curr_rated_max = 17, + hwmon_curr_beep = 18, +}; + +enum hwmon_power_attributes { + hwmon_power_enable = 0, + hwmon_power_average = 1, + hwmon_power_average_interval = 2, + hwmon_power_average_interval_max = 3, + hwmon_power_average_interval_min = 4, + hwmon_power_average_highest = 5, + hwmon_power_average_lowest = 6, + hwmon_power_average_max = 7, + hwmon_power_average_min = 8, + hwmon_power_input = 9, + hwmon_power_input_highest = 10, + hwmon_power_input_lowest = 11, + hwmon_power_reset_history = 12, + hwmon_power_accuracy = 13, + 
hwmon_power_cap = 14, + hwmon_power_cap_hyst = 15, + hwmon_power_cap_max = 16, + hwmon_power_cap_min = 17, + hwmon_power_min = 18, + hwmon_power_max = 19, + hwmon_power_crit = 20, + hwmon_power_lcrit = 21, + hwmon_power_label = 22, + hwmon_power_alarm = 23, + hwmon_power_cap_alarm = 24, + hwmon_power_min_alarm = 25, + hwmon_power_max_alarm = 26, + hwmon_power_lcrit_alarm = 27, + hwmon_power_crit_alarm = 28, + hwmon_power_rated_min = 29, + hwmon_power_rated_max = 30, +}; + +enum hwmon_energy_attributes { + hwmon_energy_enable = 0, + hwmon_energy_input = 1, + hwmon_energy_label = 2, +}; + +enum hwmon_humidity_attributes { + hwmon_humidity_enable = 0, + hwmon_humidity_input = 1, + hwmon_humidity_label = 2, + hwmon_humidity_min = 3, + hwmon_humidity_min_hyst = 4, + hwmon_humidity_max = 5, + hwmon_humidity_max_hyst = 6, + hwmon_humidity_alarm = 7, + hwmon_humidity_fault = 8, + hwmon_humidity_rated_min = 9, + hwmon_humidity_rated_max = 10, + hwmon_humidity_min_alarm = 11, + hwmon_humidity_max_alarm = 12, +}; + +enum hwmon_fan_attributes { + hwmon_fan_enable = 0, + hwmon_fan_input = 1, + hwmon_fan_label = 2, + hwmon_fan_min = 3, + hwmon_fan_max = 4, + hwmon_fan_div = 5, + hwmon_fan_pulses = 6, + hwmon_fan_target = 7, + hwmon_fan_alarm = 8, + hwmon_fan_min_alarm = 9, + hwmon_fan_max_alarm = 10, + hwmon_fan_fault = 11, + hwmon_fan_beep = 12, +}; + +enum hwmon_pwm_attributes { + hwmon_pwm_input = 0, + hwmon_pwm_enable = 1, + hwmon_pwm_mode = 2, + hwmon_pwm_freq = 3, + hwmon_pwm_auto_channels_temp = 4, +}; + +enum hwmon_intrusion_attributes { + hwmon_intrusion_alarm = 0, + hwmon_intrusion_beep = 1, +}; + +struct trace_event_raw_hwmon_attr_class { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + long int val; + char __data[0]; +}; + +struct trace_event_raw_hwmon_attr_show_string { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + u32 __data_loc_label; + char __data[0]; +}; + +struct trace_event_data_offsets_hwmon_attr_class { + u32 attr_name; + const void *attr_name_ptr_; +}; + +struct trace_event_data_offsets_hwmon_attr_show_string { + u32 attr_name; + const void *attr_name_ptr_; + u32 label; + const void *label_ptr_; +}; + +typedef void (*btf_trace_hwmon_attr_show)(void *, int, const char *, long int); + +typedef void (*btf_trace_hwmon_attr_store)(void *, int, const char *, long int); + +typedef void (*btf_trace_hwmon_attr_show_string)(void *, int, const char *, const char *); + +struct hwmon_device { + const char *name; + const char *label; + struct device dev; + const struct hwmon_chip_info *chip; + struct list_head tzdata; + struct attribute_group group; + const struct attribute_group **groups; +}; + +struct hwmon_device_attribute { + struct device_attribute dev_attr; + const struct hwmon_ops *ops; + enum hwmon_sensor_types type; + u32 attr; + int index; + char name[32]; +}; + +struct hwmon_thermal_data { + struct list_head node; + struct device *dev; + int index; + struct thermal_zone_device *tzd; +}; + +struct __kernel_old_timespec { + __kernel_old_time_t tv_sec; + long int tv_nsec; +}; + +struct user_msghdr { + void *msg_name; + int msg_namelen; + struct iovec *msg_iov; + __kernel_size_t msg_iovlen; + void *msg_control; + __kernel_size_t msg_controllen; + unsigned int msg_flags; +}; + +struct mmsghdr { + struct user_msghdr msg_hdr; + unsigned int msg_len; +}; + +struct scm_timestamping_internal { + struct timespec64 ts[3]; +}; + +struct ifconf { + int ifc_len; + union { + char *ifcu_buf; + struct ifreq *ifcu_req; + } ifc_ifcu; +}; + +typedef s32 
compat_int_t; + +typedef u32 compat_uint_t; + +typedef u32 compat_ulong_t; + +typedef u32 compat_caddr_t; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_if_settings { + unsigned int type; + unsigned int size; + compat_uptr_t ifs_ifsu; +}; + +struct compat_ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct compat_msghdr { + compat_uptr_t msg_name; + compat_int_t msg_namelen; + compat_uptr_t msg_iov; + compat_size_t msg_iovlen; + compat_uptr_t msg_control; + compat_size_t msg_controllen; + compat_uint_t msg_flags; +}; + +struct compat_mmsghdr { + struct compat_msghdr msg_hdr; + compat_uint_t msg_len; +}; + +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; +}; + +struct used_address { + struct __kernel_sockaddr_storage name; + unsigned int name_len; +}; + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; + short unsigned int overhead; + short int cell_align; + short unsigned int mpu; + __u32 rate; +}; + +struct tc_prio_qopt { + int bands; + __u8 priomap[16]; +}; + +struct skb_array { + struct ptr_ring ring; +}; + +struct psched_ratecfg { + u64 rate_bytes_ps; + u32 mult; + u16 overhead; + u16 mpu; + u8 linklayer; + u8 shift; + long: 32; +}; + +struct psched_pktrate { + u64 rate_pkts_ps; + u32 mult; + u8 shift; +}; + +struct mini_Qdisc_pair { + struct mini_Qdisc miniq1; + struct mini_Qdisc miniq2; + struct mini_Qdisc **p_miniq; +}; + +struct pfifo_fast_priv { + struct skb_array q[3]; +}; + +struct ethtool_cmis_cdb_rpl_hdr { + u8 rpl_len; + u8 rpl_chk_code; +}; + +struct ethtool_cmis_cdb_rpl { + struct ethtool_cmis_cdb_rpl_hdr hdr; + u8 payload[120]; +}; + +struct cmis_rev_rpl { + u8 rev; +}; + +struct cmis_cdb_advert_rpl { + u8 inst_supported; + u8 read_write_len_ext; + u8 resv1; + u8 resv2; +}; + +struct cmis_password_entry_pl { + __be32 password; +}; + +struct cmis_cdb_query_status_pl { + u16 response_delay; +}; + +struct cmis_cdb_query_status_rpl { + u8 length; + u8 status; +}; + +struct cmis_cdb_module_features_rpl { + u8 resv1[34]; + __be16 max_completion_time; +}; + +struct cmis_wait_for_cond_rpl { + u8 state; +}; + +struct tcp_sack_block_wire { + __be32 start_seq; + __be32 end_seq; +}; + +enum nft_rule_attributes { + NFTA_RULE_UNSPEC = 0, + NFTA_RULE_TABLE = 1, + NFTA_RULE_CHAIN = 2, + NFTA_RULE_HANDLE = 3, + NFTA_RULE_EXPRESSIONS = 4, + NFTA_RULE_COMPAT = 5, + NFTA_RULE_POSITION = 6, + NFTA_RULE_USERDATA = 7, + NFTA_RULE_PAD = 8, + NFTA_RULE_ID = 9, + NFTA_RULE_POSITION_ID = 10, + NFTA_RULE_CHAIN_ID = 11, + __NFTA_RULE_MAX = 12, +}; + +enum nft_rule_compat_flags { + NFT_RULE_COMPAT_F_UNUSED = 1, + NFT_RULE_COMPAT_F_INV = 2, + NFT_RULE_COMPAT_F_MASK = 2, +}; + +enum nft_rule_compat_attributes { + NFTA_RULE_COMPAT_UNSPEC = 0, + NFTA_RULE_COMPAT_PROTO = 1, + NFTA_RULE_COMPAT_FLAGS = 2, + __NFTA_RULE_COMPAT_MAX = 3, +}; + +enum nft_target_attributes { + NFTA_TARGET_UNSPEC = 0, + NFTA_TARGET_NAME = 1, + NFTA_TARGET_REV = 2, + NFTA_TARGET_INFO = 3, + __NFTA_TARGET_MAX = 4, +}; + 
+enum nft_match_attributes { + NFTA_MATCH_UNSPEC = 0, + NFTA_MATCH_NAME = 1, + NFTA_MATCH_REV = 2, + NFTA_MATCH_INFO = 3, + __NFTA_MATCH_MAX = 4, +}; + +enum { + NFNL_MSG_COMPAT_GET = 0, + NFNL_MSG_COMPAT_MAX = 1, +}; + +enum { + NFTA_COMPAT_UNSPEC = 0, + NFTA_COMPAT_NAME = 1, + NFTA_COMPAT_REV = 2, + NFTA_COMPAT_TYPE = 3, + __NFTA_COMPAT_MAX = 4, +}; + +struct ip6t_entry { + struct ip6t_ip6 ipv6; + unsigned int nfcache; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + long: 32; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct ebt_entry { + unsigned int bitmask; + unsigned int invflags; + __be16 ethproto; + char in[16]; + char logical_in[16]; + char out[16]; + char logical_out[16]; + unsigned char sourcemac[6]; + unsigned char sourcemsk[6]; + unsigned char destmac[6]; + unsigned char destmsk[6]; + union { + struct { + unsigned int watchers_offset; + unsigned int target_offset; + unsigned int next_offset; + }; + struct { + unsigned int watchers_offset; + unsigned int target_offset; + unsigned int next_offset; + } offsets; + }; + unsigned char elems[0]; +}; + +struct arpt_devaddr_info { + char addr[16]; + char mask[16]; +}; + +struct arpt_arp { + struct in_addr src; + struct in_addr tgt; + struct in_addr smsk; + struct in_addr tmsk; + __u8 arhln; + __u8 arhln_mask; + struct arpt_devaddr_info src_devaddr; + struct arpt_devaddr_info tgt_devaddr; + __be16 arpop; + __be16 arpop_mask; + __be16 arhrd; + __be16 arhrd_mask; + __be16 arpro; + __be16 arpro_mask; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u8 flags; + __u16 invflags; +}; + +struct arpt_entry { + struct arpt_arp arp; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + long: 32; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct nft_xt_match_priv { + void *info; +}; + +union nft_entry { + struct ipt_entry e4; + struct ip6t_entry e6; + struct ebt_entry ebt; + struct arpt_entry arp; +}; + +enum { + UDP_FLAGS_CORK = 0, + UDP_FLAGS_NO_CHECK6_TX = 1, + UDP_FLAGS_NO_CHECK6_RX = 2, + UDP_FLAGS_GRO_ENABLED = 3, + UDP_FLAGS_ACCEPT_FRAGLIST = 4, + UDP_FLAGS_ACCEPT_L4 = 5, + UDP_FLAGS_ENCAP_ENABLED = 6, + UDP_FLAGS_UDPLITE_SEND_CC = 7, + UDP_FLAGS_UDPLITE_RECV_CC = 8, +}; + +typedef struct sock * (*udp_lookup_t)(const struct sk_buff *, __be16, __be16); + +typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); + +struct udp_seq_afinfo { + sa_family_t family; + struct udp_table *udp_table; +}; + +struct udp_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + __u16 cscov; + __u8 partial_cov; +}; + +struct of_dev_auxdata { + char *compatible; + resource_size_t phys_addr; + char *name; + void *platform_data; +}; + +enum { + REGION_INTERSECTS = 0, + REGION_DISJOINT = 1, + REGION_MIXED = 2, +}; + +enum { + MAX_IORES_LEVEL = 5, +}; + +struct region_devres { + struct resource *parent; + resource_size_t start; + resource_size_t n; +}; + +enum wd_read_status { + WD_READ_SUCCESS = 0, + WD_READ_UNSTABLE = 1, + WD_READ_SKIP = 2, +}; + +enum { + Q_REQUEUE_PI_NONE = 0, + Q_REQUEUE_PI_IGNORE = 1, + Q_REQUEUE_PI_IN_PROGRESS = 2, + Q_REQUEUE_PI_WAIT = 3, + Q_REQUEUE_PI_DONE = 4, + Q_REQUEUE_PI_LOCKED = 5, +}; + +struct bpf_cpumap_val { + __u32 qsize; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct bpf_cpu_map_entry; + +struct xdp_bulk_queue { + void *q[8]; + struct list_head flush_node; + struct bpf_cpu_map_entry *obj; + 
unsigned int count; +}; + +struct bpf_cpu_map_entry { + u32 cpu; + int map_id; + struct xdp_bulk_queue *bulkq; + struct ptr_ring *queue; + struct task_struct *kthread; + struct bpf_cpumap_val value; + struct bpf_prog *prog; + struct completion kthread_running; + struct rcu_work free_work; +}; + +struct bpf_cpu_map { + struct bpf_map map; + struct bpf_cpu_map_entry **cpu_map; + long: 32; +}; + +struct f_owner_ex { + int type; + __kernel_pid_t pid; +}; + +struct flock { + short int l_type; + short int l_whence; + __kernel_off_t l_start; + __kernel_off_t l_len; + __kernel_pid_t l_pid; + long int l_sysid; + long int pad[4]; +}; + +struct flock64 { + short int l_type; + short int l_whence; + long: 32; + __kernel_loff_t l_start; + __kernel_loff_t l_len; + __kernel_pid_t l_pid; + long: 32; +}; + +typedef long int __kernel_daddr_t; + +struct ustat { + __kernel_daddr_t f_tfree; + long unsigned int f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +struct statfs { + long int f_type; + long int f_bsize; + long int f_frsize; + long int f_blocks; + long int f_bfree; + long int f_files; + long int f_ffree; + long int f_bavail; + __kernel_fsid_t f_fsid; + long int f_namelen; + long int f_flags; + long int f_spare[5]; +}; + +struct statfs64 { + __u32 f_type; + __u32 f_bsize; + __u32 f_frsize; + __u32 __pad; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_files; + __u64 f_ffree; + __u64 f_bavail; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_flags; + __u32 f_spare[5]; + long: 32; +}; + +struct iomap_dio { + struct kiocb *iocb; + const struct iomap_dio_ops *dops; + loff_t i_size; + loff_t size; + atomic_t ref; + unsigned int flags; + int error; + size_t done_before; + bool wait_for_completion; + union { + struct { + struct iov_iter *iter; + struct task_struct *waiter; + } submit; + struct { + struct work_struct work; + } aio; + }; + long: 32; +}; + +struct pending_reservation { + struct rb_node rb_node; + ext4_lblk_t lclu; +}; + +struct rsvd_count { + int ndelayed; + bool first_do_lblk_found; + ext4_lblk_t first_do_lblk; + ext4_lblk_t last_do_lblk; + struct extent_status *left_es; + bool partial; + ext4_lblk_t lclu; +}; + +typedef u32 unicode_t; + +struct utf8_table { + int cmask; + int cval; + int shift; + long int lmask; + long int lval; +}; + +struct fuse_file_lock { + uint64_t start; + uint64_t end; + uint32_t type; + uint32_t pid; +}; + +struct fuse_open_in { + uint32_t flags; + uint32_t open_flags; +}; + +struct fuse_flush_in { + uint64_t fh; + uint32_t unused; + uint32_t padding; + uint64_t lock_owner; +}; + +struct fuse_read_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t read_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t write_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_out { + uint32_t size; + uint32_t padding; +}; + +struct fuse_fsync_in { + uint64_t fh; + uint32_t fsync_flags; + uint32_t padding; +}; + +struct fuse_lk_in { + uint64_t fh; + uint64_t owner; + struct fuse_file_lock lk; + uint32_t lk_flags; + uint32_t padding; +}; + +struct fuse_lk_out { + struct fuse_file_lock lk; +}; + +struct fuse_bmap_in { + uint64_t block; + uint32_t blocksize; + uint32_t padding; +}; + +struct fuse_bmap_out { + uint64_t block; +}; + +struct fuse_poll_in { + uint64_t fh; + uint64_t kh; + uint32_t flags; + uint32_t events; +}; + +struct fuse_poll_out { + uint32_t revents; + uint32_t padding; +}; + +struct fuse_fallocate_in 
{ + uint64_t fh; + uint64_t offset; + uint64_t length; + uint32_t mode; + uint32_t padding; +}; + +struct fuse_lseek_in { + uint64_t fh; + uint64_t offset; + uint32_t whence; + uint32_t padding; +}; + +struct fuse_lseek_out { + uint64_t offset; +}; + +struct fuse_copy_file_range_in { + uint64_t fh_in; + uint64_t off_in; + uint64_t nodeid_out; + uint64_t fh_out; + uint64_t off_out; + uint64_t len; + uint64_t flags; +}; + +struct fuse_io_priv { + struct kref refcnt; + int async; + spinlock_t lock; + unsigned int reqs; + ssize_t bytes; + size_t size; + __u64 offset; + bool write; + bool should_dirty; + int err; + struct kiocb *iocb; + struct completion *done; + bool blocking; + long: 32; +}; + +struct fuse_io_args { + union { + struct { + struct fuse_read_in in; + u64 attr_ver; + } read; + struct { + struct fuse_write_in in; + struct fuse_write_out out; + bool folio_locked; + long: 32; + } write; + }; + struct fuse_args_pages ap; + struct fuse_io_priv *io; + struct fuse_file *ff; +}; + +struct fuse_writepage_args { + struct fuse_io_args ia; + struct rb_node writepages_entry; + struct list_head queue_entry; + struct fuse_writepage_args *next; + struct inode *inode; + struct fuse_sync_bucket *bucket; +}; + +struct fuse_fill_wb_data { + struct fuse_writepage_args *wpa; + struct fuse_file *ff; + struct inode *inode; + struct folio **orig_folios; + unsigned int max_folios; +}; + +typedef enum { + ZSTDcs_created = 0, + ZSTDcs_init = 1, + ZSTDcs_ongoing = 2, + ZSTDcs_ending = 3, +} ZSTD_compressionStage_e; + +typedef struct { + int contentSizeFlag; + int checksumFlag; + int noDictIDFlag; +} ZSTD_frameParameters; + +typedef enum { + ZSTD_dictDefaultAttach = 0, + ZSTD_dictForceAttach = 1, + ZSTD_dictForceCopy = 2, + ZSTD_dictForceLoad = 3, +} ZSTD_dictAttachPref_e; + +typedef enum { + ZSTD_sf_noBlockDelimiters = 0, + ZSTD_sf_explicitBlockDelimiters = 1, +} ZSTD_sequenceFormat_e; + +struct ZSTD_CCtx_params_s { + ZSTD_format_e format; + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; + int compressionLevel; + int forceWindow; + size_t targetCBlockSize; + int srcSizeHint; + ZSTD_dictAttachPref_e attachDictPref; + ZSTD_paramSwitch_e literalCompressionMode; + int nbWorkers; + size_t jobSize; + int overlapLog; + int rsyncable; + ldmParams_t ldmParams; + int enableDedicatedDictSearch; + ZSTD_bufferMode_e inBufferMode; + ZSTD_bufferMode_e outBufferMode; + ZSTD_sequenceFormat_e blockDelimiters; + int validateSequences; + ZSTD_paramSwitch_e useBlockSplitter; + ZSTD_paramSwitch_e useRowMatchFinder; + int deterministicRefPrefix; + ZSTD_customMem customMem; +}; + +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params; + +typedef enum { + ZSTD_cwksp_alloc_objects = 0, + ZSTD_cwksp_alloc_buffers = 1, + ZSTD_cwksp_alloc_aligned = 2, +} ZSTD_cwksp_alloc_phase_e; + +typedef enum { + ZSTD_cwksp_dynamic_alloc = 0, + ZSTD_cwksp_static_alloc = 1, +} ZSTD_cwksp_static_alloc_e; + +typedef struct { + void *workspace; + void *workspaceEnd; + void *objectEnd; + void *tableEnd; + void *tableValidEnd; + void *allocStart; + BYTE allocFailed; + int workspaceOversizedDuration; + ZSTD_cwksp_alloc_phase_e phase; + ZSTD_cwksp_static_alloc_e isStatic; +} ZSTD_cwksp; + +struct POOL_ctx_s; + +typedef struct POOL_ctx_s ZSTD_threadPool; + +typedef struct { + unsigned int offset; + unsigned int litLength; + unsigned int matchLength; + unsigned int rep; +} ZSTD_Sequence; + +typedef struct { + int collectSequences; + ZSTD_Sequence *seqStart; + size_t seqIndex; + size_t maxSequences; +} SeqCollector; + +typedef struct { + 
ZSTD_entropyCTables_t entropy; + U32 rep[3]; +} ZSTD_compressedBlockState_t; + +typedef struct { + ZSTD_compressedBlockState_t *prevCBlock; + ZSTD_compressedBlockState_t *nextCBlock; + ZSTD_matchState_t matchState; +} ZSTD_blockState_t; + +typedef enum { + ZSTDb_not_buffered = 0, + ZSTDb_buffered = 1, +} ZSTD_buffered_policy_e; + +typedef enum { + zcss_init = 0, + zcss_load = 1, + zcss_flush = 2, +} ZSTD_cStreamStage; + +struct ZSTD_CDict_s; + +typedef struct ZSTD_CDict_s ZSTD_CDict; + +typedef struct { + void *dictBuffer; + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; + ZSTD_CDict *cdict; +} ZSTD_localDict; + +struct ZSTD_prefixDict_s { + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; +}; + +typedef struct ZSTD_prefixDict_s ZSTD_prefixDict; + +typedef struct { + symbolEncodingType_e hType; + BYTE hufDesBuffer[128]; + size_t hufDesSize; +} ZSTD_hufCTablesMetadata_t; + +typedef struct { + symbolEncodingType_e llType; + symbolEncodingType_e ofType; + symbolEncodingType_e mlType; + BYTE fseTablesBuffer[133]; + size_t fseTablesSize; + size_t lastCountSize; +} ZSTD_fseCTablesMetadata_t; + +typedef struct { + ZSTD_hufCTablesMetadata_t hufMetadata; + ZSTD_fseCTablesMetadata_t fseMetadata; +} ZSTD_entropyCTablesMetadata_t; + +typedef struct { + seqStore_t fullSeqStoreChunk; + seqStore_t firstHalfSeqStore; + seqStore_t secondHalfSeqStore; + seqStore_t currSeqStore; + seqStore_t nextSeqStore; + U32 partitions[196]; + ZSTD_entropyCTablesMetadata_t entropyMetadata; +} ZSTD_blockSplitCtx; + +struct ZSTD_CCtx_s { + ZSTD_compressionStage_e stage; + int cParamsChanged; + int bmi2; + ZSTD_CCtx_params requestedParams; + ZSTD_CCtx_params appliedParams; + ZSTD_CCtx_params simpleApiParams; + U32 dictID; + size_t dictContentSize; + ZSTD_cwksp workspace; + size_t blockSize; + long long unsigned int pledgedSrcSizePlusOne; + long long unsigned int consumedSrcSize; + long long unsigned int producedCSize; + struct xxh64_state xxhState; + ZSTD_customMem customMem; + ZSTD_threadPool *pool; + size_t staticSize; + SeqCollector seqCollector; + int isFirstBlock; + int initialized; + seqStore_t seqStore; + ldmState_t ldmState; + rawSeq *ldmSequences; + size_t maxNbLdmSequences; + rawSeqStore_t externSeqStore; + ZSTD_blockState_t blockState; + U32 *entropyWorkspace; + ZSTD_buffered_policy_e bufferedPolicy; + char *inBuff; + size_t inBuffSize; + size_t inToCompress; + size_t inBuffPos; + size_t inBuffTarget; + char *outBuff; + size_t outBuffSize; + size_t outBuffContentSize; + size_t outBuffFlushedSize; + ZSTD_cStreamStage streamStage; + U32 frameEnded; + ZSTD_inBuffer expectedInBuffer; + size_t expectedOutBufferSize; + ZSTD_localDict localDict; + const ZSTD_CDict *cdict; + ZSTD_prefixDict prefixDict; + ZSTD_blockSplitCtx blockSplitCtx; +}; + +typedef struct ZSTD_CCtx_s ZSTD_CCtx; + +typedef ZSTD_CCtx ZSTD_CStream; + +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +typedef ZSTD_ErrorCode zstd_error_code; + +typedef ZSTD_compressionParameters zstd_compression_parameters; + +typedef ZSTD_parameters zstd_parameters; + +typedef ZSTD_inBuffer zstd_in_buffer; + +typedef ZSTD_outBuffer zstd_out_buffer; + +typedef ZSTD_CStream zstd_cstream; + +typedef ZSTD_DStream zstd_dstream; + +struct workspace___2 { + void *mem; + size_t size; + char *buf; + unsigned int level; + unsigned int req_level; + long unsigned int last_used; + struct list_head list; + struct list_head lru_list; + zstd_in_buffer in_buf; + 
zstd_out_buffer out_buf; +}; + +struct zstd_workspace_manager { + const struct btrfs_compress_op *ops; + spinlock_t lock; + struct list_head lru_list; + struct list_head idle_ws[15]; + long unsigned int active_map; + wait_queue_head_t wait; + struct timer_list timer; +}; + +struct lzorle_ctx { + void *lzorle_comp_mem; +}; + +struct io_sync { + struct file *file; + long: 32; + loff_t len; + loff_t off; + int flags; + int mode; +}; + +struct crypto_aes_ctx { + u32 key_enc[60]; + u32 key_dec[60]; + u32 key_length; +}; + +typedef struct tree_desc_s tree_desc; + +typedef U64 ZSTD_VecMask; + +typedef enum { + search_hashChain = 0, + search_binaryTree = 1, + search_rowHash = 2, +} searchMethod_e; + +enum { + KERNEL_PARAM_FL_UNSAFE = 1, + KERNEL_PARAM_FL_HWPARAM = 2, +}; + +struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +enum { + FBCON_LOGO_CANSHOW = -1, + FBCON_LOGO_DRAW = -2, + FBCON_LOGO_DONTSHOW = -3, +}; + +struct fwnode_link { + struct fwnode_handle *supplier; + struct list_head s_hook; + struct fwnode_handle *consumer; + struct list_head c_hook; + u8 flags; +}; + +enum dpm_order { + DPM_ORDER_NONE = 0, + DPM_ORDER_DEV_AFTER_PARENT = 1, + DPM_ORDER_PARENT_BEFORE_DEV = 2, + DPM_ORDER_DEV_LAST = 3, +}; + +struct subsys_private { + struct kset subsys; + struct kset *devices_kset; + struct list_head interfaces; + struct mutex mutex; + struct kset *drivers_kset; + struct klist klist_devices; + struct klist klist_drivers; + struct blocking_notifier_head bus_notifier; + unsigned int drivers_autoprobe: 1; + const struct bus_type *bus; + struct device *dev_root; + struct kset glue_dirs; + const struct class *class; + struct lock_class_key lock_key; +}; + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +struct class_dir { + struct kobject kobj; + const struct class *class; +}; + +struct root_device { + struct device dev; + struct module *owner; + long: 32; +}; + +struct mdio_board_entry { + struct list_head list; + struct mdio_board_info board_info; +}; + +struct usb_descriptor_header { + __u8 bLength; + __u8 bDescriptorType; +}; + +struct usb_dev_cap_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +enum { + POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, + POWER_SUPPLY_TECHNOLOGY_NiMH = 1, + POWER_SUPPLY_TECHNOLOGY_LION = 2, + POWER_SUPPLY_TECHNOLOGY_LIPO = 3, + POWER_SUPPLY_TECHNOLOGY_LiFe = 4, + POWER_SUPPLY_TECHNOLOGY_NiCd = 5, + POWER_SUPPLY_TECHNOLOGY_LiMn = 6, +}; + +enum { + POWER_SUPPLY_SCOPE_UNKNOWN = 0, + POWER_SUPPLY_SCOPE_SYSTEM = 1, + POWER_SUPPLY_SCOPE_DEVICE = 2, +}; + +enum power_supply_property { + POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE = 1, + POWER_SUPPLY_PROP_HEALTH = 2, + POWER_SUPPLY_PROP_PRESENT = 3, + POWER_SUPPLY_PROP_ONLINE = 4, + POWER_SUPPLY_PROP_AUTHENTIC = 5, + POWER_SUPPLY_PROP_TECHNOLOGY = 6, + POWER_SUPPLY_PROP_CYCLE_COUNT = 7, + POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, + POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, + POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, + POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, + POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, + POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, + POWER_SUPPLY_PROP_CURRENT_MAX = 16, + POWER_SUPPLY_PROP_CURRENT_NOW = 17, + POWER_SUPPLY_PROP_CURRENT_AVG = 18, + POWER_SUPPLY_PROP_CURRENT_BOOT = 19, + POWER_SUPPLY_PROP_POWER_NOW = 20, + POWER_SUPPLY_PROP_POWER_AVG = 21, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, + 
POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, + POWER_SUPPLY_PROP_CHARGE_FULL = 24, + POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, + POWER_SUPPLY_PROP_CHARGE_NOW = 26, + POWER_SUPPLY_PROP_CHARGE_AVG = 27, + POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34, + POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD = 35, + POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD = 36, + POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR = 37, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 38, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT = 39, + POWER_SUPPLY_PROP_INPUT_POWER_LIMIT = 40, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 41, + POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 42, + POWER_SUPPLY_PROP_ENERGY_FULL = 43, + POWER_SUPPLY_PROP_ENERGY_EMPTY = 44, + POWER_SUPPLY_PROP_ENERGY_NOW = 45, + POWER_SUPPLY_PROP_ENERGY_AVG = 46, + POWER_SUPPLY_PROP_CAPACITY = 47, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 48, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 49, + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN = 50, + POWER_SUPPLY_PROP_CAPACITY_LEVEL = 51, + POWER_SUPPLY_PROP_TEMP = 52, + POWER_SUPPLY_PROP_TEMP_MAX = 53, + POWER_SUPPLY_PROP_TEMP_MIN = 54, + POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 55, + POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 56, + POWER_SUPPLY_PROP_TEMP_AMBIENT = 57, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 58, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 59, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 60, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 61, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 62, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 63, + POWER_SUPPLY_PROP_TYPE = 64, + POWER_SUPPLY_PROP_USB_TYPE = 65, + POWER_SUPPLY_PROP_SCOPE = 66, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 67, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 68, + POWER_SUPPLY_PROP_CALIBRATE = 69, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR = 70, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH = 71, + POWER_SUPPLY_PROP_MANUFACTURE_DAY = 72, + POWER_SUPPLY_PROP_MODEL_NAME = 73, + POWER_SUPPLY_PROP_MANUFACTURER = 74, + POWER_SUPPLY_PROP_SERIAL_NUMBER = 75, +}; + +enum power_supply_type { + POWER_SUPPLY_TYPE_UNKNOWN = 0, + POWER_SUPPLY_TYPE_BATTERY = 1, + POWER_SUPPLY_TYPE_UPS = 2, + POWER_SUPPLY_TYPE_MAINS = 3, + POWER_SUPPLY_TYPE_USB = 4, + POWER_SUPPLY_TYPE_USB_DCP = 5, + POWER_SUPPLY_TYPE_USB_CDP = 6, + POWER_SUPPLY_TYPE_USB_ACA = 7, + POWER_SUPPLY_TYPE_USB_TYPE_C = 8, + POWER_SUPPLY_TYPE_USB_PD = 9, + POWER_SUPPLY_TYPE_USB_PD_DRP = 10, + POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, + POWER_SUPPLY_TYPE_WIRELESS = 12, +}; + +enum power_supply_notifier_events { + PSY_EVENT_PROP_CHANGED = 0, +}; + +union power_supply_propval { + int intval; + const char *strval; +}; + +struct power_supply_config { + struct device_node *of_node; + struct fwnode_handle *fwnode; + void *drv_data; + const struct attribute_group **attr_grp; + char **supplied_to; + size_t num_supplicants; + bool no_wakeup_source; +}; + +struct power_supply; + +struct power_supply_desc { + const char *name; + enum power_supply_type type; + u8 charge_behaviours; + u32 usb_types; + const enum power_supply_property *properties; + size_t num_properties; + int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *); + int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *); + int 
(*property_is_writeable)(struct power_supply *, enum power_supply_property); + void (*external_power_changed)(struct power_supply *); + void (*set_charged)(struct power_supply *); + bool no_thermal; + int use_for_apm; +}; + +struct power_supply_battery_info; + +struct power_supply { + const struct power_supply_desc *desc; + char **supplied_to; + size_t num_supplicants; + char **supplied_from; + size_t num_supplies; + struct device_node *of_node; + void *drv_data; + long: 32; + struct device dev; + struct work_struct changed_work; + struct delayed_work deferred_register_work; + spinlock_t changed_lock; + bool changed; + bool initialized; + bool removing; + atomic_t use_cnt; + struct power_supply_battery_info *battery_info; + struct thermal_zone_device *tzd; + struct thermal_cooling_device *tcd; + long: 32; +}; + +struct power_supply_maintenance_charge_table; + +struct power_supply_battery_ocv_table; + +struct power_supply_resistance_temp_table; + +struct power_supply_vbat_ri_table; + +struct power_supply_battery_info { + unsigned int technology; + int energy_full_design_uwh; + int charge_full_design_uah; + int voltage_min_design_uv; + int voltage_max_design_uv; + int tricklecharge_current_ua; + int precharge_current_ua; + int precharge_voltage_max_uv; + int charge_term_current_ua; + int charge_restart_voltage_uv; + int overvoltage_limit_uv; + int constant_charge_current_max_ua; + int constant_charge_voltage_max_uv; + const struct power_supply_maintenance_charge_table *maintenance_charge; + int maintenance_charge_size; + int alert_low_temp_charge_current_ua; + int alert_low_temp_charge_voltage_uv; + int alert_high_temp_charge_current_ua; + int alert_high_temp_charge_voltage_uv; + int factory_internal_resistance_uohm; + int factory_internal_resistance_charging_uohm; + int ocv_temp[20]; + int temp_ambient_alert_min; + int temp_ambient_alert_max; + int temp_alert_min; + int temp_alert_max; + int temp_min; + int temp_max; + const struct power_supply_battery_ocv_table *ocv_table[20]; + int ocv_table_size[20]; + const struct power_supply_resistance_temp_table *resist_table; + int resist_table_size; + const struct power_supply_vbat_ri_table *vbat2ri_discharging; + int vbat2ri_discharging_size; + const struct power_supply_vbat_ri_table *vbat2ri_charging; + int vbat2ri_charging_size; + int bti_resistance_ohm; + int bti_resistance_tolerance; +}; + +struct power_supply_battery_ocv_table { + int ocv; + int capacity; +}; + +struct power_supply_resistance_temp_table { + int temp; + int resistance; +}; + +struct power_supply_vbat_ri_table { + int vbat_uv; + int ri_uohm; +}; + +struct power_supply_maintenance_charge_table { + int charge_current_max_ua; + int charge_voltage_max_uv; + int charge_safety_timer_minutes; +}; + +struct psy_am_i_supplied_data { + struct power_supply *psy; + unsigned int count; +}; + +struct psy_get_supplier_prop_data { + struct power_supply *psy; + enum power_supply_property psp; + union power_supply_propval *val; +}; + +enum { + TCA_FLOWER_KEY_CT_FLAGS_NEW = 1, + TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 2, + TCA_FLOWER_KEY_CT_FLAGS_RELATED = 4, + TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 8, + TCA_FLOWER_KEY_CT_FLAGS_INVALID = 16, + TCA_FLOWER_KEY_CT_FLAGS_REPLY = 32, + __TCA_FLOWER_KEY_CT_FLAGS_MAX = 33, +}; + +enum flow_dissector_ctrl_flags { + FLOW_DIS_IS_FRAGMENT = 1, + FLOW_DIS_FIRST_FRAG = 2, + FLOW_DIS_F_TUNNEL_CSUM = 4, + FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = 8, + FLOW_DIS_F_TUNNEL_OAM = 16, + FLOW_DIS_F_TUNNEL_CRIT_OPT = 32, + FLOW_DIS_ENCAPSULATION = 64, +}; + +enum flow_dissect_ret { + 
FLOW_DISSECT_RET_OUT_GOOD = 0, + FLOW_DISSECT_RET_OUT_BAD = 1, + FLOW_DISSECT_RET_PROTO_AGAIN = 2, + FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, + FLOW_DISSECT_RET_CONTINUE = 4, +}; + +struct flow_dissector_mpls_lse { + u32 mpls_ttl: 8; + u32 mpls_bos: 1; + u32 mpls_tc: 3; + u32 mpls_label: 20; +}; + +struct flow_dissector_key_mpls { + struct flow_dissector_mpls_lse ls[7]; + u8 used_lses; +}; + +struct flow_dissector_key_enc_opts { + u8 data[255]; + u8 len; + u32 dst_opt_type; +}; + +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[6]; + unsigned char tha[6]; +}; + +struct flow_dissector_key_ct { + u16 ct_state; + u16 ct_zone; + u32 ct_mark; + u32 ct_labels[4]; +}; + +struct flow_dissector_key_hash { + u32 hash; +}; + +struct flow_dissector_key_num_of_vlans { + u8 num_of_vlans; +}; + +struct flow_dissector_key_pppoe { + __be16 session_id; + __be16 ppp_proto; + __be16 type; +}; + +struct flow_dissector_key_l2tpv3 { + __be32 session_id; +}; + +struct flow_dissector_key_ipsec { + __be32 spi; +}; + +struct flow_dissector_key_cfm { + u8 mdl_ver; + u8 opcode; +}; + +struct flow_dissector_key { + enum flow_dissector_key_id key_id; + size_t offset; +}; + +struct flow_keys_digest { + u8 data[16]; +}; + +enum bpf_ret_code { + BPF_OK = 0, + BPF_DROP = 2, + BPF_REDIRECT = 7, + BPF_LWT_REROUTE = 128, + BPF_FLOW_DISSECTOR_CONTINUE = 129, +}; + +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1, + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2, + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4, +}; + +struct pptp_gre_header { + struct gre_base_hdr gre_hd; + __be16 payload_len; + __be16 call_id; + __be32 seq; + __be32 ack; +}; + +struct tipc_basic_hdr { + __be32 w[4]; +}; + +enum dccp_state { + DCCP_OPEN = 1, + DCCP_REQUESTING = 2, + DCCP_LISTEN = 10, + DCCP_RESPOND = 3, + DCCP_ACTIVE_CLOSEREQ = 4, + DCCP_PASSIVE_CLOSE = 8, + DCCP_CLOSING = 11, + DCCP_TIME_WAIT = 6, + DCCP_CLOSED = 7, + DCCP_NEW_SYN_RECV = 12, + DCCP_PARTOPEN = 14, + DCCP_PASSIVE_CLOSEREQ = 15, + DCCP_MAX_STATES = 16, +}; + +struct pppoe_tag { + __be16 tag_type; + __be16 tag_len; + char tag_data[0]; +}; + +struct pppoe_hdr { + __u8 ver: 4; + __u8 type: 4; + __u8 code; + __be16 sid; + __be16 length; + struct pppoe_tag tag[0]; +}; + +struct hsr_tag { + __be16 path_and_LSDU_size; + __be16 sequence_nr; + __be16 encap_proto; +}; + +struct mpls_label { + __be32 entry; +}; + +struct clock_identity { + u8 id[8]; +}; + +struct port_identity { + struct clock_identity clock_identity; + __be16 port_number; +}; + +struct ptp_header { + u8 tsmt; + u8 ver; + __be16 message_length; + u8 domain_number; + u8 reserved1; + u8 flag_field[2]; + __be64 correction; + __be32 reserved2; + struct port_identity source_port_identity; + __be16 sequence_id; + u8 control; + u8 log_message_interval; +} __attribute__((packed)); + +enum batadv_packettype { + BATADV_IV_OGM = 0, + BATADV_BCAST = 1, + BATADV_CODED = 2, + BATADV_ELP = 3, + BATADV_OGM2 = 4, + BATADV_MCAST = 5, + BATADV_UNICAST = 64, + BATADV_UNICAST_FRAG = 65, + BATADV_UNICAST_4ADDR = 66, + BATADV_ICMP = 67, + BATADV_UNICAST_TVLV = 68, +}; + +struct batadv_unicast_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 ttvn; + __u8 dest[6]; +}; + +struct _flow_keys_digest_data { + __be16 n_proto; + u8 ip_proto; + u8 padding; + __be32 ports; + __be32 src; + __be32 dst; +}; + +struct dmabuf_genpool_chunk_owner { + long unsigned int base_virtual; + dma_addr_t base_dma_addr; + struct net_iov *niovs; + size_t num_niovs; + struct net_devmem_dmabuf_binding *binding; +}; + +struct 
features_reply_data { + struct ethnl_reply_data base; + u32 hw[2]; + u32 wanted[2]; + u32 active[2]; + u32 nochange[2]; + u32 all[2]; +}; + +enum ip_conntrack_expect_events { + IPEXP_NEW = 0, + IPEXP_DESTROY = 1, +}; + +struct nf_conntrack_net { + atomic_t count; + unsigned int expect_count; + unsigned int users4; + unsigned int users6; + unsigned int users_bridge; + struct ctl_table_header *sysctl_header; +}; + +struct ct_expect_iter_state { + struct seq_net_private p; + unsigned int bucket; +}; + +enum nft_counter_attributes { + NFTA_COUNTER_UNSPEC = 0, + NFTA_COUNTER_BYTES = 1, + NFTA_COUNTER_PACKETS = 2, + NFTA_COUNTER_PAD = 3, + __NFTA_COUNTER_MAX = 4, +}; + +struct nft_counter { + u64_stats_t bytes; + u64_stats_t packets; +}; + +struct nft_counter_tot { + s64 bytes; + s64 packets; +}; + +struct nft_counter_percpu_priv { + struct nft_counter *counter; +}; + +struct ping_iter_state { + struct seq_net_private p; + int bucket; + sa_family_t family; +}; + +struct pingfakehdr { + struct icmphdr icmph; + struct msghdr *msg; + sa_family_t family; + __wsum wcheck; +}; + +struct user_pt_regs { + __u64 regs[32]; + __u64 lo; + __u64 hi; + __u64 cp0_epc; + __u64 cp0_badvaddr; + __u64 cp0_status; + __u64 cp0_cause; +}; + +enum pt_watch_style { + pt_watch_style_mips32 = 0, + pt_watch_style_mips64 = 1, +}; + +struct mips32_watch_regs { + unsigned int watchlo[8]; + short unsigned int watchhi[8]; + short unsigned int watch_masks[8]; + unsigned int num_valid; + long: 32; +}; + +struct mips64_watch_regs { + long long unsigned int watchlo[8]; + short unsigned int watchhi[8]; + short unsigned int watch_masks[8]; + unsigned int num_valid; + long: 32; +}; + +struct pt_watch_regs { + enum pt_watch_style style; + long: 32; + union { + struct mips32_watch_regs mips32; + struct mips64_watch_regs mips64; + }; +}; + +typedef struct { + void *lock; +} class_rcu_tasks_trace_t; + +struct trace_event_raw_sys_enter { + struct trace_entry ent; + long int id; + long unsigned int args[6]; + char __data[0]; +}; + +struct trace_event_raw_sys_exit { + struct trace_entry ent; + long int id; + long int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_sys_enter {}; + +struct trace_event_data_offsets_sys_exit {}; + +typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long int); + +typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long int); + +enum mips_regset { + REGSET_GPR = 0, + REGSET_DSP = 1, + REGSET_FPR = 2, + REGSET_FP_MODE = 3, +}; + +struct pt_regs_offset { + const char *name; + int offset; +}; + +struct maar_config { + phys_addr_t lower; + phys_addr_t upper; + unsigned int attrs; +}; + +struct maar_walk_info { + struct maar_config cfg[16]; + unsigned int num_cfg; +}; + +typedef struct { + unsigned int __softirq_pending; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +} irq_cpustat_t; + +struct softirq_action { + void (*action)(); +}; + +struct trace_event_raw_irq_handler_entry { + struct trace_entry ent; + int irq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_irq_handler_exit { + struct trace_entry ent; + int irq; + int ret; + char __data[0]; +}; + +struct trace_event_raw_softirq { + struct trace_entry ent; + unsigned int vec; + char 
__data[0]; +}; + +struct trace_event_raw_tasklet { + struct trace_entry ent; + void *tasklet; + void *func; + char __data[0]; +}; + +struct trace_event_data_offsets_irq_handler_entry { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_irq_handler_exit {}; + +struct trace_event_data_offsets_softirq {}; + +struct trace_event_data_offsets_tasklet {}; + +typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *); + +typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int); + +typedef void (*btf_trace_softirq_entry)(void *, unsigned int); + +typedef void (*btf_trace_softirq_exit)(void *, unsigned int); + +typedef void (*btf_trace_softirq_raise)(void *, unsigned int); + +typedef void (*btf_trace_tasklet_entry)(void *, struct tasklet_struct *, void *); + +typedef void (*btf_trace_tasklet_exit)(void *, struct tasklet_struct *, void *); + +struct tasklet_head { + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; + +struct clock_read_data { + u64 epoch_ns; + u64 epoch_cyc; + u64 sched_clock_mask; + u64 (*read_sched_clock)(); + u32 mult; + u32 shift; + long: 32; +}; + +struct clock_data { + seqcount_latch_t seq; + long: 32; + struct clock_read_data read_data[2]; + ktime_t wrap_kt; + long unsigned int rate; + u64 (*actual_read_sched_clock)(); +}; + +enum freezer_state_flags { + CGROUP_FREEZER_ONLINE = 1, + CGROUP_FREEZING_SELF = 2, + CGROUP_FREEZING_PARENT = 4, + CGROUP_FROZEN = 8, + CGROUP_FREEZING = 6, +}; + +struct freezer { + struct cgroup_subsys_state css; + unsigned int state; + long: 32; +}; + +enum blktrace_notify { + __BLK_TN_PROCESS = 0, + __BLK_TN_TIMESTAMP = 1, + __BLK_TN_MESSAGE = 2, + __BLK_TN_CGROUP = 256, +}; + +struct blk_io_trace { + __u32 magic; + __u32 sequence; + __u64 time; + __u64 sector; + __u32 bytes; + __u32 action; + __u32 pid; + __u32 device; + __u32 cpu; + __u16 error; + __u16 pdu_len; +}; + +struct blk_io_trace_remap { + __be32 device_from; + __be32 device_to; + __be64 sector_from; +}; + +enum { + Blktrace_setup = 1, + Blktrace_running = 2, + Blktrace_stopped = 3, +}; + +struct blk_user_trace_setup { + char name[32]; + __u16 act_mask; + __u32 buf_size; + __u32 buf_nr; + long: 32; + __u64 start_lba; + __u64 end_lba; + __u32 pid; + long: 32; +}; + +typedef void blk_log_action_t(struct trace_iterator *, const char *, bool); + +struct bpf_cgroup_storage_map { + struct bpf_map map; + spinlock_t lock; + struct rb_root root; + struct list_head list; +}; + +enum lruvec_flags { + LRUVEC_CGROUP_CONGESTED = 0, + LRUVEC_NODE_CONGESTED = 1, +}; + +enum pgdat_flags { + PGDAT_DIRTY = 0, + PGDAT_WRITEBACK = 1, + PGDAT_RECLAIM_LOCKED = 2, +}; + +struct reclaim_stat { + unsigned int nr_dirty; + unsigned int nr_unqueued_dirty; + unsigned int nr_congested; + unsigned int nr_writeback; + unsigned int nr_immediate; + unsigned int nr_pageout; + unsigned int nr_activate[2]; + unsigned int nr_ref_keep; + unsigned int nr_unmap_fail; + unsigned int nr_lazyfree_fail; + unsigned int nr_demoted; +}; + +struct trace_event_raw_mm_vmscan_kswapd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_kswapd_wake { + struct trace_entry ent; + int nid; + int zid; + int order; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_wakeup_kswapd { + struct trace_entry ent; + int nid; + int zid; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { + struct trace_entry ent; + int order; + long 
unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_end_template { + struct trace_entry ent; + long unsigned int nr_reclaimed; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_start { + struct trace_entry ent; + struct shrinker *shr; + void *shrink; + int nid; + long int nr_objects_to_shrink; + long unsigned int gfp_flags; + long unsigned int cache_items; + long long unsigned int delta; + long unsigned int total_scan; + int priority; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_end { + struct trace_entry ent; + struct shrinker *shr; + int nid; + void *shrink; + long int unused_scan; + long int new_scan; + int retval; + long int total_scan; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_isolate { + struct trace_entry ent; + int highest_zoneidx; + int order; + long unsigned int nr_requested; + long unsigned int nr_scanned; + long unsigned int nr_skipped; + long unsigned int nr_taken; + int lru; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_write_folio { + struct trace_entry ent; + long unsigned int pfn; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_reclaim_pages { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_inactive { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_active { + struct trace_entry ent; + int nid; + long unsigned int nr_taken; + long unsigned int nr_active; + long unsigned int nr_deactivated; + long unsigned int nr_referenced; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_node_reclaim_begin { + struct trace_entry ent; + int nid; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_throttled { + struct trace_entry ent; + int nid; + int usec_timeout; + int usec_delayed; + int reason; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; + +struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; + +struct trace_event_data_offsets_mm_shrink_slab_start {}; + +struct trace_event_data_offsets_mm_shrink_slab_end {}; + +struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; + +struct trace_event_data_offsets_mm_vmscan_write_folio {}; + +struct trace_event_data_offsets_mm_vmscan_reclaim_pages {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; + +struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {}; + +struct 
trace_event_data_offsets_mm_vmscan_throttled {}; + +typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int); + +typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int); + +typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, struct shrink_control *, long int, long unsigned int, long long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long int, long int, long int); + +typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_vmscan_write_folio)(void *, struct folio *); + +typedef void (*btf_trace_mm_vmscan_reclaim_pages)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *, int, int); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int, int); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_throttled)(void *, int, int, int, int); + +struct scan_control { + long unsigned int nr_to_reclaim; + nodemask_t *nodemask; + struct mem_cgroup *target_mem_cgroup; + long unsigned int anon_cost; + long unsigned int file_cost; + int *proactive_swappiness; + unsigned int may_deactivate: 2; + unsigned int force_deactivate: 1; + unsigned int skipped_deactivate: 1; + unsigned int may_writepage: 1; + unsigned int may_unmap: 1; + unsigned int may_swap: 1; + unsigned int no_cache_trim_mode: 1; + unsigned int cache_trim_mode_failed: 1; + unsigned int proactive: 1; + unsigned int memcg_low_reclaim: 1; + unsigned int memcg_low_skipped: 1; + unsigned int memcg_full_walk: 1; + unsigned int hibernation_mode: 1; + unsigned int compaction_ready: 1; + unsigned int cache_trim_mode: 1; + unsigned int file_is_tiny: 1; + unsigned int no_demotion: 1; + s8 order; + s8 priority; + s8 reclaim_idx; + gfp_t gfp_mask; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + struct { + unsigned int dirty; + unsigned int unqueued_dirty; + unsigned int congested; + unsigned int writeback; + unsigned int immediate; + unsigned int file_taken; + unsigned int taken; + } nr; + struct reclaim_state reclaim_state; +}; + +typedef enum { + PAGE_KEEP = 0, + PAGE_ACTIVATE = 1, + PAGE_SUCCESS = 2, + PAGE_CLEAN = 3, +} pageout_t; + +enum folio_references { + FOLIOREF_RECLAIM = 0, + FOLIOREF_RECLAIM_CLEAN = 1, + FOLIOREF_KEEP = 2, + FOLIOREF_ACTIVATE = 3, +}; + +enum scan_balance { + SCAN_EQUAL = 0, + SCAN_FRACT = 1, + SCAN_ANON = 2, + SCAN_FILE = 3, +}; + +typedef struct { + rwlock_t *lock; +} 
class_read_lock_t; + +typedef struct { + rwlock_t *lock; +} class_write_lock_t; + +typedef struct rw_semaphore *class_rwsem_read_t; + +struct mount_attr { + __u64 attr_set; + __u64 attr_clr; + __u64 propagation; + __u64 userns_fd; +}; + +struct statmount { + __u32 size; + __u32 mnt_opts; + __u64 mask; + __u32 sb_dev_major; + __u32 sb_dev_minor; + __u64 sb_magic; + __u32 sb_flags; + __u32 fs_type; + __u64 mnt_id; + __u64 mnt_parent_id; + __u32 mnt_id_old; + __u32 mnt_parent_id_old; + __u64 mnt_attr; + __u64 mnt_propagation; + __u64 mnt_peer_group; + __u64 mnt_master; + __u64 propagate_from; + __u32 mnt_root; + __u32 mnt_point; + __u64 mnt_ns_id; + __u32 fs_subtype; + __u32 sb_source; + __u32 opt_num; + __u32 opt_array; + __u32 opt_sec_num; + __u32 opt_sec_array; + __u64 __spare2[46]; + char str[0]; +}; + +struct mnt_id_req { + __u32 size; + __u32 spare; + __u64 mnt_id; + __u64 param; + __u64 mnt_ns_id; +}; + +struct mount_kattr { + unsigned int attr_set; + unsigned int attr_clr; + unsigned int propagation; + unsigned int lookup_flags; + bool recurse; + struct user_namespace *mnt_userns; + struct mnt_idmap *mnt_idmap; +}; + +enum umount_tree_flags { + UMOUNT_SYNC = 1, + UMOUNT_PROPAGATE = 2, + UMOUNT_CONNECTED = 4, +}; + +enum mnt_tree_flags_t { + MNT_TREE_MOVE = 1, + MNT_TREE_BENEATH = 2, +}; + +struct kstatmount { + struct statmount *buf; + size_t bufsize; + struct vfsmount *mnt; + long: 32; + u64 mask; + struct path root; + struct statmount sm; + struct seq_file seq; +}; + +struct trace_event_raw_locks_get_lock_context { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + unsigned char type; + struct file_lock_context *ctx; + char __data[0]; +}; + +struct trace_event_raw_filelock_lock { + struct trace_entry ent; + struct file_lock *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock_core *blocker; + fl_owner_t owner; + unsigned int pid; + unsigned int flags; + unsigned char type; + loff_t fl_start; + loff_t fl_end; + int ret; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_filelock_lease { + struct trace_entry ent; + struct file_lease *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock_core *blocker; + fl_owner_t owner; + unsigned int flags; + unsigned char type; + long unsigned int break_time; + long unsigned int downgrade_time; + char __data[0]; +}; + +struct trace_event_raw_generic_add_lease { + struct trace_entry ent; + long unsigned int i_ino; + int wcount; + int rcount; + int icount; + dev_t s_dev; + fl_owner_t owner; + unsigned int flags; + unsigned char type; + char __data[0]; +}; + +struct trace_event_raw_leases_conflict { + struct trace_entry ent; + void *lease; + void *breaker; + unsigned int l_fl_flags; + unsigned int b_fl_flags; + unsigned char l_fl_type; + unsigned char b_fl_type; + bool conflict; + char __data[0]; +}; + +struct trace_event_data_offsets_locks_get_lock_context {}; + +struct trace_event_data_offsets_filelock_lock {}; + +struct trace_event_data_offsets_filelock_lease {}; + +struct trace_event_data_offsets_generic_add_lease {}; + +struct trace_event_data_offsets_leases_conflict {}; + +typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, struct file_lock_context *); + +typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_locks_remove_posix)(void *, struct inode *, struct file_lock *, int); + +typedef void 
(*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lease *, struct file_lease *); + +struct file_lock_list_struct { + spinlock_t lock; + struct hlist_head hlist; +}; + +struct locks_iterator { + int li_cpu; + long: 32; + loff_t li_pos; +}; + +struct dax_holder_operations { + int (*notify_failure)(struct dax_device *, u64, u64, int); +}; + +struct ext4_lazy_init { + long unsigned int li_state; + struct list_head li_request_list; + struct mutex li_list_mtx; +}; + +struct partial_cluster { + ext4_fsblk_t pclu; + ext4_lblk_t lblk; + enum { + initial = 0, + tofree = 1, + nofree = 2, + } state; +}; + +struct ext4_journal_cb_entry { + struct list_head jce_list; + void (*jce_func)(struct super_block *, struct ext4_journal_cb_entry *, int); +}; + +struct trace_event_raw_ext4_other_inode_update_time { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t orig_ino; + uid_t uid; + gid_t gid; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_free_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + uid_t uid; + gid_t gid; + __u64 blocks; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_request_inode { + struct trace_entry ent; + dev_t dev; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_allocate_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_evict_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int nlink; + char __data[0]; +}; + +struct trace_event_raw_ext4_drop_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int drop; + char __data[0]; +}; + +struct trace_event_raw_ext4_nfs_commit_metadata { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_mark_inode_dirty { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_ext4_begin_ordered_truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t new_size; + char __data[0]; +}; + +struct trace_event_raw_ext4__write_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4__write_end { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + unsigned int copied; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + long unsigned int writeback_index; + int sync_mode; + char for_kupdate; + char range_cyclic; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_da_write_pages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int first_page; + long int 
nr_to_write; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_write_pages_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 lblk; + __u32 len; + __u32 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages_result { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + int pages_written; + long int pages_skipped; + long unsigned int writeback_index; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4__folio_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_ext4_invalidate_folio_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + size_t offset; + size_t length; + char __data[0]; +}; + +struct trace_event_raw_ext4_discard_blocks { + struct trace_entry ent; + dev_t dev; + long: 32; + __u64 blk; + __u64 count; + char __data[0]; +}; + +struct trace_event_raw_ext4__mb_new_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 pa_pstart; + __u64 pa_lstart; + __u32 pa_len; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_mb_release_inode_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + __u32 count; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_mb_release_group_pa { + struct trace_entry ent; + dev_t dev; + long: 32; + __u64 pa_pstart; + __u32 pa_len; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_discard_preallocations { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_discard_preallocations { + struct trace_entry ent; + dev_t dev; + int needed; + char __data[0]; +}; + +struct trace_event_raw_ext4_request_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_allocate_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_free_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + long unsigned int count; + int flags; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_sync_file_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + int datasync; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_file_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_fs { + struct trace_entry ent; + dev_t dev; + int wait; + char __data[0]; +}; + +struct trace_event_raw_ext4_alloc_da_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int data_blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_alloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 goal_logical; + int goal_start; + __u32 goal_group; + int goal_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + __u16 found; + __u16 groups; + __u16 buddy; + __u16 flags; + __u16 tail; + __u8 cr; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_prealloc { + 
struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4__mballoc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_forget { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + int is_metadata; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_update_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int used_blocks; + int reserved_data_blocks; + int quota_claim; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int reserve_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_da_release_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int freed_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4__bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_read_block_bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + bool prefetch; + char __data[0]; +}; + +struct trace_event_raw_ext4__fallocate_mode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + int mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_fallocate_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int blocks; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + long: 32; + loff_t size; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4__truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_fastpath { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + ext4_lblk_t i_lblk; + unsigned int i_len; + ext4_fsblk_t i_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + unsigned int len; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int flags; + long: 32; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + unsigned int len; + unsigned int mflags; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_load_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_load_inode { + struct trace_entry ent; + dev_t dev; + 
ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_sb { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + int revoke_creds; + int type; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_inode { + struct trace_entry ent; + long unsigned int ino; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + int revoke_creds; + int type; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_reserved { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4__trim { + struct trace_entry ent; + int dev_major; + int dev_minor; + __u32 group; + int start; + int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_handle_unwritten_extents { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int flags; + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + unsigned int len; + unsigned int allocated; + ext4_fsblk_t newblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_get_implied_cluster_alloc_exit { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + ext4_lblk_t lblk; + long: 32; + ext4_fsblk_t pblk; + unsigned int len; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_show_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + short unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_remove_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t from; + ext4_lblk_t to; + ext4_fsblk_t ee_pblk; + ext4_lblk_t ee_lblk; + short unsigned int ee_len; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_leaf { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t ee_lblk; + ext4_fsblk_t ee_pblk; + short int ee_len; + long: 32; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_idx { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space_done { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + long: 32; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + short unsigned int eh_entries; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4__es_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_es_remove_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t lblk; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_es_lookup_extent_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct 
trace_event_raw_ext4_es_lookup_extent_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + int found; + char __data[0]; +}; + +struct trace_event_raw_ext4__es_shrink_enter { + struct trace_entry ent; + dev_t dev; + int nr_to_scan; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink_scan_exit { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_collapse_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_insert_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + long long unsigned int scan_time; + int nr_skipped; + int retried; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_insert_delayed_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + bool lclu_allocated; + bool end_allocated; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_fsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u32 agno; + long: 32; + u64 bno; + u64 len; + u64 owner; + char __data[0]; +}; + +struct trace_event_raw_ext4_getfsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u64 block; + u64 len; + u64 owner; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_shutdown { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_error { + struct trace_entry ent; + dev_t dev; + const char *function; + unsigned int line; + char __data[0]; +}; + +struct trace_event_raw_ext4_prefetch_bitmaps { + struct trace_entry ent; + dev_t dev; + __u32 group; + __u32 next; + __u32 ios; + char __data[0]; +}; + +struct trace_event_raw_ext4_lazy_itable_init { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay_scan { + struct trace_entry ent; + dev_t dev; + int error; + int off; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay { + struct trace_entry ent; + dev_t dev; + int tag; + int ino; + int priv1; + int priv2; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_start { + struct trace_entry ent; + dev_t dev; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_stop { + struct trace_entry ent; + dev_t dev; + int nblks; + int reason; + int num_fc; + int num_fc_ineligible; + int nblks_agg; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_stats { + struct trace_entry ent; + dev_t dev; + unsigned int fc_ineligible_rc[10]; + long unsigned int fc_commits; + long unsigned int fc_ineligible_commits; + long unsigned int fc_numblks; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_dentry { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_inode { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_range { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + long int start; + long int end; + int error; + 
char __data[0]; +}; + +struct trace_event_raw_ext4_fc_cleanup { + struct trace_entry ent; + dev_t dev; + int j_fc_off; + int full; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_update_sb { + struct trace_entry ent; + dev_t dev; + long: 32; + ext4_fsblk_t fsblk; + unsigned int flags; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_ext4_other_inode_update_time {}; + +struct trace_event_data_offsets_ext4_free_inode {}; + +struct trace_event_data_offsets_ext4_request_inode {}; + +struct trace_event_data_offsets_ext4_allocate_inode {}; + +struct trace_event_data_offsets_ext4_evict_inode {}; + +struct trace_event_data_offsets_ext4_drop_inode {}; + +struct trace_event_data_offsets_ext4_nfs_commit_metadata {}; + +struct trace_event_data_offsets_ext4_mark_inode_dirty {}; + +struct trace_event_data_offsets_ext4_begin_ordered_truncate {}; + +struct trace_event_data_offsets_ext4__write_begin {}; + +struct trace_event_data_offsets_ext4__write_end {}; + +struct trace_event_data_offsets_ext4_writepages {}; + +struct trace_event_data_offsets_ext4_da_write_pages {}; + +struct trace_event_data_offsets_ext4_da_write_pages_extent {}; + +struct trace_event_data_offsets_ext4_writepages_result {}; + +struct trace_event_data_offsets_ext4__folio_op {}; + +struct trace_event_data_offsets_ext4_invalidate_folio_op {}; + +struct trace_event_data_offsets_ext4_discard_blocks {}; + +struct trace_event_data_offsets_ext4__mb_new_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_inode_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_group_pa {}; + +struct trace_event_data_offsets_ext4_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_mb_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_request_blocks {}; + +struct trace_event_data_offsets_ext4_allocate_blocks {}; + +struct trace_event_data_offsets_ext4_free_blocks {}; + +struct trace_event_data_offsets_ext4_sync_file_enter {}; + +struct trace_event_data_offsets_ext4_sync_file_exit {}; + +struct trace_event_data_offsets_ext4_sync_fs {}; + +struct trace_event_data_offsets_ext4_alloc_da_blocks {}; + +struct trace_event_data_offsets_ext4_mballoc_alloc {}; + +struct trace_event_data_offsets_ext4_mballoc_prealloc {}; + +struct trace_event_data_offsets_ext4__mballoc {}; + +struct trace_event_data_offsets_ext4_forget {}; + +struct trace_event_data_offsets_ext4_da_update_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_release_space {}; + +struct trace_event_data_offsets_ext4__bitmap_load {}; + +struct trace_event_data_offsets_ext4_read_block_bitmap_load {}; + +struct trace_event_data_offsets_ext4__fallocate_mode {}; + +struct trace_event_data_offsets_ext4_fallocate_exit {}; + +struct trace_event_data_offsets_ext4_unlink_enter {}; + +struct trace_event_data_offsets_ext4_unlink_exit {}; + +struct trace_event_data_offsets_ext4__truncate {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_enter {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_fastpath {}; + +struct trace_event_data_offsets_ext4__map_blocks_enter {}; + +struct trace_event_data_offsets_ext4__map_blocks_exit {}; + +struct trace_event_data_offsets_ext4_ext_load_extent {}; + +struct trace_event_data_offsets_ext4_load_inode {}; + +struct trace_event_data_offsets_ext4_journal_start_sb {}; + +struct trace_event_data_offsets_ext4_journal_start_inode {}; + +struct trace_event_data_offsets_ext4_journal_start_reserved {}; + 
+struct trace_event_data_offsets_ext4__trim {}; + +struct trace_event_data_offsets_ext4_ext_handle_unwritten_extents {}; + +struct trace_event_data_offsets_ext4_get_implied_cluster_alloc_exit {}; + +struct trace_event_data_offsets_ext4_ext_show_extent {}; + +struct trace_event_data_offsets_ext4_remove_blocks {}; + +struct trace_event_data_offsets_ext4_ext_rm_leaf {}; + +struct trace_event_data_offsets_ext4_ext_rm_idx {}; + +struct trace_event_data_offsets_ext4_ext_remove_space {}; + +struct trace_event_data_offsets_ext4_ext_remove_space_done {}; + +struct trace_event_data_offsets_ext4__es_extent {}; + +struct trace_event_data_offsets_ext4_es_remove_extent {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_enter {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_exit {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_enter {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_exit {}; + +struct trace_event_data_offsets_ext4__es_shrink_enter {}; + +struct trace_event_data_offsets_ext4_es_shrink_scan_exit {}; + +struct trace_event_data_offsets_ext4_collapse_range {}; + +struct trace_event_data_offsets_ext4_insert_range {}; + +struct trace_event_data_offsets_ext4_es_shrink {}; + +struct trace_event_data_offsets_ext4_es_insert_delayed_extent {}; + +struct trace_event_data_offsets_ext4_fsmap_class {}; + +struct trace_event_data_offsets_ext4_getfsmap_class {}; + +struct trace_event_data_offsets_ext4_shutdown {}; + +struct trace_event_data_offsets_ext4_error {}; + +struct trace_event_data_offsets_ext4_prefetch_bitmaps {}; + +struct trace_event_data_offsets_ext4_lazy_itable_init {}; + +struct trace_event_data_offsets_ext4_fc_replay_scan {}; + +struct trace_event_data_offsets_ext4_fc_replay {}; + +struct trace_event_data_offsets_ext4_fc_commit_start {}; + +struct trace_event_data_offsets_ext4_fc_commit_stop {}; + +struct trace_event_data_offsets_ext4_fc_stats {}; + +struct trace_event_data_offsets_ext4_fc_track_dentry {}; + +struct trace_event_data_offsets_ext4_fc_track_inode {}; + +struct trace_event_data_offsets_ext4_fc_track_range {}; + +struct trace_event_data_offsets_ext4_fc_cleanup {}; + +struct trace_event_data_offsets_ext4_update_sb {}; + +typedef void (*btf_trace_ext4_other_inode_update_time)(void *, struct inode *, ino_t); + +typedef void (*btf_trace_ext4_free_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_request_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_allocate_inode)(void *, struct inode *, struct inode *, int); + +typedef void (*btf_trace_ext4_evict_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_drop_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_nfs_commit_metadata)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mark_inode_dirty)(void *, struct inode *, long unsigned int); + +typedef void (*btf_trace_ext4_begin_ordered_truncate)(void *, struct inode *, loff_t); + +typedef void (*btf_trace_ext4_write_begin)(void *, struct inode *, loff_t, unsigned int); + +typedef void (*btf_trace_ext4_da_write_begin)(void *, struct inode *, loff_t, unsigned int); + +typedef void (*btf_trace_ext4_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_journalled_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_da_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_writepages)(void *, struct inode *, struct 
writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages)(void *, struct inode *, long unsigned int, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages_extent)(void *, struct inode *, struct ext4_map_blocks *); + +typedef void (*btf_trace_ext4_writepages_result)(void *, struct inode *, struct writeback_control *, int, int); + +typedef void (*btf_trace_ext4_read_folio)(void *, struct inode *, struct folio *); + +typedef void (*btf_trace_ext4_release_folio)(void *, struct inode *, struct folio *); + +typedef void (*btf_trace_ext4_invalidate_folio)(void *, struct folio *, size_t, size_t); + +typedef void (*btf_trace_ext4_journalled_invalidate_folio)(void *, struct folio *, size_t, size_t); + +typedef void (*btf_trace_ext4_discard_blocks)(void *, struct super_block *, long long unsigned int, long long unsigned int); + +typedef void (*btf_trace_ext4_mb_new_inode_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_new_group_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_release_inode_pa)(void *, struct ext4_prealloc_space *, long long unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_mb_release_group_pa)(void *, struct super_block *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_discard_preallocations)(void *, struct inode *, unsigned int); + +typedef void (*btf_trace_ext4_mb_discard_preallocations)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_request_blocks)(void *, struct ext4_allocation_request *); + +typedef void (*btf_trace_ext4_allocate_blocks)(void *, struct ext4_allocation_request *, long long unsigned int); + +typedef void (*btf_trace_ext4_free_blocks)(void *, struct inode *, __u64, long unsigned int, int); + +typedef void (*btf_trace_ext4_sync_file_enter)(void *, struct file *, int); + +typedef void (*btf_trace_ext4_sync_file_exit)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_sync_fs)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_alloc_da_blocks)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mballoc_alloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_prealloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_discard)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_mballoc_free)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_forget)(void *, struct inode *, int, __u64); + +typedef void (*btf_trace_ext4_da_update_reserve_space)(void *, struct inode *, int, int); + +typedef void (*btf_trace_ext4_da_reserve_space)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_da_release_space)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_mb_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_mb_buddy_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_load_inode_bitmap)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_read_block_bitmap_load)(void *, struct super_block *, long unsigned int, bool); + +typedef void (*btf_trace_ext4_fallocate_enter)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_punch_hole)(void *, struct inode *, 
loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_zero_range)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_fallocate_exit)(void *, struct inode *, loff_t, unsigned int, int); + +typedef void (*btf_trace_ext4_unlink_enter)(void *, struct inode *, struct dentry *); + +typedef void (*btf_trace_ext4_unlink_exit)(void *, struct dentry *, int); + +typedef void (*btf_trace_ext4_truncate_enter)(void *, struct inode *); + +typedef void (*btf_trace_ext4_truncate_exit)(void *, struct inode *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_enter)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_fastpath)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ind_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ext_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ind_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_load_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_load_inode)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_sb)(void *, struct super_block *, int, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_inode)(void *, struct inode *, int, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_reserved)(void *, struct super_block *, int, long unsigned int); + +typedef void (*btf_trace_ext4_trim_extent)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_trim_all_free)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_ext_handle_unwritten_extents)(void *, struct inode *, struct ext4_map_blocks *, int, unsigned int, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_get_implied_cluster_alloc_exit)(void *, struct super_block *, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_show_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t, short unsigned int); + +typedef void (*btf_trace_ext4_remove_blocks)(void *, struct inode *, struct ext4_extent *, ext4_lblk_t, ext4_fsblk_t, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_leaf)(void *, struct inode *, ext4_lblk_t, struct ext4_extent *, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_idx)(void *, struct inode *, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_ext_remove_space)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int); + +typedef void (*btf_trace_ext4_ext_remove_space_done)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, struct partial_cluster *, __le16); + +typedef void (*btf_trace_ext4_es_insert_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_cache_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_remove_extent)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void 
(*btf_trace_ext4_es_find_extent_range_exit)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_lookup_extent_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_lookup_extent_exit)(void *, struct inode *, struct extent_status *, int); + +typedef void (*btf_trace_ext4_es_shrink_count)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_enter)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_exit)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_collapse_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_insert_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_es_shrink)(void *, struct super_block *, int, u64, int, int); + +typedef void (*btf_trace_ext4_es_insert_delayed_extent)(void *, struct inode *, struct extent_status *, bool, bool); + +typedef void (*btf_trace_ext4_fsmap_low_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_high_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_mapping)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_getfsmap_low_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_high_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_mapping)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_shutdown)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_error)(void *, struct super_block *, const char *, unsigned int); + +typedef void (*btf_trace_ext4_prefetch_bitmaps)(void *, struct super_block *, ext4_group_t, ext4_group_t, unsigned int); + +typedef void (*btf_trace_ext4_lazy_itable_init)(void *, struct super_block *, ext4_group_t); + +typedef void (*btf_trace_ext4_fc_replay_scan)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_fc_replay)(void *, struct super_block *, int, int, int, int); + +typedef void (*btf_trace_ext4_fc_commit_start)(void *, struct super_block *, tid_t); + +typedef void (*btf_trace_ext4_fc_commit_stop)(void *, struct super_block *, int, int, tid_t); + +typedef void (*btf_trace_ext4_fc_stats)(void *, struct super_block *); + +typedef void (*btf_trace_ext4_fc_track_create)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_link)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_unlink)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_inode)(void *, handle_t *, struct inode *, int); + +typedef void (*btf_trace_ext4_fc_track_range)(void *, handle_t *, struct inode *, long int, long int, int); + +typedef void (*btf_trace_ext4_fc_cleanup)(void *, journal_t *, int, tid_t); + +typedef void (*btf_trace_ext4_update_sb)(void *, struct super_block *, ext4_fsblk_t, unsigned int); + +struct ext4_err_translation { + int code; + int errno; +}; + +enum { + Opt_bsd_df = 0, + Opt_minix_df = 1, + Opt_grpid = 2, + Opt_nogrpid = 3, + Opt_resgid = 4, + Opt_resuid = 5, + Opt_sb = 6, + Opt_nouid32 = 7, + Opt_debug = 8, + Opt_removed = 9, + Opt_user_xattr = 10, + Opt_acl___2 = 11, + Opt_auto_da_alloc = 12, + Opt_noauto_da_alloc = 13, + Opt_noload = 14, + Opt_commit = 15, + 
Opt_min_batch_time = 16, + Opt_max_batch_time = 17, + Opt_journal_dev = 18, + Opt_journal_path = 19, + Opt_journal_checksum = 20, + Opt_journal_async_commit = 21, + Opt_abort = 22, + Opt_data_journal = 23, + Opt_data_ordered = 24, + Opt_data_writeback = 25, + Opt_data_err_abort = 26, + Opt_data_err_ignore = 27, + Opt_test_dummy_encryption = 28, + Opt_inlinecrypt = 29, + Opt_usrjquota = 30, + Opt_grpjquota = 31, + Opt_quota = 32, + Opt_noquota = 33, + Opt_barrier___2 = 34, + Opt_nobarrier = 35, + Opt_err___3 = 36, + Opt_usrquota = 37, + Opt_grpquota = 38, + Opt_prjquota = 39, + Opt_dax = 40, + Opt_dax_always = 41, + Opt_dax_inode = 42, + Opt_dax_never = 43, + Opt_stripe = 44, + Opt_delalloc = 45, + Opt_nodelalloc = 46, + Opt_warn_on_error = 47, + Opt_nowarn_on_error = 48, + Opt_mblk_io_submit = 49, + Opt_debug_want_extra_isize = 50, + Opt_nomblk_io_submit = 51, + Opt_block_validity = 52, + Opt_noblock_validity = 53, + Opt_inode_readahead_blks = 54, + Opt_journal_ioprio = 55, + Opt_dioread_nolock = 56, + Opt_dioread_lock = 57, + Opt_discard___2 = 58, + Opt_nodiscard = 59, + Opt_init_itable = 60, + Opt_noinit_itable = 61, + Opt_max_dir_size_kb = 62, + Opt_nojournal_checksum = 63, + Opt_nombcache = 64, + Opt_no_prefetch_block_bitmaps = 65, + Opt_mb_optimize_scan = 66, + Opt_errors = 67, + Opt_data = 68, + Opt_data_err = 69, + Opt_jqfmt = 70, + Opt_dax_type = 71, +}; + +struct mount_opts { + int token; + int mount_opt; + int flags; +}; + +struct ext4_fs_context { + char *s_qf_names[3]; + struct fscrypt_dummy_policy dummy_enc_policy; + int s_jquota_fmt; + short unsigned int qname_spec; + long unsigned int vals_s_flags; + long unsigned int mask_s_flags; + long unsigned int journal_devnum; + long unsigned int s_commit_interval; + long unsigned int s_stripe; + unsigned int s_inode_readahead_blks; + unsigned int s_want_extra_isize; + unsigned int s_li_wait_mult; + unsigned int s_max_dir_size_kb; + unsigned int journal_ioprio; + unsigned int vals_s_mount_opt; + unsigned int mask_s_mount_opt; + unsigned int vals_s_mount_opt2; + unsigned int mask_s_mount_opt2; + unsigned int opt_flags; + unsigned int spec; + u32 s_max_batch_time; + u32 s_min_batch_time; + kuid_t s_resuid; + kgid_t s_resgid; + long: 32; + ext4_fsblk_t s_sb_block; +}; + +struct ext4_mount_options { + long unsigned int s_mount_opt; + long unsigned int s_mount_opt2; + kuid_t s_resuid; + kgid_t s_resgid; + long unsigned int s_commit_interval; + u32 s_min_batch_time; + u32 s_max_batch_time; +}; + +struct ipc_proc_iface { + const char *path; + const char *header; + int ids; + int (*show)(struct seq_file *, void *); +}; + +struct ipc_proc_iter { + struct ipc_namespace *ns; + struct pid_namespace *pid_ns; + struct ipc_proc_iface *iface; +}; + +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +struct rsa_mpi_key { + MPI n; + MPI e; + MPI d; + MPI p; + MPI q; + MPI dp; + MPI dq; + MPI qinv; +}; + +struct crypto_rfc3686_ctx { + struct crypto_skcipher *child; + u8 nonce[4]; +}; + +struct crypto_rfc3686_req_ctx { + u8 iv[16]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + struct skcipher_request subreq; +}; + +struct x509_parse_context { + struct x509_certificate *cert; + long unsigned int data; + const void *key; + size_t key_size; + const void *params; + size_t params_size; + enum OID key_algo; + enum OID last_oid; + enum OID sig_algo; + u8 o_size; + u8 cn_size; + u8 email_size; + u16 o_offset; + u16 cn_offset; + u16 email_offset; + unsigned int raw_akid_size; + const void *raw_akid; + const void *akid_raw_issuer; + unsigned int akid_raw_issuer_size; +}; + +struct bt_iter_data { + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + busy_tag_iter_fn *fn; + void *data; + bool reserved; +}; + +struct bt_tags_iter_data { + struct blk_mq_tags *tags; + busy_tag_iter_fn *fn; + void *data; + unsigned int flags; +}; + +struct trace_event_raw_kyber_latency { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char type[8]; + u8 percentile; + u8 numerator; + u8 denominator; + unsigned int samples; + char __data[0]; +}; + +struct trace_event_raw_kyber_adjust { + struct trace_entry ent; + dev_t dev; + char domain[16]; + unsigned int depth; + char __data[0]; +}; + +struct trace_event_raw_kyber_throttled { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_kyber_latency {}; + +struct trace_event_data_offsets_kyber_adjust {}; + +struct trace_event_data_offsets_kyber_throttled {}; + +typedef void (*btf_trace_kyber_latency)(void *, dev_t, const char *, const char *, unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_kyber_adjust)(void *, dev_t, const char *, unsigned int); + +typedef void (*btf_trace_kyber_throttled)(void *, dev_t, const char *); + +enum { + KYBER_READ = 0, + KYBER_WRITE = 1, + KYBER_DISCARD = 2, + KYBER_OTHER = 3, + KYBER_NUM_DOMAINS = 4, +}; + +enum { + KYBER_ASYNC_PERCENT = 75, +}; + +enum { + KYBER_LATENCY_SHIFT = 2, + KYBER_GOOD_BUCKETS = 4, + KYBER_LATENCY_BUCKETS = 8, +}; + +enum { + KYBER_TOTAL_LATENCY = 0, + KYBER_IO_LATENCY = 1, +}; + +struct kyber_cpu_latency { + atomic_t buckets[48]; +}; + +struct kyber_ctx_queue { + spinlock_t lock; + struct list_head rq_list[4]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct kyber_queue_data { + struct request_queue *q; + dev_t dev; + struct sbitmap_queue domain_tokens[4]; + unsigned int async_depth; + struct kyber_cpu_latency *cpu_latency; + struct timer_list timer; + unsigned int latency_buckets[48]; + long unsigned int latency_timeout[3]; + int domain_p99[3]; + long: 32; + u64 latency_targets[3]; +}; + +struct kyber_hctx_data { + spinlock_t lock; + struct list_head rqs[4]; + unsigned int cur_domain; + unsigned int batching; + struct kyber_ctx_queue *kcqs; + struct sbitmap kcq_map[4]; + struct sbq_wait domain_wait[4]; + struct sbq_wait_state *domain_ws[4]; + atomic_t wait_index[4]; +}; + +struct flush_kcq_data { + struct kyber_hctx_data *khd; + unsigned int sched_domain; + struct list_head *list; +}; + +struct ddebug_class_param { + union { + long unsigned int *bits; + unsigned int *lvl; + }; + char flags[8]; + const struct ddebug_class_map *map; +}; + +struct dim_cq_moder; + +struct dim_irq_moder { + u8 profile_flags; + u8 coal_flags; + u8 dim_rx_mode; + u8 dim_tx_mode; + struct dim_cq_moder *rx_profile; + struct dim_cq_moder 
*tx_profile; + void (*rx_dim_work)(struct work_struct *); + void (*tx_dim_work)(struct work_struct *); +}; + +struct dim_cq_moder { + u16 usec; + u16 pkts; + u16 comps; + u8 cq_period_mode; + struct callback_head rcu; +}; + +enum ib_uverbs_write_cmds { + IB_USER_VERBS_CMD_GET_CONTEXT = 0, + IB_USER_VERBS_CMD_QUERY_DEVICE = 1, + IB_USER_VERBS_CMD_QUERY_PORT = 2, + IB_USER_VERBS_CMD_ALLOC_PD = 3, + IB_USER_VERBS_CMD_DEALLOC_PD = 4, + IB_USER_VERBS_CMD_CREATE_AH = 5, + IB_USER_VERBS_CMD_MODIFY_AH = 6, + IB_USER_VERBS_CMD_QUERY_AH = 7, + IB_USER_VERBS_CMD_DESTROY_AH = 8, + IB_USER_VERBS_CMD_REG_MR = 9, + IB_USER_VERBS_CMD_REG_SMR = 10, + IB_USER_VERBS_CMD_REREG_MR = 11, + IB_USER_VERBS_CMD_QUERY_MR = 12, + IB_USER_VERBS_CMD_DEREG_MR = 13, + IB_USER_VERBS_CMD_ALLOC_MW = 14, + IB_USER_VERBS_CMD_BIND_MW = 15, + IB_USER_VERBS_CMD_DEALLOC_MW = 16, + IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = 17, + IB_USER_VERBS_CMD_CREATE_CQ = 18, + IB_USER_VERBS_CMD_RESIZE_CQ = 19, + IB_USER_VERBS_CMD_DESTROY_CQ = 20, + IB_USER_VERBS_CMD_POLL_CQ = 21, + IB_USER_VERBS_CMD_PEEK_CQ = 22, + IB_USER_VERBS_CMD_REQ_NOTIFY_CQ = 23, + IB_USER_VERBS_CMD_CREATE_QP = 24, + IB_USER_VERBS_CMD_QUERY_QP = 25, + IB_USER_VERBS_CMD_MODIFY_QP = 26, + IB_USER_VERBS_CMD_DESTROY_QP = 27, + IB_USER_VERBS_CMD_POST_SEND = 28, + IB_USER_VERBS_CMD_POST_RECV = 29, + IB_USER_VERBS_CMD_ATTACH_MCAST = 30, + IB_USER_VERBS_CMD_DETACH_MCAST = 31, + IB_USER_VERBS_CMD_CREATE_SRQ = 32, + IB_USER_VERBS_CMD_MODIFY_SRQ = 33, + IB_USER_VERBS_CMD_QUERY_SRQ = 34, + IB_USER_VERBS_CMD_DESTROY_SRQ = 35, + IB_USER_VERBS_CMD_POST_SRQ_RECV = 36, + IB_USER_VERBS_CMD_OPEN_XRCD = 37, + IB_USER_VERBS_CMD_CLOSE_XRCD = 38, + IB_USER_VERBS_CMD_CREATE_XSRQ = 39, + IB_USER_VERBS_CMD_OPEN_QP = 40, +}; + +enum ib_uverbs_wc_opcode { + IB_UVERBS_WC_SEND = 0, + IB_UVERBS_WC_RDMA_WRITE = 1, + IB_UVERBS_WC_RDMA_READ = 2, + IB_UVERBS_WC_COMP_SWAP = 3, + IB_UVERBS_WC_FETCH_ADD = 4, + IB_UVERBS_WC_BIND_MW = 5, + IB_UVERBS_WC_LOCAL_INV = 6, + IB_UVERBS_WC_TSO = 7, + IB_UVERBS_WC_FLUSH = 8, + IB_UVERBS_WC_ATOMIC_WRITE = 9, +}; + +enum ib_uverbs_create_qp_mask { + IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1, +}; + +enum ib_uverbs_wr_opcode { + IB_UVERBS_WR_RDMA_WRITE = 0, + IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, + IB_UVERBS_WR_SEND = 2, + IB_UVERBS_WR_SEND_WITH_IMM = 3, + IB_UVERBS_WR_RDMA_READ = 4, + IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, + IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, + IB_UVERBS_WR_LOCAL_INV = 7, + IB_UVERBS_WR_BIND_MW = 8, + IB_UVERBS_WR_SEND_WITH_INV = 9, + IB_UVERBS_WR_TSO = 10, + IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, + IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, + IB_UVERBS_WR_FLUSH = 14, + IB_UVERBS_WR_ATOMIC_WRITE = 15, +}; + +enum ib_uverbs_device_cap_flags { + IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1ULL, + IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 2ULL, + IB_UVERBS_DEVICE_BAD_QKEY_CNTR = 4ULL, + IB_UVERBS_DEVICE_RAW_MULTI = 8ULL, + IB_UVERBS_DEVICE_AUTO_PATH_MIG = 16ULL, + IB_UVERBS_DEVICE_CHANGE_PHY_PORT = 32ULL, + IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE = 64ULL, + IB_UVERBS_DEVICE_CURR_QP_STATE_MOD = 128ULL, + IB_UVERBS_DEVICE_SHUTDOWN_PORT = 256ULL, + IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT = 1024ULL, + IB_UVERBS_DEVICE_SYS_IMAGE_GUID = 2048ULL, + IB_UVERBS_DEVICE_RC_RNR_NAK_GEN = 4096ULL, + IB_UVERBS_DEVICE_SRQ_RESIZE = 8192ULL, + IB_UVERBS_DEVICE_N_NOTIFY_CQ = 16384ULL, + IB_UVERBS_DEVICE_MEM_WINDOW = 131072ULL, + IB_UVERBS_DEVICE_UD_IP_CSUM = 262144ULL, + IB_UVERBS_DEVICE_XRC = 1048576ULL, + IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS = 2097152ULL, + 
IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A = 8388608ULL, + IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B = 16777216ULL, + IB_UVERBS_DEVICE_RC_IP_CSUM = 33554432ULL, + IB_UVERBS_DEVICE_RAW_IP_CSUM = 67108864ULL, + IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING = 536870912ULL, + IB_UVERBS_DEVICE_RAW_SCATTER_FCS = 17179869184ULL, + IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING = 68719476736ULL, + IB_UVERBS_DEVICE_FLUSH_GLOBAL = 274877906944ULL, + IB_UVERBS_DEVICE_FLUSH_PERSISTENT = 549755813888ULL, + IB_UVERBS_DEVICE_ATOMIC_WRITE = 1099511627776ULL, +}; + +enum ib_uverbs_raw_packet_caps { + IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING = 1, + IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS = 2, + IB_UVERBS_RAW_PACKET_CAP_IP_CSUM = 4, + IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP = 8, +}; + +enum ib_uverbs_access_flags { + IB_UVERBS_ACCESS_LOCAL_WRITE = 1, + IB_UVERBS_ACCESS_REMOTE_WRITE = 2, + IB_UVERBS_ACCESS_REMOTE_READ = 4, + IB_UVERBS_ACCESS_REMOTE_ATOMIC = 8, + IB_UVERBS_ACCESS_MW_BIND = 16, + IB_UVERBS_ACCESS_ZERO_BASED = 32, + IB_UVERBS_ACCESS_ON_DEMAND = 64, + IB_UVERBS_ACCESS_HUGETLB = 128, + IB_UVERBS_ACCESS_FLUSH_GLOBAL = 256, + IB_UVERBS_ACCESS_FLUSH_PERSISTENT = 512, + IB_UVERBS_ACCESS_RELAXED_ORDERING = 1048576, + IB_UVERBS_ACCESS_OPTIONAL_RANGE = 1072693248, +}; + +enum ib_uverbs_srq_type { + IB_UVERBS_SRQT_BASIC = 0, + IB_UVERBS_SRQT_XRC = 1, + IB_UVERBS_SRQT_TM = 2, +}; + +enum ib_uverbs_wq_type { + IB_UVERBS_WQT_RQ = 0, +}; + +enum ib_uverbs_wq_flags { + IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1, + IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 2, + IB_UVERBS_WQ_FLAGS_DELAY_DROP = 4, + IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 8, +}; + +enum ib_uverbs_qp_type { + IB_UVERBS_QPT_RC = 2, + IB_UVERBS_QPT_UC = 3, + IB_UVERBS_QPT_UD = 4, + IB_UVERBS_QPT_RAW_PACKET = 8, + IB_UVERBS_QPT_XRC_INI = 9, + IB_UVERBS_QPT_XRC_TGT = 10, + IB_UVERBS_QPT_DRIVER = 255, +}; + +enum ib_uverbs_qp_create_flags { + IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2, + IB_UVERBS_QP_CREATE_SCATTER_FCS = 256, + IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 512, + IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 2048, + IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 4096, +}; + +enum ib_uverbs_gid_type { + IB_UVERBS_GID_TYPE_IB = 0, + IB_UVERBS_GID_TYPE_ROCE_V1 = 1, + IB_UVERBS_GID_TYPE_ROCE_V2 = 2, +}; + +enum ib_poll_context { + IB_POLL_SOFTIRQ = 0, + IB_POLL_WORKQUEUE = 1, + IB_POLL_UNBOUND_WORKQUEUE = 2, + IB_POLL_LAST_POOL_TYPE = 2, + IB_POLL_DIRECT = 3, +}; + +struct ddebug_table { + struct list_head link; + struct list_head maps; + const char *mod_name; + unsigned int num_ddebugs; + struct _ddebug *ddebugs; +}; + +struct ddebug_query { + const char *filename; + const char *module; + const char *function; + const char *format; + const char *class_string; + unsigned int first_lineno; + unsigned int last_lineno; +}; + +struct ddebug_iter { + struct ddebug_table *table; + int idx; +}; + +struct flag_settings { + unsigned int flags; + unsigned int mask; +}; + +struct flagsbuf { + char buf[8]; +}; + +struct clk_fixed_factor { + struct clk_hw hw; + unsigned int mult; + unsigned int div; + long unsigned int acc; + unsigned int flags; +}; + +struct trace_event_raw_scsi_dispatch_cmd_start { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_dispatch_cmd_error { + struct trace_entry ent; + unsigned int 
host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + int rtn; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_cmd_done_timeout_template { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + int result; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + u8 sense_key; + u8 asc; + u8 ascq; + char __data[0]; +}; + +struct trace_event_raw_scsi_eh_wakeup { + struct trace_entry ent; + unsigned int host_no; + char __data[0]; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_start { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_error { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_cmd_done_timeout_template { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_eh_wakeup {}; + +typedef void (*btf_trace_scsi_dispatch_cmd_start)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_error)(void *, struct scsi_cmnd *, int); + +typedef void (*btf_trace_scsi_dispatch_cmd_done)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_timeout)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_eh_wakeup)(void *, struct Scsi_Host *); + +enum scsi_vpd_parameters { + SCSI_VPD_HEADER_SIZE = 4, + SCSI_VPD_LIST_SIZE = 36, +}; + +struct swmii_regs { + u16 bmsr; + u16 lpa; + u16 lpagb; + u16 estat; +}; + +enum { + SWMII_SPEED_10 = 0, + SWMII_SPEED_100 = 1, + SWMII_SPEED_1000 = 2, + SWMII_DUPLEX_HALF = 0, + SWMII_DUPLEX_FULL = 1, +}; + +struct vivaldi_data { + u32 function_row_physmap[24]; + unsigned int num_function_row_keys; +}; + +enum { + POWER_SUPPLY_STATUS_UNKNOWN = 0, + POWER_SUPPLY_STATUS_CHARGING = 1, + POWER_SUPPLY_STATUS_DISCHARGING = 2, + POWER_SUPPLY_STATUS_NOT_CHARGING = 3, + POWER_SUPPLY_STATUS_FULL = 4, +}; + +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE = 1, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE = 2, + POWER_SUPPLY_CHARGE_TYPE_FAST = 3, + POWER_SUPPLY_CHARGE_TYPE_STANDARD = 4, + POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE = 5, + POWER_SUPPLY_CHARGE_TYPE_CUSTOM = 6, + POWER_SUPPLY_CHARGE_TYPE_LONGLIFE = 7, + POWER_SUPPLY_CHARGE_TYPE_BYPASS = 8, +}; + +enum { + POWER_SUPPLY_HEALTH_UNKNOWN = 0, + POWER_SUPPLY_HEALTH_GOOD = 1, + POWER_SUPPLY_HEALTH_OVERHEAT = 2, + POWER_SUPPLY_HEALTH_DEAD = 3, + POWER_SUPPLY_HEALTH_OVERVOLTAGE = 4, + POWER_SUPPLY_HEALTH_UNSPEC_FAILURE = 5, + POWER_SUPPLY_HEALTH_COLD = 6, + POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE = 7, + POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE = 8, + POWER_SUPPLY_HEALTH_OVERCURRENT = 9, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED = 10, + POWER_SUPPLY_HEALTH_WARM = 11, + POWER_SUPPLY_HEALTH_COOL = 12, + POWER_SUPPLY_HEALTH_HOT = 13, + POWER_SUPPLY_HEALTH_NO_BATTERY = 14, +}; + +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1, + POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4, + POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5, +}; + +enum power_supply_usb_type { + POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, + POWER_SUPPLY_USB_TYPE_SDP = 1, + POWER_SUPPLY_USB_TYPE_DCP = 
2, + POWER_SUPPLY_USB_TYPE_CDP = 3, + POWER_SUPPLY_USB_TYPE_ACA = 4, + POWER_SUPPLY_USB_TYPE_C = 5, + POWER_SUPPLY_USB_TYPE_PD = 6, + POWER_SUPPLY_USB_TYPE_PD_DRP = 7, + POWER_SUPPLY_USB_TYPE_PD_PPS = 8, + POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, +}; + +enum power_supply_charge_behaviour { + POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0, + POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE = 1, + POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE = 2, +}; + +struct power_supply_attr { + const char *prop_name; + char attr_name[31]; + struct device_attribute dev_attr; + const char * const *text_values; + int text_values_len; +}; + +struct hid_item { + unsigned int format; + __u8 size; + __u8 type; + __u8 tag; + union { + __u8 u8; + __s8 s8; + __u16 u16; + __s16 s16; + __u32 u32; + __s32 s32; + const __u8 *longdata; + } data; +}; + +struct hid_global { + unsigned int usage_page; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned int unit; + unsigned int report_id; + unsigned int report_size; + unsigned int report_count; +}; + +struct hid_local { + unsigned int usage[12288]; + u8 usage_size[12288]; + unsigned int collection_index[12288]; + unsigned int usage_index; + unsigned int usage_minimum; + unsigned int delimiter_depth; + unsigned int delimiter_branch; +}; + +struct hid_parser { + struct hid_global global; + struct hid_global global_stack[4]; + unsigned int global_stack_ptr; + struct hid_local local; + unsigned int *collection_stack; + unsigned int collection_stack_ptr; + unsigned int collection_stack_size; + struct hid_device *device; + unsigned int scan_flags; +}; + +struct hiddev { + int minor; + int exist; + int open; + struct mutex existancelock; + wait_queue_head_t wait; + struct hid_device *hid; + struct list_head list; + spinlock_t list_lock; + bool initialized; +}; + +struct hid_dynid { + struct list_head list; + struct hid_device_id id; +}; + +struct tso_t { + int next_frag_idx; + int size; + void *data; + u16 ip_id; + u8 tlen; + bool ipv6; + u32 tcp_seq; +}; + +enum __sk_action { + __SK_DROP = 0, + __SK_PASS = 1, + __SK_REDIRECT = 2, + __SK_NONE = 3, +}; + +enum sk_psock_state_bits { + SK_PSOCK_TX_ENABLED = 0, + SK_PSOCK_RX_STRP_ENABLED = 1, +}; + +struct eee_reply_data { + struct ethnl_reply_data base; + struct ethtool_keee eee; +}; + +struct nf_sockopt_ops { + struct list_head list; + u_int8_t pf; + int set_optmin; + int set_optmax; + int (*set)(struct sock *, int, sockptr_t, unsigned int); + int get_optmin; + int get_optmax; + int (*get)(struct sock *, int, void *, int *); + struct module *owner; +}; + +struct nf_ct_bridge_info { + struct nf_hook_ops *ops; + unsigned int ops_size; + struct module *me; +}; + +struct flow_block_cb; + +struct flow_block_indr { + struct list_head list; + struct net_device *dev; + struct Qdisc *sch; + enum flow_block_binder_type binder_type; + void *data; + void *cb_priv; + void (*cleanup)(struct flow_block_cb *); +}; + +struct flow_block_cb { + struct list_head driver_list; + struct list_head list; + flow_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *); + struct flow_block_indr indr; + unsigned int refcnt; +}; + +struct nft_hook { + struct list_head list; + struct nf_hook_ops ops; + struct callback_head rcu; +}; + +struct nft_trans { + struct list_head list; + struct net *net; + struct nft_table *table; + int msg_type; + u32 seq; + u16 flags; + u8 report: 1; + u8 put_net: 1; +}; + +struct nft_trans_binding { + struct nft_trans nft_trans; + struct 
list_head binding_list; +}; + +struct nft_trans_rule { + struct nft_trans nft_trans; + struct nft_rule *rule; + struct nft_chain *chain; + struct nft_flow_rule *flow; + u32 rule_id; + bool bound; +}; + +struct nft_trans_chain { + struct nft_trans_binding nft_trans_binding; + struct nft_chain *chain; + char *name; + struct nft_stats *stats; + u8 policy; + bool update; + bool bound; + u32 chain_id; + struct nft_base_chain *basechain; + struct list_head hook_list; +}; + +struct nft_offload_ethertype { + __be16 value; + __be16 mask; +}; + +struct ping_table { + struct hlist_head hash[64]; + spinlock_t lock; +}; + +struct in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + __u32 rtmsg_type; + __u16 rtmsg_dst_len; + __u16 rtmsg_src_len; + __u32 rtmsg_metric; + long unsigned int rtmsg_info; + __u32 rtmsg_flags; + int rtmsg_ifindex; +}; + +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER = 0, + IP6_DEFRAG_CONNTRACK_IN = 1, + __IP6_DEFRAG_CONNTRACK_IN = 65536, + IP6_DEFRAG_CONNTRACK_OUT = 65537, + __IP6_DEFRAG_CONNTRACK_OUT = 131072, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608, +}; + +struct freader { + void *buf; + u32 buf_sz; + int err; + long: 32; + union { + struct { + struct file *file; + struct folio *folio; + void *addr; + long: 32; + loff_t folio_off; + bool may_fault; + long: 32; + }; + struct { + const char *data; + long: 32; + u64 data_sz; + }; + }; +}; + +struct trace_event_raw_cpuhp_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_multi_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_exit { + struct trace_entry ent; + unsigned int cpu; + int state; + int idx; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_cpuhp_enter {}; + +struct trace_event_data_offsets_cpuhp_multi_enter {}; + +struct trace_event_data_offsets_cpuhp_exit {}; + +typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int)); + +typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, int (*)(unsigned int, struct hlist_node *), struct hlist_node *); + +typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int); + +struct cpuhp_cpu_state { + enum cpuhp_state state; + enum cpuhp_state target; + enum cpuhp_state fail; + struct task_struct *thread; + bool should_run; + bool rollback; + bool single; + bool bringup; + struct hlist_node *node; + struct hlist_node *last; + enum cpuhp_state cb_state; + int result; + atomic_t ap_sync_state; + struct completion done_up; + struct completion done_down; +}; + +struct cpuhp_step { + const char *name; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } startup; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } teardown; + struct hlist_head list; + bool cant_stop; + bool multi_instance; +}; + +enum cpuhp_sync_state { + SYNC_STATE_DEAD = 0, + SYNC_STATE_KICKED = 1, + SYNC_STATE_SHOULD_DIE = 2, + SYNC_STATE_ALIVE = 3, + SYNC_STATE_SHOULD_ONLINE = 4, + SYNC_STATE_ONLINE = 5, +}; + +enum cpu_mitigations { + CPU_MITIGATIONS_OFF = 0, + CPU_MITIGATIONS_AUTO = 1, + CPU_MITIGATIONS_AUTO_NOSMT = 2, +}; + +enum { + __SD_BALANCE_NEWIDLE = 0, + __SD_BALANCE_EXEC = 1, + __SD_BALANCE_FORK = 2, + __SD_BALANCE_WAKE = 3, + __SD_WAKE_AFFINE = 4, 
+ __SD_ASYM_CPUCAPACITY = 5, + __SD_ASYM_CPUCAPACITY_FULL = 6, + __SD_SHARE_CPUCAPACITY = 7, + __SD_CLUSTER = 8, + __SD_SHARE_LLC = 9, + __SD_SERIALIZE = 10, + __SD_ASYM_PACKING = 11, + __SD_PREFER_SIBLING = 12, + __SD_OVERLAP = 13, + __SD_NUMA = 14, + __SD_FLAG_CNT = 15, +}; + +enum { + SD_BALANCE_NEWIDLE = 1, + SD_BALANCE_EXEC = 2, + SD_BALANCE_FORK = 4, + SD_BALANCE_WAKE = 8, + SD_WAKE_AFFINE = 16, + SD_ASYM_CPUCAPACITY = 32, + SD_ASYM_CPUCAPACITY_FULL = 64, + SD_SHARE_CPUCAPACITY = 128, + SD_CLUSTER = 256, + SD_SHARE_LLC = 512, + SD_SERIALIZE = 1024, + SD_ASYM_PACKING = 2048, + SD_PREFER_SIBLING = 4096, + SD_OVERLAP = 8192, + SD_NUMA = 16384, +}; + +struct sd_flag_debug { + unsigned int meta_flags; + char *name; +}; + +typedef const struct cpumask * (*sched_domain_mask_f)(int); + +typedef int (*sched_domain_flags_f)(); + +struct sd_data { + struct sched_domain **sd; + struct sched_domain_shared **sds; + struct sched_group **sg; + struct sched_group_capacity **sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; + char *name; +}; + +enum { + MEMBARRIER_FLAG_SYNC_CORE = 1, + MEMBARRIER_FLAG_RSEQ = 2, +}; + +enum membarrier_cmd { + MEMBARRIER_CMD_QUERY = 0, + MEMBARRIER_CMD_GLOBAL = 1, + MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, + MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, + MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = 128, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = 256, + MEMBARRIER_CMD_GET_REGISTRATIONS = 512, + MEMBARRIER_CMD_SHARED = 1, +}; + +enum membarrier_cmd_flag { + MEMBARRIER_CMD_FLAG_CPU = 1, +}; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE = 0, + SCHED_TUNABLESCALING_LOG = 1, + SCHED_TUNABLESCALING_LINEAR = 2, + SCHED_TUNABLESCALING_END = 3, +}; + +struct asym_cap_data { + struct list_head link; + struct callback_head rcu; + long unsigned int capacity; + long unsigned int cpus[0]; +}; + +enum cpuacct_stat_index { + CPUACCT_STAT_USER = 0, + CPUACCT_STAT_SYSTEM = 1, + CPUACCT_STAT_NSTATS = 2, +}; + +struct cpuacct { + struct cgroup_subsys_state css; + u64 *cpuusage; + struct kernel_cpustat *cpustat; +}; + +enum dl_param { + DL_RUNTIME = 0, + DL_PERIOD = 1, +}; + +struct s_data { + struct sched_domain **sd; + struct root_domain *rd; +}; + +enum s_alloc { + sa_rootdomain = 0, + sa_sd = 1, + sa_sd_storage = 2, + sa_none = 3, +}; + +enum hk_flags { + HK_FLAG_TIMER = 1, + HK_FLAG_RCU = 2, + HK_FLAG_MISC = 4, + HK_FLAG_SCHED = 8, + HK_FLAG_TICK = 16, + HK_FLAG_DOMAIN = 32, + HK_FLAG_WQ = 64, + HK_FLAG_MANAGED_IRQ = 128, + HK_FLAG_KTHREAD = 256, +}; + +struct housekeeping { + struct cpumask cpumasks[9]; + long unsigned int flags; +}; + +struct bpf_iter__cgroup { + union { + struct bpf_iter_meta *meta; + }; + union { + struct cgroup *cgroup; + }; +}; + +struct cgroup_iter_priv { + struct cgroup_subsys_state *start_css; + bool visited_all; + bool terminate; + int order; +}; + +struct bpf_iter_css { + __u64 __opaque[3]; +}; + +struct bpf_iter_css_kern { + struct cgroup_subsys_state *start; + struct cgroup_subsys_state *pos; + unsigned int flags; + long: 32; +}; + +struct mlock_fbatch { + local_lock_t lock; + struct folio_batch fbatch; +}; + +struct files_stat_struct { + long unsigned int nr_files; + long unsigned int nr_free_files; + long unsigned int max_files; +}; 
+ +struct backing_file { + struct file file; + union { + struct path user_path; + freeptr_t bf_freeptr; + }; +}; + +struct file_handle { + __u32 handle_bytes; + int handle_type; + unsigned char f_handle[0]; +}; + +struct eventfd_ctx { + struct kref kref; + wait_queue_head_t wqh; + __u64 count; + unsigned int flags; + int id; +}; + +struct ext4_orphan_block_tail { + __le32 ob_magic; + __le32 ob_checksum; +}; + +enum btrfs_delayed_ref_flags { + BTRFS_DELAYED_REFS_FLUSHING = 0, +}; + +struct fsverity_descriptor { + __u8 version; + __u8 hash_algorithm; + __u8 log_blocksize; + __u8 salt_size; + __le32 sig_size; + __le64 data_size; + __u8 root_hash[64]; + __u8 salt[32]; + __u8 __reserved[144]; + __u8 signature[0]; +}; + +struct btrfs_stream_header { + char magic[13]; + __le32 version; +} __attribute__((packed)); + +struct btrfs_cmd_header { + __le32 len; + __le16 cmd; + __le32 crc; +} __attribute__((packed)); + +struct btrfs_tlv_header { + __le16 tlv_type; + __le16 tlv_len; +}; + +enum btrfs_send_cmd { + BTRFS_SEND_C_UNSPEC = 0, + BTRFS_SEND_C_SUBVOL = 1, + BTRFS_SEND_C_SNAPSHOT = 2, + BTRFS_SEND_C_MKFILE = 3, + BTRFS_SEND_C_MKDIR = 4, + BTRFS_SEND_C_MKNOD = 5, + BTRFS_SEND_C_MKFIFO = 6, + BTRFS_SEND_C_MKSOCK = 7, + BTRFS_SEND_C_SYMLINK = 8, + BTRFS_SEND_C_RENAME = 9, + BTRFS_SEND_C_LINK = 10, + BTRFS_SEND_C_UNLINK = 11, + BTRFS_SEND_C_RMDIR = 12, + BTRFS_SEND_C_SET_XATTR = 13, + BTRFS_SEND_C_REMOVE_XATTR = 14, + BTRFS_SEND_C_WRITE = 15, + BTRFS_SEND_C_CLONE = 16, + BTRFS_SEND_C_TRUNCATE = 17, + BTRFS_SEND_C_CHMOD = 18, + BTRFS_SEND_C_CHOWN = 19, + BTRFS_SEND_C_UTIMES = 20, + BTRFS_SEND_C_END = 21, + BTRFS_SEND_C_UPDATE_EXTENT = 22, + BTRFS_SEND_C_MAX_V1 = 22, + BTRFS_SEND_C_FALLOCATE = 23, + BTRFS_SEND_C_FILEATTR = 24, + BTRFS_SEND_C_ENCODED_WRITE = 25, + BTRFS_SEND_C_MAX_V2 = 25, + BTRFS_SEND_C_ENABLE_VERITY = 26, + BTRFS_SEND_C_MAX_V3 = 26, + BTRFS_SEND_C_MAX = 26, +}; + +enum { + BTRFS_SEND_A_UNSPEC = 0, + BTRFS_SEND_A_UUID = 1, + BTRFS_SEND_A_CTRANSID = 2, + BTRFS_SEND_A_INO = 3, + BTRFS_SEND_A_SIZE = 4, + BTRFS_SEND_A_MODE = 5, + BTRFS_SEND_A_UID = 6, + BTRFS_SEND_A_GID = 7, + BTRFS_SEND_A_RDEV = 8, + BTRFS_SEND_A_CTIME = 9, + BTRFS_SEND_A_MTIME = 10, + BTRFS_SEND_A_ATIME = 11, + BTRFS_SEND_A_OTIME = 12, + BTRFS_SEND_A_XATTR_NAME = 13, + BTRFS_SEND_A_XATTR_DATA = 14, + BTRFS_SEND_A_PATH = 15, + BTRFS_SEND_A_PATH_TO = 16, + BTRFS_SEND_A_PATH_LINK = 17, + BTRFS_SEND_A_FILE_OFFSET = 18, + BTRFS_SEND_A_DATA = 19, + BTRFS_SEND_A_CLONE_UUID = 20, + BTRFS_SEND_A_CLONE_CTRANSID = 21, + BTRFS_SEND_A_CLONE_PATH = 22, + BTRFS_SEND_A_CLONE_OFFSET = 23, + BTRFS_SEND_A_CLONE_LEN = 24, + BTRFS_SEND_A_MAX_V1 = 24, + BTRFS_SEND_A_FALLOCATE_MODE = 25, + BTRFS_SEND_A_FILEATTR = 26, + BTRFS_SEND_A_UNENCODED_FILE_LEN = 27, + BTRFS_SEND_A_UNENCODED_LEN = 28, + BTRFS_SEND_A_UNENCODED_OFFSET = 29, + BTRFS_SEND_A_COMPRESSION = 30, + BTRFS_SEND_A_ENCRYPTION = 31, + BTRFS_SEND_A_MAX_V2 = 31, + BTRFS_SEND_A_VERITY_ALGORITHM = 32, + BTRFS_SEND_A_VERITY_BLOCK_SIZE = 33, + BTRFS_SEND_A_VERITY_SALT_DATA = 34, + BTRFS_SEND_A_VERITY_SIG_DATA = 35, + BTRFS_SEND_A_MAX_V3 = 35, + __BTRFS_SEND_A_MAX = 35, +}; + +struct btrfs_lru_cache_entry { + struct list_head lru_list; + u64 key; + u64 gen; + struct list_head list; +}; + +struct btrfs_lru_cache { + struct list_head lru_list; + struct maple_tree entries; + unsigned int size; + unsigned int max_size; +}; + +struct fs_path { + union { + struct { + char *start; + char *end; + char *buf; + short unsigned int buf_len: 15; + short unsigned int reversed: 1; + char inline_buf[0]; + 
}; + char pad[256]; + }; +}; + +struct clone_root { + struct btrfs_root *root; + long: 32; + u64 ino; + u64 offset; + u64 num_bytes; + bool found_ref; + long: 32; +}; + +struct backref_cache_entry { + struct btrfs_lru_cache_entry entry; + u64 root_ids[17]; + int num_roots; + long: 32; +}; + +struct send_ctx { + struct file *send_filp; + long: 32; + loff_t send_off; + char *send_buf; + u32 send_size; + u32 send_max_size; + bool put_data; + struct page **send_buf_pages; + long: 32; + u64 flags; + u32 proto; + struct btrfs_root *send_root; + struct btrfs_root *parent_root; + struct clone_root *clone_roots; + int clone_roots_cnt; + struct btrfs_path *left_path; + struct btrfs_path *right_path; + struct btrfs_key *cmp_key; + u64 last_reloc_trans; + u64 cur_ino; + u64 cur_inode_gen; + u64 cur_inode_size; + u64 cur_inode_mode; + u64 cur_inode_rdev; + u64 cur_inode_last_extent; + u64 cur_inode_next_write_offset; + bool cur_inode_new; + bool cur_inode_new_gen; + bool cur_inode_deleted; + bool ignore_cur_inode; + bool cur_inode_needs_verity; + void *verity_descriptor; + long: 32; + u64 send_progress; + struct list_head new_refs; + struct list_head deleted_refs; + struct btrfs_lru_cache name_cache; + struct inode *cur_inode; + struct file_ra_state ra; + u64 page_cache_clear_start; + bool clean_page_cache; + struct rb_root pending_dir_moves; + struct rb_root waiting_dir_moves; + struct rb_root orphan_dirs; + struct rb_root rbtree_new_refs; + struct rb_root rbtree_deleted_refs; + struct btrfs_lru_cache backref_cache; + long: 32; + u64 backref_cache_last_reloc_trans; + struct btrfs_lru_cache dir_created_cache; + struct btrfs_lru_cache dir_utimes_cache; +}; + +struct pending_dir_move { + struct rb_node node; + struct list_head list; + long: 32; + u64 parent_ino; + u64 ino; + u64 gen; + struct list_head update_refs; +}; + +struct waiting_dir_move { + struct rb_node node; + long: 32; + u64 ino; + u64 rmdir_ino; + u64 rmdir_gen; + bool orphanized; + long: 32; +}; + +struct orphan_dir_info { + struct rb_node node; + long: 32; + u64 ino; + u64 gen; + u64 last_dir_index_offset; + u64 dir_high_seq_ino; +}; + +struct name_cache_entry { + struct btrfs_lru_cache_entry entry; + u64 parent_ino; + u64 parent_gen; + int ret; + int need_later_update; + int name_len; + char name[0]; + long: 32; +}; + +enum btrfs_compare_tree_result { + BTRFS_COMPARE_TREE_NEW = 0, + BTRFS_COMPARE_TREE_DELETED = 1, + BTRFS_COMPARE_TREE_CHANGED = 2, + BTRFS_COMPARE_TREE_SAME = 3, +}; + +struct btrfs_inode_info { + u64 size; + u64 gen; + u64 mode; + u64 uid; + u64 gid; + u64 rdev; + u64 fileattr; + u64 nlink; +}; + +typedef int (*iterate_inode_ref_t)(u64, struct fs_path *, void *); + +typedef int (*iterate_dir_item_t)(int, struct btrfs_key *, const char *, int, const char *, int, void *); + +struct backref_ctx { + struct send_ctx *sctx; + long: 32; + u64 found; + u64 cur_objectid; + u64 cur_offset; + u64 extent_len; + u64 bytenr; + u64 backref_owner; + u64 backref_offset; +}; + +enum inode_state { + inode_state_no_change = 0, + inode_state_will_create = 1, + inode_state_did_create = 2, + inode_state_will_delete = 3, + inode_state_did_delete = 4, +}; + +struct recorded_ref { + struct list_head list; + char *name; + struct fs_path *full_path; + u64 dir; + u64 dir_gen; + int name_len; + struct rb_node node; + struct rb_root *root; + long: 32; +}; + +struct find_xattr_ctx { + const char *name; + int name_len; + int found_idx; + char *found_data; + int found_data_len; +}; + +struct pkcs1pad_ctx { + struct crypto_akcipher *child; + unsigned int 
key_size; +}; + +struct pkcs1pad_inst_ctx { + struct crypto_akcipher_spawn spawn; +}; + +struct pkcs1pad_request { + struct scatterlist in_sg[2]; + struct scatterlist out_sg[1]; + uint8_t *in_buf; + uint8_t *out_buf; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct akcipher_request child_req; +}; + +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *, const u8 *, unsigned int); + int (*encrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); + int (*decrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); + int (*init)(struct crypto_lskcipher *); + void (*exit)(struct crypto_lskcipher *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct skcipher_alg_common co; +}; + +struct lskcipher_instance { + void (*free)(struct lskcipher_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[256]; + struct crypto_instance base; + } s; + struct lskcipher_alg alg; + }; +}; + +struct crypto_lskcipher_spawn { + struct crypto_spawn base; +}; + +struct badblocks_context { + sector_t start; + sector_t len; + int ack; + long: 32; +}; + +struct io_ev_fd { + struct eventfd_ctx *cq_ev_fd; + unsigned int eventfd_async; + unsigned int last_cq_tail; + refcount_t refs; + atomic_t ops; + struct callback_head rcu; +}; + +enum { + IO_EVENTFD_OP_SIGNAL_BIT = 0, +}; + +enum { + IORING_MEM_REGION_TYPE_USER = 1, +}; + +struct io_uring_region_desc { + __u64 user_addr; + __u64 size; + __u32 flags; + __u32 id; + __u64 mmap_offset; + __u64 __resv[4]; +}; + +struct gf128mul_4k { + be128 t[256]; +}; + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +enum dim_state { + DIM_START_MEASURE = 0, + DIM_MEASURE_IN_PROGRESS = 1, + DIM_APPLY_NEW_PROFILE = 2, +}; + +enum dim_stats_state { + DIM_STATS_WORSE = 0, + DIM_STATS_SAME = 1, + DIM_STATS_BETTER = 2, +}; + +enum dim_step_result { + DIM_STEPPED = 0, + DIM_TOO_TIRED = 1, + DIM_ON_EDGE = 2, +}; + +struct clk_notifier { + struct clk *clk; + struct srcu_notifier_head notifier_head; + struct list_head node; +}; + +struct clk_parent_map; + +struct clk_core { + const char *name; + const struct clk_ops *ops; + struct clk_hw *hw; + struct module *owner; + struct device *dev; + struct hlist_node rpm_node; + struct device_node *of_node; + struct clk_core *parent; + struct clk_parent_map *parents; + u8 num_parents; + u8 new_parent_index; + long unsigned int rate; + long unsigned int req_rate; + long unsigned int new_rate; + struct clk_core *new_parent; + struct clk_core *new_child; + long unsigned int flags; + bool orphan; + bool rpm_enabled; + unsigned int enable_count; + unsigned int prepare_count; + unsigned int protect_count; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int accuracy; + 
int phase; + struct clk_duty duty; + struct hlist_head children; + struct hlist_node child_node; + struct hlist_head clks; + unsigned int notifier_count; + struct dentry *dentry; + struct hlist_node debug_node; + struct kref ref; +}; + +struct clk_onecell_data { + struct clk **clks; + unsigned int clk_num; +}; + +struct clk_hw_onecell_data { + unsigned int num; + struct clk_hw *hws[0]; +}; + +struct clk_parent_map { + const struct clk_hw *hw; + struct clk_core *core; + const char *fw_name; + const char *name; + int index; +}; + +struct trace_event_raw_clk { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_clk_rate { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int rate; + char __data[0]; +}; + +struct trace_event_raw_clk_rate_range { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int min; + long unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_clk_parent { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + char __data[0]; +}; + +struct trace_event_raw_clk_phase { + struct trace_entry ent; + u32 __data_loc_name; + int phase; + char __data[0]; +}; + +struct trace_event_raw_clk_duty_cycle { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int num; + unsigned int den; + char __data[0]; +}; + +struct trace_event_raw_clk_rate_request { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + long unsigned int min; + long unsigned int max; + long unsigned int prate; + char __data[0]; +}; + +struct trace_event_data_offsets_clk { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate_range { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_parent { + u32 name; + const void *name_ptr_; + u32 pname; + const void *pname_ptr_; +}; + +struct trace_event_data_offsets_clk_phase { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_duty_cycle { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate_request { + u32 name; + const void *name_ptr_; + u32 pname; + const void *pname_ptr_; +}; + +typedef void (*btf_trace_clk_enable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_enable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_set_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_complete)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_min_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_max_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_range)(void *, struct clk_core *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_clk_set_parent)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_parent_complete)(void *, struct clk_core *, struct clk_core *); + +typedef void 
(*btf_trace_clk_set_phase)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_phase_complete)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_duty_cycle)(void *, struct clk_core *, struct clk_duty *); + +typedef void (*btf_trace_clk_set_duty_cycle_complete)(void *, struct clk_core *, struct clk_duty *); + +typedef void (*btf_trace_clk_rate_request_start)(void *, struct clk_rate_request *); + +typedef void (*btf_trace_clk_rate_request_done)(void *, struct clk_rate_request *); + +struct clk_notifier_devres { + struct clk *clk; + struct notifier_block *nb; +}; + +struct of_clk_provider { + struct list_head link; + struct device_node *node; + struct clk * (*get)(struct of_phandle_args *, void *); + struct clk_hw * (*get_hw)(struct of_phandle_args *, void *); + void *data; +}; + +struct clock_provider { + void (*clk_init_cb)(struct device_node *); + struct device_node *np; + struct list_head node; +}; + +struct scsi_host_busy_iter_data { + bool (*fn)(struct scsi_cmnd *, void *); + void *priv; +}; + +enum ata_xfer_mask { + ATA_MASK_PIO = 127, + ATA_MASK_MWDMA = 3968, + ATA_MASK_UDMA = 1044480, +}; + +enum hsm_task_states { + HSM_ST_IDLE = 0, + HSM_ST_FIRST = 1, + HSM_ST = 2, + HSM_ST_LAST = 3, + HSM_ST_ERR = 4, +}; + +struct trace_event_raw_ata_qc_issue_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char cmd; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char feature; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + unsigned char proto; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_qc_complete_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char status; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char error; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_tf_load { + struct trace_entry ent; + unsigned int ata_port; + unsigned char cmd; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char feature; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char proto; + char __data[0]; +}; + +struct trace_event_raw_ata_exec_command_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int tag; + unsigned char cmd; + unsigned char feature; + unsigned char hob_nsect; + unsigned char proto; + char __data[0]; +}; + +struct trace_event_raw_ata_bmdma_status { + struct trace_entry ent; + unsigned int ata_port; + unsigned int tag; + unsigned char host_stat; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int eh_action; + unsigned int eh_err_mask; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy_qc { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int qc_flags; + unsigned int eh_err_mask; + char __data[0]; 
+}; + +struct trace_event_raw_ata_eh_action_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int eh_action; + char __data[0]; +}; + +struct trace_event_raw_ata_link_reset_begin_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int class[2]; + long unsigned int deadline; + char __data[0]; +}; + +struct trace_event_raw_ata_link_reset_end_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int class[2]; + int rc; + char __data[0]; +}; + +struct trace_event_raw_ata_port_eh_begin_template { + struct trace_entry ent; + unsigned int ata_port; + char __data[0]; +}; + +struct trace_event_raw_ata_sff_hsm_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int qc_flags; + unsigned int protocol; + unsigned int hsm_state; + unsigned char dev_state; + char __data[0]; +}; + +struct trace_event_raw_ata_transfer_data_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int flags; + unsigned int offset; + unsigned int bytes; + char __data[0]; +}; + +struct trace_event_raw_ata_sff_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned char hsm_state; + char __data[0]; +}; + +struct trace_event_data_offsets_ata_qc_issue_template {}; + +struct trace_event_data_offsets_ata_qc_complete_template {}; + +struct trace_event_data_offsets_ata_tf_load {}; + +struct trace_event_data_offsets_ata_exec_command_template {}; + +struct trace_event_data_offsets_ata_bmdma_status {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy_qc {}; + +struct trace_event_data_offsets_ata_eh_action_template {}; + +struct trace_event_data_offsets_ata_link_reset_begin_template {}; + +struct trace_event_data_offsets_ata_link_reset_end_template {}; + +struct trace_event_data_offsets_ata_port_eh_begin_template {}; + +struct trace_event_data_offsets_ata_sff_hsm_template {}; + +struct trace_event_data_offsets_ata_transfer_data_template {}; + +struct trace_event_data_offsets_ata_sff_template {}; + +typedef void (*btf_trace_ata_qc_prep)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_issue)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_internal)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_failed)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_done)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_tf_load)(void *, struct ata_port *, const struct ata_taskfile *); + +typedef void (*btf_trace_ata_exec_command)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_setup)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_start)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_stop)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_status)(void *, struct ata_port *, unsigned int); + +typedef void (*btf_trace_ata_eh_link_autopsy)(void *, struct ata_device *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_eh_link_autopsy_qc)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_eh_about_to_do)(void *, struct ata_link *, unsigned int, unsigned int); + +typedef void 
(*btf_trace_ata_eh_done)(void *, struct ata_link *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_link_hardreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_slave_hardreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_link_softreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_link_hardreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_slave_hardreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_link_softreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_link_postreset)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_slave_postreset)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_std_sched_eh)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_port_freeze)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_port_thaw)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_sff_hsm_state)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_hsm_command_complete)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_port_intr)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_pio_transfer_data)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_atapi_pio_transfer_data)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_atapi_send_cdb)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_sff_flush_pio_task)(void *, struct ata_port *); + +struct ata_force_param { + const char *name; + u8 cbl; + u8 spd_limit; + unsigned int xfer_mask; + unsigned int quirk_on; + unsigned int quirk_off; + u16 lflags_on; + u16 lflags_off; +}; + +struct ata_force_ent { + int port; + int device; + struct ata_force_param param; +}; + +struct ata_xfer_ent { + int shift; + int bits; + u8 base; +}; + +struct ata_dev_quirks_entry { + const char *model_num; + const char *model_rev; + unsigned int quirks; +}; + +struct find_interface_arg { + int minor; + struct device_driver *drv; +}; + +struct each_dev_arg { + void *data; + int (*fn)(struct usb_device *, void *); +}; + +struct i2c_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct i2c_device_identity { + u16 manufacturer_id; + u16 part_id; + u8 die_revision; +}; + +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT = 0, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1, +}; + +struct i2c_driver { + unsigned int class; + int (*probe)(struct i2c_client *); + void (*remove)(struct i2c_client *); + void (*shutdown)(struct i2c_client *); + void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned int); + int (*command)(struct i2c_client *, unsigned int, void *); + struct device_driver driver; + const struct i2c_device_id *id_table; + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const short unsigned int *address_list; + struct list_head clients; + u32 flags; +}; + +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; + u32 sda_hold_ns; + u32 digital_filter_width_ns; + u32 analog_filter_cutoff_freq_hz; +}; + +struct i2c_devinfo { + struct list_head list; + int busnum; + 
struct i2c_board_info board_info; +}; + +struct trace_event_raw_i2c_write { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_read { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + char __data[0]; +}; + +struct trace_event_raw_i2c_reply { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_result { + struct trace_entry ent; + int adapter_nr; + __u16 nr_msgs; + __s16 ret; + char __data[0]; +}; + +struct trace_event_data_offsets_i2c_write { + u32 buf; + const void *buf_ptr_; +}; + +struct trace_event_data_offsets_i2c_read {}; + +struct trace_event_data_offsets_i2c_reply { + u32 buf; + const void *buf_ptr_; +}; + +struct trace_event_data_offsets_i2c_result {}; + +typedef void (*btf_trace_i2c_write)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_read)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_reply)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_result)(void *, const struct i2c_adapter *, int, int); + +struct i2c_cmd_arg { + unsigned int cmd; + void *arg; +}; + +struct ethtool_forced_speed_map { + u32 speed; + long unsigned int caps[4]; + const u32 *cap_arr; + u32 arr_size; +}; + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN = 0, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE = 1, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE = 2, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT = 3, +}; + +struct phy_req_info { + struct ethnl_req_info base; + struct phy_device_node *pdn; +}; + +struct ethnl_phy_dump_ctx { + struct phy_req_info *phy_req_info; + long unsigned int ifindex; + long unsigned int phy_index; +}; + +struct nft_bitwise_fast_expr { + u32 mask; + u32 xor; + u8 sreg; + u8 dreg; +}; + +struct nft_jumpstack { + const struct nft_rule_dp *rule; +}; + +enum nft_set_attributes { + NFTA_SET_UNSPEC = 0, + NFTA_SET_TABLE = 1, + NFTA_SET_NAME = 2, + NFTA_SET_FLAGS = 3, + NFTA_SET_KEY_TYPE = 4, + NFTA_SET_KEY_LEN = 5, + NFTA_SET_DATA_TYPE = 6, + NFTA_SET_DATA_LEN = 7, + NFTA_SET_POLICY = 8, + NFTA_SET_DESC = 9, + NFTA_SET_ID = 10, + NFTA_SET_TIMEOUT = 11, + NFTA_SET_GC_INTERVAL = 12, + NFTA_SET_USERDATA = 13, + NFTA_SET_PAD = 14, + NFTA_SET_OBJ_TYPE = 15, + NFTA_SET_HANDLE = 16, + NFTA_SET_EXPR = 17, + NFTA_SET_EXPRESSIONS = 18, + __NFTA_SET_MAX = 19, +}; + +struct nft_bitmap_elem { + struct nft_elem_priv priv; + struct list_head head; + struct nft_set_ext ext; +}; + +struct nft_bitmap { + struct list_head list; + u16 bitmap_size; + u8 bitmap[0]; +}; + +struct dmabuf_cmsg { + __u64 frag_offset; + __u32 frag_size; + __u32 frag_token; + __u32 dmabuf_id; + __u32 flags; +}; + +enum { + BPF_TCP_ESTABLISHED = 1, + BPF_TCP_SYN_SENT = 2, + BPF_TCP_SYN_RECV = 3, + BPF_TCP_FIN_WAIT1 = 4, + BPF_TCP_FIN_WAIT2 = 5, + BPF_TCP_TIME_WAIT = 6, + BPF_TCP_CLOSE = 7, + BPF_TCP_CLOSE_WAIT = 8, + BPF_TCP_LAST_ACK = 9, + BPF_TCP_LISTEN = 10, + BPF_TCP_CLOSING = 11, + BPF_TCP_NEW_SYN_RECV = 12, + BPF_TCP_BOUND_INACTIVE = 13, + BPF_TCP_MAX_STATES = 14, +}; + +struct tcpvegas_info { + __u32 tcpv_enabled; + __u32 tcpv_rttcnt; + __u32 tcpv_rtt; + __u32 tcpv_minrtt; +}; + +struct tcp_dctcp_info { + __u16 dctcp_enabled; + __u16 dctcp_ce_state; + __u32 dctcp_alpha; + __u32 dctcp_ab_ecn; + __u32 dctcp_ab_tot; +}; + 
+struct tcp_bbr_info { + __u32 bbr_bw_lo; + __u32 bbr_bw_hi; + __u32 bbr_min_rtt; + __u32 bbr_pacing_gain; + __u32 bbr_cwnd_gain; +}; + +union tcp_cc_info { + struct tcpvegas_info vegas; + struct tcp_dctcp_info dctcp; + struct tcp_bbr_info bbr; +}; + +struct tcp_repair_opt { + __u32 opt_code; + __u32 opt_val; +}; + +struct tcp_repair_window { + __u32 snd_wl1; + __u32 snd_wnd; + __u32 max_window; + __u32 rcv_wnd; + __u32 rcv_wup; +}; + +enum { + TCP_NO_QUEUE = 0, + TCP_RECV_QUEUE = 1, + TCP_SEND_QUEUE = 2, + TCP_QUEUES_NR = 3, +}; + +struct tcp_info { + __u8 tcpi_state; + __u8 tcpi_ca_state; + __u8 tcpi_retransmits; + __u8 tcpi_probes; + __u8 tcpi_backoff; + __u8 tcpi_options; + __u8 tcpi_snd_wscale: 4; + __u8 tcpi_rcv_wscale: 4; + __u8 tcpi_delivery_rate_app_limited: 1; + __u8 tcpi_fastopen_client_fail: 2; + __u32 tcpi_rto; + __u32 tcpi_ato; + __u32 tcpi_snd_mss; + __u32 tcpi_rcv_mss; + __u32 tcpi_unacked; + __u32 tcpi_sacked; + __u32 tcpi_lost; + __u32 tcpi_retrans; + __u32 tcpi_fackets; + __u32 tcpi_last_data_sent; + __u32 tcpi_last_ack_sent; + __u32 tcpi_last_data_recv; + __u32 tcpi_last_ack_recv; + __u32 tcpi_pmtu; + __u32 tcpi_rcv_ssthresh; + __u32 tcpi_rtt; + __u32 tcpi_rttvar; + __u32 tcpi_snd_ssthresh; + __u32 tcpi_snd_cwnd; + __u32 tcpi_advmss; + __u32 tcpi_reordering; + __u32 tcpi_rcv_rtt; + __u32 tcpi_rcv_space; + __u32 tcpi_total_retrans; + __u64 tcpi_pacing_rate; + __u64 tcpi_max_pacing_rate; + __u64 tcpi_bytes_acked; + __u64 tcpi_bytes_received; + __u32 tcpi_segs_out; + __u32 tcpi_segs_in; + __u32 tcpi_notsent_bytes; + __u32 tcpi_min_rtt; + __u32 tcpi_data_segs_in; + __u32 tcpi_data_segs_out; + __u64 tcpi_delivery_rate; + __u64 tcpi_busy_time; + __u64 tcpi_rwnd_limited; + __u64 tcpi_sndbuf_limited; + __u32 tcpi_delivered; + __u32 tcpi_delivered_ce; + __u64 tcpi_bytes_sent; + __u64 tcpi_bytes_retrans; + __u32 tcpi_dsack_dups; + __u32 tcpi_reord_seen; + __u32 tcpi_rcv_ooopack; + __u32 tcpi_snd_wnd; + __u32 tcpi_rcv_wnd; + __u32 tcpi_rehash; + __u16 tcpi_total_rto; + __u16 tcpi_total_rto_recoveries; + __u32 tcpi_total_rto_time; +}; + +enum { + TCP_NLA_PAD = 0, + TCP_NLA_BUSY = 1, + TCP_NLA_RWND_LIMITED = 2, + TCP_NLA_SNDBUF_LIMITED = 3, + TCP_NLA_DATA_SEGS_OUT = 4, + TCP_NLA_TOTAL_RETRANS = 5, + TCP_NLA_PACING_RATE = 6, + TCP_NLA_DELIVERY_RATE = 7, + TCP_NLA_SND_CWND = 8, + TCP_NLA_REORDERING = 9, + TCP_NLA_MIN_RTT = 10, + TCP_NLA_RECUR_RETRANS = 11, + TCP_NLA_DELIVERY_RATE_APP_LMT = 12, + TCP_NLA_SNDQ_SIZE = 13, + TCP_NLA_CA_STATE = 14, + TCP_NLA_SND_SSTHRESH = 15, + TCP_NLA_DELIVERED = 16, + TCP_NLA_DELIVERED_CE = 17, + TCP_NLA_BYTES_SENT = 18, + TCP_NLA_BYTES_RETRANS = 19, + TCP_NLA_DSACK_DUPS = 20, + TCP_NLA_REORD_SEEN = 21, + TCP_NLA_SRTT = 22, + TCP_NLA_TIMEOUT_REHASH = 23, + TCP_NLA_BYTES_NOTSENT = 24, + TCP_NLA_EDT = 25, + TCP_NLA_TTL = 26, + TCP_NLA_REHASH = 27, +}; + +struct tcp_zerocopy_receive { + __u64 address; + __u32 length; + __u32 recv_skip_hint; + __u32 inq; + __s32 err; + __u64 copybuf_address; + __s32 copybuf_len; + __u32 flags; + __u64 msg_control; + __u64 msg_controllen; + __u32 msg_flags; + __u32 reserved; +}; + +enum tcp_chrono { + TCP_CHRONO_UNSPEC = 0, + TCP_CHRONO_BUSY = 1, + TCP_CHRONO_RWND_LIMITED = 2, + TCP_CHRONO_SNDBUF_LIMITED = 3, + __TCP_CHRONO_MAX = 4, +}; + +enum { + TCP_CMSG_INQ = 1, + TCP_CMSG_TS = 2, +}; + +struct tcp_splice_state { + struct pipe_inode_info *pipe; + size_t len; + unsigned int flags; +}; + +struct tcp_xa_pool { + u8 max; + u8 idx; + __u32 tokens[17]; + netmem_ref netmems[17]; +}; + +struct igmp6_mc_iter_state { + struct 
seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + +struct igmp6_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; + struct ifmcaddr6 *im; +}; + +enum rwsem_waiter_type { + RWSEM_WAITING_FOR_WRITE = 0, + RWSEM_WAITING_FOR_READ = 1, +}; + +struct rwsem_waiter { + struct list_head list; + struct task_struct *task; + enum rwsem_waiter_type type; + long unsigned int timeout; + bool handoff_set; +}; + +enum rwsem_wake_type { + RWSEM_WAKE_ANY = 0, + RWSEM_WAKE_READERS = 1, + RWSEM_WAKE_READ_OWNED = 2, +}; + +enum owner_state { + OWNER_NULL = 1, + OWNER_WRITER = 2, + OWNER_READER = 4, + OWNER_NONSPINNABLE = 8, +}; + +struct nbcon_state { + union { + unsigned int atom; + struct { + unsigned int prio: 2; + unsigned int req_prio: 2; + unsigned int unsafe: 1; + unsigned int unsafe_takeover: 1; + unsigned int cpu: 24; + }; + }; +}; + +struct irq_devres { + unsigned int irq; + void *dev_id; +}; + +struct irq_desc_devres { + unsigned int from; + unsigned int cnt; +}; + +struct irq_generic_chip_devres { + struct irq_chip_generic *gc; + u32 msk; + unsigned int clr; + unsigned int set; +}; + +struct timer_list_iter { + int cpu; + bool second_pass; + u64 now; +}; + +struct timens_offset { + s64 sec; + u64 nsec; +}; + +struct arch_vdso_time_data {}; + +struct vdso_timestamp { + u64 sec; + u64 nsec; +}; + +struct vdso_data { + u32 seq; + s32 clock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; + union { + struct vdso_timestamp basetime[12]; + struct timens_offset offset[12]; + }; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 hrtimer_res; + u32 __unused; + struct arch_vdso_time_data arch_data; +}; + +struct rchan_percpu_buf_dispatcher { + struct rchan_buf *buf; + struct dentry *dentry; +}; + +struct bpf_iter_kmem_cache { + __u64 __opaque[1]; +}; + +struct bpf_iter_kmem_cache_kern { + struct kmem_cache *pos; + long: 32; +}; + +struct bpf_iter__kmem_cache { + union { + struct bpf_iter_meta *meta; + }; + union { + struct kmem_cache *s; + }; +}; + +union kmem_cache_iter_priv { + struct bpf_iter_kmem_cache it; + struct bpf_iter_kmem_cache_kern kit; +}; + +struct trace_event_raw_mm_compaction_isolate_template { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int nr_scanned; + long unsigned int nr_taken; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_migratepages { + struct trace_entry ent; + long unsigned int nr_migrated; + long unsigned int nr_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_begin { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_end { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_try_to_compact_pages { + struct trace_entry ent; + int order; + long unsigned int gfp_mask; + int prio; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_suitable_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + int ret; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_defer_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + unsigned int considered; + unsigned int defer_shift; + int 
order_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_kcompactd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_kcompactd_wake_template { + struct trace_entry ent; + int nid; + int order; + enum zone_type highest_zoneidx; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_compaction_isolate_template {}; + +struct trace_event_data_offsets_mm_compaction_migratepages {}; + +struct trace_event_data_offsets_mm_compaction_begin {}; + +struct trace_event_data_offsets_mm_compaction_end {}; + +struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; + +struct trace_event_data_offsets_mm_compaction_suitable_template {}; + +struct trace_event_data_offsets_mm_compaction_defer_template {}; + +struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; + +struct trace_event_data_offsets_kcompactd_wake_template {}; + +typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_fast_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_migratepages)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_mm_compaction_begin)(void *, struct compact_control *, long unsigned int, long unsigned int, bool); + +typedef void (*btf_trace_mm_compaction_end)(void *, struct compact_control *, long unsigned int, long unsigned int, bool, int); + +typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, int, gfp_t, int); + +typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, int); + +typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum zone_type); + +typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type); + +typedef enum { + ISOLATE_ABORT = 0, + ISOLATE_NONE = 1, + ISOLATE_SUCCESS = 2, +} isolate_migrate_t; + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + short unsigned int d_reclen; + unsigned char d_type; + char d_name[0]; + long: 32; +}; + +struct old_linux_dirent { + long unsigned int d_ino; + long unsigned int d_offset; + short unsigned int d_namlen; + char d_name[0]; +}; + +struct readdir_callback { + struct dir_context ctx; + struct old_linux_dirent *dirent; + int result; +}; + +struct linux_dirent { + long unsigned int d_ino; + long unsigned int d_off; + short unsigned int d_reclen; + char d_name[0]; +}; + +struct getdents_callback___2 { + struct dir_context ctx; + struct linux_dirent *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct getdents_callback64 { + struct dir_context ctx; + struct linux_dirent64 *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct proc_fs_context { + struct pid_namespace *pid_ns; + unsigned int mask; + enum proc_hidepid hidepid; + int gid; + enum proc_pidonly pidonly; +}; + 
+enum proc_param { + Opt_gid___4 = 0, + Opt_hidepid = 1, + Opt_subset = 2, +}; + +enum { + BLOCK_BITMAP = 0, + INODE_BITMAP = 1, + INODE_TABLE = 2, + GROUP_TABLE_COUNT = 3, +}; + +struct ext4_rcu_ptr { + struct callback_head rcu; + void *ptr; +}; + +struct ext4_new_flex_group_data { + struct ext4_new_group_data *groups; + __u16 *bg_flags; + ext4_group_t resize_bg; + ext4_group_t count; +}; + +struct fatent_ra { + sector_t cur; + sector_t limit; + unsigned int ra_blocks; + long: 32; + sector_t ra_advance; + sector_t ra_next; + sector_t ra_limit; +}; + +struct file_extent_cluster { + u64 start; + u64 end; + u64 boundary[128]; + unsigned int nr; + long: 32; + u64 owning_root; +}; + +struct mapping_tree { + struct rb_root rb_root; + spinlock_t lock; +}; + +enum reloc_stage { + MOVE_DATA_EXTENTS = 0, + UPDATE_DATA_PTRS = 1, +}; + +struct reloc_control { + struct btrfs_block_group *block_group; + struct btrfs_root *extent_root; + struct inode *data_inode; + struct btrfs_block_rsv *block_rsv; + struct btrfs_backref_cache backref_cache; + struct file_extent_cluster cluster; + struct extent_io_tree processed_blocks; + struct mapping_tree reloc_root_tree; + struct list_head reloc_roots; + struct list_head dirty_subvol_roots; + u64 merging_rsv_size; + u64 nodes_relocated; + u64 reserved_bytes; + u64 search_start; + u64 extents_found; + enum reloc_stage stage; + bool create_reloc_tree; + bool merge_reloc_tree; + bool found_file_extent; +}; + +struct mapping_node { + struct { + struct rb_node rb_node; + long: 32; + u64 bytenr; + }; + void *data; + long: 32; +}; + +struct tree_block { + struct { + struct rb_node rb_node; + long: 32; + u64 bytenr; + }; + u64 owner; + struct btrfs_key key; + u8 level; + bool key_ready; + long: 32; +}; + +struct keyctl_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; + __u32 __spare[10]; +}; + +struct keyctl_pkey_params { + __s32 key_id; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + __u32 __spare[7]; +}; + +enum { + Opt_err___4 = 0, + Opt_enc = 1, + Opt_hash = 2, +}; + +struct bpf_crypto_type { + void * (*alloc_tfm)(const char *); + void (*free_tfm)(void *); + int (*has_algo)(const char *); + int (*setkey)(void *, const u8 *, unsigned int); + int (*setauthsize)(void *, unsigned int); + int (*encrypt)(void *, const u8 *, u8 *, unsigned int, u8 *); + int (*decrypt)(void *, const u8 *, u8 *, unsigned int, u8 *); + unsigned int (*ivsize)(void *); + unsigned int (*statesize)(void *); + u32 (*get_flags)(void *); + struct module *owner; + char name[14]; +}; + +struct cmac_tfm_ctx { + struct crypto_cipher *child; + long: 32; + __be64 consts[0]; +}; + +struct cmac_desc_ctx { + unsigned int len; + u8 odds[0]; +}; + +struct blk_major_name { + struct blk_major_name *next; + int major; + char name[16]; + void (*probe)(dev_t); +}; + +enum io_uring_socket_op { + SOCKET_URING_OP_SIOCINQ = 0, + SOCKET_URING_OP_SIOCOUTQ = 1, + SOCKET_URING_OP_GETSOCKOPT = 2, + SOCKET_URING_OP_SETSOCKOPT = 3, +}; + +struct uring_cache { + struct io_uring_sqe sqes[2]; +}; + +struct once_work { + struct work_struct work; + struct static_key_true *key; + struct module *module; +}; + +struct ZSTD_CDict_s { + const void *dictContent; + size_t dictContentSize; + ZSTD_dictContentType_e dictContentType; + U32 *entropyWorkspace; + ZSTD_cwksp workspace; + ZSTD_matchState_t matchState; + ZSTD_compressedBlockState_t cBlockState; + ZSTD_customMem customMem; + U32 dictID; + int compressionLevel; + 
ZSTD_paramSwitch_e useRowMatchFinder; +}; + +typedef struct { + U32 litLength; + U32 matchLength; +} ZSTD_sequenceLength; + +struct devm_clk_state { + struct clk *clk; + void (*exit)(struct clk *); +}; + +struct clk_bulk_devres { + struct clk_bulk_data *clks; + int num_clks; +}; + +struct virtio_pci_common_cfg { + __le32 device_feature_select; + __le32 device_feature; + __le32 guest_feature_select; + __le32 guest_feature; + __le16 msix_config; + __le16 num_queues; + __u8 device_status; + __u8 config_generation; + __le16 queue_select; + __le16 queue_size; + __le16 queue_msix_vector; + __le16 queue_enable; + __le16 queue_notify_off; + __le32 queue_desc_lo; + __le32 queue_desc_hi; + __le32 queue_avail_lo; + __le32 queue_avail_hi; + __le32 queue_used_lo; + __le32 queue_used_hi; +}; + +struct virtio_pci_modern_device { + struct pci_dev *pci_dev; + struct virtio_pci_common_cfg *common; + void *device; + void *notify_base; + resource_size_t notify_pa; + u8 *isr; + size_t notify_len; + size_t device_len; + size_t common_len; + int notify_map_cap; + u32 notify_offset_multiplier; + int modern_bars; + struct virtio_device_id id; + int (*device_id_check)(struct pci_dev *); + long: 32; + u64 dma_mask; +}; + +struct virtio_pci_vq_info { + struct virtqueue *vq; + struct list_head node; + unsigned int msix_vector; +}; + +struct virtio_pci_admin_vq { + struct virtio_pci_vq_info *info; + spinlock_t lock; + u64 supported_cmds; + u64 supported_caps; + u8 max_dev_parts_objects; + struct ida dev_parts_ida; + char name[10]; + u16 vq_index; + long: 32; +}; + +struct virtio_pci_device { + struct virtio_device vdev; + struct pci_dev *pci_dev; + long: 32; + union { + struct virtio_pci_legacy_device ldev; + struct virtio_pci_modern_device mdev; + }; + bool is_legacy; + u8 *isr; + spinlock_t lock; + struct list_head virtqueues; + struct list_head slow_virtqueues; + struct virtio_pci_vq_info **vqs; + struct virtio_pci_admin_vq admin_vq; + int msix_enabled; + int intx_enabled; + cpumask_var_t *msix_affinity_masks; + char (*msix_names)[256]; + unsigned int msix_vectors; + unsigned int msix_used_vectors; + bool per_vq_vectors; + struct virtqueue * (*setup_vq)(struct virtio_pci_device *, struct virtio_pci_vq_info *, unsigned int, void (*)(struct virtqueue *), const char *, bool, u16); + void (*del_vq)(struct virtio_pci_vq_info *); + u16 (*config_vector)(struct virtio_pci_device *, u16); + int (*avq_index)(struct virtio_device *, u16 *, u16 *); + long: 32; +}; + +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +enum vp_vq_vector_policy { + VP_VQ_VECTOR_POLICY_EACH = 0, + VP_VQ_VECTOR_POLICY_SHARED_SLOW = 1, + VP_VQ_VECTOR_POLICY_SHARED = 2, +}; + +struct sg_io_v4 { + __s32 guard; + __u32 protocol; + __u32 subprotocol; + __u32 request_len; + __u64 request; + __u64 request_tag; + __u32 request_attr; + __u32 request_priority; + __u32 request_extra; + __u32 max_response_len; + __u64 response; + __u32 dout_iovec_count; + __u32 dout_xfer_len; + __u32 din_iovec_count; + __u32 din_xfer_len; + __u64 dout_xferp; + __u64 din_xferp; + __u32 timeout; + __u32 flags; + __u64 usr_ptr; + __u32 spare_in; + __u32 driver_status; + __u32 transport_status; + __u32 device_status; + __u32 retry_delay; + __u32 info; + __u32 duration; + __u32 response_len; + __s32 din_resid; + __s32 dout_resid; + __u64 generated_tag; + __u32 spare_out; + __u32 padding; +}; + +typedef int bsg_sg_io_fn(struct request_queue *, struct sg_io_v4 *, bool, unsigned int); + +struct mdiobus_devres { + struct mii_bus *mii; +}; + +enum 
E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 1, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 2, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 3, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 4, + E1000_INVM_INVALIDATED_STRUCTURE = 15, +}; + +typedef struct thermal_zone_device *class_thermal_zone_reverse_t; + +struct trace_event_raw_thermal_temperature { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int temp_prev; + int temp; + char __data[0]; +}; + +struct trace_event_raw_cdev_update { + struct trace_entry ent; + u32 __data_loc_type; + long unsigned int target; + char __data[0]; +}; + +struct trace_event_raw_thermal_zone_trip { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int trip; + enum thermal_trip_type trip_type; + char __data[0]; +}; + +struct trace_event_data_offsets_thermal_temperature { + u32 thermal_zone; + const void *thermal_zone_ptr_; +}; + +struct trace_event_data_offsets_cdev_update { + u32 type; + const void *type_ptr_; +}; + +struct trace_event_data_offsets_thermal_zone_trip { + u32 thermal_zone; + const void *thermal_zone_ptr_; +}; + +typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *); + +typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, long unsigned int); + +typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, enum thermal_trip_type); + +typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_cell_post_process_t)(void *, const char *, int, unsigned int, void *, size_t); + +enum nvmem_type { + NVMEM_TYPE_UNKNOWN = 0, + NVMEM_TYPE_EEPROM = 1, + NVMEM_TYPE_OTP = 2, + NVMEM_TYPE_BATTERY_BACKED = 3, + NVMEM_TYPE_FRAM = 4, +}; + +struct nvmem_keepout { + unsigned int start; + unsigned int end; + unsigned char value; +}; + +struct nvmem_cell_info { + const char *name; + unsigned int offset; + size_t raw_len; + unsigned int bytes; + unsigned int bit_offset; + unsigned int nbits; + struct device_node *np; + nvmem_cell_post_process_t read_post_process; + void *priv; +}; + +struct nvmem_layout; + +struct nvmem_device { + struct module *owner; + long: 32; + struct device dev; + struct list_head node; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + struct nvmem_layout *layout; + void *priv; + bool sysfs_cells_populated; + long: 32; +}; + +struct nvmem_layout { + struct device dev; + struct nvmem_device *nvmem; + int (*add_cells)(struct nvmem_layout *); +}; + +struct nvmem_layout_driver { + struct device_driver driver; + int (*probe)(struct nvmem_layout *); + void (*remove)(struct nvmem_layout *); +}; + +struct compat_ifconf { + compat_int_t ifc_len; + compat_caddr_t ifcbuf; +}; + +enum hwtstamp_flags { + HWTSTAMP_FLAG_BONDED_PHC_INDEX = 1, + HWTSTAMP_FLAG_LAST = 1, + HWTSTAMP_FLAG_MASK = 1, +}; + +struct datalink_proto { + unsigned char type[8]; + struct llc_sap *sap; + short unsigned int header_length; + int (*rcvfunc)(struct sk_buff *, struct net_device *, struct 
packet_type *, struct net_device *); + int (*request)(struct datalink_proto *, struct sk_buff *, const unsigned char *); + struct list_head node; +}; + +struct channels_reply_data { + struct ethnl_reply_data base; + struct ethtool_channels channels; +}; + +enum tcp_ca_ack_event_flags { + CA_ACK_SLOWPATH = 1, + CA_ACK_WIN_UPDATE = 2, + CA_ACK_ECE = 4, +}; + +struct tcp_sacktag_state { + u64 first_sackt; + u64 last_sackt; + u32 reord; + u32 sack_delivered; + int flag; + unsigned int mss_now; + struct rate_sample *rate; + long: 32; +}; + +struct rt6_exception { + struct hlist_node hlist; + struct rt6_info *rt6i; + long unsigned int stamp; + struct callback_head rcu; +}; + +struct netevent_redirect { + struct dst_entry *old; + struct dst_entry *new; + struct neighbour *neigh; + const void *daddr; +}; + +struct trace_event_raw_fib6_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[16]; + __u8 dst[16]; + u16 sport; + u16 dport; + u8 proto; + u8 rt_type; + char name[16]; + __u8 gw[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib6_table_lookup {}; + +typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, const struct fib6_result *, struct fib6_table *, const struct flowi6 *); + +enum rt6_nud_state { + RT6_NUD_FAIL_HARD = -3, + RT6_NUD_FAIL_PROBE = -2, + RT6_NUD_FAIL_DO_RR = -1, + RT6_NUD_SUCCEED = 1, +}; + +struct fib6_nh_dm_arg { + struct net *net; + const struct in6_addr *saddr; + int oif; + int flags; + struct fib6_nh *nh; +}; + +struct fib6_nh_frl_arg { + u32 flags; + int oif; + int strict; + int *mpri; + bool *do_rr; + struct fib6_nh *nh; +}; + +struct fib6_nh_excptn_arg { + struct rt6_info *rt; + int plen; +}; + +struct fib6_nh_match_arg { + const struct net_device *dev; + const struct in6_addr *gw; + struct fib6_nh *match; +}; + +struct fib6_nh_age_excptn_arg { + struct fib6_gc_args *gc_args; + long unsigned int now; +}; + +struct fib6_nh_rd_arg { + struct fib6_result *res; + struct flowi6 *fl6; + const struct in6_addr *gw; + struct rt6_info **ret; +}; + +struct ip6rd_flowi { + struct flowi6 fl6; + struct in6_addr gateway; +}; + +struct fib6_nh_del_cached_rt_arg { + struct fib6_config *cfg; + struct fib6_info *f6i; +}; + +struct arg_dev_net_ip { + struct net *net; + struct in6_addr *addr; +}; + +struct arg_netdev_event { + const struct net_device *dev; + union { + unsigned char nh_flags; + long unsigned int event; + }; +}; + +struct rt6_mtu_change_arg { + struct net_device *dev; + unsigned int mtu; + struct fib6_info *f6i; +}; + +struct rt6_nh { + struct fib6_info *fib6_info; + struct fib6_config r_cfg; + struct list_head next; +}; + +struct fib6_nh_exception_dump_walker { + struct rt6_rtnl_dump_arg *dump; + struct fib6_info *rt; + unsigned int flags; + unsigned int skip; + unsigned int count; +}; + +enum { + IFLA_BR_UNSPEC = 0, + IFLA_BR_FORWARD_DELAY = 1, + IFLA_BR_HELLO_TIME = 2, + IFLA_BR_MAX_AGE = 3, + IFLA_BR_AGEING_TIME = 4, + IFLA_BR_STP_STATE = 5, + IFLA_BR_PRIORITY = 6, + IFLA_BR_VLAN_FILTERING = 7, + IFLA_BR_VLAN_PROTOCOL = 8, + IFLA_BR_GROUP_FWD_MASK = 9, + IFLA_BR_ROOT_ID = 10, + IFLA_BR_BRIDGE_ID = 11, + IFLA_BR_ROOT_PORT = 12, + IFLA_BR_ROOT_PATH_COST = 13, + IFLA_BR_TOPOLOGY_CHANGE = 14, + IFLA_BR_TOPOLOGY_CHANGE_DETECTED = 15, + IFLA_BR_HELLO_TIMER = 16, + IFLA_BR_TCN_TIMER = 17, + IFLA_BR_TOPOLOGY_CHANGE_TIMER = 18, + IFLA_BR_GC_TIMER = 19, + IFLA_BR_GROUP_ADDR = 20, + IFLA_BR_FDB_FLUSH = 21, + IFLA_BR_MCAST_ROUTER = 22, + IFLA_BR_MCAST_SNOOPING = 23, + 
IFLA_BR_MCAST_QUERY_USE_IFADDR = 24, + IFLA_BR_MCAST_QUERIER = 25, + IFLA_BR_MCAST_HASH_ELASTICITY = 26, + IFLA_BR_MCAST_HASH_MAX = 27, + IFLA_BR_MCAST_LAST_MEMBER_CNT = 28, + IFLA_BR_MCAST_STARTUP_QUERY_CNT = 29, + IFLA_BR_MCAST_LAST_MEMBER_INTVL = 30, + IFLA_BR_MCAST_MEMBERSHIP_INTVL = 31, + IFLA_BR_MCAST_QUERIER_INTVL = 32, + IFLA_BR_MCAST_QUERY_INTVL = 33, + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL = 34, + IFLA_BR_MCAST_STARTUP_QUERY_INTVL = 35, + IFLA_BR_NF_CALL_IPTABLES = 36, + IFLA_BR_NF_CALL_IP6TABLES = 37, + IFLA_BR_NF_CALL_ARPTABLES = 38, + IFLA_BR_VLAN_DEFAULT_PVID = 39, + IFLA_BR_PAD = 40, + IFLA_BR_VLAN_STATS_ENABLED = 41, + IFLA_BR_MCAST_STATS_ENABLED = 42, + IFLA_BR_MCAST_IGMP_VERSION = 43, + IFLA_BR_MCAST_MLD_VERSION = 44, + IFLA_BR_VLAN_STATS_PER_PORT = 45, + IFLA_BR_MULTI_BOOLOPT = 46, + IFLA_BR_MCAST_QUERIER_STATE = 47, + IFLA_BR_FDB_N_LEARNED = 48, + IFLA_BR_FDB_MAX_LEARNED = 49, + __IFLA_BR_MAX = 50, +}; + +enum { + LINK_XSTATS_TYPE_UNSPEC = 0, + LINK_XSTATS_TYPE_BRIDGE = 1, + LINK_XSTATS_TYPE_BOND = 2, + __LINK_XSTATS_TYPE_MAX = 3, +}; + +struct bridge_vlan_info { + __u16 flags; + __u16 vid; +}; + +struct bridge_vlan_xstats { + __u64 rx_bytes; + __u64 rx_packets; + __u64 tx_bytes; + __u64 tx_packets; + __u16 vid; + __u16 flags; + __u32 pad2; +}; + +enum { + BRIDGE_XSTATS_UNSPEC = 0, + BRIDGE_XSTATS_VLAN = 1, + BRIDGE_XSTATS_MCAST = 2, + BRIDGE_XSTATS_PAD = 3, + BRIDGE_XSTATS_STP = 4, + __BRIDGE_XSTATS_MAX = 5, +}; + +struct br_boolopt_multi { + __u32 optval; + __u32 optmask; +}; + +struct sigcontext { + unsigned int sc_regmask; + unsigned int sc_status; + long long unsigned int sc_pc; + long long unsigned int sc_regs[32]; + long long unsigned int sc_fpregs[32]; + unsigned int sc_acx; + unsigned int sc_fpc_csr; + unsigned int sc_fpc_eir; + unsigned int sc_used_math; + unsigned int sc_dsp; + long: 32; + long long unsigned int sc_mdhi; + long long unsigned int sc_mdlo; + long unsigned int sc_hi1; + long unsigned int sc_lo1; + long unsigned int sc_hi2; + long unsigned int sc_lo2; + long unsigned int sc_hi3; + long unsigned int sc_lo3; +}; + +struct extcontext { + unsigned int magic; + unsigned int size; +}; + +struct ucontext { + long unsigned int uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + long: 32; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; + long long unsigned int uc_extcontext[0]; +}; + +struct sigframe { + u32 sf_ass[4]; + u32 sf_pad[2]; + struct sigcontext sf_sc; + sigset_t sf_mask; + long long unsigned int sf_extcontext[0]; +}; + +struct rt_sigframe { + u32 rs_ass[4]; + u32 rs_pad[2]; + struct siginfo rs_info; + struct ucontext rs_uc; +}; + +enum perf_hw_id { + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + PERF_COUNT_HW_MAX = 10, +}; + +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + PERF_COUNT_HW_CACHE_MAX = 7, +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_MAX = 3, +}; + +enum perf_hw_cache_op_result_id { + 
PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + PERF_COUNT_HW_CACHE_RESULT_MAX = 2, +}; + +struct cpu_hw_events { + struct perf_event *events[4]; + long unsigned int used_mask[1]; + unsigned int saved_ctrl[4]; +}; + +struct mips_perf_event { + unsigned int event_id; + unsigned int cntr_mask; + enum { + T = 0, + V = 1, + P = 2, + } range; +}; + +struct mips_pmu { + u64 max_period; + u64 valid_count; + u64 overflow; + const char *name; + int irq; + u64 (*read_counter)(unsigned int); + void (*write_counter)(unsigned int, u64); + const struct mips_perf_event * (*map_raw_event)(u64); + const struct mips_perf_event (*general_event_map)[10]; + const struct mips_perf_event (*cache_event_map)[42]; + unsigned int num_counters; +}; + +struct prb_data_block { + long unsigned int id; + char data[0]; +}; + +struct mmap_unlock_irq_work { + struct irq_work irq_work; + struct mm_struct *mm; +}; + +struct bpf_iter_seq_task_common { + struct pid_namespace *ns; + enum bpf_iter_task_type type; + u32 pid; + u32 pid_visiting; +}; + +struct bpf_iter_seq_task_info { + struct bpf_iter_seq_task_common common; + u32 tid; +}; + +struct bpf_iter__task { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; +}; + +struct bpf_iter_seq_task_file_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + u32 tid; + u32 fd; +}; + +struct bpf_iter__task_file { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + u32 fd; + long: 32; + union { + struct file *file; + }; +}; + +struct bpf_iter_seq_task_vma_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + struct mm_struct *mm; + struct vm_area_struct *vma; + u32 tid; + long unsigned int prev_vm_start; + long unsigned int prev_vm_end; +}; + +enum bpf_task_vma_iter_find_op { + task_vma_iter_first_vma = 0, + task_vma_iter_next_vma = 1, + task_vma_iter_find_vma = 2, +}; + +struct bpf_iter__task_vma { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + union { + struct vm_area_struct *vma; + }; +}; + +typedef u64 (*btf_bpf_find_vma)(struct task_struct *, u64, bpf_callback_t, void *, u64); + +struct bpf_iter_task_vma_kern_data { + struct task_struct *task; + struct mm_struct *mm; + struct mmap_unlock_irq_work *work; + struct vma_iterator vmi; +}; + +struct bpf_iter_task_vma { + __u64 __opaque[1]; +}; + +struct bpf_iter_task_vma_kern { + struct bpf_iter_task_vma_kern_data *data; + long: 32; +}; + +struct bpf_iter_css_task { + __u64 __opaque[1]; +}; + +struct bpf_iter_css_task_kern { + struct css_task_iter *css_it; + long: 32; +}; + +struct bpf_iter_task { + __u64 __opaque[3]; +}; + +struct bpf_iter_task_kern { + struct task_struct *task; + struct task_struct *pos; + unsigned int flags; + long: 32; +}; + +enum { + BPF_TASK_ITER_ALL_PROCS = 0, + BPF_TASK_ITER_ALL_THREADS = 1, + BPF_TASK_ITER_PROC_THREADS = 2, +}; + +struct trace_event_raw_oom_score_adj_update { + struct trace_entry ent; + pid_t pid; + char comm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_reclaim_retry_zone { + struct trace_entry ent; + int node; + int zone_idx; + int order; + long unsigned int reclaimable; + long unsigned int available; + long unsigned int min_wmark; + int no_progress_loops; + bool wmark_check; + char __data[0]; +}; + +struct trace_event_raw_mark_victim { + struct trace_entry ent; + int pid; + u32 __data_loc_comm; + long unsigned int total_vm; + long unsigned int anon_rss; + long 
unsigned int file_rss; + long unsigned int shmem_rss; + uid_t uid; + long unsigned int pgtables; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_wake_reaper { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_start_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_finish_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_skip_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_compact_retry { + struct trace_entry ent; + int order; + int priority; + int result; + int retries; + int max_retries; + bool ret; + char __data[0]; +}; + +struct trace_event_data_offsets_oom_score_adj_update {}; + +struct trace_event_data_offsets_reclaim_retry_zone {}; + +struct trace_event_data_offsets_mark_victim { + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_wake_reaper {}; + +struct trace_event_data_offsets_start_task_reaping {}; + +struct trace_event_data_offsets_finish_task_reaping {}; + +struct trace_event_data_offsets_skip_task_reaping {}; + +struct trace_event_data_offsets_compact_retry {}; + +typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *); + +typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, long unsigned int, long unsigned int, long unsigned int, int, bool); + +typedef void (*btf_trace_mark_victim)(void *, struct task_struct *, uid_t); + +typedef void (*btf_trace_wake_reaper)(void *, int); + +typedef void (*btf_trace_start_task_reaping)(void *, int); + +typedef void (*btf_trace_finish_task_reaping)(void *, int); + +typedef void (*btf_trace_skip_task_reaping)(void *, int); + +typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, enum compact_result, int, int, bool); + +struct trace_event_raw_mmap_lock { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + char __data[0]; +}; + +struct trace_event_raw_mmap_lock_acquire_returned { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + bool success; + char __data[0]; +}; + +struct trace_event_data_offsets_mmap_lock { + u32 memcg_path; + const void *memcg_path_ptr_; +}; + +struct trace_event_data_offsets_mmap_lock_acquire_returned { + u32 memcg_path; + const void *memcg_path_ptr_; +}; + +typedef void (*btf_trace_mmap_lock_start_locking)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_released)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_acquire_returned)(void *, struct mm_struct *, const char *, bool, bool); + +typedef int cydp_t; + +struct madvise_walk_private { + struct mmu_gather *tlb; + bool pageout; +}; + +struct simple_transaction_argresp { + ssize_t size; + char data[0]; +}; + +enum { + DIR_OFFSET_MIN = 2, +}; + +struct simple_attr { + int (*get)(void *, u64 *); + int (*set)(void *, u64); + char get_buf[24]; + char set_buf[24]; + void *data; + const char *fmt; + struct mutex mutex; +}; + +enum SHIFT_DIRECTION { + SHIFT_LEFT = 0, + SHIFT_RIGHT = 1, +}; + +struct ext4_extent_tail { + __le32 et_checksum; +}; + +enum btrfs_csum_type { + BTRFS_CSUM_TYPE_CRC32 = 0, + BTRFS_CSUM_TYPE_XXHASH = 1, + BTRFS_CSUM_TYPE_SHA256 = 2, + BTRFS_CSUM_TYPE_BLAKE2 = 3, +}; + +enum btrfs_mod_log_op { + BTRFS_MOD_LOG_KEY_REPLACE = 0, + BTRFS_MOD_LOG_KEY_ADD = 1, + BTRFS_MOD_LOG_KEY_REMOVE = 2, + 
BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING = 3, + BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING = 4, + BTRFS_MOD_LOG_MOVE_KEYS = 5, + BTRFS_MOD_LOG_ROOT_REPLACE = 6, +}; + +struct btrfs_csums { + u16 size; + const char name[10]; + const char driver[12]; +}; + +struct btrfs_dev_replace_item { + __le64 src_devid; + __le64 cursor_left; + __le64 cursor_right; + __le64 cont_reading_from_srcdev_mode; + __le64 replace_state; + __le64 time_started; + __le64 time_stopped; + __le64 num_write_errors; + __le64 num_uncorrectable_read_errors; +}; + +struct authenc_esn_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; +}; + +struct crypto_authenc_esn_ctx { + unsigned int reqoff; + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_esn_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; +}; + +struct io_splice { + struct file *file_out; + long: 32; + loff_t off_out; + loff_t off_in; + u64 len; + int splice_fd_in; + unsigned int flags; + struct io_rsrc_node *rsrc_node; + long: 32; +}; + +union handle_parts { + depot_stack_handle_t handle; + struct { + u32 pool_index_plus_1: 17; + u32 offset: 10; + u32 extra: 5; + }; +}; + +struct stack_record { + struct list_head hash_list; + u32 hash; + u32 size; + union handle_parts handle; + refcount_t count; + union { + long unsigned int entries[64]; + struct { + struct list_head free_list; + long unsigned int rcu_state; + }; + }; +}; + +typedef u32 depot_flags_t; + +enum depot_counter_id { + DEPOT_COUNTER_REFD_ALLOCS = 0, + DEPOT_COUNTER_REFD_FREES = 1, + DEPOT_COUNTER_REFD_INUSE = 2, + DEPOT_COUNTER_FREELIST_SIZE = 3, + DEPOT_COUNTER_PERSIST_COUNT = 4, + DEPOT_COUNTER_PERSIST_BYTES = 5, + DEPOT_COUNTER_COUNT = 6, +}; + +typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); + +enum mips_gic_local_interrupt { + GIC_LOCAL_INT_WD = 0, + GIC_LOCAL_INT_COMPARE = 1, + GIC_LOCAL_INT_TIMER = 2, + GIC_LOCAL_INT_PERFCTR = 3, + GIC_LOCAL_INT_SWINT0 = 4, + GIC_LOCAL_INT_SWINT1 = 5, + GIC_LOCAL_INT_FDC = 6, + GIC_NUM_LOCAL_INTRS = 7, +}; + +typedef void (*vi_handler_t)(); + +struct gic_all_vpes_chip_data { + u32 map; + bool mask; +}; + +struct ldsem_waiter { + struct list_head list; + struct task_struct *task; +}; + +struct internal_container { + struct klist_node node; + struct attribute_container *cont; + long: 32; + struct device classdev; +}; + +struct ww_class { + atomic_long_t stamp; + struct lock_class_key acquire_key; + struct lock_class_key mutex_key; + const char *acquire_name; + const char *mutex_name; + unsigned int is_wait_die; +}; + +struct dma_resv_list { + struct callback_head rcu; + u32 num_fences; + u32 max_fences; + struct dma_fence *table[0]; +}; + +struct mdio_device_id { + __u32 phy_id; + __u32 phy_id_mask; +}; + +enum led_trigger_netdev_modes { + TRIGGER_NETDEV_LINK = 0, + TRIGGER_NETDEV_LINK_10 = 1, + TRIGGER_NETDEV_LINK_100 = 2, + TRIGGER_NETDEV_LINK_1000 = 3, + TRIGGER_NETDEV_LINK_2500 = 4, + TRIGGER_NETDEV_LINK_5000 = 5, + TRIGGER_NETDEV_LINK_10000 = 6, + TRIGGER_NETDEV_HALF_DUPLEX = 7, + TRIGGER_NETDEV_FULL_DUPLEX = 8, + TRIGGER_NETDEV_TX = 9, + TRIGGER_NETDEV_RX = 10, + TRIGGER_NETDEV_TX_ERR = 11, + TRIGGER_NETDEV_RX_ERR = 12, + __TRIGGER_NETDEV_MAX = 13, +}; + +struct rtl821x_priv { + u16 phycr1; + u16 phycr2; + bool has_phycr2; + struct clk *clk; +}; + +struct iommu_fault_page_request { + u32 flags; + u32 pasid; + u32 grpid; + u32 perm; + u64 addr; + u64 private_data[2]; +}; + +struct iommu_fault { + u32 
type; + long: 32; + struct iommu_fault_page_request prm; +}; + +struct iopf_fault { + struct iommu_fault fault; + struct list_head list; +}; + +struct iommu_attach_handle; + +struct iommu_fault_param; + +struct iopf_group { + struct iopf_fault last_fault; + struct list_head faults; + size_t fault_count; + struct list_head pending_node; + struct work_struct work; + struct iommu_attach_handle *attach_handle; + struct iommu_fault_param *fault_param; + struct list_head node; + u32 cookie; +}; + +struct iommu_fault_param {}; + +struct iommu_domain; + +typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, long unsigned int, int, void *); + +struct iommu_domain_ops; + +struct iommu_domain_geometry { + dma_addr_t aperture_start; + dma_addr_t aperture_end; + bool force_aperture; +}; + +struct iommu_dma_cookie; + +struct iommu_dirty_ops; + +struct iommu_ops; + +struct iommu_domain { + unsigned int type; + const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + const struct iommu_ops *owner; + long unsigned int pgsize_bitmap; + struct iommu_domain_geometry geometry; + struct iommu_dma_cookie *iova_cookie; + int (*iopf_handler)(struct iopf_group *); + void *fault_data; + union { + struct { + iommu_fault_handler_t handler; + void *handler_token; + }; + struct { + struct mm_struct *mm; + int users; + struct list_head next; + }; + }; +}; + +struct iommu_dirty_ops {}; + +struct iommu_ops {}; + +enum xhci_overhead_type { + LS_OVERHEAD_TYPE = 0, + FS_OVERHEAD_TYPE = 1, + HS_OVERHEAD_TYPE = 2, +}; + +enum xhci_setup_dev { + SETUP_CONTEXT_ONLY = 0, + SETUP_CONTEXT_ADDRESS = 1, +}; + +struct of_bus___2 { + void (*count_cells)(const void *, int, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int); + int (*translate)(__be32 *, u64, int); +}; + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(const struct class *, const struct class_attribute *, char *); + ssize_t (*store)(const struct class *, const struct class_attribute *, const char *, size_t); +}; + +struct rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *, char *); + ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); +}; + +struct netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *, char *); + ssize_t (*store)(struct netdev_queue *, const char *, size_t); +}; + +enum { + ETHTOOL_A_STRING_UNSPEC = 0, + ETHTOOL_A_STRING_INDEX = 1, + ETHTOOL_A_STRING_VALUE = 2, + __ETHTOOL_A_STRING_CNT = 3, + ETHTOOL_A_STRING_MAX = 2, +}; + +enum { + ETHTOOL_A_STRINGS_UNSPEC = 0, + ETHTOOL_A_STRINGS_STRING = 1, + __ETHTOOL_A_STRINGS_CNT = 2, + ETHTOOL_A_STRINGS_MAX = 1, +}; + +enum { + ETHTOOL_A_STRINGSET_UNSPEC = 0, + ETHTOOL_A_STRINGSET_ID = 1, + ETHTOOL_A_STRINGSET_COUNT = 2, + ETHTOOL_A_STRINGSET_STRINGS = 3, + __ETHTOOL_A_STRINGSET_CNT = 4, + ETHTOOL_A_STRINGSET_MAX = 3, +}; + +enum { + ETHTOOL_A_STRINGSETS_UNSPEC = 0, + ETHTOOL_A_STRINGSETS_STRINGSET = 1, + __ETHTOOL_A_STRINGSETS_CNT = 2, + ETHTOOL_A_STRINGSETS_MAX = 1, +}; + +struct strset_info { + bool per_dev; + bool free_strings; + unsigned int count; + const char (*strings)[32]; +}; + +struct strset_req_info { + struct ethnl_req_info base; + u32 req_ids; + bool counts_only; +}; + +struct strset_reply_data { + struct ethnl_reply_data base; + struct strset_info sets[21]; +}; + +struct nf_hook_entries_rcu_head { + struct callback_head head; + void *allocation; +}; + +struct nf_ct_hook { + int (*update)(struct net *, struct sk_buff *); + void 
(*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); + void (*attach)(struct sk_buff *, const struct sk_buff *); + void (*set_closing)(struct nf_conntrack *); + int (*confirm)(struct sk_buff *); +}; + +enum nft_ct_keys { + NFT_CT_STATE = 0, + NFT_CT_DIRECTION = 1, + NFT_CT_STATUS = 2, + NFT_CT_MARK = 3, + NFT_CT_SECMARK = 4, + NFT_CT_EXPIRATION = 5, + NFT_CT_HELPER = 6, + NFT_CT_L3PROTOCOL = 7, + NFT_CT_SRC = 8, + NFT_CT_DST = 9, + NFT_CT_PROTOCOL = 10, + NFT_CT_PROTO_SRC = 11, + NFT_CT_PROTO_DST = 12, + NFT_CT_LABELS = 13, + NFT_CT_PKTS = 14, + NFT_CT_BYTES = 15, + NFT_CT_AVGPKT = 16, + NFT_CT_ZONE = 17, + NFT_CT_EVENTMASK = 18, + NFT_CT_SRC_IP = 19, + NFT_CT_DST_IP = 20, + NFT_CT_SRC_IP6 = 21, + NFT_CT_DST_IP6 = 22, + NFT_CT_ID = 23, + __NFT_CT_MAX = 24, +}; + +enum nft_ct_attributes { + NFTA_CT_UNSPEC = 0, + NFTA_CT_DREG = 1, + NFTA_CT_KEY = 2, + NFTA_CT_DIRECTION = 3, + NFTA_CT_SREG = 4, + __NFTA_CT_MAX = 5, +}; + +enum nft_ct_helper_attributes { + NFTA_CT_HELPER_UNSPEC = 0, + NFTA_CT_HELPER_NAME = 1, + NFTA_CT_HELPER_L3PROTO = 2, + NFTA_CT_HELPER_L4PROTO = 3, + __NFTA_CT_HELPER_MAX = 4, +}; + +enum nft_ct_expectation_attributes { + NFTA_CT_EXPECT_UNSPEC = 0, + NFTA_CT_EXPECT_L3PROTO = 1, + NFTA_CT_EXPECT_L4PROTO = 2, + NFTA_CT_EXPECT_DPORT = 3, + NFTA_CT_EXPECT_TIMEOUT = 4, + NFTA_CT_EXPECT_SIZE = 5, + __NFTA_CT_EXPECT_MAX = 6, +}; + +struct nft_ct { + enum nft_ct_keys key: 8; + enum ip_conntrack_dir dir: 8; + u8 len; + union { + u8 dreg; + u8 sreg; + }; +}; + +struct nft_ct_helper_obj { + struct nf_conntrack_helper *helper4; + struct nf_conntrack_helper *helper6; + u8 l4proto; +}; + +struct nft_ct_expect_obj { + u16 l3num; + __be16 dport; + u8 l4proto; + u8 size; + u32 timeout; +}; + +enum nf_br_hook_priorities { + NF_BR_PRI_FIRST = -2147483648, + NF_BR_PRI_NAT_DST_BRIDGED = -300, + NF_BR_PRI_FILTER_BRIDGED = -200, + NF_BR_PRI_BRNF = 0, + NF_BR_PRI_NAT_DST_OTHER = 100, + NF_BR_PRI_FILTER_OTHER = 200, + NF_BR_PRI_NAT_SRC = 300, + NF_BR_PRI_LAST = 2147483647, +}; + +struct brnf_net { + bool enabled; + struct ctl_table_header *ctl_hdr; + int call_iptables; + int call_ip6tables; + int call_arptables; + int filter_vlan_tagged; + int filter_pppoe_tagged; + int pass_vlan_indev; +}; + +struct brnf_frag_data { + local_lock_t bh_lock; + char mac[22]; + u8 encap_size; + u8 size; + u16 vlan_tci; + __be16 vlan_proto; +}; + +enum scx_ent_flags { + SCX_TASK_QUEUED = 1, + SCX_TASK_RESET_RUNNABLE_AT = 4, + SCX_TASK_DEQD_FOR_SLEEP = 8, + SCX_TASK_STATE_SHIFT = 8, + SCX_TASK_STATE_BITS = 2, + SCX_TASK_STATE_MASK = 768, + SCX_TASK_CURSOR = -2147483648, +}; + +enum scx_task_state { + SCX_TASK_NONE = 0, + SCX_TASK_INIT = 1, + SCX_TASK_READY = 2, + SCX_TASK_ENABLED = 3, + SCX_TASK_NR_STATES = 4, +}; + +enum scx_ent_dsq_flags { + SCX_TASK_DSQ_ON_PRIQ = 1, +}; + +enum scx_kf_mask { + SCX_KF_UNLOCKED = 0, + SCX_KF_CPU_RELEASE = 1, + SCX_KF_DISPATCH = 2, + SCX_KF_ENQUEUE = 4, + SCX_KF_SELECT_CPU = 8, + SCX_KF_REST = 16, + __SCX_KF_RQ_LOCKED = 31, + __SCX_KF_TERMINAL = 28, +}; + +enum scx_dsq_lnode_flags { + SCX_DSQ_LNODE_ITER_CURSOR = 1, + __SCX_DSQ_LNODE_PRIV_SHIFT = 16, +}; + +typedef struct { + raw_spinlock_t *lock; +} class_raw_spinlock_t; + +struct cpuidle_state_usage { + long long unsigned int disable; + long long unsigned int usage; + u64 time_ns; + long long unsigned int above; + long long unsigned int below; + long long unsigned int rejected; +}; + +struct cpuidle_device; + +struct cpuidle_driver; + +struct cpuidle_state { + char name[16]; + 
char desc[32]; + s64 exit_latency_ns; + s64 target_residency_ns; + unsigned int flags; + unsigned int exit_latency; + int power_usage; + unsigned int target_residency; + int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); + void (*enter_dead)(struct cpuidle_device *, int); + int (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); + long: 32; +}; + +struct cpuidle_state_kobj; + +struct cpuidle_driver_kobj; + +struct cpuidle_device_kobj; + +struct cpuidle_device { + unsigned int registered: 1; + unsigned int enabled: 1; + unsigned int poll_time_limit: 1; + unsigned int cpu; + ktime_t next_hrtimer; + int last_state_idx; + long: 32; + u64 last_residency_ns; + u64 poll_limit_ns; + u64 forced_idle_latency_limit_ns; + struct cpuidle_state_usage states_usage[10]; + struct cpuidle_state_kobj *kobjs[10]; + struct cpuidle_driver_kobj *kobj_driver; + struct cpuidle_device_kobj *kobj_dev; + struct list_head device_list; +}; + +struct cpuidle_driver { + const char *name; + struct module *owner; + unsigned int bctimer: 1; + long: 32; + struct cpuidle_state states[10]; + int state_count; + int safe_state_index; + struct cpumask *cpumask; + const char *governor; +}; + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_t; + +typedef struct { + struct rq *lock; + struct rq *lock2; +} class_double_rq_lock_t; + +struct idle_timer { + struct hrtimer timer; + int done; + long: 32; +}; + +typedef struct rt_rq *rt_rq_iter_t; + +enum dl_bw_request { + dl_bw_req_check_overflow = 0, + dl_bw_req_alloc = 1, + dl_bw_req_free = 2, +}; + +enum scx_consts { + SCX_DSP_DFL_MAX_BATCH = 32, + SCX_DSP_MAX_LOOPS = 32, + SCX_WATCHDOG_MAX_TIMEOUT = 30000, + SCX_EXIT_BT_LEN = 64, + SCX_EXIT_MSG_LEN = 1024, + SCX_EXIT_DUMP_DFL_LEN = 32768, + SCX_CPUPERF_ONE = 1024, + SCX_OPS_TASK_ITER_BATCH = 32, +}; + +enum scx_exit_kind { + SCX_EXIT_NONE = 0, + SCX_EXIT_DONE = 1, + SCX_EXIT_UNREG = 64, + SCX_EXIT_UNREG_BPF = 65, + SCX_EXIT_UNREG_KERN = 66, + SCX_EXIT_SYSRQ = 67, + SCX_EXIT_ERROR = 1024, + SCX_EXIT_ERROR_BPF = 1025, + SCX_EXIT_ERROR_STALL = 1026, +}; + +enum scx_exit_code { + SCX_ECODE_RSN_HOTPLUG = 4294967296ULL, + SCX_ECODE_ACT_RESTART = 281474976710656ULL, +}; + +struct scx_exit_info { + enum scx_exit_kind kind; + long: 32; + s64 exit_code; + const char *reason; + long unsigned int *bt; + u32 bt_len; + char *msg; + char *dump; + long: 32; +}; + +enum scx_ops_flags { + SCX_OPS_KEEP_BUILTIN_IDLE = 1, + SCX_OPS_ENQ_LAST = 2, + SCX_OPS_ENQ_EXITING = 4, + SCX_OPS_SWITCH_PARTIAL = 8, + SCX_OPS_HAS_CGROUP_WEIGHT = 65536, + SCX_OPS_ALL_FLAGS = 65551, +}; + +struct scx_init_task_args { + bool fork; + struct cgroup *cgroup; +}; + +struct scx_exit_task_args { + bool cancelled; +}; + +struct scx_cgroup_init_args { + u32 weight; +}; + +enum scx_cpu_preempt_reason { + SCX_CPU_PREEMPT_RT = 0, + SCX_CPU_PREEMPT_DL = 1, + SCX_CPU_PREEMPT_STOP = 2, + SCX_CPU_PREEMPT_UNKNOWN = 3, +}; + +struct scx_cpu_acquire_args {}; + +struct scx_cpu_release_args { + enum scx_cpu_preempt_reason reason; + struct task_struct *task; +}; + +struct scx_dump_ctx { + enum scx_exit_kind kind; + long: 32; + s64 exit_code; + const char *reason; + long: 32; + u64 at_ns; + u64 at_jiffies; +}; + +struct sched_ext_ops { + s32 (*select_cpu)(struct task_struct *, s32, u64); + void (*enqueue)(struct task_struct *, u64); + void (*dequeue)(struct task_struct *, u64); + void (*dispatch)(s32, struct task_struct *); + void (*tick)(struct task_struct *); + void (*runnable)(struct task_struct *, u64); + void 
(*running)(struct task_struct *); + void (*stopping)(struct task_struct *, bool); + void (*quiescent)(struct task_struct *, u64); + bool (*yield)(struct task_struct *, struct task_struct *); + bool (*core_sched_before)(struct task_struct *, struct task_struct *); + void (*set_weight)(struct task_struct *, u32); + void (*set_cpumask)(struct task_struct *, const struct cpumask *); + void (*update_idle)(s32, bool); + void (*cpu_acquire)(s32, struct scx_cpu_acquire_args *); + void (*cpu_release)(s32, struct scx_cpu_release_args *); + s32 (*init_task)(struct task_struct *, struct scx_init_task_args *); + void (*exit_task)(struct task_struct *, struct scx_exit_task_args *); + void (*enable)(struct task_struct *); + void (*disable)(struct task_struct *); + void (*dump)(struct scx_dump_ctx *); + void (*dump_cpu)(struct scx_dump_ctx *, s32, bool); + void (*dump_task)(struct scx_dump_ctx *, struct task_struct *); + s32 (*cgroup_init)(struct cgroup *, struct scx_cgroup_init_args *); + void (*cgroup_exit)(struct cgroup *); + s32 (*cgroup_prep_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_cancel_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_set_weight)(struct cgroup *, u32); + void (*cpu_online)(s32); + void (*cpu_offline)(s32); + s32 (*init)(); + void (*exit)(struct scx_exit_info *); + u32 dispatch_max_batch; + u64 flags; + u32 timeout_ms; + u32 exit_dump_len; + u64 hotplug_seq; + char name[128]; +}; + +enum scx_opi { + SCX_OPI_BEGIN = 0, + SCX_OPI_NORMAL_BEGIN = 0, + SCX_OPI_NORMAL_END = 29, + SCX_OPI_CPU_HOTPLUG_BEGIN = 29, + SCX_OPI_CPU_HOTPLUG_END = 31, + SCX_OPI_END = 31, +}; + +enum scx_wake_flags { + SCX_WAKE_FORK = 4, + SCX_WAKE_TTWU = 8, + SCX_WAKE_SYNC = 16, +}; + +enum scx_enq_flags { + SCX_ENQ_WAKEUP = 1ULL, + SCX_ENQ_HEAD = 16ULL, + SCX_ENQ_CPU_SELECTED = 1024ULL, + SCX_ENQ_PREEMPT = 4294967296ULL, + SCX_ENQ_REENQ = 1099511627776ULL, + SCX_ENQ_LAST = 2199023255552ULL, + __SCX_ENQ_INTERNAL_MASK = 18374686479671623680ULL, + SCX_ENQ_CLEAR_OPSS = 72057594037927936ULL, + SCX_ENQ_DSQ_PRIQ = 144115188075855872ULL, +}; + +enum scx_deq_flags { + SCX_DEQ_SLEEP = 1ULL, + SCX_DEQ_CORE_SCHED_EXEC = 4294967296ULL, +}; + +enum scx_pick_idle_cpu_flags { + SCX_PICK_IDLE_CORE = 1, +}; + +enum scx_kick_flags { + SCX_KICK_IDLE = 1, + SCX_KICK_PREEMPT = 2, + SCX_KICK_WAIT = 4, +}; + +enum scx_tg_flags { + SCX_TG_ONLINE = 1, + SCX_TG_INITED = 2, +}; + +enum scx_ops_enable_state { + SCX_OPS_ENABLING = 0, + SCX_OPS_ENABLED = 1, + SCX_OPS_DISABLING = 2, + SCX_OPS_DISABLED = 3, +}; + +enum scx_ops_state { + SCX_OPSS_NONE = 0, + SCX_OPSS_QUEUEING = 1, + SCX_OPSS_QUEUED = 2, + SCX_OPSS_DISPATCHING = 3, + SCX_OPSS_QSEQ_SHIFT = 2, +}; + +struct scx_dsp_buf_ent { + struct task_struct *task; + long unsigned int qseq; + u64 dsq_id; + u64 enq_flags; +}; + +struct scx_dsp_ctx { + struct rq *rq; + u32 cursor; + u32 nr_tasks; + long: 32; + struct scx_dsp_buf_ent buf[0]; +}; + +struct scx_bstr_buf { + u64 data[12]; + char line[1024]; +}; + +struct scx_dump_data { + s32 cpu; + bool first; + s32 cursor; + struct seq_buf *s; + const char *prefix; + long: 32; + struct scx_bstr_buf buf; +}; + +struct trace_event_raw_sched_ext_dump { + struct trace_entry ent; + u32 __data_loc_line; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_ext_dump { + u32 line; + const void *line_ptr_; +}; + +typedef void (*btf_trace_sched_ext_dump)(void *, const char *); + +enum scx_dsq_iter_flags { 
+ SCX_DSQ_ITER_REV = 65536, + __SCX_DSQ_ITER_HAS_SLICE = 1073741824, + __SCX_DSQ_ITER_HAS_VTIME = 2147483648, + __SCX_DSQ_ITER_USER_FLAGS = 65536, + __SCX_DSQ_ITER_ALL_FLAGS = 3221291008, +}; + +struct bpf_iter_scx_dsq_kern { + struct scx_dsq_list_node cursor; + struct scx_dispatch_q *dsq; + long: 32; + u64 slice; + u64 vtime; +}; + +struct bpf_iter_scx_dsq { + u64 __opaque[6]; +}; + +struct scx_task_iter { + struct sched_ext_entity cursor; + struct task_struct *locked; + struct rq *rq; + struct rq_flags rf; + u32 cnt; + long: 32; +}; + +typedef struct task_struct *class_find_get_task_t; + +struct bpf_struct_ops_sched_ext_ops { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_ext_ops data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_timer { + __u64 __opaque[2]; +}; + +struct bpf_wq { + __u64 __opaque[2]; +}; + +struct bpf_list_head { + __u64 __opaque[2]; +}; + +struct bpf_list_node { + __u64 __opaque[3]; +}; + +struct bpf_rb_root { + __u64 __opaque[2]; +}; + +struct bpf_rb_node { + __u64 __opaque[4]; +}; + +struct bpf_refcount { + __u32 __opaque[1]; +}; + +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; + +enum { + BPF_F_TIMER_ABS = 1, + BPF_F_TIMER_CPU_PIN = 2, +}; + +enum bpf_kfunc_flags { + BPF_F_PAD_ZEROS = 1, +}; + +struct bpf_rb_node_kern { + struct rb_node rb_node; + void *owner; +}; + +struct bpf_list_node_kern { + struct list_head list_head; + void *owner; + long: 32; +}; + +typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + +typedef u64 (*btf_bpf_get_smp_processor_id)(); + +typedef u64 (*btf_bpf_get_numa_node_id)(); + +typedef u64 (*btf_bpf_ktime_get_ns)(); + +typedef u64 (*btf_bpf_ktime_get_boot_ns)(); + +typedef u64 (*btf_bpf_ktime_get_coarse_ns)(); + +typedef u64 (*btf_bpf_ktime_get_tai_ns)(); + +typedef u64 (*btf_bpf_get_current_pid_tgid)(); + +typedef u64 (*btf_bpf_get_current_uid_gid)(); + +typedef u64 (*btf_bpf_get_current_comm)(char *, u32); + +typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_spin_unlock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_jiffies64)(); + +typedef u64 (*btf_bpf_get_current_cgroup_id)(); + +typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int); + +typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, s64 *); + +typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, u64 *); + +typedef u64 (*btf_bpf_strncmp)(const char *, u32, const char *); + +typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, 
u32); + +typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_copy_from_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_copy_from_user_task)(void *, u32, const void *, struct task_struct *, u64); + +typedef u64 (*btf_bpf_per_cpu_ptr)(const void *, u32); + +typedef u64 (*btf_bpf_this_cpu_ptr)(const void *); + +struct bpf_bprintf_buffers { + char bin_args[512]; + char buf[1024]; +}; + +typedef u64 (*btf_bpf_snprintf)(char *, u32, char *, const void *, u32); + +struct bpf_async_cb { + struct bpf_map *map; + struct bpf_prog *prog; + void *callback_fn; + void *value; + union { + struct callback_head rcu; + struct work_struct delete_work; + }; + u64 flags; +}; + +struct bpf_hrtimer { + struct bpf_async_cb cb; + struct hrtimer timer; + atomic_t cancelling; + long: 32; +}; + +struct bpf_work { + struct bpf_async_cb cb; + struct work_struct work; + struct work_struct delete_work; +}; + +struct bpf_async_kern { + union { + struct bpf_async_cb *cb; + struct bpf_hrtimer *timer; + struct bpf_work *work; + }; + struct bpf_spin_lock lock; +}; + +enum bpf_async_type { + BPF_ASYNC_TYPE_TIMER = 0, + BPF_ASYNC_TYPE_WQ = 1, +}; + +typedef u64 (*btf_bpf_timer_init)(struct bpf_async_kern *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_timer_set_callback)(struct bpf_async_kern *, void *, struct bpf_prog_aux *); + +typedef u64 (*btf_bpf_timer_start)(struct bpf_async_kern *, u64, u64); + +typedef u64 (*btf_bpf_timer_cancel)(struct bpf_async_kern *); + +typedef u64 (*btf_bpf_kptr_xchg)(void *, void *); + +typedef u64 (*btf_bpf_dynptr_from_mem)(void *, u32, u64, struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_dynptr_read)(void *, u32, const struct bpf_dynptr_kern *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_write)(const struct bpf_dynptr_kern *, u32, void *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_data)(const struct bpf_dynptr_kern *, u32, u32); + +typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32); + +struct bpf_throw_ctx { + struct bpf_prog_aux *aux; + long: 32; + u64 sp; + u64 bp; + int cnt; + long: 32; +}; + +struct bpf_iter_bits { + __u64 __opaque[2]; +}; + +struct bpf_iter_bits_kern { + union { + __u64 *bits; + __u64 bits_copy; + }; + int nr_bits; + int bit; +}; + +struct tcx_link { + struct bpf_link link; + struct net_device *dev; + u32 location; +}; + +struct cachestat_range { + __u64 off; + __u64 len; +}; + +struct cachestat { + __u64 nr_cache; + __u64 nr_dirty; + __u64 nr_writeback; + __u64 nr_evicted; + __u64 nr_recently_evicted; +}; + +struct trace_event_raw_mm_filemap_op_page_cache { + struct trace_entry ent; + long unsigned int pfn; + long unsigned int i_ino; + long unsigned int index; + dev_t s_dev; + unsigned char order; + char __data[0]; +}; + +struct trace_event_raw_mm_filemap_op_page_cache_range { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + long unsigned int index; + long unsigned int last_index; + char __data[0]; +}; + +struct trace_event_raw_mm_filemap_fault { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_filemap_set_wb_err { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + errseq_t errseq; + char __data[0]; +}; + +struct trace_event_raw_file_check_and_advance_wb_err { + struct trace_entry ent; + struct file *file; + long unsigned int i_ino; + dev_t s_dev; + errseq_t old; + errseq_t new; + char __data[0]; +}; + +struct 
trace_event_data_offsets_mm_filemap_op_page_cache {}; + +struct trace_event_data_offsets_mm_filemap_op_page_cache_range {}; + +struct trace_event_data_offsets_mm_filemap_fault {}; + +struct trace_event_data_offsets_filemap_set_wb_err {}; + +struct trace_event_data_offsets_file_check_and_advance_wb_err {}; + +typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_mm_filemap_get_pages)(void *, struct address_space *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_filemap_map_pages)(void *, struct address_space *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_filemap_fault)(void *, struct address_space *, long unsigned int); + +typedef void (*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t); + +typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t); + +enum behavior { + EXCLUSIVE = 0, + SHARED = 1, + DROP = 2, +}; + +struct zbud_pool { + spinlock_t lock; + union { + struct list_head buddied; + struct list_head unbuddied[63]; + }; + long: 32; + u64 pages_nr; +}; + +struct zbud_header { + struct list_head buddy; + unsigned int first_chunks; + unsigned int last_chunks; +}; + +enum buddy { + FIRST = 0, + LAST = 1, +}; + +struct epoll_params { + __u32 busy_poll_usecs; + __u16 busy_poll_budget; + __u8 prefer_busy_poll; + __u8 __pad; +}; + +struct wakeup_source { + const char *name; + int id; + struct list_head entry; + spinlock_t lock; + struct wake_irq *wakeirq; + struct timer_list timer; + long unsigned int timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + long unsigned int event_count; + long unsigned int active_count; + long unsigned int relax_count; + long unsigned int expire_count; + long unsigned int wakeup_count; + struct device *dev; + bool active: 1; + bool autosleep_enabled: 1; + long: 32; +}; + +struct epoll_filefd { + struct file *file; + int fd; +}; + +struct epitem; + +struct eppoll_entry { + struct eppoll_entry *next; + struct epitem *base; + wait_queue_entry_t wait; + wait_queue_head_t *whead; +}; + +struct eventpoll; + +struct epitem { + union { + struct rb_node rbn; + struct callback_head rcu; + }; + struct list_head rdllink; + struct epitem *next; + struct epoll_filefd ffd; + bool dying; + struct eppoll_entry *pwqlist; + struct eventpoll *ep; + struct hlist_node fllink; + struct wakeup_source *ws; + struct epoll_event event; +}; + +struct eventpoll { + struct mutex mtx; + wait_queue_head_t wq; + wait_queue_head_t poll_wait; + struct list_head rdllist; + rwlock_t lock; + struct rb_root_cached rbr; + struct epitem *ovflist; + struct wakeup_source *ws; + struct user_struct *user; + struct file *file; + u64 gen; + struct hlist_head refs; + refcount_t refcount; + unsigned int napi_id; + u32 busy_poll_usecs; + u16 busy_poll_budget; + bool prefer_busy_poll; + long: 32; +}; + +struct ep_pqueue { + poll_table pt; + struct epitem *epi; +}; + +struct epitems_head { + struct hlist_head epitems; + struct epitems_head *next; +}; + +struct ext4_getfsmap_info { + struct ext4_fsmap_head *gfi_head; + ext4_fsmap_format_t gfi_formatter; + void *gfi_format_arg; + long: 32; + ext4_fsblk_t gfi_next_fsblk; + u32 gfi_dev; + ext4_group_t gfi_agno; + struct ext4_fsmap gfi_low; + struct ext4_fsmap gfi_high; + struct ext4_fsmap gfi_lastfree; + struct list_head gfi_meta_list; + 
bool gfi_last; + long: 32; +}; + +struct ext4_getfsmap_dev { + int (*gfd_fn)(struct super_block *, struct ext4_fsmap *, struct ext4_getfsmap_info *); + u32 gfd_dev; +}; + +struct fat_bios_param_block { + u16 fat_sector_size; + u8 fat_sec_per_clus; + u16 fat_reserved; + u8 fat_fats; + u16 fat_dir_entries; + u16 fat_sectors; + u16 fat_fat_length; + u32 fat_total_sect; + u8 fat16_state; + u32 fat16_vol_id; + u32 fat32_length; + u32 fat32_root_cluster; + u16 fat32_info_sector; + u8 fat32_state; + u32 fat32_vol_id; +}; + +struct fat_floppy_defaults { + unsigned int nr_sectors; + unsigned int sec_per_clus; + unsigned int dir_entries; + unsigned int media; + unsigned int fat_length; +}; + +enum { + Opt_check = 0, + Opt_uid___4 = 1, + Opt_gid___5 = 2, + Opt_umask = 3, + Opt_dmask = 4, + Opt_fmask = 5, + Opt_allow_utime = 6, + Opt_codepage = 7, + Opt_usefree = 8, + Opt_nocase = 9, + Opt_quiet = 10, + Opt_showexec = 11, + Opt_debug___2 = 12, + Opt_immutable = 13, + Opt_dots = 14, + Opt_dotsOK = 15, + Opt_charset = 16, + Opt_shortname = 17, + Opt_utf8 = 18, + Opt_utf8_bool = 19, + Opt_uni_xl = 20, + Opt_uni_xl_bool = 21, + Opt_nonumtail = 22, + Opt_nonumtail_bool = 23, + Opt_obsolete = 24, + Opt_flush = 25, + Opt_tz = 26, + Opt_rodir = 27, + Opt_errors___2 = 28, + Opt_discard___3 = 29, + Opt_nfs = 30, + Opt_nfs_enum = 31, + Opt_time_offset = 32, + Opt_dos1xfloppy = 33, +}; + +struct eventfs_root_inode { + struct eventfs_inode ei; + struct dentry *events_dir; +}; + +enum { + EVENTFS_SAVE_MODE = 65536, + EVENTFS_SAVE_UID = 131072, + EVENTFS_SAVE_GID = 262144, +}; + +enum btrfs_tree_block_status { + BTRFS_TREE_BLOCK_CLEAN = 0, + BTRFS_TREE_BLOCK_INVALID_NRITEMS = 1, + BTRFS_TREE_BLOCK_INVALID_PARENT_KEY = 2, + BTRFS_TREE_BLOCK_BAD_KEY_ORDER = 3, + BTRFS_TREE_BLOCK_INVALID_LEVEL = 4, + BTRFS_TREE_BLOCK_INVALID_FREE_SPACE = 5, + BTRFS_TREE_BLOCK_INVALID_OFFSETS = 6, + BTRFS_TREE_BLOCK_INVALID_BLOCKPTR = 7, + BTRFS_TREE_BLOCK_INVALID_ITEM = 8, + BTRFS_TREE_BLOCK_INVALID_OWNER = 9, + BTRFS_TREE_BLOCK_WRITTEN_NOT_SET = 10, +}; + +struct btrfs_dio_data { + ssize_t submitted; + struct extent_changeset *data_reserved; + struct btrfs_ordered_extent *ordered; + bool data_space_reserved; + bool nocow_done; +}; + +struct btrfs_dio_private { + u64 file_offset; + u32 bytes; + long: 32; + struct btrfs_bio bbio; +}; + +struct pkcs7_parse_context { + struct pkcs7_message *msg; + struct pkcs7_signed_info *sinfo; + struct pkcs7_signed_info **ppsinfo; + struct x509_certificate *certs; + struct x509_certificate **ppcerts; + long unsigned int data; + enum OID last_oid; + unsigned int x509_index; + unsigned int sinfo_index; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_skid; + unsigned int raw_skid_size; + bool expect_skid; +}; + +struct disk_events { + struct list_head node; + struct gendisk *disk; + spinlock_t lock; + struct mutex block_mutex; + int block; + unsigned int pending; + unsigned int clearing; + long int poll_msecs; + struct delayed_work dwork; +}; + +struct io_uring_recvmsg_out { + __u32 namelen; + __u32 controllen; + __u32 payloadlen; + __u32 flags; +}; + +struct io_async_msghdr { + struct iovec fast_iov; + struct iovec *free_iov; + int free_iov_nr; + int namelen; + __kernel_size_t controllen; + __kernel_size_t payloadlen; + struct sockaddr *uaddr; + struct msghdr msg; + struct __kernel_sockaddr_storage addr; +}; + +struct io_shutdown { + struct file *file; + int how; +}; + +struct io_accept { + struct file *file; + struct 
sockaddr *addr; + int *addr_len; + int flags; + int iou_flags; + u32 file_slot; + long unsigned int nofile; +}; + +struct io_socket { + struct file *file; + int domain; + int type; + int protocol; + int flags; + u32 file_slot; + long unsigned int nofile; +}; + +struct io_connect { + struct file *file; + struct sockaddr *addr; + int addr_len; + bool in_progress; + bool seen_econnaborted; +}; + +struct io_bind { + struct file *file; + int addr_len; +}; + +struct io_listen { + struct file *file; + int backlog; +}; + +struct io_sr_msg { + struct file *file; + union { + struct compat_msghdr *umsg_compat; + struct user_msghdr *umsg; + void *buf; + }; + int len; + unsigned int done_io; + unsigned int msg_flags; + unsigned int nr_multishot_loops; + u16 flags; + u16 buf_group; + u16 buf_index; + void *msg_control; + struct io_kiocb *notif; +}; + +struct io_recvmsg_multishot_hdr { + struct io_uring_recvmsg_out msg; + struct __kernel_sockaddr_storage addr; +}; + +enum { + NVME_AEN_BIT_NS_ATTR = 8, + NVME_AEN_BIT_FW_ACT = 9, + NVME_AEN_BIT_ANA_CHANGE = 11, + NVME_AEN_BIT_DISC_CHANGE = 31, +}; + +enum { + SWITCHTEC_GAS_MRPC_OFFSET = 0, + SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, + SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, + SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, + SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, + SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, + SWITCHTEC_GAS_NTB_OFFSET = 65536, + SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, +}; + +enum { + SWITCHTEC_NTB_REG_INFO_OFFSET = 0, + SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, + SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, +}; + +struct nt_partition_info { + u32 xlink_enabled; + u32 target_part_low; + u32 target_part_high; + u32 reserved; +}; + +struct ntb_info_regs { + u8 partition_count; + u8 partition_id; + u16 reserved1; + u64 ep_map; + u16 requester_id; + u16 reserved2; + u32 reserved3[4]; + struct nt_partition_info ntp_info[48]; +}; + +struct ntb_ctrl_regs { + u32 partition_status; + u32 partition_op; + u32 partition_ctrl; + u32 bar_setup; + u32 bar_error; + u16 lut_table_entries; + u16 lut_table_offset; + u32 lut_error; + u16 req_id_table_size; + u16 req_id_table_offset; + u32 req_id_error; + u32 reserved1[7]; + struct { + u32 ctl; + u32 win_size; + u64 xlate_addr; + } bar_entry[6]; + struct { + u32 win_size; + u32 reserved[3]; + } bar_ext_entry[6]; + u32 reserved2[192]; + u32 req_id_table[512]; + u32 reserved3[256]; + u64 lut_entry[512]; +}; + +struct pci_dev_reset_methods { + u16 vendor; + u16 device; + int (*reset)(struct pci_dev *, bool); +}; + +struct pci_dev_acs_enabled { + u16 vendor; + u16 device; + int (*acs_enabled)(struct pci_dev *, u16); +}; + +struct pci_dev_acs_ops { + u16 vendor; + u16 device; + int (*enable_acs)(struct pci_dev *); + int (*disable_acs_redir)(struct pci_dev *); +}; + +struct pm_clk_notifier_block { + struct notifier_block nb; + struct dev_pm_domain *pm_domain; + char *con_ids[0]; +}; + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED = 1, + PCE_STATUS_PREPARED = 2, + PCE_STATUS_ENABLED = 3, + PCE_STATUS_ERROR = 4, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; + bool enabled_when_prepared; +}; + +struct fixed_mdio_bus { + struct mii_bus *mii_bus; + struct list_head phys; +}; + +struct fixed_phy { + int addr; + struct phy_device *phydev; + struct fixed_phy_status status; + bool no_carrier; + int (*link_update)(struct net_device *, struct fixed_phy_status *); + struct list_head node; + struct gpio_desc *link_gpiod; +}; + +struct usb_otg_caps { + u16 otg_rev; + 
bool hnp_support; + bool srp_support; + bool adp_support; +}; + +enum usb_dr_mode { + USB_DR_MODE_UNKNOWN = 0, + USB_DR_MODE_HOST = 1, + USB_DR_MODE_PERIPHERAL = 2, + USB_DR_MODE_OTG = 3, +}; + +struct usb_dynid { + struct list_head node; + struct usb_device_id id; +}; + +enum xhci_ep_reset_type { + EP_HARD_RESET = 0, + EP_SOFT_RESET = 1, +}; + +struct ppl_header_entry { + __le64 data_sector; + __le32 pp_size; + __le32 data_size; + __le32 parity_disk; + __le32 checksum; +}; + +struct ppl_header { + __u8 reserved[512]; + __le32 signature; + __le32 padding; + __le64 generation; + __le32 entries_count; + __le32 checksum; + struct ppl_header_entry entries[148]; +}; + +struct ppl_log; + +struct ppl_io_unit { + struct ppl_log *log; + struct page *header_page; + unsigned int entries_count; + unsigned int pp_size; + u64 seq; + struct list_head log_sibling; + struct list_head stripe_list; + atomic_t pending_stripes; + atomic_t pending_flushes; + bool submitted; + long: 32; + struct bio bio; + struct bio_vec biovec[32]; +}; + +struct ppl_conf { + struct mddev *mddev; + struct ppl_log *child_logs; + int count; + int block_size; + u32 signature; + long: 32; + atomic64_t seq; + struct kmem_cache *io_kc; + mempool_t io_pool; + struct bio_set bs; + struct bio_set flush_bs; + int recovered_entries; + int mismatch_count; + struct list_head no_mem_stripes; + spinlock_t no_mem_stripes_lock; + short unsigned int write_hint; + long: 32; +}; + +struct ppl_log { + struct ppl_conf *ppl_conf; + struct md_rdev *rdev; + struct mutex io_mutex; + struct ppl_io_unit *current_io; + spinlock_t io_list_lock; + struct list_head io_list; + sector_t next_io_sector; + unsigned int entry_space; + bool use_multippl; + bool wb_cache_on; + long unsigned int disk_flush_bitmap; + long: 32; +}; + +struct scm_timestamping { + struct __kernel_old_timespec ts[3]; +}; + +struct scm_timestamping64 { + struct __kernel_timespec ts[3]; +}; + +struct tc_qopt_offload_stats { + struct gnet_stats_basic_sync *bstats; + struct gnet_stats_queue *qstats; +}; + +enum tc_mq_command { + TC_MQ_CREATE = 0, + TC_MQ_DESTROY = 1, + TC_MQ_STATS = 2, + TC_MQ_GRAFT = 3, +}; + +struct tc_mq_opt_offload_graft_params { + long unsigned int queue; + u32 child_handle; +}; + +struct tc_mq_qopt_offload { + enum tc_mq_command command; + u32 handle; + union { + struct tc_qopt_offload_stats stats; + struct tc_mq_opt_offload_graft_params graft_params; + }; +}; + +struct mq_sched { + struct Qdisc **qdiscs; +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT = 1, + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE = 2, + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT = 3, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE = 1, + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES = 2, + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY = 3, + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT = 4, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE = 1, + __ETHTOOL_A_TUNNEL_UDP_CNT = 2, + ETHTOOL_A_TUNNEL_UDP_MAX = 1, +}; + +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN = 1, + UDP_TUNNEL_TYPE_GENEVE = 2, + UDP_TUNNEL_TYPE_VXLAN_GPE = 4, +}; + +enum udp_tunnel_nic_info_flags { + UDP_TUNNEL_NIC_INFO_MAY_SLEEP = 1, + UDP_TUNNEL_NIC_INFO_OPEN_ONLY = 2, + UDP_TUNNEL_NIC_INFO_IPV4_ONLY = 4, + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = 8, +}; + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + long unsigned int ifindex; +}; + +enum nft_byteorder_ops { + 
NFT_BYTEORDER_NTOH = 0, + NFT_BYTEORDER_HTON = 1, +}; + +enum nft_byteorder_attributes { + NFTA_BYTEORDER_UNSPEC = 0, + NFTA_BYTEORDER_SREG = 1, + NFTA_BYTEORDER_DREG = 2, + NFTA_BYTEORDER_OP = 3, + NFTA_BYTEORDER_LEN = 4, + NFTA_BYTEORDER_SIZE = 5, + __NFTA_BYTEORDER_MAX = 6, +}; + +struct nft_byteorder { + u8 sreg; + u8 dreg; + enum nft_byteorder_ops op: 8; + u8 len; + u8 size; +}; + +struct br_frame_type { + __be16 type; + int (*frame_handler)(struct net_bridge_port *, struct sk_buff *); + struct hlist_node list; +}; + +struct ce_unbind { + struct clock_event_device *ce; + int res; +}; + +struct tracer_stat { + const char *name; + void * (*stat_start)(struct tracer_stat *); + void * (*stat_next)(void *, int); + cmp_func_t stat_cmp; + int (*stat_show)(struct seq_file *, void *); + void (*stat_release)(void *); + int (*stat_headers)(struct seq_file *); +}; + +struct stat_node { + struct rb_node node; + void *stat; +}; + +struct stat_session { + struct list_head session_list; + struct tracer_stat *ts; + struct rb_root stat_root; + struct mutex stat_mutex; + struct dentry *file; +}; + +struct bpf_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; + long: 32; + u64 delegate_cmds; + u64 delegate_maps; + u64 delegate_progs; + u64 delegate_attachs; +}; + +struct bpf_preload_info { + char link_name[16]; + struct bpf_link *link; +}; + +struct bpf_preload_ops { + int (*preload)(struct bpf_preload_info *); + struct module *owner; +}; + +enum bpf_type { + BPF_TYPE_UNSPEC = 0, + BPF_TYPE_PROG = 1, + BPF_TYPE_MAP = 2, + BPF_TYPE_LINK = 3, +}; + +struct map_iter { + void *key; + bool done; +}; + +struct bpffs_btf_enums { + const struct btf *btf; + const struct btf_type *cmd_t; + const struct btf_type *map_t; + const struct btf_type *prog_t; + const struct btf_type *attach_t; +}; + +enum { + OPT_UID = 0, + OPT_GID = 1, + OPT_MODE = 2, + OPT_DELEGATE_CMDS = 3, + OPT_DELEGATE_MAPS = 4, + OPT_DELEGATE_PROGS = 5, + OPT_DELEGATE_ATTACHS = 6, +}; + +enum bpf_stack_build_id_status { + BPF_STACK_BUILD_ID_EMPTY = 0, + BPF_STACK_BUILD_ID_VALID = 1, + BPF_STACK_BUILD_ID_IP = 2, +}; + +struct bpf_stack_build_id { + __s32 status; + unsigned char build_id[20]; + union { + __u64 offset; + __u64 ip; + }; +}; + +enum { + BPF_F_SKIP_FIELD_MASK = 255, + BPF_F_USER_STACK = 256, + BPF_F_FAST_STACK_CMP = 512, + BPF_F_REUSE_STACKID = 1024, + BPF_F_USER_BUILD_ID = 2048, +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = 18446744073709551584ULL, + PERF_CONTEXT_KERNEL = 18446744073709551488ULL, + PERF_CONTEXT_USER = 18446744073709551104ULL, + PERF_CONTEXT_GUEST = 18446744073709549568ULL, + PERF_CONTEXT_GUEST_KERNEL = 18446744073709549440ULL, + PERF_CONTEXT_GUEST_USER = 18446744073709549056ULL, + PERF_CONTEXT_MAX = 18446744073709547521ULL, +}; + +struct pcpu_freelist_node; + +struct pcpu_freelist_head { + struct pcpu_freelist_node *first; + raw_spinlock_t lock; +}; + +struct pcpu_freelist_node { + struct pcpu_freelist_node *next; +}; + +struct pcpu_freelist { + struct pcpu_freelist_head *freelist; + struct pcpu_freelist_head extralist; +}; + +struct stack_map_bucket { + struct pcpu_freelist_node fnode; + u32 hash; + u32 nr; + long: 32; + u64 data[0]; +}; + +struct bpf_stack_map { + struct bpf_map map; + void *elems; + struct pcpu_freelist freelist; + u32 n_buckets; + struct stack_map_bucket *buckets[0]; + long: 32; +}; + +typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stackid_pe)(struct bpf_perf_event_data_kern *, struct bpf_map *, u64); + +typedef u64 
(*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_sleepable)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack_sleepable)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_pe)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct swap_cgroup_ctrl { + struct page **map; + long unsigned int length; + spinlock_t lock; +}; + +struct swap_cgroup { + short unsigned int id; +}; + +struct ext4_xattr_entry { + __u8 e_name_len; + __u8 e_name_index; + __le16 e_value_offs; + __le32 e_value_inum; + __le32 e_value_size; + __le32 e_hash; + char e_name[0]; +}; + +struct ext4_xattr_info { + const char *name; + const void *value; + size_t value_len; + int name_index; + int in_inode; +}; + +struct ext4_xattr_search { + struct ext4_xattr_entry *first; + void *base; + void *end; + struct ext4_xattr_entry *here; + int not_found; +}; + +struct ext4_xattr_ibody_find { + struct ext4_xattr_search s; + struct ext4_iloc iloc; +}; + +struct cdrom_msf0 { + __u8 minute; + __u8 second; + __u8 frame; +}; + +union cdrom_addr { + struct cdrom_msf0 msf; + int lba; +}; + +struct cdrom_tocentry { + __u8 cdte_track; + __u8 cdte_adr: 4; + __u8 cdte_ctrl: 4; + __u8 cdte_format; + union cdrom_addr cdte_addr; + __u8 cdte_datamode; +}; + +struct cdrom_multisession { + union cdrom_addr addr; + __u8 xa_flag; + __u8 addr_format; +}; + +struct cdrom_mcn { + __u8 medium_catalog_number[14]; +}; + +struct packet_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct scsi_sense_hdr *sshdr; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +struct cdrom_device_ops; + +struct cdrom_device_info { + const struct cdrom_device_ops *ops; + struct list_head list; + struct gendisk *disk; + void *handle; + int mask; + int speed; + int capacity; + unsigned int options: 30; + unsigned int mc_flags: 2; + unsigned int vfs_events; + unsigned int ioctl_events; + int use_count; + char name[20]; + __u8 sanyo_slot: 2; + __u8 keeplocked: 1; + __u8 reserved: 5; + int cdda_method; + __u8 last_sense; + __u8 media_written; + short unsigned int mmc3_profile; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; + bool opened_for_data; + long: 32; + __s64 last_media_change_ms; +}; + +struct cdrom_device_ops { + int (*open)(struct cdrom_device_info *, int); + void (*release)(struct cdrom_device_info *); + int (*drive_status)(struct cdrom_device_info *, int); + unsigned int (*check_events)(struct cdrom_device_info *, unsigned int, int); + int (*tray_move)(struct cdrom_device_info *, int); + int (*lock_door)(struct cdrom_device_info *, int); + int (*select_speed)(struct cdrom_device_info *, long unsigned int); + int (*get_last_session)(struct cdrom_device_info *, struct cdrom_multisession *); + int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *); + int (*reset)(struct cdrom_device_info *); + int (*audio_ioctl)(struct cdrom_device_info *, unsigned int, void *); + int (*generic_packet)(struct cdrom_device_info *, struct packet_command *); + int (*read_cdda_bpc)(struct cdrom_device_info *, void *, u32, u32, u8 *); + const int capability; +}; + +struct iso_volume_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 data[2041]; +}; + +struct iso_primary_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 unused1[1]; + char system_id[32]; + char 
volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 unused3[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 opt_type_l_path_table[4]; + __u8 type_m_path_table[4]; + __u8 opt_type_m_path_table[4]; + __u8 root_directory_record[34]; + char volume_set_id[128]; + char publisher_id[128]; + char preparer_id[128]; + char application_id[128]; + char copyright_file_id[37]; + char abstract_file_id[37]; + char bibliographic_file_id[37]; + __u8 creation_date[17]; + __u8 modification_date[17]; + __u8 expiration_date[17]; + __u8 effective_date[17]; + __u8 file_structure_version[1]; + __u8 unused4[1]; + __u8 application_data[512]; + __u8 unused5[653]; +}; + +struct iso_supplementary_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 flags[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 escape[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 opt_type_l_path_table[4]; + __u8 type_m_path_table[4]; + __u8 opt_type_m_path_table[4]; + __u8 root_directory_record[34]; + char volume_set_id[128]; + char publisher_id[128]; + char preparer_id[128]; + char application_id[128]; + char copyright_file_id[37]; + char abstract_file_id[37]; + char bibliographic_file_id[37]; + __u8 creation_date[17]; + __u8 modification_date[17]; + __u8 expiration_date[17]; + __u8 effective_date[17]; + __u8 file_structure_version[1]; + __u8 unused4[1]; + __u8 application_data[512]; + __u8 unused5[653]; +}; + +struct hs_volume_descriptor { + __u8 foo[8]; + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 data[2033]; +}; + +struct hs_primary_descriptor { + __u8 foo[8]; + __u8 type[1]; + __u8 id[5]; + __u8 version[1]; + __u8 unused1[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 unused3[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 unused4[28]; + __u8 root_directory_record[34]; +}; + +struct isofs_options { + unsigned int rock: 1; + unsigned int joliet: 1; + unsigned int cruft: 1; + unsigned int hide: 1; + unsigned int showassoc: 1; + unsigned int nocompress: 1; + unsigned int overriderockperm: 1; + unsigned int uid_set: 1; + unsigned int gid_set: 1; + unsigned char map; + unsigned char check; + unsigned int blocksize; + umode_t fmode; + umode_t dmode; + kgid_t gid; + kuid_t uid; + char *iocharset; + s32 session; + s32 sbsector; +}; + +enum { + Opt_block = 0, + Opt_check___2 = 1, + Opt_cruft = 2, + Opt_gid___6 = 3, + Opt_ignore___2 = 4, + Opt_iocharset = 5, + Opt_map = 6, + Opt_mode___4 = 7, + Opt_nojoliet = 8, + Opt_norock = 9, + Opt_sb___2 = 10, + Opt_session = 11, + Opt_uid___5 = 12, + Opt_unhide = 13, + Opt_utf8___2 = 14, + Opt_err___5 = 15, + Opt_nocompress = 16, + Opt_hide = 17, + Opt_showassoc = 18, + Opt_dmode = 19, + Opt_overriderockperm = 20, +}; + +struct isofs_iget5_callback_data { + long unsigned int block; + long unsigned int offset; +}; + +struct root_name_map { + u64 id; + const char *name; + long: 32; +}; + +struct msgbuf; + +struct ipc_kludge { + struct msgbuf *msgp; + long int msgtyp; +}; + +struct xxhash64_tfm_ctx { + u64 seed; +}; + +struct xxhash64_desc_ctx { + struct xxh64_state xxhstate; +}; + +enum { + REQ_FSEQ_PREFLUSH = 1, + REQ_FSEQ_DATA = 
2, + REQ_FSEQ_POSTFLUSH = 4, + REQ_FSEQ_DONE = 8, + REQ_FSEQ_ACTIONS = 7, + FLUSH_PENDING_TIMEOUT = 5000, +}; + +enum { + IO_SQ_THREAD_SHOULD_STOP = 0, + IO_SQ_THREAD_SHOULD_PARK = 1, +}; + +typedef enum { + trustInput = 0, + checkMaxSymbolValue = 1, +} HIST_checkInput_e; + +typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1, +} ZSTD_defaultPolicy_e; + +typedef struct { + S16 norm[53]; + U32 wksp[285]; +} ZSTD_BuildCTableWksp; + +struct clk_composite { + struct clk_hw hw; + struct clk_ops ops; + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + const struct clk_ops *mux_ops; + const struct clk_ops *rate_ops; + const struct clk_ops *gate_ops; +}; + +struct splice_desc { + size_t total_len; + unsigned int len; + unsigned int flags; + union { + void *userptr; + struct file *file; + void *data; + } u; + void (*splice_eof)(struct splice_desc *); + long: 32; + loff_t pos; + loff_t *opos; + size_t num_spliced; + bool need_wakeup; + long: 32; +}; + +typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); + +struct memdev { + const char *name; + const struct file_operations *fops; + fmode_t fmode; + umode_t mode; +}; + +typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); + +typedef struct { + spinlock_t *lock; +} class_spinlock_irq_t; + +struct class_info { + int class; + char *class_name; +}; + +struct input_dev_poller { + void (*poll)(struct input_dev *); + unsigned int poll_interval; + unsigned int poll_interval_max; + unsigned int poll_interval_min; + struct input_dev *input; + struct delayed_work work; +}; + +struct ptp_clock_caps { + int max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int pps; + int n_pins; + int cross_timestamping; + int adjust_phase; + int max_phase_adj; + int rsv[11]; +}; + +struct ptp_sys_offset { + unsigned int n_samples; + unsigned int rsv[3]; + struct ptp_clock_time ts[51]; +}; + +struct ptp_sys_offset_extended { + unsigned int n_samples; + __kernel_clockid_t clockid; + unsigned int rsv[2]; + struct ptp_clock_time ts[75]; +}; + +struct ptp_sys_offset_precise { + struct ptp_clock_time device; + struct ptp_clock_time sys_realtime; + struct ptp_clock_time sys_monoraw; + unsigned int rsv[4]; +}; + +struct amba_cs_uci_id { + unsigned int devarch; + unsigned int devarch_mask; + unsigned int devtype; + void *data; +}; + +struct amba_device { + struct device dev; + struct resource res; + struct clk *pclk; + struct device_dma_parameters dma_parms; + unsigned int periphid; + struct mutex periphid_lock; + unsigned int cid; + struct amba_cs_uci_id uci; + unsigned int irq[9]; + const char *driver_override; +}; + +enum netlink_attribute_type { + NL_ATTR_TYPE_INVALID = 0, + NL_ATTR_TYPE_FLAG = 1, + NL_ATTR_TYPE_U8 = 2, + NL_ATTR_TYPE_U16 = 3, + NL_ATTR_TYPE_U32 = 4, + NL_ATTR_TYPE_U64 = 5, + NL_ATTR_TYPE_S8 = 6, + NL_ATTR_TYPE_S16 = 7, + NL_ATTR_TYPE_S32 = 8, + NL_ATTR_TYPE_S64 = 9, + NL_ATTR_TYPE_BINARY = 10, + NL_ATTR_TYPE_STRING = 11, + NL_ATTR_TYPE_NUL_STRING = 12, + NL_ATTR_TYPE_NESTED = 13, + NL_ATTR_TYPE_NESTED_ARRAY = 14, + NL_ATTR_TYPE_BITFIELD32 = 15, + NL_ATTR_TYPE_SINT = 16, + NL_ATTR_TYPE_UINT = 17, +}; + +enum netlink_policy_type_attr { + NL_POLICY_TYPE_ATTR_UNSPEC = 0, + NL_POLICY_TYPE_ATTR_TYPE = 1, + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2, + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3, + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4, + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5, + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6, + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7, + 
NL_POLICY_TYPE_ATTR_POLICY_IDX = 8, + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9, + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10, + NL_POLICY_TYPE_ATTR_PAD = 11, + NL_POLICY_TYPE_ATTR_MASK = 12, + __NL_POLICY_TYPE_ATTR_MAX = 13, + NL_POLICY_TYPE_ATTR_MAX = 12, +}; + +struct netlink_policy_dump_state { + unsigned int policy_idx; + unsigned int attr_idx; + unsigned int n_alloc; + struct { + const struct nla_policy *policy; + unsigned int maxtype; + } policies[0]; +}; + +enum { + ETHTOOL_A_PROFILE_UNSPEC = 0, + ETHTOOL_A_PROFILE_IRQ_MODERATION = 1, + __ETHTOOL_A_PROFILE_CNT = 2, + ETHTOOL_A_PROFILE_MAX = 1, +}; + +enum { + ETHTOOL_A_IRQ_MODERATION_UNSPEC = 0, + ETHTOOL_A_IRQ_MODERATION_USEC = 1, + ETHTOOL_A_IRQ_MODERATION_PKTS = 2, + ETHTOOL_A_IRQ_MODERATION_COMPS = 3, + __ETHTOOL_A_IRQ_MODERATION_CNT = 4, + ETHTOOL_A_IRQ_MODERATION_MAX = 3, +}; + +struct coalesce_reply_data { + struct ethnl_reply_data base; + struct ethtool_coalesce coalesce; + struct kernel_ethtool_coalesce kernel_coalesce; + u32 supported_params; +}; + +struct nf_conntrack_nat_helper { + struct list_head list; + char mod_name[16]; + struct module *module; +}; + +enum nft_range_ops { + NFT_RANGE_EQ = 0, + NFT_RANGE_NEQ = 1, +}; + +enum nft_range_attributes { + NFTA_RANGE_UNSPEC = 0, + NFTA_RANGE_SREG = 1, + NFTA_RANGE_OP = 2, + NFTA_RANGE_FROM_DATA = 3, + NFTA_RANGE_TO_DATA = 4, + __NFTA_RANGE_MAX = 5, +}; + +struct nft_range_expr { + struct nft_data data_from; + struct nft_data data_to; + u8 sreg; + u8 len; + enum nft_range_ops op: 8; + long: 32; +}; + +struct nf_nat_range { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; +}; + +struct udp_dev_scratch { + u32 _tsize_state; +}; + +struct udp_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct bpf_iter__udp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct udp_sock *udp_sk; + }; + uid_t uid; + long: 32; + int bucket; + long: 32; +}; + +struct bpf_udp_iter_state { + struct udp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + int offset; + struct sock **batch; + bool st_bucket_done; +}; + +struct nduseroptmsg { + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + short unsigned int nduseropt_opts_len; + int nduseropt_ifindex; + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + short unsigned int nduseropt_pad2; + unsigned int nduseropt_pad3; +}; + +enum { + NDUSEROPT_UNSPEC = 0, + NDUSEROPT_SRCADDR = 1, + __NDUSEROPT_MAX = 2, +}; + +struct rs_msg { + struct icmp6hdr icmph; + __u8 opt[0]; +}; + +struct ra_msg { + struct icmp6hdr icmph; + __be32 reachable_time; + __be32 retrans_timer; +}; + +struct nda_cacheinfo { + __u32 ndm_confirmed; + __u32 ndm_used; + __u32 ndm_updated; + __u32 ndm_refcnt; +}; + +enum { + FDB_NOTIFY_BIT = 1, + FDB_NOTIFY_INACTIVE_BIT = 2, +}; + +enum { + NFEA_UNSPEC = 0, + NFEA_ACTIVITY_NOTIFY = 1, + NFEA_DONT_REFRESH = 2, + __NFEA_MAX = 3, +}; + +struct __fdb_entry { + __u8 mac_addr[6]; + __u8 port_no; + __u8 is_local; + __u32 ageing_timer_value; + __u8 port_hi; + __u8 pad0; + __u16 unused; +}; + +struct fdt_errtabent { + const char *str; +}; + +struct ida_bitmap { + long unsigned int bitmap[32]; +}; + +enum { + KERNEL_PARAM_OPS_FL_NOARG = 1, +}; + +struct param_attribute { + struct module_attribute mattr; + const struct kernel_param *param; +}; + +struct module_param_attrs { + unsigned int num; + struct attribute_group grp; + struct param_attribute 
attrs[0]; +}; + +struct kmalloced_param { + struct list_head list; + char val[0]; +}; + +enum { + MEMREMAP_WB = 1, + MEMREMAP_WT = 2, + MEMREMAP_WC = 4, + MEMREMAP_ENC = 8, + MEMREMAP_DEC = 16, +}; + +struct dma_coherent_mem { + void *virt_base; + dma_addr_t device_base; + long unsigned int pfn_base; + int size; + long unsigned int *bitmap; + spinlock_t spinlock; + bool use_dev_dma_pfn_offset; +}; + +struct trace_event_raw_rpm_internal { + struct trace_entry ent; + u32 __data_loc_name; + int flags; + int usage_count; + int disable_depth; + int runtime_auto; + int request_pending; + int irq_safe; + int child_count; + char __data[0]; +}; + +struct trace_event_raw_rpm_return_int { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int ip; + int ret; + char __data[0]; +}; + +struct trace_event_raw_rpm_status { + struct trace_entry ent; + u32 __data_loc_name; + int status; + char __data[0]; +}; + +struct trace_event_data_offsets_rpm_internal { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_rpm_return_int { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_rpm_status { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_rpm_suspend)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_resume)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_idle)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_usage)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_return_int)(void *, struct device *, long unsigned int, int); + +typedef void (*btf_trace_rpm_status)(void *, struct device *, enum rpm_status); + +struct bpf_lru_node { + struct list_head list; + u16 cpu; + u8 type; + u8 ref; +}; + +struct bpf_lru_list { + struct list_head lists[3]; + unsigned int counts[2]; + struct list_head *next_inactive_rotation; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + raw_spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_lru_locallist { + struct list_head lists[2]; + u16 next_steal; + raw_spinlock_t lock; +}; + +struct bpf_common_lru { + struct bpf_lru_list lru_list; + struct bpf_lru_locallist *local_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); + +struct bpf_lru { + union { + struct bpf_common_lru common_lru; + struct bpf_lru_list *percpu_lru; + }; + del_from_htab_func del_from_htab; + void *del_arg; + unsigned int hash_offset; + unsigned int nr_scans; + bool percpu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; 
+ long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bucket { + struct hlist_nulls_head head; + raw_spinlock_t raw_lock; +}; + +struct htab_elem; + +struct bpf_htab { + struct bpf_map map; + struct bpf_mem_alloc ma; + struct bpf_mem_alloc pcpu_ma; + struct bucket *buckets; + void *elems; + union { + struct pcpu_freelist freelist; + struct bpf_lru lru; + }; + struct htab_elem **extra_elems; + long: 32; + struct percpu_counter pcount; + atomic_t count; + bool use_percpu_counter; + u32 n_buckets; + u32 elem_size; + u32 hashrnd; + struct lock_class_key lockdep_key; + int *map_locked[8]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct htab_elem { + union { + struct hlist_nulls_node hash_node; + struct { + void *padding; + union { + struct pcpu_freelist_node fnode; + struct htab_elem *batch_flink; + }; + }; + }; + union { + void *ptr_to_pptr; + struct bpf_lru_node lru_node; + }; + u32 hash; + char key[0]; +}; + +struct bpf_iter_seq_hash_map_info { + struct bpf_map *map; + struct bpf_htab *htab; + void *percpu_value_buf; + u32 bucket_id; + u32 skip_elems; +}; + +struct shmem_quota_limits { + qsize_t usrquota_bhardlimit; + qsize_t usrquota_ihardlimit; + qsize_t grpquota_bhardlimit; + qsize_t grpquota_ihardlimit; +}; + +struct shmem_sb_info { + long unsigned int max_blocks; + long: 32; + struct percpu_counter used_blocks; + long unsigned int max_inodes; + long unsigned int free_ispace; + raw_spinlock_t stat_lock; + umode_t mode; + unsigned char huge; + kuid_t uid; + kgid_t gid; + bool full_inums; + bool noswap; + ino_t next_ino; + ino_t *ino_batch; + struct mempolicy *mpol; + spinlock_t shrinklist_lock; + struct list_head shrinklist; + long unsigned int shrinklist_len; + struct shmem_quota_limits qlimits; +}; + +enum sgp_type { + SGP_READ = 0, + SGP_NOALLOC = 1, + SGP_CACHE = 2, + SGP_WRITE = 3, + SGP_FALLOC = 4, +}; + +struct shmem_falloc { + wait_queue_head_t *waitq; + long unsigned int start; + long unsigned int next; + long unsigned int nr_falloced; + long unsigned int nr_unswapped; +}; + +struct shmem_options { + long long unsigned int blocks; + long long unsigned int inodes; + struct mempolicy *mpol; + kuid_t uid; + kgid_t gid; + umode_t mode; + bool full_inums; + int huge; + int seen; + bool noswap; + short unsigned int quota_types; + long: 32; + struct shmem_quota_limits qlimits; +}; + +enum shmem_param { + Opt_gid___7 = 0, + Opt_huge = 1, + Opt_mode___5 = 2, + Opt_mpol = 3, + Opt_nr_blocks = 4, + Opt_nr_inodes = 5, + Opt_size = 6, + Opt_uid___6 = 7, + Opt_inode32 = 8, + Opt_inode64 = 9, + Opt_noswap = 10, + Opt_quota___2 = 11, + Opt_usrquota___2 = 12, + Opt_grpquota___2 = 13, + Opt_usrquota_block_hardlimit = 14, + Opt_usrquota_inode_hardlimit = 15, + Opt_grpquota_block_hardlimit = 16, + Opt_grpquota_inode_hardlimit = 17, + Opt_casefold_version = 18, + Opt_casefold = 19, + Opt_strict_encoding = 20, +}; + +enum handle_to_path_flags { + HANDLE_CHECK_PERMS = 1, + HANDLE_CHECK_SUBTREE = 2, +}; + +struct handle_to_path_ctx { + struct path root; + enum handle_to_path_flags flags; + unsigned int fh_flags; +}; + +enum bio_post_read_step { + STEP_INITIAL = 0, + STEP_DECRYPT = 1, + STEP_VERITY = 2, + STEP_MAX = 3, +}; + +struct bio_post_read_ctx { + struct bio *bio; + struct work_struct work; + unsigned int cur_step; + unsigned int enabled_steps; +}; + +struct 
shortname_info { + unsigned char lower: 1; + unsigned char upper: 1; + unsigned char valid: 1; +}; + +struct btrfs_compr_pool { + struct shrinker *shrinker; + spinlock_t lock; + struct list_head list; + int count; + int thresh; +}; + +struct bucket_item { + u32 count; +}; + +struct heuristic_ws { + u8 *sample; + u32 sample_size; + struct bucket_item *bucket; + struct bucket_item *bucket_b; + struct list_head list; +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +struct cryptomgr_param { + struct rtattr *tb[34]; + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } attrs[32]; + char template[128]; + struct crypto_larval *larval; + u32 otype; + u32 omask; +}; + +struct crypto_test_param { + char driver[128]; + char alg[128]; + u32 type; +}; + +enum { + ICQ_EXITED = 4, + ICQ_DESTROYED = 8, +}; + +struct show_busy_params { + struct seq_file *m; + struct blk_mq_hw_ctx *hctx; +}; + +struct io_rename { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct io_unlink { + struct file *file; + int dfd; + int flags; + struct filename *filename; +}; + +struct io_mkdir { + struct file *file; + int dfd; + umode_t mode; + struct filename *filename; +}; + +struct io_link { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +enum dim_cq_period_mode { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 1, + DIM_CQ_PERIOD_NUM_MODES = 2, +}; + +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + __le16 queue_notify_data; + __le16 queue_reset; + __le16 admin_queue_index; + __le16 admin_queue_num; +}; + +struct scsi_dev_info_list { + struct list_head dev_info_list; + char vendor[8]; + char model[16]; + blist_flags_t flags; + unsigned int compatible; + long: 32; +}; + +struct scsi_dev_info_list_table { + struct list_head node; + struct list_head scsi_dev_info_list; + const char *name; + int key; +}; + +enum { + K2_FLAG_SATA_8_PORTS = 16777216, + K2_FLAG_NO_ATAPI_DMA = 33554432, + K2_FLAG_BAR_POS_3 = 67108864, + K2_SATA_TF_CMD_OFFSET = 0, + K2_SATA_TF_DATA_OFFSET = 0, + K2_SATA_TF_ERROR_OFFSET = 4, + K2_SATA_TF_NSECT_OFFSET = 8, + K2_SATA_TF_LBAL_OFFSET = 12, + K2_SATA_TF_LBAM_OFFSET = 16, + K2_SATA_TF_LBAH_OFFSET = 20, + K2_SATA_TF_DEVICE_OFFSET = 24, + K2_SATA_TF_CMDSTAT_OFFSET = 28, + K2_SATA_TF_CTL_OFFSET = 32, + K2_SATA_DMA_CMD_OFFSET = 48, + K2_SATA_SCR_STATUS_OFFSET = 64, + K2_SATA_SCR_ERROR_OFFSET = 68, + K2_SATA_SCR_CONTROL_OFFSET = 72, + K2_SATA_SICR1_OFFSET = 128, + K2_SATA_SICR2_OFFSET = 132, + K2_SATA_SIM_OFFSET = 136, + K2_SATA_PORT_OFFSET = 256, + chip_svw4 = 0, + chip_svw8 = 1, + chip_svw42 = 2, + chip_svw43 = 3, +}; + +struct e1000_opt_list { + int i; + char *str; +}; + +struct e1000_option { + enum { + enable_option = 0, + range_option = 1, + list_option = 2, + } type; + const char *name; + const char *err; + int def; + union { + struct { + int min; + int max; + } r; + struct { + int nr; + struct e1000_opt_list *p; + } l; + } arg; +}; + +struct user_threshold { + struct list_head list_node; + int temperature; + int direction; +}; + +struct trace_event_raw_bpf_trigger_tp { + struct trace_entry ent; + int nonce; + char __data[0]; +}; + +struct trace_event_raw_bpf_test_finish { + struct trace_entry ent; + int err; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trigger_tp {}; + +struct 
trace_event_data_offsets_bpf_test_finish {}; + +typedef void (*btf_trace_bpf_trigger_tp)(void *, int); + +typedef void (*btf_trace_bpf_test_finish)(void *, int *); + +struct bpf_test_timer { + enum { + NO_PREEMPT = 0, + NO_MIGRATE = 1, + } mode; + u32 i; + u64 time_start; + u64 time_spent; +}; + +struct xdp_page_head { + struct xdp_buff orig_ctx; + struct xdp_buff ctx; + union { + struct { + struct {} __empty_frame; + struct xdp_frame frame[0]; + }; + struct { + struct {} __empty_data; + u8 data[0]; + }; + }; +}; + +struct xdp_test_data { + struct xdp_buff *orig_ctx; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xdp_rxq_info rxq; + struct net_device *dev; + struct page_pool *pp; + struct xdp_frame **frames; + struct sk_buff **skbs; + struct xdp_mem_info mem; + u32 batch_size; + u32 frame_cnt; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_fentry_test_t { + struct bpf_fentry_test_t *a; +}; + +struct prog_test_member1 { + int a; +}; + +struct prog_test_member { + struct prog_test_member1 m; + int c; +}; + +struct prog_test_ref_kfunc { + int a; + int b; + struct prog_test_member memb; + struct prog_test_ref_kfunc *next; + refcount_t cnt; +}; + +struct bpf_raw_tp_test_run_info { + struct bpf_prog *prog; + void *ctx; + u32 retval; +}; + +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + +struct nf_queue_handler { + int (*outfn)(struct nf_queue_entry *, unsigned int); + void (*nf_hook_drop)(struct net *); +}; + +enum nft_hook_attributes { + NFTA_HOOK_UNSPEC = 0, + NFTA_HOOK_HOOKNUM = 1, + NFTA_HOOK_PRIORITY = 2, + NFTA_HOOK_DEV = 3, + NFTA_HOOK_DEVS = 4, + __NFTA_HOOK_MAX = 5, +}; + +enum nft_table_flags { + NFT_TABLE_F_DORMANT = 1, + NFT_TABLE_F_OWNER = 2, + NFT_TABLE_F_PERSIST = 4, +}; + +enum nft_table_attributes { + NFTA_TABLE_UNSPEC = 0, + NFTA_TABLE_NAME = 1, + NFTA_TABLE_FLAGS = 2, + NFTA_TABLE_USE = 3, + NFTA_TABLE_HANDLE = 4, + NFTA_TABLE_PAD = 5, + NFTA_TABLE_USERDATA = 6, + NFTA_TABLE_OWNER = 7, + __NFTA_TABLE_MAX = 8, +}; + +enum nft_chain_attributes { + NFTA_CHAIN_UNSPEC = 0, + NFTA_CHAIN_TABLE = 1, + NFTA_CHAIN_HANDLE = 2, + NFTA_CHAIN_NAME = 3, + NFTA_CHAIN_HOOK = 4, + NFTA_CHAIN_POLICY = 5, + NFTA_CHAIN_USE = 6, + NFTA_CHAIN_TYPE = 7, + NFTA_CHAIN_COUNTERS = 8, + NFTA_CHAIN_PAD = 9, + NFTA_CHAIN_FLAGS = 10, + NFTA_CHAIN_ID = 11, + NFTA_CHAIN_USERDATA = 12, + __NFTA_CHAIN_MAX = 13, +}; + +enum nft_set_policies { + NFT_SET_POL_PERFORMANCE = 0, + NFT_SET_POL_MEMORY = 1, +}; + +enum nft_set_desc_attributes { + NFTA_SET_DESC_UNSPEC = 0, + NFTA_SET_DESC_SIZE = 1, + NFTA_SET_DESC_CONCAT = 2, + __NFTA_SET_DESC_MAX = 3, +}; + +enum nft_set_field_attributes { + NFTA_SET_FIELD_UNSPEC = 0, + NFTA_SET_FIELD_LEN = 1, + __NFTA_SET_FIELD_MAX = 2, +}; + +enum nft_set_elem_attributes { + NFTA_SET_ELEM_UNSPEC = 0, + NFTA_SET_ELEM_KEY = 1, + NFTA_SET_ELEM_DATA = 2, + NFTA_SET_ELEM_FLAGS = 3, + NFTA_SET_ELEM_TIMEOUT = 4, + NFTA_SET_ELEM_EXPIRATION = 5, + NFTA_SET_ELEM_USERDATA 
= 6, + NFTA_SET_ELEM_EXPR = 7, + NFTA_SET_ELEM_PAD = 8, + NFTA_SET_ELEM_OBJREF = 9, + NFTA_SET_ELEM_KEY_END = 10, + NFTA_SET_ELEM_EXPRESSIONS = 11, + __NFTA_SET_ELEM_MAX = 12, +}; + +enum nft_set_elem_list_attributes { + NFTA_SET_ELEM_LIST_UNSPEC = 0, + NFTA_SET_ELEM_LIST_TABLE = 1, + NFTA_SET_ELEM_LIST_SET = 2, + NFTA_SET_ELEM_LIST_ELEMENTS = 3, + NFTA_SET_ELEM_LIST_SET_ID = 4, + __NFTA_SET_ELEM_LIST_MAX = 5, +}; + +enum nft_data_attributes { + NFTA_DATA_UNSPEC = 0, + NFTA_DATA_VALUE = 1, + NFTA_DATA_VERDICT = 2, + __NFTA_DATA_MAX = 3, +}; + +enum nft_verdict_attributes { + NFTA_VERDICT_UNSPEC = 0, + NFTA_VERDICT_CODE = 1, + NFTA_VERDICT_CHAIN = 2, + NFTA_VERDICT_CHAIN_ID = 3, + __NFTA_VERDICT_MAX = 4, +}; + +enum nft_expr_attributes { + NFTA_EXPR_UNSPEC = 0, + NFTA_EXPR_NAME = 1, + NFTA_EXPR_DATA = 2, + __NFTA_EXPR_MAX = 3, +}; + +enum nft_gen_attributes { + NFTA_GEN_UNSPEC = 0, + NFTA_GEN_ID = 1, + NFTA_GEN_PROC_PID = 2, + NFTA_GEN_PROC_NAME = 3, + __NFTA_GEN_MAX = 4, +}; + +enum nft_object_attributes { + NFTA_OBJ_UNSPEC = 0, + NFTA_OBJ_TABLE = 1, + NFTA_OBJ_NAME = 2, + NFTA_OBJ_TYPE = 3, + NFTA_OBJ_DATA = 4, + NFTA_OBJ_USE = 5, + NFTA_OBJ_HANDLE = 6, + NFTA_OBJ_PAD = 7, + NFTA_OBJ_USERDATA = 8, + __NFTA_OBJ_MAX = 9, +}; + +enum nft_flowtable_flags { + NFT_FLOWTABLE_HW_OFFLOAD = 1, + NFT_FLOWTABLE_COUNTER = 2, + NFT_FLOWTABLE_MASK = 3, +}; + +enum nft_flowtable_attributes { + NFTA_FLOWTABLE_UNSPEC = 0, + NFTA_FLOWTABLE_TABLE = 1, + NFTA_FLOWTABLE_NAME = 2, + NFTA_FLOWTABLE_HOOK = 3, + NFTA_FLOWTABLE_USE = 4, + NFTA_FLOWTABLE_HANDLE = 5, + NFTA_FLOWTABLE_PAD = 6, + NFTA_FLOWTABLE_FLAGS = 7, + __NFTA_FLOWTABLE_MAX = 8, +}; + +enum nft_flowtable_hook_attributes { + NFTA_FLOWTABLE_HOOK_UNSPEC = 0, + NFTA_FLOWTABLE_HOOK_NUM = 1, + NFTA_FLOWTABLE_HOOK_PRIORITY = 2, + NFTA_FLOWTABLE_HOOK_DEVS = 3, + __NFTA_FLOWTABLE_HOOK_MAX = 4, +}; + +enum nft_devices_attributes { + NFTA_DEVICE_UNSPEC = 0, + NFTA_DEVICE_NAME = 1, + __NFTA_DEVICE_MAX = 2, +}; + +enum nft_data_desc_flags { + NFT_DATA_DESC_SETELEM = 1, +}; + +struct nft_userdata { + u8 len; + unsigned char data[0]; +}; + +struct nft_flowtable { + struct list_head list; + struct nft_table *table; + char *name; + int hooknum; + int ops_len; + u32 genmask: 2; + u32 use; + u64 handle; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct list_head hook_list; + struct nf_flowtable data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nft_trans_set { + struct nft_trans_binding nft_trans_binding; + struct list_head list_trans_newset; + struct nft_set *set; + u32 set_id; + u32 gc_int; + u64 timeout; + bool update; + bool bound; + u32 size; +}; + +struct nft_trans_table { + struct nft_trans nft_trans; + bool update; +}; + +enum nft_trans_elem_flags { + NFT_TRANS_UPD_TIMEOUT = 1, + NFT_TRANS_UPD_EXPIRATION = 2, +}; + +struct nft_elem_update { + u64 timeout; + u64 expiration; + u8 flags; + long: 32; +}; + +struct nft_trans_one_elem { + struct nft_elem_priv *priv; + struct nft_elem_update *update; +}; + +struct nft_trans_elem { + struct nft_trans nft_trans; + struct nft_set *set; + bool bound; + unsigned int nelems; + struct nft_trans_one_elem elems[0]; +}; + +struct 
nft_trans_obj { + struct nft_trans nft_trans; + struct nft_object *obj; + struct nft_object *newobj; + bool update; +}; + +struct nft_trans_flowtable { + struct nft_trans nft_trans; + struct nft_flowtable *flowtable; + struct list_head hook_list; + u32 flags; + bool update; +}; + +enum { + NFT_VALIDATE_SKIP = 0, + NFT_VALIDATE_NEED = 1, + NFT_VALIDATE_DO = 2, +}; + +struct nft_audit_data { + struct nft_table *table; + int entries; + int op; + struct list_head list; +}; + +struct nft_set_elem_catchall { + struct list_head list; + struct callback_head rcu; + struct nft_elem_priv *elem; +}; + +struct nft_module_request { + struct list_head list; + char module[60]; + bool done; +}; + +struct nftnl_skb_parms { + bool report; +}; + +struct nft_chain_hook { + u32 num; + s32 priority; + const struct nft_chain_type *type; + struct list_head list; +}; + +struct nft_rule_dump_ctx { + unsigned int s_idx; + char *table; + char *chain; + bool reset; +}; + +struct nft_set_dump_args { + const struct netlink_callback *cb; + struct nft_set_iter iter; + struct sk_buff *skb; + bool reset; +}; + +struct nft_set_dump_ctx { + const struct nft_set *set; + struct nft_ctx ctx; + bool reset; +}; + +struct nft_obj_dump_ctx { + unsigned int s_idx; + char *table; + u32 type; + bool reset; +}; + +struct nft_flowtable_hook { + u32 num; + int priority; + struct list_head list; +}; + +struct nft_flowtable_filter { + char *table; +}; + +struct ipfrag_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + }; + struct sk_buff *next_frag; + int frag_run_len; + int ip_defrag_offset; +}; + +enum unix_vertex_index { + UNIX_VERTEX_INDEX_MARK1 = 0, + UNIX_VERTEX_INDEX_MARK2 = 1, + UNIX_VERTEX_INDEX_START = 2, +}; + +union net_bridge_eht_addr { + __be32 ip4; + struct in6_addr ip6; +}; + +struct net_bridge_group_eht_host { + struct rb_node rb_node; + union net_bridge_eht_addr h_addr; + struct hlist_head set_entries; + unsigned int num_entries; + unsigned char filter_mode; + struct net_bridge_port_group *pg; +}; + +struct net_bridge_group_eht_set; + +struct net_bridge_group_eht_set_entry { + struct rb_node rb_node; + struct hlist_node host_list; + union net_bridge_eht_addr h_addr; + struct timer_list timer; + struct net_bridge *br; + struct net_bridge_group_eht_set *eht_set; + struct net_bridge_group_eht_host *h_parent; + struct net_bridge_mcast_gc mcast_gc; +}; + +struct net_bridge_group_eht_set { + struct rb_node rb_node; + union net_bridge_eht_addr src_addr; + struct rb_root entry_tree; + struct timer_list timer; + struct net_bridge_port_group *pg; + struct net_bridge *br; + struct net_bridge_mcast_gc mcast_gc; +}; + +enum cu2_ops { + CU2_EXCEPTION = 0, + CU2_LWC2_OP = 1, + CU2_LDC2_OP = 2, + CU2_SWC2_OP = 3, + CU2_SDC2_OP = 4, +}; + +struct __user_cap_header_struct { + __u32 version; + int pid; +}; + +typedef struct __user_cap_header_struct *cap_user_header_t; + +struct __user_cap_data_struct { + __u32 effective; + __u32 permitted; + __u32 inheritable; +}; + +typedef struct __user_cap_data_struct *cap_user_data_t; + +struct async_entry { + struct list_head domain_list; + struct list_head global_list; + struct work_struct work; + async_cookie_t cookie; + async_func_t func; + void *data; + struct async_domain *domain; + long: 32; +}; + +enum rtmutex_chainwalk { + RT_MUTEX_MIN_CHAINWALK = 0, + RT_MUTEX_FULL_CHAINWALK = 1, +}; + +struct saved_cmdlines_buffer { + unsigned int map_pid_to_cmdline[32769]; + unsigned int *map_cmdline_to_pid; + unsigned int cmdline_num; + int cmdline_idx; + char saved_cmdlines[0]; +}; + 
+struct bpf_lpm_trie_key_hdr { + __u32 prefixlen; +}; + +struct bpf_lpm_trie_key_u8 { + union { + struct bpf_lpm_trie_key_hdr hdr; + __u32 prefixlen; + }; + __u8 data[0]; +}; + +struct lpm_trie_node { + struct callback_head rcu; + struct lpm_trie_node *child[2]; + u32 prefixlen; + u32 flags; + u8 data[0]; +}; + +struct lpm_trie { + struct bpf_map map; + struct lpm_trie_node *root; + size_t n_entries; + size_t max_prefixlen; + size_t data_size; + spinlock_t lock; + long: 32; +}; + +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_sw_context = 1, + perf_nr_task_contexts = 2, +}; + +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1, + PERF_SAMPLE_BRANCH_KERNEL = 2, + PERF_SAMPLE_BRANCH_HV = 4, + PERF_SAMPLE_BRANCH_ANY = 8, + PERF_SAMPLE_BRANCH_ANY_CALL = 16, + PERF_SAMPLE_BRANCH_ANY_RETURN = 32, + PERF_SAMPLE_BRANCH_IND_CALL = 64, + PERF_SAMPLE_BRANCH_ABORT_TX = 128, + PERF_SAMPLE_BRANCH_IN_TX = 256, + PERF_SAMPLE_BRANCH_NO_TX = 512, + PERF_SAMPLE_BRANCH_COND = 1024, + PERF_SAMPLE_BRANCH_CALL_STACK = 2048, + PERF_SAMPLE_BRANCH_IND_JUMP = 4096, + PERF_SAMPLE_BRANCH_CALL = 8192, + PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, + PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, + PERF_SAMPLE_BRANCH_HW_INDEX = 131072, + PERF_SAMPLE_BRANCH_PRIV_SAVE = 262144, + PERF_SAMPLE_BRANCH_COUNTERS = 524288, + PERF_SAMPLE_BRANCH_MAX = 1048576, +}; + +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1, + PERF_FORMAT_TOTAL_TIME_RUNNING = 2, + PERF_FORMAT_ID = 4, + PERF_FORMAT_GROUP = 8, + PERF_FORMAT_LOST = 16, + PERF_FORMAT_MAX = 32, +}; + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1, +}; + +struct perf_ns_link_info { + __u64 dev; + __u64 ino; +}; + +enum { + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + NR_NAMESPACES = 7, +}; + +enum perf_pmu_scope { + PERF_PMU_SCOPE_NONE = 0, + PERF_PMU_SCOPE_CORE = 1, + PERF_PMU_SCOPE_DIE = 2, + PERF_PMU_SCOPE_CLUSTER = 3, + PERF_PMU_SCOPE_PKG = 4, + PERF_PMU_SCOPE_SYS_WIDE = 5, + PERF_PMU_MAX_SCOPE = 6, +}; + +enum perf_addr_filter_action_t { + PERF_ADDR_FILTER_ACTION_STOP = 0, + PERF_ADDR_FILTER_ACTION_START = 1, + PERF_ADDR_FILTER_ACTION_FILTER = 2, +}; + +struct perf_addr_filter { + struct list_head entry; + struct path path; + long unsigned int offset; + long unsigned int size; + enum perf_addr_filter_action_t action; +}; + +struct swevent_hlist { + struct hlist_head heads[256]; + struct callback_head callback_head; +}; + +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int online; + struct perf_cgroup *cgrp; + int heap_size; + struct perf_event **heap; + struct perf_event *heap_default[2]; + long: 32; +}; + +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; + long: 32; +}; + +struct min_heap_char { + int nr; + int size; + char *data; + char preallocated[0]; +}; + +typedef struct min_heap_char min_heap_char; + +struct min_heap_callbacks { + bool (*less)(const void *, const void *, void *); + void (*swp)(void *, void *, void *); +}; + +typedef int (*remote_function_f)(void *); + +struct remote_function_call { + struct task_struct *p; + remote_function_f func; + void *info; + int ret; +}; + +enum event_type_t { + EVENT_FLEXIBLE = 1, + EVENT_PINNED = 2, + EVENT_TIME = 4, + EVENT_FROZEN = 8, + EVENT_CPU = 
16, + EVENT_CGROUP = 32, + EVENT_ALL = 3, + EVENT_TIME_FROZEN = 12, +}; + +typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *); + +struct event_function_struct { + struct perf_event *event; + event_f func; + void *data; +}; + +struct __group_key { + int cpu; + struct pmu *pmu; + struct cgroup *cgroup; +}; + +struct stop_event_data { + struct perf_event *event; + unsigned int restart; +}; + +struct perf_event_min_heap { + int nr; + int size; + struct perf_event **data; + struct perf_event *preallocated[0]; +}; + +struct perf_read_data { + struct perf_event *event; + bool group; + int ret; +}; + +struct perf_read_event { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +typedef void perf_iterate_f(struct perf_event *, void *); + +struct remote_output { + struct perf_buffer *rb; + int err; +}; + +struct perf_task_event { + struct task_struct *task; + struct perf_event_context *task_ctx; + struct { + struct perf_event_header header; + u32 pid; + u32 ppid; + u32 tid; + u32 ptid; + u64 time; + } event_id; +}; + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + } event_id; +}; + +struct perf_namespaces_event { + struct task_struct *task; + long: 32; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 nr_namespaces; + struct perf_ns_link_info link_info[7]; + } event_id; +}; + +struct perf_cgroup_event { + char *path; + int path_size; + struct { + struct perf_event_header header; + u64 id; + char path[0]; + } event_id; +}; + +struct perf_mmap_event { + struct vm_area_struct *vma; + const char *file_name; + int file_size; + int maj; + int min; + long: 32; + u64 ino; + u64 ino_generation; + u32 prot; + u32 flags; + u8 build_id[20]; + u32 build_id_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event_id; +}; + +struct perf_switch_event { + struct task_struct *task; + struct task_struct *next_prev; + struct { + struct perf_event_header header; + u32 next_prev_pid; + u32 next_prev_tid; + } event_id; +}; + +struct perf_ksymbol_event { + const char *name; + int name_len; + struct { + struct perf_event_header header; + u64 addr; + u32 len; + u16 ksym_type; + u16 flags; + } event_id; +}; + +struct perf_bpf_event { + struct bpf_prog *prog; + struct { + struct perf_event_header header; + u16 type; + u16 flags; + u32 id; + u8 tag[8]; + } event_id; +}; + +struct perf_text_poke_event { + const void *old_bytes; + const void *new_bytes; + size_t pad; + u16 old_len; + u16 new_len; + struct { + struct perf_event_header header; + u64 addr; + } event_id; +}; + +struct swevent_htable { + struct swevent_hlist *swevent_hlist; + struct mutex hlist_mutex; + int hlist_refcount; +}; + +enum perf_probe_config { + PERF_PROBE_CONFIG_IS_RETPROBE = 1, + PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, + PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32, +}; + +enum { + IF_ACT_NONE = -1, + IF_ACT_FILTER = 0, + IF_ACT_START = 1, + IF_ACT_STOP = 2, + IF_SRC_FILE = 3, + IF_SRC_KERNEL = 4, + IF_SRC_FILEADDR = 5, + IF_SRC_KERNELADDR = 6, +}; + +enum { + IF_STATE_ACTION = 0, + IF_STATE_SOURCE = 1, + IF_STATE_END = 2, +}; + +struct perf_aux_event { + struct perf_event_header header; + u64 hw_id; +}; + +struct perf_aux_event___2 { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +struct perf_aux_event___3 { + struct perf_event_header header; + u64 offset; + u64 size; + u64 flags; +}; + 
+struct dnotify_struct { + struct dnotify_struct *dn_next; + __u32 dn_mask; + int dn_fd; + struct file *dn_filp; + fl_owner_t dn_owner; +}; + +struct dnotify_mark { + struct fsnotify_mark fsn_mark; + struct dnotify_struct *dn; +}; + +struct iomap_swapfile_info { + struct iomap iomap; + struct swap_info_struct *sis; + long: 32; + uint64_t lowest_ppage; + uint64_t highest_ppage; + long unsigned int nr_pages; + int nr_extents; + struct file *file; + long: 32; +}; + +struct sysctl_alias { + const char *kernel_param; + const char *sysctl_param; +}; + +struct debugfs_fs_info { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +enum { + Opt_uid___7 = 0, + Opt_gid___8 = 1, + Opt_mode___6 = 2, + Opt_source = 3, +}; + +struct workspace___3 { + z_stream strm; + char *buf; + unsigned int buf_size; + struct list_head list; + int level; +}; + +struct btrfs_free_space_info { + __le32 extent_count; + __le32 flags; +}; + +struct msgbuf { + __kernel_long_t mtype; + char mtext[1]; +}; + +struct msg; + +struct msqid_ds { + struct ipc_perm msg_perm; + struct msg *msg_first; + struct msg *msg_last; + __kernel_old_time_t msg_stime; + __kernel_old_time_t msg_rtime; + __kernel_old_time_t msg_ctime; + long unsigned int msg_lcbytes; + long unsigned int msg_lqbytes; + short unsigned int msg_cbytes; + short unsigned int msg_qnum; + short unsigned int msg_qbytes; + __kernel_ipc_pid_t msg_lspid; + __kernel_ipc_pid_t msg_lrpid; +}; + +struct msqid64_ds { + struct ipc64_perm msg_perm; + long unsigned int msg_stime_high; + long unsigned int msg_stime; + long unsigned int msg_rtime_high; + long unsigned int msg_rtime; + long unsigned int msg_ctime_high; + long unsigned int msg_ctime; + long unsigned int msg_cbytes; + long unsigned int msg_qnum; + long unsigned int msg_qbytes; + __kernel_pid_t msg_lspid; + __kernel_pid_t msg_lrpid; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct msginfo { + int msgpool; + int msgmap; + int msgmax; + int msgmnb; + int msgmni; + int msgssz; + int msgtql; + short unsigned int msgseg; +}; + +struct msg_queue { + struct kern_ipc_perm q_perm; + time64_t q_stime; + time64_t q_rtime; + time64_t q_ctime; + long unsigned int q_cbytes; + long unsigned int q_qnum; + long unsigned int q_qbytes; + struct pid *q_lspid; + struct pid *q_lrpid; + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct msg_receiver { + struct list_head r_list; + struct task_struct *r_tsk; + int r_mode; + long int r_msgtype; + long int r_maxsize; + struct msg_msg *r_msg; +}; + +struct msg_sender { + struct list_head list; + struct task_struct *tsk; + size_t msgsz; +}; + +struct crypto_sig_spawn { + struct crypto_spawn base; +}; + +enum { + MILLION = 1000000, + MIN_PERIOD = 1000, + MAX_PERIOD = 1000000, + MARGIN_MIN_PCT = 10, + MARGIN_LOW_PCT = 20, + MARGIN_TARGET_PCT = 50, + INUSE_ADJ_STEP_PCT = 25, + TIMER_SLACK_PCT = 1, + WEIGHT_ONE = 65536, +}; + +enum { + VTIME_PER_SEC_SHIFT = 37ULL, + VTIME_PER_SEC = 137438953472ULL, + VTIME_PER_USEC = 137438ULL, + VTIME_PER_NSEC = 137ULL, + VRATE_MIN_PPM = 10000ULL, + VRATE_MAX_PPM = 100000000ULL, + VRATE_MIN = 1374ULL, + VRATE_CLAMP_ADJ_PCT = 4ULL, + AUTOP_CYCLE_NSEC = 10000000000ULL, +}; + +enum { + RQ_WAIT_BUSY_PCT = 5, + UNBUSY_THR_PCT = 75, + MIN_DELAY_THR_PCT = 500, + MAX_DELAY_THR_PCT = 25000, + MIN_DELAY = 
250, + MAX_DELAY = 250000, + DFGV_USAGE_PCT = 50, + DFGV_PERIOD = 100000, + MAX_LAGGING_PERIODS = 10, + IOC_PAGE_SHIFT = 12, + IOC_PAGE_SIZE = 4096, + IOC_SECT_TO_PAGE_SHIFT = 3, + LCOEF_RANDIO_PAGES = 4096, +}; + +enum ioc_running { + IOC_IDLE = 0, + IOC_RUNNING = 1, + IOC_STOP = 2, +}; + +enum { + QOS_ENABLE = 0, + QOS_CTRL = 1, + NR_QOS_CTRL_PARAMS = 2, +}; + +enum { + QOS_RPPM = 0, + QOS_RLAT = 1, + QOS_WPPM = 2, + QOS_WLAT = 3, + QOS_MIN = 4, + QOS_MAX = 5, + NR_QOS_PARAMS = 6, +}; + +enum { + COST_CTRL = 0, + COST_MODEL = 1, + NR_COST_CTRL_PARAMS = 2, +}; + +enum { + I_LCOEF_RBPS = 0, + I_LCOEF_RSEQIOPS = 1, + I_LCOEF_RRANDIOPS = 2, + I_LCOEF_WBPS = 3, + I_LCOEF_WSEQIOPS = 4, + I_LCOEF_WRANDIOPS = 5, + NR_I_LCOEFS = 6, +}; + +enum { + LCOEF_RPAGE = 0, + LCOEF_RSEQIO = 1, + LCOEF_RRANDIO = 2, + LCOEF_WPAGE = 3, + LCOEF_WSEQIO = 4, + LCOEF_WRANDIO = 5, + NR_LCOEFS = 6, +}; + +enum { + AUTOP_INVALID = 0, + AUTOP_HDD = 1, + AUTOP_SSD_QD1 = 2, + AUTOP_SSD_DFL = 3, + AUTOP_SSD_FAST = 4, +}; + +struct ioc_params { + u32 qos[6]; + u64 i_lcoefs[6]; + u64 lcoefs[6]; + u32 too_fast_vrate_pct; + u32 too_slow_vrate_pct; +}; + +struct ioc_margins { + s64 min; + s64 low; + s64 target; +}; + +struct ioc_missed { + local_t nr_met; + local_t nr_missed; + u32 last_met; + u32 last_missed; +}; + +struct ioc_pcpu_stat { + struct ioc_missed missed[2]; + local64_t rq_wait_ns; + u64 last_rq_wait_ns; +}; + +struct ioc { + struct rq_qos rqos; + bool enabled; + struct ioc_params params; + struct ioc_margins margins; + u32 period_us; + u32 timer_slack_ns; + u64 vrate_min; + u64 vrate_max; + spinlock_t lock; + struct timer_list timer; + struct list_head active_iocgs; + struct ioc_pcpu_stat *pcpu_stat; + enum ioc_running running; + atomic64_t vtime_rate; + u64 vtime_base_rate; + s64 vtime_err; + seqcount_spinlock_t period_seqcount; + long: 32; + u64 period_at; + u64 period_at_vtime; + atomic64_t cur_period; + int busy_level; + bool weights_updated; + atomic_t hweight_gen; + long: 32; + u64 dfgv_period_at; + u64 dfgv_period_rem; + u64 dfgv_usage_us_sum; + u64 autop_too_fast_at; + u64 autop_too_slow_at; + int autop_idx; + bool user_qos_params: 1; + bool user_cost_model: 1; +}; + +struct iocg_pcpu_stat { + local64_t abs_vusage; +}; + +struct iocg_stat { + u64 usage_us; + u64 wait_us; + u64 indebt_us; + u64 indelay_us; +}; + +struct ioc_gq { + struct blkg_policy_data pd; + struct ioc *ioc; + u32 cfg_weight; + u32 weight; + u32 active; + u32 inuse; + u32 last_inuse; + long: 32; + s64 saved_margin; + sector_t cursor; + atomic64_t vtime; + atomic64_t done_vtime; + u64 abs_vdebt; + u64 delay; + u64 delay_at; + atomic64_t active_period; + struct list_head active_list; + u64 child_active_sum; + u64 child_inuse_sum; + u64 child_adjusted_sum; + int hweight_gen; + u32 hweight_active; + u32 hweight_inuse; + u32 hweight_donating; + u32 hweight_after_donation; + struct list_head walk_list; + struct list_head surplus_list; + struct wait_queue_head waitq; + struct hrtimer waitq_timer; + u64 activated_at; + struct iocg_pcpu_stat *pcpu_stat; + long: 32; + struct iocg_stat stat; + struct iocg_stat last_stat; + u64 last_stat_abs_vusage; + u64 usage_delta_us; + u64 wait_since; + u64 indebt_since; + u64 indelay_since; + int level; + struct ioc_gq *ancestors[0]; + long: 32; +}; + +struct ioc_cgrp { + struct blkcg_policy_data cpd; + unsigned int dfl_weight; +}; + +struct ioc_now { + u64 now_ns; + u64 now; + u64 vnow; +}; + +struct iocg_wait { + struct wait_queue_entry wait; + struct bio *bio; + u64 abs_cost; + bool committed; + long: 32; 
+}; + +struct iocg_wake_ctx { + struct ioc_gq *iocg; + u32 hw_inuse; + s64 vbudget; +}; + +struct trace_event_raw_iocost_iocg_state { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u64 vrate; + u64 last_period; + u64 cur_period; + u64 vtime; + u32 weight; + u32 inuse; + u64 hweight_active; + u64 hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocg_inuse_update { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u32 old_inuse; + u32 new_inuse; + u64 old_hweight_inuse; + u64 new_hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocost_ioc_vrate_adj { + struct trace_entry ent; + u32 __data_loc_devname; + long: 32; + u64 old_vrate; + u64 new_vrate; + int busy_level; + u32 read_missed_ppm; + u32 write_missed_ppm; + u32 rq_wait_pct; + int nr_lagging; + int nr_shortages; + char __data[0]; +}; + +struct trace_event_raw_iocost_iocg_forgive_debt { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u32 usage_pct; + long: 32; + u64 old_debt; + u64 new_debt; + u64 old_delay; + u64 new_delay; + char __data[0]; +}; + +struct trace_event_data_offsets_iocost_iocg_state { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +struct trace_event_data_offsets_iocg_inuse_update { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +struct trace_event_data_offsets_iocost_ioc_vrate_adj { + u32 devname; + const void *devname_ptr_; +}; + +struct trace_event_data_offsets_iocost_iocg_forgive_debt { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +typedef void (*btf_trace_iocost_iocg_activate)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_iocg_idle)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_inuse_shortage)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_transfer)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_adjust)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_ioc_vrate_adj)(void *, struct ioc *, u64, u32 *, u32, int, int); + +typedef void (*btf_trace_iocost_iocg_forgive_debt)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u64, u64, u64, u64); + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct usb_qualifier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __u8 bNumConfigurations; + __u8 bRESERVED; +}; + +struct usb_set_sel_req { + __u8 u1_sel; + __u8 u1_pel; + __le16 u2_sel; + __le16 u2_pel; +}; + +struct usbdevfs_hub_portinfo { + char nports; + char port[127]; +}; + +enum hub_led_mode { + INDICATOR_AUTO = 0, + INDICATOR_CYCLE = 1, + INDICATOR_GREEN_BLINK = 2, + INDICATOR_GREEN_BLINK_OFF = 3, + INDICATOR_AMBER_BLINK = 4, + INDICATOR_AMBER_BLINK_OFF = 5, + INDICATOR_ALT_BLINK = 6, + INDICATOR_ALT_BLINK_OFF = 7, +} __attribute__((mode(byte))); + +struct usb_tt_clear { + struct list_head clear_list; + unsigned int tt; + u16 devinfo; + struct usb_hcd *hcd; + struct usb_host_endpoint 
*ep; +}; + +enum hub_activation_type { + HUB_INIT = 0, + HUB_INIT2 = 1, + HUB_INIT3 = 2, + HUB_POST_RESET = 3, + HUB_RESUME = 4, + HUB_RESET_RESUME = 5, +}; + +enum hub_quiescing_type { + HUB_DISCONNECT = 0, + HUB_PRE_RESET = 1, + HUB_SUSPEND = 2, +}; + +struct atkbd { + struct ps2dev ps2dev; + struct input_dev *dev; + char name[64]; + char phys[32]; + short unsigned int id; + short unsigned int keycode[512]; + long unsigned int force_release_mask[16]; + unsigned char set; + bool translated; + bool extra; + bool write; + bool softrepeat; + bool softraw; + bool scroll; + bool enabled; + unsigned char emul; + bool resend; + bool release; + long unsigned int xl_bit; + unsigned int last; + long unsigned int time; + long unsigned int err_count; + struct delayed_work event_work; + long unsigned int event_jiffies; + long unsigned int event_mask; + struct mutex mutex; + struct vivaldi_data vdata; +}; + +enum sony_state { + STATE_INACTIVE___7 = 0, + STATE_HEADER_SPACE___3 = 1, + STATE_BIT_PULSE___3 = 2, + STATE_BIT_SPACE___3 = 3, + STATE_FINISHED___4 = 4, +}; + +typedef __u16 bitmap_counter_t; + +enum bitmap_state { + BITMAP_STALE = 1, + BITMAP_WRITE_ERROR = 2, + BITMAP_HOSTENDIAN = 15, +}; + +struct bitmap_super_s { + __le32 magic; + __le32 version; + __u8 uuid[16]; + __le64 events; + __le64 events_cleared; + __le64 sync_size; + __le32 state; + __le32 chunksize; + __le32 daemon_sleep; + __le32 write_behind; + __le32 sectors_reserved; + __le32 nodes; + __u8 cluster_name[64]; + __u8 pad[120]; +}; + +typedef struct bitmap_super_s bitmap_super_t; + +struct bitmap_page { + char *map; + unsigned int hijacked: 1; + unsigned int pending: 1; + unsigned int count: 30; +}; + +struct bitmap_counts { + spinlock_t lock; + struct bitmap_page *bp; + long unsigned int pages; + long unsigned int missing_pages; + long unsigned int chunkshift; + long unsigned int chunks; +}; + +struct bitmap_storage { + struct file *file; + struct page *sb_page; + long unsigned int sb_index; + struct page **filemap; + long unsigned int *filemap_attr; + long unsigned int file_pages; + long unsigned int bytes; +}; + +struct bitmap { + struct bitmap_counts counts; + struct mddev *mddev; + long: 32; + __u64 events_cleared; + int need_sync; + struct bitmap_storage storage; + long unsigned int flags; + int allclean; + atomic_t behind_writes; + long unsigned int behind_writes_used; + long unsigned int daemon_lastrun; + long unsigned int last_end_sync; + atomic_t pending_writes; + wait_queue_head_t write_wait; + wait_queue_head_t overflow_wait; + wait_queue_head_t behind_wait; + struct kernfs_node *sysfs_can_clear; + int cluster_slot; +}; + +enum bitmap_page_attr { + BITMAP_PAGE_DIRTY = 0, + BITMAP_PAGE_PENDING = 1, + BITMAP_PAGE_NEEDWRITE = 2, +}; + +struct bitmap_unplug_work { + struct work_struct work; + struct bitmap *bitmap; + struct completion *done; +}; + +enum { + IF_LINK_MODE_DEFAULT = 0, + IF_LINK_MODE_DORMANT = 1, + IF_LINK_MODE_TESTING = 2, +}; + +enum lw_bits { + LW_URGENT = 0, +}; + +struct update_classid_context { + u32 classid; + unsigned int batch; +}; + +struct wol_reply_data { + struct ethnl_reply_data base; + struct ethtool_wolinfo wol; + bool show_sopass; +}; + +struct ct_iter_state { + struct seq_net_private p; + struct hlist_nulls_head *hash; + unsigned int htable_size; + unsigned int bucket; + u_int64_t time_now; +}; + +enum nf_ct_sysctl_index { + NF_SYSCTL_CT_MAX = 0, + NF_SYSCTL_CT_COUNT = 1, + NF_SYSCTL_CT_BUCKETS = 2, + NF_SYSCTL_CT_CHECKSUM = 3, + NF_SYSCTL_CT_LOG_INVALID = 4, + NF_SYSCTL_CT_EXPECT_MAX = 5, + 
NF_SYSCTL_CT_ACCT = 6, + NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC = 7, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT = 8, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV = 9, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED = 10, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT = 11, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT = 12, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK = 13, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT = 14, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE = 15, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS = 16, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK = 17, + NF_SYSCTL_CT_PROTO_TCP_LOOSE = 18, + NF_SYSCTL_CT_PROTO_TCP_LIBERAL = 19, + NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST = 20, + NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS = 21, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP = 22, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM = 23, + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP = 24, + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6 = 25, + NF_SYSCTL_CT_LAST_SYSCTL = 26, +}; + +enum nft_meta_attributes { + NFTA_META_UNSPEC = 0, + NFTA_META_DREG = 1, + NFTA_META_KEY = 2, + NFTA_META_SREG = 3, + __NFTA_META_MAX = 4, +}; + +struct fib_nh_notifier_info { + struct fib_notifier_info info; + struct fib_nh *fib_nh; +}; + +enum mm_32b_func { + mm_lwc2_func = 0, + mm_lwp_func = 1, + mm_ldc2_func = 2, + mm_ldp_func = 4, + mm_lwm32_func = 5, + mm_cache_func = 6, + mm_ldm_func = 7, + mm_swc2_func = 8, + mm_swp_func = 9, + mm_sdc2_func = 10, + mm_sdp_func = 12, + mm_swm32_func = 13, + mm_sdm_func = 15, +}; + +enum mm_32c_func { + mm_pref_func = 2, + mm_ll_func = 3, + mm_swr_func = 9, + mm_sc_func = 11, + mm_lwu_func = 14, +}; + +enum mm_32f_func { + mm_lwxc1_func = 72, + mm_swxc1_func = 136, + mm_ldxc1_func = 200, + mm_sdxc1_func = 264, +}; + +enum MIPS16e_i64_func { + MIPS16e_ldsp_func = 0, + MIPS16e_sdsp_func = 1, + MIPS16e_sdrasp_func = 2, + MIPS16e_dadjsp_func = 3, + MIPS16e_ldpc_func = 4, +}; + +enum MIPS6e_i8_func { + MIPS16e_swrasp_func = 2, +}; + +enum { + UNALIGNED_ACTION_QUIET = 0, + UNALIGNED_ACTION_SIGNAL = 1, + UNALIGNED_ACTION_SHOW = 2, +}; + +struct semaphore_waiter { + struct list_head list; + struct task_struct *task; + bool up; +}; + +struct dma_map_ops { + void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int); + void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int); + struct page * (*alloc_pages_op)(struct device *, size_t, dma_addr_t *, enum dma_data_direction, gfp_t); + void (*free_pages)(struct device *, size_t, struct page *, dma_addr_t, enum dma_data_direction); + int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int); + int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int); + dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction); 
+ void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); + int (*dma_supported)(struct device *, u64); + u64 (*get_required_mask)(struct device *); + size_t (*max_mapping_size)(struct device *); + size_t (*opt_mapping_size)(); + long unsigned int (*get_merge_boundary)(struct device *); +}; + +struct trace_event_raw_dma_map { + struct trace_entry ent; + u32 __data_loc_device; + long: 32; + u64 phys_addr; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_dma_unmap { + struct trace_entry ent; + u32 __data_loc_device; + long: 32; + u64 addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_dma_alloc_class { + struct trace_entry ent; + u32 __data_loc_device; + void *virt_addr; + u64 dma_addr; + size_t size; + gfp_t flags; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_alloc_sgt { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + gfp_t flags; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_free_class { + struct trace_entry ent; + u32 __data_loc_device; + void *virt_addr; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_dma_free_sgt { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_raw_dma_map_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u32 __data_loc_dma_addrs; + u32 __data_loc_lengths; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_map_sg_err { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + int err; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_unmap_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_addrs; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_sync_single { + struct trace_entry ent; + u32 __data_loc_device; + long: 32; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_raw_dma_sync_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_dma_addrs; + u32 __data_loc_lengths; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_map { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_unmap { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_alloc_class { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_alloc_sgt { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_free_class { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_free_sgt { + u32 device; + const void 
*device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_map_sg { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; + u32 dma_addrs; + const void *dma_addrs_ptr_; + u32 lengths; + const void *lengths_ptr_; +}; + +struct trace_event_data_offsets_dma_map_sg_err { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_unmap_sg { + u32 device; + const void *device_ptr_; + u32 addrs; + const void *addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_sync_single { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_sync_sg { + u32 device; + const void *device_ptr_; + u32 dma_addrs; + const void *dma_addrs_ptr_; + u32 lengths; + const void *lengths_ptr_; +}; + +typedef void (*btf_trace_dma_map_page)(void *, struct device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_map_resource)(void *, struct device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_page)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_resource)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_alloc)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_pages)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_sgt_err)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_sgt)(void *, struct device *, struct sg_table *, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_free)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_free_pages)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_free_sgt)(void *, struct device *, struct sg_table *, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_map_sg)(void *, struct device *, struct scatterlist *, int, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_map_sg_err)(void *, struct device *, struct scatterlist *, int, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_sg)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_sync_single_for_cpu)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_single_for_device)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_sg_for_cpu)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_sg_for_device)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); + +struct dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; + long unsigned int attrs; +}; + +struct trace_event_raw_error_report_template { + struct trace_entry ent; + enum 
error_detector error_detector; + long unsigned int id; + char __data[0]; +}; + +struct trace_event_data_offsets_error_report_template {}; + +typedef void (*btf_trace_error_report_end)(void *, enum error_detector, long unsigned int); + +struct bpf_iter_seq_link_info { + u32 link_id; +}; + +struct bpf_iter__bpf_link { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_link *link; + }; +}; + +struct bpf_cpumask { + cpumask_t cpumask; + refcount_t usage; +}; + +struct contig_page_info { + long unsigned int free_pages; + long unsigned int free_blocks_total; + long unsigned int free_blocks_suitable; +}; + +struct prepend_buffer { + char *buf; + int len; +}; + +struct timerfd_ctx { + union { + struct hrtimer tmr; + struct alarm alarm; + } t; + ktime_t tintv; + ktime_t moffs; + wait_queue_head_t wqh; + long: 32; + u64 ticks; + int clockid; + short unsigned int expired; + short unsigned int settime_flags; + struct callback_head rcu; + struct list_head clist; + spinlock_t cancel_lock; + bool might_cancel; +}; + +struct btrfs_csum_item { + __u8 csum; +}; + +struct btrfs_stripe_hash { + struct list_head hash_list; + spinlock_t lock; +}; + +struct btrfs_stripe_hash_table { + struct list_head stripe_cache; + spinlock_t cache_lock; + int cache_size; + struct btrfs_stripe_hash table[0]; +}; + +struct sector_ptr { + struct page *page; + unsigned int pgoff: 24; + unsigned int uptodate: 8; +}; + +struct btrfs_plug_cb { + struct blk_plug_cb cb; + struct btrfs_fs_info *info; + struct list_head rbio_list; +}; + +struct user_key_payload { + struct callback_head rcu; + short unsigned int datalen; + long: 32; + char data[0]; +}; + +struct acomp_alg { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + int (*init)(struct crypto_acomp *); + void (*exit)(struct crypto_acomp *); + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + struct crypto_alg base; + }; + struct comp_alg_common calg; + }; +}; + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[16]; + u32 bytes; +}; + +struct bio_map_data { + bool is_our_pages: 1; + bool is_null_mapped: 1; + long: 32; + struct iov_iter iter; + struct iovec iov[0]; +}; + +struct blk_iolatency { + struct rq_qos rqos; + struct timer_list timer; + bool enabled; + atomic_t enable_cnt; + struct work_struct enable_work; +}; + +struct iolatency_grp; + +struct child_latency_info { + spinlock_t lock; + long: 32; + u64 last_scale_event; + u64 scale_lat; + u64 nr_samples; + struct iolatency_grp *scale_grp; + atomic_t scale_cookie; +}; + +struct percentile_stats { + u64 total; + u64 missed; +}; + +struct latency_stat { + union { + struct percentile_stats ps; + struct blk_rq_stat rqs; + }; +}; + +struct iolatency_grp { + struct blkg_policy_data pd; + struct latency_stat *stats; + struct latency_stat cur_stat; + struct blk_iolatency *blkiolat; + unsigned int max_depth; + struct rq_wait rq_wait; + atomic64_t window_start; + atomic_t scale_cookie; + long: 32; + u64 min_lat_nsec; + u64 cur_win_nsec; + u64 lat_avg; + u64 nr_samples; + bool ssd; + long: 32; + struct child_latency_info child_lat; +}; + +enum io_uring_napi_op { + IO_URING_NAPI_REGISTER_OP = 0, + 
IO_URING_NAPI_STATIC_ADD_ID = 1, + IO_URING_NAPI_STATIC_DEL_ID = 2, +}; + +struct io_uring_napi { + __u32 busy_poll_to; + __u8 prefer_busy_poll; + __u8 opcode; + __u8 pad[2]; + __u32 op_param; + __u32 resv; +}; + +struct io_napi_entry { + unsigned int napi_id; + struct list_head list; + long unsigned int timeout; + struct hlist_node node; + struct callback_head rcu; +}; + +typedef struct { + U32 f1c; + U32 f1d; + U32 f7b; + U32 f7c; +} ZSTD_cpuid_t; + +typedef enum { + ZSTD_c_compressionLevel = 100, + ZSTD_c_windowLog = 101, + ZSTD_c_hashLog = 102, + ZSTD_c_chainLog = 103, + ZSTD_c_searchLog = 104, + ZSTD_c_minMatch = 105, + ZSTD_c_targetLength = 106, + ZSTD_c_strategy = 107, + ZSTD_c_enableLongDistanceMatching = 160, + ZSTD_c_ldmHashLog = 161, + ZSTD_c_ldmMinMatch = 162, + ZSTD_c_ldmBucketSizeLog = 163, + ZSTD_c_ldmHashRateLog = 164, + ZSTD_c_contentSizeFlag = 200, + ZSTD_c_checksumFlag = 201, + ZSTD_c_dictIDFlag = 202, + ZSTD_c_nbWorkers = 400, + ZSTD_c_jobSize = 401, + ZSTD_c_overlapLog = 402, + ZSTD_c_experimentalParam1 = 500, + ZSTD_c_experimentalParam2 = 10, + ZSTD_c_experimentalParam3 = 1000, + ZSTD_c_experimentalParam4 = 1001, + ZSTD_c_experimentalParam5 = 1002, + ZSTD_c_experimentalParam6 = 1003, + ZSTD_c_experimentalParam7 = 1004, + ZSTD_c_experimentalParam8 = 1005, + ZSTD_c_experimentalParam9 = 1006, + ZSTD_c_experimentalParam10 = 1007, + ZSTD_c_experimentalParam11 = 1008, + ZSTD_c_experimentalParam12 = 1009, + ZSTD_c_experimentalParam13 = 1010, + ZSTD_c_experimentalParam14 = 1011, + ZSTD_c_experimentalParam15 = 1012, +} ZSTD_cParameter; + +typedef enum { + ZSTD_e_continue = 0, + ZSTD_e_flush = 1, + ZSTD_e_end = 2, +} ZSTD_EndDirective; + +typedef struct { + long long unsigned int ingested; + long long unsigned int consumed; + long long unsigned int produced; + long long unsigned int flushed; + unsigned int currentJobID; + unsigned int nbActiveWorkers; +} ZSTD_frameProgression; + +typedef enum { + ZSTD_cpm_noAttachDict = 0, + ZSTD_cpm_attachDict = 1, + ZSTD_cpm_createCDict = 2, + ZSTD_cpm_unknown = 3, +} ZSTD_cParamMode_e; + +typedef enum { + ZSTDcrp_makeClean = 0, + ZSTDcrp_leaveDirty = 1, +} ZSTD_compResetPolicy_e; + +typedef enum { + ZSTDirp_continue = 0, + ZSTDirp_reset = 1, +} ZSTD_indexResetPolicy_e; + +typedef enum { + ZSTD_resetTarget_CDict = 0, + ZSTD_resetTarget_CCtx = 1, +} ZSTD_resetTarget_e; + +typedef struct { + U32 LLtype; + U32 Offtype; + U32 MLtype; + size_t size; + size_t lastCountSize; +} ZSTD_symbolEncodingTypeStats_t; + +enum { + ZSTDbss_compress = 0, + ZSTDbss_noCompress = 1, +}; + +typedef struct { + U32 *splitLocations; + size_t idx; +} seqStoreSplits; + +typedef struct { + U32 idx; + U32 posInSequence; + size_t posInSrc; +} ZSTD_sequencePosition; + +typedef size_t (*ZSTD_sequenceCopier)(ZSTD_CCtx *, ZSTD_sequencePosition *, const ZSTD_Sequence * const, size_t, const void *, size_t); + +struct virtio_admin_cmd { + __le16 opcode; + __le16 group_type; + long: 32; + __le64 group_member_id; + struct scatterlist *data_sg; + struct scatterlist *result_sg; + struct completion completion; + u32 result_sg_size; + int ret; +}; + +struct virtio_admin_cmd_hdr { + __le16 opcode; + __le16 group_type; + __u8 reserved1[12]; + __le64 group_member_id; +}; + +struct virtio_admin_cmd_status { + __le16 status; + __le16 status_qualifier; + __u8 reserved2[4]; +}; + +struct virtio_dev_parts_cap { + __u8 get_parts_resource_objects_limit; + __u8 set_parts_resource_objects_limit; +}; + +struct virtio_admin_cmd_query_cap_id_result { + __le64 supported_caps[1]; +}; + +struct 
virtio_admin_cmd_cap_get_data { + __le16 id; + __u8 reserved[6]; +}; + +struct virtio_admin_cmd_cap_set_data { + __le16 id; + __u8 reserved[6]; + __u8 cap_specific_data[0]; +}; + +struct virtio_admin_cmd_resource_obj_cmd_hdr { + __le16 type; + __u8 reserved[2]; + __le32 id; +}; + +struct virtio_admin_cmd_resource_obj_create_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __le64 flags; + __u8 resource_obj_specific_data[0]; +}; + +struct virtio_resource_obj_dev_parts { + __u8 type; + __u8 reserved[7]; +}; + +struct virtio_admin_cmd_dev_parts_metadata_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; +}; + +struct virtio_dev_part_hdr { + __le16 part_type; + __u8 flags; + __u8 reserved; + union { + struct { + __le32 offset; + __le32 reserved; + } pci_common_cfg; + struct { + __le16 index; + __u8 reserved[6]; + } vq_index; + } selector; + __le32 length; +}; + +struct virtio_admin_cmd_dev_parts_metadata_result { + union { + struct { + __le32 size; + __le32 reserved; + } parts_size; + struct { + __le32 count; + __le32 reserved; + } hdr_list_count; + struct { + __le32 count; + __le32 reserved; + struct virtio_dev_part_hdr hdrs[0]; + } hdr_list; + }; +}; + +struct virtio_admin_cmd_dev_parts_get_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; + struct virtio_dev_part_hdr hdr_list[0]; +}; + +struct virtio_admin_cmd_dev_mode_set_data { + __u8 flags; +}; + +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +struct class_compat { + struct kobject *kobj; +}; + +typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *); + +struct net_device_devres { + struct net_device *ndev; +}; + +struct stp_proto { + unsigned char group_address[6]; + void (*rcv)(const struct stp_proto *, struct sk_buff *, struct net_device *); + void *data; +}; + +enum ethtool_supported_ring_param { + ETHTOOL_RING_USE_RX_BUF_LEN = 1, + ETHTOOL_RING_USE_CQE_SIZE = 2, + ETHTOOL_RING_USE_TX_PUSH = 4, + ETHTOOL_RING_USE_RX_PUSH = 8, + ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = 16, + ETHTOOL_RING_USE_TCP_DATA_SPLIT = 32, +}; + +enum { + ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, + ETHTOOL_TCP_DATA_SPLIT_DISABLED = 1, + ETHTOOL_TCP_DATA_SPLIT_ENABLED = 2, +}; + +struct rings_reply_data { + struct ethnl_reply_data base; + struct ethtool_ringparam ringparam; + struct kernel_ethtool_ringparam kernel_ringparam; + u32 supported_ring_params; +}; + +enum nft_rt_keys { + NFT_RT_CLASSID = 0, + NFT_RT_NEXTHOP4 = 1, + NFT_RT_NEXTHOP6 = 2, + NFT_RT_TCPMSS = 3, + NFT_RT_XFRM = 4, + __NFT_RT_MAX = 5, +}; + +enum nft_rt_attributes { + NFTA_RT_UNSPEC = 0, + NFTA_RT_DREG = 1, + NFTA_RT_KEY = 2, + __NFTA_RT_MAX = 3, +}; + +struct nft_rt { + enum nft_rt_keys key: 8; + u8 dreg; +}; + +enum { + IFLA_INET_UNSPEC = 0, + IFLA_INET_CONF = 1, + __IFLA_INET_MAX = 2, +}; + +struct in_validator_info { + __be32 ivi_addr; + struct in_device *ivi_dev; + struct netlink_ext_ack *extack; +}; + +struct inet_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; +}; + +struct devinet_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table devinet_vars[33]; +}; + +struct xt_get_revision { + char name[29]; + __u8 revision; +}; + +struct ipt_getinfo { + char name[32]; + unsigned int valid_hooks; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int num_entries; + unsigned int size; +}; + +struct ipt_get_entries { + char name[32]; + unsigned int size; + long: 32; + struct 
ipt_entry entrytable[0]; +}; + +struct ipt_error { + struct ipt_entry entry; + struct xt_error_target target; +}; + +struct module_sect_attr { + struct bin_attribute battr; + long unsigned int address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; + struct module_sect_attr attrs[0]; +}; + +struct module_notes_attrs { + struct kobject *dir; + unsigned int notes; + struct bin_attribute attrs[0]; +}; + +struct trace_event_raw_csd_queue_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *func; + void *csd; + char __data[0]; +}; + +struct trace_event_raw_csd_function { + struct trace_entry ent; + void *func; + void *csd; + char __data[0]; +}; + +struct trace_event_data_offsets_csd_queue_cpu {}; + +struct trace_event_data_offsets_csd_function {}; + +typedef void (*btf_trace_csd_queue_cpu)(void *, const unsigned int, long unsigned int, smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_entry)(void *, smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_exit)(void *, smp_call_func_t, call_single_data_t *); + +struct call_function_data { + call_single_data_t *csd; + cpumask_var_t cpumask; + cpumask_var_t cpumask_ipi; +}; + +struct smp_call_on_cpu_struct { + struct work_struct work; + struct completion done; + int (*func)(void *); + void *data; + int ret; + int cpu; +}; + +struct kprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int ip; +}; + +struct kretprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int func; + long unsigned int ret_ip; +}; + +struct trace_kprobe { + struct dyn_event devent; + struct kretprobe rp; + long unsigned int *nhit; + const char *symbol; + struct trace_probe tp; +}; + +struct sym_count_ctx { + unsigned int count; + const char *name; +}; + +struct bpf_crypto_type_list { + const struct bpf_crypto_type *type; + struct list_head list; +}; + +struct bpf_crypto_params { + char type[14]; + u8 reserved[2]; + char algo[128]; + u8 key[256]; + u32 key_len; + u32 authsize; +}; + +struct bpf_crypto_ctx { + const struct bpf_crypto_type *type; + void *tfm; + u32 siv_len; + struct callback_head rcu; + refcount_t usage; +}; + +enum fsconfig_command { + FSCONFIG_SET_FLAG = 0, + FSCONFIG_SET_STRING = 1, + FSCONFIG_SET_BINARY = 2, + FSCONFIG_SET_PATH = 3, + FSCONFIG_SET_PATH_EMPTY = 4, + FSCONFIG_SET_FD = 5, + FSCONFIG_CMD_CREATE = 6, + FSCONFIG_CMD_RECONFIGURE = 7, + FSCONFIG_CMD_CREATE_EXCL = 8, +}; + +enum { + MBE_REFERENCED_B = 0, + MBE_REUSABLE_B = 1, +}; + +struct mb_cache_entry { + struct list_head e_list; + struct hlist_bl_node e_hash_list; + atomic_t e_refcnt; + u32 e_key; + long unsigned int e_flags; + long: 32; + u64 e_value; +}; + +struct mb_cache { + struct hlist_bl_head *c_hash; + int c_bucket_bits; + long unsigned int c_max_entries; + spinlock_t c_list_lock; + struct list_head c_list; + long unsigned int c_entry_count; + struct shrinker *c_shrink; + struct work_struct c_shrink_work; +}; + +struct fd_data { + fmode_t mode; + unsigned int fd; +}; + +typedef struct { + __le32 *p; + __le32 key; + struct buffer_head *bh; +} Indirect; + +struct jbd2_revoke_table_s { + int hash_size; + int hash_shift; + struct list_head *hash_table; +}; + +struct jbd2_revoke_record_s { + struct list_head hash; + tid_t sequence; + long: 32; + long long unsigned int blocknr; +}; + +struct fuse_dirent { + uint64_t ino; + uint64_t off; + uint32_t namelen; + uint32_t type; + char name[0]; +}; + +struct fuse_direntplus { + struct fuse_entry_out 
entry_out; + struct fuse_dirent dirent; +}; + +enum fuse_parse_result { + FOUND_ERR = -1, + FOUND_NONE = 0, + FOUND_SOME = 1, + FOUND_ALL = 2, +}; + +struct reserve_ticket { + u64 bytes; + int error; + bool steal; + struct list_head list; + wait_queue_head_t wait; + long: 32; +}; + +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + void *__ctx[0]; +}; + +struct crypto_kpp { + unsigned int reqsize; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_tfm base; +}; + +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); + int (*generate_public_key)(struct kpp_request *); + int (*compute_shared_secret)(struct kpp_request *); + unsigned int (*max_size)(struct crypto_kpp *); + int (*init)(struct crypto_kpp *); + void (*exit)(struct crypto_kpp *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct crypto_alg base; +}; + +struct kpp_instance { + void (*free)(struct kpp_instance *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct kpp_alg alg; + }; +}; + +struct crypto_kpp_spawn { + struct crypto_spawn base; +}; + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC = 0, + CRYPTO_AUTHENC_KEYA_PARAM = 1, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +struct authenc_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; + unsigned int reqoff; +}; + +struct crypto_authenc_ctx { + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; +}; + +struct bsg_device { + struct request_queue *queue; + long: 32; + struct device device; + struct cdev cdev; + int max_queue; + unsigned int timeout; + unsigned int reserved_size; + bsg_sg_io_fn *sg_io_fn; + long: 32; +}; + +struct io_uring_sync_cancel_reg { + __u64 addr; + __s32 fd; + __u32 flags; + struct __kernel_timespec timeout; + __u8 opcode; + __u8 pad[7]; + __u64 pad2[3]; +}; + +struct io_cancel { + struct file *file; + long: 32; + u64 addr; + u32 flags; + s32 fd; + u8 opcode; + long: 32; +}; + +struct clk_mux { + struct clk_hw hw; + void *reg; + const u32 *table; + u32 mask; + u8 
shift; + u8 flags; + spinlock_t *lock; +}; + +struct console_font_op { + unsigned int op; + unsigned int flags; + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct con_driver { + const struct consw *con; + const char *desc; + struct device *dev; + int node; + int first; + int last; + int flag; +}; + +enum { + blank_off = 0, + blank_normal_wait = 1, + blank_vesa_wait = 2, +}; + +enum { + EPecma = 0, + EPdec = 1, + EPeq = 2, + EPgt = 3, + EPlt = 4, +}; + +enum CSI_J { + CSI_J_CURSOR_TO_END = 0, + CSI_J_START_TO_CURSOR = 1, + CSI_J_VISIBLE = 2, + CSI_J_FULL = 3, +}; + +enum { + CSI_K_CURSOR_TO_LINEEND = 0, + CSI_K_LINESTART_TO_CURSOR = 1, + CSI_K_LINE = 2, +}; + +struct rgb { + u8 r; + u8 g; + u8 b; +}; + +enum { + CSI_m_DEFAULT = 0, + CSI_m_BOLD = 1, + CSI_m_HALF_BRIGHT = 2, + CSI_m_ITALIC = 3, + CSI_m_UNDERLINE = 4, + CSI_m_BLINK = 5, + CSI_m_REVERSE = 7, + CSI_m_PRI_FONT = 10, + CSI_m_ALT_FONT1 = 11, + CSI_m_ALT_FONT2 = 12, + CSI_m_DOUBLE_UNDERLINE = 21, + CSI_m_NORMAL_INTENSITY = 22, + CSI_m_NO_ITALIC = 23, + CSI_m_NO_UNDERLINE = 24, + CSI_m_NO_BLINK = 25, + CSI_m_NO_REVERSE = 27, + CSI_m_FG_COLOR_BEG = 30, + CSI_m_FG_COLOR_END = 37, + CSI_m_FG_COLOR = 38, + CSI_m_DEFAULT_FG_COLOR = 39, + CSI_m_BG_COLOR_BEG = 40, + CSI_m_BG_COLOR_END = 47, + CSI_m_BG_COLOR = 48, + CSI_m_DEFAULT_BG_COLOR = 49, + CSI_m_BRIGHT_FG_COLOR_BEG = 90, + CSI_m_BRIGHT_FG_COLOR_END = 97, + CSI_m_BRIGHT_FG_COLOR_OFF = 60, + CSI_m_BRIGHT_BG_COLOR_BEG = 100, + CSI_m_BRIGHT_BG_COLOR_END = 107, + CSI_m_BRIGHT_BG_COLOR_OFF = 60, +}; + +enum { + CSI_DEC_hl_CURSOR_KEYS = 1, + CSI_DEC_hl_132_COLUMNS = 3, + CSI_DEC_hl_REVERSE_VIDEO = 5, + CSI_DEC_hl_ORIGIN_MODE = 6, + CSI_DEC_hl_AUTOWRAP = 7, + CSI_DEC_hl_AUTOREPEAT = 8, + CSI_DEC_hl_MOUSE_X10 = 9, + CSI_DEC_hl_SHOW_CURSOR = 25, + CSI_DEC_hl_MOUSE_VT200 = 1000, +}; + +enum { + CSI_hl_DISPLAY_CTRL = 3, + CSI_hl_INSERT = 4, + CSI_hl_AUTO_NL = 20, +}; + +enum CSI_right_square_bracket { + CSI_RSB_COLOR_FOR_UNDERLINE = 1, + CSI_RSB_COLOR_FOR_HALF_BRIGHT = 2, + CSI_RSB_MAKE_CUR_COLOR_DEFAULT = 8, + CSI_RSB_BLANKING_INTERVAL = 9, + CSI_RSB_BELL_FREQUENCY = 10, + CSI_RSB_BELL_DURATION = 11, + CSI_RSB_BRING_CONSOLE_TO_FRONT = 12, + CSI_RSB_UNBLANK = 13, + CSI_RSB_VESA_OFF_INTERVAL = 14, + CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT = 15, + CSI_RSB_CURSOR_BLINK_INTERVAL = 16, +}; + +enum vc_ctl_state { + ESnormal = 0, + ESesc = 1, + ESsquare = 2, + ESgetpars = 3, + ESfunckey = 4, + EShash = 5, + ESsetG0 = 6, + ESsetG1 = 7, + ESpercent = 8, + EScsiignore = 9, + ESnonstd = 10, + ESpalette = 11, + ESosc = 12, + ESANSI_first = 12, + ESapc = 13, + ESpm = 14, + ESdcs = 15, + ESANSI_last = 15, +}; + +enum { + ASCII_NULL = 0, + ASCII_BELL = 7, + ASCII_BACKSPACE = 8, + ASCII_IGNORE_FIRST = 8, + ASCII_HTAB = 9, + ASCII_LINEFEED = 10, + ASCII_VTAB = 11, + ASCII_FORMFEED = 12, + ASCII_CAR_RET = 13, + ASCII_IGNORE_LAST = 13, + ASCII_SHIFTOUT = 14, + ASCII_SHIFTIN = 15, + ASCII_CANCEL = 24, + ASCII_SUBSTITUTE = 26, + ASCII_ESCAPE = 27, + ASCII_CSI_IGNORE_FIRST = 32, + ASCII_CSI_IGNORE_LAST = 63, + ASCII_DEL = 127, + ASCII_EXT_CSI = 155, +}; + +struct interval { + uint32_t first; + uint32_t last; +}; + +struct vc_draw_region { + long unsigned int from; + long unsigned int to; + int x; +}; + +struct trace_event_raw_devres { + struct trace_entry ent; + u32 __data_loc_devname; + struct device *dev; + const char *op; + void *node; + u32 __data_loc_name; + size_t size; + char __data[0]; +}; + +struct trace_event_data_offsets_devres { + u32 devname; + const 
void *devname_ptr_; + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_devres_log)(void *, struct device *, const char *, void *, const char *, size_t); + +struct ahci_sg { + __le32 addr; + __le32 addr_hi; + __le32 reserved; + __le32 flags_size; +}; + +struct usbdevfs_ctrltransfer { + __u8 bRequestType; + __u8 bRequest; + __u16 wValue; + __u16 wIndex; + __u16 wLength; + __u32 timeout; + void *data; +}; + +struct usbdevfs_bulktransfer { + unsigned int ep; + unsigned int len; + unsigned int timeout; + void *data; +}; + +struct usbdevfs_setinterface { + unsigned int interface; + unsigned int altsetting; +}; + +struct usbdevfs_disconnectsignal { + unsigned int signr; + void *context; +}; + +struct usbdevfs_getdriver { + unsigned int interface; + char driver[256]; +}; + +struct usbdevfs_connectinfo { + unsigned int devnum; + unsigned char slow; +}; + +struct usbdevfs_conninfo_ex { + __u32 size; + __u32 busnum; + __u32 devnum; + __u32 speed; + __u8 num_ports; + __u8 ports[7]; +}; + +struct usbdevfs_iso_packet_desc { + unsigned int length; + unsigned int actual_length; + unsigned int status; +}; + +struct usbdevfs_urb { + unsigned char type; + unsigned char endpoint; + int status; + unsigned int flags; + void *buffer; + int buffer_length; + int actual_length; + int start_frame; + union { + int number_of_packets; + unsigned int stream_id; + }; + int error_count; + unsigned int signr; + void *usercontext; + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; +}; + +struct usbdevfs_ioctl { + int ifno; + int ioctl_code; + void *data; +}; + +struct usbdevfs_disconnect_claim { + unsigned int interface; + unsigned int flags; + char driver[256]; +}; + +struct usbdevfs_streams { + unsigned int num_streams; + unsigned int num_eps; + unsigned char eps[0]; +}; + +struct usb_dev_state { + struct list_head list; + struct usb_device *dev; + struct file *file; + spinlock_t lock; + struct list_head async_pending; + struct list_head async_completed; + struct list_head memory_list; + wait_queue_head_t wait; + wait_queue_head_t wait_for_resume; + unsigned int discsignr; + struct pid *disc_pid; + const struct cred *cred; + sigval_t disccontext; + long unsigned int ifclaimed; + u32 disabled_bulk_eps; + long unsigned int interface_allowed_mask; + int not_yet_resumed; + bool suspend_allowed; + bool privileges_dropped; +}; + +struct usb_memory { + struct list_head memlist; + int vma_use_count; + int urb_use_count; + u32 size; + void *mem; + dma_addr_t dma_handle; + long unsigned int vm_start; + struct usb_dev_state *ps; +}; + +struct async { + struct list_head asynclist; + struct usb_dev_state *ps; + struct pid *pid; + const struct cred *cred; + unsigned int signr; + unsigned int ifnum; + void *userbuffer; + void *userurb; + sigval_t userurb_sigval; + struct urb *urb; + struct usb_memory *usbm; + unsigned int mem_usage; + int status; + u8 bulk_addr; + u8 bulk_status; +}; + +enum snoop_when { + SUBMIT = 0, + COMPLETE___2 = 1, +}; + +struct i2c_smbus_alert { + struct work_struct alert; + struct i2c_client *ara; +}; + +struct alert_data { + short unsigned int addr; + enum i2c_alert_protocol type; + unsigned int data; +}; + +struct power_supply_hwmon { + struct power_supply *psy; + long unsigned int *props; +}; + +struct hwmon_type_attr_list { + const u32 *attrs; + size_t n_attrs; +}; + +struct rss_req_info { + struct ethnl_req_info base; + u32 rss_context; +}; + +struct rss_reply_data { + struct ethnl_reply_data base; + bool no_key_fields; + u32 indir_size; + u32 hkey_size; + u32 hfunc; + u32 input_xfrm; + 
u32 *indir_table; + u8 *hkey; +}; + +struct rss_nl_dump_ctx { + long unsigned int ifindex; + long unsigned int ctx_idx; + unsigned int match_ifindex; + unsigned int start_ctx; +}; + +struct nft_rhash { + struct rhashtable ht; + struct delayed_work gc_work; +}; + +struct nft_rhash_elem { + struct nft_elem_priv priv; + struct rhash_head node; + struct nft_set_ext ext; +}; + +struct nft_rhash_cmp_arg { + const struct nft_set *set; + const u32 *key; + u8 genmask; + long: 32; + u64 tstamp; +}; + +struct nft_rhash_ctx { + const struct nft_ctx ctx; + const struct nft_set *set; +}; + +struct nft_hash { + u32 seed; + u32 buckets; + struct hlist_head table[0]; +}; + +struct nft_hash_elem { + struct nft_elem_priv priv; + struct hlist_node node; + struct nft_set_ext ext; +}; + +enum { + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, +}; + +enum tsq_flags { + TSQF_THROTTLED = 1, + TSQF_QUEUED = 2, + TCPF_TSQ_DEFERRED = 4, + TCPF_WRITE_TIMER_DEFERRED = 8, + TCPF_DELACK_TIMER_DEFERRED = 16, + TCPF_MTU_REDUCED_DEFERRED = 32, + TCPF_ACK_DEFERRED = 64, +}; + +struct mptcp_out_options {}; + +struct tcp_out_options { + u16 options; + u16 mss; + u8 ws; + u8 num_sack_blocks; + u8 hash_size; + u8 bpf_opt_len; + __u8 *hash_location; + __u32 tsval; + __u32 tsecr; + struct tcp_fastopen_cookie *fastopen_cookie; + struct mptcp_out_options mptcp; +}; + +struct tsq_tasklet { + struct tasklet_struct tasklet; + struct list_head head; +}; + +struct tx_work { + struct delayed_work work; + struct sock *sk; +}; + +struct tls_rec; + +struct tls_sw_context_tx { + struct crypto_aead *aead_send; + struct crypto_wait async_wait; + struct tx_work tx_work; + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; + u8 async_capable: 1; + long unsigned int tx_bitmask; +}; + +enum { + TCP_BPF_IPV4 = 0, + TCP_BPF_IPV6 = 1, + TCP_BPF_NUM_PROTS = 2, +}; + +enum { + TCP_BPF_BASE = 0, + TCP_BPF_TX = 1, + TCP_BPF_RX = 2, + TCP_BPF_TXRX = 3, + TCP_BPF_NUM_CFGS = 4, +}; + +struct switchdev_brport { + struct net_device *dev; + const void *ctx; + struct notifier_block *atomic_nb; + struct notifier_block *blocking_nb; + bool tx_fwd_offload; +}; + +enum switchdev_notifier_type { + SWITCHDEV_FDB_ADD_TO_BRIDGE = 1, + SWITCHDEV_FDB_DEL_TO_BRIDGE = 2, + SWITCHDEV_FDB_ADD_TO_DEVICE = 3, + SWITCHDEV_FDB_DEL_TO_DEVICE = 4, + SWITCHDEV_FDB_OFFLOADED = 5, + SWITCHDEV_FDB_FLUSH_TO_BRIDGE = 6, + SWITCHDEV_PORT_OBJ_ADD = 7, + SWITCHDEV_PORT_OBJ_DEL = 8, + SWITCHDEV_PORT_ATTR_SET = 9, + SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE = 10, + SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE = 11, + SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE = 12, + SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE = 13, + SWITCHDEV_VXLAN_FDB_OFFLOADED = 14, + SWITCHDEV_BRPORT_OFFLOADED = 15, + SWITCHDEV_BRPORT_UNOFFLOADED = 16, + SWITCHDEV_BRPORT_REPLAY = 17, +}; + +struct switchdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; + const void *ctx; +}; + +struct switchdev_notifier_fdb_info { + struct switchdev_notifier_info info; + const unsigned char *addr; + u16 vid; + u8 added_by_user: 1; + u8 is_local: 1; + u8 locked: 1; + u8 offloaded: 1; +}; + +struct switchdev_notifier_brport_info { + struct switchdev_notifier_info info; + const struct switchdev_brport brport; +}; + +struct klist_waiter { + struct list_head list; + struct klist_node *node; + struct task_struct *process; + int woken; +}; + +union vdso_data_store { + struct vdso_data data[2]; + u8 page[4096]; +}; + +typedef struct { + rwlock_t *lock; +} class_write_lock_irq_t; + +struct 
ptrace_peeksiginfo_args { + __u64 off; + __u32 flags; + __s32 nr; +}; + +struct ptrace_syscall_info { + __u8 op; + __u8 pad[3]; + __u32 arch; + __u64 instruction_pointer; + __u64 stack_pointer; + union { + struct { + __u64 nr; + __u64 args[6]; + } entry; + struct { + __s64 rval; + __u8 is_error; + long: 32; + } exit; + struct { + __u64 nr; + __u64 args[6]; + __u32 ret_data; + long: 32; + } seccomp; + }; +}; + +struct ptrace_rseq_configuration { + __u64 rseq_abi_pointer; + __u32 rseq_abi_size; + __u32 signature; + __u32 flags; + __u32 pad; +}; + +typedef struct sigevent sigevent_t; + +enum { + TRACE_PIDS = 1, + TRACE_NO_PIDS = 2, +}; + +struct module_string { + struct list_head next; + struct module *module; + char *str; +}; + +enum { + FORMAT_HEADER = 1, + FORMAT_FIELD_SEPERATOR = 2, + FORMAT_PRINTFMT = 3, +}; + +struct boot_triggers { + const char *event; + char *trigger; +}; + +typedef u64 (*btf_bpf_task_storage_get_recur)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_get)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_delete_recur)(struct bpf_map *, struct task_struct *); + +typedef u64 (*btf_bpf_task_storage_delete)(struct bpf_map *, struct task_struct *); + +struct callchain_cpus_entries { + struct callback_head callback_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +struct vma_prepare { + struct vm_area_struct *vma; + struct vm_area_struct *adj_next; + struct file *file; + struct address_space *mapping; + struct anon_vma *anon_vma; + struct vm_area_struct *insert; + struct vm_area_struct *remove; + struct vm_area_struct *remove2; +}; + +struct vma_munmap_struct { + struct vma_iterator *vmi; + struct vm_area_struct *vma; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct list_head *uf; + long unsigned int start; + long unsigned int end; + long unsigned int unmap_start; + long unsigned int unmap_end; + int vma_count; + bool unlock; + bool clear_ptes; + long unsigned int nr_pages; + long unsigned int locked_vm; + long unsigned int nr_accounted; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int data_vm; +}; + +enum vma_merge_state { + VMA_MERGE_START = 0, + VMA_MERGE_ERROR_NOMEM = 1, + VMA_MERGE_NOMERGE = 2, + VMA_MERGE_SUCCESS = 3, +}; + +enum vma_merge_flags { + VMG_FLAG_DEFAULT = 0, + VMG_FLAG_JUST_EXPAND = 1, +}; + +struct vma_merge_struct { + struct mm_struct *mm; + struct vma_iterator *vmi; + long unsigned int pgoff; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct vm_area_struct *vma; + long unsigned int start; + long unsigned int end; + long unsigned int flags; + struct file *file; + struct anon_vma *anon_vma; + struct mempolicy *policy; + struct vm_userfaultfd_ctx uffd_ctx; + struct anon_vma_name *anon_name; + enum vma_merge_flags merge_flags; + enum vma_merge_state state; +}; + +struct mmap_state { + struct mm_struct *mm; + struct vma_iterator *vmi; + long unsigned int addr; + long unsigned int end; + long unsigned int pgoff; + long unsigned int pglen; + long unsigned int flags; + struct file *file; + long unsigned int charged; + bool retry_merge; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct vma_munmap_struct vms; + struct ma_state mas_detach; + struct maple_tree mt_detach; +}; + +typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); + +struct core_name { + char *corename; + int used; + int size; +}; + +typedef __kernel_mode_t mode_t; + 
+struct orlov_stats { + __u64 free_clusters; + __u32 free_inodes; + __u32 used_dirs; +}; + +enum { + AUTOFS_IOC_READY_CMD = 96, + AUTOFS_IOC_FAIL_CMD = 97, + AUTOFS_IOC_CATATONIC_CMD = 98, + AUTOFS_IOC_PROTOVER_CMD = 99, + AUTOFS_IOC_SETTIMEOUT_CMD = 100, + AUTOFS_IOC_EXPIRE_CMD = 101, +}; + +enum { + AUTOFS_IOC_EXPIRE_MULTI_CMD = 102, + AUTOFS_IOC_PROTOSUBVER_CMD = 103, + AUTOFS_IOC_ASKUMOUNT_CMD = 112, +}; + +struct btrfs_fiemap_entry { + u64 offset; + u64 phys; + u64 len; + u32 flags; + long: 32; +}; + +struct fiemap_cache { + struct btrfs_fiemap_entry *entries; + int entries_size; + int entries_pos; + long: 32; + u64 next_search_offset; + unsigned int extents_mapped; + long: 32; + u64 offset; + u64 phys; + u64 len; + u32 flags; + bool cached; +}; + +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; + __u8 statelen; + __u8 blocklen_bytes; + char cra_name[128]; + char backend_cra_name[128]; +}; + +struct drbg_state; + +struct drbg_state_ops { + int (*update)(struct drbg_state *, struct list_head *, int); + int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); + int (*crypto_init)(struct drbg_state *); + int (*crypto_fini)(struct drbg_state *); +}; + +enum drbg_seed_state { + DRBG_SEED_STATE_UNSEEDED = 0, + DRBG_SEED_STATE_PARTIAL = 1, + DRBG_SEED_STATE_FULL = 2, +}; + +struct drbg_state { + struct mutex drbg_mutex; + unsigned char *V; + unsigned char *Vbuf; + unsigned char *C; + unsigned char *Cbuf; + size_t reseed_ctr; + size_t reseed_threshold; + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; + struct crypto_skcipher *ctr_handle; + struct skcipher_request *ctr_req; + __u8 *outscratchpadbuf; + __u8 *outscratchpad; + struct crypto_wait ctr_wait; + struct scatterlist sg_in; + struct scatterlist sg_out; + enum drbg_seed_state seeded; + long unsigned int last_seed_time; + bool pr; + bool fips_primed; + unsigned char *prev; + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; +}; + +enum drbg_prefixes { + DRBG_PREFIX0 = 0, + DRBG_PREFIX1 = 1, + DRBG_PREFIX2 = 2, + DRBG_PREFIX3 = 3, +}; + +struct sdesc { + struct shash_desc shash; + char ctx[0]; +}; + +struct blk_queue_stats { + struct list_head callbacks; + spinlock_t lock; + int accounting; +}; + +enum dd_data_dir { + DD_READ = 0, + DD_WRITE = 1, +}; + +enum { + DD_DIR_COUNT = 2, +}; + +enum dd_prio { + DD_RT_PRIO = 0, + DD_BE_PRIO = 1, + DD_IDLE_PRIO = 2, + DD_PRIO_MAX = 2, +}; + +enum { + DD_PRIO_COUNT = 3, +}; + +struct io_stats_per_prio { + uint32_t inserted; + uint32_t merged; + uint32_t dispatched; + atomic_t completed; +}; + +struct dd_per_prio { + struct list_head dispatch; + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + sector_t latest_pos[2]; + struct io_stats_per_prio stats; +}; + +struct deadline_data { + struct dd_per_prio per_prio[3]; + enum dd_data_dir last_dir; + unsigned int batching; + unsigned int starved; + int fifo_expire[2]; + int fifo_batch; + int writes_starved; + int front_merges; + u32 async_depth; + int prio_aging_expire; + spinlock_t lock; + long: 32; +}; + +enum io_wq_type { + IO_WQ_BOUND = 0, + IO_WQ_UNBOUND = 1, +}; + +struct io_wq_acct { + unsigned int nr_workers; + unsigned int max_workers; + int index; + atomic_t nr_running; + raw_spinlock_t lock; + struct io_wq_work_list work_list; + long unsigned int flags; +}; + +struct io_wq { + 
long unsigned int state; + free_work_fn *free_work; + io_wq_work_fn *do_work; + struct io_wq_hash *hash; + atomic_t worker_refs; + struct completion worker_done; + struct hlist_node cpuhp_node; + struct task_struct *task; + struct io_wq_acct acct[2]; + raw_spinlock_t lock; + struct hlist_nulls_head free_list; + struct list_head all_list; + struct wait_queue_entry wait; + struct io_wq_work *hash_tail[32]; + cpumask_var_t cpu_mask; +}; + +enum { + IO_WORKER_F_UP = 0, + IO_WORKER_F_RUNNING = 1, + IO_WORKER_F_FREE = 2, + IO_WORKER_F_BOUND = 3, +}; + +enum { + IO_WQ_BIT_EXIT = 0, +}; + +enum { + IO_ACCT_STALLED_BIT = 0, +}; + +struct io_worker { + refcount_t ref; + int create_index; + long unsigned int flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + struct io_wq *wq; + struct io_wq_work *cur_work; + raw_spinlock_t lock; + struct completion ref_done; + long unsigned int create_state; + struct callback_head create_work; + int init_retries; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +enum { + IO_WQ_ACCT_BOUND = 0, + IO_WQ_ACCT_UNBOUND = 1, + IO_WQ_ACCT_NR = 2, +}; + +struct io_cb_cancel_data { + work_cancel_fn *fn; + void *data; + int nr_running; + int nr_pending; + bool cancel_all; +}; + +struct online_data { + unsigned int cpu; + bool online; +}; + +typedef struct { + BYTE maxTableLog; + BYTE tableType; + BYTE tableLog; + BYTE reserved; +} DTableDesc; + +typedef struct { + BYTE nbBits; + BYTE byte; +} HUF_DEltX1; + +typedef struct { + U32 rankVal[13]; + U32 rankStart[13]; + U32 statsWksp[218]; + BYTE symbols[256]; + BYTE huffWeight[256]; +} HUF_ReadDTableX1_Workspace; + +typedef struct { + U16 sequence; + BYTE nbBits; + BYTE length; +} HUF_DEltX2; + +typedef struct { + BYTE symbol; +} sortedSymbol_t; + +typedef U32 rankValCol_t[13]; + +typedef struct { + U32 rankVal[156]; + U32 rankStats[13]; + U32 rankStart0[15]; + sortedSymbol_t sortedSymbol[256]; + BYTE weightList[256]; + U32 calleeWksp[218]; +} HUF_ReadDTableX2_Workspace; + +typedef struct { + U32 tableTime; + U32 decode256Time; +} algo_time_t; + +struct emuframe { + mips_instruction emul; + mips_instruction badinst; +}; + +struct pci_filp_private { + enum pci_mmap_state mmap_state; + int write_combine; +}; + +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +struct clk_gated_fixed { + struct clk_gpio clk_gpio; + struct regulator *supply; + long unsigned int rate; +}; + +struct subsys_interface { + const char *name; + const struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *, struct subsys_interface *); + void (*remove_dev)(struct device *, struct subsys_interface *); +}; + +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +struct dev_pm_domain_attach_data { + const char * const *pd_names; + const u32 num_pd_names; + const u32 pd_flags; +}; + +struct dev_pm_domain_list { + struct device **pd_devs; + struct device_link **pd_links; + u32 *opp_tokens; + u32 num_pds; +}; + +struct opp_table; + +struct dev_pm_opp; + +typedef int (*config_regulators_t)(struct device *, struct dev_pm_opp *, struct dev_pm_opp *, struct regulator **, unsigned int); + +typedef int (*config_clks_t)(struct device *, struct opp_table *, struct dev_pm_opp *, void *, bool); + +struct dev_pm_opp_config { + const char * const *clk_names; + config_clks_t config_clks; + const char *prop_name; + config_regulators_t config_regulators; + const unsigned int *supported_hw; + unsigned int 
supported_hw_count; + const char * const *regulator_names; + struct device *required_dev; + unsigned int required_dev_index; +}; + +enum { + ACTION_FAIL = 0, + ACTION_REPREP = 1, + ACTION_DELAYED_REPREP = 2, + ACTION_RETRY = 3, + ACTION_DELAYED_RETRY = 4, +}; + +struct virtio_net_config { + __u8 mac[6]; + __virtio16 status; + __virtio16 max_virtqueue_pairs; + __virtio16 mtu; + __le32 speed; + __u8 duplex; + __u8 rss_max_key_size; + __le16 rss_max_indirection_table_length; + __le32 supported_hash_types; +}; + +struct virtio_net_hdr_v1 { + __u8 flags; + __u8 gso_type; + __virtio16 hdr_len; + __virtio16 gso_size; + union { + struct { + __virtio16 csum_start; + __virtio16 csum_offset; + }; + struct { + __virtio16 start; + __virtio16 offset; + } csum; + struct { + __le16 segments; + __le16 dup_acks; + } rsc; + }; + __virtio16 num_buffers; +}; + +struct virtio_net_hdr_v1_hash { + struct virtio_net_hdr_v1 hdr; + __le32 hash_value; + __le16 hash_report; + __le16 padding; +}; + +struct virtio_net_ctrl_hdr { + __u8 class; + __u8 cmd; +}; + +typedef __u8 virtio_net_ctrl_ack; + +struct virtio_net_ctrl_mac { + __virtio32 entries; + __u8 macs[0]; +}; + +struct virtio_net_ctrl_mq { + __virtio16 virtqueue_pairs; +}; + +struct virtio_net_ctrl_coal_tx { + __le32 tx_max_packets; + __le32 tx_usecs; +}; + +struct virtio_net_ctrl_coal_rx { + __le32 rx_max_packets; + __le32 rx_usecs; +}; + +struct virtio_net_ctrl_coal { + __le32 max_packets; + __le32 max_usecs; +}; + +struct virtio_net_ctrl_coal_vq { + __le16 vqn; + __le16 reserved; + struct virtio_net_ctrl_coal coal; +}; + +struct virtio_net_stats_capabilities { + __le64 supported_stats_types[1]; +}; + +struct virtio_net_ctrl_queue_stats { + struct { + __le16 vq_index; + __le16 reserved[3]; + __le64 types_bitmap[1]; + } stats[1]; +}; + +struct virtio_net_stats_reply_hdr { + __u8 type; + __u8 reserved; + __le16 vq_index; + __le16 reserved1; + __le16 size; +}; + +struct ewma_pkt_len { + long unsigned int internal; +}; + +struct virtnet_stat_desc { + char desc[32]; + size_t offset; + size_t qstat_offset; +}; + +struct virtnet_sq_free_stats { + u64 packets; + u64 bytes; + u64 napi_packets; + u64 napi_bytes; + u64 xsk; +}; + +struct virtnet_sq_stats { + struct u64_stats_sync syncp; + long: 32; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t xdp_tx; + u64_stats_t xdp_tx_drops; + u64_stats_t kicks; + u64_stats_t tx_timeouts; + u64_stats_t stop; + u64_stats_t wake; +}; + +struct virtnet_rq_stats { + struct u64_stats_sync syncp; + long: 32; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t drops; + u64_stats_t xdp_packets; + u64_stats_t xdp_tx; + u64_stats_t xdp_redirects; + u64_stats_t xdp_drops; + u64_stats_t kicks; +}; + +struct virtnet_interrupt_coalesce { + u32 max_packets; + u32 max_usecs; +}; + +struct virtnet_rq_dma { + dma_addr_t addr; + u32 ref; + u16 len; + u16 need_sync; +}; + +struct send_queue { + struct virtqueue *vq; + struct scatterlist sg[19]; + char name[16]; + long: 32; + struct virtnet_sq_stats stats; + struct virtnet_interrupt_coalesce intr_coal; + struct napi_struct napi; + bool reset; + struct xsk_buff_pool *xsk_pool; + dma_addr_t xsk_hdr_dma_addr; + long: 32; +}; + +struct receive_queue { + struct virtqueue *vq; + long: 32; + struct napi_struct napi; + struct bpf_prog *xdp_prog; + long: 32; + struct virtnet_rq_stats stats; + u16 calls; + bool dim_enabled; + struct mutex dim_lock; + long: 32; + struct dim dim; + u32 packets_in_napi; + struct virtnet_interrupt_coalesce intr_coal; + struct page *pages; + struct ewma_pkt_len 
mrg_avg_pkt_len; + struct page_frag alloc_frag; + struct scatterlist sg[19]; + unsigned int min_buf_len; + char name[16]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xdp_rxq_info xdp_rxq; + struct virtnet_rq_dma *last_dma; + struct xsk_buff_pool *xsk_pool; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xdp_rxq_info xsk_rxq_info; + struct xdp_buff **xsk_buffs; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct virtio_net_ctrl_rss { + u32 hash_types; + u16 indirection_table_mask; + u16 unclassified_queue; + u16 hash_cfg_reserved; + u16 max_tx_vq; + u8 hash_key_length; + u8 key[40]; + u16 *indirection_table; +}; + +struct control_buf { + struct virtio_net_ctrl_hdr hdr; + virtio_net_ctrl_ack status; +}; + +struct virtnet_info { + struct virtio_device *vdev; + struct virtqueue *cvq; + struct net_device *dev; + struct send_queue *sq; + struct receive_queue *rq; + unsigned int status; + u16 max_queue_pairs; + u16 curr_queue_pairs; + u16 xdp_queue_pairs; + bool xdp_enabled; + bool big_packets; + unsigned int big_packets_num_skbfrags; + bool mergeable_rx_bufs; + bool has_rss; + bool has_rss_hash_report; + u8 rss_key_size; + u16 rss_indir_table_size; + u32 rss_hash_types_supported; + u32 rss_hash_types_saved; + struct virtio_net_ctrl_rss rss; + bool has_cvq; + struct mutex cvq_lock; + bool any_header_sg; + u8 hdr_len; + struct delayed_work refill; + bool refill_enabled; + spinlock_t refill_lock; + struct work_struct config_work; + struct work_struct rx_mode_work; + bool rx_mode_work_enabled; + bool affinity_hint_set; + struct hlist_node node; + struct hlist_node node_dead; + struct control_buf *ctrl; + u8 duplex; + u32 speed; + bool rx_dim_enabled; + struct virtnet_interrupt_coalesce intr_coal_tx; + struct virtnet_interrupt_coalesce intr_coal_rx; + long unsigned int guest_offloads; + long unsigned int guest_offloads_capable; + struct failover *failover; + long: 32; + u64 device_stats_cap; +}; + +struct virtio_net_common_hdr { + union { + struct virtio_net_hdr hdr; + struct virtio_net_hdr_mrg_rxbuf mrg_hdr; + struct virtio_net_hdr_v1_hash hash_v1_hdr; + }; +}; + +enum virtnet_xmit_type { + VIRTNET_XMIT_TYPE_SKB = 0, + VIRTNET_XMIT_TYPE_SKB_ORPHAN = 1, + VIRTNET_XMIT_TYPE_XDP = 2, + VIRTNET_XMIT_TYPE_XSK = 3, +}; + +struct virtnet_stats_ctx { + bool to_qstat; + u32 desc_num[3]; + u64 bitmap[3]; + u32 size[3]; + u64 *data; +}; + +enum sanyo_state { + STATE_INACTIVE___8 = 0, + STATE_HEADER_SPACE___4 = 1, + STATE_BIT_PULSE___4 = 2, + STATE_BIT_SPACE___4 = 3, + STATE_TRAILER_PULSE___3 = 4, + STATE_TRAILER_SPACE___3 = 5, +}; + +struct dm_kobject_holder { + struct kobject kobj; + struct completion completion; +}; + +struct csum_state { + __wsum csum; + size_t 
off; +}; + +typedef int (*pp_nl_fill_cb)(struct sk_buff *, const struct page_pool *, const struct genl_info *); + +struct page_pool_dump_cb { + long unsigned int ifindex; + u32 pp_id; +}; + +struct linkstate_reply_data { + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + struct ethtool_link_ext_stats link_stats; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; + long: 32; +}; + +enum nfqnl_msg_types { + NFQNL_MSG_PACKET = 0, + NFQNL_MSG_VERDICT = 1, + NFQNL_MSG_CONFIG = 2, + NFQNL_MSG_VERDICT_BATCH = 3, + NFQNL_MSG_MAX = 4, +}; + +struct nfqnl_msg_packet_hdr { + __be32 packet_id; + __be16 hw_protocol; + __u8 hook; +} __attribute__((packed)); + +struct nfqnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfqnl_msg_packet_timestamp { + __be64 sec; + __be64 usec; +}; + +enum nfqnl_vlan_attr { + NFQA_VLAN_UNSPEC = 0, + NFQA_VLAN_PROTO = 1, + NFQA_VLAN_TCI = 2, + __NFQA_VLAN_MAX = 3, +}; + +enum nfqnl_attr_type { + NFQA_UNSPEC = 0, + NFQA_PACKET_HDR = 1, + NFQA_VERDICT_HDR = 2, + NFQA_MARK = 3, + NFQA_TIMESTAMP = 4, + NFQA_IFINDEX_INDEV = 5, + NFQA_IFINDEX_OUTDEV = 6, + NFQA_IFINDEX_PHYSINDEV = 7, + NFQA_IFINDEX_PHYSOUTDEV = 8, + NFQA_HWADDR = 9, + NFQA_PAYLOAD = 10, + NFQA_CT = 11, + NFQA_CT_INFO = 12, + NFQA_CAP_LEN = 13, + NFQA_SKB_INFO = 14, + NFQA_EXP = 15, + NFQA_UID = 16, + NFQA_GID = 17, + NFQA_SECCTX = 18, + NFQA_VLAN = 19, + NFQA_L2HDR = 20, + NFQA_PRIORITY = 21, + NFQA_CGROUP_CLASSID = 22, + __NFQA_MAX = 23, +}; + +struct nfqnl_msg_verdict_hdr { + __be32 verdict; + __be32 id; +}; + +enum nfqnl_msg_config_cmds { + NFQNL_CFG_CMD_NONE = 0, + NFQNL_CFG_CMD_BIND = 1, + NFQNL_CFG_CMD_UNBIND = 2, + NFQNL_CFG_CMD_PF_BIND = 3, + NFQNL_CFG_CMD_PF_UNBIND = 4, +}; + +struct nfqnl_msg_config_cmd { + __u8 command; + __u8 _pad; + __be16 pf; +}; + +enum nfqnl_config_mode { + NFQNL_COPY_NONE = 0, + NFQNL_COPY_META = 1, + NFQNL_COPY_PACKET = 2, +}; + +struct nfqnl_msg_config_params { + __be32 copy_range; + __u8 copy_mode; +} __attribute__((packed)); + +enum nfqnl_attr_config { + NFQA_CFG_UNSPEC = 0, + NFQA_CFG_CMD = 1, + NFQA_CFG_PARAMS = 2, + NFQA_CFG_QUEUE_MAXLEN = 3, + NFQA_CFG_MASK = 4, + NFQA_CFG_FLAGS = 5, + __NFQA_CFG_MAX = 6, +}; + +struct nfqnl_instance { + struct hlist_node hlist; + struct callback_head rcu; + u32 peer_portid; + unsigned int queue_maxlen; + unsigned int copy_range; + unsigned int queue_dropped; + unsigned int queue_user_dropped; + u_int16_t queue_num; + u_int8_t copy_mode; + u_int32_t flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t lock; + unsigned int queue_total; + unsigned int id_sequence; + struct list_head queue_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, long unsigned int); + +struct nfnl_queue_net { + spinlock_t instances_lock; + struct hlist_head instance_table[16]; +}; + +enum nft_bitwise_ops { + NFT_BITWISE_MASK_XOR = 0, + NFT_BITWISE_LSHIFT = 1, + NFT_BITWISE_RSHIFT = 2, + NFT_BITWISE_AND = 3, + 
NFT_BITWISE_OR = 4, + NFT_BITWISE_XOR = 5, +}; + +enum nft_bitwise_attributes { + NFTA_BITWISE_UNSPEC = 0, + NFTA_BITWISE_SREG = 1, + NFTA_BITWISE_DREG = 2, + NFTA_BITWISE_LEN = 3, + NFTA_BITWISE_MASK = 4, + NFTA_BITWISE_XOR = 5, + NFTA_BITWISE_OP = 6, + NFTA_BITWISE_DATA = 7, + NFTA_BITWISE_SREG2 = 8, + __NFTA_BITWISE_MAX = 9, +}; + +struct nft_bitwise { + u8 sreg; + u8 sreg2; + u8 dreg; + enum nft_bitwise_ops op: 8; + u8 len; + struct nft_data mask; + struct nft_data xor; + struct nft_data data; +}; + +struct ifaddrlblmsg { + __u8 ifal_family; + __u8 __ifal_reserved; + __u8 ifal_prefixlen; + __u8 ifal_flags; + __u32 ifal_index; + __u32 ifal_seq; +}; + +enum { + IFAL_ADDRESS = 1, + IFAL_LABEL = 2, + __IFAL_MAX = 3, +}; + +struct ip6addrlbl_entry { + struct in6_addr prefix; + int prefixlen; + int ifindex; + int addrtype; + u32 label; + struct hlist_node list; + struct callback_head rcu; +}; + +struct ip6addrlbl_init_table { + const struct in6_addr *prefix; + int prefixlen; + u32 label; +}; + +enum { + IOAM6_ATTR_UNSPEC = 0, + IOAM6_ATTR_NS_ID = 1, + IOAM6_ATTR_NS_DATA = 2, + IOAM6_ATTR_NS_DATA_WIDE = 3, + IOAM6_ATTR_SC_ID = 4, + IOAM6_ATTR_SC_DATA = 5, + IOAM6_ATTR_SC_NONE = 6, + IOAM6_ATTR_PAD = 7, + __IOAM6_ATTR_MAX = 8, +}; + +enum { + IOAM6_CMD_UNSPEC = 0, + IOAM6_CMD_ADD_NAMESPACE = 1, + IOAM6_CMD_DEL_NAMESPACE = 2, + IOAM6_CMD_DUMP_NAMESPACES = 3, + IOAM6_CMD_ADD_SCHEMA = 4, + IOAM6_CMD_DEL_SCHEMA = 5, + IOAM6_CMD_DUMP_SCHEMAS = 6, + IOAM6_CMD_NS_SET_SCHEMA = 7, + __IOAM6_CMD_MAX = 8, +}; + +enum ioam6_event_attr { + IOAM6_EVENT_ATTR_UNSPEC = 0, + IOAM6_EVENT_ATTR_TRACE_NAMESPACE = 1, + IOAM6_EVENT_ATTR_TRACE_NODELEN = 2, + IOAM6_EVENT_ATTR_TRACE_TYPE = 3, + IOAM6_EVENT_ATTR_TRACE_DATA = 4, + __IOAM6_EVENT_ATTR_MAX = 5, +}; + +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED = 1, +}; + +struct rtc_wkalrm { + unsigned char enabled; + unsigned char pending; + struct rtc_time time; +}; + +struct rtc_param { + __u64 param; + union { + __u64 uvalue; + __s64 svalue; + __u64 ptr; + }; + __u32 index; + __u32 __pad; +}; + +struct rtc_class_ops { + int (*ioctl)(struct device *, unsigned int, long unsigned int); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*alarm_irq_enable)(struct device *, unsigned int); + int (*read_offset)(struct device *, long int *); + int (*set_offset)(struct device *, long int); + int (*param_get)(struct device *, struct rtc_param *); + int (*param_set)(struct device *, struct rtc_param *); +}; + +struct rtc_device; + +struct rtc_timer { + struct timerqueue_node node; + ktime_t period; + void (*func)(struct rtc_device *); + struct rtc_device *rtc; + int enabled; + long: 32; +}; + +struct rtc_device { + struct device dev; + struct module *owner; + int id; + const struct rtc_class_ops *ops; + struct mutex ops_lock; + struct cdev char_dev; + long unsigned int flags; + long unsigned int irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + int irq_freq; + int max_user_freq; + struct timerqueue_head timerqueue; + long: 32; + struct rtc_timer aie_timer; + struct rtc_timer uie_rtctimer; + struct hrtimer pie_timer; + int pie_enabled; + struct work_struct irqwork; + long unsigned int set_offset_nsec; + long unsigned int features[1]; + long: 32; + time64_t range_min; + timeu64_t range_max; + 
timeu64_t alarm_offset_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; + long: 32; +}; + +struct trace_event_raw_alarmtimer_suspend { + struct trace_entry ent; + s64 expires; + unsigned char alarm_type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_alarm_class { + struct trace_entry ent; + void *alarm; + unsigned char alarm_type; + s64 expires; + s64 now; + char __data[0]; +}; + +struct trace_event_data_offsets_alarmtimer_suspend {}; + +struct trace_event_data_offsets_alarm_class {}; + +typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int); + +typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t); + +struct alarm_base { + spinlock_t lock; + struct timerqueue_head timerqueue; + ktime_t (*get_ktime)(); + void (*get_timespec)(struct timespec64 *); + clockid_t base_clockid; +}; + +struct syscall_trace_enter { + struct trace_entry ent; + int nr; + long unsigned int args[0]; +}; + +struct syscall_trace_exit { + struct trace_entry ent; + int nr; + long int ret; +}; + +struct syscall_tp_t { + struct trace_entry ent; + int syscall_nr; + long unsigned int ret; +}; + +struct syscall_tp_t___2 { + struct trace_entry ent; + int syscall_nr; + long unsigned int args[6]; + long: 32; +}; + +struct bpf_bloom_filter { + struct bpf_map map; + u32 bitset_mask; + u32 hash_seed; + u32 nr_hash_funcs; + long unsigned int bitset[0]; + long: 32; +}; + +struct bpf_netns_link { + struct bpf_link link; + enum bpf_attach_type type; + enum netns_bpf_attach_type netns_type; + struct net *net; + struct list_head node; + long: 32; +}; + +struct trace_event_raw_vm_unmapped_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int total_vm; + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; + char __data[0]; +}; + +struct trace_event_raw_vma_mas_szero { + struct trace_entry ent; + struct maple_tree *mt; + long unsigned int start; + long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_vma_store { + struct trace_entry ent; + struct maple_tree *mt; + struct vm_area_struct *vma; + long unsigned int vm_start; + long unsigned int vm_end; + char __data[0]; +}; + +struct trace_event_raw_exit_mmap { + struct trace_entry ent; + struct mm_struct *mm; + struct maple_tree *mt; + char __data[0]; +}; + +struct trace_event_data_offsets_vm_unmapped_area {}; + +struct trace_event_data_offsets_vma_mas_szero {}; + +struct trace_event_data_offsets_vma_store {}; + +struct trace_event_data_offsets_exit_mmap {}; + +typedef void (*btf_trace_vm_unmapped_area)(void *, long unsigned int, struct vm_unmapped_area_info *); + +typedef void (*btf_trace_vma_mas_szero)(void *, struct maple_tree *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_vma_store)(void *, struct maple_tree *, struct vm_area_struct *); + +typedef void (*btf_trace_exit_mmap)(void *, struct mm_struct *); + +enum execmem_range_flags { + EXECMEM_KASAN_SHADOW = 1, + EXECMEM_ROX_CACHE = 2, +}; + +struct execmem_range { + long unsigned int start; + long unsigned int end; + long unsigned int fallback_start; + long unsigned int fallback_end; + pgprot_t pgprot; + unsigned int alignment; + enum execmem_range_flags flags; +}; + +struct execmem_info { + struct execmem_range 
ranges[5]; +}; + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct fsuuid2 { + __u8 len; + __u8 uuid[16]; +}; + +struct fs_sysfs_path { + __u8 len; + __u8 name[128]; +}; + +struct space_resv { + __s16 l_type; + __s16 l_whence; + long: 32; + __s64 l_start; + __s64 l_len; + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; +}; + +struct fname { + __u32 hash; + __u32 minor_hash; + struct rb_node rb_hash; + struct fname *next; + __u32 inode; + __u8 name_len; + __u8 file_type; + char name[0]; +}; + +struct ext4_xattr_header { + __le32 h_magic; + __le32 h_refcount; + __le32 h_blocks; + __le32 h_hash; + __le32 h_checksum; + __u32 h_reserved[3]; +}; + +struct ext4_xattr_block_find { + struct ext4_xattr_search s; + struct buffer_head *bh; +}; + +struct fuse_setxattr_in { + uint32_t size; + uint32_t flags; + uint32_t setxattr_flags; + uint32_t padding; +}; + +struct fuse_getxattr_in { + uint32_t size; + uint32_t padding; +}; + +struct fuse_getxattr_out { + uint32_t size; + uint32_t padding; +}; + +struct tree_mod_root { + u64 logical; + u8 level; + long: 32; +}; + +struct tree_mod_elem { + struct rb_node node; + long: 32; + u64 logical; + u64 seq; + enum btrfs_mod_log_op op; + int slot; + u64 generation; + struct btrfs_disk_key key; + long: 32; + u64 blockptr; + struct { + int dst_slot; + int nr_items; + } move; + struct tree_mod_root old_root; +}; + +struct lzo_ctx { + void *lzo_comp_mem; +}; + +struct trace_event_raw_block_buffer { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + size_t size; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_block_rq_requeue { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + short unsigned int ioprio; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_block_rq_completion { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + int error; + short unsigned int ioprio; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + unsigned int bytes; + short unsigned int ioprio; + char rwbs[8]; + char comm[16]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_bio_complete { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_bio { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_block_plug { + struct trace_entry ent; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_unplug { + struct trace_entry ent; + int nr_rq; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_split { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + sector_t new_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_remap { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_rq_remap { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + dev_t 
old_dev; + sector_t old_sector; + unsigned int nr_bios; + char rwbs[8]; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_block_buffer {}; + +struct trace_event_data_offsets_block_rq_requeue { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_rq_completion { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_rq { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_bio_complete {}; + +struct trace_event_data_offsets_block_bio {}; + +struct trace_event_data_offsets_block_plug {}; + +struct trace_event_data_offsets_block_unplug {}; + +struct trace_event_data_offsets_block_split {}; + +struct trace_event_data_offsets_block_bio_remap {}; + +struct trace_event_data_offsets_block_rq_remap {}; + +typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_rq_requeue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_complete)(void *, struct request *, blk_status_t, unsigned int); + +typedef void (*btf_trace_block_rq_error)(void *, struct request *, blk_status_t, unsigned int); + +typedef void (*btf_trace_block_rq_insert)(void *, struct request *); + +typedef void (*btf_trace_block_rq_issue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_merge)(void *, struct request *); + +typedef void (*btf_trace_block_io_start)(void *, struct request *); + +typedef void (*btf_trace_block_io_done)(void *, struct request *); + +typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *); + +typedef void (*btf_trace_block_bio_bounce)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_backmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_frontmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_queue)(void *, struct bio *); + +typedef void (*btf_trace_block_getrq)(void *, struct bio *); + +typedef void (*btf_trace_block_plug)(void *, struct request_queue *); + +typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool); + +typedef void (*btf_trace_block_split)(void *, struct bio *, unsigned int); + +typedef void (*btf_trace_block_bio_remap)(void *, struct bio *, dev_t, sector_t); + +typedef void (*btf_trace_block_rq_remap)(void *, struct request *, dev_t, sector_t); + +enum { + IORING_MEM_REGION_REG_WAIT_ARG = 1, +}; + +struct io_uring_mem_region_reg { + __u64 region_uptr; + __u64 flags; + __u64 __resv[2]; +}; + +struct io_uring_probe_op { + __u8 op; + __u8 resv; + __u16 flags; + __u32 resv2; +}; + +struct io_uring_probe { + __u8 last_op; + __u8 ops_len; + __u16 resv; + __u32 resv2[3]; + struct io_uring_probe_op ops[0]; +}; + +struct io_uring_restriction { + __u16 opcode; + union { + __u8 register_op; + __u8 sqe_op; + __u8 sqe_flags; + }; + __u8 resv; + __u32 resv2[3]; +}; + +struct io_uring_clock_register { + __u32 clockid; + __u32 __resv[3]; +}; + +enum io_uring_register_restriction_op { + IORING_RESTRICTION_REGISTER_OP = 0, + IORING_RESTRICTION_SQE_OP = 1, + IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2, + IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3, + IORING_RESTRICTION_LAST = 4, +}; + +struct io_ring_ctx_rings { + short unsigned int n_ring_pages; + short unsigned int n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; + struct io_uring_sqe *sq_sqes; + struct io_rings *rings; +}; + +typedef ZSTD_customMem zstd_custom_mem; + +typedef 
ZSTD_DCtx zstd_dctx; + +typedef ZSTD_DDict zstd_ddict; + +typedef ZSTD_frameHeader zstd_frame_header; + +struct sg_pool { + size_t size; + char *name; + struct kmem_cache *slab; + mempool_t *pool; +}; + +struct cache_type_info { + const char *size_prop; + const char *line_size_props[2]; + const char *nr_sets_prop; +}; + +typedef struct sg_io_hdr sg_io_hdr_t; + +struct compat_sg_io_hdr { + compat_int_t interface_id; + compat_int_t dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + compat_uint_t dxfer_len; + compat_uint_t dxferp; + compat_uptr_t cmdp; + compat_uptr_t sbp; + compat_uint_t timeout; + compat_uint_t flags; + compat_int_t pack_id; + compat_uptr_t usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + compat_int_t resid; + compat_uint_t duration; + compat_uint_t info; +}; + +struct sg_scsi_id { + int host_no; + int channel; + int scsi_id; + int lun; + int scsi_type; + short int h_cmd_per_lun; + short int d_queue_depth; + int unused[2]; +}; + +typedef struct sg_scsi_id sg_scsi_id_t; + +struct sg_req_info { + char req_state; + char orphan; + char sg_io_owned; + char problem; + int pack_id; + void *usr_ptr; + unsigned int duration; + int unused; +}; + +typedef struct sg_req_info sg_req_info_t; + +struct sg_header { + int pack_len; + int reply_len; + int pack_id; + int result; + unsigned int twelve_byte: 1; + unsigned int target_status: 5; + unsigned int host_status: 8; + unsigned int driver_status: 8; + unsigned int other_flags: 10; + unsigned char sense_buffer[16]; +}; + +struct sg_scatter_hold { + short unsigned int k_use_sg; + unsigned int sglist_len; + unsigned int bufflen; + struct page **pages; + int page_order; + char dio_in_use; + unsigned char cmd_opcode; +}; + +typedef struct sg_scatter_hold Sg_scatter_hold; + +struct sg_fd; + +struct sg_request { + struct list_head entry; + struct sg_fd *parentfp; + Sg_scatter_hold data; + sg_io_hdr_t header; + unsigned char sense_b[96]; + char res_used; + char orphan; + char sg_io_owned; + char done; + struct request *rq; + struct bio *bio; + struct execute_work ew; +}; + +typedef struct sg_request Sg_request; + +struct sg_device; + +struct sg_fd { + struct list_head sfd_siblings; + struct sg_device *parentdp; + wait_queue_head_t read_wait; + rwlock_t rq_list_lock; + struct mutex f_mutex; + int timeout; + int timeout_user; + Sg_scatter_hold reserve; + struct list_head rq_list; + struct fasync_struct *async_qp; + Sg_request req_arr[16]; + char force_packid; + char cmd_q; + unsigned char next_cmd_len; + char keep_orphan; + char mmap_called; + char res_in_use; + struct kref f_ref; + struct execute_work ew; +}; + +struct sg_device { + struct scsi_device *device; + wait_queue_head_t open_wait; + struct mutex open_rel_lock; + int sg_tablesize; + u32 index; + struct list_head sfds; + rwlock_t sfd_lock; + atomic_t detaching; + bool exclude; + int open_cnt; + char sgdebug; + char name[32]; + struct cdev *cdev; + struct kref d_ref; +}; + +typedef struct sg_fd Sg_fd; + +typedef struct sg_device Sg_device; + +struct phy_fixup { + struct list_head list; + char bus_id[64]; + u32 phy_uid; + u32 phy_uid_mask; + int (*run)(struct phy_device *); +}; + +struct sfp_eeprom_base { + u8 phys_id; + u8 phys_ext_id; + u8 connector; + u8 e10g_base_er: 1; + u8 e10g_base_lrm: 1; + u8 e10g_base_lr: 1; + u8 e10g_base_sr: 1; + u8 if_1x_sx: 1; + u8 if_1x_lx: 1; + u8 
if_1x_copper_active: 1; + u8 if_1x_copper_passive: 1; + u8 escon_mmf_1310_led: 1; + u8 escon_smf_1310_laser: 1; + u8 sonet_oc192_short_reach: 1; + u8 sonet_reach_bit1: 1; + u8 sonet_reach_bit2: 1; + u8 sonet_oc48_long_reach: 1; + u8 sonet_oc48_intermediate_reach: 1; + u8 sonet_oc48_short_reach: 1; + u8 unallocated_5_7: 1; + u8 sonet_oc12_smf_long_reach: 1; + u8 sonet_oc12_smf_intermediate_reach: 1; + u8 sonet_oc12_short_reach: 1; + u8 unallocated_5_3: 1; + u8 sonet_oc3_smf_long_reach: 1; + u8 sonet_oc3_smf_intermediate_reach: 1; + u8 sonet_oc3_short_reach: 1; + u8 e_base_px: 1; + u8 e_base_bx10: 1; + u8 e100_base_fx: 1; + u8 e100_base_lx: 1; + u8 e1000_base_t: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_sx: 1; + u8 fc_ll_v: 1; + u8 fc_ll_s: 1; + u8 fc_ll_i: 1; + u8 fc_ll_l: 1; + u8 fc_ll_m: 1; + u8 fc_tech_sa: 1; + u8 fc_tech_lc: 1; + u8 fc_tech_electrical_inter_enclosure: 1; + u8 fc_tech_electrical_intra_enclosure: 1; + u8 fc_tech_sn: 1; + u8 fc_tech_sl: 1; + u8 fc_tech_ll: 1; + u8 sfp_ct_active: 1; + u8 sfp_ct_passive: 1; + u8 unallocated_8_1: 1; + u8 unallocated_8_0: 1; + u8 fc_media_tw: 1; + u8 fc_media_tp: 1; + u8 fc_media_mi: 1; + u8 fc_media_tv: 1; + u8 fc_media_m6: 1; + u8 fc_media_m5: 1; + u8 unallocated_9_1: 1; + u8 fc_media_sm: 1; + u8 fc_speed_1200: 1; + u8 fc_speed_800: 1; + u8 fc_speed_1600: 1; + u8 fc_speed_400: 1; + u8 fc_speed_3200: 1; + u8 fc_speed_200: 1; + u8 unallocated_10_1: 1; + u8 fc_speed_100: 1; + u8 encoding; + u8 br_nominal; + u8 rate_id; + u8 link_len[6]; + char vendor_name[16]; + u8 extended_cc; + char vendor_oui[3]; + char vendor_pn[16]; + char vendor_rev[4]; + union { + __be16 optical_wavelength; + __be16 cable_compliance; + struct { + u8 reserved60_2: 6; + u8 fc_pi_4_app_h: 1; + u8 sff8431_app_e: 1; + u8 reserved61: 8; + } passive; + struct { + u8 reserved60_4: 4; + u8 fc_pi_4_lim: 1; + u8 sff8431_lim: 1; + u8 fc_pi_4_app_h: 1; + u8 sff8431_app_e: 1; + u8 reserved61: 8; + } active; + }; + u8 reserved62; + u8 cc_base; +}; + +struct sfp_eeprom_ext { + __be16 options; + u8 br_max; + u8 br_min; + char vendor_sn[16]; + char datecode[8]; + u8 diagmon; + u8 enhopts; + u8 sff8472_compliance; + u8 cc_ext; +}; + +struct sfp_eeprom_id { + struct sfp_eeprom_base base; + struct sfp_eeprom_ext ext; +}; + +struct sfp_upstream_ops { + void (*attach)(void *, struct sfp_bus *); + void (*detach)(void *, struct sfp_bus *); + int (*module_insert)(void *, const struct sfp_eeprom_id *); + void (*module_remove)(void *); + int (*module_start)(void *); + void (*module_stop)(void *); + void (*link_down)(void *); + void (*link_up)(void *); + int (*connect_phy)(void *, struct phy_device *); + void (*disconnect_phy)(void *, struct phy_device *); +}; + +enum usb_led_event { + USB_LED_EVENT_HOST = 0, + USB_LED_EVENT_GADGET = 1, +}; + +struct thermal_hwmon_device { + char type[20]; + struct device *device; + int count; + struct list_head tz_list; + struct list_head node; +}; + +struct thermal_hwmon_attr { + struct device_attribute attr; + char name[16]; +}; + +struct thermal_hwmon_temp { + struct list_head hwmon_node; + struct thermal_zone_device *tz; + struct thermal_hwmon_attr temp_input; + struct thermal_hwmon_attr temp_crit; +}; + +struct nvmem_cell_lookup { + const char *nvmem_name; + const char *cell_name; + const char *dev_id; + const char *con_id; + struct list_head node; +}; + +enum { + NVMEM_ADD = 1, + NVMEM_REMOVE = 2, + NVMEM_CELL_ADD = 3, + NVMEM_CELL_REMOVE = 4, + NVMEM_LAYOUT_ADD = 5, + NVMEM_LAYOUT_REMOVE = 6, +}; + +struct nvmem_config { + struct 
device *dev; + const char *name; + int id; + struct module *owner; + const struct nvmem_cell_info *cells; + int ncells; + bool add_legacy_fixed_of_cells; + void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + enum nvmem_type type; + bool read_only; + bool root_only; + bool ignore_wp; + struct nvmem_layout *layout; + struct device_node *of_node; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; + bool compat; + struct device *base_dev; +}; + +struct nvmem_cell_table { + const char *nvmem_name; + const struct nvmem_cell_info *cells; + size_t ncells; + struct list_head node; +}; + +struct nvmem_cell_entry { + const char *name; + int offset; + size_t raw_len; + int bytes; + int bit_offset; + int nbits; + nvmem_cell_post_process_t read_post_process; + void *priv; + struct device_node *np; + struct nvmem_device *nvmem; + struct list_head node; +}; + +struct nvmem_cell { + struct nvmem_cell_entry *entry; + const char *id; + int index; +}; + +struct flow_dissector_key_ports_range { + union { + struct flow_dissector_key_ports tp; + struct { + struct flow_dissector_key_ports tp_min; + struct flow_dissector_key_ports tp_max; + }; + }; +}; + +struct flow_match_meta { + struct flow_dissector_key_meta *key; + struct flow_dissector_key_meta *mask; +}; + +struct flow_match_arp { + struct flow_dissector_key_arp *key; + struct flow_dissector_key_arp *mask; +}; + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key; + struct flow_dissector_key_ipv4_addrs *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key; + struct flow_dissector_key_ipv6_addrs *mask; +}; + +struct flow_match_ip { + struct flow_dissector_key_ip *key; + struct flow_dissector_key_ip *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key; + struct flow_dissector_key_ports *mask; +}; + +struct flow_match_ports_range { + struct flow_dissector_key_ports_range *key; + struct flow_dissector_key_ports_range *mask; +}; + +struct flow_match_icmp { + struct flow_dissector_key_icmp *key; + struct flow_dissector_key_icmp *mask; +}; + +struct flow_match_tcp { + struct flow_dissector_key_tcp *key; + struct flow_dissector_key_tcp *mask; +}; + +struct flow_match_ipsec { + struct flow_dissector_key_ipsec *key; + struct flow_dissector_key_ipsec *mask; +}; + +struct flow_match_mpls { + struct flow_dissector_key_mpls *key; + struct flow_dissector_key_mpls *mask; +}; + +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key; + struct flow_dissector_key_keyid *mask; +}; + +struct flow_match_enc_opts { + struct flow_dissector_key_enc_opts *key; + struct flow_dissector_key_enc_opts *mask; +}; + +struct flow_match_ct { + struct flow_dissector_key_ct *key; + struct flow_dissector_key_ct *mask; +}; + +struct flow_match_pppoe { + struct flow_dissector_key_pppoe *key; + struct flow_dissector_key_pppoe *mask; +}; + +struct flow_match_l2tpv3 { + struct flow_dissector_key_l2tpv3 *key; + struct flow_dissector_key_l2tpv3 *mask; +}; + +enum offload_act_command { + FLOW_ACT_REPLACE = 0, + FLOW_ACT_DESTROY = 1, + FLOW_ACT_STATS = 2, +}; + +struct flow_offload_action { + struct netlink_ext_ack *extack; + enum offload_act_command command; + enum flow_action_id id; + u32 index; + long unsigned int cookie; + long: 32; + struct flow_stats stats; + struct flow_action action; +}; + +typedef int flow_indr_block_bind_cb_t(struct 
net_device *, struct Qdisc *, void *, enum tc_setup_type, void *, void *, void (*)(struct flow_block_cb *)); + +struct flow_indr_dev { + struct list_head list; + flow_indr_block_bind_cb_t *cb; + void *cb_priv; + refcount_t refcnt; +}; + +struct flow_indir_dev_info { + void *data; + struct net_device *dev; + struct Qdisc *sch; + enum tc_setup_type type; + void (*cleanup)(struct flow_block_cb *); + struct list_head list; + enum flow_block_command command; + enum flow_block_binder_type binder_type; + struct list_head *cb_list; +}; + +enum { + ETHTOOL_A_CABLE_PAIR_A = 0, + ETHTOOL_A_CABLE_PAIR_B = 1, + ETHTOOL_A_CABLE_PAIR_C = 2, + ETHTOOL_A_CABLE_PAIR_D = 3, +}; + +enum { + ETHTOOL_A_CABLE_INF_SRC_UNSPEC = 0, + ETHTOOL_A_CABLE_INF_SRC_TDR = 1, + ETHTOOL_A_CABLE_INF_SRC_ALCD = 2, +}; + +enum { + ETHTOOL_A_CABLE_RESULT_UNSPEC = 0, + ETHTOOL_A_CABLE_RESULT_PAIR = 1, + ETHTOOL_A_CABLE_RESULT_CODE = 2, + ETHTOOL_A_CABLE_RESULT_SRC = 3, + __ETHTOOL_A_CABLE_RESULT_CNT = 4, + ETHTOOL_A_CABLE_RESULT_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0, + ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1, + ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2, + ETHTOOL_A_CABLE_FAULT_LENGTH_SRC = 3, + __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 4, + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2, +}; + +enum { + ETHTOOL_A_CABLE_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_NEST_RESULT = 1, + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2, + __ETHTOOL_A_CABLE_NEST_CNT = 3, + ETHTOOL_A_CABLE_NEST_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2, + ETHTOOL_A_CABLE_TEST_NTF_NEST = 3, + __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4, + ETHTOOL_A_CABLE_TEST_NTF_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2, + ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3, + ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4, + __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5, + ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4, +}; + +enum { + ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0, + ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1, + ETHTOOL_A_CABLE_AMPLITUDE_mV = 2, + __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3, + ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_PULSE_UNSPEC = 0, + ETHTOOL_A_CABLE_PULSE_mV = 1, + __ETHTOOL_A_CABLE_PULSE_CNT = 2, + ETHTOOL_A_CABLE_PULSE_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_STEP_UNSPEC = 0, + ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1, + ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2, + ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3, + __ETHTOOL_A_CABLE_STEP_CNT = 4, + ETHTOOL_A_CABLE_STEP_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TDR_NEST_STEP = 1, + ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2, + ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3, + __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4, + ETHTOOL_A_CABLE_TDR_NEST_MAX = 3, +}; + +enum nft_lookup_flags { + NFT_LOOKUP_F_INV = 1, +}; + +enum nft_lookup_attributes { + NFTA_LOOKUP_UNSPEC = 0, + NFTA_LOOKUP_SET = 1, + NFTA_LOOKUP_SREG = 2, + NFTA_LOOKUP_DREG = 3, + NFTA_LOOKUP_SET_ID = 4, + NFTA_LOOKUP_FLAGS = 5, + __NFTA_LOOKUP_MAX = 6, +}; + +struct nft_lookup { + struct nft_set *set; + u8 sreg; + u8 dreg; + bool dreg_set; + bool invert; + struct nft_set_binding binding; +}; + +struct ipq { + struct inet_frag_queue q; + u8 ecn; + u16 max_df_size; + int iif; + unsigned int rid; + struct inet_peer *peer; +}; + +struct 
icmp_ext_hdr { + __u8 version: 4; + __u8 reserved1: 4; + __u8 reserved2; + __sum16 checksum; +}; + +struct icmp_extobj_hdr { + __be16 length; + __u8 class_num; + __u8 class_type; +}; + +struct icmp_ext_echo_ctype3_hdr { + __be16 afi; + __u8 addrlen; + __u8 reserved; +}; + +struct icmp_ext_echo_iio { + struct icmp_extobj_hdr extobj_hdr; + union { + char name[16]; + __be32 ifindex; + struct { + struct icmp_ext_echo_ctype3_hdr ctype3_hdr; + union { + __be32 ipv4_addr; + struct in6_addr ipv6_addr; + } ip_addr; + } addr; + } ident; +}; + +struct trace_event_raw_icmp_send { + struct trace_entry ent; + const void *skbaddr; + int type; + int code; + __u8 saddr[4]; + __u8 daddr[4]; + __u16 sport; + __u16 dport; + short unsigned int ulen; + char __data[0]; +}; + +struct trace_event_data_offsets_icmp_send {}; + +typedef void (*btf_trace_icmp_send)(void *, const struct sk_buff *, int, int); + +struct icmp_bxm { + struct sk_buff *skb; + int offset; + int data_len; + struct { + struct icmphdr icmph; + __be32 times[3]; + } data; + int head_len; + struct ip_options_data replyopts; +}; + +struct icmp_control { + enum skb_drop_reason (*handler)(struct sk_buff *); + short int error; +}; + +enum ftlb_flags { + FTLB_EN = 1, + FTLB_SET_PROB = 2, +}; + +enum uclamp_id { + UCLAMP_MIN = 0, + UCLAMP_MAX = 1, + UCLAMP_CNT = 2, +}; + +enum cpu_idle_type { + __CPU_NOT_IDLE = 0, + CPU_IDLE = 1, + CPU_NEWLY_IDLE = 2, + CPU_MAX_IDLE_TYPES = 3, +}; + +struct energy_env { + long unsigned int task_busy_time; + long unsigned int pd_busy_time; + long unsigned int cpu_cap; + long unsigned int pd_cap; +}; + +enum fbq_type { + regular = 0, + remote = 1, + all = 2, +}; + +enum group_type { + group_has_spare = 0, + group_fully_busy = 1, + group_misfit_task = 2, + group_smt_balance = 3, + group_asym_packing = 4, + group_imbalanced = 5, + group_overloaded = 6, +}; + +enum migration_type { + migrate_load = 0, + migrate_util = 1, + migrate_task = 2, + migrate_misfit = 3, +}; + +struct lb_env { + struct sched_domain *sd; + struct rq *src_rq; + int src_cpu; + int dst_cpu; + struct rq *dst_rq; + struct cpumask *dst_grpmask; + int new_dst_cpu; + enum cpu_idle_type idle; + long int imbalance; + struct cpumask *cpus; + unsigned int flags; + unsigned int loop; + unsigned int loop_break; + unsigned int loop_max; + enum fbq_type fbq_type; + enum migration_type migration_type; + struct list_head tasks; +}; + +struct sg_lb_stats { + long unsigned int avg_load; + long unsigned int group_load; + long unsigned int group_capacity; + long unsigned int group_util; + long unsigned int group_runnable; + unsigned int sum_nr_running; + unsigned int sum_h_nr_running; + unsigned int idle_cpus; + unsigned int group_weight; + enum group_type group_type; + unsigned int group_asym_packing; + unsigned int group_smt_balance; + long unsigned int group_misfit_task_load; +}; + +struct sd_lb_stats { + struct sched_group *busiest; + struct sched_group *local; + long unsigned int total_load; + long unsigned int total_capacity; + long unsigned int avg_load; + unsigned int prefer_sibling; + struct sg_lb_stats busiest_stat; + struct sg_lb_stats local_stat; +}; + +enum { + TASKSTATS_TYPE_UNSPEC = 0, + TASKSTATS_TYPE_PID = 1, + TASKSTATS_TYPE_TGID = 2, + TASKSTATS_TYPE_STATS = 3, + TASKSTATS_TYPE_AGGR_PID = 4, + TASKSTATS_TYPE_AGGR_TGID = 5, + TASKSTATS_TYPE_NULL = 6, + __TASKSTATS_TYPE_MAX = 7, +}; + +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID = 1, + TASKSTATS_CMD_ATTR_TGID = 2, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, + 
TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, + __TASKSTATS_CMD_ATTR_MAX = 5, +}; + +enum { + CGROUPSTATS_CMD_UNSPEC = 3, + CGROUPSTATS_CMD_GET = 4, + CGROUPSTATS_CMD_NEW = 5, + __CGROUPSTATS_CMD_MAX = 6, +}; + +enum { + CGROUPSTATS_TYPE_UNSPEC = 0, + CGROUPSTATS_TYPE_CGROUP_STATS = 1, + __CGROUPSTATS_TYPE_MAX = 2, +}; + +enum { + CGROUPSTATS_CMD_ATTR_UNSPEC = 0, + CGROUPSTATS_CMD_ATTR_FD = 1, + __CGROUPSTATS_CMD_ATTR_MAX = 2, +}; + +struct listener { + struct list_head list; + pid_t pid; + char valid; +}; + +struct listener_list { + struct rw_semaphore sem; + struct list_head list; +}; + +enum actions { + REGISTER = 0, + DEREGISTER = 1, + CPU_DONT_CARE = 2, +}; + +enum bpf_lru_list_type { + BPF_LRU_LIST_T_ACTIVE = 0, + BPF_LRU_LIST_T_INACTIVE = 1, + BPF_LRU_LIST_T_FREE = 2, + BPF_LRU_LOCAL_LIST_T_FREE = 3, + BPF_LRU_LOCAL_LIST_T_PENDING = 4, +}; + +struct bpf_queue_stack { + struct bpf_map map; + raw_spinlock_t lock; + u32 head; + u32 tail; + u32 size; + char elements[0]; +}; + +struct trace_event_raw_test_pages_isolated { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int fin_pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_test_pages_isolated {}; + +typedef void (*btf_trace_test_pages_isolated)(void *, long unsigned int, long unsigned int, long unsigned int); + +struct wb_writeback_work { + long int nr_pages; + struct super_block *sb; + enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages: 1; + unsigned int for_kupdate: 1; + unsigned int range_cyclic: 1; + unsigned int for_background: 1; + unsigned int for_sync: 1; + unsigned int auto_free: 1; + enum wb_reason reason; + struct list_head list; + struct wb_completion *done; +}; + +struct trace_event_raw_writeback_folio_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_writeback_dirty_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_inode_foreign_history { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t cgroup_ino; + unsigned int history; + char __data[0]; +}; + +struct trace_event_raw_inode_switch_wbs { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t old_cgroup_ino; + ino_t new_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_track_foreign_dirty { + struct trace_entry ent; + char name[32]; + u64 bdi_id; + ino_t ino; + unsigned int memcg_id; + ino_t cgroup_ino; + ino_t page_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_flush_foreign { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + unsigned int frn_bdi_id; + unsigned int frn_memcg_id; + char __data[0]; +}; + +struct trace_event_raw_writeback_write_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + int sync_mode; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_work_class { + struct trace_entry ent; + char name[32]; + long int nr_pages; + dev_t sb_dev; + int sync_mode; + int for_kupdate; + int range_cyclic; + int for_background; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_pages_written { + struct trace_entry ent; + long int pages; + char __data[0]; +}; + +struct trace_event_raw_writeback_class { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + char __data[0]; +}; + +struct 
trace_event_raw_writeback_bdi_register { + struct trace_entry ent; + char name[32]; + char __data[0]; +}; + +struct trace_event_raw_wbc_class { + struct trace_entry ent; + char name[32]; + long int nr_to_write; + long int pages_skipped; + int sync_mode; + int for_kupdate; + int for_background; + int for_reclaim; + int range_cyclic; + long int range_start; + long int range_end; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_queue_io { + struct trace_entry ent; + char name[32]; + long unsigned int older; + long int age; + int moved; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_global_dirty_state { + struct trace_entry ent; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int background_thresh; + long unsigned int dirty_thresh; + long unsigned int dirty_limit; + long unsigned int nr_dirtied; + long unsigned int nr_written; + char __data[0]; +}; + +struct trace_event_raw_bdi_dirty_ratelimit { + struct trace_entry ent; + char bdi[32]; + long unsigned int write_bw; + long unsigned int avg_write_bw; + long unsigned int dirty_rate; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + long unsigned int balanced_dirty_ratelimit; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_balance_dirty_pages { + struct trace_entry ent; + char bdi[32]; + long unsigned int limit; + long unsigned int setpoint; + long unsigned int dirty; + long unsigned int bdi_setpoint; + long unsigned int bdi_dirty; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + unsigned int dirtied; + unsigned int dirtied_pause; + long unsigned int paused; + long int pause; + long unsigned int period; + long int think; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_sb_inodes_requeue { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_single_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + long unsigned int writeback_index; + long int nr_to_write; + long unsigned int wrote; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_inode_template { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int state; + __u16 mode; + long unsigned int dirtied_when; + char __data[0]; +}; + +struct trace_event_data_offsets_writeback_folio_template {}; + +struct trace_event_data_offsets_writeback_dirty_inode_template {}; + +struct trace_event_data_offsets_inode_foreign_history {}; + +struct trace_event_data_offsets_inode_switch_wbs {}; + +struct trace_event_data_offsets_track_foreign_dirty {}; + +struct trace_event_data_offsets_flush_foreign {}; + +struct trace_event_data_offsets_writeback_write_inode_template {}; + +struct trace_event_data_offsets_writeback_work_class {}; + +struct trace_event_data_offsets_writeback_pages_written {}; + +struct trace_event_data_offsets_writeback_class {}; + +struct trace_event_data_offsets_writeback_bdi_register {}; + +struct trace_event_data_offsets_wbc_class {}; + +struct trace_event_data_offsets_writeback_queue_io {}; + +struct trace_event_data_offsets_global_dirty_state {}; + +struct trace_event_data_offsets_bdi_dirty_ratelimit {}; + +struct trace_event_data_offsets_balance_dirty_pages {}; + +struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; + 
+struct trace_event_data_offsets_writeback_single_inode_template {}; + +struct trace_event_data_offsets_writeback_inode_template {}; + +typedef void (*btf_trace_writeback_dirty_folio)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_folio_wait_writeback)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, struct writeback_control *, unsigned int); + +typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, struct bdi_writeback *); + +typedef void (*btf_trace_track_foreign_dirty)(void *, struct folio *, struct bdi_writeback *); + +typedef void (*btf_trace_flush_foreign)(void *, struct bdi_writeback *, unsigned int, unsigned int); + +typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_queue)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_pages_written)(void *, long int); + +typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *); + +typedef void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *); + +typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, struct backing_dev_info *); + +typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, struct wb_writeback_work *, long unsigned int, int); + +typedef void (*btf_trace_global_dirty_state)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long int, long unsigned int); + +typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *); + +typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *); + +typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *); + +typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *); + +typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *); + +typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *); + +struct inode_switch_wbs_context { + struct rcu_work work; + struct 
bdi_writeback *new_wb; + struct inode *inodes[0]; +}; + +struct ext4_system_zone { + struct rb_node node; + long: 32; + ext4_fsblk_t start_blk; + unsigned int count; + u32 ino; +}; + +struct trace_event_raw_jbd2_checkpoint { + struct trace_entry ent; + dev_t dev; + int result; + char __data[0]; +}; + +struct trace_event_raw_jbd2_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + tid_t transaction; + char __data[0]; +}; + +struct trace_event_raw_jbd2_end_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + tid_t transaction; + tid_t head; + char __data[0]; +}; + +struct trace_event_raw_jbd2_submit_inode_data { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_start_class { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_extend { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int buffer_credits; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int interval; + int sync; + int requested_blocks; + int dirtied_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_run_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + long unsigned int wait; + long unsigned int request_delay; + long unsigned int running; + long unsigned int locked; + long unsigned int flushing; + long unsigned int logging; + __u32 handle_count; + __u32 blocks; + __u32 blocks_logged; + char __data[0]; +}; + +struct trace_event_raw_jbd2_checkpoint_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + long unsigned int chp_time; + __u32 forced_to_close; + __u32 written; + __u32 dropped; + char __data[0]; +}; + +struct trace_event_raw_jbd2_update_log_tail { + struct trace_entry ent; + dev_t dev; + tid_t tail_sequence; + tid_t first_tid; + long unsigned int block_nr; + long unsigned int freed; + char __data[0]; +}; + +struct trace_event_raw_jbd2_write_superblock { + struct trace_entry ent; + dev_t dev; + blk_opf_t write_flags; + char __data[0]; +}; + +struct trace_event_raw_jbd2_lock_buffer_stall { + struct trace_entry ent; + dev_t dev; + long unsigned int stall_ms; + char __data[0]; +}; + +struct trace_event_raw_jbd2_journal_shrink { + struct trace_entry ent; + dev_t dev; + long unsigned int nr_to_scan; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_jbd2_shrink_scan_exit { + struct trace_entry ent; + dev_t dev; + long unsigned int nr_to_scan; + long unsigned int nr_shrunk; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_jbd2_shrink_checkpoint_list { + struct trace_entry ent; + dev_t dev; + tid_t first_tid; + tid_t tid; + tid_t last_tid; + long unsigned int nr_freed; + tid_t next_tid; + char __data[0]; +}; + +struct trace_event_data_offsets_jbd2_checkpoint {}; + +struct trace_event_data_offsets_jbd2_commit {}; + +struct trace_event_data_offsets_jbd2_end_commit {}; + +struct trace_event_data_offsets_jbd2_submit_inode_data {}; + +struct trace_event_data_offsets_jbd2_handle_start_class {}; + +struct trace_event_data_offsets_jbd2_handle_extend {}; + +struct trace_event_data_offsets_jbd2_handle_stats {}; + +struct trace_event_data_offsets_jbd2_run_stats {}; + +struct trace_event_data_offsets_jbd2_checkpoint_stats {}; + +struct 
trace_event_data_offsets_jbd2_update_log_tail {}; + +struct trace_event_data_offsets_jbd2_write_superblock {}; + +struct trace_event_data_offsets_jbd2_lock_buffer_stall {}; + +struct trace_event_data_offsets_jbd2_journal_shrink {}; + +struct trace_event_data_offsets_jbd2_shrink_scan_exit {}; + +struct trace_event_data_offsets_jbd2_shrink_checkpoint_list {}; + +typedef void (*btf_trace_jbd2_checkpoint)(void *, journal_t *, int); + +typedef void (*btf_trace_jbd2_start_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_locking)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_flushing)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_logging)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_drop_transaction)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_end_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_submit_inode_data)(void *, struct inode *); + +typedef void (*btf_trace_jbd2_handle_start)(void *, dev_t, tid_t, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_restart)(void *, dev_t, tid_t, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_extend)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int); + +typedef void (*btf_trace_jbd2_handle_stats)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int, int, int); + +typedef void (*btf_trace_jbd2_run_stats)(void *, dev_t, tid_t, struct transaction_run_stats_s *); + +typedef void (*btf_trace_jbd2_checkpoint_stats)(void *, dev_t, tid_t, struct transaction_chp_stats_s *); + +typedef void (*btf_trace_jbd2_update_log_tail)(void *, journal_t *, tid_t, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_write_superblock)(void *, journal_t *, blk_opf_t); + +typedef void (*btf_trace_jbd2_lock_buffer_stall)(void *, dev_t, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_count)(void *, journal_t *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_scan_enter)(void *, journal_t *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_scan_exit)(void *, journal_t *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_checkpoint_list)(void *, journal_t *, tid_t, tid_t, tid_t, long unsigned int, tid_t); + +struct jbd2_stats_proc_session { + journal_t *journal; + struct transaction_stats_s *stats; + int start; + int max; +}; + +struct btrfs_fid { + u64 objectid; + u64 root_objectid; + u32 gen; + u64 parent_objectid; + u32 parent_gen; + u64 parent_root_objectid; +}; + +struct keyctl_dh_params { + union { + __s32 private; + __s32 priv; + }; + __s32 prime; + __s32 base; +}; + +struct keyctl_kdf_params { + char *hashname; + char *otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP = 2, + IOPRIO_WHO_USER = 3, +}; + +struct io_poll_update { + struct file *file; + long: 32; + u64 old_user_data; + u64 new_user_data; + __poll_t events; + bool update_events; + bool update_user_data; +}; + +struct io_poll_table { + struct poll_table_struct pt; + struct io_kiocb *req; + int nr_entries; + int error; + bool owning; + __poll_t result_mask; +}; + +enum { + IOU_POLL_DONE = 0, + IOU_POLL_NO_ACTION = 1, + IOU_POLL_REMOVE_POLL_USE_RES = 2, + IOU_POLL_REISSUE = 3, + IOU_POLL_REQUEUE = 4, +}; + +typedef ZSTD_CCtx zstd_cctx; + +typedef ZSTD_CDict zstd_cdict; + 
+struct simple_pm_bus { + struct clk_bulk_data *clks; + int num_clks; +}; + +struct pci_slot_attribute { + struct attribute attr; + ssize_t (*show)(struct pci_slot *, char *); + ssize_t (*store)(struct pci_slot *, const char *, size_t); +}; + +struct clk_div_table { + unsigned int val; + unsigned int div; +}; + +struct clk_divider { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u16 flags; + const struct clk_div_table *table; + spinlock_t *lock; +}; + +struct unimapdesc { + short unsigned int entry_ct; + struct unipair *entries; +}; + +struct vt_stat { + short unsigned int v_active; + short unsigned int v_signal; + short unsigned int v_state; +}; + +struct vt_sizes { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_scrollsize; +}; + +struct vt_consize { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_vlin; + short unsigned int v_clin; + short unsigned int v_vcol; + short unsigned int v_ccol; +}; + +struct vt_event { + unsigned int event; + unsigned int oldev; + unsigned int newev; + unsigned int pad[4]; +}; + +struct vt_setactivate { + unsigned int console; + struct vt_mode mode; +}; + +struct vt_event_wait { + struct list_head list; + struct vt_event event; + int done; +}; + +struct nvme_user_io { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; + long: 32; +}; + +struct nvme_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result; +}; + +struct nvme_passthru_cmd64 { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + union { + __u32 data_len; + __u32 vec_cnt; + }; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; + __u64 result; +}; + +struct nvme_uring_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; +}; + +enum { + NVME_IOCTL_VEC = 1, + NVME_IOCTL_PARTITION = 2, +}; + +struct nvme_uring_data { + __u64 metadata; + __u64 addr; + __u32 data_len; + __u32 metadata_len; + __u32 timeout_ms; + long: 32; +}; + +struct nvme_uring_cmd_pdu { + struct request *req; + struct bio *bio; + u64 result; + int status; + long: 32; +}; + +enum phy_state_work { + PHY_STATE_WORK_NONE = 0, + PHY_STATE_WORK_ANEG = 1, + PHY_STATE_WORK_SUSPEND = 2, +}; + +struct netdev_lag_lower_state_info { + u8 link_up: 1; + u8 tx_enabled: 1; +}; + +struct net_failover_info { + struct net_device *primary_dev; + struct net_device *standby_dev; + struct rtnl_link_stats64 primary_stats; + struct rtnl_link_stats64 standby_stats; + struct rtnl_link_stats64 failover_stats; + spinlock_t stats_lock; + long: 32; +}; + +enum sharp_state { + STATE_INACTIVE___9 = 0, + STATE_BIT_PULSE___5 = 1, + STATE_BIT_SPACE___5 = 2, + STATE_TRAILER_PULSE___4 = 3, + STATE_ECHO_SPACE = 4, + STATE_TRAILER_SPACE___4 = 5, +}; + +struct of_timer_irq { + int irq; + int index; + const char *name; + long unsigned 
int flags; + irq_handler_t handler; +}; + +struct of_timer_base { + void *base; + const char *name; + int index; +}; + +struct of_timer_clk { + struct clk *clk; + const char *name; + int index; + long unsigned int rate; + long unsigned int period; +}; + +struct timer_of { + unsigned int flags; + struct device_node *np; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct clock_event_device clkevt; + struct of_timer_base of_base; + struct of_timer_irq of_irq; + struct of_timer_clk of_clk; + void *private_data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct of_endpoint { + unsigned int port; + unsigned int id; + const struct device_node *local_node; +}; + +struct supplier_bindings { + struct device_node * (*parse_prop)(struct device_node *, const char *, int); + struct device_node * (*get_con_dev)(struct device_node *); + bool optional; + u8 fwlink_flags; +}; + +struct ndt_stats { + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; + __u64 ndts_table_fulls; +}; + +enum { + NDTPA_UNSPEC = 0, + NDTPA_IFINDEX = 1, + NDTPA_REFCNT = 2, + NDTPA_REACHABLE_TIME = 3, + NDTPA_BASE_REACHABLE_TIME = 4, + NDTPA_RETRANS_TIME = 5, + NDTPA_GC_STALETIME = 6, + NDTPA_DELAY_PROBE_TIME = 7, + NDTPA_QUEUE_LEN = 8, + NDTPA_APP_PROBES = 9, + NDTPA_UCAST_PROBES = 10, + NDTPA_MCAST_PROBES = 11, + NDTPA_ANYCAST_DELAY = 12, + NDTPA_PROXY_DELAY = 13, + NDTPA_PROXY_QLEN = 14, + NDTPA_LOCKTIME = 15, + NDTPA_QUEUE_LENBYTES = 16, + NDTPA_MCAST_REPROBES = 17, + NDTPA_PAD = 18, + NDTPA_INTERVAL_PROBE_TIME_MS = 19, + __NDTPA_MAX = 20, +}; + +struct ndtmsg { + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config { + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; + __u32 ndtc_last_rand; + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +enum { + NDTA_UNSPEC = 0, + NDTA_NAME = 1, + NDTA_THRESH1 = 2, + NDTA_THRESH2 = 3, + NDTA_THRESH3 = 4, + NDTA_CONFIG = 5, + NDTA_PARMS = 6, + NDTA_STATS = 7, + NDTA_GC_INTERVAL = 8, + NDTA_PAD = 9, + __NDTA_MAX = 10, +}; + +struct neigh_dump_filter { + int master_idx; + int dev_idx; +}; + +struct neigh_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table neigh_vars[21]; +}; + +struct privflags_reply_data { + struct ethnl_reply_data base; + const char (*priv_flag_names)[32]; + unsigned int n_priv_flags; + u32 priv_flags; +}; + +enum nf_ct_ecache_state { + NFCT_ECACHE_DESTROY_FAIL = 0, + NFCT_ECACHE_DESTROY_SENT = 1, +}; + +struct nf_ct_timeout { + __u16 l3num; + const struct nf_conntrack_l4proto *l4proto; + char data[0]; +}; + +struct nf_conn_timeout { + struct nf_ct_timeout *timeout; +}; + +struct conntrack_gc_work { + struct delayed_work dwork; + u32 next_bucket; + u32 avg_timeout; + u32 count; + u32 start_time; + bool exiting; + bool early_drop; +}; + 
+enum nft_nat_types { + NFT_NAT_SNAT = 0, + NFT_NAT_DNAT = 1, +}; + +enum nft_nat_attributes { + NFTA_NAT_UNSPEC = 0, + NFTA_NAT_TYPE = 1, + NFTA_NAT_FAMILY = 2, + NFTA_NAT_REG_ADDR_MIN = 3, + NFTA_NAT_REG_ADDR_MAX = 4, + NFTA_NAT_REG_PROTO_MIN = 5, + NFTA_NAT_REG_PROTO_MAX = 6, + NFTA_NAT_FLAGS = 7, + __NFTA_NAT_MAX = 8, +}; + +struct nft_nat { + u8 sreg_addr_min; + u8 sreg_addr_max; + u8 sreg_proto_min; + u8 sreg_proto_max; + enum nf_nat_manip_type type: 8; + u8 family; + u16 flags; +}; + +struct icmp6_filter { + __u32 data[8]; +}; + +struct raw6_sock { + struct inet_sock inet; + __u32 checksum; + __u32 offset; + struct icmp6_filter filter; + __u32 ip6mr_table; + struct ipv6_pinfo inet6; +}; + +struct raw6_frag_vec { + struct msghdr *msg; + int hlen; + char c[4]; +}; + +struct ip_tunnel_prl { + __be32 addr; + __u16 flags; + __u16 __reserved; + __u32 datalen; + __u32 __reserved2; +}; + +struct sit_net { + struct ip_tunnel *tunnels_r_l[16]; + struct ip_tunnel *tunnels_r[16]; + struct ip_tunnel *tunnels_l[16]; + struct ip_tunnel *tunnels_wc[1]; + struct ip_tunnel **tunnels[4]; + struct net_device *fb_tunnel_dev; +}; + +typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, long unsigned int), long int (*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *)); + +struct compress_format { + unsigned char magic[2]; + const char *name; + decompress_fn decompressor; +}; + +struct cpio_data { + void *data; + size_t size; + char name[18]; +}; + +enum cpio_fields { + C_MAGIC = 0, + C_INO = 1, + C_MODE = 2, + C_UID = 3, + C_GID = 4, + C_NLINK = 5, + C_MTIME = 6, + C_FILESIZE = 7, + C_MAJ = 8, + C_MIN = 9, + C_RMAJ = 10, + C_RMIN = 11, + C_NAMESIZE = 12, + C_CHKSUM = 13, + C_NFIELDS = 14, +}; + +struct maple_metadata { + unsigned char end; + unsigned char gap; +}; + +struct maple_pnode; + +struct maple_range_64 { + struct maple_pnode *parent; + long unsigned int pivot[31]; + union { + void *slot[32]; + struct { + void *pad[31]; + struct maple_metadata meta; + }; + }; +}; + +struct maple_arange_64 { + struct maple_pnode *parent; + long unsigned int pivot[20]; + void *slot[21]; + long unsigned int gap[21]; + struct maple_metadata meta; +}; + +struct maple_topiary { + struct maple_pnode *parent; + struct maple_enode *next; +}; + +enum maple_type { + maple_dense = 0, + maple_leaf_64 = 1, + maple_range_64 = 2, + maple_arange_64 = 3, +}; + +struct maple_node { + union { + struct { + struct maple_pnode *parent; + void *slot[63]; + }; + struct { + void *pad; + struct callback_head rcu; + struct maple_enode *piv_parent; + unsigned char parent_slot; + enum maple_type type; + unsigned char slot_len; + unsigned int ma_flags; + }; + struct maple_range_64 mr64; + struct maple_arange_64 ma64; + struct maple_alloc alloc; + }; +}; + +struct ma_topiary { + struct maple_enode *head; + struct maple_enode *tail; + struct maple_tree *mtree; +}; + +struct ma_wr_state { + struct ma_state *mas; + struct maple_node *node; + long unsigned int r_min; + long unsigned int r_max; + enum maple_type type; + unsigned char offset_end; + long unsigned int *pivots; + long unsigned int end_piv; + void **slots; + void *entry; + void *content; +}; + +struct trace_event_raw_ma_op { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_read { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long 
unsigned int index; + long unsigned int last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_write { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + long unsigned int piv; + void *val; + void *node; + char __data[0]; +}; + +struct trace_event_data_offsets_ma_op {}; + +struct trace_event_data_offsets_ma_read {}; + +struct trace_event_data_offsets_ma_write {}; + +typedef void (*btf_trace_ma_op)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_read)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_write)(void *, const char *, struct ma_state *, long unsigned int, void *); + +struct maple_big_node { + long unsigned int pivot[65]; + union { + struct maple_enode *slot[66]; + struct { + long unsigned int padding[43]; + long unsigned int gap[43]; + }; + }; + unsigned char b_end; + enum maple_type type; +}; + +struct maple_subtree_state { + struct ma_state *orig_l; + struct ma_state *orig_r; + struct ma_state *l; + struct ma_state *m; + struct ma_state *r; + struct ma_topiary *free; + struct ma_topiary *destroy; + struct maple_big_node *bn; +}; + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute pop +#endif + +#endif /* __VMLINUX_H__ */ diff --git a/selftest/struct-ops/dep/include/arch/powerpc/vmlinux-v6.13-rc1-g353b2a7de2a3.h b/selftest/struct-ops/dep/include/arch/powerpc/vmlinux-v6.13-rc1-g353b2a7de2a3.h new file mode 100644 index 00000000..8aabfe68 --- /dev/null +++ b/selftest/struct-ops/dep/include/arch/powerpc/vmlinux-v6.13-rc1-g353b2a7de2a3.h @@ -0,0 +1,102312 @@ +#ifndef __VMLINUX_H__ +#define __VMLINUX_H__ + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record) +#endif + +typedef unsigned char __u8; + +typedef short unsigned int __u16; + +typedef int __s32; + +typedef unsigned int __u32; + +typedef long long int __s64; + +typedef long long unsigned int __u64; + +typedef __u8 u8; + +typedef __u16 u16; + +typedef __s32 s32; + +typedef __u32 u32; + +typedef __s64 s64; + +typedef __u64 u64; + +typedef long int __kernel_long_t; + +typedef int __kernel_pid_t; + +typedef unsigned int __kernel_uid32_t; + +typedef unsigned int __kernel_size_t; + +typedef long long int __kernel_time64_t; + +typedef __kernel_long_t __kernel_clock_t; + +typedef int __kernel_timer_t; + +typedef int __kernel_clockid_t; + +typedef __kernel_pid_t pid_t; + +typedef __kernel_clockid_t clockid_t; + +typedef _Bool bool; + +typedef __kernel_size_t size_t; + +typedef s64 ktime_t; + +typedef struct { + int counter; +} atomic_t; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +struct user_pt_regs { + long unsigned int gpr[32]; + long unsigned int nip; + long unsigned int msr; + long unsigned int orig_gpr3; + long unsigned int ctr; + long unsigned int link; + long unsigned int xer; + long unsigned int ccr; + long unsigned int mq; + long unsigned int trap; + long unsigned int dar; + long unsigned int dsisr; + long unsigned int result; +}; + +struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + long unsigned int gpr[32]; + long unsigned int nip; + long unsigned int msr; + long unsigned int orig_gpr3; + long unsigned int ctr; + long unsigned int link; + 
long unsigned int xer; + long unsigned int ccr; + long unsigned int mq; + long unsigned int trap; + union { + long unsigned int dar; + long unsigned int dear; + }; + union { + long unsigned int dsisr; + long unsigned int esr; + }; + long unsigned int result; + }; + }; + union { + struct { + union { + long unsigned int kuap; + }; + }; + long unsigned int __pad[4]; + }; +}; + +enum { + CPU_FTRS_POSSIBLE = 662561096, +}; + +struct thread_info { + int preempt_count; + unsigned int cpu; + long unsigned int local_flags; + unsigned char slb_preload_nr; + unsigned char slb_preload_tail; + u32 slb_preload_esid[16]; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct refcount_struct { + atomic_t refs; +}; + +typedef struct refcount_struct refcount_t; + +struct llist_node { + struct llist_node *next; +}; + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; +}; + +struct load_weight { + long unsigned int weight; + u32 inv_weight; +}; + +struct rb_node { + long unsigned int __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + long unsigned int load_avg; + long unsigned int runnable_avg; + long unsigned int util_avg; + unsigned int util_est; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct cfs_rq; + +struct sched_entity { + struct load_weight load; + struct rb_node run_node; + long: 32; + u64 deadline; + u64 min_vruntime; + u64 min_slice; + struct list_head group_node; + unsigned char on_rq; + unsigned char sched_delayed; + unsigned char rel_deadline; + unsigned char custom_slice; + long: 32; + u64 exec_start; + u64 sum_exec_runtime; + u64 prev_sum_exec_runtime; + u64 vruntime; + s64 vlag; + u64 slice; + u64 nr_migrations; + int depth; + struct sched_entity *parent; + struct cfs_rq *cfs_rq; + struct cfs_rq *my_q; + long unsigned int runnable_weight; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_avg avg; +}; + +struct sched_rt_entity { + struct list_head run_list; + long unsigned int timeout; + long unsigned int watchdog_stamp; + unsigned int time_slice; + short unsigned int on_rq; + short unsigned int on_list; + struct sched_rt_entity *back; +}; + +struct timerqueue_node { + struct rb_node node; + long: 32; + ktime_t expires; +}; + +enum hrtimer_restart { + HRTIMER_NORESTART = 0, + HRTIMER_RESTART = 1, +}; + +struct hrtimer_clock_base; + +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; + long: 32; +}; + +struct sched_dl_entity; + +typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); + +struct task_struct; + +typedef struct task_struct * (*dl_server_pick_f)(struct sched_dl_entity *); + +struct rq; + +struct sched_dl_entity { + struct rb_node rb_node; + long: 32; + u64 dl_runtime; + u64 dl_deadline; + u64 dl_period; + u64 dl_bw; + u64 dl_density; + s64 runtime; + u64 deadline; + unsigned int flags; + unsigned int dl_throttled: 1; + unsigned int dl_yielded: 1; + unsigned int dl_non_contending: 1; + unsigned int dl_overrun: 1; + unsigned int dl_server: 1; + unsigned int dl_defer: 1; + unsigned int dl_defer_armed: 1; + unsigned int dl_defer_running: 1; + struct 
hrtimer dl_timer; + struct hrtimer inactive_timer; + struct rq *rq; + dl_server_has_tasks_f server_has_tasks; + dl_server_pick_f server_pick_task; + struct sched_dl_entity *pi_se; +}; + +struct scx_dsq_list_node { + struct list_head node; + u32 flags; + u32 priv; +}; + +typedef atomic_t atomic_long_t; + +struct scx_dispatch_q; + +struct cgroup; + +struct sched_ext_entity { + struct scx_dispatch_q *dsq; + struct scx_dsq_list_node dsq_list; + struct rb_node dsq_priq; + u32 dsq_seq; + u32 dsq_flags; + u32 flags; + u32 weight; + s32 sticky_cpu; + s32 holding_cpu; + u32 kf_mask; + struct task_struct *kf_tasks[2]; + atomic_long_t ops_state; + struct list_head runnable_node; + long unsigned int runnable_at; + long: 32; + u64 ddsp_dsq_id; + u64 ddsp_enq_flags; + u64 slice; + u64 dsq_vtime; + bool disallow; + struct cgroup *cgrp_moving_from; + struct list_head tasks_node; +}; + +struct sched_statistics {}; + +struct cpumask { + long unsigned int bits[128]; +}; + +typedef struct cpumask cpumask_t; + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; + u8 need_mb; + } b; + u32 s; +}; + +struct sched_info { + long unsigned int pcount; + long: 32; + long long unsigned int run_delay; + long long unsigned int last_arrival; + long long unsigned int last_queued; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + +struct __kernel_timespec; + +struct old_timespec32; + +struct pollfd; + +struct restart_block { + long unsigned int arch_data; + long int (*fn)(struct restart_block *); + union { + struct { + u32 *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 *uaddr2; + long: 32; + } futex; + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec *rmtp; + struct old_timespec32 *compat_rmtp; + }; + long: 32; + u64 expires; + } nanosleep; + struct { + struct pollfd *ufds; + int nfds; + int has_timeout; + long unsigned int tv_sec; + long unsigned int tv_nsec; + } poll; + }; +}; + +typedef struct { + volatile unsigned int slock; +} arch_spinlock_t; + +struct raw_spinlock { + arch_spinlock_t raw_lock; +}; + +typedef struct raw_spinlock raw_spinlock_t; + +struct prev_cputime { + u64 utime; + u64 stime; + raw_spinlock_t lock; + long: 32; +}; + +struct rb_root { + struct rb_node *rb_node; +}; + +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +struct posix_cputimers { + struct posix_cputimer_base bases[3]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +struct sem_undo_list; + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +struct sysv_shm { + struct list_head shm_clist; +}; + +typedef struct { + long unsigned int sig[2]; +} sigset_t; + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +struct seccomp_filter; + +struct seccomp { + int mode; + atomic_t filter_count; + struct seccomp_filter *filter; +}; + +struct syscall_user_dispatch {}; + +struct spinlock { + union { + struct raw_spinlock rlock; + }; +}; + +typedef struct spinlock spinlock_t; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_io_accounting { + u64 rchar; + u64 wchar; + u64 syscr; + u64 syscw; + u64 read_bytes; + u64 write_bytes; + u64 cancelled_write_bytes; +}; + +typedef struct { + long 
unsigned int bits[1]; +} nodemask_t; + +struct seqcount { + unsigned int sequence; +}; + +typedef struct seqcount seqcount_t; + +struct seqcount_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_spinlock seqcount_spinlock_t; + +struct optimistic_spin_queue { + atomic_t tail; +}; + +struct mutex { + atomic_long_t owner; + raw_spinlock_t wait_lock; + struct optimistic_spin_queue osq; + struct list_head wait_list; +}; + +struct tlbflush_unmap_batch {}; + +struct page; + +struct page_frag { + struct page *page; + __u16 offset; + __u16 size; +}; + +struct kmap_ctrl {}; + +struct timer_list { + struct hlist_node entry; + long unsigned int expires; + void (*function)(struct timer_list *); + u32 flags; +}; + +struct llist_head { + struct llist_node *first; +}; + +struct debug_reg {}; + +struct thread_fp_state { + u64 fpr[32]; + u64 fpscr; + long: 32; + long: 32; +}; + +struct arch_hw_breakpoint { + long unsigned int address; + u16 type; + u16 len; + u16 hw_len; + u8 flags; + bool perf_single_step; +}; + +struct perf_event; + +struct thread_struct { + long unsigned int ksp; + struct pt_regs *regs; + void *pgdir; + long unsigned int rtas_sp; + long unsigned int kuap; + long unsigned int srr0; + long unsigned int srr1; + long unsigned int dar; + long unsigned int dsisr; + long unsigned int r0; + long unsigned int r3; + long unsigned int r4; + long unsigned int r5; + long unsigned int r6; + long unsigned int r8; + long unsigned int r9; + long unsigned int r11; + long unsigned int lr; + long unsigned int ctr; + long unsigned int sr0; + struct debug_reg debug; + struct thread_fp_state fp_state; + struct thread_fp_state *fp_save_area; + int fpexc_mode; + unsigned int align_ctl; + struct perf_event *ptrace_bps[2]; + struct arch_hw_breakpoint hw_brk[2]; + long unsigned int trap_nr; + u8 load_slb; + u8 load_fp; + long: 32; + long: 32; + long: 32; +}; + +struct sched_class; + +struct task_group; + +struct rcu_node; + +struct mm_struct; + +struct address_space; + +struct pid; + +struct completion; + +struct cred; + +struct key; + +struct nameidata; + +struct fs_struct; + +struct files_struct; + +struct io_uring_task; + +struct nsproxy; + +struct signal_struct; + +struct sighand_struct; + +struct rt_mutex_waiter; + +struct bio_list; + +struct blk_plug; + +struct reclaim_state; + +struct io_context; + +struct capture_control; + +struct kernel_siginfo; + +typedef struct kernel_siginfo kernel_siginfo_t; + +struct css_set; + +struct robust_list_head; + +struct futex_pi_state; + +struct perf_event_context; + +struct rseq; + +struct pipe_inode_info; + +struct task_delay_info; + +struct mem_cgroup; + +struct obj_cgroup; + +struct gendisk; + +struct uprobe_task; + +struct vm_struct; + +struct bpf_local_storage; + +struct bpf_run_ctx; + +struct bpf_net_context; + +struct task_struct { + struct thread_info thread_info; + unsigned int __state; + unsigned int saved_state; + void *stack; + refcount_t usage; + unsigned int flags; + unsigned int ptrace; + int on_cpu; + struct __call_single_node wake_entry; + unsigned int wakee_flips; + long unsigned int wakee_flip_decay_ts; + struct task_struct *last_wakee; + int recent_used_cpu; + int wake_cpu; + int on_rq; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_entity se; + struct sched_rt_entity rt; + long: 32; + struct sched_dl_entity dl; + struct sched_dl_entity *dl_server; + long: 32; + struct sched_ext_entity scx; + const struct sched_class *sched_class; + 
struct task_group *sched_task_group; + long: 32; + long: 32; + struct sched_statistics stats; + unsigned int btrace_seq; + unsigned int policy; + long unsigned int max_allowed_capacity; + int nr_cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t *user_cpus_ptr; + cpumask_t cpus_mask; + void *migration_pending; + short unsigned int migration_disabled; + short unsigned int migration_flags; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; + long unsigned int rcu_tasks_nvcsw; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; + int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_exit_cpu; + struct list_head rcu_tasks_exit_list; + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + struct list_head trc_holdout_list; + struct list_head trc_blkd_node; + int trc_blkd_cpu; + struct sched_info sched_info; + struct list_head tasks; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct address_space *faults_disabled_mapping; + int exit_state; + int exit_code; + int exit_signal; + int pdeath_signal; + long unsigned int jobctl; + unsigned int personality; + unsigned int sched_reset_on_fork: 1; + unsigned int sched_contributes_to_load: 1; + unsigned int sched_migrated: 1; + long: 29; + unsigned int sched_remote_wakeup: 1; + unsigned int sched_rt_mutex: 1; + unsigned int in_execve: 1; + unsigned int in_iowait: 1; + unsigned int restore_sigmask: 1; + unsigned int no_cgroup_migration: 1; + unsigned int frozen: 1; + unsigned int use_memdelay: 1; + unsigned int in_memstall: 1; + unsigned int in_eventfd: 1; + unsigned int in_thrashing: 1; + long unsigned int atomic_flags; + struct restart_block restart_block; + pid_t pid; + pid_t tgid; + long unsigned int stack_canary; + struct task_struct *real_parent; + struct task_struct *parent; + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + struct list_head ptraced; + struct list_head ptrace_entry; + struct pid *thread_pid; + struct hlist_node pid_links[4]; + struct list_head thread_node; + struct completion *vfork_done; + int *set_child_tid; + int *clear_child_tid; + void *worker_private; + long: 32; + u64 utime; + u64 stime; + u64 gtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + u64 start_time; + u64 start_boottime; + long unsigned int min_flt; + long unsigned int maj_flt; + struct posix_cputimers posix_cputimers; + const struct cred *ptracer_cred; + const struct cred *real_cred; + const struct cred *cred; + struct key *cached_requested_key; + char comm[16]; + struct nameidata *nameidata; + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; + long unsigned int last_switch_count; + long unsigned int last_switch_time; + struct fs_struct *fs; + struct files_struct *files; + struct io_uring_task *io_uring; + struct nsproxy *nsproxy; + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + sigset_t saved_sigmask; + struct sigpending pending; + long unsigned int sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + struct callback_head *task_works; + struct seccomp seccomp; + struct syscall_user_dispatch syscall_dispatch; + long: 32; + u64 parent_exec_id; + u64 self_exec_id; + spinlock_t alloc_lock; + raw_spinlock_t pi_lock; + struct wake_q_node wake_q; + struct 
rb_root_cached pi_waiters; + struct task_struct *pi_top_task; + struct rt_mutex_waiter *pi_blocked_on; + void *journal_info; + struct bio_list *bio_list; + struct blk_plug *plug; + struct reclaim_state *reclaim_state; + struct io_context *io_context; + struct capture_control *capture_control; + long unsigned int ptrace_message; + kernel_siginfo_t *last_siginfo; + long: 32; + struct task_io_accounting ioac; + unsigned int psi_flags; + long: 32; + u64 acct_rss_mem1; + u64 acct_vm_mem1; + u64 acct_timexpd; + nodemask_t mems_allowed; + seqcount_spinlock_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + struct css_set *cgroups; + struct list_head cg_list; + struct robust_list_head *robust_list; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; + u8 perf_recursion[4]; + struct perf_event_context *perf_event_ctxp; + struct mutex perf_event_mutex; + struct list_head perf_event_list; + struct rseq *rseq; + u32 rseq_len; + u32 rseq_sig; + long unsigned int rseq_event_mask; + int mm_cid; + int last_mm_cid; + int migrate_from_cpu; + int mm_cid_active; + struct callback_head cid_work; + struct tlbflush_unmap_batch tlb_ubc; + struct pipe_inode_info *splice_pipe; + struct page_frag task_frag; + struct task_delay_info *delays; + int nr_dirtied; + int nr_dirtied_pause; + long unsigned int dirty_paused_when; + u64 timer_slack_ns; + u64 default_timer_slack_ns; + long unsigned int trace_recursion; + unsigned int memcg_nr_pages_over_high; + struct mem_cgroup *active_memcg; + struct obj_cgroup *objcg; + struct gendisk *throttle_disk; + struct uprobe_task *utask; + struct kmap_ctrl kmap_ctrl; + struct callback_head rcu; + refcount_t rcu_users; + int pagefault_disabled; + struct task_struct *oom_reaper_list; + struct timer_list oom_reaper_timer; + struct vm_struct *stack_vm_area; + refcount_t stack_refcount; + struct bpf_local_storage *bpf_storage; + struct bpf_run_ctx *bpf_ctx; + struct bpf_net_context *bpf_net_context; + struct llist_head kretprobe_instances; + struct llist_head rethooks; + long: 32; + struct thread_struct thread; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct __kernel_timespec { + __kernel_time64_t tv_sec; + long long int tv_nsec; +}; + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +typedef struct { + long unsigned int pgd; +} pgd_t; + +typedef struct { + s64 counter; +} atomic64_t; + +union sigval { + int sival_int; + void *sival_ptr; +}; + +typedef union sigval sigval_t; + +union __sifields { + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + } _kill; + struct { + __kernel_timer_t _tid; + int _overrun; + sigval_t _sigval; + int _sys_private; + } _timer; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + sigval_t _sigval; + } _rt; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + int _status; + __kernel_clock_t _utime; + __kernel_clock_t _stime; + } _sigchld; + struct { + void *_addr; + union { + int _trapno; + short int _addr_lsb; + struct { + char _dummy_bnd[4]; + void *_lower; + void *_upper; + } _addr_bnd; + struct { + char _dummy_pkey[4]; + __u32 _pkey; + } _addr_pkey; + struct { + long unsigned int _data; + __u32 _type; + __u32 _flags; + } _perf; + }; + } _sigfault; + struct { + long int _band; + int _fd; + } _sigpoll; + struct { + void *_call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; +}; + +struct kernel_siginfo { + struct { + int si_signo; + int si_errno; + int si_code; + union 
__sifields _sifields; + }; +}; + +struct page_pool; + +struct dev_pagemap; + +struct page { + long unsigned int flags; + union { + struct { + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + struct list_head buddy_list; + struct list_head pcp_list; + }; + struct address_space *mapping; + union { + long unsigned int index; + long unsigned int share; + }; + long unsigned int private; + }; + struct { + long unsigned int pp_magic; + struct page_pool *pp; + long unsigned int _pp_mapping_pad; + long unsigned int dma_addr; + atomic_long_t pp_ref_count; + }; + struct { + long unsigned int compound_head; + }; + struct { + struct dev_pagemap *pgmap; + void *zone_device_data; + }; + struct callback_head callback_head; + }; + union { + unsigned int page_type; + atomic_t _mapcount; + }; + atomic_t _refcount; + long unsigned int memcg_data; +}; + +struct rseq { + __u32 cpu_id_start; + __u32 cpu_id; + __u64 rseq_cs; + __u32 flags; + __u32 node_id; + __u32 mm_cid; + char end[0]; + long: 32; +}; + +struct work_struct; + +typedef void (*work_func_t)(struct work_struct *); + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +}; + +struct rhash_head { + struct rhash_head *next; +}; + +struct scx_dispatch_q { + raw_spinlock_t lock; + struct list_head list; + struct rb_root priq; + u32 nr; + u32 seq; + u64 id; + struct rhash_head hash_node; + struct llist_node free_node; + struct callback_head rcu; +}; + +struct rq_flags; + +struct affinity_context; + +struct sched_class { + void (*enqueue_task)(struct rq *, struct task_struct *, int); + bool (*dequeue_task)(struct rq *, struct task_struct *, int); + void (*yield_task)(struct rq *); + bool (*yield_to_task)(struct rq *, struct task_struct *); + void (*wakeup_preempt)(struct rq *, struct task_struct *, int); + int (*balance)(struct rq *, struct task_struct *, struct rq_flags *); + struct task_struct * (*pick_task)(struct rq *); + struct task_struct * (*pick_next_task)(struct rq *, struct task_struct *); + void (*put_prev_task)(struct rq *, struct task_struct *, struct task_struct *); + void (*set_next_task)(struct rq *, struct task_struct *, bool); + int (*select_task_rq)(struct task_struct *, int, int); + void (*migrate_task_rq)(struct task_struct *, int); + void (*task_woken)(struct rq *, struct task_struct *); + void (*set_cpus_allowed)(struct task_struct *, struct affinity_context *); + void (*rq_online)(struct rq *); + void (*rq_offline)(struct rq *); + struct rq * (*find_lock_rq)(struct task_struct *, struct rq *); + void (*task_tick)(struct rq *, struct task_struct *, int); + void (*task_fork)(struct task_struct *); + void (*task_dead)(struct task_struct *); + void (*switching_to)(struct rq *, struct task_struct *); + void (*switched_from)(struct rq *, struct task_struct *); + void (*switched_to)(struct rq *, struct task_struct *); + void (*reweight_task)(struct rq *, struct task_struct *, const struct load_weight *); + void (*prio_changed)(struct rq *, struct task_struct *, int); + unsigned int (*get_rr_interval)(struct rq *, struct task_struct *); + void (*update_curr)(struct rq *); + void (*task_change_group)(struct task_struct *); +}; + +typedef struct {} lockdep_map_p; + +struct maple_tree { + union { + spinlock_t ma_lock; + lockdep_map_p ma_external_lock; + }; + unsigned int ma_flags; + void *ma_root; +}; + +struct rw_semaphore { + atomic_long_t count; + atomic_long_t owner; + struct optimistic_spin_queue osq; + raw_spinlock_t wait_lock; + struct list_head 
wait_list; +}; + +struct percpu_counter { + raw_spinlock_t lock; + long: 32; + s64 count; + s32 *counters; + long: 32; +}; + +typedef struct { + long unsigned int id; + long unsigned int sr0; + void *vdso; +} mm_context_t; + +struct xol_area; + +struct uprobes_state { + struct xol_area *xol_area; +}; + +struct mm_cid; + +struct linux_binfmt; + +struct kioctx_table; + +struct user_namespace; + +struct file; + +struct mm_struct { + struct { + struct { + atomic_t mm_count; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct maple_tree mm_mt; + long unsigned int mmap_base; + long unsigned int mmap_legacy_base; + long unsigned int task_size; + pgd_t *pgd; + atomic_t membarrier_state; + atomic_t mm_users; + struct mm_cid *pcpu_cid; + long unsigned int mm_cid_next_scan; + unsigned int nr_cpus_allowed; + atomic_t max_nr_cid; + raw_spinlock_t cpus_allowed_lock; + atomic_long_t pgtables_bytes; + int map_count; + spinlock_t page_table_lock; + struct rw_semaphore mmap_lock; + struct list_head mmlist; + long unsigned int hiwater_rss; + long unsigned int hiwater_vm; + long unsigned int total_vm; + long unsigned int locked_vm; + long: 32; + atomic64_t pinned_vm; + long unsigned int data_vm; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int def_flags; + seqcount_t write_protect_seq; + spinlock_t arg_lock; + long unsigned int start_code; + long unsigned int end_code; + long unsigned int start_data; + long unsigned int end_data; + long unsigned int start_brk; + long unsigned int brk; + long unsigned int start_stack; + long unsigned int arg_start; + long unsigned int arg_end; + long unsigned int env_start; + long unsigned int env_end; + long unsigned int saved_auxv[76]; + long: 32; + struct percpu_counter rss_stat[4]; + struct linux_binfmt *binfmt; + mm_context_t context; + long unsigned int flags; + spinlock_t ioctx_lock; + struct kioctx_table *ioctx_table; + struct task_struct *owner; + struct user_namespace *user_ns; + struct file *exe_file; + atomic_t tlb_flush_pending; + struct uprobes_state uprobes_state; + struct work_struct async_put_work; + long: 32; + long: 32; + }; + long unsigned int cpu_bitmap[0]; +}; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + +typedef __kernel_uid32_t uid_t; + +typedef struct { + uid_t val; +} kuid_t; + +typedef unsigned int __kernel_gid32_t; + +typedef __kernel_gid32_t gid_t; + +typedef struct { + gid_t val; +} kgid_t; + +typedef struct { + u64 val; +} kernel_cap_t; + +struct user_struct; + +struct ucounts; + +struct group_info; + +struct cred { + atomic_long_t usage; + kuid_t uid; + kgid_t gid; + kuid_t suid; + kgid_t sgid; + kuid_t euid; + kgid_t egid; + kuid_t fsuid; + kgid_t fsgid; + unsigned int securebits; + kernel_cap_t cap_inheritable; + kernel_cap_t cap_permitted; + kernel_cap_t cap_effective; + kernel_cap_t cap_bset; + kernel_cap_t cap_ambient; + unsigned char jit_keyring; + struct key *session_keyring; + struct key *process_keyring; + struct key *thread_keyring; + struct key *request_key_auth; + struct user_struct *user; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct group_info *group_info; + union { + int non_rcu; + struct callback_head rcu; + }; + long: 32; +}; + +enum uprobe_task_state { + UTASK_RUNNING = 0, + UTASK_SSTEP = 1, + UTASK_SSTEP_ACK = 2, + UTASK_SSTEP_TRAPPED = 3, +}; + +struct arch_uprobe_task { + long unsigned int saved_trap_nr; 
+}; + +struct return_instance; + +struct uprobe; + +struct arch_uprobe; + +struct uprobe_task { + enum uprobe_task_state state; + unsigned int depth; + struct return_instance *return_instances; + union { + struct { + struct arch_uprobe_task autask; + long unsigned int vaddr; + }; + struct { + struct callback_head dup_xol_work; + long unsigned int dup_xol_addr; + }; + }; + struct uprobe *active_uprobe; + struct timer_list ri_timer; + long unsigned int xol_vaddr; + struct arch_uprobe *auprobe; +}; + +struct arch_uprobe { + union { + u32 insn[2]; + u32 ixol[2]; + }; +}; + +enum hprobe_state { + HPROBE_LEASED = 0, + HPROBE_STABLE = 1, + HPROBE_GONE = 2, + HPROBE_CONSUMED = 3, +}; + +struct hprobe { + enum hprobe_state state; + int srcu_idx; + struct uprobe *uprobe; +}; + +struct return_consumer { + __u64 cookie; + __u64 id; +}; + +struct return_instance { + struct hprobe hprobe; + long unsigned int func; + long unsigned int stack; + long unsigned int orig_ret_vaddr; + bool chained; + int consumers_cnt; + struct return_instance *next; + struct callback_head rcu; + long: 32; + struct return_consumer consumers[0]; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct mm_cid { + u64 time; + int cid; + int recent_cid; +}; + +enum { + false = 0, + true = 1, +}; + +enum { + CPU_FTRS_ALWAYS = 16777216, +}; + +struct jump_entry { + s32 code; + s32 target; + long int key; +}; + +struct static_key_mod; + +struct static_key { + atomic_t enabled; + union { + long unsigned int type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; + +struct static_key_true { + struct static_key key; +}; + +typedef u32 ppc_inst_t; + +enum instruction_type { + COMPUTE = 0, + LOAD = 1, + LOAD_MULTI = 2, + LOAD_FP = 3, + LOAD_VMX = 4, + LOAD_VSX = 5, + STORE = 6, + STORE_MULTI = 7, + STORE_FP = 8, + STORE_VMX = 9, + STORE_VSX = 10, + LARX = 11, + STCX = 12, + BRANCH = 13, + MFSPR = 14, + MTSPR = 15, + CACHEOP = 16, + BARRIER = 17, + SYSCALL = 18, + SYSCALL_VECTORED_0 = 19, + MFMSR = 20, + MTMSR = 21, + RFI = 22, + INTERRUPT = 23, + UNKNOWN = 24, +}; + +struct instruction_op { + int type; + int reg; + long unsigned int val; + long unsigned int ea; + int update_reg; + int spr; + u32 ccval; + u32 xerval; + u8 element_size; + u8 vsx_flags; +}; + +typedef signed char __s8; + +typedef __s8 s8; + +typedef long unsigned int __kernel_ulong_t; + +typedef int __kernel_ssize_t; + +typedef long long int __kernel_loff_t; + +typedef unsigned int __poll_t; + +typedef u32 __kernel_dev_t; + +typedef __kernel_dev_t dev_t; + +typedef short unsigned int umode_t; + +typedef __kernel_loff_t loff_t; + +typedef __kernel_ssize_t ssize_t; + +typedef s32 int32_t; + +typedef u32 uint32_t; + +typedef u64 sector_t; + +typedef u64 blkcnt_t; + +typedef unsigned int gfp_t; + +typedef unsigned int fmode_t; + +struct hlist_head { + struct hlist_node *first; +}; + +struct lock_class_key {}; + +struct fs_context; + +struct fs_parameter_spec; + +struct dentry; + +struct super_block; + +struct module; + +struct file_system_type { + const char *name; + int fs_flags; + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); + void (*kill_sb)(struct super_block *); + struct module *owner; + struct file_system_type *next; + struct hlist_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[3]; + struct 
lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; + struct lock_class_key i_mutex_dir_key; +}; + +typedef struct { + volatile int lock; +} arch_rwlock_t; + +struct lockdep_map {}; + +struct ratelimit_state { + raw_spinlock_t lock; + int interval; + int burst; + int printed; + int missed; + unsigned int flags; + long unsigned int begin; +}; + +struct static_key_false { + struct static_key key; +}; + +struct _ddebug { + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno: 18; + unsigned int class_id: 6; + unsigned int flags: 8; + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; + long: 32; +}; + +enum class_map_type { + DD_CLASS_TYPE_DISJOINT_BITS = 0, + DD_CLASS_TYPE_LEVEL_NUM = 1, + DD_CLASS_TYPE_DISJOINT_NAMES = 2, + DD_CLASS_TYPE_LEVEL_NAMES = 3, +}; + +struct ddebug_class_map { + struct list_head link; + struct module *mod; + const char *mod_name; + const char **class_names; + const int length; + const int base; + enum class_map_type map_type; +}; + +enum module_state { + MODULE_STATE_LIVE = 0, + MODULE_STATE_COMING = 1, + MODULE_STATE_GOING = 2, + MODULE_STATE_UNFORMED = 3, +}; + +struct kref { + refcount_t refcount; +}; + +struct kset; + +struct kobj_type; + +struct kernfs_node; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + const struct kobj_type *ktype; + struct kernfs_node *sd; + struct kref kref; + unsigned int state_initialized: 1; + unsigned int state_in_sysfs: 1; + unsigned int state_add_uevent_sent: 1; + unsigned int state_remove_uevent_sent: 1; + unsigned int uevent_suppress: 1; +}; + +struct module_param_attrs; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +}; + +struct latch_tree_node { + struct rb_node node[2]; +}; + +struct mod_tree_node { + struct module *mod; + struct latch_tree_node node; +}; + +struct module_memory { + void *base; + void *rw_copy; + bool is_rox; + unsigned int size; + struct mod_tree_node mtn; +}; + +struct mod_arch_specific { + unsigned int core_plt_section; + unsigned int init_plt_section; +}; + +struct elf32_sym; + +typedef struct elf32_sym Elf32_Sym; + +struct mod_kallsyms { + Elf32_Sym *symtab; + unsigned int num_symtab; + char *strtab; + char *typetab; +}; + +struct _ddebug_info { + struct _ddebug *descs; + struct ddebug_class_map *classes; + unsigned int num_descs; + unsigned int num_classes; +}; + +struct module_attribute; + +struct kernel_symbol; + +struct kernel_param; + +struct exception_table_entry; + +struct bug_entry; + +struct module_sect_attrs; + +struct module_notes_attrs; + +struct tracepoint; + +typedef struct tracepoint * const tracepoint_ptr_t; + +struct srcu_struct; + +struct bpf_raw_event_map; + +struct trace_event_call; + +struct trace_eval_map; + +struct module { + enum module_state state; + struct list_head list; + char name[60]; + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + const struct kernel_symbol *syms; + const s32 *crcs; + unsigned int num_syms; + struct mutex param_lock; + struct kernel_param *kp; + unsigned int num_kp; + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const s32 *gpl_crcs; + bool using_gplonly_symbols; + bool 
async_probe_requested; + unsigned int num_exentries; + struct exception_table_entry *extable; + int (*init)(); + long: 32; + long: 32; + long: 32; + long: 32; + struct module_memory mem[7]; + struct mod_arch_specific arch; + long unsigned int taints; + unsigned int num_bugs; + struct list_head bug_list; + struct bug_entry *bug_table; + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + struct module_sect_attrs *sect_attrs; + struct module_notes_attrs *notes_attrs; + char *args; + void *percpu; + unsigned int percpu_size; + void *noinstr_text_start; + unsigned int noinstr_text_size; + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; + unsigned int num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; + unsigned int btf_data_size; + unsigned int btf_base_data_size; + void *btf_data; + void *btf_base_data; + struct jump_entry *jump_entries; + unsigned int num_jump_entries; + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; + struct trace_event_call **trace_events; + unsigned int num_trace_events; + struct trace_eval_map **trace_evals; + unsigned int num_trace_evals; + void *kprobes_text_start; + unsigned int kprobes_text_size; + long unsigned int *kprobe_blacklist; + unsigned int num_kprobe_blacklist; + struct list_head source_list; + struct list_head target_list; + void (*exit)(); + atomic_t refcnt; + struct _ddebug_info dyndbg_info; +}; + +struct kernel_param_ops { + unsigned int flags; + int (*set)(const char *, const struct kernel_param *); + int (*get)(char *, const struct kernel_param *); + void (*free)(void *); +}; + +typedef unsigned int fop_flags_t; + +typedef void *fl_owner_t; + +struct kiocb; + +struct iov_iter; + +struct io_comp_batch; + +struct dir_context; + +struct poll_table_struct; + +struct vm_area_struct; + +struct inode; + +struct file_lock; + +struct file_lease; + +struct seq_file; + +struct io_uring_cmd; + +struct file_operations { + struct module *owner; + fop_flags_t fop_flags; + loff_t (*llseek)(struct file *, loff_t, int); + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *, struct io_comp_batch *, unsigned int); + int (*iterate_shared)(struct file *, struct dir_context *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); + long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int); + int (*mmap)(struct file *, struct vm_area_struct *); + int (*open)(struct inode *, struct file *); + int (*flush)(struct file *, fl_owner_t); + int (*release)(struct inode *, struct file *); + int (*fsync)(struct file *, loff_t, loff_t, int); + int (*fasync)(int, struct file *, int); + int (*lock)(struct file *, int, struct file_lock *); + long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*check_flags)(int); + int (*flock)(struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct file *); + int (*setlease)(struct file *, 
int, struct file_lease **, void **); + long int (*fallocate)(struct file *, int, loff_t, loff_t); + void (*show_fdinfo)(struct seq_file *, struct file *); + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); + loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int); + int (*fadvise)(struct file *, loff_t, loff_t, int); + int (*uring_cmd)(struct io_uring_cmd *, unsigned int); + int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int); +}; + +struct bug_entry { + int bug_addr_disp; + int file_disp; + short unsigned int line; + short unsigned int flags; +}; + +struct static_call_key { + void *func; +}; + +typedef long unsigned int pte_basic_t; + +typedef pte_basic_t pte_t; + +typedef struct { + long unsigned int pgprot; +} pgprot_t; + +struct exception_table_entry { + int insn; + int fixup; +}; + +enum pid_type { + PIDTYPE_PID = 0, + PIDTYPE_TGID = 1, + PIDTYPE_PGID = 2, + PIDTYPE_SID = 3, + PIDTYPE_MAX = 4, +}; + +struct xarray { + spinlock_t xa_lock; + gfp_t xa_flags; + void *xa_head; +}; + +struct idr { + struct xarray idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +struct proc_ns_operations; + +struct ns_common { + struct dentry *stashed; + const struct proc_ns_operations *ops; + unsigned int inum; + refcount_t count; +}; + +struct kmem_cache; + +struct fs_pin; + +struct pid_namespace { + struct idr idr; + struct callback_head rcu; + unsigned int pid_allocated; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; + struct fs_pin *bacct; + struct user_namespace *user_ns; + struct ucounts *ucounts; + int reboot; + struct ns_common ns; + int memfd_noexec_scope; +}; + +typedef struct { + arch_rwlock_t raw_lock; +} rwlock_t; + +struct seqcount_raw_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; + +struct hrtimer_cpu_base; + +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_raw_spinlock_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(); + ktime_t offset; +}; + +struct rlimit { + __kernel_ulong_t rlim_cur; + __kernel_ulong_t rlim_max; +}; + +typedef void __signalfn_t(int); + +typedef __signalfn_t *__sighandler_t; + +typedef void __restorefn_t(); + +typedef __restorefn_t *__sigrestore_t; + +struct sigaction { + __sighandler_t sa_handler; + long unsigned int sa_flags; + __sigrestore_t sa_restorer; + sigset_t sa_mask; +}; + +struct k_sigaction { + struct sigaction sa; +}; + +typedef struct { + seqcount_spinlock_t seqcount; + spinlock_t lock; +} seqlock_t; + +typedef u32 errseq_t; + +struct address_space_operations; + +struct address_space { + struct inode *host; + struct xarray i_pages; + struct rw_semaphore invalidate_lock; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + struct rb_root_cached i_mmap; + long unsigned int nrpages; + long unsigned int writeback_index; + const struct address_space_operations *a_ops; + long unsigned int flags; + errseq_t wb_err; + spinlock_t i_private_lock; + struct list_head i_private_list; + struct rw_semaphore i_mmap_rwsem; + void *i_private_data; +}; + +struct wait_queue_head { + spinlock_t lock; + struct list_head head; +}; + +typedef struct wait_queue_head wait_queue_head_t; + +struct upid { + int nr; + struct pid_namespace *ns; +}; + +struct pid { + refcount_t count; + unsigned int level; + spinlock_t 
lock; + struct dentry *stashed; + u64 ino; + struct hlist_head tasks[4]; + struct hlist_head inodes; + wait_queue_head_t wait_pidfd; + struct callback_head rcu; + struct upid numbers[0]; +}; + +typedef int32_t key_serial_t; + +typedef __s64 time64_t; + +typedef uint32_t key_perm_t; + +struct key_type; + +struct key_tag; + +struct keyring_index_key { + long unsigned int hash; + union { + struct { + char desc[2]; + u16 desc_len; + }; + long unsigned int x; + }; + struct key_type *type; + struct key_tag *domain_tag; + const char *description; +}; + +union key_payload { + void *rcu_data0; + void *data[4]; +}; + +struct assoc_array_ptr; + +struct assoc_array { + struct assoc_array_ptr *root; + long unsigned int nr_leaves_on_tree; +}; + +struct key_user; + +struct key_restriction; + +struct key { + refcount_t usage; + key_serial_t serial; + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + struct rw_semaphore sem; + struct key_user *user; + void *security; + long: 32; + union { + time64_t expiry; + time64_t revoked_at; + }; + time64_t last_used_at; + kuid_t uid; + kgid_t gid; + key_perm_t perm; + short unsigned int quotalen; + short unsigned int datalen; + short int state; + long unsigned int flags; + union { + struct keyring_index_key index_key; + struct { + long unsigned int hash; + long unsigned int len_desc; + struct key_type *type; + struct key_tag *domain_tag; + char *description; + }; + }; + union { + union key_payload payload; + struct { + struct list_head name_link; + struct assoc_array keys; + }; + }; + struct key_restriction *restrict_link; +}; + +struct uts_namespace; + +struct ipc_namespace; + +struct mnt_namespace; + +struct net; + +struct time_namespace; + +struct cgroup_namespace; + +struct nsproxy { + refcount_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct time_namespace *time_ns; + struct time_namespace *time_ns_for_children; + struct cgroup_namespace *cgroup_ns; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; +}; + +struct pacct_struct { + int ac_flag; + long int ac_exitcode; + long unsigned int ac_mem; + long: 32; + u64 ac_utime; + u64 ac_stime; + long unsigned int ac_minflt; + long unsigned int ac_majflt; +}; + +struct core_state; + +struct tty_struct; + +struct taskstats; + +struct signal_struct { + refcount_t sigcnt; + atomic_t live; + int nr_threads; + int quick_threads; + struct list_head thread_head; + wait_queue_head_t wait_chldexit; + struct task_struct *curr_target; + struct sigpending shared_pending; + struct hlist_head multiprocess; + int group_exit_code; + int notify_count; + struct task_struct *group_exec_task; + int group_stop_count; + unsigned int flags; + struct core_state *core_state; + unsigned int is_child_subreaper: 1; + unsigned int has_child_subreaper: 1; + unsigned int next_posix_timer_id; + struct hlist_head posix_timers; + struct hlist_head ignored_posix_timers; + long: 32; + struct hrtimer real_timer; + ktime_t it_real_incr; + struct cpu_itimer it[2]; + struct thread_group_cputimer cputimer; + struct posix_cputimers posix_cputimers; + struct pid *pids[4]; + struct pid *tty_old_pgrp; + int leader; + struct tty_struct *tty; + seqlock_t stats_lock; + long: 32; + u64 utime; + u64 stime; + u64 cutime; 
+ u64 cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + long unsigned int cnvcsw; + long unsigned int cnivcsw; + long unsigned int min_flt; + long unsigned int maj_flt; + long unsigned int cmin_flt; + long unsigned int cmaj_flt; + long unsigned int inblock; + long unsigned int oublock; + long unsigned int cinblock; + long unsigned int coublock; + long unsigned int maxrss; + long unsigned int cmaxrss; + struct task_io_accounting ioac; + long long unsigned int sum_sched_runtime; + struct rlimit rlim[16]; + struct pacct_struct pacct; + struct taskstats *stats; + bool oom_flag_origin; + short int oom_score_adj; + short int oom_score_adj_min; + struct mm_struct *oom_mm; + struct mutex cred_guard_mutex; + struct rw_semaphore exec_update_lock; + long: 32; +}; + +struct sighand_struct { + spinlock_t siglock; + refcount_t count; + wait_queue_head_t signalfd_wqh; + struct k_sigaction action[64]; +}; + +struct io_cq; + +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + short unsigned int ioprio; + spinlock_t lock; + struct xarray icq_tree; + struct io_cq *icq_hint; + struct hlist_head icq_list; + struct work_struct release_work; +}; + +struct timespec64 { + time64_t tv_sec; + long int tv_nsec; + long: 32; +}; + +struct workqueue_struct; + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + struct workqueue_struct *wq; + int cpu; +}; + +struct rcu_segcblist { + struct callback_head *head; + struct callback_head **tails[4]; + long unsigned int gp_seq[4]; + long int len; + long int seglen[4]; + u8 flags; +}; + +struct srcu_node; + +struct srcu_data { + atomic_long_t srcu_lock_count[2]; + atomic_long_t srcu_unlock_count[2]; + int srcu_reader_flavor; + long: 32; + long: 32; + long: 32; + spinlock_t lock; + struct rcu_segcblist srcu_cblist; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + bool srcu_cblist_invoking; + struct timer_list delay_work; + struct work_struct work; + struct callback_head srcu_barrier_head; + struct srcu_node *mynode; + long unsigned int grpmask; + int cpu; + struct srcu_struct *ssp; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct srcu_node { + spinlock_t lock; + long unsigned int srcu_have_cbs[4]; + long unsigned int srcu_data_have_cbs[4]; + long unsigned int srcu_gp_seq_needed_exp; + struct srcu_node *srcu_parent; + int grplo; + int grphi; +}; + +struct srcu_usage; + +struct srcu_struct { + unsigned int srcu_idx; + struct srcu_data *sda; + struct lockdep_map dep_map; + struct srcu_usage *srcu_sup; +}; + +struct srcu_usage { + struct srcu_node *node; + struct srcu_node *level[3]; + int srcu_size_state; + struct mutex srcu_cb_mutex; + spinlock_t lock; + struct mutex srcu_gp_mutex; + long unsigned int srcu_gp_seq; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + long unsigned int srcu_gp_start; + long unsigned int srcu_last_gp_end; + long unsigned int srcu_size_jiffies; + long unsigned int srcu_n_lock_retries; + long unsigned int srcu_n_exp_nodelay; + bool sda_is_static; + long unsigned int srcu_barrier_seq; + struct mutex srcu_barrier_mutex; + struct completion srcu_barrier_completion; + atomic_t srcu_barrier_cpu_cnt; + long unsigned int reschedule_jiffies; + long unsigned int reschedule_count; + struct delayed_work work; + struct srcu_struct *srcu_ssp; +}; + +typedef pte_t *pgtable_t; + +struct vmem_altmap { + long unsigned int base_pfn; + const 
long unsigned int end_pfn; + const long unsigned int reserve; + long unsigned int free; + long unsigned int align; + long unsigned int alloc; + bool inaccessible; +}; + +struct percpu_ref_data; + +struct percpu_ref { + long unsigned int percpu_count_ptr; + struct percpu_ref_data *data; +}; + +enum memory_type { + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_COHERENT = 2, + MEMORY_DEVICE_FS_DAX = 3, + MEMORY_DEVICE_GENERIC = 4, + MEMORY_DEVICE_PCI_P2PDMA = 5, +}; + +struct range { + u64 start; + u64 end; +}; + +struct dev_pagemap_ops; + +struct dev_pagemap { + struct vmem_altmap altmap; + struct percpu_ref ref; + struct completion done; + enum memory_type type; + unsigned int flags; + long unsigned int vmemmap_shift; + const struct dev_pagemap_ops *ops; + void *owner; + int nr_range; + long: 32; + union { + struct range range; + struct { + struct {} __empty_ranges; + struct range ranges[0]; + }; + }; +}; + +typedef struct { + long unsigned int val; +} swp_entry_t; + +struct folio { + union { + struct { + long unsigned int flags; + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + }; + struct address_space *mapping; + long unsigned int index; + union { + void *private; + swp_entry_t swap; + }; + atomic_t _mapcount; + atomic_t _refcount; + long unsigned int memcg_data; + }; + struct page page; + }; + union { + struct { + long unsigned int _flags_1; + long unsigned int _head_1; + atomic_t _large_mapcount; + atomic_t _entire_mapcount; + atomic_t _nr_pages_mapped; + atomic_t _pincount; + }; + struct page __page_1; + }; + union { + struct { + long unsigned int _flags_2; + long unsigned int _head_2; + void *_hugetlb_subpool; + void *_hugetlb_cgroup; + void *_hugetlb_cgroup_rsvd; + void *_hugetlb_hwpoison; + }; + struct { + long unsigned int _flags_2a; + long unsigned int _head_2a; + struct list_head _deferred_list; + }; + struct page __page_2; + }; +}; + +typedef long unsigned int vm_flags_t; + +typedef struct { + atomic_t refcnt; +} file_ref_t; + +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +}; + +struct file_ra_state { + long unsigned int start; + unsigned int size; + unsigned int async_size; + unsigned int ra_pages; + unsigned int mmap_miss; + long: 32; + loff_t prev_pos; +}; + +typedef struct { + long unsigned int v; +} freeptr_t; + +struct fown_struct; + +struct file { + file_ref_t f_ref; + spinlock_t f_lock; + fmode_t f_mode; + const struct file_operations *f_op; + struct address_space *f_mapping; + void *private_data; + struct inode *f_inode; + unsigned int f_flags; + unsigned int f_iocb_flags; + const struct cred *f_cred; + struct path f_path; + union { + struct mutex f_pos_lock; + u64 f_pipe; + }; + loff_t f_pos; + struct fown_struct *f_owner; + errseq_t f_wb_err; + errseq_t f_sb_err; + struct hlist_head *f_ep; + union { + struct callback_head f_task_work; + struct llist_node f_llist; + struct file_ra_state f_ra; + freeptr_t f_freeptr; + }; +}; + +struct vm_userfaultfd_ctx {}; + +struct anon_vma; + +struct vm_operations_struct; + +struct vm_area_struct { + union { + struct { + long unsigned int vm_start; + long unsigned int vm_end; + }; + }; + struct mm_struct *vm_mm; + pgprot_t vm_page_prot; + union { + const vm_flags_t vm_flags; + vm_flags_t __vm_flags; + }; + struct { + struct rb_node rb; + long unsigned int rb_subtree_last; + } shared; + struct list_head anon_vma_chain; + struct anon_vma *anon_vma; + const struct vm_operations_struct *vm_ops; + long unsigned int vm_pgoff; + struct file *vm_file; + void 
*vm_private_data; + atomic_long_t swap_readahead_info; + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +}; + +typedef unsigned int vm_fault_t; + +struct vm_fault; + +struct vm_operations_struct { + void (*open)(struct vm_area_struct *); + void (*close)(struct vm_area_struct *); + int (*may_split)(struct vm_area_struct *, long unsigned int); + int (*mremap)(struct vm_area_struct *); + int (*mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int, long unsigned int); + vm_fault_t (*fault)(struct vm_fault *); + vm_fault_t (*huge_fault)(struct vm_fault *, unsigned int); + vm_fault_t (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int); + long unsigned int (*pagesize)(struct vm_area_struct *); + vm_fault_t (*page_mkwrite)(struct vm_fault *); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *); + int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int); + const char * (*name)(struct vm_area_struct *); + struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int); +}; + +enum fault_flag { + FAULT_FLAG_WRITE = 1, + FAULT_FLAG_MKWRITE = 2, + FAULT_FLAG_ALLOW_RETRY = 4, + FAULT_FLAG_RETRY_NOWAIT = 8, + FAULT_FLAG_KILLABLE = 16, + FAULT_FLAG_TRIED = 32, + FAULT_FLAG_USER = 64, + FAULT_FLAG_REMOTE = 128, + FAULT_FLAG_INSTRUCTION = 256, + FAULT_FLAG_INTERRUPTIBLE = 512, + FAULT_FLAG_UNSHARE = 1024, + FAULT_FLAG_ORIG_PTE_VALID = 2048, + FAULT_FLAG_VMA_LOCK = 4096, +}; + +typedef struct { + pgd_t pgd; +} p4d_t; + +typedef struct { + p4d_t p4d; +} pud_t; + +typedef struct { + pud_t pud; +} pmd_t; + +struct vm_fault { + const struct { + struct vm_area_struct *vma; + gfp_t gfp_mask; + long unsigned int pgoff; + long unsigned int address; + long unsigned int real_address; + }; + enum fault_flag flags; + pmd_t *pmd; + pud_t *pud; + union { + pte_t orig_pte; + pmd_t orig_pmd; + }; + struct page *cow_page; + struct page *page; + pte_t *pte; + spinlock_t *ptl; + pgtable_t prealloc_pte; +}; + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct tracepoint_ext { + int (*regfunc)(); + void (*unregfunc)(); + unsigned int faultable: 1; +}; + +struct tracepoint { + const char *name; + struct static_key_false key; + struct static_call_key *static_call_key; + void *static_call_tramp; + void *iterator; + void *probestub; + struct tracepoint_func *funcs; + struct tracepoint_ext *ext; +}; + +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; + u32 writable_size; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef void percpu_ref_func_t(struct percpu_ref *); + +struct percpu_ref_data { + atomic_long_t count; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic: 1; + bool allow_reinit: 1; + struct callback_head rcu; + struct percpu_ref *ref; +}; + +struct shrink_control { + gfp_t gfp_mask; + int nid; + long unsigned int nr_to_scan; + long unsigned int nr_scanned; + struct mem_cgroup *memcg; +}; + +struct shrinker { + long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *); + long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *); + long int batch; + int seeks; + unsigned int flags; + refcount_t refcount; + struct completion done; + struct callback_head rcu; + void *private_data; + struct list_head list; + int id; + atomic_long_t *nr_deferred; +}; + +typedef bool (*stack_trace_consume_fn)(void *, long unsigned int); + +struct dev_pagemap_ops { + void (*page_free)(struct page *); + vm_fault_t 
(*migrate_to_ram)(struct vm_fault *); + int (*memory_failure)(struct dev_pagemap *, long unsigned int, long unsigned int, int); +}; + +struct hlist_bl_node; + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next; + struct hlist_bl_node **pprev; +}; + +struct lockref { + union { + struct { + spinlock_t lock; + int count; + }; + }; +}; + +struct qstr { + union { + struct { + u32 len; + u32 hash; + }; + u64 hash_len; + }; + const unsigned char *name; + long: 32; +}; + +struct dentry_operations; + +struct dentry { + unsigned int d_flags; + seqcount_spinlock_t d_seq; + struct hlist_bl_node d_hash; + struct dentry *d_parent; + long: 32; + struct qstr d_name; + struct inode *d_inode; + unsigned char d_iname[36]; + const struct dentry_operations *d_op; + struct super_block *d_sb; + long unsigned int d_time; + void *d_fsdata; + struct lockref d_lockref; + union { + struct list_head d_lru; + wait_queue_head_t *d_wait; + }; + struct hlist_node d_sib; + struct hlist_head d_children; + union { + struct hlist_node d_alias; + struct hlist_bl_node d_in_lookup_hash; + struct callback_head d_rcu; + } d_u; + long: 32; +}; + +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = 1, + WRITE_LIFE_SHORT = 2, + WRITE_LIFE_MEDIUM = 3, + WRITE_LIFE_LONG = 4, + WRITE_LIFE_EXTREME = 5, +} __attribute__((mode(byte))); + +struct posix_acl; + +struct inode_operations; + +struct bdi_writeback; + +struct file_lock_context; + +struct cdev; + +struct fsnotify_mark_connector; + +struct inode { + umode_t i_mode; + short unsigned int i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + long unsigned int i_ino; + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + time64_t i_atime_sec; + time64_t i_mtime_sec; + time64_t i_ctime_sec; + u32 i_atime_nsec; + u32 i_mtime_nsec; + u32 i_ctime_nsec; + u32 i_generation; + spinlock_t i_lock; + short unsigned int i_bytes; + u8 i_blkbits; + enum rw_hint i_write_hint; + blkcnt_t i_blocks; + seqcount_t i_size_seqcount; + u32 i_state; + struct rw_semaphore i_rwsem; + long unsigned int dirtied_when; + long unsigned int dirtied_time_when; + struct hlist_node i_hash; + struct list_head i_io_list; + struct bdi_writeback *i_wb; + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + struct list_head i_lru; + struct list_head i_sb_list; + struct list_head i_wb_list; + union { + struct hlist_head i_dentry; + struct callback_head i_rcu; + }; + long: 32; + atomic64_t i_version; + atomic64_t i_sequence; + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; + atomic_t i_readcount; + union { + const struct file_operations *i_fop; + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct cdev *i_cdev; + char *i_link; + unsigned int i_dir_seq; + }; + __u32 i_fsnotify_mask; + struct fsnotify_mark_connector *i_fsnotify_marks; + void *i_private; + long: 32; +}; + +enum d_real_type { + D_REAL_DATA = 0, + D_REAL_METADATA = 1, +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, struct qstr *); + int (*d_compare)(const 
struct dentry *, unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char * (*d_dname)(struct dentry *, char *, int); + struct vfsmount * (*d_automount)(struct path *); + int (*d_manage)(const struct path *, bool); + struct dentry * (*d_real)(struct dentry *, enum d_real_type); + long: 32; + long: 32; + long: 32; +}; + +struct mtd_info; + +typedef long long int qsize_t; + +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; + struct list_head dqi_dirty_list; + long unsigned int dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + long: 32; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; + void *dqi_priv; + long: 32; +}; + +struct quota_format_ops; + +struct quota_info { + unsigned int flags; + struct rw_semaphore dqio_sem; + struct inode *files[3]; + struct mem_dqinfo info[3]; + const struct quota_format_ops *ops[3]; + long: 32; +}; + +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + struct callback_head cb_head; +}; + +struct rcuwait { + struct task_struct *task; +}; + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int *read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +}; + +struct sb_writers { + short unsigned int frozen; + int freeze_kcount; + int freeze_ucount; + struct percpu_rw_semaphore rw_sem[3]; +}; + +typedef struct { + __u8 b[16]; +} uuid_t; + +struct list_lru_node; + +struct list_lru { + struct list_lru_node *node; + struct list_head list; + int shrinker_id; + bool memcg_aware; + struct xarray xa; +}; + +struct super_operations; + +struct dquot_operations; + +struct quotactl_ops; + +struct export_operations; + +struct xattr_handler; + +struct block_device; + +struct backing_dev_info; + +struct fsnotify_sb_info; + +struct super_block { + struct list_head s_list; + dev_t s_dev; + unsigned char s_blocksize_bits; + long unsigned int s_blocksize; + long: 32; + loff_t s_maxbytes; + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + long unsigned int s_flags; + long unsigned int s_iflags; + long unsigned int s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; + const struct xattr_handler * const *s_xattr; + struct hlist_bl_head s_roots; + struct list_head s_mounts; + struct block_device *s_bdev; + struct file *s_bdev_file; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; + struct quota_info s_dquot; + struct sb_writers s_writers; + void *s_fs_info; + u32 s_time_gran; + time64_t s_time_min; + time64_t s_time_max; + u32 s_fsnotify_mask; + struct fsnotify_sb_info *s_fsnotify_info; + char s_id[32]; + uuid_t s_uuid; + u8 s_uuid_len; + char s_sysfs_name[37]; + unsigned int s_max_links; + struct mutex s_vfs_rename_mutex; + const char *s_subtype; + const struct dentry_operations *s_d_op; + struct shrinker *s_shrink; + atomic_long_t s_remove_count; + int s_readonly_remount; + errseq_t s_wb_err; + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + struct user_namespace *s_user_ns; + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; + struct 
callback_head rcu; + struct work_struct destroy_work; + struct mutex s_sync_lock; + int s_stack_depth; + long: 32; + spinlock_t s_inode_list_lock; + struct list_head s_inodes; + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; + long: 32; + long: 32; +}; + +struct mnt_idmap; + +struct vfsmount { + struct dentry *mnt_root; + struct super_block *mnt_sb; + int mnt_flags; + struct mnt_idmap *mnt_idmap; +}; + +struct kstat { + u32 result_mask; + umode_t mode; + unsigned int nlink; + uint32_t blksize; + u64 attributes; + u64 attributes_mask; + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; + u64 blocks; + u64 mnt_id; + u32 dio_mem_align; + u32 dio_offset_align; + u64 change_cookie; + u64 subvol; + u32 atomic_write_unit_min; + u32 atomic_write_unit_max; + u32 atomic_write_segments_max; + long: 32; +}; + +struct list_lru_one { + struct list_head list; + long int nr_items; + spinlock_t lock; +}; + +struct list_lru_node { + struct list_lru_one lru; + atomic_long_t nr_items; + long: 32; + long: 32; + long: 32; +}; + +enum migrate_mode { + MIGRATE_ASYNC = 0, + MIGRATE_SYNC_LIGHT = 1, + MIGRATE_SYNC = 2, +}; + +struct key_tag { + struct callback_head rcu; + refcount_t usage; + bool removed; +}; + +typedef int (*request_key_actor_t)(struct key *, void *); + +struct key_preparsed_payload; + +struct key_match_data; + +struct kernel_pkey_params; + +struct kernel_pkey_query; + +struct key_type { + const char *name; + size_t def_datalen; + unsigned int flags; + int (*vet_description)(const char *); + int (*preparse)(struct key_preparsed_payload *); + void (*free_preparse)(struct key_preparsed_payload *); + int (*instantiate)(struct key *, struct key_preparsed_payload *); + int (*update)(struct key *, struct key_preparsed_payload *); + int (*match_preparse)(struct key_match_data *); + void (*match_free)(struct key_match_data *); + void (*revoke)(struct key *); + void (*destroy)(struct key *); + void (*describe)(const struct key *, struct seq_file *); + long int (*read)(const struct key *, char *, size_t); + request_key_actor_t request_key; + struct key_restriction * (*lookup_restriction)(const char *); + int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); + struct list_head link; + struct lock_class_key lock_class; +}; + +typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +struct user_struct { + refcount_t __count; + long: 32; + struct percpu_counter epoll_watches; + long unsigned int unix_inflight; + atomic_long_t pipe_bufs; + struct hlist_node uidhash_node; + kuid_t uid; + atomic_long_t locked_vm; + struct ratelimit_state ratelimit; + long: 32; +}; + +struct group_info { + refcount_t usage; + int ngroups; + kgid_t gid[0]; +}; + +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active: 1; + unsigned int in_hrtirq: 1; + unsigned int hang_detected: 1; + unsigned int softirq_activated: 1; + unsigned int online: 1; + unsigned int nr_events; + short unsigned int nr_retries; + short unsigned 
int nr_hangs; + unsigned int max_hang_time; + ktime_t expires_next; + struct hrtimer *next_timer; + long: 32; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + long: 32; + struct hrtimer_clock_base clock_base[8]; +}; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +struct request_queue; + +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct callback_head __rcu_head; + }; + unsigned int flags; +}; + +typedef struct { + uid_t val; +} vfsuid_t; + +typedef struct { + gid_t val; +} vfsgid_t; + +struct wait_page_queue; + +struct kiocb { + struct file *ki_filp; + long: 32; + loff_t ki_pos; + void (*ki_complete)(struct kiocb *, long int); + void *private; + int ki_flags; + u16 ki_ioprio; + union { + struct wait_page_queue *ki_waitq; + ssize_t (*dio_complete)(void *); + }; + long: 32; +}; + +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + union { + kuid_t ia_uid; + vfsuid_t ia_vfsuid; + }; + union { + kgid_t ia_gid; + vfsgid_t ia_vfsgid; + }; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct file *ia_file; + long: 32; +}; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +enum quota_type { + USRQUOTA = 0, + GRPQUOTA = 1, + PRJQUOTA = 2, +}; + +struct kqid { + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; +}; + +struct mem_dqblk { + qsize_t dqb_bhardlimit; + qsize_t dqb_bsoftlimit; + qsize_t dqb_curspace; + qsize_t dqb_rsvspace; + qsize_t dqb_ihardlimit; + qsize_t dqb_isoftlimit; + qsize_t dqb_curinodes; + time64_t dqb_btime; + time64_t dqb_itime; +}; + +struct dquot { + struct hlist_node dq_hash; + struct list_head dq_inuse; + struct list_head dq_free; + struct list_head dq_dirty; + struct mutex dq_lock; + spinlock_t dq_dqb_lock; + atomic_t dq_count; + struct super_block *dq_sb; + struct kqid dq_id; + loff_t dq_off; + long unsigned int dq_flags; + long: 32; + struct mem_dqblk dq_dqb; +}; + +struct quota_format_type { + int qf_fmt_id; + const struct quota_format_ops *qf_ops; + struct module *qf_owner; + struct quota_format_type *qf_next; +}; + +struct quota_format_ops { + int (*check_quota_file)(struct super_block *, int); + int (*read_file_info)(struct super_block *, int); + int (*write_file_info)(struct super_block *, int); + int (*free_file_info)(struct super_block *, int); + int (*read_dqblk)(struct dquot *); + int (*commit_dqblk)(struct dquot *); + int (*release_dqblk)(struct dquot *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct dquot_operations { + int (*write_dquot)(struct dquot *); + struct dquot * (*alloc_dquot)(struct super_block *, int); + void (*destroy_dquot)(struct dquot *); + int (*acquire_dquot)(struct dquot *); + int (*release_dquot)(struct dquot *); + int (*mark_dirty)(struct dquot *); + int (*write_info)(struct super_block *, int); + qsize_t * (*get_reserved_space)(struct inode *); + int (*get_projid)(struct inode *, kprojid_t *); + int (*get_inode_usage)(struct inode *, qsize_t *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct qc_dqblk { + int d_fieldmask; + long: 32; + u64 d_spc_hardlimit; + u64 
d_spc_softlimit; + u64 d_ino_hardlimit; + u64 d_ino_softlimit; + u64 d_space; + u64 d_ino_count; + s64 d_ino_timer; + s64 d_spc_timer; + int d_ino_warns; + int d_spc_warns; + u64 d_rt_spc_hardlimit; + u64 d_rt_spc_softlimit; + u64 d_rt_space; + s64 d_rt_spc_timer; + int d_rt_spc_warns; + long: 32; +}; + +struct qc_type_state { + unsigned int flags; + unsigned int spc_timelimit; + unsigned int ino_timelimit; + unsigned int rt_spc_timelimit; + unsigned int spc_warnlimit; + unsigned int ino_warnlimit; + unsigned int rt_spc_warnlimit; + long: 32; + long long unsigned int ino; + blkcnt_t blocks; + blkcnt_t nextents; +}; + +struct qc_state { + unsigned int s_incoredqs; + long: 32; + struct qc_type_state s_state[3]; +}; + +struct qc_info { + int i_fieldmask; + unsigned int i_flags; + unsigned int i_spc_timelimit; + unsigned int i_ino_timelimit; + unsigned int i_rt_spc_timelimit; + unsigned int i_spc_warnlimit; + unsigned int i_ino_warnlimit; + unsigned int i_rt_spc_warnlimit; +}; + +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int (*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int (*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct writeback_control; + +struct readahead_control; + +struct swap_info_struct; + +struct address_space_operations { + int (*writepage)(struct page *, struct writeback_control *); + int (*read_folio)(struct file *, struct folio *); + int (*writepages)(struct address_space *, struct writeback_control *); + bool (*dirty_folio)(struct address_space *, struct folio *); + void (*readahead)(struct readahead_control *); + int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, struct folio **, void **); + int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct folio *, void *); + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidate_folio)(struct folio *, size_t, size_t); + bool (*release_folio)(struct folio *, gfp_t); + void (*free_folio)(struct folio *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); + int (*migrate_folio)(struct address_space *, struct folio *, struct folio *, enum migrate_mode); + int (*launder_folio)(struct folio *); + bool (*is_partially_uptodate)(struct folio *, size_t, size_t); + void (*is_dirty_writeback)(struct folio *, bool *, bool *); + int (*error_remove_folio)(struct address_space *, struct folio *); + int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); + void (*swap_deactivate)(struct file *); + int (*swap_rw)(struct kiocb *, struct iov_iter *); +}; + +struct fiemap_extent_info; + +struct fileattr; + +struct offset_ctx; + +struct inode_operations { + struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); + const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); + int (*permission)(struct mnt_idmap *, struct inode *, int); + struct posix_acl * (*get_inode_acl)(struct inode *, int, bool); + int (*readlink)(struct dentry 
*, char *, int); + int (*create)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, bool); + int (*link)(struct dentry *, struct inode *, struct dentry *); + int (*unlink)(struct inode *, struct dentry *); + int (*symlink)(struct mnt_idmap *, struct inode *, struct dentry *, const char *); + int (*mkdir)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t); + int (*rmdir)(struct inode *, struct dentry *); + int (*mknod)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, dev_t); + int (*rename)(struct mnt_idmap *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); + int (*setattr)(struct mnt_idmap *, struct dentry *, struct iattr *); + int (*getattr)(struct mnt_idmap *, const struct path *, struct kstat *, u32, unsigned int); + ssize_t (*listxattr)(struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); + int (*update_time)(struct inode *, int); + int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); + int (*tmpfile)(struct mnt_idmap *, struct inode *, struct file *, umode_t); + struct posix_acl * (*get_acl)(struct mnt_idmap *, struct dentry *, int); + int (*set_acl)(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); + int (*fileattr_set)(struct mnt_idmap *, struct dentry *, struct fileattr *); + int (*fileattr_get)(struct dentry *, struct fileattr *); + struct offset_ctx * (*get_offset_ctx)(struct inode *); + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct fown_struct { + struct file *file; + rwlock_t lock; + struct pid *pid; + enum pid_type pid_type; + kuid_t uid; + kuid_t euid; + int signum; +}; + +enum freeze_holder { + FREEZE_HOLDER_KERNEL = 1, + FREEZE_HOLDER_USERSPACE = 2, + FREEZE_MAY_NEST = 4, +}; + +struct kstatfs; + +struct super_operations { + struct inode * (*alloc_inode)(struct super_block *); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + void (*dirty_inode)(struct inode *, int); + int (*write_inode)(struct inode *, struct writeback_control *); + int (*drop_inode)(struct inode *); + void (*evict_inode)(struct inode *); + void (*put_super)(struct super_block *); + int (*sync_fs)(struct super_block *, int); + int (*freeze_super)(struct super_block *, enum freeze_holder); + int (*freeze_fs)(struct super_block *); + int (*thaw_super)(struct super_block *, enum freeze_holder); + int (*unfreeze_fs)(struct super_block *); + int (*statfs)(struct dentry *, struct kstatfs *); + int (*remount_fs)(struct super_block *, int *, char *); + void (*umount_begin)(struct super_block *); + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); + long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); + long int (*free_cached_objects)(struct super_block *, struct shrink_control *); + void (*shutdown)(struct super_block *); +}; + +struct fid; + +struct iomap; + +struct export_operations { + int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); + struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); + struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); + int (*get_name)(struct dentry *, char *, struct dentry *); + struct dentry * (*get_parent)(struct dentry *); + int (*commit_metadata)(struct inode *); + 
int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); + int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); + int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); + long unsigned int flags; +}; + +struct xattr_handler { + const char *name; + const char *prefix; + int flags; + bool (*list)(struct dentry *); + int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); + int (*set)(const struct xattr_handler *, struct mnt_idmap *, struct dentry *, struct inode *, const char *, const void *, size_t, int); +}; + +typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); + +struct dir_context { + filldir_t actor; + long: 32; + loff_t pos; +}; + +struct offset_ctx { + struct maple_tree mt; + long unsigned int next_offset; +}; + +struct p_log; + +struct fs_parameter; + +struct fs_parse_result; + +typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *); + +struct fs_parameter_spec { + const char *name; + fs_param_type *type; + u8 opt; + short unsigned int flags; + const void *data; +}; + +struct kernfs_root; + +struct kernfs_elem_dir { + long unsigned int subdirs; + struct rb_root children; + struct kernfs_root *root; + long unsigned int rev; +}; + +struct kernfs_elem_symlink { + struct kernfs_node *target_kn; +}; + +struct kernfs_ops; + +struct kernfs_open_node; + +struct kernfs_elem_attr { + const struct kernfs_ops *ops; + struct kernfs_open_node *open; + loff_t size; + struct kernfs_node *notify_next; + long: 32; +}; + +struct kernfs_iattrs; + +struct kernfs_node { + atomic_t count; + atomic_t active; + struct kernfs_node *parent; + const char *name; + struct rb_node rb; + const void *ns; + unsigned int hash; + short unsigned int flags; + umode_t mode; + union { + struct kernfs_elem_dir dir; + struct kernfs_elem_symlink symlink; + struct kernfs_elem_attr attr; + }; + u64 id; + void *priv; + struct kernfs_iattrs *iattr; + struct callback_head rcu; +}; + +struct kernfs_open_file; + +struct kernfs_ops { + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); + size_t atomic_write_len; + bool prealloc; + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); + loff_t (*llseek)(struct kernfs_open_file *, loff_t, int); +}; + +struct kernfs_open_file { + struct kernfs_node *kn; + struct file *file; + struct seq_file *seq_file; + void *priv; + struct mutex mutex; + struct mutex prealloc_mutex; + int event; + struct list_head list; + char *prealloc_buf; + size_t atomic_write_len; + bool mmapped: 1; + bool released: 1; + const struct vm_operations_struct *vm_ops; +}; + +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET = 1, + KOBJ_NS_TYPES = 2, +}; + +struct sock; + +struct kobj_ns_type_operations { + enum kobj_ns_type type; + bool (*current_may_mount)(); + void * (*grab_current_ns)(); + const void * (*netlink_ns)(struct sock *); + const void * (*initial_ns)(); + void (*drop_ns)(void *); +}; + +struct attribute { + const char *name; + 
umode_t mode; +}; + +struct bin_attribute { + struct attribute attr; + size_t size; + void *private; + struct address_space * (*f_mapping)(); + ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, loff_t, int); + int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *, struct vm_area_struct *); +}; + +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, struct attribute *, int); + umode_t (*is_bin_visible)(struct kobject *, const struct bin_attribute *, int); + size_t (*bin_size)(struct kobject *, const struct bin_attribute *, int); + struct attribute **attrs; + union { + struct bin_attribute **bin_attrs; + const struct bin_attribute * const *bin_attrs_new; + }; +}; + +struct sysfs_ops { + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); +}; + +struct kset_uevent_ops; + +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +}; + +struct kobj_type { + void (*release)(struct kobject *); + const struct sysfs_ops *sysfs_ops; + const struct attribute_group **default_groups; + const struct kobj_ns_type_operations * (*child_ns_type)(const struct kobject *); + const void * (*namespace)(const struct kobject *); + void (*get_ownership)(const struct kobject *, kuid_t *, kgid_t *); +}; + +struct kobj_uevent_env { + char *argv[3]; + char *envp[64]; + int envp_idx; + char buf[2048]; + int buflen; +}; + +struct kset_uevent_ops { + int (* const filter)(const struct kobject *); + const char * (* const name)(const struct kobject *); + int (* const uevent)(const struct kobject *, struct kobj_uevent_env *); +}; + +typedef __u32 Elf32_Addr; + +typedef __u16 Elf32_Half; + +typedef __u32 Elf32_Word; + +struct elf32_sym { + Elf32_Word st_name; + Elf32_Addr st_value; + Elf32_Word st_size; + unsigned char st_info; + unsigned char st_other; + Elf32_Half st_shndx; +}; + +struct kparam_string; + +struct kparam_array; + +struct kernel_param { + const char *name; + struct module *mod; + const struct kernel_param_ops *ops; + const u16 perm; + s8 level; + u8 flags; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +struct kparam_array { + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +struct kernel_symbol { + long unsigned int value; + const char *name; + const char *namespace; +}; + +struct nsset; + +struct proc_ns_operations { + const char *name; + const char *real_ns_name; + int type; + struct ns_common * (*get)(struct 
task_struct *); + void (*put)(struct ns_common *); + int (*install)(struct nsset *, struct ns_common *); + struct user_namespace * (*owner)(struct ns_common *); + struct ns_common * (*get_parent)(struct ns_common *); +}; + +typedef short int __s16; + +typedef u32 dma_addr_t; + +struct pollfd { + int fd; + short int events; + short int revents; +}; + +struct cacheline_padding { + char x[0]; +}; + +struct device; + +struct perf_cpu_pmu_context; + +struct perf_event_pmu_context; + +struct perf_output_handle; + +struct pmu { + struct list_head entry; + struct module *module; + struct device *dev; + struct device *parent; + const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; + const char *name; + int type; + int capabilities; + unsigned int scope; + int *pmu_disable_count; + struct perf_cpu_pmu_context *cpu_pmu_context; + atomic_t exclusive_cnt; + int task_ctx_nr; + int hrtimer_interval_ms; + unsigned int nr_addr_filters; + void (*pmu_enable)(struct pmu *); + void (*pmu_disable)(struct pmu *); + int (*event_init)(struct perf_event *); + void (*event_mapped)(struct perf_event *, struct mm_struct *); + void (*event_unmapped)(struct perf_event *, struct mm_struct *); + int (*add)(struct perf_event *, int); + void (*del)(struct perf_event *, int); + void (*start)(struct perf_event *, int); + void (*stop)(struct perf_event *, int); + void (*read)(struct perf_event *); + void (*start_txn)(struct pmu *, unsigned int); + int (*commit_txn)(struct pmu *); + void (*cancel_txn)(struct pmu *); + int (*event_idx)(struct perf_event *); + void (*sched_task)(struct perf_event_pmu_context *, bool); + struct kmem_cache *task_ctx_cache; + void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); + void * (*setup_aux)(struct perf_event *, void **, int, bool); + void (*free_aux)(void *); + long int (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, long unsigned int); + int (*addr_filters_validate)(struct list_head *); + void (*addr_filters_sync)(struct perf_event *); + int (*aux_output_match)(struct perf_event *); + bool (*filter)(struct pmu *, int); + int (*check_period)(struct perf_event *, u64); +}; + +enum perf_event_state { + PERF_EVENT_STATE_DEAD = -4, + PERF_EVENT_STATE_EXIT = -3, + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +typedef struct { + atomic64_t a; +} local64_t; + +struct perf_event_attr { + __u32 type; + __u32 size; + __u64 config; + union { + __u64 sample_period; + __u64 sample_freq; + }; + __u64 sample_type; + __u64 read_format; + __u64 disabled: 1; + __u64 inherit: 1; + __u64 pinned: 1; + __u64 exclusive: 1; + __u64 exclude_user: 1; + __u64 exclude_kernel: 1; + __u64 exclude_hv: 1; + __u64 exclude_idle: 1; + __u64 mmap: 1; + __u64 comm: 1; + __u64 freq: 1; + __u64 inherit_stat: 1; + __u64 enable_on_exec: 1; + __u64 task: 1; + __u64 watermark: 1; + __u64 precise_ip: 2; + __u64 mmap_data: 1; + __u64 sample_id_all: 1; + __u64 exclude_host: 1; + __u64 exclude_guest: 1; + __u64 exclude_callchain_kernel: 1; + __u64 exclude_callchain_user: 1; + __u64 mmap2: 1; + __u64 comm_exec: 1; + __u64 use_clockid: 1; + __u64 context_switch: 1; + __u64 write_backward: 1; + __u64 namespaces: 1; + __u64 ksymbol: 1; + __u64 bpf_event: 1; + __u64 aux_output: 1; + __u64 cgroup: 1; + __u64 text_poke: 1; + __u64 build_id: 1; + __u64 inherit_thread: 1; + __u64 remove_on_exec: 1; + __u64 sigtrap: 1; + __u64 __reserved_1: 26; + union { + __u32 wakeup_events; 
+ __u32 wakeup_watermark; + }; + __u32 bp_type; + union { + __u64 bp_addr; + __u64 kprobe_func; + __u64 uprobe_path; + __u64 config1; + }; + union { + __u64 bp_len; + __u64 kprobe_addr; + __u64 probe_offset; + __u64 config2; + }; + __u64 branch_sample_type; + __u64 sample_regs_user; + __u32 sample_stack_user; + __s32 clockid; + __u64 sample_regs_intr; + __u32 aux_watermark; + __u16 sample_max_stack; + __u16 __reserved_2; + __u32 aux_sample_size; + union { + __u32 aux_action; + struct { + __u32 aux_start_paused: 1; + __u32 aux_pause: 1; + __u32 aux_resume: 1; + __u32 __reserved_3: 29; + }; + }; + __u64 sig_data; + __u64 config3; +}; + +struct hw_perf_event_extra { + u64 config; + unsigned int reg; + int alloc; + int idx; + long: 32; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head *next; +}; + +struct hw_perf_event { + union { + struct { + u64 config; + u64 last_tag; + long unsigned int config_base; + long unsigned int event_base; + int event_base_rdpmc; + int idx; + int last_cpu; + int flags; + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { + u64 aux_config; + unsigned int aux_paused; + long: 32; + }; + struct { + struct hrtimer hrtimer; + }; + struct { + struct list_head tp_list; + }; + struct { + u64 pwr_acc; + u64 ptsc; + }; + struct { + struct arch_hw_breakpoint info; + struct rhlist_head bp_list; + }; + struct { + u8 iommu_bank; + u8 iommu_cntr; + u16 padding; + long: 32; + u64 conf; + u64 conf1; + }; + }; + struct task_struct *target; + void *addr_filters; + long unsigned int addr_filters_gen; + int state; + local64_t prev_count; + u64 sample_period; + union { + struct { + u64 last_period; + local64_t period_left; + }; + struct { + u64 saved_metric; + u64 saved_slots; + }; + }; + u64 interrupts_seq; + u64 interrupts; + u64 freq_time_stamp; + u64 freq_count_stamp; +}; + +struct irq_work { + struct __call_single_node node; + void (*func)(struct irq_work *); + struct rcuwait irqwait; +}; + +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; +}; + +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); + +struct perf_buffer; + +struct fasync_struct; + +struct perf_addr_filter_range; + +struct bpf_prog; + +struct event_filter; + +struct perf_cgroup; + +struct perf_event { + struct list_head event_entry; + struct list_head sibling_list; + struct list_head active_list; + struct rb_node group_node; + long: 32; + u64 group_index; + struct list_head migrate_entry; + struct hlist_node hlist_entry; + struct list_head active_entry; + int nr_siblings; + int event_caps; + int group_caps; + unsigned int group_generation; + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; + enum perf_event_state state; + unsigned int attach_state; + long: 32; + local64_t count; + atomic64_t child_count; + u64 total_time_enabled; + u64 total_time_running; + u64 tstamp; + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + struct perf_event_context *ctx; + struct perf_event_pmu_context *pmu_ctx; + atomic_long_t refcount; + long: 32; + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + int oncpu; + int cpu; + struct list_head owner_entry; + struct task_struct *owner; + struct mutex mmap_mutex; + atomic_t 
mmap_count; + struct perf_buffer *rb; + struct list_head rb_entry; + long unsigned int rcu_batches; + int rcu_pending; + wait_queue_head_t waitq; + struct fasync_struct *fasync; + unsigned int pending_wakeup; + unsigned int pending_kill; + unsigned int pending_disable; + long unsigned int pending_addr; + struct irq_work pending_irq; + struct irq_work pending_disable_irq; + struct callback_head pending_task; + unsigned int pending_work; + struct rcuwait pending_work_wait; + atomic_t event_limit; + struct perf_addr_filters_head addr_filters; + struct perf_addr_filter_range *addr_filter_ranges; + long unsigned int addr_filters_gen; + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); + struct callback_head callback_head; + struct pid_namespace *ns; + u64 id; + atomic64_t lost_samples; + u64 (*clock)(); + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; + struct bpf_prog *prog; + u64 bpf_cookie; + struct trace_event_call *tp_event; + struct event_filter *filter; + struct perf_cgroup *cgrp; + struct list_head sb_list; + __u32 orig_type; +}; + +typedef struct cpumask cpumask_var_t[1]; + +struct task_cputime { + u64 stime; + u64 utime; + long long unsigned int sum_exec_runtime; +}; + +struct ucounts { + struct hlist_node node; + struct user_namespace *ns; + kuid_t uid; + atomic_t count; + atomic_long_t ucount[10]; + atomic_long_t rlimit[4]; +}; + +struct rcu_work { + struct work_struct work; + struct callback_head rcu; + struct workqueue_struct *wq; +}; + +struct cgroup_subsys; + +struct cgroup_subsys_state { + struct cgroup *cgroup; + struct cgroup_subsys *ss; + struct percpu_ref refcnt; + struct list_head sibling; + struct list_head children; + struct list_head rstat_css_node; + int id; + unsigned int flags; + u64 serial_nr; + atomic_t online_cnt; + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + struct cgroup_subsys_state *parent; + int nr_descendants; +}; + +struct cgroup_file { + struct kernfs_node *kn; + long unsigned int notified_at; + struct timer_list notify_timer; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; + u64 ntime; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + struct bpf_prog_array *effective[28]; + struct hlist_head progs[28]; + u8 flags[28]; + struct list_head storages; + struct bpf_prog_array *inactive; + struct percpu_ref refcnt; + struct work_struct release_work; +}; + +struct cgroup_freezer_state { + bool freeze; + bool e_freeze; + int nr_frozen_descendants; + int nr_frozen_tasks; +}; + +struct cgroup_root; + +struct cgroup_rstat_cpu; + +struct psi_group; + +struct cgroup { + struct cgroup_subsys_state self; + long unsigned int flags; + int level; + int max_depth; + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + int nr_populated_csets; + int nr_populated_domain_children; + int nr_populated_threaded_children; + int nr_threaded_children; + struct kernfs_node *kn; + struct cgroup_file procs_file; + struct cgroup_file events_file; + struct cgroup_file psi_files[3]; + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + struct cgroup_subsys_state *subsys[13]; + int nr_dying_subsys[13]; + struct cgroup_root *root; + struct list_head cset_links; + struct list_head e_csets[13]; + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; + struct cgroup_rstat_cpu *rstat_cpu; + struct list_head rstat_css_list; + struct cacheline_padding _pad_; + struct cgroup *rstat_flush_next; + long: 32; + struct cgroup_base_stat 
last_bstat; + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; + struct list_head pidlists; + struct mutex pidlist_mutex; + wait_queue_head_t offline_waitq; + struct work_struct release_agent_work; + struct psi_group *psi; + struct cgroup_bpf bpf; + struct cgroup_freezer_state freezer; + struct bpf_local_storage *bpf_cgrp_storage; + struct cgroup *ancestors[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bio; + +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +struct css_set { + struct cgroup_subsys_state *subsys[13]; + refcount_t refcount; + struct css_set *dom_cset; + struct cgroup *dfl_cgrp; + int nr_tasks; + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + struct list_head task_iters; + struct list_head e_cset_node[13]; + struct list_head threaded_csets; + struct list_head threaded_csets_node; + struct hlist_node hlist; + struct list_head cgrp_links; + struct list_head mg_src_preload_node; + struct list_head mg_dst_preload_node; + struct list_head mg_node; + struct cgroup *mg_src_cgrp; + struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + bool dead; + struct callback_head callback_head; +}; + +struct perf_event_groups { + struct rb_root tree; + long: 32; + u64 index; +}; + +typedef struct { + atomic_long_t a; +} local_t; + +struct perf_event_context { + raw_spinlock_t lock; + struct mutex mutex; + struct list_head pmu_ctx_list; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; + struct list_head event_list; + int nr_events; + int nr_user; + int is_active; + int nr_task_data; + int nr_stat; + int nr_freq; + int rotate_disable; + refcount_t refcount; + struct task_struct *task; + long: 32; + u64 time; + u64 timestamp; + u64 timeoffset; + struct perf_event_context *parent_ctx; + long: 32; + u64 parent_gen; + u64 generation; + int pin_count; + int nr_cgroups; + struct callback_head callback_head; + local_t nr_no_switch_fast; + long: 32; +}; + +struct mem_cgroup_id { + int id; + refcount_t ref; +}; + +struct page_counter { + atomic_long_t usage; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + long unsigned int emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + long unsigned int elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + long unsigned int watermark; + long unsigned int local_watermark; + long unsigned int failcnt; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + bool protection_support; + long unsigned int min; + long unsigned int low; + long unsigned int high; + long unsigned int max; + struct page_counter *parent; + long: 32; + long: 32; +}; + +struct vmpressure { + long unsigned int scanned; + long unsigned int reclaimed; + long unsigned int tree_scanned; + long unsigned int tree_reclaimed; + spinlock_t sr_lock; + struct list_head events; + struct mutex events_lock; + struct work_struct work; +}; + +struct fprop_global { + struct percpu_counter events; + unsigned int period; + seqcount_t sequence; +}; + +struct wb_domain { + spinlock_t lock; + long: 32; + struct fprop_global completions; + struct timer_list period_timer; + long unsigned int period_time; + long unsigned int dirty_limit_tstamp; + long unsigned int dirty_limit; +}; + +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; + +struct memcg_cgwb_frn { + u64 bdi_id; + int 
memcg_id; + long: 32; + u64 at; + struct wb_completion done; +}; + +struct memcg_vmstats; + +struct memcg_vmstats_percpu; + +struct mem_cgroup_per_node; + +struct mem_cgroup { + struct cgroup_subsys_state css; + struct mem_cgroup_id id; + long: 32; + long: 32; + struct page_counter memory; + union { + struct page_counter swap; + struct page_counter memsw; + }; + struct list_head memory_peaks; + struct list_head swap_peaks; + spinlock_t peaks_lock; + struct work_struct high_work; + long unsigned int zswap_max; + bool zswap_writeback; + struct vmpressure vmpressure; + bool oom_group; + int swappiness; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct cgroup_file swap_events_file; + struct memcg_vmstats *vmstats; + atomic_long_t memory_events[9]; + atomic_long_t memory_events_local[9]; + long unsigned int socket_pressure; + int kmemcg_id; + struct obj_cgroup *objcg; + struct obj_cgroup *orig_objcg; + struct list_head objcg_list; + struct memcg_vmstats_percpu *vmstats_percpu; + struct list_head cgwb_list; + struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[4]; + struct mem_cgroup_per_node *nodeinfo[0]; +}; + +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct callback_head rcu; + }; +}; + +struct bpf_run_ctx {}; + +struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; +}; + +struct uid_gid_map { + union { + struct { + struct uid_gid_extent extent[5]; + u32 nr_extents; + }; + struct { + struct uid_gid_extent *forward; + struct uid_gid_extent *reverse; + }; + }; +}; + +struct ctl_table; + +struct ctl_table_root; + +struct ctl_table_set; + +struct ctl_dir; + +struct ctl_node; + +struct ctl_table_header { + union { + struct { + const struct ctl_table *ctl_table; + int ctl_table_size; + int used; + int count; + int nreg; + }; + struct callback_head rcu; + }; + struct completion *unregistering; + const struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; + struct hlist_head inodes; + enum { + SYSCTL_TABLE_TYPE_DEFAULT = 0, + SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY = 1, + } type; +}; + +struct ctl_dir { + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct binfmt_misc; + +struct user_namespace { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + struct uid_gid_map projid_map; + struct user_namespace *parent; + int level; + kuid_t owner; + kgid_t group; + struct ns_common ns; + long unsigned int flags; + bool parent_could_setfcap; + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; + struct work_struct work; + struct ctl_table_set set; + struct ctl_table_header *sysctls; + struct ucounts *ucounts; + long int ucount_max[10]; + long int rlimit_max[4]; + struct binfmt_misc *binfmt_misc; +}; + +struct shrinker_info_unit { + atomic_long_t nr_deferred[32]; + long unsigned int map[1]; +}; + +struct shrinker_info { + struct callback_head rcu; + int map_nr_max; + struct shrinker_info_unit *unit[0]; +}; + +struct linux_binprm; + +struct coredump_params; + +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *); + long unsigned int min_coredump; +}; + 
+struct zswap_lruvec_state { + atomic_long_t nr_disk_swapins; +}; + +struct free_area { + struct list_head free_list[6]; + long unsigned int nr_free; +}; + +struct pglist_data; + +struct lruvec { + struct list_head lists[5]; + spinlock_t lru_lock; + long unsigned int anon_cost; + long unsigned int file_cost; + atomic_long_t nonresident_age; + long unsigned int refaults[2]; + long unsigned int flags; + struct pglist_data *pgdat; + struct zswap_lruvec_state zswap_lruvec_state; +}; + +struct per_cpu_pages; + +struct per_cpu_zonestat; + +struct zone { + long unsigned int _watermark[4]; + long unsigned int watermark_boost; + long unsigned int nr_reserved_highatomic; + long unsigned int nr_free_highatomic; + long int lowmem_reserve[3]; + struct pglist_data *zone_pgdat; + struct per_cpu_pages *per_cpu_pageset; + struct per_cpu_zonestat *per_cpu_zonestats; + int pageset_high_min; + int pageset_high_max; + int pageset_batch; + long unsigned int *pageblock_flags; + long unsigned int zone_start_pfn; + atomic_long_t managed_pages; + long unsigned int spanned_pages; + long unsigned int present_pages; + long unsigned int cma_pages; + const char *name; + long unsigned int nr_isolate_pageblock; + int initialized; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + struct free_area free_area[11]; + long unsigned int flags; + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + long unsigned int percpu_drift_mark; + long unsigned int compact_cached_free_pfn; + long unsigned int compact_cached_migrate_pfn[2]; + long unsigned int compact_init_migrate_pfn; + long unsigned int compact_init_free_pfn; + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; + bool compact_blockskip_flush; + bool contiguous; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad3_; + atomic_long_t vm_stat[10]; + atomic_long_t vm_numa_event[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct zoneref { + struct zone *zone; + int zone_idx; +}; + +struct zonelist { + struct zoneref _zonerefs[4]; +}; + +enum zone_type { + ZONE_DMA = 0, + ZONE_NORMAL = 1, + ZONE_MOVABLE = 2, + __MAX_NR_ZONES = 3, +}; + +struct per_cpu_nodestat; + +struct pglist_data { + struct zone node_zones[3]; + struct zonelist node_zonelists[1]; + int nr_zones; + struct page *node_mem_map; + long unsigned int node_start_pfn; + long unsigned int node_present_pages; + long unsigned int node_spanned_pages; + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + wait_queue_head_t reclaim_wait[4]; + atomic_t nr_writeback_throttled; + long unsigned int nr_reclaim_start; + struct task_struct *kswapd; + int kswapd_order; + enum zone_type kswapd_highest_zoneidx; + int kswapd_failures; + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; + bool proactive_compact_trigger; + long unsigned int totalreserve_pages; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + struct lruvec __lruvec; + long unsigned int flags; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + struct per_cpu_nodestat *per_cpu_nodestats; + atomic_long_t vm_stat[45]; + long: 32; + long: 32; +}; + +struct per_cpu_pages { + spinlock_t lock; + int count; + int high; + int 
high_min; + int high_max; + int batch; + u8 flags; + u8 alloc_factor; + short int free_count; + struct list_head lists[12]; + long: 32; +}; + +struct per_cpu_zonestat { + s8 vm_stat_diff[10]; + s8 stat_threshold; +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[45]; +}; + +typedef int proc_handler(const struct ctl_table *, int, void *, size_t *, loff_t *); + +struct ctl_table_poll; + +struct ctl_table { + const char *procname; + void *data; + int maxlen; + umode_t mode; + proc_handler *proc_handler; + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +}; + +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set * (*lookup)(struct ctl_table_root *); + void (*set_ownership)(struct ctl_table_header *, kuid_t *, kgid_t *); + int (*permissions)(struct ctl_table_header *, const struct ctl_table *); +}; + +struct taskstats { + __u16 version; + __u32 ac_exitcode; + __u8 ac_flag; + __u8 ac_nice; + long: 32; + __u64 cpu_count; + __u64 cpu_delay_total; + __u64 blkio_count; + __u64 blkio_delay_total; + __u64 swapin_count; + __u64 swapin_delay_total; + __u64 cpu_run_real_total; + __u64 cpu_run_virtual_total; + char ac_comm[32]; + __u8 ac_sched; + __u8 ac_pad[3]; + long: 32; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + long: 32; + __u64 ac_etime; + __u64 ac_utime; + __u64 ac_stime; + __u64 ac_minflt; + __u64 ac_majflt; + __u64 coremem; + __u64 virtmem; + __u64 hiwater_rss; + __u64 hiwater_vm; + __u64 read_char; + __u64 write_char; + __u64 read_syscalls; + __u64 write_syscalls; + __u64 read_bytes; + __u64 write_bytes; + __u64 cancelled_write_bytes; + __u64 nvcsw; + __u64 nivcsw; + __u64 ac_utimescaled; + __u64 ac_stimescaled; + __u64 cpu_scaled_run_real_total; + __u64 freepages_count; + __u64 freepages_delay_total; + __u64 thrashing_count; + __u64 thrashing_delay_total; + __u64 ac_btime64; + __u64 compact_count; + __u64 compact_delay_total; + __u32 ac_tgid; + long: 32; + __u64 ac_tgetime; + __u64 ac_exe_dev; + __u64 ac_exe_inode; + __u64 wpcopy_count; + __u64 wpcopy_delay_total; + __u64 irq_count; + __u64 irq_delay_total; +}; + +enum writeback_sync_modes { + WB_SYNC_NONE = 0, + WB_SYNC_ALL = 1, +}; + +struct folio_batch { + unsigned char nr; + unsigned char i; + bool percpu_pvec_drained; + struct folio *folios[31]; +}; + +struct swap_iocb; + +struct writeback_control { + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + enum writeback_sync_modes sync_mode; + unsigned int for_kupdate: 1; + unsigned int for_background: 1; + unsigned int tagged_writepages: 1; + unsigned int for_reclaim: 1; + unsigned int range_cyclic: 1; + unsigned int for_sync: 1; + unsigned int unpinned_netfs_wb: 1; + unsigned int no_cgroup_owner: 1; + struct swap_iocb **swap_plug; + struct list_head *list; + struct folio_batch fbatch; + long unsigned int index; + int saved_err; + struct bdi_writeback *wb; + struct inode *inode; + int wb_id; + int wb_lcand_id; + int wb_tcand_id; + size_t wb_bytes; + size_t wb_lcand_bytes; + size_t wb_tcand_bytes; +}; + +struct iovec { + void *iov_base; + __kernel_size_t iov_len; +}; + +struct kvec; + +struct bio_vec; + +struct folio_queue; + +struct iov_iter { + u8 iter_type; + bool nofault; + bool data_source; + size_t iov_offset; + union { + struct iovec __ubuf_iovec; + struct { + union { + const 
struct iovec *__iov; + const struct kvec *kvec; + const struct bio_vec *bvec; + const struct folio_queue *folioq; + struct xarray *xarray; + void *ubuf; + }; + size_t count; + }; + }; + union { + long unsigned int nr_segs; + u8 folioq_slot; + loff_t xarray_start; + }; +}; + +struct fprop_local_percpu { + struct percpu_counter events; + unsigned int period; + raw_spinlock_t lock; +}; + +enum wb_reason { + WB_REASON_BACKGROUND = 0, + WB_REASON_VMSCAN = 1, + WB_REASON_SYNC = 2, + WB_REASON_PERIODIC = 3, + WB_REASON_LAPTOP_TIMER = 4, + WB_REASON_FS_FREE_SPACE = 5, + WB_REASON_FORKER_THREAD = 6, + WB_REASON_FOREIGN_FLUSH = 7, + WB_REASON_MAX = 8, +}; + +struct bdi_writeback { + struct backing_dev_info *bdi; + long unsigned int state; + long unsigned int last_old_flush; + struct list_head b_dirty; + struct list_head b_io; + struct list_head b_more_io; + struct list_head b_dirty_time; + spinlock_t list_lock; + atomic_t writeback_inodes; + long: 32; + struct percpu_counter stat[4]; + long unsigned int bw_time_stamp; + long unsigned int dirtied_stamp; + long unsigned int written_stamp; + long unsigned int write_bandwidth; + long unsigned int avg_write_bandwidth; + long unsigned int dirty_ratelimit; + long unsigned int balanced_dirty_ratelimit; + long: 32; + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + spinlock_t work_lock; + struct list_head work_list; + struct delayed_work dwork; + struct delayed_work bw_dwork; + struct list_head bdi_node; + struct percpu_ref refcnt; + long: 32; + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; + struct list_head memcg_node; + struct list_head blkcg_node; + struct list_head b_attached; + struct list_head offline_node; + union { + struct work_struct release_work; + struct callback_head rcu; + }; +}; + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; + struct file *fa_file; + struct callback_head fa_rcu; +}; + +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING = 1, + DL_DEV_DRIVER_BOUND = 2, + DL_DEV_UNBINDING = 3, +}; + +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + struct list_head defer_sync; + enum dl_dev_state status; +}; + +struct pm_message { + int event; +}; + +typedef struct pm_message pm_message_t; + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE = 1, + RPM_REQ_SUSPEND = 2, + RPM_REQ_AUTOSUSPEND = 3, + RPM_REQ_RESUME = 4, +}; + +enum rpm_status { + RPM_INVALID = -1, + RPM_ACTIVE = 0, + RPM_RESUMING = 1, + RPM_SUSPENDED = 2, + RPM_SUSPENDING = 3, +}; + +struct wake_irq; + +struct pm_subsys_data; + +struct dev_pm_qos; + +struct dev_pm_info { + pm_message_t power_state; + bool can_wakeup: 1; + bool async_suspend: 1; + bool in_dpm_list: 1; + bool is_prepared: 1; + bool is_suspended: 1; + bool is_noirq_suspended: 1; + bool is_late_suspended: 1; + bool no_pm: 1; + bool early_init: 1; + bool direct_complete: 1; + u32 driver_flags; + spinlock_t lock; + bool should_wakeup: 1; + long: 32; + struct hrtimer suspend_timer; + u64 timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth: 3; + bool idle_notification: 1; + bool request_pending: 1; + bool deferred_resume: 1; + bool needs_force_resume: 1; + bool runtime_auto: 1; + bool ignore_children: 1; + bool no_callbacks: 1; + bool irq_safe: 1; + bool 
use_autosuspend: 1; + bool timer_autosuspends: 1; + bool memalloc_noio: 1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + enum rpm_status last_status; + int runtime_error; + int autosuspend_delay; + long: 32; + u64 last_busy; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; + struct pm_subsys_data *subsys_data; + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; + long: 32; +}; + +struct irq_domain; + +struct msi_device_data; + +struct dev_msi_info { + struct irq_domain *domain; + struct msi_device_data *data; +}; + +struct dev_archdata { + dma_addr_t dma_offset; +}; + +struct dev_iommu; + +enum device_removable { + DEVICE_REMOVABLE_NOT_SUPPORTED = 0, + DEVICE_REMOVABLE_UNKNOWN = 1, + DEVICE_FIXED = 2, + DEVICE_REMOVABLE = 3, +}; + +struct device_private; + +struct device_type; + +struct bus_type; + +struct device_driver; + +struct dev_pm_domain; + +struct bus_dma_region; + +struct device_dma_parameters; + +struct dma_coherent_mem; + +struct device_node; + +struct fwnode_handle; + +struct class; + +struct iommu_group; + +struct device_physical_location; + +struct device { + struct kobject kobj; + struct device *parent; + struct device_private *p; + const char *init_name; + const struct device_type *type; + const struct bus_type *bus; + struct device_driver *driver; + void *platform_data; + void *driver_data; + struct mutex mutex; + struct dev_links_info links; + long: 32; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + struct dev_msi_info msi; + u64 *dma_mask; + u64 coherent_dma_mask; + u64 bus_dma_limit; + const struct bus_dma_region *dma_range_map; + struct device_dma_parameters *dma_parms; + struct list_head dma_pools; + struct dma_coherent_mem *dma_mem; + struct dev_archdata archdata; + struct device_node *of_node; + struct fwnode_handle *fwnode; + dev_t devt; + u32 id; + spinlock_t devres_lock; + struct list_head devres_head; + const struct class *class; + const struct attribute_group **groups; + void (*release)(struct device *); + struct iommu_group *iommu_group; + struct dev_iommu *iommu; + struct device_physical_location *physical_location; + enum device_removable removable; + bool offline_disabled: 1; + bool offline: 1; + bool of_node_reused: 1; + bool state_synced: 1; + bool can_match: 1; + long: 32; +}; + +struct disk_stats; + +struct blk_holder_ops; + +struct partition_meta_info; + +struct block_device { + sector_t bd_start_sect; + sector_t bd_nr_sectors; + struct gendisk *bd_disk; + struct request_queue *bd_queue; + struct disk_stats *bd_stats; + long unsigned int bd_stamp; + atomic_t __bd_flags; + dev_t bd_dev; + struct address_space *bd_mapping; + atomic_t bd_openers; + spinlock_t bd_size_lock; + void *bd_claiming; + void *bd_holder; + const struct blk_holder_ops *bd_holder_ops; + struct mutex bd_holder_lock; + int bd_holders; + struct kobject *bd_holder_dir; + atomic_t bd_fsfreeze_count; + struct mutex bd_fsfreeze_mutex; + struct partition_meta_info *bd_meta_info; + int bd_writers; + long: 32; + struct device bd_device; +}; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; + struct list_head bdi_list; + long unsigned int ra_pages; + long unsigned int io_pages; + struct kref refcnt; + unsigned int capabilities; + unsigned int min_ratio; + unsigned int max_ratio; + unsigned int max_prop_frac; + atomic_long_t tot_write_bandwidth; + long unsigned int last_bdp_sleep; + struct bdi_writeback wb; + struct list_head wb_list; + struct xarray cgwb_tree; + 
struct mutex cgwb_release_mutex; + struct rw_semaphore wb_switch_rwsem; + wait_queue_head_t wb_waitq; + struct device *dev; + char dev_name[64]; + struct device *owner; + struct timer_list laptop_mode_wb_timer; + struct dentry *debug_dir; + long: 32; +}; + +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +}; + +struct seq_operations; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + long: 32; + loff_t index; + loff_t read_pos; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; + long: 32; +}; + +typedef __u32 blk_opf_t; + +typedef u8 blk_status_t; + +struct bvec_iter { + sector_t bi_sector; + unsigned int bi_size; + unsigned int bi_idx; + unsigned int bi_bvec_done; +}; + +typedef unsigned int blk_qc_t; + +typedef void bio_end_io_t(struct bio *); + +struct bio_issue { + u64 value; +}; + +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct blkcg_gq; + +struct bio_set; + +struct bio { + struct bio *bi_next; + struct block_device *bi_bdev; + blk_opf_t bi_opf; + short unsigned int bi_flags; + short unsigned int bi_ioprio; + enum rw_hint bi_write_hint; + blk_status_t bi_status; + atomic_t __bi_remaining; + struct bvec_iter bi_iter; + union { + blk_qc_t bi_cookie; + unsigned int __bi_nr_segments; + }; + bio_end_io_t *bi_end_io; + void *bi_private; + struct blkcg_gq *bi_blkg; + long: 32; + struct bio_issue bi_issue; + u64 bi_iocost_cost; + short unsigned int bi_vcnt; + short unsigned int bi_max_vecs; + atomic_t __bi_cnt; + struct bio_vec *bi_io_vec; + struct bio_set *bi_pool; + struct bio_vec bi_inline_vecs[0]; +}; + +struct kvec { + void *iov_base; + size_t iov_len; +}; + +struct folio_queue { + struct folio_batch vec; + u8 orders[31]; + struct folio_queue *next; + struct folio_queue *prev; + long unsigned int marks; + long unsigned int marks2; + long unsigned int marks3; +}; + +struct dev_pm_ops { + int (*prepare)(struct device *); + void (*complete)(struct device *); + int (*suspend)(struct device *); + int (*resume)(struct device *); + int (*freeze)(struct device *); + int (*thaw)(struct device *); + int (*poweroff)(struct device *); + int (*restore)(struct device *); + int (*suspend_late)(struct device *); + int (*resume_early)(struct device *); + int (*freeze_late)(struct device *); + int (*thaw_early)(struct device *); + int (*poweroff_late)(struct device *); + int (*restore_early)(struct device *); + int (*suspend_noirq)(struct device *); + int (*resume_noirq)(struct device *); + int (*freeze_noirq)(struct device *); + int (*thaw_noirq)(struct device *); + int (*poweroff_noirq)(struct device *); + int (*restore_noirq)(struct device *); + int (*runtime_suspend)(struct device *); + int (*runtime_resume)(struct device *); + int (*runtime_idle)(struct device *); +}; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; + unsigned int clock_op_might_sleep; + struct mutex clock_mutex; + struct list_head clock_list; +}; + +struct dev_pm_domain { + struct dev_pm_ops ops; + int (*start)(struct device *); + void (*detach)(struct device *, bool); + int (*activate)(struct device *); + void (*sync)(struct device *); + void (*dismiss)(struct device *); + int (*set_performance_state)(struct device *, unsigned int); +}; + +struct bus_type { + const char *name; + const char *dev_name; + const struct 
attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + int (*match)(struct device *, const struct device_driver *); + int (*uevent)(const struct device *, struct kobj_uevent_env *); + int (*probe)(struct device *); + void (*sync_state)(struct device *); + void (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*online)(struct device *); + int (*offline)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + int (*num_vf)(struct device *); + int (*dma_configure)(struct device *); + void (*dma_cleanup)(struct device *); + const struct dev_pm_ops *pm; + bool need_parent_lock; +}; + +enum probe_type { + PROBE_DEFAULT_STRATEGY = 0, + PROBE_PREFER_ASYNCHRONOUS = 1, + PROBE_FORCE_SYNCHRONOUS = 2, +}; + +struct of_device_id; + +struct acpi_device_id; + +struct driver_private; + +struct device_driver { + const char *name; + const struct bus_type *bus; + struct module *owner; + const char *mod_name; + bool suppress_bind_attrs; + enum probe_type probe_type; + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + const struct dev_pm_ops *pm; + void (*coredump)(struct device *); + struct driver_private *p; +}; + +struct class { + const char *name; + const struct attribute_group **class_groups; + const struct attribute_group **dev_groups; + int (*dev_uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *); + void (*class_release)(const struct class *); + void (*dev_release)(struct device *); + int (*shutdown_pre)(struct device *); + const struct kobj_ns_type_operations *ns_type; + const void * (*namespace)(const struct device *); + void (*get_ownership)(const struct device *, kuid_t *, kgid_t *); + const struct dev_pm_ops *pm; +}; + +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *, kuid_t *, kgid_t *); + void (*release)(struct device *); + const struct dev_pm_ops *pm; +}; + +struct trace_event_functions; + +struct trace_event { + struct hlist_node node; + int type; + struct trace_event_functions *funcs; +}; + +struct trace_event_class; + +struct trace_event_call { + struct list_head list; + struct trace_event_class *class; + union { + char *name; + struct tracepoint *tp; + }; + struct trace_event event; + char *print_fmt; + union { + void *module; + atomic_t refcnt; + }; + void *data; + int flags; + int perf_refcount; + struct hlist_head *perf_events; + struct bpf_prog_array *prog_array; + int (*perf_perm)(struct trace_event_call *, struct perf_event *); +}; + +struct trace_eval_map { + const char *system; + const char *eval_string; + long unsigned int eval_value; +}; + +struct of_device_id { + char name[32]; + char type[32]; + char compatible[128]; + const void *data; +}; + +typedef long unsigned int kernel_ulong_t; + +struct acpi_device_id { + __u8 id[16]; + kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; +}; + +struct device_dma_parameters { + unsigned int max_segment_size; + unsigned int min_align_mask; + 
long unsigned int segment_boundary_mask; +}; + +enum device_physical_location_panel { + DEVICE_PANEL_TOP = 0, + DEVICE_PANEL_BOTTOM = 1, + DEVICE_PANEL_LEFT = 2, + DEVICE_PANEL_RIGHT = 3, + DEVICE_PANEL_FRONT = 4, + DEVICE_PANEL_BACK = 5, + DEVICE_PANEL_UNKNOWN = 6, +}; + +enum device_physical_location_vertical_position { + DEVICE_VERT_POS_UPPER = 0, + DEVICE_VERT_POS_CENTER = 1, + DEVICE_VERT_POS_LOWER = 2, +}; + +enum device_physical_location_horizontal_position { + DEVICE_HORI_POS_LEFT = 0, + DEVICE_HORI_POS_CENTER = 1, + DEVICE_HORI_POS_RIGHT = 2, +}; + +struct device_physical_location { + enum device_physical_location_panel panel; + enum device_physical_location_vertical_position vertical_position; + enum device_physical_location_horizontal_position horizontal_position; + bool dock; + bool lid; +}; + +typedef u32 phys_addr_t; + +struct bus_dma_region { + phys_addr_t cpu_start; + dma_addr_t dma_start; + u64 size; +}; + +typedef u32 phandle; + +struct fwnode_operations; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; + struct device *dev; + struct list_head suppliers; + struct list_head consumers; + u8 flags; +}; + +struct property; + +struct device_node { + const char *name; + phandle phandle; + const char *full_name; + struct fwnode_handle fwnode; + struct property *properties; + struct property *deadprops; + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; + struct kobject kobj; + long unsigned int _flags; + void *data; +}; + +struct iommu_group {}; + +struct seq_operations { + void * (*start)(struct seq_file *, loff_t *); + void (*stop)(struct seq_file *, void *); + void * (*next)(struct seq_file *, void *, loff_t *); + int (*show)(struct seq_file *, void *); +}; + +struct seq_buf { + char *buffer; + size_t size; + size_t len; +}; + +struct trace_seq { + char buffer[8172]; + struct seq_buf seq; + size_t readpos; + int full; +}; + +enum dev_dma_attr { + DEV_DMA_NOT_SUPPORTED = 0, + DEV_DMA_NON_COHERENT = 1, + DEV_DMA_COHERENT = 2, +}; + +struct fwnode_reference_args; + +struct fwnode_endpoint; + +struct fwnode_operations { + struct fwnode_handle * (*get)(struct fwnode_handle *); + void (*put)(struct fwnode_handle *); + bool (*device_is_available)(const struct fwnode_handle *); + const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); + bool (*device_dma_supported)(const struct fwnode_handle *); + enum dev_dma_attr (*device_get_dma_attr)(const struct fwnode_handle *); + bool (*property_present)(const struct fwnode_handle *, const char *); + int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); + int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); + const char * (*get_name)(const struct fwnode_handle *); + const char * (*get_name_prefix)(const struct fwnode_handle *); + struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); + struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); + int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); + struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle 
*); + struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); + int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); + void * (*iomap)(struct fwnode_handle *, int); + int (*irq_get)(const struct fwnode_handle *, unsigned int); + int (*add_links)(struct fwnode_handle *); +}; + +struct fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct fwnode_handle *local_fwnode; +}; + +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[8]; +}; + +union perf_mem_data_src { + __u64 val; + struct { + __u64 mem_rsvd: 18; + __u64 mem_hops: 3; + __u64 mem_blk: 3; + __u64 mem_snoopx: 2; + __u64 mem_remote: 1; + __u64 mem_lvl_num: 4; + __u64 mem_dtlb: 7; + __u64 mem_lock: 2; + __u64 mem_snoop: 5; + __u64 mem_lvl: 14; + __u64 mem_op: 5; + }; +}; + +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 mispred: 1; + __u64 predicted: 1; + __u64 in_tx: 1; + __u64 abort: 1; + __u64 cycles: 16; + __u64 type: 4; + __u64 spec: 2; + __u64 new_type: 4; + __u64 priv: 3; + __u64 reserved: 31; +}; + +union perf_sample_weight { + __u64 full; + struct { + __u16 var3_w; + __u16 var2_w; + __u32 var1_dw; + }; +}; + +struct cgroup_namespace { + struct ns_common ns; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct css_set *root_cset; +}; + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; + long: 32; +}; + +struct binfmt_misc { + struct list_head entries; + rwlock_t entries_lock; + bool enabled; +}; + +struct u64_stats_sync { + seqcount_t seq; +}; + +struct bpf_cgroup_storage; + +struct bpf_prog_array_item { + struct bpf_prog *prog; + long: 32; + union { + struct bpf_cgroup_storage *cgroup_storage[2]; + u64 bpf_cookie; + }; +}; + +struct bpf_prog_array { + struct callback_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct psi_group_cpu { + seqcount_t seq; + unsigned int tasks[4]; + u32 state_mask; + u32 times[7]; + long: 32; + u64 state_start; + u32 times_prev[14]; + long: 32; + long: 32; +}; + +struct psi_group { + struct psi_group *parent; + bool enabled; + struct mutex avgs_lock; + struct psi_group_cpu *pcpu; + u64 avg_total[6]; + u64 avg_last_update; + u64 avg_next_update; + struct delayed_work avgs_work; + struct list_head avg_triggers; + u32 avg_nr_triggers[6]; + long: 32; + u64 total[12]; + long unsigned int avg[18]; + struct task_struct *rtpoll_task; + struct timer_list rtpoll_timer; + wait_queue_head_t rtpoll_wait; + atomic_t rtpoll_wakeup; + atomic_t rtpoll_scheduled; + struct mutex rtpoll_trigger_lock; + struct list_head rtpoll_triggers; + u32 rtpoll_nr_triggers[6]; + u32 rtpoll_states; + long: 32; + u64 rtpoll_min_period; + u64 rtpoll_total[6]; + u64 rtpoll_next_update; + u64 rtpoll_until; +}; + +struct cgroup_taskset; + +struct cftype; + +struct cgroup_subsys { + struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); + int (*css_online)(struct cgroup_subsys_state *); + void (*css_offline)(struct cgroup_subsys_state *); + void (*css_released)(struct cgroup_subsys_state *); + void (*css_free)(struct cgroup_subsys_state *); + void (*css_reset)(struct cgroup_subsys_state *); + void (*css_rstat_flush)(struct cgroup_subsys_state *, int); + int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*css_local_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*can_attach)(struct cgroup_taskset *); + void (*cancel_attach)(struct cgroup_taskset *); + void (*attach)(struct cgroup_taskset *); + void 
(*post_attach)(); + int (*can_fork)(struct task_struct *, struct css_set *); + void (*cancel_fork)(struct task_struct *, struct css_set *); + void (*fork)(struct task_struct *); + void (*exit)(struct task_struct *); + void (*release)(struct task_struct *); + void (*bind)(struct cgroup_subsys_state *); + bool early_init: 1; + bool implicit_on_dfl: 1; + bool threaded: 1; + int id; + const char *name; + const char *legacy_name; + struct cgroup_root *root; + struct idr css_idr; + struct list_head cfts; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + unsigned int depends_on; +}; + +struct cgroup_rstat_cpu { + struct u64_stats_sync bsync; + long: 32; + struct cgroup_base_stat bstat; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat subtree_bstat; + struct cgroup_base_stat last_subtree_bstat; + struct cgroup *updated_children; + struct cgroup *updated_next; +}; + +struct cgroup_root { + struct kernfs_root *kf_root; + unsigned int subsys_mask; + int hierarchy_id; + struct list_head root_list; + struct callback_head rcu; + long: 32; + struct cgroup cgrp; + struct cgroup *cgrp_ancestor_storage; + atomic_t nr_cgrps; + unsigned int flags; + char release_agent_path[4096]; + char name[64]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct cftype { + char name[64]; + long unsigned int private; + size_t max_write_len; + unsigned int flags; + unsigned int file_offset; + struct cgroup_subsys *ss; + struct list_head node; + struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); + s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); + int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + struct lock_class_key lockdep_key; +}; + +struct bpf_insn { + __u8 code; + __u8 dst_reg: 4; + __u8 src_reg: 4; + __s16 off; + __s32 imm; +}; + +enum bpf_cgroup_iter_order { + BPF_CGROUP_ITER_ORDER_UNSPEC = 0, + BPF_CGROUP_ITER_SELF_ONLY = 1, + BPF_CGROUP_ITER_DESCENDANTS_PRE = 2, + BPF_CGROUP_ITER_DESCENDANTS_POST = 3, + BPF_CGROUP_ITER_ANCESTORS_UP = 4, +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + 
BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, + BPF_MAP_TYPE_BLOOM_FILTER = 30, + BPF_MAP_TYPE_USER_RINGBUF = 31, + BPF_MAP_TYPE_CGRP_STORAGE = 32, + BPF_MAP_TYPE_ARENA = 33, + __MAX_BPF_MAP_TYPE = 34, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, + BPF_PROG_TYPE_NETFILTER = 32, + __MAX_BPF_PROG_TYPE = 33, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + BPF_PERF_EVENT = 41, + BPF_TRACE_KPROBE_MULTI = 42, + BPF_LSM_CGROUP = 43, + BPF_STRUCT_OPS = 44, + BPF_NETFILTER = 45, + BPF_TCX_INGRESS = 46, + BPF_TCX_EGRESS = 47, + BPF_TRACE_UPROBE_MULTI = 48, + BPF_CGROUP_UNIX_CONNECT = 49, + BPF_CGROUP_UNIX_SENDMSG = 50, + BPF_CGROUP_UNIX_RECVMSG = 51, + BPF_CGROUP_UNIX_GETPEERNAME = 52, + BPF_CGROUP_UNIX_GETSOCKNAME = 53, + BPF_NETKIT_PRIMARY = 54, + BPF_NETKIT_PEER = 55, + BPF_TRACE_KPROBE_SESSION = 56, + BPF_TRACE_UPROBE_SESSION = 57, + __MAX_BPF_ATTACH_TYPE = 58, +}; + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 inner_map_fd; + __u32 numa_node; + char map_name[16]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + __u64 map_extra; + __s32 value_type_btf_obj_fd; + __s32 map_token_fd; + 
}; + struct { + __u32 map_fd; + long: 32; + __u64 key; + union { + __u64 value; + __u64 next_key; + }; + __u64 flags; + }; + struct { + __u64 in_batch; + __u64 out_batch; + __u64 keys; + __u64 values; + __u32 count; + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 insns; + __u64 license; + __u32 log_level; + __u32 log_size; + __u64 log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[16]; + __u32 prog_ifindex; + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + union { + __u32 attach_prog_fd; + __u32 attach_btf_obj_fd; + }; + __u32 core_relo_cnt; + __u64 fd_array; + __u64 core_relos; + __u32 core_relo_rec_size; + __u32 log_true_size; + __s32 prog_token_fd; + long: 32; + }; + struct { + __u64 pathname; + __u32 bpf_fd; + __u32 file_flags; + __s32 path_fd; + long: 32; + }; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + }; + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __u64 data_in; + __u64 data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + __u64 ctx_in; + __u64 ctx_out; + __u32 flags; + __u32 cpu; + __u32 batch_size; + long: 32; + } test; + struct { + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 info; + } info; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 prog_ids; + union { + __u32 prog_cnt; + __u32 count; + }; + long: 32; + __u64 prog_attach_flags; + __u64 link_ids; + __u64 link_attach_flags; + __u64 revision; + } query; + struct { + __u64 name; + __u32 prog_fd; + long: 32; + __u64 cookie; + } raw_tracepoint; + struct { + __u64 btf; + __u64 btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + __u32 btf_log_true_size; + __u32 btf_flags; + __s32 btf_token_fd; + }; + struct { + __u32 pid; + __u32 fd; + __u32 flags; + __u32 buf_len; + __u64 buf; + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + struct { + union { + __u32 prog_fd; + __u32 map_fd; + }; + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 flags; + union { + __u32 target_btf_id; + struct { + __u64 iter_info; + __u32 iter_info_len; + long: 32; + }; + struct { + __u64 bpf_cookie; + } perf_event; + struct { + __u32 flags; + __u32 cnt; + __u64 syms; + __u64 addrs; + __u64 cookies; + } kprobe_multi; + struct { + __u32 target_btf_id; + long: 32; + __u64 cookie; + } tracing; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + long: 32; + __u64 expected_revision; + } tcx; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 cnt; + __u32 flags; + __u32 pid; + long: 32; + } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + long: 32; + __u64 expected_revision; + } netkit; + }; + } link_create; + struct { + 
__u32 link_fd; + union { + __u32 new_prog_fd; + __u32 new_map_fd; + }; + __u32 flags; + union { + __u32 old_prog_fd; + __u32 old_map_fd; + }; + } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { + __u32 type; + } enable_stats; + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + struct { + __u32 prog_fd; + __u32 map_fd; + __u32 flags; + } prog_bind_map; + struct { + __u32 flags; + __u32 bpffs_fd; + } token_create; +}; + +struct bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + +struct sock_filter { + __u16 code; + __u8 jt; + __u8 jf; + __u32 k; +}; + +struct btf_type { + __u32 name_off; + __u32 info; + union { + __u32 size; + __u32 type; + }; +}; + +struct bpf_prog_stats; + +struct bpf_prog_aux; + +struct sock_fprog_kern; + +struct bpf_prog { + u16 pages; + u16 jited: 1; + u16 jit_requested: 1; + u16 gpl_compatible: 1; + u16 cb_access: 1; + u16 dst_needed: 1; + u16 blinding_requested: 1; + u16 blinded: 1; + u16 is_func: 1; + u16 kprobe_override: 1; + u16 has_callchain_buf: 1; + u16 enforce_expected_attach_type: 1; + u16 call_get_stack: 1; + u16 call_get_func_ip: 1; + u16 tstamp_type_access: 1; + u16 sleepable: 1; + enum bpf_prog_type type; + enum bpf_attach_type expected_attach_type; + u32 len; + u32 jited_len; + u8 tag[8]; + struct bpf_prog_stats *stats; + int *active; + unsigned int (*bpf_func)(const void *, const struct bpf_insn *); + struct bpf_prog_aux *aux; + struct sock_fprog_kern *orig_prog; + union { + struct { + struct {} __empty_insns; + struct sock_filter insns[0]; + }; + struct { + struct {} __empty_insnsi; + struct bpf_insn insnsi[0]; + }; + }; +}; + +enum btf_field_type { + BPF_SPIN_LOCK = 1, + BPF_TIMER = 2, + BPF_KPTR_UNREF = 4, + BPF_KPTR_REF = 8, + BPF_KPTR_PERCPU = 16, + BPF_KPTR = 28, + BPF_LIST_HEAD = 32, + BPF_LIST_NODE = 64, + BPF_RB_ROOT = 128, + BPF_RB_NODE = 256, + BPF_GRAPH_NODE = 320, + BPF_GRAPH_ROOT = 160, + BPF_REFCOUNT = 512, + BPF_WORKQUEUE = 1024, + BPF_UPTR = 2048, +}; + +typedef void (*btf_dtor_kfunc_t)(void *); + +struct btf; + +struct btf_field_kptr { + struct btf *btf; + struct module *module; + btf_dtor_kfunc_t dtor; + u32 btf_id; +}; + +struct btf_record; + +struct btf_field_graph_root { + struct btf *btf; + u32 value_btf_id; + u32 node_offset; + struct btf_record *value_rec; +}; + +struct btf_field { + u32 offset; + u32 size; + enum btf_field_type type; + union { + struct btf_field_kptr kptr; + struct btf_field_graph_root graph_root; + }; +}; + +struct btf_record { + u32 cnt; + u32 field_mask; + int spin_lock_off; + int timer_off; + int wq_off; + int refcount_off; + struct btf_field fields[0]; +}; + +struct blk_holder_ops { + void (*mark_dead)(struct block_device *, bool); + void (*sync)(struct block_device *); + int (*freeze)(struct block_device *); + int (*thaw)(struct block_device *); +}; + +typedef void *mempool_alloc_t(gfp_t, void *); + +typedef void mempool_free_t(void *, void *); + +struct mempool_s { + spinlock_t lock; + int min_nr; + int curr_nr; + void **elements; + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +typedef struct mempool_s mempool_t; + +struct bio_alloc_cache; + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + struct bio_alloc_cache *cache; + mempool_t bio_pool; + mempool_t bvec_pool; + unsigned int back_pad; + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + 
struct workqueue_struct *rescue_workqueue; + struct hlist_node cpuhp_dead; +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + atomic_t generation; +}; + +struct lruvec_stats_percpu; + +struct lruvec_stats; + +struct mem_cgroup_per_node { + struct mem_cgroup *memcg; + struct lruvec_stats_percpu *lruvec_stats_percpu; + struct lruvec_stats *lruvec_stats; + struct shrinker_info *shrinker_info; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad1_; + struct lruvec lruvec; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct cacheline_padding _pad2_; + long unsigned int lru_zone_size[15]; + struct mem_cgroup_reclaim_iter iter; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); + +struct bpf_iter_aux_info; + +typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); + +enum bpf_iter_task_type { + BPF_TASK_ITER_ALL = 0, + BPF_TASK_ITER_TID = 1, + BPF_TASK_ITER_TGID = 2, +}; + +struct bpf_map; + +struct bpf_iter_aux_info { + struct bpf_map *map; + struct { + struct cgroup *start; + enum bpf_cgroup_iter_order order; + } cgroup; + struct { + enum bpf_iter_task_type type; + u32 pid; + } task; +}; + +typedef void (*bpf_iter_fini_seq_priv_t)(void *); + +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + +struct bpf_local_storage_map; + +struct bpf_verifier_env; + +struct bpf_func_state; + +struct bpf_map_ops { + int (*map_alloc_check)(union bpf_attr *); + struct bpf_map * (*map_alloc)(union bpf_attr *); + void (*map_release)(struct bpf_map *, struct file *); + void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *, void *, void *); + void (*map_release_uref)(struct bpf_map *); + void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *); + int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_lookup_and_delete_elem)(struct bpf_map *, void *, void *, u64); + int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_update_batch)(struct bpf_map *, struct file *, const union bpf_attr *, union bpf_attr *); + int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + void * (*map_lookup_elem)(struct bpf_map *, void *); + long int (*map_update_elem)(struct bpf_map *, void *, void *, u64); + long int (*map_delete_elem)(struct bpf_map *, void *); + long int (*map_push_elem)(struct bpf_map *, void *, u64); + long int (*map_pop_elem)(struct bpf_map *, void *); + long int (*map_peek_elem)(struct bpf_map *, void *); + void * (*map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); + void (*map_fd_put_ptr)(struct bpf_map *, void *, bool); + int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); + u32 (*map_fd_sys_lookup_elem)(void *); + void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); + int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *); + int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); + int (*map_direct_value_addr)(const struct bpf_map *, 
u64 *, u32); + int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); + int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); + __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); + long unsigned int (*map_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); + void (*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); + struct bpf_local_storage ** (*map_owner_storage_ptr)(void *); + long int (*map_redirect)(struct bpf_map *, u64, u64); + bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); + int (*map_set_for_each_callback_args)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *); + long int (*map_for_each_callback)(struct bpf_map *, bpf_callback_t, void *, u64); + u64 (*map_mem_usage)(const struct bpf_map *); + int *map_btf_id; + const struct bpf_iter_seq_info *iter_seq_info; +}; + +struct bpf_map { + const struct bpf_map_ops *ops; + struct bpf_map *inner_map_meta; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u64 map_extra; + u32 map_flags; + u32 id; + struct btf_record *record; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + u32 btf_vmlinux_value_type_id; + struct btf *btf; + struct obj_cgroup *objcg; + char name[16]; + struct mutex freeze_mutex; + atomic64_t refcnt; + atomic64_t usercnt; + union { + struct work_struct work; + struct callback_head rcu; + }; + atomic64_t writecnt; + struct { + const struct btf_type *attach_func_proto; + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + } owner; + bool bypass_spec_v1; + bool frozen; + bool free_after_mult_rcu_gp; + bool free_after_rcu_gp; + long: 32; + atomic64_t sleepable_refcnt; + s64 *elem_count; + long: 32; +}; + +struct btf_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + __u32 type_off; + __u32 type_len; + __u32 str_off; + __u32 str_len; +}; + +struct btf_kfunc_set_tab; + +struct btf_id_dtor_kfunc_tab; + +struct btf_struct_metas; + +struct btf_struct_ops_tab; + +struct btf { + void *data; + struct btf_type **types; + u32 *resolved_ids; + u32 *resolved_sizes; + const char *strings; + void *nohdr_data; + struct btf_header hdr; + u32 nr_types; + u32 types_size; + u32 data_size; + refcount_t refcnt; + u32 id; + struct callback_head rcu; + struct btf_kfunc_set_tab *kfunc_set_tab; + struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; + struct btf_struct_metas *struct_meta_tab; + struct btf_struct_ops_tab *struct_ops_tab; + struct btf *base_btf; + u32 start_id; + u32 start_str_off; + char name[60]; + bool kernel_btf; + __u32 *base_id_map; +}; + +struct bpf_arena; + +struct bpf_ksym { + long unsigned int start; + long unsigned int end; + char name[512]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + +struct bpf_ctx_arg_aux; + +struct bpf_trampoline; + +struct bpf_jit_poke_descriptor; + +struct bpf_kfunc_desc_tab; + +struct bpf_kfunc_btf_tab; + +struct bpf_prog_ops; + +struct btf_mod_pair; + +struct bpf_token; + +struct bpf_prog_offload; + +struct bpf_func_info_aux; + +struct bpf_prog_aux { + atomic64_t refcnt; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; + u32 real_func_cnt; + u32 func_idx; + u32 attach_btf_id; + u32 ctx_arg_info_size; + u32 
max_rdonly_access; + u32 max_rdwr_access; + struct btf *attach_btf; + const struct bpf_ctx_arg_aux *ctx_arg_info; + void *priv_stack_ptr; + struct mutex dst_mutex; + struct bpf_prog *dst_prog; + struct bpf_trampoline *dst_trampoline; + enum bpf_prog_type saved_dst_prog_type; + enum bpf_attach_type saved_dst_attach_type; + bool verifier_zext; + bool dev_bound; + bool offload_requested; + bool attach_btf_trace; + bool attach_tracing_prog; + bool func_proto_unreliable; + bool tail_call_reachable; + bool xdp_has_frags; + bool exception_cb; + bool exception_boundary; + bool is_extended; + bool jits_use_priv_stack; + bool priv_stack_requested; + u64 prog_array_member_cnt; + struct mutex ext_mutex; + struct bpf_arena *arena; + void (*recursion_detected)(struct bpf_prog *); + const struct btf_type *attach_func_proto; + const char *attach_func_name; + struct bpf_prog **func; + void *jit_data; + struct bpf_jit_poke_descriptor *poke_tab; + struct bpf_kfunc_desc_tab *kfunc_tab; + struct bpf_kfunc_btf_tab *kfunc_btf_tab; + u32 size_poke_tab; + struct bpf_ksym ksym; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct mutex used_maps_mutex; + struct btf_mod_pair *used_btfs; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; + u32 verified_insns; + int cgroup_atype; + struct bpf_map *cgroup_storage[2]; + char name[16]; + u64 (*bpf_exception_cb)(u64, u64, u64, u64, u64); + struct bpf_token *token; + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; + struct bpf_line_info *linfo; + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + u32 linfo_idx; + struct module *mod; + u32 num_exentries; + struct exception_table_entry *extable; + union { + struct work_struct work; + struct callback_head rcu; + }; +}; + +enum bpf_reg_type { + NOT_INIT = 0, + SCALAR_VALUE = 1, + PTR_TO_CTX = 2, + CONST_PTR_TO_MAP = 3, + PTR_TO_MAP_VALUE = 4, + PTR_TO_MAP_KEY = 5, + PTR_TO_STACK = 6, + PTR_TO_PACKET_META = 7, + PTR_TO_PACKET = 8, + PTR_TO_PACKET_END = 9, + PTR_TO_FLOW_KEYS = 10, + PTR_TO_SOCKET = 11, + PTR_TO_SOCK_COMMON = 12, + PTR_TO_TCP_SOCK = 13, + PTR_TO_TP_BUFFER = 14, + PTR_TO_XDP_SOCK = 15, + PTR_TO_BTF_ID = 16, + PTR_TO_MEM = 17, + PTR_TO_ARENA = 18, + PTR_TO_BUF = 19, + PTR_TO_FUNC = 20, + CONST_PTR_TO_DYNPTR = 21, + __BPF_REG_TYPE_MAX = 22, + PTR_TO_MAP_VALUE_OR_NULL = 260, + PTR_TO_SOCKET_OR_NULL = 267, + PTR_TO_SOCK_COMMON_OR_NULL = 268, + PTR_TO_TCP_SOCK_OR_NULL = 269, + PTR_TO_BTF_ID_OR_NULL = 272, + __BPF_REG_TYPE_LIMIT = 134217727, +}; + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *); +}; + +struct net_device; + +struct bpf_offload_dev; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +struct btf_func_model { + u8 ret_size; + u8 ret_flags; + u8 nr_args; + u8 arg_size[12]; + u8 arg_flags[12]; +}; + +struct bpf_tramp_image { + void *image; + int size; + struct bpf_ksym ksym; + struct percpu_ref pcref; + void *ip_after_call; + void *ip_epilogue; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct ftrace_ops; + +struct bpf_trampoline { + struct hlist_node hlist; + struct ftrace_ops *fops; + struct mutex mutex; + refcount_t refcnt; + u32 flags; + u64 key; + struct { + struct btf_func_model model; + void *addr; 
+ bool ftrace_managed; + } func; + struct bpf_prog *extension_prog; + struct hlist_head progs_hlist[3]; + int progs_cnt[3]; + struct bpf_tramp_image *cur_image; + long: 32; +}; + +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; + bool called: 1; + bool verified: 1; +}; + +struct bpf_jit_poke_descriptor { + void *tailcall_target; + void *tailcall_bypass; + void *bypass_addr; + void *aux; + union { + struct { + struct bpf_map *map; + u32 key; + } tail_call; + }; + bool tailcall_target_stable; + u8 adj_off; + u16 reason; + u32 insn_idx; +}; + +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + struct btf *btf; + u32 btf_id; +}; + +struct btf_mod_pair { + struct btf *btf; + struct module *module; +}; + +struct bpf_token { + struct work_struct work; + atomic64_t refcnt; + struct user_namespace *userns; + long: 32; + u64 allowed_cmds; + u64 allowed_maps; + u64 allowed_progs; + u64 allowed_attachs; +}; + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[0]; +}; + +typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + long unsigned int pad; + }; + perf_copy_f copy; + void *data; + u32 size; +}; + +struct perf_raw_record { + struct perf_raw_frag frag; + u32 size; +}; + +struct perf_branch_stack { + __u64 nr; + __u64 hw_idx; + struct perf_branch_entry entries[0]; +}; + +struct perf_event_pmu_context { + struct pmu *pmu; + struct perf_event_context *ctx; + struct list_head pmu_ctx_entry; + struct list_head pinned_active; + struct list_head flexible_active; + unsigned int embedded: 1; + unsigned int nr_events; + unsigned int nr_cgroups; + unsigned int nr_freq; + atomic_t refcount; + struct callback_head callback_head; + void *task_ctx_data; + int rotate_necessary; +}; + +struct perf_cpu_pmu_context { + struct perf_event_pmu_context epc; + struct perf_event_pmu_context *task_epc; + struct list_head sched_cb_entry; + int sched_cb_usage; + int active_oncpu; + int exclusive; + raw_spinlock_t hrtimer_lock; + struct hrtimer hrtimer; + ktime_t hrtimer_interval; + unsigned int hrtimer_active; + long: 32; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_buffer *rb; + long unsigned int wakeup; + long unsigned int size; + u64 aux_flags; + union { + void *addr; + long unsigned int head; + }; + int page; +}; + +struct perf_addr_filter_range { + long unsigned int start; + long unsigned int size; +}; + +struct perf_sample_data { + u64 sample_flags; + u64 period; + u64 dyn_size; + u64 type; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + u64 ip; + struct perf_callchain_entry *callchain; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; + union perf_sample_weight weight; + union perf_mem_data_src data_src; + u64 txn; + struct perf_regs regs_user; + struct perf_regs regs_intr; + u64 stack_user_size; + u64 stream_id; + u64 cgroup; + u64 addr; + u64 phys_addr; + u64 data_page_size; + u64 code_page_size; + u64 aux_size; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct perf_cgroup_info; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; + long: 32; +}; + +struct perf_cgroup_info { + u64 time; + u64 timestamp; + u64 timeoffset; + int active; + long: 32; +}; + +struct trace_entry { + short unsigned int type; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; + 
+struct trace_array; + +struct tracer; + +struct array_buffer; + +struct ring_buffer_iter; + +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + struct array_buffer *array_buffer; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter **buffer_iter; + long unsigned int iter_flags; + void *temp; + unsigned int temp_size; + char *fmt; + unsigned int fmt_size; + atomic_t wait_index; + struct trace_seq tmp_seq; + cpumask_var_t started; + bool closed; + bool snapshot; + struct trace_seq seq; + struct trace_entry *ent; + long unsigned int lost_events; + int leftover; + int ent_size; + int cpu; + u64 ts; + loff_t pos; + long int idx; + long: 32; +}; + +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, + TRACE_TYPE_NO_CONSUME = 3, +}; + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +enum trace_reg { + TRACE_REG_REGISTER = 0, + TRACE_REG_UNREGISTER = 1, + TRACE_REG_PERF_REGISTER = 2, + TRACE_REG_PERF_UNREGISTER = 3, + TRACE_REG_PERF_OPEN = 4, + TRACE_REG_PERF_CLOSE = 5, + TRACE_REG_PERF_ADD = 6, + TRACE_REG_PERF_DEL = 7, +}; + +struct trace_event_fields { + const char *type; + union { + struct { + const char *name; + const int size; + const int align; + const int is_signed; + const int filter_type; + const int len; + }; + int (*define_fields)(struct trace_event_call *); + }; +}; + +struct trace_event_class { + const char *system; + void *probe; + void *perf_probe; + int (*reg)(struct trace_event_call *, enum trace_reg, void *); + struct trace_event_fields *fields_array; + struct list_head * (*get_fields)(struct trace_event_call *); + struct list_head fields; + int (*raw_init)(struct trace_event_call *); +}; + +enum { + TRACE_EVENT_FL_CAP_ANY = 1, + TRACE_EVENT_FL_NO_SET_FILTER = 2, + TRACE_EVENT_FL_IGNORE_ENABLE = 4, + TRACE_EVENT_FL_TRACEPOINT = 8, + TRACE_EVENT_FL_DYNAMIC = 16, + TRACE_EVENT_FL_KPROBE = 32, + TRACE_EVENT_FL_UPROBE = 64, + TRACE_EVENT_FL_EPROBE = 128, + TRACE_EVENT_FL_FPROBE = 256, + TRACE_EVENT_FL_CUSTOM = 512, +}; + +struct syscall_metadata { + const char *name; + int syscall_nr; + int nb_args; + const char **types; + const char **args; + struct list_head enter_fields; + struct trace_event_call *enter_event; + struct trace_event_call *exit_event; +}; + +struct linux_binprm { + struct vm_area_struct *vma; + long unsigned int vma_pages; + long unsigned int argmin; + struct mm_struct *mm; + long unsigned int p; + unsigned int have_execfd: 1; + unsigned int execfd_creds: 1; + unsigned int secureexec: 1; + unsigned int point_of_no_return: 1; + struct file *executable; + struct file *interpreter; + struct file *file; + struct cred *cred; + int unsafe; + unsigned int per_clear; + int argc; + int envc; + const char *filename; + const char *interp; + const char *fdpath; + unsigned int interp_flags; + int execfd; + long unsigned int loader; + long unsigned int exec; + struct rlimit rlim_stack; + char buf[256]; +}; + +typedef u32 compat_size_t; + +struct property { + char *name; + int length; + void *value; + struct property *next; + struct bin_attribute attr; +}; + +typedef phys_addr_t resource_size_t; + +typedef long unsigned int irq_hw_number_t; + +struct pci_device_id { + __u32 vendor; + __u32 device; + __u32 subvendor; + __u32 subdevice; + __u32 class; + __u32 class_mask; + kernel_ulong_t 
driver_data; + __u32 override_only; +}; + +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + long unsigned int flags; + long unsigned int desc; + struct resource *parent; + struct resource *sibling; + struct resource *child; +}; + +enum irqreturn { + IRQ_NONE = 0, + IRQ_HANDLED = 1, + IRQ_WAKE_THREAD = 2, +}; + +typedef enum irqreturn irqreturn_t; + +enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_WIRED = 1, + DOMAIN_BUS_GENERIC_MSI = 2, + DOMAIN_BUS_PCI_MSI = 3, + DOMAIN_BUS_PLATFORM_MSI = 4, + DOMAIN_BUS_NEXUS = 5, + DOMAIN_BUS_IPI = 6, + DOMAIN_BUS_FSL_MC_MSI = 7, + DOMAIN_BUS_TI_SCI_INTA_MSI = 8, + DOMAIN_BUS_WAKEUP = 9, + DOMAIN_BUS_VMD_MSI = 10, + DOMAIN_BUS_PCI_DEVICE_MSI = 11, + DOMAIN_BUS_PCI_DEVICE_MSIX = 12, + DOMAIN_BUS_DMAR = 13, + DOMAIN_BUS_AMDVI = 14, + DOMAIN_BUS_DEVICE_MSI = 15, + DOMAIN_BUS_WIRED_TO_MSI = 16, +}; + +struct irq_domain_ops; + +struct irq_domain_chip_generic; + +struct msi_parent_ops; + +struct irq_data; + +struct irq_domain { + struct list_head link; + const char *name; + const struct irq_domain_ops *ops; + void *host_data; + unsigned int flags; + unsigned int mapcount; + struct mutex mutex; + struct irq_domain *root; + struct fwnode_handle *fwnode; + enum irq_domain_bus_token bus_token; + struct irq_domain_chip_generic *gc; + struct device *dev; + struct device *pm_dev; + struct irq_domain *parent; + const struct msi_parent_ops *msi_parent_ops; + void (*exit)(struct irq_domain *); + irq_hw_number_t hwirq_max; + unsigned int revmap_size; + struct xarray revmap_tree; + struct irq_data *revmap[0]; +}; + +struct irq_desc; + +typedef void (*irq_flow_handler_t)(struct irq_desc *); + +struct msi_desc; + +struct irq_common_data { + unsigned int state_use_accessors; + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; +}; + +struct irq_chip; + +struct irq_data { + u32 mask; + unsigned int irq; + irq_hw_number_t hwirq; + struct irq_common_data *common; + struct irq_chip *chip; + struct irq_domain *domain; + struct irq_data *parent_data; + void *chip_data; +}; + +struct irqstat; + +struct irqaction; + +struct irq_affinity_notify; + +struct proc_dir_entry; + +struct irq_desc { + struct irq_common_data irq_common_data; + struct irq_data irq_data; + struct irqstat *kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; + unsigned int wake_depth; + unsigned int tot_count; + unsigned int irq_count; + long unsigned int last_unhandled; + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; + const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; + long unsigned int threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; + struct proc_dir_entry *dir; + struct callback_head rcu; + struct kobject kobj; + struct mutex request_mutex; + int parent_irq; + struct module *owner; + const char *name; + long: 32; +}; + +enum { + IRQ_TYPE_NONE = 0, + IRQ_TYPE_EDGE_RISING = 1, + IRQ_TYPE_EDGE_FALLING = 2, + IRQ_TYPE_EDGE_BOTH = 3, + IRQ_TYPE_LEVEL_HIGH = 4, + IRQ_TYPE_LEVEL_LOW = 8, + IRQ_TYPE_LEVEL_MASK = 12, + IRQ_TYPE_SENSE_MASK = 15, + IRQ_TYPE_DEFAULT = 15, + IRQ_TYPE_PROBE = 16, + IRQ_LEVEL = 256, + IRQ_PER_CPU = 512, + IRQ_NOPROBE = 1024, + IRQ_NOREQUEST = 2048, + IRQ_NOAUTOEN = 4096, + 
IRQ_NO_BALANCING = 8192, + IRQ_MOVE_PCNTXT = 16384, + IRQ_NESTED_THREAD = 32768, + IRQ_NOTHREAD = 65536, + IRQ_PER_CPU_DEVID = 131072, + IRQ_IS_POLLED = 262144, + IRQ_DISABLE_UNLAZY = 524288, + IRQ_HIDDEN = 1048576, + IRQ_NO_DEBUG = 2097152, +}; + +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING = 0, + IRQCHIP_STATE_ACTIVE = 1, + IRQCHIP_STATE_MASKED = 2, + IRQCHIP_STATE_LINE_LEVEL = 3, +}; + +struct msi_msg; + +struct irq_chip { + const char *name; + unsigned int (*irq_startup)(struct irq_data *); + void (*irq_shutdown)(struct irq_data *); + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_ack)(struct irq_data *); + void (*irq_mask)(struct irq_data *); + void (*irq_mask_ack)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_eoi)(struct irq_data *); + int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); + int (*irq_retrigger)(struct irq_data *); + int (*irq_set_type)(struct irq_data *, unsigned int); + int (*irq_set_wake)(struct irq_data *, unsigned int); + void (*irq_bus_lock)(struct irq_data *); + void (*irq_bus_sync_unlock)(struct irq_data *); + void (*irq_suspend)(struct irq_data *); + void (*irq_resume)(struct irq_data *); + void (*irq_pm_shutdown)(struct irq_data *); + void (*irq_calc_mask)(struct irq_data *); + void (*irq_print_chip)(struct irq_data *, struct seq_file *); + int (*irq_request_resources)(struct irq_data *); + void (*irq_release_resources)(struct irq_data *); + void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); + void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); + int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); + int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); + int (*irq_set_vcpu_affinity)(struct irq_data *, void *); + void (*ipi_send_single)(struct irq_data *, unsigned int); + void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); + int (*irq_nmi_setup)(struct irq_data *); + void (*irq_nmi_teardown)(struct irq_data *); + long unsigned int flags; +}; + +struct irqstat { + unsigned int cnt; +}; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +struct irqaction { + irq_handler_t handler; + void *dev_id; + void *percpu_dev_id; + struct irqaction *next; + irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + long unsigned int thread_flags; + long unsigned int thread_mask; + const char *name; + struct proc_dir_entry *dir; + long: 32; + long: 32; + long: 32; +}; + +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *); + void (*release)(struct kref *); +}; + +struct irq_chip_regs { + long unsigned int enable; + long unsigned int disable; + long unsigned int mask; + long unsigned int ack; + long unsigned int eoi; + long unsigned int type; +}; + +struct irq_chip_type { + struct irq_chip chip; + struct irq_chip_regs regs; + irq_flow_handler_t handler; + u32 type; + u32 mask_cache_priv; + u32 *mask_cache; +}; + +struct irq_chip_generic { + raw_spinlock_t lock; + void *reg_base; + u32 (*reg_readl)(void *); + void (*reg_writel)(u32, void *); + void (*suspend)(struct irq_chip_generic *); + void (*resume)(struct irq_chip_generic *); + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; + u32 wake_enabled; + u32 wake_active; + unsigned int num_ct; + void *private; + long unsigned int 
installed; + long unsigned int unused; + struct irq_domain *domain; + struct list_head list; + struct irq_chip_type chip_types[0]; +}; + +enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1, + IRQ_GC_INIT_NESTED_LOCK = 2, + IRQ_GC_MASK_CACHE_PER_TYPE = 4, + IRQ_GC_NO_MASK = 8, + IRQ_GC_BE_IO = 16, +}; + +struct irq_domain_chip_generic { + unsigned int irqs_per_chip; + unsigned int num_chips; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + void (*exit)(struct irq_chip_generic *); + struct irq_chip_generic *gc[0]; +}; + +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed: 1; +}; + +struct pci_bus; + +struct hotplug_slot; + +struct pci_slot { + struct pci_bus *bus; + struct list_head list; + struct hotplug_slot *hotplug; + unsigned char number; + struct kobject kobj; +}; + +typedef short unsigned int pci_bus_flags_t; + +struct pci_dev; + +struct pci_ops; + +struct pci_bus { + struct list_head node; + struct pci_bus *parent; + struct list_head children; + struct list_head devices; + struct pci_dev *self; + struct list_head slots; + struct resource *resource[4]; + struct list_head resources; + struct resource busn_res; + struct pci_ops *ops; + void *sysdata; + struct proc_dir_entry *procdir; + unsigned char number; + unsigned char primary; + unsigned char max_bus_speed; + unsigned char cur_bus_speed; + char name[48]; + short unsigned int bridge_ctl; + pci_bus_flags_t bus_flags; + struct device *bridge; + struct device dev; + struct bin_attribute *legacy_io; + struct bin_attribute *legacy_mem; + unsigned int is_added: 1; + unsigned int unsafe_warn: 1; + long: 32; +}; + +typedef int pci_power_t; + +typedef unsigned int pci_channel_state_t; + +typedef short unsigned int pci_dev_flags_t; + +struct pci_vpd { + struct mutex lock; + unsigned int len; + u8 cap; +}; + +struct pcie_bwctrl_data; + +struct pci_driver; + +struct pcie_link_state; + +struct pci_dev { + struct list_head bus_list; + struct pci_bus *bus; + struct pci_bus *subordinate; + void *sysdata; + struct proc_dir_entry *procent; + struct pci_slot *slot; + unsigned int devfn; + short unsigned int vendor; + short unsigned int device; + short unsigned int subsystem_vendor; + short unsigned int subsystem_device; + unsigned int class; + u8 revision; + u8 hdr_type; + u32 devcap; + u8 pcie_cap; + u8 msi_cap; + u8 msix_cap; + u8 pcie_mpss: 3; + u8 rom_base_reg; + u8 pin; + u16 pcie_flags_reg; + long unsigned int *dma_alias_mask; + struct pci_driver *driver; + long: 32; + u64 dma_mask; + struct device_dma_parameters dma_parms; + pci_power_t current_state; + u8 pm_cap; + unsigned int pme_support: 5; + unsigned int pme_poll: 1; + unsigned int pinned: 1; + unsigned int config_rrs_sv: 1; + unsigned int imm_ready: 1; + unsigned int d1_support: 1; + unsigned int d2_support: 1; + unsigned int no_d1d2: 1; + unsigned int no_d3cold: 1; + unsigned int bridge_d3: 1; + unsigned int d3cold_allowed: 1; + unsigned int mmio_always_on: 1; + unsigned int wakeup_prepared: 1; + unsigned int skip_bus_pm: 1; + unsigned int ignore_hotplug: 1; + unsigned int hotplug_user_indicators: 1; + unsigned int clear_retrain_link: 1; + unsigned int d3hot_delay; + unsigned int d3cold_delay; + u16 l1ss; + struct pcie_link_state *link_state; + unsigned int ltr_path: 1; + unsigned int pasid_no_tlp: 1; + unsigned int eetlp_prefix_path: 1; + pci_channel_state_t error_state; + long: 32; + struct device dev; + int cfg_size; + unsigned int irq; + struct resource resource[11]; + struct resource driver_exclusive_resource; 
+ bool match_driver; + unsigned int transparent: 1; + unsigned int io_window: 1; + unsigned int pref_window: 1; + unsigned int pref_64_window: 1; + unsigned int multifunction: 1; + unsigned int is_busmaster: 1; + unsigned int no_msi: 1; + unsigned int no_64bit_msi: 1; + unsigned int block_cfg_access: 1; + unsigned int broken_parity_status: 1; + unsigned int irq_reroute_variant: 2; + unsigned int msi_enabled: 1; + unsigned int msix_enabled: 1; + unsigned int ari_enabled: 1; + unsigned int ats_enabled: 1; + unsigned int pasid_enabled: 1; + unsigned int pri_enabled: 1; + unsigned int tph_enabled: 1; + unsigned int is_managed: 1; + unsigned int is_msi_managed: 1; + unsigned int needs_freset: 1; + unsigned int state_saved: 1; + unsigned int is_physfn: 1; + unsigned int is_virtfn: 1; + unsigned int is_hotplug_bridge: 1; + unsigned int shpc_managed: 1; + unsigned int is_thunderbolt: 1; + unsigned int untrusted: 1; + unsigned int external_facing: 1; + unsigned int broken_intx_masking: 1; + unsigned int io_window_1k: 1; + unsigned int irq_managed: 1; + unsigned int non_compliant_bars: 1; + unsigned int is_probed: 1; + unsigned int link_active_reporting: 1; + unsigned int no_vf_scan: 1; + unsigned int no_command_memory: 1; + unsigned int rom_bar_overlap: 1; + unsigned int rom_attr_enabled: 1; + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; + spinlock_t pcie_cap_lock; + u32 saved_config_space[16]; + struct hlist_head saved_cap_space; + struct bin_attribute *res_attr[11]; + struct bin_attribute *res_attr_wc[11]; + void *msix_base; + raw_spinlock_t msi_lock; + struct pci_vpd vpd; + struct pcie_bwctrl_data *link_bwctrl; + u16 acs_cap; + u8 supported_speeds; + phys_addr_t rom; + size_t romlen; + const char *driver_override; + long unsigned int priv_flags; + u8 reset_methods[8]; +}; + +struct pci_dynids { + spinlock_t lock; + struct list_head list; +}; + +struct pci_error_handlers; + +struct pci_driver { + const char *name; + const struct pci_device_id *id_table; + int (*probe)(struct pci_dev *, const struct pci_device_id *); + void (*remove)(struct pci_dev *); + int (*suspend)(struct pci_dev *, pm_message_t); + int (*resume)(struct pci_dev *); + void (*shutdown)(struct pci_dev *); + int (*sriov_configure)(struct pci_dev *, int); + int (*sriov_set_msix_vec_count)(struct pci_dev *, int); + u32 (*sriov_get_vf_total_msix)(struct pci_dev *); + const struct pci_error_handlers *err_handler; + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + struct device_driver driver; + struct pci_dynids dynids; + bool driver_managed_dma; +}; + +struct pci_host_bridge { + struct device dev; + struct pci_bus *bus; + struct pci_ops *ops; + struct pci_ops *child_ops; + void *sysdata; + int busnr; + int domain_nr; + struct list_head windows; + struct list_head dma_ranges; + u8 (*swizzle_irq)(struct pci_dev *, u8 *); + int (*map_irq)(const struct pci_dev *, u8, u8); + void (*release_fn)(struct pci_host_bridge *); + void *release_data; + unsigned int ignore_reset_delay: 1; + unsigned int no_ext_tags: 1; + unsigned int no_inc_mrrs: 1; + unsigned int native_aer: 1; + unsigned int native_pcie_hotplug: 1; + unsigned int native_shpc_hotplug: 1; + unsigned int native_pme: 1; + unsigned int native_ltr: 1; + unsigned int native_dpc: 1; + unsigned int native_cxl_error: 1; + unsigned int preserve_config: 1; + unsigned int size_windows: 1; + unsigned int msi_domain: 1; + resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t); + 
long: 32; + long: 32; + long unsigned int private[0]; +}; + +struct pci_ops { + int (*add_bus)(struct pci_bus *); + void (*remove_bus)(struct pci_bus *); + void * (*map_bus)(struct pci_bus *, unsigned int, int); + int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); + int (*write)(struct pci_bus *, unsigned int, int, int, u32); +}; + +typedef unsigned int pci_ers_result_t; + +struct pci_error_handlers { + pci_ers_result_t (*error_detected)(struct pci_dev *, pci_channel_state_t); + pci_ers_result_t (*mmio_enabled)(struct pci_dev *); + pci_ers_result_t (*slot_reset)(struct pci_dev *); + void (*reset_prepare)(struct pci_dev *); + void (*reset_done)(struct pci_dev *); + void (*resume)(struct pci_dev *); + void (*cor_error_detected)(struct pci_dev *); +}; + +enum { + PCI_REASSIGN_ALL_RSRC = 1, + PCI_REASSIGN_ALL_BUS = 2, + PCI_PROBE_ONLY = 4, + PCI_CAN_SKIP_ISA_ALIGN = 8, + PCI_ENABLE_PROC_DOMAINS = 16, + PCI_COMPAT_DOMAIN_0 = 32, + PCI_SCAN_ALL_PCIE_DEVS = 64, +}; + +struct pci_controller; + +struct rtc_time; + +struct kimage; + +struct machdep_calls { + const char *name; + const char *compatible; + const char * const *compatibles; + void (*dma_set_mask)(struct device *, u64); + int (*probe)(); + void (*setup_arch)(); + void (*show_cpuinfo)(struct seq_file *); + long unsigned int (*get_proc_freq)(unsigned int); + void (*init_IRQ)(); + unsigned int (*get_irq)(); + void (*pcibios_fixup)(); + void (*pci_irq_fixup)(struct pci_dev *); + int (*pcibios_root_bridge_prepare)(struct pci_host_bridge *); + void (*discover_phbs)(); + int (*pci_setup_phb)(struct pci_controller *); + void (*restart)(char *); + void (*halt)(); + void (*panic)(char *); + long int (*time_init)(); + int (*set_rtc_time)(struct rtc_time *); + void (*get_rtc_time)(struct rtc_time *); + time64_t (*get_boot_time)(); + void (*calibrate_decr)(); + void (*progress)(char *, short unsigned int); + void (*log_error)(char *, unsigned int, int); + unsigned char (*nvram_read_val)(int); + void (*nvram_write_val)(int, unsigned char); + ssize_t (*nvram_write)(char *, size_t, loff_t *); + ssize_t (*nvram_read)(char *, size_t, loff_t *); + ssize_t (*nvram_size)(); + void (*nvram_sync)(); + int (*system_reset_exception)(struct pt_regs *); + int (*machine_check_exception)(struct pt_regs *); + int (*handle_hmi_exception)(struct pt_regs *); + int (*hmi_exception_early)(struct pt_regs *); + long int (*machine_check_early)(struct pt_regs *); + bool (*mce_check_early_recovery)(struct pt_regs *); + void (*machine_check_log_err)(); + long int (*feature_call)(unsigned int, ...); + int (*pci_get_legacy_ide_irq)(struct pci_dev *, int); + pgprot_t (*phys_mem_access_prot)(long unsigned int, long unsigned int, pgprot_t); + void (*power_save)(); + void (*enable_pmcs)(); + int (*set_dabr)(long unsigned int, long unsigned int); + int (*set_dawr)(int, long unsigned int, long unsigned int); + void (*init)(); + void (*pcibios_after_init)(); + int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char); + void (*pcibios_fixup_resources)(struct pci_dev *); + void (*pcibios_fixup_bus)(struct pci_bus *); + void (*pcibios_fixup_phb)(struct pci_controller *); + void (*pcibios_bus_add_device)(struct pci_dev *); + resource_size_t (*pcibios_default_alignment)(); + void (*machine_shutdown)(); + void (*kexec_cpu_down)(int, int); + void (*machine_kexec)(struct kimage *); + int (*get_random_seed)(long unsigned int *); +}; + +struct pci_controller_ops { + void (*dma_dev_setup)(struct pci_dev *); + void (*dma_bus_setup)(struct pci_bus *); + bool 
(*iommu_bypass_supported)(struct pci_dev *, u64); + int (*probe_mode)(struct pci_bus *); + bool (*enable_device_hook)(struct pci_dev *); + void (*disable_device)(struct pci_dev *); + void (*release_device)(struct pci_dev *); + resource_size_t (*window_alignment)(struct pci_bus *, long unsigned int); + void (*setup_bridge)(struct pci_bus *, long unsigned int); + void (*reset_secondary_bus)(struct pci_dev *); + int (*setup_msi_irqs)(struct pci_dev *, int, int); + void (*teardown_msi_irqs)(struct pci_dev *); + void (*shutdown)(struct pci_controller *); + struct iommu_group * (*device_group)(struct pci_controller *, struct pci_dev *); +}; + +struct iommu_device {}; + +struct pci_controller { + struct pci_bus *bus; + char is_dynamic; + struct device_node *dn; + struct list_head list_node; + struct device *parent; + int first_busno; + int last_busno; + int self_busno; + struct resource busn; + void *io_base_virt; + resource_size_t io_base_phys; + resource_size_t pci_io_size; + resource_size_t isa_mem_phys; + resource_size_t isa_mem_size; + struct pci_controller_ops controller_ops; + struct pci_ops *ops; + unsigned int *cfg_addr; + void *cfg_data; + u32 indirect_type; + struct resource io_resource; + struct resource mem_resources[3]; + resource_size_t mem_offset[3]; + int global_number; + resource_size_t dma_window_base_cur; + resource_size_t dma_window_size; + void *private_data; + struct irq_domain *dev_domain; + struct irq_domain *msi_domain; + struct fwnode_handle *fwnode; + struct iommu_device iommu; +}; + +struct pci_fixup { + u16 vendor; + u16 device; + u32 class; + unsigned int class_shift; + void (*hook)(struct pci_dev *); +}; + +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[16]; +}; + +struct irq_domain_ops { + int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); + int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); + int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t); + void (*unmap)(struct irq_domain *, unsigned int); + int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *); + int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); + void (*free)(struct irq_domain *, unsigned int, unsigned int); + int (*activate)(struct irq_domain *, struct irq_data *, bool); + void (*deactivate)(struct irq_domain *, struct irq_data *); + int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *); +}; + +struct msi_domain_info; + +struct msi_parent_ops { + u32 supported_flags; + u32 required_flags; + u32 bus_select_token; + u32 bus_select_mask; + const char *prefix; + bool (*init_dev_msi_info)(struct device *, struct irq_domain *, struct irq_domain *, struct msi_domain_info *); +}; + +typedef long unsigned int uintptr_t; + +struct sysinfo { + __kernel_long_t uptime; + __kernel_ulong_t loads[3]; + __kernel_ulong_t totalram; + __kernel_ulong_t freeram; + __kernel_ulong_t sharedram; + __kernel_ulong_t bufferram; + __kernel_ulong_t totalswap; + __kernel_ulong_t freeswap; + __u16 procs; + __u16 pad; + __kernel_ulong_t totalhigh; + __kernel_ulong_t freehigh; + __u32 mem_unit; + char _f[8]; +}; + +struct wait_queue_entry; + +typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); + +struct wait_queue_entry { + unsigned int flags; + void *private; + wait_queue_func_t func; + struct list_head entry; +}; + +typedef struct wait_queue_entry 
wait_queue_entry_t; + +enum { + MM_FILEPAGES = 0, + MM_ANONPAGES = 1, + MM_SWAPENTS = 2, + MM_SHMEMPAGES = 3, + NR_MM_COUNTERS = 4, +}; + +enum refcount_saturation_type { + REFCOUNT_ADD_NOT_ZERO_OVF = 0, + REFCOUNT_ADD_OVF = 1, + REFCOUNT_ADD_UAF = 2, + REFCOUNT_SUB_UAF = 3, + REFCOUNT_DEC_LEAK = 4, +}; + +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_long_t tv_usec; +}; + +enum tk_offsets { + TK_OFFS_REAL = 0, + TK_OFFS_BOOT = 1, + TK_OFFS_TAI = 2, + TK_OFFS_MAX = 3, +}; + +struct rusage { + struct __kernel_old_timeval ru_utime; + struct __kernel_old_timeval ru_stime; + __kernel_long_t ru_maxrss; + __kernel_long_t ru_ixrss; + __kernel_long_t ru_idrss; + __kernel_long_t ru_isrss; + __kernel_long_t ru_minflt; + __kernel_long_t ru_majflt; + __kernel_long_t ru_nswap; + __kernel_long_t ru_inblock; + __kernel_long_t ru_oublock; + __kernel_long_t ru_msgsnd; + __kernel_long_t ru_msgrcv; + __kernel_long_t ru_nsignals; + __kernel_long_t ru_nvcsw; + __kernel_long_t ru_nivcsw; +}; + +struct rlimit64 { + __u64 rlim_cur; + __u64 rlim_max; +}; + +enum { + TASK_COMM_LEN = 16, +}; + +struct fs_struct { + int users; + spinlock_t lock; + seqcount_spinlock_t seq; + int umask; + int in_exec; + struct path root; + struct path pwd; +}; + +struct reclaim_state { + long unsigned int reclaimed; +}; + +struct anon_vma { + struct anon_vma *root; + struct rw_semaphore rwsem; + atomic_t refcount; + long unsigned int num_children; + long unsigned int num_active_vmas; + struct anon_vma *parent; + struct rb_root_cached rb_root; +}; + +struct ld_semaphore { + atomic_long_t count; + raw_spinlock_t wait_lock; + unsigned int wait_readers; + struct list_head read_wait; + struct list_head write_wait; +}; + +typedef unsigned int tcflag_t; + +typedef unsigned char cc_t; + +typedef unsigned int speed_t; + +struct ktermios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_cc[19]; + cc_t c_line; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct winsize { + short unsigned int ws_row; + short unsigned int ws_col; + short unsigned int ws_xpixel; + short unsigned int ws_ypixel; +}; + +struct tty_driver; + +struct tty_port; + +struct tty_operations; + +struct tty_ldisc; + +struct tty_struct { + struct kref kref; + int index; + struct device *dev; + struct tty_driver *driver; + struct tty_port *port; + const struct tty_operations *ops; + struct tty_ldisc *ldisc; + struct ld_semaphore ldisc_sem; + struct mutex atomic_write_lock; + struct mutex legacy_mutex; + struct mutex throttle_mutex; + struct rw_semaphore termios_rwsem; + struct mutex winsize_mutex; + struct ktermios termios; + struct ktermios termios_locked; + char name[64]; + long unsigned int flags; + int count; + unsigned int receive_room; + struct winsize winsize; + struct { + spinlock_t lock; + bool stopped; + bool tco_stopped; + } flow; + struct { + struct pid *pgrp; + struct pid *session; + spinlock_t lock; + unsigned char pktstatus; + bool packet; + } ctrl; + bool hw_stopped; + bool closing; + int flow_change; + struct tty_struct *link; + struct fasync_struct *fasync; + wait_queue_head_t write_wait; + wait_queue_head_t read_wait; + struct work_struct hangup_work; + void *disc_data; + void *driver_data; + spinlock_t files_lock; + int write_cnt; + u8 *write_buf; + struct list_head tty_files; + struct work_struct SAK_work; +}; + +typedef int (*proc_visitor)(struct task_struct *, void *); + +struct wait_page_queue { + struct folio *folio; + int bit_nr; + wait_queue_entry_t wait; +}; + +struct readahead_control { + 
struct file *file; + struct address_space *mapping; + struct file_ra_state *ra; + long unsigned int _index; + unsigned int _nr_pages; + unsigned int _batch_count; + bool _workingset; + long unsigned int _pflags; +}; + +struct swap_cluster_info; + +struct percpu_cluster; + +struct swap_info_struct { + struct percpu_ref users; + long unsigned int flags; + short int prio; + struct plist_node list; + signed char type; + unsigned int max; + unsigned char *swap_map; + long unsigned int *zeromap; + struct swap_cluster_info *cluster_info; + struct list_head free_clusters; + struct list_head full_clusters; + struct list_head nonfull_clusters[1]; + struct list_head frag_clusters[1]; + unsigned int frag_cluster_nr[1]; + unsigned int lowest_bit; + unsigned int highest_bit; + unsigned int pages; + unsigned int inuse_pages; + unsigned int cluster_next; + unsigned int cluster_nr; + unsigned int *cluster_next_cpu; + struct percpu_cluster *percpu_cluster; + struct rb_root swap_extent_root; + struct block_device *bdev; + struct file *swap_file; + struct completion comp; + spinlock_t lock; + spinlock_t cont_lock; + struct work_struct discard_work; + struct work_struct reclaim_work; + struct list_head discard_clusters; + struct plist_node avail_lists[0]; +}; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +}; + +struct new_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; + char domainname[65]; +}; + +struct uts_namespace { + struct new_utsname name; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; +}; + +struct timens_offsets { + struct timespec64 monotonic; + struct timespec64 boottime; +}; + +struct time_namespace { + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; + struct timens_offsets offsets; + struct page *vvar_page; + bool frozen_offsets; +}; + +enum rlimit_type { + UCOUNT_RLIMIT_NPROC = 0, + UCOUNT_RLIMIT_MSGQUEUE = 1, + UCOUNT_RLIMIT_SIGPENDING = 2, + UCOUNT_RLIMIT_MEMLOCK = 3, + UCOUNT_RLIMIT_COUNTS = 4, +}; + +struct swap_cluster_info { + spinlock_t lock; + u16 count; + u8 flags; + u8 order; + struct list_head list; +}; + +struct percpu_cluster { + unsigned int next[1]; +}; + +struct oldold_utsname { + char sysname[9]; + char nodename[9]; + char release[9]; + char version[9]; + char machine[9]; +}; + +struct old_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; +}; + +enum uts_proc { + UTS_PROC_ARCH = 0, + UTS_PROC_OSTYPE = 1, + UTS_PROC_OSRELEASE = 2, + UTS_PROC_VERSION = 3, + UTS_PROC_HOSTNAME = 4, + UTS_PROC_DOMAINNAME = 5, +}; + +struct prctl_mm_map { + __u64 start_code; + __u64 end_code; + __u64 start_data; + __u64 end_data; + __u64 start_brk; + __u64 brk; + __u64 start_stack; + __u64 arg_start; + __u64 arg_end; + __u64 env_start; + __u64 env_end; + __u64 *auxv; + __u32 auxv_size; + __u32 exe_fd; + long: 32; +}; + +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, + PERF_COUNT_SW_MAX = 12, +}; + +struct fd { + 
long unsigned int word; +}; + +typedef struct fd class_fd_t; + +struct tms { + __kernel_clock_t tms_utime; + __kernel_clock_t tms_stime; + __kernel_clock_t tms_cutime; + __kernel_clock_t tms_cstime; +}; + +struct serial_icounter_struct; + +struct serial_struct; + +struct tty_operations { + struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int); + int (*install)(struct tty_driver *, struct tty_struct *); + void (*remove)(struct tty_driver *, struct tty_struct *); + int (*open)(struct tty_struct *, struct file *); + void (*close)(struct tty_struct *, struct file *); + void (*shutdown)(struct tty_struct *); + void (*cleanup)(struct tty_struct *); + ssize_t (*write)(struct tty_struct *, const u8 *, size_t); + int (*put_char)(struct tty_struct *, u8); + void (*flush_chars)(struct tty_struct *); + unsigned int (*write_room)(struct tty_struct *); + unsigned int (*chars_in_buffer)(struct tty_struct *); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + void (*throttle)(struct tty_struct *); + void (*unthrottle)(struct tty_struct *); + void (*stop)(struct tty_struct *); + void (*start)(struct tty_struct *); + void (*hangup)(struct tty_struct *); + int (*break_ctl)(struct tty_struct *, int); + void (*flush_buffer)(struct tty_struct *); + int (*ldisc_ok)(struct tty_struct *, int); + void (*set_ldisc)(struct tty_struct *); + void (*wait_until_sent)(struct tty_struct *, int); + void (*send_xchar)(struct tty_struct *, u8); + int (*tiocmget)(struct tty_struct *); + int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); + int (*resize)(struct tty_struct *, struct winsize *); + int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); + int (*get_serial)(struct tty_struct *, struct serial_struct *); + int (*set_serial)(struct tty_struct *, struct serial_struct *); + void (*show_fdinfo)(struct tty_struct *, struct seq_file *); + int (*proc_show)(struct seq_file *, void *); +}; + +struct tty_driver { + struct kref kref; + struct cdev **cdevs; + struct module *owner; + const char *driver_name; + const char *name; + int name_base; + int major; + int minor_start; + unsigned int num; + short int type; + short int subtype; + struct ktermios init_termios; + long unsigned int flags; + struct proc_dir_entry *proc_entry; + struct tty_driver *other; + struct tty_struct **ttys; + struct tty_port **ports; + struct ktermios **termios; + void *driver_state; + const struct tty_operations *ops; + struct list_head tty_drivers; +}; + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + unsigned int used; + unsigned int size; + unsigned int commit; + unsigned int lookahead; + unsigned int read; + bool flags; + long: 0; + u8 data[0]; +}; + +struct tty_bufhead { + struct tty_buffer *head; + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; + atomic_t mem_used; + int mem_limit; + struct tty_buffer *tail; +}; + +struct __kfifo { + unsigned int in; + unsigned int out; + unsigned int mask; + unsigned int esize; + void *data; +}; + +struct tty_port_operations; + +struct tty_port_client_operations; + +struct tty_port { + struct tty_bufhead buf; + struct tty_struct *tty; + struct tty_struct *itty; + const struct tty_port_operations *ops; + const struct tty_port_client_operations *client_ops; + spinlock_t lock; + int 
blocked_open; + int count; + wait_queue_head_t open_wait; + wait_queue_head_t delta_msr_wait; + long unsigned int flags; + long unsigned int iflags; + unsigned char console: 1; + struct mutex mutex; + struct mutex buf_mutex; + u8 *xmit_buf; + struct { + union { + struct __kfifo kfifo; + u8 *type; + const u8 *const_type; + char (*rectype)[0]; + u8 *ptr; + const u8 *ptr_const; + }; + u8 buf[0]; + } xmit_fifo; + unsigned int close_delay; + unsigned int closing_wait; + int drain_delay; + struct kref kref; + void *client_data; +}; + +struct tty_ldisc_ops { + char *name; + int num; + int (*open)(struct tty_struct *); + void (*close)(struct tty_struct *); + void (*flush_buffer)(struct tty_struct *); + ssize_t (*read)(struct tty_struct *, struct file *, u8 *, size_t, void **, long unsigned int); + ssize_t (*write)(struct tty_struct *, struct file *, const u8 *, size_t); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, const struct ktermios *); + __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); + void (*hangup)(struct tty_struct *); + void (*receive_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_struct *); + void (*dcd_change)(struct tty_struct *, bool); + size_t (*receive_buf2)(struct tty_struct *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); + struct module *owner; +}; + +struct tty_ldisc { + struct tty_ldisc_ops *ops; + struct tty_struct *tty; +}; + +struct tty_port_operations { + bool (*carrier_raised)(struct tty_port *); + void (*dtr_rts)(struct tty_port *, bool); + void (*shutdown)(struct tty_port *); + int (*activate)(struct tty_port *, struct tty_struct *); + void (*destruct)(struct tty_port *); +}; + +struct tty_port_client_operations { + size_t (*receive_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*lookahead_buf)(struct tty_port *, const u8 *, const u8 *, size_t); + void (*write_wakeup)(struct tty_port *); +}; + +struct getcpu_cache { + long unsigned int blob[32]; +}; + +enum { + UNAME26 = 131072, + ADDR_NO_RANDOMIZE = 262144, + FDPIC_FUNCPTRS = 524288, + MMAP_PAGE_ZERO = 1048576, + ADDR_COMPAT_LAYOUT = 2097152, + READ_IMPLIES_EXEC = 4194304, + ADDR_LIMIT_32BIT = 8388608, + SHORT_INODE = 16777216, + WHOLE_SECONDS = 33554432, + STICKY_TIMEOUTS = 67108864, + ADDR_LIMIT_3GB = 134217728, +}; + +enum { + PER_LINUX = 0, + PER_LINUX_32BIT = 8388608, + PER_LINUX_FDPIC = 524288, + PER_SVR4 = 68157441, + PER_SVR3 = 83886082, + PER_SCOSVR3 = 117440515, + PER_OSR5 = 100663299, + PER_WYSEV386 = 83886084, + PER_ISCR4 = 67108869, + PER_BSD = 6, + PER_SUNOS = 67108870, + PER_XENIX = 83886087, + PER_LINUX32 = 8, + PER_LINUX32_3GB = 134217736, + PER_IRIX32 = 67108873, + PER_IRIXN32 = 67108874, + PER_IRIX64 = 67108875, + PER_RISCOS = 12, + PER_SOLARIS = 67108877, + PER_UW7 = 68157454, + PER_OSF4 = 15, + PER_HPUX = 16, + PER_MASK = 255, +}; + +enum { + TRACE_EVENT_FL_CAP_ANY_BIT = 0, + TRACE_EVENT_FL_NO_SET_FILTER_BIT = 1, + TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 2, + TRACE_EVENT_FL_TRACEPOINT_BIT = 3, + TRACE_EVENT_FL_DYNAMIC_BIT = 4, + TRACE_EVENT_FL_KPROBE_BIT = 5, + TRACE_EVENT_FL_UPROBE_BIT = 6, + TRACE_EVENT_FL_EPROBE_BIT = 7, + TRACE_EVENT_FL_FPROBE_BIT = 8, + TRACE_EVENT_FL_CUSTOM_BIT = 9, +}; + +enum { + EVENT_FILE_FL_ENABLED_BIT = 0, + EVENT_FILE_FL_RECORDED_CMD_BIT = 1, + 
EVENT_FILE_FL_RECORDED_TGID_BIT = 2, + EVENT_FILE_FL_FILTERED_BIT = 3, + EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, + EVENT_FILE_FL_SOFT_MODE_BIT = 5, + EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, + EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, + EVENT_FILE_FL_TRIGGER_COND_BIT = 8, + EVENT_FILE_FL_PID_FILTER_BIT = 9, + EVENT_FILE_FL_WAS_ENABLED_BIT = 10, + EVENT_FILE_FL_FREED_BIT = 11, +}; + +struct codetag { + unsigned int flags; + unsigned int lineno; + const char *modname; + const char *function; + const char *filename; + long: 32; +}; + +enum { + ___GFP_DMA_BIT = 0, + ___GFP_HIGHMEM_BIT = 1, + ___GFP_DMA32_BIT = 2, + ___GFP_MOVABLE_BIT = 3, + ___GFP_RECLAIMABLE_BIT = 4, + ___GFP_HIGH_BIT = 5, + ___GFP_IO_BIT = 6, + ___GFP_FS_BIT = 7, + ___GFP_ZERO_BIT = 8, + ___GFP_UNUSED_BIT = 9, + ___GFP_DIRECT_RECLAIM_BIT = 10, + ___GFP_KSWAPD_RECLAIM_BIT = 11, + ___GFP_WRITE_BIT = 12, + ___GFP_NOWARN_BIT = 13, + ___GFP_RETRY_MAYFAIL_BIT = 14, + ___GFP_NOFAIL_BIT = 15, + ___GFP_NORETRY_BIT = 16, + ___GFP_MEMALLOC_BIT = 17, + ___GFP_COMP_BIT = 18, + ___GFP_NOMEMALLOC_BIT = 19, + ___GFP_HARDWALL_BIT = 20, + ___GFP_THISNODE_BIT = 21, + ___GFP_ACCOUNT_BIT = 22, + ___GFP_ZEROTAGS_BIT = 23, + ___GFP_NO_OBJ_EXT_BIT = 24, + ___GFP_LAST_BIT = 25, +}; + +struct alloc_tag_counters { + u64 bytes; + u64 calls; +}; + +struct alloc_tag { + struct codetag ct; + struct alloc_tag_counters *counters; + long: 32; +}; + +enum kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_RANDOM_START = 0, + KMALLOC_RANDOM_END = 0, + KMALLOC_RECLAIM = 1, + KMALLOC_DMA = 2, + KMALLOC_CGROUP = 3, + NR_KMALLOC_TYPES = 4, +}; + +struct irq_affinity { + unsigned int pre_vectors; + unsigned int post_vectors; + unsigned int nr_sets; + unsigned int set_size[4]; + void (*calc_sets)(struct irq_affinity *, unsigned int); + void *priv; +}; + +enum { + HI_SOFTIRQ = 0, + TIMER_SOFTIRQ = 1, + NET_TX_SOFTIRQ = 2, + NET_RX_SOFTIRQ = 3, + BLOCK_SOFTIRQ = 4, + IRQ_POLL_SOFTIRQ = 5, + TASKLET_SOFTIRQ = 6, + SCHED_SOFTIRQ = 7, + HRTIMER_SOFTIRQ = 8, + RCU_SOFTIRQ = 9, + NR_SOFTIRQS = 10, +}; + +struct stacktrace_cookie { + long unsigned int *store; + unsigned int size; + unsigned int skip; + unsigned int len; +}; + +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE = 0, + VERIFYING_FIRMWARE_SIGNATURE = 1, + VERIFYING_KEXEC_PE_SIGNATURE = 2, + VERIFYING_KEY_SIGNATURE = 3, + VERIFYING_KEY_SELF_SIGNATURE = 4, + VERIFYING_UNSPECIFIED_SIGNATURE = 5, + NR__KEY_BEING_USED_FOR = 6, +}; + +enum { + __PERCPU_REF_ATOMIC = 1, + __PERCPU_REF_DEAD = 2, + __PERCPU_REF_ATOMIC_DEAD = 3, + __PERCPU_REF_FLAG_BITS = 2, +}; + +enum cpu_usage_stat { + CPUTIME_USER = 0, + CPUTIME_NICE = 1, + CPUTIME_SYSTEM = 2, + CPUTIME_SOFTIRQ = 3, + CPUTIME_IRQ = 4, + CPUTIME_IDLE = 5, + CPUTIME_IOWAIT = 6, + CPUTIME_STEAL = 7, + CPUTIME_GUEST = 8, + CPUTIME_GUEST_NICE = 9, + NR_STATS = 10, +}; + +enum cgroup_bpf_attach_type { + CGROUP_BPF_ATTACH_TYPE_INVALID = -1, + CGROUP_INET_INGRESS = 0, + CGROUP_INET_EGRESS = 1, + CGROUP_INET_SOCK_CREATE = 2, + CGROUP_SOCK_OPS = 3, + CGROUP_DEVICE = 4, + CGROUP_INET4_BIND = 5, + CGROUP_INET6_BIND = 6, + CGROUP_INET4_CONNECT = 7, + CGROUP_INET6_CONNECT = 8, + CGROUP_UNIX_CONNECT = 9, + CGROUP_INET4_POST_BIND = 10, + CGROUP_INET6_POST_BIND = 11, + CGROUP_UDP4_SENDMSG = 12, + CGROUP_UDP6_SENDMSG = 13, + CGROUP_UNIX_SENDMSG = 14, + CGROUP_SYSCTL = 15, + CGROUP_UDP4_RECVMSG = 16, + CGROUP_UDP6_RECVMSG = 17, + CGROUP_UNIX_RECVMSG = 18, + CGROUP_GETSOCKOPT = 19, + CGROUP_SETSOCKOPT 
= 20, + CGROUP_INET4_GETPEERNAME = 21, + CGROUP_INET6_GETPEERNAME = 22, + CGROUP_UNIX_GETPEERNAME = 23, + CGROUP_INET4_GETSOCKNAME = 24, + CGROUP_INET6_GETSOCKNAME = 25, + CGROUP_UNIX_GETSOCKNAME = 26, + CGROUP_INET_SOCK_RELEASE = 27, + CGROUP_LSM_START = 28, + CGROUP_LSM_END = 27, + MAX_CGROUP_BPF_ATTACH_TYPE = 28, +}; + +enum psi_task_count { + NR_IOWAIT = 0, + NR_MEMSTALL = 1, + NR_RUNNING = 2, + NR_MEMSTALL_RUNNING = 3, + NR_PSI_TASK_COUNTS = 4, +}; + +enum psi_res { + PSI_IO = 0, + PSI_MEM = 1, + PSI_CPU = 2, + NR_PSI_RESOURCES = 3, +}; + +enum psi_states { + PSI_IO_SOME = 0, + PSI_IO_FULL = 1, + PSI_MEM_SOME = 2, + PSI_MEM_FULL = 3, + PSI_CPU_SOME = 4, + PSI_CPU_FULL = 5, + PSI_NONIDLE = 6, + NR_PSI_STATES = 7, +}; + +enum psi_aggregators { + PSI_AVGS = 0, + PSI_POLL = 1, + NR_PSI_AGGREGATORS = 2, +}; + +enum cgroup_subsys_id { + cpuset_cgrp_id = 0, + cpu_cgrp_id = 1, + cpuacct_cgrp_id = 2, + io_cgrp_id = 3, + memory_cgrp_id = 4, + devices_cgrp_id = 5, + freezer_cgrp_id = 6, + net_cls_cgrp_id = 7, + perf_event_cgrp_id = 8, + pids_cgrp_id = 9, + rdma_cgrp_id = 10, + misc_cgrp_id = 11, + debug_cgrp_id = 12, + CGROUP_SUBSYS_COUNT = 13, +}; + +enum { + CSS_NO_REF = 1, + CSS_ONLINE = 2, + CSS_RELEASED = 4, + CSS_VISIBLE = 8, + CSS_DYING = 16, +}; + +enum { + CFTYPE_ONLY_ON_ROOT = 1, + CFTYPE_NOT_ON_ROOT = 2, + CFTYPE_NS_DELEGATABLE = 4, + CFTYPE_NO_PREFIX = 8, + CFTYPE_WORLD_WRITABLE = 16, + CFTYPE_DEBUG = 32, + __CFTYPE_ONLY_ON_DFL = 65536, + __CFTYPE_NOT_ON_DFL = 131072, + __CFTYPE_ADDED = 262144, +}; + +typedef struct { + char *from; + char *to; +} substring_t; + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE = 0, + RDMACG_RESOURCE_HCA_OBJECT = 1, + RDMACG_RESOURCE_MAX = 2, +}; + +struct rdma_cgroup { + struct cgroup_subsys_state css; + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +enum rdmacg_file_type { + RDMACG_RESOURCE_TYPE_MAX = 0, + RDMACG_RESOURCE_TYPE_STAT = 1, +}; + +struct rdmacg_resource { + int max; + int usage; +}; + +struct rdmacg_resource_pool { + struct rdmacg_device *device; + struct rdmacg_resource resources[2]; + struct list_head cg_node; + struct list_head dev_node; + long: 32; + u64 usage_sum; + int num_max_cnt; + long: 32; +}; + +typedef __s16 s16; + +typedef __u16 __be16; + +typedef __u32 __be32; + +typedef __u32 __wsum; + +typedef u64 uint64_t; + +typedef unsigned int slab_flags_t; + +typedef int (*initcall_t)(); + +enum { + TASKSTATS_CMD_UNSPEC = 0, + TASKSTATS_CMD_GET = 1, + TASKSTATS_CMD_NEW = 2, + __TASKSTATS_CMD_MAX = 3, +}; + +enum { + TASKSTATS_TYPE_UNSPEC = 0, + TASKSTATS_TYPE_PID = 1, + TASKSTATS_TYPE_TGID = 2, + TASKSTATS_TYPE_STATS = 3, + TASKSTATS_TYPE_AGGR_PID = 4, + TASKSTATS_TYPE_AGGR_TGID = 5, + TASKSTATS_TYPE_NULL = 6, + __TASKSTATS_TYPE_MAX = 7, +}; + +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID = 1, + TASKSTATS_CMD_ATTR_TGID = 2, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, + __TASKSTATS_CMD_ATTR_MAX = 5, +}; + +struct rhashtable; + +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *, u32, u32); + +typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); + +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); + +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + 
rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +struct bucket_table; + +struct rhashtable { + struct bucket_table *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +struct task_delay_info { + raw_spinlock_t lock; + long: 32; + u64 blkio_start; + u64 blkio_delay; + u64 swapin_start; + u64 swapin_delay; + u32 blkio_count; + u32 swapin_count; + u64 freepages_start; + u64 freepages_delay; + u64 thrashing_start; + u64 thrashing_delay; + u64 compact_start; + u64 compact_delay; + u64 wpcopy_start; + u64 wpcopy_delay; + u64 irq_delay; + u32 freepages_count; + u32 thrashing_count; + u32 compact_count; + u32 wpcopy_count; + u32 irq_count; + long: 32; +}; + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *); + +struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block *next; + int priority; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block *head; +}; + +struct raw_notifier_head { + struct notifier_block *head; +}; + +enum _slab_flag_bits { + _SLAB_CONSISTENCY_CHECKS = 0, + _SLAB_RED_ZONE = 1, + _SLAB_POISON = 2, + _SLAB_KMALLOC = 3, + _SLAB_HWCACHE_ALIGN = 4, + _SLAB_CACHE_DMA = 5, + _SLAB_CACHE_DMA32 = 6, + _SLAB_STORE_USER = 7, + _SLAB_PANIC = 8, + _SLAB_TYPESAFE_BY_RCU = 9, + _SLAB_TRACE = 10, + _SLAB_NOLEAKTRACE = 11, + _SLAB_NO_MERGE = 12, + _SLAB_ACCOUNT = 13, + _SLAB_NO_USER_FLAGS = 14, + _SLAB_RECLAIM_ACCOUNT = 15, + _SLAB_OBJECT_POISON = 16, + _SLAB_CMPXCHG_DOUBLE = 17, + _SLAB_NO_OBJ_EXT = 18, + _SLAB_FLAGS_LAST_BIT = 19, +}; + +struct kmem_cache_args { + unsigned int align; + unsigned int useroffset; + unsigned int usersize; + unsigned int freeptr_offset; + bool use_freeptr_offset; + void (*ctor)(void *); +}; + +struct cgroupstats { + __u64 nr_sleeping; + __u64 nr_running; + __u64 nr_stopped; + __u64 nr_uninterruptible; + __u64 nr_io_wait; +}; + +enum { + CGROUPSTATS_CMD_UNSPEC = 3, + CGROUPSTATS_CMD_GET = 4, + CGROUPSTATS_CMD_NEW = 5, + __CGROUPSTATS_CMD_MAX = 6, +}; + +enum { + CGROUPSTATS_TYPE_UNSPEC = 0, + CGROUPSTATS_TYPE_CGROUP_STATS = 1, + __CGROUPSTATS_TYPE_MAX = 2, +}; + +enum { + CGROUPSTATS_CMD_ATTR_UNSPEC = 0, + CGROUPSTATS_CMD_ATTR_FD = 1, + __CGROUPSTATS_CMD_ATTR_MAX = 2, +}; + +struct ref_tracker_dir {}; + +struct prot_inuse; + +struct netns_core { + struct ctl_table_header *sysctl_hdr; + int sysctl_somaxconn; + int sysctl_optmem_max; + u8 sysctl_txrehash; + u8 sysctl_tstamp_allow_data; + struct prot_inuse *prot_inuse; + struct cpumask *rps_default_mask; +}; + +struct ipstats_mib; + +struct tcp_mib; + +struct linux_mib; + +struct udp_mib; + +struct icmp_mib; + +struct icmpmsg_mib; + +struct icmpv6_mib; + +struct icmpv6msg_mib; + +struct netns_mib { + struct ipstats_mib *ip_statistics; + struct ipstats_mib *ipv6_statistics; + struct tcp_mib *tcp_statistics; + struct linux_mib *net_statistics; + struct udp_mib *udp_statistics; + struct udp_mib *udp_stats_in6; + struct udp_mib *udplite_statistics; + struct udp_mib *udplite_stats_in6; + struct icmp_mib *icmp_statistics; + struct icmpmsg_mib *icmpmsg_statistics; + struct icmpv6_mib *icmpv6_statistics; + struct icmpv6msg_mib *icmpv6msg_statistics; + struct proc_dir_entry *proc_net_devsnmp6; +}; + +struct netns_packet { + struct mutex sklist_lock; + struct hlist_head sklist; +}; + +struct unix_table { + spinlock_t *locks; + 
struct hlist_head *buckets; +}; + +struct netns_unix { + struct unix_table table; + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; + +struct netns_nexthop { + struct rb_root rb_root; + struct hlist_head *devhash; + unsigned int seq; + u32 last_id_allocated; + struct blocking_notifier_head notifier_chain; +}; + +struct inet_hashinfo; + +struct inet_timewait_death_row { + refcount_t tw_refcount; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct inet_hashinfo *hashinfo; + int sysctl_max_tw_buckets; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct local_ports { + u32 range; + bool warned; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + +typedef struct { + u64 key[2]; +} siphash_key_t; + +struct udp_table; + +struct ipv4_devconf; + +struct ip_ra_chain; + +struct inet_peer_base; + +struct fqdir; + +struct tcp_congestion_ops; + +struct tcp_fastopen_context; + +struct fib_notifier_ops; + +struct netns_ipv4 { + __u8 __cacheline_group_begin__netns_ipv4_read_tx[0]; + u8 sysctl_tcp_early_retrans; + u8 sysctl_tcp_tso_win_divisor; + u8 sysctl_tcp_tso_rtt_log; + u8 sysctl_tcp_autocorking; + int sysctl_tcp_min_snd_mss; + unsigned int sysctl_tcp_notsent_lowat; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_min_rtt_wlen; + int sysctl_tcp_wmem[3]; + u8 sysctl_ip_fwd_use_pmtu; + __u8 __cacheline_group_end__netns_ipv4_read_tx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_txrx[0]; + u8 sysctl_tcp_moderate_rcvbuf; + __u8 __cacheline_group_end__netns_ipv4_read_txrx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_rx[0]; + u8 sysctl_ip_early_demux; + u8 sysctl_tcp_early_demux; + u8 sysctl_tcp_l3mdev_accept; + int sysctl_tcp_reordering; + int sysctl_tcp_rmem[3]; + __u8 __cacheline_group_end__netns_ipv4_read_rx[0]; + long: 32; + long: 32; + struct inet_timewait_death_row tcp_death_row; + struct udp_table *udp_table; + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; + struct ip_ra_chain *ra_chain; + struct mutex ra_mutex; + bool fib_has_custom_local_routes; + bool fib_offload_disabled; + u8 sysctl_tcp_shrink_window; + struct hlist_head *fib_table_hash; + struct sock *fibnl; + struct sock *mc_autojoin_sk; + struct inet_peer_base *peers; + struct fqdir *fqdir; + u8 sysctl_icmp_echo_ignore_all; + u8 sysctl_icmp_echo_enable_probe; + u8 sysctl_icmp_echo_ignore_broadcasts; + u8 sysctl_icmp_ignore_bogus_error_responses; + u8 sysctl_icmp_errors_use_inbound_ifaddr; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + int sysctl_icmp_msgs_per_sec; + int sysctl_icmp_msgs_burst; + atomic_t icmp_global_credit; + u32 icmp_global_stamp; + u32 ip_rt_min_pmtu; + int ip_rt_mtu_expires; + int ip_rt_min_advmss; + struct local_ports ip_local_ports; + u8 sysctl_tcp_ecn; + u8 sysctl_tcp_ecn_fallback; + u8 sysctl_ip_default_ttl; + u8 sysctl_ip_no_pmtu_disc; + u8 sysctl_ip_fwd_update_priority; + u8 sysctl_ip_nonlocal_bind; + u8 sysctl_ip_autobind_reuse; + u8 sysctl_ip_dynaddr; + u8 sysctl_udp_early_demux; + u8 sysctl_nexthop_compat_mode; + u8 sysctl_fwmark_reflect; + u8 sysctl_tcp_fwmark_accept; + u8 sysctl_tcp_mtu_probing; + int sysctl_tcp_mtu_probe_floor; + int sysctl_tcp_base_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; + int sysctl_tcp_keepalive_time; + int 
sysctl_tcp_keepalive_intvl; + u8 sysctl_tcp_keepalive_probes; + u8 sysctl_tcp_syn_retries; + u8 sysctl_tcp_synack_retries; + u8 sysctl_tcp_syncookies; + u8 sysctl_tcp_migrate_req; + u8 sysctl_tcp_comp_sack_nr; + u8 sysctl_tcp_backlog_ack_defer; + u8 sysctl_tcp_pingpong_thresh; + u8 sysctl_tcp_retries1; + u8 sysctl_tcp_retries2; + u8 sysctl_tcp_orphan_retries; + u8 sysctl_tcp_tw_reuse; + int sysctl_tcp_fin_timeout; + u8 sysctl_tcp_sack; + u8 sysctl_tcp_window_scaling; + u8 sysctl_tcp_timestamps; + int sysctl_tcp_rto_min_us; + u8 sysctl_tcp_recovery; + u8 sysctl_tcp_thin_linear_timeouts; + u8 sysctl_tcp_slow_start_after_idle; + u8 sysctl_tcp_retrans_collapse; + u8 sysctl_tcp_stdurg; + u8 sysctl_tcp_rfc1337; + u8 sysctl_tcp_abort_on_overflow; + u8 sysctl_tcp_fack; + int sysctl_tcp_max_reordering; + int sysctl_tcp_adv_win_scale; + u8 sysctl_tcp_dsack; + u8 sysctl_tcp_app_win; + u8 sysctl_tcp_frto; + u8 sysctl_tcp_nometrics_save; + u8 sysctl_tcp_no_ssthresh_metrics_save; + u8 sysctl_tcp_workaround_signed_windows; + int sysctl_tcp_challenge_ack_limit; + u8 sysctl_tcp_min_tso_segs; + u8 sysctl_tcp_reflect_tos; + int sysctl_tcp_invalid_ratelimit; + int sysctl_tcp_pacing_ss_ratio; + int sysctl_tcp_pacing_ca_ratio; + unsigned int sysctl_tcp_child_ehash_entries; + long unsigned int sysctl_tcp_comp_sack_delay_ns; + long unsigned int sysctl_tcp_comp_sack_slack_ns; + int sysctl_max_syn_backlog; + int sysctl_tcp_fastopen; + const struct tcp_congestion_ops *tcp_congestion_control; + struct tcp_fastopen_context *tcp_fastopen_ctx; + unsigned int sysctl_tcp_fastopen_blackhole_timeout; + atomic_t tfo_active_disable_times; + long unsigned int tfo_active_disable_stamp; + u32 tcp_challenge_timestamp; + u32 tcp_challenge_count; + u8 sysctl_tcp_plb_enabled; + u8 sysctl_tcp_plb_idle_rehash_rounds; + u8 sysctl_tcp_plb_rehash_rounds; + u8 sysctl_tcp_plb_suspend_rto_sec; + int sysctl_tcp_plb_cong_thresh; + int sysctl_udp_wmem_min; + int sysctl_udp_rmem_min; + u8 sysctl_fib_notify_on_flag_change; + u8 sysctl_tcp_syn_linear_timeouts; + u8 sysctl_igmp_llm_reports; + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; + atomic_t dev_addr_genid; + unsigned int sysctl_udp_child_hash_entries; + long unsigned int *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; + struct fib_notifier_ops *notifier_ops; + unsigned int fib_seq; + struct fib_notifier_ops *ipmr_notifier_ops; + unsigned int ipmr_seq; + atomic_t rt_genid; + long: 32; + siphash_key_t ip_id_key; + struct hlist_head *inet_addr_lst; + struct delayed_work addr_chk_work; + long: 32; + long: 32; +}; + +struct dst_entry; + +struct sk_buff; + +struct neighbour; + +struct dst_ops { + short unsigned int family; + unsigned int gc_thresh; + void (*gc)(struct dst_ops *); + struct dst_entry * (*check)(struct dst_entry *, __u32); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); + void (*destroy)(struct dst_entry *); + void (*ifdown)(struct dst_entry *, struct net_device *); + void (*negative_advice)(struct sock *, struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); + void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); + int (*local_out)(struct net *, struct sock *, struct sk_buff *); + struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, 
const void *); + void (*confirm_neigh)(const struct dst_entry *, const void *); + struct kmem_cache *kmem_cachep; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct percpu_counter pcpuc_entries; + long: 32; + long: 32; +}; + +struct netns_sysctl_ipv6 { + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; + int flush_delay; + int ip6_rt_max_size; + int ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int ip6_rt_min_advmss; + u32 multipath_hash_fields; + u8 multipath_hash_policy; + u8 bindv6only; + u8 flowlabel_consistency; + u8 auto_flowlabels; + int icmpv6_time; + u8 icmpv6_echo_ignore_all; + u8 icmpv6_echo_ignore_multicast; + u8 icmpv6_echo_ignore_anycast; + long unsigned int icmpv6_ratemask[8]; + long unsigned int *icmpv6_ratemask_ptr; + u8 anycast_src_echo_reply; + u8 ip_nonlocal_bind; + u8 fwmark_reflect; + u8 flowlabel_state_ranges; + int idgen_retries; + int idgen_delay; + int flowlabel_reflect; + int max_dst_opts_cnt; + int max_hbh_opts_cnt; + int max_dst_opts_len; + int max_hbh_opts_len; + int seg6_flowlabel; + u32 ioam6_id; + u64 ioam6_id_wide; + u8 skip_notify_on_dev_down; + u8 fib_notify_on_flag_change; + u8 icmpv6_error_anycast_as_unicast; + long: 32; +}; + +struct ipv6_devconf; + +struct fib6_info; + +struct rt6_info; + +struct rt6_statistics; + +struct fib6_table; + +struct seg6_pernet_data; + +struct ioam6_pernet_data; + +struct netns_ipv6 { + struct dst_ops ip6_dst_ops; + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; + struct fqdir *fqdir; + struct fib6_info *fib6_null_entry; + struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; + atomic_t ip6_rt_gc_expire; + long unsigned int ip6_rt_last_gc; + unsigned char flowlabel_has_excl; + struct sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; + struct sock *mc_autojoin_sk; + struct hlist_head *inet6_addr_lst; + spinlock_t addrconf_hash_lock; + struct delayed_work addr_chk_work; + atomic_t dev_addr_genid; + atomic_t fib6_sernum; + struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; + struct fib_notifier_ops *ip6mr_notifier_ops; + unsigned int ipmr_seq; + struct { + struct hlist_head head; + spinlock_t lock; + u32 seq; + } ip6addrlbl_table; + struct ioam6_pernet_data *ioam6_data; +}; + +struct nf_logger; + +struct nf_hook_entries; + +struct netns_nf { + struct proc_dir_entry *proc_netfilter; + const struct nf_logger *nf_loggers[11]; + struct ctl_table_header *nf_log_dir_header; + struct nf_hook_entries *hooks_ipv4[5]; + struct nf_hook_entries *hooks_ipv6[5]; + struct nf_hook_entries *hooks_bridge[5]; + unsigned int defrag_ipv4_users; + unsigned int defrag_ipv6_users; +}; + +struct nf_ct_event_notifier; + +struct nf_generic_net { + unsigned int timeout; +}; + +struct nf_tcp_net { + unsigned int timeouts[14]; + u8 tcp_loose; + u8 tcp_be_liberal; + u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; +}; + +struct nf_udp_net { + unsigned int timeouts[2]; +}; + +struct nf_icmp_net { + unsigned int timeout; +}; + +struct nf_ip_net { + struct 
nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; +}; + +struct ip_conntrack_stat; + +struct netns_ct { + u8 sysctl_log_invalid; + u8 sysctl_events; + u8 sysctl_acct; + u8 sysctl_tstamp; + u8 sysctl_checksum; + struct ip_conntrack_stat *stat; + struct nf_ct_event_notifier *nf_conntrack_event_cb; + struct nf_ip_net nf_ct_proto; +}; + +struct netns_nftables { + u8 gencursor; +}; + +struct netns_bpf { + struct bpf_prog_array *run_array[2]; + struct bpf_prog *progs[2]; + struct list_head links[2]; +}; + +struct uevent_sock; + +struct net_generic; + +struct net { + refcount_t passive; + spinlock_t rules_mod_lock; + unsigned int dev_base_seq; + u32 ifindex; + spinlock_t nsid_lock; + atomic_t fnhe_genid; + struct list_head list; + struct list_head exit_list; + struct llist_node cleanup_list; + struct key_tag *key_domain; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct idr netns_ids; + struct ns_common ns; + struct ref_tracker_dir refcnt_tracker; + struct ref_tracker_dir notrefcnt_tracker; + struct list_head dev_base_head; + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct ctl_table_set sysctls; + struct sock *rtnl; + struct sock *genl_sock; + struct uevent_sock *uevent_sock; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; + struct xarray dev_by_index; + struct raw_notifier_head netdev_chain; + u32 hash_mix; + struct net_device *loopback_dev; + struct list_head rules_ops; + struct netns_core core; + struct netns_mib mib; + struct netns_packet packet; + struct netns_unix unx; + struct netns_nexthop nexthop; + long: 32; + struct netns_ipv4 ipv4; + struct netns_ipv6 ipv6; + struct netns_nf nf; + struct netns_ct ct; + struct netns_nftables nft; + struct net_generic *gen; + struct netns_bpf bpf; + u64 net_cookie; + struct sock *diag_nlsk; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef unsigned char *sk_buff_data_t; + +struct skb_ext; + +struct sk_buff { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + union { + struct net_device *dev; + long unsigned int dev_scratch; + }; + }; + struct rb_node rbnode; + struct list_head list; + struct llist_node ll_node; + }; + struct sock *sk; + union { + ktime_t tstamp; + u64 skb_mstamp_ns; + }; + char cb[48]; + union { + struct { + long unsigned int _skb_refdst; + void (*destructor)(struct sk_buff *); + }; + struct list_head tcp_tsorted_anchor; + long unsigned int _sk_redir; + }; + long unsigned int _nfct; + unsigned int len; + unsigned int data_len; + __u16 mac_len; + __u16 hdr_len; + __u16 queue_mapping; + __u8 __cloned_offset[0]; + __u8 cloned: 1; + __u8 nohdr: 1; + __u8 fclone: 2; + __u8 peeked: 1; + __u8 head_frag: 1; + __u8 pfmemalloc: 1; + __u8 pp_recycle: 1; + __u8 active_extensions; + union { + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; 
+ u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + }; + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + } headers; + }; + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head; + unsigned char *data; + unsigned int truesize; + refcount_t users; + struct skb_ext *extensions; + long: 32; +}; + +struct in6_addr { + union { + __u8 u6_addr8[16]; + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + } in6_u; +}; + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int insert; + unsigned int insert_failed; + unsigned int clash_resolve; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; + unsigned int chaintoolong; +}; + +enum skb_drop_reason { + SKB_NOT_DROPPED_YET = 0, + SKB_CONSUMED = 1, + SKB_DROP_REASON_NOT_SPECIFIED = 2, + SKB_DROP_REASON_NO_SOCKET = 3, + SKB_DROP_REASON_PKT_TOO_SMALL = 4, + SKB_DROP_REASON_TCP_CSUM = 5, + SKB_DROP_REASON_SOCKET_FILTER = 6, + SKB_DROP_REASON_UDP_CSUM = 7, + SKB_DROP_REASON_NETFILTER_DROP = 8, + SKB_DROP_REASON_OTHERHOST = 9, + SKB_DROP_REASON_IP_CSUM = 10, + SKB_DROP_REASON_IP_INHDR = 11, + SKB_DROP_REASON_IP_RPFILTER = 12, + SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST = 13, + SKB_DROP_REASON_XFRM_POLICY = 14, + SKB_DROP_REASON_IP_NOPROTO = 15, + SKB_DROP_REASON_SOCKET_RCVBUFF = 16, + SKB_DROP_REASON_PROTO_MEM = 17, + SKB_DROP_REASON_TCP_AUTH_HDR = 18, + SKB_DROP_REASON_TCP_MD5NOTFOUND = 19, + SKB_DROP_REASON_TCP_MD5UNEXPECTED = 20, + SKB_DROP_REASON_TCP_MD5FAILURE = 21, + SKB_DROP_REASON_TCP_AONOTFOUND = 22, + SKB_DROP_REASON_TCP_AOUNEXPECTED = 23, + SKB_DROP_REASON_TCP_AOKEYNOTFOUND = 24, + SKB_DROP_REASON_TCP_AOFAILURE = 25, + 
SKB_DROP_REASON_SOCKET_BACKLOG = 26, + SKB_DROP_REASON_TCP_FLAGS = 27, + SKB_DROP_REASON_TCP_ABORT_ON_DATA = 28, + SKB_DROP_REASON_TCP_ZEROWINDOW = 29, + SKB_DROP_REASON_TCP_OLD_DATA = 30, + SKB_DROP_REASON_TCP_OVERWINDOW = 31, + SKB_DROP_REASON_TCP_OFOMERGE = 32, + SKB_DROP_REASON_TCP_RFC7323_PAWS = 33, + SKB_DROP_REASON_TCP_OLD_SEQUENCE = 34, + SKB_DROP_REASON_TCP_INVALID_SEQUENCE = 35, + SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE = 36, + SKB_DROP_REASON_TCP_RESET = 37, + SKB_DROP_REASON_TCP_INVALID_SYN = 38, + SKB_DROP_REASON_TCP_CLOSE = 39, + SKB_DROP_REASON_TCP_FASTOPEN = 40, + SKB_DROP_REASON_TCP_OLD_ACK = 41, + SKB_DROP_REASON_TCP_TOO_OLD_ACK = 42, + SKB_DROP_REASON_TCP_ACK_UNSENT_DATA = 43, + SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE = 44, + SKB_DROP_REASON_TCP_OFO_DROP = 45, + SKB_DROP_REASON_IP_OUTNOROUTES = 46, + SKB_DROP_REASON_BPF_CGROUP_EGRESS = 47, + SKB_DROP_REASON_IPV6DISABLED = 48, + SKB_DROP_REASON_NEIGH_CREATEFAIL = 49, + SKB_DROP_REASON_NEIGH_FAILED = 50, + SKB_DROP_REASON_NEIGH_QUEUEFULL = 51, + SKB_DROP_REASON_NEIGH_DEAD = 52, + SKB_DROP_REASON_TC_EGRESS = 53, + SKB_DROP_REASON_SECURITY_HOOK = 54, + SKB_DROP_REASON_QDISC_DROP = 55, + SKB_DROP_REASON_CPU_BACKLOG = 56, + SKB_DROP_REASON_XDP = 57, + SKB_DROP_REASON_TC_INGRESS = 58, + SKB_DROP_REASON_UNHANDLED_PROTO = 59, + SKB_DROP_REASON_SKB_CSUM = 60, + SKB_DROP_REASON_SKB_GSO_SEG = 61, + SKB_DROP_REASON_SKB_UCOPY_FAULT = 62, + SKB_DROP_REASON_DEV_HDR = 63, + SKB_DROP_REASON_DEV_READY = 64, + SKB_DROP_REASON_FULL_RING = 65, + SKB_DROP_REASON_NOMEM = 66, + SKB_DROP_REASON_HDR_TRUNC = 67, + SKB_DROP_REASON_TAP_FILTER = 68, + SKB_DROP_REASON_TAP_TXFILTER = 69, + SKB_DROP_REASON_ICMP_CSUM = 70, + SKB_DROP_REASON_INVALID_PROTO = 71, + SKB_DROP_REASON_IP_INADDRERRORS = 72, + SKB_DROP_REASON_IP_INNOROUTES = 73, + SKB_DROP_REASON_IP_LOCAL_SOURCE = 74, + SKB_DROP_REASON_IP_INVALID_SOURCE = 75, + SKB_DROP_REASON_IP_LOCALNET = 76, + SKB_DROP_REASON_IP_INVALID_DEST = 77, + SKB_DROP_REASON_PKT_TOO_BIG = 78, + SKB_DROP_REASON_DUP_FRAG = 79, + SKB_DROP_REASON_FRAG_REASM_TIMEOUT = 80, + SKB_DROP_REASON_FRAG_TOO_FAR = 81, + SKB_DROP_REASON_TCP_MINTTL = 82, + SKB_DROP_REASON_IPV6_BAD_EXTHDR = 83, + SKB_DROP_REASON_IPV6_NDISC_FRAG = 84, + SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT = 85, + SKB_DROP_REASON_IPV6_NDISC_BAD_CODE = 86, + SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS = 87, + SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST = 88, + SKB_DROP_REASON_QUEUE_PURGE = 89, + SKB_DROP_REASON_TC_COOKIE_ERROR = 90, + SKB_DROP_REASON_PACKET_SOCK_ERROR = 91, + SKB_DROP_REASON_TC_CHAIN_NOTFOUND = 92, + SKB_DROP_REASON_TC_RECLASSIFY_LOOP = 93, + SKB_DROP_REASON_VXLAN_INVALID_HDR = 94, + SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND = 95, + SKB_DROP_REASON_MAC_INVALID_SOURCE = 96, + SKB_DROP_REASON_VXLAN_ENTRY_EXISTS = 97, + SKB_DROP_REASON_VXLAN_NO_REMOTE = 98, + SKB_DROP_REASON_IP_TUNNEL_ECN = 99, + SKB_DROP_REASON_TUNNEL_TXINFO = 100, + SKB_DROP_REASON_LOCAL_MAC = 101, + SKB_DROP_REASON_ARP_PVLAN_DISABLE = 102, + SKB_DROP_REASON_MAX = 103, + SKB_DROP_REASON_SUBSYS_MASK = 4294901760, +}; + +struct skb_ext { + refcount_t refcnt; + u8 offset[1]; + u8 chunks; + long: 0; + char data[0]; +}; + +struct nlmsghdr { + __u32 nlmsg_len; + __u16 nlmsg_type; + __u16 nlmsg_flags; + __u32 nlmsg_seq; + __u32 nlmsg_pid; +}; + +struct nlattr { + __u16 nla_len; + __u16 nla_type; +}; + +struct nla_policy; + +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + const struct nla_policy *policy; + const struct nlattr *miss_nest; + u16 miss_type; + u8 cookie[20]; + u8 
cookie_len; + char _msg_buf[80]; +}; + +struct netlink_range_validation; + +struct netlink_range_validation_signed; + +struct nla_policy { + u8 type; + u8 validation_type; + u16 len; + union { + u16 strict_start_type; + const u32 bitfield32_valid; + const u32 mask; + const char *reject_message; + const struct nla_policy *nested_policy; + const struct netlink_range_validation *range; + const struct netlink_range_validation_signed *range_signed; + struct { + s16 min; + s16 max; + }; + int (*validate)(const struct nlattr *, struct netlink_ext_ack *); + }; +}; + +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + struct netlink_ext_ack *extack; + u16 family; + u16 answer_flags; + u32 min_dump_alloc; + unsigned int prev_seq; + unsigned int seq; + int flags; + bool strict_check; + union { + u8 ctx[48]; + long int args[6]; + }; +}; + +enum { + NLA_UNSPEC = 0, + NLA_U8 = 1, + NLA_U16 = 2, + NLA_U32 = 3, + NLA_U64 = 4, + NLA_STRING = 5, + NLA_FLAG = 6, + NLA_MSECS = 7, + NLA_NESTED = 8, + NLA_NESTED_ARRAY = 9, + NLA_NUL_STRING = 10, + NLA_BINARY = 11, + NLA_S8 = 12, + NLA_S16 = 13, + NLA_S32 = 14, + NLA_S64 = 15, + NLA_BITFIELD32 = 16, + NLA_REJECT = 17, + NLA_BE16 = 18, + NLA_BE32 = 19, + NLA_SINT = 20, + NLA_UINT = 21, + __NLA_TYPE_MAX = 22, +}; + +struct netlink_range_validation { + u64 min; + u64 max; +}; + +struct netlink_range_validation_signed { + s64 min; + s64 max; +}; + +enum { + IPSTATS_MIB_NUM = 0, + IPSTATS_MIB_INPKTS = 1, + IPSTATS_MIB_INOCTETS = 2, + IPSTATS_MIB_INDELIVERS = 3, + IPSTATS_MIB_OUTFORWDATAGRAMS = 4, + IPSTATS_MIB_OUTREQUESTS = 5, + IPSTATS_MIB_OUTOCTETS = 6, + IPSTATS_MIB_INHDRERRORS = 7, + IPSTATS_MIB_INTOOBIGERRORS = 8, + IPSTATS_MIB_INNOROUTES = 9, + IPSTATS_MIB_INADDRERRORS = 10, + IPSTATS_MIB_INUNKNOWNPROTOS = 11, + IPSTATS_MIB_INTRUNCATEDPKTS = 12, + IPSTATS_MIB_INDISCARDS = 13, + IPSTATS_MIB_OUTDISCARDS = 14, + IPSTATS_MIB_OUTNOROUTES = 15, + IPSTATS_MIB_REASMTIMEOUT = 16, + IPSTATS_MIB_REASMREQDS = 17, + IPSTATS_MIB_REASMOKS = 18, + IPSTATS_MIB_REASMFAILS = 19, + IPSTATS_MIB_FRAGOKS = 20, + IPSTATS_MIB_FRAGFAILS = 21, + IPSTATS_MIB_FRAGCREATES = 22, + IPSTATS_MIB_INMCASTPKTS = 23, + IPSTATS_MIB_OUTMCASTPKTS = 24, + IPSTATS_MIB_INBCASTPKTS = 25, + IPSTATS_MIB_OUTBCASTPKTS = 26, + IPSTATS_MIB_INMCASTOCTETS = 27, + IPSTATS_MIB_OUTMCASTOCTETS = 28, + IPSTATS_MIB_INBCASTOCTETS = 29, + IPSTATS_MIB_OUTBCASTOCTETS = 30, + IPSTATS_MIB_CSUMERRORS = 31, + IPSTATS_MIB_NOECTPKTS = 32, + IPSTATS_MIB_ECT1PKTS = 33, + IPSTATS_MIB_ECT0PKTS = 34, + IPSTATS_MIB_CEPKTS = 35, + IPSTATS_MIB_REASM_OVERLAPS = 36, + IPSTATS_MIB_OUTPKTS = 37, + __IPSTATS_MIB_MAX = 38, +}; + +enum { + ICMP_MIB_NUM = 0, + ICMP_MIB_INMSGS = 1, + ICMP_MIB_INERRORS = 2, + ICMP_MIB_INDESTUNREACHS = 3, + ICMP_MIB_INTIMEEXCDS = 4, + ICMP_MIB_INPARMPROBS = 5, + ICMP_MIB_INSRCQUENCHS = 6, + ICMP_MIB_INREDIRECTS = 7, + ICMP_MIB_INECHOS = 8, + ICMP_MIB_INECHOREPS = 9, + ICMP_MIB_INTIMESTAMPS = 10, + ICMP_MIB_INTIMESTAMPREPS = 11, + ICMP_MIB_INADDRMASKS = 12, + ICMP_MIB_INADDRMASKREPS = 13, + ICMP_MIB_OUTMSGS = 14, + ICMP_MIB_OUTERRORS = 15, + ICMP_MIB_OUTDESTUNREACHS = 16, + ICMP_MIB_OUTTIMEEXCDS = 17, + ICMP_MIB_OUTPARMPROBS = 18, + ICMP_MIB_OUTSRCQUENCHS = 19, + ICMP_MIB_OUTREDIRECTS = 20, + ICMP_MIB_OUTECHOS = 21, + ICMP_MIB_OUTECHOREPS = 22, + ICMP_MIB_OUTTIMESTAMPS = 23, + ICMP_MIB_OUTTIMESTAMPREPS = 24, + ICMP_MIB_OUTADDRMASKS = 25, + 
ICMP_MIB_OUTADDRMASKREPS = 26, + ICMP_MIB_CSUMERRORS = 27, + ICMP_MIB_RATELIMITGLOBAL = 28, + ICMP_MIB_RATELIMITHOST = 29, + __ICMP_MIB_MAX = 30, +}; + +enum { + ICMP6_MIB_NUM = 0, + ICMP6_MIB_INMSGS = 1, + ICMP6_MIB_INERRORS = 2, + ICMP6_MIB_OUTMSGS = 3, + ICMP6_MIB_OUTERRORS = 4, + ICMP6_MIB_CSUMERRORS = 5, + ICMP6_MIB_RATELIMITHOST = 6, + __ICMP6_MIB_MAX = 7, +}; + +enum { + TCP_MIB_NUM = 0, + TCP_MIB_RTOALGORITHM = 1, + TCP_MIB_RTOMIN = 2, + TCP_MIB_RTOMAX = 3, + TCP_MIB_MAXCONN = 4, + TCP_MIB_ACTIVEOPENS = 5, + TCP_MIB_PASSIVEOPENS = 6, + TCP_MIB_ATTEMPTFAILS = 7, + TCP_MIB_ESTABRESETS = 8, + TCP_MIB_CURRESTAB = 9, + TCP_MIB_INSEGS = 10, + TCP_MIB_OUTSEGS = 11, + TCP_MIB_RETRANSSEGS = 12, + TCP_MIB_INERRS = 13, + TCP_MIB_OUTRSTS = 14, + TCP_MIB_CSUMERRORS = 15, + __TCP_MIB_MAX = 16, +}; + +enum { + UDP_MIB_NUM = 0, + UDP_MIB_INDATAGRAMS = 1, + UDP_MIB_NOPORTS = 2, + UDP_MIB_INERRORS = 3, + UDP_MIB_OUTDATAGRAMS = 4, + UDP_MIB_RCVBUFERRORS = 5, + UDP_MIB_SNDBUFERRORS = 6, + UDP_MIB_CSUMERRORS = 7, + UDP_MIB_IGNOREDMULTI = 8, + UDP_MIB_MEMERRORS = 9, + __UDP_MIB_MAX = 10, +}; + +enum { + LINUX_MIB_NUM = 0, + LINUX_MIB_SYNCOOKIESSENT = 1, + LINUX_MIB_SYNCOOKIESRECV = 2, + LINUX_MIB_SYNCOOKIESFAILED = 3, + LINUX_MIB_EMBRYONICRSTS = 4, + LINUX_MIB_PRUNECALLED = 5, + LINUX_MIB_RCVPRUNED = 6, + LINUX_MIB_OFOPRUNED = 7, + LINUX_MIB_OUTOFWINDOWICMPS = 8, + LINUX_MIB_LOCKDROPPEDICMPS = 9, + LINUX_MIB_ARPFILTER = 10, + LINUX_MIB_TIMEWAITED = 11, + LINUX_MIB_TIMEWAITRECYCLED = 12, + LINUX_MIB_TIMEWAITKILLED = 13, + LINUX_MIB_PAWSACTIVEREJECTED = 14, + LINUX_MIB_PAWSESTABREJECTED = 15, + LINUX_MIB_DELAYEDACKS = 16, + LINUX_MIB_DELAYEDACKLOCKED = 17, + LINUX_MIB_DELAYEDACKLOST = 18, + LINUX_MIB_LISTENOVERFLOWS = 19, + LINUX_MIB_LISTENDROPS = 20, + LINUX_MIB_TCPHPHITS = 21, + LINUX_MIB_TCPPUREACKS = 22, + LINUX_MIB_TCPHPACKS = 23, + LINUX_MIB_TCPRENORECOVERY = 24, + LINUX_MIB_TCPSACKRECOVERY = 25, + LINUX_MIB_TCPSACKRENEGING = 26, + LINUX_MIB_TCPSACKREORDER = 27, + LINUX_MIB_TCPRENOREORDER = 28, + LINUX_MIB_TCPTSREORDER = 29, + LINUX_MIB_TCPFULLUNDO = 30, + LINUX_MIB_TCPPARTIALUNDO = 31, + LINUX_MIB_TCPDSACKUNDO = 32, + LINUX_MIB_TCPLOSSUNDO = 33, + LINUX_MIB_TCPLOSTRETRANSMIT = 34, + LINUX_MIB_TCPRENOFAILURES = 35, + LINUX_MIB_TCPSACKFAILURES = 36, + LINUX_MIB_TCPLOSSFAILURES = 37, + LINUX_MIB_TCPFASTRETRANS = 38, + LINUX_MIB_TCPSLOWSTARTRETRANS = 39, + LINUX_MIB_TCPTIMEOUTS = 40, + LINUX_MIB_TCPLOSSPROBES = 41, + LINUX_MIB_TCPLOSSPROBERECOVERY = 42, + LINUX_MIB_TCPRENORECOVERYFAIL = 43, + LINUX_MIB_TCPSACKRECOVERYFAIL = 44, + LINUX_MIB_TCPRCVCOLLAPSED = 45, + LINUX_MIB_TCPDSACKOLDSENT = 46, + LINUX_MIB_TCPDSACKOFOSENT = 47, + LINUX_MIB_TCPDSACKRECV = 48, + LINUX_MIB_TCPDSACKOFORECV = 49, + LINUX_MIB_TCPABORTONDATA = 50, + LINUX_MIB_TCPABORTONCLOSE = 51, + LINUX_MIB_TCPABORTONMEMORY = 52, + LINUX_MIB_TCPABORTONTIMEOUT = 53, + LINUX_MIB_TCPABORTONLINGER = 54, + LINUX_MIB_TCPABORTFAILED = 55, + LINUX_MIB_TCPMEMORYPRESSURES = 56, + LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, + LINUX_MIB_TCPSACKDISCARD = 58, + LINUX_MIB_TCPDSACKIGNOREDOLD = 59, + LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, + LINUX_MIB_TCPSPURIOUSRTOS = 61, + LINUX_MIB_TCPMD5NOTFOUND = 62, + LINUX_MIB_TCPMD5UNEXPECTED = 63, + LINUX_MIB_TCPMD5FAILURE = 64, + LINUX_MIB_SACKSHIFTED = 65, + LINUX_MIB_SACKMERGED = 66, + LINUX_MIB_SACKSHIFTFALLBACK = 67, + LINUX_MIB_TCPBACKLOGDROP = 68, + LINUX_MIB_PFMEMALLOCDROP = 69, + LINUX_MIB_TCPMINTTLDROP = 70, + LINUX_MIB_TCPDEFERACCEPTDROP = 71, + LINUX_MIB_IPRPFILTER = 72, + LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, 
+ LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, + LINUX_MIB_TCPREQQFULLDROP = 75, + LINUX_MIB_TCPRETRANSFAIL = 76, + LINUX_MIB_TCPRCVCOALESCE = 77, + LINUX_MIB_TCPBACKLOGCOALESCE = 78, + LINUX_MIB_TCPOFOQUEUE = 79, + LINUX_MIB_TCPOFODROP = 80, + LINUX_MIB_TCPOFOMERGE = 81, + LINUX_MIB_TCPCHALLENGEACK = 82, + LINUX_MIB_TCPSYNCHALLENGE = 83, + LINUX_MIB_TCPFASTOPENACTIVE = 84, + LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, + LINUX_MIB_TCPFASTOPENPASSIVE = 86, + LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, + LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, + LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, + LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, + LINUX_MIB_BUSYPOLLRXPACKETS = 92, + LINUX_MIB_TCPAUTOCORKING = 93, + LINUX_MIB_TCPFROMZEROWINDOWADV = 94, + LINUX_MIB_TCPTOZEROWINDOWADV = 95, + LINUX_MIB_TCPWANTZEROWINDOWADV = 96, + LINUX_MIB_TCPSYNRETRANS = 97, + LINUX_MIB_TCPORIGDATASENT = 98, + LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, + LINUX_MIB_TCPHYSTARTTRAINCWND = 100, + LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, + LINUX_MIB_TCPHYSTARTDELAYCWND = 102, + LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, + LINUX_MIB_TCPACKSKIPPEDPAWS = 104, + LINUX_MIB_TCPACKSKIPPEDSEQ = 105, + LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, + LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, + LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, + LINUX_MIB_TCPWINPROBE = 109, + LINUX_MIB_TCPKEEPALIVE = 110, + LINUX_MIB_TCPMTUPFAIL = 111, + LINUX_MIB_TCPMTUPSUCCESS = 112, + LINUX_MIB_TCPDELIVERED = 113, + LINUX_MIB_TCPDELIVEREDCE = 114, + LINUX_MIB_TCPACKCOMPRESSED = 115, + LINUX_MIB_TCPZEROWINDOWDROP = 116, + LINUX_MIB_TCPRCVQDROP = 117, + LINUX_MIB_TCPWQUEUETOOBIG = 118, + LINUX_MIB_TCPFASTOPENPASSIVEALTKEY = 119, + LINUX_MIB_TCPTIMEOUTREHASH = 120, + LINUX_MIB_TCPDUPLICATEDATAREHASH = 121, + LINUX_MIB_TCPDSACKRECVSEGS = 122, + LINUX_MIB_TCPDSACKIGNOREDDUBIOUS = 123, + LINUX_MIB_TCPMIGRATEREQSUCCESS = 124, + LINUX_MIB_TCPMIGRATEREQFAILURE = 125, + LINUX_MIB_TCPPLBREHASH = 126, + LINUX_MIB_TCPAOREQUIRED = 127, + LINUX_MIB_TCPAOBAD = 128, + LINUX_MIB_TCPAOKEYNOTFOUND = 129, + LINUX_MIB_TCPAOGOOD = 130, + LINUX_MIB_TCPAODROPPEDICMPS = 131, + __LINUX_MIB_MAX = 132, +}; + +enum { + LINUX_MIB_XFRMNUM = 0, + LINUX_MIB_XFRMINERROR = 1, + LINUX_MIB_XFRMINBUFFERERROR = 2, + LINUX_MIB_XFRMINHDRERROR = 3, + LINUX_MIB_XFRMINNOSTATES = 4, + LINUX_MIB_XFRMINSTATEPROTOERROR = 5, + LINUX_MIB_XFRMINSTATEMODEERROR = 6, + LINUX_MIB_XFRMINSTATESEQERROR = 7, + LINUX_MIB_XFRMINSTATEEXPIRED = 8, + LINUX_MIB_XFRMINSTATEMISMATCH = 9, + LINUX_MIB_XFRMINSTATEINVALID = 10, + LINUX_MIB_XFRMINTMPLMISMATCH = 11, + LINUX_MIB_XFRMINNOPOLS = 12, + LINUX_MIB_XFRMINPOLBLOCK = 13, + LINUX_MIB_XFRMINPOLERROR = 14, + LINUX_MIB_XFRMOUTERROR = 15, + LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16, + LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17, + LINUX_MIB_XFRMOUTNOSTATES = 18, + LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19, + LINUX_MIB_XFRMOUTSTATEMODEERROR = 20, + LINUX_MIB_XFRMOUTSTATESEQERROR = 21, + LINUX_MIB_XFRMOUTSTATEEXPIRED = 22, + LINUX_MIB_XFRMOUTPOLBLOCK = 23, + LINUX_MIB_XFRMOUTPOLDEAD = 24, + LINUX_MIB_XFRMOUTPOLERROR = 25, + LINUX_MIB_XFRMFWDHDRERROR = 26, + LINUX_MIB_XFRMOUTSTATEINVALID = 27, + LINUX_MIB_XFRMACQUIREERROR = 28, + LINUX_MIB_XFRMOUTSTATEDIRERROR = 29, + LINUX_MIB_XFRMINSTATEDIRERROR = 30, + __LINUX_MIB_XFRMMAX = 31, +}; + +enum { + LINUX_MIB_TLSNUM = 0, + LINUX_MIB_TLSCURRTXSW = 1, + LINUX_MIB_TLSCURRRXSW = 2, + LINUX_MIB_TLSCURRTXDEVICE = 3, + LINUX_MIB_TLSCURRRXDEVICE = 4, + LINUX_MIB_TLSTXSW = 5, + LINUX_MIB_TLSRXSW = 6, + LINUX_MIB_TLSTXDEVICE = 7, + LINUX_MIB_TLSRXDEVICE = 8, + 
LINUX_MIB_TLSDECRYPTERROR = 9, + LINUX_MIB_TLSRXDEVICERESYNC = 10, + LINUX_MIB_TLSDECRYPTRETRY = 11, + LINUX_MIB_TLSRXNOPADVIOL = 12, + __LINUX_MIB_TLSMAX = 13, +}; + +struct ipstats_mib { + u64 mibs[38]; + struct u64_stats_sync syncp; + long: 32; +}; + +struct icmp_mib { + long unsigned int mibs[30]; +}; + +struct icmpmsg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6_mib { + long unsigned int mibs[7]; +}; + +struct icmpv6msg_mib { + atomic_long_t mibs[512]; +}; + +struct tcp_mib { + long unsigned int mibs[16]; +}; + +struct udp_mib { + long unsigned int mibs[10]; +}; + +struct linux_mib { + long unsigned int mibs[132]; +}; + +struct inet_frags; + +struct fqdir { + long int high_thresh; + long int low_thresh; + int timeout; + int max_dist; + struct inet_frags *f; + struct net *net; + bool dead; + long: 32; + struct rhashtable rhashtable; + long: 32; + long: 32; + atomic_long_t mem; + struct work_struct destroy_work; + struct llist_node free_list; + long: 32; + long: 32; +}; + +struct inet_frag_queue; + +struct inet_frags { + unsigned int qsize; + void (*constructor)(struct inet_frag_queue *, const void *); + void (*destructor)(struct inet_frag_queue *); + void (*frag_expire)(struct timer_list *); + struct kmem_cache *frags_cachep; + const char *frags_cache_name; + struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; +}; + +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; + +struct inet_frag_queue { + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; + struct timer_list timer; + spinlock_t lock; + refcount_t refcnt; + struct rb_root rb_fragments; + struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; + ktime_t stamp; + int len; + int meat; + u8 tstamp_type; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; + struct callback_head rcu; +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, +}; + +struct ack_sample; + +struct rate_sample; + +union tcp_cc_info; + +struct tcp_congestion_ops { + u32 (*ssthresh)(struct sock *); + void (*cong_avoid)(struct sock *, u32, u32); + void (*set_state)(struct sock *, u8); + void (*cwnd_event)(struct sock *, enum tcp_ca_event); + void (*in_ack_event)(struct sock *, u32); + void (*pkts_acked)(struct sock *, const struct ack_sample *); + u32 (*min_tso_segs)(struct sock *); + void (*cong_control)(struct sock *, u32, int, const struct rate_sample *); + u32 (*undo_cwnd)(struct sock *); + u32 (*sndbuf_expand)(struct sock *); + size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); + char name[16]; + struct module *owner; + struct list_head list; + u32 key; + u32 flags; + void (*init)(struct sock *); + void (*release)(struct sock *); + long: 32; + long: 32; +}; + +typedef struct { + atomic_t refcnt; +} rcuref_t; + +typedef struct {} netdevice_tracker; + +struct lwtunnel_state; + +struct uncached_list; + +struct dst_entry { + struct net_device *dev; + struct dst_ops *ops; + long unsigned int _metrics; + long unsigned int expires; + void *__pad1; + int (*input)(struct sk_buff *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + short unsigned int flags; + short int obsolete; + short unsigned int 
header_len; + short unsigned int trailer_len; + int __use; + long unsigned int lastuse; + struct callback_head callback_head; + short int error; + short int __pad; + __u32 tclassid; + struct lwtunnel_state *lwtstate; + rcuref_t __rcuref; + netdevice_tracker dev_tracker; + struct list_head rt_uncached; + struct uncached_list *rt_uncached_list; +}; + +enum nf_inet_hooks { + NF_INET_PRE_ROUTING = 0, + NF_INET_LOCAL_IN = 1, + NF_INET_FORWARD = 2, + NF_INET_LOCAL_OUT = 3, + NF_INET_POST_ROUTING = 4, + NF_INET_NUMHOOKS = 5, + NF_INET_INGRESS = 5, +}; + +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_INET = 1, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_NETDEV = 5, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_NUMPROTO = 11, +}; + +enum nf_log_type { + NF_LOG_TYPE_LOG = 0, + NF_LOG_TYPE_ULOG = 1, + NF_LOG_TYPE_MAX = 2, +}; + +typedef u8 u_int8_t; + +struct nf_loginfo; + +typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); + +struct nf_logger { + char *name; + enum nf_log_type type; + nf_logfn *logfn; + struct module *me; +}; + +enum tcp_conntrack { + TCP_CONNTRACK_NONE = 0, + TCP_CONNTRACK_SYN_SENT = 1, + TCP_CONNTRACK_SYN_RECV = 2, + TCP_CONNTRACK_ESTABLISHED = 3, + TCP_CONNTRACK_FIN_WAIT = 4, + TCP_CONNTRACK_CLOSE_WAIT = 5, + TCP_CONNTRACK_LAST_ACK = 6, + TCP_CONNTRACK_TIME_WAIT = 7, + TCP_CONNTRACK_CLOSE = 8, + TCP_CONNTRACK_LISTEN = 9, + TCP_CONNTRACK_MAX = 10, + TCP_CONNTRACK_IGNORE = 11, + TCP_CONNTRACK_RETRANS = 12, + TCP_CONNTRACK_UNACK = 13, + TCP_CONNTRACK_TIMEOUT_MAX = 14, +}; + +enum udp_conntrack { + UDP_CT_UNREPLIED = 0, + UDP_CT_REPLIED = 1, + UDP_CT_MAX = 2, +}; + +enum { + XFRM_POLICY_IN = 0, + XFRM_POLICY_OUT = 1, + XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, + XFRM_POLICY_MAX = 3, +}; + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = -1, + NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP = 1, + MAX_NETNS_BPF_ATTACH_TYPE = 2, +}; + +typedef struct { + struct net *net; +} possible_net_t; + +struct genlmsghdr { + __u8 cmd; + __u8 version; + __u16 reserved; +}; + +struct genl_multicast_group { + char name[16]; + u8 flags; +}; + +struct genl_split_ops; + +struct genl_info; + +struct genl_ops; + +struct genl_small_ops; + +struct genl_family { + unsigned int hdrsize; + char name[16]; + unsigned int version; + unsigned int maxattr; + u8 netnsok: 1; + u8 parallel_ops: 1; + u8 n_ops; + u8 n_small_ops; + u8 n_split_ops; + u8 n_mcgrps; + u8 resv_start_op; + const struct nla_policy *policy; + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + int (*bind)(int); + void (*unbind)(int); + const struct genl_ops *ops; + const struct genl_small_ops *small_ops; + const struct genl_split_ops *split_ops; + const struct genl_multicast_group *mcgrps; + struct module *module; + size_t sock_priv_size; + void (*sock_priv_init)(void *); + void (*sock_priv_destroy)(void *); + int id; + unsigned int mcgrp_offset; + struct xarray *sock_privs; +}; + +struct genl_split_ops { + union { + struct { + int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + int (*doit)(struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); + }; + struct { + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int 
(*done)(struct netlink_callback *); + }; + }; + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_info { + u32 snd_seq; + u32 snd_portid; + const struct genl_family *family; + const struct nlmsghdr *nlhdr; + struct genlmsghdr *genlhdr; + struct nlattr **attrs; + possible_net_t _net; + union { + u8 ctx[48]; + void *user_ptr[2]; + }; + struct netlink_ext_ack *extack; +}; + +struct genl_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_small_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +enum genl_validate_flags { + GENL_DONT_VALIDATE_STRICT = 1, + GENL_DONT_VALIDATE_DUMP = 2, + GENL_DONT_VALIDATE_DUMP_STRICT = 4, +}; + +struct listener { + struct list_head list; + pid_t pid; + char valid; +}; + +struct listener_list { + struct rw_semaphore sem; + struct list_head list; +}; + +enum actions { + REGISTER = 0, + DEREGISTER = 1, + CPU_DONT_CARE = 2, +}; + +enum { + CSD_FLAG_LOCK = 1, + IRQ_WORK_PENDING = 1, + IRQ_WORK_BUSY = 2, + IRQ_WORK_LAZY = 4, + IRQ_WORK_HARD_IRQ = 8, + IRQ_WORK_CLAIMED = 3, + CSD_TYPE_ASYNC = 0, + CSD_TYPE_SYNC = 16, + CSD_TYPE_IRQ_WORK = 32, + CSD_TYPE_TTWU = 48, + CSD_FLAG_TYPE_MASK = 240, +}; + +typedef struct { + void *lock; +} class_preempt_notrace_t; + +struct smp_hotplug_thread { + struct task_struct **store; + struct list_head list; + int (*thread_should_run)(unsigned int); + void (*thread_fn)(unsigned int); + void (*create)(unsigned int); + void (*setup)(unsigned int); + void (*cleanup)(unsigned int, bool); + void (*park)(unsigned int); + void (*unpark)(unsigned int); + bool selfparking; + const char *thread_comm; +}; + +enum bpf_lru_list_type { + BPF_LRU_LIST_T_ACTIVE = 0, + BPF_LRU_LIST_T_INACTIVE = 1, + BPF_LRU_LIST_T_FREE = 2, + BPF_LRU_LOCAL_LIST_T_FREE = 3, + BPF_LRU_LOCAL_LIST_T_PENDING = 4, +}; + +struct bpf_lru_node { + struct list_head list; + u16 cpu; + u8 type; + u8 ref; +}; + +struct bpf_lru_list { + struct list_head lists[3]; + unsigned int counts[2]; + struct list_head *next_inactive_rotation; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + raw_spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_lru_locallist { + struct list_head lists[2]; + u16 next_steal; + raw_spinlock_t lock; +}; + +struct bpf_common_lru { + struct bpf_lru_list lru_list; + struct bpf_lru_locallist *local_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); + +struct bpf_lru { + union { + struct bpf_common_lru common_lru; + struct bpf_lru_list *percpu_lru; + }; + del_from_htab_func del_from_htab; + void *del_arg; + unsigned int hash_offset; + unsigned int nr_scans; + bool percpu; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_array_aux { + struct list_head poke_progs; + struct bpf_map *map; + struct mutex poke_mutex; + struct work_struct work; +}; + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + struct bpf_array_aux *aux; + 
long: 32; + union { + struct { + struct {} __empty_value; + char value[0]; + }; + struct { + struct {} __empty_ptrs; + void *ptrs[0]; + }; + struct { + struct {} __empty_pptrs; + void *pptrs[0]; + }; + }; +}; + +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, + BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, + BPF_LINK_TYPE_STRUCT_OPS = 9, + BPF_LINK_TYPE_NETFILTER = 10, + BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SOCKMAP = 14, + __MAX_BPF_LINK_TYPE = 15, +}; + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + long: 32; + union { + struct { + __u64 tp_name; + __u32 tp_name_len; + long: 32; + } raw_tracepoint; + struct { + __u32 attach_type; + __u32 target_obj_id; + __u32 target_btf_id; + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + long: 32; + } cgroup; + struct { + __u64 target_name; + __u32 target_name_len; + union { + struct { + __u32 map_id; + } map; + }; + union { + struct { + __u64 cgroup_id; + __u32 order; + long: 32; + } cgroup; + struct { + __u32 tid; + __u32 pid; + } task; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + struct { + __u32 map_id; + } struct_ops; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + __u64 addrs; + __u32 count; + __u32 flags; + __u64 missed; + __u64 cookies; + } kprobe_multi; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 path_size; + __u32 count; + __u32 flags; + __u32 pid; + } uprobe_multi; + struct { + __u32 type; + long: 32; + union { + struct { + __u64 file_name; + __u32 name_len; + __u32 offset; + __u64 cookie; + } uprobe; + struct { + __u64 func_name; + __u32 name_len; + __u32 offset; + __u64 addr; + __u64 missed; + __u64 cookie; + } kprobe; + struct { + __u64 tp_name; + __u32 name_len; + long: 32; + __u64 cookie; + } tracepoint; + struct { + __u64 config; + __u32 type; + long: 32; + __u64 cookie; + } event; + }; + } perf_event; + struct { + __u32 ifindex; + __u32 attach_type; + } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } sockmap; + }; +}; + +struct vm_struct { + struct vm_struct *next; + void *addr; + long unsigned int size; + long unsigned int flags; + struct page **pages; + unsigned int nr_pages; + phys_addr_t phys_addr; + const void *caller; +}; + +struct bpf_nh_params { + u32 nh_family; + union { + u32 ipv4_nh; + struct in6_addr ipv6_nh; + }; +}; + +struct bpf_redirect_info { + u64 tgt_index; + void *tgt_value; + struct bpf_map *map; + u32 flags; + u32 map_id; + enum bpf_map_type map_type; + struct bpf_nh_params nh; + u32 kern_flags; + long: 32; +}; + +struct bpf_net_context { + struct bpf_redirect_info ri; + struct list_head cpu_map_flush_list; + struct list_head dev_map_flush_list; + struct list_head xskmap_map_flush_list; +}; + +typedef struct { + u64 v; +} u64_stats_t; + +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); + int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); + int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); + int (*map_delete_elem)(struct bpf_offloaded_map *, void *); +}; + +struct 
bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; + long: 32; +}; + +typedef u64 netdev_features_t; + +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + +enum rx_handler_result { + RX_HANDLER_CONSUMED = 0, + RX_HANDLER_ANOTHER = 1, + RX_HANDLER_EXACT = 2, + RX_HANDLER_PASS = 3, +}; + +typedef enum rx_handler_result rx_handler_result_t; + +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); + +typedef u32 xdp_features_t; + +struct net_device_stats { + union { + long unsigned int rx_packets; + atomic_long_t __rx_packets; + }; + union { + long unsigned int tx_packets; + atomic_long_t __tx_packets; + }; + union { + long unsigned int rx_bytes; + atomic_long_t __rx_bytes; + }; + union { + long unsigned int tx_bytes; + atomic_long_t __tx_bytes; + }; + union { + long unsigned int rx_errors; + atomic_long_t __rx_errors; + }; + union { + long unsigned int tx_errors; + atomic_long_t __tx_errors; + }; + union { + long unsigned int rx_dropped; + atomic_long_t __rx_dropped; + }; + union { + long unsigned int tx_dropped; + atomic_long_t __tx_dropped; + }; + union { + long unsigned int multicast; + atomic_long_t __multicast; + }; + union { + long unsigned int collisions; + atomic_long_t __collisions; + }; + union { + long unsigned int rx_length_errors; + atomic_long_t __rx_length_errors; + }; + union { + long unsigned int rx_over_errors; + atomic_long_t __rx_over_errors; + }; + union { + long unsigned int rx_crc_errors; + atomic_long_t __rx_crc_errors; + }; + union { + long unsigned int rx_frame_errors; + atomic_long_t __rx_frame_errors; + }; + union { + long unsigned int rx_fifo_errors; + atomic_long_t __rx_fifo_errors; + }; + union { + long unsigned int rx_missed_errors; + atomic_long_t __rx_missed_errors; + }; + union { + long unsigned int tx_aborted_errors; + atomic_long_t __tx_aborted_errors; + }; + union { + long unsigned int tx_carrier_errors; + atomic_long_t __tx_carrier_errors; + }; + union { + long unsigned int tx_fifo_errors; + atomic_long_t __tx_fifo_errors; + }; + union { + long unsigned int tx_heartbeat_errors; + atomic_long_t __tx_heartbeat_errors; + }; + union { + long unsigned int tx_window_errors; + atomic_long_t __tx_window_errors; + }; + union { + long unsigned int rx_compressed; + atomic_long_t __rx_compressed; + }; + union { + long unsigned int tx_compressed; + atomic_long_t __tx_compressed; + }; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; + struct rb_root tree; +}; + +enum netdev_ml_priv_type { + ML_PRIV_NONE = 0, + ML_PRIV_CAN = 1, +}; + +enum netdev_stat_type { + NETDEV_PCPU_STAT_NONE = 0, + NETDEV_PCPU_STAT_LSTATS = 1, + NETDEV_PCPU_STAT_TSTATS = 2, + NETDEV_PCPU_STAT_DSTATS = 3, +}; + +struct sfp_bus; + +struct udp_tunnel_nic; + +struct bpf_xdp_link; + +struct bpf_xdp_entity { + struct bpf_prog *prog; + struct bpf_xdp_link *link; +}; + +struct net_device_ops; + +struct header_ops; + +struct netdev_queue; + +struct xps_dev_maps; + +struct bpf_mprog_entry; + +struct pcpu_lstats; + +struct pcpu_sw_netstats; + +struct pcpu_dstats; + +struct inet6_dev; + +struct netdev_rx_queue; + +struct netdev_name_node; + +struct dev_ifalias; + +struct xdp_metadata_ops; + +struct xsk_tx_metadata_ops; + +struct net_device_core_stats; + +struct ethtool_ops; + +struct ndisc_ops; + +struct in_device; + +struct cpu_rmap; + +struct Qdisc; + +struct xdp_dev_bulk_queue; + +struct rtnl_link_ops; + +struct netdev_stat_ops; + +struct 
netdev_queue_mgmt_ops; + +struct phy_link_topology; + +struct phy_device; + +struct udp_tunnel_nic_info; + +struct ethtool_netdev_state; + +struct rtnl_hw_stats64; + +struct devlink_port; + +struct dim_irq_moder; + +struct napi_config; + +struct net_device { + __u8 __cacheline_group_begin__net_device_read_tx[0]; + union { + struct { + long unsigned int priv_flags: 32; + long unsigned int lltx: 1; + }; + struct { + long unsigned int priv_flags: 32; + long unsigned int lltx: 1; + } priv_flags_fast; + }; + const struct net_device_ops *netdev_ops; + const struct header_ops *header_ops; + struct netdev_queue *_tx; + long: 32; + netdev_features_t gso_partial_features; + unsigned int real_num_tx_queues; + unsigned int gso_max_size; + unsigned int gso_ipv4_max_size; + u16 gso_max_segs; + s16 num_tc; + unsigned int mtu; + short unsigned int needed_headroom; + struct netdev_tc_txq tc_to_txq[16]; + struct xps_dev_maps *xps_maps[2]; + struct nf_hook_entries *nf_hooks_egress; + struct bpf_mprog_entry *tcx_egress; + __u8 __cacheline_group_end__net_device_read_tx[0]; + __u8 __cacheline_group_begin__net_device_read_txrx[0]; + union { + struct pcpu_lstats *lstats; + struct pcpu_sw_netstats *tstats; + struct pcpu_dstats *dstats; + }; + long unsigned int state; + unsigned int flags; + short unsigned int hard_header_len; + netdev_features_t features; + struct inet6_dev *ip6_ptr; + __u8 __cacheline_group_end__net_device_read_txrx[0]; + __u8 __cacheline_group_begin__net_device_read_rx[0]; + struct bpf_prog *xdp_prog; + struct list_head ptype_specific; + int ifindex; + unsigned int real_num_rx_queues; + struct netdev_rx_queue *_rx; + unsigned int gro_max_size; + unsigned int gro_ipv4_max_size; + rx_handler_func_t *rx_handler; + void *rx_handler_data; + possible_net_t nd_net; + struct bpf_mprog_entry *tcx_ingress; + __u8 __cacheline_group_end__net_device_read_rx[0]; + char name[16]; + struct netdev_name_node *name_node; + struct dev_ifalias *ifalias; + long unsigned int mem_end; + long unsigned int mem_start; + long unsigned int base_addr; + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + struct list_head close_list; + struct list_head ptype_all; + struct { + struct list_head upper; + struct list_head lower; + } adj_list; + xdp_features_t xdp_features; + const struct xdp_metadata_ops *xdp_metadata_ops; + const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; + short unsigned int gflags; + short unsigned int needed_tailroom; + netdev_features_t hw_features; + netdev_features_t wanted_features; + netdev_features_t vlan_features; + netdev_features_t hw_enc_features; + netdev_features_t mpls_features; + unsigned int min_mtu; + unsigned int max_mtu; + short unsigned int type; + unsigned char min_header_len; + unsigned char name_assign_type; + int group; + struct net_device_stats stats; + struct net_device_core_stats *core_stats; + atomic_t carrier_up_count; + atomic_t carrier_down_count; + const struct ethtool_ops *ethtool_ops; + const struct ndisc_ops *ndisc_ops; + unsigned int operstate; + unsigned char link_mode; + unsigned char if_port; + unsigned char dma; + unsigned char perm_addr[32]; + unsigned char addr_assign_type; + unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; + short unsigned int neigh_priv_len; + short unsigned int dev_id; + short unsigned int dev_port; + int irq; + u32 priv_len; + spinlock_t addr_list_lock; + struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + struct kset 
*queues_kset; + unsigned int promiscuity; + unsigned int allmulti; + bool uc_promisc; + struct in_device *ip_ptr; + struct hlist_head fib_nh_head; + const unsigned char *dev_addr; + unsigned int num_rx_queues; + unsigned int xdp_zc_max_segs; + struct netdev_queue *ingress_queue; + struct nf_hook_entries *nf_hooks_ingress; + unsigned char broadcast[32]; + struct cpu_rmap *rx_cpu_rmap; + struct hlist_node index_hlist; + unsigned int num_tx_queues; + struct Qdisc *qdisc; + unsigned int tx_queue_len; + spinlock_t tx_global_lock; + struct xdp_dev_bulk_queue *xdp_bulkq; + struct timer_list watchdog_timer; + int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; + int *pcpu_refcnt; + struct ref_tracker_dir refcnt_tracker; + struct list_head link_watch_list; + u8 reg_state; + bool dismantle; + enum { + RTNL_LINK_INITIALIZED = 0, + RTNL_LINK_INITIALIZING = 1, + } rtnl_link_state: 16; + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *); + void *ml_priv; + enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type: 8; + long: 32; + struct device dev; + const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_rx_queue_group; + const struct rtnl_link_ops *rtnl_link_ops; + const struct netdev_stat_ops *stat_ops; + const struct netdev_queue_mgmt_ops *queue_mgmt_ops; + unsigned int tso_max_size; + u16 tso_max_segs; + u8 prio_tc_map[16]; + struct phy_link_topology *link_topo; + struct phy_device *phydev; + struct sfp_bus *sfp_bus; + struct lock_class_key *qdisc_tx_busylock; + bool proto_down; + bool threaded; + long unsigned int see_all_hwtstamp_requests: 1; + long unsigned int change_proto_down: 1; + long unsigned int netns_local: 1; + long unsigned int fcoe_mtu: 1; + struct list_head net_notifier_list; + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; + struct ethtool_netdev_state *ethtool; + struct bpf_xdp_entity xdp_state[3]; + u8 dev_addr_shadow[32]; + netdevice_tracker linkwatch_dev_tracker; + netdevice_tracker watchdog_dev_tracker; + netdevice_tracker dev_registered_tracker; + struct rtnl_hw_stats64 *offload_xstats_l3; + struct devlink_port *devlink_port; + struct hlist_head page_pools; + struct dim_irq_moder *irq_moder; + u64 max_pacing_offload_horizon; + struct napi_config *napi_config; + long unsigned int gro_flush_timeout; + u32 napi_defer_hard_irqs; + struct mutex lock; + struct hlist_head neighbours[2]; + u8 priv[0]; +}; + +struct bpf_prog_stats { + u64_stats_t cnt; + u64_stats_t nsecs; + u64_stats_t misses; + struct u64_stats_sync syncp; + long: 32; +}; + +struct sock_fprog_kern { + u16 len; + struct sock_filter *filter; +}; + +struct bpf_link_ops; + +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + bool sleepable; + union { + struct callback_head rcu; + struct work_struct work; + }; + long: 32; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *); + void (*dealloc)(struct bpf_link *); + void (*dealloc_deferred)(struct bpf_link *); + int (*detach)(struct bpf_link *); + int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); + void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); + int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); + int (*update_map)(struct bpf_link *, struct bpf_map *, struct bpf_map *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); +}; + +struct bpf_link_primer { + struct 
bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + +typedef short unsigned int __kernel_sa_family_t; + +typedef __kernel_sa_family_t sa_family_t; + +struct sockaddr { + sa_family_t sa_family; + union { + char sa_data_min[14]; + struct { + struct {} __empty_sa_data; + char sa_data[0]; + }; + }; +}; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; +} sync_serial_settings; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; + unsigned int slot_map; +} te1_settings; + +typedef struct { + short unsigned int encoding; + short unsigned int parity; +} raw_hdlc_proto; + +typedef struct { + unsigned int t391; + unsigned int t392; + unsigned int n391; + unsigned int n392; + unsigned int n393; + short unsigned int lmi; + short unsigned int dce; +} fr_proto; + +typedef struct { + unsigned int dlci; +} fr_proto_pvc; + +typedef struct { + unsigned int dlci; + char master[16]; +} fr_proto_pvc_info; + +typedef struct { + unsigned int interval; + unsigned int timeout; +} cisco_proto; + +typedef struct { + short unsigned int dce; + unsigned int modulo; + unsigned int window; + unsigned int t1; + unsigned int t2; + unsigned int n2; +} x25_hdlc_proto; + +struct ifmap { + long unsigned int mem_start; + long unsigned int mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct if_settings { + unsigned int type; + unsigned int size; + union { + raw_hdlc_proto *raw_hdlc; + cisco_proto *cisco; + fr_proto *fr; + fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; + x25_hdlc_proto *x25; + sync_serial_settings *sync; + te1_settings *te1; + } ifs_ifsu; +}; + +struct ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + void *ifru_data; + struct if_settings ifru_settings; + } ifr_ifru; +}; + +struct tc_stats { + __u64 bytes; + __u32 packets; + __u32 drops; + __u32 overlimits; + __u32 bps; + __u32 pps; + __u32 qlen; + __u32 backlog; + long: 32; +}; + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short int cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +struct sk_buff_list { + struct sk_buff *next; + struct sk_buff *prev; +}; + +struct sk_buff_head { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + }; + struct sk_buff_list list; + }; + __u32 qlen; + spinlock_t lock; +}; + +struct skb_shared_hwtstamps { + union { + ktime_t hwtstamp; + void *netdev_data; + }; +}; + +struct dql { + unsigned int num_queued; + unsigned int adj_limit; + unsigned int last_obj_cnt; + short unsigned int stall_thrs; + long unsigned int history_head; + long unsigned int history[4]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + unsigned int limit; + unsigned int num_completed; + unsigned int prev_ovlimit; + unsigned int prev_num_queued; + unsigned int prev_last_obj_cnt; + unsigned int lowest_slack; + long unsigned int slack_start_time; + unsigned int max_limit; + unsigned int min_limit; + unsigned int slack_hold_time; + short unsigned int stall_max; + long unsigned int last_reap; + long unsigned int 
stall_cnt; + long: 32; + long: 32; + long: 32; +}; + +struct hh_cache { + unsigned int hh_len; + seqlock_t hh_lock; + long unsigned int hh_data[24]; +}; + +struct neigh_table; + +struct neigh_parms; + +struct neigh_ops; + +struct neighbour { + struct hlist_node hash; + struct hlist_node dev_list; + struct neigh_table *tbl; + struct neigh_parms *parms; + long unsigned int confirmed; + long unsigned int updated; + rwlock_t lock; + refcount_t refcnt; + unsigned int arp_queue_len_bytes; + struct sk_buff_head arp_queue; + struct timer_list timer; + long unsigned int used; + atomic_t probes; + u8 nud_state; + u8 type; + u8 dead; + u8 protocol; + u32 flags; + seqlock_t ha_lock; + unsigned char ha[32]; + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); + const struct neigh_ops *ops; + struct list_head gc_list; + struct list_head managed_list; + struct callback_head rcu; + struct net_device *dev; + netdevice_tracker dev_tracker; + u8 primary_key[0]; +}; + +struct pernet_operations { + struct list_head list; + int (*init)(struct net *); + void (*pre_exit)(struct net *); + void (*exit)(struct net *); + void (*exit_batch)(struct list_head *); + void (*exit_batch_rtnl)(struct list_head *, struct list_head *); + unsigned int * const id; + const size_t size; +}; + +struct ndmsg { + __u8 ndm_family; + __u8 ndm_pad1; + __u16 ndm_pad2; + __s32 ndm_ifindex; + __u16 ndm_state; + __u8 ndm_flags; + __u8 ndm_type; +}; + +struct rtnl_link_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; + __u64 collisions; + __u64 rx_length_errors; + __u64 rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; + __u64 tx_aborted_errors; + __u64 tx_carrier_errors; + __u64 tx_fifo_errors; + __u64 tx_heartbeat_errors; + __u64 tx_window_errors; + __u64 rx_compressed; + __u64 tx_compressed; + __u64 rx_nohandler; + __u64 rx_otherhost_dropped; +}; + +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; +}; + +struct ifla_vf_guid { + __u32 vf; + long: 32; + __u64 guid; +}; + +struct ifla_vf_stats { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 broadcast; + __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; +}; + +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 spoofchk; + __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; + __u32 rss_query_en; + __u32 trusted; + __be16 vlan_proto; +}; + +enum netdev_tx { + __NETDEV_TX_MIN = -2147483648, + NETDEV_TX_OK = 0, + NETDEV_TX_BUSY = 16, +}; + +typedef enum netdev_tx netdev_tx_t; + +struct net_device_core_stats { + long unsigned int rx_dropped; + long unsigned int tx_dropped; + long unsigned int rx_nohandler; + long unsigned int rx_otherhost_dropped; +}; + +struct header_ops { + int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int); + int (*parse)(const struct sk_buff *, unsigned char *); + int (*cache)(const struct neighbour *, struct hh_cache *, __be16); + void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); + bool (*validate)(const char *, unsigned int); + __be16 (*parse_protocol)(const struct sk_buff *); +}; + +struct gro_list { + struct list_head 
list; + int count; +}; + +struct napi_config { + u64 gro_flush_timeout; + u64 irq_suspend_timeout; + u32 defer_hard_irqs; + unsigned int napi_id; +}; + +struct napi_struct { + struct list_head poll_list; + long unsigned int state; + int weight; + u32 defer_hard_irqs_count; + long unsigned int gro_bitmask; + int (*poll)(struct napi_struct *, int); + int list_owner; + struct net_device *dev; + struct gro_list gro_hash[8]; + struct sk_buff *skb; + struct list_head rx_list; + int rx_count; + unsigned int napi_id; + struct hrtimer timer; + struct task_struct *thread; + long unsigned int gro_flush_timeout; + long unsigned int irq_suspend_timeout; + u32 defer_hard_irqs; + struct list_head dev_list; + struct hlist_node napi_hash_node; + int irq; + int index; + struct napi_config *config; + long: 32; +}; + +struct netdev_queue { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct Qdisc *qdisc; + struct Qdisc *qdisc_sleeping; + struct kobject kobj; + long unsigned int tx_maxrate; + atomic_long_t trans_timeout; + struct net_device *sb_dev; + long: 32; + struct dql dql; + spinlock_t _xmit_lock; + int xmit_lock_owner; + long unsigned int trans_start; + long unsigned int state; + struct napi_struct *napi; + long: 32; + long: 32; + long: 32; +}; + +struct qdisc_skb_head { + struct sk_buff *head; + struct sk_buff *tail; + __u32 qlen; + spinlock_t lock; +}; + +struct gnet_stats_basic_sync { + u64_stats_t bytes; + u64_stats_t packets; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; +}; + +struct gnet_stats_queue { + __u32 qlen; + __u32 backlog; + __u32 drops; + __u32 requeues; + __u32 overlimits; +}; + +struct Qdisc_ops; + +struct qdisc_size_table; + +struct net_rate_estimator; + +struct Qdisc { + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + unsigned int flags; + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table *stab; + struct hlist_node hash; + u32 handle; + u32 parent; + struct netdev_queue *dev_queue; + struct net_rate_estimator *rate_est; + struct gnet_stats_basic_sync *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + int pad; + refcount_t refcnt; + struct sk_buff_head gso_skb; + struct qdisc_skb_head q; + struct gnet_stats_basic_sync bstats; + struct gnet_stats_queue qstats; + int owner; + long unsigned int state; + long unsigned int state2; + struct Qdisc *next_sched; + struct sk_buff_head skb_bad_txq; + long: 32; + long: 32; + long: 32; + spinlock_t busylock; + spinlock_t seqlock; + struct callback_head rcu; + netdevice_tracker dev_tracker; + struct lock_class_key root_lock_key; + long: 32; + long: 32; + long: 32; + long: 32; + long int privdata[0]; +}; + +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct callback_head rcu; + u16 queues[0]; +}; + +struct xps_dev_maps { + struct callback_head rcu; + unsigned int nr_ids; + s16 num_tc; + struct xps_map *attr_map[0]; +}; + +struct netdev_phys_item_id { + unsigned char id[32]; + unsigned char id_len; +}; + +enum net_device_path_type { + DEV_PATH_ETHERNET = 0, + DEV_PATH_VLAN = 1, + DEV_PATH_BRIDGE = 2, + DEV_PATH_PPPOE = 3, + DEV_PATH_DSA = 4, + DEV_PATH_MTK_WDMA = 5, +}; + +struct net_device_path { + enum net_device_path_type type; + const struct net_device *dev; + union { + struct { + u16 id; + __be16 proto; + u8 h_dest[6]; + } encap; + struct { + enum { + DEV_PATH_BR_VLAN_KEEP = 0, + DEV_PATH_BR_VLAN_TAG = 1, + DEV_PATH_BR_VLAN_UNTAG = 2, + DEV_PATH_BR_VLAN_UNTAG_HW = 3, + } vlan_mode; + u16 vlan_id; + 
__be16 vlan_proto; + } bridge; + struct { + int port; + u16 proto; + } dsa; + struct { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; + u8 amsdu; + } mtk_wdma; + }; +}; + +struct net_device_path_ctx { + const struct net_device *dev; + u8 daddr[6]; + int num_vlans; + struct { + u16 id; + __be16 proto; + } vlan[2]; +}; + +enum tc_setup_type { + TC_QUERY_CAPS = 0, + TC_SETUP_QDISC_MQPRIO = 1, + TC_SETUP_CLSU32 = 2, + TC_SETUP_CLSFLOWER = 3, + TC_SETUP_CLSMATCHALL = 4, + TC_SETUP_CLSBPF = 5, + TC_SETUP_BLOCK = 6, + TC_SETUP_QDISC_CBS = 7, + TC_SETUP_QDISC_RED = 8, + TC_SETUP_QDISC_PRIO = 9, + TC_SETUP_QDISC_MQ = 10, + TC_SETUP_QDISC_ETF = 11, + TC_SETUP_ROOT_QDISC = 12, + TC_SETUP_QDISC_GRED = 13, + TC_SETUP_QDISC_TAPRIO = 14, + TC_SETUP_FT = 15, + TC_SETUP_QDISC_ETS = 16, + TC_SETUP_QDISC_TBF = 17, + TC_SETUP_QDISC_FIFO = 18, + TC_SETUP_QDISC_HTB = 19, + TC_SETUP_ACT = 20, +}; + +enum bpf_netdev_command { + XDP_SETUP_PROG = 0, + XDP_SETUP_PROG_HW = 1, + BPF_OFFLOAD_MAP_ALLOC = 2, + BPF_OFFLOAD_MAP_FREE = 3, + XDP_SETUP_XSK_POOL = 4, +}; + +struct xsk_buff_pool; + +struct netdev_bpf { + enum bpf_netdev_command command; + union { + struct { + u32 flags; + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; + struct { + struct bpf_offloaded_map *offmap; + }; + struct { + struct xsk_buff_pool *pool; + u16 queue_id; + } xsk; + }; +}; + +struct dev_ifalias { + struct callback_head rcuhead; + char ifalias[0]; +}; + +struct xdp_frame; + +struct xdp_buff; + +struct ip_tunnel_parm_kern; + +struct kernel_hwtstamp_config; + +struct net_device_ops { + int (*ndo_init)(struct net_device *); + void (*ndo_uninit)(struct net_device *); + int (*ndo_open)(struct net_device *); + int (*ndo_stop)(struct net_device *); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); + netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t); + u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *); + void (*ndo_change_rx_flags)(struct net_device *, int); + void (*ndo_set_rx_mode)(struct net_device *); + int (*ndo_set_mac_address)(struct net_device *, void *); + int (*ndo_validate_addr)(struct net_device *); + int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_eth_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_siocbond)(struct net_device *, struct ifreq *, int); + int (*ndo_siocwandev)(struct net_device *, struct if_settings *); + int (*ndo_siocdevprivate)(struct net_device *, struct ifreq *, void *, int); + int (*ndo_set_config)(struct net_device *, struct ifmap *); + int (*ndo_change_mtu)(struct net_device *, int); + int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); + void (*ndo_tx_timeout)(struct net_device *, unsigned int); + void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); + bool (*ndo_has_offload_stats)(const struct net_device *, int); + int (*ndo_get_offload_stats)(int, const struct net_device *, void *); + struct net_device_stats * (*ndo_get_stats)(struct net_device *); + int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); + int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); + int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); + int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); + int (*ndo_set_vf_rate)(struct net_device *, int, int, int); + int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); + int (*ndo_set_vf_trust)(struct net_device *, int, bool); + int (*ndo_get_vf_config)(struct 
net_device *, int, struct ifla_vf_info *); + int (*ndo_set_vf_link_state)(struct net_device *, int, int); + int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); + int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); + int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); + int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, struct ifla_vf_guid *); + int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); + int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); + int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); + int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); + int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *); + int (*ndo_del_slave)(struct net_device *, struct net_device *); + struct net_device * (*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool); + struct net_device * (*ndo_sk_get_lower_dev)(struct net_device *, struct sock *); + netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); + int (*ndo_set_features)(struct net_device *, netdev_features_t); + int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); + void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); + int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16, bool *, struct netlink_ext_ack *); + int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, bool *, struct netlink_ext_ack *); + int (*ndo_fdb_del_bulk)(struct nlmsghdr *, struct net_device *, struct netlink_ext_ack *); + int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); + int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, const unsigned char *, u16, u32, u32, struct netlink_ext_ack *); + int (*ndo_mdb_add)(struct net_device *, struct nlattr **, u16, struct netlink_ext_ack *); + int (*ndo_mdb_del)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); + int (*ndo_mdb_del_bulk)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); + int (*ndo_mdb_dump)(struct net_device *, struct sk_buff *, struct netlink_callback *); + int (*ndo_mdb_get)(struct net_device *, struct nlattr **, u32, u32, struct netlink_ext_ack *); + int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, struct netlink_ext_ack *); + int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); + int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); + int (*ndo_change_carrier)(struct net_device *, bool); + int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); + void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); + void (*ndo_dfwd_del_station)(struct net_device *, void *); + int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); + int (*ndo_get_iflink)(const struct net_device *); + int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); + void (*ndo_set_rx_headroom)(struct net_device *, int); + int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); + int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); + struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *, 
struct xdp_buff *); + int (*ndo_xsk_wakeup)(struct net_device *, u32, u32); + int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm_kern *, int); + struct net_device * (*ndo_get_peer_dev)(struct net_device *); + int (*ndo_fill_forward_path)(struct net_device_path_ctx *, struct net_device_path *); + ktime_t (*ndo_get_tstamp)(struct net_device *, const struct skb_shared_hwtstamps *, bool); + int (*ndo_hwtstamp_get)(struct net_device *, struct kernel_hwtstamp_config *); + int (*ndo_hwtstamp_set)(struct net_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); +}; + +struct pcpu_lstats { + u64_stats_t packets; + u64_stats_t bytes; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; +}; + +struct pcpu_sw_netstats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct pcpu_dstats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_drops; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_drops; + struct u64_stats_sync syncp; + long: 32; + long: 32; + long: 32; +}; + +enum xdp_rss_hash_type { + XDP_RSS_L3_IPV4 = 1, + XDP_RSS_L3_IPV6 = 2, + XDP_RSS_L3_DYNHDR = 4, + XDP_RSS_L4 = 8, + XDP_RSS_L4_TCP = 16, + XDP_RSS_L4_UDP = 32, + XDP_RSS_L4_SCTP = 64, + XDP_RSS_L4_IPSEC = 128, + XDP_RSS_L4_ICMP = 256, + XDP_RSS_TYPE_NONE = 0, + XDP_RSS_TYPE_L2 = 0, + XDP_RSS_TYPE_L3_IPV4 = 1, + XDP_RSS_TYPE_L3_IPV6 = 2, + XDP_RSS_TYPE_L3_IPV4_OPT = 5, + XDP_RSS_TYPE_L3_IPV6_EX = 6, + XDP_RSS_TYPE_L4_ANY = 8, + XDP_RSS_TYPE_L4_IPV4_TCP = 25, + XDP_RSS_TYPE_L4_IPV4_UDP = 41, + XDP_RSS_TYPE_L4_IPV4_SCTP = 73, + XDP_RSS_TYPE_L4_IPV4_IPSEC = 137, + XDP_RSS_TYPE_L4_IPV4_ICMP = 265, + XDP_RSS_TYPE_L4_IPV6_TCP = 26, + XDP_RSS_TYPE_L4_IPV6_UDP = 42, + XDP_RSS_TYPE_L4_IPV6_SCTP = 74, + XDP_RSS_TYPE_L4_IPV6_IPSEC = 138, + XDP_RSS_TYPE_L4_IPV6_ICMP = 266, + XDP_RSS_TYPE_L4_IPV6_TCP_EX = 30, + XDP_RSS_TYPE_L4_IPV6_UDP_EX = 46, + XDP_RSS_TYPE_L4_IPV6_SCTP_EX = 78, +}; + +struct xdp_md; + +struct xdp_metadata_ops { + int (*xmo_rx_timestamp)(const struct xdp_md *, u64 *); + int (*xmo_rx_hash)(const struct xdp_md *, u32 *, enum xdp_rss_hash_type *); + int (*xmo_rx_vlan_tag)(const struct xdp_md *, __be16 *, u16 *); +}; + +struct xsk_tx_metadata_ops { + void (*tmo_request_timestamp)(void *); + u64 (*tmo_fill_timestamp)(void *); + void (*tmo_request_checksum)(u16, u16, void *); +}; + +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE = 0, + ETHTOOL_ID_ACTIVE = 1, + ETHTOOL_ID_ON = 2, + ETHTOOL_ID_OFF = 3, +}; + +struct ethtool_drvinfo; + +struct ethtool_regs; + +struct ethtool_wolinfo; + +struct ethtool_link_ext_state_info; + +struct ethtool_link_ext_stats; + +struct ethtool_eeprom; + +struct ethtool_coalesce; + +struct kernel_ethtool_coalesce; + +struct ethtool_ringparam; + +struct kernel_ethtool_ringparam; + +struct ethtool_pause_stats; + +struct ethtool_pauseparam; + +struct ethtool_test; + +struct ethtool_stats; + +struct ethtool_rxnfc; + +struct ethtool_flash; + +struct ethtool_rxfh_param; + +struct ethtool_rxfh_context; + +struct ethtool_channels; + +struct ethtool_dump; + +struct kernel_ethtool_ts_info; + +struct ethtool_ts_stats; + +struct ethtool_modinfo; + +struct ethtool_keee; + +struct ethtool_tunable; + +struct ethtool_link_ksettings; + +struct ethtool_fec_stats; + +struct ethtool_fecparam; + +struct ethtool_module_eeprom; + +struct ethtool_eth_phy_stats; + +struct 
ethtool_eth_mac_stats; + +struct ethtool_eth_ctrl_stats; + +struct ethtool_rmon_stats; + +struct ethtool_rmon_hist_range; + +struct ethtool_module_power_mode_params; + +struct ethtool_mm_state; + +struct ethtool_mm_cfg; + +struct ethtool_mm_stats; + +struct ethtool_ops { + u32 cap_link_lanes_supported: 1; + u32 cap_rss_ctx_supported: 1; + u32 cap_rss_sym_xor_supported: 1; + u32 rxfh_per_ctx_key: 1; + u32 cap_rss_rxnfc_adds: 1; + u32 rxfh_indir_space; + u16 rxfh_key_space; + u16 rxfh_priv_size; + u32 rxfh_max_num_contexts; + u32 supported_coalesce_params; + u32 supported_ring_params; + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, struct ethtool_link_ext_state_info *); + void (*get_link_ext_stats)(struct net_device *, struct ethtool_link_ext_stats *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); + void (*get_pause_stats)(struct net_device *, struct ethtool_pause_stats *); + void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct net_device *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *); + int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*create_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*modify_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); + int (*remove_rxfh_context)(struct 
net_device *, struct ethtool_rxfh_context *, u32, struct netlink_ext_ack *); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *); + void (*get_ts_stats)(struct net_device *, struct ethtool_ts_stats *); + int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_eee)(struct net_device *, struct ethtool_keee *); + int (*set_eee)(struct net_device *, struct ethtool_keee *); + int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); + void (*get_fec_stats)(struct net_device *, struct ethtool_fec_stats *); + int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); + void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*get_phy_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); + int (*set_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); + void (*get_eth_phy_stats)(struct net_device *, struct ethtool_eth_phy_stats *); + void (*get_eth_mac_stats)(struct net_device *, struct ethtool_eth_mac_stats *); + void (*get_eth_ctrl_stats)(struct net_device *, struct ethtool_eth_ctrl_stats *); + void (*get_rmon_stats)(struct net_device *, struct ethtool_rmon_stats *, const struct ethtool_rmon_hist_range **); + int (*get_module_power_mode)(struct net_device *, struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); + int (*set_module_power_mode)(struct net_device *, const struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); + int (*get_mm)(struct net_device *, struct ethtool_mm_state *); + int (*set_mm)(struct net_device *, struct ethtool_mm_cfg *, struct netlink_ext_ack *); + void (*get_mm_stats)(struct net_device *, struct ethtool_mm_stats *); +}; + +struct nd_opt_hdr; + +struct ndisc_options; + +struct prefix_info; + +struct ndisc_ops { + int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); + void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *); + int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); + void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); + void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, 
bool, __u32, u32, bool); +}; + +struct rtnl_link_ops { + struct list_head list; + struct srcu_struct srcu; + const char *kind; + size_t priv_size; + struct net_device * (*alloc)(struct nlattr **, const char *, unsigned char, unsigned int, unsigned int); + void (*setup)(struct net_device *); + bool netns_refund; + const u16 peer_type; + unsigned int maxtype; + const struct nla_policy *policy; + int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + void (*dellink)(struct net_device *, struct list_head *); + size_t (*get_size)(const struct net_device *); + int (*fill_info)(struct sk_buff *, const struct net_device *); + size_t (*get_xstats_size)(const struct net_device *); + int (*fill_xstats)(struct sk_buff *, const struct net_device *); + unsigned int (*get_num_tx_queues)(); + unsigned int (*get_num_rx_queues)(); + unsigned int slave_maxtype; + const struct nla_policy *slave_policy; + int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + size_t (*get_slave_size)(const struct net_device *, const struct net_device *); + int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); + struct net * (*get_link_net)(const struct net_device *); + size_t (*get_linkxstats_size)(const struct net_device *, int); + int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); +}; + +struct netdev_queue_stats_rx; + +struct netdev_queue_stats_tx; + +struct netdev_stat_ops { + void (*get_queue_stats_rx)(struct net_device *, int, struct netdev_queue_stats_rx *); + void (*get_queue_stats_tx)(struct net_device *, int, struct netdev_queue_stats_tx *); + void (*get_base_stats)(struct net_device *, struct netdev_queue_stats_rx *, struct netdev_queue_stats_tx *); +}; + +struct netdev_queue_mgmt_ops { + size_t ndo_queue_mem_size; + int (*ndo_queue_mem_alloc)(struct net_device *, void *, int); + void (*ndo_queue_mem_free)(struct net_device *, void *); + int (*ndo_queue_start)(struct net_device *, void *, int); + int (*ndo_queue_stop)(struct net_device *, void *, int); +}; + +struct udp_tunnel_nic_table_info { + unsigned int n_entries; + unsigned int tunnel_types; +}; + +struct udp_tunnel_info; + +struct udp_tunnel_nic_shared; + +struct udp_tunnel_nic_info { + int (*set_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*unset_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*sync_table)(struct net_device *, unsigned int); + struct udp_tunnel_nic_shared *shared; + unsigned int flags; + struct udp_tunnel_nic_table_info tables[4]; +}; + +struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + short unsigned int tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + __u32 tcm_info; +}; + +struct gnet_dump { + spinlock_t *lock; + struct sk_buff *skb; + struct nlattr *tail; + int compat_tc_stats; + int compat_xstats; + int padattr; + void *xstats; + int xstats_len; + struct tc_stats tc_stats; +}; + +enum flow_action_hw_stats_bit { + FLOW_ACTION_HW_STATS_IMMEDIATE_BIT = 0, + FLOW_ACTION_HW_STATS_DELAYED_BIT = 1, + FLOW_ACTION_HW_STATS_DISABLED_BIT = 2, + FLOW_ACTION_HW_STATS_NUM_BITS = 3, +}; + +struct flow_block { + struct list_head cb_list; 
+}; + +typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *); + +struct qdisc_size_table { + struct callback_head rcu; + struct list_head list; + struct tc_sizespec szopts; + int refcnt; + u16 data[0]; +}; + +struct Qdisc_class_ops; + +struct Qdisc_ops { + struct Qdisc_ops *next; + const struct Qdisc_class_ops *cl_ops; + char id[16]; + int priv_size; + unsigned int static_flags; + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + struct sk_buff * (*peek)(struct Qdisc *); + int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*reset)(struct Qdisc *); + void (*destroy)(struct Qdisc *); + int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*attach)(struct Qdisc *); + int (*change_tx_queue_len)(struct Qdisc *, unsigned int); + void (*change_real_num_tx)(struct Qdisc *, unsigned int); + int (*dump)(struct Qdisc *, struct sk_buff *); + int (*dump_stats)(struct Qdisc *, struct gnet_dump *); + void (*ingress_block_set)(struct Qdisc *, u32); + void (*egress_block_set)(struct Qdisc *, u32); + u32 (*ingress_block_get)(struct Qdisc *); + u32 (*egress_block_get)(struct Qdisc *); + struct module *owner; +}; + +struct qdisc_walker; + +struct tcf_block; + +struct Qdisc_class_ops { + unsigned int flags; + struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); + int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *); + struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int); + void (*qlen_notify)(struct Qdisc *, long unsigned int); + long unsigned int (*find)(struct Qdisc *, u32); + int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *); + int (*delete)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + void (*walk)(struct Qdisc *, struct qdisc_walker *); + struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32); + void (*unbind_tcf)(struct Qdisc *, long unsigned int); + int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *); + int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *); +}; + +struct tcf_chain; + +struct tcf_block { + struct xarray ports; + struct mutex lock; + struct list_head chain_list; + u32 index; + u32 classid; + refcount_t refcnt; + struct net *net; + struct Qdisc *q; + struct rw_semaphore cb_lock; + struct flow_block flow_block; + struct list_head owner_list; + bool keep_dst; + bool bypass_wanted; + atomic_t filtercnt; + atomic_t skipswcnt; + atomic_t offloadcnt; + unsigned int nooffloaddevcnt; + unsigned int lockeddevcnt; + struct { + struct tcf_chain *chain; + struct list_head filter_chain_list; + } chain0; + struct callback_head rcu; + struct hlist_head proto_destroy_ht[128]; + struct mutex proto_destroy_lock; +}; + +struct tcf_result; + +struct tcf_proto_ops; + +struct tcf_proto { + struct tcf_proto *next; + void *root; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + __be16 protocol; + u32 prio; + void *data; + const struct tcf_proto_ops *ops; + struct tcf_chain *chain; + spinlock_t lock; + bool deleting; + bool counted; + refcount_t refcnt; + struct callback_head rcu; + struct hlist_node destroy_ht_node; +}; + +struct tcf_result { + union { + struct { + long unsigned int class; + u32 classid; + }; + const struct tcf_proto 
*goto_tp; + }; +}; + +struct tcf_walker; + +struct tcf_exts; + +struct tcf_proto_ops { + struct list_head head; + char kind[16]; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + int (*init)(struct tcf_proto *); + void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *); + void * (*get)(struct tcf_proto *, u32); + void (*put)(struct tcf_proto *, void *); + int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, u32, struct netlink_ext_ack *); + int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *); + void (*walk)(struct tcf_proto *, struct tcf_walker *, bool); + int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, struct netlink_ext_ack *); + void (*hw_add)(struct tcf_proto *, void *); + void (*hw_del)(struct tcf_proto *, void *); + void (*bind_class)(void *, u32, long unsigned int, void *, long unsigned int); + void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *); + void (*tmplt_destroy)(void *); + void (*tmplt_reoffload)(struct tcf_chain *, bool, flow_setup_cb_t *, void *); + struct tcf_exts * (*get_exts)(const struct tcf_proto *, u32); + int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*tmplt_dump)(struct sk_buff *, struct net *, void *); + struct module *owner; + int flags; +}; + +struct tcf_chain { + struct mutex filter_chain_lock; + struct tcf_proto *filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; + unsigned int refcnt; + unsigned int action_refcnt; + bool explicitly_created; + bool flushing; + const struct tcf_proto_ops *tmplt_ops; + void *tmplt_priv; + struct callback_head rcu; +}; + +struct bpf_netns_link { + struct bpf_link link; + enum bpf_attach_type type; + enum netns_bpf_attach_type netns_type; + struct net *net; + struct list_head node; + long: 32; +}; + +enum node_stat_item { + NR_LRU_BASE = 0, + NR_INACTIVE_ANON = 0, + NR_ACTIVE_ANON = 1, + NR_INACTIVE_FILE = 2, + NR_ACTIVE_FILE = 3, + NR_UNEVICTABLE = 4, + NR_SLAB_RECLAIMABLE_B = 5, + NR_SLAB_UNRECLAIMABLE_B = 6, + NR_ISOLATED_ANON = 7, + NR_ISOLATED_FILE = 8, + WORKINGSET_NODES = 9, + WORKINGSET_REFAULT_BASE = 10, + WORKINGSET_REFAULT_ANON = 10, + WORKINGSET_REFAULT_FILE = 11, + WORKINGSET_ACTIVATE_BASE = 12, + WORKINGSET_ACTIVATE_ANON = 12, + WORKINGSET_ACTIVATE_FILE = 13, + WORKINGSET_RESTORE_BASE = 14, + WORKINGSET_RESTORE_ANON = 14, + WORKINGSET_RESTORE_FILE = 15, + WORKINGSET_NODERECLAIM = 16, + NR_ANON_MAPPED = 17, + NR_FILE_MAPPED = 18, + NR_FILE_PAGES = 19, + NR_FILE_DIRTY = 20, + NR_WRITEBACK = 21, + NR_WRITEBACK_TEMP = 22, + NR_SHMEM = 23, + NR_SHMEM_THPS = 24, + NR_SHMEM_PMDMAPPED = 25, + NR_FILE_THPS = 26, + NR_FILE_PMDMAPPED = 27, + NR_ANON_THPS = 28, + NR_VMSCAN_WRITE = 29, + NR_VMSCAN_IMMEDIATE = 30, + NR_DIRTIED = 31, + NR_WRITTEN = 32, + NR_THROTTLED_WRITTEN = 33, + NR_KERNEL_MISC_RECLAIMABLE = 34, + NR_FOLL_PIN_ACQUIRED = 35, + NR_FOLL_PIN_RELEASED = 36, + NR_KERNEL_STACK_KB = 37, + NR_PAGETABLE = 38, + NR_SECONDARY_PAGETABLE = 39, + NR_IOMMU_PAGES = 40, + NR_SWAPCACHE = 41, + PGDEMOTE_KSWAPD = 42, + PGDEMOTE_DIRECT = 43, + PGDEMOTE_KHUGEPAGED = 44, + NR_VM_NODE_STAT_ITEMS = 45, +}; + +enum lru_list { + LRU_INACTIVE_ANON = 0, + LRU_ACTIVE_ANON = 1, + LRU_INACTIVE_FILE = 2, + 
LRU_ACTIVE_FILE = 3, + LRU_UNEVICTABLE = 4, + NR_LRU_LISTS = 5, +}; + +enum wb_stat_item { + WB_RECLAIMABLE = 0, + WB_WRITEBACK = 1, + WB_DIRTIED = 2, + WB_WRITTEN = 3, + NR_WB_STAT_ITEMS = 4, +}; + +enum memcg_memory_event { + MEMCG_LOW = 0, + MEMCG_HIGH = 1, + MEMCG_MAX = 2, + MEMCG_OOM = 3, + MEMCG_OOM_KILL = 4, + MEMCG_OOM_GROUP_KILL = 5, + MEMCG_SWAP_HIGH = 6, + MEMCG_SWAP_MAX = 7, + MEMCG_SWAP_FAIL = 8, + MEMCG_NR_MEMORY_EVENTS = 9, +}; + +enum page_memcg_data_flags { + MEMCG_DATA_OBJEXTS = 1, + MEMCG_DATA_KMEM = 2, + __NR_MEMCG_DATA_FLAGS = 4, +}; + +enum { + FOLL_WRITE = 1, + FOLL_GET = 2, + FOLL_DUMP = 4, + FOLL_FORCE = 8, + FOLL_NOWAIT = 16, + FOLL_NOFAULT = 32, + FOLL_HWPOISON = 64, + FOLL_ANON = 128, + FOLL_LONGTERM = 256, + FOLL_SPLIT_PMD = 512, + FOLL_PCI_P2PDMA = 1024, + FOLL_INTERRUPTIBLE = 2048, + FOLL_HONOR_NUMA_FAULT = 4096, +}; + +struct compact_control; + +struct capture_control { + struct compact_control *cc; + struct page *page; +}; + +enum execmem_type { + EXECMEM_DEFAULT = 0, + EXECMEM_MODULE_TEXT = 0, + EXECMEM_KPROBES = 1, + EXECMEM_FTRACE = 2, + EXECMEM_BPF = 3, + EXECMEM_MODULE_DATA = 4, + EXECMEM_TYPE_MAX = 5, +}; + +enum execmem_range_flags { + EXECMEM_KASAN_SHADOW = 1, + EXECMEM_ROX_CACHE = 2, +}; + +struct execmem_range { + long unsigned int start; + long unsigned int end; + long unsigned int fallback_start; + long unsigned int fallback_end; + pgprot_t pgprot; + unsigned int alignment; + enum execmem_range_flags flags; +}; + +struct execmem_info { + struct execmem_range ranges[5]; +}; + +struct compact_control { + struct list_head freepages[11]; + struct list_head migratepages; + unsigned int nr_freepages; + unsigned int nr_migratepages; + long unsigned int free_pfn; + long unsigned int migrate_pfn; + long unsigned int fast_start_pfn; + struct zone *zone; + long unsigned int total_migrate_scanned; + long unsigned int total_free_scanned; + short unsigned int fast_search_fail; + short int search_order; + const gfp_t gfp_mask; + int order; + int migratetype; + const unsigned int alloc_flags; + const int highest_zoneidx; + enum migrate_mode mode; + bool ignore_skip_hint; + bool no_set_skip_hint; + bool ignore_block_suitable; + bool direct_compaction; + bool proactive_compaction; + bool whole_zone; + bool contended; + bool finish_pageblock; + bool alloc_contig; +}; + +enum work_bits { + WORK_STRUCT_PENDING_BIT = 0, + WORK_STRUCT_INACTIVE_BIT = 1, + WORK_STRUCT_PWQ_BIT = 2, + WORK_STRUCT_LINKED_BIT = 3, + WORK_STRUCT_FLAG_BITS = 4, + WORK_STRUCT_COLOR_SHIFT = 4, + WORK_STRUCT_COLOR_BITS = 4, + WORK_STRUCT_PWQ_SHIFT = 8, + WORK_OFFQ_FLAG_SHIFT = 4, + WORK_OFFQ_BH_BIT = 4, + WORK_OFFQ_FLAG_END = 5, + WORK_OFFQ_FLAG_BITS = 1, + WORK_OFFQ_DISABLE_SHIFT = 5, + WORK_OFFQ_DISABLE_BITS = 16, + WORK_OFFQ_POOL_SHIFT = 21, + WORK_OFFQ_LEFT = 11, + WORK_OFFQ_POOL_BITS = 11, +}; + +enum wq_misc_consts { + WORK_NR_COLORS = 16, + WORK_CPU_UNBOUND = 32, + WORK_BUSY_PENDING = 1, + WORK_BUSY_RUNNING = 2, + WORKER_DESC_LEN = 32, +}; + +struct request; + +struct rq_list { + struct request *head; + struct request *tail; +}; + +struct blk_plug { + struct rq_list mq_list; + struct rq_list cached_rqs; + u64 cur_ktime; + short unsigned int nr_ios; + short unsigned int rq_count; + bool multiple_queues; + bool has_elevator; + struct list_head cb_list; +}; + +typedef unsigned int blk_mode_t; + +struct block_device_operations; + +struct timer_rand_state; + +struct disk_events; + +struct badblocks; + +struct blk_independent_access_ranges; + +struct gendisk { + int major; + int first_minor; + 
int minors; + char disk_name[32]; + short unsigned int events; + short unsigned int event_flags; + struct xarray part_tbl; + struct block_device *part0; + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + struct bio_set bio_split; + int flags; + long unsigned int state; + struct mutex open_mutex; + unsigned int open_partitions; + struct backing_dev_info *bdi; + struct kobject queue_kobj; + struct kobject *slave_dir; + struct list_head slave_bdevs; + struct timer_rand_state *random; + atomic_t sync_io; + struct disk_events *ev; + int node_id; + struct badblocks *bb; + struct lockdep_map lockdep_map; + long: 32; + u64 diskseq; + blk_mode_t open_mode; + struct blk_independent_access_ranges *ia_ranges; +}; + +typedef unsigned int blk_features_t; + +typedef unsigned int blk_flags_t; + +enum blk_integrity_checksum { + BLK_INTEGRITY_CSUM_NONE = 0, + BLK_INTEGRITY_CSUM_IP = 1, + BLK_INTEGRITY_CSUM_CRC = 2, + BLK_INTEGRITY_CSUM_CRC64 = 3, +} __attribute__((mode(byte))); + +struct blk_integrity { + unsigned char flags; + enum blk_integrity_checksum csum_type; + unsigned char tuple_size; + unsigned char pi_offset; + unsigned char interval_exp; + unsigned char tag_size; +}; + +struct queue_limits { + blk_features_t features; + blk_flags_t flags; + long unsigned int seg_boundary_mask; + long unsigned int virt_boundary_mask; + unsigned int max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_user_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_user_discard_sectors; + unsigned int max_secure_erase_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int max_hw_zone_append_sectors; + unsigned int max_zone_append_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + unsigned int zone_write_granularity; + unsigned int atomic_write_hw_max; + unsigned int atomic_write_max_sectors; + unsigned int atomic_write_hw_boundary; + unsigned int atomic_write_boundary_sectors; + unsigned int atomic_write_hw_unit_min; + unsigned int atomic_write_unit_min; + unsigned int atomic_write_hw_unit_max; + unsigned int atomic_write_unit_max; + short unsigned int max_segments; + short unsigned int max_integrity_segments; + short unsigned int max_discard_segments; + unsigned int max_open_zones; + unsigned int max_active_zones; + unsigned int dma_alignment; + unsigned int dma_pad_mask; + struct blk_integrity integrity; +}; + +struct elevator_queue; + +struct blk_mq_ops; + +struct blk_mq_ctx; + +struct blk_queue_stats; + +struct rq_qos; + +struct blk_mq_tags; + +struct blk_trace; + +struct blk_flush_queue; + +struct throtl_data; + +struct blk_mq_tag_set; + +struct request_queue { + void *queuedata; + struct elevator_queue *elevator; + const struct blk_mq_ops *mq_ops; + struct blk_mq_ctx *queue_ctx; + long unsigned int queue_flags; + unsigned int rq_timeout; + unsigned int queue_depth; + refcount_t refs; + unsigned int nr_hw_queues; + struct xarray hctx_table; + struct percpu_ref q_usage_counter; + struct lock_class_key io_lock_cls_key; + struct lockdep_map io_lockdep_map; + struct lock_class_key q_lock_cls_key; + struct lockdep_map q_lockdep_map; + struct request *last_merge; + spinlock_t queue_lock; + int quiesce_depth; + struct gendisk *disk; + 
struct kobject *mq_kobj; + struct queue_limits limits; + struct device *dev; + enum rpm_status rpm_status; + atomic_t pm_only; + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + struct mutex rq_qos_mutex; + int id; + long unsigned int nr_requests; + struct timer_list timeout; + struct work_struct timeout_work; + atomic_t nr_active_requests_shared_tags; + struct blk_mq_tags *sched_shared_tags; + struct list_head icq_list; + long unsigned int blkcg_pols[1]; + struct blkcg_gq *root_blkg; + struct list_head blkg_list; + struct mutex blkcg_mutex; + int node; + spinlock_t requeue_lock; + struct list_head requeue_list; + struct delayed_work requeue_work; + struct blk_trace *blk_trace; + struct blk_flush_queue *fq; + struct list_head flush_list; + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + struct mutex limits_lock; + struct list_head unused_hctx_list; + spinlock_t unused_hctx_lock; + int mq_freeze_depth; + struct throtl_data *td; + struct callback_head callback_head; + wait_queue_head_t mq_freeze_wq; + struct mutex mq_freeze_lock; + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct dentry *rqos_debugfs_dir; + struct mutex debugfs_mutex; + bool mq_sysfs_init_done; +}; + +struct io_comp_batch { + struct rq_list req_list; + bool need_ts; + void (*complete)(struct io_comp_batch *); +}; + +struct partition_meta_info { + char uuid[37]; + u8 volname[64]; +}; + +struct blk_zone { + __u64 start; + __u64 len; + __u64 wp; + __u8 type; + __u8 cond; + __u8 non_seq; + __u8 reset; + __u8 resv[4]; + __u64 capacity; + __u8 reserved[24]; +}; + +typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *); + +enum blk_unique_id { + BLK_UID_T10 = 1, + BLK_UID_EUI64 = 2, + BLK_UID_NAA = 3, +}; + +struct hd_geometry; + +struct pr_ops; + +struct block_device_operations { + void (*submit_bio)(struct bio *); + int (*poll_bio)(struct bio *, struct io_comp_batch *, unsigned int); + int (*open)(struct gendisk *, blk_mode_t); + void (*release)(struct gendisk *); + int (*ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); + int (*compat_ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); + unsigned int (*check_events)(struct gendisk *, unsigned int); + void (*unlock_native_capacity)(struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + int (*set_read_only)(struct block_device *, bool); + void (*free_disk)(struct gendisk *); + void (*swap_slot_free_notify)(struct block_device *, long unsigned int); + int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *); + char * (*devnode)(struct gendisk *, umode_t *); + int (*get_unique_id)(struct gendisk *, u8 *, enum blk_unique_id); + struct module *owner; + const struct pr_ops *pr_ops; + int (*alternative_gpt_sector)(struct gendisk *, sector_t *); +}; + +struct blk_independent_access_range { + struct kobject kobj; + long: 32; + sector_t sector; + sector_t nr_sectors; +}; + +struct blk_independent_access_ranges { + struct kobject kobj; + bool sysfs_registered; + unsigned int nr_ia_ranges; + long: 32; + struct blk_independent_access_range ia_range[0]; +}; + +enum blk_eh_timer_return { + BLK_EH_DONE = 0, + BLK_EH_RESET_TIMER = 1, +}; + +struct blk_mq_hw_ctx; + +struct blk_mq_queue_data; + +struct blk_mq_ops { + blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); + void (*commit_rqs)(struct blk_mq_hw_ctx *); + void 
(*queue_rqs)(struct rq_list *); + int (*get_budget)(struct request_queue *); + void (*put_budget)(struct request_queue *, int); + void (*set_rq_budget_token)(struct request *, int); + int (*get_rq_budget_token)(struct request *); + enum blk_eh_timer_return (*timeout)(struct request *); + int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); + void (*complete)(struct request *); + int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + int (*init_request)(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int); + void (*exit_request)(struct blk_mq_tag_set *, struct request *, unsigned int); + void (*cleanup_rq)(struct request *); + bool (*busy)(struct request_queue *); + void (*map_queues)(struct blk_mq_tag_set *); + void (*show_rq)(struct seq_file *, struct request *); +}; + +enum pr_type { + PR_WRITE_EXCLUSIVE = 1, + PR_EXCLUSIVE_ACCESS = 2, + PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +struct pr_keys; + +struct pr_held_reservation; + +struct pr_ops { + int (*pr_register)(struct block_device *, u64, u64, u32); + int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); + int (*pr_release)(struct block_device *, u64, enum pr_type); + int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); + int (*pr_clear)(struct block_device *, u64); + int (*pr_read_keys)(struct block_device *, struct pr_keys *); + int (*pr_read_reservation)(struct block_device *, struct pr_held_reservation *); +}; + +typedef __kernel_long_t __kernel_off_t; + +typedef __kernel_off_t off_t; + +struct srcu_notifier_head { + struct mutex mutex; + struct srcu_usage srcuu; + struct srcu_struct srcu; + struct notifier_block *head; +}; + +struct fdtable { + unsigned int max_fds; + struct file **fd; + long unsigned int *close_on_exec; + long unsigned int *open_fds; + long unsigned int *full_fds_bits; + struct callback_head rcu; +}; + +struct files_struct { + atomic_t count; + bool resize_in_progress; + wait_queue_head_t resize_wait; + struct fdtable *fdt; + struct fdtable fdtab; + long: 32; + long: 32; + long: 32; + spinlock_t file_lock; + unsigned int next_fd; + long unsigned int close_on_exec_init[1]; + long unsigned int open_fds_init[1]; + long unsigned int full_fds_bits_init[1]; + struct file *fd_array[32]; + long: 32; + long: 32; + long: 32; +}; + +struct flock { + short int l_type; + short int l_whence; + __kernel_off_t l_start; + __kernel_off_t l_len; + __kernel_pid_t l_pid; +}; + +struct flock64 { + short int l_type; + short int l_whence; + long: 32; + __kernel_loff_t l_start; + __kernel_loff_t l_len; + __kernel_pid_t l_pid; + long: 32; +}; + +struct file_lock_context { + spinlock_t flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; + +struct file_lock_core { + struct file_lock_core *flc_blocker; + struct list_head flc_list; + struct hlist_node flc_link; + struct list_head flc_blocked_requests; + struct list_head flc_blocked_member; + fl_owner_t flc_owner; + unsigned int flc_flags; + unsigned char flc_type; + pid_t flc_pid; + int flc_link_cpu; + wait_queue_head_t flc_wait; + struct file *flc_file; +}; + +struct nlm_lockowner; + +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; + +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + +struct file_lock_operations; + +struct 
lock_manager_operations; + +struct file_lock { + struct file_lock_core c; + loff_t fl_start; + loff_t fl_end; + const struct file_lock_operations *fl_ops; + const struct lock_manager_operations *fl_lmops; + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; + int state; + unsigned int debug_id; + } afs; + struct { + struct inode *inode; + } ceph; + } fl_u; +}; + +struct lease_manager_operations; + +struct file_lease { + struct file_lock_core c; + struct fasync_struct *fl_fasync; + long unsigned int fl_break_time; + long unsigned int fl_downgrade_time; + const struct lease_manager_operations *fl_lmops; +}; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct lock_manager_operations { + void *lm_mod_owner; + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); + void (*lm_notify)(struct file_lock *); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_lock_expirable)(struct file_lock *); + void (*lm_expire_lock)(); +}; + +struct lease_manager_operations { + bool (*lm_break)(struct file_lease *); + int (*lm_change)(struct file_lease *, int, struct list_head *); + void (*lm_setup)(struct file_lease *, void **); + bool (*lm_breaker_owns_lease)(struct file_lease *); +}; + +struct trace_print_flags { + long unsigned int mask; + const char *name; +}; + +struct bpf_raw_tp_link { + struct bpf_link link; + struct bpf_raw_event_map *btp; + long: 32; + u64 cookie; +}; + +struct ring_buffer_event { + u32 type_len: 5; + u32 time_delta: 27; + u32 array[0]; +}; + +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + PERF_COUNT_HW_CACHE_MAX = 7, +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_MAX = 3, +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + PERF_COUNT_HW_CACHE_RESULT_MAX = 2, +}; + +enum perf_branch_sample_type_shift { + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, + PERF_SAMPLE_BRANCH_COND_SHIFT = 10, + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, + PERF_SAMPLE_BRANCH_MAX_SHIFT = 20, +}; + +struct trace_buffer; + +struct trace_event_file; + +struct trace_event_buffer { + struct trace_buffer *buffer; + struct ring_buffer_event *event; + struct trace_event_file *trace_file; + void *entry; + unsigned int trace_ctx; + struct pt_regs *regs; +}; + +struct eventfs_inode; + +struct trace_subsystem_dir; + +struct trace_event_file { + struct list_head list; + struct 
trace_event_call *event_call; + struct event_filter *filter; + struct eventfs_inode *ei; + struct trace_array *tr; + struct trace_subsystem_dir *system; + struct list_head triggers; + long unsigned int flags; + refcount_t ref; + atomic_t sm_ref; + atomic_t tm_ref; +}; + +enum { + EVENT_FILE_FL_ENABLED = 1, + EVENT_FILE_FL_RECORDED_CMD = 2, + EVENT_FILE_FL_RECORDED_TGID = 4, + EVENT_FILE_FL_FILTERED = 8, + EVENT_FILE_FL_NO_SET_FILTER = 16, + EVENT_FILE_FL_SOFT_MODE = 32, + EVENT_FILE_FL_SOFT_DISABLED = 64, + EVENT_FILE_FL_TRIGGER_MODE = 128, + EVENT_FILE_FL_TRIGGER_COND = 256, + EVENT_FILE_FL_PID_FILTER = 512, + EVENT_FILE_FL_WAS_ENABLED = 1024, + EVENT_FILE_FL_FREED = 2048, +}; + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING = 1, + FILTER_DYN_STRING = 2, + FILTER_RDYN_STRING = 3, + FILTER_PTR_STRING = 4, + FILTER_TRACE_FN = 5, + FILTER_CPUMASK = 6, + FILTER_COMM = 7, + FILTER_CPU = 8, + FILTER_STACKTRACE = 9, +}; + +struct trace_event_raw_locks_get_lock_context { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + unsigned char type; + struct file_lock_context *ctx; + char __data[0]; +}; + +struct trace_event_raw_filelock_lock { + struct trace_entry ent; + struct file_lock *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock_core *blocker; + fl_owner_t owner; + unsigned int pid; + unsigned int flags; + unsigned char type; + loff_t fl_start; + loff_t fl_end; + int ret; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_filelock_lease { + struct trace_entry ent; + struct file_lease *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock_core *blocker; + fl_owner_t owner; + unsigned int flags; + unsigned char type; + long unsigned int break_time; + long unsigned int downgrade_time; + char __data[0]; +}; + +struct trace_event_raw_generic_add_lease { + struct trace_entry ent; + long unsigned int i_ino; + int wcount; + int rcount; + int icount; + dev_t s_dev; + fl_owner_t owner; + unsigned int flags; + unsigned char type; + char __data[0]; +}; + +struct trace_event_raw_leases_conflict { + struct trace_entry ent; + void *lease; + void *breaker; + unsigned int l_fl_flags; + unsigned int b_fl_flags; + unsigned char l_fl_type; + unsigned char b_fl_type; + bool conflict; + char __data[0]; +}; + +struct trace_event_data_offsets_locks_get_lock_context {}; + +struct trace_event_data_offsets_filelock_lock {}; + +struct trace_event_data_offsets_filelock_lease {}; + +struct trace_event_data_offsets_generic_add_lease {}; + +struct trace_event_data_offsets_leases_conflict {}; + +typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, struct file_lock_context *); + +typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_locks_remove_posix)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lease *); + +typedef void 
(*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lease *); + +typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lease *, struct file_lease *); + +struct file_lock_list_struct { + spinlock_t lock; + struct hlist_head hlist; +}; + +enum proc_hidepid { + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, + HIDEPID_NOT_PTRACEABLE = 4, +}; + +enum proc_pidonly { + PROC_PIDONLY_OFF = 0, + PROC_PIDONLY_ON = 1, +}; + +struct proc_fs_info { + struct pid_namespace *pid_ns; + struct dentry *proc_self; + struct dentry *proc_thread_self; + kgid_t pid_gid; + enum proc_hidepid hide_pid; + enum proc_pidonly pidonly; + struct callback_head rcu; +}; + +struct locks_iterator { + int li_cpu; + long: 32; + loff_t li_pos; +}; + +typedef __u16 __le16; + +typedef __u32 __le32; + +typedef __u64 __le64; + +struct buffer_head; + +typedef void bh_end_io_t(struct buffer_head *, int); + +struct buffer_head { + long unsigned int b_state; + struct buffer_head *b_this_page; + union { + struct page *b_page; + struct folio *b_folio; + }; + long: 32; + sector_t b_blocknr; + size_t b_size; + char *b_data; + struct block_device *b_bdev; + bh_end_io_t *b_end_io; + void *b_private; + struct list_head b_assoc_buffers; + struct address_space *b_assoc_map; + atomic_t b_count; + spinlock_t b_uptodate_lock; +}; + +struct fiemap_extent; + +struct fiemap_extent_info { + unsigned int fi_flags; + unsigned int fi_extents_mapped; + unsigned int fi_extents_max; + struct fiemap_extent *fi_extents_start; +}; + +typedef unsigned int tid_t; + +struct transaction_chp_stats_s { + long unsigned int cs_chp_time; + __u32 cs_forced_to_close; + __u32 cs_written; + __u32 cs_dropped; +}; + +struct journal_s; + +typedef struct journal_s journal_t; + +struct journal_head; + +struct transaction_s; + +typedef struct transaction_s transaction_t; + +struct transaction_s { + journal_t *t_journal; + tid_t t_tid; + enum { + T_RUNNING = 0, + T_LOCKED = 1, + T_SWITCH = 2, + T_FLUSH = 3, + T_COMMIT = 4, + T_COMMIT_DFLUSH = 5, + T_COMMIT_JFLUSH = 6, + T_COMMIT_CALLBACK = 7, + T_FINISHED = 8, + } t_state; + long unsigned int t_log_start; + int t_nr_buffers; + struct journal_head *t_reserved_list; + struct journal_head *t_buffers; + struct journal_head *t_forget; + struct journal_head *t_checkpoint_list; + struct journal_head *t_shadow_list; + struct list_head t_inode_list; + long unsigned int t_max_wait; + long unsigned int t_start; + long unsigned int t_requested; + struct transaction_chp_stats_s t_chp_stats; + atomic_t t_updates; + atomic_t t_outstanding_credits; + atomic_t t_outstanding_revokes; + atomic_t t_handle_count; + transaction_t *t_cpnext; + transaction_t *t_cpprev; + long unsigned int t_expires; + ktime_t t_start_time; + unsigned int t_synchronous_commit: 1; + int t_need_data_flush; + struct list_head t_private_list; +}; + +struct jbd2_buffer_trigger_type; + +struct journal_head { + struct buffer_head *b_bh; + spinlock_t b_state_lock; + int b_jcount; + unsigned int b_jlist; + unsigned int b_modified; + char *b_frozen_data; + char *b_committed_data; + transaction_t *b_transaction; + transaction_t *b_next_transaction; + struct journal_head *b_tnext; + struct journal_head *b_tprev; + transaction_t *b_cp_transaction; + struct journal_head *b_cpnext; + struct journal_head *b_cpprev; + struct jbd2_buffer_trigger_type *b_triggers; + struct jbd2_buffer_trigger_type *b_frozen_triggers; +}; + +struct jbd2_buffer_trigger_type { + void (*t_frozen)(struct jbd2_buffer_trigger_type *, struct buffer_head *, void *, 
size_t); + void (*t_abort)(struct jbd2_buffer_trigger_type *, struct buffer_head *); +}; + +struct crypto_alg; + +struct crypto_tfm { + refcount_t refcnt; + u32 crt_flags; + int node; + void (*exit)(struct crypto_tfm *); + struct crypto_alg *__crt_alg; + long: 32; + void *__crt_ctx[0]; +}; + +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); + void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); + void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); +}; + +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); + int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); +}; + +struct crypto_type; + +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + int cra_priority; + refcount_t cra_refcnt; + char cra_name[128]; + char cra_driver_name[128]; + const struct crypto_type *cra_type; + union { + struct cipher_alg cipher; + struct compress_alg compress; + } cra_u; + int (*cra_init)(struct crypto_tfm *); + void (*cra_exit)(struct crypto_tfm *); + void (*cra_destroy)(struct crypto_alg *); + struct module *cra_module; +}; + +struct crypto_instance; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); + unsigned int (*extsize)(struct crypto_alg *); + int (*init_tfm)(struct crypto_tfm *); + void (*show)(struct seq_file *, struct crypto_alg *); + int (*report)(struct sk_buff *, struct crypto_alg *); + void (*free)(struct crypto_instance *); + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct crypto_shash { + unsigned int descsize; + long: 32; + struct crypto_tfm base; +}; + +struct transaction_run_stats_s { + long unsigned int rs_wait; + long unsigned int rs_request_delay; + long unsigned int rs_running; + long unsigned int rs_locked; + long unsigned int rs_flushing; + long unsigned int rs_logging; + __u32 rs_handle_count; + __u32 rs_blocks; + __u32 rs_blocks_logged; +}; + +struct transaction_stats_s { + long unsigned int ts_tid; + long unsigned int ts_requested; + struct transaction_run_stats_s run; +}; + +enum passtype { + PASS_SCAN = 0, + PASS_REVOKE = 1, + PASS_REPLAY = 2, +}; + +struct journal_superblock_s; + +typedef struct journal_superblock_s journal_superblock_t; + +struct jbd2_revoke_table_s; + +struct jbd2_inode; + +struct journal_s { + long unsigned int j_flags; + int j_errno; + struct mutex j_abort_mutex; + struct buffer_head *j_sb_buffer; + journal_superblock_t *j_superblock; + rwlock_t j_state_lock; + int j_barrier_count; + struct mutex j_barrier; + transaction_t *j_running_transaction; + transaction_t *j_committing_transaction; + transaction_t *j_checkpoint_transactions; + wait_queue_head_t j_wait_transaction_locked; + wait_queue_head_t j_wait_done_commit; + wait_queue_head_t j_wait_commit; + wait_queue_head_t j_wait_updates; + wait_queue_head_t j_wait_reserved; + wait_queue_head_t j_fc_wait; + struct mutex j_checkpoint_mutex; + struct buffer_head *j_chkpt_bhs[64]; + struct shrinker *j_shrinker; + long: 32; + struct percpu_counter j_checkpoint_jh_count; + transaction_t *j_shrink_transaction; + long unsigned int j_head; + long unsigned int j_tail; + long unsigned int j_free; + long unsigned int j_first; + long unsigned int j_last; + long unsigned int j_fc_first; + 
long unsigned int j_fc_off; + long unsigned int j_fc_last; + struct block_device *j_dev; + int j_blocksize; + long: 32; + long long unsigned int j_blk_offset; + char j_devname[56]; + struct block_device *j_fs_dev; + errseq_t j_fs_dev_wb_err; + unsigned int j_total_len; + atomic_t j_reserved_credits; + spinlock_t j_list_lock; + struct inode *j_inode; + tid_t j_tail_sequence; + tid_t j_transaction_sequence; + tid_t j_commit_sequence; + tid_t j_commit_request; + __u8 j_uuid[16]; + struct task_struct *j_task; + int j_max_transaction_buffers; + int j_revoke_records_per_block; + int j_transaction_overhead_buffers; + long unsigned int j_commit_interval; + struct timer_list j_commit_timer; + spinlock_t j_revoke_lock; + struct jbd2_revoke_table_s *j_revoke; + struct jbd2_revoke_table_s *j_revoke_table[2]; + struct buffer_head **j_wbuf; + struct buffer_head **j_fc_wbuf; + int j_wbufsize; + int j_fc_wbufsize; + pid_t j_last_sync_writer; + long: 32; + u64 j_average_commit_time; + u32 j_min_batch_time; + u32 j_max_batch_time; + void (*j_commit_callback)(journal_t *, transaction_t *); + int (*j_submit_inode_data_buffers)(struct jbd2_inode *); + int (*j_finish_inode_data_buffers)(struct jbd2_inode *); + spinlock_t j_history_lock; + struct proc_dir_entry *j_proc_entry; + struct transaction_stats_s j_stats; + unsigned int j_failed_commit; + void *j_private; + struct crypto_shash *j_chksum_driver; + __u32 j_csum_seed; + void (*j_fc_cleanup_callback)(struct journal_s *, int, tid_t); + int (*j_fc_replay_callback)(struct journal_s *, struct buffer_head *, enum passtype, int, tid_t); + int (*j_bmap)(struct journal_s *, sector_t *); + long: 32; +}; + +struct journal_header_s { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; +}; + +typedef struct journal_header_s journal_header_t; + +struct journal_superblock_s { + journal_header_t s_header; + __be32 s_blocksize; + __be32 s_maxlen; + __be32 s_first; + __be32 s_sequence; + __be32 s_start; + __be32 s_errno; + __be32 s_feature_compat; + __be32 s_feature_incompat; + __be32 s_feature_ro_compat; + __u8 s_uuid[16]; + __be32 s_nr_users; + __be32 s_dynsuper; + __be32 s_max_transaction; + __be32 s_max_trans_data; + __u8 s_checksum_type; + __u8 s_padding2[3]; + __be32 s_num_fc_blks; + __be32 s_head; + __u32 s_padding[40]; + __be32 s_checksum; + __u8 s_users[768]; +}; + +struct jbd2_inode { + transaction_t *i_transaction; + transaction_t *i_next_transaction; + struct list_head i_list; + struct inode *i_vfs_inode; + long unsigned int i_flags; + loff_t i_dirty_start; + loff_t i_dirty_end; +}; + +struct bgl_lock { + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct blockgroup_lock { + struct bgl_lock locks[128]; +}; + +struct fiemap_extent { + __u64 fe_logical; + __u64 fe_physical; + __u64 fe_length; + __u64 fe_reserved64[2]; + __u32 fe_flags; + __u32 fe_reserved[3]; +}; + +struct fscrypt_dummy_policy {}; + +typedef int ext4_grpblk_t; + +typedef long long unsigned int ext4_fsblk_t; + +typedef __u32 ext4_lblk_t; + +typedef unsigned int ext4_group_t; + +struct ext4_system_blocks { + struct rb_root root; + struct callback_head rcu; +}; + +struct flex_groups { + atomic64_t free_clusters; + atomic_t free_inodes; + atomic_t used_dirs; +}; + +struct ext4_es_stats { + long unsigned int es_stats_shrunk; + long: 32; + struct percpu_counter es_stats_cache_hits; + struct percpu_counter es_stats_cache_misses; + u64 es_stats_scan_time; + u64 es_stats_max_scan_time; + struct percpu_counter es_stats_all_cnt; + struct 
percpu_counter es_stats_shk_cnt; +}; + +struct ext4_fc_stats { + unsigned int fc_ineligible_reason_count[10]; + long unsigned int fc_num_commits; + long unsigned int fc_ineligible_commits; + long unsigned int fc_failed_commits; + long unsigned int fc_skipped_commits; + long unsigned int fc_numblks; + long: 32; + u64 s_fc_avg_commit_time; +}; + +struct ext4_fc_alloc_region { + ext4_lblk_t lblk; + long: 32; + ext4_fsblk_t pblk; + int ino; + int len; +}; + +struct ext4_fc_replay_state { + int fc_replay_num_tags; + int fc_replay_expected_off; + int fc_current_pass; + int fc_cur_tag; + int fc_crc; + struct ext4_fc_alloc_region *fc_regions; + int fc_regions_size; + int fc_regions_used; + int fc_regions_valid; + int *fc_modified_inodes; + int fc_modified_inodes_used; + int fc_modified_inodes_size; +}; + +struct ext4_super_block { + __le32 s_inodes_count; + __le32 s_blocks_count_lo; + __le32 s_r_blocks_count_lo; + __le32 s_free_blocks_count_lo; + __le32 s_free_inodes_count; + __le32 s_first_data_block; + __le32 s_log_block_size; + __le32 s_log_cluster_size; + __le32 s_blocks_per_group; + __le32 s_clusters_per_group; + __le32 s_inodes_per_group; + __le32 s_mtime; + __le32 s_wtime; + __le16 s_mnt_count; + __le16 s_max_mnt_count; + __le16 s_magic; + __le16 s_state; + __le16 s_errors; + __le16 s_minor_rev_level; + __le32 s_lastcheck; + __le32 s_checkinterval; + __le32 s_creator_os; + __le32 s_rev_level; + __le16 s_def_resuid; + __le16 s_def_resgid; + __le32 s_first_ino; + __le16 s_inode_size; + __le16 s_block_group_nr; + __le32 s_feature_compat; + __le32 s_feature_incompat; + __le32 s_feature_ro_compat; + __u8 s_uuid[16]; + char s_volume_name[16]; + char s_last_mounted[64]; + __le32 s_algorithm_usage_bitmap; + __u8 s_prealloc_blocks; + __u8 s_prealloc_dir_blocks; + __le16 s_reserved_gdt_blocks; + __u8 s_journal_uuid[16]; + __le32 s_journal_inum; + __le32 s_journal_dev; + __le32 s_last_orphan; + __le32 s_hash_seed[4]; + __u8 s_def_hash_version; + __u8 s_jnl_backup_type; + __le16 s_desc_size; + __le32 s_default_mount_opts; + __le32 s_first_meta_bg; + __le32 s_mkfs_time; + __le32 s_jnl_blocks[17]; + __le32 s_blocks_count_hi; + __le32 s_r_blocks_count_hi; + __le32 s_free_blocks_count_hi; + __le16 s_min_extra_isize; + __le16 s_want_extra_isize; + __le32 s_flags; + __le16 s_raid_stride; + __le16 s_mmp_update_interval; + __le64 s_mmp_block; + __le32 s_raid_stripe_width; + __u8 s_log_groups_per_flex; + __u8 s_checksum_type; + __u8 s_encryption_level; + __u8 s_reserved_pad; + __le64 s_kbytes_written; + __le32 s_snapshot_inum; + __le32 s_snapshot_id; + __le64 s_snapshot_r_blocks_count; + __le32 s_snapshot_list; + __le32 s_error_count; + __le32 s_first_error_time; + __le32 s_first_error_ino; + __le64 s_first_error_block; + __u8 s_first_error_func[32]; + __le32 s_first_error_line; + __le32 s_last_error_time; + __le32 s_last_error_ino; + __le32 s_last_error_line; + __le64 s_last_error_block; + __u8 s_last_error_func[32]; + __u8 s_mount_opts[64]; + __le32 s_usr_quota_inum; + __le32 s_grp_quota_inum; + __le32 s_overhead_clusters; + __le32 s_backup_bgs[2]; + __u8 s_encrypt_algos[4]; + __u8 s_encrypt_pw_salt[16]; + __le32 s_lpf_ino; + __le32 s_prj_quota_inum; + __le32 s_checksum_seed; + __u8 s_wtime_hi; + __u8 s_mtime_hi; + __u8 s_mkfs_time_hi; + __u8 s_lastcheck_hi; + __u8 s_first_error_time_hi; + __u8 s_last_error_time_hi; + __u8 s_first_error_errcode; + __u8 s_last_error_errcode; + __le16 s_encoding; + __le16 s_encoding_flags; + __le32 s_orphan_file_inum; + __le32 s_reserved[94]; + __le32 s_checksum; +}; + +struct 
ext4_journal_trigger { + struct jbd2_buffer_trigger_type tr_triggers; + struct super_block *sb; +}; + +struct ext4_orphan_block { + atomic_t ob_free_entries; + struct buffer_head *ob_bh; +}; + +struct ext4_orphan_info { + int of_blocks; + __u32 of_csum_seed; + struct ext4_orphan_block *of_binfo; +}; + +struct dax_device; + +struct ext4_group_info; + +struct ext4_locality_group; + +struct ext4_li_request; + +struct mb_cache; + +struct ext4_sb_info { + long unsigned int s_desc_size; + long unsigned int s_inodes_per_block; + long unsigned int s_blocks_per_group; + long unsigned int s_clusters_per_group; + long unsigned int s_inodes_per_group; + long unsigned int s_itb_per_group; + long unsigned int s_gdb_count; + long unsigned int s_desc_per_block; + ext4_group_t s_groups_count; + ext4_group_t s_blockfile_groups; + long unsigned int s_overhead; + unsigned int s_cluster_ratio; + unsigned int s_cluster_bits; + long: 32; + loff_t s_bitmap_maxbytes; + struct buffer_head *s_sbh; + struct ext4_super_block *s_es; + struct buffer_head **s_group_desc; + unsigned int s_mount_opt; + unsigned int s_mount_opt2; + long unsigned int s_mount_flags; + unsigned int s_def_mount_opt; + unsigned int s_def_mount_opt2; + ext4_fsblk_t s_sb_block; + atomic64_t s_resv_clusters; + kuid_t s_resuid; + kgid_t s_resgid; + short unsigned int s_mount_state; + short unsigned int s_pad; + int s_addr_per_block_bits; + int s_desc_per_block_bits; + int s_inode_size; + int s_first_ino; + unsigned int s_inode_readahead_blks; + unsigned int s_inode_goal; + u32 s_hash_seed[4]; + int s_def_hash_version; + int s_hash_unsigned; + long: 32; + struct percpu_counter s_freeclusters_counter; + struct percpu_counter s_freeinodes_counter; + struct percpu_counter s_dirs_counter; + struct percpu_counter s_dirtyclusters_counter; + struct percpu_counter s_sra_exceeded_retry_limit; + struct blockgroup_lock *s_blockgroup_lock; + struct proc_dir_entry *s_proc; + struct kobject s_kobj; + struct completion s_kobj_unregister; + struct super_block *s_sb; + struct buffer_head *s_mmp_bh; + struct journal_s *s_journal; + long unsigned int s_ext4_flags; + struct mutex s_orphan_lock; + struct list_head s_orphan; + struct ext4_orphan_info s_orphan_info; + long unsigned int s_commit_interval; + u32 s_max_batch_time; + u32 s_min_batch_time; + struct file *s_journal_bdev_file; + unsigned int s_want_extra_isize; + struct ext4_system_blocks *s_system_blks; + struct ext4_group_info ***s_group_info; + struct inode *s_buddy_cache; + spinlock_t s_md_lock; + short unsigned int *s_mb_offsets; + unsigned int *s_mb_maxs; + unsigned int s_group_info_size; + unsigned int s_mb_free_pending; + struct list_head s_freed_data_list[2]; + struct list_head s_discard_list; + struct work_struct s_discard_work; + atomic_t s_retry_alloc_pending; + struct list_head *s_mb_avg_fragment_size; + rwlock_t *s_mb_avg_fragment_size_locks; + struct list_head *s_mb_largest_free_orders; + rwlock_t *s_mb_largest_free_orders_locks; + long unsigned int s_stripe; + unsigned int s_mb_max_linear_groups; + unsigned int s_mb_stream_request; + unsigned int s_mb_max_to_scan; + unsigned int s_mb_min_to_scan; + unsigned int s_mb_stats; + unsigned int s_mb_order2_reqs; + unsigned int s_mb_group_prealloc; + unsigned int s_max_dir_size_kb; + long unsigned int s_mb_last_group; + long unsigned int s_mb_last_start; + unsigned int s_mb_prefetch; + unsigned int s_mb_prefetch_limit; + unsigned int s_mb_best_avail_max_trim_order; + atomic_t s_bal_reqs; + atomic_t s_bal_success; + atomic_t s_bal_allocated; + atomic_t 
s_bal_ex_scanned; + atomic_t s_bal_cX_ex_scanned[5]; + atomic_t s_bal_groups_scanned; + atomic_t s_bal_goals; + atomic_t s_bal_len_goals; + atomic_t s_bal_breaks; + atomic_t s_bal_2orders; + atomic_t s_bal_p2_aligned_bad_suggestions; + atomic_t s_bal_goal_fast_bad_suggestions; + atomic_t s_bal_best_avail_bad_suggestions; + atomic64_t s_bal_cX_groups_considered[5]; + atomic64_t s_bal_cX_hits[5]; + atomic64_t s_bal_cX_failed[5]; + atomic_t s_mb_buddies_generated; + long: 32; + atomic64_t s_mb_generation_time; + atomic_t s_mb_lost_chunks; + atomic_t s_mb_preallocated; + atomic_t s_mb_discarded; + atomic_t s_lock_busy; + struct ext4_locality_group *s_locality_groups; + long unsigned int s_sectors_written_start; + u64 s_kbytes_written; + unsigned int s_extent_max_zeroout_kb; + unsigned int s_log_groups_per_flex; + struct flex_groups **s_flex_groups; + ext4_group_t s_flex_groups_allocated; + struct workqueue_struct *rsv_conversion_wq; + struct timer_list s_err_report; + struct ext4_li_request *s_li_request; + unsigned int s_li_wait_mult; + struct task_struct *s_mmp_tsk; + long unsigned int s_last_trim_minblks; + struct crypto_shash *s_chksum_driver; + __u32 s_csum_seed; + struct shrinker *s_es_shrinker; + struct list_head s_es_list; + long int s_es_nr_inode; + struct ext4_es_stats s_es_stats; + struct mb_cache *s_ea_block_cache; + struct mb_cache *s_ea_inode_cache; + spinlock_t s_es_lock; + struct ext4_journal_trigger s_journal_triggers[1]; + struct ratelimit_state s_err_ratelimit_state; + struct ratelimit_state s_warning_ratelimit_state; + struct ratelimit_state s_msg_ratelimit_state; + atomic_t s_warning_count; + atomic_t s_msg_count; + struct fscrypt_dummy_policy s_dummy_enc_policy; + struct percpu_rw_semaphore s_writepages_rwsem; + struct dax_device *s_daxdev; + long: 32; + u64 s_dax_part_off; + errseq_t s_bdev_wb_err; + spinlock_t s_bdev_wb_lock; + spinlock_t s_error_lock; + int s_add_error_count; + int s_first_error_code; + __u32 s_first_error_line; + __u32 s_first_error_ino; + long: 32; + __u64 s_first_error_block; + const char *s_first_error_func; + long: 32; + time64_t s_first_error_time; + int s_last_error_code; + __u32 s_last_error_line; + __u32 s_last_error_ino; + long: 32; + __u64 s_last_error_block; + const char *s_last_error_func; + long: 32; + time64_t s_last_error_time; + struct work_struct s_sb_upd_work; + unsigned int s_awu_min; + unsigned int s_awu_max; + atomic_t s_fc_subtid; + struct list_head s_fc_q[2]; + struct list_head s_fc_dentry_q[2]; + unsigned int s_fc_bytes; + spinlock_t s_fc_lock; + struct buffer_head *s_fc_bh; + struct ext4_fc_stats s_fc_stats; + tid_t s_fc_ineligible_tid; + struct ext4_fc_replay_state s_fc_replay_state; + long: 32; + long: 32; + long: 32; +}; + +struct ext4_group_info { + long unsigned int bb_state; + struct rb_root bb_free_root; + ext4_grpblk_t bb_first_free; + ext4_grpblk_t bb_free; + ext4_grpblk_t bb_fragments; + int bb_avg_fragment_size_order; + ext4_grpblk_t bb_largest_free_order; + ext4_group_t bb_group; + struct list_head bb_prealloc_list; + struct rw_semaphore alloc_sem; + struct list_head bb_avg_fragment_size_node; + struct list_head bb_largest_free_order_node; + ext4_grpblk_t bb_counters[0]; +}; + +enum ext4_li_mode { + EXT4_LI_MODE_PREFETCH_BBITMAP = 0, + EXT4_LI_MODE_ITABLE = 1, +}; + +struct ext4_li_request { + struct super_block *lr_super; + enum ext4_li_mode lr_mode; + ext4_group_t lr_first_not_zeroed; + ext4_group_t lr_next_group; + struct list_head lr_request; + long unsigned int lr_next_sched; + long unsigned int lr_timeout; 
+}; + +enum ucount_type { + UCOUNT_USER_NAMESPACES = 0, + UCOUNT_PID_NAMESPACES = 1, + UCOUNT_UTS_NAMESPACES = 2, + UCOUNT_IPC_NAMESPACES = 3, + UCOUNT_NET_NAMESPACES = 4, + UCOUNT_MNT_NAMESPACES = 5, + UCOUNT_CGROUP_NAMESPACES = 6, + UCOUNT_TIME_NAMESPACES = 7, + UCOUNT_INOTIFY_INSTANCES = 8, + UCOUNT_INOTIFY_WATCHES = 9, + UCOUNT_COUNTS = 10, +}; + +typedef void (*exitcall_t)(); + +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + S_VERSION = 8, +}; + +struct fc_log; + +struct p_log { + const char *prefix; + struct fc_log *log; +}; + +enum fs_context_purpose { + FS_CONTEXT_FOR_MOUNT = 0, + FS_CONTEXT_FOR_SUBMOUNT = 1, + FS_CONTEXT_FOR_RECONFIGURE = 2, +}; + +enum fs_context_phase { + FS_CONTEXT_CREATE_PARAMS = 0, + FS_CONTEXT_CREATING = 1, + FS_CONTEXT_AWAITING_MOUNT = 2, + FS_CONTEXT_AWAITING_RECONF = 3, + FS_CONTEXT_RECONF_PARAMS = 4, + FS_CONTEXT_RECONFIGURING = 5, + FS_CONTEXT_FAILED = 6, +}; + +struct fs_context_operations; + +struct fs_context { + const struct fs_context_operations *ops; + struct mutex uapi_mutex; + struct file_system_type *fs_type; + void *fs_private; + void *sget_key; + struct dentry *root; + struct user_namespace *user_ns; + struct net *net_ns; + const struct cred *cred; + struct p_log log; + const char *source; + void *security; + void *s_fs_info; + unsigned int sb_flags; + unsigned int sb_flags_mask; + unsigned int s_iflags; + enum fs_context_purpose purpose: 8; + enum fs_context_phase phase: 8; + bool need_free: 1; + bool global: 1; + bool oldapi: 1; + bool exclusive: 1; +}; + +struct audit_names; + +struct filename { + const char *name; + const char *uptr; + atomic_t refcnt; + struct audit_names *aname; + const char iname[0]; +}; + +typedef u16 wchar_t; + +struct nls_table { + const char *charset; + const char *alias; + int (*uni2char)(wchar_t, unsigned char *, int); + int (*char2uni)(const unsigned char *, int, wchar_t *); + const unsigned char *charset2lower; + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +}; + +enum utf16_endian { + UTF16_HOST_ENDIAN = 0, + UTF16_LITTLE_ENDIAN = 1, + UTF16_BIG_ENDIAN = 2, +}; + +struct msdos_dir_entry { + __u8 name[11]; + __u8 attr; + __u8 lcase; + __u8 ctime_cs; + __le16 ctime; + __le16 cdate; + __le16 adate; + __le16 starthi; + __le16 time; + __le16 date; + __le16 start; + __le32 size; +}; + +struct msdos_dir_slot { + __u8 id; + __u8 name0_4[10]; + __u8 attr; + __u8 reserved; + __u8 alias_checksum; + __u8 name5_10[12]; + __le16 start; + __u8 name11_12[4]; +}; + +enum fs_value_type { + fs_value_is_undefined = 0, + fs_value_is_flag = 1, + fs_value_is_string = 2, + fs_value_is_blob = 3, + fs_value_is_filename = 4, + fs_value_is_file = 5, +}; + +struct fs_parameter { + const char *key; + enum fs_value_type type: 8; + union { + char *string; + void *blob; + struct filename *name; + struct file *file; + }; + size_t size; + int dirfd; +}; + +struct fc_log { + refcount_t usage; + u8 head; + u8 tail; + u8 need_free; + struct module *owner; + char *buffer[8]; +}; + +struct fs_context_operations { + void (*free)(struct fs_context *); + int (*dup)(struct fs_context *, struct fs_context *); + int (*parse_param)(struct fs_context *, struct fs_parameter *); + int (*parse_monolithic)(struct fs_context *, void *); + int (*get_tree)(struct fs_context *); + int (*reconfigure)(struct fs_context *); +}; + +struct fs_parse_result { + bool negated; + long: 32; + union { + bool boolean; + int int_32; + unsigned int uint_32; + u64 uint_64; + kuid_t uid; + kgid_t gid; + }; +}; + 
+struct fat_mount_options { + kuid_t fs_uid; + kgid_t fs_gid; + short unsigned int fs_fmask; + short unsigned int fs_dmask; + short unsigned int codepage; + int time_offset; + char *iocharset; + short unsigned int shortname; + unsigned char name_check; + unsigned char errors; + unsigned char nfs; + short unsigned int allow_utime; + unsigned int quiet: 1; + unsigned int showexec: 1; + unsigned int sys_immutable: 1; + unsigned int dotsOK: 1; + unsigned int isvfat: 1; + unsigned int utf8: 1; + unsigned int unicode_xlate: 1; + unsigned int numtail: 1; + unsigned int flush: 1; + unsigned int nocase: 1; + unsigned int usefree: 1; + unsigned int tz_set: 1; + unsigned int rodir: 1; + unsigned int discard: 1; + unsigned int dos1xfloppy: 1; + unsigned int debug: 1; +}; + +struct fatent_operations; + +struct msdos_sb_info { + short unsigned int sec_per_clus; + short unsigned int cluster_bits; + unsigned int cluster_size; + unsigned char fats; + unsigned char fat_bits; + short unsigned int fat_start; + long unsigned int fat_length; + long unsigned int dir_start; + short unsigned int dir_entries; + long unsigned int data_start; + long unsigned int max_cluster; + long unsigned int root_cluster; + long unsigned int fsinfo_sector; + struct mutex fat_lock; + struct mutex nfs_build_inode_lock; + struct mutex s_lock; + unsigned int prev_free; + unsigned int free_clusters; + unsigned int free_clus_valid; + struct fat_mount_options options; + struct nls_table *nls_disk; + struct nls_table *nls_io; + const void *dir_ops; + int dir_per_block; + int dir_per_block_bits; + unsigned int vol_id; + int fatent_shift; + const struct fatent_operations *fatent_ops; + struct inode *fat_inode; + struct inode *fsinfo_inode; + struct ratelimit_state ratelimit; + spinlock_t inode_hash_lock; + struct hlist_head inode_hashtable[256]; + spinlock_t dir_hash_lock; + struct hlist_head dir_hashtable[256]; + unsigned int dirty; + struct callback_head rcu; +}; + +struct fat_entry; + +struct fatent_operations { + void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); + void (*ent_set_ptr)(struct fat_entry *, int); + int (*ent_bread)(struct super_block *, struct fat_entry *, int, sector_t); + int (*ent_get)(struct fat_entry *); + void (*ent_put)(struct fat_entry *, int); + int (*ent_next)(struct fat_entry *); +}; + +struct msdos_inode_info { + spinlock_t cache_lru_lock; + struct list_head cache_lru; + int nr_caches; + unsigned int cache_valid_id; + long: 32; + loff_t mmu_private; + int i_start; + int i_logstart; + int i_attrs; + long: 32; + loff_t i_pos; + struct hlist_node i_fat_hash; + struct hlist_node i_dir_hash; + struct rw_semaphore truncate_lock; + struct timespec64 i_crtime; + struct inode vfs_inode; +}; + +struct fat_slot_info { + loff_t i_pos; + loff_t slot_off; + int nr_slots; + struct msdos_dir_entry *de; + struct buffer_head *bh; + long: 32; +}; + +struct shortname_info { + unsigned char lower: 1; + unsigned char upper: 1; + unsigned char valid: 1; +}; + +struct semaphore { + raw_spinlock_t lock; + unsigned int count; + struct list_head wait_list; +}; + +enum req_op { + REQ_OP_READ = 0, + REQ_OP_WRITE = 1, + REQ_OP_FLUSH = 2, + REQ_OP_DISCARD = 3, + REQ_OP_SECURE_ERASE = 5, + REQ_OP_ZONE_APPEND = 7, + REQ_OP_WRITE_ZEROES = 9, + REQ_OP_ZONE_OPEN = 10, + REQ_OP_ZONE_CLOSE = 11, + REQ_OP_ZONE_FINISH = 12, + REQ_OP_ZONE_RESET = 13, + REQ_OP_ZONE_RESET_ALL = 15, + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + REQ_OP_LAST = 36, +}; + +struct shash_desc { + struct crypto_shash *tfm; + long: 32; + void *__ctx[0]; +}; + 
+struct btrfs_qgroup_limit { + __u64 flags; + __u64 max_rfer; + __u64 max_excl; + __u64 rsv_rfer; + __u64 rsv_excl; +}; + +struct btrfs_qgroup_inherit { + __u64 flags; + __u64 num_qgroups; + __u64 num_ref_copies; + __u64 num_excl_copies; + struct btrfs_qgroup_limit lim; + __u64 qgroups[0]; +}; + +struct btrfs_scrub_progress { + __u64 data_extents_scrubbed; + __u64 tree_extents_scrubbed; + __u64 data_bytes_scrubbed; + __u64 tree_bytes_scrubbed; + __u64 read_errors; + __u64 csum_errors; + __u64 verify_errors; + __u64 no_csum; + __u64 csum_discards; + __u64 super_errors; + __u64 malloc_errors; + __u64 uncorrectable_errors; + __u64 corrected_errors; + __u64 last_physical; + __u64 unverified_errors; +}; + +struct btrfs_disk_key { + __le64 objectid; + __u8 type; + __le64 offset; +} __attribute__((packed)); + +struct btrfs_key { + __u64 objectid; + __u8 type; + __u64 offset; +} __attribute__((packed)); + +struct btrfs_header { + __u8 csum[32]; + __u8 fsid[16]; + __le64 bytenr; + __le64 flags; + __u8 chunk_tree_uuid[16]; + __le64 generation; + __le64 owner; + __le32 nritems; + __u8 level; +} __attribute__((packed)); + +struct btrfs_root_backup { + __le64 tree_root; + __le64 tree_root_gen; + __le64 chunk_root; + __le64 chunk_root_gen; + __le64 extent_root; + __le64 extent_root_gen; + __le64 fs_root; + __le64 fs_root_gen; + __le64 dev_root; + __le64 dev_root_gen; + __le64 csum_root; + __le64 csum_root_gen; + __le64 total_bytes; + __le64 bytes_used; + __le64 num_devices; + __le64 unused_64[4]; + __u8 tree_root_level; + __u8 chunk_root_level; + __u8 extent_root_level; + __u8 fs_root_level; + __u8 dev_root_level; + __u8 csum_root_level; + __u8 unused_8[10]; +}; + +struct btrfs_item { + struct btrfs_disk_key key; + __le32 offset; + __le32 size; +} __attribute__((packed)); + +struct btrfs_dev_item { + __le64 devid; + __le64 total_bytes; + __le64 bytes_used; + __le32 io_align; + __le32 io_width; + __le32 sector_size; + __le64 type; + __le64 generation; + __le64 start_offset; + __le32 dev_group; + __u8 seek_speed; + __u8 bandwidth; + __u8 uuid[16]; + __u8 fsid[16]; +} __attribute__((packed)); + +struct btrfs_super_block { + __u8 csum[32]; + __u8 fsid[16]; + __le64 bytenr; + __le64 flags; + __le64 magic; + __le64 generation; + __le64 root; + __le64 chunk_root; + __le64 log_root; + __le64 __unused_log_root_transid; + __le64 total_bytes; + __le64 bytes_used; + __le64 root_dir_objectid; + __le64 num_devices; + __le32 sectorsize; + __le32 nodesize; + __le32 __unused_leafsize; + __le32 stripesize; + __le32 sys_chunk_array_size; + __le64 chunk_root_generation; + __le64 compat_flags; + __le64 compat_ro_flags; + __le64 incompat_flags; + __le16 csum_type; + __u8 root_level; + __u8 chunk_root_level; + __u8 log_root_level; + struct btrfs_dev_item dev_item; + char label[256]; + __le64 cache_generation; + __le64 uuid_tree_generation; + __u8 metadata_uuid[16]; + __u64 nr_global_roots; + __le64 reserved[27]; + __u8 sys_chunk_array[2048]; + struct btrfs_root_backup super_roots[4]; + __u8 padding[565]; +} __attribute__((packed)); + +struct btrfs_timespec { + __le64 sec; + __le32 nsec; +}; + +struct btrfs_inode_item { + __le64 generation; + __le64 transid; + __le64 size; + __le64 nbytes; + __le64 block_group; + __le32 nlink; + __le32 uid; + __le32 gid; + __le32 mode; + __le64 rdev; + __le64 flags; + __le64 sequence; + __le64 reserved[4]; + struct btrfs_timespec atime; + struct btrfs_timespec ctime; + struct btrfs_timespec mtime; + struct btrfs_timespec otime; +}; + +struct btrfs_root_item { + struct btrfs_inode_item inode; + 
__le64 generation; + __le64 root_dirid; + __le64 bytenr; + __le64 byte_limit; + __le64 bytes_used; + __le64 last_snapshot; + __le64 flags; + __le32 refs; + struct btrfs_disk_key drop_progress; + __u8 drop_level; + __u8 level; + __le64 generation_v2; + __u8 uuid[16]; + __u8 parent_uuid[16]; + __u8 received_uuid[16]; + __le64 ctransid; + __le64 otransid; + __le64 stransid; + __le64 rtransid; + struct btrfs_timespec ctime; + struct btrfs_timespec otime; + struct btrfs_timespec stime; + struct btrfs_timespec rtime; + __le64 reserved[8]; +} __attribute__((packed)); + +enum { + BTRFS_FILE_EXTENT_INLINE = 0, + BTRFS_FILE_EXTENT_REG = 1, + BTRFS_FILE_EXTENT_PREALLOC = 2, + BTRFS_NR_FILE_EXTENT_TYPES = 3, +}; + +struct btrfs_file_extent_item { + __le64 generation; + __le64 ram_bytes; + __u8 compression; + __u8 encryption; + __le16 other_encoding; + __u8 type; + __le64 disk_bytenr; + __le64 disk_num_bytes; + __le64 offset; + __le64 num_bytes; +} __attribute__((packed)); + +struct btrfs_csum_item { + __u8 csum; +}; + +struct btrfs_tree_parent_check { + u64 owner_root; + u64 transid; + struct btrfs_key first_key; + bool has_first_key; + u8 level; + long: 32; +}; + +struct btrfs_bio; + +typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *); + +struct btrfs_inode; + +struct btrfs_ordered_extent; + +struct btrfs_ordered_sum; + +struct btrfs_fs_info; + +struct btrfs_bio { + struct btrfs_inode *inode; + long: 32; + u64 file_offset; + union { + struct { + u8 *csum; + u8 csum_inline[64]; + struct bvec_iter saved_iter; + }; + struct { + struct btrfs_ordered_extent *ordered; + struct btrfs_ordered_sum *sums; + u64 orig_physical; + }; + struct btrfs_tree_parent_check parent_check; + }; + btrfs_bio_end_io_t end_io; + void *private; + unsigned int mirror_num; + atomic_t pending_ios; + struct work_struct end_io_work; + struct btrfs_fs_info *fs_info; + blk_status_t status; + struct bio bio; +}; + +struct btrfs_work; + +typedef void (*btrfs_func_t)(struct btrfs_work *); + +typedef void (*btrfs_ordered_func_t)(struct btrfs_work *, bool); + +struct btrfs_workqueue; + +struct btrfs_work { + btrfs_func_t func; + btrfs_ordered_func_t ordered_func; + struct work_struct normal_work; + struct list_head ordered_list; + struct btrfs_workqueue *wq; + long unsigned int flags; +}; + +struct btrfs_ordered_extent { + u64 file_offset; + u64 num_bytes; + u64 ram_bytes; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 offset; + u64 bytes_left; + u64 truncated_len; + long unsigned int flags; + int compress_type; + int qgroup_rsv; + refcount_t refs; + struct btrfs_inode *inode; + struct list_head list; + struct list_head log_list; + wait_queue_head_t wait; + struct rb_node rb_node; + struct list_head root_extent_list; + struct btrfs_work work; + struct completion completion; + struct btrfs_work flush_work; + struct list_head work_list; + struct list_head bioc_list; + long: 32; +}; + +struct btrfs_ordered_sum { + u64 logical; + u32 len; + struct list_head list; + u8 sums[0]; + long: 32; +}; + +struct extent_map_tree { + struct rb_root root; + struct list_head modified_extents; + rwlock_t lock; +}; + +struct extent_io_tree { + struct rb_root state; + union { + struct btrfs_fs_info *fs_info; + struct btrfs_inode *inode; + }; + u8 owner; + spinlock_t lock; +}; + +enum btrfs_rsv_type { + BTRFS_BLOCK_RSV_GLOBAL = 0, + BTRFS_BLOCK_RSV_DELALLOC = 1, + BTRFS_BLOCK_RSV_TRANS = 2, + BTRFS_BLOCK_RSV_CHUNK = 3, + BTRFS_BLOCK_RSV_DELOPS = 4, + BTRFS_BLOCK_RSV_DELREFS = 5, + BTRFS_BLOCK_RSV_EMPTY = 6, + BTRFS_BLOCK_RSV_TEMP = 7, +}; + +struct 
btrfs_space_info; + +struct btrfs_block_rsv { + u64 size; + u64 reserved; + struct btrfs_space_info *space_info; + spinlock_t lock; + bool full; + bool failfast; + enum btrfs_rsv_type type: 8; + long: 32; + u64 qgroup_rsv_size; + u64 qgroup_rsv_reserved; +}; + +struct btrfs_root; + +struct btrfs_delayed_node; + +struct btrfs_inode { + struct btrfs_root *root; + long: 32; + u64 objectid; + u8 prop_compress; + u8 defrag_compress; + spinlock_t lock; + struct extent_map_tree extent_tree; + struct extent_io_tree io_tree; + struct extent_io_tree *file_extent_tree; + struct mutex log_mutex; + unsigned int outstanding_extents; + spinlock_t ordered_tree_lock; + struct rb_root ordered_tree; + struct rb_node *ordered_tree_last; + struct list_head delalloc_inodes; + long unsigned int runtime_flags; + long: 32; + u64 generation; + u64 last_trans; + u64 logged_trans; + int last_sub_trans; + int last_log_commit; + union { + u64 delalloc_bytes; + u64 first_dir_index_to_log; + }; + union { + u64 new_delalloc_bytes; + u64 last_dir_index_offset; + }; + union { + u64 defrag_bytes; + u64 reloc_block_group_start; + }; + u64 disk_i_size; + union { + u64 index_cnt; + u64 csum_bytes; + }; + u64 dir_index; + u64 last_unlink_trans; + union { + u64 last_reflink_trans; + u64 ref_root_id; + }; + u32 flags; + u32 ro_flags; + struct btrfs_block_rsv block_rsv; + struct btrfs_delayed_node *delayed_node; + long: 32; + u64 i_otime_sec; + u32 i_otime_nsec; + struct list_head delayed_iput; + struct rw_semaphore i_mmap_lock; + long: 32; + struct inode vfs_inode; +}; + +struct btrfs_block_group; + +struct btrfs_free_cluster { + spinlock_t lock; + spinlock_t refill_lock; + struct rb_root root; + long: 32; + u64 max_size; + u64 window_start; + bool fragmented; + struct btrfs_block_group *block_group; + struct list_head block_group_list; +}; + +struct btrfs_discard_ctl { + struct workqueue_struct *discard_workers; + struct delayed_work work; + spinlock_t lock; + struct btrfs_block_group *block_group; + struct list_head discard_list[3]; + u64 prev_discard; + u64 prev_discard_time; + atomic_t discardable_extents; + long: 32; + atomic64_t discardable_bytes; + u64 max_discard_size; + u64 delay_ms; + u32 iops_limit; + u32 kbps_limit; + u64 discard_extent_bytes; + u64 discard_bitmap_bytes; + atomic64_t discard_bytes_saved; +}; + +struct btrfs_device; + +struct btrfs_dev_replace { + u64 replace_state; + time64_t time_started; + time64_t time_stopped; + atomic64_t num_write_errors; + atomic64_t num_uncorrectable_read_errors; + u64 cursor_left; + u64 committed_cursor_left; + u64 cursor_left_last_write_of_item; + u64 cursor_right; + u64 cont_reading_from_srcdev_mode; + int is_valid; + int item_needs_writeback; + struct btrfs_device *srcdev; + struct btrfs_device *tgtdev; + struct mutex lock_finishing_cancel_unmount; + struct rw_semaphore rwsem; + long: 32; + struct btrfs_scrub_progress scrub_progress; + struct percpu_counter bio_counter; + wait_queue_head_t replace_wait; + struct task_struct *replace_task; +}; + +enum btrfs_exclusive_operation { + BTRFS_EXCLOP_NONE = 0, + BTRFS_EXCLOP_BALANCE_PAUSED = 1, + BTRFS_EXCLOP_BALANCE = 2, + BTRFS_EXCLOP_DEV_ADD = 3, + BTRFS_EXCLOP_DEV_REMOVE = 4, + BTRFS_EXCLOP_DEV_REPLACE = 5, + BTRFS_EXCLOP_RESIZE = 6, + BTRFS_EXCLOP_SWAP_ACTIVATE = 7, +}; + +struct btrfs_commit_stats { + u64 commit_count; + u64 max_commit_dur; + u64 last_commit_dur; + u64 total_commit_dur; +}; + +struct btrfs_transaction; + +struct btrfs_stripe_hash_table; + +struct btrfs_fs_devices; + +struct reloc_control; + +struct 
btrfs_balance_control; + +struct ulist; + +struct btrfs_delayed_root; + +struct btrfs_fs_info { + u8 chunk_tree_uuid[16]; + long unsigned int flags; + struct btrfs_root *tree_root; + struct btrfs_root *chunk_root; + struct btrfs_root *dev_root; + struct btrfs_root *fs_root; + struct btrfs_root *quota_root; + struct btrfs_root *uuid_root; + struct btrfs_root *data_reloc_root; + struct btrfs_root *block_group_root; + struct btrfs_root *stripe_root; + struct btrfs_root *log_root_tree; + rwlock_t global_root_lock; + struct rb_root global_root_tree; + spinlock_t fs_roots_radix_lock; + struct xarray fs_roots_radix; + rwlock_t block_group_cache_lock; + struct rb_root_cached block_group_cache_tree; + atomic64_t free_chunk_space; + struct extent_io_tree excluded_extents; + struct rb_root_cached mapping_tree; + rwlock_t mapping_tree_lock; + long: 32; + struct btrfs_block_rsv global_block_rsv; + struct btrfs_block_rsv trans_block_rsv; + struct btrfs_block_rsv chunk_block_rsv; + struct btrfs_block_rsv delayed_block_rsv; + struct btrfs_block_rsv delayed_refs_rsv; + struct btrfs_block_rsv empty_block_rsv; + u64 generation; + u64 last_trans_committed; + u64 last_reloc_trans; + u64 last_trans_log_full_commit; + long long unsigned int mount_opt; + long unsigned int compress_type: 4; + unsigned int compress_level; + u32 commit_interval; + long: 32; + u64 max_inline; + struct btrfs_transaction *running_transaction; + wait_queue_head_t transaction_throttle; + wait_queue_head_t transaction_wait; + wait_queue_head_t transaction_blocked_wait; + wait_queue_head_t async_submit_wait; + spinlock_t super_lock; + struct btrfs_super_block *super_copy; + struct btrfs_super_block *super_for_commit; + struct super_block *sb; + struct inode *btree_inode; + struct mutex tree_log_mutex; + struct mutex transaction_kthread_mutex; + struct mutex cleaner_mutex; + struct mutex chunk_mutex; + struct mutex ro_block_group_mutex; + struct btrfs_stripe_hash_table *stripe_hash_table; + struct mutex ordered_operations_mutex; + struct rw_semaphore commit_root_sem; + struct rw_semaphore cleanup_work_sem; + struct rw_semaphore subvol_sem; + spinlock_t trans_lock; + struct mutex reloc_mutex; + struct list_head trans_list; + struct list_head dead_roots; + struct list_head caching_block_groups; + spinlock_t delayed_iput_lock; + struct list_head delayed_iputs; + atomic_t nr_delayed_iputs; + wait_queue_head_t delayed_iputs_wait; + atomic64_t tree_mod_seq; + rwlock_t tree_mod_log_lock; + struct rb_root tree_mod_log; + struct list_head tree_mod_seq_list; + atomic_t async_delalloc_pages; + spinlock_t ordered_root_lock; + struct list_head ordered_roots; + struct mutex delalloc_root_mutex; + spinlock_t delalloc_root_lock; + struct list_head delalloc_roots; + struct btrfs_workqueue *workers; + struct btrfs_workqueue *delalloc_workers; + struct btrfs_workqueue *flush_workers; + struct workqueue_struct *endio_workers; + struct workqueue_struct *endio_meta_workers; + struct workqueue_struct *rmw_workers; + struct workqueue_struct *compressed_write_workers; + struct btrfs_workqueue *endio_write_workers; + struct btrfs_workqueue *endio_freespace_worker; + struct btrfs_workqueue *caching_workers; + struct btrfs_workqueue *fixup_workers; + struct btrfs_workqueue *delayed_workers; + struct task_struct *transaction_kthread; + struct task_struct *cleaner_kthread; + u32 thread_pool_size; + struct kobject *space_info_kobj; + struct kobject *qgroups_kobj; + struct kobject *discard_kobj; + struct percpu_counter dirty_metadata_bytes; + struct percpu_counter 
delalloc_bytes; + struct percpu_counter ordered_bytes; + s32 dirty_metadata_batch; + s32 delalloc_batch; + struct percpu_counter evictable_extent_maps; + u64 em_shrinker_last_root; + u64 em_shrinker_last_ino; + atomic64_t em_shrinker_nr_to_scan; + struct work_struct em_shrinker_work; + struct list_head dirty_cowonly_roots; + struct btrfs_fs_devices *fs_devices; + struct list_head space_info; + struct btrfs_space_info *data_sinfo; + struct reloc_control *reloc_ctl; + long: 32; + struct btrfs_free_cluster data_alloc_cluster; + struct btrfs_free_cluster meta_alloc_cluster; + spinlock_t defrag_inodes_lock; + struct rb_root defrag_inodes; + atomic_t defrag_running; + seqlock_t profiles_lock; + long: 32; + u64 avail_data_alloc_bits; + u64 avail_metadata_alloc_bits; + u64 avail_system_alloc_bits; + spinlock_t balance_lock; + struct mutex balance_mutex; + atomic_t balance_pause_req; + atomic_t balance_cancel_req; + struct btrfs_balance_control *balance_ctl; + wait_queue_head_t balance_wait_q; + atomic_t reloc_cancel_req; + u32 data_chunk_allocations; + u32 metadata_ratio; + void *bdev_holder; + struct mutex scrub_lock; + atomic_t scrubs_running; + atomic_t scrub_pause_req; + atomic_t scrubs_paused; + atomic_t scrub_cancel_req; + wait_queue_head_t scrub_pause_wait; + refcount_t scrub_workers_refcnt; + u32 sectors_per_page; + struct workqueue_struct *scrub_workers; + long: 32; + struct btrfs_discard_ctl discard_ctl; + u64 qgroup_flags; + struct rb_root qgroup_tree; + spinlock_t qgroup_lock; + struct ulist *qgroup_ulist; + struct mutex qgroup_ioctl_lock; + struct list_head dirty_qgroups; + u64 qgroup_seq; + struct mutex qgroup_rescan_lock; + struct btrfs_key qgroup_rescan_progress; + struct btrfs_workqueue *qgroup_rescan_workers; + struct completion qgroup_rescan_completion; + struct btrfs_work qgroup_rescan_work; + bool qgroup_rescan_running; + u8 qgroup_drop_subtree_thres; + u64 qgroup_enable_gen; + int fs_error; + long unsigned int fs_state; + struct btrfs_delayed_root *delayed_root; + spinlock_t buffer_lock; + struct xarray buffer_radix; + int backup_root_index; + struct btrfs_dev_replace dev_replace; + struct semaphore uuid_tree_rescan_sem; + struct work_struct async_reclaim_work; + struct work_struct async_data_reclaim_work; + struct work_struct preempt_reclaim_work; + struct work_struct reclaim_bgs_work; + struct list_head reclaim_bgs; + int bg_reclaim_threshold; + spinlock_t unused_bgs_lock; + struct list_head unused_bgs; + struct mutex unused_bg_unpin_mutex; + struct mutex reclaim_bgs_lock; + u32 nodesize; + u32 sectorsize; + u32 sectorsize_bits; + u32 csum_size; + u32 csums_per_leaf; + u32 stripesize; + u64 max_extent_size; + spinlock_t swapfile_pins_lock; + struct rb_root swapfile_pins; + struct crypto_shash *csum_shash; + enum btrfs_exclusive_operation exclusive_operation; + u64 zone_size; + struct queue_limits limits; + long: 32; + u64 max_zone_append_size; + struct mutex zoned_meta_io_lock; + spinlock_t treelog_bg_lock; + u64 treelog_bg; + spinlock_t relocation_bg_lock; + long: 32; + u64 data_reloc_bg; + struct mutex zoned_data_reloc_io_lock; + struct btrfs_block_group *active_meta_bg; + struct btrfs_block_group *active_system_bg; + long: 32; + u64 nr_global_roots; + spinlock_t zone_active_bgs_lock; + struct list_head zone_active_bgs; + long: 32; + struct btrfs_commit_stats commit_stats; + u64 last_root_drop_gen; + struct lockdep_map btrfs_trans_num_writers_map; + struct lockdep_map btrfs_trans_num_extwriters_map; + struct lockdep_map btrfs_state_change_map[4]; + struct lockdep_map 
btrfs_trans_pending_ordered_map; + struct lockdep_map btrfs_ordered_extent_map; +}; + +enum btrfs_compression_type { + BTRFS_COMPRESS_NONE = 0, + BTRFS_COMPRESS_ZLIB = 1, + BTRFS_COMPRESS_LZO = 2, + BTRFS_COMPRESS_ZSTD = 3, + BTRFS_NR_COMPRESS_TYPES = 4, +}; + +struct ulist_node { + u64 val; + u64 aux; + struct list_head list; + struct rb_node rb_node; + long: 32; +}; + +struct ulist { + long unsigned int nnodes; + struct list_head nodes; + struct rb_root root; + struct ulist_node *prealloc; +}; + +struct extent_buffer { + u64 start; + u32 len; + u32 folio_size; + long unsigned int bflags; + struct btrfs_fs_info *fs_info; + void *addr; + spinlock_t refs_lock; + atomic_t refs; + int read_mirror; + s8 log_index; + u8 folio_shift; + struct callback_head callback_head; + struct rw_semaphore lock; + struct folio *folios[16]; + long: 32; +}; + +struct extent_changeset { + u64 bytes_changed; + struct ulist range_changed; + long: 32; +}; + +struct btrfs_drew_lock { + atomic_t readers; + atomic_t writers; + wait_queue_head_t pending_writers; + wait_queue_head_t pending_readers; +}; + +enum { + __EXTENT_DIRTY_BIT = 0, + EXTENT_DIRTY = 1, + __EXTENT_DIRTY_SEQ = 0, + __EXTENT_UPTODATE_BIT = 1, + EXTENT_UPTODATE = 2, + __EXTENT_UPTODATE_SEQ = 1, + __EXTENT_LOCKED_BIT = 2, + EXTENT_LOCKED = 4, + __EXTENT_LOCKED_SEQ = 2, + __EXTENT_DIO_LOCKED_BIT = 3, + EXTENT_DIO_LOCKED = 8, + __EXTENT_DIO_LOCKED_SEQ = 3, + __EXTENT_NEW_BIT = 4, + EXTENT_NEW = 16, + __EXTENT_NEW_SEQ = 4, + __EXTENT_DELALLOC_BIT = 5, + EXTENT_DELALLOC = 32, + __EXTENT_DELALLOC_SEQ = 5, + __EXTENT_DEFRAG_BIT = 6, + EXTENT_DEFRAG = 64, + __EXTENT_DEFRAG_SEQ = 6, + __EXTENT_BOUNDARY_BIT = 7, + EXTENT_BOUNDARY = 128, + __EXTENT_BOUNDARY_SEQ = 7, + __EXTENT_NODATASUM_BIT = 8, + EXTENT_NODATASUM = 256, + __EXTENT_NODATASUM_SEQ = 8, + __EXTENT_CLEAR_META_RESV_BIT = 9, + EXTENT_CLEAR_META_RESV = 512, + __EXTENT_CLEAR_META_RESV_SEQ = 9, + __EXTENT_NEED_WAIT_BIT = 10, + EXTENT_NEED_WAIT = 1024, + __EXTENT_NEED_WAIT_SEQ = 10, + __EXTENT_NORESERVE_BIT = 11, + EXTENT_NORESERVE = 2048, + __EXTENT_NORESERVE_SEQ = 11, + __EXTENT_QGROUP_RESERVED_BIT = 12, + EXTENT_QGROUP_RESERVED = 4096, + __EXTENT_QGROUP_RESERVED_SEQ = 12, + __EXTENT_CLEAR_DATA_RESV_BIT = 13, + EXTENT_CLEAR_DATA_RESV = 8192, + __EXTENT_CLEAR_DATA_RESV_SEQ = 13, + __EXTENT_DELALLOC_NEW_BIT = 14, + EXTENT_DELALLOC_NEW = 16384, + __EXTENT_DELALLOC_NEW_SEQ = 14, + __EXTENT_ADD_INODE_BYTES_BIT = 15, + EXTENT_ADD_INODE_BYTES = 32768, + __EXTENT_ADD_INODE_BYTES_SEQ = 15, + __EXTENT_CLEAR_ALL_BITS_BIT = 16, + EXTENT_CLEAR_ALL_BITS = 65536, + __EXTENT_CLEAR_ALL_BITS_SEQ = 16, + __EXTENT_NOWAIT_BIT = 17, + EXTENT_NOWAIT = 131072, + __EXTENT_NOWAIT_SEQ = 17, +}; + +struct extent_state { + u64 start; + u64 end; + struct rb_node rb_node; + wait_queue_head_t wq; + refcount_t refs; + u32 state; +}; + +enum { + BTRFS_FS_STATE_REMOUNTING = 0, + BTRFS_FS_STATE_RO = 1, + BTRFS_FS_STATE_TRANS_ABORTED = 2, + BTRFS_FS_STATE_DEV_REPLACING = 3, + BTRFS_FS_STATE_DUMMY_FS_INFO = 4, + BTRFS_FS_STATE_NO_DATA_CSUMS = 5, + BTRFS_FS_STATE_SKIP_META_CSUMS = 6, + BTRFS_FS_STATE_LOG_CLEANUP_ERROR = 7, + BTRFS_FS_STATE_COUNT = 8, +}; + +struct btrfs_qgroup_swapped_blocks { + spinlock_t lock; + bool swapped; + struct rb_root blocks[8]; +}; + +struct btrfs_root { + struct rb_node rb_node; + struct extent_buffer *node; + struct extent_buffer *commit_root; + struct btrfs_root *log_root; + struct btrfs_root *reloc_root; + long unsigned int state; + struct btrfs_root_item root_item; + struct btrfs_key root_key; + struct 
btrfs_fs_info *fs_info; + struct extent_io_tree dirty_log_pages; + struct mutex objectid_mutex; + spinlock_t accounting_lock; + struct btrfs_block_rsv *block_rsv; + struct mutex log_mutex; + wait_queue_head_t log_writer_wait; + wait_queue_head_t log_commit_wait[2]; + struct list_head log_ctxs[2]; + atomic_t log_writers; + atomic_t log_commit[2]; + atomic_t log_batch; + int log_transid; + int log_transid_committed; + int last_log_commit; + pid_t log_start_pid; + u64 last_trans; + u64 free_objectid; + struct btrfs_key defrag_progress; + struct btrfs_key defrag_max; + struct list_head dirty_list; + struct list_head root_list; + struct xarray inodes; + struct xarray delayed_nodes; + dev_t anon_dev; + spinlock_t root_item_lock; + refcount_t refs; + struct mutex delalloc_mutex; + spinlock_t delalloc_lock; + struct list_head delalloc_inodes; + struct list_head delalloc_root; + u64 nr_delalloc_inodes; + struct mutex ordered_extent_mutex; + spinlock_t ordered_extent_lock; + struct list_head ordered_extents; + struct list_head ordered_root; + u64 nr_ordered_extents; + struct list_head reloc_dirty_list; + int send_in_progress; + int dedupe_in_progress; + struct btrfs_drew_lock snapshot_lock; + atomic_t snapshot_force_cow; + spinlock_t qgroup_meta_rsv_lock; + u64 qgroup_meta_rsv_pertrans; + u64 qgroup_meta_rsv_prealloc; + wait_queue_head_t qgroup_flush_wait; + atomic_t nr_swapfiles; + struct btrfs_qgroup_swapped_blocks swapped_blocks; + struct extent_io_tree log_csum_range; + u64 relocation_src_root; +}; + +enum btrfs_trans_state { + TRANS_STATE_RUNNING = 0, + TRANS_STATE_COMMIT_PREP = 1, + TRANS_STATE_COMMIT_START = 2, + TRANS_STATE_COMMIT_DOING = 3, + TRANS_STATE_UNBLOCKED = 4, + TRANS_STATE_SUPER_COMMITTED = 5, + TRANS_STATE_COMPLETED = 6, + TRANS_STATE_MAX = 7, +}; + +struct btrfs_delayed_ref_root { + struct xarray head_refs; + struct xarray dirty_extents; + spinlock_t lock; + long unsigned int num_heads; + long unsigned int num_heads_ready; + long: 32; + u64 pending_csums; + long unsigned int flags; + long: 32; + u64 run_delayed_start; + u64 qgroup_to_skip; +}; + +struct btrfs_transaction { + u64 transid; + atomic_t num_extwriters; + atomic_t num_writers; + refcount_t use_count; + long unsigned int flags; + enum btrfs_trans_state state; + int aborted; + struct list_head list; + struct extent_io_tree dirty_pages; + time64_t start_time; + wait_queue_head_t writer_wait; + wait_queue_head_t commit_wait; + struct list_head pending_snapshots; + struct list_head dev_update_list; + struct list_head switch_commits; + struct list_head dirty_bgs; + struct list_head io_bgs; + struct list_head dropped_roots; + struct extent_io_tree pinned_extents; + struct mutex cache_write_mutex; + spinlock_t dirty_bgs_lock; + struct list_head deleted_bgs; + spinlock_t dropped_roots_lock; + long: 32; + struct btrfs_delayed_ref_root delayed_refs; + struct btrfs_fs_info *fs_info; + atomic_t pending_ordered; + wait_queue_head_t pending_wait; + long: 32; +}; + +struct btrfs_delayed_root { + spinlock_t lock; + struct list_head node_list; + struct list_head prepare_list; + atomic_t items; + atomic_t items_seq; + int nodes; + wait_queue_head_t wait; +}; + +enum { + READA_NONE = 0, + READA_BACK = 1, + READA_FORWARD = 2, + READA_FORWARD_ALWAYS = 3, +}; + +struct btrfs_path { + struct extent_buffer *nodes[8]; + int slots[8]; + u8 locks[8]; + u8 reada; + u8 lowest_level; + unsigned int search_for_split: 1; + unsigned int keep_locks: 1; + unsigned int skip_locking: 1; + unsigned int search_commit_root: 1; + unsigned int need_commit_sem: 
1; + unsigned int skip_release_on_error: 1; + unsigned int search_for_extension: 1; + unsigned int nowait: 1; +}; + +struct btrfs_item_batch { + const struct btrfs_key *keys; + const u32 *data_sizes; + u32 total_data_size; + int nr; +}; + +enum { + __EXTENT_FLAG_PINNED_BIT = 0, + EXTENT_FLAG_PINNED = 1, + __EXTENT_FLAG_PINNED_SEQ = 0, + __EXTENT_FLAG_COMPRESS_ZLIB_BIT = 1, + EXTENT_FLAG_COMPRESS_ZLIB = 2, + __EXTENT_FLAG_COMPRESS_ZLIB_SEQ = 1, + __EXTENT_FLAG_COMPRESS_LZO_BIT = 2, + EXTENT_FLAG_COMPRESS_LZO = 4, + __EXTENT_FLAG_COMPRESS_LZO_SEQ = 2, + __EXTENT_FLAG_COMPRESS_ZSTD_BIT = 3, + EXTENT_FLAG_COMPRESS_ZSTD = 8, + __EXTENT_FLAG_COMPRESS_ZSTD_SEQ = 3, + __EXTENT_FLAG_PREALLOC_BIT = 4, + EXTENT_FLAG_PREALLOC = 16, + __EXTENT_FLAG_PREALLOC_SEQ = 4, + __EXTENT_FLAG_LOGGING_BIT = 5, + EXTENT_FLAG_LOGGING = 32, + __EXTENT_FLAG_LOGGING_SEQ = 5, + __EXTENT_FLAG_MERGED_BIT = 6, + EXTENT_FLAG_MERGED = 64, + __EXTENT_FLAG_MERGED_SEQ = 6, +}; + +struct extent_map { + struct rb_node rb_node; + long: 32; + u64 start; + u64 len; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 offset; + u64 ram_bytes; + u64 generation; + u32 flags; + refcount_t refs; + struct list_head list; +}; + +struct btrfs_delayed_node { + u64 inode_id; + u64 bytes_reserved; + struct btrfs_root *root; + struct list_head n_list; + struct list_head p_list; + struct rb_root_cached ins_root; + struct rb_root_cached del_root; + struct mutex mutex; + struct btrfs_inode_item inode_item; + refcount_t refs; + int count; + u64 index_cnt; + long unsigned int flags; + u32 curr_index_batch_size; + u32 index_item_leaves; + long: 32; +}; + +enum { + BTRFS_INODE_FLUSH_ON_CLOSE = 0, + BTRFS_INODE_DUMMY = 1, + BTRFS_INODE_IN_DEFRAG = 2, + BTRFS_INODE_HAS_ASYNC_EXTENT = 3, + BTRFS_INODE_NEEDS_FULL_SYNC = 4, + BTRFS_INODE_COPY_EVERYTHING = 5, + BTRFS_INODE_HAS_PROPS = 6, + BTRFS_INODE_SNAPSHOT_FLUSH = 7, + BTRFS_INODE_NO_XATTRS = 8, + BTRFS_INODE_NO_DELALLOC_FLUSH = 9, + BTRFS_INODE_VERITY_IN_PROGRESS = 10, + BTRFS_INODE_FREE_SPACE_INODE = 11, + BTRFS_INODE_NO_CAP_XATTR = 12, + BTRFS_INODE_COW_WRITE_ERROR = 13, + BTRFS_INODE_ROOT_STUB = 14, +}; + +struct btrfs_pending_snapshot; + +struct btrfs_trans_handle { + u64 transid; + u64 bytes_reserved; + u64 delayed_refs_bytes_reserved; + u64 chunk_bytes_reserved; + long unsigned int delayed_ref_updates; + long unsigned int delayed_ref_csum_deletions; + struct btrfs_transaction *transaction; + struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *orig_rsv; + struct btrfs_pending_snapshot *pending_snapshot; + refcount_t use_count; + unsigned int type; + short int aborted; + bool adding_csums; + bool allocating_chunk; + bool removing_chunk; + bool reloc_reserved; + bool in_fsync; + struct btrfs_fs_info *fs_info; + struct list_head new_bgs; + long: 32; + struct btrfs_block_rsv delayed_rsv; +}; + +struct btrfs_pending_snapshot { + struct dentry *dentry; + struct btrfs_inode *dir; + struct btrfs_root *root; + struct btrfs_root_item *root_item; + struct btrfs_root *snap; + struct btrfs_qgroup_inherit *inherit; + struct btrfs_path *path; + long: 32; + struct btrfs_block_rsv block_rsv; + int error; + dev_t anon_dev; + bool readonly; + struct list_head list; + long: 32; +}; + +struct btrfs_data_container { + __u32 bytes_left; + __u32 bytes_missing; + __u32 elem_cnt; + __u32 elem_missed; + __u64 val[0]; +}; + +enum btrfs_dev_stat_values { + BTRFS_DEV_STAT_WRITE_ERRS = 0, + BTRFS_DEV_STAT_READ_ERRS = 1, + BTRFS_DEV_STAT_FLUSH_ERRS = 2, + BTRFS_DEV_STAT_CORRUPTION_ERRS = 3, + BTRFS_DEV_STAT_GENERATION_ERRS = 
4, + BTRFS_DEV_STAT_VALUES_MAX = 5, +}; + +struct btrfs_key_ptr { + struct btrfs_disk_key key; + __le64 blockptr; + __le64 generation; +} __attribute__((packed)); + +struct btrfs_extent_item { + __le64 refs; + __le64 generation; + __le64 flags; +}; + +struct btrfs_tree_block_info { + struct btrfs_disk_key key; + __u8 level; +}; + +struct btrfs_extent_data_ref { + __le64 root; + __le64 objectid; + __le64 offset; + __le32 count; +}; + +struct btrfs_shared_data_ref { + __le32 count; +}; + +struct btrfs_extent_inline_ref { + __u8 type; + __le64 offset; +} __attribute__((packed)); + +struct btrfs_inode_ref { + __le64 index; + __le16 name_len; +} __attribute__((packed)); + +struct btrfs_inode_extref { + __le64 parent_objectid; + __le64 index; + __le16 name_len; + __u8 name[0]; +} __attribute__((packed)); + +struct ulist_iterator { + struct list_head *cur_list; +}; + +struct rb_simple_node { + struct rb_node rb_node; + long: 32; + u64 bytenr; +}; + +enum { + EXTENT_BUFFER_UPTODATE = 0, + EXTENT_BUFFER_DIRTY = 1, + EXTENT_BUFFER_CORRUPT = 2, + EXTENT_BUFFER_READAHEAD = 3, + EXTENT_BUFFER_TREE_REF = 4, + EXTENT_BUFFER_STALE = 5, + EXTENT_BUFFER_WRITEBACK = 6, + EXTENT_BUFFER_READ_ERR = 7, + EXTENT_BUFFER_UNMAPPED = 8, + EXTENT_BUFFER_IN_TREE = 9, + EXTENT_BUFFER_WRITE_ERR = 10, + EXTENT_BUFFER_ZONED_ZEROOUT = 11, + EXTENT_BUFFER_READING = 12, +}; + +enum btrfs_discard_state { + BTRFS_DISCARD_EXTENTS = 0, + BTRFS_DISCARD_BITMAPS = 1, + BTRFS_DISCARD_RESET_CURSOR = 2, +}; + +struct btrfs_io_ctl { + void *cur; + void *orig; + struct page *page; + struct page **pages; + struct btrfs_fs_info *fs_info; + struct inode *inode; + long unsigned int size; + int index; + int num_pages; + int entries; + int bitmaps; +}; + +enum btrfs_block_group_size_class { + BTRFS_BG_SZ_NONE = 0, + BTRFS_BG_SZ_SMALL = 1, + BTRFS_BG_SZ_MEDIUM = 2, + BTRFS_BG_SZ_LARGE = 3, +}; + +struct btrfs_caching_control; + +struct btrfs_free_space_ctl; + +struct btrfs_chunk_map; + +struct btrfs_block_group { + struct btrfs_fs_info *fs_info; + struct btrfs_inode *inode; + spinlock_t lock; + long: 32; + u64 start; + u64 length; + u64 pinned; + u64 reserved; + u64 used; + u64 delalloc_bytes; + u64 bytes_super; + u64 flags; + u64 cache_generation; + u64 global_root_id; + u64 commit_used; + u32 bitmap_high_thresh; + u32 bitmap_low_thresh; + struct rw_semaphore data_rwsem; + long unsigned int full_stripe_len; + long unsigned int runtime_flags; + unsigned int ro; + int disk_cache_state; + int cached; + struct btrfs_caching_control *caching_ctl; + struct btrfs_space_info *space_info; + struct btrfs_free_space_ctl *free_space_ctl; + struct rb_node cache_node; + struct list_head list; + refcount_t refs; + struct list_head cluster_list; + struct list_head bg_list; + struct list_head ro_list; + atomic_t frozen; + struct list_head discard_list; + int discard_index; + u64 discard_eligible_time; + u64 discard_cursor; + enum btrfs_discard_state discard_state; + struct list_head dirty_list; + struct list_head io_list; + struct btrfs_io_ctl io_ctl; + atomic_t reservations; + atomic_t nocow_writers; + struct mutex free_space_lock; + int swap_extents; + u64 alloc_offset; + u64 zone_unusable; + u64 zone_capacity; + u64 meta_write_pointer; + struct btrfs_chunk_map *physical_map; + struct list_head active_bg_list; + struct work_struct zone_finish_work; + struct extent_buffer *last_eb; + enum btrfs_block_group_size_class size_class; + long: 32; + u64 reclaim_mark; +}; + +enum btrfs_lock_nesting { + BTRFS_NESTING_NORMAL = 0, + BTRFS_NESTING_COW = 1, + 
BTRFS_NESTING_LEFT = 2, + BTRFS_NESTING_RIGHT = 3, + BTRFS_NESTING_LEFT_COW = 4, + BTRFS_NESTING_RIGHT_COW = 5, + BTRFS_NESTING_SPLIT = 6, + BTRFS_NESTING_NEW_ROOT = 7, + BTRFS_NESTING_MAX = 8, +}; + +enum { + BTRFS_ROOT_IN_TRANS_SETUP = 0, + BTRFS_ROOT_SHAREABLE = 1, + BTRFS_ROOT_TRACK_DIRTY = 2, + BTRFS_ROOT_IN_RADIX = 3, + BTRFS_ROOT_ORPHAN_ITEM_INSERTED = 4, + BTRFS_ROOT_DEFRAG_RUNNING = 5, + BTRFS_ROOT_FORCE_COW = 6, + BTRFS_ROOT_MULTI_LOG_TASKS = 7, + BTRFS_ROOT_DIRTY = 8, + BTRFS_ROOT_DELETING = 9, + BTRFS_ROOT_DEAD_RELOC_TREE = 10, + BTRFS_ROOT_DEAD_TREE = 11, + BTRFS_ROOT_HAS_LOG_TREE = 12, + BTRFS_ROOT_QGROUP_FLUSHING = 13, + BTRFS_ROOT_ORPHAN_CLEANUP = 14, + BTRFS_ROOT_UNFINISHED_DROP = 15, + BTRFS_ROOT_RESET_LOCKDEP_CLASS = 16, +}; + +typedef int iterate_extent_inodes_t(u64, u64, u64, u64, void *); + +struct btrfs_backref_walk_ctx { + u64 bytenr; + u64 extent_item_pos; + bool ignore_extent_item_pos; + bool skip_inode_ref_list; + struct btrfs_trans_handle *trans; + struct btrfs_fs_info *fs_info; + long: 32; + u64 time_seq; + struct ulist *refs; + struct ulist *roots; + bool (*cache_lookup)(u64, void *, const u64 **, int *); + void (*cache_store)(u64, const struct ulist *, void *); + iterate_extent_inodes_t *indirect_ref_iterator; + int (*check_extent_item)(u64, const struct btrfs_extent_item *, const struct extent_buffer *, void *); + bool (*skip_data_ref)(u64, u64, u64, void *); + void *user_ctx; +}; + +struct inode_fs_paths { + struct btrfs_path *btrfs_path; + struct btrfs_root *fs_root; + struct btrfs_data_container *fspath; +}; + +struct btrfs_backref_shared_cache_entry { + u64 bytenr; + u64 gen; + bool is_shared; + long: 32; +}; + +struct btrfs_backref_share_check_ctx { + struct ulist refs; + long: 32; + u64 curr_leaf_bytenr; + u64 prev_leaf_bytenr; + struct btrfs_backref_shared_cache_entry path_cache_entries[8]; + bool use_path_cache; + long: 32; + struct { + u64 bytenr; + bool is_shared; + long: 32; + } prev_extents_cache[8]; + int prev_extents_cache_slot; + long: 32; +}; + +struct extent_inode_elem; + +struct prelim_ref { + struct rb_node rbnode; + long: 32; + u64 root_id; + struct btrfs_key key_for_search; + u8 level; + int count; + struct extent_inode_elem *inode_list; + long: 32; + u64 parent; + u64 wanted_disk_byte; +}; + +struct extent_inode_elem { + u64 inum; + u64 offset; + u64 num_bytes; + struct extent_inode_elem *next; + long: 32; +}; + +struct btrfs_backref_iter { + u64 bytenr; + struct btrfs_path *path; + struct btrfs_fs_info *fs_info; + struct btrfs_key cur_key; + u32 item_ptr; + u32 cur_ptr; + u32 end_ptr; +}; + +struct btrfs_backref_node { + struct { + struct rb_node rb_node; + long: 32; + u64 bytenr; + }; + u64 new_bytenr; + u64 owner; + struct list_head list; + struct list_head upper; + struct list_head lower; + struct btrfs_root *root; + struct extent_buffer *eb; + unsigned int level: 8; + unsigned int cowonly: 1; + unsigned int lowest: 1; + unsigned int locked: 1; + unsigned int processed: 1; + unsigned int checked: 1; + unsigned int pending: 1; + unsigned int detached: 1; + unsigned int is_reloc_root: 1; + long: 32; +}; + +struct btrfs_backref_edge { + struct list_head list[2]; + struct btrfs_backref_node *node[2]; +}; + +struct btrfs_backref_cache { + struct rb_root rb_root; + struct btrfs_backref_node *path[8]; + struct list_head pending[8]; + struct list_head leaves; + struct list_head changed; + struct list_head detached; + long: 32; + u64 last_trans; + int nr_nodes; + int nr_edges; + struct list_head pending_edge; + struct list_head useless_node; 
+ struct btrfs_fs_info *fs_info; + bool is_reloc; +}; + +enum btrfs_delayed_ref_action { + BTRFS_ADD_DELAYED_REF = 1, + BTRFS_DROP_DELAYED_REF = 2, + BTRFS_ADD_DELAYED_EXTENT = 3, + BTRFS_UPDATE_DELAYED_HEAD = 4, +} __attribute__((mode(byte))); + +struct btrfs_data_ref { + u64 objectid; + u64 offset; +}; + +struct btrfs_tree_ref { + int level; +}; + +struct btrfs_delayed_ref_node { + struct rb_node ref_node; + struct list_head add_list; + long: 32; + u64 bytenr; + u64 num_bytes; + u64 seq; + u64 ref_root; + u64 parent; + refcount_t refs; + int ref_mod; + unsigned int action: 8; + unsigned int type: 8; + long: 32; + union { + struct btrfs_tree_ref tree_ref; + struct btrfs_data_ref data_ref; + }; +}; + +struct btrfs_delayed_extent_op { + struct btrfs_disk_key key; + bool update_key; + bool update_flags; + long: 32; + u64 flags_to_set; +}; + +struct btrfs_delayed_ref_head { + u64 bytenr; + u64 num_bytes; + struct mutex mutex; + refcount_t refs; + spinlock_t lock; + struct rb_root_cached ref_tree; + struct list_head ref_add_list; + struct btrfs_delayed_extent_op *extent_op; + int total_ref_mod; + int ref_mod; + u64 owning_root; + u64 reserved_bytes; + u8 level; + bool must_insert_reserved; + bool is_data; + bool is_system; + bool processing; + bool tracked; +}; + +enum { + ____TRANS_FREEZABLE_BIT = 0, + __TRANS_FREEZABLE = 1, + ____TRANS_FREEZABLE_SEQ = 0, + ____TRANS_START_BIT = 1, + __TRANS_START = 2, + ____TRANS_START_SEQ = 1, + ____TRANS_ATTACH_BIT = 2, + __TRANS_ATTACH = 4, + ____TRANS_ATTACH_SEQ = 2, + ____TRANS_JOIN_BIT = 3, + __TRANS_JOIN = 8, + ____TRANS_JOIN_SEQ = 3, + ____TRANS_JOIN_NOLOCK_BIT = 4, + __TRANS_JOIN_NOLOCK = 16, + ____TRANS_JOIN_NOLOCK_SEQ = 4, + ____TRANS_DUMMY_BIT = 5, + __TRANS_DUMMY = 32, + ____TRANS_DUMMY_SEQ = 5, + ____TRANS_JOIN_NOSTART_BIT = 6, + __TRANS_JOIN_NOSTART = 64, + ____TRANS_JOIN_NOSTART_SEQ = 6, +}; + +struct btrfs_seq_list { + struct list_head list; + u64 seq; +}; + +enum btrfs_trim_state { + BTRFS_TRIM_STATE_UNTRIMMED = 0, + BTRFS_TRIM_STATE_TRIMMED = 1, + BTRFS_TRIM_STATE_TRIMMING = 2, +}; + +struct btrfs_free_space { + struct rb_node offset_index; + struct rb_node bytes_index; + u64 offset; + u64 bytes; + u64 max_extent_size; + long unsigned int *bitmap; + struct list_head list; + enum btrfs_trim_state trim_state; + s32 bitmap_extents; + long: 32; +}; + +enum { + BTRFS_STAT_CURR = 0, + BTRFS_STAT_PREV = 1, + BTRFS_STAT_NR_ENTRIES = 2, +}; + +struct btrfs_free_space_op; + +struct btrfs_free_space_ctl { + spinlock_t tree_lock; + struct rb_root free_space_offset; + struct rb_root_cached free_space_bytes; + u64 free_space; + int extents_thresh; + int free_extents; + int total_bitmaps; + int unit; + u64 start; + s32 discardable_extents[2]; + s64 discardable_bytes[2]; + const struct btrfs_free_space_op *op; + struct btrfs_block_group *block_group; + struct mutex cache_writeout_mutex; + struct list_head trimming_ranges; + long: 32; +}; + +struct btrfs_free_space_op { + bool (*use_bitmap)(struct btrfs_free_space_ctl *, struct btrfs_free_space *); +}; + +struct btrfs_caching_control { + struct list_head list; + struct mutex mutex; + wait_queue_head_t wait; + struct btrfs_work work; + struct btrfs_block_group *block_group; + atomic_t progress; + refcount_t count; +}; + +enum btrfs_inline_ref_type { + BTRFS_REF_TYPE_INVALID = 0, + BTRFS_REF_TYPE_BLOCK = 1, + BTRFS_REF_TYPE_DATA = 2, + BTRFS_REF_TYPE_ANY = 3, +}; + +struct preftree { + struct rb_root_cached root; + unsigned int count; +}; + +struct preftrees { + struct preftree direct; + struct preftree 
indirect; + struct preftree indirect_missing_keys; +}; + +struct share_check { + struct btrfs_backref_share_check_ctx *ctx; + struct btrfs_root *root; + u64 inum; + u64 data_bytenr; + u64 data_extent_gen; + int share_count; + int self_ref_count; + bool have_delayed_delete_refs; + long: 32; +}; + +struct assoc_array_ops { + long unsigned int (*get_key_chunk)(const void *, int); + long unsigned int (*get_object_key_chunk)(const void *, int); + bool (*compare_object)(const void *, const void *); + int (*diff_objects)(const void *, const void *); + void (*free_object)(void *); +}; + +enum key_need_perm { + KEY_NEED_UNSPECIFIED = 0, + KEY_NEED_VIEW = 1, + KEY_NEED_READ = 2, + KEY_NEED_WRITE = 3, + KEY_NEED_SEARCH = 4, + KEY_NEED_LINK = 5, + KEY_NEED_SETATTR = 6, + KEY_NEED_UNLINK = 7, + KEY_SYSADMIN_OVERRIDE = 8, + KEY_AUTHTOKEN_OVERRIDE = 9, + KEY_DEFER_PERM_CHECK = 10, +}; + +struct __key_reference_with_attributes; + +typedef struct __key_reference_with_attributes *key_ref_t; + +enum key_state { + KEY_IS_UNINSTANTIATED = 0, + KEY_IS_POSITIVE = 1, +}; + +struct key_user { + struct rb_node node; + struct mutex cons_lock; + spinlock_t lock; + refcount_t usage; + atomic_t nkeys; + atomic_t nikeys; + kuid_t uid; + int qnkeys; + int qnbytes; +}; + +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[16]; + long unsigned int nr_leaves_on_branch; +}; + +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + long unsigned int index_key[0]; +}; + +struct assoc_array_edit { + struct callback_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[16]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long int adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[17]; +}; + +enum { + TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = 1, + TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = 2, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM = 4, + TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT = 8, + TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM = 16, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT = 32, + __TCA_FLOWER_KEY_FLAGS_MAX = 33, +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL = 0, + FLOW_DISSECTOR_KEY_BASIC = 1, + FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, + FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, + FLOW_DISSECTOR_KEY_PORTS = 4, + FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, + FLOW_DISSECTOR_KEY_ICMP = 6, + FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, + FLOW_DISSECTOR_KEY_TIPC = 8, + FLOW_DISSECTOR_KEY_ARP = 9, + FLOW_DISSECTOR_KEY_VLAN = 10, + FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, + FLOW_DISSECTOR_KEY_GRE_KEYID = 12, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, + FLOW_DISSECTOR_KEY_ENC_KEYID = 14, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, + FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, + FLOW_DISSECTOR_KEY_ENC_PORTS = 18, + FLOW_DISSECTOR_KEY_MPLS = 19, + FLOW_DISSECTOR_KEY_TCP = 20, + FLOW_DISSECTOR_KEY_IP = 21, + FLOW_DISSECTOR_KEY_CVLAN = 22, + FLOW_DISSECTOR_KEY_ENC_IP = 23, + 
FLOW_DISSECTOR_KEY_ENC_OPTS = 24, + FLOW_DISSECTOR_KEY_META = 25, + FLOW_DISSECTOR_KEY_CT = 26, + FLOW_DISSECTOR_KEY_HASH = 27, + FLOW_DISSECTOR_KEY_NUM_OF_VLANS = 28, + FLOW_DISSECTOR_KEY_PPPOE = 29, + FLOW_DISSECTOR_KEY_L2TPV3 = 30, + FLOW_DISSECTOR_KEY_CFM = 31, + FLOW_DISSECTOR_KEY_IPSEC = 32, + FLOW_DISSECTOR_KEY_MAX = 33, +}; + +enum skb_ext_id { + SKB_EXT_BRIDGE_NF = 0, + SKB_EXT_NUM = 1, +}; + +struct key_preparsed_payload { + const char *orig_description; + char *description; + union key_payload payload; + const void *data; + size_t datalen; + size_t quotalen; + long: 32; + time64_t expiry; +}; + +struct key_match_data { + bool (*cmp)(const struct key *, const struct key_match_data *); + const void *raw_data; + void *preparsed; + unsigned int lookup_type; +}; + +enum kernel_pkey_operation { + kernel_pkey_encrypt = 0, + kernel_pkey_decrypt = 1, + kernel_pkey_sign = 2, + kernel_pkey_verify = 3, +}; + +struct kernel_pkey_params { + struct key *key; + const char *encoding; + const char *hash_algo; + char *info; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + enum kernel_pkey_operation op: 8; +}; + +struct kernel_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; +}; + +enum key_notification_subtype { + NOTIFY_KEY_INSTANTIATED = 0, + NOTIFY_KEY_UPDATED = 1, + NOTIFY_KEY_LINKED = 2, + NOTIFY_KEY_UNLINKED = 3, + NOTIFY_KEY_CLEARED = 4, + NOTIFY_KEY_REVOKED = 5, + NOTIFY_KEY_INVALIDATED = 6, + NOTIFY_KEY_SETATTR = 7, +}; + +struct keyring_search_context { + struct keyring_index_key index_key; + const struct cred *cred; + struct key_match_data match_data; + unsigned int flags; + int (*iterator)(const void *, void *); + int skipped_ret; + bool possessed; + key_ref_t result; + long: 32; + time64_t now; +}; + +struct keyring_read_iterator_context { + size_t buflen; + size_t count; + key_serial_t *buffer; +}; + +enum migratetype { + MIGRATE_UNMOVABLE = 0, + MIGRATE_MOVABLE = 1, + MIGRATE_RECLAIMABLE = 2, + MIGRATE_PCPTYPES = 3, + MIGRATE_HIGHATOMIC = 3, + MIGRATE_CMA = 4, + MIGRATE_ISOLATE = 5, + MIGRATE_TYPES = 6, +}; + +enum zone_stat_item { + NR_FREE_PAGES = 0, + NR_ZONE_LRU_BASE = 1, + NR_ZONE_INACTIVE_ANON = 1, + NR_ZONE_ACTIVE_ANON = 2, + NR_ZONE_INACTIVE_FILE = 3, + NR_ZONE_ACTIVE_FILE = 4, + NR_ZONE_UNEVICTABLE = 5, + NR_ZONE_WRITE_PENDING = 6, + NR_MLOCK = 7, + NR_BOUNCE = 8, + NR_FREE_CMA_PAGES = 9, + NR_VM_ZONE_STAT_ITEMS = 10, +}; + +enum vmscan_throttle_state { + VMSCAN_THROTTLE_WRITEBACK = 0, + VMSCAN_THROTTLE_ISOLATED = 1, + VMSCAN_THROTTLE_NOPROGRESS = 2, + VMSCAN_THROTTLE_CONGESTED = 3, + NR_VMSCAN_THROTTLE = 4, +}; + +enum zone_watermarks { + WMARK_MIN = 0, + WMARK_LOW = 1, + WMARK_HIGH = 2, + WMARK_PROMO = 3, + NR_WMARK = 4, +}; + +enum { + ZONELIST_FALLBACK = 0, + MAX_ZONELISTS = 1, +}; + +struct crypto_template; + +struct crypto_spawn; + +struct crypto_instance { + struct crypto_alg alg; + struct crypto_template *tmpl; + union { + struct hlist_node list; + struct crypto_spawn *spawns; + }; + struct work_struct free_work; + long: 32; + void *__ctx[0]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + union { + struct crypto_instance *inst; + struct crypto_spawn *next; + }; + const struct crypto_type *frontend; + u32 mask; + bool dead; + bool registered; +}; + +struct rtattr; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + int (*create)(struct crypto_template *, struct 
rtattr **); + char name[128]; +}; + +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + struct crypto_alg base; +}; + +struct shash_alg { + int (*init)(struct shash_desc *); + int (*update)(struct shash_desc *, const u8 *, unsigned int); + int (*final)(struct shash_desc *, u8 *); + int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*export)(struct shash_desc *, void *); + int (*import)(struct shash_desc *, const void *); + int (*setkey)(struct crypto_shash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_shash *); + void (*exit_tfm)(struct crypto_shash *); + int (*clone_tfm)(struct crypto_shash *, struct crypto_shash *); + unsigned int descsize; + union { + struct { + unsigned int digestsize; + unsigned int statesize; + struct crypto_alg base; + }; + struct hash_alg_common halg; + }; +}; + +enum mod_mem_type { + MOD_TEXT = 0, + MOD_DATA = 1, + MOD_RODATA = 2, + MOD_RO_AFTER_INIT = 3, + MOD_INIT_TEXT = 4, + MOD_INIT_DATA = 5, + MOD_INIT_RODATA = 6, + MOD_MEM_NUM_TYPES = 7, + MOD_INVALID = -1, +}; + +struct sha3_state { + u64 st[25]; + unsigned int rsiz; + unsigned int rsizw; + unsigned int partial; + u8 buf[144]; + long: 32; +}; + +typedef void (*crypto_completion_t)(void *, int); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + u32 flags; +}; + +struct scatterlist; + +struct aead_request { + struct crypto_async_request base; + unsigned int assoclen; + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + long: 32; + void *__ctx[0]; +}; + +struct scatterlist { + long unsigned int page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; + unsigned int dma_length; +}; + +struct crypto_aead; + +struct aead_alg { + int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); + int (*setauthsize)(struct crypto_aead *, unsigned int); + int (*encrypt)(struct aead_request *); + int (*decrypt)(struct aead_request *); + int (*init)(struct crypto_aead *); + void (*exit)(struct crypto_aead *); + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + long: 32; + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct rtattr { + short unsigned int rta_len; + short unsigned int rta_type; +}; + +struct aead_instance { + void (*free)(struct aead_instance *); + long: 32; + union { + struct { + char head[40]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct ahash_request { + struct crypto_async_request base; + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + void *priv; + void *__ctx[0]; +}; + +struct crypto_ahash { + bool using_shash; + unsigned int statesize; + unsigned int reqsize; + long: 32; + struct crypto_tfm base; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct skcipher_request { + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + struct crypto_async_request base; + void *__ctx[0]; +}; + +struct crypto_skcipher { + unsigned int reqsize; + long: 32; + struct crypto_tfm base; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +struct skcipher_alg_common { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int statesize; + 
long: 32; + struct crypto_alg base; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + unsigned int authkeylen; + unsigned int enckeylen; +}; + +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_NR_TABLES = 2, + NEIGH_LINK_TABLE = 2, +}; + +enum { + NAPI_STATE_SCHED = 0, + NAPI_STATE_MISSED = 1, + NAPI_STATE_DISABLE = 2, + NAPI_STATE_NPSVC = 3, + NAPI_STATE_LISTED = 4, + NAPI_STATE_NO_BUSY_POLL = 5, + NAPI_STATE_IN_BUSY_POLL = 6, + NAPI_STATE_PREFER_BUSY_POLL = 7, + NAPI_STATE_THREADED = 8, + NAPI_STATE_SCHED_THREADED = 9, +}; + +enum xps_map_type { + XPS_CPUS = 0, + XPS_RXQS = 1, + XPS_MAPS_MAX = 2, +}; + +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE = 3, +}; + +enum { + NETIF_MSG_DRV_BIT = 0, + NETIF_MSG_PROBE_BIT = 1, + NETIF_MSG_LINK_BIT = 2, + NETIF_MSG_TIMER_BIT = 3, + NETIF_MSG_IFDOWN_BIT = 4, + NETIF_MSG_IFUP_BIT = 5, + NETIF_MSG_RX_ERR_BIT = 6, + NETIF_MSG_TX_ERR_BIT = 7, + NETIF_MSG_TX_QUEUED_BIT = 8, + NETIF_MSG_INTR_BIT = 9, + NETIF_MSG_TX_DONE_BIT = 10, + NETIF_MSG_RX_STATUS_BIT = 11, + NETIF_MSG_PKTDATA_BIT = 12, + NETIF_MSG_HW_BIT = 13, + NETIF_MSG_WOL_BIT = 14, + NETIF_MSG_CLASS_COUNT = 15, +}; + +struct authenc_esn_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; +}; + +struct crypto_authenc_esn_ctx { + unsigned int reqoff; + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_esn_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; +}; + +typedef void (*rcu_callback_t)(struct callback_head *); + +enum rseq_cs_flags_bit { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +enum wq_flags { + WQ_BH = 1, + WQ_UNBOUND = 2, + WQ_FREEZABLE = 4, + WQ_MEM_RECLAIM = 8, + WQ_HIGHPRI = 16, + WQ_CPU_INTENSIVE = 32, + WQ_SYSFS = 64, + WQ_POWER_EFFICIENT = 128, + __WQ_DESTROYING = 32768, + __WQ_DRAINING = 65536, + __WQ_ORDERED = 131072, + __WQ_LEGACY = 262144, + __WQ_BH_ALLOWS = 17, +}; + +typedef struct {} local_lock_t; + +struct xa_node { + unsigned char shift; + unsigned char offset; + unsigned char count; + unsigned char nr_values; + struct xa_node *parent; + struct xarray *array; + union { + struct list_head private_list; + struct callback_head callback_head; + }; + void *slots[64]; + union { + long unsigned int tags[6]; + long unsigned int marks[6]; + }; +}; + +struct radix_tree_preload { + local_lock_t lock; + unsigned int nr; + struct xa_node *nodes; +}; + +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0, + HRTIMER_MODE_REL = 1, + HRTIMER_MODE_PINNED = 2, + HRTIMER_MODE_SOFT = 4, + HRTIMER_MODE_HARD = 8, + HRTIMER_MODE_ABS_PINNED = 2, + HRTIMER_MODE_REL_PINNED = 3, + HRTIMER_MODE_ABS_SOFT = 4, + HRTIMER_MODE_REL_SOFT = 5, + HRTIMER_MODE_ABS_PINNED_SOFT = 6, + HRTIMER_MODE_REL_PINNED_SOFT = 7, + HRTIMER_MODE_ABS_HARD = 8, + HRTIMER_MODE_REL_HARD = 9, + HRTIMER_MODE_ABS_PINNED_HARD = 10, + HRTIMER_MODE_REL_PINNED_HARD = 11, +}; + +struct klist_node; + +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +}; + +struct klist_node { + void *n_klist; + struct list_head n_node; + struct kref n_ref; +}; + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + +struct subsys_private; + +struct class_dev_iter { 
+ struct klist_iter ki; + const struct device_type *type; + struct subsys_private *sp; +}; + +struct disk_stats { + u64 nsecs[4]; + long unsigned int sectors[4]; + long unsigned int ios[4]; + long unsigned int merges[4]; + long unsigned int io_ticks; + local_t in_flight[2]; + long: 32; +}; + +struct blkg_iostat { + u64 bytes[3]; + u64 ios[3]; +}; + +struct blkg_iostat_set { + struct u64_stats_sync sync; + struct blkcg_gq *blkg; + struct llist_node lnode; + int lqueued; + struct blkg_iostat cur; + struct blkg_iostat last; +}; + +struct blkcg; + +struct blkg_policy_data; + +struct blkcg_gq { + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + struct blkcg_gq *parent; + struct percpu_ref refcnt; + bool online; + struct blkg_iostat_set *iostat_cpu; + long: 32; + struct blkg_iostat_set iostat; + struct blkg_policy_data *pd[6]; + spinlock_t async_bio_lock; + struct bio_list async_bios; + union { + struct work_struct async_bio_work; + struct work_struct free_work; + }; + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; + struct callback_head callback_head; + long: 32; +}; + +enum { + BIO_PAGE_PINNED = 0, + BIO_CLONED = 1, + BIO_BOUNCED = 2, + BIO_QUIET = 3, + BIO_CHAIN = 4, + BIO_REFFED = 5, + BIO_BPS_THROTTLED = 6, + BIO_TRACE_COMPLETION = 7, + BIO_CGROUP_ACCT = 8, + BIO_QOS_THROTTLED = 9, + BIO_QOS_MERGED = 10, + BIO_REMAPPED = 11, + BIO_ZONE_WRITE_PLUGGING = 12, + BIO_EMULATES_ZONE_APPEND = 13, + BIO_FLAG_LAST = 14, +}; + +typedef __u32 blk_mq_req_flags_t; + +enum stat_group { + STAT_READ = 0, + STAT_WRITE = 1, + STAT_DISCARD = 2, + STAT_FLUSH = 3, + NR_STAT_GROUPS = 4, +}; + +struct sbitmap_word { + long unsigned int word; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int cleared; + raw_spinlock_t swap_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sbitmap { + unsigned int depth; + unsigned int shift; + unsigned int map_nr; + bool round_robin; + struct sbitmap_word *map; + unsigned int *alloc_hint; +}; + +struct sbq_wait_state { + wait_queue_head_t wait; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sbitmap_queue { + struct sbitmap sb; + unsigned int wake_batch; + atomic_t wake_index; + struct sbq_wait_state *ws; + atomic_t ws_active; + unsigned int min_shallow_depth; + atomic_t completion_cnt; + atomic_t wakeup_cnt; +}; + +typedef __u32 req_flags_t; + +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +enum rq_end_io_ret { + RQ_END_IO_NONE = 0, + RQ_END_IO_FREE = 1, +}; + +typedef enum rq_end_io_ret rq_end_io_fn(struct request *, blk_status_t); + +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + int tag; + int internal_tag; + unsigned int timeout; + unsigned int __data_len; + long: 32; + sector_t __sector; + struct bio *bio; + struct bio *biotail; + union { + struct list_head queuelist; + struct request *rq_next; + }; + struct block_device *part; + long: 32; + u64 alloc_time_ns; + u64 start_time_ns; + u64 io_start_time_ns; + short unsigned int wbt_flags; + short unsigned int stats_sectors; + short unsigned int nr_phys_segments; + short unsigned int nr_integrity_segments; + enum mq_rq_state state; + atomic_t ref; + long unsigned int deadline; + union { + struct hlist_node hash; + struct llist_node ipi_list; + }; + 
union { + struct rb_node rb_node; + struct bio_vec special_vec; + }; + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + struct { + unsigned int seq; + rq_end_io_fn *saved_end_io; + } flush; + long: 32; + u64 fifo_time; + rq_end_io_fn *end_io; + void *end_io_data; +}; + +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + unsigned int active_queues; + struct sbitmap_queue bitmap_tags; + struct sbitmap_queue breserved_tags; + struct request **rqs; + struct request **static_rqs; + struct list_head page_list; + spinlock_t lock; +}; + +struct blk_flush_queue { + spinlock_t mq_flush_lock; + unsigned int flush_pending_idx: 1; + unsigned int flush_running_idx: 1; + blk_status_t rq_status; + long unsigned int flush_pending_since; + struct list_head flush_queue[2]; + long unsigned int flush_data_in_flight; + struct request *flush_rq; +}; + +struct blk_mq_queue_map { + unsigned int *mq_map; + unsigned int nr_queues; + unsigned int queue_offset; +}; + +struct blk_mq_tag_set { + const struct blk_mq_ops *ops; + struct blk_mq_queue_map map[3]; + unsigned int nr_maps; + unsigned int nr_hw_queues; + unsigned int queue_depth; + unsigned int reserved_tags; + unsigned int cmd_size; + int numa_node; + unsigned int timeout; + unsigned int flags; + void *driver_data; + struct blk_mq_tags **tags; + struct blk_mq_tags *shared_tags; + struct mutex tag_list_lock; + struct list_head tag_list; + struct srcu_struct *srcu; +}; + +enum { + QUEUE_FLAG_DYING = 0, + QUEUE_FLAG_NOMERGES = 1, + QUEUE_FLAG_SAME_COMP = 2, + QUEUE_FLAG_FAIL_IO = 3, + QUEUE_FLAG_NOXMERGES = 4, + QUEUE_FLAG_SAME_FORCE = 5, + QUEUE_FLAG_INIT_DONE = 6, + QUEUE_FLAG_STATS = 7, + QUEUE_FLAG_REGISTERED = 8, + QUEUE_FLAG_QUIESCED = 9, + QUEUE_FLAG_RQ_ALLOC_TIME = 10, + QUEUE_FLAG_HCTX_ACTIVE = 11, + QUEUE_FLAG_SQ_SCHED = 12, + QUEUE_FLAG_MAX = 13, +}; + +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = 0, + RSEQ_EVENT_SIGNAL_BIT = 1, + RSEQ_EVENT_MIGRATE_BIT = 2, +}; + +struct blk_mq_hw_ctx { + struct { + spinlock_t lock; + struct list_head dispatch; + long unsigned int state; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct delayed_work run_work; + cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; + long unsigned int flags; + void *sched_data; + struct request_queue *queue; + struct blk_flush_queue *fq; + void *driver_data; + struct sbitmap ctx_map; + struct blk_mq_ctx *dispatch_from; + unsigned int dispatch_busy; + short unsigned int type; + short unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + spinlock_t dispatch_wait_lock; + wait_queue_entry_t dispatch_wait; + atomic_t wait_index; + struct blk_mq_tags *tags; + struct blk_mq_tags *sched_tags; + unsigned int numa_node; + unsigned int queue_num; + atomic_t nr_active; + struct hlist_node cpuhp_online; + struct hlist_node cpuhp_dead; + struct kobject kobj; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct list_head hctx_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum hctx_type { + HCTX_TYPE_DEFAULT = 0, + HCTX_TYPE_READ = 1, + HCTX_TYPE_POLL = 2, + HCTX_MAX_TYPES = 3, +}; + +struct blk_mq_queue_data { + struct request *rq; + bool last; +}; + +enum blkg_iostat_type { + BLKG_IOSTAT_READ = 0, + BLKG_IOSTAT_WRITE = 1, + BLKG_IOSTAT_DISCARD = 2, + BLKG_IOSTAT_NR = 3, +}; + +struct blkcg_policy_data; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + refcount_t online_pin; + atomic_t congestion_count; + struct xarray blkg_tree; + struct blkcg_gq 
*blkg_hint; + struct hlist_head blkg_list; + struct blkcg_policy_data *cpd[6]; + struct list_head all_blkcgs_node; + struct llist_head *lhead; + struct list_head cgwb_list; + long: 32; +}; + +struct blkg_policy_data { + struct blkcg_gq *blkg; + int plid; + bool online; +}; + +struct blkcg_policy_data { + struct blkcg *blkcg; + int plid; +}; + +typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); + +typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); + +typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(struct gendisk *, struct blkcg *, gfp_t); + +typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_stat_pd_fn(struct blkg_policy_data *, struct seq_file *); + +struct blkcg_policy { + int plid; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +struct blkg_conf_ctx { + char *input; + char *body; + struct block_device *bdev; + struct blkcg_gq *blkg; +}; + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ = 0, + BLKG_RWSTAT_WRITE = 1, + BLKG_RWSTAT_SYNC = 2, + BLKG_RWSTAT_ASYNC = 3, + BLKG_RWSTAT_DISCARD = 4, + BLKG_RWSTAT_NR = 5, + BLKG_RWSTAT_TOTAL = 5, +}; + +struct io_ring_ctx; + +struct io_wq; + +struct io_uring_task { + int cached_refs; + const struct io_ring_ctx *last; + struct task_struct *task; + struct io_wq *io_wq; + struct file *registered_rings[16]; + struct xarray xa; + struct wait_queue_head wait; + atomic_t in_cancel; + atomic_t inflight_tracked; + struct percpu_counter inflight; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct { + struct llist_head task_list; + struct callback_head task_work; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; +}; + +typedef int __kernel_rwf_t; + +struct io_uring_sqe { + __u8 opcode; + __u8 flags; + __u16 ioprio; + __s32 fd; + union { + __u64 off; + __u64 addr2; + struct { + __u32 cmd_op; + __u32 __pad1; + }; + }; + union { + __u64 addr; + __u64 splice_off_in; + struct { + __u32 level; + __u32 optname; + }; + }; + __u32 len; + union { + __kernel_rwf_t rw_flags; + __u32 fsync_flags; + __u16 poll_events; + __u32 poll32_events; + __u32 sync_range_flags; + __u32 msg_flags; + __u32 timeout_flags; + __u32 accept_flags; + __u32 cancel_flags; + __u32 open_flags; + __u32 statx_flags; + __u32 fadvise_advice; + __u32 splice_flags; + __u32 rename_flags; + __u32 unlink_flags; + __u32 hardlink_flags; + __u32 xattr_flags; + __u32 msg_ring_flags; + __u32 uring_cmd_flags; + __u32 waitid_flags; + __u32 futex_flags; + __u32 install_fd_flags; + __u32 nop_flags; + }; + __u64 user_data; + union { + __u16 buf_index; + __u16 buf_group; + }; + __u16 personality; + union { + __s32 splice_fd_in; + __u32 file_index; + __u32 optlen; + struct { + __u16 addr_len; + __u16 __pad3[1]; + }; + }; + union { + struct { + __u64 addr3; + __u64 __pad2[1]; + }; + __u64 optval; + __u8 cmd[0]; + }; +}; + +struct io_uring_cqe { + __u64 user_data; + __s32 
res; + __u32 flags; + __u64 big_cqe[0]; +}; + +struct epoll_event { + __poll_t events; + long: 32; + __u64 data; +}; + +enum task_work_notify_mode { + TWA_NONE = 0, + TWA_RESUME = 1, + TWA_SIGNAL = 2, + TWA_SIGNAL_NO_IPI = 3, + TWA_NMI_CURRENT = 4, + TWA_FLAGS = 65280, + TWAF_NO_ALLOC = 256, +}; + +enum io_uring_cmd_flags { + IO_URING_F_COMPLETE_DEFER = 1, + IO_URING_F_UNLOCKED = 2, + IO_URING_F_MULTISHOT = 4, + IO_URING_F_IOWQ = 8, + IO_URING_F_NONBLOCK = -2147483648, + IO_URING_F_SQE128 = 256, + IO_URING_F_CQE32 = 512, + IO_URING_F_IOPOLL = 1024, + IO_URING_F_CANCEL = 2048, + IO_URING_F_COMPAT = 4096, + IO_URING_F_TASK_DEAD = 8192, +}; + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +struct io_wq_work { + struct io_wq_work_node list; + atomic_t flags; + int cancel_seq; +}; + +struct io_rsrc_node; + +struct io_rsrc_data { + unsigned int nr; + struct io_rsrc_node **nodes; +}; + +struct io_mapped_ubuf; + +struct io_rsrc_node { + unsigned char type; + int refs; + u64 tag; + union { + long unsigned int file_ptr; + struct io_mapped_ubuf *buf; + }; + long: 32; +}; + +struct io_file_table { + struct io_rsrc_data data; + long unsigned int *bitmap; + unsigned int alloc_hint; +}; + +struct io_hash_bucket { + struct hlist_head list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct io_hash_table { + struct io_hash_bucket *hbs; + unsigned int hash_bits; +}; + +struct io_mapped_region { + struct page **pages; + void *vmap_ptr; + size_t nr_pages; +}; + +struct io_kiocb; + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + struct io_wq_work_node free_list; + struct io_wq_work_list compl_reqs; + struct io_submit_link link; + bool plug_started; + bool need_plug; + bool cq_flush; + short unsigned int submit_nr; + long: 32; + struct blk_plug plug; +}; + +struct io_alloc_cache { + void **entries; + unsigned int nr_cached; + unsigned int max_cached; + size_t elem_size; +}; + +struct io_restriction { + long unsigned int register_op[2]; + long unsigned int sqe_op[2]; + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_rings; + +struct io_ev_fd; + +struct io_sq_data; + +struct io_wq_hash; + +struct io_ring_ctx { + struct { + unsigned int flags; + unsigned int drain_next: 1; + unsigned int restricted: 1; + unsigned int off_timeout_used: 1; + unsigned int drain_active: 1; + unsigned int has_evfd: 1; + unsigned int task_complete: 1; + unsigned int lockless_cq: 1; + unsigned int syscall_iopoll: 1; + unsigned int poll_activated: 1; + unsigned int drain_disabled: 1; + unsigned int compat: 1; + unsigned int iowq_limits_set: 1; + struct task_struct *submitter_task; + struct io_rings *rings; + struct percpu_ref refs; + clockid_t clockid; + enum tk_offsets clock_offset; + enum task_work_notify_mode notify_method; + unsigned int sq_thread_idle; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + struct mutex uring_lock; + u32 *sq_array; + struct io_uring_sqe *sq_sqes; + unsigned int cached_sq_head; + unsigned int sq_entries; + atomic_t cancel_seq; + bool poll_multi_queue; + struct io_wq_work_list iopoll_list; + struct io_file_table file_table; + struct io_rsrc_data buf_table; + long: 32; + struct io_submit_state submit_state; + struct xarray io_bl_xa; + struct io_hash_table cancel_table; + struct io_alloc_cache apoll_cache; + struct 
io_alloc_cache netmsg_cache; + struct io_alloc_cache rw_cache; + struct io_alloc_cache uring_cache; + struct hlist_head cancelable_uring_cmd; + u64 hybrid_poll_time; + long: 32; + long: 32; + }; + struct { + struct io_uring_cqe *cqe_cached; + struct io_uring_cqe *cqe_sentinel; + unsigned int cached_cq_tail; + unsigned int cq_entries; + struct io_ev_fd *io_ev_fd; + unsigned int cq_extra; + void *cq_wait_arg; + size_t cq_wait_size; + }; + struct { + struct llist_head work_llist; + struct llist_head retry_llist; + long unsigned int check_cq; + atomic_t cq_wait_nr; + atomic_t cq_timeouts; + struct wait_queue_head cq_wait; + }; + struct { + spinlock_t timeout_lock; + struct list_head timeout_list; + struct list_head ltimeout_list; + unsigned int cq_last_tm_flush; + long: 32; + long: 32; + }; + spinlock_t completion_lock; + struct list_head io_buffers_comp; + struct list_head cq_overflow_list; + struct hlist_head waitid_list; + struct hlist_head futex_list; + struct io_alloc_cache futex_cache; + const struct cred *sq_creds; + struct io_sq_data *sq_data; + struct wait_queue_head sqo_sq_wait; + struct list_head sqd_list; + unsigned int file_alloc_start; + unsigned int file_alloc_end; + struct list_head io_buffers_cache; + struct wait_queue_head poll_wq; + struct io_restriction restrictions; + u32 pers_next; + struct xarray personalities; + struct io_wq_hash *hash_map; + struct user_struct *user; + struct mm_struct *mm_account; + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + u32 iowq_limits[2]; + struct callback_head poll_wq_task_work; + struct list_head defer_list; + struct io_alloc_cache msg_cache; + spinlock_t msg_lock; + struct list_head napi_list; + spinlock_t napi_lock; + long: 32; + ktime_t napi_busy_poll_dt; + bool napi_prefer_busy_poll; + u8 napi_track_mode; + struct hlist_head napi_ht[16]; + unsigned int evfd_last_cq_tail; + struct mutex resize_lock; + short unsigned int n_ring_pages; + short unsigned int n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; + struct io_mapped_region param_region; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct io_uring { + u32 head; + u32 tail; +}; + +struct io_rings { + struct io_uring sq; + struct io_uring cq; + u32 sq_ring_mask; + u32 cq_ring_mask; + u32 sq_ring_entries; + u32 cq_ring_entries; + u32 sq_dropped; + atomic_t sq_flags; + u32 cq_flags; + u32 cq_overflow; + long: 32; + long: 32; + long: 32; + long: 32; + struct io_uring_cqe cqes[0]; +}; + +struct io_cmd_data { + struct file *file; + __u8 data[56]; +}; + +typedef u64 io_req_flags_t; + +struct io_cqe { + __u64 user_data; + __s32 res; + union { + __u32 flags; + int fd; + }; +}; + +struct io_tw_state; + +typedef void (*io_req_tw_func_t)(struct io_kiocb *, struct io_tw_state *); + +struct io_task_work { + struct llist_node node; + io_req_tw_func_t func; +}; + +struct io_buffer; + +struct io_buffer_list; + +struct async_poll; + +struct io_kiocb { + union { + struct file *file; + struct io_cmd_data cmd; + }; + u8 opcode; + u8 iopoll_completed; + u16 buf_index; + unsigned int nr_tw; + long: 32; + io_req_flags_t flags; + struct io_cqe cqe; + struct io_ring_ctx *ctx; + struct io_uring_task *tctx; + union { + struct io_buffer *kbuf; + struct io_buffer_list *buf_list; + struct io_rsrc_node *buf_node; + }; + union { + struct io_wq_work_node comp_list; + __poll_t apoll_events; + }; + struct io_rsrc_node *file_node; + 
atomic_t refs; + bool cancel_seq_set; + struct io_task_work io_task_work; + long: 32; + union { + struct hlist_node hash_node; + u64 iopoll_start; + }; + struct async_poll *apoll; + void *async_data; + atomic_t poll_refs; + struct io_kiocb *link; + const struct cred *creds; + struct io_wq_work work; + struct { + u64 extra1; + u64 extra2; + } big_cqe; +}; + +struct io_wq_hash { + refcount_t refs; + long unsigned int map; + struct wait_queue_head wait; +}; + +struct io_tw_state {}; + +enum { + REQ_F_FIXED_FILE = 1ULL, + REQ_F_IO_DRAIN = 2ULL, + REQ_F_LINK = 4ULL, + REQ_F_HARDLINK = 8ULL, + REQ_F_FORCE_ASYNC = 16ULL, + REQ_F_BUFFER_SELECT = 32ULL, + REQ_F_CQE_SKIP = 64ULL, + REQ_F_FAIL = 256ULL, + REQ_F_INFLIGHT = 512ULL, + REQ_F_CUR_POS = 1024ULL, + REQ_F_NOWAIT = 2048ULL, + REQ_F_LINK_TIMEOUT = 4096ULL, + REQ_F_NEED_CLEANUP = 8192ULL, + REQ_F_POLLED = 16384ULL, + REQ_F_IOPOLL_STATE = 32768ULL, + REQ_F_BUFFER_SELECTED = 65536ULL, + REQ_F_BUFFER_RING = 131072ULL, + REQ_F_REISSUE = 262144ULL, + REQ_F_SUPPORT_NOWAIT = 268435456ULL, + REQ_F_ISREG = 536870912ULL, + REQ_F_CREDS = 524288ULL, + REQ_F_REFCOUNT = 1048576ULL, + REQ_F_ARM_LTIMEOUT = 2097152ULL, + REQ_F_ASYNC_DATA = 4194304ULL, + REQ_F_SKIP_LINK_CQES = 8388608ULL, + REQ_F_SINGLE_POLL = 16777216ULL, + REQ_F_DOUBLE_POLL = 33554432ULL, + REQ_F_APOLL_MULTISHOT = 67108864ULL, + REQ_F_CLEAR_POLLIN = 134217728ULL, + REQ_F_POLL_NO_LAZY = 1073741824ULL, + REQ_F_CAN_POLL = 2147483648ULL, + REQ_F_BL_EMPTY = 4294967296ULL, + REQ_F_BL_NO_RECYCLE = 8589934592ULL, + REQ_F_BUFFERS_COMMIT = 17179869184ULL, + REQ_F_BUF_NODE = 34359738368ULL, +}; + +struct io_mapped_ubuf { + u64 ubuf; + unsigned int len; + unsigned int nr_bvecs; + unsigned int folio_shift; + refcount_t refs; + long unsigned int acct_pages; + struct bio_vec bvec[0]; + long: 32; +}; + +enum { + IOU_OK = 0, + IOU_ISSUE_SKIP_COMPLETE = -529, + IOU_REQUEUE = -3072, + IOU_STOP_MULTISHOT = -125, +}; + +struct io_epoll { + struct file *file; + int epfd; + int op; + int fd; + struct epoll_event event; +}; + +typedef long unsigned int mpi_limb_t; + +typedef long int mpi_limb_signed_t; + +struct gcry_mpi { + int alloced; + int nlimbs; + int nbits; + int sign; + unsigned int flags; + mpi_limb_t *d; +}; + +typedef struct gcry_mpi *MPI; + +typedef mpi_limb_t *mpi_ptr_t; + +typedef int mpi_size_t; + +struct karatsuba_ctx { + struct karatsuba_ctx *next; + mpi_ptr_t tspace; + mpi_size_t tspace_size; + mpi_ptr_t tp; + mpi_size_t tp_size; +}; + +struct raid6_calls { + void (*gen_syndrome)(int, size_t, void **); + void (*xor_syndrome)(int, int, int, size_t, void **); + int (*valid)(); + const char *name; + int priority; +}; + +struct raid6_recov_calls { + void (*data2)(int, size_t, int, int, void **); + void (*datap)(int, size_t, int, void **); + int (*valid)(); + const char *name; + int priority; +}; + +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *, struct device_attribute *, char *); + ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); +}; + +struct arch_msi_msg_addr_lo { + u32 address_lo; +}; + +typedef struct arch_msi_msg_addr_lo arch_msi_msg_addr_lo_t; + +struct arch_msi_msg_addr_hi { + u32 address_hi; +}; + +typedef struct arch_msi_msg_addr_hi arch_msi_msg_addr_hi_t; + +struct arch_msi_msg_data { + u32 data; +}; + +typedef struct arch_msi_msg_data arch_msi_msg_data_t; + +struct msi_msg { + union { + u32 address_lo; + arch_msi_msg_addr_lo_t arch_addr_lo; + }; + union { + u32 address_hi; + arch_msi_msg_addr_hi_t arch_addr_hi; + }; + 
union { + u32 data; + arch_msi_msg_data_t arch_data; + }; +}; + +struct pci_msi_desc { + union { + u32 msi_mask; + u32 msix_ctrl; + }; + struct { + u8 is_msix: 1; + u8 multiple: 3; + u8 multi_cap: 3; + u8 can_mask: 1; + u8 is_64: 1; + u8 is_virtual: 1; + unsigned int default_irq; + } msi_attrib; + union { + u8 mask_pos; + void *mask_base; + }; +}; + +union msi_domain_cookie { + u64 value; + void *ptr; + void *iobase; +}; + +union msi_instance_cookie { + u64 value; + void *ptr; +}; + +struct msi_desc_data { + union msi_domain_cookie dcookie; + union msi_instance_cookie icookie; +}; + +struct msi_desc { + unsigned int irq; + unsigned int nvec_used; + struct device *dev; + struct msi_msg msg; + struct irq_affinity_desc *affinity; + struct device_attribute *sysfs_attrs; + void (*write_msi_msg)(struct msi_desc *, void *); + void *write_msi_msg_data; + u16 msi_index; + long: 32; + union { + struct pci_msi_desc pci; + struct msi_desc_data data; + }; +}; + +enum msi_domain_ids { + MSI_DEFAULT_DOMAIN = 0, + MSI_MAX_DEVICE_IRQDOMAINS = 1, +}; + +struct msi_map { + int index; + int virq; +}; + +struct msix_entry { + u32 vector; + u16 entry; +}; + +enum { + MSI_FLAG_USE_DEF_DOM_OPS = 1, + MSI_FLAG_USE_DEF_CHIP_OPS = 2, + MSI_FLAG_ACTIVATE_EARLY = 4, + MSI_FLAG_MUST_REACTIVATE = 8, + MSI_FLAG_DEV_SYSFS = 16, + MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = 32, + MSI_FLAG_FREE_MSI_DESCS = 64, + MSI_FLAG_USE_DEV_FWNODE = 128, + MSI_FLAG_PARENT_PM_DEV = 256, + MSI_FLAG_PCI_MSI_MASK_PARENT = 512, + MSI_GENERIC_FLAGS_MASK = 65535, + MSI_DOMAIN_FLAGS_MASK = 4294901760, + MSI_FLAG_MULTI_PCI_MSI = 65536, + MSI_FLAG_PCI_MSIX = 131072, + MSI_FLAG_LEVEL_CAPABLE = 262144, + MSI_FLAG_MSIX_CONTIGUOUS = 524288, + MSI_FLAG_PCI_MSIX_ALLOC_DYN = 1048576, + MSI_FLAG_NO_AFFINITY = 2097152, +}; + +enum support_mode { + ALLOW_LEGACY = 0, + DENY_LEGACY = 1, +}; + +enum vesa_blank_mode { + VESA_NO_BLANKING = 0, + VESA_VSYNC_SUSPEND = 1, + VESA_HSYNC_SUSPEND = 2, + VESA_POWERDOWN = 3, + VESA_BLANK_MAX = 3, +}; + +typedef unsigned int uint; + +typedef struct mutex *class_mutex_t; + +enum kobject_action { + KOBJ_ADD = 0, + KOBJ_REMOVE = 1, + KOBJ_CHANGE = 2, + KOBJ_MOVE = 3, + KOBJ_ONLINE = 4, + KOBJ_OFFLINE = 5, + KOBJ_BIND = 6, + KOBJ_UNBIND = 7, +}; + +typedef int (*device_match_t)(struct device *, const void *); + +struct serial_icounter_struct { + int cts; + int dsr; + int rng; + int dcd; + int rx; + int tx; + int frame; + int overrun; + int parity; + int brk; + int buf_overrun; + int reserved[9]; +}; + +struct serial_struct { + int type; + int line; + unsigned int port; + int irq; + int flags; + int xmit_fifo_size; + int custom_divisor; + int baud_base; + short unsigned int close_delay; + char io_type; + char reserved_char[1]; + int hub6; + short unsigned int closing_wait; + short unsigned int closing_wait2; + unsigned char *iomem_base; + short unsigned int iomem_reg_shift; + unsigned int port_high; + long unsigned int iomap_base; +}; + +struct tty_file_private { + struct tty_struct *tty; + struct file *file; + struct list_head list; +}; + +enum cons_flags { + CON_PRINTBUFFER = 1, + CON_CONSDEV = 2, + CON_ENABLED = 4, + CON_BOOT = 8, + CON_ANYTIME = 16, + CON_BRL = 32, + CON_EXTENDED = 64, + CON_SUSPENDED = 128, + CON_NBCON = 256, +}; + +enum nbcon_prio { + NBCON_PRIO_NONE = 0, + NBCON_PRIO_NORMAL = 1, + NBCON_PRIO_EMERGENCY = 2, + NBCON_PRIO_PANIC = 3, + NBCON_PRIO_MAX = 4, +}; + +struct console; + +struct printk_buffers; + +struct nbcon_context { + struct console *console; + unsigned int spinwait_max_us; + enum nbcon_prio prio; + 
unsigned int allow_unsafe_takeover: 1; + unsigned int backlog: 1; + struct printk_buffers *pbufs; + long: 32; + u64 seq; +}; + +struct nbcon_write_context; + +struct console { + char name[16]; + void (*write)(struct console *, const char *, unsigned int); + int (*read)(struct console *, char *, unsigned int); + struct tty_driver * (*device)(struct console *, int *); + void (*unblank)(); + int (*setup)(struct console *, char *); + int (*exit)(struct console *); + int (*match)(struct console *, char *, int, char *); + short int flags; + short int index; + int cflag; + uint ispeed; + uint ospeed; + long: 32; + u64 seq; + long unsigned int dropped; + void *data; + struct hlist_node node; + void (*write_atomic)(struct console *, struct nbcon_write_context *); + void (*write_thread)(struct console *, struct nbcon_write_context *); + void (*device_lock)(struct console *, long unsigned int *); + void (*device_unlock)(struct console *, long unsigned int); + atomic_t nbcon_state; + atomic_long_t nbcon_seq; + struct nbcon_context nbcon_device_ctxt; + atomic_long_t nbcon_prev_seq; + struct printk_buffers *pbufs; + struct task_struct *kthread; + struct rcuwait rcuwait; + struct irq_work irq_work; +}; + +struct nbcon_write_context { + struct nbcon_context ctxt; + char *outbuf; + unsigned int len; + bool unsafe_takeover; + long: 32; +}; + +typedef struct poll_table_struct poll_table; + +struct driver_private { + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + struct module_kobject *mkobj; + struct device_driver *driver; +}; + +struct device_private { + struct klist klist_children; + struct klist_node knode_parent; + struct klist_node knode_driver; + struct klist_node knode_bus; + struct klist_node knode_class; + struct list_head deferred_probe; + const struct device_driver *async_driver; + char *deferred_probe_reason; + struct device *device; + u8 dead: 1; +}; + +struct req { + struct req *next; + struct completion done; + int err; + const char *name; + umode_t mode; + kuid_t uid; + kgid_t gid; + struct device *dev; +}; + +enum node_states { + N_POSSIBLE = 0, + N_ONLINE = 1, + N_NORMAL_MEMORY = 2, + N_HIGH_MEMORY = 2, + N_MEMORY = 3, + N_CPU = 4, + N_GENERIC_INITIATOR = 5, + NR_NODE_STATES = 6, +}; + +struct execute_work { + struct work_struct work; +}; + +enum vm_fault_reason { + VM_FAULT_OOM = 1, + VM_FAULT_SIGBUS = 2, + VM_FAULT_MAJOR = 4, + VM_FAULT_HWPOISON = 16, + VM_FAULT_HWPOISON_LARGE = 32, + VM_FAULT_SIGSEGV = 64, + VM_FAULT_NOPAGE = 256, + VM_FAULT_LOCKED = 512, + VM_FAULT_RETRY = 1024, + VM_FAULT_FALLBACK = 2048, + VM_FAULT_DONE_COW = 4096, + VM_FAULT_NEEDDSYNC = 8192, + VM_FAULT_COMPLETED = 16384, + VM_FAULT_HINDEX_MASK = 983040, +}; + +struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +}; + +typedef s32 compat_int_t; + +typedef u32 compat_uint_t; + +typedef u32 compat_uptr_t; + +struct class_interface { + struct list_head node; + const struct class *class; + int (*add_dev)(struct device *); + void (*remove_dev)(struct device *); +}; + +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct rchan; + +struct blk_trace { + int trace_state; + struct rchan *rchan; + long unsigned int *sequence; + unsigned char *msg_data; + u16 act_mask; + long: 32; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct list_head running_list; + atomic_t dropped; +}; + +struct sg_table { + struct scatterlist *sgl; 
+ unsigned int nents; + unsigned int orig_nents; +}; + +struct rq_map_data { + struct page **pages; + long unsigned int offset; + short unsigned int page_order; + short unsigned int nr_entries; + bool null_mapped; + bool from_user; +}; + +struct rchan_buf { + void *start; + void *data; + size_t offset; + size_t subbufs_produced; + size_t subbufs_consumed; + struct rchan *chan; + wait_queue_head_t read_wait; + struct irq_work wakeup_work; + struct dentry *dentry; + struct kref kref; + struct page **page_array; + unsigned int page_count; + unsigned int finalized; + size_t *padding; + size_t prev_padding; + size_t bytes_consumed; + size_t early_bytes; + unsigned int cpu; + long: 32; +}; + +struct rchan_callbacks; + +struct rchan { + u32 version; + size_t subbuf_size; + size_t n_subbufs; + size_t alloc_size; + const struct rchan_callbacks *cb; + struct kref kref; + void *private_data; + size_t last_toobig; + struct rchan_buf **buf; + int is_global; + struct list_head list; + struct dentry *parent; + int has_base_filename; + char base_filename[255]; +}; + +struct rchan_callbacks { + int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t); + struct dentry * (*create_buf_file)(const char *, struct dentry *, umode_t, struct rchan_buf *, int *); + int (*remove_buf_file)(struct dentry *); +}; + +enum blktrace_act { + __BLK_TA_QUEUE = 1, + __BLK_TA_BACKMERGE = 2, + __BLK_TA_FRONTMERGE = 3, + __BLK_TA_GETRQ = 4, + __BLK_TA_SLEEPRQ = 5, + __BLK_TA_REQUEUE = 6, + __BLK_TA_ISSUE = 7, + __BLK_TA_COMPLETE = 8, + __BLK_TA_PLUG = 9, + __BLK_TA_UNPLUG_IO = 10, + __BLK_TA_UNPLUG_TIMER = 11, + __BLK_TA_INSERT = 12, + __BLK_TA_SPLIT = 13, + __BLK_TA_BOUNCE = 14, + __BLK_TA_REMAP = 15, + __BLK_TA_ABORT = 16, + __BLK_TA_DRV_DATA = 17, + __BLK_TA_CGROUP = 256, +}; + +struct scsi_sense_hdr { + u8 response_code; + u8 sense_key; + u8 asc; + u8 ascq; + u8 byte4; + u8 byte5; + u8 byte6; + u8 additional_length; +}; + +enum scsi_msg_byte { + COMMAND_COMPLETE = 0, + EXTENDED_MESSAGE = 1, + SAVE_POINTERS = 2, + RESTORE_POINTERS = 3, + DISCONNECT = 4, + INITIATOR_ERROR = 5, + ABORT_TASK_SET = 6, + MESSAGE_REJECT = 7, + NOP = 8, + MSG_PARITY_ERROR = 9, + LINKED_CMD_COMPLETE = 10, + LINKED_FLG_CMD_COMPLETE = 11, + TARGET_RESET = 12, + ABORT_TASK = 13, + CLEAR_TASK_SET = 14, + INITIATE_RECOVERY = 15, + RELEASE_RECOVERY = 16, + TERMINATE_IO_PROC = 17, + CLEAR_ACA = 22, + LOGICAL_UNIT_RESET = 23, + SIMPLE_QUEUE_TAG = 32, + HEAD_OF_QUEUE_TAG = 33, + ORDERED_QUEUE_TAG = 34, + IGNORE_WIDE_RESIDUE = 35, + ACA = 36, + QAS_REQUEST = 85, + BUS_DEVICE_RESET = 12, + ABORT = 6, +}; + +enum scsi_host_status { + DID_OK = 0, + DID_NO_CONNECT = 1, + DID_BUS_BUSY = 2, + DID_TIME_OUT = 3, + DID_BAD_TARGET = 4, + DID_ABORT = 5, + DID_PARITY = 6, + DID_ERROR = 7, + DID_RESET = 8, + DID_BAD_INTR = 9, + DID_PASSTHROUGH = 10, + DID_SOFT_ERROR = 11, + DID_IMM_RETRY = 12, + DID_REQUEUE = 13, + DID_TRANSPORT_DISRUPTED = 14, + DID_TRANSPORT_FAILFAST = 15, + DID_TRANSPORT_MARGINAL = 20, +}; + +typedef __u64 blist_flags_t; + +enum scsi_device_state { + SDEV_CREATED = 1, + SDEV_RUNNING = 2, + SDEV_CANCEL = 3, + SDEV_DEL = 4, + SDEV_QUIESCE = 5, + SDEV_OFFLINE = 6, + SDEV_TRANSPORT_OFFLINE = 7, + SDEV_BLOCK = 8, + SDEV_CREATED_BLOCK = 9, +}; + +enum scsi_device_event { + SDEV_EVT_MEDIA_CHANGE = 1, + SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2, + SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3, + SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4, + SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5, + SDEV_EVT_LUN_CHANGE_REPORTED = 6, + SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7, + 
SDEV_EVT_POWER_ON_RESET_OCCURRED = 8, + SDEV_EVT_FIRST = 1, + SDEV_EVT_LAST = 8, + SDEV_EVT_MAXBITS = 9, +}; + +struct scsi_vpd { + struct callback_head rcu; + int len; + unsigned char data[0]; +}; + +struct Scsi_Host; + +struct scsi_target; + +struct scsi_device_handler; + +struct bsg_device; + +struct scsi_device { + struct Scsi_Host *host; + struct request_queue *request_queue; + struct list_head siblings; + struct list_head same_target_siblings; + struct sbitmap budget_map; + atomic_t device_blocked; + atomic_t restarts; + spinlock_t list_lock; + struct list_head starved_entry; + short unsigned int queue_depth; + short unsigned int max_queue_depth; + short unsigned int last_queue_full_depth; + short unsigned int last_queue_full_count; + long unsigned int last_queue_full_time; + long unsigned int queue_ramp_up_period; + long unsigned int last_queue_ramp_up; + unsigned int id; + unsigned int channel; + u64 lun; + unsigned int manufacturer; + unsigned int sector_size; + void *hostdata; + unsigned char type; + char scsi_level; + char inq_periph_qual; + struct mutex inquiry_mutex; + unsigned char inquiry_len; + unsigned char *inquiry; + const char *vendor; + const char *model; + const char *rev; + struct scsi_vpd *vpd_pg0; + struct scsi_vpd *vpd_pg83; + struct scsi_vpd *vpd_pg80; + struct scsi_vpd *vpd_pg89; + struct scsi_vpd *vpd_pgb0; + struct scsi_vpd *vpd_pgb1; + struct scsi_vpd *vpd_pgb2; + struct scsi_vpd *vpd_pgb7; + struct scsi_target *sdev_target; + long: 32; + blist_flags_t sdev_bflags; + unsigned int eh_timeout; + unsigned int manage_system_start_stop: 1; + unsigned int manage_runtime_start_stop: 1; + unsigned int manage_shutdown: 1; + unsigned int force_runtime_start_on_system_start: 1; + unsigned int removable: 1; + unsigned int changed: 1; + unsigned int busy: 1; + unsigned int lockable: 1; + unsigned int locked: 1; + unsigned int borken: 1; + unsigned int disconnect: 1; + unsigned int soft_reset: 1; + unsigned int sdtr: 1; + unsigned int wdtr: 1; + unsigned int ppr: 1; + unsigned int tagged_supported: 1; + unsigned int simple_tags: 1; + unsigned int was_reset: 1; + unsigned int expecting_cc_ua: 1; + unsigned int use_10_for_rw: 1; + unsigned int use_10_for_ms: 1; + unsigned int set_dbd_for_ms: 1; + unsigned int read_before_ms: 1; + unsigned int no_report_opcodes: 1; + unsigned int no_write_same: 1; + unsigned int use_16_for_rw: 1; + unsigned int use_16_for_sync: 1; + unsigned int skip_ms_page_8: 1; + unsigned int skip_ms_page_3f: 1; + unsigned int skip_vpd_pages: 1; + unsigned int try_vpd_pages: 1; + unsigned int use_192_bytes_for_3f: 1; + unsigned int no_start_on_add: 1; + unsigned int allow_restart: 1; + unsigned int start_stop_pwr_cond: 1; + unsigned int no_uld_attach: 1; + unsigned int select_no_atn: 1; + unsigned int fix_capacity: 1; + unsigned int guess_capacity: 1; + unsigned int retry_hwerror: 1; + unsigned int last_sector_bug: 1; + unsigned int no_read_disc_info: 1; + unsigned int no_read_capacity_16: 1; + unsigned int try_rc_10_first: 1; + unsigned int security_supported: 1; + unsigned int is_visible: 1; + unsigned int wce_default_on: 1; + unsigned int no_dif: 1; + unsigned int broken_fua: 1; + unsigned int lun_in_cdb: 1; + unsigned int unmap_limit_for_ws: 1; + unsigned int rpm_autosuspend: 1; + unsigned int ignore_media_change: 1; + unsigned int silence_suspend: 1; + unsigned int no_vpd_size: 1; + unsigned int cdl_supported: 1; + unsigned int cdl_enable: 1; + unsigned int queue_stopped; + bool offline_already; + atomic_t disk_events_disable_depth; + long unsigned 
int supported_events[1]; + long unsigned int pending_events[1]; + struct list_head event_list; + struct work_struct event_work; + unsigned int max_device_blocked; + atomic_t iorequest_cnt; + atomic_t iodone_cnt; + atomic_t ioerr_cnt; + atomic_t iotmo_cnt; + long: 32; + struct device sdev_gendev; + struct device sdev_dev; + struct work_struct requeue_work; + struct scsi_device_handler *handler; + void *handler_data; + size_t dma_drain_len; + void *dma_drain_buf; + unsigned int sg_timeout; + unsigned int sg_reserved_size; + struct bsg_device *bsg_dev; + unsigned char access_state; + struct mutex state_mutex; + enum scsi_device_state sdev_state; + struct task_struct *quiesced_by; + long unsigned int sdev_data[0]; + long: 32; +}; + +enum scsi_host_state { + SHOST_CREATED = 1, + SHOST_RUNNING = 2, + SHOST_CANCEL = 3, + SHOST_DEL = 4, + SHOST_RECOVERY = 5, + SHOST_CANCEL_RECOVERY = 6, + SHOST_DEL_RECOVERY = 7, +}; + +struct scsi_host_template; + +struct scsi_transport_template; + +struct Scsi_Host { + struct list_head __devices; + struct list_head __targets; + struct list_head starved_list; + spinlock_t default_lock; + spinlock_t *host_lock; + struct mutex scan_mutex; + struct list_head eh_abort_list; + struct list_head eh_cmd_q; + struct task_struct *ehandler; + struct completion *eh_action; + wait_queue_head_t host_wait; + const struct scsi_host_template *hostt; + struct scsi_transport_template *transportt; + struct kref tagset_refcnt; + struct completion tagset_freed; + struct blk_mq_tag_set tag_set; + atomic_t host_blocked; + unsigned int host_failed; + unsigned int host_eh_scheduled; + unsigned int host_no; + int eh_deadline; + long unsigned int last_reset; + unsigned int max_channel; + unsigned int max_id; + u64 max_lun; + unsigned int unique_id; + short unsigned int max_cmd_len; + int this_id; + int can_queue; + short int cmd_per_lun; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int opt_sectors; + unsigned int max_segment_size; + unsigned int dma_alignment; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + unsigned int nr_hw_queues; + unsigned int nr_maps; + unsigned int active_mode: 2; + unsigned int host_self_blocked: 1; + unsigned int reverse_ordering: 1; + unsigned int tmf_in_progress: 1; + unsigned int async_scan: 1; + unsigned int eh_noresume: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int queuecommand_may_block: 1; + unsigned int short_inquiry: 1; + unsigned int no_scsi2_lun_in_cdb: 1; + unsigned int no_highmem: 1; + struct workqueue_struct *work_q; + struct workqueue_struct *tmf_work_q; + unsigned int max_host_blocked; + unsigned int prot_capabilities; + unsigned char prot_guard_type; + long unsigned int base; + long unsigned int io_port; + unsigned char n_io_port; + unsigned char dma_channel; + unsigned int irq; + enum scsi_host_state shost_state; + long: 32; + struct device shost_gendev; + struct device shost_dev; + void *shost_data; + struct device *dma_dev; + int rpm_autosuspend_delay; + long unsigned int hostdata[0]; + long: 32; +}; + +enum scsi_target_state { + STARGET_CREATED = 1, + STARGET_RUNNING = 2, + STARGET_REMOVE = 3, + STARGET_CREATED_REMOVE = 4, + STARGET_DEL = 5, +}; + +struct scsi_target { + struct scsi_device *starget_sdev_user; + struct list_head siblings; + struct list_head devices; + long: 32; + struct device dev; + struct kref reap_ref; + unsigned int channel; + unsigned int id; + unsigned int create: 1; + unsigned int single_lun: 
1; + unsigned int pdt_1f_for_no_lun: 1; + unsigned int no_report_luns: 1; + unsigned int expecting_lun_change: 1; + atomic_t target_busy; + atomic_t target_blocked; + unsigned int can_queue; + unsigned int max_target_blocked; + char scsi_level; + enum scsi_target_state state; + void *hostdata; + long unsigned int starget_data[0]; + long: 32; +}; + +struct scsi_data_buffer { + struct sg_table table; + unsigned int length; +}; + +enum scsi_cmnd_submitter { + SUBMITTED_BY_BLOCK_LAYER = 0, + SUBMITTED_BY_SCSI_ERROR_HANDLER = 1, + SUBMITTED_BY_SCSI_RESET_IOCTL = 2, +} __attribute__((mode(byte))); + +struct scsi_cmnd { + struct scsi_device *device; + struct list_head eh_entry; + struct delayed_work abort_work; + struct callback_head rcu; + int eh_eflags; + int budget_token; + long unsigned int jiffies_at_alloc; + int retries; + int allowed; + unsigned char prot_op; + unsigned char prot_type; + unsigned char prot_flags; + enum scsi_cmnd_submitter submitter; + short unsigned int cmd_len; + enum dma_data_direction sc_data_direction; + unsigned char cmnd[32]; + struct scsi_data_buffer sdb; + struct scsi_data_buffer *prot_sdb; + unsigned int underflow; + unsigned int transfersize; + unsigned int resid_len; + unsigned int sense_len; + unsigned char *sense_buffer; + int flags; + long unsigned int state; + unsigned int extra_len; + unsigned char *host_scribble; + int result; +}; + +enum scsi_timeout_action { + SCSI_EH_DONE = 0, + SCSI_EH_RESET_TIMER = 1, + SCSI_EH_NOT_HANDLED = 2, +}; + +struct scsi_host_template { + unsigned int cmd_size; + int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); + void (*commit_rqs)(struct Scsi_Host *, u16); + struct module *module; + const char *name; + const char * (*info)(struct Scsi_Host *); + int (*ioctl)(struct scsi_device *, unsigned int, void *); + int (*init_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*exit_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*eh_abort_handler)(struct scsi_cmnd *); + int (*eh_device_reset_handler)(struct scsi_cmnd *); + int (*eh_target_reset_handler)(struct scsi_cmnd *); + int (*eh_bus_reset_handler)(struct scsi_cmnd *); + int (*eh_host_reset_handler)(struct scsi_cmnd *); + int (*slave_alloc)(struct scsi_device *); + int (*device_configure)(struct scsi_device *, struct queue_limits *); + int (*slave_configure)(struct scsi_device *); + void (*slave_destroy)(struct scsi_device *); + int (*target_alloc)(struct scsi_target *); + void (*target_destroy)(struct scsi_target *); + int (*scan_finished)(struct Scsi_Host *, long unsigned int); + void (*scan_start)(struct Scsi_Host *); + int (*change_queue_depth)(struct scsi_device *, int); + void (*map_queues)(struct Scsi_Host *); + int (*mq_poll)(struct Scsi_Host *, unsigned int); + bool (*dma_need_drain)(struct request *); + int (*bios_param)(struct scsi_device *, struct block_device *, sector_t, int *); + void (*unlock_native_capacity)(struct scsi_device *); + int (*show_info)(struct seq_file *, struct Scsi_Host *); + int (*write_info)(struct Scsi_Host *, char *, int); + enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *); + bool (*eh_should_retry_cmd)(struct scsi_cmnd *); + int (*host_reset)(struct Scsi_Host *, int); + const char *proc_name; + int can_queue; + int this_id; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int dma_alignment; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + short int cmd_per_lun; + int 
tag_alloc_policy; + unsigned int track_queue_depth: 1; + unsigned int supported_mode: 2; + unsigned int emulated: 1; + unsigned int skip_settle_delay: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int queuecommand_may_block: 1; + unsigned int max_host_blocked; + const struct attribute_group **shost_groups; + const struct attribute_group **sdev_groups; + u64 vendor_id; +}; + +struct sg_io_hdr { + int interface_id; + int dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + unsigned int dxfer_len; + void *dxferp; + unsigned char *cmdp; + void *sbp; + unsigned int timeout; + unsigned int flags; + int pack_id; + void *usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + int resid; + unsigned int duration; + unsigned int info; +}; + +typedef struct sg_io_hdr sg_io_hdr_t; + +struct compat_sg_io_hdr { + compat_int_t interface_id; + compat_int_t dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + compat_uint_t dxfer_len; + compat_uint_t dxferp; + compat_uptr_t cmdp; + compat_uptr_t sbp; + compat_uint_t timeout; + compat_uint_t flags; + compat_int_t pack_id; + compat_uptr_t usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + compat_int_t resid; + compat_uint_t duration; + compat_uint_t info; +}; + +struct sg_scsi_id { + int host_no; + int channel; + int scsi_id; + int lun; + int scsi_type; + short int h_cmd_per_lun; + short int d_queue_depth; + int unused[2]; +}; + +typedef struct sg_scsi_id sg_scsi_id_t; + +struct sg_req_info { + char req_state; + char orphan; + char sg_io_owned; + char problem; + int pack_id; + void *usr_ptr; + unsigned int duration; + int unused; +}; + +typedef struct sg_req_info sg_req_info_t; + +struct sg_header { + int pack_len; + int reply_len; + int pack_id; + int result; + unsigned int twelve_byte: 1; + unsigned int target_status: 5; + unsigned int host_status: 8; + unsigned int driver_status: 8; + unsigned int other_flags: 10; + unsigned char sense_buffer[16]; +}; + +struct sg_scatter_hold { + short unsigned int k_use_sg; + unsigned int sglist_len; + unsigned int bufflen; + struct page **pages; + int page_order; + char dio_in_use; + unsigned char cmd_opcode; +}; + +typedef struct sg_scatter_hold Sg_scatter_hold; + +struct sg_fd; + +struct sg_request { + struct list_head entry; + struct sg_fd *parentfp; + Sg_scatter_hold data; + sg_io_hdr_t header; + unsigned char sense_b[96]; + char res_used; + char orphan; + char sg_io_owned; + char done; + struct request *rq; + struct bio *bio; + struct execute_work ew; +}; + +typedef struct sg_request Sg_request; + +struct sg_device; + +struct sg_fd { + struct list_head sfd_siblings; + struct sg_device *parentdp; + wait_queue_head_t read_wait; + rwlock_t rq_list_lock; + struct mutex f_mutex; + int timeout; + int timeout_user; + Sg_scatter_hold reserve; + struct list_head rq_list; + struct fasync_struct *async_qp; + Sg_request req_arr[16]; + char force_packid; + char cmd_q; + unsigned char next_cmd_len; + char keep_orphan; + char mmap_called; + char res_in_use; + struct kref f_ref; + struct execute_work ew; +}; + +struct sg_device { + struct scsi_device *device; + wait_queue_head_t open_wait; + struct mutex open_rel_lock; + int 
sg_tablesize; + u32 index; + struct list_head sfds; + rwlock_t sfd_lock; + atomic_t detaching; + bool exclude; + int open_cnt; + char sgdebug; + char name[32]; + struct cdev *cdev; + struct kref d_ref; +}; + +typedef struct sg_fd Sg_fd; + +typedef struct sg_device Sg_device; + +struct mdio_device_id { + __u32 phy_id; + __u32 phy_id_mask; +}; + +enum device_link_state { + DL_STATE_NONE = -1, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE = 1, + DL_STATE_CONSUMER_PROBE = 2, + DL_STATE_ACTIVE = 3, + DL_STATE_SUPPLIER_UNBIND = 4, +}; + +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + struct device link_dev; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; + struct work_struct rm_work; + bool supplier_preactivated; + long: 32; +}; + +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[6]; +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; + +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +struct ethtool_stats { + __u32 cmd; + __u32 n_stats; + __u64 data[0]; +}; + +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + 
ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, + ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, + ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, + ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, + ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93, + ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94, + ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95, + ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, + ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, + ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, + ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, + ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, + ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, + ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102, + __ETHTOOL_LINK_MODE_MASK_NBITS = 103, +}; + +enum hwtstamp_tx_types { + HWTSTAMP_TX_OFF = 0, + HWTSTAMP_TX_ON = 1, + HWTSTAMP_TX_ONESTEP_SYNC = 2, + HWTSTAMP_TX_ONESTEP_P2P = 3, + __HWTSTAMP_TX_CNT = 4, +}; + +enum hwtstamp_rx_filters { + HWTSTAMP_FILTER_NONE = 0, + HWTSTAMP_FILTER_ALL = 1, + HWTSTAMP_FILTER_SOME = 2, + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, + HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, + HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, + HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, + HWTSTAMP_FILTER_PTP_V2_EVENT = 12, + HWTSTAMP_FILTER_PTP_V2_SYNC = 13, + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, + HWTSTAMP_FILTER_NTP_ALL = 15, + __HWTSTAMP_FILTER_CNT = 16, +}; + +struct 
kernel_ethtool_ts_info { + u32 cmd; + u32 so_timestamping; + int phc_index; + enum hwtstamp_tx_types tx_types; + enum hwtstamp_rx_filters rx_filters; +}; + +struct gpio_desc; + +struct reset_control; + +struct mii_bus; + +struct mdio_device { + struct device dev; + struct mii_bus *bus; + char modalias[32]; + int (*bus_match)(struct device *, const struct device_driver *); + void (*device_free)(struct mdio_device *); + void (*device_remove)(struct mdio_device *); + int addr; + int flags; + int reset_state; + struct gpio_desc *reset_gpio; + struct reset_control *reset_ctrl; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; + long: 32; +}; + +struct phy_c45_device_ids { + u32 devices_in_package; + u32 mmds_present; + u32 device_ids[32]; +}; + +enum phy_state { + PHY_DOWN = 0, + PHY_READY = 1, + PHY_HALTED = 2, + PHY_ERROR = 3, + PHY_UP = 4, + PHY_RUNNING = 5, + PHY_NOLINK = 6, + PHY_CABLETEST = 7, +}; + +typedef enum { + PHY_INTERFACE_MODE_NA = 0, + PHY_INTERFACE_MODE_INTERNAL = 1, + PHY_INTERFACE_MODE_MII = 2, + PHY_INTERFACE_MODE_GMII = 3, + PHY_INTERFACE_MODE_SGMII = 4, + PHY_INTERFACE_MODE_TBI = 5, + PHY_INTERFACE_MODE_REVMII = 6, + PHY_INTERFACE_MODE_RMII = 7, + PHY_INTERFACE_MODE_REVRMII = 8, + PHY_INTERFACE_MODE_RGMII = 9, + PHY_INTERFACE_MODE_RGMII_ID = 10, + PHY_INTERFACE_MODE_RGMII_RXID = 11, + PHY_INTERFACE_MODE_RGMII_TXID = 12, + PHY_INTERFACE_MODE_RTBI = 13, + PHY_INTERFACE_MODE_SMII = 14, + PHY_INTERFACE_MODE_XGMII = 15, + PHY_INTERFACE_MODE_XLGMII = 16, + PHY_INTERFACE_MODE_MOCA = 17, + PHY_INTERFACE_MODE_PSGMII = 18, + PHY_INTERFACE_MODE_QSGMII = 19, + PHY_INTERFACE_MODE_TRGMII = 20, + PHY_INTERFACE_MODE_100BASEX = 21, + PHY_INTERFACE_MODE_1000BASEX = 22, + PHY_INTERFACE_MODE_2500BASEX = 23, + PHY_INTERFACE_MODE_5GBASER = 24, + PHY_INTERFACE_MODE_RXAUI = 25, + PHY_INTERFACE_MODE_XAUI = 26, + PHY_INTERFACE_MODE_10GBASER = 27, + PHY_INTERFACE_MODE_25GBASER = 28, + PHY_INTERFACE_MODE_USXGMII = 29, + PHY_INTERFACE_MODE_10GKR = 30, + PHY_INTERFACE_MODE_QUSGMII = 31, + PHY_INTERFACE_MODE_1000BASEKX = 32, + PHY_INTERFACE_MODE_10G_QXGMII = 33, + PHY_INTERFACE_MODE_MAX = 34, +} phy_interface_t; + +struct eee_config { + u32 tx_lpi_timer; + bool tx_lpi_enabled; + bool eee_enabled; +}; + +struct phylink; + +struct pse_control; + +struct phy_driver; + +struct phy_package_shared; + +struct mii_timestamper; + +struct phy_device { + struct mdio_device mdio; + const struct phy_driver *drv; + struct device_link *devlink; + u32 phyindex; + u32 phy_id; + struct phy_c45_device_ids c45_ids; + unsigned int is_c45: 1; + unsigned int is_internal: 1; + unsigned int is_pseudo_fixed_link: 1; + unsigned int is_gigabit_capable: 1; + unsigned int has_fixups: 1; + unsigned int suspended: 1; + unsigned int suspended_by_mdio_bus: 1; + unsigned int sysfs_links: 1; + unsigned int loopback_enabled: 1; + unsigned int downshifted_rate: 1; + unsigned int is_on_sfp_module: 1; + unsigned int mac_managed_pm: 1; + unsigned int wol_enabled: 1; + unsigned int autoneg: 1; + unsigned int link: 1; + unsigned int autoneg_complete: 1; + unsigned int interrupts: 1; + unsigned int irq_suspended: 1; + unsigned int irq_rerun: 1; + unsigned int default_timestamp: 1; + int rate_matching; + enum phy_state state; + u32 dev_flags; + phy_interface_t interface; + long unsigned int possible_interfaces[2]; + int speed; + int duplex; + int port; + int pause; + int asym_pause; + u8 master_slave_get; + u8 master_slave_set; + u8 master_slave_state; + long unsigned int supported[4]; + long unsigned int advertising[4]; + 
long unsigned int lp_advertising[4]; + long unsigned int adv_old[4]; + long unsigned int supported_eee[4]; + long unsigned int advertising_eee[4]; + long unsigned int eee_broken_modes[4]; + bool enable_tx_lpi; + bool eee_active; + struct eee_config eee_cfg; + long unsigned int host_interfaces[2]; + struct list_head leds; + int irq; + void *priv; + struct phy_package_shared *shared; + struct sk_buff *skb; + void *ehdr; + struct nlattr *nest; + struct delayed_work state_queue; + struct mutex lock; + bool sfp_bus_attached; + struct sfp_bus *sfp_bus; + struct phylink *phylink; + struct net_device *attached_dev; + struct mii_timestamper *mii_ts; + struct pse_control *psec; + u8 mdix; + u8 mdix_ctrl; + int pma_extable; + unsigned int link_down_events; + void (*phy_link_change)(struct phy_device *, bool); + void (*adjust_link)(struct net_device *); + long: 32; +}; + +struct phy_plca_cfg { + int version; + int enabled; + int node_id; + int node_cnt; + int to_tmr; + int burst_cnt; + int burst_tmr; +}; + +struct phy_plca_status { + bool pst; +}; + +struct phy_tdr_config { + u32 first; + u32 last; + u32 step; + s8 pair; +}; + +enum led_brightness { + LED_OFF = 0, + LED_ON = 1, + LED_HALF = 127, + LED_FULL = 255, +}; + +enum led_trigger_netdev_modes { + TRIGGER_NETDEV_LINK = 0, + TRIGGER_NETDEV_LINK_10 = 1, + TRIGGER_NETDEV_LINK_100 = 2, + TRIGGER_NETDEV_LINK_1000 = 3, + TRIGGER_NETDEV_LINK_2500 = 4, + TRIGGER_NETDEV_LINK_5000 = 5, + TRIGGER_NETDEV_LINK_10000 = 6, + TRIGGER_NETDEV_HALF_DUPLEX = 7, + TRIGGER_NETDEV_FULL_DUPLEX = 8, + TRIGGER_NETDEV_TX = 9, + TRIGGER_NETDEV_RX = 10, + TRIGGER_NETDEV_TX_ERR = 11, + TRIGGER_NETDEV_RX_ERR = 12, + __TRIGGER_NETDEV_MAX = 13, +}; + +struct mdio_bus_stats { + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t writes; + u64_stats_t reads; + struct u64_stats_sync syncp; + long: 32; +}; + +struct mii_bus { + struct module *owner; + const char *name; + char id[61]; + void *priv; + int (*read)(struct mii_bus *, int, int); + int (*write)(struct mii_bus *, int, int, u16); + int (*read_c45)(struct mii_bus *, int, int, int); + int (*write_c45)(struct mii_bus *, int, int, int, u16); + int (*reset)(struct mii_bus *); + struct mdio_bus_stats stats[32]; + struct mutex mdio_lock; + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED = 2, + MDIOBUS_UNREGISTERED = 3, + MDIOBUS_RELEASED = 4, + } state; + long: 32; + struct device dev; + struct mdio_device *mdio_map[32]; + u32 phy_mask; + u32 phy_ignore_ta_mask; + int irq[32]; + int reset_delay_us; + int reset_post_delay_us; + struct gpio_desc *reset_gpiod; + struct mutex shared_lock; + struct phy_package_shared *shared[32]; +}; + +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; + +enum hwtstamp_source { + HWTSTAMP_SOURCE_UNSPEC = 0, + HWTSTAMP_SOURCE_NETDEV = 1, + HWTSTAMP_SOURCE_PHYLIB = 2, +}; + +struct kernel_hwtstamp_config { + int flags; + int tx_type; + int rx_filter; + struct ifreq *ifr; + bool copied_to_user; + enum hwtstamp_source source; +}; + +struct mii_timestamper { + bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int); + void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int); + int (*hwtstamp)(struct mii_timestamper *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); + void (*link_state)(struct mii_timestamper *, struct phy_device *); + int (*ts_info)(struct mii_timestamper *, struct kernel_ethtool_ts_info *); + struct device *device; +}; + +struct phy_package_shared { + u8 base_addr; + struct device_node *np; + 
refcount_t refcnt; + long unsigned int flags; + size_t priv_size; + void *priv; +}; + +struct phy_driver { + struct mdio_driver_common mdiodrv; + u32 phy_id; + char *name; + u32 phy_id_mask; + const long unsigned int * const features; + u32 flags; + const void *driver_data; + int (*soft_reset)(struct phy_device *); + int (*config_init)(struct phy_device *); + int (*probe)(struct phy_device *); + int (*get_features)(struct phy_device *); + int (*get_rate_matching)(struct phy_device *, phy_interface_t); + int (*suspend)(struct phy_device *); + int (*resume)(struct phy_device *); + int (*config_aneg)(struct phy_device *); + int (*aneg_done)(struct phy_device *); + int (*read_status)(struct phy_device *); + int (*config_intr)(struct phy_device *); + irqreturn_t (*handle_interrupt)(struct phy_device *); + void (*remove)(struct phy_device *); + int (*match_phy_device)(struct phy_device *); + int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*link_change_notify)(struct phy_device *); + int (*read_mmd)(struct phy_device *, int, u16); + int (*write_mmd)(struct phy_device *, int, u16, u16); + int (*read_page)(struct phy_device *); + int (*write_page)(struct phy_device *, int); + int (*module_info)(struct phy_device *, struct ethtool_modinfo *); + int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); + int (*cable_test_start)(struct phy_device *); + int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *); + int (*cable_test_get_status)(struct phy_device *, bool *); + int (*get_sset_count)(struct phy_device *); + void (*get_strings)(struct phy_device *, u8 *); + void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); + int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); + int (*set_loopback)(struct phy_device *, bool); + int (*get_sqi)(struct phy_device *); + int (*get_sqi_max)(struct phy_device *); + int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *); + int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); + int (*led_brightness_set)(struct phy_device *, u8, enum led_brightness); + int (*led_blink_set)(struct phy_device *, u8, long unsigned int *, long unsigned int *); + int (*led_hw_is_supported)(struct phy_device *, u8, long unsigned int); + int (*led_hw_control_set)(struct phy_device *, u8, long unsigned int); + int (*led_hw_control_get)(struct phy_device *, u8, long unsigned int *); + int (*led_polarity_set)(struct phy_device *, int, long unsigned int); +}; + +struct clk; + +struct rtl821x_priv { + u16 phycr1; + u16 phycr2; + bool has_phycr2; + struct clk *clk; +}; + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575 = 1, + e1000_82576 = 2, + e1000_82580 = 3, + e1000_i350 = 4, + e1000_i354 = 5, + e1000_i210 = 6, + e1000_i211 = 7, + e1000_num_macs = 8, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types = 4, +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none = 1, + e1000_nvm_eeprom_spi = 2, + e1000_nvm_flash_hw = 3, + e1000_nvm_invm = 4, + e1000_nvm_flash_sw = 5, +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small = 1, + 
e1000_nvm_override_spi_large = 2, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none = 1, + e1000_phy_m88 = 2, + e1000_phy_igp = 3, + e1000_phy_igp_2 = 4, + e1000_phy_gg82563 = 5, + e1000_phy_igp_3 = 6, + e1000_phy_ife = 7, + e1000_phy_82580 = 8, + e1000_phy_i210 = 9, + e1000_phy_bcm54616 = 10, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci = 1, + e1000_bus_type_pcix = 2, + e1000_bus_type_pci_express = 3, + e1000_bus_type_reserved = 4, +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33 = 1, + e1000_bus_speed_66 = 2, + e1000_bus_speed_100 = 3, + e1000_bus_speed_120 = 4, + e1000_bus_speed_133 = 5, + e1000_bus_speed_2500 = 6, + e1000_bus_speed_5000 = 7, + e1000_bus_speed_reserved = 8, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1 = 1, + e1000_bus_width_pcie_x2 = 2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32 = 9, + e1000_bus_width_64 = 10, + e1000_bus_width_reserved = 11, +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok = 1, + e1000_1000t_rx_status_undefined = 255, +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed = 1, + e1000_rev_polarity_undefined = 255, +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause = 1, + e1000_fc_tx_pause = 2, + e1000_fc_full = 3, + e1000_fc_default = 255, +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master = 1, + e1000_ms_force_slave = 2, + e1000_ms_auto = 3, +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on = 1, + e1000_smart_speed_off = 2, +}; + +struct e1000_sfp_flags { + u8 e1000_base_sx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_t: 1; + u8 e100_base_lx: 1; + u8 e100_base_fx: 1; + u8 e10_base_bx10: 1; + u8 e10_base_px: 1; +}; + +struct e1000_hw; + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); +}; + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[3]; +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + enum e1000_mac_type type; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + u16 mta_reg_count; + u16 uta_reg_count; + u32 mta_shadow[128]; + u16 rar_entry_count; + u8 forced_speed_duplex; + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; 
+ bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_fc_info { + u32 high_water; + u32 low_water; + u16 pause_time; + bool send_xon; + bool strict_ieee; + enum e1000_fc_mode current_mode; + enum e1000_fc_mode requested_mode; +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + enum e1000_phy_type type; + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + u32 addr; + u32 id; + u32 reset_delay_us; + u32 revision; + enum e1000_media_type media_type; + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + u8 mdix; + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + u32 flash_bank_size; + u32 flash_base_addr; + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + u32 snoop; + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16, bool); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); + s32 (*unlock)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct 
e1000_hw { + void *back; + u8 *hw_addr; + u8 *flash_address; + long unsigned int io_base; + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + u8 revision_id; +}; + +struct regulator; + +struct phy_configure_opts_dp { + unsigned int link_rate; + unsigned int lanes; + unsigned int voltage[4]; + unsigned int pre[4]; + u8 ssc: 1; + u8 set_rate: 1; + u8 set_lanes: 1; + u8 set_voltages: 1; +}; + +struct phy_configure_opts_lvds { + unsigned int bits_per_lane_and_dclk_cycle; + long unsigned int differential_clk_rate; + unsigned int lanes; + bool is_slave; +}; + +struct phy_configure_opts_mipi_dphy { + unsigned int clk_miss; + unsigned int clk_post; + unsigned int clk_pre; + unsigned int clk_prepare; + unsigned int clk_settle; + unsigned int clk_term_en; + unsigned int clk_trail; + unsigned int clk_zero; + unsigned int d_term_en; + unsigned int eot; + unsigned int hs_exit; + unsigned int hs_prepare; + unsigned int hs_settle; + unsigned int hs_skip; + unsigned int hs_trail; + unsigned int hs_zero; + unsigned int init; + unsigned int lpx; + unsigned int ta_get; + unsigned int ta_go; + unsigned int ta_sure; + unsigned int wakeup; + long unsigned int hs_clk_rate; + long unsigned int lp_clk_rate; + unsigned char lanes; +}; + +enum phy_mode { + PHY_MODE_INVALID = 0, + PHY_MODE_USB_HOST = 1, + PHY_MODE_USB_HOST_LS = 2, + PHY_MODE_USB_HOST_FS = 3, + PHY_MODE_USB_HOST_HS = 4, + PHY_MODE_USB_HOST_SS = 5, + PHY_MODE_USB_DEVICE = 6, + PHY_MODE_USB_DEVICE_LS = 7, + PHY_MODE_USB_DEVICE_FS = 8, + PHY_MODE_USB_DEVICE_HS = 9, + PHY_MODE_USB_DEVICE_SS = 10, + PHY_MODE_USB_OTG = 11, + PHY_MODE_UFS_HS_A = 12, + PHY_MODE_UFS_HS_B = 13, + PHY_MODE_PCIE = 14, + PHY_MODE_ETHERNET = 15, + PHY_MODE_MIPI_DPHY = 16, + PHY_MODE_SATA = 17, + PHY_MODE_LVDS = 18, + PHY_MODE_DP = 19, +}; + +enum phy_media { + PHY_MEDIA_DEFAULT = 0, + PHY_MEDIA_SR = 1, + PHY_MEDIA_DAC = 2, +}; + +union phy_configure_opts { + struct phy_configure_opts_mipi_dphy mipi_dphy; + struct phy_configure_opts_dp dp; + struct phy_configure_opts_lvds lvds; +}; + +struct phy; + +struct phy_ops { + int (*init)(struct phy *); + int (*exit)(struct phy *); + int (*power_on)(struct phy *); + int (*power_off)(struct phy *); + int (*set_mode)(struct phy *, enum phy_mode, int); + int (*set_media)(struct phy *, enum phy_media); + int (*set_speed)(struct phy *, int); + int (*configure)(struct phy *, union phy_configure_opts *); + int (*validate)(struct phy *, enum phy_mode, int, union phy_configure_opts *); + int (*reset)(struct phy *); + int (*calibrate)(struct phy *); + int (*connect)(struct phy *, int); + int (*disconnect)(struct phy *, int); + void (*release)(struct phy *); + struct module *owner; +}; + +struct phy_attrs { + u32 bus_width; + u32 max_link_rate; + enum phy_mode mode; +}; + +struct phy { + struct device dev; + int id; + const struct phy_ops *ops; + struct mutex mutex; + int init_count; + int power_count; + struct phy_attrs attrs; + struct regulator *pwr; + struct dentry *debugfs; +}; + +struct usb_phy_roothub { + struct phy *phy; + struct list_head list; +}; + +struct input_id { + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; +}; + +struct input_absinfo { + __s32 value; + __s32 minimum; + __s32 maximum; + __s32 fuzz; + 
__s32 flat; + __s32 resolution; +}; + +struct input_keymap_entry { + __u8 flags; + __u8 len; + __u16 index; + __u32 keycode; + __u8 scancode[32]; +}; + +struct ff_replay { + __u16 length; + __u16 delay; +}; + +struct ff_trigger { + __u16 button; + __u16 interval; +}; + +struct ff_envelope { + __u16 attack_length; + __u16 attack_level; + __u16 fade_length; + __u16 fade_level; +}; + +struct ff_constant_effect { + __s16 level; + struct ff_envelope envelope; +}; + +struct ff_ramp_effect { + __s16 start_level; + __s16 end_level; + struct ff_envelope envelope; +}; + +struct ff_condition_effect { + __u16 right_saturation; + __u16 left_saturation; + __s16 right_coeff; + __s16 left_coeff; + __u16 deadband; + __s16 center; +}; + +struct ff_periodic_effect { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 custom_len; + __s16 *custom_data; +}; + +struct ff_rumble_effect { + __u16 strong_magnitude; + __u16 weak_magnitude; +}; + +struct ff_effect { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +enum utf8_normalization { + UTF8_NFDI = 0, + UTF8_NFDICF = 1, + UTF8_NMAX = 2, +}; + +enum { + DQF_ROOT_SQUASH_B = 0, + DQF_SYS_FILE_B = 16, + DQF_PRIVATE = 17, +}; + +enum { + DQST_LOOKUPS = 0, + DQST_DROPS = 1, + DQST_READS = 2, + DQST_WRITES = 3, + DQST_CACHE_HITS = 4, + DQST_ALLOC_DQUOTS = 5, + DQST_FREE_DQUOTS = 6, + DQST_SYNCS = 7, + _DQST_DQSTAT_LAST = 8, +}; + +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_PAGEFAULT = 2, + SB_FREEZE_FS = 3, + SB_FREEZE_COMPLETE = 4, +}; + +struct input_device_id { + kernel_ulong_t flags; + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; + kernel_ulong_t evbit[1]; + kernel_ulong_t keybit[24]; + kernel_ulong_t relbit[1]; + kernel_ulong_t absbit[2]; + kernel_ulong_t mscbit[1]; + kernel_ulong_t ledbit[1]; + kernel_ulong_t sndbit[1]; + kernel_ulong_t ffbit[4]; + kernel_ulong_t swbit[1]; + kernel_ulong_t propbit[1]; + kernel_ulong_t driver_info; +}; + +struct input_value { + __u16 type; + __u16 code; + __s32 value; +}; + +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO = 1, + INPUT_CLK_BOOT = 2, + INPUT_CLK_MAX = 3, +}; + +struct ff_device; + +struct input_dev_poller; + +struct input_mt; + +struct input_handle; + +struct input_dev { + const char *name; + const char *phys; + const char *uniq; + struct input_id id; + long unsigned int propbit[1]; + long unsigned int evbit[1]; + long unsigned int keybit[24]; + long unsigned int relbit[1]; + long unsigned int absbit[2]; + long unsigned int mscbit[1]; + long unsigned int ledbit[1]; + long unsigned int sndbit[1]; + long unsigned int ffbit[4]; + long unsigned int swbit[1]; + unsigned int hint_events_per_packet; + unsigned int keycodemax; + unsigned int keycodesize; + void *keycode; + int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *); + int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); + struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int repeat_key; + struct timer_list timer; + int rep[2]; + struct input_mt *mt; + struct input_absinfo *absinfo; + long unsigned int key[24]; + long unsigned int led[1]; + long unsigned int snd[1]; + long unsigned int sw[1]; + int (*open)(struct 
input_dev *); + void (*close)(struct input_dev *); + int (*flush)(struct input_dev *, struct file *); + int (*event)(struct input_dev *, unsigned int, unsigned int, int); + struct input_handle *grab; + spinlock_t event_lock; + struct mutex mutex; + unsigned int users; + bool going_away; + struct device dev; + struct list_head h_list; + struct list_head node; + unsigned int num_vals; + unsigned int max_vals; + struct input_value *vals; + bool devres_managed; + ktime_t timestamp[3]; + bool inhibited; + long: 32; +}; + +struct ff_device { + int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); + int (*erase)(struct input_dev *, int); + int (*playback)(struct input_dev *, int, int); + void (*set_gain)(struct input_dev *, u16); + void (*set_autocenter)(struct input_dev *, u16); + void (*destroy)(struct ff_device *); + void *private; + long unsigned int ffbit[4]; + struct mutex mutex; + int max_effects; + struct ff_effect *effects; + struct file *effect_owners[0]; +}; + +struct input_mt_slot { + int abs[14]; + unsigned int frame; + unsigned int key; +}; + +struct input_mt { + int trkid; + int num_slots; + int slot; + unsigned int flags; + unsigned int frame; + int *red; + struct input_mt_slot slots[0]; +}; + +struct input_handler; + +struct input_handle { + void *private; + int open; + const char *name; + struct input_dev *dev; + struct input_handler *handler; + unsigned int (*handle_events)(struct input_handle *, struct input_value *, unsigned int); + struct list_head d_node; + struct list_head h_node; +}; + +struct input_handler { + void *private; + void (*event)(struct input_handle *, unsigned int, unsigned int, int); + unsigned int (*events)(struct input_handle *, struct input_value *, unsigned int); + bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); + bool (*match)(struct input_handler *, struct input_dev *); + int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *); + void (*disconnect)(struct input_handle *); + void (*start)(struct input_handle *); + bool passive_observer; + bool legacy_minors; + int minor; + const char *name; + const struct input_device_id *id_table; + struct list_head h_list; + struct list_head node; +}; + +struct input_mt_pos { + s16 x; + s16 y; +}; + +enum rc_proto { + RC_PROTO_UNKNOWN = 0, + RC_PROTO_OTHER = 1, + RC_PROTO_RC5 = 2, + RC_PROTO_RC5X_20 = 3, + RC_PROTO_RC5_SZ = 4, + RC_PROTO_JVC = 5, + RC_PROTO_SONY12 = 6, + RC_PROTO_SONY15 = 7, + RC_PROTO_SONY20 = 8, + RC_PROTO_NEC = 9, + RC_PROTO_NECX = 10, + RC_PROTO_NEC32 = 11, + RC_PROTO_SANYO = 12, + RC_PROTO_MCIR2_KBD = 13, + RC_PROTO_MCIR2_MSE = 14, + RC_PROTO_RC6_0 = 15, + RC_PROTO_RC6_6A_20 = 16, + RC_PROTO_RC6_6A_24 = 17, + RC_PROTO_RC6_6A_32 = 18, + RC_PROTO_RC6_MCE = 19, + RC_PROTO_SHARP = 20, + RC_PROTO_XMP = 21, + RC_PROTO_CEC = 22, + RC_PROTO_IMON = 23, + RC_PROTO_RCMM12 = 24, + RC_PROTO_RCMM24 = 25, + RC_PROTO_RCMM32 = 26, + RC_PROTO_XBOX_DVD = 27, + RC_PROTO_MAX = 27, +}; + +struct rc_map_table { + u64 scancode; + u32 keycode; + long: 32; +}; + +struct rc_map { + struct rc_map_table *scan; + unsigned int size; + unsigned int len; + unsigned int alloc; + enum rc_proto rc_proto; + const char *name; + spinlock_t lock; +}; + +struct rc_map_list { + struct list_head list; + struct rc_map map; +}; + +typedef int (*cmp_func_t)(const void *, const void *); + +typedef unsigned int xa_mark_t; + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; + +struct ida { + struct xarray xa; +}; + +typedef void (*dr_release_t)(struct 
device *, void *); + +struct lirc_scancode { + __u64 timestamp; + __u16 flags; + __u16 rc_proto; + __u32 keycode; + __u64 scancode; +}; + +enum rc_driver_type { + RC_DRIVER_SCANCODE = 0, + RC_DRIVER_IR_RAW = 1, + RC_DRIVER_IR_RAW_TX = 2, +}; + +struct rc_scancode_filter { + u32 data; + u32 mask; +}; + +enum rc_filter_type { + RC_FILTER_NORMAL = 0, + RC_FILTER_WAKEUP = 1, + RC_FILTER_MAX = 2, +}; + +struct ir_raw_event_ctrl; + +struct rc_dev { + struct device dev; + bool managed_alloc; + const struct attribute_group *sysfs_groups[5]; + const char *device_name; + const char *input_phys; + struct input_id input_id; + const char *driver_name; + const char *map_name; + struct rc_map rc_map; + struct mutex lock; + unsigned int minor; + struct ir_raw_event_ctrl *raw; + struct input_dev *input_dev; + enum rc_driver_type driver_type; + bool idle; + bool encode_wakeup; + long: 32; + u64 allowed_protocols; + u64 enabled_protocols; + u64 allowed_wakeup_protocols; + enum rc_proto wakeup_protocol; + struct rc_scancode_filter scancode_filter; + struct rc_scancode_filter scancode_wakeup_filter; + u32 scancode_mask; + u32 users; + void *priv; + spinlock_t keylock; + bool keypressed; + long unsigned int keyup_jiffies; + struct timer_list timer_keyup; + struct timer_list timer_repeat; + u32 last_keycode; + enum rc_proto last_protocol; + long: 32; + u64 last_scancode; + u8 last_toggle; + u32 timeout; + u32 min_timeout; + u32 max_timeout; + u32 rx_resolution; + bool registered; + int (*change_protocol)(struct rc_dev *, u64 *); + int (*open)(struct rc_dev *); + void (*close)(struct rc_dev *); + int (*s_tx_mask)(struct rc_dev *, u32); + int (*s_tx_carrier)(struct rc_dev *, u32); + int (*s_tx_duty_cycle)(struct rc_dev *, u32); + int (*s_rx_carrier_range)(struct rc_dev *, u32, u32); + int (*tx_ir)(struct rc_dev *, unsigned int *, unsigned int); + void (*s_idle)(struct rc_dev *, bool); + int (*s_wideband_receiver)(struct rc_dev *, int); + int (*s_carrier_report)(struct rc_dev *, int); + int (*s_filter)(struct rc_dev *, struct rc_scancode_filter *); + int (*s_wakeup_filter)(struct rc_dev *, struct rc_scancode_filter *); + int (*s_timeout)(struct rc_dev *, unsigned int); +}; + +struct ir_raw_event { + union { + u32 duration; + u32 carrier; + }; + u8 duty_cycle; + unsigned int pulse: 1; + unsigned int overflow: 1; + unsigned int timeout: 1; + unsigned int carrier_report: 1; +}; + +struct nec_dec { + int state; + unsigned int count; + u32 bits; + bool is_nec_x; + bool necx_repeat; +}; + +struct rc5_dec { + int state; + u32 bits; + unsigned int count; + bool is_rc5x; +}; + +struct rc6_dec { + int state; + u8 header; + u32 body; + bool toggle; + unsigned int count; + unsigned int wanted_bits; +}; + +struct sony_dec { + int state; + u32 bits; + unsigned int count; +}; + +struct jvc_dec { + int state; + u16 bits; + u16 old_bits; + unsigned int count; + bool first; + bool toggle; +}; + +struct sanyo_dec { + int state; + unsigned int count; + u64 bits; +}; + +struct sharp_dec { + int state; + unsigned int count; + u32 bits; + unsigned int pulse_len; +}; + +struct mce_kbd_dec { + spinlock_t keylock; + struct timer_list rx_timeout; + int state; + u8 header; + u32 body; + unsigned int count; + unsigned int wanted_bits; +}; + +struct xmp_dec { + int state; + unsigned int count; + u32 durations[16]; +}; + +struct ir_raw_event_ctrl { + struct list_head list; + struct task_struct *thread; + struct { + union { + struct __kfifo kfifo; + struct ir_raw_event *type; + const struct ir_raw_event *const_type; + char (*rectype)[0]; + struct 
ir_raw_event *ptr; + const struct ir_raw_event *ptr_const; + }; + struct ir_raw_event buf[512]; + } kfifo; + ktime_t last_event; + struct rc_dev *dev; + spinlock_t edge_spinlock; + struct timer_list edge_handle; + struct ir_raw_event prev_ev; + struct ir_raw_event this_ev; + struct nec_dec nec; + struct rc5_dec rc5; + struct rc6_dec rc6; + struct sony_dec sony; + struct jvc_dec jvc; + struct sanyo_dec sanyo; + struct sharp_dec sharp; + struct mce_kbd_dec mce_kbd; + struct xmp_dec xmp; + long: 32; +}; + +struct led_trigger {}; + +struct rc_filter_attribute { + struct device_attribute attr; + enum rc_filter_type type; + bool mask; +}; + +struct usb_device_id { + __u16 match_flags; + __u16 idVendor; + __u16 idProduct; + __u16 bcdDevice_lo; + __u16 bcdDevice_hi; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 bInterfaceNumber; + kernel_ulong_t driver_info; +}; + +struct hid_device_id { + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + kernel_ulong_t driver_data; +}; + +struct usb_ctrlrequest { + __u8 bRequestType; + __u8 bRequest; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +}; + +struct usb_device_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __le16 idVendor; + __le16 idProduct; + __le16 bcdDevice; + __u8 iManufacturer; + __u8 iProduct; + __u8 iSerialNumber; + __u8 bNumConfigurations; +}; + +struct usb_config_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumInterfaces; + __u8 bConfigurationValue; + __u8 iConfiguration; + __u8 bmAttributes; + __u8 bMaxPower; +} __attribute__((packed)); + +struct usb_interface_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bInterfaceNumber; + __u8 bAlternateSetting; + __u8 bNumEndpoints; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 iInterface; +}; + +struct usb_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bEndpointAddress; + __u8 bmAttributes; + __le16 wMaxPacketSize; + __u8 bInterval; + __u8 bRefresh; + __u8 bSynchAddress; +} __attribute__((packed)); + +struct usb_ssp_isoc_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wReseved; + __le32 dwBytesPerInterval; +}; + +struct usb_ss_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bMaxBurst; + __u8 bmAttributes; + __le16 wBytesPerInterval; +}; + +struct usb_interface_assoc_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bFirstInterface; + __u8 bInterfaceCount; + __u8 bFunctionClass; + __u8 bFunctionSubClass; + __u8 bFunctionProtocol; + __u8 iFunction; +}; + +struct usb_bos_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumDeviceCaps; +} __attribute__((packed)); + +struct usb_ext_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __le32 bmAttributes; +} __attribute__((packed)); + +struct usb_ss_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bmAttributes; + __le16 wSpeedSupported; + __u8 bFunctionalitySupport; + __u8 bU1devExitLat; + __le16 bU2DevExitLat; +}; + +struct usb_ss_container_id_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __u8 ContainerID[16]; +}; + +struct usb_ssp_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 
bDevCapabilityType; + __u8 bReserved; + __le32 bmAttributes; + __le16 wFunctionalitySupport; + __le16 wReserved; + union { + __le32 legacy_padding; + struct { + struct {} __empty_bmSublinkSpeedAttr; + __le32 bmSublinkSpeedAttr[0]; + }; + }; +}; + +struct usb_ptm_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +enum usb_device_speed { + USB_SPEED_UNKNOWN = 0, + USB_SPEED_LOW = 1, + USB_SPEED_FULL = 2, + USB_SPEED_HIGH = 3, + USB_SPEED_WIRELESS = 4, + USB_SPEED_SUPER = 5, + USB_SPEED_SUPER_PLUS = 6, +}; + +enum usb_device_state { + USB_STATE_NOTATTACHED = 0, + USB_STATE_ATTACHED = 1, + USB_STATE_POWERED = 2, + USB_STATE_RECONNECTING = 3, + USB_STATE_UNAUTHENTICATED = 4, + USB_STATE_DEFAULT = 5, + USB_STATE_ADDRESS = 6, + USB_STATE_CONFIGURED = 7, + USB_STATE_SUSPENDED = 8, +}; + +enum usb_ssp_rate { + USB_SSP_GEN_UNKNOWN = 0, + USB_SSP_GEN_2x1 = 1, + USB_SSP_GEN_1x2 = 2, + USB_SSP_GEN_2x2 = 3, +}; + +struct ep_device; + +struct usb_host_endpoint { + struct usb_endpoint_descriptor desc; + struct usb_ss_ep_comp_descriptor ss_ep_comp; + struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; + long: 0; + struct list_head urb_list; + void *hcpriv; + struct ep_device *ep_dev; + unsigned char *extra; + int extralen; + int enabled; + int streams; +} __attribute__((packed)); + +struct usb_host_interface { + struct usb_interface_descriptor desc; + int extralen; + unsigned char *extra; + struct usb_host_endpoint *endpoint; + char *string; +}; + +enum usb_interface_condition { + USB_INTERFACE_UNBOUND = 0, + USB_INTERFACE_BINDING = 1, + USB_INTERFACE_BOUND = 2, + USB_INTERFACE_UNBINDING = 3, +}; + +enum usb_wireless_status { + USB_WIRELESS_STATUS_NA = 0, + USB_WIRELESS_STATUS_DISCONNECTED = 1, + USB_WIRELESS_STATUS_CONNECTED = 2, +}; + +struct usb_interface { + struct usb_host_interface *altsetting; + struct usb_host_interface *cur_altsetting; + unsigned int num_altsetting; + struct usb_interface_assoc_descriptor *intf_assoc; + int minor; + enum usb_interface_condition condition; + unsigned int sysfs_files_created: 1; + unsigned int ep_devs_created: 1; + unsigned int unregistering: 1; + unsigned int needs_remote_wakeup: 1; + unsigned int needs_altsetting0: 1; + unsigned int needs_binding: 1; + unsigned int resetting_device: 1; + unsigned int authorized: 1; + enum usb_wireless_status wireless_status; + struct work_struct wireless_status_work; + struct device dev; + struct device *usb_dev; + struct work_struct reset_ws; + long: 32; +}; + +struct usb_interface_cache { + unsigned int num_altsetting; + struct kref ref; + struct usb_host_interface altsetting[0]; +}; + +struct usb_host_config { + struct usb_config_descriptor desc; + char *string; + struct usb_interface_assoc_descriptor *intf_assoc[16]; + struct usb_interface *interface[32]; + struct usb_interface_cache *intf_cache[32]; + unsigned char *extra; + int extralen; +}; + +struct usb_host_bos { + struct usb_bos_descriptor *desc; + struct usb_ext_cap_descriptor *ext_cap; + struct usb_ss_cap_descriptor *ss_cap; + struct usb_ssp_cap_descriptor *ssp_cap; + struct usb_ss_container_id_descriptor *ss_id; + struct usb_ptm_cap_descriptor *ptm_cap; +}; + +struct usb_device; + +struct usb_bus { + struct device *controller; + struct device *sysdev; + int busnum; + const char *bus_name; + u8 uses_pio_for_control; + u8 otg_port; + unsigned int is_b_host: 1; + unsigned int b_hnp_enable: 1; + unsigned int no_stop_on_short: 1; + unsigned int no_sg_constraint: 1; + unsigned int sg_tablesize; + int devnum_next; + struct mutex 
devnum_next_mutex; + long unsigned int devmap[4]; + struct usb_device *root_hub; + struct usb_bus *hs_companion; + int bandwidth_allocated; + int bandwidth_int_reqs; + int bandwidth_isoc_reqs; + unsigned int resuming_ports; +}; + +enum usb_link_tunnel_mode { + USB_LINK_UNKNOWN = 0, + USB_LINK_NATIVE = 1, + USB_LINK_TUNNELED = 2, +}; + +struct usb2_lpm_parameters { + unsigned int besl; + int timeout; +}; + +struct usb3_lpm_parameters { + unsigned int mel; + unsigned int pel; + unsigned int sel; + int timeout; +}; + +struct usb_tt; + +struct usb_device { + int devnum; + char devpath[16]; + u32 route; + enum usb_device_state state; + enum usb_device_speed speed; + unsigned int rx_lanes; + unsigned int tx_lanes; + enum usb_ssp_rate ssp_rate; + struct usb_tt *tt; + int ttport; + unsigned int toggle[2]; + struct usb_device *parent; + struct usb_bus *bus; + struct usb_host_endpoint ep0; + long: 32; + struct device dev; + struct usb_device_descriptor descriptor; + struct usb_host_bos *bos; + struct usb_host_config *config; + struct usb_host_config *actconfig; + struct usb_host_endpoint *ep_in[16]; + struct usb_host_endpoint *ep_out[16]; + char **rawdescriptors; + short unsigned int bus_mA; + u8 portnum; + u8 level; + u8 devaddr; + unsigned int can_submit: 1; + unsigned int persist_enabled: 1; + unsigned int reset_in_progress: 1; + unsigned int have_langid: 1; + unsigned int authorized: 1; + unsigned int authenticated: 1; + unsigned int lpm_capable: 1; + unsigned int lpm_devinit_allow: 1; + unsigned int usb2_hw_lpm_capable: 1; + unsigned int usb2_hw_lpm_besl_capable: 1; + unsigned int usb2_hw_lpm_enabled: 1; + unsigned int usb2_hw_lpm_allowed: 1; + unsigned int usb3_lpm_u1_enabled: 1; + unsigned int usb3_lpm_u2_enabled: 1; + int string_langid; + char *product; + char *manufacturer; + char *serial; + struct list_head filelist; + int maxchild; + u32 quirks; + atomic_t urbnum; + long unsigned int active_duration; + long unsigned int connect_time; + unsigned int do_remote_wakeup: 1; + unsigned int reset_resume: 1; + unsigned int port_is_suspended: 1; + enum usb_link_tunnel_mode tunnel_mode; + int slot_id; + struct usb2_lpm_parameters l1_params; + struct usb3_lpm_parameters u1_params; + struct usb3_lpm_parameters u2_params; + unsigned int lpm_disable_count; + u16 hub_delay; + unsigned int use_generic_driver: 1; + long: 32; +}; + +struct usb_dynids { + struct list_head list; +}; + +struct usb_driver { + const char *name; + int (*probe)(struct usb_interface *, const struct usb_device_id *); + void (*disconnect)(struct usb_interface *); + int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *); + int (*suspend)(struct usb_interface *, pm_message_t); + int (*resume)(struct usb_interface *); + int (*reset_resume)(struct usb_interface *); + int (*pre_reset)(struct usb_interface *); + int (*post_reset)(struct usb_interface *); + void (*shutdown)(struct usb_interface *); + const struct usb_device_id *id_table; + const struct attribute_group **dev_groups; + struct usb_dynids dynids; + struct device_driver driver; + unsigned int no_dynamic_id: 1; + unsigned int supports_autosuspend: 1; + unsigned int disable_hub_initiated_lpm: 1; + unsigned int soft_unbind: 1; +}; + +struct usb_iso_packet_descriptor { + unsigned int offset; + unsigned int length; + unsigned int actual_length; + int status; +}; + +struct usb_anchor { + struct list_head urb_list; + wait_queue_head_t wait; + spinlock_t lock; + atomic_t suspend_wakeups; + unsigned int poisoned: 1; +}; + +struct urb; + +typedef void 
(*usb_complete_t)(struct urb *); + +struct urb { + struct kref kref; + int unlinked; + void *hcpriv; + atomic_t use_count; + atomic_t reject; + struct list_head urb_list; + struct list_head anchor_list; + struct usb_anchor *anchor; + struct usb_device *dev; + struct usb_host_endpoint *ep; + unsigned int pipe; + unsigned int stream_id; + int status; + unsigned int transfer_flags; + void *transfer_buffer; + dma_addr_t transfer_dma; + struct scatterlist *sg; + int num_mapped_sgs; + int num_sgs; + u32 transfer_buffer_length; + u32 actual_length; + unsigned char *setup_packet; + dma_addr_t setup_dma; + int start_frame; + int number_of_packets; + int interval; + int error_count; + void *context; + usb_complete_t complete; + struct usb_iso_packet_descriptor iso_frame_desc[0]; +}; + +enum hid_report_type { + HID_INPUT_REPORT = 0, + HID_OUTPUT_REPORT = 1, + HID_FEATURE_REPORT = 2, + HID_REPORT_TYPES = 3, +}; + +enum hid_class_request { + HID_REQ_GET_REPORT = 1, + HID_REQ_GET_IDLE = 2, + HID_REQ_GET_PROTOCOL = 3, + HID_REQ_SET_REPORT = 9, + HID_REQ_SET_IDLE = 10, + HID_REQ_SET_PROTOCOL = 11, +}; + +enum bpf_type_flag { + PTR_MAYBE_NULL = 256, + MEM_RDONLY = 512, + MEM_RINGBUF = 1024, + MEM_USER = 2048, + MEM_PERCPU = 4096, + OBJ_RELEASE = 8192, + PTR_UNTRUSTED = 16384, + MEM_UNINIT = 32768, + DYNPTR_TYPE_LOCAL = 65536, + DYNPTR_TYPE_RINGBUF = 131072, + MEM_FIXED_SIZE = 262144, + MEM_ALLOC = 524288, + PTR_TRUSTED = 1048576, + MEM_RCU = 2097152, + NON_OWN_REF = 4194304, + DYNPTR_TYPE_SKB = 8388608, + DYNPTR_TYPE_XDP = 16777216, + MEM_ALIGNED = 33554432, + MEM_WRITE = 67108864, + __BPF_TYPE_FLAG_MAX = 67108865, + __BPF_TYPE_LAST_FLAG = 67108864, +}; + +enum bpf_arg_type { + ARG_DONTCARE = 0, + ARG_CONST_MAP_PTR = 1, + ARG_PTR_TO_MAP_KEY = 2, + ARG_PTR_TO_MAP_VALUE = 3, + ARG_PTR_TO_MEM = 4, + ARG_PTR_TO_ARENA = 5, + ARG_CONST_SIZE = 6, + ARG_CONST_SIZE_OR_ZERO = 7, + ARG_PTR_TO_CTX = 8, + ARG_ANYTHING = 9, + ARG_PTR_TO_SPIN_LOCK = 10, + ARG_PTR_TO_SOCK_COMMON = 11, + ARG_PTR_TO_SOCKET = 12, + ARG_PTR_TO_BTF_ID = 13, + ARG_PTR_TO_RINGBUF_MEM = 14, + ARG_CONST_ALLOC_SIZE_OR_ZERO = 15, + ARG_PTR_TO_BTF_ID_SOCK_COMMON = 16, + ARG_PTR_TO_PERCPU_BTF_ID = 17, + ARG_PTR_TO_FUNC = 18, + ARG_PTR_TO_STACK = 19, + ARG_PTR_TO_CONST_STR = 20, + ARG_PTR_TO_TIMER = 21, + ARG_KPTR_XCHG_DEST = 22, + ARG_PTR_TO_DYNPTR = 23, + __BPF_ARG_TYPE_MAX = 24, + ARG_PTR_TO_MAP_VALUE_OR_NULL = 259, + ARG_PTR_TO_MEM_OR_NULL = 260, + ARG_PTR_TO_CTX_OR_NULL = 264, + ARG_PTR_TO_SOCKET_OR_NULL = 268, + ARG_PTR_TO_STACK_OR_NULL = 275, + ARG_PTR_TO_BTF_ID_OR_NULL = 269, + ARG_PTR_TO_UNINIT_MEM = 67141636, + ARG_PTR_TO_FIXED_SIZE_MEM = 262148, + __BPF_ARG_TYPE_LIMIT = 134217727, +}; + +enum bpf_return_type { + RET_INTEGER = 0, + RET_VOID = 1, + RET_PTR_TO_MAP_VALUE = 2, + RET_PTR_TO_SOCKET = 3, + RET_PTR_TO_TCP_SOCK = 4, + RET_PTR_TO_SOCK_COMMON = 5, + RET_PTR_TO_MEM = 6, + RET_PTR_TO_MEM_OR_BTF_ID = 7, + RET_PTR_TO_BTF_ID = 8, + __BPF_RET_TYPE_MAX = 9, + RET_PTR_TO_MAP_VALUE_OR_NULL = 258, + RET_PTR_TO_SOCKET_OR_NULL = 259, + RET_PTR_TO_TCP_SOCK_OR_NULL = 260, + RET_PTR_TO_SOCK_COMMON_OR_NULL = 261, + RET_PTR_TO_RINGBUF_MEM_OR_NULL = 1286, + RET_PTR_TO_DYNPTR_MEM_OR_NULL = 262, + RET_PTR_TO_BTF_ID_OR_NULL = 264, + RET_PTR_TO_BTF_ID_TRUSTED = 1048584, + __BPF_RET_TYPE_LIMIT = 134217727, +}; + +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED = 0, + BPF_CGROUP_STORAGE_PERCPU = 1, + __BPF_CGROUP_STORAGE_MAX = 2, +}; + +enum { + BPF_MAX_TRAMP_LINKS = 38, +}; + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY = 0, + BPF_TRAMP_FEXIT = 
1, + BPF_TRAMP_MODIFY_RETURN = 2, + BPF_TRAMP_MAX = 3, + BPF_TRAMP_REPLACE = 4, +}; + +enum hid_type { + HID_TYPE_OTHER = 0, + HID_TYPE_USBMOUSE = 1, + HID_TYPE_USBNONE = 2, +}; + +struct hid_report; + +struct hid_report_enum { + unsigned int numbered; + struct list_head report_list; + struct hid_report *report_id_hash[256]; +}; + +struct hid_collection; + +struct hid_driver; + +struct hid_ll_driver; + +struct hid_field; + +struct hid_usage; + +struct hid_device { + const __u8 *dev_rdesc; + const __u8 *bpf_rdesc; + const __u8 *rdesc; + unsigned int dev_rsize; + unsigned int bpf_rsize; + unsigned int rsize; + unsigned int collection_size; + struct hid_collection *collection; + unsigned int maxcollection; + unsigned int maxapplication; + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + __u32 version; + enum hid_type type; + unsigned int country; + struct hid_report_enum report_enum[3]; + struct work_struct led_work; + struct semaphore driver_input_lock; + long: 32; + struct device dev; + struct hid_driver *driver; + void *devres_group_id; + const struct hid_ll_driver *ll_driver; + struct mutex ll_open_lock; + unsigned int ll_open_count; + long unsigned int status; + unsigned int claimed; + unsigned int quirks; + unsigned int initial_quirks; + bool io_started; + struct list_head inputs; + void *hiddev; + void *hidraw; + char name[128]; + char phys[64]; + char uniq[64]; + void *driver_data; + int (*ff_init)(struct hid_device *); + int (*hiddev_connect)(struct hid_device *, unsigned int); + void (*hiddev_disconnect)(struct hid_device *); + void (*hiddev_hid_event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); + void (*hiddev_report_event)(struct hid_device *, struct hid_report *); + short unsigned int debug; + struct dentry *debug_dir; + struct dentry *debug_rdesc; + struct dentry *debug_events; + struct list_head debug_list; + spinlock_t debug_list_lock; + wait_queue_head_t debug_wait; + struct kref ref; + unsigned int id; +}; + +struct hid_field_entry; + +struct hid_report { + struct list_head list; + struct list_head hidinput_list; + struct list_head field_entry_list; + unsigned int id; + enum hid_report_type type; + unsigned int application; + struct hid_field *field[256]; + struct hid_field_entry *field_entries; + unsigned int maxfield; + unsigned int size; + struct hid_device *device; + bool tool_active; + unsigned int tool; +}; + +struct hid_collection { + int parent_idx; + unsigned int type; + unsigned int usage; + unsigned int level; +}; + +struct hid_usage { + unsigned int hid; + unsigned int collection_index; + unsigned int usage_index; + __s8 resolution_multiplier; + __s8 wheel_factor; + __u16 code; + __u8 type; + __s16 hat_min; + __s16 hat_max; + __s16 hat_dir; + __s16 wheel_accumulated; +}; + +struct hid_input; + +struct hid_field { + unsigned int physical; + unsigned int logical; + unsigned int application; + struct hid_usage *usage; + unsigned int maxusage; + unsigned int flags; + unsigned int report_offset; + unsigned int report_size; + unsigned int report_count; + unsigned int report_type; + __s32 *value; + __s32 *new_value; + __s32 *usages_priorities; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned int unit; + bool ignored; + struct hid_report *report; + unsigned int index; + struct hid_input *hidinput; + __u16 dpad; + unsigned int slot_idx; +}; + +struct hid_input { + struct list_head list; + struct hid_report *report; + struct input_dev *input; + const 
char *name; + struct list_head reports; + unsigned int application; + bool registered; +}; + +struct hid_field_entry { + struct list_head list; + struct hid_field *field; + unsigned int index; + __s32 priority; +}; + +struct hid_control_fifo { + unsigned char dir; + struct hid_report *report; + char *raw_report; +}; + +struct hid_output_fifo { + struct hid_report *report; + char *raw_report; +}; + +struct hid_report_id; + +struct hid_usage_id; + +struct hid_driver { + char *name; + const struct hid_device_id *id_table; + struct list_head dyn_list; + spinlock_t dyn_lock; + bool (*match)(struct hid_device *, bool); + int (*probe)(struct hid_device *, const struct hid_device_id *); + void (*remove)(struct hid_device *); + const struct hid_report_id *report_table; + int (*raw_event)(struct hid_device *, struct hid_report *, u8 *, int); + const struct hid_usage_id *usage_table; + int (*event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); + void (*report)(struct hid_device *, struct hid_report *); + const __u8 * (*report_fixup)(struct hid_device *, __u8 *, unsigned int *); + int (*input_mapping)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); + int (*input_mapped)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); + int (*input_configured)(struct hid_device *, struct hid_input *); + void (*feature_mapping)(struct hid_device *, struct hid_field *, struct hid_usage *); + int (*suspend)(struct hid_device *, pm_message_t); + int (*resume)(struct hid_device *); + int (*reset_resume)(struct hid_device *); + struct device_driver driver; +}; + +struct hid_ll_driver { + int (*start)(struct hid_device *); + void (*stop)(struct hid_device *); + int (*open)(struct hid_device *); + void (*close)(struct hid_device *); + int (*power)(struct hid_device *, int); + int (*parse)(struct hid_device *); + void (*request)(struct hid_device *, struct hid_report *, int); + int (*wait)(struct hid_device *); + int (*raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, unsigned char, int); + int (*output_report)(struct hid_device *, __u8 *, size_t); + int (*idle)(struct hid_device *, int, int, int); + bool (*may_wakeup)(struct hid_device *); + unsigned int max_buffer_size; +}; + +struct hid_class_descriptor { + __u8 bDescriptorType; + __le16 wDescriptorLength; +} __attribute__((packed)); + +struct hid_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdHID; + __u8 bCountryCode; + __u8 bNumDescriptors; + struct hid_class_descriptor desc[1]; +} __attribute__((packed)); + +struct hid_report_id { + __u32 report_type; +}; + +struct hid_usage_id { + __u32 usage_hid; + __u32 usage_type; + __u32 usage_code; +}; + +struct usbhid_device { + struct hid_device *hid; + struct usb_interface *intf; + int ifnum; + unsigned int bufsize; + struct urb *urbin; + char *inbuf; + dma_addr_t inbuf_dma; + struct urb *urbctrl; + struct usb_ctrlrequest *cr; + struct hid_control_fifo ctrl[256]; + unsigned char ctrlhead; + unsigned char ctrltail; + char *ctrlbuf; + dma_addr_t ctrlbuf_dma; + long unsigned int last_ctrl; + struct urb *urbout; + struct hid_output_fifo out[256]; + unsigned char outhead; + unsigned char outtail; + char *outbuf; + dma_addr_t outbuf_dma; + long unsigned int last_out; + struct mutex mutex; + spinlock_t lock; + long unsigned int iofl; + struct timer_list io_retry; + long unsigned int stop_retry; + unsigned int retry_delay; + struct work_struct reset_work; + 
wait_queue_head_t wait; +}; + +struct xdp_md { + __u32 data; + __u32 data_end; + __u32 data_meta; + __u32 ingress_ifindex; + __u32 rx_queue_index; + __u32 egress_ifindex; +}; + +struct rhash_lock_head; + +struct bucket_table { + unsigned int size; + unsigned int nest; + u32 hash_rnd; + struct list_head walkers; + struct callback_head rcu; + struct bucket_table *future_tbl; + struct lockdep_map dep_map; + struct rhash_lock_head *buckets[0]; +}; + +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +struct page_pool_params_fast { + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; +}; + +typedef long unsigned int netmem_ref; + +struct page_pool_alloc_stats { + u64 fast; + u64 slow; + u64 slow_high_order; + u64 empty; + u64 refill; + u64 waive; +}; + +struct pp_alloc_cache { + u32 count; + netmem_ref cache[128]; +}; + +struct ptr_ring { + int producer; + spinlock_t producer_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + int consumer_head; + int consumer_tail; + spinlock_t consumer_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + int size; + int batch; + void **queue; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct page_pool_params_slow { + struct net_device *netdev; + unsigned int queue_idx; + unsigned int flags; + void (*init_callback)(netmem_ref, void *); + void *init_arg; +}; + +struct page_pool_recycle_stats; + +struct page_pool { + struct page_pool_params_fast p; + int cpuid; + u32 pages_state_hold_cnt; + bool has_init_callback: 1; + bool dma_map: 1; + bool dma_sync: 1; + bool system: 1; + long: 32; + __u8 __cacheline_group_begin__frag[0]; + long int frag_users; + netmem_ref frag_page; + unsigned int frag_offset; + __u8 __cacheline_group_end__frag[0]; + long: 32; + struct {} __cacheline_group_pad__frag; + struct delayed_work release_dw; + void (*disconnect)(void *); + long unsigned int defer_start; + long unsigned int defer_warn; + struct page_pool_alloc_stats alloc_stats; + u32 xdp_mem_id; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct pp_alloc_cache alloc; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ptr_ring ring; + void *mp_priv; + struct page_pool_recycle_stats *recycle_stats; + atomic_t pages_state_release_cnt; + refcount_t user_cnt; + u64 destroy_cnt; + struct page_pool_params_slow slow; + long: 32; + struct { + struct hlist_node list; + u64 detach_time; + u32 napi_id; + u32 id; + } user; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next; + struct hlist_nulls_node **pprev; +}; + +typedef __u64 __addrpair; + +typedef __u32 __portpair; + +struct proto; + +struct sock_common { + union { + __addrpair skc_addrpair; + struct { + __be32 skc_daddr; + __be32 skc_rcv_saddr; + }; + }; + union { + unsigned int skc_hash; + __u16 skc_u16hashes[2]; + }; + union { + __portpair skc_portpair; + struct { + __be16 skc_dport; + __u16 skc_num; + }; + }; + short unsigned int skc_family; + volatile unsigned char skc_state; + unsigned char skc_reuse: 4; + unsigned char skc_reuseport: 1; + unsigned 
char skc_ipv6only: 1; + unsigned char skc_net_refcnt: 1; + int skc_bound_dev_if; + union { + struct hlist_node skc_bind_node; + struct hlist_node skc_portaddr_node; + }; + struct proto *skc_prot; + possible_net_t skc_net; + struct in6_addr skc_v6_daddr; + struct in6_addr skc_v6_rcv_saddr; + atomic64_t skc_cookie; + union { + long unsigned int skc_flags; + struct sock *skc_listener; + struct inet_timewait_death_row *skc_tw_dr; + }; + int skc_dontcopy_begin[0]; + union { + struct hlist_node skc_node; + struct hlist_nulls_node skc_nulls_node; + }; + short unsigned int skc_tx_queue_mapping; + short unsigned int skc_rx_queue_mapping; + union { + int skc_incoming_cpu; + u32 skc_rcv_wnd; + u32 skc_tw_rcv_nxt; + }; + refcount_t skc_refcnt; + int skc_dontcopy_end[0]; + union { + u32 skc_rxhash; + u32 skc_window_clamp; + u32 skc_tw_snd_nxt; + }; + long: 32; +}; + +typedef struct { + spinlock_t slock; + int owned; + wait_queue_head_t wq; +} socket_lock_t; + +struct sock_cgroup_data { + struct cgroup *cgroup; + u32 classid; +}; + +typedef struct {} netns_tracker; + +struct sk_filter; + +struct socket_wq; + +struct socket; + +struct sock_reuseport; + +struct sock { + struct sock_common __sk_common; + __u8 __cacheline_group_begin__sock_write_rx[0]; + atomic_t sk_drops; + __s32 sk_peek_off; + struct sk_buff_head sk_error_queue; + struct sk_buff_head sk_receive_queue; + struct { + atomic_t rmem_alloc; + int len; + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; + __u8 __cacheline_group_end__sock_write_rx[0]; + __u8 __cacheline_group_begin__sock_read_rx[0]; + struct dst_entry *sk_rx_dst; + int sk_rx_dst_ifindex; + u32 sk_rx_dst_cookie; + unsigned int sk_ll_usec; + unsigned int sk_napi_id; + u16 sk_busy_poll_budget; + u8 sk_prefer_busy_poll; + u8 sk_userlocks; + int sk_rcvbuf; + struct sk_filter *sk_filter; + union { + struct socket_wq *sk_wq; + struct socket_wq *sk_wq_raw; + }; + void (*sk_data_ready)(struct sock *); + long int sk_rcvtimeo; + int sk_rcvlowat; + __u8 __cacheline_group_end__sock_read_rx[0]; + __u8 __cacheline_group_begin__sock_read_rxtx[0]; + int sk_err; + struct socket *sk_socket; + struct mem_cgroup *sk_memcg; + __u8 __cacheline_group_end__sock_read_rxtx[0]; + __u8 __cacheline_group_begin__sock_write_rxtx[0]; + socket_lock_t sk_lock; + u32 sk_reserved_mem; + int sk_forward_alloc; + u32 sk_tsflags; + __u8 __cacheline_group_end__sock_write_rxtx[0]; + __u8 __cacheline_group_begin__sock_write_tx[0]; + int sk_write_pending; + atomic_t sk_omem_alloc; + int sk_sndbuf; + int sk_wmem_queued; + refcount_t sk_wmem_alloc; + long unsigned int sk_tsq_flags; + union { + struct sk_buff *sk_send_head; + struct rb_root tcp_rtx_queue; + }; + struct sk_buff_head sk_write_queue; + u32 sk_dst_pending_confirm; + u32 sk_pacing_status; + struct page_frag sk_frag; + struct timer_list sk_timer; + long unsigned int sk_pacing_rate; + atomic_t sk_zckey; + atomic_t sk_tskey; + __u8 __cacheline_group_end__sock_write_tx[0]; + __u8 __cacheline_group_begin__sock_read_tx[0]; + long unsigned int sk_max_pacing_rate; + long int sk_sndtimeo; + u32 sk_priority; + u32 sk_mark; + struct dst_entry *sk_dst_cache; + long: 32; + netdev_features_t sk_route_caps; + u16 sk_gso_type; + u16 sk_gso_max_segs; + unsigned int sk_gso_max_size; + gfp_t sk_allocation; + u32 sk_txhash; + u8 sk_pacing_shift; + bool sk_use_task_frag; + __u8 __cacheline_group_end__sock_read_tx[0]; + u8 sk_gso_disabled: 1; + u8 sk_kern_sock: 1; + u8 sk_no_check_tx: 1; + u8 sk_no_check_rx: 1; + u8 sk_shutdown; + u16 sk_type; + u16 sk_protocol; + long 
unsigned int sk_lingertime; + struct proto *sk_prot_creator; + rwlock_t sk_callback_lock; + int sk_err_soft; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; + kuid_t sk_uid; + spinlock_t sk_peer_lock; + int sk_bind_phc; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; + long: 32; + ktime_t sk_stamp; + seqlock_t sk_stamp_seq; + int sk_disconnects; + u8 sk_txrehash; + u8 sk_clockid; + u8 sk_txtime_deadline_mode: 1; + u8 sk_txtime_report_errors: 1; + u8 sk_txtime_unused: 6; + void *sk_user_data; + struct sock_cgroup_data sk_cgrp_data; + void (*sk_state_change)(struct sock *); + void (*sk_write_space)(struct sock *); + void (*sk_error_report)(struct sock *); + int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); + void (*sk_destruct)(struct sock *); + struct sock_reuseport *sk_reuseport_cb; + struct bpf_local_storage *sk_bpf_storage; + struct callback_head sk_rcu; + netns_tracker ns_tracker; + struct xarray sk_user_frags; + long: 32; +}; + +typedef struct { + union { + void *kernel; + void *user; + }; + bool is_kernel: 1; +} sockptr_t; + +struct btf_id_set8 { + u32 cnt; + u32 flags; + struct { + u32 id; + u32 flags; + } pairs[0]; +}; + +typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *, u32); + +struct btf_kfunc_id_set { + struct module *owner; + struct btf_id_set8 *set; + btf_kfunc_filter_t filter; +}; + +struct ubuf_info; + +struct msghdr { + void *msg_name; + int msg_namelen; + int msg_inq; + long: 32; + struct iov_iter msg_iter; + union { + void *msg_control; + void *msg_control_user; + }; + bool msg_control_is_user: 1; + bool msg_get_inq: 1; + unsigned int msg_flags; + __kernel_size_t msg_controllen; + struct kiocb *msg_iocb; + struct ubuf_info *msg_ubuf; + int (*sg_from_iter)(struct sk_buff *, struct iov_iter *, size_t); + long: 32; +}; + +struct ubuf_info_ops; + +struct ubuf_info { + const struct ubuf_info_ops *ops; + refcount_t refcnt; + u8 flags; +}; + +struct dmabuf_genpool_chunk_owner; + +struct net_iov { + long unsigned int __unused_padding; + long unsigned int pp_magic; + struct page_pool *pp; + struct dmabuf_genpool_chunk_owner *owner; + long unsigned int dma_addr; + atomic_long_t pp_ref_count; +}; + +struct skb_frag { + netmem_ref netmem; + unsigned int len; + unsigned int offset; +}; + +typedef struct skb_frag skb_frag_t; + +struct ubuf_info_ops { + void (*complete)(struct sk_buff *, struct ubuf_info *, bool); + int (*link_skb)(struct sk_buff *, struct ubuf_info *); +}; + +struct xsk_tx_metadata_compl { + __u64 *tx_timestamp; +}; + +struct skb_shared_info { + __u8 flags; + __u8 meta_len; + __u8 nr_frags; + __u8 tx_flags; + short unsigned int gso_size; + short unsigned int gso_segs; + struct sk_buff *frag_list; + long: 32; + union { + struct skb_shared_hwtstamps hwtstamps; + struct xsk_tx_metadata_compl xsk_meta; + }; + unsigned int gso_type; + u32 tskey; + atomic_t dataref; + unsigned int xdp_frags_size; + void *destructor_arg; + skb_frag_t frags[17]; +}; + +struct prot_inuse { + int all; + int val[64]; +}; + +struct fib_notifier_ops { + int family; + struct list_head list; + unsigned int (*fib_seq_read)(const struct net *); + int (*fib_dump)(struct net *, struct notifier_block *, struct netlink_ext_ack *); + struct module *owner; + struct callback_head rcu; +}; + +struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; +}; + +struct ipv6_devconf { + __u8 __cacheline_group_begin__ipv6_devconf_read_txrx[0]; + __s32 disable_ipv6; + __s32 hop_limit; + __s32 mtu6; + __s32 forwarding; + __s32 disable_policy; + __s32 proxy_ndp; + __u8 
__cacheline_group_end__ipv6_devconf_read_txrx[0]; + __s32 accept_ra; + __s32 accept_redirects; + __s32 autoconf; + __s32 dad_transmits; + __s32 rtr_solicits; + __s32 rtr_solicit_interval; + __s32 rtr_solicit_max_interval; + __s32 rtr_solicit_delay; + __s32 force_mld_version; + __s32 mldv1_unsolicited_report_interval; + __s32 mldv2_unsolicited_report_interval; + __s32 use_tempaddr; + __s32 temp_valid_lft; + __s32 temp_prefered_lft; + __s32 regen_min_advance; + __s32 regen_max_retry; + __s32 max_desync_factor; + __s32 max_addresses; + __s32 accept_ra_defrtr; + __u32 ra_defrtr_metric; + __s32 accept_ra_min_hop_limit; + __s32 accept_ra_min_lft; + __s32 accept_ra_pinfo; + __s32 ignore_routes_with_linkdown; + __s32 accept_source_route; + __s32 accept_ra_from_local; + __s32 drop_unicast_in_l2_multicast; + __s32 accept_dad; + __s32 force_tllao; + __s32 ndisc_notify; + __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; + __s32 drop_unsolicited_na; + __s32 accept_untracked_na; + struct ipv6_stable_secret stable_secret; + __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; + __s32 seg6_enabled; + __u32 enhanced_dad; + __u32 addr_gen_mode; + __s32 ndisc_tclass; + __s32 rpl_seg_enabled; + __u32 ioam6_id; + __u32 ioam6_id_wide; + __u8 ioam6_enabled; + __u8 ndisc_evict_nocarrier; + __u8 ra_honor_pio_life; + __u8 ra_honor_pio_pflag; + struct ctl_table_header *sysctl_header; +}; + +typedef enum { + SS_FREE = 0, + SS_UNCONNECTED = 1, + SS_CONNECTING = 2, + SS_CONNECTED = 3, + SS_DISCONNECTING = 4, +} socket_state; + +struct socket_wq { + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + long unsigned int flags; + struct callback_head rcu; + long: 32; +}; + +struct proto_ops; + +struct socket { + socket_state state; + short int type; + long unsigned int flags; + struct file *file; + struct sock *sk; + const struct proto_ops *ops; + long: 32; + long: 32; + struct socket_wq wq; +}; + +typedef struct { + size_t written; + size_t count; + union { + char *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); + +typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *); + +struct proto_accept_arg; + +struct proto_ops { + int family; + struct module *owner; + int (*release)(struct socket *); + int (*bind)(struct socket *, struct sockaddr *, int); + int (*connect)(struct socket *, struct sockaddr *, int, int); + int (*socketpair)(struct socket *, struct socket *); + int (*accept)(struct socket *, struct socket *, struct proto_accept_arg *); + int (*getname)(struct socket *, struct sockaddr *, int); + __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); + int (*ioctl)(struct socket *, unsigned int, long unsigned int); + int (*gettstamp)(struct socket *, void *, bool, bool); + int (*listen)(struct socket *, int); + int (*shutdown)(struct socket *, int); + int (*setsockopt)(struct socket *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct socket *, int, int, char *, int *); + void (*show_fdinfo)(struct seq_file *, struct socket *); + int (*sendmsg)(struct socket *, struct msghdr *, size_t); + int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); + int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); + ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct socket *); + int (*set_peek_off)(struct sock *, int); + int (*peek_len)(struct socket *); + int 
(*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); + int (*read_skb)(struct sock *, skb_read_actor_t); + int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t); + int (*set_rcvlowat)(struct sock *, int); +}; + +struct proto_accept_arg { + int flags; + int err; + int is_empty; + bool kern; +}; + +enum netdev_xdp_act { + NETDEV_XDP_ACT_BASIC = 1, + NETDEV_XDP_ACT_REDIRECT = 2, + NETDEV_XDP_ACT_NDO_XMIT = 4, + NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, + NETDEV_XDP_ACT_HW_OFFLOAD = 16, + NETDEV_XDP_ACT_RX_SG = 32, + NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + NETDEV_XDP_ACT_MASK = 127, +}; + +struct xsk_queue; + +struct xdp_umem; + +struct xdp_buff_xsk; + +struct xdp_desc; + +struct xsk_buff_pool { + struct device *dev; + struct net_device *netdev; + struct list_head xsk_tx_list; + spinlock_t xsk_tx_list_lock; + refcount_t users; + struct xdp_umem *umem; + struct work_struct work; + struct list_head free_list; + struct list_head xskb_list; + u32 heads_cnt; + u16 queue_id; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xsk_queue *fq; + struct xsk_queue *cq; + dma_addr_t *dma_pages; + struct xdp_buff_xsk *heads; + struct xdp_desc *tx_descs; + long: 32; + u64 chunk_mask; + u64 addrs_cnt; + u32 free_list_cnt; + u32 dma_pages_cnt; + u32 free_heads_cnt; + u32 headroom; + u32 chunk_size; + u32 chunk_shift; + u32 frame_len; + u32 xdp_zc_max_segs; + u8 tx_metadata_len; + u8 cached_need_wakeup; + bool uses_need_wakeup; + bool unaligned; + bool tx_sw_csum; + void *addrs; + spinlock_t cq_lock; + struct xdp_buff_xsk *free_heads[0]; + long: 32; + long: 32; +}; + +struct neigh_parms { + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + int (*neigh_setup)(struct neighbour *); + struct neigh_table *tbl; + void *sysctl_table; + int dead; + refcount_t refcnt; + struct callback_head callback_head; + int reachable_time; + u32 qlen; + int data[14]; + long unsigned int data_state[1]; +}; + +struct xdp_mem_info { + u32 type; + u32 id; +}; + +struct xdp_frame { + void *data; + u16 len; + u16 headroom; + u32 metasize; + struct xdp_mem_info mem; + struct net_device *dev_rx; + u32 frame_sz; + u32 flags; +}; + +struct xdp_rxq_info; + +struct xdp_txq_info; + +struct xdp_buff { + void *data; + void *data_end; + void *data_meta; + void *data_hard_start; + struct xdp_rxq_info *rxq; + struct xdp_txq_info *txq; + u32 frame_sz; + u32 flags; +}; + +enum netdev_reg_state { + NETREG_UNINITIALIZED = 0, + NETREG_REGISTERED = 1, + NETREG_UNREGISTERING = 2, + NETREG_UNREGISTERED = 3, + NETREG_RELEASED = 4, + NETREG_DUMMY = 5, +}; + +struct offload_callbacks { + struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t); + struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sk_buff *, int); +}; + +struct packet_offload { + __be16 type; + u16 priority; + struct offload_callbacks callbacks; + struct list_head list; +}; + +enum netdev_cmd { + NETDEV_UP = 1, + NETDEV_DOWN = 2, + NETDEV_REBOOT = 3, + NETDEV_CHANGE = 4, + NETDEV_REGISTER = 5, + NETDEV_UNREGISTER = 6, + NETDEV_CHANGEMTU = 7, + NETDEV_CHANGEADDR = 8, + NETDEV_PRE_CHANGEADDR = 9, + NETDEV_GOING_DOWN = 10, + NETDEV_CHANGENAME = 11, + NETDEV_FEAT_CHANGE = 12, + NETDEV_BONDING_FAILOVER = 13, + NETDEV_PRE_UP = 14, + NETDEV_PRE_TYPE_CHANGE = 15, + NETDEV_POST_TYPE_CHANGE = 16, + NETDEV_POST_INIT = 17, + NETDEV_PRE_UNINIT = 18, + NETDEV_RELEASE = 19, + NETDEV_NOTIFY_PEERS = 20, + NETDEV_JOIN = 21, + NETDEV_CHANGEUPPER = 
22, + NETDEV_RESEND_IGMP = 23, + NETDEV_PRECHANGEMTU = 24, + NETDEV_CHANGEINFODATA = 25, + NETDEV_BONDING_INFO = 26, + NETDEV_PRECHANGEUPPER = 27, + NETDEV_CHANGELOWERSTATE = 28, + NETDEV_UDP_TUNNEL_PUSH_INFO = 29, + NETDEV_UDP_TUNNEL_DROP_INFO = 30, + NETDEV_CHANGE_TX_QUEUE_LEN = 31, + NETDEV_CVLAN_FILTER_PUSH_INFO = 32, + NETDEV_CVLAN_FILTER_DROP_INFO = 33, + NETDEV_SVLAN_FILTER_PUSH_INFO = 34, + NETDEV_SVLAN_FILTER_DROP_INFO = 35, + NETDEV_OFFLOAD_XSTATS_ENABLE = 36, + NETDEV_OFFLOAD_XSTATS_DISABLE = 37, + NETDEV_OFFLOAD_XSTATS_REPORT_USED = 38, + NETDEV_OFFLOAD_XSTATS_REPORT_DELTA = 39, + NETDEV_XDP_FEAT_CHANGE = 40, +}; + +struct __una_u32 { + u32 x; +}; + +enum { + RTAX_UNSPEC = 0, + RTAX_LOCK = 1, + RTAX_MTU = 2, + RTAX_WINDOW = 3, + RTAX_RTT = 4, + RTAX_RTTVAR = 5, + RTAX_SSTHRESH = 6, + RTAX_CWND = 7, + RTAX_ADVMSS = 8, + RTAX_REORDERING = 9, + RTAX_HOPLIMIT = 10, + RTAX_INITCWND = 11, + RTAX_FEATURES = 12, + RTAX_RTO_MIN = 13, + RTAX_INITRWND = 14, + RTAX_QUICKACK = 15, + RTAX_CC_ALGO = 16, + RTAX_FASTOPEN_NO_COOKIE = 17, + __RTAX_MAX = 18, +}; + +struct sk_filter { + refcount_t refcnt; + struct callback_head rcu; + struct bpf_prog *prog; +}; + +struct rhash_lock_head {}; + +struct page_pool_recycle_stats { + u64 cached; + u64 cache_full; + u64 ring; + u64 ring_full; + u64 released_refcnt; +}; + +enum { + NEIGH_VAR_MCAST_PROBES = 0, + NEIGH_VAR_UCAST_PROBES = 1, + NEIGH_VAR_APP_PROBES = 2, + NEIGH_VAR_MCAST_REPROBES = 3, + NEIGH_VAR_RETRANS_TIME = 4, + NEIGH_VAR_BASE_REACHABLE_TIME = 5, + NEIGH_VAR_DELAY_PROBE_TIME = 6, + NEIGH_VAR_INTERVAL_PROBE_TIME_MS = 7, + NEIGH_VAR_GC_STALETIME = 8, + NEIGH_VAR_QUEUE_LEN_BYTES = 9, + NEIGH_VAR_PROXY_QLEN = 10, + NEIGH_VAR_ANYCAST_DELAY = 11, + NEIGH_VAR_PROXY_DELAY = 12, + NEIGH_VAR_LOCKTIME = 13, + NEIGH_VAR_QUEUE_LEN = 14, + NEIGH_VAR_RETRANS_TIME_MS = 15, + NEIGH_VAR_BASE_REACHABLE_TIME_MS = 16, + NEIGH_VAR_GC_INTERVAL = 17, + NEIGH_VAR_GC_THRESH1 = 18, + NEIGH_VAR_GC_THRESH2 = 19, + NEIGH_VAR_GC_THRESH3 = 20, + NEIGH_VAR_MAX = 21, +}; + +struct pneigh_entry; + +struct neigh_statistics; + +struct neigh_hash_table; + +struct neigh_table { + int family; + unsigned int entry_size; + unsigned int key_len; + __be16 protocol; + __u32 (*hash)(const void *, const struct net_device *, __u32 *); + bool (*key_eq)(const struct neighbour *, const void *); + int (*constructor)(struct neighbour *); + int (*pconstructor)(struct pneigh_entry *); + void (*pdestructor)(struct pneigh_entry *); + void (*proxy_redo)(struct sk_buff *); + int (*is_multicast)(const void *); + bool (*allow_add)(const struct net_device *, struct netlink_ext_ack *); + char *id; + struct neigh_parms parms; + struct list_head parms_list; + int gc_interval; + int gc_thresh1; + int gc_thresh2; + int gc_thresh3; + long unsigned int last_flush; + struct delayed_work gc_work; + struct delayed_work managed_work; + struct timer_list proxy_timer; + struct sk_buff_head proxy_queue; + atomic_t entries; + atomic_t gc_entries; + struct list_head gc_list; + struct list_head managed_list; + rwlock_t lock; + long unsigned int last_rand; + struct neigh_statistics *stats; + struct neigh_hash_table *nht; + struct pneigh_entry **phash_buckets; +}; + +struct neigh_statistics { + long unsigned int allocs; + long unsigned int destroys; + long unsigned int hash_grows; + long unsigned int res_failed; + long unsigned int lookups; + long unsigned int hits; + long unsigned int rcv_probes_mcast; + long unsigned int rcv_probes_ucast; + long unsigned int periodic_gc_runs; + long unsigned int forced_gc_runs; + 
long unsigned int unres_discards; + long unsigned int table_fulls; +}; + +struct neigh_ops { + int family; + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +}; + +struct pneigh_entry { + struct pneigh_entry *next; + possible_net_t net; + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; + u8 protocol; + u32 key[0]; +}; + +struct neigh_hash_table { + struct hlist_head *hash_heads; + unsigned int hash_shift; + __u32 hash_rnd[4]; + struct callback_head rcu; +}; + +enum { + TCP_ESTABLISHED = 1, + TCP_SYN_SENT = 2, + TCP_SYN_RECV = 3, + TCP_FIN_WAIT1 = 4, + TCP_FIN_WAIT2 = 5, + TCP_TIME_WAIT = 6, + TCP_CLOSE = 7, + TCP_CLOSE_WAIT = 8, + TCP_LAST_ACK = 9, + TCP_LISTEN = 10, + TCP_CLOSING = 11, + TCP_NEW_SYN_RECV = 12, + TCP_BOUND_INACTIVE = 13, + TCP_MAX_STATES = 14, +}; + +struct smc_hashinfo; + +struct sk_psock; + +struct request_sock_ops; + +struct timewait_sock_ops; + +struct raw_hashinfo; + +struct proto { + void (*close)(struct sock *, long int); + int (*pre_connect)(struct sock *, struct sockaddr *, int); + int (*connect)(struct sock *, struct sockaddr *, int); + int (*disconnect)(struct sock *, int); + struct sock * (*accept)(struct sock *, struct proto_accept_arg *); + int (*ioctl)(struct sock *, int, int *); + int (*init)(struct sock *); + void (*destroy)(struct sock *); + void (*shutdown)(struct sock *, int); + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*keepalive)(struct sock *, int); + int (*sendmsg)(struct sock *, struct msghdr *, size_t); + int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int *); + void (*splice_eof)(struct socket *); + int (*bind)(struct sock *, struct sockaddr *, int); + int (*bind_add)(struct sock *, struct sockaddr *, int); + int (*backlog_rcv)(struct sock *, struct sk_buff *); + bool (*bpf_bypass_getsockopt)(int, int); + void (*release_cb)(struct sock *); + int (*hash)(struct sock *); + void (*unhash)(struct sock *); + void (*rehash)(struct sock *); + int (*get_port)(struct sock *, short unsigned int); + void (*put_port)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + unsigned int inuse_idx; + bool (*stream_memory_free)(const struct sock *, int); + bool (*sock_is_readable)(struct sock *); + void (*enter_memory_pressure)(struct sock *); + void (*leave_memory_pressure)(struct sock *); + atomic_long_t *memory_allocated; + int *per_cpu_fw_alloc; + struct percpu_counter *sockets_allocated; + long unsigned int *memory_pressure; + long int *sysctl_mem; + int *sysctl_wmem; + int *sysctl_rmem; + u32 sysctl_wmem_offset; + u32 sysctl_rmem_offset; + int max_header; + bool no_autobind; + struct kmem_cache *slab; + unsigned int obj_size; + unsigned int ipv6_pinfo_offset; + slab_flags_t slab_flags; + unsigned int useroffset; + unsigned int usersize; + unsigned int *orphan_count; + struct request_sock_ops *rsk_prot; + struct timewait_sock_ops *twsk_prot; + union { + struct inet_hashinfo *hashinfo; + struct udp_table *udp_table; + struct raw_hashinfo *raw_hash; + struct smc_hashinfo *smc_hash; + } h; + struct module *owner; + char name[32]; + struct list_head node; + int (*diag_destroy)(struct sock *, int); +}; + +enum sk_rst_reason { + SK_RST_REASON_NOT_SPECIFIED = 0, + SK_RST_REASON_NO_SOCKET = 1, + 
SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE = 2, + SK_RST_REASON_TCP_RFC7323_PAWS = 3, + SK_RST_REASON_TCP_TOO_OLD_ACK = 4, + SK_RST_REASON_TCP_ACK_UNSENT_DATA = 5, + SK_RST_REASON_TCP_FLAGS = 6, + SK_RST_REASON_TCP_OLD_ACK = 7, + SK_RST_REASON_TCP_ABORT_ON_DATA = 8, + SK_RST_REASON_TCP_TIMEWAIT_SOCKET = 9, + SK_RST_REASON_INVALID_SYN = 10, + SK_RST_REASON_TCP_ABORT_ON_CLOSE = 11, + SK_RST_REASON_TCP_ABORT_ON_LINGER = 12, + SK_RST_REASON_TCP_ABORT_ON_MEMORY = 13, + SK_RST_REASON_TCP_STATE = 14, + SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT = 15, + SK_RST_REASON_TCP_DISCONNECT_WITH_DATA = 16, + SK_RST_REASON_MPTCP_RST_EUNSPEC = 17, + SK_RST_REASON_MPTCP_RST_EMPTCP = 18, + SK_RST_REASON_MPTCP_RST_ERESOURCE = 19, + SK_RST_REASON_MPTCP_RST_EPROHIBIT = 20, + SK_RST_REASON_MPTCP_RST_EWQ2BIG = 21, + SK_RST_REASON_MPTCP_RST_EBADPERF = 22, + SK_RST_REASON_MPTCP_RST_EMIDDLEBOX = 23, + SK_RST_REASON_ERROR = 24, + SK_RST_REASON_MAX = 25, +}; + +struct request_sock; + +struct request_sock_ops { + int family; + unsigned int obj_size; + struct kmem_cache *slab; + char *slab_name; + int (*rtx_syn_ack)(const struct sock *, struct request_sock *); + void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); + void (*send_reset)(const struct sock *, struct sk_buff *, enum sk_rst_reason); + void (*destructor)(struct request_sock *); + void (*syn_ack_timeout)(const struct request_sock *); +}; + +struct timewait_sock_ops { + struct kmem_cache *twsk_slab; + char *twsk_slab_name; + unsigned int twsk_obj_size; + void (*twsk_destructor)(struct sock *); +}; + +struct saved_syn; + +struct request_sock { + struct sock_common __req_common; + struct request_sock *dl_next; + u16 mss; + u8 num_retrans; + u8 syncookie: 1; + u8 num_timeout: 7; + u32 ts_recent; + struct timer_list rsk_timer; + const struct request_sock_ops *rsk_ops; + struct sock *sk; + struct saved_syn *saved_syn; + u32 secid; + u32 peer_secid; + u32 timeout; +}; + +struct saved_syn { + u32 mac_hdrlen; + u32 network_hdrlen; + u32 tcp_hdrlen; + u8 data[0]; +}; + +enum tsq_enum { + TSQ_THROTTLED = 0, + TSQ_QUEUED = 1, + TCP_TSQ_DEFERRED = 2, + TCP_WRITE_TIMER_DEFERRED = 3, + TCP_DELACK_TIMER_DEFERRED = 4, + TCP_MTU_REDUCED_DEFERRED = 5, + TCP_ACK_DEFERRED = 6, +}; + +struct inet6_skb_parm { + int iif; + __be16 ra; + __u16 dst0; + __u16 srcrt; + __u16 dst1; + __u16 lastopt; + __u16 nhoff; + __u16 flags; + __u16 frag_max_size; + __u16 srhoff; +}; + +struct net_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, u32); + unsigned int no_policy: 1; + unsigned int icmp_strict_tag_validation: 1; + u32 secret; +}; + +struct inet6_protocol { + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); + unsigned int flags; + u32 secret; +}; + +struct net_offload { + struct offload_callbacks callbacks; + unsigned int flags; + u32 secret; +}; + +struct rps_sock_flow_table; + +struct net_hotdata { + struct packet_offload ip_packet_offload; + struct net_offload tcpv4_offload; + struct net_protocol tcp_protocol; + struct net_offload udpv4_offload; + struct net_protocol udp_protocol; + struct packet_offload ipv6_packet_offload; + struct net_offload tcpv6_offload; + struct inet6_protocol tcpv6_protocol; + struct inet6_protocol udpv6_protocol; + struct net_offload udpv6_offload; + struct list_head offload_base; + struct list_head ptype_all; + struct kmem_cache *skbuff_cache; + struct kmem_cache *skbuff_fclone_cache; + struct kmem_cache *skb_small_head_cache; + struct 
rps_sock_flow_table *rps_sock_flow_table; + u32 rps_cpu_mask; + int gro_normal_batch; + int netdev_budget; + int netdev_budget_usecs; + int tstamp_prequeue; + int max_backlog; + int dev_tx_weight; + int dev_rx_weight; + int sysctl_max_skb_frags; + int sysctl_skb_defer_max; + int sysctl_mem_pcpu_rsv; +}; + +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, + MEM_TYPE_PAGE_ORDER0 = 1, + MEM_TYPE_PAGE_POOL = 2, + MEM_TYPE_XSK_BUFF_POOL = 3, + MEM_TYPE_MAX = 4, +}; + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; + struct xdp_mem_info mem; + unsigned int napi_id; + u32 frag_size; + long: 32; +}; + +struct xdp_txq_info { + struct net_device *dev; +}; + +enum xdp_buff_flags { + XDP_FLAGS_HAS_FRAGS = 1, + XDP_FLAGS_FRAGS_PF_MEMALLOC = 2, +}; + +struct xdp_frame_bulk { + int count; + void *xa; + void *q[16]; +}; + +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +struct xdp_mem_allocator { + struct xdp_mem_info mem; + union { + void *allocator; + struct page_pool *page_pool; + }; + struct rhash_head node; + struct callback_head rcu; +}; + +struct xdp_desc { + __u64 addr; + __u32 len; + __u32 options; +}; + +struct xdp_umem { + void *addrs; + long: 32; + u64 size; + u32 headroom; + u32 chunk_size; + u32 chunks; + u32 npgs; + struct user_struct *user; + refcount_t users; + u8 flags; + u8 tx_metadata_len; + bool zc; + struct page **pgs; + int id; + struct list_head xsk_dma_list; + struct work_struct work; + long: 32; +}; + +struct xdp_buff_xsk { + struct xdp_buff xdp; + u8 cb[24]; + dma_addr_t dma; + dma_addr_t frame_dma; + struct xsk_buff_pool *pool; + struct list_head list_node; +}; + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct packet_type { + __be16 type; + bool ignore_outgoing; + struct net_device *dev; + netdevice_tracker dev_tracker; + int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); + bool (*id_match)(struct packet_type *, struct sock *); + struct net *af_packet_net; + void *af_packet_priv; + struct list_head list; +}; + +struct llc_sap; + +struct datalink_proto { + unsigned char type[8]; + struct llc_sap *sap; + short unsigned int header_length; + int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + int (*request)(struct datalink_proto *, struct sk_buff *, const unsigned char *); + struct list_head node; +}; + +struct llc_addr { + unsigned char lsap; + unsigned char mac[6]; +}; + +struct llc_sap { + unsigned char state; + unsigned char p_bit; + unsigned char f_bit; + refcount_t refcnt; + int (*rcv_func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + struct llc_addr laddr; + struct list_head node; + spinlock_t sk_lock; + int sk_count; + struct hlist_nulls_head sk_laddr_hash[64]; + struct hlist_head sk_dev_hash[64]; + struct callback_head rcu; +}; + +enum net_device_flags { + IFF_UP = 1, + IFF_BROADCAST = 2, + IFF_DEBUG = 4, + IFF_LOOPBACK = 8, + IFF_POINTOPOINT = 16, + IFF_NOTRAILERS = 32, + IFF_RUNNING = 64, + IFF_NOARP = 128, + IFF_PROMISC = 256, + IFF_ALLMULTI = 512, + IFF_MASTER = 1024, + IFF_SLAVE = 2048, + IFF_MULTICAST = 4096, + IFF_PORTSEL = 8192, + IFF_AUTOMEDIA = 16384, + IFF_DYNAMIC = 32768, + IFF_LOWER_UP = 65536, + IFF_DORMANT = 131072, + IFF_ECHO = 262144, +}; + +enum { + NETIF_F_SG_BIT = 0, + NETIF_F_IP_CSUM_BIT = 1, + __UNUSED_NETIF_F_1 = 2, + NETIF_F_HW_CSUM_BIT = 3, + 
NETIF_F_IPV6_CSUM_BIT = 4, + NETIF_F_HIGHDMA_BIT = 5, + NETIF_F_FRAGLIST_BIT = 6, + NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, + NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, + NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, + NETIF_F_VLAN_CHALLENGED_BIT = 10, + NETIF_F_GSO_BIT = 11, + __UNUSED_NETIF_F_12 = 12, + __UNUSED_NETIF_F_13 = 13, + NETIF_F_GRO_BIT = 14, + NETIF_F_LRO_BIT = 15, + NETIF_F_GSO_SHIFT = 16, + NETIF_F_TSO_BIT = 16, + NETIF_F_GSO_ROBUST_BIT = 17, + NETIF_F_TSO_ECN_BIT = 18, + NETIF_F_TSO_MANGLEID_BIT = 19, + NETIF_F_TSO6_BIT = 20, + NETIF_F_FSO_BIT = 21, + NETIF_F_GSO_GRE_BIT = 22, + NETIF_F_GSO_GRE_CSUM_BIT = 23, + NETIF_F_GSO_IPXIP4_BIT = 24, + NETIF_F_GSO_IPXIP6_BIT = 25, + NETIF_F_GSO_UDP_TUNNEL_BIT = 26, + NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, + NETIF_F_GSO_PARTIAL_BIT = 28, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, + NETIF_F_GSO_SCTP_BIT = 30, + NETIF_F_GSO_ESP_BIT = 31, + NETIF_F_GSO_UDP_BIT = 32, + NETIF_F_GSO_UDP_L4_BIT = 33, + NETIF_F_GSO_FRAGLIST_BIT = 34, + NETIF_F_GSO_LAST = 34, + NETIF_F_FCOE_CRC_BIT = 35, + NETIF_F_SCTP_CRC_BIT = 36, + __UNUSED_NETIF_F_37 = 37, + NETIF_F_NTUPLE_BIT = 38, + NETIF_F_RXHASH_BIT = 39, + NETIF_F_RXCSUM_BIT = 40, + NETIF_F_NOCACHE_COPY_BIT = 41, + NETIF_F_LOOPBACK_BIT = 42, + NETIF_F_RXFCS_BIT = 43, + NETIF_F_RXALL_BIT = 44, + NETIF_F_HW_VLAN_STAG_TX_BIT = 45, + NETIF_F_HW_VLAN_STAG_RX_BIT = 46, + NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47, + NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48, + NETIF_F_HW_TC_BIT = 49, + NETIF_F_HW_ESP_BIT = 50, + NETIF_F_HW_ESP_TX_CSUM_BIT = 51, + NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52, + NETIF_F_HW_TLS_TX_BIT = 53, + NETIF_F_HW_TLS_RX_BIT = 54, + NETIF_F_GRO_HW_BIT = 55, + NETIF_F_HW_TLS_RECORD_BIT = 56, + NETIF_F_GRO_FRAGLIST_BIT = 57, + NETIF_F_HW_MACSEC_BIT = 58, + NETIF_F_GRO_UDP_FWD_BIT = 59, + NETIF_F_HW_HSR_TAG_INS_BIT = 60, + NETIF_F_HW_HSR_TAG_RM_BIT = 61, + NETIF_F_HW_HSR_FWD_BIT = 62, + NETIF_F_HW_HSR_DUP_BIT = 63, + NETDEV_FEATURE_COUNT = 64, +}; + +struct ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_proto; +}; + +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char erom_version[32]; + char reserved2[12]; + __u32 n_priv_flags; + __u32 n_stats; + __u32 testinfo_len; + __u32 eedump_len; + __u32 regdump_len; +}; + +enum tunable_id { + ETHTOOL_ID_UNSPEC = 0, + ETHTOOL_RX_COPYBREAK = 1, + ETHTOOL_TX_COPYBREAK = 2, + ETHTOOL_PFC_PREVENTION_TOUT = 3, + ETHTOOL_TX_COPYBREAK_BUF_SIZE = 4, + __ETHTOOL_TUNABLE_COUNT = 5, +}; + +enum phy_tunable_id { + ETHTOOL_PHY_ID_UNSPEC = 0, + ETHTOOL_PHY_DOWNSHIFT = 1, + ETHTOOL_PHY_FAST_LINK_DOWN = 2, + ETHTOOL_PHY_EDPD = 3, + __ETHTOOL_PHY_TUNABLE_COUNT = 4, +}; + +struct ethtool_regs { + __u32 cmd; + __u32 version; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; + +struct 
ethtool_ringparam { + __u32 cmd; + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +struct ethtool_pauseparam { + __u32 cmd; + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +enum ethtool_link_ext_state { + ETHTOOL_LINK_EXT_STATE_AUTONEG = 0, + ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE = 1, + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH = 2, + ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY = 3, + ETHTOOL_LINK_EXT_STATE_NO_CABLE = 4, + ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE = 5, + ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE = 6, + ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE = 7, + ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED = 8, + ETHTOOL_LINK_EXT_STATE_OVERHEAT = 9, + ETHTOOL_LINK_EXT_STATE_MODULE = 10, +}; + +enum ethtool_link_ext_substate_autoneg { + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED = 2, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED = 3, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE = 4, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE = 5, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD = 6, +}; + +enum ethtool_link_ext_substate_link_training { + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT = 4, +}; + +enum ethtool_link_ext_substate_link_logical_mismatch { + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED = 4, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED = 5, +}; + +enum ethtool_link_ext_substate_bad_signal_integrity { + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 2, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST = 3, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS = 4, +}; + +enum ethtool_link_ext_substate_cable_issue { + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 2, +}; + +enum ethtool_link_ext_substate_module { + ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1, +}; + +enum ethtool_mac_stats_src { + ETHTOOL_MAC_STATS_SRC_AGGREGATE = 0, + ETHTOOL_MAC_STATS_SRC_EMAC = 1, + ETHTOOL_MAC_STATS_SRC_PMAC = 2, +}; + +enum ethtool_module_power_mode_policy { + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1, + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO = 2, +}; + +enum ethtool_module_power_mode { + ETHTOOL_MODULE_POWER_MODE_LOW = 1, + ETHTOOL_MODULE_POWER_MODE_HIGH = 2, +}; + +enum ethtool_mm_verify_status { + ETHTOOL_MM_VERIFY_STATUS_UNKNOWN = 0, + ETHTOOL_MM_VERIFY_STATUS_INITIAL = 1, + ETHTOOL_MM_VERIFY_STATUS_VERIFYING = 2, + ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED = 3, + ETHTOOL_MM_VERIFY_STATUS_FAILED = 4, + ETHTOOL_MM_VERIFY_STATUS_DISABLED = 5, +}; + +struct ethtool_test { + __u32 cmd; + __u32 flags; + __u32 reserved; + __u32 len; + __u64 data[0]; +}; + +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; 
+ __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; + +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; + +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; + +struct ethtool_flow_ext { + __u8 padding[2]; + unsigned char h_dest[6]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + long: 32; + __u64 ring_cookie; + __u32 location; + long: 32; +}; + +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; + long: 32; +}; + +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[128]; +}; + +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_fecparam { + __u32 cmd; + __u32 active_fec; + __u32 fec; + __u32 reserved; +}; + +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 link_mode_masks_nwords; + __u8 transceiver; + __u8 master_slave_cfg; + __u8 master_slave_state; + __u8 rate_matching; + __u32 reserved[7]; +}; + +enum { + ETH_RSS_HASH_TOP_BIT = 0, + ETH_RSS_HASH_XOR_BIT = 1, + ETH_RSS_HASH_CRC32_BIT = 2, + ETH_RSS_HASH_FUNCS_COUNT = 3, +}; + +struct kernel_ethtool_ringparam { + u32 rx_buf_len; + u8 tcp_data_split; + u8 tx_push; + u8 rx_push; + u32 cqe_size; + u32 tx_push_buf_len; + u32 tx_push_buf_max_len; +}; + +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + enum ethtool_link_ext_substate_module module; + u32 __link_ext_substate; + }; +}; + +struct ethtool_link_ext_stats { + u64 link_down_events; +}; + +struct ethtool_rxfh_context { + u32 indir_size; + u32 key_size; + u16 priv_size; + u8 hfunc; + u8 input_xfrm; + u8 indir_configured: 1; + u8 key_configured: 1; + u32 key_off; + u8 data[0]; +}; + +struct ethtool_link_ksettings { + 
struct ethtool_link_settings base; + struct { + long unsigned int supported[4]; + long unsigned int advertising[4]; + long unsigned int lp_advertising[4]; + } link_modes; + u32 lanes; +}; + +struct ethtool_keee { + long unsigned int supported[4]; + long unsigned int advertised[4]; + long unsigned int lp_advertised[4]; + u32 tx_lpi_timer; + bool tx_lpi_enabled; + bool eee_active; + bool eee_enabled; +}; + +struct kernel_ethtool_coalesce { + u8 use_cqe_mode_tx; + u8 use_cqe_mode_rx; + u32 tx_aggr_max_bytes; + u32 tx_aggr_max_frames; + u32 tx_aggr_time_usecs; +}; + +struct ethtool_eth_mac_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + }; + struct { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; + } stats; + }; +}; + +struct ethtool_eth_phy_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 SymbolErrorDuringCarrier; + }; + struct { + u64 SymbolErrorDuringCarrier; + } stats; + }; +}; + +struct ethtool_eth_ctrl_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + }; + struct { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; + } stats; + }; +}; + +struct ethtool_pause_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 tx_pause_frames; + u64 rx_pause_frames; + }; + struct { + u64 tx_pause_frames; + u64 rx_pause_frames; + } stats; + }; +}; + +struct ethtool_fec_stat { + u64 total; + u64 lanes[8]; +}; + +struct ethtool_fec_stats { + struct ethtool_fec_stat corrected_blocks; + struct ethtool_fec_stat uncorrectable_blocks; + struct ethtool_fec_stat corrected_bits; +}; + +struct ethtool_rmon_hist_range { + u16 low; + u16 high; +}; + +struct ethtool_rmon_stats { + enum ethtool_mac_stats_src src; + long: 32; + union { + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + }; + struct { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; + } stats; + }; +}; + +struct ethtool_ts_stats { + union { + struct { + u64 pkts; + u64 lost; + u64 err; + }; + struct { + u64 pkts; + u64 lost; 
+ u64 err; + } tx_stats; + }; +}; + +struct ethtool_module_eeprom { + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; + u8 *data; +}; + +struct ethtool_module_power_mode_params { + enum ethtool_module_power_mode_policy policy; + enum ethtool_module_power_mode mode; +}; + +struct ethtool_mm_state { + u32 verify_time; + u32 max_verify_time; + enum ethtool_mm_verify_status verify_status; + bool tx_enabled; + bool tx_active; + bool pmac_enabled; + bool verify_enabled; + u32 tx_min_frag_size; + u32 rx_min_frag_size; +}; + +struct ethtool_mm_cfg { + u32 verify_time; + bool verify_enabled; + bool tx_enabled; + bool pmac_enabled; + u32 tx_min_frag_size; +}; + +struct ethtool_mm_stats { + u64 MACMergeFrameAssErrorCount; + u64 MACMergeFrameSmdErrorCount; + u64 MACMergeFrameAssOkCount; + u64 MACMergeFragCountRx; + u64 MACMergeFragCountTx; + u64 MACMergeHoldCount; +}; + +struct ethtool_rxfh_param { + u8 hfunc; + u32 indir_size; + u32 *indir; + u32 key_size; + u8 *key; + u32 rss_context; + u8 rss_delete; + u8 input_xfrm; +}; + +struct ethtool_netdev_state { + struct xarray rss_ctx; + struct mutex rss_lock; + unsigned int wol_enabled: 1; + unsigned int module_fw_flash_in_progress: 1; +}; + +enum { + ETHTOOL_MSG_USER_NONE = 0, + ETHTOOL_MSG_STRSET_GET = 1, + ETHTOOL_MSG_LINKINFO_GET = 2, + ETHTOOL_MSG_LINKINFO_SET = 3, + ETHTOOL_MSG_LINKMODES_GET = 4, + ETHTOOL_MSG_LINKMODES_SET = 5, + ETHTOOL_MSG_LINKSTATE_GET = 6, + ETHTOOL_MSG_DEBUG_GET = 7, + ETHTOOL_MSG_DEBUG_SET = 8, + ETHTOOL_MSG_WOL_GET = 9, + ETHTOOL_MSG_WOL_SET = 10, + ETHTOOL_MSG_FEATURES_GET = 11, + ETHTOOL_MSG_FEATURES_SET = 12, + ETHTOOL_MSG_PRIVFLAGS_GET = 13, + ETHTOOL_MSG_PRIVFLAGS_SET = 14, + ETHTOOL_MSG_RINGS_GET = 15, + ETHTOOL_MSG_RINGS_SET = 16, + ETHTOOL_MSG_CHANNELS_GET = 17, + ETHTOOL_MSG_CHANNELS_SET = 18, + ETHTOOL_MSG_COALESCE_GET = 19, + ETHTOOL_MSG_COALESCE_SET = 20, + ETHTOOL_MSG_PAUSE_GET = 21, + ETHTOOL_MSG_PAUSE_SET = 22, + ETHTOOL_MSG_EEE_GET = 23, + ETHTOOL_MSG_EEE_SET = 24, + ETHTOOL_MSG_TSINFO_GET = 25, + ETHTOOL_MSG_CABLE_TEST_ACT = 26, + ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 27, + ETHTOOL_MSG_TUNNEL_INFO_GET = 28, + ETHTOOL_MSG_FEC_GET = 29, + ETHTOOL_MSG_FEC_SET = 30, + ETHTOOL_MSG_MODULE_EEPROM_GET = 31, + ETHTOOL_MSG_STATS_GET = 32, + ETHTOOL_MSG_PHC_VCLOCKS_GET = 33, + ETHTOOL_MSG_MODULE_GET = 34, + ETHTOOL_MSG_MODULE_SET = 35, + ETHTOOL_MSG_PSE_GET = 36, + ETHTOOL_MSG_PSE_SET = 37, + ETHTOOL_MSG_RSS_GET = 38, + ETHTOOL_MSG_PLCA_GET_CFG = 39, + ETHTOOL_MSG_PLCA_SET_CFG = 40, + ETHTOOL_MSG_PLCA_GET_STATUS = 41, + ETHTOOL_MSG_MM_GET = 42, + ETHTOOL_MSG_MM_SET = 43, + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 44, + ETHTOOL_MSG_PHY_GET = 45, + __ETHTOOL_MSG_USER_CNT = 46, + ETHTOOL_MSG_USER_MAX = 45, +}; + +enum { + ETHTOOL_MSG_KERNEL_NONE = 0, + ETHTOOL_MSG_STRSET_GET_REPLY = 1, + ETHTOOL_MSG_LINKINFO_GET_REPLY = 2, + ETHTOOL_MSG_LINKINFO_NTF = 3, + ETHTOOL_MSG_LINKMODES_GET_REPLY = 4, + ETHTOOL_MSG_LINKMODES_NTF = 5, + ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6, + ETHTOOL_MSG_DEBUG_GET_REPLY = 7, + ETHTOOL_MSG_DEBUG_NTF = 8, + ETHTOOL_MSG_WOL_GET_REPLY = 9, + ETHTOOL_MSG_WOL_NTF = 10, + ETHTOOL_MSG_FEATURES_GET_REPLY = 11, + ETHTOOL_MSG_FEATURES_SET_REPLY = 12, + ETHTOOL_MSG_FEATURES_NTF = 13, + ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14, + ETHTOOL_MSG_PRIVFLAGS_NTF = 15, + ETHTOOL_MSG_RINGS_GET_REPLY = 16, + ETHTOOL_MSG_RINGS_NTF = 17, + ETHTOOL_MSG_CHANNELS_GET_REPLY = 18, + ETHTOOL_MSG_CHANNELS_NTF = 19, + ETHTOOL_MSG_COALESCE_GET_REPLY = 20, + ETHTOOL_MSG_COALESCE_NTF = 21, + ETHTOOL_MSG_PAUSE_GET_REPLY = 22, + 
ETHTOOL_MSG_PAUSE_NTF = 23, + ETHTOOL_MSG_EEE_GET_REPLY = 24, + ETHTOOL_MSG_EEE_NTF = 25, + ETHTOOL_MSG_TSINFO_GET_REPLY = 26, + ETHTOOL_MSG_CABLE_TEST_NTF = 27, + ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 29, + ETHTOOL_MSG_FEC_GET_REPLY = 30, + ETHTOOL_MSG_FEC_NTF = 31, + ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 32, + ETHTOOL_MSG_STATS_GET_REPLY = 33, + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 34, + ETHTOOL_MSG_MODULE_GET_REPLY = 35, + ETHTOOL_MSG_MODULE_NTF = 36, + ETHTOOL_MSG_PSE_GET_REPLY = 37, + ETHTOOL_MSG_RSS_GET_REPLY = 38, + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 39, + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 40, + ETHTOOL_MSG_PLCA_NTF = 41, + ETHTOOL_MSG_MM_GET_REPLY = 42, + ETHTOOL_MSG_MM_NTF = 43, + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 44, + ETHTOOL_MSG_PHY_GET_REPLY = 45, + ETHTOOL_MSG_PHY_NTF = 46, + __ETHTOOL_MSG_KERNEL_CNT = 47, + ETHTOOL_MSG_KERNEL_MAX = 46, +}; + +enum ethtool_header_flags { + ETHTOOL_FLAG_COMPACT_BITSETS = 1, + ETHTOOL_FLAG_OMIT_REPLY = 2, + ETHTOOL_FLAG_STATS = 4, +}; + +enum { + ETHTOOL_A_HEADER_UNSPEC = 0, + ETHTOOL_A_HEADER_DEV_INDEX = 1, + ETHTOOL_A_HEADER_DEV_NAME = 2, + ETHTOOL_A_HEADER_FLAGS = 3, + ETHTOOL_A_HEADER_PHY_INDEX = 4, + __ETHTOOL_A_HEADER_CNT = 5, + ETHTOOL_A_HEADER_MAX = 4, +}; + +enum { + ETHTOOL_A_STRSET_UNSPEC = 0, + ETHTOOL_A_STRSET_HEADER = 1, + ETHTOOL_A_STRSET_STRINGSETS = 2, + ETHTOOL_A_STRSET_COUNTS_ONLY = 3, + __ETHTOOL_A_STRSET_CNT = 4, + ETHTOOL_A_STRSET_MAX = 3, +}; + +enum { + ETHTOOL_A_LINKINFO_UNSPEC = 0, + ETHTOOL_A_LINKINFO_HEADER = 1, + ETHTOOL_A_LINKINFO_PORT = 2, + ETHTOOL_A_LINKINFO_PHYADDR = 3, + ETHTOOL_A_LINKINFO_TP_MDIX = 4, + ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5, + ETHTOOL_A_LINKINFO_TRANSCEIVER = 6, + __ETHTOOL_A_LINKINFO_CNT = 7, + ETHTOOL_A_LINKINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_LINKMODES_UNSPEC = 0, + ETHTOOL_A_LINKMODES_HEADER = 1, + ETHTOOL_A_LINKMODES_AUTONEG = 2, + ETHTOOL_A_LINKMODES_OURS = 3, + ETHTOOL_A_LINKMODES_PEER = 4, + ETHTOOL_A_LINKMODES_SPEED = 5, + ETHTOOL_A_LINKMODES_DUPLEX = 6, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8, + ETHTOOL_A_LINKMODES_LANES = 9, + ETHTOOL_A_LINKMODES_RATE_MATCHING = 10, + __ETHTOOL_A_LINKMODES_CNT = 11, + ETHTOOL_A_LINKMODES_MAX = 10, +}; + +enum { + ETHTOOL_A_LINKSTATE_UNSPEC = 0, + ETHTOOL_A_LINKSTATE_HEADER = 1, + ETHTOOL_A_LINKSTATE_LINK = 2, + ETHTOOL_A_LINKSTATE_SQI = 3, + ETHTOOL_A_LINKSTATE_SQI_MAX = 4, + ETHTOOL_A_LINKSTATE_EXT_STATE = 5, + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 6, + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 7, + __ETHTOOL_A_LINKSTATE_CNT = 8, + ETHTOOL_A_LINKSTATE_MAX = 7, +}; + +enum { + ETHTOOL_A_DEBUG_UNSPEC = 0, + ETHTOOL_A_DEBUG_HEADER = 1, + ETHTOOL_A_DEBUG_MSGMASK = 2, + __ETHTOOL_A_DEBUG_CNT = 3, + ETHTOOL_A_DEBUG_MAX = 2, +}; + +enum { + ETHTOOL_A_WOL_UNSPEC = 0, + ETHTOOL_A_WOL_HEADER = 1, + ETHTOOL_A_WOL_MODES = 2, + ETHTOOL_A_WOL_SOPASS = 3, + __ETHTOOL_A_WOL_CNT = 4, + ETHTOOL_A_WOL_MAX = 3, +}; + +enum { + ETHTOOL_A_FEATURES_UNSPEC = 0, + ETHTOOL_A_FEATURES_HEADER = 1, + ETHTOOL_A_FEATURES_HW = 2, + ETHTOOL_A_FEATURES_WANTED = 3, + ETHTOOL_A_FEATURES_ACTIVE = 4, + ETHTOOL_A_FEATURES_NOCHANGE = 5, + __ETHTOOL_A_FEATURES_CNT = 6, + ETHTOOL_A_FEATURES_MAX = 5, +}; + +enum { + ETHTOOL_A_PRIVFLAGS_UNSPEC = 0, + ETHTOOL_A_PRIVFLAGS_HEADER = 1, + ETHTOOL_A_PRIVFLAGS_FLAGS = 2, + __ETHTOOL_A_PRIVFLAGS_CNT = 3, + ETHTOOL_A_PRIVFLAGS_MAX = 2, +}; + +enum { + ETHTOOL_A_RINGS_UNSPEC = 0, + ETHTOOL_A_RINGS_HEADER = 1, + ETHTOOL_A_RINGS_RX_MAX = 2, + ETHTOOL_A_RINGS_RX_MINI_MAX = 3, + 
ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4, + ETHTOOL_A_RINGS_TX_MAX = 5, + ETHTOOL_A_RINGS_RX = 6, + ETHTOOL_A_RINGS_RX_MINI = 7, + ETHTOOL_A_RINGS_RX_JUMBO = 8, + ETHTOOL_A_RINGS_TX = 9, + ETHTOOL_A_RINGS_RX_BUF_LEN = 10, + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 11, + ETHTOOL_A_RINGS_CQE_SIZE = 12, + ETHTOOL_A_RINGS_TX_PUSH = 13, + ETHTOOL_A_RINGS_RX_PUSH = 14, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 15, + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 16, + __ETHTOOL_A_RINGS_CNT = 17, + ETHTOOL_A_RINGS_MAX = 16, +}; + +enum { + ETHTOOL_A_CHANNELS_UNSPEC = 0, + ETHTOOL_A_CHANNELS_HEADER = 1, + ETHTOOL_A_CHANNELS_RX_MAX = 2, + ETHTOOL_A_CHANNELS_TX_MAX = 3, + ETHTOOL_A_CHANNELS_OTHER_MAX = 4, + ETHTOOL_A_CHANNELS_COMBINED_MAX = 5, + ETHTOOL_A_CHANNELS_RX_COUNT = 6, + ETHTOOL_A_CHANNELS_TX_COUNT = 7, + ETHTOOL_A_CHANNELS_OTHER_COUNT = 8, + ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9, + __ETHTOOL_A_CHANNELS_CNT = 10, + ETHTOOL_A_CHANNELS_MAX = 9, +}; + +enum { + ETHTOOL_A_COALESCE_UNSPEC = 0, + ETHTOOL_A_COALESCE_HEADER = 1, + ETHTOOL_A_COALESCE_RX_USECS = 2, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3, + ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5, + ETHTOOL_A_COALESCE_TX_USECS = 6, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7, + ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9, + ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12, + ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13, + ETHTOOL_A_COALESCE_RX_USECS_LOW = 14, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15, + ETHTOOL_A_COALESCE_TX_USECS_LOW = 16, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17, + ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18, + ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20, + ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22, + ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23, + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 24, + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 25, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES = 26, + ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES = 27, + ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS = 28, + ETHTOOL_A_COALESCE_RX_PROFILE = 29, + ETHTOOL_A_COALESCE_TX_PROFILE = 30, + __ETHTOOL_A_COALESCE_CNT = 31, + ETHTOOL_A_COALESCE_MAX = 30, +}; + +enum { + ETHTOOL_A_PAUSE_UNSPEC = 0, + ETHTOOL_A_PAUSE_HEADER = 1, + ETHTOOL_A_PAUSE_AUTONEG = 2, + ETHTOOL_A_PAUSE_RX = 3, + ETHTOOL_A_PAUSE_TX = 4, + ETHTOOL_A_PAUSE_STATS = 5, + ETHTOOL_A_PAUSE_STATS_SRC = 6, + __ETHTOOL_A_PAUSE_CNT = 7, + ETHTOOL_A_PAUSE_MAX = 6, +}; + +enum { + ETHTOOL_A_EEE_UNSPEC = 0, + ETHTOOL_A_EEE_HEADER = 1, + ETHTOOL_A_EEE_MODES_OURS = 2, + ETHTOOL_A_EEE_MODES_PEER = 3, + ETHTOOL_A_EEE_ACTIVE = 4, + ETHTOOL_A_EEE_ENABLED = 5, + ETHTOOL_A_EEE_TX_LPI_ENABLED = 6, + ETHTOOL_A_EEE_TX_LPI_TIMER = 7, + __ETHTOOL_A_EEE_CNT = 8, + ETHTOOL_A_EEE_MAX = 7, +}; + +enum { + ETHTOOL_A_TSINFO_UNSPEC = 0, + ETHTOOL_A_TSINFO_HEADER = 1, + ETHTOOL_A_TSINFO_TIMESTAMPING = 2, + ETHTOOL_A_TSINFO_TX_TYPES = 3, + ETHTOOL_A_TSINFO_RX_FILTERS = 4, + ETHTOOL_A_TSINFO_PHC_INDEX = 5, + ETHTOOL_A_TSINFO_STATS = 6, + __ETHTOOL_A_TSINFO_CNT = 7, + ETHTOOL_A_TSINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC = 0, + ETHTOOL_A_PHC_VCLOCKS_HEADER = 1, + ETHTOOL_A_PHC_VCLOCKS_NUM = 2, + ETHTOOL_A_PHC_VCLOCKS_INDEX = 3, + __ETHTOOL_A_PHC_VCLOCKS_CNT = 4, + ETHTOOL_A_PHC_VCLOCKS_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_HEADER = 1, + __ETHTOOL_A_CABLE_TEST_CNT = 2, + ETHTOOL_A_CABLE_TEST_MAX = 1, +}; 
+ +enum { + ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG = 2, + __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3, + ETHTOOL_A_CABLE_TEST_TDR_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC = 0, + ETHTOOL_A_TUNNEL_INFO_HEADER = 1, + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 2, + __ETHTOOL_A_TUNNEL_INFO_CNT = 3, + ETHTOOL_A_TUNNEL_INFO_MAX = 2, +}; + +enum { + ETHTOOL_A_FEC_UNSPEC = 0, + ETHTOOL_A_FEC_HEADER = 1, + ETHTOOL_A_FEC_MODES = 2, + ETHTOOL_A_FEC_AUTO = 3, + ETHTOOL_A_FEC_ACTIVE = 4, + ETHTOOL_A_FEC_STATS = 5, + __ETHTOOL_A_FEC_CNT = 6, + ETHTOOL_A_FEC_MAX = 5, +}; + +enum { + ETHTOOL_A_MODULE_EEPROM_UNSPEC = 0, + ETHTOOL_A_MODULE_EEPROM_HEADER = 1, + ETHTOOL_A_MODULE_EEPROM_OFFSET = 2, + ETHTOOL_A_MODULE_EEPROM_LENGTH = 3, + ETHTOOL_A_MODULE_EEPROM_PAGE = 4, + ETHTOOL_A_MODULE_EEPROM_BANK = 5, + ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS = 6, + ETHTOOL_A_MODULE_EEPROM_DATA = 7, + __ETHTOOL_A_MODULE_EEPROM_CNT = 8, + ETHTOOL_A_MODULE_EEPROM_MAX = 7, +}; + +enum { + ETHTOOL_A_STATS_UNSPEC = 0, + ETHTOOL_A_STATS_PAD = 1, + ETHTOOL_A_STATS_HEADER = 2, + ETHTOOL_A_STATS_GROUPS = 3, + ETHTOOL_A_STATS_GRP = 4, + ETHTOOL_A_STATS_SRC = 5, + __ETHTOOL_A_STATS_CNT = 6, + ETHTOOL_A_STATS_MAX = 5, +}; + +enum { + ETHTOOL_STATS_ETH_PHY = 0, + ETHTOOL_STATS_ETH_MAC = 1, + ETHTOOL_STATS_ETH_CTRL = 2, + ETHTOOL_STATS_RMON = 3, + __ETHTOOL_STATS_CNT = 4, +}; + +enum { + ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR = 0, + __ETHTOOL_A_STATS_ETH_PHY_CNT = 1, + ETHTOOL_A_STATS_ETH_PHY_MAX = 0, +}; + +enum { + ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT = 0, + ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL = 1, + ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL = 2, + ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT = 3, + ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR = 4, + ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR = 5, + ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES = 6, + ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER = 7, + ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL = 8, + ETHTOOL_A_STATS_ETH_MAC_11_XS_COL = 9, + ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR = 10, + ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR = 11, + ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES = 12, + ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR = 13, + ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST = 14, + ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST = 15, + ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER = 16, + ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST = 17, + ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST = 18, + ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR = 19, + ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN = 20, + ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR = 21, + __ETHTOOL_A_STATS_ETH_MAC_CNT = 22, + ETHTOOL_A_STATS_ETH_MAC_MAX = 21, +}; + +enum { + ETHTOOL_A_STATS_ETH_CTRL_3_TX = 0, + ETHTOOL_A_STATS_ETH_CTRL_4_RX = 1, + ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP = 2, + __ETHTOOL_A_STATS_ETH_CTRL_CNT = 3, + ETHTOOL_A_STATS_ETH_CTRL_MAX = 2, +}; + +enum { + ETHTOOL_A_STATS_RMON_UNDERSIZE = 0, + ETHTOOL_A_STATS_RMON_OVERSIZE = 1, + ETHTOOL_A_STATS_RMON_FRAG = 2, + ETHTOOL_A_STATS_RMON_JABBER = 3, + __ETHTOOL_A_STATS_RMON_CNT = 4, + ETHTOOL_A_STATS_RMON_MAX = 3, +}; + +enum { + ETHTOOL_A_MODULE_UNSPEC = 0, + ETHTOOL_A_MODULE_HEADER = 1, + ETHTOOL_A_MODULE_POWER_MODE_POLICY = 2, + ETHTOOL_A_MODULE_POWER_MODE = 3, + __ETHTOOL_A_MODULE_CNT = 4, + ETHTOOL_A_MODULE_MAX = 3, +}; + +enum { + ETHTOOL_A_PSE_UNSPEC = 0, + ETHTOOL_A_PSE_HEADER = 1, + ETHTOOL_A_PODL_PSE_ADMIN_STATE = 2, + ETHTOOL_A_PODL_PSE_ADMIN_CONTROL = 3, + ETHTOOL_A_PODL_PSE_PW_D_STATUS = 4, + ETHTOOL_A_C33_PSE_ADMIN_STATE = 5, + ETHTOOL_A_C33_PSE_ADMIN_CONTROL = 6, + ETHTOOL_A_C33_PSE_PW_D_STATUS = 7, + ETHTOOL_A_C33_PSE_PW_CLASS = 8, + 
ETHTOOL_A_C33_PSE_ACTUAL_PW = 9, + ETHTOOL_A_C33_PSE_EXT_STATE = 10, + ETHTOOL_A_C33_PSE_EXT_SUBSTATE = 11, + ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT = 12, + ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES = 13, + __ETHTOOL_A_PSE_CNT = 14, + ETHTOOL_A_PSE_MAX = 13, +}; + +enum { + ETHTOOL_A_RSS_UNSPEC = 0, + ETHTOOL_A_RSS_HEADER = 1, + ETHTOOL_A_RSS_CONTEXT = 2, + ETHTOOL_A_RSS_HFUNC = 3, + ETHTOOL_A_RSS_INDIR = 4, + ETHTOOL_A_RSS_HKEY = 5, + ETHTOOL_A_RSS_INPUT_XFRM = 6, + ETHTOOL_A_RSS_START_CONTEXT = 7, + __ETHTOOL_A_RSS_CNT = 8, + ETHTOOL_A_RSS_MAX = 7, +}; + +enum { + ETHTOOL_A_PLCA_UNSPEC = 0, + ETHTOOL_A_PLCA_HEADER = 1, + ETHTOOL_A_PLCA_VERSION = 2, + ETHTOOL_A_PLCA_ENABLED = 3, + ETHTOOL_A_PLCA_STATUS = 4, + ETHTOOL_A_PLCA_NODE_CNT = 5, + ETHTOOL_A_PLCA_NODE_ID = 6, + ETHTOOL_A_PLCA_TO_TMR = 7, + ETHTOOL_A_PLCA_BURST_CNT = 8, + ETHTOOL_A_PLCA_BURST_TMR = 9, + __ETHTOOL_A_PLCA_CNT = 10, + ETHTOOL_A_PLCA_MAX = 9, +}; + +enum { + ETHTOOL_A_MM_UNSPEC = 0, + ETHTOOL_A_MM_HEADER = 1, + ETHTOOL_A_MM_PMAC_ENABLED = 2, + ETHTOOL_A_MM_TX_ENABLED = 3, + ETHTOOL_A_MM_TX_ACTIVE = 4, + ETHTOOL_A_MM_TX_MIN_FRAG_SIZE = 5, + ETHTOOL_A_MM_RX_MIN_FRAG_SIZE = 6, + ETHTOOL_A_MM_VERIFY_ENABLED = 7, + ETHTOOL_A_MM_VERIFY_STATUS = 8, + ETHTOOL_A_MM_VERIFY_TIME = 9, + ETHTOOL_A_MM_MAX_VERIFY_TIME = 10, + ETHTOOL_A_MM_STATS = 11, + __ETHTOOL_A_MM_CNT = 12, + ETHTOOL_A_MM_MAX = 11, +}; + +enum { + ETHTOOL_A_MODULE_FW_FLASH_UNSPEC = 0, + ETHTOOL_A_MODULE_FW_FLASH_HEADER = 1, + ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME = 2, + ETHTOOL_A_MODULE_FW_FLASH_PASSWORD = 3, + ETHTOOL_A_MODULE_FW_FLASH_STATUS = 4, + ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG = 5, + ETHTOOL_A_MODULE_FW_FLASH_DONE = 6, + ETHTOOL_A_MODULE_FW_FLASH_TOTAL = 7, + __ETHTOOL_A_MODULE_FW_FLASH_CNT = 8, + ETHTOOL_A_MODULE_FW_FLASH_MAX = 7, +}; + +enum { + ETHTOOL_A_PHY_UNSPEC = 0, + ETHTOOL_A_PHY_HEADER = 1, + ETHTOOL_A_PHY_INDEX = 2, + ETHTOOL_A_PHY_DRVNAME = 3, + ETHTOOL_A_PHY_NAME = 4, + ETHTOOL_A_PHY_UPSTREAM_TYPE = 5, + ETHTOOL_A_PHY_UPSTREAM_INDEX = 6, + ETHTOOL_A_PHY_UPSTREAM_SFP_NAME = 7, + ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME = 8, + __ETHTOOL_A_PHY_CNT = 9, + ETHTOOL_A_PHY_MAX = 8, +}; + +struct ethnl_req_info { + struct net_device *dev; + netdevice_tracker dev_tracker; + u32 flags; + u32 phy_index; +}; + +struct ethnl_reply_data { + struct net_device *dev; +}; + +struct ethnl_request_ops { + u8 request_cmd; + u8 reply_cmd; + u16 hdr_attr; + unsigned int req_info_size; + unsigned int reply_data_size; + bool allow_nodev_do; + u8 set_ntf_cmd; + int (*parse_request)(struct ethnl_req_info *, struct nlattr **, struct netlink_ext_ack *); + int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, const struct genl_info *); + int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *); + int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, const struct ethnl_reply_data *); + void (*cleanup_data)(struct ethnl_reply_data *); + int (*set_validate)(struct ethnl_req_info *, struct genl_info *); + int (*set)(struct ethnl_req_info *, struct genl_info *); +}; + +struct linkstate_reply_data { + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + struct ethtool_link_ext_stats link_stats; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; + long: 32; +}; + +typedef __u64 __be64; + +typedef u16 u_int16_t; + +struct flowi_tunnel { + __be64 tun_id; +}; + +struct flowi_common { + int flowic_oif; + int flowic_iif; + int flowic_l3mdev; + __u32 flowic_mark; + __u8 flowic_tos; + 
__u8 flowic_scope; + __u8 flowic_proto; + __u8 flowic_flags; + __u32 flowic_secid; + kuid_t flowic_uid; + __u32 flowic_multipath_hash; + struct flowi_tunnel flowic_tun_key; +}; + +union flowi_uli { + struct { + __be16 dport; + __be16 sport; + } ports; + struct { + __u8 type; + __u8 code; + } icmpt; + __be32 gre_key; + struct { + __u8 type; + } mht; +}; + +struct flowi4 { + struct flowi_common __fl_common; + __be32 saddr; + __be32 daddr; + union flowi_uli uli; + long: 32; +}; + +struct flowi6 { + struct flowi_common __fl_common; + struct in6_addr daddr; + struct in6_addr saddr; + __be32 flowlabel; + union flowi_uli uli; + __u32 mp_hash; + long: 32; +}; + +struct flowi { + union { + struct flowi_common __fl_common; + struct flowi4 ip4; + struct flowi6 ip6; + } u; +}; + +enum ip_conntrack_info { + IP_CT_ESTABLISHED = 0, + IP_CT_RELATED = 1, + IP_CT_NEW = 2, + IP_CT_IS_REPLY = 3, + IP_CT_ESTABLISHED_REPLY = 3, + IP_CT_RELATED_REPLY = 4, + IP_CT_NUMBER = 5, + IP_CT_UNTRACKED = 7, +}; + +struct nf_conntrack { + refcount_t use; +}; + +enum nf_dev_hooks { + NF_NETDEV_INGRESS = 0, + NF_NETDEV_EGRESS = 1, + NF_NETDEV_NUMHOOKS = 2, +}; + +struct icmpv6_mib_device { + atomic_long_t mibs[7]; +}; + +struct icmpv6msg_mib_device { + atomic_long_t mibs[512]; +}; + +struct inet_ehash_bucket; + +struct inet_bind_hashbucket; + +struct inet_listen_hashbucket; + +struct inet_hashinfo { + struct inet_ehash_bucket *ehash; + spinlock_t *ehash_locks; + unsigned int ehash_mask; + unsigned int ehash_locks_mask; + struct kmem_cache *bind_bucket_cachep; + struct inet_bind_hashbucket *bhash; + struct kmem_cache *bind2_bucket_cachep; + struct inet_bind_hashbucket *bhash2; + unsigned int bhash_size; + unsigned int lhash2_mask; + struct inet_listen_hashbucket *lhash2; + bool pernet; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ipv4_devconf { + void *sysctl; + int data[33]; + long unsigned int state[2]; +}; + +struct ip_ra_chain { + struct ip_ra_chain *next; + struct sock *sk; + union { + void (*destructor)(struct sock *); + struct sock *saved_sk; + }; + struct callback_head rcu; +}; + +struct inet_peer_base { + struct rb_root rb_root; + seqlock_t lock; + int total; +}; + +struct tcp_fastopen_context { + siphash_key_t key[2]; + int num; + struct callback_head rcu; + long: 32; +}; + +struct nf_hook_state; + +typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); + +struct nf_hook_entry { + nf_hookfn *hook; + void *priv; +}; + +struct nf_hook_entries { + u16 num_hook_entries; + struct nf_hook_entry hooks[0]; +}; + +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; + __u32 attach_type; + long: 32; +}; + +struct bpf_storage_buffer; + +struct bpf_cgroup_storage_map; + +struct bpf_cgroup_storage { + union { + struct bpf_storage_buffer *buf; + void *percpu_buf; + }; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list_map; + struct list_head list_cg; + struct rb_node node; + struct callback_head rcu; + long: 32; +}; + +struct ipv6_devstat { + struct proc_dir_entry *proc_dir_entry; + struct ipstats_mib *ipv6; + struct icmpv6_mib_device *icmpv6dev; + struct icmpv6msg_mib_device *icmpv6msgdev; +}; + +struct ifmcaddr6; + +struct ifacaddr6; + +struct inet6_dev { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head addr_list; + struct ifmcaddr6 *mc_list; + struct ifmcaddr6 *mc_tomb; + unsigned char mc_qrv; + unsigned char mc_gq_running; + unsigned char mc_ifc_count; + unsigned char mc_dad_count; + long unsigned 
int mc_v1_seen; + long unsigned int mc_qi; + long unsigned int mc_qri; + long unsigned int mc_maxdelay; + struct delayed_work mc_gq_work; + struct delayed_work mc_ifc_work; + struct delayed_work mc_dad_work; + struct delayed_work mc_query_work; + struct delayed_work mc_report_work; + struct sk_buff_head mc_query_queue; + struct sk_buff_head mc_report_queue; + spinlock_t mc_query_lock; + spinlock_t mc_report_lock; + struct mutex mc_lock; + struct ifacaddr6 *ac_list; + rwlock_t lock; + refcount_t refcnt; + __u32 if_flags; + int dead; + u32 desync_factor; + struct list_head tempaddr_list; + struct in6_addr token; + struct neigh_parms *nd_parms; + struct ipv6_devconf cnf; + struct ipv6_devstat stats; + struct timer_list rs_timer; + __s32 rs_interval; + __u8 rs_probes; + long unsigned int tstamp; + struct callback_head rcu; + unsigned int ra_mtu; +}; + +struct in_ifaddr; + +struct ip_mc_list; + +struct in_device { + struct net_device *dev; + netdevice_tracker dev_tracker; + refcount_t refcnt; + int dead; + struct in_ifaddr *ifa_list; + struct ip_mc_list *mc_list; + struct ip_mc_list **mc_hash; + int mc_count; + spinlock_t mc_tomb_lock; + struct ip_mc_list *mc_tomb; + long unsigned int mr_v1_seen; + long unsigned int mr_v2_seen; + long unsigned int mr_maxdelay; + long unsigned int mr_qi; + long unsigned int mr_qri; + unsigned char mr_qrv; + unsigned char mr_gq_running; + u32 mr_ifc_count; + struct timer_list mr_gq_timer; + struct timer_list mr_ifc_timer; + struct neigh_parms *arp_parms; + struct ipv4_devconf cnf; + struct callback_head callback_head; +}; + +struct nf_hook_state { + u8 hook; + u8 pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + struct net *net; + int (*okfn)(struct net *, struct sock *, struct sk_buff *); +}; + +enum nf_hook_ops_type { + NF_HOOK_OP_UNDEFINED = 0, + NF_HOOK_OP_NF_TABLES = 1, + NF_HOOK_OP_BPF = 2, +}; + +struct nf_hook_ops { + nf_hookfn *hook; + struct net_device *dev; + void *priv; + u8 pf; + enum nf_hook_ops_type hook_ops_type: 8; + unsigned int hooknum; + int priority; +}; + +struct nf_hook_entries_rcu_head { + struct callback_head head; + void *allocation; +}; + +enum nf_nat_manip_type { + NF_NAT_MANIP_SRC = 0, + NF_NAT_MANIP_DST = 1, +}; + +struct nf_conn; + +struct nf_nat_hook { + int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *); + void (*decode_session)(struct sk_buff *, struct flowi *); + void (*remove_nat_bysrc)(struct nf_conn *); +}; + +struct in_addr { + __be32 s_addr; +}; + +union nf_inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +union nf_conntrack_man_proto { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; +}; + +struct nf_conntrack_man { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + u_int16_t l3num; +}; + +struct nf_conntrack_tuple { + struct nf_conntrack_man src; + struct { + union nf_inet_addr u3; + union { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + u_int8_t type; + u_int8_t code; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; + } u; + u_int8_t protonum; + struct {} __nfct_hash_offsetend; + u_int8_t dir; + } dst; +}; + +struct nf_conntrack_tuple_hash { + struct hlist_nulls_node hnnode; + struct nf_conntrack_tuple 
tuple; +}; + +typedef u32 u_int32_t; + +typedef u64 u_int64_t; + +struct nf_ct_dccp { + u_int8_t role[2]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; + +enum sctp_conntrack { + SCTP_CONNTRACK_NONE = 0, + SCTP_CONNTRACK_CLOSED = 1, + SCTP_CONNTRACK_COOKIE_WAIT = 2, + SCTP_CONNTRACK_COOKIE_ECHOED = 3, + SCTP_CONNTRACK_ESTABLISHED = 4, + SCTP_CONNTRACK_SHUTDOWN_SENT = 5, + SCTP_CONNTRACK_SHUTDOWN_RECD = 6, + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, + SCTP_CONNTRACK_HEARTBEAT_SENT = 8, + SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, + SCTP_CONNTRACK_MAX = 10, +}; + +struct ip_ct_sctp { + enum sctp_conntrack state; + __be32 vtag[2]; + u8 init[2]; + u8 last_dir; + u8 flags; +}; + +struct ip_ct_tcp_state { + u_int32_t td_end; + u_int32_t td_maxend; + u_int32_t td_maxwin; + u_int32_t td_maxack; + u_int8_t td_scale; + u_int8_t flags; +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; + u_int8_t state; + u_int8_t last_dir; + u_int8_t retrans; + u_int8_t last_index; + u_int32_t last_seq; + u_int32_t last_ack; + u_int32_t last_end; + u_int16_t last_win; + u_int8_t last_wscale; + u_int8_t last_flags; +}; + +struct nf_ct_udp { + long unsigned int stream_ts; +}; + +struct nf_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +union nf_conntrack_proto { + struct nf_ct_dccp dccp; + struct ip_ct_sctp sctp; + struct ip_ct_tcp tcp; + struct nf_ct_udp udp; + struct nf_ct_gre gre; + unsigned int tmpl_padto; +}; + +struct nf_ct_ext; + +struct nf_conn { + struct nf_conntrack ct_general; + spinlock_t lock; + u32 timeout; + struct nf_conntrack_tuple_hash tuplehash[2]; + long unsigned int status; + possible_net_t ct_net; + struct hlist_node nat_bysource; + struct {} __nfct_init_offset; + struct nf_conn *master; + u_int32_t mark; + struct nf_ct_ext *ext; + union nf_conntrack_proto proto; +}; + +enum ip_conntrack_dir { + IP_CT_DIR_ORIGINAL = 0, + IP_CT_DIR_REPLY = 1, + IP_CT_DIR_MAX = 2, +}; + +struct nf_conntrack_zone { + u16 id; + u8 flags; + u8 dir; +}; + +struct nf_ct_hook { + int (*update)(struct net *, struct sk_buff *); + void (*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); + void (*attach)(struct sk_buff *, const struct sk_buff *); + void (*set_closing)(struct nf_conntrack *); + int (*confirm)(struct sk_buff *); +}; + +struct nfnl_ct_hook { + size_t (*build_size)(const struct nf_conn *); + int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, u_int16_t, u_int16_t); + int (*parse)(const struct nlattr *, struct nf_conn *); + int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32); + void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32); +}; + +struct nf_defrag_hook { + struct module *owner; + int (*enable)(struct net *); + void (*disable)(struct net *); +}; + +struct lwtunnel_state { + __u16 type; + __u16 flags; + __u16 headroom; + atomic_t refcnt; + int (*orig_output)(struct net *, struct sock *, struct sk_buff *); + int (*orig_input)(struct sk_buff *); + struct callback_head rcu; + __u8 data[0]; +}; + +struct sock_reuseport { + struct callback_head rcu; + u16 max_socks; + u16 num_socks; + u16 num_closed_socks; + u16 incoming_cpu; + unsigned int synq_overflow_ts; + unsigned int reuseport_id; + unsigned int bind_inany: 1; + unsigned int has_conns: 1; + struct bpf_prog *prog; + struct sock *socks[0]; +}; + +enum { + IPV4_DEVCONF_FORWARDING = 1, + IPV4_DEVCONF_MC_FORWARDING = 2, + IPV4_DEVCONF_PROXY_ARP = 3, + 
IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, + IPV4_DEVCONF_SECURE_REDIRECTS = 5, + IPV4_DEVCONF_SEND_REDIRECTS = 6, + IPV4_DEVCONF_SHARED_MEDIA = 7, + IPV4_DEVCONF_RP_FILTER = 8, + IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, + IPV4_DEVCONF_BOOTP_RELAY = 10, + IPV4_DEVCONF_LOG_MARTIANS = 11, + IPV4_DEVCONF_TAG = 12, + IPV4_DEVCONF_ARPFILTER = 13, + IPV4_DEVCONF_MEDIUM_ID = 14, + IPV4_DEVCONF_NOXFRM = 15, + IPV4_DEVCONF_NOPOLICY = 16, + IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, + IPV4_DEVCONF_ARP_ANNOUNCE = 18, + IPV4_DEVCONF_ARP_IGNORE = 19, + IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, + IPV4_DEVCONF_ARP_ACCEPT = 21, + IPV4_DEVCONF_ARP_NOTIFY = 22, + IPV4_DEVCONF_ACCEPT_LOCAL = 23, + IPV4_DEVCONF_SRC_VMARK = 24, + IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, + IPV4_DEVCONF_ROUTE_LOCALNET = 26, + IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, + IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, + IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, + IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, + IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, + IPV4_DEVCONF_BC_FORWARDING = 32, + IPV4_DEVCONF_ARP_EVICT_NOCARRIER = 33, + __IPV4_DEVCONF_MAX = 34, +}; + +struct ip6_sf_list { + struct ip6_sf_list *sf_next; + struct in6_addr sf_addr; + long unsigned int sf_count[2]; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; + struct callback_head rcu; +}; + +struct ifmcaddr6 { + struct in6_addr mca_addr; + struct inet6_dev *idev; + struct ifmcaddr6 *next; + struct ip6_sf_list *mca_sources; + struct ip6_sf_list *mca_tomb; + unsigned int mca_sfmode; + unsigned char mca_crcount; + long unsigned int mca_sfcount[2]; + struct delayed_work mca_work; + unsigned int mca_flags; + int mca_users; + refcount_t mca_refcnt; + long unsigned int mca_cstamp; + long unsigned int mca_tstamp; + struct callback_head rcu; +}; + +struct ifacaddr6 { + struct in6_addr aca_addr; + struct fib6_info *aca_rt; + struct ifacaddr6 *aca_next; + struct hlist_node aca_addr_lst; + int aca_users; + refcount_t aca_refcnt; + long unsigned int aca_cstamp; + long unsigned int aca_tstamp; + struct callback_head rcu; +}; + +struct nd_opt_hdr { + __u8 nd_opt_type; + __u8 nd_opt_len; +}; + +struct ndisc_options { + struct nd_opt_hdr *nd_opt_array[15]; + struct nd_opt_hdr *nd_useropts; + struct nd_opt_hdr *nd_useropts_end; +}; + +struct prefix_info { + __u8 type; + __u8 length; + __u8 prefix_len; + union { + __u8 flags; + struct { + __u8 onlink: 1; + __u8 autoconf: 1; + __u8 routeraddr: 1; + __u8 preferpd: 1; + __u8 reserved: 4; + }; + }; + __be32 valid; + __be32 prefered; + __be32 reserved2; + struct in6_addr prefix; +}; + +struct inet_ehash_bucket { + struct hlist_nulls_head chain; +}; + +struct inet_bind_hashbucket { + spinlock_t lock; + struct hlist_head chain; +}; + +struct inet_listen_hashbucket { + spinlock_t lock; + struct hlist_nulls_head nulls_head; +}; + +struct bpf_storage_buffer { + struct callback_head rcu; + char data[0]; +}; + +struct ack_sample { + u32 pkts_acked; + s32 rtt_us; + u32 in_flight; +}; + +struct rate_sample { + u64 prior_mstamp; + u32 prior_delivered; + u32 prior_delivered_ce; + s32 delivered; + s32 delivered_ce; + long int interval_us; + u32 snd_interval_us; + u32 rcv_interval_us; + long int rtt_us; + int losses; + u32 acked_sacked; + u32 prior_in_flight; + u32 last_end_seq; + bool is_app_limited; + bool is_retrans; + bool is_ack_delayed; + long: 32; +}; + +struct nf_queue_entry; + +struct nf_ipv6_ops { + void (*route_input)(struct sk_buff *); + int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct 
net *, struct sock *, struct sk_buff *)); + int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); +}; + +struct nf_queue_entry { + struct list_head list; + struct sk_buff *skb; + unsigned int id; + unsigned int hook_index; + struct net_device *physin; + struct net_device *physout; + struct nf_hook_state state; + u16 size; +}; + +struct in_ifaddr { + struct hlist_node addr_lst; + struct in_ifaddr *ifa_next; + struct in_device *ifa_dev; + struct callback_head callback_head; + __be32 ifa_local; + __be32 ifa_address; + __be32 ifa_mask; + __u32 ifa_rt_priority; + __be32 ifa_broadcast; + unsigned char ifa_scope; + unsigned char ifa_prefixlen; + unsigned char ifa_proto; + __u32 ifa_flags; + char ifa_label[16]; + __u32 ifa_valid_lft; + __u32 ifa_preferred_lft; + long unsigned int ifa_cstamp; + long unsigned int ifa_tstamp; +}; + +struct __va_list_tag { + unsigned char gpr; + unsigned char fpr; + short unsigned int reserved; + void *overflow_arg_area; + void *reg_save_area; +}; + +typedef __builtin_va_list va_list; + +enum work_flags { + WORK_STRUCT_PENDING = 1, + WORK_STRUCT_INACTIVE = 2, + WORK_STRUCT_PWQ = 4, + WORK_STRUCT_LINKED = 8, + WORK_STRUCT_STATIC = 0, +}; + +struct rhltable { + struct rhashtable ht; +}; + +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; + u32 flags; +}; + +struct flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct flow_dissector_key_vlan { + union { + struct { + u16 vlan_id: 12; + u16 vlan_dei: 1; + u16 vlan_priority: 3; + }; + __be16 vlan_tci; + }; + __be16 vlan_tpid; + __be16 vlan_eth_type; + u16 padding; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; +}; + +struct flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +struct flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +struct flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +struct flow_dissector_key_eth_addrs { + unsigned char dst[6]; + unsigned char src[6]; +}; + +struct flow_dissector_key_tcp { + __be16 flags; +}; + +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + +struct flow_dissector_key_meta { + int ingress_ifindex; + u16 ingress_iftype; + u8 l2_miss; +}; + +struct flow_dissector { + long long unsigned int used_keys; + short unsigned int offset[33]; + long: 32; +}; + +struct scm_creds { + u32 pid; + kuid_t uid; + kgid_t gid; +}; + +struct netlink_skb_parms { + struct scm_creds creds; + __u32 portid; + __u32 dst_group; + __u32 flags; + struct sock *sk; + bool nsid_is_set; + int nsid; +}; + +struct netlink_notify { + struct net *net; + u32 portid; + int protocol; +}; + +struct netlink_dump_control { + int (*start)(struct netlink_callback *); + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + struct netlink_ext_ack *extack; + void *data; + struct module *module; + u32 min_dump_alloc; + int flags; +}; + +enum nft_registers { + NFT_REG_VERDICT = 0, + NFT_REG_1 = 1, + NFT_REG_2 = 2, + NFT_REG_3 = 3, + NFT_REG_4 = 4, + __NFT_REG_MAX = 5, + NFT_REG32_00 = 8, + NFT_REG32_01 = 9, + NFT_REG32_02 = 10, + NFT_REG32_03 = 11, + NFT_REG32_04 = 12, + NFT_REG32_05 = 13, + NFT_REG32_06 = 14, + NFT_REG32_07 = 15, + NFT_REG32_08 = 16, + NFT_REG32_09 = 17, + NFT_REG32_10 = 18, + NFT_REG32_11 = 19, + NFT_REG32_12 = 20, + NFT_REG32_13 = 21, + NFT_REG32_14 = 22, + NFT_REG32_15 = 23, +}; + +enum nft_verdicts { + NFT_CONTINUE = -1, + NFT_BREAK = -2, + NFT_JUMP = -3, + NFT_GOTO = -4, + 
NFT_RETURN = -5, +}; + +enum nf_tables_msg_types { + NFT_MSG_NEWTABLE = 0, + NFT_MSG_GETTABLE = 1, + NFT_MSG_DELTABLE = 2, + NFT_MSG_NEWCHAIN = 3, + NFT_MSG_GETCHAIN = 4, + NFT_MSG_DELCHAIN = 5, + NFT_MSG_NEWRULE = 6, + NFT_MSG_GETRULE = 7, + NFT_MSG_DELRULE = 8, + NFT_MSG_NEWSET = 9, + NFT_MSG_GETSET = 10, + NFT_MSG_DELSET = 11, + NFT_MSG_NEWSETELEM = 12, + NFT_MSG_GETSETELEM = 13, + NFT_MSG_DELSETELEM = 14, + NFT_MSG_NEWGEN = 15, + NFT_MSG_GETGEN = 16, + NFT_MSG_TRACE = 17, + NFT_MSG_NEWOBJ = 18, + NFT_MSG_GETOBJ = 19, + NFT_MSG_DELOBJ = 20, + NFT_MSG_GETOBJ_RESET = 21, + NFT_MSG_NEWFLOWTABLE = 22, + NFT_MSG_GETFLOWTABLE = 23, + NFT_MSG_DELFLOWTABLE = 24, + NFT_MSG_GETRULE_RESET = 25, + NFT_MSG_DESTROYTABLE = 26, + NFT_MSG_DESTROYCHAIN = 27, + NFT_MSG_DESTROYRULE = 28, + NFT_MSG_DESTROYSET = 29, + NFT_MSG_DESTROYSETELEM = 30, + NFT_MSG_DESTROYOBJ = 31, + NFT_MSG_DESTROYFLOWTABLE = 32, + NFT_MSG_GETSETELEM_RESET = 33, + NFT_MSG_MAX = 34, +}; + +enum nft_list_attributes { + NFTA_LIST_UNSPEC = 0, + NFTA_LIST_ELEM = 1, + __NFTA_LIST_MAX = 2, +}; + +enum nft_hook_attributes { + NFTA_HOOK_UNSPEC = 0, + NFTA_HOOK_HOOKNUM = 1, + NFTA_HOOK_PRIORITY = 2, + NFTA_HOOK_DEV = 3, + NFTA_HOOK_DEVS = 4, + __NFTA_HOOK_MAX = 5, +}; + +enum nft_table_flags { + NFT_TABLE_F_DORMANT = 1, + NFT_TABLE_F_OWNER = 2, + NFT_TABLE_F_PERSIST = 4, +}; + +enum nft_table_attributes { + NFTA_TABLE_UNSPEC = 0, + NFTA_TABLE_NAME = 1, + NFTA_TABLE_FLAGS = 2, + NFTA_TABLE_USE = 3, + NFTA_TABLE_HANDLE = 4, + NFTA_TABLE_PAD = 5, + NFTA_TABLE_USERDATA = 6, + NFTA_TABLE_OWNER = 7, + __NFTA_TABLE_MAX = 8, +}; + +enum nft_chain_flags { + NFT_CHAIN_BASE = 1, + NFT_CHAIN_HW_OFFLOAD = 2, + NFT_CHAIN_BINDING = 4, +}; + +enum nft_chain_attributes { + NFTA_CHAIN_UNSPEC = 0, + NFTA_CHAIN_TABLE = 1, + NFTA_CHAIN_HANDLE = 2, + NFTA_CHAIN_NAME = 3, + NFTA_CHAIN_HOOK = 4, + NFTA_CHAIN_POLICY = 5, + NFTA_CHAIN_USE = 6, + NFTA_CHAIN_TYPE = 7, + NFTA_CHAIN_COUNTERS = 8, + NFTA_CHAIN_PAD = 9, + NFTA_CHAIN_FLAGS = 10, + NFTA_CHAIN_ID = 11, + NFTA_CHAIN_USERDATA = 12, + __NFTA_CHAIN_MAX = 13, +}; + +enum nft_rule_attributes { + NFTA_RULE_UNSPEC = 0, + NFTA_RULE_TABLE = 1, + NFTA_RULE_CHAIN = 2, + NFTA_RULE_HANDLE = 3, + NFTA_RULE_EXPRESSIONS = 4, + NFTA_RULE_COMPAT = 5, + NFTA_RULE_POSITION = 6, + NFTA_RULE_USERDATA = 7, + NFTA_RULE_PAD = 8, + NFTA_RULE_ID = 9, + NFTA_RULE_POSITION_ID = 10, + NFTA_RULE_CHAIN_ID = 11, + __NFTA_RULE_MAX = 12, +}; + +enum nft_set_flags { + NFT_SET_ANONYMOUS = 1, + NFT_SET_CONSTANT = 2, + NFT_SET_INTERVAL = 4, + NFT_SET_MAP = 8, + NFT_SET_TIMEOUT = 16, + NFT_SET_EVAL = 32, + NFT_SET_OBJECT = 64, + NFT_SET_CONCAT = 128, + NFT_SET_EXPR = 256, +}; + +enum nft_set_policies { + NFT_SET_POL_PERFORMANCE = 0, + NFT_SET_POL_MEMORY = 1, +}; + +enum nft_set_desc_attributes { + NFTA_SET_DESC_UNSPEC = 0, + NFTA_SET_DESC_SIZE = 1, + NFTA_SET_DESC_CONCAT = 2, + __NFTA_SET_DESC_MAX = 3, +}; + +enum nft_set_field_attributes { + NFTA_SET_FIELD_UNSPEC = 0, + NFTA_SET_FIELD_LEN = 1, + __NFTA_SET_FIELD_MAX = 2, +}; + +enum nft_set_attributes { + NFTA_SET_UNSPEC = 0, + NFTA_SET_TABLE = 1, + NFTA_SET_NAME = 2, + NFTA_SET_FLAGS = 3, + NFTA_SET_KEY_TYPE = 4, + NFTA_SET_KEY_LEN = 5, + NFTA_SET_DATA_TYPE = 6, + NFTA_SET_DATA_LEN = 7, + NFTA_SET_POLICY = 8, + NFTA_SET_DESC = 9, + NFTA_SET_ID = 10, + NFTA_SET_TIMEOUT = 11, + NFTA_SET_GC_INTERVAL = 12, + NFTA_SET_USERDATA = 13, + NFTA_SET_PAD = 14, + NFTA_SET_OBJ_TYPE = 15, + NFTA_SET_HANDLE = 16, + NFTA_SET_EXPR = 17, + NFTA_SET_EXPRESSIONS = 18, + __NFTA_SET_MAX = 19, +}; + +enum 
nft_set_elem_flags { + NFT_SET_ELEM_INTERVAL_END = 1, + NFT_SET_ELEM_CATCHALL = 2, +}; + +enum nft_set_elem_attributes { + NFTA_SET_ELEM_UNSPEC = 0, + NFTA_SET_ELEM_KEY = 1, + NFTA_SET_ELEM_DATA = 2, + NFTA_SET_ELEM_FLAGS = 3, + NFTA_SET_ELEM_TIMEOUT = 4, + NFTA_SET_ELEM_EXPIRATION = 5, + NFTA_SET_ELEM_USERDATA = 6, + NFTA_SET_ELEM_EXPR = 7, + NFTA_SET_ELEM_PAD = 8, + NFTA_SET_ELEM_OBJREF = 9, + NFTA_SET_ELEM_KEY_END = 10, + NFTA_SET_ELEM_EXPRESSIONS = 11, + __NFTA_SET_ELEM_MAX = 12, +}; + +enum nft_set_elem_list_attributes { + NFTA_SET_ELEM_LIST_UNSPEC = 0, + NFTA_SET_ELEM_LIST_TABLE = 1, + NFTA_SET_ELEM_LIST_SET = 2, + NFTA_SET_ELEM_LIST_ELEMENTS = 3, + NFTA_SET_ELEM_LIST_SET_ID = 4, + __NFTA_SET_ELEM_LIST_MAX = 5, +}; + +enum nft_data_types { + NFT_DATA_VALUE = 0, + NFT_DATA_VERDICT = 4294967040, +}; + +enum nft_data_attributes { + NFTA_DATA_UNSPEC = 0, + NFTA_DATA_VALUE = 1, + NFTA_DATA_VERDICT = 2, + __NFTA_DATA_MAX = 3, +}; + +enum nft_verdict_attributes { + NFTA_VERDICT_UNSPEC = 0, + NFTA_VERDICT_CODE = 1, + NFTA_VERDICT_CHAIN = 2, + NFTA_VERDICT_CHAIN_ID = 3, + __NFTA_VERDICT_MAX = 4, +}; + +enum nft_expr_attributes { + NFTA_EXPR_UNSPEC = 0, + NFTA_EXPR_NAME = 1, + NFTA_EXPR_DATA = 2, + __NFTA_EXPR_MAX = 3, +}; + +enum nft_counter_attributes { + NFTA_COUNTER_UNSPEC = 0, + NFTA_COUNTER_BYTES = 1, + NFTA_COUNTER_PACKETS = 2, + NFTA_COUNTER_PAD = 3, + __NFTA_COUNTER_MAX = 4, +}; + +enum nft_gen_attributes { + NFTA_GEN_UNSPEC = 0, + NFTA_GEN_ID = 1, + NFTA_GEN_PROC_PID = 2, + NFTA_GEN_PROC_NAME = 3, + __NFTA_GEN_MAX = 4, +}; + +enum nft_object_attributes { + NFTA_OBJ_UNSPEC = 0, + NFTA_OBJ_TABLE = 1, + NFTA_OBJ_NAME = 2, + NFTA_OBJ_TYPE = 3, + NFTA_OBJ_DATA = 4, + NFTA_OBJ_USE = 5, + NFTA_OBJ_HANDLE = 6, + NFTA_OBJ_PAD = 7, + NFTA_OBJ_USERDATA = 8, + __NFTA_OBJ_MAX = 9, +}; + +enum nft_flowtable_flags { + NFT_FLOWTABLE_HW_OFFLOAD = 1, + NFT_FLOWTABLE_COUNTER = 2, + NFT_FLOWTABLE_MASK = 3, +}; + +enum nft_flowtable_attributes { + NFTA_FLOWTABLE_UNSPEC = 0, + NFTA_FLOWTABLE_TABLE = 1, + NFTA_FLOWTABLE_NAME = 2, + NFTA_FLOWTABLE_HOOK = 3, + NFTA_FLOWTABLE_USE = 4, + NFTA_FLOWTABLE_HANDLE = 5, + NFTA_FLOWTABLE_PAD = 6, + NFTA_FLOWTABLE_FLAGS = 7, + __NFTA_FLOWTABLE_MAX = 8, +}; + +enum nft_flowtable_hook_attributes { + NFTA_FLOWTABLE_HOOK_UNSPEC = 0, + NFTA_FLOWTABLE_HOOK_NUM = 1, + NFTA_FLOWTABLE_HOOK_PRIORITY = 2, + NFTA_FLOWTABLE_HOOK_DEVS = 3, + __NFTA_FLOWTABLE_HOOK_MAX = 4, +}; + +enum nft_devices_attributes { + NFTA_DEVICE_UNSPEC = 0, + NFTA_DEVICE_NAME = 1, + __NFTA_DEVICE_MAX = 2, +}; + +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER = 0, + AUDIT_XT_OP_REPLACE = 1, + AUDIT_XT_OP_UNREGISTER = 2, + AUDIT_NFT_OP_TABLE_REGISTER = 3, + AUDIT_NFT_OP_TABLE_UNREGISTER = 4, + AUDIT_NFT_OP_CHAIN_REGISTER = 5, + AUDIT_NFT_OP_CHAIN_UNREGISTER = 6, + AUDIT_NFT_OP_RULE_REGISTER = 7, + AUDIT_NFT_OP_RULE_UNREGISTER = 8, + AUDIT_NFT_OP_SET_REGISTER = 9, + AUDIT_NFT_OP_SET_UNREGISTER = 10, + AUDIT_NFT_OP_SETELEM_REGISTER = 11, + AUDIT_NFT_OP_SETELEM_UNREGISTER = 12, + AUDIT_NFT_OP_GEN_REGISTER = 13, + AUDIT_NFT_OP_OBJ_REGISTER = 14, + AUDIT_NFT_OP_OBJ_UNREGISTER = 15, + AUDIT_NFT_OP_OBJ_RESET = 16, + AUDIT_NFT_OP_FLOWTABLE_REGISTER = 17, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER = 18, + AUDIT_NFT_OP_SETELEM_RESET = 19, + AUDIT_NFT_OP_RULE_RESET = 20, + AUDIT_NFT_OP_INVALID = 21, +}; + +struct net_generic { + union { + struct { + unsigned int len; + struct callback_head rcu; + } s; + struct { + struct {} __empty_ptr; + void *ptr[0]; + }; + }; +}; + +struct netdev_notifier_info { + struct net_device *dev; + 
struct netlink_ext_ack *extack; +}; + +enum netlink_validation { + NL_VALIDATE_LIBERAL = 0, + NL_VALIDATE_TRAILING = 1, + NL_VALIDATE_MAXTYPE = 2, + NL_VALIDATE_UNSPEC = 4, + NL_VALIDATE_STRICT_ATTRS = 8, + NL_VALIDATE_NESTED = 16, +}; + +enum nfnetlink_groups { + NFNLGRP_NONE = 0, + NFNLGRP_CONNTRACK_NEW = 1, + NFNLGRP_CONNTRACK_UPDATE = 2, + NFNLGRP_CONNTRACK_DESTROY = 3, + NFNLGRP_CONNTRACK_EXP_NEW = 4, + NFNLGRP_CONNTRACK_EXP_UPDATE = 5, + NFNLGRP_CONNTRACK_EXP_DESTROY = 6, + NFNLGRP_NFTABLES = 7, + NFNLGRP_ACCT_QUOTA = 8, + NFNLGRP_NFTRACE = 9, + __NFNLGRP_MAX = 10, +}; + +struct nfgenmsg { + __u8 nfgen_family; + __u8 version; + __be16 res_id; +}; + +struct nfnl_info { + struct net *net; + struct sock *sk; + const struct nlmsghdr *nlh; + const struct nfgenmsg *nfmsg; + struct netlink_ext_ack *extack; +}; + +enum nfnl_callback_type { + NFNL_CB_UNSPEC = 0, + NFNL_CB_MUTEX = 1, + NFNL_CB_RCU = 2, + NFNL_CB_BATCH = 3, +}; + +struct nfnl_callback { + int (*call)(struct sk_buff *, const struct nfnl_info *, const struct nlattr * const *); + const struct nla_policy *policy; + enum nfnl_callback_type type; + __u16 attr_count; +}; + +enum nfnl_abort_action { + NFNL_ABORT_NONE = 0, + NFNL_ABORT_AUTOLOAD = 1, + NFNL_ABORT_VALIDATE = 2, +}; + +struct nfnetlink_subsystem { + const char *name; + __u8 subsys_id; + __u8 cb_count; + const struct nfnl_callback *cb; + struct module *owner; + int (*commit)(struct net *, struct sk_buff *); + int (*abort)(struct net *, struct sk_buff *, enum nfnl_abort_action); + bool (*valid_genid)(struct net *, u32); +}; + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP = 1, + FLOW_ACTION_TRAP = 2, + FLOW_ACTION_GOTO = 3, + FLOW_ACTION_REDIRECT = 4, + FLOW_ACTION_MIRRED = 5, + FLOW_ACTION_REDIRECT_INGRESS = 6, + FLOW_ACTION_MIRRED_INGRESS = 7, + FLOW_ACTION_VLAN_PUSH = 8, + FLOW_ACTION_VLAN_POP = 9, + FLOW_ACTION_VLAN_MANGLE = 10, + FLOW_ACTION_TUNNEL_ENCAP = 11, + FLOW_ACTION_TUNNEL_DECAP = 12, + FLOW_ACTION_MANGLE = 13, + FLOW_ACTION_ADD = 14, + FLOW_ACTION_CSUM = 15, + FLOW_ACTION_MARK = 16, + FLOW_ACTION_PTYPE = 17, + FLOW_ACTION_PRIORITY = 18, + FLOW_ACTION_RX_QUEUE_MAPPING = 19, + FLOW_ACTION_WAKE = 20, + FLOW_ACTION_QUEUE = 21, + FLOW_ACTION_SAMPLE = 22, + FLOW_ACTION_POLICE = 23, + FLOW_ACTION_CT = 24, + FLOW_ACTION_CT_METADATA = 25, + FLOW_ACTION_MPLS_PUSH = 26, + FLOW_ACTION_MPLS_POP = 27, + FLOW_ACTION_MPLS_MANGLE = 28, + FLOW_ACTION_GATE = 29, + FLOW_ACTION_PPPOE_PUSH = 30, + FLOW_ACTION_JUMP = 31, + FLOW_ACTION_PIPE = 32, + FLOW_ACTION_VLAN_PUSH_ETH = 33, + FLOW_ACTION_VLAN_POP_ETH = 34, + FLOW_ACTION_CONTINUE = 35, + NUM_FLOW_ACTIONS = 36, +}; + +enum flow_action_mangle_base { + FLOW_ACT_MANGLE_UNSPEC = 0, + FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1, + FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2, + FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3, + FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4, + FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5, +}; + +enum flow_action_hw_stats { + FLOW_ACTION_HW_STATS_IMMEDIATE = 1, + FLOW_ACTION_HW_STATS_DELAYED = 2, + FLOW_ACTION_HW_STATS_ANY = 3, + FLOW_ACTION_HW_STATS_DISABLED = 4, + FLOW_ACTION_HW_STATS_DONT_CARE = 7, +}; + +typedef void (*action_destr)(void *); + +struct flow_action_cookie { + u32 cookie_len; + u8 cookie[0]; +}; + +struct psample_group; + +struct nf_flowtable_type; + +struct nf_flowtable { + unsigned int flags; + int priority; + struct rhashtable rhashtable; + struct list_head list; + const struct nf_flowtable_type *type; + struct delayed_work gc_work; + 
struct flow_block flow_block; + struct rw_semaphore flow_block_lock; + possible_net_t net; +}; + +struct action_gate_entry; + +struct ip_tunnel_info; + +struct flow_action_entry { + enum flow_action_id id; + u32 hw_index; + long unsigned int cookie; + long: 32; + u64 miss_cookie; + enum flow_action_hw_stats hw_stats; + action_destr destructor; + void *destructor_priv; + long: 32; + union { + u32 chain_index; + struct net_device *dev; + struct { + u16 vid; + __be16 proto; + u8 prio; + } vlan; + struct { + unsigned char dst[6]; + unsigned char src[6]; + } vlan_push_eth; + struct { + enum flow_action_mangle_base htype; + u32 offset; + u32 mask; + u32 val; + } mangle; + struct ip_tunnel_info *tunnel; + u32 csum_flags; + u32 mark; + u16 ptype; + u16 rx_queue; + u32 priority; + struct { + u32 ctx; + u32 index; + u8 vf; + } queue; + struct { + struct psample_group *psample_group; + u32 rate; + u32 trunc_size; + bool truncate; + } sample; + struct { + u32 burst; + long: 32; + u64 rate_bytes_ps; + u64 peakrate_bytes_ps; + u32 avrate; + u16 overhead; + u64 burst_pkt; + u64 rate_pkt_ps; + u32 mtu; + struct { + enum flow_action_id act_id; + u32 extval; + } exceed; + struct { + enum flow_action_id act_id; + u32 extval; + } notexceed; + long: 32; + } police; + struct { + int action; + u16 zone; + struct nf_flowtable *flow_table; + } ct; + struct { + long unsigned int cookie; + u32 mark; + u32 labels[4]; + bool orig_dir; + } ct_metadata; + struct { + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { + __be16 proto; + } mpls_pop; + struct { + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; + struct { + s32 prio; + long: 32; + u64 basetime; + u64 cycletime; + u64 cycletimeext; + u32 num_entries; + struct action_gate_entry *entries; + } gate; + struct { + u16 sid; + } pppoe; + }; + struct flow_action_cookie *user_cookie; + long: 32; +}; + +struct flow_action { + unsigned int num_entries; + long: 32; + struct flow_action_entry entries[0]; +}; + +struct flow_rule { + struct flow_match match; + long: 32; + struct flow_action action; +}; + +struct flow_stats { + u64 pkts; + u64 bytes; + u64 drops; + u64 lastused; + enum flow_action_hw_stats used_hw_stats; + bool used_hw_stats_valid; +}; + +enum flow_block_command { + FLOW_BLOCK_BIND = 0, + FLOW_BLOCK_UNBIND = 1, +}; + +enum l2tp_debug_flags { + L2TP_MSG_DEBUG = 1, + L2TP_MSG_CONTROL = 2, + L2TP_MSG_SEQ = 4, + L2TP_MSG_DATA = 8, +}; + +struct nf_flow_key { + struct flow_dissector_key_meta meta; + struct flow_dissector_key_control control; + struct flow_dissector_key_control enc_control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_keyid enc_key_id; + union { + struct flow_dissector_key_ipv4_addrs enc_ipv4; + struct flow_dissector_key_ipv6_addrs enc_ipv6; + }; + struct flow_dissector_key_tcp tcp; + struct flow_dissector_key_ports tp; +}; + +struct nf_flow_match { + struct flow_dissector dissector; + struct nf_flow_key key; + struct nf_flow_key mask; +}; + +struct nf_flow_rule { + struct nf_flow_match match; + struct flow_rule *rule; + long: 32; +}; + +enum flow_offload_tuple_dir { + FLOW_OFFLOAD_DIR_ORIGINAL = 0, + FLOW_OFFLOAD_DIR_REPLY = 1, +}; + +struct flow_offload; + +struct nf_flowtable_type { + struct list_head list; + int family; + int (*init)(struct nf_flowtable *); + bool (*gc)(const struct flow_offload *); + 
int (*setup)(struct nf_flowtable *, struct net_device *, enum flow_block_command); + int (*action)(struct net *, struct flow_offload *, enum flow_offload_tuple_dir, struct nf_flow_rule *); + void (*free)(struct nf_flowtable *); + void (*get)(struct nf_flowtable *); + void (*put)(struct nf_flowtable *); + nf_hookfn *hook; + struct module *owner; +}; + +struct flow_offload_tuple { + union { + struct in_addr src_v4; + struct in6_addr src_v6; + }; + union { + struct in_addr dst_v4; + struct in6_addr dst_v6; + }; + struct { + __be16 src_port; + __be16 dst_port; + }; + int iifidx; + u8 l3proto; + u8 l4proto; + struct { + u16 id; + __be16 proto; + } encap[2]; + struct {} __hash; + u8 dir: 2; + u8 xmit_type: 3; + u8 encap_num: 2; + char: 1; + u8 in_vlan_ingress: 2; + u16 mtu; + union { + struct { + struct dst_entry *dst_cache; + u32 dst_cookie; + }; + struct { + u32 ifidx; + u32 hw_ifidx; + u8 h_source[6]; + u8 h_dest[6]; + } out; + struct { + u32 iifidx; + } tc; + }; +}; + +struct flow_offload_tuple_rhash { + struct rhash_head node; + struct flow_offload_tuple tuple; +}; + +struct flow_offload { + struct flow_offload_tuple_rhash tuplehash[2]; + struct nf_conn *ct; + long unsigned int flags; + u16 type; + u32 timeout; + struct callback_head callback_head; +}; + +enum nf_ip_hook_priorities { + NF_IP_PRI_FIRST = -2147483648, + NF_IP_PRI_RAW_BEFORE_DEFRAG = -450, + NF_IP_PRI_CONNTRACK_DEFRAG = -400, + NF_IP_PRI_RAW = -300, + NF_IP_PRI_SELINUX_FIRST = -225, + NF_IP_PRI_CONNTRACK = -200, + NF_IP_PRI_MANGLE = -150, + NF_IP_PRI_NAT_DST = -100, + NF_IP_PRI_FILTER = 0, + NF_IP_PRI_SECURITY = 50, + NF_IP_PRI_NAT_SRC = 100, + NF_IP_PRI_SELINUX_LAST = 225, + NF_IP_PRI_CONNTRACK_HELPER = 300, + NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, + NF_IP_PRI_LAST = 2147483647, +}; + +struct nft_pktinfo { + struct sk_buff *skb; + const struct nf_hook_state *state; + u8 flags; + u8 tprot; + u16 fragoff; + u16 thoff; + u16 inneroff; +}; + +struct nft_chain; + +struct nft_verdict { + u32 code; + struct nft_chain *chain; +}; + +struct nft_rule_blob; + +struct nft_table; + +struct nft_chain { + struct nft_rule_blob *blob_gen_0; + struct nft_rule_blob *blob_gen_1; + struct list_head rules; + struct list_head list; + struct rhlist_head rhlhead; + struct nft_table *table; + long: 32; + u64 handle; + u32 use; + u8 flags: 5; + u8 bound: 1; + u8 genmask: 2; + char *name; + u16 udlen; + u8 *udata; + struct callback_head callback_head; + struct nft_rule_blob *blob_next; +}; + +struct nft_data { + union { + u32 data[4]; + struct nft_verdict verdict; + }; +}; + +struct nft_regs { + union { + u32 data[20]; + struct nft_verdict verdict; + }; +}; + +struct nft_expr_ops; + +struct nft_expr { + const struct nft_expr_ops *ops; + long: 32; + unsigned char data[0]; +}; + +struct nft_regs_track { + struct { + const struct nft_expr *selector; + const struct nft_expr *bitwise; + u8 num_reg; + } regs[20]; + const struct nft_expr *cur; + const struct nft_expr *last; +}; + +struct nft_ctx { + struct net *net; + struct nft_table *table; + struct nft_chain *chain; + const struct nlattr * const *nla; + u32 portid; + u32 seq; + u16 flags; + u8 family; + u8 level; + bool report; + long unsigned int reg_inited[1]; +}; + +struct nft_table { + struct list_head list; + struct rhltable chains_ht; + struct list_head chains; + struct list_head sets; + struct list_head objects; + struct list_head flowtables; + possible_net_t net; + long: 32; + u64 hgenerator; + u64 handle; + u32 use; + u16 family: 6; + u16 flags: 8; + u16 genmask: 2; + u32 nlpid; + char *name; + 
u16 udlen; + u8 *udata; + u8 validate_state; + long: 32; +}; + +enum nft_data_desc_flags { + NFT_DATA_DESC_SETELEM = 1, +}; + +struct nft_data_desc { + enum nft_data_types type; + unsigned int size; + unsigned int len; + unsigned int flags; +}; + +struct nft_userdata { + u8 len; + unsigned char data[0]; +}; + +struct nft_elem_priv {}; + +struct nft_set_elem { + union { + u32 buf[16]; + struct nft_data val; + } key; + union { + u32 buf[16]; + struct nft_data val; + } key_end; + union { + u32 buf[16]; + struct nft_data val; + } data; + struct nft_elem_priv *priv; + long: 32; +}; + +enum nft_iter_type { + NFT_ITER_UNSPEC = 0, + NFT_ITER_READ = 1, + NFT_ITER_UPDATE = 2, +}; + +struct nft_set; + +struct nft_set_iter { + u8 genmask; + enum nft_iter_type type: 8; + unsigned int count; + unsigned int skip; + int err; + int (*fn)(const struct nft_ctx *, struct nft_set *, const struct nft_set_iter *, struct nft_elem_priv *); +}; + +struct nft_set_ops; + +struct nft_set { + struct list_head list; + struct list_head bindings; + refcount_t refs; + struct nft_table *table; + possible_net_t net; + char *name; + u64 handle; + u32 ktype; + u32 dtype; + u32 objtype; + u32 size; + u8 field_len[16]; + u8 field_count; + u32 use; + atomic_t nelems; + u32 ndeact; + u64 timeout; + u32 gc_int; + u16 policy; + u16 udlen; + unsigned char *udata; + struct list_head pending_update; + long: 32; + long: 32; + long: 32; + const struct nft_set_ops *ops; + u16 flags: 13; + u16 dead: 1; + u16 genmask: 2; + u8 klen; + u8 dlen; + u8 num_exprs; + struct nft_expr *exprs[2]; + struct list_head catchall_list; + long: 32; + unsigned char data[0]; +}; + +struct nft_set_desc { + u32 ktype; + unsigned int klen; + u32 dtype; + unsigned int dlen; + u32 objtype; + unsigned int size; + u32 policy; + u32 gc_int; + u64 timeout; + u8 field_len[16]; + u8 field_count; + bool expr; + long: 32; +}; + +enum nft_set_class { + NFT_SET_CLASS_O_1 = 0, + NFT_SET_CLASS_O_LOG_N = 1, + NFT_SET_CLASS_O_N = 2, +}; + +struct nft_set_estimate { + u64 size; + enum nft_set_class lookup; + enum nft_set_class space; +}; + +enum nft_trans_phase { + NFT_TRANS_PREPARE = 0, + NFT_TRANS_PREPARE_ERROR = 1, + NFT_TRANS_ABORT = 2, + NFT_TRANS_COMMIT = 3, + NFT_TRANS_RELEASE = 4, +}; + +struct nft_offload_ctx; + +struct nft_flow_rule; + +struct nft_expr_type; + +struct nft_expr_ops { + void (*eval)(const struct nft_expr *, struct nft_regs *, const struct nft_pktinfo *); + int (*clone)(struct nft_expr *, const struct nft_expr *, gfp_t); + unsigned int size; + int (*init)(const struct nft_ctx *, const struct nft_expr *, const struct nlattr * const *); + void (*activate)(const struct nft_ctx *, const struct nft_expr *); + void (*deactivate)(const struct nft_ctx *, const struct nft_expr *, enum nft_trans_phase); + void (*destroy)(const struct nft_ctx *, const struct nft_expr *); + void (*destroy_clone)(const struct nft_ctx *, const struct nft_expr *); + int (*dump)(struct sk_buff *, const struct nft_expr *, bool); + int (*validate)(const struct nft_ctx *, const struct nft_expr *); + bool (*reduce)(struct nft_regs_track *, const struct nft_expr *); + bool (*gc)(struct net *, const struct nft_expr *); + int (*offload)(struct nft_offload_ctx *, struct nft_flow_rule *, const struct nft_expr *); + bool (*offload_action)(const struct nft_expr *); + void (*offload_stats)(struct nft_expr *, const struct flow_stats *); + const struct nft_expr_type *type; + void *data; +}; + +struct nft_set_ext; + +struct nft_set_ops { + bool (*lookup)(const struct net *, const struct nft_set *, 
const u32 *, const struct nft_set_ext **); + bool (*update)(struct nft_set *, const u32 *, struct nft_elem_priv * (*)(struct nft_set *, const struct nft_expr *, struct nft_regs *), const struct nft_expr *, struct nft_regs *, const struct nft_set_ext **); + bool (*delete)(const struct nft_set *, const u32 *); + int (*insert)(const struct net *, const struct nft_set *, const struct nft_set_elem *, struct nft_elem_priv **); + void (*activate)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + struct nft_elem_priv * (*deactivate)(const struct net *, const struct nft_set *, const struct nft_set_elem *); + void (*flush)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + void (*remove)(const struct net *, const struct nft_set *, struct nft_elem_priv *); + void (*walk)(const struct nft_ctx *, struct nft_set *, struct nft_set_iter *); + struct nft_elem_priv * (*get)(const struct net *, const struct nft_set *, const struct nft_set_elem *, unsigned int); + void (*commit)(struct nft_set *); + void (*abort)(const struct nft_set *); + u64 (*privsize)(const struct nlattr * const *, const struct nft_set_desc *); + bool (*estimate)(const struct nft_set_desc *, u32, struct nft_set_estimate *); + int (*init)(const struct nft_set *, const struct nft_set_desc *, const struct nlattr * const *); + void (*destroy)(const struct nft_ctx *, const struct nft_set *); + void (*gc_init)(const struct nft_set *); + unsigned int elemsize; +}; + +struct nft_set_ext { + u8 genmask; + u8 offset[8]; + char data[0]; +}; + +struct nft_set_type { + const struct nft_set_ops ops; + u32 features; +}; + +struct nft_set_elem_expr { + u8 size; + long: 32; + unsigned char data[0]; +}; + +struct nft_set_binding { + struct list_head list; + const struct nft_chain *chain; + u32 flags; +}; + +enum nft_set_extensions { + NFT_SET_EXT_KEY = 0, + NFT_SET_EXT_KEY_END = 1, + NFT_SET_EXT_DATA = 2, + NFT_SET_EXT_FLAGS = 3, + NFT_SET_EXT_TIMEOUT = 4, + NFT_SET_EXT_USERDATA = 5, + NFT_SET_EXT_EXPRESSIONS = 6, + NFT_SET_EXT_OBJREF = 7, + NFT_SET_EXT_NUM = 8, +}; + +struct nft_set_ext_type { + u8 len; + u8 align; +}; + +struct nft_set_ext_tmpl { + u16 len; + u8 offset[8]; + u8 ext_len[8]; +}; + +struct nft_timeout { + u64 timeout; + u64 expiration; +}; + +struct nft_expr_type { + const struct nft_expr_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); + void (*release_ops)(const struct nft_expr_ops *); + const struct nft_expr_ops *ops; + const struct nft_expr_ops *inner_ops; + struct list_head list; + const char *name; + struct module *owner; + const struct nla_policy *policy; + unsigned int maxattr; + u8 family; + u8 flags; +}; + +enum nft_offload_dep_type { + NFT_OFFLOAD_DEP_UNSPEC = 0, + NFT_OFFLOAD_DEP_NETWORK = 1, + NFT_OFFLOAD_DEP_TRANSPORT = 2, +}; + +struct nft_offload_reg { + u32 key; + u32 len; + u32 base_offset; + u32 offset; + u32 flags; + long: 32; + struct nft_data data; + struct nft_data mask; +}; + +struct nft_offload_ctx { + struct { + enum nft_offload_dep_type type; + __be16 l3num; + u8 protonum; + } dep; + unsigned int num_actions; + struct net *net; + struct nft_offload_reg regs[24]; +}; + +struct nft_flow_key { + struct flow_dissector_key_basic basic; + struct flow_dissector_key_control control; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct 
flow_dissector_key_eth_addrs eth_addrs; + struct flow_dissector_key_meta meta; +}; + +struct nft_flow_match { + struct flow_dissector dissector; + struct nft_flow_key key; + struct nft_flow_key mask; +}; + +struct nft_flow_rule { + __be16 proto; + long: 32; + struct nft_flow_match match; + struct flow_rule *rule; + long: 32; +}; + +struct nft_rule { + struct list_head list; + u64 handle: 42; + u64 genmask: 2; + u64 dlen: 12; + u64 udata: 1; + unsigned char data[0]; +}; + +struct nft_rule_dp { + u64 is_last: 1; + u64 dlen: 12; + u64 handle: 42; + long: 0; + unsigned char data[0]; +}; + +struct nft_rule_dp_last { + struct nft_rule_dp end; + struct callback_head h; + struct nft_rule_blob *blob; + const struct nft_chain *chain; +}; + +struct nft_rule_blob { + long unsigned int size; + long: 32; + unsigned char data[0]; +}; + +enum nft_chain_types { + NFT_CHAIN_T_DEFAULT = 0, + NFT_CHAIN_T_ROUTE = 1, + NFT_CHAIN_T_NAT = 2, + NFT_CHAIN_T_MAX = 3, +}; + +struct nft_chain_type { + const char *name; + enum nft_chain_types type; + int family; + struct module *owner; + unsigned int hook_mask; + nf_hookfn *hooks[6]; + int (*ops_register)(struct net *, const struct nf_hook_ops *); + void (*ops_unregister)(struct net *, const struct nf_hook_ops *); +}; + +struct nft_stats { + u64 bytes; + u64 pkts; + struct u64_stats_sync syncp; + long: 32; +}; + +struct nft_hook { + struct list_head list; + struct nf_hook_ops ops; + struct callback_head rcu; +}; + +struct nft_base_chain { + struct nf_hook_ops ops; + struct list_head hook_list; + const struct nft_chain_type *type; + u8 policy; + u8 flags; + struct nft_stats *stats; + long: 32; + struct nft_chain chain; + struct flow_block flow_block; +}; + +struct nft_object_hash_key { + const char *name; + const struct nft_table *table; +}; + +struct nft_object_ops; + +struct nft_object { + struct list_head list; + struct rhlist_head rhlhead; + struct nft_object_hash_key key; + u32 genmask: 2; + u32 use; + u64 handle; + u16 udlen; + u8 *udata; + long: 32; + long: 32; + long: 32; + long: 32; + const struct nft_object_ops *ops; + long: 32; + unsigned char data[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nft_object_type; + +struct nft_object_ops { + void (*eval)(struct nft_object *, struct nft_regs *, const struct nft_pktinfo *); + unsigned int size; + int (*init)(const struct nft_ctx *, const struct nlattr * const *, struct nft_object *); + void (*destroy)(const struct nft_ctx *, struct nft_object *); + int (*dump)(struct sk_buff *, struct nft_object *, bool); + void (*update)(struct nft_object *, struct nft_object *); + const struct nft_object_type *type; +}; + +struct nft_object_type { + const struct nft_object_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); + const struct nft_object_ops *ops; + struct list_head list; + u32 type; + unsigned int maxattr; + u8 family; + struct module *owner; + const struct nla_policy *policy; +}; + +struct nft_flowtable { + struct list_head list; + struct nft_table *table; + char *name; + int hooknum; + int ops_len; + u32 genmask: 2; + u32 use; + u64 handle; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct list_head hook_list; + struct nf_flowtable data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct nft_trans { + struct list_head list; + struct net *net; + struct nft_table *table; + int msg_type; + u32 seq; + u16 flags; + u8 report: 1; + u8 put_net: 1; +}; + +struct 
nft_trans_binding { + struct nft_trans nft_trans; + struct list_head binding_list; +}; + +struct nft_trans_rule { + struct nft_trans nft_trans; + struct nft_rule *rule; + struct nft_chain *chain; + struct nft_flow_rule *flow; + u32 rule_id; + bool bound; +}; + +struct nft_trans_set { + struct nft_trans_binding nft_trans_binding; + struct list_head list_trans_newset; + struct nft_set *set; + u32 set_id; + u32 gc_int; + u64 timeout; + bool update; + bool bound; + u32 size; +}; + +struct nft_trans_chain { + struct nft_trans_binding nft_trans_binding; + struct nft_chain *chain; + char *name; + struct nft_stats *stats; + u8 policy; + bool update; + bool bound; + u32 chain_id; + struct nft_base_chain *basechain; + struct list_head hook_list; +}; + +struct nft_trans_table { + struct nft_trans nft_trans; + bool update; +}; + +enum nft_trans_elem_flags { + NFT_TRANS_UPD_TIMEOUT = 1, + NFT_TRANS_UPD_EXPIRATION = 2, +}; + +struct nft_elem_update { + u64 timeout; + u64 expiration; + u8 flags; + long: 32; +}; + +struct nft_trans_one_elem { + struct nft_elem_priv *priv; + struct nft_elem_update *update; +}; + +struct nft_trans_elem { + struct nft_trans nft_trans; + struct nft_set *set; + bool bound; + unsigned int nelems; + struct nft_trans_one_elem elems[0]; +}; + +struct nft_trans_obj { + struct nft_trans nft_trans; + struct nft_object *obj; + struct nft_object *newobj; + bool update; +}; + +struct nft_trans_flowtable { + struct nft_trans nft_trans; + struct nft_flowtable *flowtable; + struct list_head hook_list; + u32 flags; + bool update; +}; + +struct nft_trans_gc { + struct list_head list; + struct net *net; + struct nft_set *set; + u32 seq; + u16 count; + struct nft_elem_priv *priv[256]; + struct callback_head rcu; +}; + +struct nftables_pernet { + struct list_head tables; + struct list_head commit_list; + struct list_head commit_set_list; + struct list_head binding_list; + struct list_head module_list; + struct list_head notify_list; + struct mutex commit_mutex; + long: 32; + u64 table_handle; + u64 tstamp; + unsigned int base_seq; + unsigned int gc_seq; + u8 validate_state; + long: 32; +}; + +enum { + NFT_VALIDATE_SKIP = 0, + NFT_VALIDATE_NEED = 1, + NFT_VALIDATE_DO = 2, +}; + +struct nft_audit_data { + struct nft_table *table; + int entries; + int op; + struct list_head list; +}; + +struct nft_set_elem_catchall { + struct list_head list; + struct callback_head rcu; + struct nft_elem_priv *elem; +}; + +struct nft_module_request { + struct list_head list; + char module[60]; + bool done; +}; + +struct nftnl_skb_parms { + bool report; +}; + +struct nft_chain_hook { + u32 num; + s32 priority; + const struct nft_chain_type *type; + struct list_head list; +}; + +struct nft_expr_info { + const struct nft_expr_ops *ops; + const struct nlattr *attr; + struct nlattr *tb[17]; +}; + +struct nft_rule_dump_ctx { + unsigned int s_idx; + char *table; + char *chain; + bool reset; +}; + +struct nft_set_dump_args { + const struct netlink_callback *cb; + struct nft_set_iter iter; + struct sk_buff *skb; + bool reset; +}; + +struct nft_set_dump_ctx { + const struct nft_set *set; + struct nft_ctx ctx; + bool reset; +}; + +struct nft_obj_dump_ctx { + unsigned int s_idx; + char *table; + u32 type; + bool reset; +}; + +struct nft_flowtable_hook { + u32 num; + int priority; + struct list_head list; +}; + +struct nft_flowtable_filter { + char *table; +}; + +struct rt6key { + struct in6_addr addr; + int plen; +}; + +struct rtable; + +struct fnhe_hash_bucket; + +struct fib_nh_common { + struct net_device *nhc_dev; + 
netdevice_tracker nhc_dev_tracker; + int nhc_oif; + unsigned char nhc_scope; + u8 nhc_family; + u8 nhc_gw_family; + unsigned char nhc_flags; + struct lwtunnel_state *nhc_lwtstate; + union { + __be32 ipv4; + struct in6_addr ipv6; + } nhc_gw; + int nhc_weight; + atomic_t nhc_upper_bound; + struct rtable **nhc_pcpu_rth_output; + struct rtable *nhc_rth_input; + struct fnhe_hash_bucket *nhc_exceptions; +}; + +struct rt6_exception_bucket; + +struct fib6_nh { + struct fib_nh_common nh_common; + struct rt6_info **rt6i_pcpu; + struct rt6_exception_bucket *rt6i_exception_bucket; +}; + +struct fib6_node; + +struct dst_metrics; + +struct nexthop; + +struct fib6_info { + struct fib6_table *fib6_table; + struct fib6_info *fib6_next; + struct fib6_node *fib6_node; + union { + struct list_head fib6_siblings; + struct list_head nh_list; + }; + unsigned int fib6_nsiblings; + refcount_t fib6_ref; + long unsigned int expires; + struct hlist_node gc_link; + struct dst_metrics *fib6_metrics; + struct rt6key fib6_dst; + u32 fib6_flags; + struct rt6key fib6_src; + struct rt6key fib6_prefsrc; + u32 fib6_metric; + u8 fib6_protocol; + u8 fib6_type; + u8 offload; + u8 trap; + u8 offload_failed; + u8 should_flush: 1; + u8 dst_nocount: 1; + u8 dst_nopolicy: 1; + u8 fib6_destroying: 1; + u8 unused: 4; + struct callback_head rcu; + struct nexthop *nh; + struct fib6_nh fib6_nh[0]; +}; + +struct rt6_info { + struct dst_entry dst; + struct fib6_info *from; + int sernum; + struct rt6key rt6i_dst; + struct rt6key rt6i_src; + struct in6_addr rt6i_gateway; + struct inet6_dev *rt6i_idev; + u32 rt6i_flags; + short unsigned int rt6i_nfheader_len; +}; + +struct rt6_statistics { + __u32 fib_nodes; + __u32 fib_route_nodes; + __u32 fib_rt_entries; + __u32 fib_rt_cache; + __u32 fib_discarded_routes; + atomic_t fib_rt_alloc; +}; + +struct fib6_node { + struct fib6_node *parent; + struct fib6_node *left; + struct fib6_node *right; + struct fib6_info *leaf; + __u16 fn_bit; + __u16 fn_flags; + int fn_sernum; + struct fib6_info *rr_ptr; + struct callback_head rcu; +}; + +struct fib6_table { + struct hlist_node tb6_hlist; + u32 tb6_id; + spinlock_t tb6_lock; + struct fib6_node tb6_root; + struct inet_peer_base tb6_peers; + unsigned int flags; + unsigned int fib_seq; + struct hlist_head tb6_gc_hlist; +}; + +enum netdev_state_t { + __LINK_STATE_START = 0, + __LINK_STATE_PRESENT = 1, + __LINK_STATE_NOCARRIER = 2, + __LINK_STATE_LINKWATCH_PENDING = 3, + __LINK_STATE_DORMANT = 4, + __LINK_STATE_TESTING = 5, +}; + +struct rtmsg { + unsigned char rtm_family; + unsigned char rtm_dst_len; + unsigned char rtm_src_len; + unsigned char rtm_tos; + unsigned char rtm_table; + unsigned char rtm_protocol; + unsigned char rtm_scope; + unsigned char rtm_type; + unsigned int rtm_flags; +}; + +enum { + RTN_UNSPEC = 0, + RTN_UNICAST = 1, + RTN_LOCAL = 2, + RTN_BROADCAST = 3, + RTN_ANYCAST = 4, + RTN_MULTICAST = 5, + RTN_BLACKHOLE = 6, + RTN_UNREACHABLE = 7, + RTN_PROHIBIT = 8, + RTN_THROW = 9, + RTN_NAT = 10, + RTN_XRESOLVE = 11, + __RTN_MAX = 12, +}; + +enum rt_scope_t { + RT_SCOPE_UNIVERSE = 0, + RT_SCOPE_SITE = 200, + RT_SCOPE_LINK = 253, + RT_SCOPE_HOST = 254, + RT_SCOPE_NOWHERE = 255, +}; + +enum rt_class_t { + RT_TABLE_UNSPEC = 0, + RT_TABLE_COMPAT = 252, + RT_TABLE_DEFAULT = 253, + RT_TABLE_MAIN = 254, + RT_TABLE_LOCAL = 255, + RT_TABLE_MAX = 4294967295, +}; + +enum rtattr_type_t { + RTA_UNSPEC = 0, + RTA_DST = 1, + RTA_SRC = 2, + RTA_IIF = 3, + RTA_OIF = 4, + RTA_GATEWAY = 5, + RTA_PRIORITY = 6, + RTA_PREFSRC = 7, + RTA_METRICS = 8, + RTA_MULTIPATH = 9, + 
RTA_PROTOINFO = 10, + RTA_FLOW = 11, + RTA_CACHEINFO = 12, + RTA_SESSION = 13, + RTA_MP_ALGO = 14, + RTA_TABLE = 15, + RTA_MARK = 16, + RTA_MFC_STATS = 17, + RTA_VIA = 18, + RTA_NEWDST = 19, + RTA_PREF = 20, + RTA_ENCAP_TYPE = 21, + RTA_ENCAP = 22, + RTA_EXPIRES = 23, + RTA_PAD = 24, + RTA_UID = 25, + RTA_TTL_PROPAGATE = 26, + RTA_IP_PROTO = 27, + RTA_SPORT = 28, + RTA_DPORT = 29, + RTA_NH_ID = 30, + __RTA_MAX = 31, +}; + +struct rtnexthop { + short unsigned int rtnh_len; + unsigned char rtnh_flags; + unsigned char rtnh_hops; + int rtnh_ifindex; +}; + +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + +enum rtnetlink_groups { + RTNLGRP_NONE = 0, + RTNLGRP_LINK = 1, + RTNLGRP_NOTIFY = 2, + RTNLGRP_NEIGH = 3, + RTNLGRP_TC = 4, + RTNLGRP_IPV4_IFADDR = 5, + RTNLGRP_IPV4_MROUTE = 6, + RTNLGRP_IPV4_ROUTE = 7, + RTNLGRP_IPV4_RULE = 8, + RTNLGRP_IPV6_IFADDR = 9, + RTNLGRP_IPV6_MROUTE = 10, + RTNLGRP_IPV6_ROUTE = 11, + RTNLGRP_IPV6_IFINFO = 12, + RTNLGRP_DECnet_IFADDR = 13, + RTNLGRP_NOP2 = 14, + RTNLGRP_DECnet_ROUTE = 15, + RTNLGRP_DECnet_RULE = 16, + RTNLGRP_NOP4 = 17, + RTNLGRP_IPV6_PREFIX = 18, + RTNLGRP_IPV6_RULE = 19, + RTNLGRP_ND_USEROPT = 20, + RTNLGRP_PHONET_IFADDR = 21, + RTNLGRP_PHONET_ROUTE = 22, + RTNLGRP_DCB = 23, + RTNLGRP_IPV4_NETCONF = 24, + RTNLGRP_IPV6_NETCONF = 25, + RTNLGRP_MDB = 26, + RTNLGRP_MPLS_ROUTE = 27, + RTNLGRP_NSID = 28, + RTNLGRP_MPLS_NETCONF = 29, + RTNLGRP_IPV4_MROUTE_R = 30, + RTNLGRP_IPV6_MROUTE_R = 31, + RTNLGRP_NEXTHOP = 32, + RTNLGRP_BRVLAN = 33, + RTNLGRP_MCTP_IFADDR = 34, + RTNLGRP_TUNNEL = 35, + RTNLGRP_STATS = 36, + __RTNLGRP_MAX = 37, +}; + +struct nl_info { + struct nlmsghdr *nlh; + struct net *nl_net; + u32 portid; + u8 skip_notify: 1; + u8 skip_notify_kernel: 1; +}; + +typedef u8 dscp_t; + +struct dst_metrics { + u32 metrics[17]; + refcount_t refcnt; +}; + +struct fib_notifier_info { + int family; + struct netlink_ext_ack *extack; +}; + +enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE = 0, + FIB_EVENT_ENTRY_APPEND = 1, + FIB_EVENT_ENTRY_ADD = 2, + FIB_EVENT_ENTRY_DEL = 3, + FIB_EVENT_RULE_ADD = 4, + FIB_EVENT_RULE_DEL = 5, + FIB_EVENT_NH_ADD = 6, + FIB_EVENT_NH_DEL = 7, + FIB_EVENT_VIF_ADD = 8, + FIB_EVENT_VIF_DEL = 9, +}; + +struct ipv6_opt_hdr; + +struct ipv6_rt_hdr; + +struct ipv6_txoptions { + refcount_t refcnt; + int tot_len; + __u16 opt_flen; + __u16 opt_nflen; + struct ipv6_opt_hdr *hopopt; + struct ipv6_opt_hdr *dst0opt; + struct ipv6_rt_hdr *srcrt; + struct ipv6_opt_hdr *dst1opt; + struct callback_head rcu; +}; + +struct ipv6_rt_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; +}; + +struct ipv6_opt_hdr { + __u8 nexthdr; + __u8 hdrlen; +}; + +struct fib_config { + u8 fc_dst_len; + dscp_t fc_dscp; + u8 fc_protocol; + u8 fc_scope; + u8 fc_type; + u8 fc_gw_family; + u32 fc_table; + __be32 fc_dst; + union { + __be32 fc_gw4; + struct in6_addr fc_gw6; + }; + int fc_oif; + u32 fc_flags; + u32 fc_priority; + __be32 fc_prefsrc; + u32 fc_nh_id; + struct nlattr *fc_mx; + struct rtnexthop *fc_mp; + int fc_mx_len; + int fc_mp_len; + u32 fc_flow; + u32 fc_nlflags; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; +}; + +struct fib_nh_exception { + struct fib_nh_exception *fnhe_next; + int fnhe_genid; + __be32 fnhe_daddr; + u32 fnhe_pmtu; + bool fnhe_mtu_locked; + __be32 fnhe_gw; + long unsigned int fnhe_expires; + struct rtable *fnhe_rth_input; + struct rtable *fnhe_rth_output; + long unsigned int fnhe_stamp; + struct callback_head rcu; +}; + +struct rtable { + struct dst_entry 
dst; + int rt_genid; + unsigned int rt_flags; + __u16 rt_type; + __u8 rt_is_input; + __u8 rt_uses_gateway; + int rt_iif; + u8 rt_gw_family; + union { + __be32 rt_gw4; + struct in6_addr rt_gw6; + }; + u32 rt_mtu_locked: 1; + u32 rt_pmtu: 31; +}; + +struct fnhe_hash_bucket { + struct fib_nh_exception *chain; +}; + +struct fib_info; + +struct fib_nh { + struct fib_nh_common nh_common; + struct hlist_node nh_hash; + struct fib_info *nh_parent; + __be32 nh_saddr; + int nh_saddr_genid; +}; + +struct fib_info { + struct hlist_node fib_hash; + struct hlist_node fib_lhash; + struct list_head nh_list; + struct net *fib_net; + refcount_t fib_treeref; + refcount_t fib_clntref; + unsigned int fib_flags; + unsigned char fib_dead; + unsigned char fib_protocol; + unsigned char fib_scope; + unsigned char fib_type; + __be32 fib_prefsrc; + u32 fib_tb_id; + u32 fib_priority; + struct dst_metrics *fib_metrics; + int fib_nhs; + bool fib_nh_is_v6; + bool nh_updated; + bool pfsrc_removed; + struct nexthop *nh; + struct callback_head rcu; + struct fib_nh fib_nh[0]; +}; + +struct nh_info; + +struct nh_group; + +struct nexthop { + struct rb_node rb_node; + struct list_head fi_list; + struct list_head f6i_list; + struct list_head fdb_list; + struct list_head grp_list; + struct net *net; + u32 id; + u8 protocol; + u8 nh_flags; + bool is_group; + refcount_t refcnt; + struct callback_head rcu; + union { + struct nh_info *nh_info; + struct nh_group *nh_grp; + }; +}; + +struct fib_table; + +struct fib_result { + __be32 prefix; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + u32 tclassid; + dscp_t dscp; + struct fib_nh_common *nhc; + struct fib_info *fi; + struct fib_table *table; + struct hlist_head *fa_head; +}; + +struct fib_table { + struct hlist_node tb_hlist; + u32 tb_id; + int tb_num_default; + struct callback_head rcu; + long unsigned int *tb_data; + long unsigned int __data[0]; +}; + +struct fib_rt_info { + struct fib_info *fi; + u32 tb_id; + __be32 dst; + int dst_len; + dscp_t dscp; + u8 type; + u8 offload: 1; + u8 trap: 1; + u8 offload_failed: 1; + u8 unused: 5; +}; + +struct fib_nh_notifier_info { + struct fib_notifier_info info; + struct fib_nh *fib_nh; +}; + +struct fib6_result; + +struct fib6_config; + +struct ipv6_stub { + int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); + int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); + struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *); + int (*ipv6_route_input)(struct sk_buff *); + struct fib6_table * (*fib6_get_table)(struct net *, u32); + int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int); + int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, struct fib6_result *, int); + void (*fib6_select_path)(const struct net *, struct fib6_result *, struct flowi6 *, int, bool, const struct sk_buff *, int); + u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, const struct in6_addr *); + int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, struct netlink_ext_ack *); + void (*fib6_nh_release)(struct fib6_nh *); + void (*fib6_nh_release_dsts)(struct fib6_nh *); + void (*fib6_update_sernum)(struct net *, struct fib6_info *); + int (*ip6_del_rt)(struct net *, struct fib6_info *, bool); + void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *); + void (*udpv6_encap_enable)(); + void 
(*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool); + struct neigh_table *nd_tbl; + int (*ipv6_fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + struct net_device * (*ipv6_dev_find)(struct net *, const struct in6_addr *, struct net_device *); + int (*ip6_xmit)(const struct sock *, struct sk_buff *, struct flowi6 *, __u32, struct ipv6_txoptions *, int, u32); +}; + +struct fib6_result { + struct fib6_nh *nh; + struct fib6_info *f6i; + u32 fib6_flags; + u8 fib6_type; + struct rt6_info *rt6; +}; + +struct fib6_config { + u32 fc_table; + u32 fc_metric; + int fc_dst_len; + int fc_src_len; + int fc_ifindex; + u32 fc_flags; + u32 fc_protocol; + u16 fc_type; + u16 fc_delete_all_nh: 1; + u16 fc_ignore_dev_down: 1; + u16 __unused: 14; + u32 fc_nh_id; + struct in6_addr fc_dst; + struct in6_addr fc_src; + struct in6_addr fc_prefsrc; + struct in6_addr fc_gateway; + long unsigned int fc_expires; + struct nlattr *fc_mx; + int fc_mx_len; + int fc_mp_len; + struct nlattr *fc_mp; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; + bool fc_is_fdb; +}; + +enum lwtunnel_encap_types { + LWTUNNEL_ENCAP_NONE = 0, + LWTUNNEL_ENCAP_MPLS = 1, + LWTUNNEL_ENCAP_IP = 2, + LWTUNNEL_ENCAP_ILA = 3, + LWTUNNEL_ENCAP_IP6 = 4, + LWTUNNEL_ENCAP_SEG6 = 5, + LWTUNNEL_ENCAP_BPF = 6, + LWTUNNEL_ENCAP_SEG6_LOCAL = 7, + LWTUNNEL_ENCAP_RPL = 8, + LWTUNNEL_ENCAP_IOAM6 = 9, + LWTUNNEL_ENCAP_XFRM = 10, + __LWTUNNEL_ENCAP_MAX = 11, +}; + +struct rt6_exception_bucket { + struct hlist_head chain; + int depth; +}; + +struct nh_info { + struct hlist_node dev_hash; + struct nexthop *nh_parent; + u8 family; + bool reject_nh; + bool fdb_nh; + union { + struct fib_nh_common fib_nhc; + struct fib_nh fib_nh; + struct fib6_nh fib6_nh; + }; +}; + +struct nh_grp_entry; + +struct nh_res_bucket { + struct nh_grp_entry *nh_entry; + atomic_long_t used_time; + long unsigned int migrated_time; + bool occupied; + u8 nh_flags; +}; + +struct nh_grp_entry_stats; + +struct nh_grp_entry { + struct nexthop *nh; + struct nh_grp_entry_stats *stats; + u16 weight; + union { + struct { + atomic_t upper_bound; + } hthr; + struct { + struct list_head uw_nh_entry; + u16 count_buckets; + u16 wants_buckets; + } res; + }; + struct list_head nh_list; + struct nexthop *nh_parent; + long: 32; + u64 packets_hw; +}; + +struct nh_res_table { + struct net *net; + u32 nhg_id; + struct delayed_work upkeep_dw; + struct list_head uw_nh_entries; + long unsigned int unbalanced_since; + u32 idle_timer; + u32 unbalanced_timer; + u16 num_nh_buckets; + struct nh_res_bucket nh_buckets[0]; +}; + +struct nh_grp_entry_stats { + u64_stats_t packets; + struct u64_stats_sync syncp; + long: 32; +}; + +struct nh_group { + struct nh_group *spare; + u16 num_nh; + bool is_multipath; + bool hash_threshold; + bool resilient; + bool fdb_nh; + bool has_v4; + bool hw_stats; + struct nh_res_table *res_table; + struct nh_grp_entry nh_entries[0]; +}; + +struct fib_alias { + struct hlist_node fa_list; + struct fib_info *fa_info; + dscp_t fa_dscp; + u8 fa_type; + u8 fa_state; + u8 fa_slen; + u32 tb_id; + s16 fa_default; + u8 offload; + u8 trap; + u8 offload_failed; + struct callback_head rcu; +}; + +struct fib_prop { + int error; + u8 scope; +}; + +typedef __u16 __sum16; + +typedef __kernel_clock_t clock_t; + +enum { + IPPROTO_IP = 0, + IPPROTO_ICMP = 1, + IPPROTO_IGMP = 2, + IPPROTO_IPIP = 4, + IPPROTO_TCP = 6, + IPPROTO_EGP = 8, + IPPROTO_PUP = 12, + 
IPPROTO_UDP = 17, + IPPROTO_IDP = 22, + IPPROTO_TP = 29, + IPPROTO_DCCP = 33, + IPPROTO_IPV6 = 41, + IPPROTO_RSVP = 46, + IPPROTO_GRE = 47, + IPPROTO_ESP = 50, + IPPROTO_AH = 51, + IPPROTO_MTP = 92, + IPPROTO_BEETPH = 94, + IPPROTO_ENCAP = 98, + IPPROTO_PIM = 103, + IPPROTO_COMP = 108, + IPPROTO_L2TP = 115, + IPPROTO_SCTP = 132, + IPPROTO_UDPLITE = 136, + IPPROTO_MPLS = 137, + IPPROTO_ETHERNET = 143, + IPPROTO_RAW = 255, + IPPROTO_SMC = 256, + IPPROTO_MPTCP = 262, + IPPROTO_MAX = 263, +}; + +struct seq_net_private { + struct net *net; + netns_tracker ns_tracker; +}; + +enum { + IF_OPER_UNKNOWN = 0, + IF_OPER_NOTPRESENT = 1, + IF_OPER_DOWN = 2, + IF_OPER_LOWERLAYERDOWN = 3, + IF_OPER_TESTING = 4, + IF_OPER_DORMANT = 5, + IF_OPER_UP = 6, +}; + +enum { + IFLA_UNSPEC = 0, + IFLA_ADDRESS = 1, + IFLA_BROADCAST = 2, + IFLA_IFNAME = 3, + IFLA_MTU = 4, + IFLA_LINK = 5, + IFLA_QDISC = 6, + IFLA_STATS = 7, + IFLA_COST = 8, + IFLA_PRIORITY = 9, + IFLA_MASTER = 10, + IFLA_WIRELESS = 11, + IFLA_PROTINFO = 12, + IFLA_TXQLEN = 13, + IFLA_MAP = 14, + IFLA_WEIGHT = 15, + IFLA_OPERSTATE = 16, + IFLA_LINKMODE = 17, + IFLA_LINKINFO = 18, + IFLA_NET_NS_PID = 19, + IFLA_IFALIAS = 20, + IFLA_NUM_VF = 21, + IFLA_VFINFO_LIST = 22, + IFLA_STATS64 = 23, + IFLA_VF_PORTS = 24, + IFLA_PORT_SELF = 25, + IFLA_AF_SPEC = 26, + IFLA_GROUP = 27, + IFLA_NET_NS_FD = 28, + IFLA_EXT_MASK = 29, + IFLA_PROMISCUITY = 30, + IFLA_NUM_TX_QUEUES = 31, + IFLA_NUM_RX_QUEUES = 32, + IFLA_CARRIER = 33, + IFLA_PHYS_PORT_ID = 34, + IFLA_CARRIER_CHANGES = 35, + IFLA_PHYS_SWITCH_ID = 36, + IFLA_LINK_NETNSID = 37, + IFLA_PHYS_PORT_NAME = 38, + IFLA_PROTO_DOWN = 39, + IFLA_GSO_MAX_SEGS = 40, + IFLA_GSO_MAX_SIZE = 41, + IFLA_PAD = 42, + IFLA_XDP = 43, + IFLA_EVENT = 44, + IFLA_NEW_NETNSID = 45, + IFLA_IF_NETNSID = 46, + IFLA_TARGET_NETNSID = 46, + IFLA_CARRIER_UP_COUNT = 47, + IFLA_CARRIER_DOWN_COUNT = 48, + IFLA_NEW_IFINDEX = 49, + IFLA_MIN_MTU = 50, + IFLA_MAX_MTU = 51, + IFLA_PROP_LIST = 52, + IFLA_ALT_IFNAME = 53, + IFLA_PERM_ADDRESS = 54, + IFLA_PROTO_DOWN_REASON = 55, + IFLA_PARENT_DEV_NAME = 56, + IFLA_PARENT_DEV_BUS_NAME = 57, + IFLA_GRO_MAX_SIZE = 58, + IFLA_TSO_MAX_SIZE = 59, + IFLA_TSO_MAX_SEGS = 60, + IFLA_ALLMULTI = 61, + IFLA_DEVLINK_PORT = 62, + IFLA_GSO_IPV4_MAX_SIZE = 63, + IFLA_GRO_IPV4_MAX_SIZE = 64, + IFLA_DPLL_PIN = 65, + IFLA_MAX_PACING_OFFLOAD_HORIZON = 66, + __IFLA_MAX = 67, +}; + +enum { + IFLA_INET6_UNSPEC = 0, + IFLA_INET6_FLAGS = 1, + IFLA_INET6_CONF = 2, + IFLA_INET6_STATS = 3, + IFLA_INET6_MCAST = 4, + IFLA_INET6_CACHEINFO = 5, + IFLA_INET6_ICMP6STATS = 6, + IFLA_INET6_TOKEN = 7, + IFLA_INET6_ADDR_GEN_MODE = 8, + IFLA_INET6_RA_MTU = 9, + __IFLA_INET6_MAX = 10, +}; + +enum in6_addr_gen_mode { + IN6_ADDR_GEN_MODE_EUI64 = 0, + IN6_ADDR_GEN_MODE_NONE = 1, + IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, + IN6_ADDR_GEN_MODE_RANDOM = 3, +}; + +struct ifla_cacheinfo { + __u32 max_reasm_len; + __u32 tstamp; + __u32 reachable_time; + __u32 retrans_time; +}; + +struct iphdr { + __u8 version: 4; + __u8 ihl: 4; + __u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + __u8 ttl; + __u8 protocol; + __sum16 check; + union { + struct { + __be32 saddr; + __be32 daddr; + }; + struct { + __be32 saddr; + __be32 daddr; + } addrs; + }; +}; + +struct ip_tunnel_parm_kern { + char name[16]; + long unsigned int i_flags[1]; + long unsigned int o_flags[1]; + __be32 i_key; + __be32 o_key; + int link; + struct iphdr iph; +}; + +enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1, + IFF_EBRIDGE = 2, + IFF_BONDING = 4, + IFF_ISATAP = 8, + IFF_WAN_HDLC = 16, 
+ IFF_XMIT_DST_RELEASE = 32, + IFF_DONT_BRIDGE = 64, + IFF_DISABLE_NETPOLL = 128, + IFF_MACVLAN_PORT = 256, + IFF_BRIDGE_PORT = 512, + IFF_OVS_DATAPATH = 1024, + IFF_TX_SKB_SHARING = 2048, + IFF_UNICAST_FLT = 4096, + IFF_TEAM_PORT = 8192, + IFF_SUPP_NOFCS = 16384, + IFF_LIVE_ADDR_CHANGE = 32768, + IFF_MACVLAN = 65536, + IFF_XMIT_DST_RELEASE_PERM = 131072, + IFF_L3MDEV_MASTER = 262144, + IFF_NO_QUEUE = 524288, + IFF_OPENVSWITCH = 1048576, + IFF_L3MDEV_SLAVE = 2097152, + IFF_TEAM = 4194304, + IFF_RXFH_CONFIGURED = 8388608, + IFF_PHONY_HEADROOM = 16777216, + IFF_MACSEC = 33554432, + IFF_NO_RX_HANDLER = 67108864, + IFF_FAILOVER = 134217728, + IFF_FAILOVER_SLAVE = 268435456, + IFF_L3MDEV_RX_HANDLER = 536870912, + IFF_NO_ADDRCONF = 1073741824, + IFF_TX_SKB_NO_LINEAR = 2147483648, +}; + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; + unsigned int flags_changed; +}; + +struct netdev_notifier_changeupper_info { + struct netdev_notifier_info info; + struct net_device *upper_dev; + bool master; + bool linking; + void *upper_info; +}; + +struct ifaddrmsg { + __u8 ifa_family; + __u8 ifa_prefixlen; + __u8 ifa_flags; + __u8 ifa_scope; + __u32 ifa_index; +}; + +enum { + IFA_UNSPEC = 0, + IFA_ADDRESS = 1, + IFA_LOCAL = 2, + IFA_LABEL = 3, + IFA_BROADCAST = 4, + IFA_ANYCAST = 5, + IFA_CACHEINFO = 6, + IFA_MULTICAST = 7, + IFA_FLAGS = 8, + IFA_RT_PRIORITY = 9, + IFA_TARGET_NETNSID = 10, + IFA_PROTO = 11, + __IFA_MAX = 12, +}; + +struct ifa_cacheinfo { + __u32 ifa_prefered; + __u32 ifa_valid; + __u32 cstamp; + __u32 tstamp; +}; + +enum { + RTM_BASE = 16, + RTM_NEWLINK = 16, + RTM_DELLINK = 17, + RTM_GETLINK = 18, + RTM_SETLINK = 19, + RTM_NEWADDR = 20, + RTM_DELADDR = 21, + RTM_GETADDR = 22, + RTM_NEWROUTE = 24, + RTM_DELROUTE = 25, + RTM_GETROUTE = 26, + RTM_NEWNEIGH = 28, + RTM_DELNEIGH = 29, + RTM_GETNEIGH = 30, + RTM_NEWRULE = 32, + RTM_DELRULE = 33, + RTM_GETRULE = 34, + RTM_NEWQDISC = 36, + RTM_DELQDISC = 37, + RTM_GETQDISC = 38, + RTM_NEWTCLASS = 40, + RTM_DELTCLASS = 41, + RTM_GETTCLASS = 42, + RTM_NEWTFILTER = 44, + RTM_DELTFILTER = 45, + RTM_GETTFILTER = 46, + RTM_NEWACTION = 48, + RTM_DELACTION = 49, + RTM_GETACTION = 50, + RTM_NEWPREFIX = 52, + RTM_GETMULTICAST = 58, + RTM_GETANYCAST = 62, + RTM_NEWNEIGHTBL = 64, + RTM_GETNEIGHTBL = 66, + RTM_SETNEIGHTBL = 67, + RTM_NEWNDUSEROPT = 68, + RTM_NEWADDRLABEL = 72, + RTM_DELADDRLABEL = 73, + RTM_GETADDRLABEL = 74, + RTM_GETDCB = 78, + RTM_SETDCB = 79, + RTM_NEWNETCONF = 80, + RTM_DELNETCONF = 81, + RTM_GETNETCONF = 82, + RTM_NEWMDB = 84, + RTM_DELMDB = 85, + RTM_GETMDB = 86, + RTM_NEWNSID = 88, + RTM_DELNSID = 89, + RTM_GETNSID = 90, + RTM_NEWSTATS = 92, + RTM_GETSTATS = 94, + RTM_SETSTATS = 95, + RTM_NEWCACHEREPORT = 96, + RTM_NEWCHAIN = 100, + RTM_DELCHAIN = 101, + RTM_GETCHAIN = 102, + RTM_NEWNEXTHOP = 104, + RTM_DELNEXTHOP = 105, + RTM_GETNEXTHOP = 106, + RTM_NEWLINKPROP = 108, + RTM_DELLINKPROP = 109, + RTM_GETLINKPROP = 110, + RTM_NEWVLAN = 112, + RTM_DELVLAN = 113, + RTM_GETVLAN = 114, + RTM_NEWNEXTHOPBUCKET = 116, + RTM_DELNEXTHOPBUCKET = 117, + RTM_GETNEXTHOPBUCKET = 118, + RTM_NEWTUNNEL = 120, + RTM_DELTUNNEL = 121, + RTM_GETTUNNEL = 122, + __RTM_MAX = 123, +}; + +struct ifinfomsg { + unsigned char ifi_family; + unsigned char __ifi_pad; + short unsigned int ifi_type; + int ifi_index; + unsigned int ifi_flags; + unsigned int ifi_change; +}; + +struct prefixmsg { + unsigned char prefix_family; + unsigned char prefix_pad1; + short unsigned int prefix_pad2; + int prefix_ifindex; + unsigned char prefix_type; + unsigned 
char prefix_len; + unsigned char prefix_flags; + unsigned char prefix_pad3; +}; + +enum { + PREFIX_UNSPEC = 0, + PREFIX_ADDRESS = 1, + PREFIX_CACHEINFO = 2, + __PREFIX_MAX = 3, +}; + +struct prefix_cacheinfo { + __u32 preferred_time; + __u32 valid_time; +}; + +enum { + TCA_UNSPEC = 0, + TCA_KIND = 1, + TCA_OPTIONS = 2, + TCA_STATS = 3, + TCA_XSTATS = 4, + TCA_RATE = 5, + TCA_FCNT = 6, + TCA_STATS2 = 7, + TCA_STAB = 8, + TCA_PAD = 9, + TCA_DUMP_INVISIBLE = 10, + TCA_CHAIN = 11, + TCA_HW_OFFLOAD = 12, + TCA_INGRESS_BLOCK = 13, + TCA_EGRESS_BLOCK = 14, + TCA_DUMP_FLAGS = 15, + TCA_EXT_WARN_MSG = 16, + __TCA_MAX = 17, +}; + +enum { + IP_TUNNEL_CSUM_BIT = 0, + IP_TUNNEL_ROUTING_BIT = 1, + IP_TUNNEL_KEY_BIT = 2, + IP_TUNNEL_SEQ_BIT = 3, + IP_TUNNEL_STRICT_BIT = 4, + IP_TUNNEL_REC_BIT = 5, + IP_TUNNEL_VERSION_BIT = 6, + IP_TUNNEL_NO_KEY_BIT = 7, + IP_TUNNEL_DONT_FRAGMENT_BIT = 8, + IP_TUNNEL_OAM_BIT = 9, + IP_TUNNEL_CRIT_OPT_BIT = 10, + IP_TUNNEL_GENEVE_OPT_BIT = 11, + IP_TUNNEL_VXLAN_OPT_BIT = 12, + IP_TUNNEL_NOCACHE_BIT = 13, + IP_TUNNEL_ERSPAN_OPT_BIT = 14, + IP_TUNNEL_GTP_OPT_BIT = 15, + IP_TUNNEL_VTI_BIT = 16, + IP_TUNNEL_SIT_ISATAP_BIT = 16, + IP_TUNNEL_PFCP_OPT_BIT = 17, + __IP_TUNNEL_FLAG_NUM = 18, +}; + +struct in6_ifreq { + struct in6_addr ifr6_addr; + __u32 ifr6_prefixlen; + int ifr6_ifindex; +}; + +enum { + DEVCONF_FORWARDING = 0, + DEVCONF_HOPLIMIT = 1, + DEVCONF_MTU6 = 2, + DEVCONF_ACCEPT_RA = 3, + DEVCONF_ACCEPT_REDIRECTS = 4, + DEVCONF_AUTOCONF = 5, + DEVCONF_DAD_TRANSMITS = 6, + DEVCONF_RTR_SOLICITS = 7, + DEVCONF_RTR_SOLICIT_INTERVAL = 8, + DEVCONF_RTR_SOLICIT_DELAY = 9, + DEVCONF_USE_TEMPADDR = 10, + DEVCONF_TEMP_VALID_LFT = 11, + DEVCONF_TEMP_PREFERED_LFT = 12, + DEVCONF_REGEN_MAX_RETRY = 13, + DEVCONF_MAX_DESYNC_FACTOR = 14, + DEVCONF_MAX_ADDRESSES = 15, + DEVCONF_FORCE_MLD_VERSION = 16, + DEVCONF_ACCEPT_RA_DEFRTR = 17, + DEVCONF_ACCEPT_RA_PINFO = 18, + DEVCONF_ACCEPT_RA_RTR_PREF = 19, + DEVCONF_RTR_PROBE_INTERVAL = 20, + DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, + DEVCONF_PROXY_NDP = 22, + DEVCONF_OPTIMISTIC_DAD = 23, + DEVCONF_ACCEPT_SOURCE_ROUTE = 24, + DEVCONF_MC_FORWARDING = 25, + DEVCONF_DISABLE_IPV6 = 26, + DEVCONF_ACCEPT_DAD = 27, + DEVCONF_FORCE_TLLAO = 28, + DEVCONF_NDISC_NOTIFY = 29, + DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30, + DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, + DEVCONF_SUPPRESS_FRAG_NDISC = 32, + DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, + DEVCONF_USE_OPTIMISTIC = 34, + DEVCONF_ACCEPT_RA_MTU = 35, + DEVCONF_STABLE_SECRET = 36, + DEVCONF_USE_OIF_ADDRS_ONLY = 37, + DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, + DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, + DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, + DEVCONF_DROP_UNSOLICITED_NA = 41, + DEVCONF_KEEP_ADDR_ON_DOWN = 42, + DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, + DEVCONF_SEG6_ENABLED = 44, + DEVCONF_SEG6_REQUIRE_HMAC = 45, + DEVCONF_ENHANCED_DAD = 46, + DEVCONF_ADDR_GEN_MODE = 47, + DEVCONF_DISABLE_POLICY = 48, + DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, + DEVCONF_NDISC_TCLASS = 50, + DEVCONF_RPL_SEG_ENABLED = 51, + DEVCONF_RA_DEFRTR_METRIC = 52, + DEVCONF_IOAM6_ENABLED = 53, + DEVCONF_IOAM6_ID = 54, + DEVCONF_IOAM6_ID_WIDE = 55, + DEVCONF_NDISC_EVICT_NOCARRIER = 56, + DEVCONF_ACCEPT_UNTRACKED_NA = 57, + DEVCONF_ACCEPT_RA_MIN_LFT = 58, + DEVCONF_MAX = 59, +}; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; + +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); + +typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); + +enum 
rtnl_link_flags { + RTNL_FLAG_DOIT_UNLOCKED = 1, + RTNL_FLAG_BULK_DEL_SUPPORTED = 2, + RTNL_FLAG_DUMP_UNLOCKED = 4, + RTNL_FLAG_DUMP_SPLIT_NLM_DONE = 8, +}; + +struct rtnl_msg_handler { + struct module *owner; + int protocol; + int msgtype; + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + int flags; +}; + +struct rtnl_af_ops { + struct list_head list; + struct srcu_struct srcu; + int family; + int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); + size_t (*get_link_af_size)(const struct net_device *, u32); + int (*validate_link_af)(const struct net_device *, const struct nlattr *, struct netlink_ext_ack *); + int (*set_link_af)(struct net_device *, const struct nlattr *, struct netlink_ext_ack *); + int (*fill_stats_af)(struct sk_buff *, const struct net_device *); + size_t (*get_stats_af_size)(const struct net_device *); +}; + +enum { + INET6_IFADDR_STATE_PREDAD = 0, + INET6_IFADDR_STATE_DAD = 1, + INET6_IFADDR_STATE_POSTDAD = 2, + INET6_IFADDR_STATE_ERRDAD = 3, + INET6_IFADDR_STATE_DEAD = 4, +}; + +struct inet6_ifaddr { + struct in6_addr addr; + __u32 prefix_len; + __u32 rt_priority; + __u32 valid_lft; + __u32 prefered_lft; + refcount_t refcnt; + spinlock_t lock; + int state; + __u32 flags; + __u8 dad_probes; + __u8 stable_privacy_retry; + __u16 scope; + long: 32; + __u64 dad_nonce; + long unsigned int cstamp; + long unsigned int tstamp; + struct delayed_work dad_work; + struct inet6_dev *idev; + struct fib6_info *rt; + struct hlist_node addr_lst; + struct list_head if_list; + struct list_head if_list_aux; + struct list_head tmp_list; + struct inet6_ifaddr *ifpub; + int regen_count; + bool tokenized; + u8 ifa_proto; + struct callback_head rcu; + struct in6_addr peer_addr; +}; + +enum { + __ND_OPT_PREFIX_INFO_END = 0, + ND_OPT_SOURCE_LL_ADDR = 1, + ND_OPT_TARGET_LL_ADDR = 2, + ND_OPT_PREFIX_INFO = 3, + ND_OPT_REDIRECT_HDR = 4, + ND_OPT_MTU = 5, + ND_OPT_NONCE = 14, + __ND_OPT_ARRAY_MAX = 15, + ND_OPT_ROUTE_INFO = 24, + ND_OPT_RDNSS = 25, + ND_OPT_DNSSL = 31, + ND_OPT_6CO = 34, + ND_OPT_CAPTIVE_PORTAL = 37, + ND_OPT_PREF64 = 38, + __ND_OPT_MAX = 39, +}; + +struct in6_validator_info { + struct in6_addr i6vi_addr; + struct inet6_dev *i6vi_dev; + struct netlink_ext_ack *extack; +}; + +struct ifa6_config { + const struct in6_addr *pfx; + unsigned int plen; + u8 ifa_proto; + const struct in6_addr *peer_pfx; + u32 rt_priority; + u32 ifa_flags; + u32 preferred_lft; + u32 valid_lft; + u16 scope; +}; + +union fwnet_hwaddr { + u8 u[16]; + struct { + __be64 uniq_id; + u8 max_rec; + u8 sspd; + u8 fifo[6]; + } uc; +}; + +struct qdisc_walker { + int stop; + int skip; + int count; + int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *); +}; + +struct netconfmsg { + __u8 ncm_family; +}; + +enum { + NETCONFA_UNSPEC = 0, + NETCONFA_IFINDEX = 1, + NETCONFA_FORWARDING = 2, + NETCONFA_RP_FILTER = 3, + NETCONFA_MC_FORWARDING = 4, + NETCONFA_PROXY_NEIGH = 5, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, + NETCONFA_INPUT = 7, + NETCONFA_BC_FORWARDING = 8, + __NETCONFA_MAX = 9, +}; + +enum cleanup_prefix_rt_t { + CLEANUP_PREFIX_RT_NOP = 0, + CLEANUP_PREFIX_RT_DEL = 1, + CLEANUP_PREFIX_RT_EXPIRE = 2, +}; + +enum { + IPV6_SADDR_RULE_INIT = 0, + IPV6_SADDR_RULE_LOCAL = 1, + IPV6_SADDR_RULE_SCOPE = 2, + IPV6_SADDR_RULE_PREFERRED = 3, + IPV6_SADDR_RULE_OIF = 4, + IPV6_SADDR_RULE_LABEL = 5, + IPV6_SADDR_RULE_PRIVACY = 6, + IPV6_SADDR_RULE_ORCHID = 7, + IPV6_SADDR_RULE_PREFIX = 8, + IPV6_SADDR_RULE_MAX = 9, +}; + +struct ipv6_saddr_score { + int rule; + int addr_type; + struct 
inet6_ifaddr *ifa; + long unsigned int scorebits[1]; + int scopedist; + int matchlen; +}; + +struct ipv6_saddr_dst { + const struct in6_addr *addr; + int ifindex; + int scope; + int label; + unsigned int prefs; +}; + +struct if6_iter_state { + struct seq_net_private p; + int bucket; + int offset; +}; + +enum addr_type_t { + UNICAST_ADDR = 0, + MULTICAST_ADDR = 1, + ANYCAST_ADDR = 2, +}; + +struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; + enum addr_type_t type; +}; + +enum { + DAD_PROCESS = 0, + DAD_BEGIN = 1, + DAD_ABORT = 2, +}; + +struct cpio_data { + void *data; + size_t size; + char name[18]; +}; + +enum cpio_fields { + C_MAGIC = 0, + C_INO = 1, + C_MODE = 2, + C_UID = 3, + C_GID = 4, + C_NLINK = 5, + C_MTIME = 6, + C_FILESIZE = 7, + C_MAJ = 8, + C_MIN = 9, + C_RMAJ = 10, + C_RMIN = 11, + C_NAMESIZE = 12, + C_CHKSUM = 13, + C_NFIELDS = 14, +}; + +typedef u8 uint8_t; + +typedef __be32 fdt32_t; + +typedef __be64 fdt64_t; + +struct fdt_header { + fdt32_t magic; + fdt32_t totalsize; + fdt32_t off_dt_struct; + fdt32_t off_dt_strings; + fdt32_t off_mem_rsvmap; + fdt32_t version; + fdt32_t last_comp_version; + fdt32_t boot_cpuid_phys; + fdt32_t size_dt_strings; + fdt32_t size_dt_struct; +}; + +struct fdt_reserve_entry { + fdt64_t address; + fdt64_t size; +}; + +struct fdt_node_header { + fdt32_t tag; + char name[0]; +}; + +struct fdt_property { + fdt32_t tag; + fdt32_t len; + fdt32_t nameoff; + char data[0]; +}; + +enum { + ASSUME_PERFECT = 255, + ASSUME_VALID_DTB = 1, + ASSUME_VALID_INPUT = 2, + ASSUME_LATEST = 4, + ASSUME_NO_ROLLBACK = 8, + ASSUME_LIBFDT_ORDER = 16, + ASSUME_LIBFDT_FLAWLESS = 32, +}; + +struct fdt_errtabent { + const char *str; +}; + +enum { + DUMP_PREFIX_NONE = 0, + DUMP_PREFIX_ADDRESS = 1, + DUMP_PREFIX_OFFSET = 2, +}; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC = 0, + HRTIMER_BASE_REALTIME = 1, + HRTIMER_BASE_BOOTTIME = 2, + HRTIMER_BASE_TAI = 3, + HRTIMER_BASE_MONOTONIC_SOFT = 4, + HRTIMER_BASE_REALTIME_SOFT = 5, + HRTIMER_BASE_BOOTTIME_SOFT = 6, + HRTIMER_BASE_TAI_SOFT = 7, + HRTIMER_MAX_CLOCK_BASES = 8, +}; + +typedef long int (*syscall_fn)(const struct pt_regs *); + +struct membuf { + void *p; + size_t left; +}; + +struct user_regset; + +typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); + +typedef int user_regset_get2_fn(struct task_struct *, const struct user_regset *, struct membuf); + +typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *); + +typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); + +struct user_regset { + user_regset_get2_fn *regset_get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP = 1, +}; + +struct maple_alloc { + long unsigned int total; + unsigned char node_count; + unsigned int request_count; + struct maple_alloc *slot[61]; +}; + +struct maple_enode; + +enum store_type { + wr_invalid = 0, + wr_new_root = 1, + wr_store_root = 2, + wr_exact_fit = 3, + wr_spanning_store = 4, + wr_split_store = 5, + wr_rebalance = 6, + wr_append = 7, + wr_node_store = 8, + wr_slot_store = 9, +}; + +enum maple_status { + ma_active = 0, + ma_start = 1, + ma_root = 2, + ma_none = 3, + ma_pause = 4, + 
ma_overflow = 5, + ma_underflow = 6, + ma_error = 7, +}; + +struct ma_state { + struct maple_tree *tree; + long unsigned int index; + long unsigned int last; + struct maple_enode *node; + long unsigned int min; + long unsigned int max; + struct maple_alloc *alloc; + enum maple_status status; + unsigned char depth; + unsigned char offset; + unsigned char mas_flags; + unsigned char end; + enum store_type store_type; +}; + +struct vma_iterator { + struct ma_state mas; +}; + +struct smp_ops_t { + void (*message_pass)(int, int); + void (*cause_ipi)(int); + int (*cause_nmi_ipi)(int); + void (*probe)(); + int (*kick_cpu)(int); + int (*prepare_cpu)(int); + void (*setup_cpu)(int); + void (*bringup_done)(); + void (*take_timebase)(); + void (*give_timebase)(); + int (*cpu_disable)(); + void (*cpu_die)(unsigned int); + int (*cpu_bootable)(unsigned int); +}; + +struct smpboot_thread_data { + unsigned int cpu; + unsigned int status; + struct smp_hotplug_thread *ht; +}; + +enum { + HP_THREAD_NONE = 0, + HP_THREAD_ACTIVE = 1, + HP_THREAD_PARKED = 2, +}; + +struct pin_cookie {}; + +typedef struct { + void *lock; + long unsigned int flags; +} class_irqsave_t; + +typedef void (*smp_call_func_t)(void *); + +struct __call_single_data { + struct __call_single_node node; + smp_call_func_t func; + void *info; +}; + +typedef struct __call_single_data call_single_data_t; + +typedef struct { + void *lock; +} class_rcu_t; + +struct plist_head { + struct list_head node_list; +}; + +enum scx_public_consts { + SCX_OPS_NAME_LEN = 128ULL, + SCX_SLICE_DFL = 20000000ULL, + SCX_SLICE_INF = 18446744073709551615ULL, +}; + +enum scx_dsq_id_flags { + SCX_DSQ_FLAG_BUILTIN = 9223372036854775808ULL, + SCX_DSQ_FLAG_LOCAL_ON = 4611686018427387904ULL, + SCX_DSQ_INVALID = 9223372036854775808ULL, + SCX_DSQ_GLOBAL = 9223372036854775809ULL, + SCX_DSQ_LOCAL = 9223372036854775810ULL, + SCX_DSQ_LOCAL_ON = 13835058055282163712ULL, + SCX_DSQ_LOCAL_CPU_MASK = 4294967295ULL, +}; + +enum scx_ent_flags { + SCX_TASK_QUEUED = 1, + SCX_TASK_RESET_RUNNABLE_AT = 4, + SCX_TASK_DEQD_FOR_SLEEP = 8, + SCX_TASK_STATE_SHIFT = 8, + SCX_TASK_STATE_BITS = 2, + SCX_TASK_STATE_MASK = 768, + SCX_TASK_CURSOR = -2147483648, +}; + +enum scx_task_state { + SCX_TASK_NONE = 0, + SCX_TASK_INIT = 1, + SCX_TASK_READY = 2, + SCX_TASK_ENABLED = 3, + SCX_TASK_NR_STATES = 4, +}; + +enum scx_ent_dsq_flags { + SCX_TASK_DSQ_ON_PRIQ = 1, +}; + +enum scx_kf_mask { + SCX_KF_UNLOCKED = 0, + SCX_KF_CPU_RELEASE = 1, + SCX_KF_DISPATCH = 2, + SCX_KF_ENQUEUE = 4, + SCX_KF_SELECT_CPU = 8, + SCX_KF_REST = 16, + __SCX_KF_RQ_LOCKED = 31, + __SCX_KF_TERMINAL = 28, +}; + +enum scx_dsq_lnode_flags { + SCX_DSQ_LNODE_ITER_CURSOR = 1, + __SCX_DSQ_LNODE_PRIV_SHIFT = 16, +}; + +struct dl_bw { + raw_spinlock_t lock; + long: 32; + u64 bw; + u64 total_bw; +}; + +struct cpudl_item; + +struct cpudl { + raw_spinlock_t lock; + int size; + cpumask_var_t free_cpus; + struct cpudl_item *elements; +}; + +struct cpupri_vec { + atomic_t count; + cpumask_var_t mask; +}; + +struct cpupri { + struct cpupri_vec pri_to_cpu[101]; + int *cpu_to_pri; +}; + +struct perf_domain; + +struct root_domain { + atomic_t refcount; + atomic_t rto_count; + struct callback_head rcu; + cpumask_var_t span; + cpumask_var_t online; + bool overloaded; + bool overutilized; + cpumask_var_t dlo_mask; + atomic_t dlo_count; + long: 32; + struct dl_bw dl_bw; + struct cpudl cpudl; + u64 visit_gen; + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; + int rto_loop; + int rto_cpu; + atomic_t rto_loop_next; + atomic_t rto_loop_start; + 
cpumask_var_t rto_mask; + struct cpupri cpupri; + struct perf_domain *pd; +}; + +struct sched_param { + int sched_priority; +}; + +struct cfs_rq { + struct load_weight load; + unsigned int nr_running; + unsigned int h_nr_running; + unsigned int idle_nr_running; + unsigned int idle_h_nr_running; + s64 avg_vruntime; + u64 avg_load; + u64 min_vruntime; + struct rb_root_cached tasks_timeline; + struct sched_entity *curr; + struct sched_entity *next; + struct sched_avg avg; + u64 last_update_time_copy; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct { + raw_spinlock_t lock; + int nr; + long unsigned int load_avg; + long unsigned int util_avg; + long unsigned int runnable_avg; + long: 32; + long: 32; + long: 32; + } removed; + u64 last_update_tg_load_avg; + long unsigned int tg_load_avg_contrib; + long int propagate; + long int prop_runnable_sum; + long unsigned int h_load; + u64 last_h_load_update; + struct sched_entity *h_load_next; + struct rq *rq; + int on_list; + struct list_head leaf_cfs_rq_list; + struct task_group *tg; + int idle; + int runtime_enabled; + s64 runtime_remaining; + u64 throttled_pelt_idle; + u64 throttled_pelt_idle_copy; + u64 throttled_clock; + u64 throttled_clock_pelt; + u64 throttled_clock_pelt_time; + u64 throttled_clock_self; + u64 throttled_clock_self_time; + int throttled; + int throttle_count; + struct list_head throttled_list; + struct list_head throttled_csd_list; + long: 32; + long: 32; +}; + +struct rt_prio_array { + long unsigned int bitmap[4]; + struct list_head queue[100]; +}; + +struct rt_rq { + struct rt_prio_array active; + unsigned int rt_nr_running; + unsigned int rr_nr_running; + struct { + int curr; + int next; + } highest_prio; + bool overloaded; + struct plist_head pushable_tasks; + int rt_queued; +}; + +struct dl_rq { + struct rb_root_cached root; + unsigned int dl_nr_running; + long: 32; + struct { + u64 curr; + u64 next; + } earliest_dl; + bool overloaded; + struct rb_root_cached pushable_dl_tasks_root; + long: 32; + u64 running_bw; + u64 this_bw; + u64 extra_bw; + u64 max_bw; + u64 bw_ratio; +}; + +struct balance_callback { + struct balance_callback *next; + void (*func)(struct rq *); +}; + +struct scx_rq { + struct scx_dispatch_q local_dsq; + struct list_head runnable_list; + struct list_head ddsp_deferred_locals; + long unsigned int ops_qseq; + long: 32; + u64 extra_enq_flags; + u32 nr_running; + u32 flags; + u32 cpuperf_target; + bool cpu_released; + cpumask_var_t cpus_to_kick; + cpumask_var_t cpus_to_kick_if_idle; + cpumask_var_t cpus_to_preempt; + cpumask_var_t cpus_to_wait; + long unsigned int pnt_seq; + struct balance_callback deferred_bal_cb; + struct irq_work deferred_irq_work; + struct irq_work kick_cpus_irq_work; + long: 32; +}; + +typedef int (*cpu_stop_fn_t)(void *); + +struct cpu_stop_done; + +struct cpu_stop_work { + struct list_head list; + cpu_stop_fn_t fn; + long unsigned int caller; + void *arg; + struct cpu_stop_done *done; +}; + +struct sched_domain; + +struct rq { + raw_spinlock_t __lock; + unsigned int nr_running; + unsigned int ttwu_pending; + long: 32; + u64 nr_switches; + long: 32; + long: 32; + struct cfs_rq cfs; + struct rt_rq rt; + struct dl_rq dl; + struct scx_rq scx; + struct sched_dl_entity fair_server; + struct list_head leaf_cfs_rq_list; + struct list_head *tmp_alone_branch; + unsigned int nr_uninterruptible; + union { + struct task_struct *donor; + struct task_struct *curr; + }; + struct sched_dl_entity *dl_server; + struct task_struct *idle; + struct task_struct *stop; + long 
unsigned int next_balance; + struct mm_struct *prev_mm; + unsigned int clock_update_flags; + long: 32; + u64 clock; + u64 clock_task; + u64 clock_pelt; + long unsigned int lost_idle_time; + long: 32; + u64 clock_pelt_idle; + u64 clock_idle; + u64 clock_pelt_idle_copy; + u64 clock_idle_copy; + atomic_t nr_iowait; + long: 32; + u64 last_seen_need_resched_ns; + int ticks_without_resched; + int membarrier_state; + struct root_domain *rd; + struct sched_domain *sd; + long unsigned int cpu_capacity; + struct balance_callback *balance_callback; + unsigned char nohz_idle_balance; + unsigned char idle_balance; + long unsigned int misfit_task_load; + int active_balance; + int push_cpu; + struct cpu_stop_work active_balance_work; + int cpu; + int online; + struct list_head cfs_tasks; + long: 32; + long: 32; + struct sched_avg avg_rt; + struct sched_avg avg_dl; + u64 idle_stamp; + u64 avg_idle; + u64 max_idle_balance_cost; + long unsigned int calc_load_update; + long int calc_load_active; + call_single_data_t hrtick_csd; + struct hrtimer hrtick_timer; + ktime_t hrtick_time; + unsigned int nr_pinned; + unsigned int push_busy; + struct cpu_stop_work push_work; + cpumask_var_t scratch_mask; + long: 32; + call_single_data_t cfsb_csd; + struct list_head cfsb_csd_list; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct cfs_bandwidth { + raw_spinlock_t lock; + long: 32; + ktime_t period; + u64 quota; + u64 runtime; + u64 burst; + u64 runtime_snap; + s64 hierarchical_quota; + u8 idle; + u8 period_active; + u8 slack_started; + long: 32; + struct hrtimer period_timer; + struct hrtimer slack_timer; + struct list_head throttled_cfs_rq; + int nr_periods; + int nr_throttled; + int nr_burst; + long: 32; + u64 throttled_time; + u64 burst_time; +}; + +struct task_group { + struct cgroup_subsys_state css; + int idle; + struct sched_entity **se; + struct cfs_rq **cfs_rq; + long unsigned int shares; + atomic_long_t load_avg; + u32 scx_flags; + u32 scx_weight; + struct callback_head rcu; + struct list_head list; + struct task_group *parent; + struct list_head siblings; + struct list_head children; + struct cfs_bandwidth cfs_bandwidth; + long: 32; + long: 32; +}; + +typedef struct { + raw_spinlock_t *lock; +} class_raw_spinlock_t; + +typedef struct { + raw_spinlock_t *lock; + long unsigned int flags; +} class_raw_spinlock_irqsave_t; + +enum { + SD_BALANCE_NEWIDLE = 1, + SD_BALANCE_EXEC = 2, + SD_BALANCE_FORK = 4, + SD_BALANCE_WAKE = 8, + SD_WAKE_AFFINE = 16, + SD_ASYM_CPUCAPACITY = 32, + SD_ASYM_CPUCAPACITY_FULL = 64, + SD_SHARE_CPUCAPACITY = 128, + SD_CLUSTER = 256, + SD_SHARE_LLC = 512, + SD_SERIALIZE = 1024, + SD_ASYM_PACKING = 2048, + SD_PREFER_SIBLING = 4096, + SD_OVERLAP = 8192, + SD_NUMA = 16384, +}; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; + int nr_idle_scan; +}; + +struct sched_group; + +struct sched_domain { + struct sched_domain *parent; + struct sched_domain *child; + struct sched_group *groups; + long unsigned int min_interval; + long unsigned int max_interval; + unsigned int busy_factor; + unsigned int imbalance_pct; + unsigned int cache_nice_tries; + unsigned int imb_numa_nr; + int nohz_idle; + int flags; + int level; + long unsigned int last_balance; + unsigned int balance_interval; + unsigned int nr_balance_failed; + long: 32; + u64 max_newidle_lb_cost; + long unsigned int last_decay_max_lb_cost; + char *name; + union { + void *private; + struct callback_head rcu; + }; + struct sched_domain_shared *shared; + unsigned 
int span_weight; + long unsigned int span[0]; +}; + +struct sched_group_capacity; + +struct sched_group { + struct sched_group *next; + atomic_t ref; + unsigned int group_weight; + unsigned int cores; + struct sched_group_capacity *sgc; + int asym_prefer_cpu; + int flags; + long unsigned int cpumask[0]; +}; + +struct sched_group_capacity { + atomic_t ref; + long unsigned int capacity; + long unsigned int min_capacity; + long unsigned int max_capacity; + long unsigned int next_update; + int imbalance; + int id; + long unsigned int cpumask[0]; +}; + +struct kobj_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); + ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t); +}; + +struct em_perf_state { + long unsigned int performance; + long unsigned int frequency; + long unsigned int power; + long unsigned int cost; + long unsigned int flags; +}; + +struct em_perf_table { + struct callback_head rcu; + struct kref kref; + struct em_perf_state state[0]; +}; + +struct em_perf_domain { + struct em_perf_table *em_table; + int nr_perf_states; + int min_perf_state; + int max_perf_state; + long unsigned int flags; + long unsigned int cpus[0]; +}; + +enum pm_qos_type { + PM_QOS_UNITIALIZED = 0, + PM_QOS_MAX = 1, + PM_QOS_MIN = 2, +}; + +struct pm_qos_constraints { + struct plist_head list; + s32 target_value; + s32 default_value; + s32 no_constraint_value; + enum pm_qos_type type; + struct blocking_notifier_head *notifiers; +}; + +struct freq_constraints { + struct pm_qos_constraints min_freq; + struct blocking_notifier_head min_freq_notifiers; + struct pm_qos_constraints max_freq; + struct blocking_notifier_head max_freq_notifiers; +}; + +struct pm_qos_flags { + struct list_head list; + s32 effective_flags; +}; + +struct dev_pm_qos_request; + +struct dev_pm_qos { + struct pm_qos_constraints resume_latency; + struct pm_qos_constraints latency_tolerance; + struct freq_constraints freq; + struct pm_qos_flags flags; + struct dev_pm_qos_request *resume_latency_req; + struct dev_pm_qos_request *latency_tolerance_req; + struct dev_pm_qos_request *flags_req; +}; + +enum hk_type { + HK_TYPE_TIMER = 0, + HK_TYPE_RCU = 1, + HK_TYPE_MISC = 2, + HK_TYPE_SCHED = 3, + HK_TYPE_TICK = 4, + HK_TYPE_DOMAIN = 5, + HK_TYPE_WQ = 6, + HK_TYPE_MANAGED_IRQ = 7, + HK_TYPE_KTHREAD = 8, + HK_TYPE_MAX = 9, +}; + +struct cpuidle_state_usage { + long long unsigned int disable; + long long unsigned int usage; + u64 time_ns; + long long unsigned int above; + long long unsigned int below; + long long unsigned int rejected; +}; + +struct cpuidle_device; + +struct cpuidle_driver; + +struct cpuidle_state { + char name[16]; + char desc[32]; + s64 exit_latency_ns; + s64 target_residency_ns; + unsigned int flags; + unsigned int exit_latency; + int power_usage; + unsigned int target_residency; + int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); + void (*enter_dead)(struct cpuidle_device *, int); + int (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); + long: 32; +}; + +struct cpuidle_state_kobj; + +struct cpuidle_driver_kobj; + +struct cpuidle_device_kobj; + +struct cpuidle_device { + unsigned int registered: 1; + unsigned int enabled: 1; + unsigned int poll_time_limit: 1; + unsigned int cpu; + ktime_t next_hrtimer; + int last_state_idx; + long: 32; + u64 last_residency_ns; + u64 poll_limit_ns; + u64 forced_idle_latency_limit_ns; + struct cpuidle_state_usage states_usage[10]; + struct cpuidle_state_kobj *kobjs[10]; + 
struct cpuidle_driver_kobj *kobj_driver; + struct cpuidle_device_kobj *kobj_dev; + struct list_head device_list; +}; + +struct cpuidle_driver { + const char *name; + struct module *owner; + unsigned int bctimer: 1; + long: 32; + struct cpuidle_state states[10]; + int state_count; + int safe_state_index; + struct cpumask *cpumask; + const char *governor; +}; + +struct kthread_work; + +typedef void (*kthread_work_func_t)(struct kthread_work *); + +struct kthread_worker; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + struct kthread_worker *worker; + int canceling; +}; + +struct kthread_worker { + unsigned int flags; + raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; + struct kthread_work *current_work; +}; + +struct kernel_cpustat { + u64 cpustat[10]; +}; + +struct sysrq_key_op { + void (* const handler)(u8); + const char * const help_msg; + const char * const action_msg; + const int enable_mask; +}; + +struct sched_attr { + __u32 size; + __u32 sched_policy; + __u64 sched_flags; + __s32 sched_nice; + __u32 sched_priority; + __u64 sched_runtime; + __u64 sched_deadline; + __u64 sched_period; + __u32 sched_util_min; + __u32 sched_util_max; +}; + +enum cpuhp_state { + CPUHP_INVALID = -1, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS = 1, + CPUHP_PERF_PREPARE = 2, + CPUHP_PERF_X86_PREPARE = 3, + CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, + CPUHP_PERF_POWER = 5, + CPUHP_PERF_SUPERH = 6, + CPUHP_X86_HPET_DEAD = 7, + CPUHP_X86_MCE_DEAD = 8, + CPUHP_VIRT_NET_DEAD = 9, + CPUHP_IBMVNIC_DEAD = 10, + CPUHP_SLUB_DEAD = 11, + CPUHP_DEBUG_OBJ_DEAD = 12, + CPUHP_MM_WRITEBACK_DEAD = 13, + CPUHP_MM_VMSTAT_DEAD = 14, + CPUHP_SOFTIRQ_DEAD = 15, + CPUHP_NET_MVNETA_DEAD = 16, + CPUHP_CPUIDLE_DEAD = 17, + CPUHP_ARM64_FPSIMD_DEAD = 18, + CPUHP_ARM_OMAP_WAKE_DEAD = 19, + CPUHP_IRQ_POLL_DEAD = 20, + CPUHP_BLOCK_SOFTIRQ_DEAD = 21, + CPUHP_BIO_DEAD = 22, + CPUHP_ACPI_CPUDRV_DEAD = 23, + CPUHP_S390_PFAULT_DEAD = 24, + CPUHP_BLK_MQ_DEAD = 25, + CPUHP_FS_BUFF_DEAD = 26, + CPUHP_PRINTK_DEAD = 27, + CPUHP_MM_MEMCQ_DEAD = 28, + CPUHP_PERCPU_CNT_DEAD = 29, + CPUHP_RADIX_DEAD = 30, + CPUHP_PAGE_ALLOC = 31, + CPUHP_NET_DEV_DEAD = 32, + CPUHP_PCI_XGENE_DEAD = 33, + CPUHP_IOMMU_IOVA_DEAD = 34, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 35, + CPUHP_PADATA_DEAD = 36, + CPUHP_AP_DTPM_CPU_DEAD = 37, + CPUHP_RANDOM_PREPARE = 38, + CPUHP_WORKQUEUE_PREP = 39, + CPUHP_POWER_NUMA_PREPARE = 40, + CPUHP_HRTIMERS_PREPARE = 41, + CPUHP_X2APIC_PREPARE = 42, + CPUHP_SMPCFD_PREPARE = 43, + CPUHP_RELAY_PREPARE = 44, + CPUHP_MD_RAID5_PREPARE = 45, + CPUHP_RCUTREE_PREP = 46, + CPUHP_CPUIDLE_COUPLED_PREPARE = 47, + CPUHP_POWERPC_PMAC_PREPARE = 48, + CPUHP_POWERPC_MMU_CTX_PREPARE = 49, + CPUHP_XEN_PREPARE = 50, + CPUHP_XEN_EVTCHN_PREPARE = 51, + CPUHP_ARM_SHMOBILE_SCU_PREPARE = 52, + CPUHP_SH_SH3X_PREPARE = 53, + CPUHP_TOPOLOGY_PREPARE = 54, + CPUHP_NET_IUCV_PREPARE = 55, + CPUHP_ARM_BL_PREPARE = 56, + CPUHP_TRACE_RB_PREPARE = 57, + CPUHP_MM_ZS_PREPARE = 58, + CPUHP_MM_ZSWP_POOL_PREPARE = 59, + CPUHP_KVM_PPC_BOOK3S_PREPARE = 60, + CPUHP_ZCOMP_PREPARE = 61, + CPUHP_TIMERS_PREPARE = 62, + CPUHP_TMIGR_PREPARE = 63, + CPUHP_MIPS_SOC_PREPARE = 64, + CPUHP_BP_PREPARE_DYN = 65, + CPUHP_BP_PREPARE_DYN_END = 85, + CPUHP_BP_KICK_AP = 86, + CPUHP_BRINGUP_CPU = 87, + CPUHP_AP_IDLE_DEAD = 88, + CPUHP_AP_OFFLINE = 89, + CPUHP_AP_CACHECTRL_STARTING = 90, + CPUHP_AP_SCHED_STARTING = 91, + CPUHP_AP_RCUTREE_DYING = 92, + CPUHP_AP_CPU_PM_STARTING = 93, + CPUHP_AP_IRQ_GIC_STARTING = 94, + 
CPUHP_AP_IRQ_HIP04_STARTING = 95, + CPUHP_AP_IRQ_APPLE_AIC_STARTING = 96, + CPUHP_AP_IRQ_ARMADA_XP_STARTING = 97, + CPUHP_AP_IRQ_BCM2836_STARTING = 98, + CPUHP_AP_IRQ_MIPS_GIC_STARTING = 99, + CPUHP_AP_IRQ_EIOINTC_STARTING = 100, + CPUHP_AP_IRQ_AVECINTC_STARTING = 101, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 102, + CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING = 103, + CPUHP_AP_IRQ_RISCV_IMSIC_STARTING = 104, + CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING = 105, + CPUHP_AP_ARM_MVEBU_COHERENCY = 106, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 107, + CPUHP_AP_PERF_X86_STARTING = 108, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 109, + CPUHP_AP_PERF_XTENSA_STARTING = 110, + CPUHP_AP_ARM_VFP_STARTING = 111, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 112, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 113, + CPUHP_AP_PERF_ARM_ACPI_STARTING = 114, + CPUHP_AP_PERF_ARM_STARTING = 115, + CPUHP_AP_PERF_RISCV_STARTING = 116, + CPUHP_AP_ARM_L2X0_STARTING = 117, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 118, + CPUHP_AP_ARM_ARCH_TIMER_STARTING = 119, + CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING = 120, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 121, + CPUHP_AP_JCORE_TIMER_STARTING = 122, + CPUHP_AP_ARM_TWD_STARTING = 123, + CPUHP_AP_QCOM_TIMER_STARTING = 124, + CPUHP_AP_TEGRA_TIMER_STARTING = 125, + CPUHP_AP_ARMADA_TIMER_STARTING = 126, + CPUHP_AP_MIPS_GIC_TIMER_STARTING = 127, + CPUHP_AP_ARC_TIMER_STARTING = 128, + CPUHP_AP_REALTEK_TIMER_STARTING = 129, + CPUHP_AP_RISCV_TIMER_STARTING = 130, + CPUHP_AP_CLINT_TIMER_STARTING = 131, + CPUHP_AP_CSKY_TIMER_STARTING = 132, + CPUHP_AP_TI_GP_TIMER_STARTING = 133, + CPUHP_AP_HYPERV_TIMER_STARTING = 134, + CPUHP_AP_DUMMY_TIMER_STARTING = 135, + CPUHP_AP_ARM_XEN_STARTING = 136, + CPUHP_AP_ARM_XEN_RUNSTATE_STARTING = 137, + CPUHP_AP_ARM_CORESIGHT_STARTING = 138, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 139, + CPUHP_AP_ARM64_ISNDEP_STARTING = 140, + CPUHP_AP_SMPCFD_DYING = 141, + CPUHP_AP_HRTIMERS_DYING = 142, + CPUHP_AP_TICK_DYING = 143, + CPUHP_AP_X86_TBOOT_DYING = 144, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 145, + CPUHP_AP_ONLINE = 146, + CPUHP_TEARDOWN_CPU = 147, + CPUHP_AP_ONLINE_IDLE = 148, + CPUHP_AP_HYPERV_ONLINE = 149, + CPUHP_AP_KVM_ONLINE = 150, + CPUHP_AP_SCHED_WAIT_EMPTY = 151, + CPUHP_AP_SMPBOOT_THREADS = 152, + CPUHP_AP_IRQ_AFFINITY_ONLINE = 153, + CPUHP_AP_BLK_MQ_ONLINE = 154, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 155, + CPUHP_AP_X86_INTEL_EPB_ONLINE = 156, + CPUHP_AP_PERF_ONLINE = 157, + CPUHP_AP_PERF_X86_ONLINE = 158, + CPUHP_AP_PERF_X86_UNCORE_ONLINE = 159, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 160, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 161, + CPUHP_AP_PERF_S390_CF_ONLINE = 162, + CPUHP_AP_PERF_S390_SF_ONLINE = 163, + CPUHP_AP_PERF_ARM_CCI_ONLINE = 164, + CPUHP_AP_PERF_ARM_CCN_ONLINE = 165, + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE = 166, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 167, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 168, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 169, + CPUHP_AP_PERF_ARM_HISI_PA_ONLINE = 170, + CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE = 171, + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE = 172, + CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE = 173, + CPUHP_AP_PERF_ARM_L2X0_ONLINE = 174, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 175, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 176, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 177, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 178, + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE = 179, + CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE = 180, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 181, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 182, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 183, + 
CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 184, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 185, + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 186, + CPUHP_AP_PERF_CSKY_ONLINE = 187, + CPUHP_AP_TMIGR_ONLINE = 188, + CPUHP_AP_WATCHDOG_ONLINE = 189, + CPUHP_AP_WORKQUEUE_ONLINE = 190, + CPUHP_AP_RANDOM_ONLINE = 191, + CPUHP_AP_RCUTREE_ONLINE = 192, + CPUHP_AP_BASE_CACHEINFO_ONLINE = 193, + CPUHP_AP_ONLINE_DYN = 194, + CPUHP_AP_ONLINE_DYN_END = 234, + CPUHP_AP_X86_HPET_ONLINE = 235, + CPUHP_AP_X86_KVM_CLK_ONLINE = 236, + CPUHP_AP_ACTIVE = 237, + CPUHP_ONLINE = 238, +}; + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; +}; + +enum freq_qos_req_type { + FREQ_QOS_MIN = 1, + FREQ_QOS_MAX = 2, +}; + +struct freq_qos_request { + enum freq_qos_req_type type; + struct plist_node pnode; + struct freq_constraints *qos; +}; + +enum dev_pm_qos_req_type { + DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE = 2, + DEV_PM_QOS_MIN_FREQUENCY = 3, + DEV_PM_QOS_MAX_FREQUENCY = 4, + DEV_PM_QOS_FLAGS = 5, +}; + +struct dev_pm_qos_request { + enum dev_pm_qos_req_type type; + union { + struct plist_node pnode; + struct pm_qos_flags_request flr; + struct freq_qos_request freq; + } data; + struct device *dev; +}; + +enum { + BPF_REG_0 = 0, + BPF_REG_1 = 1, + BPF_REG_2 = 2, + BPF_REG_3 = 3, + BPF_REG_4 = 4, + BPF_REG_5 = 5, + BPF_REG_6 = 6, + BPF_REG_7 = 7, + BPF_REG_8 = 8, + BPF_REG_9 = 9, + BPF_REG_10 = 10, + __MAX_BPF_REG = 11, +}; + +enum bpf_func_id { + BPF_FUNC_unspec = 0, + BPF_FUNC_map_lookup_elem = 1, + BPF_FUNC_map_update_elem = 2, + BPF_FUNC_map_delete_elem = 3, + BPF_FUNC_probe_read = 4, + BPF_FUNC_ktime_get_ns = 5, + BPF_FUNC_trace_printk = 6, + BPF_FUNC_get_prandom_u32 = 7, + BPF_FUNC_get_smp_processor_id = 8, + BPF_FUNC_skb_store_bytes = 9, + BPF_FUNC_l3_csum_replace = 10, + BPF_FUNC_l4_csum_replace = 11, + BPF_FUNC_tail_call = 12, + BPF_FUNC_clone_redirect = 13, + BPF_FUNC_get_current_pid_tgid = 14, + BPF_FUNC_get_current_uid_gid = 15, + BPF_FUNC_get_current_comm = 16, + BPF_FUNC_get_cgroup_classid = 17, + BPF_FUNC_skb_vlan_push = 18, + BPF_FUNC_skb_vlan_pop = 19, + BPF_FUNC_skb_get_tunnel_key = 20, + BPF_FUNC_skb_set_tunnel_key = 21, + BPF_FUNC_perf_event_read = 22, + BPF_FUNC_redirect = 23, + BPF_FUNC_get_route_realm = 24, + BPF_FUNC_perf_event_output = 25, + BPF_FUNC_skb_load_bytes = 26, + BPF_FUNC_get_stackid = 27, + BPF_FUNC_csum_diff = 28, + BPF_FUNC_skb_get_tunnel_opt = 29, + BPF_FUNC_skb_set_tunnel_opt = 30, + BPF_FUNC_skb_change_proto = 31, + BPF_FUNC_skb_change_type = 32, + BPF_FUNC_skb_under_cgroup = 33, + BPF_FUNC_get_hash_recalc = 34, + BPF_FUNC_get_current_task = 35, + BPF_FUNC_probe_write_user = 36, + BPF_FUNC_current_task_under_cgroup = 37, + BPF_FUNC_skb_change_tail = 38, + BPF_FUNC_skb_pull_data = 39, + BPF_FUNC_csum_update = 40, + BPF_FUNC_set_hash_invalid = 41, + BPF_FUNC_get_numa_node_id = 42, + BPF_FUNC_skb_change_head = 43, + BPF_FUNC_xdp_adjust_head = 44, + BPF_FUNC_probe_read_str = 45, + BPF_FUNC_get_socket_cookie = 46, + BPF_FUNC_get_socket_uid = 47, + BPF_FUNC_set_hash = 48, + BPF_FUNC_setsockopt = 49, + BPF_FUNC_skb_adjust_room = 50, + BPF_FUNC_redirect_map = 51, + BPF_FUNC_sk_redirect_map = 52, + BPF_FUNC_sock_map_update = 53, + BPF_FUNC_xdp_adjust_meta = 54, + BPF_FUNC_perf_event_read_value = 55, + BPF_FUNC_perf_prog_read_value = 56, + BPF_FUNC_getsockopt = 57, + BPF_FUNC_override_return = 58, + BPF_FUNC_sock_ops_cb_flags_set = 59, + BPF_FUNC_msg_redirect_map = 60, + BPF_FUNC_msg_apply_bytes = 61, + BPF_FUNC_msg_cork_bytes = 62, + 
BPF_FUNC_msg_pull_data = 63, + BPF_FUNC_bind = 64, + BPF_FUNC_xdp_adjust_tail = 65, + BPF_FUNC_skb_get_xfrm_state = 66, + BPF_FUNC_get_stack = 67, + BPF_FUNC_skb_load_bytes_relative = 68, + BPF_FUNC_fib_lookup = 69, + BPF_FUNC_sock_hash_update = 70, + BPF_FUNC_msg_redirect_hash = 71, + BPF_FUNC_sk_redirect_hash = 72, + BPF_FUNC_lwt_push_encap = 73, + BPF_FUNC_lwt_seg6_store_bytes = 74, + BPF_FUNC_lwt_seg6_adjust_srh = 75, + BPF_FUNC_lwt_seg6_action = 76, + BPF_FUNC_rc_repeat = 77, + BPF_FUNC_rc_keydown = 78, + BPF_FUNC_skb_cgroup_id = 79, + BPF_FUNC_get_current_cgroup_id = 80, + BPF_FUNC_get_local_storage = 81, + BPF_FUNC_sk_select_reuseport = 82, + BPF_FUNC_skb_ancestor_cgroup_id = 83, + BPF_FUNC_sk_lookup_tcp = 84, + BPF_FUNC_sk_lookup_udp = 85, + BPF_FUNC_sk_release = 86, + BPF_FUNC_map_push_elem = 87, + BPF_FUNC_map_pop_elem = 88, + BPF_FUNC_map_peek_elem = 89, + BPF_FUNC_msg_push_data = 90, + BPF_FUNC_msg_pop_data = 91, + BPF_FUNC_rc_pointer_rel = 92, + BPF_FUNC_spin_lock = 93, + BPF_FUNC_spin_unlock = 94, + BPF_FUNC_sk_fullsock = 95, + BPF_FUNC_tcp_sock = 96, + BPF_FUNC_skb_ecn_set_ce = 97, + BPF_FUNC_get_listener_sock = 98, + BPF_FUNC_skc_lookup_tcp = 99, + BPF_FUNC_tcp_check_syncookie = 100, + BPF_FUNC_sysctl_get_name = 101, + BPF_FUNC_sysctl_get_current_value = 102, + BPF_FUNC_sysctl_get_new_value = 103, + BPF_FUNC_sysctl_set_new_value = 104, + BPF_FUNC_strtol = 105, + BPF_FUNC_strtoul = 106, + BPF_FUNC_sk_storage_get = 107, + BPF_FUNC_sk_storage_delete = 108, + BPF_FUNC_send_signal = 109, + BPF_FUNC_tcp_gen_syncookie = 110, + BPF_FUNC_skb_output = 111, + BPF_FUNC_probe_read_user = 112, + BPF_FUNC_probe_read_kernel = 113, + BPF_FUNC_probe_read_user_str = 114, + BPF_FUNC_probe_read_kernel_str = 115, + BPF_FUNC_tcp_send_ack = 116, + BPF_FUNC_send_signal_thread = 117, + BPF_FUNC_jiffies64 = 118, + BPF_FUNC_read_branch_records = 119, + BPF_FUNC_get_ns_current_pid_tgid = 120, + BPF_FUNC_xdp_output = 121, + BPF_FUNC_get_netns_cookie = 122, + BPF_FUNC_get_current_ancestor_cgroup_id = 123, + BPF_FUNC_sk_assign = 124, + BPF_FUNC_ktime_get_boot_ns = 125, + BPF_FUNC_seq_printf = 126, + BPF_FUNC_seq_write = 127, + BPF_FUNC_sk_cgroup_id = 128, + BPF_FUNC_sk_ancestor_cgroup_id = 129, + BPF_FUNC_ringbuf_output = 130, + BPF_FUNC_ringbuf_reserve = 131, + BPF_FUNC_ringbuf_submit = 132, + BPF_FUNC_ringbuf_discard = 133, + BPF_FUNC_ringbuf_query = 134, + BPF_FUNC_csum_level = 135, + BPF_FUNC_skc_to_tcp6_sock = 136, + BPF_FUNC_skc_to_tcp_sock = 137, + BPF_FUNC_skc_to_tcp_timewait_sock = 138, + BPF_FUNC_skc_to_tcp_request_sock = 139, + BPF_FUNC_skc_to_udp6_sock = 140, + BPF_FUNC_get_task_stack = 141, + BPF_FUNC_load_hdr_opt = 142, + BPF_FUNC_store_hdr_opt = 143, + BPF_FUNC_reserve_hdr_opt = 144, + BPF_FUNC_inode_storage_get = 145, + BPF_FUNC_inode_storage_delete = 146, + BPF_FUNC_d_path = 147, + BPF_FUNC_copy_from_user = 148, + BPF_FUNC_snprintf_btf = 149, + BPF_FUNC_seq_printf_btf = 150, + BPF_FUNC_skb_cgroup_classid = 151, + BPF_FUNC_redirect_neigh = 152, + BPF_FUNC_per_cpu_ptr = 153, + BPF_FUNC_this_cpu_ptr = 154, + BPF_FUNC_redirect_peer = 155, + BPF_FUNC_task_storage_get = 156, + BPF_FUNC_task_storage_delete = 157, + BPF_FUNC_get_current_task_btf = 158, + BPF_FUNC_bprm_opts_set = 159, + BPF_FUNC_ktime_get_coarse_ns = 160, + BPF_FUNC_ima_inode_hash = 161, + BPF_FUNC_sock_from_file = 162, + BPF_FUNC_check_mtu = 163, + BPF_FUNC_for_each_map_elem = 164, + BPF_FUNC_snprintf = 165, + BPF_FUNC_sys_bpf = 166, + BPF_FUNC_btf_find_by_name_kind = 167, + BPF_FUNC_sys_close = 168, + BPF_FUNC_timer_init = 169, 
+ BPF_FUNC_timer_set_callback = 170, + BPF_FUNC_timer_start = 171, + BPF_FUNC_timer_cancel = 172, + BPF_FUNC_get_func_ip = 173, + BPF_FUNC_get_attach_cookie = 174, + BPF_FUNC_task_pt_regs = 175, + BPF_FUNC_get_branch_snapshot = 176, + BPF_FUNC_trace_vprintk = 177, + BPF_FUNC_skc_to_unix_sock = 178, + BPF_FUNC_kallsyms_lookup_name = 179, + BPF_FUNC_find_vma = 180, + BPF_FUNC_loop = 181, + BPF_FUNC_strncmp = 182, + BPF_FUNC_get_func_arg = 183, + BPF_FUNC_get_func_ret = 184, + BPF_FUNC_get_func_arg_cnt = 185, + BPF_FUNC_get_retval = 186, + BPF_FUNC_set_retval = 187, + BPF_FUNC_xdp_get_buff_len = 188, + BPF_FUNC_xdp_load_bytes = 189, + BPF_FUNC_xdp_store_bytes = 190, + BPF_FUNC_copy_from_user_task = 191, + BPF_FUNC_skb_set_tstamp = 192, + BPF_FUNC_ima_file_hash = 193, + BPF_FUNC_kptr_xchg = 194, + BPF_FUNC_map_lookup_percpu_elem = 195, + BPF_FUNC_skc_to_mptcp_sock = 196, + BPF_FUNC_dynptr_from_mem = 197, + BPF_FUNC_ringbuf_reserve_dynptr = 198, + BPF_FUNC_ringbuf_submit_dynptr = 199, + BPF_FUNC_ringbuf_discard_dynptr = 200, + BPF_FUNC_dynptr_read = 201, + BPF_FUNC_dynptr_write = 202, + BPF_FUNC_dynptr_data = 203, + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 = 204, + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 = 205, + BPF_FUNC_tcp_raw_check_syncookie_ipv4 = 206, + BPF_FUNC_tcp_raw_check_syncookie_ipv6 = 207, + BPF_FUNC_ktime_get_tai_ns = 208, + BPF_FUNC_user_ringbuf_drain = 209, + BPF_FUNC_cgrp_storage_get = 210, + BPF_FUNC_cgrp_storage_delete = 211, + __BPF_FUNC_MAX_ID = 212, +}; + +typedef sockptr_t bpfptr_t; + +enum { + BTF_TRACING_TYPE_TASK = 0, + BTF_TRACING_TYPE_FILE = 1, + BTF_TRACING_TYPE_VMA = 2, + MAX_BTF_TRACING_TYPE = 3, +}; + +struct btf_member { + __u32 name_off; + __u32 type; + __u32 offset; +}; + +struct btf_struct_meta { + u32 btf_id; + struct btf_record *record; +}; + +struct bpf_verifier_log { + u64 start_pos; + u64 end_pos; + char *ubuf; + u32 level; + u32 len_total; + u32 len_max; + char kbuf[1024]; +}; + +enum priv_stack_mode { + PRIV_STACK_UNKNOWN = 0, + NO_PRIV_STACK = 1, + PRIV_STACK_ADAPTIVE = 2, +}; + +struct bpf_subprog_arg_info { + enum bpf_arg_type arg_type; + union { + u32 mem_size; + u32 btf_id; + }; +}; + +struct bpf_subprog_info { + u32 start; + u32 linfo_idx; + u16 stack_depth; + u16 stack_extra; + s16 fastcall_stack_off; + bool has_tail_call: 1; + bool tail_call_reachable: 1; + bool has_ld_abs: 1; + bool is_cb: 1; + bool is_async_cb: 1; + bool is_exception_cb: 1; + bool args_cached: 1; + bool keep_fastcall_stack: 1; + enum priv_stack_mode priv_stack_mode; + u8 arg_cnt; + struct bpf_subprog_arg_info args[5]; +}; + +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +struct bpf_idmap { + u32 tmp_id_gen; + struct bpf_id_pair map[600]; +}; + +struct bpf_idset { + u32 count; + u32 ids[600]; +}; + +struct backtrack_state { + struct bpf_verifier_env *env; + u32 frame; + u32 reg_masks[8]; + u64 stack_masks[8]; +}; + +enum bpf_dynptr_type { + BPF_DYNPTR_TYPE_INVALID = 0, + BPF_DYNPTR_TYPE_LOCAL = 1, + BPF_DYNPTR_TYPE_RINGBUF = 2, + BPF_DYNPTR_TYPE_SKB = 3, + BPF_DYNPTR_TYPE_XDP = 4, +}; + +enum bpf_iter_state { + BPF_ITER_STATE_INVALID = 0, + BPF_ITER_STATE_ACTIVE = 1, + BPF_ITER_STATE_DRAINED = 2, +}; + +struct tnum { + u64 value; + u64 mask; +}; + +enum bpf_reg_liveness { + REG_LIVE_NONE = 0, + REG_LIVE_READ32 = 1, + REG_LIVE_READ64 = 2, + REG_LIVE_READ = 3, + REG_LIVE_WRITTEN = 4, + REG_LIVE_DONE = 8, +}; + +struct bpf_reg_state { + enum bpf_reg_type type; + s32 off; + union { + int range; + struct { + struct bpf_map *map_ptr; + u32 map_uid; + }; + struct { + struct btf *btf; + 
u32 btf_id; + }; + struct { + u32 mem_size; + u32 dynptr_id; + }; + struct { + enum bpf_dynptr_type type; + bool first_slot; + } dynptr; + struct { + struct btf *btf; + u32 btf_id; + enum bpf_iter_state state: 2; + int depth: 30; + } iter; + struct { + long unsigned int raw1; + long unsigned int raw2; + } raw; + u32 subprogno; + }; + long: 32; + struct tnum var_off; + s64 smin_value; + s64 smax_value; + u64 umin_value; + u64 umax_value; + s32 s32_min_value; + s32 s32_max_value; + u32 u32_min_value; + u32 u32_max_value; + u32 id; + u32 ref_obj_id; + struct bpf_reg_state *parent; + u32 frameno; + s32 subreg_def; + enum bpf_reg_liveness live; + bool precise; + long: 32; +}; + +struct bpf_verifier_ops; + +struct bpf_verifier_stack_elem; + +struct bpf_verifier_state; + +struct bpf_verifier_state_list; + +struct bpf_insn_aux_data; + +struct bpf_insn_hist_entry; + +struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; + struct bpf_prog *prog; + const struct bpf_verifier_ops *ops; + struct module *attach_btf_mod; + struct bpf_verifier_stack_elem *head; + int stack_size; + bool strict_alignment; + bool test_state_freq; + bool test_reg_invariants; + struct bpf_verifier_state *cur_state; + struct bpf_verifier_state_list **explored_states; + struct bpf_verifier_state_list *free_list; + struct bpf_map *used_maps[64]; + struct btf_mod_pair used_btfs[64]; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 id_gen; + u32 hidden_subprog_cnt; + int exception_callback_subprog; + bool explore_alu_limits; + bool allow_ptr_leaks; + bool allow_uninit_stack; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; + bool seen_direct_write; + bool seen_exception; + struct bpf_insn_aux_data *insn_aux_data; + const struct bpf_line_info *prev_linfo; + struct bpf_verifier_log log; + struct bpf_subprog_info subprog_info[258]; + union { + struct bpf_idmap idmap_scratch; + struct bpf_idset idset_scratch; + }; + struct { + int *insn_state; + int *insn_stack; + int cur_stack; + } cfg; + struct backtrack_state bt; + struct bpf_insn_hist_entry *insn_hist; + struct bpf_insn_hist_entry *cur_hist_ent; + u32 insn_hist_cap; + u32 pass_cnt; + u32 subprog_cnt; + u32 prev_insn_processed; + u32 insn_processed; + u32 prev_jmps_processed; + u32 jmps_processed; + long: 32; + u64 verification_time; + u32 max_states_per_insn; + u32 total_states; + u32 peak_states; + u32 longest_mark_read_walk; + bpfptr_t fd_array; + u32 scratched_regs; + long: 32; + u64 scratched_stack_slots; + u64 prev_log_pos; + u64 prev_insn_print_pos; + struct bpf_reg_state fake_reg[2]; + char tmp_str_buf[320]; + struct bpf_insn insn_buf[32]; + struct bpf_insn epilogue_buf[32]; +}; + +struct bpf_retval_range { + s32 minval; + s32 maxval; +}; + +struct bpf_reference_state; + +struct bpf_stack_state; + +struct bpf_func_state { + struct bpf_reg_state regs[11]; + int callsite; + u32 frameno; + u32 subprogno; + u32 async_entry_cnt; + struct bpf_retval_range callback_ret_range; + bool in_callback_fn; + bool in_async_callback_fn; + bool in_exception_callback_fn; + u32 callback_depth; + int acquired_refs; + int active_locks; + struct bpf_reference_state *refs; + struct bpf_stack_state *stack; + int allocated_stack; + long: 32; +}; + +struct bpf_func_proto { + u64 (*func)(u64, u64, u64, u64, u64); + bool gpl_only; + bool pkt_access; + bool might_sleep; + bool allow_fastcall; + enum bpf_return_type ret_type; + union { + struct { + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum 
bpf_arg_type arg5_type; + }; + enum bpf_arg_type arg_type[5]; + }; + union { + struct { + u32 *arg1_btf_id; + u32 *arg2_btf_id; + u32 *arg3_btf_id; + u32 *arg4_btf_id; + u32 *arg5_btf_id; + }; + u32 *arg_btf_id[5]; + struct { + size_t arg1_size; + size_t arg2_size; + size_t arg3_size; + size_t arg4_size; + size_t arg5_size; + }; + size_t arg_size[5]; + }; + int *ret_btf_id; + bool (*allowed)(const struct bpf_prog *); +}; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2, +}; + +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + bool is_ldsx; + union { + int ctx_field_size; + struct { + struct btf *btf; + u32 btf_id; + }; + }; + struct bpf_verifier_log *log; + bool is_retval; +}; + +struct bpf_verifier_ops { + const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *); + bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *); + int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); + int (*gen_epilogue)(struct bpf_insn *, const struct bpf_prog *, s16); + int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); + u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + int (*btf_struct_access)(struct bpf_verifier_log *, const struct bpf_reg_state *, int, int); +}; + +struct bpf_struct_ops { + const struct bpf_verifier_ops *verifier_ops; + int (*init)(struct btf *); + int (*check_member)(const struct btf_type *, const struct btf_member *, const struct bpf_prog *); + int (*init_member)(const struct btf_type *, const struct btf_member *, void *, const void *); + int (*reg)(void *, struct bpf_link *); + void (*unreg)(void *, struct bpf_link *); + int (*update)(void *, void *, struct bpf_link *); + int (*validate)(void *); + void *cfi_stubs; + struct module *owner; + const char *name; + struct btf_func_model func_models[64]; +}; + +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT = 0, + BPF_STRUCT_OPS_STATE_INUSE = 1, + BPF_STRUCT_OPS_STATE_TOBEFREE = 2, + BPF_STRUCT_OPS_STATE_READY = 3, +}; + +struct bpf_struct_ops_common_value { + refcount_t refcnt; + enum bpf_struct_ops_state state; +}; + +struct bpf_bprintf_data { + u32 *bin_args; + char *buf; + bool get_bin_args; + bool get_buf; +}; + +struct cpudl_item { + u64 dl; + int cpu; + int idx; +}; + +enum scx_rq_flags { + SCX_RQ_ONLINE = 1, + SCX_RQ_CAN_STOP_TICK = 2, + SCX_RQ_BAL_PENDING = 4, + SCX_RQ_BAL_KEEP = 8, + SCX_RQ_BYPASSING = 16, + SCX_RQ_IN_WAKEUP = 65536, + SCX_RQ_IN_BALANCE = 131072, +}; + +struct perf_domain { + struct em_perf_domain *em_pd; + struct perf_domain *next; + struct callback_head rcu; +}; + +struct rq_flags { + long unsigned int flags; + struct pin_cookie cookie; + unsigned int clock_update_flags; +}; + +typedef struct { + struct task_struct *lock; + struct rq *rq; + struct rq_flags rf; +} class_task_rq_lock_t; + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_t; + +enum { + __SCHED_FEAT_PLACE_LAG = 0, + __SCHED_FEAT_PLACE_DEADLINE_INITIAL = 1, + __SCHED_FEAT_PLACE_REL_DEADLINE = 2, + __SCHED_FEAT_RUN_TO_PARITY = 3, + __SCHED_FEAT_PREEMPT_SHORT = 4, + __SCHED_FEAT_NEXT_BUDDY = 5, + __SCHED_FEAT_CACHE_HOT_BUDDY = 6, + __SCHED_FEAT_DELAY_DEQUEUE = 7, + __SCHED_FEAT_DELAY_ZERO = 8, + __SCHED_FEAT_WAKEUP_PREEMPTION = 9, + __SCHED_FEAT_HRTICK = 10, + __SCHED_FEAT_HRTICK_DL = 11, + __SCHED_FEAT_NONTASK_CAPACITY = 12, + __SCHED_FEAT_TTWU_QUEUE = 13, + __SCHED_FEAT_SIS_UTIL = 14, + 
__SCHED_FEAT_WARN_DOUBLE_CLOCK = 15, + __SCHED_FEAT_RT_PUSH_IPI = 16, + __SCHED_FEAT_RT_RUNTIME_SHARE = 17, + __SCHED_FEAT_LB_MIN = 18, + __SCHED_FEAT_ATTACH_AGE_LOAD = 19, + __SCHED_FEAT_WA_IDLE = 20, + __SCHED_FEAT_WA_WEIGHT = 21, + __SCHED_FEAT_WA_BIAS = 22, + __SCHED_FEAT_UTIL_EST = 23, + __SCHED_FEAT_LATENCY_WARN = 24, + __SCHED_FEAT_NR = 25, +}; + +struct affinity_context { + const struct cpumask *new_mask; + struct cpumask *user_mask; + unsigned int flags; +}; + +typedef struct { + struct rq *lock; + struct rq *lock2; +} class_double_rq_lock_t; + +struct sched_enq_and_set_ctx { + struct task_struct *p; + int queue_flags; + bool queued; + bool running; +}; + +struct idle_timer { + struct hrtimer timer; + int done; + long: 32; +}; + +typedef struct rt_rq *rt_rq_iter_t; + +enum dl_bw_request { + dl_bw_req_check_overflow = 0, + dl_bw_req_alloc = 1, + dl_bw_req_free = 2, +}; + +enum scx_consts { + SCX_DSP_DFL_MAX_BATCH = 32, + SCX_DSP_MAX_LOOPS = 32, + SCX_WATCHDOG_MAX_TIMEOUT = 30000, + SCX_EXIT_BT_LEN = 64, + SCX_EXIT_MSG_LEN = 1024, + SCX_EXIT_DUMP_DFL_LEN = 32768, + SCX_CPUPERF_ONE = 1024, + SCX_OPS_TASK_ITER_BATCH = 32, +}; + +enum scx_exit_kind { + SCX_EXIT_NONE = 0, + SCX_EXIT_DONE = 1, + SCX_EXIT_UNREG = 64, + SCX_EXIT_UNREG_BPF = 65, + SCX_EXIT_UNREG_KERN = 66, + SCX_EXIT_SYSRQ = 67, + SCX_EXIT_ERROR = 1024, + SCX_EXIT_ERROR_BPF = 1025, + SCX_EXIT_ERROR_STALL = 1026, +}; + +enum scx_exit_code { + SCX_ECODE_RSN_HOTPLUG = 4294967296ULL, + SCX_ECODE_ACT_RESTART = 281474976710656ULL, +}; + +struct scx_exit_info { + enum scx_exit_kind kind; + long: 32; + s64 exit_code; + const char *reason; + long unsigned int *bt; + u32 bt_len; + char *msg; + char *dump; + long: 32; +}; + +enum scx_ops_flags { + SCX_OPS_KEEP_BUILTIN_IDLE = 1, + SCX_OPS_ENQ_LAST = 2, + SCX_OPS_ENQ_EXITING = 4, + SCX_OPS_SWITCH_PARTIAL = 8, + SCX_OPS_HAS_CGROUP_WEIGHT = 65536, + SCX_OPS_ALL_FLAGS = 65551, +}; + +struct scx_init_task_args { + bool fork; + struct cgroup *cgroup; +}; + +struct scx_exit_task_args { + bool cancelled; +}; + +struct scx_cgroup_init_args { + u32 weight; +}; + +enum scx_cpu_preempt_reason { + SCX_CPU_PREEMPT_RT = 0, + SCX_CPU_PREEMPT_DL = 1, + SCX_CPU_PREEMPT_STOP = 2, + SCX_CPU_PREEMPT_UNKNOWN = 3, +}; + +struct scx_cpu_acquire_args {}; + +struct scx_cpu_release_args { + enum scx_cpu_preempt_reason reason; + struct task_struct *task; +}; + +struct scx_dump_ctx { + enum scx_exit_kind kind; + long: 32; + s64 exit_code; + const char *reason; + long: 32; + u64 at_ns; + u64 at_jiffies; +}; + +struct sched_ext_ops { + s32 (*select_cpu)(struct task_struct *, s32, u64); + void (*enqueue)(struct task_struct *, u64); + void (*dequeue)(struct task_struct *, u64); + void (*dispatch)(s32, struct task_struct *); + void (*tick)(struct task_struct *); + void (*runnable)(struct task_struct *, u64); + void (*running)(struct task_struct *); + void (*stopping)(struct task_struct *, bool); + void (*quiescent)(struct task_struct *, u64); + bool (*yield)(struct task_struct *, struct task_struct *); + bool (*core_sched_before)(struct task_struct *, struct task_struct *); + void (*set_weight)(struct task_struct *, u32); + void (*set_cpumask)(struct task_struct *, const struct cpumask *); + void (*update_idle)(s32, bool); + void (*cpu_acquire)(s32, struct scx_cpu_acquire_args *); + void (*cpu_release)(s32, struct scx_cpu_release_args *); + s32 (*init_task)(struct task_struct *, struct scx_init_task_args *); + void (*exit_task)(struct task_struct *, struct scx_exit_task_args *); + void (*enable)(struct task_struct *); 
+ void (*disable)(struct task_struct *); + void (*dump)(struct scx_dump_ctx *); + void (*dump_cpu)(struct scx_dump_ctx *, s32, bool); + void (*dump_task)(struct scx_dump_ctx *, struct task_struct *); + s32 (*cgroup_init)(struct cgroup *, struct scx_cgroup_init_args *); + void (*cgroup_exit)(struct cgroup *); + s32 (*cgroup_prep_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_cancel_move)(struct task_struct *, struct cgroup *, struct cgroup *); + void (*cgroup_set_weight)(struct cgroup *, u32); + void (*cpu_online)(s32); + void (*cpu_offline)(s32); + s32 (*init)(); + void (*exit)(struct scx_exit_info *); + u32 dispatch_max_batch; + u64 flags; + u32 timeout_ms; + u32 exit_dump_len; + u64 hotplug_seq; + char name[128]; +}; + +enum scx_opi { + SCX_OPI_BEGIN = 0, + SCX_OPI_NORMAL_BEGIN = 0, + SCX_OPI_NORMAL_END = 29, + SCX_OPI_CPU_HOTPLUG_BEGIN = 29, + SCX_OPI_CPU_HOTPLUG_END = 31, + SCX_OPI_END = 31, +}; + +enum scx_wake_flags { + SCX_WAKE_FORK = 4, + SCX_WAKE_TTWU = 8, + SCX_WAKE_SYNC = 16, +}; + +enum scx_enq_flags { + SCX_ENQ_WAKEUP = 1ULL, + SCX_ENQ_HEAD = 16ULL, + SCX_ENQ_CPU_SELECTED = 1024ULL, + SCX_ENQ_PREEMPT = 4294967296ULL, + SCX_ENQ_REENQ = 1099511627776ULL, + SCX_ENQ_LAST = 2199023255552ULL, + __SCX_ENQ_INTERNAL_MASK = 18374686479671623680ULL, + SCX_ENQ_CLEAR_OPSS = 72057594037927936ULL, + SCX_ENQ_DSQ_PRIQ = 144115188075855872ULL, +}; + +enum scx_deq_flags { + SCX_DEQ_SLEEP = 1ULL, + SCX_DEQ_CORE_SCHED_EXEC = 4294967296ULL, +}; + +enum scx_pick_idle_cpu_flags { + SCX_PICK_IDLE_CORE = 1, +}; + +enum scx_kick_flags { + SCX_KICK_IDLE = 1, + SCX_KICK_PREEMPT = 2, + SCX_KICK_WAIT = 4, +}; + +enum scx_tg_flags { + SCX_TG_ONLINE = 1, + SCX_TG_INITED = 2, +}; + +enum scx_ops_enable_state { + SCX_OPS_ENABLING = 0, + SCX_OPS_ENABLED = 1, + SCX_OPS_DISABLING = 2, + SCX_OPS_DISABLED = 3, +}; + +enum scx_ops_state { + SCX_OPSS_NONE = 0, + SCX_OPSS_QUEUEING = 1, + SCX_OPSS_QUEUED = 2, + SCX_OPSS_DISPATCHING = 3, + SCX_OPSS_QSEQ_SHIFT = 2, +}; + +struct scx_dsp_buf_ent { + struct task_struct *task; + long unsigned int qseq; + u64 dsq_id; + u64 enq_flags; +}; + +struct scx_dsp_ctx { + struct rq *rq; + u32 cursor; + u32 nr_tasks; + long: 32; + struct scx_dsp_buf_ent buf[0]; +}; + +struct scx_bstr_buf { + u64 data[12]; + char line[1024]; +}; + +struct scx_dump_data { + s32 cpu; + bool first; + s32 cursor; + struct seq_buf *s; + const char *prefix; + long: 32; + struct scx_bstr_buf buf; +}; + +struct trace_event_raw_sched_ext_dump { + struct trace_entry ent; + u32 __data_loc_line; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_ext_dump { + u32 line; + const void *line_ptr_; +}; + +typedef void (*btf_trace_sched_ext_dump)(void *, const char *); + +enum scx_dsq_iter_flags { + SCX_DSQ_ITER_REV = 65536, + __SCX_DSQ_ITER_HAS_SLICE = 1073741824, + __SCX_DSQ_ITER_HAS_VTIME = 2147483648, + __SCX_DSQ_ITER_USER_FLAGS = 65536, + __SCX_DSQ_ITER_ALL_FLAGS = 3221291008, +}; + +struct bpf_iter_scx_dsq_kern { + struct scx_dsq_list_node cursor; + struct scx_dispatch_q *dsq; + long: 32; + u64 slice; + u64 vtime; +}; + +struct bpf_iter_scx_dsq { + u64 __opaque[6]; +}; + +struct scx_task_iter { + struct sched_ext_entity cursor; + struct task_struct *locked; + struct rq *rq; + struct rq_flags rf; + u32 cnt; + long: 32; +}; + +struct bpf_stack_state { + struct bpf_reg_state spilled_ptr; + u8 slot_type[8]; +}; + +enum ref_state_type { + REF_TYPE_PTR = 0, + REF_TYPE_LOCK = 1, +}; + +struct 
bpf_reference_state { + enum ref_state_type type; + int id; + int insn_idx; + void *ptr; +}; + +enum { + INSN_F_FRAMENO_MASK = 7, + INSN_F_SPI_MASK = 63, + INSN_F_SPI_SHIFT = 3, + INSN_F_STACK_ACCESS = 512, +}; + +struct bpf_insn_hist_entry { + u32 idx; + u32 prev_idx: 22; + u32 flags: 10; + u64 linked_regs; +}; + +struct bpf_verifier_state { + struct bpf_func_state *frame[8]; + struct bpf_verifier_state *parent; + u32 branches; + u32 insn_idx; + u32 curframe; + bool speculative; + bool active_rcu_lock; + u32 active_preempt_lock; + bool used_as_loop_entry; + bool in_sleepable; + u32 first_insn_idx; + u32 last_insn_idx; + struct bpf_verifier_state *loop_entry; + u32 insn_hist_start; + u32 insn_hist_end; + u32 dfs_depth; + u32 callback_unroll_depth; + u32 may_goto_depth; +}; + +struct bpf_verifier_state_list { + struct bpf_verifier_state state; + struct bpf_verifier_state_list *next; + int miss_cnt; + int hit_cnt; +}; + +struct bpf_loop_inline_state { + unsigned int initialized: 1; + unsigned int fit_for_inline: 1; + u32 callback_subprogno; +}; + +struct bpf_map_ptr_state { + struct bpf_map *map_ptr; + bool poison; + bool unpriv; +}; + +struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; + struct bpf_map_ptr_state map_ptr_state; + s32 call_imm; + u32 alu_limit; + struct { + u32 map_index; + u32 map_off; + }; + struct { + enum bpf_reg_type reg_type; + union { + struct { + struct btf *btf; + u32 btf_id; + }; + u32 mem_size; + }; + } btf_var; + struct bpf_loop_inline_state loop_inline_state; + }; + long: 32; + union { + u64 obj_new_size; + u64 insert_off; + }; + struct btf_struct_meta *kptr_struct_meta; + long: 32; + u64 map_key_state; + int ctx_field_size; + u32 seen; + bool sanitize_stack_spill; + bool zext_dst; + bool needs_zext; + bool storage_get_func_atomic; + bool is_iter_next; + bool call_with_percpu_alloc_ptr; + u8 alu_state; + u8 fastcall_pattern: 1; + u8 fastcall_spills_num: 3; + unsigned int orig_idx; + bool jmp_point; + bool prune_point; + bool force_checkpoint; + bool calls_callback; +}; + +typedef struct task_struct *class_find_get_task_t; + +struct bpf_struct_ops_sched_ext_ops { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sched_ext_ops data; +}; + +enum bpf_task_fd_type { + BPF_FD_TYPE_RAW_TRACEPOINT = 0, + BPF_FD_TYPE_TRACEPOINT = 1, + BPF_FD_TYPE_KPROBE = 2, + BPF_FD_TYPE_KRETPROBE = 3, + BPF_FD_TYPE_UPROBE = 4, + BPF_FD_TYPE_URETPROBE = 5, +}; + +struct uprobe_consumer { + int (*handler)(struct uprobe_consumer *, struct pt_regs *, __u64 *); + int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *, __u64 *); + bool (*filter)(struct uprobe_consumer *, struct mm_struct *); + struct list_head cons_node; + long: 32; + __u64 id; +}; + +struct btf_param { + __u32 name_off; + __u32 type; +}; + +typedef unsigned int (*bpf_func_t)(const void *, const struct bpf_insn *); + +struct bpf_trace_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + bool is_uprobe; + long: 32; +}; + +typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *, const void *); + +enum lockdown_reason { + LOCKDOWN_NONE = 0, + LOCKDOWN_MODULE_SIGNATURE = 1, + LOCKDOWN_DEV_MEM = 2, + LOCKDOWN_EFI_TEST = 3, + LOCKDOWN_KEXEC = 4, + LOCKDOWN_HIBERNATION = 5, + LOCKDOWN_PCI_ACCESS = 6, + LOCKDOWN_IOPORT = 7, + LOCKDOWN_MSR = 8, + LOCKDOWN_ACPI_TABLES = 9, + LOCKDOWN_DEVICE_TREE = 10, + LOCKDOWN_PCMCIA_CIS = 11, + LOCKDOWN_TIOCSSERIAL = 12, + LOCKDOWN_MODULE_PARAMETERS = 13, + LOCKDOWN_MMIOTRACE = 
14, + LOCKDOWN_DEBUGFS = 15, + LOCKDOWN_XMON_WR = 16, + LOCKDOWN_BPF_WRITE_USER = 17, + LOCKDOWN_DBG_WRITE_KERNEL = 18, + LOCKDOWN_RTAS_ERROR_INJECTION = 19, + LOCKDOWN_INTEGRITY_MAX = 20, + LOCKDOWN_KCORE = 21, + LOCKDOWN_KPROBES = 22, + LOCKDOWN_BPF_READ_KERNEL = 23, + LOCKDOWN_DBG_READ_KERNEL = 24, + LOCKDOWN_PERF = 25, + LOCKDOWN_TRACEFS = 26, + LOCKDOWN_XMON_RW = 27, + LOCKDOWN_XFRM_SECRET = 28, + LOCKDOWN_CONFIDENTIALITY_MAX = 29, +}; + +typedef unsigned int (*bpf_dispatcher_fn)(const void *, const struct bpf_insn *, unsigned int (*)(const void *, const struct bpf_insn *)); + +struct prog_entry; + +struct event_filter { + struct prog_entry *prog; + char *filter_string; +}; + +struct trace_array_cpu; + +struct array_buffer { + struct trace_array *tr; + struct trace_buffer *buffer; + struct trace_array_cpu *data; + long: 32; + u64 time_start; + int cpu; + long: 32; +}; + +struct trace_pid_list; + +struct trace_options; + +struct trace_func_repeats; + +struct trace_array { + struct list_head list; + char *name; + long: 32; + struct array_buffer array_buffer; + unsigned int mapped; + long unsigned int range_addr_start; + long unsigned int range_addr_size; + long int text_delta; + long int data_delta; + struct trace_pid_list *filtered_pids; + struct trace_pid_list *filtered_no_pids; + arch_spinlock_t max_lock; + int buffer_disabled; + int sys_refcount_enter; + int sys_refcount_exit; + struct trace_event_file *enter_syscall_files[467]; + struct trace_event_file *exit_syscall_files[467]; + int stop_count; + int clock_id; + int nr_topts; + bool clear_trace; + int buffer_percent; + unsigned int n_err_log_entries; + struct tracer *current_trace; + unsigned int trace_flags; + unsigned char trace_flags_index[32]; + unsigned int flags; + raw_spinlock_t start_lock; + const char *system_names; + struct list_head err_log; + struct dentry *dir; + struct dentry *options; + struct dentry *percpu_dir; + struct eventfs_inode *event_dir; + struct trace_options *topts; + struct list_head systems; + struct list_head events; + struct trace_event_file *trace_marker_file; + cpumask_var_t tracing_cpumask; + cpumask_var_t pipe_cpumask; + int ref; + int trace_ref; + int no_filter_buffering_ref; + struct list_head hist_vars; + struct trace_func_repeats *last_func_repeats; + bool ring_buffer_expanded; + long: 32; +}; + +struct tracer_flags; + +struct tracer { + const char *name; + int (*init)(struct trace_array *); + void (*reset)(struct trace_array *); + void (*start)(struct trace_array *); + void (*stop)(struct trace_array *); + int (*update_thresh)(struct trace_array *); + void (*open)(struct trace_iterator *); + void (*pipe_open)(struct trace_iterator *); + void (*close)(struct trace_iterator *); + void (*pipe_close)(struct trace_iterator *); + ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *); + ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*print_header)(struct seq_file *); + enum print_line_t (*print_line)(struct trace_iterator *); + int (*set_flag)(struct trace_array *, u32, u32, int); + int (*flag_changed)(struct trace_array *, u32, int); + struct tracer *next; + struct tracer_flags *flags; + int enabled; + bool print_max; + bool allow_instances; + bool noboot; +}; + +struct event_subsystem; + +struct trace_subsystem_dir { + struct list_head list; + struct event_subsystem *subsystem; + struct trace_array *tr; + struct eventfs_inode *ei; + int ref_count; + int nr_events; +}; + +union 
lower_chunk { + union lower_chunk *next; + long unsigned int data[512]; +}; + +union upper_chunk { + union upper_chunk *next; + union lower_chunk *data[256]; +}; + +struct trace_pid_list { + raw_spinlock_t lock; + struct irq_work refill_irqwork; + union upper_chunk *upper[256]; + union upper_chunk *upper_list; + union lower_chunk *lower_list; + int free_upper_chunks; + int free_lower_chunks; +}; + +struct trace_array_cpu { + atomic_t disabled; + void *buffer_page; + long unsigned int entries; + long unsigned int saved_latency; + long unsigned int critical_start; + long unsigned int critical_end; + long unsigned int critical_sequence; + long unsigned int nice; + long unsigned int policy; + long unsigned int rt_priority; + long unsigned int skipped_entries; + long: 32; + u64 preempt_timestamp; + pid_t pid; + kuid_t uid; + char comm[16]; + bool ignore_pid; + long: 32; +}; + +struct trace_option_dentry; + +struct trace_options { + struct tracer *tracer; + struct trace_option_dentry *topts; +}; + +struct tracer_opt; + +struct trace_option_dentry { + struct tracer_opt *opt; + struct tracer_flags *flags; + struct trace_array *tr; + struct dentry *entry; +}; + +struct trace_func_repeats { + long unsigned int ip; + long unsigned int parent_ip; + long unsigned int count; + long: 32; + u64 ts_last_call; +}; + +struct tracer_opt { + const char *name; + u32 bit; +}; + +struct tracer_flags { + u32 val; + struct tracer_opt *opts; + struct tracer *trace; +}; + +enum trace_iterator_bits { + TRACE_ITER_PRINT_PARENT_BIT = 0, + TRACE_ITER_SYM_OFFSET_BIT = 1, + TRACE_ITER_SYM_ADDR_BIT = 2, + TRACE_ITER_VERBOSE_BIT = 3, + TRACE_ITER_RAW_BIT = 4, + TRACE_ITER_HEX_BIT = 5, + TRACE_ITER_BIN_BIT = 6, + TRACE_ITER_BLOCK_BIT = 7, + TRACE_ITER_FIELDS_BIT = 8, + TRACE_ITER_PRINTK_BIT = 9, + TRACE_ITER_ANNOTATE_BIT = 10, + TRACE_ITER_USERSTACKTRACE_BIT = 11, + TRACE_ITER_SYM_USEROBJ_BIT = 12, + TRACE_ITER_PRINTK_MSGONLY_BIT = 13, + TRACE_ITER_CONTEXT_INFO_BIT = 14, + TRACE_ITER_LATENCY_FMT_BIT = 15, + TRACE_ITER_RECORD_CMD_BIT = 16, + TRACE_ITER_RECORD_TGID_BIT = 17, + TRACE_ITER_OVERWRITE_BIT = 18, + TRACE_ITER_STOP_ON_FREE_BIT = 19, + TRACE_ITER_IRQ_INFO_BIT = 20, + TRACE_ITER_MARKERS_BIT = 21, + TRACE_ITER_EVENT_FORK_BIT = 22, + TRACE_ITER_TRACE_PRINTK_BIT = 23, + TRACE_ITER_PAUSE_ON_TRACE_BIT = 24, + TRACE_ITER_HASH_PTR_BIT = 25, + TRACE_ITER_STACKTRACE_BIT = 26, + TRACE_ITER_LAST_BIT = 27, +}; + +struct event_subsystem { + struct list_head list; + const char *name; + struct event_filter *filter; + int ref_count; +}; + +struct dyn_event; + +struct dyn_event_operations { + struct list_head list; + int (*create)(const char *); + int (*show)(struct seq_file *, struct dyn_event *); + bool (*is_busy)(struct dyn_event *); + int (*free)(struct dyn_event *); + bool (*match)(const char *, const char *, int, const char **, struct dyn_event *); +}; + +struct dyn_event { + struct list_head list; + struct dyn_event_operations *ops; +}; + +typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); + +enum fetch_op { + FETCH_OP_NOP = 0, + FETCH_OP_REG = 1, + FETCH_OP_STACK = 2, + FETCH_OP_STACKP = 3, + FETCH_OP_RETVAL = 4, + FETCH_OP_IMM = 5, + FETCH_OP_COMM = 6, + FETCH_OP_ARG = 7, + FETCH_OP_FOFFS = 8, + FETCH_OP_DATA = 9, + FETCH_OP_EDATA = 10, + FETCH_OP_DEREF = 11, + FETCH_OP_UDEREF = 12, + FETCH_OP_ST_RAW = 13, + FETCH_OP_ST_MEM = 14, + FETCH_OP_ST_UMEM = 15, + FETCH_OP_ST_STRING = 16, + FETCH_OP_ST_USTRING = 17, + FETCH_OP_ST_SYMSTR = 18, + FETCH_OP_ST_EDATA = 19, + FETCH_OP_MOD_BF = 20, + FETCH_OP_LP_ARRAY = 21, 
+ FETCH_OP_TP_ARG = 22, + FETCH_OP_END = 23, + FETCH_NOP_SYMBOL = 24, +}; + +struct fetch_insn { + enum fetch_op op; + union { + unsigned int param; + struct { + unsigned int size; + int offset; + }; + struct { + unsigned char basesize; + unsigned char lshift; + unsigned char rshift; + }; + long unsigned int immediate; + void *data; + }; +}; + +struct fetch_type { + const char *name; + size_t size; + bool is_signed; + bool is_string; + print_type_func_t print; + const char *fmt; + const char *fmttype; +}; + +struct probe_arg { + struct fetch_insn *code; + bool dynamic; + unsigned int offset; + unsigned int count; + const char *name; + const char *comm; + char *fmt; + const struct fetch_type *type; +}; + +struct probe_entry_arg { + struct fetch_insn *code; + unsigned int size; +}; + +struct trace_uprobe_filter { + rwlock_t rwlock; + int nr_systemwide; + struct list_head perf_events; +}; + +struct trace_probe_event { + unsigned int flags; + struct trace_event_class class; + struct trace_event_call call; + struct list_head files; + struct list_head probes; + struct trace_uprobe_filter filter[0]; +}; + +struct trace_probe { + struct list_head list; + struct trace_probe_event *event; + ssize_t size; + unsigned int nr_args; + struct probe_entry_arg *entry_arg; + struct probe_arg args[0]; +}; + +struct event_file_link { + struct trace_event_file *file; + struct list_head list; +}; + +struct traceprobe_parse_context { + struct trace_event_call *event; + const char *funcname; + const struct btf_type *proto; + const struct btf_param *params; + s32 nr_params; + struct btf *btf; + const struct btf_type *last_type; + u32 last_bitoffs; + u32 last_bitsize; + struct trace_probe *tp; + unsigned int flags; + int offset; +}; + +enum probe_print_type { + PROBE_PRINT_NORMAL = 0, + PROBE_PRINT_RETURN = 1, + PROBE_PRINT_EVENT = 2, +}; + +enum { + TP_ERR_FILE_NOT_FOUND = 0, + TP_ERR_NO_REGULAR_FILE = 1, + TP_ERR_BAD_REFCNT = 2, + TP_ERR_REFCNT_OPEN_BRACE = 3, + TP_ERR_BAD_REFCNT_SUFFIX = 4, + TP_ERR_BAD_UPROBE_OFFS = 5, + TP_ERR_BAD_MAXACT_TYPE = 6, + TP_ERR_BAD_MAXACT = 7, + TP_ERR_MAXACT_TOO_BIG = 8, + TP_ERR_BAD_PROBE_ADDR = 9, + TP_ERR_NON_UNIQ_SYMBOL = 10, + TP_ERR_BAD_RETPROBE = 11, + TP_ERR_NO_TRACEPOINT = 12, + TP_ERR_BAD_ADDR_SUFFIX = 13, + TP_ERR_NO_GROUP_NAME = 14, + TP_ERR_GROUP_TOO_LONG = 15, + TP_ERR_BAD_GROUP_NAME = 16, + TP_ERR_NO_EVENT_NAME = 17, + TP_ERR_EVENT_TOO_LONG = 18, + TP_ERR_BAD_EVENT_NAME = 19, + TP_ERR_EVENT_EXIST = 20, + TP_ERR_RETVAL_ON_PROBE = 21, + TP_ERR_NO_RETVAL = 22, + TP_ERR_BAD_STACK_NUM = 23, + TP_ERR_BAD_ARG_NUM = 24, + TP_ERR_BAD_VAR = 25, + TP_ERR_BAD_REG_NAME = 26, + TP_ERR_BAD_MEM_ADDR = 27, + TP_ERR_BAD_IMM = 28, + TP_ERR_IMMSTR_NO_CLOSE = 29, + TP_ERR_FILE_ON_KPROBE = 30, + TP_ERR_BAD_FILE_OFFS = 31, + TP_ERR_SYM_ON_UPROBE = 32, + TP_ERR_TOO_MANY_OPS = 33, + TP_ERR_DEREF_NEED_BRACE = 34, + TP_ERR_BAD_DEREF_OFFS = 35, + TP_ERR_DEREF_OPEN_BRACE = 36, + TP_ERR_COMM_CANT_DEREF = 37, + TP_ERR_BAD_FETCH_ARG = 38, + TP_ERR_ARRAY_NO_CLOSE = 39, + TP_ERR_BAD_ARRAY_SUFFIX = 40, + TP_ERR_BAD_ARRAY_NUM = 41, + TP_ERR_ARRAY_TOO_BIG = 42, + TP_ERR_BAD_TYPE = 43, + TP_ERR_BAD_STRING = 44, + TP_ERR_BAD_SYMSTRING = 45, + TP_ERR_BAD_BITFIELD = 46, + TP_ERR_ARG_NAME_TOO_LONG = 47, + TP_ERR_NO_ARG_NAME = 48, + TP_ERR_BAD_ARG_NAME = 49, + TP_ERR_USED_ARG_NAME = 50, + TP_ERR_ARG_TOO_LONG = 51, + TP_ERR_NO_ARG_BODY = 52, + TP_ERR_BAD_INSN_BNDRY = 53, + TP_ERR_FAIL_REG_PROBE = 54, + TP_ERR_DIFF_PROBE_TYPE = 55, + TP_ERR_DIFF_ARG_TYPE = 56, + TP_ERR_SAME_PROBE = 57, + TP_ERR_NO_EVENT_INFO = 
58, + TP_ERR_BAD_ATTACH_EVENT = 59, + TP_ERR_BAD_ATTACH_ARG = 60, + TP_ERR_NO_EP_FILTER = 61, + TP_ERR_NOSUP_BTFARG = 62, + TP_ERR_NO_BTFARG = 63, + TP_ERR_NO_BTF_ENTRY = 64, + TP_ERR_BAD_VAR_ARGS = 65, + TP_ERR_NOFENTRY_ARGS = 66, + TP_ERR_DOUBLE_ARGS = 67, + TP_ERR_ARGS_2LONG = 68, + TP_ERR_ARGIDX_2BIG = 69, + TP_ERR_NO_PTR_STRCT = 70, + TP_ERR_NOSUP_DAT_ARG = 71, + TP_ERR_BAD_HYPHEN = 72, + TP_ERR_NO_BTF_FIELD = 73, + TP_ERR_BAD_BTF_TID = 74, + TP_ERR_BAD_TYPE4STR = 75, + TP_ERR_NEED_STRING_TYPE = 76, +}; + +struct trace_uprobe; + +struct uprobe_dispatch_data { + struct trace_uprobe *tu; + long unsigned int bp_addr; +}; + +struct trace_uprobe { + struct dyn_event devent; + long: 32; + struct uprobe_consumer consumer; + struct path path; + char *filename; + struct uprobe *uprobe; + long unsigned int offset; + long unsigned int ref_ctr_offset; + long unsigned int *nhits; + struct trace_probe tp; + long: 32; +}; + +struct uprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int vaddr[0]; +}; + +struct uprobe_cpu_buffer { + struct mutex mutex; + void *buf; + int dsize; +}; + +typedef bool (*filter_func_t)(struct uprobe_consumer *, struct mm_struct *); + +struct bpf_mem_caches; + +struct bpf_mem_cache; + +struct bpf_mem_alloc { + struct bpf_mem_caches *caches; + struct bpf_mem_cache *cache; + struct obj_cgroup *objcg; + bool percpu; + struct work_struct work; +}; + +struct bpf_mem_cache { + struct llist_head free_llist; + local_t active; + struct llist_head free_llist_extra; + struct irq_work refill_work; + struct obj_cgroup *objcg; + int unit_size; + int free_cnt; + int low_watermark; + int high_watermark; + int batch; + int percpu_size; + bool draining; + struct bpf_mem_cache *tgt; + struct llist_head free_by_rcu; + struct llist_node *free_by_rcu_tail; + struct llist_head waiting_for_gp; + struct llist_node *waiting_for_gp_tail; + struct callback_head rcu; + atomic_t call_rcu_in_progress; + struct llist_head free_llist_extra_rcu; + struct llist_head free_by_rcu_ttrace; + struct llist_head waiting_for_gp_ttrace; + struct callback_head rcu_ttrace; + atomic_t call_rcu_ttrace_in_progress; +}; + +struct bpf_mem_caches { + struct bpf_mem_cache cache[11]; +}; + +struct seqcount_rwlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_rwlock seqcount_rwlock_t; + +typedef struct { + struct srcu_struct *lock; + int idx; +} class_srcu_t; + +typedef u32 uprobe_opcode_t; + +struct uprobe { + struct rb_node rb_node; + refcount_t ref; + struct rw_semaphore register_rwsem; + struct rw_semaphore consumer_rwsem; + struct list_head pending_list; + struct list_head consumers; + struct inode *inode; + union { + struct callback_head rcu; + struct work_struct work; + }; + long: 32; + loff_t offset; + loff_t ref_ctr_offset; + long unsigned int flags; + struct arch_uprobe arch; + long: 32; +}; + +enum rp_check { + RP_CHECK_CALL = 0, + RP_CHECK_CHAIN_CALL = 1, + RP_CHECK_RET = 2, +}; + +struct xol_area { + wait_queue_head_t wq; + long unsigned int *bitmap; + struct page *page; + long unsigned int vaddr; +}; + +struct vm_special_mapping { + const char *name; + struct page **pages; + vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); + int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); + void (*close)(const struct vm_special_mapping *, struct vm_area_struct *); +}; + +enum pageflags { + PG_locked = 0, + PG_writeback = 1, + PG_referenced = 2, + PG_uptodate = 3, + PG_dirty = 4, + PG_lru = 5, + PG_head = 6, + PG_waiters = 7, + 
PG_active = 8, + PG_workingset = 9, + PG_owner_priv_1 = 10, + PG_owner_2 = 11, + PG_arch_1 = 12, + PG_reserved = 13, + PG_private = 14, + PG_private_2 = 15, + PG_reclaim = 16, + PG_swapbacked = 17, + PG_unevictable = 18, + PG_mlocked = 19, + __NR_PAGEFLAGS = 20, + PG_readahead = 16, + PG_swapcache = 10, + PG_checked = 10, + PG_anon_exclusive = 11, + PG_mappedtodisk = 11, + PG_fscache = 15, + PG_pinned = 10, + PG_savepinned = 4, + PG_foreign = 10, + PG_xen_remapped = 10, + PG_isolated = 16, + PG_reported = 3, + PG_has_hwpoisoned = 8, + PG_large_rmappable = 9, + PG_partially_mapped = 16, +}; + +enum pagetype { + PGTY_buddy = 240, + PGTY_offline = 241, + PGTY_table = 242, + PGTY_guard = 243, + PGTY_hugetlb = 244, + PGTY_slab = 245, + PGTY_zsmalloc = 246, + PGTY_unaccepted = 247, + PGTY_mapcount_underflow = 255, +}; + +typedef int filler_t(struct file *, struct folio *); + +typedef unsigned int fgf_t; + +typedef int rmap_t; + +struct page_vma_mapped_walk { + long unsigned int pfn; + long unsigned int nr_pages; + long unsigned int pgoff; + struct vm_area_struct *vma; + long unsigned int address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; + +struct mmu_notifier_range { + long unsigned int start; + long unsigned int end; +}; + +typedef void (*task_work_func_t)(struct callback_head *); + +struct delayed_uprobe { + struct list_head list; + struct uprobe *uprobe; + struct mm_struct *mm; +}; + +struct __uprobe_key { + struct inode *inode; + long: 32; + loff_t offset; +}; + +struct map_info { + struct map_info *next; + struct mm_struct *mm; + long unsigned int vaddr; +}; + +enum { + MMU_FTRS_POSSIBLE = 67073, +}; + +enum migrate_reason { + MR_COMPACTION = 0, + MR_MEMORY_FAILURE = 1, + MR_MEMORY_HOTPLUG = 2, + MR_SYSCALL = 3, + MR_MEMPOLICY_MBIND = 4, + MR_NUMA_MISPLACED = 5, + MR_CONTIG_RANGE = 6, + MR_LONGTERM_PIN = 7, + MR_DEMOTION = 8, + MR_DAMON = 9, + MR_TYPES = 10, +}; + +struct encoded_page; + +enum page_walk_lock { + PGWALK_RDLOCK = 0, + PGWALK_WRLOCK = 1, + PGWALK_WRLOCK_VERIFY = 2, +}; + +struct mm_walk; + +struct mm_walk_ops { + int (*pgd_entry)(pgd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*p4d_entry)(p4d_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pud_entry)(pud_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_hole)(long unsigned int, long unsigned int, int, struct mm_walk *); + int (*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *); + int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *); + int (*pre_vma)(long unsigned int, long unsigned int, struct mm_walk *); + void (*post_vma)(struct mm_walk *); + int (*install_pte)(long unsigned int, long unsigned int, pte_t *, struct mm_walk *); + enum page_walk_lock walk_lock; +}; + +enum page_walk_action { + ACTION_SUBTREE = 0, + ACTION_CONTINUE = 1, + ACTION_AGAIN = 2, +}; + +struct mm_walk { + const struct mm_walk_ops *ops; + struct mm_struct *mm; + pgd_t *pgd; + struct vm_area_struct *vma; + enum page_walk_action action; + bool no_vma; + void *private; +}; + +typedef long unsigned int pte_marker; + +struct mmu_table_batch { + struct callback_head rcu; + unsigned int nr; + void *tables[0]; +}; + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct 
encoded_page *encoded_pages[0]; +}; + +struct mmu_gather { + struct mm_struct *mm; + struct mmu_table_batch *batch; + long unsigned int start; + long unsigned int end; + unsigned int fullmm: 1; + unsigned int need_flush_all: 1; + unsigned int freed_tables: 1; + unsigned int delayed_rmap: 1; + unsigned int cleared_ptes: 1; + unsigned int cleared_pmds: 1; + unsigned int cleared_puds: 1; + unsigned int cleared_p4ds: 1; + unsigned int vma_exec: 1; + unsigned int vma_huge: 1; + unsigned int vma_pfn: 1; + unsigned int batch_count; + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[8]; + unsigned int page_size; +}; + +struct fsnotify_mark_connector { + spinlock_t lock; + unsigned char type; + unsigned char prio; + short unsigned int flags; + union { + void *obj; + struct fsnotify_mark_connector *destroy_next; + }; + struct hlist_head list; +}; + +struct fsnotify_sb_info { + struct fsnotify_mark_connector *sb_marks; + atomic_long_t watched_objects[3]; +}; + +struct fsnotify_group; + +struct fsnotify_iter_info; + +struct fsnotify_mark; + +struct fsnotify_event; + +struct fsnotify_ops { + int (*handle_event)(struct fsnotify_group *, u32, const void *, int, struct inode *, const struct qstr *, u32, struct fsnotify_iter_info *); + int (*handle_inode_event)(struct fsnotify_mark *, u32, struct inode *, struct inode *, const struct qstr *, u32); + void (*free_group_priv)(struct fsnotify_group *); + void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); + void (*free_event)(struct fsnotify_group *, struct fsnotify_event *); + void (*free_mark)(struct fsnotify_mark *); +}; + +enum fsnotify_group_prio { + FSNOTIFY_PRIO_NORMAL = 0, + FSNOTIFY_PRIO_CONTENT = 1, + FSNOTIFY_PRIO_PRE_CONTENT = 2, + __FSNOTIFY_PRIO_NUM = 3, +}; + +struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + struct ucounts *ucounts; +}; + +struct fsnotify_group { + const struct fsnotify_ops *ops; + refcount_t refcnt; + spinlock_t notification_lock; + struct list_head notification_list; + wait_queue_head_t notification_waitq; + unsigned int q_len; + unsigned int max_events; + enum fsnotify_group_prio priority; + bool shutdown; + int flags; + unsigned int owner_flags; + struct mutex mark_mutex; + atomic_t user_waits; + struct list_head marks_list; + struct fasync_struct *fsn_fa; + struct fsnotify_event *overflow_event; + struct mem_cgroup *memcg; + union { + void *private; + struct inotify_group_private_data inotify_data; + }; +}; + +struct fsnotify_iter_info { + struct fsnotify_mark *marks[5]; + struct fsnotify_group *current_group; + unsigned int report_mask; + int srcu_idx; +}; + +struct fsnotify_mark { + __u32 mask; + refcount_t refcnt; + struct fsnotify_group *group; + struct list_head g_list; + spinlock_t lock; + struct hlist_node obj_list; + struct fsnotify_mark_connector *connector; + __u32 ignore_mask; + unsigned int flags; +}; + +struct fsnotify_event { + struct list_head list; +}; + +enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_ANY = -1, + FSNOTIFY_OBJ_TYPE_INODE = 0, + FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1, + FSNOTIFY_OBJ_TYPE_SB = 2, + FSNOTIFY_OBJ_TYPE_COUNT = 3, + FSNOTIFY_OBJ_TYPE_DETACHED = 3, +}; + +struct inotify_event { + __s32 wd; + __u32 mask; + __u32 cookie; + __u32 len; + char name[0]; +}; + +struct inotify_event_info { + struct fsnotify_event fse; + u32 mask; + int wd; + u32 sync_cookie; + int name_len; + char name[0]; +}; + +struct inotify_inode_mark { + struct fsnotify_mark fsn_mark; + int wd; +}; + +typedef u32 nlink_t; + +enum { + 
PROC_ENTRY_PERMANENT = 1, +}; + +struct proc_ops { + unsigned int proc_flags; + int (*proc_open)(struct inode *, struct file *); + ssize_t (*proc_read)(struct file *, char *, size_t, loff_t *); + ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*proc_write)(struct file *, const char *, size_t, loff_t *); + loff_t (*proc_lseek)(struct file *, loff_t, int); + int (*proc_release)(struct inode *, struct file *); + __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); + long int (*proc_ioctl)(struct file *, unsigned int, long unsigned int); + int (*proc_mmap)(struct file *, struct vm_area_struct *); + long unsigned int (*proc_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); +}; + +typedef int (*proc_write_t)(struct file *, char *, size_t); + +enum vm_event_item { + PGPGIN = 0, + PGPGOUT = 1, + PSWPIN = 2, + PSWPOUT = 3, + PGALLOC_DMA = 4, + PGALLOC_NORMAL = 5, + PGALLOC_MOVABLE = 6, + ALLOCSTALL_DMA = 7, + ALLOCSTALL_NORMAL = 8, + ALLOCSTALL_MOVABLE = 9, + PGSCAN_SKIP_DMA = 10, + PGSCAN_SKIP_NORMAL = 11, + PGSCAN_SKIP_MOVABLE = 12, + PGFREE = 13, + PGACTIVATE = 14, + PGDEACTIVATE = 15, + PGLAZYFREE = 16, + PGFAULT = 17, + PGMAJFAULT = 18, + PGLAZYFREED = 19, + PGREFILL = 20, + PGREUSE = 21, + PGSTEAL_KSWAPD = 22, + PGSTEAL_DIRECT = 23, + PGSTEAL_KHUGEPAGED = 24, + PGSCAN_KSWAPD = 25, + PGSCAN_DIRECT = 26, + PGSCAN_KHUGEPAGED = 27, + PGSCAN_DIRECT_THROTTLE = 28, + PGSCAN_ANON = 29, + PGSCAN_FILE = 30, + PGSTEAL_ANON = 31, + PGSTEAL_FILE = 32, + PGINODESTEAL = 33, + SLABS_SCANNED = 34, + KSWAPD_INODESTEAL = 35, + KSWAPD_LOW_WMARK_HIT_QUICKLY = 36, + KSWAPD_HIGH_WMARK_HIT_QUICKLY = 37, + PAGEOUTRUN = 38, + PGROTATED = 39, + DROP_PAGECACHE = 40, + DROP_SLAB = 41, + OOM_KILL = 42, + PGMIGRATE_SUCCESS = 43, + PGMIGRATE_FAIL = 44, + THP_MIGRATION_SUCCESS = 45, + THP_MIGRATION_FAIL = 46, + THP_MIGRATION_SPLIT = 47, + COMPACTMIGRATE_SCANNED = 48, + COMPACTFREE_SCANNED = 49, + COMPACTISOLATED = 50, + COMPACTSTALL = 51, + COMPACTFAIL = 52, + COMPACTSUCCESS = 53, + KCOMPACTD_WAKE = 54, + KCOMPACTD_MIGRATE_SCANNED = 55, + KCOMPACTD_FREE_SCANNED = 56, + CMA_ALLOC_SUCCESS = 57, + CMA_ALLOC_FAIL = 58, + UNEVICTABLE_PGCULLED = 59, + UNEVICTABLE_PGSCANNED = 60, + UNEVICTABLE_PGRESCUED = 61, + UNEVICTABLE_PGMLOCKED = 62, + UNEVICTABLE_PGMUNLOCKED = 63, + UNEVICTABLE_PGCLEARED = 64, + UNEVICTABLE_PGSTRANDED = 65, + SWAP_RA = 66, + SWAP_RA_HIT = 67, + SWPIN_ZERO = 68, + SWPOUT_ZERO = 69, + ZSWPIN = 70, + ZSWPOUT = 71, + ZSWPWB = 72, + NR_VM_EVENT_ITEMS = 73, +}; + +struct proc_dir_entry { + atomic_t in_use; + refcount_t refcnt; + struct list_head pde_openers; + spinlock_t pde_unload_lock; + struct completion *pde_unload_completion; + const struct inode_operations *proc_iops; + union { + const struct proc_ops *proc_ops; + const struct file_operations *proc_dir_ops; + }; + const struct dentry_operations *proc_dops; + union { + const struct seq_operations *seq_ops; + int (*single_show)(struct seq_file *, void *); + }; + proc_write_t write; + void *data; + unsigned int state_size; + unsigned int low_ino; + nlink_t nlink; + kuid_t uid; + kgid_t gid; + long: 32; + loff_t size; + struct proc_dir_entry *parent; + struct rb_root subdir; + struct rb_node subdir_node; + char *name; + umode_t mode; + u8 flags; + u8 namelen; + char inline_name[0]; + long: 32; +}; + +struct nsset { + unsigned int flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +enum kernfs_root_flag { + 
KERNFS_ROOT_CREATE_DEACTIVATED = 1, + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, + KERNFS_ROOT_SUPPORT_EXPORTOP = 4, + KERNFS_ROOT_SUPPORT_USER_XATTR = 8, +}; + +struct kernfs_syscall_ops { + int (*show_options)(struct seq_file *, struct kernfs_root *); + int (*mkdir)(struct kernfs_node *, const char *, umode_t); + int (*rmdir)(struct kernfs_node *); + int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); + int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); +}; + +struct kernfs_fs_context { + struct kernfs_root *root; + void *ns_tag; + long unsigned int magic; + bool new_sb_created; +}; + +struct xattr; + +typedef int (*initxattrs)(struct inode *, const struct xattr *, void *); + +struct xattr { + const char *name; + void *value; + size_t value_len; +}; + +struct jbd2_journal_handle; + +typedef struct jbd2_journal_handle handle_t; + +struct jbd2_journal_handle { + union { + transaction_t *h_transaction; + journal_t *h_journal; + }; + handle_t *h_rsv_handle; + int h_total_credits; + int h_revoke_credits; + int h_revoke_credits_requested; + int h_ref; + int h_err; + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; + long unsigned int h_start_jiffies; + unsigned int h_requested_credits; + unsigned int saved_alloc_context; +}; + +typedef unsigned int autofs_wqt_t; + +struct autofs_sb_info; + +struct autofs_info { + struct dentry *dentry; + int flags; + struct completion expire_complete; + struct list_head active; + struct list_head expiring; + struct autofs_sb_info *sbi; + long unsigned int exp_timeout; + long unsigned int last_used; + int count; + kuid_t uid; + kgid_t gid; + struct callback_head rcu; +}; + +struct autofs_wait_queue; + +struct autofs_sb_info { + u32 magic; + int pipefd; + struct file *pipe; + struct pid *oz_pgrp; + int version; + int sub_version; + int min_proto; + int max_proto; + unsigned int flags; + long unsigned int exp_timeout; + unsigned int type; + struct super_block *sb; + struct mutex wq_mutex; + struct mutex pipe_mutex; + spinlock_t fs_lock; + struct autofs_wait_queue *queues; + spinlock_t lookup_lock; + struct list_head active_list; + struct list_head expiring_list; + struct callback_head rcu; +}; + +struct autofs_wait_queue { + wait_queue_head_t queue; + struct autofs_wait_queue *next; + autofs_wqt_t wait_queue_token; + long: 32; + struct qstr name; + u32 offset; + u32 dev; + u64 ino; + kuid_t uid; + kgid_t gid; + pid_t pid; + pid_t tgid; + int status; + unsigned int wait_ctr; +}; + +struct btrfs_balance_args { + __u64 profiles; + union { + __u64 usage; + struct { + __u32 usage_min; + __u32 usage_max; + }; + }; + __u64 devid; + __u64 pstart; + __u64 pend; + __u64 vstart; + __u64 vend; + __u64 target; + __u64 flags; + union { + __u64 limit; + struct { + __u32 limit_min; + __u32 limit_max; + }; + }; + __u32 stripes_min; + __u32 stripes_max; + __u64 unused[6]; +}; + +struct btrfs_balance_progress { + __u64 expected; + __u64 considered; + __u64 completed; +}; + +enum btrfs_csum_type { + BTRFS_CSUM_TYPE_CRC32 = 0, + BTRFS_CSUM_TYPE_XXHASH = 1, + BTRFS_CSUM_TYPE_SHA256 = 2, + BTRFS_CSUM_TYPE_BLAKE2 = 3, +}; + +struct rcu_string; + +struct btrfs_zoned_device_info; + +struct scrub_ctx; + +struct btrfs_device { + struct list_head dev_list; + struct list_head dev_alloc_list; + struct list_head post_commit_list; + struct btrfs_fs_devices *fs_devices; + struct btrfs_fs_info *fs_info; + struct rcu_string *name; + 
long: 32; + u64 generation; + struct file *bdev_file; + struct block_device *bdev; + struct btrfs_zoned_device_info *zone_info; + dev_t devt; + long unsigned int dev_state; + blk_status_t last_flush_error; + seqcount_t data_seqcount; + long: 32; + u64 devid; + u64 total_bytes; + u64 disk_total_bytes; + u64 bytes_used; + u32 io_align; + u32 io_width; + u64 type; + atomic_t sb_write_errors; + u32 sector_size; + u8 uuid[16]; + u64 commit_total_bytes; + u64 commit_bytes_used; + struct bio flush_bio; + struct completion flush_wait; + struct scrub_ctx *scrub_ctx; + int dev_stats_valid; + atomic_t dev_stats_ccnt; + atomic_t dev_stat_values[5]; + struct extent_io_tree alloc_state; + struct completion kobj_unregister; + struct kobject devid_kobj; + long: 32; + u64 scrub_speed_max; +}; + +enum btrfs_chunk_allocation_policy { + BTRFS_CHUNK_ALLOC_REGULAR = 0, + BTRFS_CHUNK_ALLOC_ZONED = 1, +}; + +enum btrfs_read_policy { + BTRFS_READ_POLICY_PID = 0, + BTRFS_NR_READ_POLICY = 1, +}; + +struct btrfs_fs_devices { + u8 fsid[16]; + u8 metadata_uuid[16]; + struct list_head fs_list; + u64 num_devices; + u64 open_devices; + u64 rw_devices; + u64 missing_devices; + u64 total_rw_bytes; + u64 total_devices; + u64 latest_generation; + struct btrfs_device *latest_dev; + struct mutex device_list_mutex; + struct list_head devices; + struct list_head alloc_list; + struct list_head seed_list; + int opened; + bool rotating; + bool discardable; + bool seeding; + bool temp_fsid; + struct btrfs_fs_info *fs_info; + struct kobject fsid_kobj; + struct kobject *devices_kobj; + struct kobject *devinfo_kobj; + struct completion kobj_unregister; + enum btrfs_chunk_allocation_policy chunk_alloc_policy; + enum btrfs_read_policy read_policy; +}; + +struct btrfs_balance_control { + struct btrfs_balance_args data; + struct btrfs_balance_args meta; + struct btrfs_balance_args sys; + u64 flags; + struct btrfs_balance_progress stat; +}; + +struct btrfs_map_token { + struct extent_buffer *eb; + char *kaddr; + long unsigned int offset; +}; + +struct rcu_string { + struct callback_head rcu; + char str[0]; +}; + +enum btrfs_raid_types { + BTRFS_RAID_SINGLE = 0, + BTRFS_RAID_RAID0 = 1, + BTRFS_RAID_RAID1 = 2, + BTRFS_RAID_DUP = 3, + BTRFS_RAID_RAID10 = 4, + BTRFS_RAID_RAID5 = 5, + BTRFS_RAID_RAID6 = 6, + BTRFS_RAID_RAID1C3 = 7, + BTRFS_RAID_RAID1C4 = 8, + BTRFS_NR_RAID_TYPES = 9, +}; + +struct btrfs_io_context; + +struct btrfs_io_stripe { + struct btrfs_device *dev; + long: 32; + u64 physical; + u64 length; + bool rst_search_commit_root; + struct btrfs_io_context *bioc; +}; + +struct btrfs_io_context { + refcount_t refs; + struct btrfs_fs_info *fs_info; + u64 map_type; + struct bio *orig_bio; + atomic_t error; + u16 max_errors; + long: 32; + u64 logical; + u64 size; + struct list_head rst_ordered_entry; + u16 num_stripes; + u16 mirror_num; + u16 replace_nr_stripes; + s16 replace_stripe_src; + u64 full_stripe_logical; + struct btrfs_io_stripe stripes[0]; +}; + +struct btrfs_chunk_map { + struct rb_node rb_node; + int verified_stripes; + refcount_t refs; + long: 32; + u64 start; + u64 chunk_len; + u64 stripe_size; + u64 type; + int io_align; + int io_width; + int num_stripes; + int sub_stripes; + struct btrfs_io_stripe stripes[0]; +}; + +enum btrfs_qgroup_rsv_type { + BTRFS_QGROUP_RSV_DATA = 0, + BTRFS_QGROUP_RSV_META_PERTRANS = 1, + BTRFS_QGROUP_RSV_META_PREALLOC = 2, + BTRFS_QGROUP_RSV_LAST = 3, +}; + +enum btrfs_mod_log_op { + BTRFS_MOD_LOG_KEY_REPLACE = 0, + BTRFS_MOD_LOG_KEY_ADD = 1, + BTRFS_MOD_LOG_KEY_REMOVE = 2, + 
BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING = 3, + BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING = 4, + BTRFS_MOD_LOG_MOVE_KEYS = 5, + BTRFS_MOD_LOG_ROOT_REPLACE = 6, +}; + +struct btrfs_csums { + u16 size; + const char name[10]; + const char driver[12]; +}; + +typedef int (*list_cmp_func_t)(void *, const struct list_head *, const struct list_head *); + +struct wait_bit_key { + long unsigned int *flags; + int bit_nr; + long unsigned int timeout; +}; + +struct wait_bit_queue_entry { + struct wait_bit_key key; + struct wait_queue_entry wq_entry; +}; + +struct btrfs_dev_extent { + __le64 chunk_tree; + __le64 chunk_objectid; + __le64 chunk_offset; + __le64 length; + __u8 chunk_tree_uuid[16]; +}; + +struct btrfs_block_group_item { + __le64 used; + __le64 chunk_objectid; + __le64 flags; +}; + +struct btrfs_space_info { + struct btrfs_fs_info *fs_info; + spinlock_t lock; + u64 total_bytes; + u64 bytes_used; + u64 bytes_pinned; + u64 bytes_reserved; + u64 bytes_may_use; + u64 bytes_readonly; + u64 bytes_zone_unusable; + u64 max_extent_size; + u64 chunk_size; + int bg_reclaim_threshold; + int clamp; + unsigned int full: 1; + unsigned int chunk_alloc: 1; + unsigned int flush: 1; + unsigned int force_alloc; + u64 disk_used; + u64 disk_total; + u64 flags; + struct list_head list; + struct list_head ro_bgs; + struct list_head priority_tickets; + struct list_head tickets; + u64 reclaim_size; + u64 tickets_id; + struct rw_semaphore groups_sem; + struct list_head block_groups[9]; + struct kobject kobj; + struct kobject *block_group_kobjs[9]; + u64 reclaim_count; + u64 reclaim_bytes; + u64 reclaim_errors; + bool dynamic_reclaim; + bool periodic_reclaim; + bool periodic_reclaim_ready; + long: 32; + s64 reclaimable_bytes; +}; + +enum { + BTRFS_FS_CLOSING_START = 0, + BTRFS_FS_CLOSING_DONE = 1, + BTRFS_FS_LOG_RECOVERING = 2, + BTRFS_FS_OPEN = 3, + BTRFS_FS_QUOTA_ENABLED = 4, + BTRFS_FS_UPDATE_UUID_TREE_GEN = 5, + BTRFS_FS_CREATING_FREE_SPACE_TREE = 6, + BTRFS_FS_BTREE_ERR = 7, + BTRFS_FS_LOG1_ERR = 8, + BTRFS_FS_LOG2_ERR = 9, + BTRFS_FS_QUOTA_OVERRIDE = 10, + BTRFS_FS_FROZEN = 11, + BTRFS_FS_BALANCE_RUNNING = 12, + BTRFS_FS_RELOC_RUNNING = 13, + BTRFS_FS_CLEANER_RUNNING = 14, + BTRFS_FS_CSUM_IMPL_FAST = 15, + BTRFS_FS_DISCARD_RUNNING = 16, + BTRFS_FS_CLEANUP_SPACE_CACHE_V1 = 17, + BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED = 18, + BTRFS_FS_TREE_MOD_LOG_USERS = 19, + BTRFS_FS_COMMIT_TRANS = 20, + BTRFS_FS_UNFINISHED_DROPS = 21, + BTRFS_FS_NEED_ZONE_FINISH = 22, + BTRFS_FS_NEED_TRANS_COMMIT = 23, + BTRFS_FS_ACTIVE_ZONE_TRACKING = 24, + BTRFS_FS_FEATURE_CHANGED = 25, + BTRFS_FS_UNALIGNED_TREE_BLOCK = 26, + BTRFS_FS_32BIT_ERROR = 27, + BTRFS_FS_32BIT_WARN = 28, +}; + +enum { + BTRFS_MOUNT_NODATASUM = 1ULL, + BTRFS_MOUNT_NODATACOW = 2ULL, + BTRFS_MOUNT_NOBARRIER = 4ULL, + BTRFS_MOUNT_SSD = 8ULL, + BTRFS_MOUNT_DEGRADED = 16ULL, + BTRFS_MOUNT_COMPRESS = 32ULL, + BTRFS_MOUNT_NOTREELOG = 64ULL, + BTRFS_MOUNT_FLUSHONCOMMIT = 128ULL, + BTRFS_MOUNT_SSD_SPREAD = 256ULL, + BTRFS_MOUNT_NOSSD = 512ULL, + BTRFS_MOUNT_DISCARD_SYNC = 1024ULL, + BTRFS_MOUNT_FORCE_COMPRESS = 2048ULL, + BTRFS_MOUNT_SPACE_CACHE = 4096ULL, + BTRFS_MOUNT_CLEAR_CACHE = 8192ULL, + BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = 16384ULL, + BTRFS_MOUNT_ENOSPC_DEBUG = 32768ULL, + BTRFS_MOUNT_AUTO_DEFRAG = 65536ULL, + BTRFS_MOUNT_USEBACKUPROOT = 131072ULL, + BTRFS_MOUNT_SKIP_BALANCE = 262144ULL, + BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = 524288ULL, + BTRFS_MOUNT_RESCAN_UUID_TREE = 1048576ULL, + BTRFS_MOUNT_FRAGMENT_DATA = 2097152ULL, + BTRFS_MOUNT_FRAGMENT_METADATA = 4194304ULL, + 
BTRFS_MOUNT_FREE_SPACE_TREE = 8388608ULL, + BTRFS_MOUNT_NOLOGREPLAY = 16777216ULL, + BTRFS_MOUNT_REF_VERIFY = 33554432ULL, + BTRFS_MOUNT_DISCARD_ASYNC = 67108864ULL, + BTRFS_MOUNT_IGNOREBADROOTS = 134217728ULL, + BTRFS_MOUNT_IGNOREDATACSUMS = 268435456ULL, + BTRFS_MOUNT_NODISCARD = 536870912ULL, + BTRFS_MOUNT_NOSPACECACHE = 1073741824ULL, + BTRFS_MOUNT_IGNOREMETACSUMS = 2147483648ULL, + BTRFS_MOUNT_IGNORESUPERFLAGS = 4294967296ULL, +}; + +enum btrfs_disk_cache_state { + BTRFS_DC_WRITTEN = 0, + BTRFS_DC_ERROR = 1, + BTRFS_DC_CLEAR = 2, + BTRFS_DC_SETUP = 3, +}; + +enum btrfs_chunk_alloc_enum { + CHUNK_ALLOC_NO_FORCE = 0, + CHUNK_ALLOC_LIMITED = 1, + CHUNK_ALLOC_FORCE = 2, + CHUNK_ALLOC_FORCE_FOR_EXTENT = 3, +}; + +enum btrfs_block_group_flags { + BLOCK_GROUP_FLAG_IREF = 0, + BLOCK_GROUP_FLAG_REMOVED = 1, + BLOCK_GROUP_FLAG_TO_COPY = 2, + BLOCK_GROUP_FLAG_RELOCATING_REPAIR = 3, + BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED = 4, + BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE = 5, + BLOCK_GROUP_FLAG_ZONED_DATA_RELOC = 6, + BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE = 7, + BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE = 8, + BLOCK_GROUP_FLAG_NEW = 9, +}; + +enum btrfs_caching_type { + BTRFS_CACHE_NO = 0, + BTRFS_CACHE_STARTED = 1, + BTRFS_CACHE_FINISHED = 2, + BTRFS_CACHE_ERROR = 3, +}; + +struct btrfs_zoned_device_info { + u64 zone_size; + u8 zone_size_shift; + u32 nr_zones; + unsigned int max_active_zones; + int reserved_active_zones; + atomic_t active_zones_left; + long unsigned int *seq_zones; + long unsigned int *empty_zones; + long unsigned int *active_zones; + struct blk_zone *zone_cache; + long: 32; + struct blk_zone sb_zones[6]; +}; + +struct btrfs_raid_attr { + u8 sub_stripes; + u8 dev_stripes; + u8 devs_max; + u8 devs_min; + u8 tolerated_failures; + u8 devs_increment; + u8 ncopies; + u8 nparity; + u8 mindev_error; + const char raid_name[8]; + long: 32; + u64 bg_flag; +}; + +enum btrfs_reserve_flush_enum { + BTRFS_RESERVE_NO_FLUSH = 0, + BTRFS_RESERVE_FLUSH_LIMIT = 1, + BTRFS_RESERVE_FLUSH_EVICT = 2, + BTRFS_RESERVE_FLUSH_DATA = 3, + BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE = 4, + BTRFS_RESERVE_FLUSH_ALL = 5, + BTRFS_RESERVE_FLUSH_ALL_STEAL = 6, + BTRFS_RESERVE_FLUSH_EMERGENCY = 7, +}; + +struct crypto_wait { + struct completion completion; + int err; +}; + +enum { + CRYPTO_MSG_ALG_REQUEST = 0, + CRYPTO_MSG_ALG_REGISTER = 1, + CRYPTO_MSG_ALG_LOADED = 2, +}; + +struct crypto_larval { + struct crypto_alg alg; + struct crypto_alg *adult; + struct completion completion; + u32 mask; + bool test_started; + long: 32; +}; + +struct crypto_attr_alg { + char name[128]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +enum { + CRYPTOA_UNSPEC = 0, + CRYPTOA_ALG = 1, + CRYPTOA_TYPE = 2, + __CRYPTOA_MAX = 3, +}; + +struct cryptomgr_param { + struct rtattr *tb[34]; + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } attrs[32]; + char template[128]; + struct crypto_larval *larval; + u32 otype; + u32 omask; +}; + +struct crypto_test_param { + char driver[128]; + char alg[128]; + u32 type; +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = 8, + __REQ_FAILFAST_TRANSPORT = 9, + __REQ_FAILFAST_DRIVER = 10, + __REQ_SYNC = 11, + __REQ_META = 12, + __REQ_PRIO = 13, + __REQ_NOMERGE = 14, + __REQ_IDLE = 15, + __REQ_INTEGRITY = 16, + __REQ_FUA = 17, + __REQ_PREFLUSH = 18, + __REQ_RAHEAD = 19, + __REQ_BACKGROUND = 20, + __REQ_NOWAIT = 21, + __REQ_POLLED = 22, + __REQ_ALLOC_CACHE = 23, + __REQ_SWAP = 24, + __REQ_DRV = 25, + __REQ_FS_PRIVATE = 26, + 
__REQ_ATOMIC = 27, + __REQ_NOUNMAP = 28, + __REQ_NR_BITS = 29, +}; + +struct elevator_type; + +struct elevator_queue { + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + long unsigned int flags; + struct hlist_head hash[64]; +}; + +struct blk_mq_ctxs; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[3]; + long: 32; + }; + unsigned int cpu; + short unsigned int index_hw[3]; + struct blk_mq_hw_ctx *hctxs[3]; + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum { + __RQF_STARTED = 0, + __RQF_FLUSH_SEQ = 1, + __RQF_MIXED_MERGE = 2, + __RQF_DONTPREP = 3, + __RQF_SCHED_TAGS = 4, + __RQF_USE_SCHED = 5, + __RQF_FAILED = 6, + __RQF_QUIET = 7, + __RQF_IO_STAT = 8, + __RQF_PM = 9, + __RQF_HASHED = 10, + __RQF_STATS = 11, + __RQF_SPECIAL_PAYLOAD = 12, + __RQF_ZONE_WRITE_PLUGGING = 13, + __RQF_TIMED_OUT = 14, + __RQF_RESV = 15, + __RQF_BITS = 16, +}; + +enum { + BLK_MQ_F_SHOULD_MERGE = 1, + BLK_MQ_F_TAG_QUEUE_SHARED = 2, + BLK_MQ_F_STACKING = 4, + BLK_MQ_F_TAG_HCTX_SHARED = 8, + BLK_MQ_F_BLOCKING = 16, + BLK_MQ_F_NO_SCHED = 32, + BLK_MQ_F_NO_SCHED_BY_DEFAULT = 64, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 7, + BLK_MQ_F_ALLOC_POLICY_BITS = 1, +}; + +enum { + BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_INACTIVE = 3, + BLK_MQ_S_MAX = 4, +}; + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx *queue_ctx; +}; + +enum { + BLK_MQ_NO_TAG = 4294967295, + BLK_MQ_TAG_MIN = 1, + BLK_MQ_TAG_MAX = 4294967294, +}; + +typedef unsigned int blk_insert_t; + +struct blk_mq_alloc_data { + struct request_queue *q; + blk_mq_req_flags_t flags; + unsigned int shallow_depth; + blk_opf_t cmd_flags; + req_flags_t rq_flags; + unsigned int nr_tags; + struct rq_list *cached_rqs; + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; +}; + +struct blk_mq_debugfs_attr { + const char *name; + umode_t mode; + int (*show)(void *, struct seq_file *); + ssize_t (*write)(void *, const char *, size_t, loff_t *); + const struct seq_operations *seq_ops; +}; + +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE = 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); + int (*request_merge)(struct request_queue *, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *); + void (*prepare_request)(struct request *); + void (*finish_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, blk_insert_t); + struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct request *, u64); + void (*requeue_request)(struct request *); + struct request * (*former_request)(struct request_queue *, 
struct request *); + struct request * (*next_request)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq *); + void (*exit_icq)(struct io_cq *); +}; + +struct elv_fs_entry; + +struct elevator_type { + struct kmem_cache *icq_cache; + struct elevator_mq_ops ops; + size_t icq_size; + size_t icq_align; + struct elv_fs_entry *elevator_attrs; + const char *elevator_name; + const char *elevator_alias; + struct module *elevator_owner; + const struct blk_mq_debugfs_attr *queue_debugfs_attrs; + const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; + char icq_cache_name[22]; + struct list_head list; +}; + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + +struct blkg_rwstat { + struct percpu_counter cpu_cnt[5]; + atomic64_t aux_cnt[5]; +}; + +struct blkg_rwstat_sample { + u64 cnt[5]; +}; + +struct bfq_entity; + +struct bfq_service_tree { + struct rb_root active; + struct rb_root idle; + struct bfq_entity *first_idle; + struct bfq_entity *last_idle; + u64 vtime; + long unsigned int wsum; + long: 32; +}; + +struct bfq_sched_data; + +struct bfq_queue; + +struct bfq_entity { + struct rb_node rb_node; + bool on_st_or_in_serv; + u64 start; + u64 finish; + struct rb_root *tree; + long: 32; + u64 min_start; + int service; + int budget; + int allocated; + int dev_weight; + int weight; + int new_weight; + int orig_weight; + struct bfq_entity *parent; + struct bfq_sched_data *my_sched_data; + struct bfq_sched_data *sched_data; + int prio_changed; + bool in_groups_with_pending_reqs; + struct bfq_queue *last_bfqq_created; + long: 32; +}; + +struct bfq_sched_data { + struct bfq_entity *in_service_entity; + struct bfq_entity *next_in_service; + struct bfq_service_tree service_tree[3]; + long unsigned int bfq_class_idle_last_service; + long: 32; +}; + +struct bfq_weight_counter { + unsigned int weight; + unsigned int num_active; + struct rb_node weights_node; +}; + +struct bfq_ttime { + u64 last_end_request; + u64 ttime_total; + long unsigned int ttime_samples; + long: 32; + u64 ttime_mean; +}; + +struct bfq_data; + +struct bfq_io_cq; + +struct bfq_queue { + int ref; + int stable_ref; + struct bfq_data *bfqd; + short unsigned int ioprio; + short unsigned int ioprio_class; + short unsigned int new_ioprio; + short unsigned int new_ioprio_class; + long: 32; + u64 last_serv_time_ns; + unsigned int inject_limit; + long unsigned int decrease_time_jif; + struct bfq_queue *new_bfqq; + struct rb_node pos_node; + struct rb_root *pos_root; + struct rb_root sort_list; + struct request *next_rq; + int queued[2]; + int meta_pending; + struct list_head fifo; + struct bfq_entity entity; + struct bfq_weight_counter *weight_counter; + int max_budget; + long unsigned int budget_timeout; + int dispatched; + long unsigned int flags; + struct list_head bfqq_list; + long: 32; + struct bfq_ttime ttime; + u64 io_start_time; + u64 tot_idle_time; + u32 seek_history; + struct hlist_node burst_list_node; + long: 32; + sector_t last_request_pos; + unsigned int requests_within_timer; + pid_t pid; + struct bfq_io_cq *bic; + long unsigned int wr_cur_max_time; + long unsigned int soft_rt_next_start; + long unsigned int last_wr_start_finish; + unsigned int wr_coeff; + long unsigned int last_idle_bklogged; + long unsigned int service_from_backlogged; + long unsigned int service_from_wr; + long unsigned int wr_start_at_switch_to_srt; + long unsigned int split_time; + long unsigned int first_IO_time; + long 
unsigned int creation_time; + struct bfq_queue *waker_bfqq; + struct bfq_queue *tentative_waker_bfqq; + unsigned int num_waker_detections; + long: 32; + u64 waker_detection_started; + struct hlist_node woken_list_node; + struct hlist_head woken_list; + unsigned int actuator_idx; +}; + +struct bfq_group; + +struct bfq_data { + struct request_queue *queue; + struct list_head dispatch; + struct bfq_group *root_group; + struct rb_root_cached queue_weights_tree; + unsigned int num_groups_with_pending_reqs; + unsigned int busy_queues[3]; + int wr_busy_queues; + int queued; + int tot_rq_in_driver; + int rq_in_driver[8]; + bool nonrot_with_queueing; + int max_rq_in_driver; + int hw_tag_samples; + int hw_tag; + int budgets_assigned; + struct hrtimer idle_slice_timer; + struct bfq_queue *in_service_queue; + long: 32; + sector_t last_position; + sector_t in_serv_last_pos; + u64 last_completion; + struct bfq_queue *last_completed_rq_bfqq; + struct bfq_queue *last_bfqq_created; + u64 last_empty_occupied_ns; + bool wait_dispatch; + struct request *waited_rq; + bool rqs_injected; + long: 32; + u64 first_dispatch; + u64 last_dispatch; + ktime_t last_budget_start; + ktime_t last_idling_start; + long unsigned int last_idling_start_jiffies; + int peak_rate_samples; + u32 sequential_samples; + long: 32; + u64 tot_sectors_dispatched; + u32 last_rq_max_size; + long: 32; + u64 delta_from_first; + u32 peak_rate; + int bfq_max_budget; + struct list_head active_list[8]; + struct list_head idle_list; + u64 bfq_fifo_expire[2]; + unsigned int bfq_back_penalty; + unsigned int bfq_back_max; + u32 bfq_slice_idle; + int bfq_user_max_budget; + unsigned int bfq_timeout; + bool strict_guarantees; + long unsigned int last_ins_in_burst; + long unsigned int bfq_burst_interval; + int burst_size; + struct bfq_entity *burst_parent_entity; + long unsigned int bfq_large_burst_thresh; + bool large_burst; + struct hlist_head burst_list; + bool low_latency; + unsigned int bfq_wr_coeff; + unsigned int bfq_wr_rt_max_time; + unsigned int bfq_wr_min_idle_time; + long unsigned int bfq_wr_min_inter_arr_async; + unsigned int bfq_wr_max_softrt_rate; + long: 32; + u64 rate_dur_prod; + struct bfq_queue oom_bfqq; + spinlock_t lock; + struct bfq_io_cq *bio_bic; + struct bfq_queue *bio_bfqq; + unsigned int word_depths[4]; + unsigned int full_depth_shift; + unsigned int num_actuators; + long: 32; + sector_t sector[8]; + sector_t nr_sectors[8]; + struct blk_independent_access_range ia_ranges[8]; + unsigned int actuator_load_threshold; + long: 32; +}; + +struct bfq_iocq_bfqq_data { + bool saved_has_short_ttime; + bool saved_IO_bound; + long: 32; + u64 saved_io_start_time; + u64 saved_tot_idle_time; + bool saved_in_large_burst; + bool was_in_burst_list; + unsigned int saved_weight; + long unsigned int saved_wr_coeff; + long unsigned int saved_last_wr_start_finish; + long unsigned int saved_service_from_wr; + long unsigned int saved_wr_start_at_switch_to_srt; + unsigned int saved_wr_cur_max_time; + long: 32; + struct bfq_ttime saved_ttime; + u64 saved_last_serv_time_ns; + unsigned int saved_inject_limit; + long unsigned int saved_decrease_time_jif; + struct bfq_queue *stable_merge_bfqq; + bool stably_merged; +}; + +struct bfq_io_cq { + struct io_cq icq; + struct bfq_queue *bfqq[16]; + int ioprio; + uint64_t blkcg_serial_nr; + struct bfq_iocq_bfqq_data bfqq_data[8]; + unsigned int requests; + long: 32; +}; + +struct bfqg_stats { + struct blkg_rwstat bytes; + struct blkg_rwstat ios; +}; + +struct bfq_group { + struct blkg_policy_data pd; + refcount_t ref; 
+ struct bfq_entity entity; + struct bfq_sched_data sched_data; + struct bfq_data *bfqd; + struct bfq_queue *async_bfqq[128]; + struct bfq_queue *async_idle_bfqq[8]; + struct bfq_entity *my_entity; + int active_entities; + int num_queues_with_pending_reqs; + struct rb_root rq_pos_tree; + long: 32; + struct bfqg_stats stats; +}; + +enum bfqq_expiration { + BFQQE_TOO_IDLE = 0, + BFQQE_BUDGET_TIMEOUT = 1, + BFQQE_BUDGET_EXHAUSTED = 2, + BFQQE_NO_MORE_REQUESTS = 3, + BFQQE_PREEMPTED = 4, +}; + +struct bfq_group_data { + struct blkcg_policy_data pd; + unsigned int weight; +}; + +enum io_uring_napi_tracking_strategy { + IO_URING_NAPI_TRACKING_DYNAMIC = 0, + IO_URING_NAPI_TRACKING_STATIC = 1, + IO_URING_NAPI_TRACKING_INACTIVE = 255, +}; + +struct io_sq_data { + refcount_t refs; + atomic_t park_pending; + struct mutex lock; + struct list_head ctx_list; + struct task_struct *thread; + struct wait_queue_head wait; + unsigned int sq_thread_idle; + int sq_cpu; + pid_t task_pid; + pid_t task_tgid; + long: 32; + u64 work_time; + long unsigned int state; + struct completion exited; + long: 32; +}; + +struct io_overflow_cqe { + struct list_head list; + struct io_uring_cqe cqe; +}; + +typedef int __kernel_ptrdiff_t; + +typedef __kernel_ptrdiff_t ptrdiff_t; + +struct region { + unsigned int start; + unsigned int off; + unsigned int group_len; + unsigned int end; + unsigned int nbits; +}; + +typedef mpi_limb_t UWtype; + +typedef unsigned int USItype; + +typedef u32 unative_t; + +enum pci_mmap_state { + pci_mmap_io = 0, + pci_mmap_mem = 1, +}; + +enum pci_mmap_api { + PCI_MMAP_SYSFS = 0, + PCI_MMAP_PROCFS = 1, +}; + +struct fb_fix_screeninfo { + char id[16]; + long unsigned int smem_start; + __u32 smem_len; + __u32 type; + __u32 type_aux; + __u32 visual; + __u16 xpanstep; + __u16 ypanstep; + __u16 ywrapstep; + __u32 line_length; + long unsigned int mmio_start; + __u32 mmio_len; + __u32 accel; + __u16 capabilities; + __u16 reserved[2]; +}; + +struct fb_bitfield { + __u32 offset; + __u32 length; + __u32 msb_right; +}; + +struct fb_var_screeninfo { + __u32 xres; + __u32 yres; + __u32 xres_virtual; + __u32 yres_virtual; + __u32 xoffset; + __u32 yoffset; + __u32 bits_per_pixel; + __u32 grayscale; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + __u32 nonstd; + __u32 activate; + __u32 height; + __u32 width; + __u32 accel_flags; + __u32 pixclock; + __u32 left_margin; + __u32 right_margin; + __u32 upper_margin; + __u32 lower_margin; + __u32 hsync_len; + __u32 vsync_len; + __u32 sync; + __u32 vmode; + __u32 rotate; + __u32 colorspace; + __u32 reserved[4]; +}; + +struct fb_cmap { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_fillrect { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_image { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 fg_color; + __u32 bg_color; + __u8 depth; + const char *data; + struct fb_cmap cmap; +}; + +struct fbcurpos { + __u16 x; + __u16 y; +}; + +struct fb_cursor { + __u16 set; + __u16 enable; + __u16 rop; + const char *mask; + struct fbcurpos hot; + struct fb_image image; +}; + +struct fb_chroma { + __u32 redx; + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_videomode; + +struct fb_monspecs { + struct fb_chroma chroma; + 
struct fb_videomode *modedb; + __u8 manufacturer[4]; + __u8 monitor[14]; + __u8 serial_no[14]; + __u8 ascii[14]; + __u32 modedb_len; + __u32 model; + __u32 serial; + __u32 year; + __u32 week; + __u32 hfmin; + __u32 hfmax; + __u32 dclkmin; + __u32 dclkmax; + __u16 input; + __u16 dpms; + __u16 signal; + __u16 vfmin; + __u16 vfmax; + __u16 gamma; + __u16 gtf: 1; + __u16 misc; + __u8 version; + __u8 revision; + __u8 max_x; + __u8 max_y; +}; + +struct fb_videomode { + const char *name; + u32 refresh; + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +struct fb_info; + +struct fb_pixmap { + u8 *addr; + u32 size; + u32 offset; + u32 buf_align; + u32 scan_align; + u32 access_align; + u32 flags; + long unsigned int blit_x[2]; + long unsigned int blit_y[4]; + void (*writeio)(struct fb_info *, void *, void *, unsigned int); + void (*readio)(struct fb_info *, void *, void *, unsigned int); +}; + +struct lcd_device; + +struct fb_ops; + +struct fb_info { + refcount_t count; + int node; + int flags; + int fbcon_rotate_hint; + struct mutex lock; + struct mutex mm_lock; + struct fb_var_screeninfo var; + struct fb_fix_screeninfo fix; + struct fb_monspecs monspecs; + struct fb_pixmap pixmap; + struct fb_pixmap sprite; + struct fb_cmap cmap; + struct list_head modelist; + struct fb_videomode *mode; + struct lcd_device *lcd_dev; + const struct fb_ops *fbops; + struct device *device; + struct device *dev; + int class_flag; + union { + char *screen_base; + char *screen_buffer; + }; + long unsigned int screen_size; + void *pseudo_palette; + u32 state; + void *fbcon_par; + void *par; + bool skip_vt_switch; + bool skip_panic; +}; + +struct fb_blit_caps { + long unsigned int x[2]; + long unsigned int y[4]; + u32 len; + u32 flags; +}; + +struct fb_ops { + struct module *owner; + int (*fb_open)(struct fb_info *, int); + int (*fb_release)(struct fb_info *, int); + ssize_t (*fb_read)(struct fb_info *, char *, size_t, loff_t *); + ssize_t (*fb_write)(struct fb_info *, const char *, size_t, loff_t *); + int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); + int (*fb_set_par)(struct fb_info *); + int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *); + int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); + int (*fb_blank)(int, struct fb_info *); + int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); + void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); + void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); + void (*fb_imageblit)(struct fb_info *, const struct fb_image *); + int (*fb_cursor)(struct fb_info *, struct fb_cursor *); + int (*fb_sync)(struct fb_info *); + int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); + void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *); + void (*fb_destroy)(struct fb_info *); + int (*fb_debug_enter)(struct fb_info *); + int (*fb_debug_leave)(struct fb_info *); +}; + +struct dmt_videomode { + u32 dmt_id; + u32 std_2byte_code; + u32 cvt_3byte_code; + const struct fb_videomode *mode; +}; + +struct fb_modelist { + struct list_head list; + struct fb_videomode mode; +}; + +struct virtio_device_id { + __u32 device; + __u32 vendor; +}; + +struct 
virtio_device; + +struct virtqueue { + struct list_head list; + void (*callback)(struct virtqueue *); + const char *name; + struct virtio_device *vdev; + unsigned int index; + unsigned int num_free; + unsigned int num_max; + bool reset; + void *priv; +}; + +struct vringh_config_ops; + +struct virtio_config_ops; + +struct virtio_device { + int index; + bool failed; + bool config_core_enabled; + bool config_driver_disabled; + bool config_change_pending; + spinlock_t config_lock; + spinlock_t vqs_list_lock; + struct device dev; + struct virtio_device_id id; + const struct virtio_config_ops *config; + const struct vringh_config_ops *vringh_config; + struct list_head vqs; + u64 features; + void *priv; + long: 32; +}; + +struct virtqueue_info; + +struct virtio_shm_region; + +struct virtio_config_ops { + void (*get)(struct virtio_device *, unsigned int, void *, unsigned int); + void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int); + u32 (*generation)(struct virtio_device *); + u8 (*get_status)(struct virtio_device *); + void (*set_status)(struct virtio_device *, u8); + void (*reset)(struct virtio_device *); + int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, struct virtqueue_info *, struct irq_affinity *); + void (*del_vqs)(struct virtio_device *); + void (*synchronize_cbs)(struct virtio_device *); + u64 (*get_features)(struct virtio_device *); + int (*finalize_features)(struct virtio_device *); + const char * (*bus_name)(struct virtio_device *); + int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *); + const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int); + bool (*get_shm_region)(struct virtio_device *, struct virtio_shm_region *, u8); + int (*disable_vq_and_reset)(struct virtqueue *); + int (*enable_vq_after_reset)(struct virtqueue *); +}; + +struct virtio_shm_region { + u64 addr; + u64 len; +}; + +typedef void vq_callback_t(struct virtqueue *); + +struct virtqueue_info { + const char *name; + vq_callback_t *callback; + bool ctx; +}; + +struct virtio_pci_common_cfg { + __le32 device_feature_select; + __le32 device_feature; + __le32 guest_feature_select; + __le32 guest_feature; + __le16 msix_config; + __le16 num_queues; + __u8 device_status; + __u8 config_generation; + __le16 queue_select; + __le16 queue_size; + __le16 queue_msix_vector; + __le16 queue_enable; + __le16 queue_notify_off; + __le32 queue_desc_lo; + __le32 queue_desc_hi; + __le32 queue_avail_lo; + __le32 queue_avail_hi; + __le32 queue_used_lo; + __le32 queue_used_hi; +}; + +struct virtio_pci_legacy_device { + struct pci_dev *pci_dev; + u8 *isr; + void *ioaddr; + struct virtio_device_id id; +}; + +struct virtio_pci_modern_device { + struct pci_dev *pci_dev; + struct virtio_pci_common_cfg *common; + void *device; + void *notify_base; + resource_size_t notify_pa; + u8 *isr; + size_t notify_len; + size_t device_len; + size_t common_len; + int notify_map_cap; + u32 notify_offset_multiplier; + int modern_bars; + struct virtio_device_id id; + int (*device_id_check)(struct pci_dev *); + long: 32; + u64 dma_mask; +}; + +struct virtio_pci_vq_info { + struct virtqueue *vq; + struct list_head node; + unsigned int msix_vector; +}; + +struct virtio_pci_admin_vq { + struct virtio_pci_vq_info *info; + spinlock_t lock; + u64 supported_cmds; + u64 supported_caps; + u8 max_dev_parts_objects; + struct ida dev_parts_ida; + char name[10]; + u16 vq_index; + long: 32; +}; + +struct virtio_pci_device { + struct virtio_device vdev; + struct pci_dev *pci_dev; + long: 32; + union 
{ + struct virtio_pci_legacy_device ldev; + struct virtio_pci_modern_device mdev; + }; + bool is_legacy; + u8 *isr; + spinlock_t lock; + struct list_head virtqueues; + struct list_head slow_virtqueues; + struct virtio_pci_vq_info **vqs; + struct virtio_pci_admin_vq admin_vq; + int msix_enabled; + int intx_enabled; + cpumask_var_t *msix_affinity_masks; + char (*msix_names)[256]; + unsigned int msix_vectors; + unsigned int msix_used_vectors; + bool per_vq_vectors; + struct virtqueue * (*setup_vq)(struct virtio_pci_device *, struct virtio_pci_vq_info *, unsigned int, void (*)(struct virtqueue *), const char *, bool, u16); + void (*del_vq)(struct virtio_pci_vq_info *); + u16 (*config_vector)(struct virtio_pci_device *, u16); + int (*avq_index)(struct virtio_device *, u16 *, u16 *); + long: 32; +}; + +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +enum vp_vq_vector_policy { + VP_VQ_VECTOR_POLICY_EACH = 0, + VP_VQ_VECTOR_POLICY_SHARED_SLOW = 1, + VP_VQ_VECTOR_POLICY_SHARED = 2, +}; + +enum dev_prop_type { + DEV_PROP_U8 = 0, + DEV_PROP_U16 = 1, + DEV_PROP_U32 = 2, + DEV_PROP_U64 = 3, + DEV_PROP_STRING = 4, + DEV_PROP_REF = 5, +}; + +struct software_node; + +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 args[8]; +}; + +struct property_entry; + +struct software_node { + const char *name; + const struct software_node *parent; + const struct property_entry *properties; +}; + +struct property_entry { + const char *name; + size_t length; + bool is_inline; + enum dev_prop_type type; + union { + const void *pointer; + union { + u8 u8_data[8]; + u16 u16_data[4]; + u32 u32_data[2]; + u64 u64_data[1]; + const char *str[2]; + } value; + }; +}; + +struct swnode { + struct kobject kobj; + struct fwnode_handle fwnode; + const struct software_node *node; + int id; + struct ida child_ids; + struct list_head entry; + struct list_head children; + struct swnode *parent; + unsigned int allocated: 1; + unsigned int managed: 1; +}; + +typedef long unsigned int ulong; + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 4294967295, +}; + +struct attribute_container { + struct list_head node; + struct klist containers; + struct class *class; + const struct attribute_group *grp; + struct device_attribute **attrs; + int (*match)(struct attribute_container *, struct device *); + long unsigned int flags; +}; + +struct transport_container { + struct attribute_container ac; + const struct attribute_group *statistics; +}; + +typedef bool busy_tag_iter_fn(struct request *, void *); + +struct scsi_transport_template { + struct transport_container host_attrs; + struct transport_container target_attrs; + struct transport_container device_attrs; + int (*user_scan)(struct Scsi_Host *, uint, uint, u64); + int device_size; + int device_private_offset; + int target_size; + int target_private_offset; + int host_size; + unsigned int create_work_queue: 1; + void (*eh_strategy_handler)(struct Scsi_Host *); +}; + +struct scsi_host_busy_iter_data { + bool (*fn)(struct scsi_cmnd *, void *); + void *priv; +}; + +enum { + BLK_TAG_ALLOC_FIFO = 0, + BLK_TAG_ALLOC_RR = 1, + BLK_TAG_ALLOC_MAX = 2, +}; + +enum { + ATA_MAX_DEVICES = 2, + ATA_MAX_PRD = 256, + ATA_SECT_SIZE = 512, + ATA_MAX_SECTORS_128 = 128, + ATA_MAX_SECTORS = 256, + ATA_MAX_SECTORS_1024 = 1024, + ATA_MAX_SECTORS_LBA48 = 65535, + ATA_MAX_SECTORS_TAPE = 65535, + ATA_MAX_TRIM_RNUM = 64, + ATA_ID_WORDS = 256, + ATA_ID_CONFIG = 0, 
+ ATA_ID_CYLS = 1, + ATA_ID_HEADS = 3, + ATA_ID_SECTORS = 6, + ATA_ID_SERNO = 10, + ATA_ID_BUF_SIZE = 21, + ATA_ID_FW_REV = 23, + ATA_ID_PROD = 27, + ATA_ID_MAX_MULTSECT = 47, + ATA_ID_DWORD_IO = 48, + ATA_ID_TRUSTED = 48, + ATA_ID_CAPABILITY = 49, + ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_OLD_DMA_MODES = 52, + ATA_ID_FIELD_VALID = 53, + ATA_ID_CUR_CYLS = 54, + ATA_ID_CUR_HEADS = 55, + ATA_ID_CUR_SECTORS = 56, + ATA_ID_MULTSECT = 59, + ATA_ID_LBA_CAPACITY = 60, + ATA_ID_SWDMA_MODES = 62, + ATA_ID_MWDMA_MODES = 63, + ATA_ID_PIO_MODES = 64, + ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_DMA_TIME = 66, + ATA_ID_EIDE_PIO = 67, + ATA_ID_EIDE_PIO_IORDY = 68, + ATA_ID_ADDITIONAL_SUPP = 69, + ATA_ID_QUEUE_DEPTH = 75, + ATA_ID_SATA_CAPABILITY = 76, + ATA_ID_SATA_CAPABILITY_2 = 77, + ATA_ID_FEATURE_SUPP = 78, + ATA_ID_MAJOR_VER = 80, + ATA_ID_COMMAND_SET_1 = 82, + ATA_ID_COMMAND_SET_2 = 83, + ATA_ID_CFSSE = 84, + ATA_ID_CFS_ENABLE_1 = 85, + ATA_ID_CFS_ENABLE_2 = 86, + ATA_ID_CSF_DEFAULT = 87, + ATA_ID_UDMA_MODES = 88, + ATA_ID_HW_CONFIG = 93, + ATA_ID_SPG = 98, + ATA_ID_LBA_CAPACITY_2 = 100, + ATA_ID_SECTOR_SIZE = 106, + ATA_ID_WWN = 108, + ATA_ID_LOGICAL_SECTOR_SIZE = 117, + ATA_ID_COMMAND_SET_3 = 119, + ATA_ID_COMMAND_SET_4 = 120, + ATA_ID_LAST_LUN = 126, + ATA_ID_DLF = 128, + ATA_ID_CSFO = 129, + ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, + ATA_ID_DATA_SET_MGMT = 169, + ATA_ID_SCT_CMD_XPORT = 206, + ATA_ID_ROT_SPEED = 217, + ATA_ID_PIO4 = 2, + ATA_ID_SERNO_LEN = 20, + ATA_ID_FW_REV_LEN = 8, + ATA_ID_PROD_LEN = 40, + ATA_ID_WWN_LEN = 8, + ATA_PCI_CTL_OFS = 2, + ATA_PIO0 = 1, + ATA_PIO1 = 3, + ATA_PIO2 = 7, + ATA_PIO3 = 15, + ATA_PIO4 = 31, + ATA_PIO5 = 63, + ATA_PIO6 = 127, + ATA_PIO4_ONLY = 16, + ATA_SWDMA0 = 1, + ATA_SWDMA1 = 3, + ATA_SWDMA2 = 7, + ATA_SWDMA2_ONLY = 4, + ATA_MWDMA0 = 1, + ATA_MWDMA1 = 3, + ATA_MWDMA2 = 7, + ATA_MWDMA3 = 15, + ATA_MWDMA4 = 31, + ATA_MWDMA12_ONLY = 6, + ATA_MWDMA2_ONLY = 4, + ATA_UDMA0 = 1, + ATA_UDMA1 = 3, + ATA_UDMA2 = 7, + ATA_UDMA3 = 15, + ATA_UDMA4 = 31, + ATA_UDMA5 = 63, + ATA_UDMA6 = 127, + ATA_UDMA7 = 255, + ATA_UDMA24_ONLY = 20, + ATA_UDMA_MASK_40C = 7, + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = 2048, + ATA_PRD_EOT = -2147483648, + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = 8, + ATA_DMA_START = 1, + ATA_DMA_INTR = 4, + ATA_DMA_ERR = 2, + ATA_DMA_ACTIVE = 1, + ATA_HOB = 128, + ATA_NIEN = 2, + ATA_LBA = 64, + ATA_DEV1 = 16, + ATA_DEVICE_OBS = 160, + ATA_DEVCTL_OBS = 8, + ATA_BUSY = 128, + ATA_DRDY = 64, + ATA_DF = 32, + ATA_DSC = 16, + ATA_DRQ = 8, + ATA_CORR = 4, + ATA_SENSE = 2, + ATA_ERR = 1, + ATA_SRST = 4, + ATA_ICRC = 128, + ATA_BBK = 128, + ATA_UNC = 64, + ATA_MC = 32, + ATA_IDNF = 16, + ATA_MCR = 8, + ATA_ABORTED = 4, + ATA_TRK0NF = 2, + ATA_AMNF = 1, + ATAPI_LFS = 240, + ATAPI_EOM = 2, + ATAPI_ILI = 1, + ATAPI_IO = 2, + ATAPI_COD = 1, + ATA_REG_DATA = 0, + ATA_REG_ERR = 1, + ATA_REG_NSECT = 2, + ATA_REG_LBAL = 3, + ATA_REG_LBAM = 4, + ATA_REG_LBAH = 5, + ATA_REG_DEVICE = 6, + ATA_REG_STATUS = 7, + ATA_REG_FEATURE = 1, + ATA_REG_CMD = 7, + ATA_REG_BYTEL = 4, + ATA_REG_BYTEH = 5, + ATA_REG_DEVSEL = 6, + ATA_REG_IRQ = 2, + ATA_CMD_DEV_RESET = 8, + ATA_CMD_CHK_POWER = 229, + ATA_CMD_STANDBY = 226, + ATA_CMD_IDLE = 227, + ATA_CMD_EDD = 144, + ATA_CMD_DOWNLOAD_MICRO = 146, + ATA_CMD_DOWNLOAD_MICRO_DMA = 147, + ATA_CMD_NOP = 0, + ATA_CMD_FLUSH = 231, + ATA_CMD_FLUSH_EXT = 234, + ATA_CMD_ID_ATA = 236, + ATA_CMD_ID_ATAPI = 161, + ATA_CMD_SERVICE = 162, + ATA_CMD_READ = 200, + ATA_CMD_READ_EXT = 37, + 
ATA_CMD_READ_QUEUED = 38, + ATA_CMD_READ_STREAM_EXT = 43, + ATA_CMD_READ_STREAM_DMA_EXT = 42, + ATA_CMD_WRITE = 202, + ATA_CMD_WRITE_EXT = 53, + ATA_CMD_WRITE_QUEUED = 54, + ATA_CMD_WRITE_STREAM_EXT = 59, + ATA_CMD_WRITE_STREAM_DMA_EXT = 58, + ATA_CMD_WRITE_FUA_EXT = 61, + ATA_CMD_WRITE_QUEUED_FUA_EXT = 62, + ATA_CMD_FPDMA_READ = 96, + ATA_CMD_FPDMA_WRITE = 97, + ATA_CMD_NCQ_NON_DATA = 99, + ATA_CMD_FPDMA_SEND = 100, + ATA_CMD_FPDMA_RECV = 101, + ATA_CMD_PIO_READ = 32, + ATA_CMD_PIO_READ_EXT = 36, + ATA_CMD_PIO_WRITE = 48, + ATA_CMD_PIO_WRITE_EXT = 52, + ATA_CMD_READ_MULTI = 196, + ATA_CMD_READ_MULTI_EXT = 41, + ATA_CMD_WRITE_MULTI = 197, + ATA_CMD_WRITE_MULTI_EXT = 57, + ATA_CMD_WRITE_MULTI_FUA_EXT = 206, + ATA_CMD_SET_FEATURES = 239, + ATA_CMD_SET_MULTI = 198, + ATA_CMD_PACKET = 160, + ATA_CMD_VERIFY = 64, + ATA_CMD_VERIFY_EXT = 66, + ATA_CMD_WRITE_UNCORR_EXT = 69, + ATA_CMD_STANDBYNOW1 = 224, + ATA_CMD_IDLEIMMEDIATE = 225, + ATA_CMD_SLEEP = 230, + ATA_CMD_INIT_DEV_PARAMS = 145, + ATA_CMD_READ_NATIVE_MAX = 248, + ATA_CMD_READ_NATIVE_MAX_EXT = 39, + ATA_CMD_SET_MAX = 249, + ATA_CMD_SET_MAX_EXT = 55, + ATA_CMD_READ_LOG_EXT = 47, + ATA_CMD_WRITE_LOG_EXT = 63, + ATA_CMD_READ_LOG_DMA_EXT = 71, + ATA_CMD_WRITE_LOG_DMA_EXT = 87, + ATA_CMD_TRUSTED_NONDATA = 91, + ATA_CMD_TRUSTED_RCV = 92, + ATA_CMD_TRUSTED_RCV_DMA = 93, + ATA_CMD_TRUSTED_SND = 94, + ATA_CMD_TRUSTED_SND_DMA = 95, + ATA_CMD_PMP_READ = 228, + ATA_CMD_PMP_READ_DMA = 233, + ATA_CMD_PMP_WRITE = 232, + ATA_CMD_PMP_WRITE_DMA = 235, + ATA_CMD_CONF_OVERLAY = 177, + ATA_CMD_SEC_SET_PASS = 241, + ATA_CMD_SEC_UNLOCK = 242, + ATA_CMD_SEC_ERASE_PREP = 243, + ATA_CMD_SEC_ERASE_UNIT = 244, + ATA_CMD_SEC_FREEZE_LOCK = 245, + ATA_CMD_SEC_DISABLE_PASS = 246, + ATA_CMD_CONFIG_STREAM = 81, + ATA_CMD_SMART = 176, + ATA_CMD_MEDIA_LOCK = 222, + ATA_CMD_MEDIA_UNLOCK = 223, + ATA_CMD_DSM = 6, + ATA_CMD_CHK_MED_CRD_TYP = 209, + ATA_CMD_CFA_REQ_EXT_ERR = 3, + ATA_CMD_CFA_WRITE_NE = 56, + ATA_CMD_CFA_TRANS_SECT = 135, + ATA_CMD_CFA_ERASE = 192, + ATA_CMD_CFA_WRITE_MULT_NE = 205, + ATA_CMD_REQ_SENSE_DATA = 11, + ATA_CMD_SANITIZE_DEVICE = 180, + ATA_CMD_ZAC_MGMT_IN = 74, + ATA_CMD_ZAC_MGMT_OUT = 159, + ATA_CMD_RESTORE = 16, + ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 1, + ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 2, + ATA_SUBCMD_FPDMA_SEND_DSM = 0, + ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 2, + ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0, + ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 5, + ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 6, + ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 7, + ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0, + ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 1, + ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 2, + ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 3, + ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 4, + ATA_LOG_DIRECTORY = 0, + ATA_LOG_SATA_NCQ = 16, + ATA_LOG_NCQ_NON_DATA = 18, + ATA_LOG_NCQ_SEND_RECV = 19, + ATA_LOG_CDL = 24, + ATA_LOG_CDL_SIZE = 512, + ATA_LOG_IDENTIFY_DEVICE = 48, + ATA_LOG_SENSE_NCQ = 15, + ATA_LOG_SENSE_NCQ_SIZE = 1024, + ATA_LOG_CONCURRENT_POSITIONING_RANGES = 71, + ATA_LOG_SUPPORTED_CAPABILITIES = 3, + ATA_LOG_CURRENT_SETTINGS = 4, + ATA_LOG_SECURITY = 6, + ATA_LOG_SATA_SETTINGS = 8, + ATA_LOG_ZONED_INFORMATION = 9, + ATA_LOG_DEVSLP_OFFSET = 48, + ATA_LOG_DEVSLP_SIZE = 8, + ATA_LOG_DEVSLP_MDAT = 0, + ATA_LOG_DEVSLP_MDAT_MASK = 31, + ATA_LOG_DEVSLP_DETO = 1, + ATA_LOG_DEVSLP_VALID = 7, + ATA_LOG_DEVSLP_VALID_MASK = 128, + ATA_LOG_NCQ_PRIO_OFFSET = 9, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = 1, + ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 
4, + ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = 1, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 8, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 12, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 16, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = 2, + ATA_LOG_NCQ_SEND_RECV_SIZE = 20, + ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = 1, + ATA_LOG_NCQ_NON_DATA_ABORT_ALL = 2, + ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = 4, + ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = 8, + ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = 16, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 28, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = 1, + ATA_LOG_NCQ_NON_DATA_SIZE = 64, + ATA_CMD_READ_LONG = 34, + ATA_CMD_READ_LONG_ONCE = 35, + ATA_CMD_WRITE_LONG = 50, + ATA_CMD_WRITE_LONG_ONCE = 51, + SETFEATURES_XFER = 3, + XFER_UDMA_7 = 71, + XFER_UDMA_6 = 70, + XFER_UDMA_5 = 69, + XFER_UDMA_4 = 68, + XFER_UDMA_3 = 67, + XFER_UDMA_2 = 66, + XFER_UDMA_1 = 65, + XFER_UDMA_0 = 64, + XFER_MW_DMA_4 = 36, + XFER_MW_DMA_3 = 35, + XFER_MW_DMA_2 = 34, + XFER_MW_DMA_1 = 33, + XFER_MW_DMA_0 = 32, + XFER_SW_DMA_2 = 18, + XFER_SW_DMA_1 = 17, + XFER_SW_DMA_0 = 16, + XFER_PIO_6 = 14, + XFER_PIO_5 = 13, + XFER_PIO_4 = 12, + XFER_PIO_3 = 11, + XFER_PIO_2 = 10, + XFER_PIO_1 = 9, + XFER_PIO_0 = 8, + XFER_PIO_SLOW = 0, + SETFEATURES_WC_ON = 2, + SETFEATURES_WC_OFF = 130, + SETFEATURES_RA_ON = 170, + SETFEATURES_RA_OFF = 85, + SETFEATURES_AAM_ON = 66, + SETFEATURES_AAM_OFF = 194, + SETFEATURES_SPINUP = 7, + SETFEATURES_SPINUP_TIMEOUT = 30000, + SETFEATURES_SATA_ENABLE = 16, + SETFEATURES_SATA_DISABLE = 144, + SETFEATURES_CDL = 13, + SATA_FPDMA_OFFSET = 1, + SATA_FPDMA_AA = 2, + SATA_DIPM = 3, + SATA_FPDMA_IN_ORDER = 4, + SATA_AN = 5, + SATA_SSP = 6, + SATA_DEVSLP = 9, + SETFEATURE_SENSE_DATA = 195, + SETFEATURE_SENSE_DATA_SUCC_NCQ = 196, + ATA_SET_MAX_ADDR = 0, + ATA_SET_MAX_PASSWD = 1, + ATA_SET_MAX_LOCK = 2, + ATA_SET_MAX_UNLOCK = 3, + ATA_SET_MAX_FREEZE_LOCK = 4, + ATA_SET_MAX_PASSWD_DMA = 5, + ATA_SET_MAX_UNLOCK_DMA = 6, + ATA_DCO_RESTORE = 192, + ATA_DCO_FREEZE_LOCK = 193, + ATA_DCO_IDENTIFY = 194, + ATA_DCO_SET = 195, + ATA_SMART_ENABLE = 216, + ATA_SMART_READ_VALUES = 208, + ATA_SMART_READ_THRESHOLDS = 209, + ATA_DSM_TRIM = 1, + ATA_SMART_LBAM_PASS = 79, + ATA_SMART_LBAH_PASS = 194, + ATAPI_PKT_DMA = 1, + ATAPI_DMADIR = 4, + ATAPI_CDB_LEN = 16, + SATA_PMP_MAX_PORTS = 15, + SATA_PMP_CTRL_PORT = 15, + SATA_PMP_GSCR_DWORDS = 128, + SATA_PMP_GSCR_PROD_ID = 0, + SATA_PMP_GSCR_REV = 1, + SATA_PMP_GSCR_PORT_INFO = 2, + SATA_PMP_GSCR_ERROR = 32, + SATA_PMP_GSCR_ERROR_EN = 33, + SATA_PMP_GSCR_FEAT = 64, + SATA_PMP_GSCR_FEAT_EN = 96, + SATA_PMP_PSCR_STATUS = 0, + SATA_PMP_PSCR_ERROR = 1, + SATA_PMP_PSCR_CONTROL = 2, + SATA_PMP_FEAT_BIST = 1, + SATA_PMP_FEAT_PMREQ = 2, + SATA_PMP_FEAT_DYNSSC = 4, + SATA_PMP_FEAT_NOTIFY = 8, + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA40_SHORT = 3, + ATA_CBL_PATA_UNK = 4, + ATA_CBL_PATA_IGN = 5, + ATA_CBL_SATA = 6, + SCR_STATUS = 0, + SCR_ERROR = 1, + SCR_CONTROL = 2, + SCR_ACTIVE = 3, + SCR_NOTIFICATION = 4, + SERR_DATA_RECOVERED = 1, + SERR_COMM_RECOVERED = 2, + SERR_DATA = 256, + SERR_PERSISTENT = 512, + SERR_PROTOCOL = 1024, + SERR_INTERNAL = 2048, + SERR_PHYRDY_CHG = 65536, + SERR_PHY_INT_ERR = 131072, + SERR_COMM_WAKE = 262144, + SERR_10B_8B_ERR = 524288, + SERR_DISPARITY = 1048576, + SERR_CRC = 2097152, + 
SERR_HANDSHAKE = 4194304, + SERR_LINK_SEQ_ERR = 8388608, + SERR_TRANS_ST_ERROR = 16777216, + SERR_UNRECOG_FIS = 33554432, + SERR_DEV_XCHG = 67108864, +}; + +enum ata_prot_flags { + ATA_PROT_FLAG_PIO = 1, + ATA_PROT_FLAG_DMA = 2, + ATA_PROT_FLAG_NCQ = 4, + ATA_PROT_FLAG_ATAPI = 8, + ATA_PROT_UNKNOWN = 255, + ATA_PROT_NODATA = 0, + ATA_PROT_PIO = 1, + ATA_PROT_DMA = 2, + ATA_PROT_NCQ_NODATA = 4, + ATA_PROT_NCQ = 6, + ATAPI_PROT_NODATA = 8, + ATAPI_PROT_PIO = 9, + ATAPI_PROT_DMA = 10, +}; + +struct ata_bmdma_prd { + __le32 addr; + __le32 flags_len; +}; + +typedef u64 async_cookie_t; + +enum ata_quirks { + __ATA_QUIRK_DIAGNOSTIC = 0, + __ATA_QUIRK_NODMA = 1, + __ATA_QUIRK_NONCQ = 2, + __ATA_QUIRK_MAX_SEC_128 = 3, + __ATA_QUIRK_BROKEN_HPA = 4, + __ATA_QUIRK_DISABLE = 5, + __ATA_QUIRK_HPA_SIZE = 6, + __ATA_QUIRK_IVB = 7, + __ATA_QUIRK_STUCK_ERR = 8, + __ATA_QUIRK_BRIDGE_OK = 9, + __ATA_QUIRK_ATAPI_MOD16_DMA = 10, + __ATA_QUIRK_FIRMWARE_WARN = 11, + __ATA_QUIRK_1_5_GBPS = 12, + __ATA_QUIRK_NOSETXFER = 13, + __ATA_QUIRK_BROKEN_FPDMA_AA = 14, + __ATA_QUIRK_DUMP_ID = 15, + __ATA_QUIRK_MAX_SEC_LBA48 = 16, + __ATA_QUIRK_ATAPI_DMADIR = 17, + __ATA_QUIRK_NO_NCQ_TRIM = 18, + __ATA_QUIRK_NOLPM = 19, + __ATA_QUIRK_WD_BROKEN_LPM = 20, + __ATA_QUIRK_ZERO_AFTER_TRIM = 21, + __ATA_QUIRK_NO_DMA_LOG = 22, + __ATA_QUIRK_NOTRIM = 23, + __ATA_QUIRK_MAX_SEC_1024 = 24, + __ATA_QUIRK_MAX_TRIM_128M = 25, + __ATA_QUIRK_NO_NCQ_ON_ATI = 26, + __ATA_QUIRK_NO_ID_DEV_LOG = 27, + __ATA_QUIRK_NO_LOG_DIR = 28, + __ATA_QUIRK_NO_FUA = 29, + __ATA_QUIRK_MAX = 30, +}; + +enum { + LIBATA_MAX_PRD = 128, + LIBATA_DUMB_MAX_PRD = 64, + ATA_DEF_QUEUE = 1, + ATA_MAX_QUEUE = 32, + ATA_TAG_INTERNAL = 32, + ATA_SHORT_PAUSE = 16, + ATAPI_MAX_DRAIN = 16384, + ATA_ALL_DEVICES = 3, + ATA_SHT_EMULATED = 1, + ATA_SHT_THIS_ID = -1, + ATA_TFLAG_LBA48 = 1, + ATA_TFLAG_ISADDR = 2, + ATA_TFLAG_DEVICE = 4, + ATA_TFLAG_WRITE = 8, + ATA_TFLAG_LBA = 16, + ATA_TFLAG_FUA = 32, + ATA_TFLAG_POLLING = 64, + ATA_DFLAG_LBA = 1, + ATA_DFLAG_LBA48 = 2, + ATA_DFLAG_CDB_INTR = 4, + ATA_DFLAG_NCQ = 8, + ATA_DFLAG_FLUSH_EXT = 16, + ATA_DFLAG_ACPI_PENDING = 32, + ATA_DFLAG_ACPI_FAILED = 64, + ATA_DFLAG_AN = 128, + ATA_DFLAG_TRUSTED = 256, + ATA_DFLAG_FUA = 512, + ATA_DFLAG_DMADIR = 1024, + ATA_DFLAG_NCQ_SEND_RECV = 2048, + ATA_DFLAG_NCQ_PRIO = 4096, + ATA_DFLAG_CDL = 8192, + ATA_DFLAG_CFG_MASK = 16383, + ATA_DFLAG_PIO = 16384, + ATA_DFLAG_NCQ_OFF = 32768, + ATA_DFLAG_SLEEPING = 65536, + ATA_DFLAG_DUBIOUS_XFER = 131072, + ATA_DFLAG_NO_UNLOAD = 262144, + ATA_DFLAG_UNLOCK_HPA = 524288, + ATA_DFLAG_INIT_MASK = 1048575, + ATA_DFLAG_NCQ_PRIO_ENABLED = 1048576, + ATA_DFLAG_CDL_ENABLED = 2097152, + ATA_DFLAG_RESUMING = 4194304, + ATA_DFLAG_DETACH = 16777216, + ATA_DFLAG_DETACHED = 33554432, + ATA_DFLAG_DA = 67108864, + ATA_DFLAG_DEVSLP = 134217728, + ATA_DFLAG_ACPI_DISABLED = 268435456, + ATA_DFLAG_D_SENSE = 536870912, + ATA_DFLAG_ZAC = 1073741824, + ATA_DFLAG_FEATURES_MASK = 201341696, + ATA_DEV_UNKNOWN = 0, + ATA_DEV_ATA = 1, + ATA_DEV_ATA_UNSUP = 2, + ATA_DEV_ATAPI = 3, + ATA_DEV_ATAPI_UNSUP = 4, + ATA_DEV_PMP = 5, + ATA_DEV_PMP_UNSUP = 6, + ATA_DEV_SEMB = 7, + ATA_DEV_SEMB_UNSUP = 8, + ATA_DEV_ZAC = 9, + ATA_DEV_ZAC_UNSUP = 10, + ATA_DEV_NONE = 11, + ATA_LFLAG_NO_HRST = 2, + ATA_LFLAG_NO_SRST = 4, + ATA_LFLAG_ASSUME_ATA = 8, + ATA_LFLAG_ASSUME_SEMB = 16, + ATA_LFLAG_ASSUME_CLASS = 24, + ATA_LFLAG_NO_RETRY = 32, + ATA_LFLAG_DISABLED = 64, + ATA_LFLAG_SW_ACTIVITY = 128, + ATA_LFLAG_NO_LPM = 256, + ATA_LFLAG_RST_ONCE = 512, + ATA_LFLAG_CHANGED = 1024, + 
ATA_LFLAG_NO_DEBOUNCE_DELAY = 2048, + ATA_FLAG_SLAVE_POSS = 1, + ATA_FLAG_SATA = 2, + ATA_FLAG_NO_LPM = 4, + ATA_FLAG_NO_LOG_PAGE = 32, + ATA_FLAG_NO_ATAPI = 64, + ATA_FLAG_PIO_DMA = 128, + ATA_FLAG_PIO_LBA48 = 256, + ATA_FLAG_PIO_POLLING = 512, + ATA_FLAG_NCQ = 1024, + ATA_FLAG_NO_POWEROFF_SPINDOWN = 2048, + ATA_FLAG_NO_HIBERNATE_SPINDOWN = 4096, + ATA_FLAG_DEBUGMSG = 8192, + ATA_FLAG_FPDMA_AA = 16384, + ATA_FLAG_IGN_SIMPLEX = 32768, + ATA_FLAG_NO_IORDY = 65536, + ATA_FLAG_ACPI_SATA = 131072, + ATA_FLAG_AN = 262144, + ATA_FLAG_PMP = 524288, + ATA_FLAG_FPDMA_AUX = 1048576, + ATA_FLAG_EM = 2097152, + ATA_FLAG_SW_ACTIVITY = 4194304, + ATA_FLAG_NO_DIPM = 8388608, + ATA_FLAG_SAS_HOST = 16777216, + ATA_PFLAG_EH_PENDING = 1, + ATA_PFLAG_EH_IN_PROGRESS = 2, + ATA_PFLAG_FROZEN = 4, + ATA_PFLAG_RECOVERED = 8, + ATA_PFLAG_LOADING = 16, + ATA_PFLAG_SCSI_HOTPLUG = 64, + ATA_PFLAG_INITIALIZING = 128, + ATA_PFLAG_RESETTING = 256, + ATA_PFLAG_UNLOADING = 512, + ATA_PFLAG_UNLOADED = 1024, + ATA_PFLAG_RESUMING = 65536, + ATA_PFLAG_SUSPENDED = 131072, + ATA_PFLAG_PM_PENDING = 262144, + ATA_PFLAG_INIT_GTM_VALID = 524288, + ATA_PFLAG_PIO32 = 1048576, + ATA_PFLAG_PIO32CHANGE = 2097152, + ATA_PFLAG_EXTERNAL = 4194304, + ATA_QCFLAG_ACTIVE = 1, + ATA_QCFLAG_DMAMAP = 2, + ATA_QCFLAG_RTF_FILLED = 4, + ATA_QCFLAG_IO = 8, + ATA_QCFLAG_RESULT_TF = 16, + ATA_QCFLAG_CLEAR_EXCL = 32, + ATA_QCFLAG_QUIET = 64, + ATA_QCFLAG_RETRY = 128, + ATA_QCFLAG_HAS_CDL = 256, + ATA_QCFLAG_EH = 65536, + ATA_QCFLAG_SENSE_VALID = 131072, + ATA_QCFLAG_EH_SCHEDULED = 262144, + ATA_QCFLAG_EH_SUCCESS_CMD = 524288, + ATA_HOST_SIMPLEX = 1, + ATA_HOST_STARTED = 2, + ATA_HOST_PARALLEL_SCAN = 4, + ATA_HOST_IGNORE_ATA = 8, + ATA_HOST_NO_PART = 16, + ATA_HOST_NO_SSC = 32, + ATA_HOST_NO_DEVSLP = 64, + ATA_TMOUT_INTERNAL_QUICK = 5000, + ATA_TMOUT_MAX_PARK = 30000, + ATA_TMOUT_FF_WAIT_LONG = 2000, + ATA_TMOUT_FF_WAIT = 800, + ATA_WAIT_AFTER_RESET = 150, + ATA_TMOUT_PMP_SRST_WAIT = 10000, + ATA_TMOUT_SPURIOUS_PHY = 10000, + BUS_UNKNOWN = 0, + BUS_DMA = 1, + BUS_IDLE = 2, + BUS_NOINTR = 3, + BUS_NODATA = 4, + BUS_TIMER = 5, + BUS_PIO = 6, + BUS_EDD = 7, + BUS_IDENTIFY = 8, + BUS_PACKET = 9, + PORT_UNKNOWN = 0, + PORT_ENABLED = 1, + PORT_DISABLED = 2, + ATA_NR_PIO_MODES = 7, + ATA_NR_MWDMA_MODES = 5, + ATA_NR_UDMA_MODES = 8, + ATA_SHIFT_PIO = 0, + ATA_SHIFT_MWDMA = 7, + ATA_SHIFT_UDMA = 12, + ATA_SHIFT_PRIO = 6, + ATA_PRIO_HIGH = 2, + ATA_DMA_PAD_SZ = 4, + ATA_ERING_SIZE = 32, + ATA_DEFER_LINK = 1, + ATA_DEFER_PORT = 2, + ATA_EH_DESC_LEN = 80, + ATA_EH_REVALIDATE = 1, + ATA_EH_SOFTRESET = 2, + ATA_EH_HARDRESET = 4, + ATA_EH_RESET = 6, + ATA_EH_ENABLE_LINK = 8, + ATA_EH_PARK = 32, + ATA_EH_GET_SUCCESS_SENSE = 64, + ATA_EH_SET_ACTIVE = 128, + ATA_EH_PERDEV_MASK = 225, + ATA_EH_ALL_ACTIONS = 15, + ATA_EHI_HOTPLUGGED = 1, + ATA_EHI_NO_AUTOPSY = 4, + ATA_EHI_QUIET = 8, + ATA_EHI_NO_RECOVERY = 16, + ATA_EHI_DID_SOFTRESET = 65536, + ATA_EHI_DID_HARDRESET = 131072, + ATA_EHI_PRINTINFO = 262144, + ATA_EHI_SETMODE = 524288, + ATA_EHI_POST_SETMODE = 1048576, + ATA_EHI_DID_PRINT_QUIRKS = 2097152, + ATA_EHI_DID_RESET = 196608, + ATA_EHI_TO_SLAVE_MASK = 12, + ATA_EH_MAX_TRIES = 5, + ATA_LINK_RESUME_TRIES = 5, + ATA_EH_DEV_TRIES = 3, + ATA_EH_PMP_TRIES = 5, + ATA_EH_PMP_LINK_TRIES = 3, + SATA_PMP_RW_TIMEOUT = 3000, + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, + ATA_QUIRK_DIAGNOSTIC = 1, + ATA_QUIRK_NODMA = 2, + ATA_QUIRK_NONCQ = 4, + ATA_QUIRK_MAX_SEC_128 = 8, + ATA_QUIRK_BROKEN_HPA = 16, + ATA_QUIRK_DISABLE = 32, + ATA_QUIRK_HPA_SIZE = 64, + ATA_QUIRK_IVB = 128, + 
ATA_QUIRK_STUCK_ERR = 256, + ATA_QUIRK_BRIDGE_OK = 512, + ATA_QUIRK_ATAPI_MOD16_DMA = 1024, + ATA_QUIRK_FIRMWARE_WARN = 2048, + ATA_QUIRK_1_5_GBPS = 4096, + ATA_QUIRK_NOSETXFER = 8192, + ATA_QUIRK_BROKEN_FPDMA_AA = 16384, + ATA_QUIRK_DUMP_ID = 32768, + ATA_QUIRK_MAX_SEC_LBA48 = 65536, + ATA_QUIRK_ATAPI_DMADIR = 131072, + ATA_QUIRK_NO_NCQ_TRIM = 262144, + ATA_QUIRK_NOLPM = 524288, + ATA_QUIRK_WD_BROKEN_LPM = 1048576, + ATA_QUIRK_ZERO_AFTER_TRIM = 2097152, + ATA_QUIRK_NO_DMA_LOG = 4194304, + ATA_QUIRK_NOTRIM = 8388608, + ATA_QUIRK_MAX_SEC_1024 = 16777216, + ATA_QUIRK_MAX_TRIM_128M = 33554432, + ATA_QUIRK_NO_NCQ_ON_ATI = 67108864, + ATA_QUIRK_NO_ID_DEV_LOG = 134217728, + ATA_QUIRK_NO_LOG_DIR = 268435456, + ATA_QUIRK_NO_FUA = 536870912, + ATA_DMA_MASK_ATA = 1, + ATA_DMA_MASK_ATAPI = 2, + ATA_DMA_MASK_CFA = 4, + ATAPI_READ = 0, + ATAPI_WRITE = 1, + ATAPI_READ_CD = 2, + ATAPI_PASS_THRU = 3, + ATAPI_MISC = 4, + ATA_TIMING_SETUP = 1, + ATA_TIMING_ACT8B = 2, + ATA_TIMING_REC8B = 4, + ATA_TIMING_CYC8B = 8, + ATA_TIMING_8BIT = 14, + ATA_TIMING_ACTIVE = 16, + ATA_TIMING_RECOVER = 32, + ATA_TIMING_DMACK_HOLD = 64, + ATA_TIMING_CYCLE = 128, + ATA_TIMING_UDMA = 256, + ATA_TIMING_ALL = 511, + ATA_ACPI_FILTER_SETXFER = 1, + ATA_ACPI_FILTER_LOCK = 2, + ATA_ACPI_FILTER_DIPM = 4, + ATA_ACPI_FILTER_FPDMA_OFFSET = 8, + ATA_ACPI_FILTER_FPDMA_AA = 16, + ATA_ACPI_FILTER_DEFAULT = 7, +}; + +enum ata_completion_errors { + AC_ERR_OK = 0, + AC_ERR_DEV = 1, + AC_ERR_HSM = 2, + AC_ERR_TIMEOUT = 4, + AC_ERR_MEDIA = 8, + AC_ERR_ATA_BUS = 16, + AC_ERR_HOST_BUS = 32, + AC_ERR_SYSTEM = 64, + AC_ERR_INVALID = 128, + AC_ERR_OTHER = 256, + AC_ERR_NODEV_HINT = 512, + AC_ERR_NCQ = 1024, +}; + +enum ata_lpm_policy { + ATA_LPM_UNKNOWN = 0, + ATA_LPM_MAX_POWER = 1, + ATA_LPM_MED_POWER = 2, + ATA_LPM_MED_POWER_WITH_DIPM = 3, + ATA_LPM_MIN_POWER_WITH_PARTIAL = 4, + ATA_LPM_MIN_POWER = 5, +}; + +struct ata_queued_cmd; + +typedef void (*ata_qc_cb_t)(struct ata_queued_cmd *); + +struct ata_taskfile { + long unsigned int flags; + u8 protocol; + u8 ctl; + u8 hob_feature; + u8 hob_nsect; + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + union { + u8 error; + u8 feature; + }; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + union { + u8 status; + u8 command; + }; + u32 auxiliary; +}; + +struct ata_port; + +struct ata_device; + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + struct scsi_cmnd *scsicmd; + void (*scsidone)(struct scsi_cmnd *); + struct ata_taskfile tf; + u8 cdb[16]; + long unsigned int flags; + unsigned int tag; + unsigned int hw_tag; + unsigned int n_elem; + unsigned int orig_n_elem; + int dma_dir; + unsigned int sect_size; + unsigned int nbytes; + unsigned int extrabytes; + unsigned int curbytes; + struct scatterlist sgent; + struct scatterlist *sg; + struct scatterlist *cursg; + unsigned int cursg_ofs; + unsigned int err_mask; + struct ata_taskfile result_tf; + ata_qc_cb_t complete_fn; + void *private_data; + void *lldd_task; +}; + +struct ata_link; + +typedef int (*ata_prereset_fn_t)(struct ata_link *, long unsigned int); + +struct ata_eh_info { + struct ata_device *dev; + u32 serror; + unsigned int err_mask; + unsigned int action; + unsigned int dev_action[2]; + unsigned int flags; + unsigned int probe_mask; + char desc[80]; + int desc_len; +}; + +struct ata_eh_context { + struct ata_eh_info i; + int tries[2]; + int cmd_timeout_idx[16]; + unsigned int classes[2]; + unsigned int did_probe_mask; + unsigned int unloaded_mask; + unsigned int saved_ncq_enabled; + u8 saved_xfer_mode[2]; + long 
unsigned int last_reset; +}; + +struct ata_ering_entry { + unsigned int eflags; + unsigned int err_mask; + u64 timestamp; +}; + +struct ata_ering { + int cursor; + long: 32; + struct ata_ering_entry ring[32]; +}; + +struct ata_cpr_log; + +struct ata_cdl; + +struct ata_device { + struct ata_link *link; + unsigned int devno; + unsigned int quirks; + long unsigned int flags; + struct scsi_device *sdev; + void *private_data; + struct device tdev; + u64 n_sectors; + u64 n_native_sectors; + unsigned int class; + long unsigned int unpark_deadline; + u8 pio_mode; + u8 dma_mode; + u8 xfer_mode; + unsigned int xfer_shift; + unsigned int multi_count; + unsigned int max_sectors; + unsigned int cdb_len; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + u16 cylinders; + u16 heads; + u16 sectors; + long: 32; + long: 32; + long: 32; + long: 32; + union { + u16 id[256]; + u32 gscr[128]; + }; + u8 devslp_timing[8]; + u8 ncq_send_recv_cmds[20]; + u8 ncq_non_data_cmds[64]; + u32 zac_zoned_cap; + u32 zac_zones_optimal_open; + u32 zac_zones_optimal_nonseq; + u32 zac_zones_max_open; + struct ata_cpr_log *cpr_log; + struct ata_cdl *cdl; + int spdn_cnt; + struct ata_ering ering; + u8 sector_buf[512]; +}; + +struct ata_link { + struct ata_port *ap; + int pmp; + struct device tdev; + unsigned int active_tag; + u32 sactive; + unsigned int flags; + u32 saved_scontrol; + unsigned int hw_sata_spd_limit; + unsigned int sata_spd_limit; + unsigned int sata_spd; + enum ata_lpm_policy lpm_policy; + struct ata_eh_info eh_info; + struct ata_eh_context eh_context; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ata_device device[2]; + long unsigned int last_lpm_change; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef int (*ata_reset_fn_t)(struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*ata_postreset_fn_t)(struct ata_link *, unsigned int *); + +enum sw_activity { + OFF = 0, + BLINK_ON = 1, + BLINK_OFF = 2, +}; + +struct ata_ioports { + void *cmd_addr; + void *data_addr; + void *error_addr; + void *feature_addr; + void *nsect_addr; + void *lbal_addr; + void *lbam_addr; + void *lbah_addr; + void *device_addr; + void *status_addr; + void *command_addr; + void *altstatus_addr; + void *ctl_addr; + void *bmdma_addr; + void *scr_addr; +}; + +struct ata_port_operations; + +struct ata_host { + spinlock_t lock; + struct device *dev; + void * const *iomap; + unsigned int n_ports; + unsigned int n_tags; + void *private_data; + struct ata_port_operations *ops; + long unsigned int flags; + struct kref kref; + struct mutex eh_mutex; + struct task_struct *eh_owner; + struct ata_port *simplex_claimed; + struct ata_port *ports[0]; +}; + +struct ata_port_operations { + int (*qc_defer)(struct ata_queued_cmd *); + int (*check_atapi_dma)(struct ata_queued_cmd *); + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *); + unsigned int (*qc_issue)(struct ata_queued_cmd *); + void (*qc_fill_rtf)(struct ata_queued_cmd *); + void (*qc_ncq_fill_rtf)(struct ata_port *, u64); + int (*cable_detect)(struct ata_port *); + unsigned int (*mode_filter)(struct ata_device *, unsigned int); + void (*set_piomode)(struct ata_port *, struct ata_device *); + void (*set_dmamode)(struct ata_port *, struct ata_device *); + int (*set_mode)(struct ata_link *, struct ata_device **); + unsigned int (*read_id)(struct ata_device *, struct ata_taskfile *, __le16 *); + void (*dev_config)(struct ata_device *); + void (*freeze)(struct ata_port *); + 
void (*thaw)(struct ata_port *); + ata_prereset_fn_t prereset; + ata_reset_fn_t softreset; + ata_reset_fn_t hardreset; + ata_postreset_fn_t postreset; + ata_prereset_fn_t pmp_prereset; + ata_reset_fn_t pmp_softreset; + ata_reset_fn_t pmp_hardreset; + ata_postreset_fn_t pmp_postreset; + void (*error_handler)(struct ata_port *); + void (*lost_interrupt)(struct ata_port *); + void (*post_internal_cmd)(struct ata_queued_cmd *); + void (*sched_eh)(struct ata_port *); + void (*end_eh)(struct ata_port *); + int (*scr_read)(struct ata_link *, unsigned int, u32 *); + int (*scr_write)(struct ata_link *, unsigned int, u32); + void (*pmp_attach)(struct ata_port *); + void (*pmp_detach)(struct ata_port *); + int (*set_lpm)(struct ata_link *, enum ata_lpm_policy, unsigned int); + int (*port_suspend)(struct ata_port *, pm_message_t); + int (*port_resume)(struct ata_port *); + int (*port_start)(struct ata_port *); + void (*port_stop)(struct ata_port *); + void (*host_stop)(struct ata_host *); + void (*sff_dev_select)(struct ata_port *, unsigned int); + void (*sff_set_devctl)(struct ata_port *, u8); + u8 (*sff_check_status)(struct ata_port *); + u8 (*sff_check_altstatus)(struct ata_port *); + void (*sff_tf_load)(struct ata_port *, const struct ata_taskfile *); + void (*sff_tf_read)(struct ata_port *, struct ata_taskfile *); + void (*sff_exec_command)(struct ata_port *, const struct ata_taskfile *); + unsigned int (*sff_data_xfer)(struct ata_queued_cmd *, unsigned char *, unsigned int, int); + void (*sff_irq_on)(struct ata_port *); + bool (*sff_irq_check)(struct ata_port *); + void (*sff_irq_clear)(struct ata_port *); + void (*sff_drain_fifo)(struct ata_queued_cmd *); + void (*bmdma_setup)(struct ata_queued_cmd *); + void (*bmdma_start)(struct ata_queued_cmd *); + void (*bmdma_stop)(struct ata_queued_cmd *); + u8 (*bmdma_status)(struct ata_port *); + ssize_t (*em_show)(struct ata_port *, char *); + ssize_t (*em_store)(struct ata_port *, const char *, size_t); + ssize_t (*sw_activity_show)(struct ata_device *, char *); + ssize_t (*sw_activity_store)(struct ata_device *, enum sw_activity); + ssize_t (*transmit_led_message)(struct ata_port *, u32, ssize_t); + const struct ata_port_operations *inherits; +}; + +struct ata_port_stats { + long unsigned int unhandled_irq; + long unsigned int idle_irq; + long unsigned int rw_reqbuf; +}; + +struct ata_port { + struct Scsi_Host *scsi_host; + struct ata_port_operations *ops; + spinlock_t *lock; + long unsigned int flags; + unsigned int pflags; + unsigned int print_id; + unsigned int port_no; + struct ata_ioports ioaddr; + u8 ctl; + u8 last_ctl; + struct ata_link *sff_pio_task_link; + struct delayed_work sff_pio_task; + struct ata_bmdma_prd *bmdma_prd; + dma_addr_t bmdma_prd_dma; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + unsigned int cbl; + struct ata_queued_cmd qcmd[33]; + long: 32; + u64 qc_active; + int nr_active_links; + long: 32; + struct ata_link link; + struct ata_link *slave_link; + int nr_pmp_links; + struct ata_link *pmp_link; + struct ata_link *excl_link; + struct ata_port_stats stats; + struct ata_host *host; + struct device *dev; + long: 32; + struct device tdev; + struct mutex scsi_scan_mutex; + struct delayed_work hotplug_task; + struct delayed_work scsi_rescan_task; + unsigned int hsm_task_state; + struct list_head eh_done_q; + wait_queue_head_t eh_wait_q; + int eh_tries; + struct completion park_req_pending; + pm_message_t pm_mesg; + enum ata_lpm_policy target_lpm_policy; + struct timer_list fastdrain_timer; + 
unsigned int fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; + void *private_data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ata_cpr { + u8 num; + u8 num_storage_elements; + long: 32; + u64 start_lba; + u64 num_lbas; +}; + +struct ata_cpr_log { + u8 nr_cpr; + long: 32; + struct ata_cpr cpr[0]; +}; + +struct ata_cdl { + u8 desc_log_buf[512]; + u8 ncq_sense_log_buf[1024]; +}; + +struct ata_port_info { + long unsigned int flags; + long unsigned int link_flags; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + struct ata_port_operations *port_ops; + void *private_data; +}; + +enum { + K2_FLAG_SATA_8_PORTS = 16777216, + K2_FLAG_NO_ATAPI_DMA = 33554432, + K2_FLAG_BAR_POS_3 = 67108864, + K2_SATA_TF_CMD_OFFSET = 0, + K2_SATA_TF_DATA_OFFSET = 0, + K2_SATA_TF_ERROR_OFFSET = 4, + K2_SATA_TF_NSECT_OFFSET = 8, + K2_SATA_TF_LBAL_OFFSET = 12, + K2_SATA_TF_LBAM_OFFSET = 16, + K2_SATA_TF_LBAH_OFFSET = 20, + K2_SATA_TF_DEVICE_OFFSET = 24, + K2_SATA_TF_CMDSTAT_OFFSET = 28, + K2_SATA_TF_CTL_OFFSET = 32, + K2_SATA_DMA_CMD_OFFSET = 48, + K2_SATA_SCR_STATUS_OFFSET = 64, + K2_SATA_SCR_ERROR_OFFSET = 68, + K2_SATA_SCR_CONTROL_OFFSET = 72, + K2_SATA_SICR1_OFFSET = 128, + K2_SATA_SICR2_OFFSET = 132, + K2_SATA_SIM_OFFSET = 136, + K2_SATA_PORT_OFFSET = 256, + chip_svw4 = 0, + chip_svw8 = 1, + chip_svw42 = 2, + chip_svw43 = 3, +}; + +struct netdev_xmit { + u16 recursion; + u8 more; + u8 skip_txqueue; +}; + +struct flow_keys_basic { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; +}; + +enum { + SKBTX_HW_TSTAMP = 1, + SKBTX_SW_TSTAMP = 2, + SKBTX_IN_PROGRESS = 4, + SKBTX_HW_TSTAMP_USE_CYCLES = 8, + SKBTX_WIFI_STATUS = 16, + SKBTX_HW_TSTAMP_NETDEV = 32, + SKBTX_SCHED_TSTAMP = 64, +}; + +enum { + SKBFL_ZEROCOPY_ENABLE = 1, + SKBFL_SHARED_FRAG = 2, + SKBFL_PURE_ZEROCOPY = 4, + SKBFL_DONT_ORPHAN = 8, + SKBFL_MANAGED_FRAG_REFS = 16, +}; + +enum { + SKB_GSO_TCPV4 = 1, + SKB_GSO_DODGY = 2, + SKB_GSO_TCP_ECN = 4, + SKB_GSO_TCP_FIXEDID = 8, + SKB_GSO_TCPV6 = 16, + SKB_GSO_FCOE = 32, + SKB_GSO_GRE = 64, + SKB_GSO_GRE_CSUM = 128, + SKB_GSO_IPXIP4 = 256, + SKB_GSO_IPXIP6 = 512, + SKB_GSO_UDP_TUNNEL = 1024, + SKB_GSO_UDP_TUNNEL_CSUM = 2048, + SKB_GSO_PARTIAL = 4096, + SKB_GSO_TUNNEL_REMCSUM = 8192, + SKB_GSO_SCTP = 16384, + SKB_GSO_ESP = 32768, + SKB_GSO_UDP = 65536, + SKB_GSO_UDP_L4 = 131072, + SKB_GSO_FRAGLIST = 262144, +}; + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE = 0, + PKT_HASH_TYPE_L2 = 1, + PKT_HASH_TYPE_L3 = 2, + PKT_HASH_TYPE_L4 = 3, +}; + +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP = 1, + XDP_PASS = 2, + XDP_TX = 3, + XDP_REDIRECT = 4, +}; + +struct netdev_hw_addr { + struct list_head list; + struct rb_node node; + unsigned char addr[32]; + unsigned char type; + bool global_use; + int sync_cnt; + int refcount; + int synced; + struct callback_head callback_head; +}; + +enum { + NAPIF_STATE_SCHED = 1, + NAPIF_STATE_MISSED = 2, + NAPIF_STATE_DISABLE = 4, + NAPIF_STATE_NPSVC = 8, + NAPIF_STATE_LISTED = 16, + NAPIF_STATE_NO_BUSY_POLL = 32, + NAPIF_STATE_IN_BUSY_POLL = 64, + NAPIF_STATE_PREFER_BUSY_POLL = 128, + NAPIF_STATE_THREADED = 256, + NAPIF_STATE_SCHED_THREADED = 512, +}; + +enum gro_result { + GRO_MERGED = 0, + GRO_MERGED_FREE = 1, + GRO_HELD = 2, + GRO_NORMAL = 3, + GRO_CONSUMED = 4, +}; + +typedef enum gro_result gro_result_t; + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF = 0, + __QUEUE_STATE_STACK_XOFF = 1, + __QUEUE_STATE_FROZEN = 2, +}; + +struct pp_memory_provider_params 
{ + void *mp_priv; +}; + +struct rps_map; + +struct rps_dev_flow_table; + +struct netdev_rx_queue { + struct xdp_rxq_info xdp_rxq; + struct rps_map *rps_map; + struct rps_dev_flow_table *rps_flow_table; + struct kobject kobj; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct napi_struct *napi; + struct pp_memory_provider_params mp_params; + long: 32; + long: 32; +}; + +struct dim_cq_moder; + +struct dim_irq_moder { + u8 profile_flags; + u8 coal_flags; + u8 dim_rx_mode; + u8 dim_tx_mode; + struct dim_cq_moder *rx_profile; + struct dim_cq_moder *tx_profile; + void (*rx_dim_work)(struct work_struct *); + void (*tx_dim_work)(struct work_struct *); +}; + +struct sd_flow_limit; + +struct softnet_data { + struct list_head poll_list; + struct sk_buff_head process_queue; + local_lock_t process_queue_bh_lock; + unsigned int processed; + unsigned int time_squeeze; + struct softnet_data *rps_ipi_list; + unsigned int received_rps; + bool in_net_rx_action; + bool in_napi_threaded_poll; + struct sd_flow_limit *flow_limit; + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; + struct netdev_xmit xmit; + unsigned int input_queue_head; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + call_single_data_t csd; + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_tail; + struct sk_buff_head input_pkt_queue; + long: 32; + struct napi_struct backlog; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_t dropped; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t defer_lock; + int defer_count; + int defer_ipi_scheduled; + struct sk_buff *defer_list; + call_single_data_t defer_csd; +}; + +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS = 1, + ETH_SS_PRIV_FLAGS = 2, + ETH_SS_NTUPLE_FILTERS = 3, + ETH_SS_FEATURES = 4, + ETH_SS_RSS_HASH_FUNCS = 5, + ETH_SS_TUNABLES = 6, + ETH_SS_PHY_STATS = 7, + ETH_SS_PHY_TUNABLES = 8, + ETH_SS_LINK_MODES = 9, + ETH_SS_MSG_CLASSES = 10, + ETH_SS_WOL_MODES = 11, + ETH_SS_SOF_TIMESTAMPING = 12, + ETH_SS_TS_TX_TYPES = 13, + ETH_SS_TS_RX_FILTERS = 14, + ETH_SS_UDP_TUNNEL_TYPES = 15, + ETH_SS_STATS_STD = 16, + ETH_SS_STATS_ETH_PHY = 17, + ETH_SS_STATS_ETH_MAC = 18, + ETH_SS_STATS_ETH_CTRL = 19, + ETH_SS_STATS_RMON = 20, + ETH_SS_COUNT = 21, +}; + +struct virtio_driver { + struct device_driver driver; + const struct virtio_device_id *id_table; + const unsigned int *feature_table; + unsigned int feature_table_size; + const unsigned int *feature_table_legacy; + unsigned int feature_table_size_legacy; + int (*validate)(struct virtio_device *); + int (*probe)(struct virtio_device *); + void (*scan)(struct virtio_device *); + void (*remove)(struct virtio_device *); + void (*config_changed)(struct virtio_device *); + int (*freeze)(struct virtio_device *); + int (*restore)(struct virtio_device *); +}; + +typedef __u16 __virtio16; + +typedef __u32 __virtio32; + +typedef __u64 __virtio64; + +struct virtio_net_config { + __u8 mac[6]; + __virtio16 status; + __virtio16 max_virtqueue_pairs; + __virtio16 mtu; + __le32 speed; + __u8 duplex; + __u8 rss_max_key_size; + __le16 rss_max_indirection_table_length; + __le32 supported_hash_types; +}; + +struct virtio_net_hdr_v1 { + __u8 flags; + __u8 gso_type; + __virtio16 hdr_len; + __virtio16 gso_size; + union { + struct { + __virtio16 csum_start; + __virtio16 csum_offset; + }; + struct { + __virtio16 start; + __virtio16 offset; + } csum; + 
struct { + __le16 segments; + __le16 dup_acks; + } rsc; + }; + __virtio16 num_buffers; +}; + +struct virtio_net_hdr_v1_hash { + struct virtio_net_hdr_v1 hdr; + __le32 hash_value; + __le16 hash_report; + __le16 padding; +}; + +struct virtio_net_hdr { + __u8 flags; + __u8 gso_type; + __virtio16 hdr_len; + __virtio16 gso_size; + __virtio16 csum_start; + __virtio16 csum_offset; +}; + +struct virtio_net_hdr_mrg_rxbuf { + struct virtio_net_hdr hdr; + __virtio16 num_buffers; +}; + +struct virtio_net_ctrl_hdr { + __u8 class; + __u8 cmd; +}; + +typedef __u8 virtio_net_ctrl_ack; + +struct virtio_net_ctrl_mac { + __virtio32 entries; + __u8 macs[0]; +}; + +struct virtio_net_ctrl_mq { + __virtio16 virtqueue_pairs; +}; + +struct virtio_net_ctrl_coal_tx { + __le32 tx_max_packets; + __le32 tx_usecs; +}; + +struct virtio_net_ctrl_coal_rx { + __le32 rx_max_packets; + __le32 rx_usecs; +}; + +struct virtio_net_ctrl_coal { + __le32 max_packets; + __le32 max_usecs; +}; + +struct virtio_net_ctrl_coal_vq { + __le16 vqn; + __le16 reserved; + struct virtio_net_ctrl_coal coal; +}; + +struct virtio_net_stats_capabilities { + __le64 supported_stats_types[1]; +}; + +struct virtio_net_ctrl_queue_stats { + struct { + __le16 vq_index; + __le16 reserved[3]; + __le64 types_bitmap[1]; + } stats[1]; +}; + +struct virtio_net_stats_reply_hdr { + __u8 type; + __u8 reserved; + __le16 vq_index; + __le16 reserved1; + __le16 size; +}; + +struct dim_cq_moder { + u16 usec; + u16 pkts; + u16 comps; + u8 cq_period_mode; + struct callback_head rcu; +}; + +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +struct dim_stats { + int ppms; + int bpms; + int epms; + int cpms; + int cpe_ratio; +}; + +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; + long: 32; +}; + +enum dim_cq_period_mode { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 1, + DIM_CQ_PERIOD_NUM_MODES = 2, +}; + +enum dim_state { + DIM_START_MEASURE = 0, + DIM_MEASURE_IN_PROGRESS = 1, + DIM_APPLY_NEW_PROFILE = 2, +}; + +struct failover_ops { + int (*slave_pre_register)(struct net_device *, struct net_device *); + int (*slave_register)(struct net_device *, struct net_device *); + int (*slave_pre_unregister)(struct net_device *, struct net_device *); + int (*slave_unregister)(struct net_device *, struct net_device *); + int (*slave_link_change)(struct net_device *, struct net_device *); + int (*slave_name_change)(struct net_device *, struct net_device *); + rx_handler_result_t (*slave_handle_frame)(struct sk_buff **); +}; + +struct failover { + struct list_head list; + struct net_device *failover_dev; + netdevice_tracker dev_tracker; + struct failover_ops *ops; +}; + +struct rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *, char *); + ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); +}; + +struct netdev_queue_stats_rx { + u64 bytes; + u64 packets; + u64 alloc_fail; + u64 hw_drops; + u64 hw_drop_overruns; + u64 csum_unnecessary; + u64 csum_none; + u64 csum_bad; + u64 hw_gro_packets; + u64 hw_gro_bytes; + u64 hw_gro_wire_packets; + u64 hw_gro_wire_bytes; + u64 hw_drop_ratelimits; +}; + +struct netdev_queue_stats_tx { + u64 bytes; + u64 packets; + u64 hw_drops; + u64 hw_drop_errors; + u64 csum_none; + u64 needs_csum; + u64 
hw_gso_packets; + u64 hw_gso_bytes; + u64 hw_gso_wire_packets; + u64 hw_gso_wire_bytes; + u64 hw_drop_ratelimits; + u64 stop; + u64 wake; +}; + +struct ewma_pkt_len { + long unsigned int internal; +}; + +struct virtnet_stat_desc { + char desc[32]; + size_t offset; + size_t qstat_offset; +}; + +struct virtnet_sq_free_stats { + u64 packets; + u64 bytes; + u64 napi_packets; + u64 napi_bytes; + u64 xsk; +}; + +struct virtnet_sq_stats { + struct u64_stats_sync syncp; + long: 32; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t xdp_tx; + u64_stats_t xdp_tx_drops; + u64_stats_t kicks; + u64_stats_t tx_timeouts; + u64_stats_t stop; + u64_stats_t wake; +}; + +struct virtnet_rq_stats { + struct u64_stats_sync syncp; + long: 32; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t drops; + u64_stats_t xdp_packets; + u64_stats_t xdp_tx; + u64_stats_t xdp_redirects; + u64_stats_t xdp_drops; + u64_stats_t kicks; +}; + +struct virtnet_interrupt_coalesce { + u32 max_packets; + u32 max_usecs; +}; + +struct virtnet_rq_dma { + dma_addr_t addr; + u32 ref; + u16 len; + u16 need_sync; +}; + +struct send_queue { + struct virtqueue *vq; + struct scatterlist sg[19]; + char name[16]; + struct virtnet_sq_stats stats; + struct virtnet_interrupt_coalesce intr_coal; + struct napi_struct napi; + bool reset; + struct xsk_buff_pool *xsk_pool; + dma_addr_t xsk_hdr_dma_addr; + long: 32; +}; + +struct receive_queue { + struct virtqueue *vq; + long: 32; + struct napi_struct napi; + struct bpf_prog *xdp_prog; + long: 32; + struct virtnet_rq_stats stats; + u16 calls; + bool dim_enabled; + struct mutex dim_lock; + struct dim dim; + u32 packets_in_napi; + struct virtnet_interrupt_coalesce intr_coal; + struct page *pages; + struct ewma_pkt_len mrg_avg_pkt_len; + struct page_frag alloc_frag; + struct scatterlist sg[19]; + unsigned int min_buf_len; + char name[16]; + long: 32; + struct xdp_rxq_info xdp_rxq; + struct virtnet_rq_dma *last_dma; + struct xsk_buff_pool *xsk_pool; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xdp_rxq_info xsk_rxq_info; + struct xdp_buff **xsk_buffs; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct virtio_net_ctrl_rss { + u32 hash_types; + u16 indirection_table_mask; + u16 unclassified_queue; + u16 hash_cfg_reserved; + u16 max_tx_vq; + u8 hash_key_length; + u8 key[40]; + u16 *indirection_table; +}; + +struct control_buf { + struct virtio_net_ctrl_hdr hdr; + virtio_net_ctrl_ack status; +}; + +struct virtnet_info { + struct virtio_device *vdev; + struct virtqueue *cvq; + struct net_device *dev; + struct send_queue *sq; + struct receive_queue *rq; + unsigned int status; + u16 max_queue_pairs; + u16 curr_queue_pairs; + u16 xdp_queue_pairs; + bool xdp_enabled; + bool big_packets; + unsigned int big_packets_num_skbfrags; + bool mergeable_rx_bufs; + bool has_rss; + bool has_rss_hash_report; + u8 rss_key_size; + u16 rss_indir_table_size; + u32 rss_hash_types_supported; + u32 rss_hash_types_saved; + struct virtio_net_ctrl_rss rss; + bool has_cvq; + struct mutex cvq_lock; + bool any_header_sg; + u8 hdr_len; + struct delayed_work refill; + bool refill_enabled; + spinlock_t refill_lock; + struct work_struct config_work; + struct work_struct rx_mode_work; + bool rx_mode_work_enabled; + bool affinity_hint_set; + struct hlist_node node; + struct hlist_node node_dead; + struct control_buf *ctrl; + u8 duplex; + u32 speed; + bool rx_dim_enabled; + struct virtnet_interrupt_coalesce intr_coal_tx; + struct 
virtnet_interrupt_coalesce intr_coal_rx; + long unsigned int guest_offloads; + long unsigned int guest_offloads_capable; + struct failover *failover; + u64 device_stats_cap; +}; + +struct virtio_net_common_hdr { + union { + struct virtio_net_hdr hdr; + struct virtio_net_hdr_mrg_rxbuf mrg_hdr; + struct virtio_net_hdr_v1_hash hash_v1_hdr; + }; +}; + +enum virtnet_xmit_type { + VIRTNET_XMIT_TYPE_SKB = 0, + VIRTNET_XMIT_TYPE_SKB_ORPHAN = 1, + VIRTNET_XMIT_TYPE_XDP = 2, + VIRTNET_XMIT_TYPE_XSK = 3, +}; + +struct virtnet_stats_ctx { + bool to_qstat; + u32 desc_num[3]; + u64 bitmap[3]; + u32 size[3]; + u64 *data; +}; + +struct va_format { + const char *fmt; + va_list *va; +}; + +enum usb3_link_state { + USB3_LPM_U0 = 0, + USB3_LPM_U1 = 1, + USB3_LPM_U2 = 2, + USB3_LPM_U3 = 3, +}; + +struct usb_tt { + struct usb_device *hub; + int multi; + unsigned int think_time; + void *hcpriv; + spinlock_t lock; + struct list_head clear_list; + struct work_struct clear_work; +}; + +struct giveback_urb_bh { + bool running; + bool high_prio; + spinlock_t lock; + struct list_head head; + struct work_struct bh; + struct usb_host_endpoint *completing_ep; +}; + +enum usb_dev_authorize_policy { + USB_DEVICE_AUTHORIZE_NONE = 0, + USB_DEVICE_AUTHORIZE_ALL = 1, + USB_DEVICE_AUTHORIZE_INTERNAL = 2, +}; + +struct hc_driver; + +struct usb_phy; + +struct dma_pool; + +struct gen_pool; + +struct usb_hcd { + struct usb_bus self; + struct kref kref; + const char *product_desc; + int speed; + char irq_descr[24]; + struct timer_list rh_timer; + struct urb *status_urb; + struct work_struct wakeup_work; + struct work_struct died_work; + const struct hc_driver *driver; + struct usb_phy *usb_phy; + struct usb_phy_roothub *phy_roothub; + long unsigned int flags; + enum usb_dev_authorize_policy dev_policy; + unsigned int rh_registered: 1; + unsigned int rh_pollable: 1; + unsigned int msix_enabled: 1; + unsigned int msi_enabled: 1; + unsigned int skip_phy_initialization: 1; + unsigned int uses_new_polling: 1; + unsigned int has_tt: 1; + unsigned int amd_resume_bug: 1; + unsigned int can_do_streams: 1; + unsigned int tpl_support: 1; + unsigned int cant_recv_wakeups: 1; + unsigned int irq; + void *regs; + resource_size_t rsrc_start; + resource_size_t rsrc_len; + unsigned int power_budget; + struct giveback_urb_bh high_prio_bh; + struct giveback_urb_bh low_prio_bh; + struct mutex *address0_mutex; + struct mutex *bandwidth_mutex; + struct usb_hcd *shared_hcd; + struct usb_hcd *primary_hcd; + struct dma_pool *pool[4]; + int state; + struct gen_pool *localmem_pool; + long unsigned int hcd_priv[0]; +}; + +struct hc_driver { + const char *description; + const char *product_desc; + size_t hcd_priv_size; + irqreturn_t (*irq)(struct usb_hcd *); + int flags; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*pci_suspend)(struct usb_hcd *, bool); + int (*pci_resume)(struct usb_hcd *, pm_message_t); + int (*pci_poweroff_late)(struct usb_hcd *, bool); + void (*stop)(struct usb_hcd *); + void (*shutdown)(struct usb_hcd *); + int (*get_frame_number)(struct usb_hcd *); + int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t); + int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); + int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t); + void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); + void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); + void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); + int (*hub_status_data)(struct usb_hcd *, char *); + int 
(*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); + int (*bus_suspend)(struct usb_hcd *); + int (*bus_resume)(struct usb_hcd *); + int (*start_port_reset)(struct usb_hcd *, unsigned int); + long unsigned int (*get_resuming_ports)(struct usb_hcd *); + void (*relinquish_port)(struct usb_hcd *, int); + int (*port_handed_over)(struct usb_hcd *, int); + void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); + int (*alloc_dev)(struct usb_hcd *, struct usb_device *); + void (*free_dev)(struct usb_hcd *, struct usb_device *); + int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t); + int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*address_device)(struct usb_hcd *, struct usb_device *, unsigned int); + int (*enable_device)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int (*reset_device)(struct usb_hcd *, struct usb_device *); + int (*update_device)(struct usb_hcd *, struct usb_device *); + int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); + int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*find_raw_port_number)(struct usb_hcd *, int); + int (*port_power)(struct usb_hcd *, int, bool); + int (*submit_single_step_set_feature)(struct usb_hcd *, struct urb *, int); +}; + +struct usb_hub_descriptor { + __u8 bDescLength; + __u8 bDescriptorType; + __u8 bNbrPorts; + __le16 wHubCharacteristics; + __u8 bPwrOn2PwrGood; + __u8 bHubContrCurrent; + union { + struct { + __u8 DeviceRemovable[4]; + __u8 PortPwrCtrlMask[4]; + } hs; + struct { + __u8 bHubHdrDecLat; + __le16 wHubDelay; + __le16 DeviceRemovable; + } __attribute__((packed)) ss; + } u; +} __attribute__((packed)); + +struct xhci_cap_regs { + __le32 hc_capbase; + __le32 hcs_params1; + __le32 hcs_params2; + __le32 hcs_params3; + __le32 hcc_params; + __le32 db_off; + __le32 run_regs_off; + __le32 hcc_params2; +}; + +struct xhci_op_regs { + __le32 command; + __le32 status; + __le32 page_size; + __le32 reserved1; + __le32 reserved2; + __le32 dev_notification; + __le64 cmd_ring; + __le32 reserved3[4]; + __le64 dcbaa_ptr; + __le32 config_reg; + __le32 reserved4[241]; + __le32 port_status_base; + __le32 port_power_base; + __le32 port_link_base; + __le32 reserved5; + __le32 reserved6[1016]; +}; + +struct xhci_intr_reg { + __le32 irq_pending; + __le32 irq_control; + __le32 erst_size; + __le32 rsvd; + __le64 erst_base; + __le64 erst_dequeue; +}; + +struct xhci_run_regs { + __le32 microframe_index; + __le32 rsvd[7]; + struct xhci_intr_reg ir_set[128]; +}; + +struct xhci_doorbell_array { + __le32 doorbell[256]; +}; + +struct xhci_container_ctx { + unsigned int type; + int size; + u8 *bytes; + dma_addr_t dma; +}; + +struct xhci_ep_ctx { + __le32 ep_info; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + __le32 reserved[3]; +}; + +union xhci_trb; + +struct xhci_command { + struct xhci_container_ctx *in_ctx; + u32 status; + int slot_id; + 
struct completion *completion; + union xhci_trb *command_trb; + struct list_head cmd_list; + unsigned int timeout_ms; +}; + +struct xhci_link_trb { + __le64 segment_ptr; + __le32 intr_target; + __le32 control; +}; + +struct xhci_transfer_event { + __le64 buffer; + __le32 transfer_len; + __le32 flags; +}; + +struct xhci_event_cmd { + __le64 cmd_trb; + __le32 status; + __le32 flags; +}; + +struct xhci_generic_trb { + __le32 field[4]; +}; + +union xhci_trb { + struct xhci_link_trb link; + struct xhci_transfer_event trans_event; + struct xhci_event_cmd event_cmd; + struct xhci_generic_trb generic; +}; + +struct xhci_stream_ctx { + __le64 stream_ring; + __le32 reserved[2]; +}; + +struct xhci_ring; + +struct xhci_stream_info { + struct xhci_ring **stream_rings; + unsigned int num_streams; + struct xhci_stream_ctx *stream_ctx_array; + unsigned int num_stream_ctxs; + dma_addr_t ctx_array_dma; + struct xarray trb_address_map; + struct xhci_command *free_streams_command; +}; + +enum xhci_ring_type { + TYPE_CTRL = 0, + TYPE_ISOC = 1, + TYPE_BULK = 2, + TYPE_INTR = 3, + TYPE_STREAM = 4, + TYPE_COMMAND = 5, + TYPE_EVENT = 6, +}; + +struct xhci_segment; + +struct xhci_ring { + struct xhci_segment *first_seg; + struct xhci_segment *last_seg; + union xhci_trb *enqueue; + struct xhci_segment *enq_seg; + union xhci_trb *dequeue; + struct xhci_segment *deq_seg; + struct list_head td_list; + u32 cycle_state; + unsigned int stream_id; + unsigned int num_segs; + unsigned int num_trbs_free; + unsigned int bounce_buf_len; + enum xhci_ring_type type; + bool last_td_was_short; + struct xarray *trb_address_map; +}; + +struct xhci_bw_info { + unsigned int ep_interval; + unsigned int mult; + unsigned int num_packets; + unsigned int max_packet_size; + unsigned int max_esit_payload; + unsigned int type; +}; + +struct xhci_virt_device; + +struct xhci_hcd; + +struct xhci_virt_ep { + struct xhci_virt_device *vdev; + unsigned int ep_index; + struct xhci_ring *ring; + struct xhci_stream_info *stream_info; + struct xhci_ring *new_ring; + unsigned int err_count; + unsigned int ep_state; + struct list_head cancelled_td_list; + struct xhci_hcd *xhci; + struct xhci_segment *queued_deq_seg; + union xhci_trb *queued_deq_ptr; + bool skip; + struct xhci_bw_info bw_info; + struct list_head bw_endpoint_list; + long unsigned int stop_time; + int next_frame_id; + bool use_extended_tbc; +}; + +struct xhci_port; + +struct xhci_interval_bw_table; + +struct xhci_tt_bw_info; + +struct xhci_virt_device { + int slot_id; + struct usb_device *udev; + struct xhci_container_ctx *out_ctx; + struct xhci_container_ctx *in_ctx; + struct xhci_virt_ep eps[31]; + struct xhci_port *rhub_port; + struct xhci_interval_bw_table *bw_table; + struct xhci_tt_bw_info *tt_info; + long unsigned int flags; + u16 current_mel; + void *debugfs_private; +}; + +struct s3_save { + u32 command; + u32 dev_nt; + u64 dcbaa_ptr; + u32 config_reg; + long: 32; +}; + +struct xhci_bus_state { + long unsigned int bus_suspended; + long unsigned int next_statechange; + u32 port_c_suspend; + u32 suspended_ports; + u32 port_remote_wakeup; + long unsigned int resuming_ports; +}; + +struct xhci_hub { + struct xhci_port **ports; + unsigned int num_ports; + struct usb_hcd *hcd; + struct xhci_bus_state bus_state; + u8 maj_rev; + u8 min_rev; +}; + +struct xhci_device_context_array; + +struct xhci_interrupter; + +struct xhci_scratchpad; + +struct xhci_root_port_bw_info; + +struct xhci_port_cap; + +struct xhci_hcd { + struct usb_hcd *main_hcd; + struct usb_hcd *shared_hcd; + struct 
xhci_cap_regs *cap_regs; + struct xhci_op_regs *op_regs; + struct xhci_run_regs *run_regs; + struct xhci_doorbell_array *dba; + __u32 hcs_params1; + __u32 hcs_params2; + __u32 hcs_params3; + __u32 hcc_params; + __u32 hcc_params2; + spinlock_t lock; + u16 hci_version; + u16 max_interrupters; + u32 imod_interval; + int page_size; + int page_shift; + int nvecs; + struct clk *clk; + struct clk *reg_clk; + struct reset_control *reset; + struct xhci_device_context_array *dcbaa; + struct xhci_interrupter **interrupters; + struct xhci_ring *cmd_ring; + unsigned int cmd_ring_state; + struct list_head cmd_list; + unsigned int cmd_ring_reserved_trbs; + struct delayed_work cmd_timer; + struct completion cmd_ring_stop_completion; + struct xhci_command *current_cmd; + struct xhci_scratchpad *scratchpad; + struct mutex mutex; + struct xhci_virt_device *devs[256]; + struct xhci_root_port_bw_info *rh_bw; + struct dma_pool *device_pool; + struct dma_pool *segment_pool; + struct dma_pool *small_streams_pool; + struct dma_pool *medium_streams_pool; + unsigned int xhc_state; + long unsigned int run_graceperiod; + struct s3_save s3; + long long unsigned int quirks; + unsigned int num_active_eps; + unsigned int limit_active_eps; + struct xhci_port *hw_ports; + struct xhci_hub usb2_rhub; + struct xhci_hub usb3_rhub; + unsigned int hw_lpm_support: 1; + unsigned int broken_suspend: 1; + unsigned int allow_single_roothub: 1; + struct xhci_port_cap *port_caps; + unsigned int num_port_caps; + struct timer_list comp_mode_recovery_timer; + u32 port_status_u0; + u16 test_mode; + struct dentry *debugfs_root; + struct dentry *debugfs_slots; + struct list_head regset_list; + void *dbc; + long unsigned int priv[0]; +}; + +struct xhci_segment { + union xhci_trb *trbs; + struct xhci_segment *next; + unsigned int num; + dma_addr_t dma; + dma_addr_t bounce_dma; + void *bounce_buf; + unsigned int bounce_offs; + unsigned int bounce_len; +}; + +struct xhci_interval_bw { + unsigned int num_packets; + struct list_head endpoints; + unsigned int overhead[3]; +}; + +struct xhci_interval_bw_table { + unsigned int interval0_esit_payload; + struct xhci_interval_bw interval_bw[16]; + unsigned int bw_used; + unsigned int ss_bw_in; + unsigned int ss_bw_out; +}; + +struct xhci_port { + __le32 *addr; + int hw_portnum; + int hcd_portnum; + struct xhci_hub *rhub; + struct xhci_port_cap *port_cap; + unsigned int lpm_incapable: 1; + long unsigned int resume_timestamp; + bool rexit_active; + int slot_id; + struct completion rexit_done; + struct completion u3exit_done; +}; + +struct xhci_tt_bw_info { + struct list_head tt_list; + int slot_id; + int ttport; + struct xhci_interval_bw_table bw_table; + int active_eps; +}; + +struct xhci_root_port_bw_info { + struct list_head tts; + unsigned int num_active_tts; + struct xhci_interval_bw_table bw_table; +}; + +struct xhci_device_context_array { + __le64 dev_context_ptrs[256]; + dma_addr_t dma; + long: 32; +}; + +struct xhci_erst_entry { + __le64 seg_addr; + __le32 seg_size; + __le32 rsvd; +}; + +struct xhci_erst { + struct xhci_erst_entry *entries; + unsigned int num_entries; + dma_addr_t erst_dma_addr; +}; + +struct xhci_scratchpad { + u64 *sp_array; + dma_addr_t sp_dma; + void **sp_buffers; +}; + +struct xhci_interrupter { + struct xhci_ring *event_ring; + struct xhci_erst erst; + struct xhci_intr_reg *ir_set; + unsigned int intr_num; + bool ip_autoclear; + u32 isoc_bei_interval; + u32 s3_irq_pending; + u32 s3_irq_control; + u32 s3_erst_size; + long: 32; + u64 s3_erst_base; + u64 s3_erst_dequeue; +}; + 
+struct xhci_port_cap { + u32 *psi; + u8 psi_count; + u8 psi_uid_count; + u8 maj_rev; + u8 min_rev; + u32 protocol_caps; +}; + +enum system_states { + SYSTEM_BOOTING = 0, + SYSTEM_SCHEDULING = 1, + SYSTEM_FREEING_INITMEM = 2, + SYSTEM_RUNNING = 3, + SYSTEM_HALT = 4, + SYSTEM_POWER_OFF = 5, + SYSTEM_RESTART = 6, + SYSTEM_SUSPEND = 7, +}; + +struct i2c_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +enum gpiod_flags { + GPIOD_ASIS = 0, + GPIOD_IN = 1, + GPIOD_OUT_LOW = 3, + GPIOD_OUT_HIGH = 7, + GPIOD_OUT_LOW_OPEN_DRAIN = 11, + GPIOD_OUT_HIGH_OPEN_DRAIN = 15, +}; + +enum { + IRQD_TRIGGER_MASK = 15, + IRQD_SETAFFINITY_PENDING = 256, + IRQD_ACTIVATED = 512, + IRQD_NO_BALANCING = 1024, + IRQD_PER_CPU = 2048, + IRQD_AFFINITY_SET = 4096, + IRQD_LEVEL = 8192, + IRQD_WAKEUP_STATE = 16384, + IRQD_MOVE_PCNTXT = 32768, + IRQD_IRQ_DISABLED = 65536, + IRQD_IRQ_MASKED = 131072, + IRQD_IRQ_INPROGRESS = 262144, + IRQD_WAKEUP_ARMED = 524288, + IRQD_FORWARDED_TO_VCPU = 1048576, + IRQD_AFFINITY_MANAGED = 2097152, + IRQD_IRQ_STARTED = 4194304, + IRQD_MANAGED_SHUTDOWN = 8388608, + IRQD_SINGLE_TARGET = 16777216, + IRQD_DEFAULT_TRIGGER_SET = 33554432, + IRQD_CAN_RESERVE = 67108864, + IRQD_HANDLE_ENFORCE_IRQCTX = 134217728, + IRQD_AFFINITY_ON_ACTIVATE = 268435456, + IRQD_IRQ_ENABLED_ON_SUSPEND = 536870912, + IRQD_RESEND_WHEN_IN_PROGRESS = 1073741824, +}; + +struct irq_domain_chip_generic_info { + const char *name; + irq_flow_handler_t handler; + unsigned int irqs_per_chip; + unsigned int num_ct; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + int (*init)(struct irq_chip_generic *); + void (*exit)(struct irq_chip_generic *); +}; + +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +struct rt_mutex { + struct rt_mutex_base rtmutex; +}; + +struct irq_domain_info { + struct fwnode_handle *fwnode; + unsigned int domain_flags; + unsigned int size; + irq_hw_number_t hwirq_max; + int direct_max; + unsigned int hwirq_base; + unsigned int virq_base; + enum irq_domain_bus_token bus_token; + const char *name_suffix; + const struct irq_domain_ops *ops; + void *host_data; + struct irq_domain *parent; + struct irq_domain_chip_generic_info *dgc_info; + int (*init)(struct irq_domain *); + void (*exit)(struct irq_domain *); +}; + +struct i2c_msg { + __u16 addr; + __u16 flags; + __u16 len; + __u8 *buf; +}; + +union i2c_smbus_data { + __u8 byte; + __u16 word; + __u8 block[34]; +}; + +struct i2c_adapter; + +struct i2c_client { + short unsigned int flags; + short unsigned int addr; + char name[20]; + struct i2c_adapter *adapter; + long: 32; + struct device dev; + int init_irq; + int irq; + struct list_head detected; + void *devres_group_id; + long: 32; +}; + +struct i2c_device_identity { + u16 manufacturer_id; + u16 part_id; + u8 die_revision; +}; + +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT = 0, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1, +}; + +struct i2c_board_info; + +struct i2c_driver { + unsigned int class; + int (*probe)(struct i2c_client *); + void (*remove)(struct i2c_client *); + void (*shutdown)(struct i2c_client *); + void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned int); + int (*command)(struct i2c_client *, unsigned int, void *); + struct device_driver driver; + const struct i2c_device_id *id_table; + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const short unsigned int *address_list; + struct list_head clients; + u32 flags; +}; + +struct 
i2c_board_info { + char type[20]; + short unsigned int flags; + short unsigned int addr; + const char *dev_name; + void *platform_data; + struct device_node *of_node; + struct fwnode_handle *fwnode; + const struct software_node *swnode; + const struct resource *resources; + unsigned int num_resources; + int irq; +}; + +struct i2c_algorithm; + +struct i2c_lock_operations; + +struct i2c_bus_recovery_info; + +struct i2c_adapter_quirks; + +struct i2c_adapter { + struct module *owner; + unsigned int class; + const struct i2c_algorithm *algo; + void *algo_data; + const struct i2c_lock_operations *lock_ops; + struct rt_mutex bus_lock; + struct rt_mutex mux_lock; + int timeout; + int retries; + long: 32; + struct device dev; + long unsigned int locked_flags; + int nr; + char name[48]; + struct completion dev_released; + struct mutex userspace_clients_lock; + struct list_head userspace_clients; + struct i2c_bus_recovery_info *bus_recovery_info; + const struct i2c_adapter_quirks *quirks; + struct irq_domain *host_notify_domain; + struct regulator *bus_regulator; + struct dentry *debugfs; + long unsigned int addrs_in_instantiation[4]; +}; + +struct i2c_algorithm { + union { + int (*xfer)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); + }; + union { + int (*xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + }; + int (*smbus_xfer)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + int (*smbus_xfer_atomic)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + u32 (*functionality)(struct i2c_adapter *); +}; + +struct i2c_lock_operations { + void (*lock_bus)(struct i2c_adapter *, unsigned int); + int (*trylock_bus)(struct i2c_adapter *, unsigned int); + void (*unlock_bus)(struct i2c_adapter *, unsigned int); +}; + +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; + u32 sda_hold_ns; + u32 digital_filter_width_ns; + u32 analog_filter_cutoff_freq_hz; +}; + +struct pinctrl; + +struct pinctrl_state; + +struct i2c_bus_recovery_info { + int (*recover_bus)(struct i2c_adapter *); + int (*get_scl)(struct i2c_adapter *); + void (*set_scl)(struct i2c_adapter *, int); + int (*get_sda)(struct i2c_adapter *); + void (*set_sda)(struct i2c_adapter *, int); + int (*get_bus_free)(struct i2c_adapter *); + void (*prepare_recovery)(struct i2c_adapter *); + void (*unprepare_recovery)(struct i2c_adapter *); + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; +}; + +struct i2c_adapter_quirks { + u64 flags; + int max_num_msgs; + u16 max_write_len; + u16 max_read_len; + u16 max_comb_1st_msg_len; + u16 max_comb_2nd_msg_len; + long: 32; +}; + +struct i2c_devinfo { + struct list_head list; + int busnum; + struct i2c_board_info board_info; +}; + +struct trace_event_raw_i2c_write { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_read { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + char __data[0]; +}; + +struct trace_event_raw_i2c_reply { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 
__data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_result { + struct trace_entry ent; + int adapter_nr; + __u16 nr_msgs; + __s16 ret; + char __data[0]; +}; + +struct trace_event_data_offsets_i2c_write { + u32 buf; + const void *buf_ptr_; +}; + +struct trace_event_data_offsets_i2c_read {}; + +struct trace_event_data_offsets_i2c_reply { + u32 buf; + const void *buf_ptr_; +}; + +struct trace_event_data_offsets_i2c_result {}; + +typedef void (*btf_trace_i2c_write)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_read)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_reply)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_result)(void *, const struct i2c_adapter *, int, int); + +struct i2c_cmd_arg { + unsigned int cmd; + void *arg; +}; + +struct acpi_device; + +struct ir_raw_handler { + struct list_head list; + u64 protocols; + int (*decode)(struct rc_dev *, struct ir_raw_event); + int (*encode)(enum rc_proto, u32, struct ir_raw_event *, unsigned int); + u32 carrier; + u32 min_timeout; + int (*raw_register)(struct rc_dev *); + int (*raw_unregister)(struct rc_dev *); +}; + +struct ir_raw_timings_pl { + unsigned int header_pulse; + unsigned int bit_space; + unsigned int bit_pulse[2]; + unsigned int trailer_space; + unsigned int msb_first: 1; +}; + +enum sony_state { + STATE_INACTIVE = 0, + STATE_HEADER_SPACE = 1, + STATE_BIT_PULSE = 2, + STATE_BIT_SPACE = 3, + STATE_FINISHED = 4, +}; + +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED = 0, + CLOCK_EVT_STATE_SHUTDOWN = 1, + CLOCK_EVT_STATE_PERIODIC = 2, + CLOCK_EVT_STATE_ONESHOT = 3, + CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, +}; + +struct clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(long unsigned int, struct clock_event_device *); + int (*set_next_ktime)(ktime_t, struct clock_event_device *); + long: 32; + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_state state_use_accessors; + unsigned int features; + long unsigned int retries; + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_oneshot_stopped)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); + void (*broadcast)(const struct cpumask *); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); + long unsigned int min_delta_ticks; + long unsigned int max_delta_ticks; + const char *name; + int rating; + int irq; + int bound_on; + const struct cpumask *cpumask; + struct list_head list; + struct module *owner; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef void (*swap_func_t)(void *, void *, int); + +struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[16]; +}; + +struct reserved_mem_ops; + +struct reserved_mem { + const char *name; + long unsigned int fdt_node; + const struct reserved_mem_ops *ops; + phys_addr_t base; + phys_addr_t size; + void *priv; +}; + +struct reserved_mem_ops { + int (*device_init)(struct reserved_mem *, struct device *); + void (*device_release)(struct reserved_mem *, struct device *); +}; + +typedef int (*reservedmem_of_init_fn)(struct reserved_mem *); + +enum memblock_flags { + MEMBLOCK_NONE = 0, + 
MEMBLOCK_HOTPLUG = 1, + MEMBLOCK_MIRROR = 2, + MEMBLOCK_NOMAP = 4, + MEMBLOCK_DRIVER_MANAGED = 8, + MEMBLOCK_RSRV_NOINIT = 16, +}; + +struct memblock_region { + phys_addr_t base; + phys_addr_t size; + enum memblock_flags flags; +}; + +struct memblock_type { + long unsigned int cnt; + long unsigned int max; + phys_addr_t total_size; + struct memblock_region *regions; + char *name; +}; + +struct memblock { + bool bottom_up; + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +}; + +struct rmem_assigned_device { + struct device *dev; + struct reserved_mem *rmem; + struct list_head list; +}; + +enum { + IF_LINK_MODE_DEFAULT = 0, + IF_LINK_MODE_DORMANT = 1, + IF_LINK_MODE_TESTING = 2, +}; + +struct netdev_name_node { + struct hlist_node hlist; + struct list_head list; + struct net_device *dev; + const char *name; + struct callback_head rcu; +}; + +enum lw_bits { + LW_URGENT = 0, +}; + +struct sockaddr_in6 { + short unsigned int sin6_family; + __be16 sin6_port; + __be32 sin6_flowinfo; + struct in6_addr sin6_addr; + __u32 sin6_scope_id; +}; + +struct sockaddr_in { + __kernel_sa_family_t sin_family; + __be16 sin_port; + struct in_addr sin_addr; + unsigned char __pad[8]; +}; + +struct trace_event_raw_kfree_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + void *rx_sk; + short unsigned int protocol; + enum skb_drop_reason reason; + char __data[0]; +}; + +struct trace_event_raw_consume_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + char __data[0]; +}; + +struct trace_event_raw_skb_copy_datagram_iovec { + struct trace_entry ent; + const void *skbaddr; + int len; + char __data[0]; +}; + +struct trace_event_data_offsets_kfree_skb {}; + +struct trace_event_data_offsets_consume_skb {}; + +struct trace_event_data_offsets_skb_copy_datagram_iovec {}; + +typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *, enum skb_drop_reason, struct sock *); + +typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *, void *); + +typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int); + +struct trace_event_raw_net_dev_start_xmit { + struct trace_entry ent; + u32 __data_loc_name; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + unsigned int len; + unsigned int data_len; + int network_offset; + bool transport_offset_valid; + int transport_offset; + u8 tx_flags; + u16 gso_size; + u16 gso_segs; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + int rc; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit_timeout { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_driver; + int queue_index; + char __data[0]; +}; + +struct trace_event_raw_net_dev_template { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_verbose_template { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int napi_id; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + u32 hash; + bool l4_hash; + unsigned int len; + unsigned int data_len; + unsigned int truesize; + bool mac_header_valid; + int mac_header; + unsigned char nr_frags; + u16 gso_size; + u16 gso_type; + char __data[0]; +}; + +struct 
trace_event_raw_net_dev_rx_exit_template { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_net_dev_start_xmit { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_xmit { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_xmit_timeout { + u32 name; + const void *name_ptr_; + u32 driver; + const void *driver_ptr_; +}; + +struct trace_event_data_offsets_net_dev_template { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_rx_verbose_template { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_net_dev_rx_exit_template {}; + +typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, const struct net_device *); + +typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, unsigned int); + +typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct net_device *, int); + +typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_rx)(void *, struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_exit)(void *, int); + +typedef void (*btf_trace_napi_gro_receive_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_exit)(void *, int); + +typedef void (*btf_trace_netif_rx_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int); + +struct trace_event_raw_napi_poll { + struct trace_entry ent; + struct napi_struct *napi; + u32 __data_loc_dev_name; + int work; + int budget; + char __data[0]; +}; + +struct trace_event_raw_dql_stall_detected { + struct trace_entry ent; + short unsigned int thrs; + unsigned int len; + long unsigned int last_reap; + long unsigned int hist_head; + long unsigned int now; + long unsigned int hist[4]; + char __data[0]; +}; + +struct trace_event_data_offsets_napi_poll { + u32 dev_name; + const void *dev_name_ptr_; +}; + +struct trace_event_data_offsets_dql_stall_detected {}; + +typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int); + +typedef void (*btf_trace_dql_stall_detected)(void *, short unsigned int, unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int *); + +enum { + TCPF_ESTABLISHED = 2, + TCPF_SYN_SENT = 4, + TCPF_SYN_RECV = 8, + TCPF_FIN_WAIT1 = 16, + TCPF_FIN_WAIT2 = 32, + TCPF_TIME_WAIT = 64, + TCPF_CLOSE = 128, + TCPF_CLOSE_WAIT = 256, + TCPF_LAST_ACK = 512, + TCPF_LISTEN = 1024, + TCPF_CLOSING = 2048, + TCPF_NEW_SYN_RECV = 4096, + TCPF_BOUND_INACTIVE = 8192, +}; + +struct in6_pktinfo { + struct in6_addr ipi6_addr; + int ipi6_ifindex; +}; + +struct ipv6hdr { + __u8 version: 4; + __u8 priority: 4; + __u8 flow_lbl[3]; + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; + union { + struct { + struct in6_addr saddr; + struct in6_addr daddr; + }; + struct { + struct in6_addr saddr; + struct in6_addr daddr; + } addrs; + }; +}; + +struct minmax_sample { + u32 t; + u32 v; +}; + +struct minmax { + struct 
minmax_sample s[3]; +}; + +struct fastopen_queue { + struct request_sock *rskq_rst_head; + struct request_sock *rskq_rst_tail; + spinlock_t lock; + int qlen; + int max_qlen; + struct tcp_fastopen_context *ctx; +}; + +struct request_sock_queue { + spinlock_t rskq_lock; + u8 rskq_defer_accept; + u32 synflood_warned; + atomic_t qlen; + atomic_t young; + struct request_sock *rskq_accept_head; + struct request_sock *rskq_accept_tail; + struct fastopen_queue fastopenq; +}; + +struct ip_options { + __be32 faddr; + __be32 nexthop; + unsigned char optlen; + unsigned char srr; + unsigned char rr; + unsigned char ts; + unsigned char is_strictroute: 1; + unsigned char srr_is_hit: 1; + unsigned char is_changed: 1; + unsigned char rr_needaddr: 1; + unsigned char ts_needtime: 1; + unsigned char ts_needaddr: 1; + unsigned char router_alert; + unsigned char cipso; + unsigned char __pad2; + unsigned char __data[0]; +}; + +struct ip_options_rcu { + struct callback_head rcu; + struct ip_options opt; +}; + +struct inet_request_sock { + struct request_sock req; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u16 tstamp_ok: 1; + u16 sack_ok: 1; + u16 wscale_ok: 1; + u16 ecn_ok: 1; + u16 acked: 1; + u16 no_srccheck: 1; + u16 smc_ok: 1; + u32 ir_mark; + union { + struct ip_options_rcu *ireq_opt; + struct { + struct ipv6_txoptions *ipv6_opt; + struct sk_buff *pktopts; + }; + }; +}; + +struct inet_cork { + unsigned int flags; + __be32 addr; + struct ip_options *opt; + unsigned int fragsize; + int length; + struct dst_entry *dst; + u8 tx_flags; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + u32 ts_opt_id; + long: 32; + u64 transmit_time; + u32 mark; + long: 32; +}; + +struct inet_cork_full { + struct inet_cork base; + struct flowi fl; +}; + +struct ipv6_pinfo; + +struct ip_mc_socklist; + +struct inet_sock { + struct sock sk; + struct ipv6_pinfo *pinet6; + long unsigned int inet_flags; + __be32 inet_saddr; + __s16 uc_ttl; + __be16 inet_sport; + struct ip_options_rcu *inet_opt; + atomic_t inet_id; + __u8 tos; + __u8 min_ttl; + __u8 mc_ttl; + __u8 pmtudisc; + __u8 rcv_tos; + __u8 convert_csum; + int uc_index; + int mc_index; + __be32 mc_addr; + u32 local_port_range; + struct ip_mc_socklist *mc_list; + long: 32; + struct inet_cork_full cork; +}; + +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + +struct ipv6_mc_socklist; + +struct ipv6_ac_socklist; + +struct ipv6_fl_socklist; + +struct ipv6_pinfo { + struct in6_addr saddr; + struct in6_pktinfo sticky_pktinfo; + const struct in6_addr *daddr_cache; + __be32 flow_label; + __u32 frag_size; + s16 hop_limit; + u8 mcast_hops; + int ucast_oif; + int mcast_oif; + union { + struct { + __u16 srcrt: 1; + __u16 osrcrt: 1; + __u16 rxinfo: 1; + __u16 rxoinfo: 1; + __u16 rxhlim: 1; + __u16 rxohlim: 1; + __u16 hopopts: 1; + __u16 ohopopts: 1; + __u16 dstopts: 1; + __u16 odstopts: 1; + __u16 rxflow: 1; + __u16 rxtclass: 1; + __u16 rxpmtu: 1; + __u16 rxorigdstaddr: 1; + __u16 recvfragsize: 1; + } bits; + __u16 all; + } rxopt; + __u8 srcprefs; + __u8 pmtudisc; + __u8 min_hopcount; + __u8 tclass; + __be32 rcv_flowinfo; + __u32 dst_cookie; + struct ipv6_mc_socklist *ipv6_mc_list; + struct ipv6_ac_socklist *ipv6_ac_list; + struct ipv6_fl_socklist *ipv6_fl_list; + struct ipv6_txoptions *opt; + struct sk_buff *pktoptions; + struct sk_buff *rxpmtu; + struct inet6_cork cork; +}; + +struct inet_connection_sock_af_ops { + int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); + void (*send_check)(struct sock *, struct sk_buff *); + int 
(*rebuild_header)(struct sock *); + void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); + int (*conn_request)(struct sock *, struct sk_buff *); + struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *); + u16 net_header_len; + u16 sockaddr_len; + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*addr2sockaddr)(struct sock *, struct sockaddr *); + void (*mtu_reduced)(struct sock *); +}; + +struct inet_bind_bucket; + +struct inet_bind2_bucket; + +struct tcp_ulp_ops; + +struct inet_connection_sock { + struct inet_sock icsk_inet; + struct request_sock_queue icsk_accept_queue; + struct inet_bind_bucket *icsk_bind_hash; + struct inet_bind2_bucket *icsk_bind2_hash; + long unsigned int icsk_timeout; + struct timer_list icsk_retransmit_timer; + struct timer_list icsk_delack_timer; + __u32 icsk_rto; + __u32 icsk_rto_min; + __u32 icsk_delack_max; + __u32 icsk_pmtu_cookie; + const struct tcp_congestion_ops *icsk_ca_ops; + const struct inet_connection_sock_af_ops *icsk_af_ops; + const struct tcp_ulp_ops *icsk_ulp_ops; + void *icsk_ulp_data; + void (*icsk_clean_acked)(struct sock *, u32); + unsigned int (*icsk_sync_mss)(struct sock *, u32); + __u8 icsk_ca_state: 5; + __u8 icsk_ca_initialized: 1; + __u8 icsk_ca_setsockopt: 1; + __u8 icsk_ca_dst_locked: 1; + __u8 icsk_retransmits; + __u8 icsk_pending; + __u8 icsk_backoff; + __u8 icsk_syn_retries; + __u8 icsk_probes_out; + __u16 icsk_ext_hdr_len; + struct { + __u8 pending; + __u8 quick; + __u8 pingpong; + __u8 retry; + __u32 ato: 8; + __u32 lrcv_flowlabel: 20; + __u32 unused: 4; + long unsigned int timeout; + __u32 lrcvtime; + __u16 last_seg_size; + __u16 rcv_mss; + } icsk_ack; + struct { + int search_high; + int search_low; + u32 probe_size: 31; + u32 enabled: 1; + u32 probe_timestamp; + } icsk_mtup; + u32 icsk_probes_tstamp; + u32 icsk_user_timeout; + long: 32; + u64 icsk_ca_priv[13]; +}; + +struct inet_bind_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + struct in6_addr fast_v6_rcv_saddr; + __be32 fast_rcv_saddr; + short unsigned int fast_sk_family; + bool fast_ipv6_only; + struct hlist_node node; + struct hlist_head bhash2; +}; + +struct inet_bind2_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + short unsigned int addr_type; + struct in6_addr v6_rcv_saddr; + struct hlist_node node; + struct hlist_node bhash_node; + struct hlist_head owners; +}; + +struct tcp_ulp_ops { + struct list_head list; + int (*init)(struct sock *); + void (*update)(struct sock *, struct proto *, void (*)(struct sock *)); + void (*release)(struct sock *); + int (*get_info)(struct sock *, struct sk_buff *); + size_t (*get_info_size)(const struct sock *); + void (*clone)(const struct request_sock *, struct sock *, const gfp_t); + char name[16]; + struct module *owner; +}; + +struct tcphdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __u16 doff: 4; + __u16 res1: 4; + __u16 cwr: 1; + __u16 ece: 1; + __u16 urg: 1; + __u16 ack: 1; + __u16 psh: 1; + __u16 rst: 1; + __u16 syn: 1; + __u16 fin: 1; + __be16 window; + __sum16 check; + __be16 urg_ptr; +}; + +enum tcp_ca_state { + TCP_CA_Open = 0, + TCP_CA_Disorder = 1, + TCP_CA_CWR = 2, + TCP_CA_Recovery = 3, + TCP_CA_Loss = 4, +}; + +struct tcp_fastopen_cookie { + __le64 val[2]; + s8 len; + bool exp; + long: 
32; +}; + +struct tcp_sack_block { + u32 start_seq; + u32 end_seq; +}; + +struct tcp_options_received { + int ts_recent_stamp; + u32 ts_recent; + u32 rcv_tsval; + u32 rcv_tsecr; + u16 saw_tstamp: 1; + u16 tstamp_ok: 1; + u16 dsack: 1; + u16 wscale_ok: 1; + u16 sack_ok: 3; + u16 smc_ok: 1; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u8 saw_unknown: 1; + u8 unused: 7; + u8 num_sacks; + u16 user_mss; + u16 mss_clamp; +}; + +struct tcp_rack { + u64 mstamp; + u32 rtt_us; + u32 end_seq; + u32 last_delivered; + u8 reo_wnd_steps; + u8 reo_wnd_persist: 5; + u8 dsack_seen: 1; + u8 advanced: 1; +}; + +struct tcp_fastopen_request; + +struct tcp_sock { + struct inet_connection_sock inet_conn; + __u8 __cacheline_group_begin__tcp_sock_read_tx[0]; + u32 max_window; + u32 rcv_ssthresh; + u32 reordering; + u32 notsent_lowat; + u16 gso_segs; + struct sk_buff *lost_skb_hint; + struct sk_buff *retransmit_skb_hint; + __u8 __cacheline_group_end__tcp_sock_read_tx[0]; + __u8 __cacheline_group_begin__tcp_sock_read_txrx[0]; + u32 tsoffset; + u32 snd_wnd; + u32 mss_cache; + u32 snd_cwnd; + u32 prr_out; + u32 lost_out; + u32 sacked_out; + u16 tcp_header_len; + u8 scaling_ratio; + u8 chrono_type: 2; + u8 repair: 1; + u8 tcp_usec_ts: 1; + u8 is_sack_reneg: 1; + u8 is_cwnd_limited: 1; + __u8 __cacheline_group_end__tcp_sock_read_txrx[0]; + __u8 __cacheline_group_begin__tcp_sock_read_rx[0]; + u32 copied_seq; + u32 rcv_tstamp; + u32 snd_wl1; + u32 tlp_high_seq; + u32 rttvar_us; + u32 retrans_out; + u16 advmss; + u16 urg_data; + u32 lost; + struct minmax rtt_min; + struct rb_root out_of_order_queue; + u32 snd_ssthresh; + u8 recvmsg_inq: 1; + __u8 __cacheline_group_end__tcp_sock_read_rx[0]; + long: 32; + long: 32; + __u8 __cacheline_group_begin__tcp_sock_write_tx[0]; + u32 segs_out; + u32 data_segs_out; + u64 bytes_sent; + u32 snd_sml; + u32 chrono_start; + u32 chrono_stat[3]; + u32 write_seq; + u32 pushed_seq; + u32 lsndtime; + u32 mdev_us; + u32 rtt_seq; + u64 tcp_wstamp_ns; + struct list_head tsorted_sent_queue; + struct sk_buff *highest_sack; + u8 ecn_flags; + __u8 __cacheline_group_end__tcp_sock_write_tx[0]; + __u8 __cacheline_group_begin__tcp_sock_write_txrx[0]; + __be32 pred_flags; + long: 32; + u64 tcp_clock_cache; + u64 tcp_mstamp; + u32 rcv_nxt; + u32 snd_nxt; + u32 snd_una; + u32 window_clamp; + u32 srtt_us; + u32 packets_out; + u32 snd_up; + u32 delivered; + u32 delivered_ce; + u32 app_limited; + u32 rcv_wnd; + struct tcp_options_received rx_opt; + u8 nonagle: 4; + u8 rate_app_limited: 1; + __u8 __cacheline_group_end__tcp_sock_write_txrx[0]; + long: 0; + __u8 __cacheline_group_begin__tcp_sock_write_rx[0]; + u64 bytes_received; + u32 segs_in; + u32 data_segs_in; + u32 rcv_wup; + u32 max_packets_out; + u32 cwnd_usage_seq; + u32 rate_delivered; + u32 rate_interval_us; + u32 rcv_rtt_last_tsecr; + u64 first_tx_mstamp; + u64 delivered_mstamp; + u64 bytes_acked; + struct { + u32 rtt_us; + u32 seq; + u64 time; + } rcv_rtt_est; + struct { + u32 space; + u32 seq; + u64 time; + } rcvq_space; + __u8 __cacheline_group_end__tcp_sock_write_rx[0]; + u32 dsack_dups; + u32 compressed_ack_rcv_nxt; + struct list_head tsq_node; + struct tcp_rack rack; + u8 compressed_ack; + u8 dup_ack_counter: 2; + u8 tlp_retrans: 1; + u8 unused: 5; + u8 thin_lto: 1; + u8 fastopen_connect: 1; + u8 fastopen_no_cookie: 1; + u8 fastopen_client_fail: 2; + u8 frto: 1; + u8 repair_queue; + u8 save_syn: 2; + u8 syn_data: 1; + u8 syn_fastopen: 1; + u8 syn_fastopen_exp: 1; + u8 syn_fastopen_ch: 1; + u8 syn_data_acked: 1; + u8 keepalive_probes; + u32 
tcp_tx_delay; + u32 mdev_max_us; + u32 reord_seen; + u32 snd_cwnd_cnt; + u32 snd_cwnd_clamp; + u32 snd_cwnd_used; + u32 snd_cwnd_stamp; + u32 prior_cwnd; + u32 prr_delivered; + u32 last_oow_ack_time; + struct hrtimer pacing_timer; + struct hrtimer compressed_ack_timer; + struct sk_buff *ooo_last_skb; + struct tcp_sack_block duplicate_sack[1]; + struct tcp_sack_block selective_acks[4]; + struct tcp_sack_block recv_sack_cache[4]; + int lost_cnt_hint; + u32 prior_ssthresh; + u32 high_seq; + u32 retrans_stamp; + u32 undo_marker; + int undo_retrans; + long: 32; + u64 bytes_retrans; + u32 total_retrans; + u32 rto_stamp; + u16 total_rto; + u16 total_rto_recoveries; + u32 total_rto_time; + u32 urg_seq; + unsigned int keepalive_time; + unsigned int keepalive_intvl; + int linger2; + u8 bpf_sock_ops_cb_flags; + u8 bpf_chg_cc_inprogress: 1; + u16 timeout_rehash; + u32 rcv_ooopack; + struct { + u32 probe_seq_start; + u32 probe_seq_end; + } mtu_probe; + u32 plb_rehash; + u32 mtu_info; + struct tcp_fastopen_request *fastopen_req; + struct request_sock *fastopen_rsk; + struct saved_syn *saved_syn; + long: 32; +}; + +struct tcp_fastopen_request { + struct tcp_fastopen_cookie cookie; + struct msghdr *data; + size_t size; + int copied; + struct ubuf_info *uarg; +}; + +struct udphdr { + __be16 source; + __be16 dest; + __be16 len; + __sum16 check; +}; + +struct ip6_sf_socklist; + +struct ipv6_mc_socklist { + struct in6_addr addr; + int ifindex; + unsigned int sfmode; + struct ipv6_mc_socklist *next; + struct ip6_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ipv6_ac_socklist { + struct in6_addr acl_addr; + int acl_ifindex; + struct ipv6_ac_socklist *acl_next; +}; + +struct ip6_flowlabel; + +struct ipv6_fl_socklist { + struct ipv6_fl_socklist *next; + struct ip6_flowlabel *fl; + struct callback_head rcu; +}; + +struct ip6_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + struct in6_addr sl_addr[0]; +}; + +struct ip6_flowlabel { + struct ip6_flowlabel *next; + __be32 label; + atomic_t users; + struct in6_addr dst; + struct ipv6_txoptions *opt; + long unsigned int linger; + struct callback_head rcu; + u8 share; + union { + struct pid *pid; + kuid_t uid; + } owner; + long unsigned int lastuse; + long unsigned int expires; + struct net *fl_net; +}; + +struct trace_event_raw_sock_rcvqueue_full { + struct trace_entry ent; + int rmem_alloc; + unsigned int truesize; + int sk_rcvbuf; + char __data[0]; +}; + +struct trace_event_raw_sock_exceed_buf_limit { + struct trace_entry ent; + char name[32]; + long int sysctl_mem[3]; + long int allocated; + int sysctl_rmem; + int rmem_alloc; + int sysctl_wmem; + int wmem_alloc; + int wmem_queued; + int kind; + char __data[0]; +}; + +struct trace_event_raw_inet_sock_set_state { + struct trace_entry ent; + const void *skaddr; + int oldstate; + int newstate; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_inet_sk_error_report { + struct trace_entry ent; + int error; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_sk_data_ready { + struct trace_entry ent; + const void *skaddr; + __u16 family; + __u16 protocol; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_sock_msg_length { + struct trace_entry ent; + void *sk; + __u16 family; + 
__u16 protocol; + int ret; + int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_sock_rcvqueue_full {}; + +struct trace_event_data_offsets_sock_exceed_buf_limit {}; + +struct trace_event_data_offsets_inet_sock_set_state {}; + +struct trace_event_data_offsets_inet_sk_error_report {}; + +struct trace_event_data_offsets_sk_data_ready {}; + +struct trace_event_data_offsets_sock_msg_length {}; + +typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_sock_exceed_buf_limit)(void *, struct sock *, struct proto *, long int, int); + +typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, const int); + +typedef void (*btf_trace_inet_sk_error_report)(void *, const struct sock *); + +typedef void (*btf_trace_sk_data_ready)(void *, const struct sock *); + +typedef void (*btf_trace_sock_send_length)(void *, struct sock *, int, int); + +typedef void (*btf_trace_sock_recv_length)(void *, struct sock *, int, int); + +struct trace_event_raw_udp_fail_queue_rcv_skb { + struct trace_entry ent; + int rc; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; + +typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *, struct sk_buff *); + +struct tcf_walker { + int stop; + int skip; + int count; + bool nonempty; + long unsigned int cookie; + int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); +}; + +struct tcf_exts { + int action; + int police; +}; + +struct trace_event_raw_tcp_event_sk_skb { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_send_reset { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + enum sk_rst_reason reason; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_sk { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + long: 32; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_raw_tcp_retransmit_synack { + struct trace_entry ent; + const void *skaddr; + const void *req; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_probe { + struct trace_entry ent; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u32 mark; + __u16 data_len; + __u32 snd_nxt; + __u32 snd_una; + __u32 snd_cwnd; + __u32 ssthresh; + __u32 snd_wnd; + __u32 srtt; + __u32 rcv_wnd; + long: 32; + __u64 sock_cookie; + const void *skbaddr; + const void *skaddr; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_skb { + struct trace_entry ent; + const void *skbaddr; + __u8 saddr[28]; + __u8 daddr[28]; + char __data[0]; +}; + +struct trace_event_raw_tcp_cong_state_set { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + __u8 cong_state; + char __data[0]; +}; + +struct trace_event_raw_tcp_hash_event { + struct trace_entry ent; + __u64 net_cookie; + const void *skbaddr; + const void *skaddr; + int 
state; + __u8 saddr[28]; + __u8 daddr[28]; + int l3index; + __u16 sport; + __u16 dport; + __u16 family; + bool fin; + bool syn; + bool rst; + bool psh; + bool ack; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_tcp_ao_event { + struct trace_entry ent; + __u64 net_cookie; + const void *skbaddr; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + int l3index; + __u16 sport; + __u16 dport; + __u16 family; + bool fin; + bool syn; + bool rst; + bool psh; + bool ack; + __u8 keyid; + __u8 rnext; + __u8 maclen; + char __data[0]; +}; + +struct trace_event_raw_tcp_ao_event_sk { + struct trace_entry ent; + __u64 net_cookie; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u8 keyid; + __u8 rnext; + char __data[0]; +}; + +struct trace_event_raw_tcp_ao_event_sne { + struct trace_entry ent; + __u64 net_cookie; + const void *skaddr; + int state; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u32 new_sne; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_tcp_event_sk_skb {}; + +struct trace_event_data_offsets_tcp_send_reset {}; + +struct trace_event_data_offsets_tcp_event_sk {}; + +struct trace_event_data_offsets_tcp_retransmit_synack {}; + +struct trace_event_data_offsets_tcp_probe {}; + +struct trace_event_data_offsets_tcp_event_skb {}; + +struct trace_event_data_offsets_tcp_cong_state_set {}; + +struct trace_event_data_offsets_tcp_hash_event {}; + +struct trace_event_data_offsets_tcp_ao_event {}; + +struct trace_event_data_offsets_tcp_ao_event_sk {}; + +struct trace_event_data_offsets_tcp_ao_event_sne {}; + +typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *, const enum sk_rst_reason); + +typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *); + +typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *); + +typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *); + +typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, const struct request_sock *); + +typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_tcp_bad_csum)(void *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_cong_state_set)(void *, struct sock *, const u8); + +typedef void (*btf_trace_tcp_hash_bad_header)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_required)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_unexpected)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_md5_mismatch)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_hash_ao_required)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_ao_handshake_failure)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_wrong_maclen)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_mismatch)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_key_not_found)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + 
+typedef void (*btf_trace_tcp_ao_rnext_request)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_synack_no_key)(void *, const struct sock *, const __u8, const __u8); + +typedef void (*btf_trace_tcp_ao_snd_sne_update)(void *, const struct sock *, __u32); + +typedef void (*btf_trace_tcp_ao_rcv_sne_update)(void *, const struct sock *, __u32); + +struct trace_event_raw_fib_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + u8 proto; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[4]; + __u8 dst[4]; + __u8 gw4[4]; + __u8 gw6[16]; + u16 sport; + u16 dport; + char name[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib_table_lookup {}; + +typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, const struct fib_nh_common *, int); + +struct trace_event_raw_qdisc_dequeue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + int packets; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + long unsigned int txq_state; + char __data[0]; +}; + +struct trace_event_raw_qdisc_enqueue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + char __data[0]; +}; + +struct trace_event_raw_qdisc_reset { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_destroy { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_create { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + char __data[0]; +}; + +struct trace_event_data_offsets_qdisc_dequeue {}; + +struct trace_event_data_offsets_qdisc_enqueue {}; + +struct trace_event_data_offsets_qdisc_reset { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +struct trace_event_data_offsets_qdisc_destroy { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +struct trace_event_data_offsets_qdisc_create { + u32 dev; + const void *dev_ptr_; + u32 kind; + const void *kind_ptr_; +}; + +typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, const struct netdev_queue *, int, struct sk_buff *); + +typedef void (*btf_trace_qdisc_enqueue)(void *, struct Qdisc *, const struct netdev_queue *, struct sk_buff *); + +typedef void (*btf_trace_qdisc_reset)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, struct net_device *, u32); + +struct bridge_stp_xstats { + __u64 transition_blk; + __u64 transition_fwd; + __u64 rx_bpdu; + __u64 tx_bpdu; + __u64 rx_tcn; + __u64 tx_tcn; +}; + +enum { + BR_MCAST_DIR_RX = 0, + BR_MCAST_DIR_TX = 1, + BR_MCAST_DIR_SIZE = 2, +}; + +struct br_mcast_stats { + __u64 igmp_v1queries[2]; + __u64 igmp_v2queries[2]; + __u64 igmp_v3queries[2]; + __u64 igmp_leaves[2]; + __u64 igmp_v1reports[2]; + __u64 igmp_v2reports[2]; + __u64 igmp_v3reports[2]; + __u64 igmp_parse_errors; + __u64 mld_v1queries[2]; + __u64 mld_v2queries[2]; + __u64 mld_leaves[2]; + __u64 mld_v1reports[2]; + __u64 mld_v2reports[2]; + __u64 mld_parse_errors; + __u64 mcast_bytes[2]; + __u64 mcast_packets[2]; +}; + +struct br_ip { + union { + __be32 ip4; + struct in6_addr ip6; + } src; + union { + __be32 ip4; + 
struct in6_addr ip6; + unsigned char mac_addr[6]; + } dst; + __be16 proto; + __u16 vid; +}; + +struct bridge_id { + unsigned char prio[2]; + unsigned char addr[6]; +}; + +typedef struct bridge_id bridge_id; + +struct mac_addr { + unsigned char addr[6]; +}; + +typedef struct mac_addr mac_addr; + +typedef __u16 port_id; + +struct bridge_mcast_own_query { + struct timer_list timer; + u32 startup_sent; +}; + +struct bridge_mcast_other_query { + struct timer_list timer; + struct timer_list delay_timer; +}; + +struct bridge_mcast_querier { + struct br_ip addr; + int port_ifidx; + seqcount_spinlock_t seq; +}; + +struct bridge_mcast_stats { + struct br_mcast_stats mstats; + struct u64_stats_sync syncp; + long: 32; +}; + +struct net_bridge; + +struct net_bridge_vlan; + +struct net_bridge_mcast { + struct net_bridge *br; + struct net_bridge_vlan *vlan; + u32 multicast_last_member_count; + u32 multicast_startup_query_count; + u8 multicast_querier; + u8 multicast_igmp_version; + u8 multicast_router; + u8 multicast_mld_version; + long unsigned int multicast_last_member_interval; + long unsigned int multicast_membership_interval; + long unsigned int multicast_querier_interval; + long unsigned int multicast_query_interval; + long unsigned int multicast_query_response_interval; + long unsigned int multicast_startup_query_interval; + struct hlist_head ip4_mc_router_list; + struct timer_list ip4_mc_router_timer; + struct bridge_mcast_other_query ip4_other_query; + struct bridge_mcast_own_query ip4_own_query; + struct bridge_mcast_querier ip4_querier; + struct hlist_head ip6_mc_router_list; + struct timer_list ip6_mc_router_timer; + struct bridge_mcast_other_query ip6_other_query; + struct bridge_mcast_own_query ip6_own_query; + struct bridge_mcast_querier ip6_querier; +}; + +struct net_bridge { + spinlock_t lock; + spinlock_t hash_lock; + struct hlist_head frame_type_list; + struct net_device *dev; + long unsigned int options; + struct rhashtable fdb_hash_tbl; + struct list_head port_list; + union { + struct rtable fake_rtable; + struct rt6_info fake_rt6_info; + }; + u16 group_fwd_mask; + u16 group_fwd_mask_required; + bridge_id designated_root; + bridge_id bridge_id; + unsigned char topology_change; + unsigned char topology_change_detected; + u16 root_port; + long unsigned int max_age; + long unsigned int hello_time; + long unsigned int forward_delay; + long unsigned int ageing_time; + long unsigned int bridge_max_age; + long unsigned int bridge_hello_time; + long unsigned int bridge_forward_delay; + long unsigned int bridge_ageing_time; + u32 root_path_cost; + u8 group_addr[6]; + enum { + BR_NO_STP = 0, + BR_KERNEL_STP = 1, + BR_USER_STP = 2, + } stp_enabled; + struct net_bridge_mcast multicast_ctx; + struct bridge_mcast_stats *mcast_stats; + u32 hash_max; + spinlock_t multicast_lock; + struct rhashtable mdb_hash_tbl; + struct rhashtable sg_port_tbl; + struct hlist_head mcast_gc_list; + struct hlist_head mdb_list; + struct work_struct mcast_gc_work; + struct timer_list hello_timer; + struct timer_list tcn_timer; + struct timer_list topology_change_timer; + struct delayed_work gc_work; + struct kobject *ifobj; + u32 auto_cnt; + atomic_t fdb_n_learned; + u32 fdb_max_learned; + struct hlist_head fdb_list; +}; + +struct net_bridge_port; + +struct net_bridge_mcast_port { + struct net_bridge_port *port; + struct net_bridge_vlan *vlan; + struct bridge_mcast_own_query ip4_own_query; + struct timer_list ip4_mc_router_timer; + struct hlist_node ip4_rlist; + struct bridge_mcast_own_query ip6_own_query; + struct 
timer_list ip6_mc_router_timer; + struct hlist_node ip6_rlist; + unsigned char multicast_router; + u32 mdb_n_entries; + u32 mdb_max_entries; +}; + +struct net_bridge_port { + struct net_bridge *br; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct list_head list; + long unsigned int flags; + struct net_bridge_port *backup_port; + u32 backup_nhid; + u8 priority; + u8 state; + u16 port_no; + unsigned char topology_change_ack; + unsigned char config_pending; + port_id port_id; + port_id designated_port; + bridge_id designated_root; + bridge_id designated_bridge; + u32 path_cost; + u32 designated_cost; + long unsigned int designated_age; + struct timer_list forward_delay_timer; + struct timer_list hold_timer; + struct timer_list message_age_timer; + struct kobject kobj; + struct callback_head rcu; + struct net_bridge_mcast_port multicast_ctx; + struct bridge_mcast_stats *mcast_stats; + u32 multicast_eht_hosts_limit; + u32 multicast_eht_hosts_cnt; + struct hlist_head mglist; + char sysfs_name[16]; + u16 group_fwd_mask; + u16 backup_redirected_cnt; + long: 32; + struct bridge_stp_xstats stp_xstats; +}; + +struct metadata_dst; + +struct br_tunnel_info { + __be64 tunnel_id; + struct metadata_dst *tunnel_dst; + long: 32; +}; + +struct net_bridge_vlan { + struct rhash_head vnode; + struct rhash_head tnode; + u16 vid; + u16 flags; + u16 priv_flags; + u8 state; + struct pcpu_sw_netstats *stats; + union { + struct net_bridge *br; + struct net_bridge_port *port; + }; + union { + refcount_t refcnt; + struct net_bridge_vlan *brvlan; + }; + long: 32; + struct br_tunnel_info tinfo; + union { + struct net_bridge_mcast br_mcast_ctx; + struct net_bridge_mcast_port port_mcast_ctx; + }; + u16 msti; + struct list_head vlist; + struct callback_head rcu; +}; + +struct net_bridge_fdb_key { + mac_addr addr; + u16 vlan_id; +}; + +struct net_bridge_fdb_entry { + struct rhash_head rhnode; + struct net_bridge_port *dst; + struct net_bridge_fdb_key key; + struct hlist_node fdb_node; + long unsigned int flags; + long: 32; + long unsigned int updated; + long unsigned int used; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct trace_event_raw_br_fdb_add { + struct trace_entry ent; + u8 ndm_flags; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + u16 nlh_flags; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_external_learn_add { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_fdb_delete { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_update { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_br_mdb_full { + struct trace_entry ent; + u32 __data_loc_dev; + int af; + u16 vid; + __u8 src[16]; + __u8 grp[16]; + __u8 grpmac[6]; + char __data[0]; +}; + +struct trace_event_data_offsets_br_fdb_add { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_fdb_external_learn_add { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_fdb_delete { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_fdb_update { + u32 br_dev; + const void *br_dev_ptr_; + u32 dev; + 
const void *dev_ptr_; +}; + +struct trace_event_data_offsets_br_mdb_full { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_br_fdb_add)(void *, struct ndmsg *, struct net_device *, const unsigned char *, u16, u16); + +typedef void (*btf_trace_br_fdb_external_learn_add)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16); + +typedef void (*btf_trace_fdb_delete)(void *, struct net_bridge *, struct net_bridge_fdb_entry *); + +typedef void (*btf_trace_br_fdb_update)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16, long unsigned int); + +typedef void (*btf_trace_br_mdb_full)(void *, const struct net_device *, const struct br_ip *); + +enum compact_priority { + COMPACT_PRIO_SYNC_FULL = 0, + MIN_COMPACT_PRIORITY = 0, + COMPACT_PRIO_SYNC_LIGHT = 1, + MIN_COMPACT_COSTLY_PRIORITY = 1, + DEF_COMPACT_PRIORITY = 1, + COMPACT_PRIO_ASYNC = 2, + INIT_COMPACT_PRIORITY = 2, +}; + +enum compact_result { + COMPACT_NOT_SUITABLE_ZONE = 0, + COMPACT_SKIPPED = 1, + COMPACT_DEFERRED = 2, + COMPACT_NO_SUITABLE_PAGE = 3, + COMPACT_CONTINUE = 4, + COMPACT_COMPLETE = 5, + COMPACT_PARTIAL_SKIPPED = 6, + COMPACT_CONTENDED = 7, + COMPACT_SUCCESS = 8, +}; + +struct trace_event_raw_page_pool_release { + struct trace_entry ent; + const struct page_pool *pool; + s32 inflight; + u32 hold; + u32 release; + u64 cnt; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_release { + struct trace_entry ent; + const struct page_pool *pool; + long unsigned int netmem; + u32 release; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_hold { + struct trace_entry ent; + const struct page_pool *pool; + long unsigned int netmem; + u32 hold; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_update_nid { + struct trace_entry ent; + const struct page_pool *pool; + int pool_nid; + int new_nid; + char __data[0]; +}; + +struct trace_event_data_offsets_page_pool_release {}; + +struct trace_event_data_offsets_page_pool_state_release {}; + +struct trace_event_data_offsets_page_pool_state_hold {}; + +struct trace_event_data_offsets_page_pool_update_nid {}; + +typedef void (*btf_trace_page_pool_release)(void *, const struct page_pool *, s32, u32, u32); + +typedef void (*btf_trace_page_pool_state_release)(void *, const struct page_pool *, netmem_ref, u32); + +typedef void (*btf_trace_page_pool_state_hold)(void *, const struct page_pool *, netmem_ref, u32); + +typedef void (*btf_trace_page_pool_update_nid)(void *, const struct page_pool *, int); + +struct trace_event_raw_neigh_create { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + int entries; + u8 created; + u8 gc_exempt; + u8 primary_key4[4]; + u8 primary_key6[16]; + char __data[0]; +}; + +struct trace_event_raw_neigh_update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u8 new_lladdr[32]; + u8 new_state; + u32 update_flags; + u32 pid; + char __data[0]; +}; + +struct trace_event_raw_neigh__update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long 
unsigned int used; + u32 err; + char __data[0]; +}; + +struct trace_event_data_offsets_neigh_create { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_neigh_update { + u32 dev; + const void *dev_ptr_; +}; + +struct trace_event_data_offsets_neigh__update { + u32 dev; + const void *dev_ptr_; +}; + +typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, const void *, const struct neighbour *, bool); + +typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32); + +typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int); + +enum ip_conntrack_status { + IPS_EXPECTED_BIT = 0, + IPS_EXPECTED = 1, + IPS_SEEN_REPLY_BIT = 1, + IPS_SEEN_REPLY = 2, + IPS_ASSURED_BIT = 2, + IPS_ASSURED = 4, + IPS_CONFIRMED_BIT = 3, + IPS_CONFIRMED = 8, + IPS_SRC_NAT_BIT = 4, + IPS_SRC_NAT = 16, + IPS_DST_NAT_BIT = 5, + IPS_DST_NAT = 32, + IPS_NAT_MASK = 48, + IPS_SEQ_ADJUST_BIT = 6, + IPS_SEQ_ADJUST = 64, + IPS_SRC_NAT_DONE_BIT = 7, + IPS_SRC_NAT_DONE = 128, + IPS_DST_NAT_DONE_BIT = 8, + IPS_DST_NAT_DONE = 256, + IPS_NAT_DONE_MASK = 384, + IPS_DYING_BIT = 9, + IPS_DYING = 512, + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = 1024, + IPS_TEMPLATE_BIT = 11, + IPS_TEMPLATE = 2048, + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = 4096, + IPS_NAT_CLASH_BIT = 12, + IPS_NAT_CLASH = 4096, + IPS_HELPER_BIT = 13, + IPS_HELPER = 8192, + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = 16384, + IPS_HW_OFFLOAD_BIT = 15, + IPS_HW_OFFLOAD = 32768, + IPS_UNCHANGEABLE_MASK = 56313, + __IPS_MAX_BIT = 16, +}; + +enum ip_conntrack_events { + IPCT_NEW = 0, + IPCT_RELATED = 1, + IPCT_DESTROY = 2, + IPCT_REPLY = 3, + IPCT_ASSURED = 4, + IPCT_PROTOINFO = 5, + IPCT_HELPER = 6, + IPCT_MARK = 7, + IPCT_SEQADJ = 8, + IPCT_NATSEQADJ = 8, + IPCT_SECMARK = 9, + IPCT_LABEL = 10, + IPCT_SYNPROXY = 11, + __IPCT_MAX = 12, +}; + +struct nf_conntrack_tuple_mask { + struct { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + } src; +}; + +struct nf_conntrack_net { + atomic_t count; + unsigned int expect_count; + unsigned int users4; + unsigned int users6; + unsigned int users_bridge; + struct ctl_table_header *sysctl_header; +}; + +struct nf_conntrack_l4proto { + u_int8_t l4proto; + bool allow_clash; + u16 nlattr_size; + bool (*can_early_drop)(const struct nf_conn *); + int (*to_nlattr)(struct sk_buff *, struct nlattr *, struct nf_conn *, bool); + int (*from_nlattr)(struct nlattr **, struct nf_conn *); + int (*tuple_to_nlattr)(struct sk_buff *, const struct nf_conntrack_tuple *); + unsigned int (*nlattr_tuple_size)(); + int (*nlattr_to_tuple)(struct nlattr **, struct nf_conntrack_tuple *, u_int32_t); + const struct nla_policy *nla_policy; + struct { + int (*nlattr_to_obj)(struct nlattr **, struct net *, void *); + int (*obj_to_nlattr)(struct sk_buff *, const void *); + u16 obj_size; + u16 nlattr_max; + const struct nla_policy *nla_policy; + } ctnl_timeout; + void (*print_conntrack)(struct seq_file *, struct nf_conn *); +}; + +struct nf_ct_ext { + u8 offset[4]; + u8 len; + unsigned int gen_id; + long: 32; + char data[0]; +}; + +struct nf_ct_iter_data { + struct net *net; + void *data; + u32 portid; + int report; 
+}; + +struct nf_conntrack_helper; + +struct nf_conntrack_expect { + struct hlist_node lnode; + struct hlist_node hnode; + struct nf_conntrack_tuple tuple; + struct nf_conntrack_tuple_mask mask; + refcount_t use; + unsigned int flags; + unsigned int class; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); + struct nf_conntrack_helper *helper; + struct nf_conn *master; + struct timer_list timeout; + union nf_inet_addr saved_addr; + union nf_conntrack_man_proto saved_proto; + enum ip_conntrack_dir dir; + struct callback_head rcu; +}; + +struct nf_conntrack_expect_policy; + +struct nf_conntrack_helper { + struct hlist_node hnode; + char name[16]; + refcount_t refcnt; + struct module *me; + const struct nf_conntrack_expect_policy *expect_policy; + struct nf_conntrack_tuple tuple; + int (*help)(struct sk_buff *, unsigned int, struct nf_conn *, enum ip_conntrack_info); + void (*destroy)(struct nf_conn *); + int (*from_nlattr)(struct nlattr *, struct nf_conn *); + int (*to_nlattr)(struct sk_buff *, const struct nf_conn *); + unsigned int expect_class_max; + unsigned int flags; + unsigned int queue_num; + u16 data_len; + char nat_mod_name[16]; +}; + +struct nf_conntrack_expect_policy { + unsigned int max_expected; + unsigned int timeout; + char name[16]; +}; + +enum nf_ct_ext_id { + NF_CT_EXT_HELPER = 0, + NF_CT_EXT_NAT = 1, + NF_CT_EXT_SEQADJ = 2, + NF_CT_EXT_ACCT = 3, + NF_CT_EXT_NUM = 4, +}; + +enum nf_ct_helper_flags { + NF_CT_HELPER_F_USERSPACE = 1, + NF_CT_HELPER_F_CONFIGURED = 2, +}; + +struct nf_conn_help { + struct nf_conntrack_helper *helper; + struct hlist_head expectations; + u8 expecting[4]; + long: 32; + char data[32]; +}; + +enum nf_ct_ecache_state { + NFCT_ECACHE_DESTROY_FAIL = 0, + NFCT_ECACHE_DESTROY_SENT = 1, +}; + +struct nf_conn_counter { + atomic64_t packets; + atomic64_t bytes; +}; + +struct nf_conn_acct { + struct nf_conn_counter counter[2]; +}; + +struct nf_conn_tstamp { + u_int64_t start; + u_int64_t stop; +}; + +struct nf_ct_timeout { + __u16 l3num; + const struct nf_conntrack_l4proto *l4proto; + char data[0]; +}; + +struct nf_conn_timeout { + struct nf_ct_timeout *timeout; +}; + +struct nf_conn_labels { + long unsigned int bits[4]; +}; + +struct conntrack_gc_work { + struct delayed_work dwork; + u32 next_bucket; + u32 avg_timeout; + u32 count; + u32 start_time; + bool exiting; + bool early_drop; +}; + +enum ctattr_l4proto { + CTA_PROTO_UNSPEC = 0, + CTA_PROTO_NUM = 1, + CTA_PROTO_SRC_PORT = 2, + CTA_PROTO_DST_PORT = 3, + CTA_PROTO_ICMP_ID = 4, + CTA_PROTO_ICMP_TYPE = 5, + CTA_PROTO_ICMP_CODE = 6, + CTA_PROTO_ICMPV6_ID = 7, + CTA_PROTO_ICMPV6_TYPE = 8, + CTA_PROTO_ICMPV6_CODE = 9, + __CTA_PROTO_MAX = 10, +}; + +enum nft_last_attributes { + NFTA_LAST_UNSPEC = 0, + NFTA_LAST_SET = 1, + NFTA_LAST_MSECS = 2, + NFTA_LAST_PAD = 3, + __NFTA_LAST_MAX = 4, +}; + +struct nft_last { + long unsigned int jiffies; + unsigned int set; +}; + +struct nft_last_priv { + struct nft_last *last; +}; + +enum { + INET_FLAGS_PKTINFO = 0, + INET_FLAGS_TTL = 1, + INET_FLAGS_TOS = 2, + INET_FLAGS_RECVOPTS = 3, + INET_FLAGS_RETOPTS = 4, + INET_FLAGS_PASSSEC = 5, + INET_FLAGS_ORIGDSTADDR = 6, + INET_FLAGS_CHECKSUM = 7, + INET_FLAGS_RECVFRAGSIZE = 8, + INET_FLAGS_RECVERR = 9, + INET_FLAGS_RECVERR_RFC4884 = 10, + INET_FLAGS_FREEBIND = 11, + INET_FLAGS_HDRINCL = 12, + INET_FLAGS_MC_LOOP = 13, + INET_FLAGS_MC_ALL = 14, + INET_FLAGS_TRANSPARENT = 15, + INET_FLAGS_IS_ICSK = 16, + INET_FLAGS_NODEFRAG = 17, + INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, + INET_FLAGS_DEFER_CONNECT = 19, + 
INET_FLAGS_MC6_LOOP = 20, + INET_FLAGS_RECVERR6_RFC4884 = 21, + INET_FLAGS_MC6_ALL = 22, + INET_FLAGS_AUTOFLOWLABEL_SET = 23, + INET_FLAGS_AUTOFLOWLABEL = 24, + INET_FLAGS_DONTFRAG = 25, + INET_FLAGS_RECVERR6 = 26, + INET_FLAGS_REPFLOW = 27, + INET_FLAGS_RTALERT_ISOLATE = 28, + INET_FLAGS_SNDFLOW = 29, + INET_FLAGS_RTALERT = 30, +}; + +struct inet_timewait_sock { + struct sock_common __tw_common; + __u32 tw_mark; + unsigned char tw_substate; + unsigned char tw_rcv_wscale; + __be16 tw_sport; + unsigned int tw_transparent: 1; + unsigned int tw_flowlabel: 20; + unsigned int tw_usec_ts: 1; + unsigned int tw_pad: 2; + unsigned int tw_tos: 8; + u32 tw_txhash; + u32 tw_priority; + struct timer_list tw_timer; + struct inet_bind_bucket *tw_tb; + struct inet_bind2_bucket *tw_tb2; +}; + +struct udp_hslot; + +struct udp_hslot_main; + +struct udp_table { + struct udp_hslot *hash; + struct udp_hslot_main *hash2; + struct udp_hslot *hash4; + unsigned int mask; + unsigned int log; +}; + +struct icmpv6_echo { + __be16 identifier; + __be16 sequence; +}; + +struct icmpv6_nd_advt { + __u32 router: 1; + __u32 solicited: 1; + __u32 override: 1; + __u32 reserved: 29; +}; + +struct icmpv6_nd_ra { + __u8 hop_limit; + __u8 managed: 1; + __u8 other: 1; + __u8 home_agent: 1; + __u8 router_pref: 2; + __u8 reserved: 3; + __be16 rt_lifetime; +}; + +struct icmp6hdr { + __u8 icmp6_type; + __u8 icmp6_code; + __sum16 icmp6_cksum; + union { + __be32 un_data32[1]; + __be16 un_data16[2]; + __u8 un_data8[4]; + struct icmpv6_echo u_echo; + struct icmpv6_nd_advt u_nd_advt; + struct icmpv6_nd_ra u_nd_ra; + } icmp6_dataun; +}; + +enum { + SOF_TIMESTAMPING_TX_HARDWARE = 1, + SOF_TIMESTAMPING_TX_SOFTWARE = 2, + SOF_TIMESTAMPING_RX_HARDWARE = 4, + SOF_TIMESTAMPING_RX_SOFTWARE = 8, + SOF_TIMESTAMPING_SOFTWARE = 16, + SOF_TIMESTAMPING_SYS_HARDWARE = 32, + SOF_TIMESTAMPING_RAW_HARDWARE = 64, + SOF_TIMESTAMPING_OPT_ID = 128, + SOF_TIMESTAMPING_TX_SCHED = 256, + SOF_TIMESTAMPING_TX_ACK = 512, + SOF_TIMESTAMPING_OPT_CMSG = 1024, + SOF_TIMESTAMPING_OPT_TSONLY = 2048, + SOF_TIMESTAMPING_OPT_STATS = 4096, + SOF_TIMESTAMPING_OPT_PKTINFO = 8192, + SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, + SOF_TIMESTAMPING_BIND_PHC = 32768, + SOF_TIMESTAMPING_OPT_ID_TCP = 65536, + SOF_TIMESTAMPING_OPT_RX_FILTER = 131072, + SOF_TIMESTAMPING_LAST = 131072, + SOF_TIMESTAMPING_MASK = 262143, +}; + +enum sock_flags { + SOCK_DEAD = 0, + SOCK_DONE = 1, + SOCK_URGINLINE = 2, + SOCK_KEEPOPEN = 3, + SOCK_LINGER = 4, + SOCK_DESTROY = 5, + SOCK_BROADCAST = 6, + SOCK_TIMESTAMP = 7, + SOCK_ZAPPED = 8, + SOCK_USE_WRITE_QUEUE = 9, + SOCK_DBG = 10, + SOCK_RCVTSTAMP = 11, + SOCK_RCVTSTAMPNS = 12, + SOCK_LOCALROUTE = 13, + SOCK_MEMALLOC = 14, + SOCK_TIMESTAMPING_RX_SOFTWARE = 15, + SOCK_FASYNC = 16, + SOCK_RXQ_OVFL = 17, + SOCK_ZEROCOPY = 18, + SOCK_WIFI_STATUS = 19, + SOCK_NOFCS = 20, + SOCK_FILTER_LOCKED = 21, + SOCK_SELECT_ERR_QUEUE = 22, + SOCK_RCU_FREE = 23, + SOCK_TXTIME = 24, + SOCK_XDP = 25, + SOCK_TSTAMP_NEW = 26, + SOCK_RCVMARK = 27, +}; + +struct sockcm_cookie { + u64 transmit_time; + u32 mark; + u32 tsflags; + u32 ts_opt_id; + long: 32; +}; + +struct ip_options_data { + struct ip_options_rcu opt; + char data[40]; +}; + +struct ipcm_cookie { + struct sockcm_cookie sockc; + __be32 addr; + int oif; + struct ip_options_rcu *opt; + __u8 protocol; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + long: 32; +}; + +struct icmphdr { + __u8 type; + __u8 code; + __sum16 checksum; + union { + struct { + __be16 id; + __be16 sequence; + } echo; + __be32 gateway; + struct { 
+ __be16 __unused; + __be16 mtu; + } frag; + __u8 reserved[4]; + } un; +}; + +struct icmp_err { + int errno; + unsigned int fatal: 1; +}; + +struct pingv6_ops { + int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); + void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + int (*icmpv6_err_convert)(u8, u8, int *); + void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); +}; + +struct ping_iter_state { + struct seq_net_private p; + int bucket; + sa_family_t family; +}; + +struct pingfakehdr { + struct icmphdr icmph; + struct msghdr *msg; + sa_family_t family; + __wsum wcheck; +}; + +struct udp_hslot { + union { + struct hlist_head head; + struct hlist_nulls_head nulls_head; + }; + int count; + spinlock_t lock; + long: 32; +}; + +struct udp_hslot_main { + struct udp_hslot hslot; + u32 hash4_cnt; + long: 32; +}; + +struct ping_table { + struct hlist_head hash[64]; + spinlock_t lock; +}; + +struct ac6_iter_state { + struct seq_net_private p; + struct net_device *dev; +}; + +struct inet_skb_parm { + int iif; + struct ip_options opt; + u16 flags; + u16 frag_max_size; +}; + +struct tcp_skb_cb { + __u32 seq; + __u32 end_seq; + union { + struct { + u16 tcp_gso_segs; + u16 tcp_gso_size; + }; + }; + __u8 tcp_flags; + __u8 sacked; + __u8 ip_dsfield; + __u8 txstamp_ack: 1; + __u8 eor: 1; + __u8 has_rxtstamp: 1; + __u8 unused: 5; + __u32 ack_seq; + long: 32; + union { + struct { + __u32 is_app_limited: 1; + __u32 delivered_ce: 20; + __u32 unused: 11; + __u32 delivered; + u64 first_tx_mstamp; + u64 delivered_mstamp; + } tx; + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + }; +}; + +enum nla_policy_validation { + NLA_VALIDATE_NONE = 0, + NLA_VALIDATE_RANGE = 1, + NLA_VALIDATE_RANGE_WARN_TOO_LONG = 2, + NLA_VALIDATE_MIN = 3, + NLA_VALIDATE_MAX = 4, + NLA_VALIDATE_MASK = 5, + NLA_VALIDATE_RANGE_PTR = 6, + NLA_VALIDATE_FUNCTION = 7, +}; + +enum { + MDBA_UNSPEC = 0, + MDBA_MDB = 1, + MDBA_ROUTER = 2, + __MDBA_MAX = 3, +}; + +enum { + MDBA_MDB_UNSPEC = 0, + MDBA_MDB_ENTRY = 1, + __MDBA_MDB_MAX = 2, +}; + +enum { + MDBA_MDB_ENTRY_UNSPEC = 0, + MDBA_MDB_ENTRY_INFO = 1, + __MDBA_MDB_ENTRY_MAX = 2, +}; + +enum { + MDBA_MDB_EATTR_UNSPEC = 0, + MDBA_MDB_EATTR_TIMER = 1, + MDBA_MDB_EATTR_SRC_LIST = 2, + MDBA_MDB_EATTR_GROUP_MODE = 3, + MDBA_MDB_EATTR_SOURCE = 4, + MDBA_MDB_EATTR_RTPROT = 5, + MDBA_MDB_EATTR_DST = 6, + MDBA_MDB_EATTR_DST_PORT = 7, + MDBA_MDB_EATTR_VNI = 8, + MDBA_MDB_EATTR_IFINDEX = 9, + MDBA_MDB_EATTR_SRC_VNI = 10, + __MDBA_MDB_EATTR_MAX = 11, +}; + +enum { + MDBA_MDB_SRCLIST_UNSPEC = 0, + MDBA_MDB_SRCLIST_ENTRY = 1, + __MDBA_MDB_SRCLIST_MAX = 2, +}; + +enum { + MDBA_MDB_SRCATTR_UNSPEC = 0, + MDBA_MDB_SRCATTR_ADDRESS = 1, + MDBA_MDB_SRCATTR_TIMER = 2, + __MDBA_MDB_SRCATTR_MAX = 3, +}; + +enum { + MDBA_ROUTER_UNSPEC = 0, + MDBA_ROUTER_PORT = 1, + __MDBA_ROUTER_MAX = 2, +}; + +enum { + MDBA_ROUTER_PATTR_UNSPEC = 0, + MDBA_ROUTER_PATTR_TIMER = 1, + MDBA_ROUTER_PATTR_TYPE = 2, + MDBA_ROUTER_PATTR_INET_TIMER = 3, + MDBA_ROUTER_PATTR_INET6_TIMER = 4, + MDBA_ROUTER_PATTR_VID = 5, + __MDBA_ROUTER_PATTR_MAX = 6, +}; + +struct br_port_msg { + __u8 family; + __u32 ifindex; +}; + +struct br_mdb_entry { + __u32 ifindex; + __u8 state; + __u8 flags; + __u16 vid; + struct { + union { + __be32 ip4; + struct in6_addr ip6; + 
unsigned char mac_addr[6]; + } u; + __be16 proto; + } addr; +}; + +enum { + MDBA_SET_ENTRY_UNSPEC = 0, + MDBA_SET_ENTRY = 1, + MDBA_SET_ENTRY_ATTRS = 2, + __MDBA_SET_ENTRY_MAX = 3, +}; + +enum { + MDBA_GET_ENTRY_UNSPEC = 0, + MDBA_GET_ENTRY = 1, + MDBA_GET_ENTRY_ATTRS = 2, + __MDBA_GET_ENTRY_MAX = 3, +}; + +enum { + MDBE_ATTR_UNSPEC = 0, + MDBE_ATTR_SOURCE = 1, + MDBE_ATTR_SRC_LIST = 2, + MDBE_ATTR_GROUP_MODE = 3, + MDBE_ATTR_RTPROT = 4, + MDBE_ATTR_DST = 5, + MDBE_ATTR_DST_PORT = 6, + MDBE_ATTR_VNI = 7, + MDBE_ATTR_IFINDEX = 8, + MDBE_ATTR_SRC_VNI = 9, + MDBE_ATTR_STATE_MASK = 10, + __MDBE_ATTR_MAX = 11, +}; + +enum { + MDBE_SRC_LIST_UNSPEC = 0, + MDBE_SRC_LIST_ENTRY = 1, + __MDBE_SRC_LIST_MAX = 2, +}; + +enum { + MDBE_SRCATTR_UNSPEC = 0, + MDBE_SRCATTR_ADDRESS = 1, + __MDBE_SRCATTR_MAX = 2, +}; + +struct br_mdb_src_entry { + struct br_ip addr; +}; + +struct br_mdb_config { + struct net_bridge *br; + struct net_bridge_port *p; + struct br_mdb_entry *entry; + struct br_ip group; + bool src_entry; + u8 filter_mode; + u16 nlflags; + struct br_mdb_src_entry *src_entries; + int num_src_entries; + u8 rt_protocol; +}; + +enum { + BR_VLFLAG_PER_PORT_STATS = 1, + BR_VLFLAG_ADDED_BY_SWITCHDEV = 2, + BR_VLFLAG_MCAST_ENABLED = 4, + BR_VLFLAG_GLOBAL_MCAST_ENABLED = 8, + BR_VLFLAG_NEIGH_SUPPRESS_ENABLED = 16, +}; + +struct net_bridge_vlan_group { + struct rhashtable vlan_hash; + struct rhashtable tunnel_hash; + struct list_head vlan_list; + u16 num_vlans; + u16 pvid; + u8 pvid_state; +}; + +struct net_bridge_mcast_gc { + struct hlist_node gc_node; + void (*destroy)(struct net_bridge_mcast_gc *); +}; + +struct net_bridge_port_group; + +struct net_bridge_group_src { + struct hlist_node node; + struct br_ip addr; + struct net_bridge_port_group *pg; + u8 flags; + u8 src_query_rexmit_cnt; + struct timer_list timer; + struct net_bridge *br; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct net_bridge_port_group_sg_key { + struct net_bridge_port *port; + struct br_ip addr; +}; + +struct net_bridge_port_group { + struct net_bridge_port_group *next; + struct net_bridge_port_group_sg_key key; + unsigned char eth_addr[6]; + unsigned char flags; + unsigned char filter_mode; + unsigned char grp_query_rexmit_cnt; + unsigned char rt_protocol; + struct hlist_head src_list; + unsigned int src_ents; + struct timer_list timer; + struct timer_list rexmit_timer; + struct hlist_node mglist; + struct rb_root eht_set_tree; + struct rb_root eht_host_tree; + struct rhash_head rhnode; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +struct net_bridge_mdb_entry { + struct rhash_head rhnode; + struct net_bridge *br; + struct net_bridge_port_group *ports; + struct br_ip addr; + bool host_joined; + struct timer_list timer; + struct hlist_node mdb_node; + struct net_bridge_mcast_gc mcast_gc; + struct callback_head rcu; +}; + +enum net_bridge_opts { + BROPT_VLAN_ENABLED = 0, + BROPT_VLAN_STATS_ENABLED = 1, + BROPT_NF_CALL_IPTABLES = 2, + BROPT_NF_CALL_IP6TABLES = 3, + BROPT_NF_CALL_ARPTABLES = 4, + BROPT_GROUP_ADDR_SET = 5, + BROPT_MULTICAST_ENABLED = 6, + BROPT_MULTICAST_QUERY_USE_IFADDR = 7, + BROPT_MULTICAST_STATS_ENABLED = 8, + BROPT_HAS_IPV6_ADDR = 9, + BROPT_NEIGH_SUPPRESS_ENABLED = 10, + BROPT_MTU_SET_BY_USER = 11, + BROPT_VLAN_STATS_PER_PORT = 12, + BROPT_NO_LL_LEARN = 13, + BROPT_VLAN_BRIDGE_BINDING = 14, + BROPT_MCAST_VLAN_SNOOPING_ENABLED = 15, + BROPT_MST_ENABLED = 16, +}; + +struct br_mdb_flush_desc { + u32 port_ifindex; + u16 vid; + u8 rt_protocol; + u8 state; + u8 
state_mask; +}; + +struct cpu_spec; + +typedef void (*cpu_setup_t)(long unsigned int, struct cpu_spec *); + +enum powerpc_pmc_type { + PPC_PMC_DEFAULT = 0, + PPC_PMC_IBM = 1, + PPC_PMC_PA6T = 2, + PPC_PMC_G4 = 3, +}; + +typedef void (*cpu_restore_t)(); + +struct cpu_spec { + unsigned int pvr_mask; + unsigned int pvr_value; + char *cpu_name; + long unsigned int cpu_features; + unsigned int cpu_user_features; + unsigned int cpu_user_features2; + unsigned int mmu_features; + unsigned int icache_bsize; + unsigned int dcache_bsize; + void (*cpu_down_flush)(); + unsigned int num_pmcs; + enum powerpc_pmc_type pmc_type; + cpu_setup_t cpu_setup; + cpu_restore_t cpu_restore; + char *platform; + int (*machine_check)(struct pt_regs *); + long int (*machine_check_early)(struct pt_regs *); +}; + +typedef long unsigned int kimage_entry_t; + +struct kexec_segment { + union { + void *buf; + void *kbuf; + }; + size_t bufsz; + long unsigned int mem; + size_t memsz; +}; + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + long unsigned int start; + struct page *control_code_page; + struct page *swap_page; + void *vmcoreinfo_data_copy; + long unsigned int nr_segments; + struct kexec_segment segment[16]; + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unusable_pages; + long unsigned int control_page; + unsigned int type: 1; + unsigned int preserve_context: 1; + unsigned int file_mode: 1; + void *elf_headers; + long unsigned int elf_headers_sz; + long unsigned int elf_load_addr; +}; + +typedef void (*relocate_new_kernel_t)(long unsigned int, long unsigned int, long unsigned int); + +struct wq_flusher; + +struct worker; + +struct workqueue_attrs; + +struct pool_workqueue; + +struct wq_device; + +struct wq_node_nr_active; + +struct workqueue_struct { + struct list_head pwqs; + struct list_head list; + struct mutex mutex; + int work_color; + int flush_color; + atomic_t nr_pwqs_to_flush; + struct wq_flusher *first_flusher; + struct list_head flusher_queue; + struct list_head flusher_overflow; + struct list_head maydays; + struct worker *rescuer; + int nr_drainers; + int max_active; + int min_active; + int saved_max_active; + int saved_min_active; + struct workqueue_attrs *unbound_attrs; + struct pool_workqueue *dfl_pwq; + struct wq_device *wq_dev; + char name[32]; + struct callback_head rcu; + long: 32; + long: 32; + unsigned int flags; + struct pool_workqueue **cpu_pwq; + struct wq_node_nr_active *node_nr_active[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum wq_affn_scope { + WQ_AFFN_DFL = 0, + WQ_AFFN_CPU = 1, + WQ_AFFN_SMT = 2, + WQ_AFFN_CACHE = 3, + WQ_AFFN_NUMA = 4, + WQ_AFFN_SYSTEM = 5, + WQ_AFFN_NR_TYPES = 6, +}; + +struct workqueue_attrs { + int nice; + cpumask_var_t cpumask; + cpumask_var_t __pod_cpumask; + bool affn_strict; + enum wq_affn_scope affn_scope; + bool ordered; +}; + +enum wq_consts { + WQ_MAX_ACTIVE = 2048, + WQ_UNBOUND_MAX_ACTIVE = 2048, + WQ_DFL_ACTIVE = 1024, + WQ_DFL_MIN_ACTIVE = 8, +}; + +struct worker_pool; + +struct worker { + union { + struct list_head entry; + struct hlist_node hentry; + }; + struct work_struct *current_work; + work_func_t current_func; + struct pool_workqueue *current_pwq; + long: 32; + u64 current_at; + unsigned int current_color; + int sleeping; + work_func_t last_func; + struct list_head scheduled; + struct task_struct *task; + struct worker_pool *pool; + struct list_head node; + long unsigned int last_active; + unsigned int flags; + int id; + 
char desc[32]; + struct workqueue_struct *rescue_wq; + long: 32; +}; + +struct pool_workqueue { + struct worker_pool *pool; + struct workqueue_struct *wq; + int work_color; + int flush_color; + int refcnt; + int nr_in_flight[16]; + bool plugged; + int nr_active; + struct list_head inactive_works; + struct list_head pending_node; + struct list_head pwqs_node; + struct list_head mayday_node; + long: 32; + u64 stats[8]; + struct kthread_work release_work; + struct callback_head rcu; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct worker_pool { + raw_spinlock_t lock; + int cpu; + int node; + int id; + unsigned int flags; + long unsigned int watchdog_ts; + bool cpu_stall; + int nr_running; + struct list_head worklist; + int nr_workers; + int nr_idle; + struct list_head idle_list; + struct timer_list idle_timer; + struct work_struct idle_cull_work; + struct timer_list mayday_timer; + struct hlist_head busy_hash[64]; + struct worker *manager; + struct list_head workers; + struct ida worker_ida; + struct workqueue_attrs *attrs; + struct hlist_node hash_node; + int refcnt; + struct callback_head rcu; +}; + +enum worker_pool_flags { + POOL_BH = 1, + POOL_MANAGER_ACTIVE = 2, + POOL_DISASSOCIATED = 4, + POOL_BH_DRAINING = 8, +}; + +enum worker_flags { + WORKER_DIE = 2, + WORKER_IDLE = 4, + WORKER_PREP = 8, + WORKER_CPU_INTENSIVE = 64, + WORKER_UNBOUND = 128, + WORKER_REBOUND = 256, + WORKER_NOT_RUNNING = 456, +}; + +enum work_cancel_flags { + WORK_CANCEL_DELAYED = 1, + WORK_CANCEL_DISABLE = 2, +}; + +enum wq_internal_consts { + NR_STD_WORKER_POOLS = 2, + UNBOUND_POOL_HASH_ORDER = 6, + BUSY_WORKER_HASH_ORDER = 6, + MAX_IDLE_WORKERS_RATIO = 4, + IDLE_WORKER_TIMEOUT = 300000, + MAYDAY_INITIAL_TIMEOUT = 10, + MAYDAY_INTERVAL = 100, + CREATE_COOLDOWN = 1000, + RESCUER_NICE_LEVEL = -20, + HIGHPRI_NICE_LEVEL = -20, + WQ_NAME_LEN = 32, + WORKER_ID_LEN = 42, +}; + +enum pool_workqueue_stats { + PWQ_STAT_STARTED = 0, + PWQ_STAT_COMPLETED = 1, + PWQ_STAT_CPU_TIME = 2, + PWQ_STAT_CPU_INTENSIVE = 3, + PWQ_STAT_CM_WAKEUP = 4, + PWQ_STAT_REPATRIATED = 5, + PWQ_STAT_MAYDAY = 6, + PWQ_STAT_RESCUED = 7, + PWQ_NR_STATS = 8, +}; + +struct wq_flusher { + struct list_head list; + int flush_color; + struct completion done; +}; + +struct wq_node_nr_active { + int max; + atomic_t nr; + raw_spinlock_t lock; + struct list_head pending_pwqs; +}; + +struct wq_device { + struct workqueue_struct *wq; + long: 32; + struct device dev; +}; + +struct wq_pod_type { + int nr_pods; + cpumask_var_t *pod_cpus; + int *pod_node; + int *cpu_pod; +}; + +struct work_offq_data { + u32 pool_id; + u32 disable; + u32 flags; +}; + +struct trace_event_raw_workqueue_queue_work { + struct trace_entry ent; + void *work; + void *function; + u32 __data_loc_workqueue; + int req_cpu; + int cpu; + char __data[0]; +}; + +struct trace_event_raw_workqueue_activate_work { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_data_offsets_workqueue_queue_work { + u32 workqueue; + const void *workqueue_ptr_; +}; + +struct trace_event_data_offsets_workqueue_activate_work {}; + +struct trace_event_data_offsets_workqueue_execute_start {}; + +struct 
trace_event_data_offsets_workqueue_execute_end {}; + +typedef void (*btf_trace_workqueue_queue_work)(void *, int, struct pool_workqueue *, struct work_struct *); + +typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t); + +struct wq_drain_dead_softirq_work { + struct work_struct work; + struct worker_pool *pool; + struct completion done; +}; + +struct wq_barrier { + struct work_struct work; + struct completion done; + struct task_struct *task; +}; + +struct apply_wqattrs_ctx { + struct workqueue_struct *wq; + struct workqueue_attrs *attrs; + struct list_head list; + struct pool_workqueue *dfl_pwq; + struct pool_workqueue *pwq_tbl[0]; +}; + +struct pr_cont_work_struct { + bool comma; + work_func_t func; + long int ctr; +}; + +struct work_for_cpu { + struct work_struct work; + long int (*fn)(void *); + void *arg; + long int ret; +}; + +enum { + IORES_DESC_NONE = 0, + IORES_DESC_CRASH_KERNEL = 1, + IORES_DESC_ACPI_TABLES = 2, + IORES_DESC_ACPI_NV_STORAGE = 3, + IORES_DESC_PERSISTENT_MEMORY = 4, + IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, + IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, + IORES_DESC_RESERVED = 7, + IORES_DESC_SOFT_RESERVED = 8, + IORES_DESC_CXL = 9, +}; + +struct cpu_stop_done { + atomic_t nr_todo; + int ret; + struct completion completion; +}; + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +struct cpu_stopper { + struct task_struct *thread; + raw_spinlock_t lock; + bool enabled; + struct list_head works; + struct cpu_stop_work stop_work; + long unsigned int caller; + cpu_stop_fn_t fn; +}; + +enum multi_stop_state { + MULTI_STOP_NONE = 0, + MULTI_STOP_PREPARE = 1, + MULTI_STOP_DISABLE_IRQ = 2, + MULTI_STOP_RUN = 3, + MULTI_STOP_EXIT = 4, +}; + +struct multi_stop_data { + cpu_stop_fn_t fn; + void *data; + unsigned int num_threads; + const struct cpumask *active_cpus; + enum multi_stop_state state; + atomic_t thread_ack; +}; + +enum { + TRACE_NOP_OPT_ACCEPT = 1, + TRACE_NOP_OPT_REFUSE = 2, +}; + +struct trace_event_raw_benchmark_event { + struct trace_entry ent; + char str[128]; + u64 delta; + char __data[0]; +}; + +struct trace_event_data_offsets_benchmark_event {}; + +typedef void (*btf_trace_benchmark_event)(void *, const char *, u64); + +enum { + BPF_ANY = 0, + BPF_NOEXIST = 1, + BPF_EXIST = 2, + BPF_F_LOCK = 4, +}; + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[8]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __u64 jited_prog_insns; + __u64 xlated_prog_insns; + __u64 load_time; + __u32 created_by_uid; + __u32 nr_map_ids; + __u64 map_ids; + char name[16]; + __u32 ifindex; + __u32 gpl_compatible: 1; + __u64 netns_dev; + __u64 netns_ino; + __u32 nr_jited_ksyms; + __u32 nr_jited_func_lens; + __u64 jited_ksyms; + __u64 jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __u64 func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __u64 line_info; + __u64 jited_line_info; + __u32 nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __u64 prog_tags; + __u64 run_time_ns; + __u64 run_cnt; + __u64 recursion_misses; + __u32 verified_insns; + __u32 attach_btf_obj_id; + __u32 attach_btf_id; + long: 32; +}; + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[16]; + __u32 ifindex; + 
__u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_id; + __u64 map_extra; +}; + +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *, int, int); + int (*finalize)(struct bpf_verifier_env *); + int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *); + int (*remove_insns)(struct bpf_verifier_env *, u32, u32); + int (*prepare)(struct bpf_prog *); + int (*translate)(struct bpf_prog *); + void (*destroy)(struct bpf_prog *); +}; + +struct bpf_offload_dev { + const struct bpf_prog_offload_ops *ops; + struct list_head netdevs; + void *priv; +}; + +typedef struct ns_common *ns_get_path_helper_t(void *); + +enum xdp_rx_metadata { + XDP_METADATA_KFUNC_RX_TIMESTAMP = 0, + XDP_METADATA_KFUNC_RX_HASH = 1, + XDP_METADATA_KFUNC_RX_VLAN_TAG = 2, + MAX_XDP_METADATA_KFUNC = 3, +}; + +struct bpf_offload_netdev { + struct rhash_head l; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + struct list_head progs; + struct list_head maps; + struct list_head offdev_netdevs; +}; + +struct ns_get_path_bpf_prog_args { + struct bpf_prog *prog; + struct bpf_prog_info *info; +}; + +struct ns_get_path_bpf_map_args { + struct bpf_offloaded_map *offmap; + struct bpf_map_info *info; +}; + +typedef struct pglist_data pg_data_t; + +typedef struct kmem_cache *kmem_buckets[14]; + +struct reciprocal_value { + u32 m; + u8 sh1; + u8 sh2; +}; + +struct kmem_cache_order_objects { + unsigned int x; +}; + +struct kmem_cache_cpu; + +struct kmem_cache_node; + +struct kmem_cache { + struct kmem_cache_cpu *cpu_slab; + slab_flags_t flags; + long unsigned int min_partial; + unsigned int size; + unsigned int object_size; + struct reciprocal_value reciprocal_size; + unsigned int offset; + unsigned int cpu_partial; + unsigned int cpu_partial_slabs; + struct kmem_cache_order_objects oo; + struct kmem_cache_order_objects min; + gfp_t allocflags; + int refcount; + void (*ctor)(void *); + unsigned int inuse; + unsigned int align; + unsigned int red_left_pad; + const char *name; + struct list_head list; + struct kobject kobj; + struct kmem_cache_node *node[1]; +}; + +struct slab { + long unsigned int __page_flags; + struct kmem_cache *slab_cache; + union { + struct { + union { + struct list_head slab_list; + struct { + struct slab *next; + int slabs; + }; + }; + union { + struct { + void *freelist; + union { + long unsigned int counters; + struct { + unsigned int inuse: 16; + unsigned int objects: 15; + unsigned int frozen: 1; + }; + }; + }; + }; + }; + struct callback_head callback_head; + }; + unsigned int __page_type; + atomic_t __page_refcount; + long unsigned int obj_exts; +}; + +enum slab_state { + DOWN = 0, + PARTIAL = 1, + UP = 2, + FULL = 3, +}; + +struct kmalloc_info_struct { + const char *name[4]; + unsigned int size; +}; + +struct slabinfo { + long unsigned int active_objs; + long unsigned int num_objs; + long unsigned int active_slabs; + long unsigned int num_slabs; + long unsigned int shared_avail; + unsigned int limit; + unsigned int batchcount; + unsigned int shared; + unsigned int objects_per_slab; + unsigned int cache_order; +}; + +struct kmem_obj_info { + void *kp_ptr; + struct slab *kp_slab; + void *kp_objp; + long unsigned int kp_data_offset; + struct kmem_cache *kp_slab_cache; + void *kp_ret; + void *kp_stack[16]; + void *kp_free_stack[16]; +}; + +struct trace_event_raw_kmem_cache_alloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + 
size_t bytes_req; + size_t bytes_alloc; + long unsigned int gfp_flags; + int node; + bool accounted; + char __data[0]; +}; + +struct trace_event_raw_kmalloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + long unsigned int gfp_flags; + int node; + char __data[0]; +}; + +struct trace_event_raw_kfree { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + char __data[0]; +}; + +struct trace_event_raw_kmem_cache_free { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free_batched { + struct trace_entry ent; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + long unsigned int gfp_flags; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + int percpu_refill; + char __data[0]; +}; + +struct trace_event_raw_mm_page_pcpu_drain { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc_extfrag { + struct trace_entry ent; + long unsigned int pfn; + int alloc_order; + int fallback_order; + int alloc_migratetype; + int fallback_migratetype; + int change_ownership; + char __data[0]; +}; + +struct trace_event_raw_mm_alloc_contig_migrate_range_info { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + long unsigned int nr_migrated; + long unsigned int nr_reclaimed; + long unsigned int nr_mapped; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_rss_stat { + struct trace_entry ent; + unsigned int mm_id; + unsigned int curr; + int member; + long int size; + char __data[0]; +}; + +struct trace_event_data_offsets_kmem_cache_alloc {}; + +struct trace_event_data_offsets_kmalloc {}; + +struct trace_event_data_offsets_kfree {}; + +struct trace_event_data_offsets_kmem_cache_free { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_mm_page_free {}; + +struct trace_event_data_offsets_mm_page_free_batched {}; + +struct trace_event_data_offsets_mm_page_alloc {}; + +struct trace_event_data_offsets_mm_page {}; + +struct trace_event_data_offsets_mm_page_pcpu_drain {}; + +struct trace_event_data_offsets_mm_page_alloc_extfrag {}; + +struct trace_event_data_offsets_mm_alloc_contig_migrate_range_info {}; + +struct trace_event_data_offsets_rss_stat {}; + +typedef void (*btf_trace_kmem_cache_alloc)(void *, long unsigned int, const void *, struct kmem_cache *, gfp_t, int); + +typedef void (*btf_trace_kmalloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int); + +typedef void (*btf_trace_kfree)(void *, long unsigned int, const void *); + +typedef void (*btf_trace_kmem_cache_free)(void *, long unsigned int, const void *, const struct kmem_cache *); + +typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int); + +typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *); + +typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int); + +typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, int, 
int); + +typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int); + +typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int); + +typedef void (*btf_trace_mm_alloc_contig_migrate_range_info)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int); + +enum vmpressure_levels { + VMPRESSURE_LOW = 0, + VMPRESSURE_MEDIUM = 1, + VMPRESSURE_CRITICAL = 2, + VMPRESSURE_NUM_LEVELS = 3, +}; + +enum vmpressure_modes { + VMPRESSURE_NO_PASSTHROUGH = 0, + VMPRESSURE_HIERARCHY = 1, + VMPRESSURE_LOCAL = 2, + VMPRESSURE_NUM_MODES = 3, +}; + +struct eventfd_ctx; + +struct vmpressure_event { + struct eventfd_ctx *efd; + enum vmpressure_levels level; + enum vmpressure_modes mode; + struct list_head node; +}; + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct fsuuid2 { + __u8 len; + __u8 uuid[16]; +}; + +struct fs_sysfs_path { + __u8 len; + __u8 name[128]; +}; + +struct file_dedupe_range_info { + __s64 dest_fd; + __u64 dest_offset; + __u64 bytes_deduped; + __s32 status; + __u32 reserved; +}; + +struct file_dedupe_range { + __u64 src_offset; + __u64 src_length; + __u16 dest_count; + __u16 reserved1; + __u32 reserved2; + struct file_dedupe_range_info info[0]; +}; + +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + __u32 fsx_cowextsize; + unsigned char fsx_pad[8]; +}; + +struct fileattr { + u32 flags; + u32 fsx_xflags; + u32 fsx_extsize; + u32 fsx_nextents; + u32 fsx_projid; + u32 fsx_cowextsize; + bool flags_valid: 1; + bool fsx_valid: 1; +}; + +struct space_resv { + __s16 l_type; + __s16 l_whence; + long: 32; + __s64 l_start; + __s64 l_len; + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; +}; + +struct fiemap { + __u64 fm_start; + __u64 fm_length; + __u32 fm_flags; + __u32 fm_mapped_extents; + __u32 fm_extent_count; + __u32 fm_reserved; + struct fiemap_extent fm_extents[0]; +}; + +typedef __kernel_ulong_t ino_t; + +typedef long int intptr_t; + +struct genradix_root; + +struct __genradix { + struct genradix_root *root; +}; + +struct genradix_node { + union { + struct genradix_node *children[128]; + u8 data[512]; + }; +}; + +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct syscall_info { + __u64 sp; + struct seccomp_data data; +}; + +struct constant_table { + const char *name; + int value; +}; + +struct proc_timens_offset { + int clockid; + long: 32; + struct timespec64 val; +}; + +enum resctrl_conf_type { + CDP_NONE = 0, + CDP_CODE = 1, + CDP_DATA = 2, +}; + +union proc_op { + int (*proc_get_link)(struct dentry *, struct path *); + int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); + int lsmid; +}; + +struct proc_inode { + struct pid *pid; + unsigned int fd; + union proc_op op; + struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + const struct ctl_table *sysctl_entry; + struct hlist_node sibling_inodes; + const struct proc_ns_operations *ns_ops; + long: 32; + struct inode vfs_inode; +}; + +typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); + +enum proc_mem_force { + PROC_MEM_FORCE_ALWAYS = 0, + PROC_MEM_FORCE_PTRACE = 1, + PROC_MEM_FORCE_NEVER = 2, +}; + +struct pid_entry { + const char *name; + unsigned int len; + umode_t mode; + const struct 
inode_operations *iop; + const struct file_operations *fop; + union proc_op op; +}; + +struct limit_names { + const char *name; + const char *unit; +}; + +struct map_files_info { + long unsigned int start; + long unsigned int end; + fmode_t mode; +}; + +struct tgid_iter { + unsigned int tgid; + struct task_struct *task; +}; + +enum bh_state_bits { + BH_Uptodate = 0, + BH_Dirty = 1, + BH_Lock = 2, + BH_Req = 3, + BH_Mapped = 4, + BH_New = 5, + BH_Async_Read = 6, + BH_Async_Write = 7, + BH_Delay = 8, + BH_Boundary = 9, + BH_Write_EIO = 10, + BH_Unwritten = 11, + BH_Quiet = 12, + BH_Meta = 13, + BH_Prio = 14, + BH_Defer_Completion = 15, + BH_PrivateStart = 16, +}; + +enum { + EXT4_INODE_SECRM = 0, + EXT4_INODE_UNRM = 1, + EXT4_INODE_COMPR = 2, + EXT4_INODE_SYNC = 3, + EXT4_INODE_IMMUTABLE = 4, + EXT4_INODE_APPEND = 5, + EXT4_INODE_NODUMP = 6, + EXT4_INODE_NOATIME = 7, + EXT4_INODE_DIRTY = 8, + EXT4_INODE_COMPRBLK = 9, + EXT4_INODE_NOCOMPR = 10, + EXT4_INODE_ENCRYPT = 11, + EXT4_INODE_INDEX = 12, + EXT4_INODE_IMAGIC = 13, + EXT4_INODE_JOURNAL_DATA = 14, + EXT4_INODE_NOTAIL = 15, + EXT4_INODE_DIRSYNC = 16, + EXT4_INODE_TOPDIR = 17, + EXT4_INODE_HUGE_FILE = 18, + EXT4_INODE_EXTENTS = 19, + EXT4_INODE_VERITY = 20, + EXT4_INODE_EA_INODE = 21, + EXT4_INODE_DAX = 25, + EXT4_INODE_INLINE_DATA = 28, + EXT4_INODE_PROJINHERIT = 29, + EXT4_INODE_CASEFOLD = 30, + EXT4_INODE_RESERVED = 31, +}; + +struct extent_status { + struct rb_node rb_node; + ext4_lblk_t es_lblk; + ext4_lblk_t es_len; + long: 32; + ext4_fsblk_t es_pblk; +}; + +struct ext4_es_tree { + struct rb_root root; + struct extent_status *cache_es; +}; + +struct ext4_pending_tree { + struct rb_root root; +}; + +struct ext4_inode_info { + __le32 i_data[15]; + __u32 i_dtime; + ext4_fsblk_t i_file_acl; + ext4_group_t i_block_group; + ext4_lblk_t i_dir_start_lookup; + long unsigned int i_state_flags; + long unsigned int i_flags; + struct rw_semaphore xattr_sem; + union { + struct list_head i_orphan; + unsigned int i_orphan_idx; + }; + struct list_head i_fc_dilist; + struct list_head i_fc_list; + ext4_lblk_t i_fc_lblk_start; + ext4_lblk_t i_fc_lblk_len; + atomic_t i_fc_updates; + atomic_t i_unwritten; + wait_queue_head_t i_fc_wait; + struct mutex i_fc_lock; + loff_t i_disksize; + struct rw_semaphore i_data_sem; + struct inode vfs_inode; + struct jbd2_inode *jinode; + spinlock_t i_raw_lock; + struct timespec64 i_crtime; + atomic_t i_prealloc_active; + unsigned int i_reserved_data_blocks; + struct rb_root i_prealloc_node; + rwlock_t i_prealloc_lock; + struct ext4_es_tree i_es_tree; + rwlock_t i_es_lock; + struct list_head i_es_list; + unsigned int i_es_all_nr; + unsigned int i_es_shk_nr; + ext4_lblk_t i_es_shrink_lblk; + ext4_group_t i_last_alloc_group; + struct ext4_pending_tree i_pending_tree; + __u16 i_extra_isize; + u16 i_inline_off; + u16 i_inline_size; + spinlock_t i_completed_io_lock; + struct list_head i_rsv_conversion_list; + struct work_struct i_rsv_conversion_work; + spinlock_t i_block_reservation_lock; + tid_t i_sync_tid; + tid_t i_datasync_tid; + __u32 i_csum_seed; + kprojid_t i_projid; +}; + +enum ext4_journal_trigger_type { + EXT4_JTR_ORPHAN_FILE = 0, + EXT4_JTR_NONE = 1, +}; + +typedef int wait_bit_action_f(struct wait_bit_key *, int); + +enum jbd_state_bits { + BH_JBD = 16, + BH_JWrite = 17, + BH_Freed = 18, + BH_Revoked = 19, + BH_RevokeValid = 20, + BH_JBDDirty = 21, + BH_JournalHead = 22, + BH_Shadow = 23, + BH_Verified = 24, + BH_JBDPrivateStart = 25, +}; + +enum { + Opt_direct = 0, + Opt_fd = 1, + Opt_gid = 2, + Opt_ignore = 3, + 
Opt_indirect = 4, + Opt_maxproto = 5, + Opt_minproto = 6, + Opt_offset = 7, + Opt_pgrp = 8, + Opt_strictexpire = 9, + Opt_uid = 10, +}; + +struct autofs_fs_context { + kuid_t uid; + kgid_t gid; + int pgrp; + bool pgrp_set; +}; + +struct btrfs_ioctl_encoded_io_args { + const struct iovec *iov; + long unsigned int iovcnt; + __s64 offset; + __u64 flags; + __u64 len; + __u64 unencoded_len; + __u64 unencoded_offset; + __u32 compression; + __u32 encryption; + __u8 reserved[64]; +}; + +struct btrfs_replace_extent_info { + u64 disk_offset; + u64 disk_len; + u64 data_offset; + u64 data_len; + u64 file_offset; + char *extent_buf; + bool is_new_extent; + bool update_times; + int qgroup_reserved; + int insertions; +}; + +struct btrfs_drop_extents_args { + struct btrfs_path *path; + long: 32; + u64 start; + u64 end; + bool drop_cache; + bool replace_extent; + u32 extent_item_size; + u64 drop_end; + u64 bytes_found; + bool extent_inserted; + long: 32; +}; + +struct btrfs_file_private { + void *filldir_buf; + long: 32; + u64 last_index; + struct extent_state *llseek_cached_state; + struct task_struct *owner_task; +}; + +struct btrfs_file_extent { + u64 disk_bytenr; + u64 disk_num_bytes; + u64 num_bytes; + u64 ram_bytes; + u64 offset; + u8 compression; + long: 32; +}; + +enum btrfs_ilock_type { + __BTRFS_ILOCK_SHARED_BIT = 0, + BTRFS_ILOCK_SHARED = 1, + __BTRFS_ILOCK_SHARED_SEQ = 0, + __BTRFS_ILOCK_TRY_BIT = 1, + BTRFS_ILOCK_TRY = 2, + __BTRFS_ILOCK_TRY_SEQ = 1, + __BTRFS_ILOCK_MMAP_BIT = 2, + BTRFS_ILOCK_MMAP = 4, + __BTRFS_ILOCK_MMAP_SEQ = 2, +}; + +enum btrfs_ref_type { + BTRFS_REF_NOT_SET = 0, + BTRFS_REF_DATA = 1, + BTRFS_REF_METADATA = 2, + BTRFS_REF_LAST = 3, +} __attribute__((mode(byte))); + +struct btrfs_ref { + enum btrfs_ref_type type; + enum btrfs_delayed_ref_action action; + bool skip_qgroup; + long: 32; + u64 bytenr; + u64 num_bytes; + u64 owning_root; + u64 ref_root; + u64 parent; + union { + struct btrfs_data_ref data_ref; + struct btrfs_tree_ref tree_ref; + }; +}; + +struct btrfs_log_ctx { + int log_ret; + int log_transid; + bool log_new_dentries; + bool logging_new_name; + bool logging_new_delayed_dentries; + bool logged_before; + struct btrfs_inode *inode; + struct list_head list; + struct list_head ordered_extents; + struct list_head conflict_inodes; + int num_conflict_inodes; + bool logging_conflict_inodes; + struct extent_buffer *scratch_eb; +}; + +struct falloc_range { + struct list_head list; + u64 start; + u64 len; +}; + +enum { + RANGE_BOUNDARY_WRITTEN_EXTENT = 0, + RANGE_BOUNDARY_PREALLOC_EXTENT = 1, + RANGE_BOUNDARY_HOLE = 2, +}; + +enum { + btrfs_bitmap_nr_uptodate = 0, + btrfs_bitmap_nr_dirty = 1, + btrfs_bitmap_nr_writeback = 2, + btrfs_bitmap_nr_ordered = 3, + btrfs_bitmap_nr_checked = 4, + btrfs_bitmap_nr_locked = 5, + btrfs_bitmap_nr_max = 6, +}; + +struct btrfs_subpage { + spinlock_t lock; + union { + atomic_t eb_refs; + atomic_t nr_locked; + }; + long unsigned int bitmaps[0]; +}; + +enum btrfs_subpage_type { + BTRFS_SUBPAGE_METADATA = 0, + BTRFS_SUBPAGE_DATA = 1, +}; + +struct sha256_state { + u32 state[8]; + u64 count; + u8 buf[64]; +}; + +typedef struct { + __be64 a; + __be64 b; +} be128; + +struct gf128mul_4k { + be128 t[256]; +}; + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[16]; + u32 bytes; +}; + +enum rq_qos_id { + RQ_QOS_WBT = 0, + RQ_QOS_LATENCY = 1, + RQ_QOS_COST = 2, +}; + +struct rq_qos_ops; + +struct rq_qos { + const struct rq_qos_ops *ops; + struct gendisk *disk; + enum rq_qos_id id; + struct rq_qos *next; + 
struct dentry *debugfs_dir; +}; + +enum blk_integrity_flags { + BLK_INTEGRITY_NOVERIFY = 1, + BLK_INTEGRITY_NOGENERATE = 2, + BLK_INTEGRITY_DEVICE_CAPABLE = 4, + BLK_INTEGRITY_REF_TAG = 8, + BLK_INTEGRITY_STACKED = 16, +}; + +struct rq_qos_ops { + void (*throttle)(struct rq_qos *, struct bio *); + void (*track)(struct rq_qos *, struct request *, struct bio *); + void (*merge)(struct rq_qos *, struct request *, struct bio *); + void (*issue)(struct rq_qos *, struct request *); + void (*requeue)(struct rq_qos *, struct request *); + void (*done)(struct rq_qos *, struct request *); + void (*done_bio)(struct rq_qos *, struct bio *); + void (*cleanup)(struct rq_qos *, struct bio *); + void (*queue_depth_changed)(struct rq_qos *); + void (*exit)(struct rq_qos *); + const struct blk_mq_debugfs_attr *debugfs_attrs; +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; + struct list_head queued[2]; + unsigned int nr_queued[2]; + struct rb_root_cached pending_tree; + unsigned int nr_pending; + long unsigned int first_pending_disptime; + struct timer_list pending_timer; +}; + +struct throtl_data { + struct throtl_service_queue service_queue; + struct request_queue *queue; + unsigned int nr_queued[2]; + unsigned int throtl_slice; + struct work_struct dispatch_work; + bool track_bio_latency; +}; + +enum blktrace_cat { + BLK_TC_READ = 1, + BLK_TC_WRITE = 2, + BLK_TC_FLUSH = 4, + BLK_TC_SYNC = 8, + BLK_TC_SYNCIO = 8, + BLK_TC_QUEUE = 16, + BLK_TC_REQUEUE = 32, + BLK_TC_ISSUE = 64, + BLK_TC_COMPLETE = 128, + BLK_TC_FS = 256, + BLK_TC_PC = 512, + BLK_TC_NOTIFY = 1024, + BLK_TC_AHEAD = 2048, + BLK_TC_META = 4096, + BLK_TC_DISCARD = 8192, + BLK_TC_DRV_DATA = 16384, + BLK_TC_FUA = 32768, + BLK_TC_END = 32768, +}; + +struct throtl_grp; + +struct throtl_qnode { + struct list_head node; + struct bio_list bios; + struct throtl_grp *tg; +}; + +struct throtl_grp { + struct blkg_policy_data pd; + struct rb_node rb_node; + struct throtl_data *td; + struct throtl_service_queue service_queue; + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + long unsigned int disptime; + unsigned int flags; + bool has_rules_bps[2]; + bool has_rules_iops[2]; + uint64_t bps[2]; + unsigned int iops[2]; + uint64_t bytes_disp[2]; + unsigned int io_disp[2]; + uint64_t last_bytes_disp[2]; + unsigned int last_io_disp[2]; + long long int carryover_bytes[2]; + int carryover_ios[2]; + long unsigned int last_check_time; + long unsigned int slice_start[2]; + long unsigned int slice_end[2]; + long: 32; + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; +}; + +enum tg_state_flags { + THROTL_TG_PENDING = 1, + THROTL_TG_WAS_EMPTY = 2, + THROTL_TG_CANCELING = 4, +}; + +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE = 0, + FSNOTIFY_EVENT_PATH = 1, + FSNOTIFY_EVENT_INODE = 2, + FSNOTIFY_EVENT_DENTRY = 3, + FSNOTIFY_EVENT_ERROR = 4, +}; + +struct io_sync { + struct file *file; + long: 32; + loff_t len; + loff_t off; + int flags; + int mode; +}; + +enum { + PERCPU_REF_INIT_ATOMIC = 1, + PERCPU_REF_INIT_DEAD = 2, + PERCPU_REF_ALLOW_REINIT = 4, +}; + +typedef s16 int16_t; + +typedef u16 uint16_t; + +typedef uint8_t BYTE; + +typedef uint8_t U8; + +typedef uint16_t U16; + +typedef int16_t S16; + +typedef uint32_t U32; + +typedef uint64_t U64; + +typedef enum { + ZSTD_fast = 1, + ZSTD_dfast = 2, + ZSTD_greedy = 3, + ZSTD_lazy = 4, + ZSTD_lazy2 = 5, + ZSTD_btlazy2 = 6, + ZSTD_btopt = 7, + ZSTD_btultra = 8, + ZSTD_btultra2 = 9, +} ZSTD_strategy; + +typedef struct { + unsigned int windowLog; + 
unsigned int chainLog; + unsigned int hashLog; + unsigned int searchLog; + unsigned int minMatch; + unsigned int targetLength; + ZSTD_strategy strategy; +} ZSTD_compressionParameters; + +typedef enum { + ZSTD_ps_auto = 0, + ZSTD_ps_enable = 1, + ZSTD_ps_disable = 2, +} ZSTD_paramSwitch_e; + +typedef void * (*ZSTD_allocFunction)(void *, size_t); + +typedef void (*ZSTD_freeFunction)(void *, void *); + +typedef struct { + ZSTD_allocFunction customAlloc; + ZSTD_freeFunction customFree; + void *opaque; +} ZSTD_customMem; + +typedef unsigned int FSE_CTable; + +typedef enum { + FSE_repeat_none = 0, + FSE_repeat_check = 1, + FSE_repeat_valid = 2, +} FSE_repeat; + +typedef size_t HUF_CElt; + +typedef enum { + HUF_repeat_none = 0, + HUF_repeat_check = 1, + HUF_repeat_valid = 2, +} HUF_repeat; + +typedef enum { + ZSTD_no_overlap = 0, + ZSTD_overlap_src_before_dst = 1, +} ZSTD_overlap_e; + +struct seqDef_s { + U32 offBase; + U16 litLength; + U16 mlBase; +}; + +typedef struct seqDef_s seqDef; + +typedef enum { + ZSTD_llt_none = 0, + ZSTD_llt_literalLength = 1, + ZSTD_llt_matchLength = 2, +} ZSTD_longLengthType_e; + +typedef struct { + seqDef *sequencesStart; + seqDef *sequences; + BYTE *litStart; + BYTE *lit; + BYTE *llCode; + BYTE *mlCode; + BYTE *ofCode; + size_t maxNbSeq; + size_t maxNbLit; + ZSTD_longLengthType_e longLengthType; + U32 longLengthPos; +} seqStore_t; + +typedef struct { + HUF_CElt CTable[257]; + HUF_repeat repeatMode; +} ZSTD_hufCTables_t; + +typedef struct { + FSE_CTable offcodeCTable[193]; + FSE_CTable matchlengthCTable[363]; + FSE_CTable litlengthCTable[329]; + FSE_repeat offcode_repeatMode; + FSE_repeat matchlength_repeatMode; + FSE_repeat litlength_repeatMode; +} ZSTD_fseCTables_t; + +typedef struct { + ZSTD_hufCTables_t huf; + ZSTD_fseCTables_t fse; +} ZSTD_entropyCTables_t; + +typedef struct { + U32 off; + U32 len; +} ZSTD_match_t; + +typedef struct { + U32 offset; + U32 litLength; + U32 matchLength; +} rawSeq; + +typedef struct { + rawSeq *seq; + size_t pos; + size_t posInSequence; + size_t size; + size_t capacity; +} rawSeqStore_t; + +typedef struct { + int price; + U32 off; + U32 mlen; + U32 litlen; + U32 rep[3]; +} ZSTD_optimal_t; + +typedef enum { + zop_dynamic = 0, + zop_predef = 1, +} ZSTD_OptPrice_e; + +typedef struct { + unsigned int *litFreq; + unsigned int *litLengthFreq; + unsigned int *matchLengthFreq; + unsigned int *offCodeFreq; + ZSTD_match_t *matchTable; + ZSTD_optimal_t *priceTable; + U32 litSum; + U32 litLengthSum; + U32 matchLengthSum; + U32 offCodeSum; + U32 litSumBasePrice; + U32 litLengthSumBasePrice; + U32 matchLengthSumBasePrice; + U32 offCodeSumBasePrice; + ZSTD_OptPrice_e priceType; + const ZSTD_entropyCTables_t *symbolCosts; + ZSTD_paramSwitch_e literalCompressionMode; +} optState_t; + +typedef struct { + const BYTE *nextSrc; + const BYTE *base; + const BYTE *dictBase; + U32 dictLimit; + U32 lowLimit; + U32 nbOverflowCorrections; +} ZSTD_window_t; + +struct ZSTD_matchState_t; + +typedef struct ZSTD_matchState_t ZSTD_matchState_t; + +struct ZSTD_matchState_t { + ZSTD_window_t window; + U32 loadedDictEnd; + U32 nextToUpdate; + U32 hashLog3; + U32 rowHashLog; + U16 *tagTable; + U32 hashCache[8]; + U32 *hashTable; + U32 *hashTable3; + U32 *chainTable; + U32 forceNonContiguous; + int dedicatedDictSearch; + optState_t opt; + const ZSTD_matchState_t *dictMatchState; + ZSTD_compressionParameters cParams; + const rawSeqStore_t *ldmSeqStore; +}; + +typedef enum { + ZSTD_noDict = 0, + ZSTD_extDict = 1, + ZSTD_dictMatchState = 2, + ZSTD_dedicatedDictSearch = 3, 
+} ZSTD_dictMode_e; + +typedef U64 ZSTD_VecMask; + +typedef enum { + search_hashChain = 0, + search_binaryTree = 1, + search_rowHash = 2, +} searchMethod_e; + +struct subsys_private { + struct kset subsys; + struct kset *devices_kset; + struct list_head interfaces; + struct mutex mutex; + struct kset *drivers_kset; + struct klist klist_devices; + struct klist klist_drivers; + struct blocking_notifier_head bus_notifier; + unsigned int drivers_autoprobe: 1; + const struct bus_type *bus; + struct device *dev_root; + struct kset glue_dirs; + const struct class *class; + struct lock_class_key lock_key; +}; + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(const struct class *, const struct class_attribute *, char *); + ssize_t (*store)(const struct class *, const struct class_attribute *, const char *, size_t); +}; + +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +struct class_compat { + struct kobject *kobj; +}; + +enum hsm_task_states { + HSM_ST_IDLE = 0, + HSM_ST_FIRST = 1, + HSM_ST = 2, + HSM_ST_LAST = 3, + HSM_ST_ERR = 4, +}; + +struct system_device_crosststamp { + ktime_t device; + ktime_t sys_realtime; + ktime_t sys_monoraw; +}; + +struct cyclecounter { + u64 (*read)(const struct cyclecounter *); + long: 32; + u64 mask; + u32 mult; + u32 shift; +}; + +struct timecounter { + const struct cyclecounter *cc; + long: 32; + u64 cycle_last; + u64 nsec; + u64 mask; + u64 frac; +}; + +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + +struct ptp_clock_time { + __s64 sec; + __u32 nsec; + __u32 reserved; +}; + +struct ptp_extts_request { + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct ptp_perout_request { + union { + struct ptp_clock_time start; + struct ptp_clock_time phase; + }; + struct ptp_clock_time period; + unsigned int index; + unsigned int flags; + union { + struct ptp_clock_time on; + unsigned int rsv[4]; + }; +}; + +enum ptp_pin_function { + PTP_PF_NONE = 0, + PTP_PF_EXTTS = 1, + PTP_PF_PEROUT = 2, + PTP_PF_PHYSYNC = 3, +}; + +struct ptp_pin_desc { + char name[64]; + unsigned int index; + unsigned int func; + unsigned int chan; + unsigned int rsv[5]; +}; + +struct ptp_clock_request { + enum { + PTP_CLK_REQ_EXTTS = 0, + PTP_CLK_REQ_PEROUT = 1, + PTP_CLK_REQ_PPS = 2, + } type; + long: 32; + union { + struct ptp_extts_request extts; + struct ptp_perout_request perout; + }; +}; + +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; + clockid_t clockid; + long: 32; +}; + +struct ptp_clock_info { + struct module *owner; + char name[32]; + s32 max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int n_pins; + int pps; + struct ptp_pin_desc *pin_config; + int (*adjfine)(struct ptp_clock_info *, long int); + int (*adjphase)(struct ptp_clock_info *, s32); + s32 (*getmaxphase)(struct ptp_clock_info *); + int (*adjtime)(struct ptp_clock_info *, s64); + int (*gettime64)(struct ptp_clock_info *, struct timespec64 *); + int (*gettimex64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*settime64)(struct ptp_clock_info *, const struct timespec64 *); + int (*getcycles64)(struct ptp_clock_info *, struct timespec64 *); + int (*getcyclesx64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosscycles)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*enable)(struct 
ptp_clock_info *, struct ptp_clock_request *, int); + int (*verify)(struct ptp_clock_info *, unsigned int, enum ptp_pin_function, unsigned int); + long int (*do_aux_work)(struct ptp_clock_info *); +}; + +struct pm_qos_request { + struct plist_node node; + struct pm_qos_constraints *qos; +}; + +enum e1000_mac_type___2 { + e1000_82571 = 0, + e1000_82572 = 1, + e1000_82573 = 2, + e1000_82574 = 3, + e1000_82583 = 4, + e1000_80003es2lan = 5, + e1000_ich8lan = 6, + e1000_ich9lan = 7, + e1000_ich10lan = 8, + e1000_pchlan = 9, + e1000_pch2lan = 10, + e1000_pch_lpt = 11, + e1000_pch_spt = 12, + e1000_pch_cnp = 13, + e1000_pch_tgp = 14, + e1000_pch_adp = 15, + e1000_pch_mtp = 16, + e1000_pch_lnp = 17, + e1000_pch_ptp = 18, + e1000_pch_nvp = 19, +}; + +enum e1000_nvm_type___2 { + e1000_nvm_unknown___2 = 0, + e1000_nvm_none___2 = 1, + e1000_nvm_eeprom_spi___2 = 2, + e1000_nvm_flash_hw___2 = 3, + e1000_nvm_flash_sw___2 = 4, +}; + +enum e1000_phy_type___2 { + e1000_phy_unknown___2 = 0, + e1000_phy_none___2 = 1, + e1000_phy_m88___2 = 2, + e1000_phy_igp___2 = 3, + e1000_phy_igp_2___2 = 4, + e1000_phy_gg82563___2 = 5, + e1000_phy_igp_3___2 = 6, + e1000_phy_ife___2 = 7, + e1000_phy_bm = 8, + e1000_phy_82578 = 9, + e1000_phy_82577 = 10, + e1000_phy_82579 = 11, + e1000_phy_i217 = 12, +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress = 1, + e1000_serdes_link_autoneg_complete = 2, + e1000_serdes_link_forced_up = 3, +}; + +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf = 1, + e1000_mng_mode_pt = 2, + e1000_mng_mode_ipmi = 3, + e1000_mng_mode_host_if_only = 4, +}; + +struct e1000_hw___2; + +struct e1000_mac_operations___2 { + s32 (*id_led_init)(struct e1000_hw___2 *); + s32 (*blink_led)(struct e1000_hw___2 *); + bool (*check_mng_mode)(struct e1000_hw___2 *); + s32 (*check_for_link)(struct e1000_hw___2 *); + s32 (*cleanup_led)(struct e1000_hw___2 *); + void (*clear_hw_cntrs)(struct e1000_hw___2 *); + void (*clear_vfta)(struct e1000_hw___2 *); + s32 (*get_bus_info)(struct e1000_hw___2 *); + void (*set_lan_id)(struct e1000_hw___2 *); + s32 (*get_link_up_info)(struct e1000_hw___2 *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw___2 *); + s32 (*led_off)(struct e1000_hw___2 *); + void (*update_mc_addr_list)(struct e1000_hw___2 *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw___2 *); + s32 (*init_hw)(struct e1000_hw___2 *); + s32 (*setup_link)(struct e1000_hw___2 *); + s32 
(*setup_physical_interface)(struct e1000_hw___2 *); + s32 (*setup_led)(struct e1000_hw___2 *); + void (*write_vfta)(struct e1000_hw___2 *, u32, u32); + void (*config_collision_dist)(struct e1000_hw___2 *); + int (*rar_set)(struct e1000_hw___2 *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw___2 *); + u32 (*rar_get_count)(struct e1000_hw___2 *); +}; + +struct e1000_mac_info___2 { + struct e1000_mac_operations___2 ops; + u8 addr[6]; + u8 perm_addr[6]; + enum e1000_mac_type___2 type; + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u32 mta_shadow[128]; + u16 rar_entry_count; + u8 forced_speed_duplex; + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; + enum e1000_serdes_link_state serdes_link_state; +}; + +struct e1000_fc_info___2 { + u32 high_water; + u32 low_water; + u16 pause_time; + u16 refresh_time; + bool send_xon; + bool strict_ieee; + enum e1000_fc_mode current_mode; + enum e1000_fc_mode requested_mode; +}; + +struct e1000_phy_operations___2 { + s32 (*acquire)(struct e1000_hw___2 *); + s32 (*cfg_on_link_up)(struct e1000_hw___2 *); + s32 (*check_polarity)(struct e1000_hw___2 *); + s32 (*check_reset_block)(struct e1000_hw___2 *); + s32 (*commit)(struct e1000_hw___2 *); + s32 (*force_speed_duplex)(struct e1000_hw___2 *); + s32 (*get_cfg_done)(struct e1000_hw___2 *); + s32 (*get_cable_length)(struct e1000_hw___2 *); + s32 (*get_info)(struct e1000_hw___2 *); + s32 (*set_page)(struct e1000_hw___2 *, u16); + s32 (*read_reg)(struct e1000_hw___2 *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw___2 *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw___2 *, u32, u16 *); + void (*release)(struct e1000_hw___2 *); + s32 (*reset)(struct e1000_hw___2 *); + s32 (*set_d0_lplu_state)(struct e1000_hw___2 *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw___2 *, bool); + s32 (*write_reg)(struct e1000_hw___2 *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw___2 *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw___2 *, u32, u16); + void (*power_up)(struct e1000_hw___2 *); + void (*power_down)(struct e1000_hw___2 *); +}; + +struct e1000_phy_info___2 { + struct e1000_phy_operations___2 ops; + enum e1000_phy_type___2 type; + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + u32 addr; + u32 id; + u32 reset_delay_us; + u32 revision; + u32 retry_count; + enum e1000_media_type media_type; + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u8 mdix; + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; + bool retry_enabled; +}; + +struct e1000_nvm_operations___2 { + s32 (*acquire)(struct e1000_hw___2 *); + s32 (*read)(struct e1000_hw___2 *, u16, u16, u16 *); + void (*release)(struct e1000_hw___2 *); + void (*reload)(struct e1000_hw___2 *); + s32 (*update)(struct e1000_hw___2 *); + s32 (*valid_led_default)(struct e1000_hw___2 *, u16 *); + s32 (*validate)(struct e1000_hw___2 *); + s32 (*write)(struct e1000_hw___2 *, 
u16, u16, u16 *); +}; + +struct e1000_nvm_info___2 { + struct e1000_nvm_operations___2 ops; + enum e1000_nvm_type___2 type; + enum e1000_nvm_override override; + u32 flash_bank_size; + u32 flash_base_addr; + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info___2 { + enum e1000_bus_width width; + u16 func; +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +enum e1000_ulp_state { + e1000_ulp_state_unknown = 0, + e1000_ulp_state_off = 1, + e1000_ulp_state_on = 2, +}; + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[2048]; + bool nvm_k1_enabled; + bool eee_disable; + u16 eee_lp_ability; + enum e1000_ulp_state ulp_state; +}; + +struct e1000_adapter; + +struct e1000_hw___2 { + struct e1000_adapter *adapter; + void *hw_addr; + void *flash_address; + struct e1000_mac_info___2 mac; + struct e1000_fc_info___2 fc; + struct e1000_phy_info___2 phy; + struct e1000_nvm_info___2 nvm; + struct e1000_bus_info___2 bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_80003es2lan e80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; +}; + +struct e1000_phy_regs { + u16 bmcr; + u16 bmsr; + u16 advertise; + u16 lpa; + u16 expansion; + u16 ctrl1000; + u16 stat1000; + u16 estatus; +}; + +struct e1000_buffer; + +struct e1000_ring { + struct e1000_adapter *adapter; + void *desc; + dma_addr_t dma; + unsigned int size; + unsigned int count; + u16 next_to_use; + u16 next_to_clean; + void *head; + void *tail; + struct e1000_buffer *buffer_info; + char name[21]; + u32 ims_val; + u32 itr_val; + void *itr_register; + int set_itr; + struct sk_buff *rx_skb_top; +}; + +struct e1000_info; + +struct ptp_clock; + +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + struct work_struct reset_task; + struct work_struct watchdog_task; + const struct e1000_info *ei; + long unsigned int active_vlans[128]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + long unsigned int state; + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + struct e1000_ring *tx_ring; + u32 tx_fifo_limit; + struct napi_struct napi; + unsigned int uncorr_errors; + unsigned int corr_errors; + unsigned int restart_queue; + u32 txd_cmd; + bool detect_tx_hung; + bool tx_hang_recheck; + u8 tx_timeout_factor; + u32 tx_int_delay; + u32 tx_abs_int_delay; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + long: 32; + u64 tpt_old; + u64 colc_old; + u32 gotc; + long: 32; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + bool (*clean_rx)(struct e1000_ring *, int *, int); + void (*alloc_rx_buf)(struct e1000_ring *, int, gfp_t); + struct e1000_ring *rx_ring; + u32 rx_int_delay; + u32 rx_abs_int_delay; + long: 32; + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + long: 32; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 rx_hwtstamp_cleared; + 
unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + struct net_device *netdev; + struct pci_dev *pdev; + struct e1000_hw___2 hw; + spinlock_t stats64_lock; + struct e1000_hw_stats stats; + struct e1000_phy_info___2 phy_info; + struct e1000_phy_stats phy_stats; + struct e1000_phy_regs phy_regs; + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + bool fc_autoneg; + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + int phy_hang_count; + u16 tx_ring_count; + u16 rx_ring_count; + struct hwtstamp_config hwtstamp_config; + struct delayed_work systim_overflow_work; + struct sk_buff *tx_hwtstamp_skb; + long unsigned int tx_hwtstamp_start; + struct work_struct tx_hwtstamp_work; + spinlock_t systim_lock; + struct cyclecounter cc; + struct timecounter tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct pm_qos_request pm_qos_req; + long int ptp_delta; + u16 eee_advert; + long: 32; +}; + +struct e1000_ps_page { + struct page *page; + long: 32; + u64 dma; +}; + +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + struct { + long unsigned int time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + struct { + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_info { + enum e1000_mac_type___2 mac; + unsigned int flags; + unsigned int flags2; + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); + const struct e1000_mac_operations___2 *mac_ops; + const struct e1000_phy_operations___2 *phy_ops; + const struct e1000_nvm_operations___2 *nvm_ops; +}; + +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block *head; +}; + +struct usb_sg_request { + int status; + size_t bytes; + spinlock_t lock; + struct usb_device *dev; + int pipe; + int entries; + struct urb **urbs; + int count; + struct completion complete; +}; + +struct usb_cdc_header_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdCDC; +} __attribute__((packed)); + +struct usb_cdc_call_mgmt_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; + __u8 bDataInterface; +}; + +struct usb_cdc_acm_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; +}; + +struct usb_cdc_union_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bMasterInterface0; + __u8 bSlaveInterface0; +}; + +struct usb_cdc_country_functional_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 iCountryCodeRelDate; + __le16 wCountyCode0; +}; + +struct usb_cdc_network_terminal_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bEntityId; + __u8 iName; + __u8 bChannelIndex; + __u8 bPhysicalInterface; +}; + +struct usb_cdc_ether_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 iMACAddress; + __le32 bmEthernetStatistics; + __le16 wMaxSegmentSize; + __le16 wNumberMCFilters; + __u8 bNumberPowerFilters; +} __attribute__((packed)); + +struct usb_cdc_dmm_desc { + __u8 bFunctionLength; + __u8 
bDescriptorType; + __u8 bDescriptorSubtype; + __u16 bcdVersion; + __le16 wMaxCommand; +} __attribute__((packed)); + +struct usb_cdc_mdlm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; + __u8 bGUID[16]; +} __attribute__((packed)); + +struct usb_cdc_mdlm_detail_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bGuidDescriptorType; + __u8 bDetailData[0]; +}; + +struct usb_cdc_obex_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; +} __attribute__((packed)); + +struct usb_cdc_ncm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdNcmVersion; + __u8 bmNetworkCapabilities; +} __attribute__((packed)); + +struct usb_cdc_mbim_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMVersion; + __le16 wMaxControlMessage; + __u8 bNumberFilters; + __u8 bMaxFilterSize; + __le16 wMaxSegmentSize; + __u8 bmNetworkCapabilities; +} __attribute__((packed)); + +struct usb_cdc_mbim_extended_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMExtendedVersion; + __u8 bMaxOutstandingCommandMessages; + __le16 wMTU; +} __attribute__((packed)); + +struct usb_cdc_parsed_header { + struct usb_cdc_union_desc *usb_cdc_union_desc; + struct usb_cdc_header_desc *usb_cdc_header_desc; + struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; + struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; + struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; + struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; + struct usb_cdc_ether_desc *usb_cdc_ether_desc; + struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; + struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; + struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; + struct usb_cdc_obex_desc *usb_cdc_obex_desc; + struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; + struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; + struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; + bool phonet_magic_present; +}; + +enum usb_phy_type { + USB_PHY_TYPE_UNDEFINED = 0, + USB_PHY_TYPE_USB2 = 1, + USB_PHY_TYPE_USB3 = 2, +}; + +enum usb_phy_events { + USB_EVENT_NONE = 0, + USB_EVENT_VBUS = 1, + USB_EVENT_ID = 2, + USB_EVENT_CHARGER = 3, + USB_EVENT_ENUMERATED = 4, +}; + +struct extcon_dev; + +enum usb_charger_type { + UNKNOWN_TYPE = 0, + SDP_TYPE = 1, + DCP_TYPE = 2, + CDP_TYPE = 3, + ACA_TYPE = 4, +}; + +enum usb_charger_state { + USB_CHARGER_DEFAULT = 0, + USB_CHARGER_PRESENT = 1, + USB_CHARGER_ABSENT = 2, +}; + +struct usb_charger_current { + unsigned int sdp_min; + unsigned int sdp_max; + unsigned int dcp_min; + unsigned int dcp_max; + unsigned int cdp_min; + unsigned int cdp_max; + unsigned int aca_min; + unsigned int aca_max; +}; + +struct usb_otg; + +struct usb_phy_io_ops; + +struct usb_phy { + struct device *dev; + const char *label; + unsigned int flags; + enum usb_phy_type type; + enum usb_phy_events last_event; + struct usb_otg *otg; + struct device *io_dev; + struct usb_phy_io_ops *io_ops; + void *io_priv; + struct extcon_dev *edev; + struct extcon_dev *id_edev; + struct notifier_block vbus_nb; + struct notifier_block id_nb; + struct notifier_block type_nb; + enum usb_charger_type chg_type; + enum usb_charger_state chg_state; + struct usb_charger_current chg_cur; + struct work_struct chg_work; + struct atomic_notifier_head notifier; + u16 port_status; + u16 port_change; + struct list_head head; + int (*init)(struct usb_phy *); 
+ void (*shutdown)(struct usb_phy *); + int (*set_vbus)(struct usb_phy *, int); + int (*set_power)(struct usb_phy *, unsigned int); + int (*set_suspend)(struct usb_phy *, int); + int (*set_wakeup)(struct usb_phy *, bool); + int (*notify_connect)(struct usb_phy *, enum usb_device_speed); + int (*notify_disconnect)(struct usb_phy *, enum usb_device_speed); + enum usb_charger_type (*charger_detect)(struct usb_phy *); +}; + +enum usb_otg_state { + OTG_STATE_UNDEFINED = 0, + OTG_STATE_B_IDLE = 1, + OTG_STATE_B_SRP_INIT = 2, + OTG_STATE_B_PERIPHERAL = 3, + OTG_STATE_B_WAIT_ACON = 4, + OTG_STATE_B_HOST = 5, + OTG_STATE_A_IDLE = 6, + OTG_STATE_A_WAIT_VRISE = 7, + OTG_STATE_A_WAIT_BCON = 8, + OTG_STATE_A_HOST = 9, + OTG_STATE_A_SUSPEND = 10, + OTG_STATE_A_PERIPHERAL = 11, + OTG_STATE_A_WAIT_VFALL = 12, + OTG_STATE_A_VBUS_ERR = 13, +}; + +struct usb_phy_io_ops { + int (*read)(struct usb_phy *, u32); + int (*write)(struct usb_phy *, u32, u32); +}; + +struct usb_gadget; + +struct usb_otg { + u8 default_a; + struct phy *phy; + struct usb_phy *usb_phy; + struct usb_bus *host; + struct usb_gadget *gadget; + enum usb_otg_state state; + int (*set_host)(struct usb_otg *, struct usb_bus *); + int (*set_peripheral)(struct usb_otg *, struct usb_gadget *); + int (*set_vbus)(struct usb_otg *, bool); + int (*start_srp)(struct usb_otg *); + int (*start_hnp)(struct usb_otg *); +}; + +struct api_context { + struct completion done; + int status; +}; + +struct set_config_request { + struct usb_device *udev; + int config; + struct work_struct work; + struct list_head node; +}; + +struct us_data; + +struct us_unusual_dev { + const char *vendorName; + const char *productName; + __u8 useProtocol; + __u8 useTransport; + int (*initFunction)(struct us_data *); +}; + +typedef int (*trans_cmnd)(struct scsi_cmnd *, struct us_data *); + +typedef int (*trans_reset)(struct us_data *); + +typedef void (*proto_cmnd)(struct scsi_cmnd *, struct us_data *); + +typedef void (*extra_data_destructor)(void *); + +typedef void (*pm_hook)(struct us_data *, int); + +struct us_data { + struct mutex dev_mutex; + struct usb_device *pusb_dev; + struct usb_interface *pusb_intf; + const struct us_unusual_dev *unusual_dev; + u64 fflags; + long unsigned int dflags; + unsigned int send_bulk_pipe; + unsigned int recv_bulk_pipe; + unsigned int send_ctrl_pipe; + unsigned int recv_ctrl_pipe; + unsigned int recv_intr_pipe; + char *transport_name; + char *protocol_name; + __le32 bcs_signature; + u8 subclass; + u8 protocol; + u8 max_lun; + u8 ifnum; + u8 ep_bInterval; + trans_cmnd transport; + trans_reset transport_reset; + proto_cmnd proto_handler; + struct scsi_cmnd *srb; + unsigned int tag; + char scsi_name[32]; + struct urb *current_urb; + struct usb_ctrlrequest *cr; + struct usb_sg_request current_sg; + unsigned char *iobuf; + dma_addr_t iobuf_dma; + struct task_struct *ctl_thread; + struct completion cmnd_ready; + struct completion notify; + wait_queue_head_t delay_wait; + struct delayed_work scan_dwork; + void *extra; + extra_data_destructor extra_destructor; + pm_hook suspend_resume_hook; + int use_last_sector_hacks; + int last_sector_retries; +}; + +struct swoc_info { + __u8 rev; + __u8 reserved[8]; + __u16 LinuxSKU; + __u16 LinuxVer; + __u8 reserved2[47]; +} __attribute__((packed)); + +enum hwmon_sensor_types { + hwmon_chip = 0, + hwmon_temp = 1, + hwmon_in = 2, + hwmon_curr = 3, + hwmon_power = 4, + hwmon_energy = 5, + hwmon_humidity = 6, + hwmon_fan = 7, + hwmon_pwm = 8, + hwmon_intrusion = 9, + hwmon_max = 10, +}; + +enum hwmon_chip_attributes { 
+ hwmon_chip_temp_reset_history = 0, + hwmon_chip_in_reset_history = 1, + hwmon_chip_curr_reset_history = 2, + hwmon_chip_power_reset_history = 3, + hwmon_chip_register_tz = 4, + hwmon_chip_update_interval = 5, + hwmon_chip_alarms = 6, + hwmon_chip_samples = 7, + hwmon_chip_curr_samples = 8, + hwmon_chip_in_samples = 9, + hwmon_chip_power_samples = 10, + hwmon_chip_temp_samples = 11, + hwmon_chip_beep_enable = 12, + hwmon_chip_pec = 13, +}; + +enum hwmon_temp_attributes { + hwmon_temp_enable = 0, + hwmon_temp_input = 1, + hwmon_temp_type = 2, + hwmon_temp_lcrit = 3, + hwmon_temp_lcrit_hyst = 4, + hwmon_temp_min = 5, + hwmon_temp_min_hyst = 6, + hwmon_temp_max = 7, + hwmon_temp_max_hyst = 8, + hwmon_temp_crit = 9, + hwmon_temp_crit_hyst = 10, + hwmon_temp_emergency = 11, + hwmon_temp_emergency_hyst = 12, + hwmon_temp_alarm = 13, + hwmon_temp_lcrit_alarm = 14, + hwmon_temp_min_alarm = 15, + hwmon_temp_max_alarm = 16, + hwmon_temp_crit_alarm = 17, + hwmon_temp_emergency_alarm = 18, + hwmon_temp_fault = 19, + hwmon_temp_offset = 20, + hwmon_temp_label = 21, + hwmon_temp_lowest = 22, + hwmon_temp_highest = 23, + hwmon_temp_reset_history = 24, + hwmon_temp_rated_min = 25, + hwmon_temp_rated_max = 26, + hwmon_temp_beep = 27, +}; + +enum hwmon_in_attributes { + hwmon_in_enable = 0, + hwmon_in_input = 1, + hwmon_in_min = 2, + hwmon_in_max = 3, + hwmon_in_lcrit = 4, + hwmon_in_crit = 5, + hwmon_in_average = 6, + hwmon_in_lowest = 7, + hwmon_in_highest = 8, + hwmon_in_reset_history = 9, + hwmon_in_label = 10, + hwmon_in_alarm = 11, + hwmon_in_min_alarm = 12, + hwmon_in_max_alarm = 13, + hwmon_in_lcrit_alarm = 14, + hwmon_in_crit_alarm = 15, + hwmon_in_rated_min = 16, + hwmon_in_rated_max = 17, + hwmon_in_beep = 18, + hwmon_in_fault = 19, +}; + +enum hwmon_curr_attributes { + hwmon_curr_enable = 0, + hwmon_curr_input = 1, + hwmon_curr_min = 2, + hwmon_curr_max = 3, + hwmon_curr_lcrit = 4, + hwmon_curr_crit = 5, + hwmon_curr_average = 6, + hwmon_curr_lowest = 7, + hwmon_curr_highest = 8, + hwmon_curr_reset_history = 9, + hwmon_curr_label = 10, + hwmon_curr_alarm = 11, + hwmon_curr_min_alarm = 12, + hwmon_curr_max_alarm = 13, + hwmon_curr_lcrit_alarm = 14, + hwmon_curr_crit_alarm = 15, + hwmon_curr_rated_min = 16, + hwmon_curr_rated_max = 17, + hwmon_curr_beep = 18, +}; + +enum hwmon_power_attributes { + hwmon_power_enable = 0, + hwmon_power_average = 1, + hwmon_power_average_interval = 2, + hwmon_power_average_interval_max = 3, + hwmon_power_average_interval_min = 4, + hwmon_power_average_highest = 5, + hwmon_power_average_lowest = 6, + hwmon_power_average_max = 7, + hwmon_power_average_min = 8, + hwmon_power_input = 9, + hwmon_power_input_highest = 10, + hwmon_power_input_lowest = 11, + hwmon_power_reset_history = 12, + hwmon_power_accuracy = 13, + hwmon_power_cap = 14, + hwmon_power_cap_hyst = 15, + hwmon_power_cap_max = 16, + hwmon_power_cap_min = 17, + hwmon_power_min = 18, + hwmon_power_max = 19, + hwmon_power_crit = 20, + hwmon_power_lcrit = 21, + hwmon_power_label = 22, + hwmon_power_alarm = 23, + hwmon_power_cap_alarm = 24, + hwmon_power_min_alarm = 25, + hwmon_power_max_alarm = 26, + hwmon_power_lcrit_alarm = 27, + hwmon_power_crit_alarm = 28, + hwmon_power_rated_min = 29, + hwmon_power_rated_max = 30, +}; + +enum hwmon_energy_attributes { + hwmon_energy_enable = 0, + hwmon_energy_input = 1, + hwmon_energy_label = 2, +}; + +enum hwmon_humidity_attributes { + hwmon_humidity_enable = 0, + hwmon_humidity_input = 1, + hwmon_humidity_label = 2, + hwmon_humidity_min = 3, + hwmon_humidity_min_hyst = 
4, + hwmon_humidity_max = 5, + hwmon_humidity_max_hyst = 6, + hwmon_humidity_alarm = 7, + hwmon_humidity_fault = 8, + hwmon_humidity_rated_min = 9, + hwmon_humidity_rated_max = 10, + hwmon_humidity_min_alarm = 11, + hwmon_humidity_max_alarm = 12, +}; + +enum hwmon_fan_attributes { + hwmon_fan_enable = 0, + hwmon_fan_input = 1, + hwmon_fan_label = 2, + hwmon_fan_min = 3, + hwmon_fan_max = 4, + hwmon_fan_div = 5, + hwmon_fan_pulses = 6, + hwmon_fan_target = 7, + hwmon_fan_alarm = 8, + hwmon_fan_min_alarm = 9, + hwmon_fan_max_alarm = 10, + hwmon_fan_fault = 11, + hwmon_fan_beep = 12, +}; + +enum hwmon_pwm_attributes { + hwmon_pwm_input = 0, + hwmon_pwm_enable = 1, + hwmon_pwm_mode = 2, + hwmon_pwm_freq = 3, + hwmon_pwm_auto_channels_temp = 4, +}; + +enum hwmon_intrusion_attributes { + hwmon_intrusion_alarm = 0, + hwmon_intrusion_beep = 1, +}; + +struct hwmon_ops { + umode_t visible; + umode_t (*is_visible)(const void *, enum hwmon_sensor_types, u32, int); + int (*read)(struct device *, enum hwmon_sensor_types, u32, int, long int *); + int (*read_string)(struct device *, enum hwmon_sensor_types, u32, int, const char **); + int (*write)(struct device *, enum hwmon_sensor_types, u32, int, long int); +}; + +struct hwmon_channel_info { + enum hwmon_sensor_types type; + const u32 *config; +}; + +struct hwmon_chip_info { + const struct hwmon_ops *ops; + const struct hwmon_channel_info * const *info; +}; + +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED = 1, +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE = 1, + THERMAL_TRIP_HOT = 2, + THERMAL_TRIP_CRITICAL = 3, +}; + +enum thermal_trend { + THERMAL_TREND_STABLE = 0, + THERMAL_TREND_RAISING = 1, + THERMAL_TREND_DROPPING = 2, +}; + +enum thermal_notify_event { + THERMAL_EVENT_UNSPECIFIED = 0, + THERMAL_EVENT_TEMP_SAMPLE = 1, + THERMAL_TRIP_VIOLATED = 2, + THERMAL_TRIP_CHANGED = 3, + THERMAL_DEVICE_DOWN = 4, + THERMAL_DEVICE_UP = 5, + THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, + THERMAL_TABLE_CHANGED = 7, + THERMAL_EVENT_KEEP_ALIVE = 8, + THERMAL_TZ_BIND_CDEV = 9, + THERMAL_TZ_UNBIND_CDEV = 10, + THERMAL_INSTANCE_WEIGHT_CHANGED = 11, + THERMAL_TZ_RESUME = 12, + THERMAL_TZ_ADD_THRESHOLD = 13, + THERMAL_TZ_DEL_THRESHOLD = 14, + THERMAL_TZ_FLUSH_THRESHOLDS = 15, +}; + +struct thermal_trip { + int temperature; + int hysteresis; + enum thermal_trip_type type; + u8 flags; + void *priv; +}; + +struct cooling_spec { + long unsigned int upper; + long unsigned int lower; + unsigned int weight; +}; + +struct thermal_zone_device; + +struct thermal_cooling_device; + +struct thermal_zone_device_ops { + bool (*should_bind)(struct thermal_zone_device *, const struct thermal_trip *, struct thermal_cooling_device *, struct cooling_spec *); + int (*get_temp)(struct thermal_zone_device *, int *); + int (*set_trips)(struct thermal_zone_device *, int, int); + int (*change_mode)(struct thermal_zone_device *, enum thermal_device_mode); + int (*set_trip_temp)(struct thermal_zone_device *, const struct thermal_trip *, int); + int (*get_crit_temp)(struct thermal_zone_device *, int *); + int (*set_emul_temp)(struct thermal_zone_device *, int); + int (*get_trend)(struct thermal_zone_device *, const struct thermal_trip *, enum thermal_trend *); + void (*hot)(struct thermal_zone_device *); + void (*critical)(struct thermal_zone_device *); +}; + +struct thermal_cooling_device_ops; + +struct thermal_cooling_device { + int id; + const char *type; + long unsigned int max_state; + long: 32; + struct device device; + 
struct device_node *np; + void *devdata; + void *stats; + const struct thermal_cooling_device_ops *ops; + bool updated; + struct mutex lock; + struct list_head thermal_instances; + struct list_head node; +}; + +struct thermal_cooling_device_ops { + int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *); + int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *); + int (*set_cur_state)(struct thermal_cooling_device *, long unsigned int); + int (*get_requested_power)(struct thermal_cooling_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, long unsigned int, u32 *); + int (*power2state)(struct thermal_cooling_device *, u32, long unsigned int *); +}; + +struct trace_event_raw_hwmon_attr_class { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + long int val; + char __data[0]; +}; + +struct trace_event_raw_hwmon_attr_show_string { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + u32 __data_loc_label; + char __data[0]; +}; + +struct trace_event_data_offsets_hwmon_attr_class { + u32 attr_name; + const void *attr_name_ptr_; +}; + +struct trace_event_data_offsets_hwmon_attr_show_string { + u32 attr_name; + const void *attr_name_ptr_; + u32 label; + const void *label_ptr_; +}; + +typedef void (*btf_trace_hwmon_attr_show)(void *, int, const char *, long int); + +typedef void (*btf_trace_hwmon_attr_store)(void *, int, const char *, long int); + +typedef void (*btf_trace_hwmon_attr_show_string)(void *, int, const char *, const char *); + +struct hwmon_device { + const char *name; + const char *label; + struct device dev; + const struct hwmon_chip_info *chip; + struct list_head tzdata; + struct attribute_group group; + const struct attribute_group **groups; +}; + +struct hwmon_device_attribute { + struct device_attribute dev_attr; + const struct hwmon_ops *ops; + enum hwmon_sensor_types type; + u32 attr; + int index; + char name[32]; +}; + +struct hwmon_thermal_data { + struct list_head node; + struct device *dev; + int index; + struct thermal_zone_device *tzd; +}; + +struct tcp_request_sock_ops; + +struct tcp_request_sock { + struct inet_request_sock req; + const struct tcp_request_sock_ops *af_specific; + long: 32; + u64 snt_synack; + bool tfo_listener; + bool is_mptcp; + bool req_usec_ts; + u32 txhash; + u32 rcv_isn; + u32 snt_isn; + u32 ts_off; + u32 last_oow_ack_time; + u32 rcv_nxt; + u8 syn_tos; +}; + +enum tcp_synack_type { + TCP_SYNACK_NORMAL = 0, + TCP_SYNACK_FASTOPEN = 1, + TCP_SYNACK_COOKIE = 2, +}; + +struct tcp_request_sock_ops { + u16 mss_clamp; + __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *); + struct dst_entry * (*route_req)(const struct sock *, struct sk_buff *, struct flowi *, struct request_sock *, u32); + u32 (*init_seq)(const struct sk_buff *); + u32 (*init_ts_off)(const struct net *, const struct sk_buff *); + int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type, struct sk_buff *); +}; + +struct xa_limit { + u32 max; + u32 min; +}; + +typedef int (*netlink_filter_fn)(struct sock *, struct sk_buff *, void *); + +enum { + NETDEV_A_PAGE_POOL_ID = 1, + NETDEV_A_PAGE_POOL_IFINDEX = 2, + NETDEV_A_PAGE_POOL_NAPI_ID = 3, + NETDEV_A_PAGE_POOL_INFLIGHT = 4, + NETDEV_A_PAGE_POOL_INFLIGHT_MEM = 5, + NETDEV_A_PAGE_POOL_DETACH_TIME = 6, + NETDEV_A_PAGE_POOL_DMABUF = 7, + __NETDEV_A_PAGE_POOL_MAX = 8, + NETDEV_A_PAGE_POOL_MAX = 7, +}; + +enum { + NETDEV_A_PAGE_POOL_STATS_INFO = 1, + 
NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW = 9, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER = 10, + NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY = 11, + NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL = 12, + NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE = 13, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED = 14, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL = 15, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING = 16, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL = 17, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT = 18, + __NETDEV_A_PAGE_POOL_STATS_MAX = 19, + NETDEV_A_PAGE_POOL_STATS_MAX = 18, +}; + +enum { + NETDEV_A_QUEUE_ID = 1, + NETDEV_A_QUEUE_IFINDEX = 2, + NETDEV_A_QUEUE_TYPE = 3, + NETDEV_A_QUEUE_NAPI_ID = 4, + NETDEV_A_QUEUE_DMABUF = 5, + __NETDEV_A_QUEUE_MAX = 6, + NETDEV_A_QUEUE_MAX = 5, +}; + +enum { + NETDEV_CMD_DEV_GET = 1, + NETDEV_CMD_DEV_ADD_NTF = 2, + NETDEV_CMD_DEV_DEL_NTF = 3, + NETDEV_CMD_DEV_CHANGE_NTF = 4, + NETDEV_CMD_PAGE_POOL_GET = 5, + NETDEV_CMD_PAGE_POOL_ADD_NTF = 6, + NETDEV_CMD_PAGE_POOL_DEL_NTF = 7, + NETDEV_CMD_PAGE_POOL_CHANGE_NTF = 8, + NETDEV_CMD_PAGE_POOL_STATS_GET = 9, + NETDEV_CMD_QUEUE_GET = 10, + NETDEV_CMD_NAPI_GET = 11, + NETDEV_CMD_QSTATS_GET = 12, + NETDEV_CMD_BIND_RX = 13, + NETDEV_CMD_NAPI_SET = 14, + __NETDEV_CMD_MAX = 15, + NETDEV_CMD_MAX = 14, +}; + +struct page_pool_stats { + struct page_pool_alloc_stats alloc_stats; + struct page_pool_recycle_stats recycle_stats; +}; + +struct dma_buf; + +struct dma_buf_attachment; + +struct net_devmem_dmabuf_binding { + struct dma_buf *dmabuf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + struct net_device *dev; + struct gen_pool *chunk_pool; + refcount_t ref; + struct list_head list; + struct xarray bound_rxqs; + u32 id; +}; + +struct genl_dumpit_info { + struct genl_split_ops op; + struct genl_info info; +}; + +enum { + NETDEV_NLGRP_MGMT = 0, + NETDEV_NLGRP_PAGE_POOL = 1, +}; + +typedef int (*pp_nl_fill_cb)(struct sk_buff *, const struct page_pool *, const struct genl_info *); + +struct page_pool_dump_cb { + long unsigned int ifindex; + u32 pp_id; +}; + +enum netlink_attribute_type { + NL_ATTR_TYPE_INVALID = 0, + NL_ATTR_TYPE_FLAG = 1, + NL_ATTR_TYPE_U8 = 2, + NL_ATTR_TYPE_U16 = 3, + NL_ATTR_TYPE_U32 = 4, + NL_ATTR_TYPE_U64 = 5, + NL_ATTR_TYPE_S8 = 6, + NL_ATTR_TYPE_S16 = 7, + NL_ATTR_TYPE_S32 = 8, + NL_ATTR_TYPE_S64 = 9, + NL_ATTR_TYPE_BINARY = 10, + NL_ATTR_TYPE_STRING = 11, + NL_ATTR_TYPE_NUL_STRING = 12, + NL_ATTR_TYPE_NESTED = 13, + NL_ATTR_TYPE_NESTED_ARRAY = 14, + NL_ATTR_TYPE_BITFIELD32 = 15, + NL_ATTR_TYPE_SINT = 16, + NL_ATTR_TYPE_UINT = 17, +}; + +enum netlink_policy_type_attr { + NL_POLICY_TYPE_ATTR_UNSPEC = 0, + NL_POLICY_TYPE_ATTR_TYPE = 1, + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2, + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3, + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4, + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5, + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6, + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7, + NL_POLICY_TYPE_ATTR_POLICY_IDX = 8, + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9, + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10, + NL_POLICY_TYPE_ATTR_PAD = 11, + NL_POLICY_TYPE_ATTR_MASK = 12, + __NL_POLICY_TYPE_ATTR_MAX = 13, + NL_POLICY_TYPE_ATTR_MAX = 12, +}; + +struct netlink_policy_dump_state { + unsigned int policy_idx; + unsigned int attr_idx; + unsigned int n_alloc; + struct { + const struct nla_policy *policy; + unsigned int maxtype; + } policies[0]; +}; + +struct channels_reply_data { + struct ethnl_reply_data base; + struct ethtool_channels channels; +}; + +struct nf_sockopt_ops { + struct 
list_head list; + u_int8_t pf; + int set_optmin; + int set_optmax; + int (*set)(struct sock *, int, sockptr_t, unsigned int); + int get_optmin; + int get_optmax; + int (*get)(struct sock *, int, void *, int *); + struct module *owner; +}; + +struct nf_ct_bridge_info { + struct nf_hook_ops *ops; + unsigned int ops_size; + struct module *me; +}; + +struct nf_loginfo { + u_int8_t type; + union { + struct { + u_int32_t copy_len; + u_int16_t group; + u_int16_t qthreshold; + u_int16_t flags; + } ulog; + struct { + u_int8_t level; + u_int8_t logflags; + } log; + } u; +}; + +enum nf_ip6_hook_priorities { + NF_IP6_PRI_FIRST = -2147483648, + NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450, + NF_IP6_PRI_CONNTRACK_DEFRAG = -400, + NF_IP6_PRI_RAW = -300, + NF_IP6_PRI_SELINUX_FIRST = -225, + NF_IP6_PRI_CONNTRACK = -200, + NF_IP6_PRI_MANGLE = -150, + NF_IP6_PRI_NAT_DST = -100, + NF_IP6_PRI_FILTER = 0, + NF_IP6_PRI_SECURITY = 50, + NF_IP6_PRI_NAT_SRC = 100, + NF_IP6_PRI_SELINUX_LAST = 225, + NF_IP6_PRI_CONNTRACK_HELPER = 300, + NF_IP6_PRI_LAST = 2147483647, +}; + +enum flow_block_binder_type { + FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP = 3, + FLOW_BLOCK_BINDER_TYPE_RED_MARK = 4, +}; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + bool block_shared; + bool unlocked_driver_cb; + struct net *net; + struct flow_block *block; + struct list_head cb_list; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; + struct Qdisc *sch; + struct list_head *cb_list_head; +}; + +struct flow_block_cb; + +struct flow_block_indr { + struct list_head list; + struct net_device *dev; + struct Qdisc *sch; + enum flow_block_binder_type binder_type; + void *data; + void *cb_priv; + void (*cleanup)(struct flow_block_cb *); +}; + +struct flow_block_cb { + struct list_head driver_list; + struct list_head list; + flow_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *); + struct flow_block_indr indr; + unsigned int refcnt; +}; + +enum flow_cls_command { + FLOW_CLS_REPLACE = 0, + FLOW_CLS_DESTROY = 1, + FLOW_CLS_STATS = 2, + FLOW_CLS_TMPLT_CREATE = 3, + FLOW_CLS_TMPLT_DESTROY = 4, +}; + +struct flow_cls_common_offload { + u32 chain_index; + __be16 protocol; + u32 prio; + bool skip_sw; + struct netlink_ext_ack *extack; +}; + +struct flow_cls_offload { + struct flow_cls_common_offload common; + enum flow_cls_command command; + bool use_act_stats; + long unsigned int cookie; + struct flow_rule *rule; + long: 32; + struct flow_stats stats; + u32 classid; + long: 32; +}; + +struct nft_offload_ethertype { + __be16 value; + __be16 mask; +}; + +struct snmp_mib { + const char *name; + int entry; +}; + +struct raw_hashinfo { + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct hlist_head ht[256]; +}; + +struct __kernel_sockaddr_storage { + union { + struct { + __kernel_sa_family_t ss_family; + char __data[126]; + }; + void *__align; + }; +}; + +enum sock_type { + SOCK_STREAM = 1, + SOCK_DGRAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; + int ipv6mr_ifindex; +}; + +struct in6_flowlabel_req { + struct in6_addr flr_dst; + __be32 flr_label; + __u8 flr_action; + __u8 flr_share; + __u16 flr_flags; + __u16 flr_expires; + __u16 flr_linger; + __u32 __flr_pad; +}; + 
+struct seg6_pernet_data { + struct mutex lock; + struct in6_addr *tun_src; +}; + +struct group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct group_filter { + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + }; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + }; + }; +}; + +enum { + XFRM_POLICY_TYPE_MAIN = 0, + XFRM_POLICY_TYPE_SUB = 1, + XFRM_POLICY_TYPE_MAX = 2, + XFRM_POLICY_TYPE_ANY = 255, +}; + +enum { + XFRM_MSG_BASE = 16, + XFRM_MSG_NEWSA = 16, + XFRM_MSG_DELSA = 17, + XFRM_MSG_GETSA = 18, + XFRM_MSG_NEWPOLICY = 19, + XFRM_MSG_DELPOLICY = 20, + XFRM_MSG_GETPOLICY = 21, + XFRM_MSG_ALLOCSPI = 22, + XFRM_MSG_ACQUIRE = 23, + XFRM_MSG_EXPIRE = 24, + XFRM_MSG_UPDPOLICY = 25, + XFRM_MSG_UPDSA = 26, + XFRM_MSG_POLEXPIRE = 27, + XFRM_MSG_FLUSHSA = 28, + XFRM_MSG_FLUSHPOLICY = 29, + XFRM_MSG_NEWAE = 30, + XFRM_MSG_GETAE = 31, + XFRM_MSG_REPORT = 32, + XFRM_MSG_MIGRATE = 33, + XFRM_MSG_NEWSADINFO = 34, + XFRM_MSG_GETSADINFO = 35, + XFRM_MSG_NEWSPDINFO = 36, + XFRM_MSG_GETSPDINFO = 37, + XFRM_MSG_MAPPING = 38, + XFRM_MSG_SETDEFAULT = 39, + XFRM_MSG_GETDEFAULT = 40, + __XFRM_MSG_MAX = 41, +}; + +enum xfrm_attr_type_t { + XFRMA_UNSPEC = 0, + XFRMA_ALG_AUTH = 1, + XFRMA_ALG_CRYPT = 2, + XFRMA_ALG_COMP = 3, + XFRMA_ENCAP = 4, + XFRMA_TMPL = 5, + XFRMA_SA = 6, + XFRMA_POLICY = 7, + XFRMA_SEC_CTX = 8, + XFRMA_LTIME_VAL = 9, + XFRMA_REPLAY_VAL = 10, + XFRMA_REPLAY_THRESH = 11, + XFRMA_ETIMER_THRESH = 12, + XFRMA_SRCADDR = 13, + XFRMA_COADDR = 14, + XFRMA_LASTUSED = 15, + XFRMA_POLICY_TYPE = 16, + XFRMA_MIGRATE = 17, + XFRMA_ALG_AEAD = 18, + XFRMA_KMADDRESS = 19, + XFRMA_ALG_AUTH_TRUNC = 20, + XFRMA_MARK = 21, + XFRMA_TFCPAD = 22, + XFRMA_REPLAY_ESN_VAL = 23, + XFRMA_SA_EXTRA_FLAGS = 24, + XFRMA_PROTO = 25, + XFRMA_ADDRESS_FILTER = 26, + XFRMA_PAD = 27, + XFRMA_OFFLOAD_DEV = 28, + XFRMA_SET_MARK = 29, + XFRMA_SET_MARK_MASK = 30, + XFRMA_IF_ID = 31, + XFRMA_MTIMER_THRESH = 32, + XFRMA_SA_DIR = 33, + XFRMA_NAT_KEEPALIVE_INTERVAL = 34, + XFRMA_SA_PCPU = 35, + __XFRMA_MAX = 36, +}; + +struct compat_group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct compat_group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct compat_group_filter { + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux; + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1]; + }; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[0]; + }; + }; +}; + +struct ip6_mtuinfo { + struct sockaddr_in6 ip6m_addr; + __u32 ip6m_mtu; +}; + +struct udp_sock { + struct inet_sock inet; + long unsigned int udp_flags; + int pending; + __u8 encap_type; + __u16 udp_lrpa_hash; + struct hlist_nulls_node udp_lrpa_node; + __u16 len; + __u16 gso_size; + __u16 pcslen; + __u16 pcrlen; + int (*encap_rcv)(struct sock *, struct sk_buff *); + void (*encap_err_rcv)(struct sock *, struct sk_buff *, int, __be16, 
u32, u8 *); + int (*encap_err_lookup)(struct sock *, struct sk_buff *); + void (*encap_destroy)(struct sock *); + struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sock *, struct sk_buff *, int); + long: 32; + struct sk_buff_head reader_queue; + int forward_deficit; + int forward_threshold; + bool peeking_with_offset; + long: 32; +}; + +struct ip6_ra_chain { + struct ip6_ra_chain *next; + struct sock *sk; + int sel; + void (*destructor)(struct sock *); +}; + +struct ipcm6_cookie { + struct sockcm_cookie sockc; + __s16 hlimit; + __s16 tclass; + __u16 gso_size; + __s8 dontfrag; + struct ipv6_txoptions *opt; + long: 32; +}; + +enum { + INET_ECN_NOT_ECT = 0, + INET_ECN_ECT_1 = 1, + INET_ECN_ECT_0 = 2, + INET_ECN_CE = 3, + INET_ECN_MASK = 3, +}; + +struct ipv6_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u8 first_segment; + __u8 flags; + __u16 tag; + struct in6_addr segments[0]; +}; + +enum { + IP6_FH_F_FRAG = 1, + IP6_FH_F_AUTH = 2, + IP6_FH_F_SKIP_RH = 4, +}; + +typedef initcall_t initcall_entry_t; + +typedef int (*parse_unknown_fn)(char *, char *, const char *, void *); + +struct trace_event_raw_initcall_level { + struct trace_entry ent; + u32 __data_loc_level; + char __data[0]; +}; + +struct trace_event_raw_initcall_start { + struct trace_entry ent; + initcall_t func; + char __data[0]; +}; + +struct trace_event_raw_initcall_finish { + struct trace_entry ent; + initcall_t func; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_initcall_level { + u32 level; + const void *level_ptr_; +}; + +struct trace_event_data_offsets_initcall_start {}; + +struct trace_event_data_offsets_initcall_finish {}; + +typedef void (*btf_trace_initcall_level)(void *, const char *); + +typedef void (*btf_trace_initcall_start)(void *, initcall_t); + +typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int); + +struct blacklist_entry { + struct list_head next; + char *buf; +}; + +enum fixed_addresses { + FIX_HOLE = 0, + FIX_EARLY_DEBUG_TOP = 0, + FIX_EARLY_DEBUG_BASE = 31, + __end_of_permanent_fixed_addresses = 32, + FIX_BTMAP_END = 32, + FIX_BTMAP_BEGIN = 1055, + __end_of_fixed_addresses = 1056, +}; + +struct pdev_archdata { + u64 dma_mask; + void *priv; + long: 32; +}; + +struct rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +struct mfd_cell; + +struct platform_device_id; + +struct platform_device { + const char *name; + int id; + bool id_auto; + long: 32; + struct device dev; + u64 platform_dma_mask; + struct device_dma_parameters dma_parms; + u32 num_resources; + struct resource *resource; + const struct platform_device_id *id_entry; + const char *driver_override; + struct mfd_cell *mfd_cell; + struct pdev_archdata archdata; +}; + +struct platform_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct pmf_args { + union { + u32 v; + u32 *p; + } u[4]; + unsigned int count; +}; + +struct pmf_function; + +struct pmf_handlers { + void * (*begin)(struct pmf_function *, struct pmf_args *); + void (*end)(struct pmf_function *, void *); + int (*irq_enable)(struct pmf_function *); + int (*irq_disable)(struct pmf_function *); + int (*write_gpio)(struct pmf_function *, void *, struct pmf_args *, u8, u8); + int (*read_gpio)(struct pmf_function *, void *, struct pmf_args *, u8, int, u8); + int (*write_reg32)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32); + int 
(*read_reg32)(struct pmf_function *, void *, struct pmf_args *, u32); + int (*write_reg16)(struct pmf_function *, void *, struct pmf_args *, u32, u16, u16); + int (*read_reg16)(struct pmf_function *, void *, struct pmf_args *, u32); + int (*write_reg8)(struct pmf_function *, void *, struct pmf_args *, u32, u8, u8); + int (*read_reg8)(struct pmf_function *, void *, struct pmf_args *, u32); + int (*delay)(struct pmf_function *, void *, struct pmf_args *, u32); + int (*wait_reg32)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32); + int (*wait_reg16)(struct pmf_function *, void *, struct pmf_args *, u32, u16, u16); + int (*wait_reg8)(struct pmf_function *, void *, struct pmf_args *, u32, u8, u8); + int (*read_i2c)(struct pmf_function *, void *, struct pmf_args *, u32); + int (*write_i2c)(struct pmf_function *, void *, struct pmf_args *, u32, const u8 *); + int (*rmw_i2c)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32, const u8 *, const u8 *); + int (*read_cfg)(struct pmf_function *, void *, struct pmf_args *, u32, u32); + int (*write_cfg)(struct pmf_function *, void *, struct pmf_args *, u32, u32, const u8 *); + int (*rmw_cfg)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32, u32, const u8 *, const u8 *); + int (*read_i2c_sub)(struct pmf_function *, void *, struct pmf_args *, u8, u32); + int (*write_i2c_sub)(struct pmf_function *, void *, struct pmf_args *, u8, u32, const u8 *); + int (*set_i2c_mode)(struct pmf_function *, void *, struct pmf_args *, int); + int (*rmw_i2c_sub)(struct pmf_function *, void *, struct pmf_args *, u8, u32, u32, u32, const u8 *, const u8 *); + int (*read_reg32_msrx)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32, u32); + int (*read_reg16_msrx)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32, u32); + int (*read_reg8_msrx)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32, u32); + int (*write_reg32_slm)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32); + int (*write_reg16_slm)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32); + int (*write_reg8_slm)(struct pmf_function *, void *, struct pmf_args *, u32, u32, u32); + int (*mask_and_compare)(struct pmf_function *, void *, struct pmf_args *, u32, const u8 *, const u8 *); + struct module *owner; +}; + +struct pmf_device; + +struct pmf_function { + struct list_head link; + struct device_node *node; + void *driver_data; + struct pmf_device *dev; + const char *name; + u32 phandle; + u32 flags; + const void *data; + unsigned int length; + struct list_head irq_clients; + struct kref ref; +}; + +enum { + pmac_i2c_mode_dumb = 1, + pmac_i2c_mode_std = 2, + pmac_i2c_mode_stdsub = 3, + pmac_i2c_mode_combined = 4, +}; + +enum { + pmac_i2c_read = 1, + pmac_i2c_write = 0, +}; + +enum { + pmac_i2c_bus_keywest = 0, + pmac_i2c_bus_pmu = 1, + pmac_i2c_bus_smu = 2, +}; + +enum { + pmac_i2c_can_largesub = 1, + pmac_i2c_multibus = 2, +}; + +struct pmac_i2c_bus { + struct list_head link; + struct device_node *controller; + struct device_node *busnode; + int type; + int flags; + struct i2c_adapter adapter; + void *hostdata; + int channel; + int mode; + struct mutex mutex; + int opened; + int polled; + struct platform_device *platform_dev; + struct lock_class_key lock_key; + int (*open)(struct pmac_i2c_bus *); + void (*close)(struct pmac_i2c_bus *); + int (*xfer)(struct pmac_i2c_bus *, u8, int, u32, u8 *, int); +}; + +struct pmac_i2c_host_kw { + struct mutex mutex; + void *base; + int bsteps; + int speed; + int irq; + u8 
*data; + unsigned int len; + int state; + int rw; + int polled; + int result; + struct completion complete; + spinlock_t lock; + struct timer_list timeout_timer; +}; + +typedef enum { + reg_mode = 0, + reg_control = 1, + reg_status = 2, + reg_isr = 3, + reg_ier = 4, + reg_addr = 5, + reg_subaddr = 6, + reg_data = 7, +} reg_t; + +enum { + state_idle = 0, + state_addr = 1, + state_read = 2, + state_write = 3, + state_stop = 4, + state_dead = 5, +}; + +enum { + pmac_i2c_quirk_invmask = 1, + pmac_i2c_quirk_skip = 2, +}; + +struct pmac_i2c_pf_inst { + struct pmac_i2c_bus *bus; + u8 addr; + u8 buffer[64]; + u8 scratch[64]; + int bytes; + int quirks; +}; + +struct whitelist_ent { + char *name; + char *compatible; + int quirks; +}; + +typedef bool (*smp_cond_func_t)(int, void *); + +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; + long: 32; +}; + +struct dma_chan { + int lock; + const char *device_id; +}; + +struct sched_domain_attr { + int relax_domain_level; +}; + +enum { + CGRP_NOTIFY_ON_RELEASE = 0, + CGRP_CPUSET_CLONE_CHILDREN = 1, + CGRP_FREEZE = 2, + CGRP_FROZEN = 3, + CGRP_KILL = 4, +}; + +enum { + CGRP_ROOT_NOPREFIX = 2, + CGRP_ROOT_XATTR = 4, + CGRP_ROOT_NS_DELEGATE = 8, + CGRP_ROOT_FAVOR_DYNMODS = 16, + CGRP_ROOT_CPUSET_V2_MODE = 65536, + CGRP_ROOT_MEMORY_LOCAL_EVENTS = 131072, + CGRP_ROOT_MEMORY_RECURSIVE_PROT = 262144, + CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = 524288, + CGRP_ROOT_PIDS_LOCAL_EVENTS = 1048576, +}; + +struct cgroup_taskset { + struct list_head src_csets; + struct list_head dst_csets; + int nr_tasks; + int ssid; + struct list_head *csets; + struct css_set *cur_cset; + struct task_struct *cur_task; +}; + +struct css_task_iter { + struct cgroup_subsys *ss; + unsigned int flags; + struct list_head *cset_pos; + struct list_head *cset_head; + struct list_head *tcset_pos; + struct list_head *tcset_head; + struct list_head *task_pos; + struct list_head *cur_tasks_head; + struct css_set *cur_cset; + struct css_set *cur_dcset; + struct task_struct *cur_task; + struct list_head iters_node; +}; + +struct uf_node { + struct uf_node *parent; + unsigned int rank; +}; + +struct fmeter { + int cnt; + int val; + time64_t time; + spinlock_t lock; + long: 32; +}; + +enum prs_errcode { + PERR_NONE = 0, + PERR_INVCPUS = 1, + PERR_INVPARENT = 2, + PERR_NOTPART = 3, + PERR_NOTEXCL = 4, + PERR_NOCPUS = 5, + PERR_HOTPLUG = 6, + PERR_CPUSEMPTY = 7, + PERR_HKEEPING = 8, + PERR_ACCESS = 9, +}; + +typedef enum { + CS_ONLINE = 0, + CS_CPU_EXCLUSIVE = 1, + CS_MEM_EXCLUSIVE = 2, + CS_MEM_HARDWALL = 3, + CS_MEMORY_MIGRATE = 4, + CS_SCHED_LOAD_BALANCE = 5, + CS_SPREAD_PAGE = 6, + CS_SPREAD_SLAB = 7, +} cpuset_flagbits_t; + +typedef enum { + FILE_MEMORY_MIGRATE = 0, + FILE_CPULIST = 1, + FILE_MEMLIST = 2, + FILE_EFFECTIVE_CPULIST = 3, + FILE_EFFECTIVE_MEMLIST = 4, + FILE_SUBPARTS_CPULIST = 5, + FILE_EXCLUSIVE_CPULIST = 6, + FILE_EFFECTIVE_XCPULIST = 7, + FILE_ISOLATED_CPULIST = 8, + FILE_CPU_EXCLUSIVE = 9, + FILE_MEM_EXCLUSIVE = 10, + FILE_MEM_HARDWALL = 11, + FILE_SCHED_LOAD_BALANCE = 12, + FILE_PARTITION_ROOT = 13, + FILE_SCHED_RELAX_DOMAIN_LEVEL = 14, + FILE_MEMORY_PRESSURE_ENABLED = 15, + FILE_MEMORY_PRESSURE = 16, + FILE_SPREAD_PAGE = 17, + FILE_SPREAD_SLAB = 18, +} cpuset_filetype_t; + +struct cpuset { + struct cgroup_subsys_state css; + long unsigned int flags; + cpumask_var_t cpus_allowed; + nodemask_t mems_allowed; + cpumask_var_t effective_cpus; + nodemask_t effective_mems; + cpumask_var_t effective_xcpus; + cpumask_var_t exclusive_cpus; + nodemask_t old_mems_allowed; 
+ struct fmeter fmeter; + int attach_in_progress; + int relax_domain_level; + int nr_subparts; + int partition_root_state; + int nr_deadline_tasks; + int nr_migrate_dl_tasks; + u64 sum_migrate_dl_bw; + enum prs_errcode prs_err; + struct cgroup_file partition_file; + struct list_head remote_sibling; + struct uf_node node; +}; + +struct tmpmasks { + cpumask_var_t addmask; + cpumask_var_t delmask; + cpumask_var_t new_cpus; +}; + +enum partition_cmd { + partcmd_enable = 0, + partcmd_enablei = 1, + partcmd_disable = 2, + partcmd_update = 3, + partcmd_invalidate = 4, +}; + +struct cpuset_migrate_mm_work { + struct work_struct work; + struct mm_struct *mm; + nodemask_t from; + nodemask_t to; +}; + +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 1, + TRACE_FLAG_NEED_RESCHED_LAZY = 2, + TRACE_FLAG_NEED_RESCHED = 4, + TRACE_FLAG_HARDIRQ = 8, + TRACE_FLAG_SOFTIRQ = 16, + TRACE_FLAG_PREEMPT_RESCHED = 32, + TRACE_FLAG_NMI = 64, + TRACE_FLAG_BH_OFF = 128, +}; + +struct ftrace_event_field { + struct list_head link; + const char *name; + const char *type; + int filter_type; + int offset; + int size; + int is_signed; + int len; +}; + +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; + struct { + enum bpf_cgroup_iter_order order; + __u32 cgroup_fd; + __u64 cgroup_id; + } cgroup; + struct { + __u32 tid; + __u32 pid; + __u32 pid_fd; + } task; +}; + +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *, union bpf_iter_link_info *, struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *, struct seq_file *); + +typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *, struct bpf_link_info *); + +typedef const struct bpf_func_proto * (*bpf_iter_get_func_proto_t)(enum bpf_func_id, const struct bpf_prog *); + +struct bpf_iter_reg { + const char *target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; + bpf_iter_show_fdinfo_t show_fdinfo; + bpf_iter_fill_link_info_t fill_link_info; + bpf_iter_get_func_proto_t get_func_proto; + u32 ctx_arg_info_size; + u32 feature; + struct bpf_ctx_arg_aux ctx_arg_info[2]; + const struct bpf_iter_seq_info *seq_info; +}; + +struct bpf_iter_meta { + union { + struct seq_file *seq; + }; + u64 session_id; + u64 seq_num; +}; + +struct bpf_iter_seq_map_info { + u32 map_id; +}; + +struct bpf_iter__bpf_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; +}; + +enum { + BPF_F_NO_PREALLOC = 1, + BPF_F_NO_COMMON_LRU = 2, + BPF_F_NUMA_NODE = 4, + BPF_F_RDONLY = 8, + BPF_F_WRONLY = 16, + BPF_F_STACK_BUILD_ID = 32, + BPF_F_ZERO_SEED = 64, + BPF_F_RDONLY_PROG = 128, + BPF_F_WRONLY_PROG = 256, + BPF_F_CLONE = 512, + BPF_F_MMAPABLE = 1024, + BPF_F_PRESERVE_ELEMS = 2048, + BPF_F_INNER_MAP = 4096, + BPF_F_LINK = 8192, + BPF_F_PATH_FD = 16384, + BPF_F_VTYPE_BTF_OBJ_FD = 32768, + BPF_F_TOKEN_FD = 65536, + BPF_F_SEGV_ON_FAULT = 131072, + BPF_F_NO_USER_CONV = 262144, +}; + +enum { + BPF_F_INGRESS = 1, + BPF_F_BROADCAST = 8, + BPF_F_EXCLUDE_INGRESS = 16, +}; + +struct bpf_cpumap_val { + __u32 qsize; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct xdp_cpumap_stats { + unsigned int redirect; + unsigned int pass; + unsigned int drop; +}; + +struct bpf_cpu_map_entry; + +struct xdp_bulk_queue { + void *q[8]; + struct list_head flush_node; + struct bpf_cpu_map_entry *obj; + unsigned int count; +}; + +struct bpf_cpu_map_entry { + u32 cpu; + int map_id; + struct xdp_bulk_queue 
*bulkq; + struct ptr_ring *queue; + struct task_struct *kthread; + struct bpf_cpumap_val value; + struct bpf_prog *prog; + struct completion kthread_running; + struct rcu_work free_work; +}; + +struct bpf_cpu_map { + struct bpf_map map; + struct bpf_cpu_map_entry **cpu_map; + long: 32; +}; + +typedef union { +} release_pages_arg; + +struct vm_event_state { + long unsigned int event[73]; +}; + +enum objext_flags { + OBJEXTS_ALLOC_FAIL = 4, + __NR_OBJEXTS_FLAGS = 8, +}; + +enum mapping_flags { + AS_EIO = 0, + AS_ENOSPC = 1, + AS_MM_ALL_LOCKS = 2, + AS_UNEVICTABLE = 3, + AS_EXITING = 4, + AS_NO_WRITEBACK_TAGS = 5, + AS_RELEASE_ALWAYS = 6, + AS_STABLE_WRITES = 7, + AS_INACCESSIBLE = 8, + AS_FOLIO_ORDER_BITS = 5, + AS_FOLIO_ORDER_MIN = 16, + AS_FOLIO_ORDER_MAX = 21, +}; + +struct trace_event_raw_mm_lru_insertion { + struct trace_entry ent; + struct folio *folio; + long unsigned int pfn; + enum lru_list lru; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_mm_lru_activate { + struct trace_entry ent; + struct folio *folio; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_lru_insertion {}; + +struct trace_event_data_offsets_mm_lru_activate {}; + +typedef void (*btf_trace_mm_lru_insertion)(void *, struct folio *); + +typedef void (*btf_trace_mm_lru_activate)(void *, struct folio *); + +struct cpu_fbatches { + local_lock_t lock; + struct folio_batch lru_add; + struct folio_batch lru_deactivate_file; + struct folio_batch lru_deactivate; + struct folio_batch lru_lazyfree; + struct folio_batch lru_activate; + local_lock_t lock_irq; + struct folio_batch lru_move_tail; +}; + +typedef void (*move_fn_t)(struct lruvec *, struct folio *); + +struct anon_vma_name { + struct kref kref; + char name[0]; +}; + +struct vm_unmapped_area_info { + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; + long unsigned int start_gap; +}; + +enum { + HUGETLB_SHMFS_INODE = 1, + HUGETLB_ANONHUGE_INODE = 2, +}; + +struct hstate {}; + +struct mempolicy {}; + +struct trace_event_raw_vm_unmapped_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int total_vm; + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; + char __data[0]; +}; + +struct trace_event_raw_vma_mas_szero { + struct trace_entry ent; + struct maple_tree *mt; + long unsigned int start; + long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_vma_store { + struct trace_entry ent; + struct maple_tree *mt; + struct vm_area_struct *vma; + long unsigned int vm_start; + long unsigned int vm_end; + char __data[0]; +}; + +struct trace_event_raw_exit_mmap { + struct trace_entry ent; + struct mm_struct *mm; + struct maple_tree *mt; + char __data[0]; +}; + +struct trace_event_data_offsets_vm_unmapped_area {}; + +struct trace_event_data_offsets_vma_mas_szero {}; + +struct trace_event_data_offsets_vma_store {}; + +struct trace_event_data_offsets_exit_mmap {}; + +typedef void (*btf_trace_vm_unmapped_area)(void *, long unsigned int, struct vm_unmapped_area_info *); + +typedef void (*btf_trace_vma_mas_szero)(void *, struct maple_tree *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_vma_store)(void *, struct maple_tree *, struct vm_area_struct *); + +typedef void (*btf_trace_exit_mmap)(void *, struct 
mm_struct *); + +enum vma_merge_state { + VMA_MERGE_START = 0, + VMA_MERGE_ERROR_NOMEM = 1, + VMA_MERGE_NOMERGE = 2, + VMA_MERGE_SUCCESS = 3, +}; + +enum vma_merge_flags { + VMG_FLAG_DEFAULT = 0, + VMG_FLAG_JUST_EXPAND = 1, +}; + +struct vma_merge_struct { + struct mm_struct *mm; + struct vma_iterator *vmi; + long unsigned int pgoff; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct vm_area_struct *vma; + long unsigned int start; + long unsigned int end; + long unsigned int flags; + struct file *file; + struct anon_vma *anon_vma; + struct mempolicy *policy; + struct vm_userfaultfd_ctx uffd_ctx; + struct anon_vma_name *anon_name; + enum vma_merge_flags merge_flags; + enum vma_merge_state state; +}; + +typedef struct task_struct *class_task_lock_t; + +struct ipc_ids { + int in_use; + short unsigned int seq; + struct rw_semaphore rwsem; + struct idr ipcs_idr; + int max_idx; + int last_idx; + struct rhashtable key_ht; +}; + +struct ipc_namespace { + struct ipc_ids ids[3]; + int sem_ctls[4]; + int used_sems; + unsigned int msg_ctlmax; + unsigned int msg_ctlmnb; + unsigned int msg_ctlmni; + long: 32; + struct percpu_counter percpu_msg_bytes; + struct percpu_counter percpu_msg_hdrs; + size_t shm_ctlmax; + size_t shm_ctlall; + long unsigned int shm_tot; + int shm_ctlmni; + int shm_rmid_forced; + struct notifier_block ipcns_nb; + struct vfsmount *mq_mnt; + unsigned int mq_queues_count; + unsigned int mq_queues_max; + unsigned int mq_msg_max; + unsigned int mq_msgsize_max; + unsigned int mq_msg_default; + unsigned int mq_msgsize_default; + struct ctl_table_set mq_set; + struct ctl_table_header *mq_sysctls; + struct ctl_table_set ipc_set; + struct ctl_table_header *ipc_sysctls; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct llist_node mnt_llist; + struct ns_common ns; +}; + +struct mount; + +struct mnt_namespace { + struct ns_common ns; + struct mount *root; + struct rb_root mounts; + struct user_namespace *user_ns; + struct ucounts *ucounts; + u64 seq; + wait_queue_head_t poll; + long: 32; + u64 event; + unsigned int nr_mounts; + unsigned int pending_mounts; + struct rb_node mnt_ns_tree_node; + refcount_t passive; +}; + +struct fs_pin { + wait_queue_head_t wait; + int done; + struct hlist_node s_list; + struct hlist_node m_list; + void (*kill)(struct fs_pin *); +}; + +struct pseudo_fs_context { + const struct super_operations *ops; + const struct xattr_handler * const *xattr; + const struct dentry_operations *dops; + long unsigned int magic; +}; + +struct pidfd_info { + __u64 mask; + __u64 cgroupid; + __u32 pid; + __u32 tgid; + __u32 ppid; + __u32 ruid; + __u32 rgid; + __u32 euid; + __u32 egid; + __u32 suid; + __u32 sgid; + __u32 fsuid; + __u32 fsgid; + __u32 spare0[1]; +}; + +struct stashed_operations { + void (*put_data)(void *); + int (*init_inode)(struct inode *, void *); +}; + +struct mnt_pcp; + +struct mountpoint; + +struct mount { + struct hlist_node mnt_hash; + struct mount *mnt_parent; + struct dentry *mnt_mountpoint; + struct vfsmount mnt; + union { + struct callback_head mnt_rcu; + struct llist_node mnt_llist; + }; + struct mnt_pcp *mnt_pcp; + struct list_head mnt_mounts; + struct list_head mnt_child; + struct list_head mnt_instance; + const char *mnt_devname; + union { + struct rb_node mnt_node; + struct list_head mnt_list; + }; + struct list_head mnt_expire; + struct list_head mnt_share; + struct list_head mnt_slave_list; + struct list_head mnt_slave; + struct mount *mnt_master; + struct mnt_namespace *mnt_ns; + struct mountpoint *mnt_mp; + 
union { + struct hlist_node mnt_mp_list; + struct hlist_node mnt_umount; + }; + struct list_head mnt_umounting; + struct fsnotify_mark_connector *mnt_fsnotify_marks; + __u32 mnt_fsnotify_mask; + int mnt_id; + long: 32; + u64 mnt_id_unique; + int mnt_group_id; + int mnt_expiry_mark; + struct hlist_head mnt_pins; + struct hlist_head mnt_stuck_children; +}; + +struct mnt_pcp { + int mnt_count; + int mnt_writers; +}; + +struct mountpoint { + struct hlist_node m_hash; + struct dentry *m_dentry; + struct hlist_head m_list; + int m_count; +}; + +enum kernfs_node_type { + KERNFS_DIR = 1, + KERNFS_FILE = 2, + KERNFS_LINK = 4, +}; + +typedef int get_block_t(struct inode *, sector_t, struct buffer_head *, int); + +struct folio_iter { + struct folio *folio; + size_t offset; + size_t length; + struct folio *_next; + size_t _seg_count; + int _i; +}; + +struct ext4_map_blocks { + ext4_fsblk_t m_pblk; + ext4_lblk_t m_lblk; + unsigned int m_len; + unsigned int m_flags; + long: 32; +}; + +enum bio_post_read_step { + STEP_INITIAL = 0, + STEP_DECRYPT = 1, + STEP_VERITY = 2, + STEP_MAX = 3, +}; + +struct bio_post_read_ctx { + struct bio *bio; + struct work_struct work; + unsigned int cur_step; + unsigned int enabled_steps; +}; + +struct fsverity_info; + +enum fid_type { + FILEID_ROOT = 0, + FILEID_INO32_GEN = 1, + FILEID_INO32_GEN_PARENT = 2, + FILEID_BTRFS_WITHOUT_PARENT = 77, + FILEID_BTRFS_WITH_PARENT = 78, + FILEID_BTRFS_WITH_PARENT_ROOT = 79, + FILEID_UDF_WITHOUT_PARENT = 81, + FILEID_UDF_WITH_PARENT = 82, + FILEID_NILFS_WITHOUT_PARENT = 97, + FILEID_NILFS_WITH_PARENT = 98, + FILEID_FAT_WITHOUT_PARENT = 113, + FILEID_FAT_WITH_PARENT = 114, + FILEID_INO64_GEN = 129, + FILEID_INO64_GEN_PARENT = 130, + FILEID_LUSTRE = 151, + FILEID_BCACHEFS_WITHOUT_PARENT = 177, + FILEID_BCACHEFS_WITH_PARENT = 178, + FILEID_KERNFS = 254, + FILEID_INVALID = 255, +}; + +struct fid { + union { + struct { + u32 ino; + u32 gen; + u32 parent_ino; + u32 parent_gen; + } i32; + struct { + u64 ino; + u32 gen; + } i64; + struct { + u32 block; + u16 partref; + u16 parent_partref; + u32 generation; + u32 parent_block; + u32 parent_generation; + } udf; + struct { + struct {} __empty_raw; + __u32 raw[0]; + }; + }; +}; + +struct iso_directory_record { + __u8 length[1]; + __u8 ext_attr_length[1]; + __u8 extent[8]; + __u8 size[8]; + __u8 date[7]; + __u8 flags[1]; + __u8 file_unit_size[1]; + __u8 interleave[1]; + __u8 volume_sequence_number[4]; + __u8 name_len[1]; + char name[0]; +}; + +struct iso_inode_info { + long unsigned int i_iget5_block; + long unsigned int i_iget5_offset; + unsigned int i_first_extent; + unsigned char i_file_format; + unsigned char i_format_parm[3]; + long unsigned int i_next_section_block; + long unsigned int i_next_section_offset; + off_t i_section_size; + long: 32; + struct inode vfs_inode; +}; + +struct isofs_fid { + u32 block; + u16 offset; + u16 parent_offset; + u32 generation; + u32 parent_block; + u32 parent_generation; +}; + +struct fuse_forget_one { + uint64_t nodeid; + uint64_t nlookup; +}; + +struct fuse_in_header { + uint32_t len; + uint32_t opcode; + uint64_t unique; + uint64_t nodeid; + uint32_t uid; + uint32_t gid; + uint32_t pid; + uint16_t total_extlen; + uint16_t padding; +}; + +struct fuse_out_header { + uint32_t len; + int32_t error; + uint64_t unique; +}; + +struct tree_descr { + const char *name; + const struct file_operations *ops; + int mode; +}; + +struct fuse_forget_link { + struct fuse_forget_one forget_one; + struct fuse_forget_link *next; + long: 32; +}; + +struct fuse_conn; + +struct 
fuse_mount { + struct fuse_conn *fc; + struct super_block *sb; + struct list_head fc_entry; + struct callback_head rcu; +}; + +struct fuse_in_arg { + unsigned int size; + const void *value; +}; + +struct fuse_arg { + unsigned int size; + void *value; +}; + +struct fuse_args { + uint64_t nodeid; + uint32_t opcode; + uint8_t in_numargs; + uint8_t out_numargs; + uint8_t ext_idx; + bool force: 1; + bool noreply: 1; + bool nocreds: 1; + bool in_pages: 1; + bool out_pages: 1; + bool user_pages: 1; + bool out_argvar: 1; + bool page_zeroing: 1; + bool page_replace: 1; + bool may_block: 1; + bool is_ext: 1; + bool is_pinned: 1; + bool invalidate_vmap: 1; + struct fuse_in_arg in_args[3]; + struct fuse_arg out_args[2]; + void (*end)(struct fuse_mount *, struct fuse_args *, int); + void *vmap_base; + long: 32; +}; + +struct fuse_req { + struct list_head list; + struct list_head intr_entry; + struct fuse_args *args; + refcount_t count; + long unsigned int flags; + long: 32; + struct { + struct fuse_in_header h; + } in; + struct { + struct fuse_out_header h; + } out; + wait_queue_head_t waitq; + struct fuse_mount *fm; +}; + +struct fuse_iqueue; + +struct fuse_iqueue_ops { + void (*send_forget)(struct fuse_iqueue *, struct fuse_forget_link *); + void (*send_interrupt)(struct fuse_iqueue *, struct fuse_req *); + void (*send_req)(struct fuse_iqueue *, struct fuse_req *); + void (*release)(struct fuse_iqueue *); +}; + +struct fuse_iqueue { + unsigned int connected; + spinlock_t lock; + wait_queue_head_t waitq; + long: 32; + u64 reqctr; + struct list_head pending; + struct list_head interrupts; + struct fuse_forget_link forget_list_head; + struct fuse_forget_link *forget_list_tail; + int forget_batch; + struct fasync_struct *fasync; + const struct fuse_iqueue_ops *ops; + void *priv; + long: 32; +}; + +struct fuse_sync_bucket; + +struct fuse_conn { + spinlock_t lock; + refcount_t count; + atomic_t dev_count; + struct callback_head rcu; + kuid_t user_id; + kgid_t group_id; + struct pid_namespace *pid_ns; + struct user_namespace *user_ns; + unsigned int max_read; + unsigned int max_write; + unsigned int max_pages; + unsigned int max_pages_limit; + long: 32; + struct fuse_iqueue iq; + atomic64_t khctr; + struct rb_root polled_files; + unsigned int max_background; + unsigned int congestion_threshold; + unsigned int num_background; + unsigned int active_background; + struct list_head bg_queue; + spinlock_t bg_lock; + int initialized; + int blocked; + wait_queue_head_t blocked_waitq; + unsigned int connected; + bool aborted; + unsigned int conn_error: 1; + unsigned int conn_init: 1; + unsigned int async_read: 1; + unsigned int abort_err: 1; + unsigned int atomic_o_trunc: 1; + unsigned int export_support: 1; + unsigned int writeback_cache: 1; + unsigned int parallel_dirops: 1; + unsigned int handle_killpriv: 1; + unsigned int cache_symlinks: 1; + unsigned int legacy_opts_show: 1; + unsigned int handle_killpriv_v2: 1; + unsigned int no_open: 1; + unsigned int no_opendir: 1; + unsigned int no_fsync: 1; + unsigned int no_fsyncdir: 1; + unsigned int no_flush: 1; + unsigned int no_setxattr: 1; + unsigned int setxattr_ext: 1; + unsigned int no_getxattr: 1; + unsigned int no_listxattr: 1; + unsigned int no_removexattr: 1; + unsigned int no_lock: 1; + unsigned int no_access: 1; + unsigned int no_create: 1; + unsigned int no_interrupt: 1; + unsigned int no_bmap: 1; + unsigned int no_poll: 1; + unsigned int big_writes: 1; + unsigned int dont_mask: 1; + unsigned int no_flock: 1; + unsigned int no_fallocate: 1; + unsigned int 
no_rename2: 1; + unsigned int auto_inval_data: 1; + unsigned int explicit_inval_data: 1; + unsigned int do_readdirplus: 1; + unsigned int readdirplus_auto: 1; + unsigned int async_dio: 1; + unsigned int no_lseek: 1; + unsigned int posix_acl: 1; + unsigned int default_permissions: 1; + unsigned int allow_other: 1; + unsigned int no_copy_file_range: 1; + unsigned int destroy: 1; + unsigned int delete_stale: 1; + unsigned int no_control: 1; + unsigned int no_force_umount: 1; + unsigned int auto_submounts: 1; + unsigned int sync_fs: 1; + unsigned int init_security: 1; + unsigned int create_supp_group: 1; + unsigned int inode_dax: 1; + unsigned int no_tmpfile: 1; + unsigned int direct_io_allow_mmap: 1; + unsigned int no_statx: 1; + unsigned int passthrough: 1; + unsigned int use_pages_for_kvec_io: 1; + int max_stack_depth; + atomic_t num_waiting; + unsigned int minor; + struct list_head entry; + dev_t dev; + struct dentry *ctl_dentry[5]; + int ctl_ndents; + u32 scramble_key[4]; + long: 32; + atomic64_t attr_version; + atomic64_t evict_ctr; + void (*release)(struct fuse_conn *); + struct rw_semaphore killsb; + struct list_head devices; + struct list_head mounts; + struct fuse_sync_bucket *curr_bucket; + struct idr backing_files_map; + long: 32; +}; + +struct fuse_sync_bucket { + atomic_t count; + wait_queue_head_t waitq; + struct callback_head rcu; +}; + +struct posix_acl_entry { + short int e_tag; + short unsigned int e_perm; + union { + kuid_t e_uid; + kgid_t e_gid; + }; +}; + +struct posix_acl { + refcount_t a_refcount; + unsigned int a_count; + struct callback_head a_rcu; + struct posix_acl_entry a_entries[0]; +}; + +struct btrfs_dir_item { + struct btrfs_disk_key location; + __le64 transid; + __le16 data_len; + __le16 name_len; + __u8 type; +} __attribute__((packed)); + +struct btrfs_stripe { + __le64 devid; + __le64 offset; + __u8 dev_uuid[16]; +}; + +struct btrfs_chunk { + __le64 length; + __le64 owner; + __le64 stripe_len; + __le64 type; + __le32 io_align; + __le32 io_width; + __le32 sector_size; + __le16 num_stripes; + __le16 sub_stripes; + struct btrfs_stripe stripe; +}; + +enum btrfs_tree_block_status { + BTRFS_TREE_BLOCK_CLEAN = 0, + BTRFS_TREE_BLOCK_INVALID_NRITEMS = 1, + BTRFS_TREE_BLOCK_INVALID_PARENT_KEY = 2, + BTRFS_TREE_BLOCK_BAD_KEY_ORDER = 3, + BTRFS_TREE_BLOCK_INVALID_LEVEL = 4, + BTRFS_TREE_BLOCK_INVALID_FREE_SPACE = 5, + BTRFS_TREE_BLOCK_INVALID_OFFSETS = 6, + BTRFS_TREE_BLOCK_INVALID_BLOCKPTR = 7, + BTRFS_TREE_BLOCK_INVALID_ITEM = 8, + BTRFS_TREE_BLOCK_INVALID_OWNER = 9, + BTRFS_TREE_BLOCK_WRITTEN_NOT_SET = 10, +}; + +struct btrfs_fiemap_entry { + u64 offset; + u64 phys; + u64 len; + u32 flags; + long: 32; +}; + +struct fiemap_cache { + struct btrfs_fiemap_entry *entries; + int entries_size; + int entries_pos; + long: 32; + u64 next_search_offset; + unsigned int extents_mapped; + long: 32; + u64 offset; + u64 phys; + u64 len; + u32 flags; + bool cached; +}; + +struct shash_instance { + void (*free)(struct shash_instance *); + long: 32; + union { + struct { + char head[56]; + struct crypto_instance base; + } s; + struct shash_alg alg; + }; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +typedef s32 dma_cookie_t; + +enum dma_status { + DMA_COMPLETE = 0, + DMA_IN_PROGRESS = 1, + DMA_PAUSED = 2, + DMA_ERROR = 3, + DMA_OUT_OF_ORDER = 4, +}; + +enum dma_transaction_type { + DMA_MEMCPY = 0, + DMA_XOR = 1, + DMA_PQ = 2, + DMA_XOR_VAL = 3, + DMA_PQ_VAL = 4, + DMA_MEMSET = 5, + DMA_MEMSET_SG = 6, + DMA_INTERRUPT = 7, + DMA_PRIVATE = 8, + DMA_ASYNC_TX = 9, + 
DMA_SLAVE = 10, + DMA_CYCLIC = 11, + DMA_INTERLEAVE = 12, + DMA_COMPLETION_NO_ORDER = 13, + DMA_REPEAT = 14, + DMA_LOAD_EOT = 15, + DMA_TX_TYPE_END = 16, +}; + +enum dma_transfer_direction { + DMA_MEM_TO_MEM = 0, + DMA_MEM_TO_DEV = 1, + DMA_DEV_TO_MEM = 2, + DMA_DEV_TO_DEV = 3, + DMA_TRANS_NONE = 4, +}; + +struct data_chunk { + size_t size; + size_t icg; + size_t dst_icg; + size_t src_icg; +}; + +struct dma_interleaved_template { + dma_addr_t src_start; + dma_addr_t dst_start; + enum dma_transfer_direction dir; + bool src_inc; + bool dst_inc; + bool src_sgl; + bool dst_sgl; + size_t numf; + size_t frame_size; + struct data_chunk sgl[0]; +}; + +struct dma_vec { + dma_addr_t addr; + size_t len; +}; + +enum dma_ctrl_flags { + DMA_PREP_INTERRUPT = 1, + DMA_CTRL_ACK = 2, + DMA_PREP_PQ_DISABLE_P = 4, + DMA_PREP_PQ_DISABLE_Q = 8, + DMA_PREP_CONTINUE = 16, + DMA_PREP_FENCE = 32, + DMA_CTRL_REUSE = 64, + DMA_PREP_CMD = 128, + DMA_PREP_REPEAT = 256, + DMA_PREP_LOAD_EOT = 512, +}; + +enum sum_check_flags { + SUM_CHECK_P_RESULT = 1, + SUM_CHECK_Q_RESULT = 2, +}; + +typedef struct { + long unsigned int bits[1]; +} dma_cap_mask_t; + +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = 1, + DESC_METADATA_ENGINE = 2, +}; + +struct dma_chan_percpu { + long unsigned int memcpy_count; + long unsigned int bytes_transferred; +}; + +struct dma_router { + struct device *dev; + void (*route_free)(struct device *, void *); +}; + +struct dma_device; + +struct dma_chan_dev; + +struct dma_chan___2 { + struct dma_device *device; + struct device *slave; + dma_cookie_t cookie; + dma_cookie_t completed_cookie; + int chan_id; + struct dma_chan_dev *dev; + const char *name; + char *dbg_client_name; + struct list_head device_node; + struct dma_chan_percpu *local; + int client_count; + int table_count; + struct dma_router *router; + void *route_data; + void *private; +}; + +typedef bool (*dma_filter_fn)(struct dma_chan___2 *, void *); + +struct dma_slave_map; + +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +enum dmaengine_alignment { + DMAENGINE_ALIGN_1_BYTE = 0, + DMAENGINE_ALIGN_2_BYTES = 1, + DMAENGINE_ALIGN_4_BYTES = 2, + DMAENGINE_ALIGN_8_BYTES = 3, + DMAENGINE_ALIGN_16_BYTES = 4, + DMAENGINE_ALIGN_32_BYTES = 5, + DMAENGINE_ALIGN_64_BYTES = 6, + DMAENGINE_ALIGN_128_BYTES = 7, + DMAENGINE_ALIGN_256_BYTES = 8, +}; + +enum dma_residue_granularity { + DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, + DMA_RESIDUE_GRANULARITY_SEGMENT = 1, + DMA_RESIDUE_GRANULARITY_BURST = 2, +}; + +struct dma_async_tx_descriptor; + +struct dma_slave_caps; + +struct dma_slave_config; + +struct dma_tx_state; + +struct dma_device { + struct kref ref; + unsigned int chancnt; + unsigned int privatecnt; + struct list_head channels; + struct list_head global_node; + struct dma_filter filter; + dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; + short unsigned int max_xor; + short unsigned int max_pq; + enum dmaengine_alignment copy_align; + enum dmaengine_alignment xor_align; + enum dmaengine_alignment pq_align; + enum dmaengine_alignment fill_align; + int dev_id; + struct device *dev; + struct module *owner; + struct ida chan_ida; + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool descriptor_reuse; + enum dma_residue_granularity residue_granularity; + int (*device_alloc_chan_resources)(struct dma_chan___2 *); + int (*device_router_config)(struct dma_chan___2 *); + void 
(*device_free_chan_resources)(struct dma_chan___2 *); + struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan___2 *, dma_addr_t, dma_addr_t, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan___2 *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan___2 *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan___2 *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan___2 *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan___2 *, dma_addr_t, int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan___2 *, struct scatterlist *, unsigned int, int, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan___2 *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_peripheral_dma_vec)(struct dma_chan___2 *, const struct dma_vec *, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan___2 *, struct scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *); + struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan___2 *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan___2 *, struct dma_interleaved_template *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan___2 *, dma_addr_t, u64, long unsigned int); + void (*device_caps)(struct dma_chan___2 *, struct dma_slave_caps *); + int (*device_config)(struct dma_chan___2 *, struct dma_slave_config *); + int (*device_pause)(struct dma_chan___2 *); + int (*device_resume)(struct dma_chan___2 *); + int (*device_terminate_all)(struct dma_chan___2 *); + void (*device_synchronize)(struct dma_chan___2 *); + enum dma_status (*device_tx_status)(struct dma_chan___2 *, dma_cookie_t, struct dma_tx_state *); + void (*device_issue_pending)(struct dma_chan___2 *); + void (*device_release)(struct dma_device *); + void (*dbg_summary_show)(struct seq_file *, struct dma_device *); + struct dentry *dbg_dev_root; +}; + +struct dma_chan_dev { + struct dma_chan___2 *chan; + long: 32; + struct device device; + int dev_id; + bool chan_dma_dev; +}; + +enum dma_slave_buswidth { + DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, + DMA_SLAVE_BUSWIDTH_1_BYTE = 1, + DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_3_BYTES = 3, + DMA_SLAVE_BUSWIDTH_4_BYTES = 4, + DMA_SLAVE_BUSWIDTH_8_BYTES = 8, + DMA_SLAVE_BUSWIDTH_16_BYTES = 16, + DMA_SLAVE_BUSWIDTH_32_BYTES = 32, + DMA_SLAVE_BUSWIDTH_64_BYTES = 64, + DMA_SLAVE_BUSWIDTH_128_BYTES = 128, +}; + +struct dma_slave_config { + enum dma_transfer_direction direction; + phys_addr_t src_addr; + phys_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + u32 src_maxburst; + u32 dst_maxburst; + u32 src_port_window_size; + u32 dst_port_window_size; + bool device_fc; + void *peripheral_config; + size_t 
peripheral_size; +}; + +struct dma_slave_caps { + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool cmd_pause; + bool cmd_resume; + bool cmd_terminate; + enum dma_residue_granularity residue_granularity; + bool descriptor_reuse; +}; + +typedef void (*dma_async_tx_callback)(void *); + +enum dmaengine_tx_result { + DMA_TRANS_NOERROR = 0, + DMA_TRANS_READ_FAILED = 1, + DMA_TRANS_WRITE_FAILED = 2, + DMA_TRANS_ABORTED = 3, +}; + +struct dmaengine_result { + enum dmaengine_tx_result result; + u32 residue; +}; + +typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); + +struct dmaengine_unmap_data { + u8 map_cnt; + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *, void *, size_t); + void * (*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *); + int (*set_len)(struct dma_async_tx_descriptor *, size_t); +}; + +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + enum dma_ctrl_flags flags; + dma_addr_t phys; + struct dma_chan___2 *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); + int (*desc_free)(struct dma_async_tx_descriptor *); + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; + struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; +}; + +struct dma_tx_state { + dma_cookie_t last; + dma_cookie_t used; + u32 residue; + u32 in_flight_bytes; +}; + +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +enum async_tx_flags { + ASYNC_TX_XOR_ZERO_DST = 1, + ASYNC_TX_XOR_DROP_DST = 2, + ASYNC_TX_ACK = 4, + ASYNC_TX_FENCE = 8, + ASYNC_TX_PQ_XOR_DST = 16, +}; + +struct async_submit_ctl { + enum async_tx_flags flags; + struct dma_async_tx_descriptor *depend_tx; + dma_async_tx_callback cb_fn; + void *cb_param; + void *scribble; +}; + +enum submit_disposition { + ASYNC_TX_SUBMITTED = 0, + ASYNC_TX_CHANNEL_SWITCH = 1, + ASYNC_TX_DIRECT_SUBMIT = 2, +}; + +enum { + GENHD_FL_REMOVABLE = 1, + GENHD_FL_HIDDEN = 2, + GENHD_FL_NO_PART = 4, +}; + +struct badblocks { + struct device *dev; + int count; + int unacked_exist; + int shift; + u64 *page; + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; +}; + +struct blk_major_name { + struct blk_major_name *next; + int major; + char name[16]; + void (*probe)(dev_t); +}; + +struct xattr_name { + char name[256]; +}; + +struct kernel_xattr_ctx { + union { + const void *cvalue; + void *value; + }; + void *kvalue; + size_t size; + struct xattr_name *kname; + unsigned int flags; +}; + +struct io_xattr { + struct file *file; + struct kernel_xattr_ctx ctx; + struct filename *filename; +}; + +typedef struct { + size_t bitContainer; + unsigned int bitsConsumed; + const char *ptr; + const char *start; + const char *limitPtr; +} BIT_DStream_t; + +typedef enum { + BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3, +} BIT_DStream_status; + +typedef unsigned int FSE_DTable; + +typedef struct { + size_t state; + const void *table; +} FSE_DState_t; + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; + +typedef struct { + short unsigned int newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; + 
+typedef struct { + short int ncount[256]; + FSE_DTable dtable[0]; +} FSE_DecompressWksp; + +typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); + +typedef void sg_free_fn(struct scatterlist *, unsigned int); + +struct sg_pool { + size_t size; + char *name; + struct kmem_cache *slab; + mempool_t *pool; +}; + +enum lockdep_ok { + LOCKDEP_STILL_OK = 0, + LOCKDEP_NOW_UNRELIABLE = 1, +}; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(const struct bus_type *, char *); + ssize_t (*store)(const struct bus_type *, const char *, size_t); +}; + +enum { + PCI_STD_RESOURCES = 0, + PCI_STD_RESOURCE_END = 5, + PCI_ROM_RESOURCE = 6, + PCI_BRIDGE_RESOURCES = 7, + PCI_BRIDGE_RESOURCE_END = 10, + PCI_NUM_RESOURCES = 11, + DEVICE_COUNT_RESOURCE = 11, +}; + +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = 1, + PCI_BUS_FLAGS_NO_MMRBC = 2, + PCI_BUS_FLAGS_NO_AERSID = 4, + PCI_BUS_FLAGS_NO_EXTCFG = 8, +}; + +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0, + PCIE_LNK_X1 = 1, + PCIE_LNK_X2 = 2, + PCIE_LNK_X4 = 4, + PCIE_LNK_X8 = 8, + PCIE_LNK_X12 = 12, + PCIE_LNK_X16 = 16, + PCIE_LNK_X32 = 32, + PCIE_LNK_WIDTH_UNKNOWN = 255, +}; + +enum pci_bus_speed { + PCI_SPEED_33MHz = 0, + PCI_SPEED_66MHz = 1, + PCI_SPEED_66MHz_PCIX = 2, + PCI_SPEED_100MHz_PCIX = 3, + PCI_SPEED_133MHz_PCIX = 4, + PCI_SPEED_66MHz_PCIX_ECC = 5, + PCI_SPEED_100MHz_PCIX_ECC = 6, + PCI_SPEED_133MHz_PCIX_ECC = 7, + PCI_SPEED_66MHz_PCIX_266 = 9, + PCI_SPEED_100MHz_PCIX_266 = 10, + PCI_SPEED_133MHz_PCIX_266 = 11, + AGP_UNKNOWN = 12, + AGP_1X = 13, + AGP_2X = 14, + AGP_4X = 15, + AGP_8X = 16, + PCI_SPEED_66MHz_PCIX_533 = 17, + PCI_SPEED_100MHz_PCIX_533 = 18, + PCI_SPEED_133MHz_PCIX_533 = 19, + PCIE_SPEED_2_5GT = 20, + PCIE_SPEED_5_0GT = 21, + PCIE_SPEED_8_0GT = 22, + PCIE_SPEED_16_0GT = 23, + PCIE_SPEED_32_0GT = 24, + PCIE_SPEED_64_0GT = 25, + PCI_SPEED_UNKNOWN = 255, +}; + +struct pipe_buffer; + +struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t rd_wait; + wait_queue_head_t wr_wait; + unsigned int head; + unsigned int tail; + unsigned int max_usage; + unsigned int ring_size; + unsigned int nr_accounted; + unsigned int readers; + unsigned int writers; + unsigned int files; + unsigned int r_counter; + unsigned int w_counter; + bool poll_usage; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct pipe_buffer *bufs; + struct user_struct *user; +}; + +struct pipe_buf_operations; + +struct pipe_buffer { + struct page *page; + unsigned int offset; + unsigned int len; + const struct pipe_buf_operations *ops; + unsigned int flags; + long unsigned int private; +}; + +struct pipe_buf_operations { + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +struct splice_desc { + size_t total_len; + unsigned int len; + unsigned int flags; + union { + void *userptr; + struct file *file; + void *data; + } u; + void (*splice_eof)(struct splice_desc *); + long: 32; + loff_t pos; + loff_t *opos; + size_t num_spliced; + bool need_wakeup; + long: 32; +}; + +typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); + +struct memdev { + const char *name; + const struct file_operations *fops; + fmode_t fmode; + umode_t mode; +}; + +struct iosys_map { + union { + void *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; 
+}; + +struct dma_fence_ops; + +struct dma_fence { + spinlock_t *lock; + const struct dma_fence_ops *ops; + union { + struct list_head cb_list; + ktime_t timestamp; + struct callback_head rcu; + }; + u64 context; + u64 seqno; + long unsigned int flags; + struct kref refcount; + int error; + long: 32; +}; + +struct dma_fence_ops { + bool use_64bit_seqno; + const char * (*get_driver_name)(struct dma_fence *); + const char * (*get_timeline_name)(struct dma_fence *); + bool (*enable_signaling)(struct dma_fence *); + bool (*signaled)(struct dma_fence *); + long int (*wait)(struct dma_fence *, bool, long int); + void (*release)(struct dma_fence *); + void (*fence_value_str)(struct dma_fence *, char *, int); + void (*timeline_value_str)(struct dma_fence *, char *, int); + void (*set_deadline)(struct dma_fence *, ktime_t); +}; + +struct dma_fence_cb; + +typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *); + +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +struct dma_buf_ops { + bool cache_sgt_mapping; + int (*attach)(struct dma_buf *, struct dma_buf_attachment *); + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + int (*pin)(struct dma_buf_attachment *); + void (*unpin)(struct dma_buf_attachment *); + struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); + void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); + void (*release)(struct dma_buf *); + int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*mmap)(struct dma_buf *, struct vm_area_struct *); + int (*vmap)(struct dma_buf *, struct iosys_map *); + void (*vunmap)(struct dma_buf *, struct iosys_map *); +}; + +struct dma_buf_poll_cb_t { + struct dma_fence_cb cb; + wait_queue_head_t *poll; + __poll_t active; +}; + +struct dma_resv; + +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + unsigned int vmapping_counter; + struct iosys_map vmap_ptr; + const char *exp_name; + const char *name; + spinlock_t name_lock; + struct module *owner; + struct list_head list_node; + void *priv; + struct dma_resv *resv; + wait_queue_head_t poll; + struct dma_buf_poll_cb_t cb_in; + struct dma_buf_poll_cb_t cb_out; +}; + +struct dma_buf_attach_ops; + +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + struct sg_table *sgt; + enum dma_data_direction dir; + bool peer2peer; + const struct dma_buf_attach_ops *importer_ops; + void *importer_priv; + void *priv; +}; + +struct ww_acquire_ctx; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +}; + +struct dma_resv_list; + +struct dma_resv { + struct ww_mutex lock; + struct dma_resv_list *fences; +}; + +struct dma_buf_attach_ops { + bool allow_peer2peer; + void (*move_notify)(struct dma_buf_attachment *); +}; + +struct dma_buf_export_info { + const char *exp_name; + struct module *owner; + const struct dma_buf_ops *ops; + size_t size; + int flags; + struct dma_resv *resv; + void *priv; +}; + +struct dma_fence_unwrap { + struct dma_fence *chain; + struct dma_fence *array; + unsigned int index; +}; + +struct sync_file { + struct file *file; + char user_name[32]; + struct list_head sync_file_list; + wait_queue_head_t wq; + long unsigned int flags; + struct dma_fence *fence; + struct dma_fence_cb cb; +}; + +struct ww_acquire_ctx { + struct task_struct 
*task; + long unsigned int stamp; + unsigned int acquired; + short unsigned int wounded; + short unsigned int is_wait_die; +}; + +enum dma_resv_usage { + DMA_RESV_USAGE_KERNEL = 0, + DMA_RESV_USAGE_WRITE = 1, + DMA_RESV_USAGE_READ = 2, + DMA_RESV_USAGE_BOOKKEEP = 3, +}; + +struct dma_resv_iter { + struct dma_resv *obj; + enum dma_resv_usage usage; + struct dma_fence *fence; + enum dma_resv_usage fence_usage; + unsigned int index; + struct dma_resv_list *fences; + unsigned int num_fences; + bool is_restarted; +}; + +struct dma_buf_sync { + __u64 flags; +}; + +struct dma_buf_export_sync_file { + __u32 flags; + __s32 fd; +}; + +struct dma_buf_import_sync_file { + __u32 flags; + __s32 fd; +}; + +enum ata_lpm_hints { + ATA_LPM_EMPTY = 1, + ATA_LPM_HIPM = 2, + ATA_LPM_WAKE_ONLY = 4, +}; + +enum ata_link_iter_mode { + ATA_LITER_EDGE = 0, + ATA_LITER_HOST_FIRST = 1, + ATA_LITER_PMP_FIRST = 2, +}; + +struct clk_bulk_data { + const char *id; + struct clk *clk; +}; + +enum { + AHCI_MAX_PORTS = 32, + AHCI_MAX_SG = 168, + AHCI_DMA_BOUNDARY = 4294967295, + AHCI_MAX_CMDS = 32, + AHCI_CMD_SZ = 32, + AHCI_CMD_SLOT_SZ = 1024, + AHCI_RX_FIS_SZ = 256, + AHCI_CMD_TBL_CDB = 64, + AHCI_CMD_TBL_HDR_SZ = 128, + AHCI_CMD_TBL_SZ = 2816, + AHCI_CMD_TBL_AR_SZ = 90112, + AHCI_PORT_PRIV_DMA_SZ = 91392, + AHCI_PORT_PRIV_FBS_DMA_SZ = 95232, + AHCI_IRQ_ON_SG = 2147483648, + AHCI_CMD_ATAPI = 32, + AHCI_CMD_WRITE = 64, + AHCI_CMD_PREFETCH = 128, + AHCI_CMD_RESET = 256, + AHCI_CMD_CLR_BUSY = 1024, + RX_FIS_PIO_SETUP = 32, + RX_FIS_D2H_REG = 64, + RX_FIS_SDB = 88, + RX_FIS_UNK = 96, + HOST_CAP = 0, + HOST_CTL = 4, + HOST_IRQ_STAT = 8, + HOST_PORTS_IMPL = 12, + HOST_VERSION = 16, + HOST_EM_LOC = 28, + HOST_EM_CTL = 32, + HOST_CAP2 = 36, + HOST_RESET = 1, + HOST_IRQ_EN = 2, + HOST_MRSM = 4, + HOST_AHCI_EN = 2147483648, + HOST_CAP_SXS = 32, + HOST_CAP_EMS = 64, + HOST_CAP_CCC = 128, + HOST_CAP_PART = 8192, + HOST_CAP_SSC = 16384, + HOST_CAP_PIO_MULTI = 32768, + HOST_CAP_FBS = 65536, + HOST_CAP_PMP = 131072, + HOST_CAP_ONLY = 262144, + HOST_CAP_CLO = 16777216, + HOST_CAP_LED = 33554432, + HOST_CAP_ALPM = 67108864, + HOST_CAP_SSS = 134217728, + HOST_CAP_MPS = 268435456, + HOST_CAP_SNTF = 536870912, + HOST_CAP_NCQ = 1073741824, + HOST_CAP_64 = 2147483648, + HOST_CAP2_BOH = 1, + HOST_CAP2_NVMHCI = 2, + HOST_CAP2_APST = 4, + HOST_CAP2_SDS = 8, + HOST_CAP2_SADM = 16, + HOST_CAP2_DESO = 32, + PORT_LST_ADDR = 0, + PORT_LST_ADDR_HI = 4, + PORT_FIS_ADDR = 8, + PORT_FIS_ADDR_HI = 12, + PORT_IRQ_STAT = 16, + PORT_IRQ_MASK = 20, + PORT_CMD = 24, + PORT_TFDATA = 32, + PORT_SIG = 36, + PORT_CMD_ISSUE = 56, + PORT_SCR_STAT = 40, + PORT_SCR_CTL = 44, + PORT_SCR_ERR = 48, + PORT_SCR_ACT = 52, + PORT_SCR_NTF = 60, + PORT_FBS = 64, + PORT_DEVSLP = 68, + PORT_IRQ_COLD_PRES = 2147483648, + PORT_IRQ_TF_ERR = 1073741824, + PORT_IRQ_HBUS_ERR = 536870912, + PORT_IRQ_HBUS_DATA_ERR = 268435456, + PORT_IRQ_IF_ERR = 134217728, + PORT_IRQ_IF_NONFATAL = 67108864, + PORT_IRQ_OVERFLOW = 16777216, + PORT_IRQ_BAD_PMP = 8388608, + PORT_IRQ_PHYRDY = 4194304, + PORT_IRQ_DMPS = 128, + PORT_IRQ_CONNECT = 64, + PORT_IRQ_SG_DONE = 32, + PORT_IRQ_UNK_FIS = 16, + PORT_IRQ_SDB_FIS = 8, + PORT_IRQ_DMAS_FIS = 4, + PORT_IRQ_PIOS_FIS = 2, + PORT_IRQ_D2H_REG_FIS = 1, + PORT_IRQ_FREEZE = 683671632, + PORT_IRQ_ERROR = 2025848912, + DEF_PORT_IRQ = 2025848959, + PORT_CMD_ASP = 134217728, + PORT_CMD_ALPE = 67108864, + PORT_CMD_ATAPI = 16777216, + PORT_CMD_FBSCP = 4194304, + PORT_CMD_ESP = 2097152, + PORT_CMD_CPD = 1048576, + PORT_CMD_MPSP = 524288, + PORT_CMD_HPCP = 262144, + 
PORT_CMD_PMP = 131072, + PORT_CMD_LIST_ON = 32768, + PORT_CMD_FIS_ON = 16384, + PORT_CMD_FIS_RX = 16, + PORT_CMD_CLO = 8, + PORT_CMD_POWER_ON = 4, + PORT_CMD_SPIN_UP = 2, + PORT_CMD_START = 1, + PORT_CMD_ICC_MASK = 4026531840, + PORT_CMD_ICC_ACTIVE = 268435456, + PORT_CMD_ICC_PARTIAL = 536870912, + PORT_CMD_ICC_SLUMBER = 1610612736, + PORT_CMD_CAP = 8126464, + PORT_FBS_DWE_OFFSET = 16, + PORT_FBS_ADO_OFFSET = 12, + PORT_FBS_DEV_OFFSET = 8, + PORT_FBS_DEV_MASK = 3840, + PORT_FBS_SDE = 4, + PORT_FBS_DEC = 2, + PORT_FBS_EN = 1, + PORT_DEVSLP_DM_OFFSET = 25, + PORT_DEVSLP_DM_MASK = 503316480, + PORT_DEVSLP_DITO_OFFSET = 15, + PORT_DEVSLP_MDAT_OFFSET = 10, + PORT_DEVSLP_DETO_OFFSET = 2, + PORT_DEVSLP_DSP = 2, + PORT_DEVSLP_ADSE = 1, + AHCI_HFLAG_NO_NCQ = 1, + AHCI_HFLAG_IGN_IRQ_IF_ERR = 2, + AHCI_HFLAG_IGN_SERR_INTERNAL = 4, + AHCI_HFLAG_32BIT_ONLY = 8, + AHCI_HFLAG_MV_PATA = 16, + AHCI_HFLAG_NO_MSI = 32, + AHCI_HFLAG_NO_PMP = 64, + AHCI_HFLAG_SECT255 = 256, + AHCI_HFLAG_YES_NCQ = 512, + AHCI_HFLAG_NO_SUSPEND = 1024, + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = 2048, + AHCI_HFLAG_NO_SNTF = 4096, + AHCI_HFLAG_NO_FPDMA_AA = 8192, + AHCI_HFLAG_YES_FBS = 16384, + AHCI_HFLAG_DELAY_ENGINE = 32768, + AHCI_HFLAG_NO_DEVSLP = 131072, + AHCI_HFLAG_NO_FBS = 262144, + AHCI_HFLAG_MULTI_MSI = 1048576, + AHCI_HFLAG_WAKE_BEFORE_STOP = 4194304, + AHCI_HFLAG_YES_ALPM = 8388608, + AHCI_HFLAG_NO_WRITE_TO_RO = 16777216, + AHCI_HFLAG_SUSPEND_PHYS = 33554432, + AHCI_HFLAG_NO_SXS = 67108864, + AHCI_HFLAG_43BIT_ONLY = 134217728, + AHCI_HFLAG_INTEL_PCS_QUIRK = 268435456, + AHCI_FLAG_COMMON = 393346, + ICH_MAP = 144, + PCS_6 = 146, + PCS_7 = 148, + EM_MAX_SLOTS = 15, + EM_MAX_RETRY = 5, + EM_CTL_RST = 512, + EM_CTL_TM = 256, + EM_CTL_MR = 1, + EM_CTL_ALHD = 67108864, + EM_CTL_XMT = 33554432, + EM_CTL_SMB = 16777216, + EM_CTL_SGPIO = 524288, + EM_CTL_SES = 262144, + EM_CTL_SAFTE = 131072, + EM_CTL_LED = 65536, + EM_MSG_TYPE_LED = 1, + EM_MSG_TYPE_SAFTE = 2, + EM_MSG_TYPE_SES2 = 4, + EM_MSG_TYPE_SGPIO = 8, +}; + +struct ahci_cmd_hdr { + __le32 opts; + __le32 status; + __le32 tbl_addr; + __le32 tbl_addr_hi; + __le32 reserved[4]; +}; + +struct ahci_sg { + __le32 addr; + __le32 addr_hi; + __le32 reserved; + __le32 flags_size; +}; + +struct ahci_em_priv { + enum sw_activity blink_policy; + struct timer_list timer; + long unsigned int saved_activity; + long unsigned int activity; + long unsigned int led_state; + struct ata_link *link; +}; + +struct ahci_port_priv { + struct ata_link *active_link; + struct ahci_cmd_hdr *cmd_slot; + dma_addr_t cmd_slot_dma; + void *cmd_tbl; + dma_addr_t cmd_tbl_dma; + void *rx_fis; + dma_addr_t rx_fis_dma; + unsigned int ncq_saw_d2h: 1; + unsigned int ncq_saw_dmas: 1; + unsigned int ncq_saw_sdb: 1; + spinlock_t lock; + u32 intr_mask; + bool fbs_supported; + bool fbs_enabled; + int fbs_last_dev; + struct ahci_em_priv em_priv[15]; + char *irq_desc; +}; + +struct ahci_host_priv { + unsigned int flags; + u32 mask_port_map; + void *mmio; + u32 cap; + u32 cap2; + u32 version; + u32 port_map; + u32 saved_cap; + u32 saved_cap2; + u32 saved_port_map; + u32 saved_port_cap[32]; + u32 em_loc; + u32 em_buf_sz; + u32 em_msg_type; + u32 remapped_nvme; + bool got_runtime_pm; + unsigned int n_clks; + struct clk_bulk_data *clks; + unsigned int f_rsts; + struct reset_control *rsts; + struct regulator **target_pwrs; + struct regulator *ahci_regulator; + struct regulator *phy_regulator; + struct phy **phys; + unsigned int nports; + void *plat_data; + unsigned int irq; + void (*start_engine)(struct ata_port *); + int 
(*stop_engine)(struct ata_port *); + irqreturn_t (*irq_handler)(int, void *); + int (*get_irq_vector)(struct ata_host *, int); +}; + +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = 1, + ETH_TEST_FL_FAILED = 2, + ETH_TEST_FL_EXTERNAL_LB = 4, + ETH_TEST_FL_EXTERNAL_LB_DONE = 8, +}; + +struct e1000_hw_stats___2 { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_info___2 { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; + __le64 hdr_addr; + } read; + struct { + struct { + struct { + __le16 pkt_info; + __le16 hdr_info; + } lo_dword; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; + +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +struct i2c_algo_bit_data { + void *data; + void (*setsda)(void *, int); + void (*setscl)(void *, int); + int (*getsda)(void *); + int (*getscl)(void *); + int (*pre_xfer)(struct i2c_adapter *); + void (*post_xfer)(struct i2c_adapter *); + int udelay; + int timeout; + bool can_do_atomic; +}; + +struct vf_data_storage { + unsigned char vf_mac_addresses[6]; + u16 vf_mc_hashes[30]; + u16 num_vf_mc_hashes; + u32 flags; + long unsigned int last_nack; + u16 pf_vlan; + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; + bool trusted; +}; + +struct vf_mac_filter { + struct list_head l; + int vf; + bool free; + u8 vf_mac[6]; +}; + +enum igb_tx_buf_type { + IGB_TYPE_SKB = 0, + IGB_TYPE_XDP = 1, +}; + +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + long unsigned int time_stamp; + enum igb_tx_buf_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct page *page; + __u16 page_offset; + __u16 pagecnt_bias; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring; + +struct igb_ring_container { + struct igb_ring *ring; + unsigned int 
total_bytes; + unsigned int total_packets; + u16 work_limit; + u8 count; + u8 itr; +}; + +struct igb_q_vector; + +struct igb_ring { + struct igb_q_vector *q_vector; + struct net_device *netdev; + struct bpf_prog *xdp_prog; + struct device *dev; + union { + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; + long unsigned int flags; + void *tail; + dma_addr_t dma; + unsigned int size; + u16 count; + u8 queue_index; + u8 reg_idx; + bool launchtime_enable; + bool cbs_enable; + s32 idleslope; + s32 sendslope; + s32 hicredit; + s32 locredit; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + union { + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + struct { + struct sk_buff *skb; + long: 32; + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + long: 32; + }; + }; + struct xdp_rxq_info xdp_rxq; +}; + +struct igb_adapter; + +struct igb_q_vector { + struct igb_adapter *adapter; + int cpu; + u32 eims_value; + u16 itr_val; + u8 set_itr; + void *itr_register; + struct igb_ring_container rx; + struct igb_ring_container tx; + long: 32; + struct napi_struct napi; + struct callback_head rcu; + char name[25]; + long: 32; + long: 32; + long: 32; + struct igb_ring ring[0]; +}; + +struct hwmon_buff; + +struct igb_mac_addr; + +struct igb_adapter { + long unsigned int active_vlans[128]; + struct net_device *netdev; + struct bpf_prog *xdp_prog; + long unsigned int state; + unsigned int flags; + unsigned int num_q_vectors; + struct msix_entry msix_entries[10]; + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + int num_rx_queues; + struct igb_ring *rx_ring[16]; + u32 max_frame_size; + u32 min_frame_size; + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + u8 *io_addr; + struct work_struct reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + long unsigned int led_status; + struct pci_dev *pdev; + spinlock_t stats64_lock; + long: 32; + struct rtnl_link_stats64 stats64; + struct e1000_hw hw; + struct e1000_hw_stats___2 stats; + struct e1000_phy_info phy_info; + u32 test_icr; + long: 32; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + int msg_enable; + struct igb_q_vector *q_vector[8]; + u32 eims_enable_mask; + u32 eims_other; + u16 tx_ring_count; + u16 rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + long unsigned int ptp_tx_start; + long unsigned int last_rx_ptp_check; + long unsigned int last_rx_timestamp; + unsigned int ptp_flags; + spinlock_t tmreg_lock; + long: 32; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + bool pps_sys_wrap_on; + struct ptp_pin_desc sdp_config[4]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[2]; + char fw_version[48]; + struct hwmon_buff *igb_hwmon_buff; + bool ets; + 
struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[128]; + long unsigned int link_check_timeout; + int copper_tries; + struct e1000_info___2 ei; + u16 eee_advert; + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; + spinlock_t nfc_lock; + bool etype_bitmap[3]; + struct igb_mac_addr *mac_table; + struct vf_mac_filter vf_macs; + struct vf_mac_filter *vf_mac_list; + spinlock_t vfs_lock; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[13]; + struct hwmon_attr hwmon_list[12]; + unsigned int n_hwmon; +}; + +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 1, + IGB_FILTER_FLAG_VLAN_TCI = 2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 8, +}; + +struct igb_nfc_input { + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[6]; + u8 dst_addr[6]; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + long unsigned int cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + +struct igb_mac_addr { + u8 addr[6]; + u8 queue; + u8 state; +}; + +enum e1000_state_t { + __IGB_TESTING = 0, + __IGB_RESETTING = 1, + __IGB_DOWN = 2, + __IGB_PTP_TX_IN_PROGRESS = 3, +}; + +struct igb_stats { + char stat_string[32]; + int sizeof_stat; + int stat_offset; +}; + +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP = 1, + TEST_IRQ = 2, + TEST_LOOP = 3, + TEST_LINK = 4, +}; + +struct igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +enum sam_status { + SAM_STAT_GOOD = 0, + SAM_STAT_CHECK_CONDITION = 2, + SAM_STAT_CONDITION_MET = 4, + SAM_STAT_BUSY = 8, + SAM_STAT_INTERMEDIATE = 16, + SAM_STAT_INTERMEDIATE_CONDITION_MET = 20, + SAM_STAT_RESERVATION_CONFLICT = 24, + SAM_STAT_COMMAND_TERMINATED = 34, + SAM_STAT_TASK_SET_FULL = 40, + SAM_STAT_ACA_ACTIVE = 48, + SAM_STAT_TASK_ABORTED = 64, +}; + +enum { + US_FL_SINGLE_LUN = 1, + US_FL_NEED_OVERRIDE = 2, + US_FL_SCM_MULT_TARG = 4, + US_FL_FIX_INQUIRY = 8, + US_FL_FIX_CAPACITY = 16, + US_FL_IGNORE_RESIDUE = 32, + US_FL_BULK32 = 64, + US_FL_NOT_LOCKABLE = 128, + US_FL_GO_SLOW = 256, + US_FL_NO_WP_DETECT = 512, + US_FL_MAX_SECTORS_64 = 1024, + US_FL_IGNORE_DEVICE = 2048, + US_FL_CAPACITY_HEURISTICS = 4096, + US_FL_MAX_SECTORS_MIN = 8192, + US_FL_BULK_IGNORE_TAG = 16384, + US_FL_SANE_SENSE = 32768, + US_FL_CAPACITY_OK = 65536, + US_FL_BAD_SENSE = 131072, + US_FL_NO_READ_DISC_INFO = 262144, + US_FL_NO_READ_CAPACITY_16 = 524288, + US_FL_INITIAL_READ10 = 1048576, + US_FL_WRITE_CACHE = 2097152, + US_FL_NEEDS_CAP16 = 4194304, + US_FL_IGNORE_UAS = 8388608, + US_FL_BROKEN_FUA = 16777216, + US_FL_NO_ATA_1X = 33554432, + US_FL_NO_REPORT_OPCODES = 67108864, + US_FL_MAX_SECTORS_240 = 134217728, + US_FL_NO_REPORT_LUNS = 268435456, + US_FL_ALWAYS_SYNC = 536870912, + US_FL_NO_SAME = 1073741824, + US_FL_SENSE_AFTER_SYNC = 2147483648, +}; + +enum { + POWER_SUPPLY_STATUS_UNKNOWN = 0, + POWER_SUPPLY_STATUS_CHARGING = 1, + POWER_SUPPLY_STATUS_DISCHARGING = 2, + POWER_SUPPLY_STATUS_NOT_CHARGING = 3, + POWER_SUPPLY_STATUS_FULL = 4, +}; + +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE = 1, + 
POWER_SUPPLY_CHARGE_TYPE_TRICKLE = 2, + POWER_SUPPLY_CHARGE_TYPE_FAST = 3, + POWER_SUPPLY_CHARGE_TYPE_STANDARD = 4, + POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE = 5, + POWER_SUPPLY_CHARGE_TYPE_CUSTOM = 6, + POWER_SUPPLY_CHARGE_TYPE_LONGLIFE = 7, + POWER_SUPPLY_CHARGE_TYPE_BYPASS = 8, +}; + +enum { + POWER_SUPPLY_HEALTH_UNKNOWN = 0, + POWER_SUPPLY_HEALTH_GOOD = 1, + POWER_SUPPLY_HEALTH_OVERHEAT = 2, + POWER_SUPPLY_HEALTH_DEAD = 3, + POWER_SUPPLY_HEALTH_OVERVOLTAGE = 4, + POWER_SUPPLY_HEALTH_UNSPEC_FAILURE = 5, + POWER_SUPPLY_HEALTH_COLD = 6, + POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE = 7, + POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE = 8, + POWER_SUPPLY_HEALTH_OVERCURRENT = 9, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED = 10, + POWER_SUPPLY_HEALTH_WARM = 11, + POWER_SUPPLY_HEALTH_COOL = 12, + POWER_SUPPLY_HEALTH_HOT = 13, + POWER_SUPPLY_HEALTH_NO_BATTERY = 14, +}; + +enum { + POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, + POWER_SUPPLY_TECHNOLOGY_NiMH = 1, + POWER_SUPPLY_TECHNOLOGY_LION = 2, + POWER_SUPPLY_TECHNOLOGY_LIPO = 3, + POWER_SUPPLY_TECHNOLOGY_LiFe = 4, + POWER_SUPPLY_TECHNOLOGY_NiCd = 5, + POWER_SUPPLY_TECHNOLOGY_LiMn = 6, +}; + +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1, + POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4, + POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5, +}; + +enum { + POWER_SUPPLY_SCOPE_UNKNOWN = 0, + POWER_SUPPLY_SCOPE_SYSTEM = 1, + POWER_SUPPLY_SCOPE_DEVICE = 2, +}; + +enum power_supply_property { + POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE = 1, + POWER_SUPPLY_PROP_HEALTH = 2, + POWER_SUPPLY_PROP_PRESENT = 3, + POWER_SUPPLY_PROP_ONLINE = 4, + POWER_SUPPLY_PROP_AUTHENTIC = 5, + POWER_SUPPLY_PROP_TECHNOLOGY = 6, + POWER_SUPPLY_PROP_CYCLE_COUNT = 7, + POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, + POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, + POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, + POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, + POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, + POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, + POWER_SUPPLY_PROP_CURRENT_MAX = 16, + POWER_SUPPLY_PROP_CURRENT_NOW = 17, + POWER_SUPPLY_PROP_CURRENT_AVG = 18, + POWER_SUPPLY_PROP_CURRENT_BOOT = 19, + POWER_SUPPLY_PROP_POWER_NOW = 20, + POWER_SUPPLY_PROP_POWER_AVG = 21, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, + POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, + POWER_SUPPLY_PROP_CHARGE_FULL = 24, + POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, + POWER_SUPPLY_PROP_CHARGE_NOW = 26, + POWER_SUPPLY_PROP_CHARGE_AVG = 27, + POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34, + POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD = 35, + POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD = 36, + POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR = 37, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 38, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT = 39, + POWER_SUPPLY_PROP_INPUT_POWER_LIMIT = 40, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 41, + POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 42, + POWER_SUPPLY_PROP_ENERGY_FULL = 43, + POWER_SUPPLY_PROP_ENERGY_EMPTY = 44, + POWER_SUPPLY_PROP_ENERGY_NOW = 45, + POWER_SUPPLY_PROP_ENERGY_AVG = 46, + POWER_SUPPLY_PROP_CAPACITY = 47, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 
48, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 49, + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN = 50, + POWER_SUPPLY_PROP_CAPACITY_LEVEL = 51, + POWER_SUPPLY_PROP_TEMP = 52, + POWER_SUPPLY_PROP_TEMP_MAX = 53, + POWER_SUPPLY_PROP_TEMP_MIN = 54, + POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 55, + POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 56, + POWER_SUPPLY_PROP_TEMP_AMBIENT = 57, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 58, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 59, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 60, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 61, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 62, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 63, + POWER_SUPPLY_PROP_TYPE = 64, + POWER_SUPPLY_PROP_USB_TYPE = 65, + POWER_SUPPLY_PROP_SCOPE = 66, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 67, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 68, + POWER_SUPPLY_PROP_CALIBRATE = 69, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR = 70, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH = 71, + POWER_SUPPLY_PROP_MANUFACTURE_DAY = 72, + POWER_SUPPLY_PROP_MODEL_NAME = 73, + POWER_SUPPLY_PROP_MANUFACTURER = 74, + POWER_SUPPLY_PROP_SERIAL_NUMBER = 75, +}; + +enum power_supply_type { + POWER_SUPPLY_TYPE_UNKNOWN = 0, + POWER_SUPPLY_TYPE_BATTERY = 1, + POWER_SUPPLY_TYPE_UPS = 2, + POWER_SUPPLY_TYPE_MAINS = 3, + POWER_SUPPLY_TYPE_USB = 4, + POWER_SUPPLY_TYPE_USB_DCP = 5, + POWER_SUPPLY_TYPE_USB_CDP = 6, + POWER_SUPPLY_TYPE_USB_ACA = 7, + POWER_SUPPLY_TYPE_USB_TYPE_C = 8, + POWER_SUPPLY_TYPE_USB_PD = 9, + POWER_SUPPLY_TYPE_USB_PD_DRP = 10, + POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, + POWER_SUPPLY_TYPE_WIRELESS = 12, +}; + +enum power_supply_usb_type { + POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, + POWER_SUPPLY_USB_TYPE_SDP = 1, + POWER_SUPPLY_USB_TYPE_DCP = 2, + POWER_SUPPLY_USB_TYPE_CDP = 3, + POWER_SUPPLY_USB_TYPE_ACA = 4, + POWER_SUPPLY_USB_TYPE_C = 5, + POWER_SUPPLY_USB_TYPE_PD = 6, + POWER_SUPPLY_USB_TYPE_PD_DRP = 7, + POWER_SUPPLY_USB_TYPE_PD_PPS = 8, + POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, +}; + +enum power_supply_charge_behaviour { + POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0, + POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE = 1, + POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE = 2, +}; + +union power_supply_propval { + int intval; + const char *strval; +}; + +struct power_supply; + +struct power_supply_desc { + const char *name; + enum power_supply_type type; + u8 charge_behaviours; + u32 usb_types; + const enum power_supply_property *properties; + size_t num_properties; + int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *); + int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *); + int (*property_is_writeable)(struct power_supply *, enum power_supply_property); + void (*external_power_changed)(struct power_supply *); + void (*set_charged)(struct power_supply *); + bool no_thermal; + int use_for_apm; +}; + +struct power_supply_battery_info; + +struct power_supply { + const struct power_supply_desc *desc; + char **supplied_to; + size_t num_supplicants; + char **supplied_from; + size_t num_supplies; + struct device_node *of_node; + void *drv_data; + long: 32; + struct device dev; + struct work_struct changed_work; + struct delayed_work deferred_register_work; + spinlock_t changed_lock; + bool changed; + bool initialized; + bool removing; + atomic_t use_cnt; + struct power_supply_battery_info *battery_info; + struct thermal_zone_device *tzd; + struct thermal_cooling_device *tcd; + long: 32; +}; + +struct power_supply_maintenance_charge_table; + +struct 
power_supply_battery_ocv_table; + +struct power_supply_resistance_temp_table; + +struct power_supply_vbat_ri_table; + +struct power_supply_battery_info { + unsigned int technology; + int energy_full_design_uwh; + int charge_full_design_uah; + int voltage_min_design_uv; + int voltage_max_design_uv; + int tricklecharge_current_ua; + int precharge_current_ua; + int precharge_voltage_max_uv; + int charge_term_current_ua; + int charge_restart_voltage_uv; + int overvoltage_limit_uv; + int constant_charge_current_max_ua; + int constant_charge_voltage_max_uv; + const struct power_supply_maintenance_charge_table *maintenance_charge; + int maintenance_charge_size; + int alert_low_temp_charge_current_ua; + int alert_low_temp_charge_voltage_uv; + int alert_high_temp_charge_current_ua; + int alert_high_temp_charge_voltage_uv; + int factory_internal_resistance_uohm; + int factory_internal_resistance_charging_uohm; + int ocv_temp[20]; + int temp_ambient_alert_min; + int temp_ambient_alert_max; + int temp_alert_min; + int temp_alert_max; + int temp_min; + int temp_max; + const struct power_supply_battery_ocv_table *ocv_table[20]; + int ocv_table_size[20]; + const struct power_supply_resistance_temp_table *resist_table; + int resist_table_size; + const struct power_supply_vbat_ri_table *vbat2ri_discharging; + int vbat2ri_discharging_size; + const struct power_supply_vbat_ri_table *vbat2ri_charging; + int vbat2ri_charging_size; + int bti_resistance_ohm; + int bti_resistance_tolerance; +}; + +struct power_supply_battery_ocv_table { + int ocv; + int capacity; +}; + +struct power_supply_resistance_temp_table { + int temp; + int resistance; +}; + +struct power_supply_vbat_ri_table { + int vbat_uv; + int ri_uohm; +}; + +struct power_supply_maintenance_charge_table { + int charge_current_max_ua; + int charge_voltage_max_uv; + int charge_safety_timer_minutes; +}; + +struct power_supply_attr { + const char *prop_name; + char attr_name[31]; + struct device_attribute dev_attr; + const char * const *text_values; + int text_values_len; +}; + +struct usage_priority { + __u32 usage; + bool global; + unsigned int slot_overwrite; +}; + +typedef bool (*hid_usage_cmp_t)(struct hid_usage *, unsigned int, unsigned int); + +struct ifconf { + int ifc_len; + union { + char *ifcu_buf; + struct ifreq *ifcu_req; + } ifc_ifcu; +}; + +typedef u32 compat_ulong_t; + +typedef u32 compat_caddr_t; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_ifconf { + compat_int_t ifc_len; + compat_caddr_t ifcbuf; +}; + +struct devlink; + +enum devlink_port_type { + DEVLINK_PORT_TYPE_NOTSET = 0, + DEVLINK_PORT_TYPE_AUTO = 1, + DEVLINK_PORT_TYPE_ETH = 2, + DEVLINK_PORT_TYPE_IB = 3, +}; + +struct ib_device; + +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0, + DEVLINK_PORT_FLAVOUR_CPU = 1, + DEVLINK_PORT_FLAVOUR_DSA = 2, + DEVLINK_PORT_FLAVOUR_PCI_PF = 3, + DEVLINK_PORT_FLAVOUR_PCI_VF = 4, + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5, + DEVLINK_PORT_FLAVOUR_UNUSED = 6, + DEVLINK_PORT_FLAVOUR_PCI_SF = 7, +}; + +struct devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct devlink_port_pci_pf_attrs { + u32 controller; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_pci_vf_attrs { + u32 controller; + u16 pf; + u16 vf; + u8 external: 1; +}; + +struct devlink_port_pci_sf_attrs { + u32 controller; + u32 sf; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_attrs { + 
u8 split: 1; + u8 splittable: 1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct devlink_port_phys_attrs phys; + struct devlink_port_pci_pf_attrs pci_pf; + struct devlink_port_pci_vf_attrs pci_vf; + struct devlink_port_pci_sf_attrs pci_sf; + }; +}; + +struct devlink_linecard; + +struct devlink_port_ops; + +struct devlink_rate; + +struct devlink_port { + struct list_head list; + struct list_head region_list; + struct devlink *devlink; + const struct devlink_port_ops *ops; + unsigned int index; + spinlock_t type_lock; + enum devlink_port_type type; + enum devlink_port_type desired_type; + union { + struct { + struct net_device *netdev; + int ifindex; + char ifname[16]; + } type_eth; + struct { + struct ib_device *ibdev; + } type_ib; + }; + struct devlink_port_attrs attrs; + u8 attrs_set: 1; + u8 switch_port: 1; + u8 registered: 1; + u8 initialized: 1; + struct delayed_work type_warn_dw; + struct list_head reporter_list; + struct devlink_rate *devlink_rate; + struct devlink_linecard *linecard; + u32 rel_index; +}; + +enum hwtstamp_flags { + HWTSTAMP_FLAG_BONDED_PHC_INDEX = 1, + HWTSTAMP_FLAG_LAST = 1, + HWTSTAMP_FLAG_MASK = 1, +}; + +struct phylib_stubs { + int (*hwtstamp_get)(struct phy_device *, struct kernel_hwtstamp_config *); + int (*hwtstamp_set)(struct phy_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); +}; + +enum devlink_rate_type { + DEVLINK_RATE_TYPE_LEAF = 0, + DEVLINK_RATE_TYPE_NODE = 1, +}; + +enum devlink_port_fn_state { + DEVLINK_PORT_FN_STATE_INACTIVE = 0, + DEVLINK_PORT_FN_STATE_ACTIVE = 1, +}; + +enum devlink_port_fn_opstate { + DEVLINK_PORT_FN_OPSTATE_DETACHED = 0, + DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1, +}; + +struct devlink_rate { + struct list_head list; + enum devlink_rate_type type; + struct devlink *devlink; + void *priv; + long: 32; + u64 tx_share; + u64 tx_max; + struct devlink_rate *parent; + union { + struct devlink_port *devlink_port; + struct { + char *name; + refcount_t refcnt; + }; + }; + u32 tx_priority; + u32 tx_weight; + long: 32; +}; + +struct devlink_port_ops { + int (*port_split)(struct devlink *, struct devlink_port *, unsigned int, struct netlink_ext_ack *); + int (*port_unsplit)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_type_set)(struct devlink_port *, enum devlink_port_type); + int (*port_del)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); + int (*port_fn_hw_addr_get)(struct devlink_port *, u8 *, int *, struct netlink_ext_ack *); + int (*port_fn_hw_addr_set)(struct devlink_port *, const u8 *, int, struct netlink_ext_ack *); + int (*port_fn_roce_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_roce_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_migratable_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_migratable_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_state_get)(struct devlink_port *, enum devlink_port_fn_state *, enum devlink_port_fn_opstate *, struct netlink_ext_ack *); + int (*port_fn_state_set)(struct devlink_port *, enum devlink_port_fn_state, struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_ipsec_crypto_set)(struct devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); + int (*port_fn_ipsec_packet_set)(struct 
devlink_port *, bool, struct netlink_ext_ack *); + int (*port_fn_max_io_eqs_get)(struct devlink_port *, u32 *, struct netlink_ext_ack *); + int (*port_fn_max_io_eqs_set)(struct devlink_port *, u32, struct netlink_ext_ack *); +}; + +struct napi_gro_cb { + union { + struct { + void *frag0; + unsigned int frag0_len; + }; + struct { + struct sk_buff *last; + long unsigned int age; + }; + }; + int data_offset; + u16 flush; + u16 count; + u16 proto; + u16 pad; + union { + struct { + u16 gro_remcsum_start; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 ip_fixedid: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + }; + struct { + u16 gro_remcsum_start; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 ip_fixedid: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + } zeroed; + }; + __wsum csum; + union { + struct { + u16 network_offset; + u16 inner_network_offset; + }; + u16 network_offsets[2]; + }; +}; + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN = 0, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE = 1, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE = 2, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT = 1, + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE = 2, + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT = 3, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE = 1, + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES = 2, + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY = 3, + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT = 4, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE = 1, + __ETHTOOL_A_TUNNEL_UDP_CNT = 2, + ETHTOOL_A_TUNNEL_UDP_MAX = 1, +}; + +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN = 1, + UDP_TUNNEL_TYPE_GENEVE = 2, + UDP_TUNNEL_TYPE_VXLAN_GPE = 4, +}; + +struct udp_tunnel_info { + short unsigned int type; + sa_family_t sa_family; + __be16 port; + u8 hw_priv; +}; + +enum udp_tunnel_nic_info_flags { + UDP_TUNNEL_NIC_INFO_MAY_SLEEP = 1, + UDP_TUNNEL_NIC_INFO_OPEN_ONLY = 2, + UDP_TUNNEL_NIC_INFO_IPV4_ONLY = 4, + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = 8, +}; + +struct udp_tunnel_nic_shared { + struct udp_tunnel_nic *udp_tunnel_nic_info; + struct list_head devices; +}; + +struct udp_tunnel_nic_ops { + void (*get_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + void (*set_port_priv)(struct net_device *, unsigned int, unsigned int, u8); + void (*add_port)(struct net_device *, struct udp_tunnel_info *); + void (*del_port)(struct net_device *, struct udp_tunnel_info *); + void (*reset_ntf)(struct net_device *); + size_t (*dump_size)(struct net_device *, unsigned int); + int (*dump_write)(struct net_device *, unsigned int, struct sk_buff *); +}; + +typedef const char (* const ethnl_string_array_t)[32]; + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + long unsigned int ifindex; +}; + +struct nf_ct_tcp_flags { + __u8 flags; + __u8 mask; +}; + +union tcp_word_hdr { + struct tcphdr hdr; + __be32 words[5]; +}; + +enum { + TCP_FLAG_CWR = 8388608, + TCP_FLAG_ECE = 4194304, + TCP_FLAG_URG = 2097152, + TCP_FLAG_ACK = 1048576, + TCP_FLAG_PSH = 524288, + TCP_FLAG_RST = 262144, + TCP_FLAG_SYN = 131072, + TCP_FLAG_FIN = 65536, + TCP_RESERVED_BITS = 251658240, + TCP_DATA_OFFSET = 4026531840, +}; + +struct nf_conn_synproxy { + u32 isn; + u32 its; + u32 tsoff; +}; 
+ +enum nf_ct_tcp_action { + NFCT_TCP_IGNORE = 0, + NFCT_TCP_INVALID = 1, + NFCT_TCP_ACCEPT = 2, +}; + +enum tcp_bit_set { + TCP_SYN_SET = 0, + TCP_SYNACK_SET = 1, + TCP_FIN_SET = 2, + TCP_ACK_SET = 3, + TCP_RST_SET = 4, + TCP_NONE_SET = 5, +}; + +enum ctattr_protoinfo { + CTA_PROTOINFO_UNSPEC = 0, + CTA_PROTOINFO_TCP = 1, + CTA_PROTOINFO_DCCP = 2, + CTA_PROTOINFO_SCTP = 3, + __CTA_PROTOINFO_MAX = 4, +}; + +enum ctattr_protoinfo_tcp { + CTA_PROTOINFO_TCP_UNSPEC = 0, + CTA_PROTOINFO_TCP_STATE = 1, + CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2, + CTA_PROTOINFO_TCP_WSCALE_REPLY = 3, + CTA_PROTOINFO_TCP_FLAGS_ORIGINAL = 4, + CTA_PROTOINFO_TCP_FLAGS_REPLY = 5, + __CTA_PROTOINFO_TCP_MAX = 6, +}; + +enum nft_dynset_ops { + NFT_DYNSET_OP_ADD = 0, + NFT_DYNSET_OP_UPDATE = 1, + NFT_DYNSET_OP_DELETE = 2, +}; + +enum nft_dynset_flags { + NFT_DYNSET_F_INV = 1, + NFT_DYNSET_F_EXPR = 2, +}; + +enum nft_dynset_attributes { + NFTA_DYNSET_UNSPEC = 0, + NFTA_DYNSET_SET_NAME = 1, + NFTA_DYNSET_SET_ID = 2, + NFTA_DYNSET_OP = 3, + NFTA_DYNSET_SREG_KEY = 4, + NFTA_DYNSET_SREG_DATA = 5, + NFTA_DYNSET_TIMEOUT = 6, + NFTA_DYNSET_EXPR = 7, + NFTA_DYNSET_PAD = 8, + NFTA_DYNSET_FLAGS = 9, + NFTA_DYNSET_EXPRESSIONS = 10, + __NFTA_DYNSET_MAX = 11, +}; + +struct nft_dynset { + struct nft_set *set; + struct nft_set_ext_tmpl tmpl; + enum nft_dynset_ops op: 8; + u8 sreg_key; + u8 sreg_data; + bool invert; + bool expr; + u8 num_exprs; + long: 32; + u64 timeout; + struct nft_expr *expr_array[2]; + struct nft_set_binding binding; +}; + +struct xt_action_param; + +struct xt_mtchk_param; + +struct xt_mtdtor_param; + +struct xt_match { + struct list_head list; + const char name[29]; + u_int8_t revision; + bool (*match)(const struct sk_buff *, struct xt_action_param *); + int (*checkentry)(const struct xt_mtchk_param *); + void (*destroy)(const struct xt_mtdtor_param *); + struct module *me; + const char *table; + unsigned int matchsize; + unsigned int usersize; + unsigned int hooks; + short unsigned int proto; + short unsigned int family; +}; + +struct xt_tgchk_param; + +struct xt_tgdtor_param; + +struct xt_target { + struct list_head list; + const char name[29]; + u_int8_t revision; + unsigned int (*target)(struct sk_buff *, const struct xt_action_param *); + int (*checkentry)(const struct xt_tgchk_param *); + void (*destroy)(const struct xt_tgdtor_param *); + struct module *me; + const char *table; + unsigned int targetsize; + unsigned int usersize; + unsigned int hooks; + short unsigned int proto; + short unsigned int family; +}; + +struct xt_action_param { + union { + const struct xt_match *match; + const struct xt_target *target; + }; + union { + const void *matchinfo; + const void *targinfo; + }; + const struct nf_hook_state *state; + unsigned int thoff; + u16 fragoff; + bool hotdrop; +}; + +struct xt_mtchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_match *match; + void *matchinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +struct xt_mtdtor_param { + struct net *net; + const struct xt_match *match; + void *matchinfo; + u_int8_t family; +}; + +struct xt_tgchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_target *target; + void *targinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +struct xt_tgdtor_param { + struct net *net; + const struct xt_target *target; + void *targinfo; + u_int8_t family; +}; + +struct xt_cgroup_info_v0 { + __u32 id; + __u32 invert; +}; + +struct xt_cgroup_info_v1 
{ + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + char path[4096]; + __u32 classid; + void *priv; + long: 32; +}; + +struct xt_cgroup_info_v2 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + union { + char path[512]; + __u32 classid; + }; + long: 32; + void *priv; + long: 32; +}; + +enum tcp_metric_index { + TCP_METRIC_RTT = 0, + TCP_METRIC_RTTVAR = 1, + TCP_METRIC_SSTHRESH = 2, + TCP_METRIC_CWND = 3, + TCP_METRIC_REORDERING = 4, + TCP_METRIC_RTT_US = 5, + TCP_METRIC_RTTVAR_US = 6, + __TCP_METRIC_MAX = 7, +}; + +enum { + TCP_METRICS_ATTR_UNSPEC = 0, + TCP_METRICS_ATTR_ADDR_IPV4 = 1, + TCP_METRICS_ATTR_ADDR_IPV6 = 2, + TCP_METRICS_ATTR_AGE = 3, + TCP_METRICS_ATTR_TW_TSVAL = 4, + TCP_METRICS_ATTR_TW_TS_STAMP = 5, + TCP_METRICS_ATTR_VALS = 6, + TCP_METRICS_ATTR_FOPEN_MSS = 7, + TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, + TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, + TCP_METRICS_ATTR_FOPEN_COOKIE = 10, + TCP_METRICS_ATTR_SADDR_IPV4 = 11, + TCP_METRICS_ATTR_SADDR_IPV6 = 12, + TCP_METRICS_ATTR_PAD = 13, + __TCP_METRICS_ATTR_MAX = 14, +}; + +enum { + TCP_METRICS_CMD_UNSPEC = 0, + TCP_METRICS_CMD_GET = 1, + TCP_METRICS_CMD_DEL = 2, + __TCP_METRICS_CMD_MAX = 3, +}; + +struct ipv4_addr_key { + __be32 addr; + int vif; +}; + +struct inetpeer_addr { + union { + struct ipv4_addr_key a4; + struct in6_addr a6; + u32 key[4]; + }; + __u16 family; +}; + +struct tcp_fastopen_metrics { + u16 mss; + u16 syn_loss: 10; + u16 try_exp: 2; + long unsigned int last_syn_loss; + struct tcp_fastopen_cookie cookie; +}; + +struct tcp_metrics_block { + struct tcp_metrics_block *tcpm_next; + struct net *tcpm_net; + struct inetpeer_addr tcpm_saddr; + struct inetpeer_addr tcpm_daddr; + long unsigned int tcpm_stamp; + u32 tcpm_lock; + u32 tcpm_vals[5]; + long: 32; + struct tcp_fastopen_metrics tcpm_fastopen; + struct callback_head callback_head; +}; + +struct tcpm_hash_bucket { + struct tcp_metrics_block *chain; +}; + +struct sk_psock_progs { + struct bpf_prog *msg_parser; + struct bpf_prog *stream_parser; + struct bpf_prog *stream_verdict; + struct bpf_prog *skb_verdict; + struct bpf_link *msg_parser_link; + struct bpf_link *stream_parser_link; + struct bpf_link *stream_verdict_link; + struct bpf_link *skb_verdict_link; +}; + +struct sk_psock_work_state { + u32 len; + u32 off; +}; + +struct sk_msg; + +struct sk_psock { + struct sock *sk; + struct sock *sk_redir; + u32 apply_bytes; + u32 cork_bytes; + u32 eval; + bool redir_ingress; + struct sk_msg *cork; + struct sk_psock_progs progs; + struct sk_buff_head ingress_skb; + struct list_head ingress_msg; + spinlock_t ingress_lock; + long unsigned int state; + struct list_head link; + spinlock_t link_lock; + refcount_t refcnt; + void (*saved_unhash)(struct sock *); + void (*saved_destroy)(struct sock *); + void (*saved_close)(struct sock *, long int); + void (*saved_write_space)(struct sock *); + void (*saved_data_ready)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + struct proto *sk_proto; + struct mutex work_mutex; + struct sk_psock_work_state work_state; + struct delayed_work work; + struct sock *sk_pair; + struct rcu_work rwork; +}; + +enum __sk_action { + __SK_DROP = 0, + __SK_PASS = 1, + __SK_REDIRECT = 2, + __SK_NONE = 3, +}; + +struct sk_msg_sg { + u32 start; + u32 curr; + u32 end; + u32 size; + u32 copybreak; + long unsigned int copy[1]; + struct scatterlist data[19]; +}; + +struct sk_msg { + struct sk_msg_sg sg; + void *data; + void *data_end; + u32 apply_bytes; + u32 
cork_bytes; + u32 flags; + struct sk_buff *skb; + struct sock *sk_redir; + struct sock *sk; + struct list_head list; +}; + +enum sk_psock_state_bits { + SK_PSOCK_TX_ENABLED = 0, + SK_PSOCK_RX_STRP_ENABLED = 1, +}; + +struct tls_crypto_info { + __u16 version; + __u16 cipher_type; +}; + +struct tls12_crypto_info_aes_gcm_128 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_aes_gcm_256 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[32]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_chacha20_poly1305 { + struct tls_crypto_info info; + unsigned char iv[12]; + unsigned char key[32]; + unsigned char salt[0]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_gcm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_sm4_ccm { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tx_work { + struct delayed_work work; + struct sock *sk; +}; + +struct tls_rec; + +struct tls_sw_context_tx { + struct crypto_aead *aead_send; + struct crypto_wait async_wait; + struct tx_work tx_work; + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; + u8 async_capable: 1; + long unsigned int tx_bitmask; +}; + +struct tls_prot_info { + u16 version; + u16 cipher_type; + u16 prepend_size; + u16 tag_size; + u16 overhead_size; + u16 iv_size; + u16 salt_size; + u16 rec_seq_size; + u16 aad_size; + u16 tail_size; +}; + +struct cipher_context { + char iv[20]; + char rec_seq[8]; +}; + +union tls_crypto_context { + struct tls_crypto_info info; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; + struct tls12_crypto_info_sm4_gcm sm4_gcm; + struct tls12_crypto_info_sm4_ccm sm4_ccm; + }; +}; + +struct tls_context { + struct tls_prot_info prot_info; + u8 tx_conf: 3; + u8 rx_conf: 3; + u8 zerocopy_sendfile: 1; + u8 rx_no_pad: 1; + int (*push_pending_record)(struct sock *, int); + void (*sk_write_space)(struct sock *); + void *priv_ctx_tx; + void *priv_ctx_rx; + struct net_device *netdev; + struct cipher_context tx; + struct cipher_context rx; + struct scatterlist *partially_sent_record; + u16 partially_sent_offset; + bool splicing_pages; + bool pending_open_record_frags; + struct mutex tx_lock; + long unsigned int flags; + struct proto *sk_proto; + struct sock *sk; + void (*sk_destruct)(struct sock *); + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; + struct list_head list; + refcount_t refcount; + struct callback_head rcu; +}; + +enum { + TCP_BPF_IPV4 = 0, + TCP_BPF_IPV6 = 1, + TCP_BPF_NUM_PROTS = 2, +}; + +enum { + TCP_BPF_BASE = 0, + TCP_BPF_TX = 1, + TCP_BPF_RX = 2, + TCP_BPF_TXRX = 3, + TCP_BPF_NUM_CFGS = 4, +}; + +struct ipv6_rpl_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u32 cmpri: 4; + __u32 cmpre: 4; + __u32 pad: 4; + __u32 reserved: 20; + union { + struct { + struct {} __empty_addr; + struct in6_addr addr[0]; + }; + struct { + struct {} __empty_data; + __u8 data[0]; + }; + } segments; +}; + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + const char *path; + char **argv; + 
char **envp; + int wait; + int retval; + int (*init)(struct subprocess_info *, struct cred *); + void (*cleanup)(struct subprocess_info *); + void *data; +}; + +struct netlink_kernel_cfg { + unsigned int groups; + unsigned int flags; + void (*input)(struct sk_buff *); + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, long unsigned int *); +}; + +struct uevent_sock { + struct list_head list; + struct sock *sk; +}; + +struct elf32_note { + Elf32_Word n_namesz; + Elf32_Word n_descsz; + Elf32_Word n_type; +}; + +enum { + PROC_ROOT_INO = 1, + PROC_IPC_INIT_INO = 4026531839, + PROC_UTS_INIT_INO = 4026531838, + PROC_USER_INIT_INO = 4026531837, + PROC_PID_INIT_INO = 4026531836, + PROC_CGROUP_INIT_INO = 4026531835, + PROC_TIME_INIT_INO = 4026531834, +}; + +struct taint_flag { + char c_true; + char c_false; + bool module; + const char *desc; +}; + +enum ftrace_dump_mode { + DUMP_NONE = 0, + DUMP_ALL = 1, + DUMP_ORIG = 2, + DUMP_PARAM = 3, +}; + +enum ctx_state { + CT_STATE_DISABLED = -1, + CT_STATE_KERNEL = 0, + CT_STATE_IDLE = 1, + CT_STATE_USER = 2, + CT_STATE_GUEST = 3, + CT_STATE_MAX = 4, +}; + +struct context_tracking { + atomic_t state; + long int nesting; + long int nmi_nesting; +}; + +enum kmsg_dump_reason { + KMSG_DUMP_UNDEF = 0, + KMSG_DUMP_PANIC = 1, + KMSG_DUMP_OOPS = 2, + KMSG_DUMP_EMERG = 3, + KMSG_DUMP_SHUTDOWN = 4, + KMSG_DUMP_MAX = 5, +}; + +enum reboot_mode { + REBOOT_UNDEFINED = -1, + REBOOT_COLD = 0, + REBOOT_WARM = 1, + REBOOT_HARD = 2, + REBOOT_SOFT = 3, + REBOOT_GPIO = 4, +}; + +enum con_flush_mode { + CONSOLE_FLUSH_PENDING = 0, + CONSOLE_REPLAY_ALL = 1, +}; + +enum error_detector { + ERROR_DETECTOR_KFENCE = 0, + ERROR_DETECTOR_KASAN = 1, + ERROR_DETECTOR_WARN = 2, +}; + +struct warn_args { + const char *fmt; + va_list args; +}; + +struct kprobe_insn_cache { + struct mutex mutex; + void * (*alloc)(); + void (*free)(void *); + const char *sym; + struct list_head pages; + size_t insn_size; + int nr_garbage; +}; + +enum pm_qos_req_action { + PM_QOS_ADD_REQ = 0, + PM_QOS_UPDATE_REQ = 1, + PM_QOS_REMOVE_REQ = 2, +}; + +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source; + struct module *target; +}; + +typedef __kernel_timer_t timer_t; + +typedef __u64 timeu64_t; + +struct itimerspec64 { + struct timespec64 it_interval; + struct timespec64 it_value; +}; + +struct __kernel_timex_timeval { + __kernel_time64_t tv_sec; + long long int tv_usec; +}; + +struct __kernel_timex { + unsigned int modes; + long: 32; + long long int offset; + long long int freq; + long long int maxerror; + long long int esterror; + int status; + long: 32; + long long int constant; + long long int precision; + long long int tolerance; + struct __kernel_timex_timeval time; + long long int tick; + long long int ppsfreq; + long long int jitter; + int shift; + long: 32; + long long int stabil; + long long int jitcnt; + long long int calcnt; + long long int errcnt; + long long int stbcnt; + int tai; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +typedef struct { + spinlock_t *lock; + long unsigned int flags; +} class_spinlock_irqsave_t; + +struct sigqueue { + struct list_head list; + int flags; + kernel_siginfo_t info; + struct ucounts *ucounts; +}; + +enum alarmtimer_type { + ALARM_REALTIME = 0, + ALARM_BOOTTIME = 1, + ALARM_NUMTYPE = 2, + ALARM_REALTIME_FREEZER = 3, + ALARM_BOOTTIME_FREEZER = 4, +}; + +struct alarm { 
+ struct timerqueue_node node; + struct hrtimer timer; + void (*function)(struct alarm *, ktime_t); + enum alarmtimer_type type; + int state; + void *data; +}; + +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct pid *pid; + struct list_head elist; + bool firing; + bool nanosleep; + struct task_struct *handling; +}; + +struct k_clock; + +struct k_itimer { + struct hlist_node list; + struct hlist_node ignored_list; + struct hlist_node t_hash; + spinlock_t it_lock; + const struct k_clock *kclock; + clockid_t it_clock; + timer_t it_id; + int it_status; + bool it_sig_periodic; + s64 it_overrun; + s64 it_overrun_last; + unsigned int it_signal_seq; + unsigned int it_sigqueue_seq; + int it_sigev_notify; + enum pid_type it_pid_type; + ktime_t it_interval; + struct signal_struct *it_signal; + union { + struct pid *it_pid; + struct task_struct *it_process; + }; + struct sigqueue sigq; + rcuref_t rcuref; + long: 32; + union { + struct { + struct hrtimer timer; + } real; + struct cpu_timer cpu; + struct { + struct alarm alarmtimer; + } alarm; + } it; + struct callback_head rcu; +}; + +struct k_clock { + int (*clock_getres)(const clockid_t, struct timespec64 *); + int (*clock_set)(const clockid_t, const struct timespec64 *); + int (*clock_get_timespec)(const clockid_t, struct timespec64 *); + ktime_t (*clock_get_ktime)(const clockid_t); + int (*clock_adj)(const clockid_t, struct __kernel_timex *); + int (*timer_create)(struct k_itimer *); + int (*nsleep)(const clockid_t, int, const struct timespec64 *); + int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); + int (*timer_del)(struct k_itimer *); + void (*timer_get)(struct k_itimer *, struct itimerspec64 *); + void (*timer_rearm)(struct k_itimer *); + s64 (*timer_forward)(struct k_itimer *, ktime_t); + ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); + int (*timer_try_to_cancel)(struct k_itimer *); + void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); + void (*timer_wait_running)(struct k_itimer *); +}; + +struct rtc_wkalrm { + unsigned char enabled; + unsigned char pending; + struct rtc_time time; +}; + +struct rtc_param { + __u64 param; + union { + __u64 uvalue; + __s64 svalue; + __u64 ptr; + }; + __u32 index; + __u32 __pad; +}; + +struct rtc_class_ops { + int (*ioctl)(struct device *, unsigned int, long unsigned int); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*alarm_irq_enable)(struct device *, unsigned int); + int (*read_offset)(struct device *, long int *); + int (*set_offset)(struct device *, long int); + int (*param_get)(struct device *, struct rtc_param *); + int (*param_set)(struct device *, struct rtc_param *); +}; + +struct rtc_device; + +struct rtc_timer { + struct timerqueue_node node; + ktime_t period; + void (*func)(struct rtc_device *); + struct rtc_device *rtc; + int enabled; + long: 32; +}; + +struct rtc_device { + struct device dev; + struct module *owner; + int id; + const struct rtc_class_ops *ops; + struct mutex ops_lock; + struct cdev char_dev; + long unsigned int flags; + long unsigned int irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + int irq_freq; + int max_user_freq; + struct timerqueue_head timerqueue; + struct rtc_timer aie_timer; + struct 
rtc_timer uie_rtctimer; + struct hrtimer pie_timer; + int pie_enabled; + struct work_struct irqwork; + long unsigned int set_offset_nsec; + long unsigned int features[1]; + long: 32; + time64_t range_min; + timeu64_t range_max; + timeu64_t alarm_offset_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; + long: 32; +}; + +struct platform_driver { + int (*probe)(struct platform_device *); + union { + void (*remove)(struct platform_device *); + void (*remove_new)(struct platform_device *); + }; + void (*shutdown)(struct platform_device *); + int (*suspend)(struct platform_device *, pm_message_t); + int (*resume)(struct platform_device *); + struct device_driver driver; + const struct platform_device_id *id_table; + bool prevent_deferred_probe; + bool driver_managed_dma; +}; + +struct trace_event_raw_alarmtimer_suspend { + struct trace_entry ent; + s64 expires; + unsigned char alarm_type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_alarm_class { + struct trace_entry ent; + void *alarm; + unsigned char alarm_type; + s64 expires; + s64 now; + char __data[0]; +}; + +struct trace_event_data_offsets_alarmtimer_suspend {}; + +struct trace_event_data_offsets_alarm_class {}; + +typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int); + +typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t); + +struct alarm_base { + spinlock_t lock; + struct timerqueue_head timerqueue; + ktime_t (*get_ktime)(); + void (*get_timespec)(struct timespec64 *); + clockid_t base_clockid; +}; + +typedef int (*eventfs_callback)(const char *, umode_t *, void **, const struct file_operations **); + +typedef void (*eventfs_release)(const char *, void *); + +struct eventfs_entry { + const char *name; + eventfs_callback callback; + eventfs_release release; +}; + +enum { + TRACE_PIDS = 1, + TRACE_NO_PIDS = 2, +}; + +enum { + TRACE_ARRAY_FL_GLOBAL = 1, + TRACE_ARRAY_FL_BOOT = 2, +}; + +struct trace_parser { + bool cont; + char *buffer; + unsigned int idx; + unsigned int size; +}; + +enum trace_iterator_flags { + TRACE_ITER_PRINT_PARENT = 1, + TRACE_ITER_SYM_OFFSET = 2, + TRACE_ITER_SYM_ADDR = 4, + TRACE_ITER_VERBOSE = 8, + TRACE_ITER_RAW = 16, + TRACE_ITER_HEX = 32, + TRACE_ITER_BIN = 64, + TRACE_ITER_BLOCK = 128, + TRACE_ITER_FIELDS = 256, + TRACE_ITER_PRINTK = 512, + TRACE_ITER_ANNOTATE = 1024, + TRACE_ITER_USERSTACKTRACE = 2048, + TRACE_ITER_SYM_USEROBJ = 4096, + TRACE_ITER_PRINTK_MSGONLY = 8192, + TRACE_ITER_CONTEXT_INFO = 16384, + TRACE_ITER_LATENCY_FMT = 32768, + TRACE_ITER_RECORD_CMD = 65536, + TRACE_ITER_RECORD_TGID = 131072, + TRACE_ITER_OVERWRITE = 262144, + TRACE_ITER_STOP_ON_FREE = 524288, + TRACE_ITER_IRQ_INFO = 1048576, + TRACE_ITER_MARKERS = 2097152, + TRACE_ITER_EVENT_FORK = 4194304, + TRACE_ITER_TRACE_PRINTK = 8388608, + TRACE_ITER_PAUSE_ON_TRACE = 16777216, + TRACE_ITER_HASH_PTR = 33554432, + TRACE_ITER_STACKTRACE = 67108864, +}; + +struct module_string { + struct list_head next; + struct module *module; + char *str; +}; + +enum { + FORMAT_HEADER = 1, + FORMAT_FIELD_SEPERATOR = 2, + FORMAT_PRINTFMT = 3, +}; + +struct boot_triggers { + const char *event; + char *trigger; +}; + +struct bpf_tramp_link; + +struct bpf_tramp_links { + struct bpf_tramp_link *links[38]; + int nr_links; +}; + +struct bpf_tramp_link { + struct bpf_link link; + struct hlist_node tramp_hlist; + u64 cookie; +}; + 
+struct bpf_tramp_run_ctx; + +typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *, struct bpf_tramp_run_ctx *); + +struct bpf_tramp_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; + struct bpf_run_ctx *saved_run_ctx; + long: 32; +}; + +typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *, u64, struct bpf_tramp_run_ctx *); + +struct bpf_attach_target_info { + struct btf_func_model fmodel; + long int tgt_addr; + struct module *tgt_mod; + const char *tgt_name; + const struct btf_type *tgt_type; +}; + +enum bpf_text_poke_type { + BPF_MOD_CALL = 0, + BPF_MOD_JUMP = 1, +}; + +enum perf_record_ksymbol_type { + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, + PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + PERF_RECORD_KSYMBOL_TYPE_OOL = 2, + PERF_RECORD_KSYMBOL_TYPE_MAX = 3, +}; + +typedef void (*xa_update_node_t)(struct xa_node *); + +struct xa_state { + struct xarray *xa; + long unsigned int xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; + struct list_lru *xa_lru; +}; + +enum { + IOPRIO_CLASS_NONE = 0, + IOPRIO_CLASS_RT = 1, + IOPRIO_CLASS_BE = 2, + IOPRIO_CLASS_IDLE = 3, + IOPRIO_CLASS_INVALID = 7, +}; + +enum { + IOPRIO_HINT_NONE = 0, + IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1, + IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2, + IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3, + IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4, + IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5, + IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6, + IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7, +}; + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 524288, + AOP_TRUNCATED_PAGE = 524289, +}; + +struct wait_page_key { + struct folio *folio; + int bit_nr; + int page_match; +}; + +struct cachestat_range { + __u64 off; + __u64 len; +}; + +struct cachestat { + __u64 nr_cache; + __u64 nr_dirty; + __u64 nr_writeback; + __u64 nr_evicted; + __u64 nr_recently_evicted; +}; + +struct trace_event_raw_mm_filemap_op_page_cache { + struct trace_entry ent; + long unsigned int pfn; + long unsigned int i_ino; + long unsigned int index; + dev_t s_dev; + unsigned char order; + char __data[0]; +}; + +struct trace_event_raw_mm_filemap_op_page_cache_range { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + long unsigned int index; + long unsigned int last_index; + char __data[0]; +}; + +struct trace_event_raw_mm_filemap_fault { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_filemap_set_wb_err { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + errseq_t errseq; + char __data[0]; +}; + +struct trace_event_raw_file_check_and_advance_wb_err { + struct trace_entry ent; + struct file *file; + long unsigned int i_ino; + dev_t s_dev; + errseq_t old; + errseq_t new; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_filemap_op_page_cache {}; + +struct trace_event_data_offsets_mm_filemap_op_page_cache_range {}; + +struct trace_event_data_offsets_mm_filemap_fault {}; + +struct trace_event_data_offsets_filemap_set_wb_err {}; + +struct trace_event_data_offsets_file_check_and_advance_wb_err {}; + +typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct folio *); + +typedef void (*btf_trace_mm_filemap_get_pages)(void *, struct address_space *, long unsigned int, long unsigned int); + +typedef void 
(*btf_trace_mm_filemap_map_pages)(void *, struct address_space *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_filemap_fault)(void *, struct address_space *, long unsigned int); + +typedef void (*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t); + +typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t); + +enum behavior { + EXCLUSIVE = 0, + SHARED = 1, + DROP = 2, +}; + +enum zpool_mapmode { + ZPOOL_MM_RW = 0, + ZPOOL_MM_RO = 1, + ZPOOL_MM_WO = 2, + ZPOOL_MM_DEFAULT = 0, +}; + +struct zpool_driver { + char *type; + struct module *owner; + atomic_t refcount; + struct list_head list; + void * (*create)(const char *, gfp_t); + void (*destroy)(void *); + bool malloc_support_movable; + int (*malloc)(void *, size_t, gfp_t, long unsigned int *); + void (*free)(void *, long unsigned int); + bool sleep_mapped; + void * (*map)(void *, long unsigned int, enum zpool_mapmode); + void (*unmap)(void *, long unsigned int); + u64 (*total_pages)(void *); +}; + +struct zbud_pool { + spinlock_t lock; + union { + struct list_head buddied; + struct list_head unbuddied[63]; + }; + long: 32; + u64 pages_nr; +}; + +struct zbud_header { + struct list_head buddy; + unsigned int first_chunks; + unsigned int last_chunks; +}; + +enum buddy { + FIRST = 0, + LAST = 1, +}; + +typedef struct fd class_fd_pos_t; + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + short unsigned int d_reclen; + unsigned char d_type; + char d_name[0]; + long: 32; +}; + +struct old_linux_dirent { + long unsigned int d_ino; + long unsigned int d_offset; + short unsigned int d_namlen; + char d_name[0]; +}; + +struct readdir_callback { + struct dir_context ctx; + struct old_linux_dirent *dirent; + int result; +}; + +struct linux_dirent { + long unsigned int d_ino; + long unsigned int d_off; + short unsigned int d_reclen; + char d_name[0]; +}; + +struct getdents_callback { + struct dir_context ctx; + struct linux_dirent *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct getdents_callback64 { + struct dir_context ctx; + struct linux_dirent64 *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct mnt_idmap { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + refcount_t count; +}; + +struct file_handle { + __u32 handle_bytes; + int handle_type; + unsigned char f_handle[0]; +}; + +enum iter_type { + ITER_UBUF = 0, + ITER_IOVEC = 1, + ITER_BVEC = 2, + ITER_KVEC = 3, + ITER_FOLIOQ = 4, + ITER_XARRAY = 5, + ITER_DISCARD = 6, +}; + +struct iomap_folio_ops; + +struct iomap { + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + struct block_device *bdev; + struct dax_device *dax_dev; + void *inline_data; + void *private; + const struct iomap_folio_ops *folio_ops; + u64 validity_cookie; +}; + +struct iomap_iter; + +struct iomap_folio_ops { + struct folio * (*get_folio)(struct iomap_iter *, loff_t, unsigned int); + void (*put_folio)(struct inode *, loff_t, unsigned int, struct folio *); + bool (*iomap_valid)(struct inode *, const struct iomap *); +}; + +struct iomap_iter { + struct inode *inode; + long: 32; + loff_t pos; + u64 len; + s64 processed; + unsigned int flags; + long: 32; + struct iomap iomap; + struct iomap srcmap; + void *private; + long: 32; +}; + +struct iomap_ops { + int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap *, struct iomap *); + int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap *); +}; + +struct iomap_dio_ops { + int 
(*end_io)(struct kiocb *, ssize_t, int, unsigned int); + void (*submit_io)(const struct iomap_iter *, struct bio *, loff_t); + struct bio_set *bio_set; +}; + +struct iomap_dio { + struct kiocb *iocb; + const struct iomap_dio_ops *dops; + loff_t i_size; + loff_t size; + atomic_t ref; + unsigned int flags; + int error; + size_t done_before; + bool wait_for_completion; + union { + struct { + struct iov_iter *iter; + struct task_struct *waiter; + } submit; + struct { + struct work_struct work; + } aio; + }; + long: 32; +}; + +enum kernfs_node_flag { + KERNFS_ACTIVATED = 16, + KERNFS_NS = 32, + KERNFS_HAS_SEQ_SHOW = 64, + KERNFS_HAS_MMAP = 128, + KERNFS_LOCKDEP = 256, + KERNFS_HIDDEN = 512, + KERNFS_SUICIDAL = 1024, + KERNFS_SUICIDED = 2048, + KERNFS_EMPTY_DIR = 4096, + KERNFS_HAS_RELEASE = 8192, + KERNFS_REMOVING = 16384, +}; + +enum { + EXT4_STATE_NEW = 0, + EXT4_STATE_XATTR = 1, + EXT4_STATE_NO_EXPAND = 2, + EXT4_STATE_DA_ALLOC_CLOSE = 3, + EXT4_STATE_EXT_MIGRATE = 4, + EXT4_STATE_NEWENTRY = 5, + EXT4_STATE_MAY_INLINE_DATA = 6, + EXT4_STATE_EXT_PRECACHED = 7, + EXT4_STATE_LUSTRE_EA_INODE = 8, + EXT4_STATE_VERITY_IN_PROGRESS = 9, + EXT4_STATE_FC_COMMITTING = 10, + EXT4_STATE_ORPHAN_FILE = 11, +}; + +struct ext4_extent { + __le32 ee_block; + __le16 ee_len; + __le16 ee_start_hi; + __le32 ee_start_lo; +}; + +struct ext4_extent_idx { + __le32 ei_block; + __le32 ei_leaf_lo; + __le16 ei_leaf_hi; + __u16 ei_unused; +}; + +struct ext4_extent_header { + __le16 eh_magic; + __le16 eh_entries; + __le16 eh_max; + __le16 eh_depth; + __le32 eh_generation; +}; + +struct ext4_ext_path { + ext4_fsblk_t p_block; + __u16 p_depth; + __u16 p_maxdepth; + struct ext4_extent *p_ext; + struct ext4_extent_idx *p_idx; + struct ext4_extent_header *p_hdr; + struct buffer_head *p_bh; + long: 32; +}; + +struct migrate_struct { + ext4_lblk_t first_block; + ext4_lblk_t last_block; + ext4_lblk_t curr_block; + long: 32; + ext4_fsblk_t first_pblock; + ext4_fsblk_t last_pblock; +}; + +struct ramfs_mount_opts { + umode_t mode; +}; + +struct ramfs_fs_info { + struct ramfs_mount_opts mount_opts; +}; + +enum ramfs_param { + Opt_mode = 0, +}; + +enum inode_i_mutex_lock_class { + I_MUTEX_NORMAL = 0, + I_MUTEX_PARENT = 1, + I_MUTEX_CHILD = 2, + I_MUTEX_XATTR = 3, + I_MUTEX_NONDIR2 = 4, + I_MUTEX_PARENT2 = 5, +}; + +enum fsnotify_iter_type { + FSNOTIFY_ITER_TYPE_INODE = 0, + FSNOTIFY_ITER_TYPE_VFSMOUNT = 1, + FSNOTIFY_ITER_TYPE_SB = 2, + FSNOTIFY_ITER_TYPE_PARENT = 3, + FSNOTIFY_ITER_TYPE_INODE2 = 4, + FSNOTIFY_ITER_TYPE_COUNT = 5, +}; + +enum { + TRACEFS_EVENT_INODE = 2, + TRACEFS_GID_PERM_SET = 4, + TRACEFS_UID_PERM_SET = 8, + TRACEFS_INSTANCE_INODE = 16, +}; + +struct tracefs_inode { + struct inode vfs_inode; + struct list_head list; + long unsigned int flags; + void *private; +}; + +struct eventfs_attr { + int mode; + kuid_t uid; + kgid_t gid; +}; + +struct eventfs_inode { + union { + struct list_head list; + struct callback_head rcu; + }; + struct list_head children; + const struct eventfs_entry *entries; + const char *name; + struct eventfs_attr *entry_attrs; + void *data; + struct eventfs_attr attr; + struct kref kref; + unsigned int is_freed: 1; + unsigned int is_events: 1; + unsigned int nr_entries: 30; + unsigned int ino; +}; + +struct tracefs_dir_ops { + int (*mkdir)(const char *); + int (*rmdir)(const char *); +}; + +struct tracefs_fs_info { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +enum { + Opt_uid___2 = 0, + Opt_gid___2 = 1, + Opt_mode___2 = 2, +}; + +struct btrfs_free_space_entry { + __le64 
offset; + __le64 bytes; + __u8 type; +} __attribute__((packed)); + +struct btrfs_free_space_header { + struct btrfs_disk_key location; + __le64 generation; + __le64 num_entries; + __le64 num_bitmaps; +} __attribute__((packed)); + +struct btrfs_truncate_control { + struct btrfs_inode *inode; + long: 32; + u64 new_size; + u64 extents_found; + u64 last_size; + u64 sub_bytes; + u64 ino; + u32 min_type; + bool skip_ref_updates; + bool clear_extent_range; +}; + +struct btrfs_trim_range { + u64 start; + u64 bytes; + struct list_head list; +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +struct crypto_sig { + struct crypto_tfm base; +}; + +struct sig_alg { + int (*sign)(struct crypto_sig *, const void *, unsigned int, void *, unsigned int); + int (*verify)(struct crypto_sig *, const void *, unsigned int, const void *, unsigned int); + int (*set_pub_key)(struct crypto_sig *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_sig *, const void *, unsigned int); + unsigned int (*key_size)(struct crypto_sig *); + unsigned int (*digest_size)(struct crypto_sig *); + unsigned int (*max_size)(struct crypto_sig *); + int (*init)(struct crypto_sig *); + void (*exit)(struct crypto_sig *); + long: 32; + struct crypto_alg base; +}; + +struct sig_instance { + void (*free)(struct sig_instance *); + long: 32; + union { + struct { + char head[40]; + struct crypto_instance base; + }; + struct sig_alg alg; + }; +}; + +struct crypto_sig_spawn { + struct crypto_spawn base; +}; + +struct parsed_partitions { + struct gendisk *disk; + char name[32]; + struct { + sector_t from; + sector_t size; + int flags; + bool has_info; + struct partition_meta_info info; + long: 32; + } *parts; + int next; + int limit; + bool access_beyond_eod; + char *pp_buf; +}; + +typedef struct { + struct folio *v; +} Sector; + +enum io_uring_op { + IORING_OP_NOP = 0, + IORING_OP_READV = 1, + IORING_OP_WRITEV = 2, + IORING_OP_FSYNC = 3, + IORING_OP_READ_FIXED = 4, + IORING_OP_WRITE_FIXED = 5, + IORING_OP_POLL_ADD = 6, + IORING_OP_POLL_REMOVE = 7, + IORING_OP_SYNC_FILE_RANGE = 8, + IORING_OP_SENDMSG = 9, + IORING_OP_RECVMSG = 10, + IORING_OP_TIMEOUT = 11, + IORING_OP_TIMEOUT_REMOVE = 12, + IORING_OP_ACCEPT = 13, + IORING_OP_ASYNC_CANCEL = 14, + IORING_OP_LINK_TIMEOUT = 15, + IORING_OP_CONNECT = 16, + IORING_OP_FALLOCATE = 17, + IORING_OP_OPENAT = 18, + IORING_OP_CLOSE = 19, + IORING_OP_FILES_UPDATE = 20, + IORING_OP_STATX = 21, + IORING_OP_READ = 22, + IORING_OP_WRITE = 23, + IORING_OP_FADVISE = 24, + IORING_OP_MADVISE = 25, + IORING_OP_SEND = 26, + IORING_OP_RECV = 27, + IORING_OP_OPENAT2 = 28, + IORING_OP_EPOLL_CTL = 29, + IORING_OP_SPLICE = 30, + IORING_OP_PROVIDE_BUFFERS = 31, + IORING_OP_REMOVE_BUFFERS = 32, + IORING_OP_TEE = 33, + IORING_OP_SHUTDOWN = 34, + IORING_OP_RENAMEAT = 35, + IORING_OP_UNLINKAT = 36, + IORING_OP_MKDIRAT = 37, + IORING_OP_SYMLINKAT = 38, + IORING_OP_LINKAT = 39, + IORING_OP_MSG_RING = 40, + IORING_OP_FSETXATTR = 41, + IORING_OP_SETXATTR = 42, + IORING_OP_FGETXATTR = 43, + IORING_OP_GETXATTR = 44, + IORING_OP_SOCKET = 45, + IORING_OP_URING_CMD = 46, + IORING_OP_SEND_ZC = 47, + IORING_OP_SENDMSG_ZC = 48, + IORING_OP_READ_MULTISHOT = 49, + IORING_OP_WAITID = 50, + IORING_OP_FUTEX_WAIT = 51, + IORING_OP_FUTEX_WAKE = 52, + IORING_OP_FUTEX_WAITV = 53, + IORING_OP_FIXED_FD_INSTALL = 54, + IORING_OP_FTRUNCATE = 55, + IORING_OP_BIND = 56, + IORING_OP_LISTEN = 57, + IORING_OP_LAST = 58, +}; + +struct io_uring_buf { + __u64 addr; + __u32 len; + __u16 bid; + __u16 resv; +}; + +struct io_uring_buf_ring { + 
union { + struct { + __u64 resv1; + __u32 resv2; + __u16 resv3; + __u16 tail; + }; + struct { + struct {} __empty_bufs; + struct io_uring_buf bufs[0]; + }; + }; +}; + +struct io_buffer { + struct list_head list; + __u64 addr; + __u32 len; + __u16 bid; + __u16 bgid; +}; + +struct io_buffer_list { + union { + struct list_head buf_list; + struct { + struct page **buf_pages; + struct io_uring_buf_ring *buf_ring; + }; + struct callback_head rcu; + }; + __u16 bgid; + __u16 buf_nr_pages; + __u16 nr_entries; + __u16 head; + __u16 mask; + __u16 flags; + atomic_t refs; +}; + +struct io_poll { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + int retries; + struct wait_queue_entry wait; +}; + +struct async_poll { + struct io_poll poll; + struct io_poll *double_poll; +}; + +struct io_issue_def { + unsigned int needs_file: 1; + unsigned int plug: 1; + unsigned int hash_reg_file: 1; + unsigned int unbound_nonreg_file: 1; + unsigned int pollin: 1; + unsigned int pollout: 1; + unsigned int poll_exclusive: 1; + unsigned int buffer_select: 1; + unsigned int audit_skip: 1; + unsigned int ioprio: 1; + unsigned int iopoll: 1; + unsigned int iopoll_queue: 1; + unsigned int vectored: 1; + short unsigned int async_size; + int (*issue)(struct io_kiocb *, unsigned int); + int (*prep)(struct io_kiocb *, const struct io_uring_sqe *); +}; + +struct io_cold_def { + const char *name; + void (*cleanup)(struct io_kiocb *); + void (*fail)(struct io_kiocb *); +}; + +struct io_ftrunc { + struct file *file; + long: 32; + loff_t len; +}; + +typedef int (*dr_match_t)(struct device *, void *, void *); + +typedef long unsigned int (*genpool_algo_t)(long unsigned int *, long unsigned int, long unsigned int, unsigned int, void *, struct gen_pool *, long unsigned int); + +struct gen_pool { + spinlock_t lock; + struct list_head chunks; + int min_alloc_order; + genpool_algo_t algo; + void *data; + const char *name; +}; + +struct gen_pool_chunk { + struct list_head next_chunk; + atomic_long_t avail; + phys_addr_t phys_addr; + void *owner; + long unsigned int start_addr; + long unsigned int end_addr; + long unsigned int bits[0]; +}; + +struct genpool_data_align { + int align; +}; + +struct genpool_data_fixed { + long unsigned int offset; +}; + +enum asn1_method { + ASN1_PRIM = 0, + ASN1_CONS = 1, +}; + +enum asn1_tag { + ASN1_EOC = 0, + ASN1_BOOL = 1, + ASN1_INT = 2, + ASN1_BTS = 3, + ASN1_OTS = 4, + ASN1_NULL = 5, + ASN1_OID = 6, + ASN1_ODE = 7, + ASN1_EXT = 8, + ASN1_REAL = 9, + ASN1_ENUM = 10, + ASN1_EPDV = 11, + ASN1_UTF8STR = 12, + ASN1_RELOID = 13, + ASN1_SEQ = 16, + ASN1_SET = 17, + ASN1_NUMSTR = 18, + ASN1_PRNSTR = 19, + ASN1_TEXSTR = 20, + ASN1_VIDSTR = 21, + ASN1_IA5STR = 22, + ASN1_UNITIM = 23, + ASN1_GENTIM = 24, + ASN1_GRASTR = 25, + ASN1_VISSTR = 26, + ASN1_GENSTR = 27, + ASN1_UNISTR = 28, + ASN1_CHRSTR = 29, + ASN1_BMPSTR = 30, + ASN1_LONG_TAG = 31, +}; + +typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); + +struct asn1_decoder { + const unsigned char *machine; + size_t machlen; + const asn1_action_t *actions; +}; + +enum asn1_opcode { + ASN1_OP_MATCH = 0, + ASN1_OP_MATCH_OR_SKIP = 1, + ASN1_OP_MATCH_ACT = 2, + ASN1_OP_MATCH_ACT_OR_SKIP = 3, + ASN1_OP_MATCH_JUMP = 4, + ASN1_OP_MATCH_JUMP_OR_SKIP = 5, + ASN1_OP_MATCH_ANY = 8, + ASN1_OP_MATCH_ANY_OR_SKIP = 9, + ASN1_OP_MATCH_ANY_ACT = 10, + ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, + ASN1_OP_COND_MATCH_OR_SKIP = 17, + ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, + ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, + ASN1_OP_COND_MATCH_ANY = 24, 
+ ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, + ASN1_OP_COND_MATCH_ANY_ACT = 26, + ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, + ASN1_OP_COND_FAIL = 28, + ASN1_OP_COMPLETE = 29, + ASN1_OP_ACT = 30, + ASN1_OP_MAYBE_ACT = 31, + ASN1_OP_END_SEQ = 32, + ASN1_OP_END_SET = 33, + ASN1_OP_END_SEQ_OF = 34, + ASN1_OP_END_SET_OF = 35, + ASN1_OP_END_SEQ_ACT = 36, + ASN1_OP_END_SET_ACT = 37, + ASN1_OP_END_SEQ_OF_ACT = 38, + ASN1_OP_END_SET_OF_ACT = 39, + ASN1_OP_RETURN = 40, + ASN1_OP__NR = 41, +}; + +struct dmi_strmatch { + unsigned char slot: 7; + unsigned char exact_match: 1; + char substr[79]; +}; + +struct dmi_system_id { + int (*callback)(const struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +}; + +enum { + LOGIC_PIO_INDIRECT = 0, + LOGIC_PIO_CPU_MMIO = 1, +}; + +struct logic_pio_host_ops; + +struct logic_pio_hwaddr { + struct list_head list; + const struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; + long unsigned int flags; + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *, long unsigned int, size_t); + void (*out)(void *, long unsigned int, u32, size_t); + u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int); + void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int); +}; + +struct hotplug_slot_ops; + +struct hotplug_slot { + const struct hotplug_slot_ops *ops; + struct list_head slot_list; + struct pci_slot *pci_slot; + struct module *owner; + const char *mod_name; +}; + +enum { + pci_channel_io_normal = 1, + pci_channel_io_frozen = 2, + pci_channel_io_perm_failure = 3, +}; + +enum pcie_reset_state { + pcie_deassert_reset = 1, + pcie_warm_reset = 2, + pcie_hot_reset = 3, +}; + +enum pci_dev_flags { + PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, + PCI_DEV_FLAGS_NO_D3 = 2, + PCI_DEV_FLAGS_ASSIGNED = 4, + PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, + PCI_DEV_FLAGS_NO_BUS_RESET = 64, + PCI_DEV_FLAGS_NO_PM_RESET = 128, + PCI_DEV_FLAGS_VPD_REF_F0 = 256, + PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, + PCI_DEV_FLAGS_NO_FLR_RESET = 1024, + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, + PCI_DEV_FLAGS_HAS_MSI_MASKING = 4096, +}; + +enum pcie_bus_config_types { + PCIE_BUS_TUNE_OFF = 0, + PCIE_BUS_DEFAULT = 1, + PCIE_BUS_SAFE = 2, + PCIE_BUS_PERFORMANCE = 3, + PCIE_BUS_PEER2PEER = 4, +}; + +typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); + +enum pci_fixup_pass { + pci_fixup_early = 0, + pci_fixup_header = 1, + pci_fixup_final = 2, + pci_fixup_enable = 3, + pci_fixup_resume = 4, + pci_fixup_suspend = 5, + pci_fixup_resume_early = 6, + pci_fixup_suspend_late = 7, +}; + +struct hotplug_slot_ops { + int (*enable_slot)(struct hotplug_slot *); + int (*disable_slot)(struct hotplug_slot *); + int (*set_attention_status)(struct hotplug_slot *, u8); + int (*hardware_test)(struct hotplug_slot *, u32); + int (*get_power_status)(struct hotplug_slot *, u8 *); + int (*get_attention_status)(struct hotplug_slot *, u8 *); + int (*get_latch_status)(struct hotplug_slot *, u8 *); + int (*get_adapter_status)(struct hotplug_slot *, u8 *); + int (*reset_slot)(struct hotplug_slot *, bool); +}; + +struct pcie_tlp_log { + u32 dw[4]; +}; + +struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +struct pci_reset_fn_method { + int 
(*reset_fn)(struct pci_dev *, bool); + char *name; +}; + +struct pci_pme_device { + struct list_head list; + struct pci_dev *dev; +}; + +struct pci_acs { + u16 cap; + u16 ctrl; + u16 fw_ctrl; +}; + +struct pci_saved_state { + u32 config_space[16]; + struct pci_cap_saved_data cap[0]; +}; + +struct clk_core; + +struct clk { + struct clk_core *core; + struct device *dev; + const char *dev_id; + const char *con_id; + long unsigned int min_rate; + long unsigned int max_rate; + unsigned int exclusive_count; + struct hlist_node clks_node; +}; + +struct cpu { + int node_id; + int hotpluggable; + struct device dev; +}; + +typedef u32 note_buf_t[75]; + +struct cpu_attr { + struct device_attribute attr; + const struct cpumask * const map; +}; + +typedef void (*async_func_t)(void *, async_cookie_t); + +struct scsi_lun { + __u8 scsi_lun[8]; +}; + +enum scsi_timeouts { + SCSI_DEFAULT_EH_TIMEOUT = 10000, +}; + +enum scsi_disposition { + NEEDS_RETRY = 8193, + SUCCESS = 8194, + FAILED = 8195, + QUEUED = 8196, + SOFT_ERROR = 8197, + ADD_TO_MLQUEUE = 8198, + TIMEOUT_ERROR = 8199, + SCSI_RETURN_NOT_HANDLED = 8200, + FAST_IO_FAIL = 8201, +}; + +enum scsi_scan_mode { + SCSI_SCAN_INITIAL = 0, + SCSI_SCAN_RESCAN = 1, + SCSI_SCAN_MANUAL = 2, +}; + +typedef void (*activate_complete)(void *, int); + +struct scsi_device_handler { + struct list_head list; + struct module *module; + const char *name; + enum scsi_disposition (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); + int (*attach)(struct scsi_device *); + void (*detach)(struct scsi_device *); + int (*activate)(struct scsi_device *, activate_complete, void *); + blk_status_t (*prep_fn)(struct scsi_device *, struct request *); + int (*set_params)(struct scsi_device *, const char *); + void (*rescan)(struct scsi_device *); +}; + +struct scsi_failure { + int result; + u8 sense; + u8 asc; + u8 ascq; + s8 allowed; + s8 retries; +}; + +struct scsi_failures { + int total_allowed; + int total_retries; + struct scsi_failure *failure_definitions; +}; + +struct scsi_exec_args { + unsigned char *sense; + unsigned int sense_len; + struct scsi_sense_hdr *sshdr; + blk_mq_req_flags_t req_flags; + int scmd_flags; + int *resid; + struct scsi_failures *failures; +}; + +struct scsi_driver { + struct device_driver gendrv; + int (*resume)(struct device *); + void (*rescan)(struct device *); + blk_status_t (*init_command)(struct scsi_cmnd *); + void (*uninit_command)(struct scsi_cmnd *); + int (*done)(struct scsi_cmnd *); + int (*eh_action)(struct scsi_cmnd *, int); + void (*eh_reset)(struct scsi_cmnd *); +}; + +struct async_scan_data { + struct list_head list; + struct Scsi_Host *shost; + struct completion prev_finished; +}; + +struct sg_page_iter { + struct scatterlist *sg; + unsigned int sg_pgoffset; + unsigned int __nents; + int __pg_advance; +}; + +struct sg_mapping_iter { + struct page *page; + void *addr; + size_t length; + size_t consumed; + struct sg_page_iter piter; + unsigned int __offset; + unsigned int __remaining; + unsigned int __flags; +}; + +enum xfer_buf_dir { + TO_XFER_BUF = 0, + FROM_XFER_BUF = 1, +}; + +struct thermal_attr { + struct device_attribute attr; + char name[20]; +}; + +struct thermal_trip_attrs { + struct thermal_attr type; + struct thermal_attr temp; + struct thermal_attr hyst; +}; + +struct thermal_trip_desc { + struct thermal_trip trip; + struct thermal_trip_attrs trip_attrs; + struct list_head list_node; + struct list_head thermal_instances; + int threshold; +}; + +struct thermal_zone_params; + +struct thermal_governor; + +struct 
thermal_zone_device { + int id; + char type[20]; + struct device device; + struct completion removal; + struct completion resume; + struct attribute_group trips_attribute_group; + struct list_head trips_high; + struct list_head trips_reached; + struct list_head trips_invalid; + enum thermal_device_mode mode; + void *devdata; + int num_trips; + long unsigned int passive_delay_jiffies; + long unsigned int polling_delay_jiffies; + long unsigned int recheck_delay_jiffies; + int temperature; + int last_temperature; + int emul_temperature; + int passive; + int prev_low_trip; + int prev_high_trip; + struct thermal_zone_device_ops ops; + struct thermal_zone_params *tzp; + struct thermal_governor *governor; + void *governor_data; + struct ida ida; + struct mutex lock; + struct list_head node; + struct delayed_work poll_queue; + enum thermal_notify_event notify_event; + u8 state; + struct list_head user_thresholds; + struct thermal_trip_desc trips[0]; +}; + +struct thermal_zone_params { + const char *governor_name; + bool no_hwmon; + u32 sustainable_power; + s32 k_po; + s32 k_pu; + s32 k_i; + s32 k_d; + s32 integral_cutoff; + int slope; + int offset; +}; + +struct user_threshold { + struct list_head list_node; + int temperature; + int direction; +}; + +struct thermal_governor { + const char *name; + int (*bind_to_tz)(struct thermal_zone_device *); + void (*unbind_from_tz)(struct thermal_zone_device *); + void (*trip_crossed)(struct thermal_zone_device *, const struct thermal_trip *, bool); + void (*manage)(struct thermal_zone_device *); + void (*update_tz)(struct thermal_zone_device *, enum thermal_notify_event); + struct list_head governor_list; +}; + +typedef struct thermal_zone_device *class_thermal_zone_t; + +typedef size_t (*iov_step_f)(void *, size_t, size_t, void *, void *); + +typedef size_t (*iov_ustep_f)(void *, size_t, size_t, void *, void *); + +struct csum_state { + __wsum csum; + size_t off; +}; + +struct cgroup_cls_state { + struct cgroup_subsys_state css; + u32 classid; + long: 32; +}; + +struct update_classid_context { + u32 classid; + unsigned int batch; +}; + +struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *); + int (*get_strings)(struct phy_device *, u8 *); + int (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); + int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *, struct netlink_ext_ack *); + int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); + int (*start_cable_test)(struct phy_device *, struct netlink_ext_ack *); + int (*start_cable_test_tdr)(struct phy_device *, struct netlink_ext_ack *, const struct phy_tdr_config *); +}; + +enum { + ETHTOOL_A_STRING_UNSPEC = 0, + ETHTOOL_A_STRING_INDEX = 1, + ETHTOOL_A_STRING_VALUE = 2, + __ETHTOOL_A_STRING_CNT = 3, + ETHTOOL_A_STRING_MAX = 2, +}; + +enum { + ETHTOOL_A_STRINGS_UNSPEC = 0, + ETHTOOL_A_STRINGS_STRING = 1, + __ETHTOOL_A_STRINGS_CNT = 2, + ETHTOOL_A_STRINGS_MAX = 1, +}; + +enum { + ETHTOOL_A_STRINGSET_UNSPEC = 0, + ETHTOOL_A_STRINGSET_ID = 1, + ETHTOOL_A_STRINGSET_COUNT = 2, + ETHTOOL_A_STRINGSET_STRINGS = 3, + __ETHTOOL_A_STRINGSET_CNT = 4, + ETHTOOL_A_STRINGSET_MAX = 3, +}; + +enum { + ETHTOOL_A_STRINGSETS_UNSPEC = 0, + ETHTOOL_A_STRINGSETS_STRINGSET = 1, + __ETHTOOL_A_STRINGSETS_CNT = 2, + ETHTOOL_A_STRINGSETS_MAX = 1, +}; + +struct strset_info { + bool per_dev; + bool free_strings; + unsigned int count; + const char (*strings)[32]; +}; + +struct strset_req_info { + struct 
ethnl_req_info base; + u32 req_ids; + bool counts_only; +}; + +struct strset_reply_data { + struct ethnl_reply_data base; + struct strset_info sets[21]; +}; + +enum nfnl_batch_attributes { + NFNL_BATCH_UNSPEC = 0, + NFNL_BATCH_GENID = 1, + __NFNL_BATCH_MAX = 2, +}; + +struct nfnl_net { + struct sock *nfnl; +}; + +struct nfnl_err { + struct list_head head; + struct nlmsghdr *nlh; + int err; + struct netlink_ext_ack extack; +}; + +enum { + NFNL_BATCH_FAILURE = 1, + NFNL_BATCH_DONE = 2, + NFNL_BATCH_REPLAY = 4, +}; + +struct nf_nat_range2 { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; + union nf_conntrack_man_proto base_proto; +}; + +union nf_conntrack_nat_help {}; + +struct nf_conn_nat { + union nf_conntrack_nat_help help; + int masq_index; +}; + +struct masq_dev_work { + struct work_struct work; + struct net *net; + netns_tracker ns_tracker; + union nf_inet_addr addr; + int ifindex; + int (*iter)(struct nf_conn *, void *); +}; + +enum nft_nat_types { + NFT_NAT_SNAT = 0, + NFT_NAT_DNAT = 1, +}; + +enum nft_nat_attributes { + NFTA_NAT_UNSPEC = 0, + NFTA_NAT_TYPE = 1, + NFTA_NAT_FAMILY = 2, + NFTA_NAT_REG_ADDR_MIN = 3, + NFTA_NAT_REG_ADDR_MAX = 4, + NFTA_NAT_REG_PROTO_MIN = 5, + NFTA_NAT_REG_PROTO_MAX = 6, + NFTA_NAT_FLAGS = 7, + __NFTA_NAT_MAX = 8, +}; + +struct nf_nat_range { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; +}; + +struct nft_nat { + u8 sreg_addr_min; + u8 sreg_addr_max; + u8 sreg_proto_min; + u8 sreg_proto_max; + enum nf_nat_manip_type type: 8; + u8 family; + u16 flags; +}; + +enum { + BPF_SOCK_OPS_VOID = 0, + BPF_SOCK_OPS_TIMEOUT_INIT = 1, + BPF_SOCK_OPS_RWND_INIT = 2, + BPF_SOCK_OPS_TCP_CONNECT_CB = 3, + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, + BPF_SOCK_OPS_NEEDS_ECN = 6, + BPF_SOCK_OPS_BASE_RTT = 7, + BPF_SOCK_OPS_RTO_CB = 8, + BPF_SOCK_OPS_RETRANS_CB = 9, + BPF_SOCK_OPS_STATE_CB = 10, + BPF_SOCK_OPS_TCP_LISTEN_CB = 11, + BPF_SOCK_OPS_RTT_CB = 12, + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13, + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15, +}; + +enum { + BTF_SOCK_TYPE_INET = 0, + BTF_SOCK_TYPE_INET_CONN = 1, + BTF_SOCK_TYPE_INET_REQ = 2, + BTF_SOCK_TYPE_INET_TW = 3, + BTF_SOCK_TYPE_REQ = 4, + BTF_SOCK_TYPE_SOCK = 5, + BTF_SOCK_TYPE_SOCK_COMMON = 6, + BTF_SOCK_TYPE_TCP = 7, + BTF_SOCK_TYPE_TCP_REQ = 8, + BTF_SOCK_TYPE_TCP_TW = 9, + BTF_SOCK_TYPE_TCP6 = 10, + BTF_SOCK_TYPE_UDP = 11, + BTF_SOCK_TYPE_UDP6 = 12, + BTF_SOCK_TYPE_UNIX = 13, + BTF_SOCK_TYPE_MPTCP = 14, + BTF_SOCK_TYPE_SOCKET = 15, + MAX_BTF_SOCK_TYPE = 16, +}; + +struct sock_bh_locked { + struct sock *sock; + local_lock_t bh_lock; +}; + +enum inet_csk_ack_state_t { + ICSK_ACK_SCHED = 1, + ICSK_ACK_TIMER = 2, + ICSK_ACK_PUSHED = 4, + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW = 16, + ICSK_ACK_NOMEM = 32, +}; + +struct tcp_timewait_sock { + struct inet_timewait_sock tw_sk; + u32 tw_rcv_wnd; + u32 tw_ts_offset; + u32 tw_ts_recent; + u32 tw_last_oow_ack_time; + int tw_ts_recent_stamp; + u32 tw_tx_delay; +}; + +struct ip_reply_arg { + struct kvec iov[1]; + int flags; + __wsum csum; + int csumoffset; + int bound_dev_if; + u8 tos; + kuid_t uid; +}; + +typedef u32 inet_ehashfn_t(const struct net *, const __be32, const __u16, const __be32, const __be16); + +struct bpf_sock_ops_kern { + struct sock *sk; + union { + u32 args[4]; + 
u32 reply; + u32 replylong[4]; + }; + struct sk_buff *syn_skb; + struct sk_buff *skb; + void *skb_data_end; + u8 op; + u8 is_fullsock; + u8 remaining_opt_len; + long: 32; + u64 temp; +}; + +union tcp_ao_addr { + struct in_addr a4; + struct in6_addr a6; +}; + +struct tcp_ao_hdr { + u8 kind; + u8 length; + u8 keyid; + u8 rnext_keyid; +}; + +struct tcp_ao_key { + struct hlist_node node; + union tcp_ao_addr addr; + u8 key[80]; + unsigned int tcp_sigpool_id; + unsigned int digest_size; + int l3index; + u8 prefixlen; + u8 family; + u8 keylen; + u8 keyflags; + u8 sndid; + u8 rcvid; + u8 maclen; + struct callback_head rcu; + long: 32; + atomic64_t pkt_good; + atomic64_t pkt_bad; + u8 traffic_keys[0]; +}; + +enum tcp_tw_status { + TCP_TW_SUCCESS = 0, + TCP_TW_RST = 1, + TCP_TW_ACK = 2, + TCP_TW_SYN = 3, +}; + +struct tcp_md5sig_key { + struct hlist_node node; + u8 keylen; + u8 family; + u8 prefixlen; + u8 flags; + union tcp_ao_addr addr; + int l3index; + u8 key[80]; + struct callback_head rcu; +}; + +enum tcp_seq_states { + TCP_SEQ_STATE_LISTENING = 0, + TCP_SEQ_STATE_ESTABLISHED = 1, +}; + +struct tcp_seq_afinfo { + sa_family_t family; +}; + +struct tcp_iter_state { + struct seq_net_private p; + enum tcp_seq_states state; + struct sock *syn_wait_sk; + int bucket; + int offset; + int sbucket; + int num; + long: 32; + loff_t last_pos; +}; + +struct tcp_key { + union { + struct { + struct tcp_ao_key *ao_key; + char *traffic_key; + u32 sne; + u8 rcv_next; + }; + struct tcp_md5sig_key *md5_key; + }; + enum { + TCP_KEY_NONE = 0, + TCP_KEY_MD5 = 1, + TCP_KEY_AO = 2, + } type; +}; + +struct bpf_tcp_iter_state { + struct tcp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; + long: 32; +}; + +struct bpf_iter__tcp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct sock_common *sk_common; + }; + uid_t uid; + long: 32; +}; + +struct xt_counters { + __u64 pcnt; + __u64 bcnt; +}; + +struct xt_table_info; + +struct xt_table { + struct list_head list; + unsigned int valid_hooks; + struct xt_table_info *private; + struct nf_hook_ops *ops; + struct module *me; + u_int8_t af; + int priority; + const char name[32]; +}; + +struct xt_table_info { + unsigned int size; + unsigned int number; + unsigned int initial_entries; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int stacksize; + void ***jumpstack; + long: 32; + unsigned char entries[0]; +}; + +struct ipt_ip { + struct in_addr src; + struct in_addr dst; + struct in_addr smsk; + struct in_addr dmsk; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u16 proto; + __u8 flags; + __u8 invflags; +}; + +struct ipt_entry { + struct ipt_ip ip; + unsigned int nfcache; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct ipt_replace { + char name[32]; + unsigned int valid_hooks; + unsigned int num_entries; + unsigned int size; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int num_counters; + struct xt_counters *counters; + long: 32; + struct ipt_entry entries[0]; +}; + +typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, long unsigned int), long int (*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *)); + +struct compress_format { + unsigned char magic[2]; + const char *name; + decompress_fn decompressor; +}; + +struct radix_tree_iter { 
+ long unsigned int index; + long unsigned int next_index; + long unsigned int tags; + struct xa_node *node; +}; + +enum { + RADIX_TREE_ITER_TAG_MASK = 15, + RADIX_TREE_ITER_TAGGED = 16, + RADIX_TREE_ITER_CONTIG = 32, +}; + +enum { + Root_NFS = 255, + Root_CIFS = 254, + Root_Generic = 253, + Root_RAM0 = 1048576, +}; + +enum rpc_display_format_t { + RPC_DISPLAY_ADDR = 0, + RPC_DISPLAY_PORT = 1, + RPC_DISPLAY_PROTO = 2, + RPC_DISPLAY_HEX_ADDR = 3, + RPC_DISPLAY_HEX_PORT = 4, + RPC_DISPLAY_NETID = 5, + RPC_DISPLAY_MAX = 6, +}; + +enum nfs_opnum4 { + OP_ACCESS = 3, + OP_CLOSE = 4, + OP_COMMIT = 5, + OP_CREATE = 6, + OP_DELEGPURGE = 7, + OP_DELEGRETURN = 8, + OP_GETATTR = 9, + OP_GETFH = 10, + OP_LINK = 11, + OP_LOCK = 12, + OP_LOCKT = 13, + OP_LOCKU = 14, + OP_LOOKUP = 15, + OP_LOOKUPP = 16, + OP_NVERIFY = 17, + OP_OPEN = 18, + OP_OPENATTR = 19, + OP_OPEN_CONFIRM = 20, + OP_OPEN_DOWNGRADE = 21, + OP_PUTFH = 22, + OP_PUTPUBFH = 23, + OP_PUTROOTFH = 24, + OP_READ = 25, + OP_READDIR = 26, + OP_READLINK = 27, + OP_REMOVE = 28, + OP_RENAME = 29, + OP_RENEW = 30, + OP_RESTOREFH = 31, + OP_SAVEFH = 32, + OP_SECINFO = 33, + OP_SETATTR = 34, + OP_SETCLIENTID = 35, + OP_SETCLIENTID_CONFIRM = 36, + OP_VERIFY = 37, + OP_WRITE = 38, + OP_RELEASE_LOCKOWNER = 39, + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + OP_ALLOCATE = 59, + OP_COPY = 60, + OP_COPY_NOTIFY = 61, + OP_DEALLOCATE = 62, + OP_IO_ADVISE = 63, + OP_LAYOUTERROR = 64, + OP_LAYOUTSTATS = 65, + OP_OFFLOAD_CANCEL = 66, + OP_OFFLOAD_STATUS = 67, + OP_READ_PLUS = 68, + OP_SEEK = 69, + OP_WRITE_SAME = 70, + OP_CLONE = 71, + OP_GETXATTR = 72, + OP_SETXATTR = 73, + OP_LISTXATTRS = 74, + OP_REMOVEXATTR = 75, + OP_ILLEGAL = 10044, +}; + +struct patch_context { + union { + struct vm_struct *area; + struct mm_struct *mm; + }; + long unsigned int addr; + pte_t *pte; +}; + +typedef struct { + unsigned int __softirq_pending; + unsigned int timer_irqs_event; + unsigned int broadcast_irqs_event; + unsigned int timer_irqs_others; + unsigned int pmu_irqs; + unsigned int mce_exceptions; + unsigned int spurious_irqs; + unsigned int sreset_irqs; +} irq_cpustat_t; + +struct softirq_action { + void (*action)(); +}; + +struct tasklet_struct { + struct tasklet_struct *next; + long unsigned int state; + atomic_t count; + bool use_callback; + union { + void (*func)(long unsigned int); + void (*callback)(struct tasklet_struct *); + }; + long unsigned int data; +}; + +enum { + TASKLET_STATE_SCHED = 0, + TASKLET_STATE_RUN = 1, +}; + +struct kernel_stat { + long unsigned int irqs_sum; + unsigned int softirqs[10]; +}; + +struct trace_event_raw_irq_handler_entry { + struct trace_entry ent; + int irq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_irq_handler_exit { + struct trace_entry ent; + int irq; + int ret; + char __data[0]; +}; + +struct trace_event_raw_softirq { + struct trace_entry ent; + unsigned int vec; + char __data[0]; +}; + +struct trace_event_raw_tasklet { + struct trace_entry ent; + void *tasklet; + void *func; + char __data[0]; +}; + +struct trace_event_data_offsets_irq_handler_entry { + u32 name; + const void 
*name_ptr_; +}; + +struct trace_event_data_offsets_irq_handler_exit {}; + +struct trace_event_data_offsets_softirq {}; + +struct trace_event_data_offsets_tasklet {}; + +typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *); + +typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int); + +typedef void (*btf_trace_softirq_entry)(void *, unsigned int); + +typedef void (*btf_trace_softirq_exit)(void *, unsigned int); + +typedef void (*btf_trace_softirq_raise)(void *, unsigned int); + +typedef void (*btf_trace_tasklet_entry)(void *, struct tasklet_struct *, void *); + +typedef void (*btf_trace_tasklet_exit)(void *, struct tasklet_struct *, void *); + +struct tasklet_head { + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; + +enum { + IRQTF_RUNTHREAD = 0, + IRQTF_WARNED = 1, + IRQTF_AFFINITY = 2, + IRQTF_FORCED_THREAD = 3, + IRQTF_READY = 4, +}; + +enum { + IRQS_AUTODETECT = 1, + IRQS_SPURIOUS_DISABLED = 2, + IRQS_POLL_INPROGRESS = 8, + IRQS_ONESHOT = 32, + IRQS_REPLAY = 64, + IRQS_WAITING = 128, + IRQS_PENDING = 512, + IRQS_SUSPENDED = 2048, + IRQS_TIMINGS = 4096, + IRQS_NMI = 8192, + IRQS_SYSFS = 16384, +}; + +enum { + _IRQ_DEFAULT_INIT_FLAGS = 2048, + _IRQ_PER_CPU = 512, + _IRQ_LEVEL = 256, + _IRQ_NOPROBE = 1024, + _IRQ_NOREQUEST = 2048, + _IRQ_NOTHREAD = 65536, + _IRQ_NOAUTOEN = 4096, + _IRQ_MOVE_PCNTXT = 16384, + _IRQ_NO_BALANCING = 8192, + _IRQ_NESTED_THREAD = 32768, + _IRQ_PER_CPU_DEVID = 131072, + _IRQ_IS_POLLED = 262144, + _IRQ_DISABLE_UNLAZY = 524288, + _IRQ_HIDDEN = 1048576, + _IRQ_NO_DEBUG = 2097152, + _IRQF_MODIFY_MASK = 2096911, +}; + +struct dma_coherent_mem { + void *virt_base; + dma_addr_t device_base; + long unsigned int pfn_base; + int size; + long unsigned int *bitmap; + spinlock_t spinlock; + bool use_dev_dma_pfn_offset; +}; + +enum { + MEMREMAP_WB = 1, + MEMREMAP_WT = 2, + MEMREMAP_WC = 4, + MEMREMAP_ENC = 8, + MEMREMAP_DEC = 16, +}; + +enum clocksource_ids { + CSID_GENERIC = 0, + CSID_ARM_ARCH_COUNTER = 1, + CSID_S390_TOD = 2, + CSID_X86_TSC_EARLY = 3, + CSID_X86_TSC = 4, + CSID_X86_KVM_CLK = 5, + CSID_X86_ART = 6, + CSID_MAX = 7, +}; + +enum vdso_clock_mode { + VDSO_CLOCKMODE_NONE = 0, + VDSO_CLOCKMODE_ARCHTIMER = 1, + VDSO_CLOCKMODE_MAX = 2, + VDSO_CLOCKMODE_TIMENS = 2147483647, +}; + +struct clocksource_base; + +struct clocksource { + u64 (*read)(struct clocksource *); + long: 32; + u64 mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; + u32 uncertainty_margin; + u64 max_cycles; + const char *name; + struct list_head list; + u32 freq_khz; + int rating; + enum clocksource_ids id; + enum vdso_clock_mode vdso_clock_mode; + long unsigned int flags; + struct clocksource_base *base; + int (*enable)(struct clocksource *); + void (*disable)(struct clocksource *); + void (*suspend)(struct clocksource *); + void (*resume)(struct clocksource *); + void (*mark_unstable)(struct clocksource *); + void (*tick_stable)(struct clocksource *); + struct module *owner; +}; + +struct clocksource_base { + enum clocksource_ids id; + u32 freq_khz; + u64 offset; + u32 numerator; + u32 denominator; +}; + +enum tick_device_mode { + TICKDEV_MODE_PERIODIC = 0, + TICKDEV_MODE_ONESHOT = 1, +}; + +struct tick_device { + struct clock_event_device *evtdev; + enum tick_device_mode mode; +}; + +typedef unsigned int elf_greg_t32; + +typedef elf_greg_t32 elf_gregset_t32[48]; + +typedef elf_gregset_t32 elf_gregset_t; + +typedef __u64 Elf64_Addr; + +typedef __u16 Elf64_Half; + +typedef __u64 Elf64_Off; + +typedef __u32 Elf64_Word; + 
+typedef __u64 Elf64_Xword; + +struct elf64_hdr { + unsigned char e_ident[16]; + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; + Elf64_Off e_phoff; + Elf64_Off e_shoff; + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +}; + +typedef struct elf64_hdr Elf64_Ehdr; + +struct elf64_phdr { + Elf64_Word p_type; + Elf64_Word p_flags; + Elf64_Off p_offset; + Elf64_Addr p_vaddr; + Elf64_Addr p_paddr; + Elf64_Xword p_filesz; + Elf64_Xword p_memsz; + Elf64_Xword p_align; +}; + +typedef struct elf64_phdr Elf64_Phdr; + +struct elf_siginfo { + int si_signo; + int si_code; + int si_errno; +}; + +struct elf_prstatus_common { + struct elf_siginfo pr_info; + short int pr_cursig; + long unsigned int pr_sigpend; + long unsigned int pr_sighold; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; + struct __kernel_old_timeval pr_stime; + struct __kernel_old_timeval pr_cutime; + struct __kernel_old_timeval pr_cstime; +}; + +struct elf_prstatus { + struct elf_prstatus_common common; + elf_gregset_t pr_reg; + int pr_fpvalid; +}; + +struct crash_mem { + unsigned int max_nr_ranges; + unsigned int nr_ranges; + struct range ranges[0]; +}; + +enum blktrace_notify { + __BLK_TN_PROCESS = 0, + __BLK_TN_TIMESTAMP = 1, + __BLK_TN_MESSAGE = 2, + __BLK_TN_CGROUP = 256, +}; + +struct blk_io_trace { + __u32 magic; + __u32 sequence; + __u64 time; + __u64 sector; + __u32 bytes; + __u32 action; + __u32 pid; + __u32 device; + __u32 cpu; + __u16 error; + __u16 pdu_len; +}; + +struct blk_io_trace_remap { + __be32 device_from; + __be32 device_to; + __be64 sector_from; +}; + +enum { + Blktrace_setup = 1, + Blktrace_running = 2, + Blktrace_stopped = 3, +}; + +struct blk_user_trace_setup { + char name[32]; + __u16 act_mask; + __u32 buf_size; + __u32 buf_nr; + long: 32; + __u64 start_lba; + __u64 end_lba; + __u32 pid; + long: 32; +}; + +enum trace_type { + __TRACE_FIRST_TYPE = 0, + TRACE_FN = 1, + TRACE_CTX = 2, + TRACE_WAKE = 3, + TRACE_STACK = 4, + TRACE_PRINT = 5, + TRACE_BPRINT = 6, + TRACE_MMIO_RW = 7, + TRACE_MMIO_MAP = 8, + TRACE_BRANCH = 9, + TRACE_GRAPH_RET = 10, + TRACE_GRAPH_ENT = 11, + TRACE_GRAPH_RETADDR_ENT = 12, + TRACE_USER_STACK = 13, + TRACE_BLK = 14, + TRACE_BPUTS = 15, + TRACE_HWLAT = 16, + TRACE_OSNOISE = 17, + TRACE_TIMERLAT = 18, + TRACE_RAW_DATA = 19, + TRACE_FUNC_REPEATS = 20, + __TRACE_LAST_TYPE = 21, +}; + +typedef void blk_log_action_t(struct trace_iterator *, const char *, bool); + +struct bpf_local_storage_data; + +struct bpf_local_storage { + struct bpf_local_storage_data *cache[16]; + struct bpf_local_storage_map *smap; + struct hlist_head list; + void *owner; + struct callback_head rcu; + raw_spinlock_t lock; +}; + +enum { + BPF_LOCAL_STORAGE_GET_F_CREATE = 1, + BPF_SK_STORAGE_GET_F_CREATE = 1, +}; + +struct bpf_local_storage_map_bucket; + +struct bpf_local_storage_map { + struct bpf_map map; + struct bpf_local_storage_map_bucket *buckets; + u32 bucket_log; + u16 elem_size; + u16 cache_idx; + struct bpf_mem_alloc selem_ma; + struct bpf_mem_alloc storage_ma; + bool bpf_ma; +}; + +struct bpf_local_storage_map_bucket { + struct hlist_head list; + raw_spinlock_t lock; +}; + +struct bpf_local_storage_data { + struct bpf_local_storage_map *smap; + long: 32; + u8 data[0]; +}; + +struct bpf_local_storage_elem { + struct hlist_node map_node; + struct hlist_node snode; + struct bpf_local_storage 
*local_storage; + union { + struct callback_head rcu; + struct hlist_node free_node; + }; + long: 32; + struct bpf_local_storage_data sdata; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct bpf_local_storage_cache { + spinlock_t idx_lock; + long: 32; + u64 idx_usage_counts[16]; +}; + +typedef u64 (*btf_bpf_task_storage_get_recur)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_get)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_task_storage_delete_recur)(struct bpf_map *, struct task_struct *); + +typedef u64 (*btf_bpf_task_storage_delete)(struct bpf_map *, struct task_struct *); + +typedef struct { + u64 val; +} pfn_t; + +typedef unsigned int zap_flags_t; + +typedef unsigned int pgtbl_mod_mask; + +enum mthp_stat_item { + MTHP_STAT_ANON_FAULT_ALLOC = 0, + MTHP_STAT_ANON_FAULT_FALLBACK = 1, + MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE = 2, + MTHP_STAT_ZSWPOUT = 3, + MTHP_STAT_SWPIN = 4, + MTHP_STAT_SWPOUT = 5, + MTHP_STAT_SWPOUT_FALLBACK = 6, + MTHP_STAT_SHMEM_ALLOC = 7, + MTHP_STAT_SHMEM_FALLBACK = 8, + MTHP_STAT_SHMEM_FALLBACK_CHARGE = 9, + MTHP_STAT_SPLIT = 10, + MTHP_STAT_SPLIT_FAILED = 11, + MTHP_STAT_SPLIT_DEFERRED = 12, + MTHP_STAT_NR_ANON = 13, + MTHP_STAT_NR_ANON_PARTIALLY_MAPPED = 14, + __MTHP_STAT_COUNT = 15, +}; + +struct zap_details { + struct folio *single_folio; + bool even_cows; + zap_flags_t zap_flags; +}; + +struct follow_pfnmap_args { + struct vm_area_struct *vma; + long unsigned int address; + spinlock_t *lock; + pte_t *ptep; + long unsigned int pfn; + pgprot_t pgprot; + bool writable; + bool special; +}; + +typedef int (*pte_fn_t)(pte_t *, long unsigned int, void *); + +enum { + SWP_USED = 1, + SWP_WRITEOK = 2, + SWP_DISCARDABLE = 4, + SWP_DISCARDING = 8, + SWP_SOLIDSTATE = 16, + SWP_CONTINUED = 32, + SWP_BLKDEV = 64, + SWP_ACTIVATED = 128, + SWP_FS_OPS = 256, + SWP_AREA_DISCARD = 512, + SWP_PAGE_DISCARD = 1024, + SWP_STABLE_WRITES = 2048, + SWP_SYNCHRONOUS_IO = 4096, + SWP_SCANNING = 16384, +}; + +enum rmap_level { + RMAP_LEVEL_PTE = 0, + RMAP_LEVEL_PMD = 1, +}; + +struct unlink_vma_file_batch { + int count; + struct vm_area_struct *vmas[8]; +}; + +typedef int fpb_t; + +typedef struct { + spinlock_t *lock; +} class_spinlock_t; + +typedef __kernel_rwf_t rwf_t; + +typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *); + +enum { + DIO_LOCKING = 1, + DIO_SKIP_HOLES = 2, +}; + +typedef unsigned int iov_iter_extraction_t; + +struct dio_submit { + struct bio *bio; + unsigned int blkbits; + unsigned int blkfactor; + unsigned int start_zero_done; + int pages_in_io; + long: 32; + sector_t block_in_file; + unsigned int blocks_available; + int reap_counter; + sector_t final_block_in_request; + int boundary; + get_block_t *get_block; + loff_t logical_offset_in_bio; + sector_t final_block_in_bio; + sector_t next_block_for_io; + struct page *cur_page; + unsigned int cur_page_offset; + unsigned int cur_page_len; + long: 32; + sector_t cur_page_block; + loff_t cur_page_fs_offset; + struct iov_iter *iter; + unsigned int head; + unsigned int tail; + size_t from; + size_t to; + long: 32; +}; + +struct dio { + int flags; + blk_opf_t opf; + struct gendisk *bio_disk; + struct inode *inode; + loff_t i_size; + dio_iodone_t *end_io; + bool is_pinned; + void *private; + spinlock_t bio_lock; + int page_errors; + int is_async; + bool defer_completion; + bool should_dirty; + int io_error; + long unsigned int refcount; + struct bio *bio_list; + struct task_struct 
*waiter; + struct kiocb *iocb; + ssize_t result; + union { + struct page *pages[64]; + struct work_struct complete_work; + }; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum { + MBE_REFERENCED_B = 0, + MBE_REUSABLE_B = 1, +}; + +struct mb_cache_entry { + struct list_head e_list; + struct hlist_bl_node e_hash_list; + atomic_t e_refcnt; + u32 e_key; + long unsigned int e_flags; + long: 32; + u64 e_value; +}; + +struct mb_cache { + struct hlist_bl_head *c_hash; + int c_bucket_bits; + long unsigned int c_max_entries; + spinlock_t c_list_lock; + struct list_head c_list; + long unsigned int c_entry_count; + struct shrinker *c_shrink; + struct work_struct c_shrink_work; +}; + +struct match_token { + int token; + const char *pattern; +}; + +enum { + MAX_OPT_ARGS = 3, +}; + +struct pts_mount_opts { + int setuid; + int setgid; + kuid_t uid; + kgid_t gid; + umode_t mode; + umode_t ptmxmode; + int reserve; + int max; +}; + +enum { + Opt_uid___3 = 0, + Opt_gid___3 = 1, + Opt_mode___3 = 2, + Opt_ptmxmode = 3, + Opt_newinstance = 4, + Opt_max = 5, + Opt_err = 6, +}; + +struct pts_fs_info { + struct ida allocated_ptys; + struct pts_mount_opts mount_opts; + struct super_block *sb; + struct dentry *ptmx_dentry; +}; + +struct __fat_dirent { + long int d_ino; + __kernel_off_t d_off; + short unsigned int d_reclen; + char d_name[256]; +}; + +typedef long long unsigned int llu; + +enum { + PARSE_INVALID = 1, + PARSE_NOT_LONGNAME = 2, + PARSE_EOF = 3, +}; + +struct fat_ioctl_filldir_callback { + struct dir_context ctx; + void *dirent; + int result; + const char *longname; + int long_len; + const char *shortname; + int short_len; +}; + +struct fscrypt_str { + unsigned char *name; + u32 len; +}; + +typedef enum { + ZSTD_error_no_error = 0, + ZSTD_error_GENERIC = 1, + ZSTD_error_prefix_unknown = 10, + ZSTD_error_version_unsupported = 12, + ZSTD_error_frameParameter_unsupported = 14, + ZSTD_error_frameParameter_windowTooLarge = 16, + ZSTD_error_corruption_detected = 20, + ZSTD_error_checksum_wrong = 22, + ZSTD_error_dictionary_corrupted = 30, + ZSTD_error_dictionary_wrong = 32, + ZSTD_error_dictionaryCreation_failed = 34, + ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_outOfBound = 42, + ZSTD_error_tableLog_tooLarge = 44, + ZSTD_error_maxSymbolValue_tooLarge = 46, + ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_stage_wrong = 60, + ZSTD_error_init_missing = 62, + ZSTD_error_memory_allocation = 64, + ZSTD_error_workSpace_tooSmall = 66, + ZSTD_error_dstSize_tooSmall = 70, + ZSTD_error_srcSize_wrong = 72, + ZSTD_error_dstBuffer_null = 74, + ZSTD_error_frameIndex_tooLarge = 100, + ZSTD_error_seekableIO = 102, + ZSTD_error_dstBuffer_wrong = 104, + ZSTD_error_srcBuffer_wrong = 105, + ZSTD_error_maxCode = 120, +} ZSTD_ErrorCode; + +typedef enum { + ZSTDcs_created = 0, + ZSTDcs_init = 1, + ZSTDcs_ongoing = 2, + ZSTDcs_ending = 3, +} ZSTD_compressionStage_e; + +typedef enum { + ZSTD_f_zstd1 = 0, + ZSTD_f_zstd1_magicless = 1, +} ZSTD_format_e; + +typedef struct { + int contentSizeFlag; + int checksumFlag; + int noDictIDFlag; +} ZSTD_frameParameters; + +typedef enum { + ZSTD_dictDefaultAttach = 0, + ZSTD_dictForceAttach = 1, + ZSTD_dictForceCopy = 2, + ZSTD_dictForceLoad = 3, +} ZSTD_dictAttachPref_e; + +typedef struct { + ZSTD_paramSwitch_e enableLdm; + U32 hashLog; + U32 bucketSizeLog; + U32 minMatchLength; + U32 hashRateLog; + U32 windowLog; +} ldmParams_t; + +typedef enum { + ZSTD_bm_buffered = 0, + ZSTD_bm_stable = 1, +} ZSTD_bufferMode_e; + +typedef enum { + 
ZSTD_sf_noBlockDelimiters = 0, + ZSTD_sf_explicitBlockDelimiters = 1, +} ZSTD_sequenceFormat_e; + +struct ZSTD_CCtx_params_s { + ZSTD_format_e format; + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; + int compressionLevel; + int forceWindow; + size_t targetCBlockSize; + int srcSizeHint; + ZSTD_dictAttachPref_e attachDictPref; + ZSTD_paramSwitch_e literalCompressionMode; + int nbWorkers; + size_t jobSize; + int overlapLog; + int rsyncable; + ldmParams_t ldmParams; + int enableDedicatedDictSearch; + ZSTD_bufferMode_e inBufferMode; + ZSTD_bufferMode_e outBufferMode; + ZSTD_sequenceFormat_e blockDelimiters; + int validateSequences; + ZSTD_paramSwitch_e useBlockSplitter; + ZSTD_paramSwitch_e useRowMatchFinder; + int deterministicRefPrefix; + ZSTD_customMem customMem; +}; + +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params; + +typedef enum { + ZSTD_cwksp_alloc_objects = 0, + ZSTD_cwksp_alloc_buffers = 1, + ZSTD_cwksp_alloc_aligned = 2, +} ZSTD_cwksp_alloc_phase_e; + +typedef enum { + ZSTD_cwksp_dynamic_alloc = 0, + ZSTD_cwksp_static_alloc = 1, +} ZSTD_cwksp_static_alloc_e; + +typedef struct { + void *workspace; + void *workspaceEnd; + void *objectEnd; + void *tableEnd; + void *tableValidEnd; + void *allocStart; + BYTE allocFailed; + int workspaceOversizedDuration; + ZSTD_cwksp_alloc_phase_e phase; + ZSTD_cwksp_static_alloc_e isStatic; +} ZSTD_cwksp; + +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; + long: 32; +}; + +struct POOL_ctx_s; + +typedef struct POOL_ctx_s ZSTD_threadPool; + +typedef struct { + unsigned int offset; + unsigned int litLength; + unsigned int matchLength; + unsigned int rep; +} ZSTD_Sequence; + +typedef struct { + int collectSequences; + ZSTD_Sequence *seqStart; + size_t seqIndex; + size_t maxSequences; +} SeqCollector; + +typedef struct { + U32 offset; + U32 checksum; +} ldmEntry_t; + +typedef struct { + const BYTE *split; + U32 hash; + U32 checksum; + ldmEntry_t *bucket; +} ldmMatchCandidate_t; + +typedef struct { + ZSTD_window_t window; + ldmEntry_t *hashTable; + U32 loadedDictEnd; + BYTE *bucketOffsets; + size_t splitIndices[64]; + ldmMatchCandidate_t matchCandidates[64]; +} ldmState_t; + +typedef struct { + ZSTD_entropyCTables_t entropy; + U32 rep[3]; +} ZSTD_compressedBlockState_t; + +typedef struct { + ZSTD_compressedBlockState_t *prevCBlock; + ZSTD_compressedBlockState_t *nextCBlock; + ZSTD_matchState_t matchState; +} ZSTD_blockState_t; + +typedef enum { + ZSTDb_not_buffered = 0, + ZSTDb_buffered = 1, +} ZSTD_buffered_policy_e; + +typedef enum { + zcss_init = 0, + zcss_load = 1, + zcss_flush = 2, +} ZSTD_cStreamStage; + +struct ZSTD_inBuffer_s { + const void *src; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_inBuffer_s ZSTD_inBuffer; + +typedef enum { + ZSTD_dct_auto = 0, + ZSTD_dct_rawContent = 1, + ZSTD_dct_fullDict = 2, +} ZSTD_dictContentType_e; + +struct ZSTD_CDict_s; + +typedef struct ZSTD_CDict_s ZSTD_CDict; + +typedef struct { + void *dictBuffer; + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; + ZSTD_CDict *cdict; +} ZSTD_localDict; + +struct ZSTD_prefixDict_s { + const void *dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; +}; + +typedef struct ZSTD_prefixDict_s ZSTD_prefixDict; + +typedef enum { + set_basic = 0, + set_rle = 1, + set_compressed = 2, + set_repeat = 3, +} symbolEncodingType_e; + +typedef struct { + symbolEncodingType_e hType; + BYTE hufDesBuffer[128]; + size_t 
hufDesSize; +} ZSTD_hufCTablesMetadata_t; + +typedef struct { + symbolEncodingType_e llType; + symbolEncodingType_e ofType; + symbolEncodingType_e mlType; + BYTE fseTablesBuffer[133]; + size_t fseTablesSize; + size_t lastCountSize; +} ZSTD_fseCTablesMetadata_t; + +typedef struct { + ZSTD_hufCTablesMetadata_t hufMetadata; + ZSTD_fseCTablesMetadata_t fseMetadata; +} ZSTD_entropyCTablesMetadata_t; + +typedef struct { + seqStore_t fullSeqStoreChunk; + seqStore_t firstHalfSeqStore; + seqStore_t secondHalfSeqStore; + seqStore_t currSeqStore; + seqStore_t nextSeqStore; + U32 partitions[196]; + ZSTD_entropyCTablesMetadata_t entropyMetadata; +} ZSTD_blockSplitCtx; + +struct ZSTD_CCtx_s { + ZSTD_compressionStage_e stage; + int cParamsChanged; + int bmi2; + ZSTD_CCtx_params requestedParams; + ZSTD_CCtx_params appliedParams; + ZSTD_CCtx_params simpleApiParams; + U32 dictID; + size_t dictContentSize; + ZSTD_cwksp workspace; + size_t blockSize; + long long unsigned int pledgedSrcSizePlusOne; + long long unsigned int consumedSrcSize; + long long unsigned int producedCSize; + struct xxh64_state xxhState; + ZSTD_customMem customMem; + ZSTD_threadPool *pool; + size_t staticSize; + SeqCollector seqCollector; + int isFirstBlock; + int initialized; + seqStore_t seqStore; + ldmState_t ldmState; + rawSeq *ldmSequences; + size_t maxNbLdmSequences; + rawSeqStore_t externSeqStore; + ZSTD_blockState_t blockState; + U32 *entropyWorkspace; + ZSTD_buffered_policy_e bufferedPolicy; + char *inBuff; + size_t inBuffSize; + size_t inToCompress; + size_t inBuffPos; + size_t inBuffTarget; + char *outBuff; + size_t outBuffSize; + size_t outBuffContentSize; + size_t outBuffFlushedSize; + ZSTD_cStreamStage streamStage; + U32 frameEnded; + ZSTD_inBuffer expectedInBuffer; + size_t expectedOutBufferSize; + ZSTD_localDict localDict; + const ZSTD_CDict *cdict; + ZSTD_prefixDict prefixDict; + ZSTD_blockSplitCtx blockSplitCtx; +}; + +typedef struct ZSTD_CCtx_s ZSTD_CCtx; + +typedef struct { + U16 nextState; + BYTE nbAdditionalBits; + BYTE nbBits; + U32 baseValue; +} ZSTD_seqSymbol; + +typedef U32 HUF_DTable; + +typedef struct { + ZSTD_seqSymbol LLTable[513]; + ZSTD_seqSymbol OFTable[257]; + ZSTD_seqSymbol MLTable[513]; + HUF_DTable hufTable[4097]; + U32 rep[3]; + U32 workspace[157]; +} ZSTD_entropyDTables_t; + +typedef enum { + ZSTD_frame = 0, + ZSTD_skippableFrame = 1, +} ZSTD_frameType_e; + +typedef struct { + long long unsigned int frameContentSize; + long long unsigned int windowSize; + unsigned int blockSizeMax; + ZSTD_frameType_e frameType; + unsigned int headerSize; + unsigned int dictID; + unsigned int checksumFlag; + long: 32; +} ZSTD_frameHeader; + +typedef enum { + bt_raw = 0, + bt_rle = 1, + bt_compressed = 2, + bt_reserved = 3, +} blockType_e; + +typedef enum { + ZSTDds_getFrameHeaderSize = 0, + ZSTDds_decodeFrameHeader = 1, + ZSTDds_decodeBlockHeader = 2, + ZSTDds_decompressBlock = 3, + ZSTDds_decompressLastBlock = 4, + ZSTDds_checkChecksum = 5, + ZSTDds_decodeSkippableHeader = 6, + ZSTDds_skipFrame = 7, +} ZSTD_dStage; + +typedef enum { + ZSTD_d_validateChecksum = 0, + ZSTD_d_ignoreChecksum = 1, +} ZSTD_forceIgnoreChecksum_e; + +typedef enum { + ZSTD_use_indefinitely = -1, + ZSTD_dont_use = 0, + ZSTD_use_once = 1, +} ZSTD_dictUses_e; + +struct ZSTD_DDict_s; + +typedef struct ZSTD_DDict_s ZSTD_DDict; + +typedef struct { + const ZSTD_DDict **ddictPtrTable; + size_t ddictPtrTableSize; + size_t ddictPtrCount; +} ZSTD_DDictHashSet; + +typedef enum { + ZSTD_rmd_refSingleDDict = 0, + ZSTD_rmd_refMultipleDDicts = 1, +} 
ZSTD_refMultipleDDicts_e; + +typedef enum { + zdss_init = 0, + zdss_loadHeader = 1, + zdss_read = 2, + zdss_load = 3, + zdss_flush = 4, +} ZSTD_dStreamStage; + +struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_outBuffer_s ZSTD_outBuffer; + +typedef enum { + ZSTD_not_in_dst = 0, + ZSTD_in_dst = 1, + ZSTD_split = 2, +} ZSTD_litLocation_e; + +struct ZSTD_DCtx_s { + const ZSTD_seqSymbol *LLTptr; + const ZSTD_seqSymbol *MLTptr; + const ZSTD_seqSymbol *OFTptr; + const HUF_DTable *HUFptr; + ZSTD_entropyDTables_t entropy; + U32 workspace[640]; + const void *previousDstEnd; + const void *prefixStart; + const void *virtualStart; + const void *dictEnd; + size_t expected; + ZSTD_frameHeader fParams; + U64 processedCSize; + U64 decodedSize; + blockType_e bType; + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + struct xxh64_state xxhState; + size_t headerSize; + ZSTD_format_e format; + ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; + U32 validateChecksum; + const BYTE *litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + size_t staticSize; + ZSTD_DDict *ddictLocal; + const ZSTD_DDict *ddict; + U32 dictID; + int ddictIsCold; + ZSTD_dictUses_e dictUses; + ZSTD_DDictHashSet *ddictSet; + ZSTD_refMultipleDDicts_e refMultipleDDicts; + ZSTD_dStreamStage streamStage; + char *inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char *outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t lhSize; + U32 hostageByte; + int noForwardProgress; + ZSTD_bufferMode_e outBufferMode; + ZSTD_outBuffer expectedOutBuffer; + BYTE *litBuffer; + const BYTE *litBufferEnd; + ZSTD_litLocation_e litBufferLocation; + BYTE litExtraBuffer[65568]; + BYTE headerBuffer[18]; + size_t oversizedDuration; + long: 32; +}; + +typedef struct ZSTD_DCtx_s ZSTD_DCtx; + +typedef ZSTD_CCtx ZSTD_CStream; + +typedef ZSTD_DCtx ZSTD_DStream; + +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +typedef ZSTD_ErrorCode zstd_error_code; + +typedef ZSTD_compressionParameters zstd_compression_parameters; + +typedef ZSTD_parameters zstd_parameters; + +typedef ZSTD_inBuffer zstd_in_buffer; + +typedef ZSTD_outBuffer zstd_out_buffer; + +typedef ZSTD_CStream zstd_cstream; + +typedef ZSTD_DStream zstd_dstream; + +struct compressed_bio { + unsigned int nr_folios; + struct folio **compressed_folios; + u64 start; + unsigned int len; + unsigned int compressed_len; + u8 compress_type; + bool writeback; + union { + struct btrfs_bio *orig_bbio; + struct work_struct write_end_work; + }; + long: 32; + struct btrfs_bio bbio; +}; + +struct workspace_manager { + struct list_head idle_ws; + spinlock_t ws_lock; + int free_ws; + atomic_t total_ws; + wait_queue_head_t ws_wait; +}; + +struct btrfs_compress_op { + struct workspace_manager *workspace_manager; + unsigned int max_level; + unsigned int default_level; +}; + +struct workspace { + void *mem; + size_t size; + char *buf; + unsigned int level; + unsigned int req_level; + long unsigned int last_used; + struct list_head list; + struct list_head lru_list; + zstd_in_buffer in_buf; + zstd_out_buffer out_buf; +}; + +struct zstd_workspace_manager { + const struct btrfs_compress_op *ops; + spinlock_t lock; + struct list_head lru_list; + struct list_head idle_ws[15]; + long unsigned int active_map; + wait_queue_head_t wait; + struct timer_list timer; +}; + +enum { + BIOSET_NEED_BVECS = 1, + BIOSET_NEED_RESCUER = 2, + BIOSET_PERCPU_CACHE = 4, +}; + +enum 
btrfs_map_op { + BTRFS_MAP_READ = 0, + BTRFS_MAP_WRITE = 1, + BTRFS_MAP_GET_READ_MIRRORS = 2, +}; + +struct btrfs_failed_bio { + struct btrfs_bio *bbio; + int num_copies; + atomic_t repair_count; +}; + +struct async_submit_bio { + struct btrfs_bio *bbio; + struct btrfs_io_context *bioc; + struct btrfs_io_stripe smap; + int mirror_num; + struct btrfs_work work; + long: 32; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + unsigned int qlen; + unsigned int max_qlen; +}; + +enum { + ICQ_EXITED = 4, + ICQ_DESTROYED = 8, +}; + +struct ksignal { + struct k_sigaction ka; + kernel_siginfo_t info; + int sig; +}; + +struct io_uring_cmd { + struct file *file; + const struct io_uring_sqe *sqe; + void (*task_work_cb)(struct io_uring_cmd *, unsigned int); + u32 cmd_op; + u32 flags; + u8 pdu[32]; +}; + +enum io_uring_sqe_flags_bit { + IOSQE_FIXED_FILE_BIT = 0, + IOSQE_IO_DRAIN_BIT = 1, + IOSQE_IO_LINK_BIT = 2, + IOSQE_IO_HARDLINK_BIT = 3, + IOSQE_ASYNC_BIT = 4, + IOSQE_BUFFER_SELECT_BIT = 5, + IOSQE_CQE_SKIP_SUCCESS_BIT = 6, +}; + +struct io_sqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 flags; + __u32 dropped; + __u32 array; + __u32 resv1; + __u64 user_addr; +}; + +struct io_cqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 overflow; + __u32 cqes; + __u32 flags; + __u32 resv1; + __u64 user_addr; +}; + +struct io_uring_params { + __u32 sq_entries; + __u32 cq_entries; + __u32 flags; + __u32 sq_thread_cpu; + __u32 sq_thread_idle; + __u32 features; + __u32 wq_fd; + __u32 resv[3]; + struct io_sqring_offsets sq_off; + struct io_cqring_offsets cq_off; +}; + +enum io_uring_register_op { + IORING_REGISTER_BUFFERS = 0, + IORING_UNREGISTER_BUFFERS = 1, + IORING_REGISTER_FILES = 2, + IORING_UNREGISTER_FILES = 3, + IORING_REGISTER_EVENTFD = 4, + IORING_UNREGISTER_EVENTFD = 5, + IORING_REGISTER_FILES_UPDATE = 6, + IORING_REGISTER_EVENTFD_ASYNC = 7, + IORING_REGISTER_PROBE = 8, + IORING_REGISTER_PERSONALITY = 9, + IORING_UNREGISTER_PERSONALITY = 10, + IORING_REGISTER_RESTRICTIONS = 11, + IORING_REGISTER_ENABLE_RINGS = 12, + IORING_REGISTER_FILES2 = 13, + IORING_REGISTER_FILES_UPDATE2 = 14, + IORING_REGISTER_BUFFERS2 = 15, + IORING_REGISTER_BUFFERS_UPDATE = 16, + IORING_REGISTER_IOWQ_AFF = 17, + IORING_UNREGISTER_IOWQ_AFF = 18, + IORING_REGISTER_IOWQ_MAX_WORKERS = 19, + IORING_REGISTER_RING_FDS = 20, + IORING_UNREGISTER_RING_FDS = 21, + IORING_REGISTER_PBUF_RING = 22, + IORING_UNREGISTER_PBUF_RING = 23, + IORING_REGISTER_SYNC_CANCEL = 24, + IORING_REGISTER_FILE_ALLOC_RANGE = 25, + IORING_REGISTER_PBUF_STATUS = 26, + IORING_REGISTER_NAPI = 27, + IORING_UNREGISTER_NAPI = 28, + IORING_REGISTER_CLOCK = 29, + IORING_REGISTER_CLONE_BUFFERS = 30, + IORING_REGISTER_SEND_MSG_RING = 31, + IORING_REGISTER_RESIZE_RINGS = 33, + IORING_REGISTER_MEM_REGION = 34, + IORING_REGISTER_LAST = 35, + IORING_REGISTER_USE_REGISTERED_RING = 2147483648, +}; + +enum { + IORING_REG_WAIT_TS = 1, +}; + +struct io_uring_reg_wait { + struct __kernel_timespec ts; + __u32 min_wait_usec; + __u32 flags; + __u64 sigmask; + __u32 sigmask_sz; + __u32 pad[3]; + __u64 pad2[2]; +}; + +struct io_uring_getevents_arg { + __u64 sigmask; + __u32 sigmask_sz; + __u32 min_wait_usec; + __u64 ts; +}; + +enum { + IOU_F_TWQ_LAZY_WAKE = 1, +}; + +enum { + REQ_F_FIXED_FILE_BIT = 0, + REQ_F_IO_DRAIN_BIT = 1, + REQ_F_LINK_BIT = 2, + REQ_F_HARDLINK_BIT = 3, + REQ_F_FORCE_ASYNC_BIT = 4, + REQ_F_BUFFER_SELECT_BIT = 5, + REQ_F_CQE_SKIP_BIT = 6, + 
REQ_F_FAIL_BIT = 8, + REQ_F_INFLIGHT_BIT = 9, + REQ_F_CUR_POS_BIT = 10, + REQ_F_NOWAIT_BIT = 11, + REQ_F_LINK_TIMEOUT_BIT = 12, + REQ_F_NEED_CLEANUP_BIT = 13, + REQ_F_POLLED_BIT = 14, + REQ_F_HYBRID_IOPOLL_STATE_BIT = 15, + REQ_F_BUFFER_SELECTED_BIT = 16, + REQ_F_BUFFER_RING_BIT = 17, + REQ_F_REISSUE_BIT = 18, + REQ_F_CREDS_BIT = 19, + REQ_F_REFCOUNT_BIT = 20, + REQ_F_ARM_LTIMEOUT_BIT = 21, + REQ_F_ASYNC_DATA_BIT = 22, + REQ_F_SKIP_LINK_CQES_BIT = 23, + REQ_F_SINGLE_POLL_BIT = 24, + REQ_F_DOUBLE_POLL_BIT = 25, + REQ_F_APOLL_MULTISHOT_BIT = 26, + REQ_F_CLEAR_POLLIN_BIT = 27, + REQ_F_SUPPORT_NOWAIT_BIT = 28, + REQ_F_ISREG_BIT = 29, + REQ_F_POLL_NO_LAZY_BIT = 30, + REQ_F_CAN_POLL_BIT = 31, + REQ_F_BL_EMPTY_BIT = 32, + REQ_F_BL_NO_RECYCLE_BIT = 33, + REQ_F_BUFFERS_COMMIT_BIT = 34, + REQ_F_BUF_NODE_BIT = 35, + __REQ_F_LAST_BIT = 36, +}; + +struct trace_event_raw_io_uring_create { + struct trace_entry ent; + int fd; + void *ctx; + u32 sq_entries; + u32 cq_entries; + u32 flags; + char __data[0]; +}; + +struct trace_event_raw_io_uring_register { + struct trace_entry ent; + void *ctx; + unsigned int opcode; + unsigned int nr_files; + unsigned int nr_bufs; + long int ret; + char __data[0]; +}; + +struct trace_event_raw_io_uring_file_get { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int fd; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_queue_async_work { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + u8 opcode; + long: 32; + long long unsigned int flags; + struct io_wq_work *work; + int rw; + u32 __data_loc_op_str; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_defer { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int data; + u8 opcode; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_link { + struct trace_entry ent; + void *ctx; + void *req; + void *target_req; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqring_wait { + struct trace_entry ent; + void *ctx; + int min_events; + char __data[0]; +}; + +struct trace_event_raw_io_uring_fail_link { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + void *link; + u32 __data_loc_op_str; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_complete { + struct trace_entry ent; + void *ctx; + void *req; + u64 user_data; + int res; + unsigned int cflags; + u64 extra1; + u64 extra2; + char __data[0]; +}; + +struct trace_event_raw_io_uring_submit_req { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + long: 32; + long long unsigned int flags; + bool sq_thread; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_poll_arm { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + int mask; + int events; + u32 __data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_add { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + int mask; + u32 __data_loc_op_str; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_req_failed { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int user_data; + u8 opcode; + u8 flags; + u8 ioprio; + long: 32; + u64 off; + u64 addr; + u32 len; + u32 op_flags; + u16 buf_index; + u16 personality; + u32 file_index; + u64 pad1; + u64 addr3; + int error; + u32 
__data_loc_op_str; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqe_overflow { + struct trace_entry ent; + void *ctx; + long: 32; + long long unsigned int user_data; + s32 res; + u32 cflags; + void *ocqe; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_io_uring_task_work_run { + struct trace_entry ent; + void *tctx; + unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_io_uring_short_write { + struct trace_entry ent; + void *ctx; + long: 32; + u64 fpos; + u64 wanted; + u64 got; + char __data[0]; +}; + +struct trace_event_raw_io_uring_local_work_run { + struct trace_entry ent; + void *ctx; + int count; + unsigned int loops; + char __data[0]; +}; + +struct trace_event_data_offsets_io_uring_create {}; + +struct trace_event_data_offsets_io_uring_register {}; + +struct trace_event_data_offsets_io_uring_file_get {}; + +struct trace_event_data_offsets_io_uring_queue_async_work { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_defer { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_link {}; + +struct trace_event_data_offsets_io_uring_cqring_wait {}; + +struct trace_event_data_offsets_io_uring_fail_link { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_complete {}; + +struct trace_event_data_offsets_io_uring_submit_req { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_poll_arm { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_task_add { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_req_failed { + u32 op_str; + const void *op_str_ptr_; +}; + +struct trace_event_data_offsets_io_uring_cqe_overflow {}; + +struct trace_event_data_offsets_io_uring_task_work_run {}; + +struct trace_event_data_offsets_io_uring_short_write {}; + +struct trace_event_data_offsets_io_uring_local_work_run {}; + +typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32); + +typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, unsigned int, long int); + +typedef void (*btf_trace_io_uring_file_get)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_queue_async_work)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_defer)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int); + +typedef void (*btf_trace_io_uring_fail_link)(void *, struct io_kiocb *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_complete)(void *, struct io_ring_ctx *, void *, struct io_uring_cqe *); + +typedef void (*btf_trace_io_uring_submit_req)(void *, struct io_kiocb *); + +typedef void (*btf_trace_io_uring_poll_arm)(void *, struct io_kiocb *, int, int); + +typedef void (*btf_trace_io_uring_task_add)(void *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_req_failed)(void *, const struct io_uring_sqe *, struct io_kiocb *, int); + +typedef void (*btf_trace_io_uring_cqe_overflow)(void *, void *, long long unsigned int, s32, u32, void *); + +typedef void (*btf_trace_io_uring_task_work_run)(void *, void *, unsigned int); + +typedef void (*btf_trace_io_uring_short_write)(void *, void *, u64, u64, u64); + +typedef void (*btf_trace_io_uring_local_work_run)(void *, void *, int, unsigned int); + +enum { + IO_WQ_WORK_CANCEL = 1, + 
IO_WQ_WORK_HASHED = 2, + IO_WQ_WORK_UNBOUND = 4, + IO_WQ_WORK_CONCURRENT = 16, + IO_WQ_HASH_SHIFT = 24, +}; + +enum io_wq_cancel { + IO_WQ_CANCEL_OK = 0, + IO_WQ_CANCEL_RUNNING = 1, + IO_WQ_CANCEL_NOTFOUND = 2, +}; + +typedef bool work_cancel_fn(struct io_wq_work *, void *); + +struct io_wait_queue { + struct wait_queue_entry wq; + struct io_ring_ctx *ctx; + unsigned int cq_tail; + unsigned int cq_min_tail; + unsigned int nr_timeouts; + int hit_timeout; + ktime_t min_timeout; + ktime_t timeout; + struct hrtimer t; + ktime_t napi_busy_poll_dt; + bool napi_prefer_busy_poll; + long: 32; +}; + +enum { + IO_CHECK_CQ_OVERFLOW_BIT = 0, + IO_CHECK_CQ_DROPPED_BIT = 1, +}; + +struct io_tctx_node { + struct list_head ctx_node; + struct task_struct *task; + struct io_ring_ctx *ctx; +}; + +enum { + IOBL_BUF_RING = 1, + IOBL_MMAP = 2, + IOBL_INC = 4, +}; + +enum { + IO_APOLL_OK = 0, + IO_APOLL_ABORTED = 1, + IO_APOLL_READY = 2, +}; + +struct io_defer_entry { + struct list_head list; + struct io_kiocb *req; + u32 seq; +}; + +struct ext_arg { + size_t argsz; + long: 32; + struct timespec64 ts; + const sigset_t *sig; + long: 32; + ktime_t min_time; + bool ts_set; + long: 32; +}; + +struct io_tctx_exit { + struct callback_head task_work; + struct completion completion; + struct io_ring_ctx *ctx; +}; + +struct io_task_cancel { + struct io_uring_task *tctx; + bool all; +}; + +struct creds; + +enum { + IRQ_DOMAIN_FLAG_HIERARCHY = 1, + IRQ_DOMAIN_NAME_ALLOCATED = 2, + IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, + IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, + IRQ_DOMAIN_FLAG_MSI = 16, + IRQ_DOMAIN_FLAG_ISOLATED_MSI = 32, + IRQ_DOMAIN_FLAG_NO_MAP = 64, + IRQ_DOMAIN_FLAG_MSI_PARENT = 256, + IRQ_DOMAIN_FLAG_MSI_DEVICE = 512, + IRQ_DOMAIN_FLAG_DESTROY_GC = 1024, + IRQ_DOMAIN_FLAG_NONCORE = 65536, +}; + +enum { + IRQCHIP_SET_TYPE_MASKED = 1, + IRQCHIP_EOI_IF_HANDLED = 2, + IRQCHIP_MASK_ON_SUSPEND = 4, + IRQCHIP_ONOFFLINE_ENABLED = 8, + IRQCHIP_SKIP_SET_WAKE = 16, + IRQCHIP_ONESHOT_SAFE = 32, + IRQCHIP_EOI_THREADED = 64, + IRQCHIP_SUPPORTS_LEVEL_MSI = 128, + IRQCHIP_SUPPORTS_NMI = 256, + IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = 512, + IRQCHIP_AFFINITY_PRE_STARTUP = 1024, + IRQCHIP_IMMUTABLE = 2048, +}; + +struct msi_alloc_info { + struct msi_desc *desc; + irq_hw_number_t hwirq; + long unsigned int flags; + union { + long unsigned int ul; + void *ptr; + } scratchpad[2]; +}; + +typedef struct msi_alloc_info msi_alloc_info_t; + +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *); + int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *); + void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); + int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); + void (*prepare_desc)(struct irq_domain *, msi_alloc_info_t *, struct msi_desc *); + void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); + int (*domain_alloc_irqs)(struct irq_domain *, struct device *, int); + void (*domain_free_irqs)(struct irq_domain *, struct device *); + void (*msi_post_free)(struct irq_domain *, struct device *); + int (*msi_translate)(struct irq_domain *, struct irq_fwspec *, irq_hw_number_t *, unsigned int *); +}; + +struct msi_domain_info { + u32 flags; + enum irq_domain_bus_token bus_token; + unsigned int hwsize; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +struct msi_domain_template { + 
char name[48]; + struct irq_chip chip; + struct msi_domain_ops ops; + struct msi_domain_info info; +}; + +typedef unsigned char u_char; + +typedef short unsigned int u_short; + +struct vt_mode { + char mode; + char waitv; + short int relsig; + short int acqsig; + short int frsig; +}; + +struct console_font { + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +enum vc_intensity { + VCI_HALF_BRIGHT = 0, + VCI_NORMAL = 1, + VCI_BOLD = 2, + VCI_MASK = 3, +}; + +struct vc_state { + unsigned int x; + unsigned int y; + unsigned char color; + unsigned char Gx_charset[2]; + unsigned int charset: 1; + enum vc_intensity intensity; + bool italic; + bool underline; + bool blink; + bool reverse; +}; + +struct consw; + +struct uni_pagedict; + +struct vc_data { + struct tty_port port; + struct vc_state state; + struct vc_state saved_state; + short unsigned int vc_num; + unsigned int vc_cols; + unsigned int vc_rows; + unsigned int vc_size_row; + unsigned int vc_scan_lines; + unsigned int vc_cell_height; + long unsigned int vc_origin; + long unsigned int vc_scr_end; + long unsigned int vc_visible_origin; + unsigned int vc_top; + unsigned int vc_bottom; + const struct consw *vc_sw; + short unsigned int *vc_screenbuf; + unsigned int vc_screenbuf_size; + unsigned char vc_mode; + unsigned char vc_attr; + unsigned char vc_def_color; + unsigned char vc_ulcolor; + unsigned char vc_itcolor; + unsigned char vc_halfcolor; + unsigned int vc_cursor_type; + short unsigned int vc_complement_mask; + short unsigned int vc_s_complement_mask; + long unsigned int vc_pos; + short unsigned int vc_hi_font_mask; + struct console_font vc_font; + short unsigned int vc_video_erase_char; + unsigned int vc_state; + unsigned int vc_npar; + unsigned int vc_par[16]; + struct vt_mode vt_mode; + struct pid *vt_pid; + int vt_newvt; + wait_queue_head_t paste_wait; + unsigned int vc_disp_ctrl: 1; + unsigned int vc_toggle_meta: 1; + unsigned int vc_decscnm: 1; + unsigned int vc_decom: 1; + unsigned int vc_decawm: 1; + unsigned int vc_deccm: 1; + unsigned int vc_decim: 1; + unsigned int vc_priv: 3; + unsigned int vc_need_wrap: 1; + unsigned int vc_can_do_color: 1; + unsigned int vc_report_mouse: 2; + unsigned char vc_utf: 1; + unsigned char vc_utf_count; + int vc_utf_char; + long unsigned int vc_tab_stop[8]; + unsigned char vc_palette[48]; + short unsigned int *vc_translate; + unsigned int vc_bell_pitch; + unsigned int vc_bell_duration; + short unsigned int vc_cur_blink_ms; + struct vc_data **vc_display_fg; + struct uni_pagedict *uni_pagedict; + struct uni_pagedict **uni_pagedict_loc; + u32 **vc_uni_lines; +}; + +enum con_scroll { + SM_UP = 0, + SM_DOWN = 1, +}; + +struct consw { + struct module *owner; + const char * (*con_startup)(); + void (*con_init)(struct vc_data *, bool); + void (*con_deinit)(struct vc_data *); + void (*con_clear)(struct vc_data *, unsigned int, unsigned int, unsigned int); + void (*con_putc)(struct vc_data *, u16, unsigned int, unsigned int); + void (*con_putcs)(struct vc_data *, const u16 *, unsigned int, unsigned int, unsigned int); + void (*con_cursor)(struct vc_data *, bool); + bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int); + bool (*con_switch)(struct vc_data *); + bool (*con_blank)(struct vc_data *, enum vesa_blank_mode, bool); + int (*con_font_set)(struct vc_data *, const struct console_font *, unsigned int, unsigned int); + int (*con_font_get)(struct vc_data *, struct console_font *, unsigned int); + int 
(*con_font_default)(struct vc_data *, struct console_font *, const char *); + int (*con_resize)(struct vc_data *, unsigned int, unsigned int, bool); + void (*con_set_palette)(struct vc_data *, const unsigned char *); + void (*con_scrolldelta)(struct vc_data *, int); + bool (*con_set_origin)(struct vc_data *); + void (*con_save_screen)(struct vc_data *); + u8 (*con_build_attr)(struct vc_data *, u8, enum vc_intensity, bool, bool, bool, bool); + void (*con_invert_region)(struct vc_data *, u16 *, int); + void (*con_debug_enter)(struct vc_data *); + void (*con_debug_leave)(struct vc_data *); +}; + +struct fbcon_display { + const u_char *fontdata; + int userfont; + u_short inverse; + short int yscroll; + int vrows; + int cursor_shape; + int con_rotate; + u32 xres_virtual; + u32 yres_virtual; + u32 height; + u32 width; + u32 bits_per_pixel; + u32 grayscale; + u32 nonstd; + u32 accel_flags; + u32 rotate; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + const struct fb_videomode *mode; +}; + +struct fbcon_ops { + void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int); + void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int); + void (*putcs)(struct vc_data *, struct fb_info *, const short unsigned int *, int, int, int, int, int); + void (*clear_margins)(struct vc_data *, struct fb_info *, int, int); + void (*cursor)(struct vc_data *, struct fb_info *, bool, int, int); + int (*update_start)(struct fb_info *); + int (*rotate_font)(struct fb_info *, struct vc_data *); + struct fb_var_screeninfo var; + struct delayed_work cursor_work; + struct fb_cursor cursor_state; + struct fbcon_display *p; + struct fb_info *info; + int currcon; + int cur_blink_jiffies; + int cursor_flash; + int cursor_reset; + int blank_state; + int graphics; + int save_graphics; + bool initialized; + int rotate; + int cur_rotate; + char *cursor_data; + u8 *fontbuffer; + u8 *fontdata; + u8 *cursor_src; + u32 cursor_size; + u32 fd_size; +}; + +typedef short unsigned int ushort; + +struct console_font_op { + unsigned int op; + unsigned int flags; + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct vc { + struct vc_data *d; + struct work_struct SAK_work; +}; + +enum translation_map { + LAT1_MAP = 0, + GRAF_MAP = 1, + IBMPC_MAP = 2, + USER_MAP = 3, + FIRST_MAP = 0, + LAST_MAP = 3, +}; + +struct vt_notifier_param { + struct vc_data *vc; + unsigned int c; +}; + +struct tiocl_selection { + short unsigned int xs; + short unsigned int ys; + short unsigned int xe; + short unsigned int ye; + short unsigned int sel_mode; +}; + +struct con_driver { + const struct consw *con; + const char *desc; + struct device *dev; + int node; + int first; + int last; + int flag; +}; + +enum { + blank_off = 0, + blank_normal_wait = 1, + blank_vesa_wait = 2, +}; + +enum { + EPecma = 0, + EPdec = 1, + EPeq = 2, + EPgt = 3, + EPlt = 4, +}; + +enum CSI_J { + CSI_J_CURSOR_TO_END = 0, + CSI_J_START_TO_CURSOR = 1, + CSI_J_VISIBLE = 2, + CSI_J_FULL = 3, +}; + +enum { + CSI_K_CURSOR_TO_LINEEND = 0, + CSI_K_LINESTART_TO_CURSOR = 1, + CSI_K_LINE = 2, +}; + +struct rgb { + u8 r; + u8 g; + u8 b; +}; + +enum { + CSI_m_DEFAULT = 0, + CSI_m_BOLD = 1, + CSI_m_HALF_BRIGHT = 2, + CSI_m_ITALIC = 3, + CSI_m_UNDERLINE = 4, + CSI_m_BLINK = 5, + CSI_m_REVERSE = 7, + CSI_m_PRI_FONT = 10, + CSI_m_ALT_FONT1 = 11, + CSI_m_ALT_FONT2 = 12, + CSI_m_DOUBLE_UNDERLINE = 21, + CSI_m_NORMAL_INTENSITY = 22, + CSI_m_NO_ITALIC = 23, + 
CSI_m_NO_UNDERLINE = 24, + CSI_m_NO_BLINK = 25, + CSI_m_NO_REVERSE = 27, + CSI_m_FG_COLOR_BEG = 30, + CSI_m_FG_COLOR_END = 37, + CSI_m_FG_COLOR = 38, + CSI_m_DEFAULT_FG_COLOR = 39, + CSI_m_BG_COLOR_BEG = 40, + CSI_m_BG_COLOR_END = 47, + CSI_m_BG_COLOR = 48, + CSI_m_DEFAULT_BG_COLOR = 49, + CSI_m_BRIGHT_FG_COLOR_BEG = 90, + CSI_m_BRIGHT_FG_COLOR_END = 97, + CSI_m_BRIGHT_FG_COLOR_OFF = 60, + CSI_m_BRIGHT_BG_COLOR_BEG = 100, + CSI_m_BRIGHT_BG_COLOR_END = 107, + CSI_m_BRIGHT_BG_COLOR_OFF = 60, +}; + +enum { + CSI_DEC_hl_CURSOR_KEYS = 1, + CSI_DEC_hl_132_COLUMNS = 3, + CSI_DEC_hl_REVERSE_VIDEO = 5, + CSI_DEC_hl_ORIGIN_MODE = 6, + CSI_DEC_hl_AUTOWRAP = 7, + CSI_DEC_hl_AUTOREPEAT = 8, + CSI_DEC_hl_MOUSE_X10 = 9, + CSI_DEC_hl_SHOW_CURSOR = 25, + CSI_DEC_hl_MOUSE_VT200 = 1000, +}; + +enum { + CSI_hl_DISPLAY_CTRL = 3, + CSI_hl_INSERT = 4, + CSI_hl_AUTO_NL = 20, +}; + +enum CSI_right_square_bracket { + CSI_RSB_COLOR_FOR_UNDERLINE = 1, + CSI_RSB_COLOR_FOR_HALF_BRIGHT = 2, + CSI_RSB_MAKE_CUR_COLOR_DEFAULT = 8, + CSI_RSB_BLANKING_INTERVAL = 9, + CSI_RSB_BELL_FREQUENCY = 10, + CSI_RSB_BELL_DURATION = 11, + CSI_RSB_BRING_CONSOLE_TO_FRONT = 12, + CSI_RSB_UNBLANK = 13, + CSI_RSB_VESA_OFF_INTERVAL = 14, + CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT = 15, + CSI_RSB_CURSOR_BLINK_INTERVAL = 16, +}; + +enum vc_ctl_state { + ESnormal = 0, + ESesc = 1, + ESsquare = 2, + ESgetpars = 3, + ESfunckey = 4, + EShash = 5, + ESsetG0 = 6, + ESsetG1 = 7, + ESpercent = 8, + EScsiignore = 9, + ESnonstd = 10, + ESpalette = 11, + ESosc = 12, + ESANSI_first = 12, + ESapc = 13, + ESpm = 14, + ESdcs = 15, + ESANSI_last = 15, +}; + +enum { + ASCII_NULL = 0, + ASCII_BELL = 7, + ASCII_BACKSPACE = 8, + ASCII_IGNORE_FIRST = 8, + ASCII_HTAB = 9, + ASCII_LINEFEED = 10, + ASCII_VTAB = 11, + ASCII_FORMFEED = 12, + ASCII_CAR_RET = 13, + ASCII_IGNORE_LAST = 13, + ASCII_SHIFTOUT = 14, + ASCII_SHIFTIN = 15, + ASCII_CANCEL = 24, + ASCII_SUBSTITUTE = 26, + ASCII_ESCAPE = 27, + ASCII_CSI_IGNORE_FIRST = 32, + ASCII_CSI_IGNORE_LAST = 63, + ASCII_DEL = 127, + ASCII_EXT_CSI = 155, +}; + +struct interval { + uint32_t first; + uint32_t last; +}; + +struct vc_draw_region { + long unsigned int from; + long unsigned int to; + int x; +}; + +struct request_sense; + +struct cdrom_generic_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct request_sense *sense; + unsigned char data_direction; + int quiet; + int timeout; + union { + void *reserved[1]; + void *unused; + }; +}; + +struct request_sense { + __u8 valid: 1; + __u8 error_code: 7; + __u8 segment_number; + __u8 reserved1: 2; + __u8 ili: 1; + __u8 reserved2: 1; + __u8 sense_key: 4; + __u8 information[4]; + __u8 add_sense_len; + __u8 command_info[4]; + __u8 asc; + __u8 ascq; + __u8 fruc; + __u8 sks[3]; + __u8 asb[46]; +}; + +struct scsi_ioctl_command { + unsigned int inlen; + unsigned int outlen; + unsigned char data[0]; +}; + +struct scsi_idlun { + __u32 dev_id; + __u32 host_unique_id; +}; + +enum dmi_field { + DMI_NONE = 0, + DMI_BIOS_VENDOR = 1, + DMI_BIOS_VERSION = 2, + DMI_BIOS_DATE = 3, + DMI_BIOS_RELEASE = 4, + DMI_EC_FIRMWARE_RELEASE = 5, + DMI_SYS_VENDOR = 6, + DMI_PRODUCT_NAME = 7, + DMI_PRODUCT_VERSION = 8, + DMI_PRODUCT_SERIAL = 9, + DMI_PRODUCT_UUID = 10, + DMI_PRODUCT_SKU = 11, + DMI_PRODUCT_FAMILY = 12, + DMI_BOARD_VENDOR = 13, + DMI_BOARD_NAME = 14, + DMI_BOARD_VERSION = 15, + DMI_BOARD_SERIAL = 16, + DMI_BOARD_ASSET_TAG = 17, + DMI_CHASSIS_VENDOR = 18, + DMI_CHASSIS_TYPE = 19, + DMI_CHASSIS_VERSION = 20, + DMI_CHASSIS_SERIAL = 21, + 
DMI_CHASSIS_ASSET_TAG = 22, + DMI_STRING_MAX = 23, + DMI_OEM_STRING = 24, +}; + +struct pci_bits { + unsigned int reg; + unsigned int width; + long unsigned int mask; + long unsigned int val; +}; + +enum { + PIIX_IOCFG = 84, + ICH5_PMR = 144, + ICH5_PCS = 146, + PIIX_SIDPR_BAR = 5, + PIIX_SIDPR_LEN = 16, + PIIX_SIDPR_IDX = 0, + PIIX_SIDPR_DATA = 4, + PIIX_FLAG_CHECKINTR = 268435456, + PIIX_FLAG_SIDPR = 536870912, + PIIX_PATA_FLAGS = 1, + PIIX_SATA_FLAGS = 268435458, + PIIX_FLAG_PIO16 = 1073741824, + PIIX_80C_PRI = 48, + PIIX_80C_SEC = 192, + P0 = 0, + P1 = 1, + P2 = 2, + P3 = 3, + IDE = -1, + NA = -2, + RV = -3, + PIIX_AHCI_DEVICE = 6, + PIIX_HOST_BROKEN_SUSPEND = 16777216, +}; + +enum piix_controller_ids { + piix_pata_mwdma = 0, + piix_pata_33 = 1, + ich_pata_33 = 2, + ich_pata_66 = 3, + ich_pata_100 = 4, + ich_pata_100_nomwdma1 = 5, + ich5_sata = 6, + ich6_sata = 7, + ich6m_sata = 8, + ich8_sata = 9, + ich8_2port_sata = 10, + ich8m_apple_sata = 11, + tolapai_sata = 12, + piix_pata_vmw = 13, + ich8_sata_snb = 14, + ich8_2port_sata_snb = 15, + ich8_2port_sata_byt = 16, +}; + +struct piix_map_db { + const u32 mask; + const u16 port_enable; + const int map[0]; +}; + +struct piix_host_priv { + const int *map; + u32 saved_iocfg; + void *sidpr; +}; + +struct ich_laptop { + u16 device; + u16 subvendor; + u16 subdevice; +}; + +enum { + VETH_INFO_UNSPEC = 0, + VETH_INFO_PEER = 1, + __VETH_INFO_MAX = 2, +}; + +struct page_pool_params { + union { + struct { + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; + }; + struct page_pool_params_fast fast; + }; + union { + struct { + struct net_device *netdev; + unsigned int queue_idx; + unsigned int flags; + void (*init_callback)(netmem_ref, void *); + void *init_arg; + }; + struct page_pool_params_slow slow; + }; +}; + +struct veth_stats { + u64 rx_drops; + u64 xdp_packets; + u64 xdp_bytes; + u64 xdp_redirect; + u64 xdp_drops; + u64 xdp_tx; + u64 xdp_tx_err; + u64 peer_tq_xdp_xmit; + u64 peer_tq_xdp_xmit_err; +}; + +struct veth_rq_stats { + struct veth_stats vs; + struct u64_stats_sync syncp; + long: 32; +}; + +struct veth_rq { + struct napi_struct xdp_napi; + struct napi_struct *napi; + struct net_device *dev; + struct bpf_prog *xdp_prog; + struct xdp_mem_info xdp_mem; + long: 32; + struct veth_rq_stats stats; + bool rx_notify_masked; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct ptr_ring xdp_ring; + struct xdp_rxq_info xdp_rxq; + struct page_pool *page_pool; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct veth_priv { + struct net_device *peer; + long: 32; + atomic64_t dropped; + struct bpf_prog *_xdp_prog; + struct veth_rq *rq; + unsigned int requested_headroom; + long: 32; +}; + +struct veth_xdp_tx_bq { + struct xdp_frame *q[16]; + unsigned int count; +}; + +struct veth_q_stat_desc { + char desc[32]; + size_t offset; +}; + +struct veth_xdp_buff { + struct xdp_buff xdp; + struct sk_buff *skb; +}; + +struct ep_device { + struct usb_endpoint_descriptor *desc; + struct usb_device *udev; + struct device dev; +}; + +struct ehci_stats { + long unsigned int normal; + long unsigned int error; + long unsigned int iaa; + long unsigned int lost_iaa; + long unsigned int complete; + long unsigned int unlink; +}; + +struct ehci_per_sched { + struct usb_device *udev; + struct usb_host_endpoint *ep; + struct list_head ps_list; + u16 
tt_usecs; + u16 cs_mask; + u16 period; + u16 phase; + u8 bw_phase; + u8 phase_uf; + u8 usecs; + u8 c_usecs; + u8 bw_uperiod; + u8 bw_period; +}; + +enum ehci_rh_state { + EHCI_RH_HALTED = 0, + EHCI_RH_SUSPENDED = 1, + EHCI_RH_RUNNING = 2, + EHCI_RH_STOPPING = 3, +}; + +enum ehci_hrtimer_event { + EHCI_HRTIMER_POLL_ASS = 0, + EHCI_HRTIMER_POLL_PSS = 1, + EHCI_HRTIMER_POLL_DEAD = 2, + EHCI_HRTIMER_UNLINK_INTR = 3, + EHCI_HRTIMER_FREE_ITDS = 4, + EHCI_HRTIMER_ACTIVE_UNLINK = 5, + EHCI_HRTIMER_START_UNLINK_INTR = 6, + EHCI_HRTIMER_ASYNC_UNLINKS = 7, + EHCI_HRTIMER_IAA_WATCHDOG = 8, + EHCI_HRTIMER_DISABLE_PERIODIC = 9, + EHCI_HRTIMER_DISABLE_ASYNC = 10, + EHCI_HRTIMER_IO_WATCHDOG = 11, + EHCI_HRTIMER_NUM_EVENTS = 12, +}; + +struct ehci_caps; + +struct ehci_regs; + +struct ehci_dbg_port; + +struct ehci_qh; + +union ehci_shadow; + +struct ehci_itd; + +struct ehci_sitd; + +struct ehci_hcd { + enum ehci_hrtimer_event next_hrtimer_event; + unsigned int enabled_hrtimer_events; + ktime_t hr_timeouts[12]; + struct hrtimer hrtimer; + int PSS_poll_count; + int ASS_poll_count; + int died_poll_count; + struct ehci_caps *caps; + struct ehci_regs *regs; + struct ehci_dbg_port *debug; + __u32 hcs_params; + spinlock_t lock; + enum ehci_rh_state rh_state; + bool scanning: 1; + bool need_rescan: 1; + bool intr_unlinking: 1; + bool iaa_in_progress: 1; + bool async_unlinking: 1; + bool shutdown: 1; + struct ehci_qh *qh_scan_next; + struct ehci_qh *async; + struct ehci_qh *dummy; + struct list_head async_unlink; + struct list_head async_idle; + unsigned int async_unlink_cycle; + unsigned int async_count; + __le32 old_current; + __le32 old_token; + unsigned int periodic_size; + __le32 *periodic; + dma_addr_t periodic_dma; + struct list_head intr_qh_list; + unsigned int i_thresh; + union ehci_shadow *pshadow; + struct list_head intr_unlink_wait; + struct list_head intr_unlink; + unsigned int intr_unlink_wait_cycle; + unsigned int intr_unlink_cycle; + unsigned int now_frame; + unsigned int last_iso_frame; + unsigned int intr_count; + unsigned int isoc_count; + unsigned int periodic_count; + unsigned int uframe_periodic_max; + struct list_head cached_itd_list; + struct ehci_itd *last_itd_to_free; + struct list_head cached_sitd_list; + struct ehci_sitd *last_sitd_to_free; + long unsigned int reset_done[15]; + long unsigned int bus_suspended; + long unsigned int companion_ports; + long unsigned int owned_ports; + long unsigned int port_c_suspend; + long unsigned int suspended_ports; + long unsigned int resuming_ports; + struct dma_pool *qh_pool; + struct dma_pool *qtd_pool; + struct dma_pool *itd_pool; + struct dma_pool *sitd_pool; + unsigned int random_frame; + long unsigned int next_statechange; + long: 32; + ktime_t last_periodic_enable; + u32 command; + unsigned int no_selective_suspend: 1; + unsigned int has_fsl_port_bug: 1; + unsigned int has_fsl_hs_errata: 1; + unsigned int has_fsl_susp_errata: 1; + unsigned int has_ci_pec_bug: 1; + unsigned int big_endian_mmio: 1; + unsigned int big_endian_desc: 1; + unsigned int big_endian_capbase: 1; + unsigned int has_amcc_usb23: 1; + unsigned int need_io_watchdog: 1; + unsigned int amd_pll_fix: 1; + unsigned int use_dummy_qh: 1; + unsigned int has_synopsys_hc_bug: 1; + unsigned int frame_index_bug: 1; + unsigned int need_oc_pp_cycle: 1; + unsigned int imx28_write_fix: 1; + unsigned int spurious_oc: 1; + unsigned int is_aspeed: 1; + unsigned int zx_wakeup_clear_needed: 1; + __le32 *ohci_hcctrl_reg; + unsigned int has_hostpc: 1; + unsigned int has_tdi_phy_lpm: 1; + unsigned int 
has_ppcd: 1; + u8 sbrn; + struct ehci_stats stats; + struct dentry *debug_dir; + u8 bandwidth[64]; + u8 tt_budget[64]; + struct list_head tt_list; + long: 32; + long unsigned int priv[0]; +}; + +struct ehci_caps { + u32 hc_capbase; + u32 hcs_params; + u32 hcc_params; + u8 portroute[8]; +}; + +struct ehci_regs { + u32 command; + u32 status; + u32 intr_enable; + u32 frame_index; + u32 segment; + u32 frame_list; + u32 async_next; + u32 reserved1[2]; + u32 txfill_tuning; + u32 reserved2[6]; + u32 configured_flag; + union { + u32 port_status[15]; + struct { + u32 reserved3[9]; + u32 usbmode; + }; + }; + union { + struct { + u32 reserved4; + u32 hostpc[15]; + }; + u32 brcm_insnreg[4]; + }; + u32 reserved5[2]; + u32 usbmode_ex; +}; + +struct ehci_dbg_port { + u32 control; + u32 pids; + u32 data03; + u32 data47; + u32 address; +}; + +struct ehci_fstn; + +union ehci_shadow { + struct ehci_qh *qh; + struct ehci_itd *itd; + struct ehci_sitd *sitd; + struct ehci_fstn *fstn; + __le32 *hw_next; + void *ptr; +}; + +struct ehci_qh_hw; + +struct ehci_qtd; + +struct ehci_qh { + struct ehci_qh_hw *hw; + dma_addr_t qh_dma; + union ehci_shadow qh_next; + struct list_head qtd_list; + struct list_head intr_node; + struct ehci_qtd *dummy; + struct list_head unlink_node; + struct ehci_per_sched ps; + unsigned int unlink_cycle; + u8 qh_state; + u8 xacterrs; + u8 unlink_reason; + u8 gap_uf; + unsigned int is_out: 1; + unsigned int clearing_tt: 1; + unsigned int dequeue_during_giveback: 1; + unsigned int should_be_inactive: 1; +}; + +struct ehci_iso_stream; + +struct ehci_itd { + __le32 hw_next; + __le32 hw_transaction[8]; + __le32 hw_bufp[7]; + __le32 hw_bufp_hi[7]; + dma_addr_t itd_dma; + union ehci_shadow itd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head itd_list; + unsigned int frame; + unsigned int pg; + unsigned int index[8]; + long: 32; +}; + +struct ehci_sitd { + __le32 hw_next; + __le32 hw_fullspeed_ep; + __le32 hw_uframe; + __le32 hw_results; + __le32 hw_buf[2]; + __le32 hw_backpointer; + __le32 hw_buf_hi[2]; + dma_addr_t sitd_dma; + union ehci_shadow sitd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head sitd_list; + unsigned int frame; + unsigned int index; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_qtd { + __le32 hw_next; + __le32 hw_alt_next; + __le32 hw_token; + __le32 hw_buf[5]; + __le32 hw_buf_hi[5]; + dma_addr_t qtd_dma; + struct list_head qtd_list; + struct urb *urb; + size_t length; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_fstn { + __le32 hw_next; + __le32 hw_prev; + dma_addr_t fstn_dma; + union ehci_shadow fstn_next; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_qh_hw { + __le32 hw_next; + __le32 hw_info1; + __le32 hw_info2; + __le32 hw_current; + __le32 hw_qtd_next; + __le32 hw_alt_next; + __le32 hw_token; + __le32 hw_buf[5]; + __le32 hw_buf_hi[5]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ehci_iso_packet { + u64 bufp; + __le32 transaction; + u8 cross; + u32 buf1; + long: 32; +}; + +struct ehci_iso_sched { + struct list_head td_list; + unsigned int span; + unsigned int first_packet; + struct ehci_iso_packet packet[0]; +}; + +struct ehci_iso_stream { + struct ehci_qh_hw *hw; + u8 bEndpointAddress; + u8 highspeed; + struct list_head td_list; + struct list_head free_list; + struct ehci_per_sched ps; + unsigned int next_uframe; + __le32 splits; + u16 
uperiod; + u16 maxp; + unsigned int bandwidth; + __le32 buf0; + __le32 buf1; + __le32 buf2; + __le32 address; +}; + +struct ehci_tt { + u16 bandwidth[8]; + struct list_head tt_list; + struct list_head ps_list; + struct usb_tt *usb_tt; + int tt_port; +}; + +struct ehci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*port_power)(struct usb_hcd *, int, bool); +}; + +struct debug_buffer { + ssize_t (*fill_func)(struct debug_buffer *); + struct usb_bus *bus; + struct mutex mutex; + size_t count; + char *output_buf; + size_t alloc_size; +}; + +struct ir_raw_timings_manchester { + unsigned int leader_pulse; + unsigned int leader_space; + unsigned int clock; + unsigned int invert: 1; + unsigned int trailer_space; +}; + +enum rc6_mode { + RC6_MODE_0 = 0, + RC6_MODE_6A = 1, + RC6_MODE_UNKNOWN = 2, +}; + +enum rc6_state { + STATE_INACTIVE___2 = 0, + STATE_PREFIX_SPACE = 1, + STATE_HEADER_BIT_START = 2, + STATE_HEADER_BIT_END = 3, + STATE_TOGGLE_START = 4, + STATE_TOGGLE_END = 5, + STATE_BODY_BIT_START = 6, + STATE_BODY_BIT_END = 7, + STATE_FINISHED___2 = 8, +}; + +enum { + SKB_FCLONE_UNAVAILABLE = 0, + SKB_FCLONE_ORIG = 1, + SKB_FCLONE_CLONE = 2, +}; + +enum skb_tstamp_type { + SKB_CLOCK_REALTIME = 0, + SKB_CLOCK_MONOTONIC = 1, + SKB_CLOCK_TAI = 2, + __SKB_CLOCK_MAX = 2, +}; + +struct skb_checksum_ops { + __wsum (*update)(const void *, int, __wsum); + __wsum (*combine)(__wsum, __wsum, int, int); +}; + +enum tcx_action_base { + TCX_NEXT = -1, + TCX_PASS = 0, + TCX_DROP = 2, + TCX_REDIRECT = 7, +}; + +struct ifbond { + __s32 bond_mode; + __s32 num_slaves; + __s32 miimon; +}; + +typedef struct ifbond ifbond; + +struct ifslave { + __s32 slave_id; + char slave_name[16]; + __s8 link; + __s8 state; + __u32 link_failure_count; +}; + +typedef struct ifslave ifslave; + +enum netdev_queue_type { + NETDEV_QUEUE_TYPE_RX = 0, + NETDEV_QUEUE_TYPE_TX = 1, +}; + +struct net_device_path_stack { + int num_paths; + struct net_device_path path[5]; +}; + +struct bpf_xdp_link { + struct bpf_link link; + struct net_device *dev; + int flags; +}; + +struct netdev_net_notifier { + struct list_head list; + struct notifier_block *nb; +}; + +struct bpf_mprog_fp { + struct bpf_prog *prog; +}; + +struct bpf_mprog_bundle; + +struct bpf_mprog_entry { + struct bpf_mprog_fp fp_items[64]; + struct bpf_mprog_bundle *parent; +}; + +struct cpu_rmap { + struct kref refcount; + u16 size; + void **obj; + struct { + u16 index; + u16 dist; + } near[0]; +}; + +struct phy_link_topology { + struct xarray phys; + u32 next_phy_index; +}; + +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; + union { + u32 mtu; + } ext; +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; + void *lower_state_info; +}; + +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; + const unsigned char *dev_addr; +}; + +enum netdev_offload_xstats_type { + NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, +}; + +struct netdev_notifier_offload_xstats_rd { + struct rtnl_hw_stats64 stats; + bool used; + long: 32; +}; + +struct netdev_notifier_offload_xstats_ru { + bool used; +}; + +struct netdev_notifier_offload_xstats_info { + struct netdev_notifier_info info; + enum netdev_offload_xstats_type type; + union { + struct netdev_notifier_offload_xstats_rd *report_delta; + struct netdev_notifier_offload_xstats_ru *report_used; + }; +}; + +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[128]; + u8 buckets[0]; +}; + 
+enum { + NESTED_SYNC_IMM_BIT = 0, + NESTED_SYNC_TODO_BIT = 1, +}; + +struct netdev_nested_priv { + unsigned char flags; + void *data; +}; + +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; + struct netdev_bonding_info bonding_info; +}; + +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct vlan_ethhdr { + union { + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + }; + struct { + unsigned char h_dest[6]; + unsigned char h_source[6]; + } addrs; + }; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct ip_tunnel_key { + __be64 tun_id; + union { + struct { + __be32 src; + __be32 dst; + } ipv4; + struct { + struct in6_addr src; + struct in6_addr dst; + } ipv6; + } u; + long unsigned int tun_flags[1]; + __be32 label; + u32 nhid; + u8 tos; + u8 ttl; + __be16 tp_src; + __be16 tp_dst; + __u8 flow_flags; + long: 32; +}; + +struct ip_tunnel_encap { + u16 type; + u16 flags; + __be16 sport; + __be16 dport; +}; + +struct dst_cache_pcpu; + +struct dst_cache { + struct dst_cache_pcpu *cache; + long unsigned int reset_ts; +}; + +struct ip_tunnel_info { + struct ip_tunnel_key key; + struct ip_tunnel_encap encap; + struct dst_cache dst_cache; + u8 options_len; + u8 mode; + long: 32; +}; + +enum qdisc_state_t { + __QDISC_STATE_SCHED = 0, + __QDISC_STATE_DEACTIVATED = 1, + __QDISC_STATE_MISSED = 2, + __QDISC_STATE_DRAINING = 3, +}; + +enum qdisc_state2_t { + __QDISC_STATE2_RUNNING = 0, +}; + +struct qdisc_skb_cb { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + unsigned char data[20]; +}; + +struct tc_skb_cb { + struct qdisc_skb_cb qdisc_cb; + u32 drop_reason; + u16 zone; + u16 mru; + u8 post_ct: 1; + u8 post_ct_snat: 1; + u8 post_ct_dnat: 1; +}; + +struct mini_Qdisc { + struct tcf_proto *filter_list; + struct tcf_block *block; + struct gnet_stats_basic_sync *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + long unsigned int rcu_state; +}; + +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_meta; + void *data_end; +}; + +typedef u64 sci_t; + +enum metadata_type { + METADATA_IP_TUNNEL = 0, + METADATA_HW_PORT_MUX = 1, + METADATA_MACSEC = 2, + METADATA_XFRM = 3, +}; + +struct hw_port_info { + struct net_device *lower_dev; + u32 port_id; +}; + +struct macsec_info { + sci_t sci; +}; + +struct xfrm_md_info { + u32 if_id; + int link; + struct dst_entry *dst_orig; +}; + +struct metadata_dst { + struct dst_entry dst; + enum metadata_type type; + long: 32; + union { + struct ip_tunnel_info tun_info; + struct hw_port_info port_info; + struct macsec_info macsec_info; + struct xfrm_md_info xfrm_info; + } u; +}; + +struct rps_sock_flow_table { + u32 mask; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + u32 ents[0]; +}; + +struct bpf_mprog_cp { + struct bpf_link *link; +}; + +struct bpf_mprog_bundle { + struct bpf_mprog_entry a; + struct bpf_mprog_entry b; + struct bpf_mprog_cp cp_items[64]; + struct bpf_prog *ref; + long: 32; + atomic64_t revision; + u32 count; + long: 32; +}; + +struct tcx_entry { + struct mini_Qdisc *miniq; + long: 32; + struct bpf_mprog_bundle bundle; + u32 miniq_active; + struct callback_head rcu; + long: 32; +}; + +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +enum { + SCM_TSTAMP_SND = 0, + SCM_TSTAMP_SCHED = 1, + SCM_TSTAMP_ACK = 2, +}; + +enum sctp_msg_flags { + MSG_NOTIFICATION = 
32768, +}; + +struct rps_map { + unsigned int len; + struct callback_head rcu; + u16 cpus[0]; +}; + +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; + +struct rps_dev_flow_table { + unsigned int mask; + struct callback_head rcu; + struct rps_dev_flow flows[0]; +}; + +typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *); + +struct dev_kfree_skb_cb { + enum skb_drop_reason reason; +}; + +enum { + NAPI_F_PREFER_BUSY_POLL = 1, + NAPI_F_END_ON_RESCHED = 2, +}; + +struct netdev_adjacent { + struct net_device *dev; + netdevice_tracker dev_tracker; + bool master; + bool ignore; + u16 ref_nr; + void *private; + struct list_head list; + struct callback_head rcu; +}; + +enum nft_immediate_attributes { + NFTA_IMMEDIATE_UNSPEC = 0, + NFTA_IMMEDIATE_DREG = 1, + NFTA_IMMEDIATE_DATA = 2, + __NFTA_IMMEDIATE_MAX = 3, +}; + +struct nft_immediate_expr { + struct nft_data data; + u8 dreg; + u8 dlen; + long: 32; +}; + +struct nf_nat_ipv4_range { + unsigned int flags; + __be32 min_ip; + __be32 max_ip; + union nf_conntrack_man_proto min; + union nf_conntrack_man_proto max; +}; + +struct nf_nat_ipv4_multi_range_compat { + unsigned int rangesize; + struct nf_nat_ipv4_range range[1]; +}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; + struct in_addr imr_address; + int imr_ifindex; +}; + +struct net_proto_family { + int family; + int (*create)(struct net *, struct socket *, int, int); + struct module *owner; +}; + +struct ip_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + __be32 sl_addr[0]; +}; + +struct ip_mc_socklist { + struct ip_mc_socklist *next_rcu; + struct ip_mreqn multi; + unsigned int sfmode; + struct ip_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ip_sf_list { + struct ip_sf_list *sf_next; + long unsigned int sf_count[2]; + __be32 sf_inaddr; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; +}; + +struct ip_mc_list { + struct in_device *interface; + __be32 multiaddr; + unsigned int sfmode; + struct ip_sf_list *sources; + struct ip_sf_list *tomb; + long unsigned int sfcount[2]; + union { + struct ip_mc_list *next; + struct ip_mc_list *next_rcu; + }; + struct ip_mc_list *next_hash; + struct timer_list timer; + int users; + refcount_t refcnt; + spinlock_t lock; + char tm_running; + char reporter; + char unsolicit_count; + char loaded; + unsigned char gsquery; + unsigned char crcount; + struct callback_head rcu; +}; + +struct socket_alloc { + struct socket socket; + struct inode vfs_inode; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct rtentry { + long unsigned int rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + short unsigned int rt_flags; + short int rt_pad2; + long unsigned int rt_pad3; + void *rt_pad4; + short int rt_metric; + char *rt_dev; + long unsigned int rt_mtu; + long unsigned int rt_window; + short unsigned int rt_irtt; +}; + +struct inet_protosw { + struct list_head list; + short unsigned int type; + short unsigned int protocol; + struct proto *prot; + const struct proto_ops *ops; + unsigned char flags; +}; + +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; + +enum { + INET_FRAG_FIRST_IN = 1, + INET_FRAG_LAST_IN = 2, + INET_FRAG_COMPLETE = 4, + INET_FRAG_HASH_DEAD = 8, + INET_FRAG_DROP = 16, +}; + +struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; +}; + +struct 
frag_queue { + struct inet_frag_queue q; + int iif; + __u16 nhoffset; + u8 ecn; +}; + +struct nft_ct_frag6_pernet { + struct ctl_table_header *nf_frag_frags_hdr; + struct fqdir *fqdir; +}; + +struct arphdr { + __be16 ar_hrd; + __be16 ar_pro; + unsigned char ar_hln; + unsigned char ar_pln; + __be16 ar_op; +}; + +struct nd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + __u8 opt[0]; +}; + +struct br_input_skb_cb { + struct net_device *brdev; + u16 frag_max_size; + u8 igmp; + u8 mrouters_only: 1; + u8 proxyarp_replied: 1; + u8 src_port_isolated: 1; + u8 promisc: 1; + u8 br_netfilter_broute: 1; + u32 backup_nhid; +}; + +struct msi_bitmap { + struct device_node *of_node; + long unsigned int *bitmap; + spinlock_t lock; + unsigned int irq_count; + bool bitmap_from_slab; +}; + +enum mpic_reg_type { + mpic_access_mmio_le = 0, + mpic_access_mmio_be = 1, +}; + +struct mpic_reg_bank { + u32 *base; +}; + +struct mpic_irq_save { + u32 vecprio; + u32 dest; +}; + +struct mpic { + struct device_node *node; + struct irq_domain *irqhost; + struct irq_chip hc_irq; + struct irq_chip hc_ipi; + struct irq_chip hc_tm; + struct irq_chip hc_err; + const char *name; + unsigned int flags; + unsigned int isu_size; + unsigned int isu_shift; + unsigned int isu_mask; + unsigned int num_sources; + unsigned int ipi_vecs[4]; + unsigned int timer_vecs[8]; + unsigned int err_int_vecs[32]; + unsigned int spurious_vec; + enum mpic_reg_type reg_type; + phys_addr_t paddr; + struct mpic_reg_bank thiscpuregs; + struct mpic_reg_bank gregs; + struct mpic_reg_bank tmregs; + struct mpic_reg_bank cpuregs[32]; + struct mpic_reg_bank isus[32]; + u32 *err_regs; + long unsigned int *protected; + struct msi_bitmap msi_bitmap; + struct mpic *next; + struct mpic_irq_save *save_data; +}; + +struct mv643xx_eth_platform_data { + struct platform_device *shared; + int port_number; + int phy_addr; + struct device_node *phy_node; + u8 mac_addr[6]; + int speed; + int duplex; + phy_interface_t interface; + int rx_queue_count; + int tx_queue_count; + int rx_queue_size; + int tx_queue_size; + long unsigned int rx_sram_addr; + int rx_sram_size; + long unsigned int tx_sram_addr; + int tx_sram_size; +}; + +struct clone_args { + __u64 flags; + __u64 pidfd; + __u64 child_tid; + __u64 parent_tid; + __u64 exit_signal; + __u64 stack; + __u64 stack_size; + __u64 tls; + __u64 set_tid; + __u64 set_tid_size; + __u64 cgroup; +}; + +struct robust_list { + struct robust_list *next; +}; + +struct robust_list_head { + struct robust_list list; + long int futex_offset; + struct robust_list *list_op_pending; +}; + +enum mm_cid_state { + MM_CID_UNSET = 4294967295, + MM_CID_LAZY_PUT = 2147483648, +}; + +struct kernel_clone_args { + u64 flags; + int *pidfd; + int *child_tid; + int *parent_tid; + const char *name; + int exit_signal; + u32 kthread: 1; + u32 io_thread: 1; + u32 user_worker: 1; + u32 no_files: 1; + long unsigned int stack; + long unsigned int stack_size; + long unsigned int tls; + pid_t *set_tid; + size_t set_tid_size; + int cgroup; + int idle; + int (*fn)(void *); + void *fn_arg; + struct cgroup *cgrp; + struct css_set *cset; + long: 32; +}; + +struct multiprocess_signals { + sigset_t signal; + struct hlist_node node; +}; + +struct fd_range { + unsigned int from; + unsigned int to; +}; + +enum { + FUTEX_STATE_OK = 0, + FUTEX_STATE_EXITING = 1, + FUTEX_STATE_DEAD = 2, +}; + +struct trace_event_raw_task_newtask { + struct trace_entry ent; + pid_t pid; + char comm[16]; + long unsigned int clone_flags; + short int oom_score_adj; + char __data[0]; +}; + 
+struct trace_event_raw_task_rename { + struct trace_entry ent; + pid_t pid; + char oldcomm[16]; + char newcomm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_data_offsets_task_newtask {}; + +struct trace_event_data_offsets_task_rename {}; + +typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, long unsigned int); + +typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *); + +struct vm_stack { + struct callback_head rcu; + struct vm_struct *stack_vm_area; +}; + +struct audit_context; + +enum { + GP_IDLE = 0, + GP_ENTER = 1, + GP_PASSED = 2, + GP_EXIT = 3, + GP_REPLAY = 4, +}; + +typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); + +struct rcu_gp_oldstate { + long unsigned int rgos_norm; + long unsigned int rgos_exp; +}; + +typedef int (*task_call_f)(struct task_struct *, void *); + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; + +struct rcu_exp_work { + long unsigned int rew_s; + struct kthread_work rew_work; +}; + +struct rcu_node { + raw_spinlock_t lock; + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + long unsigned int completedqs; + long unsigned int qsmask; + long unsigned int rcu_gp_init_mask; + long unsigned int qsmaskinit; + long unsigned int qsmaskinitnext; + long unsigned int expmask; + long unsigned int expmaskinit; + long unsigned int expmaskinitnext; + struct kthread_worker *exp_kworker; + long unsigned int cbovldmask; + long unsigned int ffmask; + long unsigned int grpmask; + int grplo; + int grphi; + u8 grpnum; + u8 level; + bool wait_blkd_tasks; + struct rcu_node *parent; + struct list_head blkd_tasks; + struct list_head *gp_tasks; + struct list_head *exp_tasks; + struct list_head *boost_tasks; + struct rt_mutex boost_mtx; + long unsigned int boost_time; + struct mutex kthread_mutex; + struct task_struct *boost_kthread_task; + unsigned int boost_kthread_status; + long unsigned int n_boosts; + long: 32; + long: 32; + long: 32; + raw_spinlock_t fqslock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t exp_lock; + long unsigned int exp_seq_rq; + wait_queue_head_t exp_wq[4]; + struct rcu_exp_work rew; + bool exp_need_flush; + raw_spinlock_t exp_poll_lock; + long unsigned int exp_seq_poll_rq; + struct work_struct exp_poll_wq; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct rt_waiter_node { + struct rb_node entry; + int prio; + u64 deadline; +}; + +struct rt_mutex_waiter { + struct rt_waiter_node tree; + struct rt_waiter_node pi_tree; + struct task_struct *task; + struct rt_mutex_base *lock; + unsigned int wake_state; + struct ww_acquire_ctx *ww_ctx; +}; + +struct rcu_synchronize { + struct callback_head head; + struct completion completion; +}; + +struct rcu_cblist { + struct callback_head *head; + struct callback_head **tail; + long int len; +}; + +enum tick_dep_bits { + TICK_DEP_BIT_POSIX_TIMER = 0, + TICK_DEP_BIT_PERF_EVENTS = 1, + TICK_DEP_BIT_SCHED = 2, + TICK_DEP_BIT_CLOCK_UNSTABLE = 3, + TICK_DEP_BIT_RCU = 4, + TICK_DEP_BIT_RCU_EXP = 5, +}; + +union rcu_noqs { + struct { + u8 norm; + u8 exp; + } b; + u16 s; +}; + +struct rcu_snap_record { + long unsigned int gp_seq; + long: 32; + u64 cputime_irq; + u64 cputime_softirq; + u64 cputime_system; + long unsigned int nr_hardirqs; + unsigned int nr_softirqs; + long long unsigned int nr_csw; + long unsigned int jiffies; + long: 32; +}; + +struct rcu_data { + long unsigned int gp_seq; + long unsigned int 
gp_seq_needed; + union rcu_noqs cpu_no_qs; + bool core_needs_qs; + bool beenonline; + bool gpwrap; + bool cpu_started; + struct rcu_node *mynode; + long unsigned int grpmask; + long unsigned int ticks_this_gp; + struct irq_work defer_qs_iw; + bool defer_qs_iw_pending; + struct work_struct strict_work; + struct rcu_segcblist cblist; + long int qlen_last_fqs_check; + long unsigned int n_cbs_invoked; + long unsigned int n_force_qs_snap; + long int blimit; + int watching_snap; + bool rcu_need_heavy_qs; + bool rcu_urgent_qs; + bool rcu_forced_tick; + bool rcu_forced_tick_exp; + long unsigned int barrier_seq_snap; + struct callback_head barrier_head; + int exp_watching_snap; + struct task_struct *rcu_cpu_kthread_task; + unsigned int rcu_cpu_kthread_status; + char rcu_cpu_has_work; + long unsigned int rcuc_activity; + unsigned int softirq_snap; + struct irq_work rcu_iw; + bool rcu_iw_pending; + long unsigned int rcu_iw_gp_seq; + long unsigned int rcu_ofl_gp_seq; + short int rcu_ofl_gp_state; + long unsigned int rcu_onl_gp_seq; + short int rcu_onl_gp_state; + long unsigned int last_fqs_resched; + long unsigned int last_sched_clock; + struct rcu_snap_record snap_record; + long int lazy_len; + int cpu; +}; + +struct sr_wait_node { + atomic_t inuse; + struct llist_node node; +}; + +struct rcu_state { + struct rcu_node node[3]; + struct rcu_node *level[3]; + int ncpus; + int n_online_cpus; + long: 32; + long: 32; + long: 32; + long unsigned int gp_seq; + long unsigned int gp_max; + struct task_struct *gp_kthread; + struct swait_queue_head gp_wq; + short int gp_flags; + short int gp_state; + long unsigned int gp_wake_time; + long unsigned int gp_wake_seq; + long unsigned int gp_seq_polled; + long unsigned int gp_seq_polled_snap; + long unsigned int gp_seq_polled_exp_snap; + struct mutex barrier_mutex; + atomic_t barrier_cpu_count; + struct completion barrier_completion; + long unsigned int barrier_sequence; + raw_spinlock_t barrier_lock; + struct mutex exp_mutex; + struct mutex exp_wake_mutex; + long unsigned int expedited_sequence; + atomic_t expedited_need_qs; + struct swait_queue_head expedited_wq; + int ncpus_snap; + u8 cbovld; + u8 cbovldnext; + long unsigned int jiffies_force_qs; + long unsigned int jiffies_kick_kthreads; + long unsigned int n_force_qs; + long unsigned int gp_start; + long unsigned int gp_end; + long unsigned int gp_activity; + long unsigned int gp_req_activity; + long unsigned int jiffies_stall; + int nr_fqs_jiffies_stall; + long unsigned int jiffies_resched; + long unsigned int n_force_qs_gpstart; + const char *name; + char abbr; + long: 32; + long: 32; + arch_spinlock_t ofl_lock; + struct llist_head srs_next; + struct llist_node *srs_wait_tail; + struct llist_node *srs_done_tail; + struct sr_wait_node srs_wait_nodes[5]; + struct work_struct srs_cleanup_work; + atomic_t srs_cleanups_pending; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct kvfree_rcu_bulk_data { + struct list_head list; + struct rcu_gp_oldstate gp_snap; + long unsigned int nr_records; + void *records[0]; +}; + +struct kfree_rcu_cpu; + +struct kfree_rcu_cpu_work { + struct rcu_work rcu_work; + struct callback_head *head_free; + struct rcu_gp_oldstate head_free_gp_snap; + struct list_head bulk_head_free[2]; + struct kfree_rcu_cpu *krcp; +}; + +struct kfree_rcu_cpu { + struct callback_head *head; + long unsigned int head_gp_snap; + atomic_t head_count; + struct list_head bulk_head[2]; + atomic_t bulk_count[2]; + struct kfree_rcu_cpu_work krw_arr[2]; + raw_spinlock_t lock; + struct 
delayed_work monitor_work; + bool initialized; + struct delayed_work page_cache_work; + atomic_t backoff_page_cache_fill; + atomic_t work_in_progress; + long: 32; + struct hrtimer hrtimer; + struct llist_head bkvcache; + int nr_bkv_objs; +}; + +struct rcu_stall_chk_rdr { + int nesting; + union rcu_special rs; + bool on_blkd_list; +}; + +struct tp_module { + struct list_head list; + struct module *mod; +}; + +enum tp_func_state { + TP_FUNC_0 = 0, + TP_FUNC_1 = 1, + TP_FUNC_2 = 2, + TP_FUNC_N = 3, +}; + +enum tp_transition_sync { + TP_TRANSITION_SYNC_1_0_1 = 0, + TP_TRANSITION_SYNC_N_2_1 = 1, + _NR_TP_TRANSITION_SYNC = 2, +}; + +struct tp_transition_snapshot { + long unsigned int rcu; + bool ongoing; +}; + +struct tp_probes { + struct callback_head rcu; + struct tracepoint_func probes[0]; +}; + +struct syscall_trace_enter { + struct trace_entry ent; + int nr; + long unsigned int args[0]; +}; + +struct syscall_trace_exit { + struct trace_entry ent; + int nr; + long int ret; +}; + +struct syscall_tp_t { + struct trace_entry ent; + int syscall_nr; + long unsigned int ret; +}; + +struct syscall_tp_t___2 { + struct trace_entry ent; + int syscall_nr; + long unsigned int args[6]; + long: 32; +}; + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +struct bpf_iter__bpf_prog { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_prog *prog; + }; +}; + +enum bpf_iter_feature { + BPF_ITER_RESCHED = 1, +}; + +struct bpf_iter__cgroup { + union { + struct bpf_iter_meta *meta; + }; + union { + struct cgroup *cgroup; + }; +}; + +struct cgroup_iter_priv { + struct cgroup_subsys_state *start_css; + bool visited_all; + bool terminate; + int order; +}; + +struct bpf_iter_css { + __u64 __opaque[3]; +}; + +struct bpf_iter_css_kern { + struct cgroup_subsys_state *start; + struct cgroup_subsys_state *pos; + unsigned int flags; + long: 32; +}; + +struct static_key_mod { + struct static_key_mod *next; + struct jump_entry *entries; + struct module *mod; +}; + +struct static_key_deferred { + struct static_key key; + long unsigned int timeout; + struct delayed_work work; +}; + +enum { + XA_CHECK_SCHED = 4096, +}; + +enum wb_state { + WB_registered = 0, + WB_writeback_running = 1, + WB_has_dirty_io = 2, + WB_start_all = 3, +}; + +struct wb_lock_cookie { + bool locked; + long unsigned int flags; +}; + +typedef int (*writepage_t)(struct folio *, struct writeback_control *, void *); + +struct dirty_throttle_control { + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + long unsigned int avail; + long unsigned int dirty; + long unsigned int thresh; + long unsigned int bg_thresh; + long unsigned int wb_dirty; + long unsigned int wb_thresh; + long unsigned int wb_bg_thresh; + long unsigned int pos_ratio; + bool freerun; + bool dirty_exceeded; +}; + +typedef unsigned int isolate_mode_t; + +typedef struct folio *new_folio_t(struct folio *, long unsigned int); + +typedef void free_folio_t(struct folio *, long unsigned int); + +struct movable_operations { + bool (*isolate_page)(struct page *, isolate_mode_t); + int (*migrate_page)(struct page *, struct page *, enum migrate_mode); + void (*putback_page)(struct page *); +}; + +enum ttu_flags { + TTU_SPLIT_HUGE_PMD = 4, + TTU_IGNORE_MLOCK = 8, + TTU_SYNC = 16, + TTU_HWPOISON = 32, + TTU_BATCH_FLUSH = 64, + TTU_RMAP_LOCKED = 128, +}; + +enum rmp_flags { + RMP_LOCKED = 1, + RMP_USE_SHARED_ZEROPAGE = 2, +}; + +struct rmap_walk_control { + void *arg; + bool try_lock; + bool contended; 
+ bool (*rmap_one)(struct folio *, struct vm_area_struct *, long unsigned int, void *); + int (*done)(struct folio *); + struct anon_vma * (*anon_lock)(const struct folio *, struct rmap_walk_control *); + bool (*invalid_vma)(struct vm_area_struct *, void *); +}; + +struct migration_target_control { + int nid; + nodemask_t *nmask; + gfp_t gfp_mask; + enum migrate_reason reason; +}; + +struct rmap_walk_arg { + struct folio *folio; + bool map_unused_to_zeropage; +}; + +enum { + PAGE_WAS_MAPPED = 1, + PAGE_WAS_MLOCKED = 2, + PAGE_OLD_STATES = 3, +}; + +struct migrate_pages_stats { + int nr_succeeded; + int nr_failed_pages; + int nr_thp_succeeded; + int nr_thp_failed; + int nr_thp_split; + int nr_split; +}; + +struct hugepage_subpool; + +enum legacy_fs_param { + LEGACY_FS_UNSET_PARAMS = 0, + LEGACY_FS_MONOLITHIC_PARAMS = 1, + LEGACY_FS_INDIVIDUAL_PARAMS = 2, +}; + +struct legacy_fs_context { + char *legacy_data; + size_t data_size; + enum legacy_fs_param param_type; +}; + +struct proc_fs_context { + struct pid_namespace *pid_ns; + unsigned int mask; + enum proc_hidepid hidepid; + int gid; + enum proc_pidonly pidonly; +}; + +enum proc_param { + Opt_gid___4 = 0, + Opt_hidepid = 1, + Opt_subset = 2, +}; + +struct ext4_io_end_vec { + struct list_head list; + loff_t offset; + ssize_t size; + long: 32; +}; + +struct ext4_io_end { + struct list_head list; + handle_t *handle; + struct inode *inode; + struct bio *bio; + unsigned int flag; + refcount_t count; + struct list_head list_vec; +}; + +typedef struct ext4_io_end ext4_io_end_t; + +struct ext4_io_submit { + struct writeback_control *io_wbc; + struct bio *io_bio; + ext4_io_end_t *io_end; + long: 32; + sector_t io_next_block; +}; + +typedef s64 int64_t; + +struct fuse_attr { + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint32_t rdev; + uint32_t blksize; + uint32_t flags; +}; + +struct fuse_sx_time { + int64_t tv_sec; + uint32_t tv_nsec; + int32_t __reserved; +}; + +struct fuse_statx { + uint32_t mask; + uint32_t blksize; + uint64_t attributes; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint16_t mode; + uint16_t __spare0[1]; + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t attributes_mask; + struct fuse_sx_time atime; + struct fuse_sx_time btime; + struct fuse_sx_time ctime; + struct fuse_sx_time mtime; + uint32_t rdev_major; + uint32_t rdev_minor; + uint32_t dev_major; + uint32_t dev_minor; + uint64_t __spare2[14]; +}; + +enum fuse_ext_type { + FUSE_MAX_NR_SECCTX = 31, + FUSE_EXT_GROUPS = 32, +}; + +enum fuse_opcode { + FUSE_LOOKUP = 1, + FUSE_FORGET = 2, + FUSE_GETATTR = 3, + FUSE_SETATTR = 4, + FUSE_READLINK = 5, + FUSE_SYMLINK = 6, + FUSE_MKNOD = 8, + FUSE_MKDIR = 9, + FUSE_UNLINK = 10, + FUSE_RMDIR = 11, + FUSE_RENAME = 12, + FUSE_LINK = 13, + FUSE_OPEN = 14, + FUSE_READ = 15, + FUSE_WRITE = 16, + FUSE_STATFS = 17, + FUSE_RELEASE = 18, + FUSE_FSYNC = 20, + FUSE_SETXATTR = 21, + FUSE_GETXATTR = 22, + FUSE_LISTXATTR = 23, + FUSE_REMOVEXATTR = 24, + FUSE_FLUSH = 25, + FUSE_INIT = 26, + FUSE_OPENDIR = 27, + FUSE_READDIR = 28, + FUSE_RELEASEDIR = 29, + FUSE_FSYNCDIR = 30, + FUSE_GETLK = 31, + FUSE_SETLK = 32, + FUSE_SETLKW = 33, + FUSE_ACCESS = 34, + FUSE_CREATE = 35, + FUSE_INTERRUPT = 36, + FUSE_BMAP = 37, + FUSE_DESTROY = 38, + FUSE_IOCTL = 39, + FUSE_POLL = 40, + FUSE_NOTIFY_REPLY = 41, + FUSE_BATCH_FORGET = 42, + FUSE_FALLOCATE = 
43, + FUSE_READDIRPLUS = 44, + FUSE_RENAME2 = 45, + FUSE_LSEEK = 46, + FUSE_COPY_FILE_RANGE = 47, + FUSE_SETUPMAPPING = 48, + FUSE_REMOVEMAPPING = 49, + FUSE_SYNCFS = 50, + FUSE_TMPFILE = 51, + FUSE_STATX = 52, + CUSE_INIT = 4096, + CUSE_INIT_BSWAP_RESERVED = 1048576, + FUSE_INIT_BSWAP_RESERVED = 436207616, +}; + +struct fuse_entry_out { + uint64_t nodeid; + uint64_t generation; + uint64_t entry_valid; + uint64_t attr_valid; + uint32_t entry_valid_nsec; + uint32_t attr_valid_nsec; + struct fuse_attr attr; +}; + +struct fuse_getattr_in { + uint32_t getattr_flags; + uint32_t dummy; + uint64_t fh; +}; + +struct fuse_attr_out { + uint64_t attr_valid; + uint32_t attr_valid_nsec; + uint32_t dummy; + struct fuse_attr attr; +}; + +struct fuse_statx_in { + uint32_t getattr_flags; + uint32_t reserved; + uint64_t fh; + uint32_t sx_flags; + uint32_t sx_mask; +}; + +struct fuse_statx_out { + uint64_t attr_valid; + uint32_t attr_valid_nsec; + uint32_t flags; + uint64_t spare[2]; + struct fuse_statx stat; +}; + +struct fuse_mknod_in { + uint32_t mode; + uint32_t rdev; + uint32_t umask; + uint32_t padding; +}; + +struct fuse_mkdir_in { + uint32_t mode; + uint32_t umask; +}; + +struct fuse_rename2_in { + uint64_t newdir; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_link_in { + uint64_t oldnodeid; +}; + +struct fuse_setattr_in { + uint32_t valid; + uint32_t padding; + uint64_t fh; + uint64_t size; + uint64_t lock_owner; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t unused4; + uint32_t uid; + uint32_t gid; + uint32_t unused5; +}; + +struct fuse_create_in { + uint32_t flags; + uint32_t mode; + uint32_t umask; + uint32_t open_flags; +}; + +struct fuse_open_out { + uint64_t fh; + uint32_t open_flags; + int32_t backing_id; +}; + +struct fuse_release_in { + uint64_t fh; + uint32_t flags; + uint32_t release_flags; + uint64_t lock_owner; +}; + +struct fuse_access_in { + uint32_t mask; + uint32_t padding; +}; + +struct fuse_secctx { + uint32_t size; + uint32_t padding; +}; + +struct fuse_secctx_header { + uint32_t size; + uint32_t nr_secctx; +}; + +struct fuse_ext_header { + uint32_t size; + uint32_t type; +}; + +struct fuse_supp_groups { + uint32_t nr_groups; + uint32_t groups[0]; +}; + +struct fuse_submount_lookup { + refcount_t count; + long: 32; + u64 nodeid; + struct fuse_forget_link *forget; + long: 32; +}; + +struct fuse_backing { + struct file *file; + struct cred *cred; + refcount_t count; + struct callback_head rcu; +}; + +struct fuse_inode { + struct inode inode; + u64 nodeid; + u64 nlookup; + struct fuse_forget_link *forget; + long: 32; + u64 i_time; + u32 inval_mask; + umode_t orig_i_mode; + struct timespec64 i_btime; + u64 orig_ino; + u64 attr_version; + union { + struct { + struct list_head write_files; + struct list_head queued_writes; + int writectr; + int iocachectr; + wait_queue_head_t page_waitq; + wait_queue_head_t direct_io_waitq; + struct rb_root writepages; + }; + struct { + bool cached; + long: 32; + loff_t size; + loff_t pos; + u64 version; + struct timespec64 mtime; + u64 iversion; + spinlock_t lock; + long: 32; + } rdc; + }; + long unsigned int state; + struct mutex mutex; + spinlock_t lock; + struct fuse_submount_lookup *submount_lookup; + struct fuse_backing *fb; + long: 32; +}; + +enum { + FUSE_I_ADVISE_RDPLUS = 0, + FUSE_I_INIT_RDPLUS = 1, + FUSE_I_SIZE_UNSTABLE = 2, + FUSE_I_BAD = 3, + FUSE_I_BTIME = 4, + FUSE_I_CACHE_IO_MODE = 5, +}; + +union fuse_file_args; + +struct 
fuse_file { + struct fuse_mount *fm; + union fuse_file_args *args; + u64 kh; + u64 fh; + u64 nodeid; + refcount_t count; + u32 open_flags; + struct list_head write_entry; + struct { + loff_t pos; + loff_t cache_off; + u64 version; + } readdir; + struct rb_node polled_node; + wait_queue_head_t poll_wait; + enum { + IOM_NONE = 0, + IOM_CACHED = 1, + IOM_UNCACHED = 2, + } iomode; + struct file *passthrough; + const struct cred *cred; + bool flock: 1; +}; + +struct fuse_release_args { + struct fuse_args args; + struct fuse_release_in inarg; + struct inode *inode; + long: 32; +}; + +union fuse_file_args { + struct fuse_open_out open_outarg; + struct fuse_release_args release_args; +}; + +struct fuse_folio_desc { + unsigned int length; + unsigned int offset; +}; + +struct fuse_args_pages { + struct fuse_args args; + struct folio **folios; + struct fuse_folio_desc *descs; + unsigned int num_folios; + long: 32; +}; + +union fuse_dentry { + u64 time; + struct callback_head rcu; +}; + +struct btrfs_dir_log_item { + __le64 end; +}; + +struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; + bool is_nokey_name; +}; + +enum { + BTRFS_ORDERED_REGULAR = 0, + BTRFS_ORDERED_NOCOW = 1, + BTRFS_ORDERED_PREALLOC = 2, + BTRFS_ORDERED_COMPRESSED = 3, + BTRFS_ORDERED_DIRECT = 4, + BTRFS_ORDERED_IO_DONE = 5, + BTRFS_ORDERED_COMPLETE = 6, + BTRFS_ORDERED_IOERR = 7, + BTRFS_ORDERED_TRUNCATED = 8, + BTRFS_ORDERED_LOGGED = 9, + BTRFS_ORDERED_LOGGED_CSUM = 10, + BTRFS_ORDERED_PENDING = 11, + BTRFS_ORDERED_ENCODED = 12, +}; + +enum btrfs_delayed_item_type { + BTRFS_DELAYED_INSERTION_ITEM = 0, + BTRFS_DELAYED_DELETION_ITEM = 1, +}; + +struct btrfs_delayed_item { + struct rb_node rb_node; + long: 32; + u64 index; + struct list_head tree_list; + struct list_head readdir_list; + struct list_head log_list; + u64 bytes_reserved; + struct btrfs_delayed_node *delayed_node; + refcount_t refs; + enum btrfs_delayed_item_type type: 8; + bool logged; + u16 data_len; + char data[0]; + long: 32; +}; + +enum { + LOG_INODE_ALL = 0, + LOG_INODE_EXISTS = 1, +}; + +enum { + LOG_WALK_PIN_ONLY = 0, + LOG_WALK_REPLAY_INODES = 1, + LOG_WALK_REPLAY_DIR_INDEX = 2, + LOG_WALK_REPLAY_ALL = 3, +}; + +struct walk_control { + int free; + int pin; + int stage; + bool ignore_cur_inode; + struct btrfs_root *replay_dest; + struct btrfs_trans_handle *trans; + int (*process_func)(struct btrfs_root *, struct extent_buffer *, struct walk_control *, u64, int); +}; + +struct btrfs_dir_list { + u64 ino; + struct list_head list; +}; + +struct btrfs_ino_list { + u64 ino; + u64 parent; + struct list_head list; +}; + +enum devcg_behavior { + DEVCG_DEFAULT_NONE = 0, + DEVCG_DEFAULT_ALLOW = 1, + DEVCG_DEFAULT_DENY = 2, +}; + +struct dev_exception_item { + u32 major; + u32 minor; + short int type; + short int access; + struct list_head list; + struct callback_head rcu; +}; + +struct dev_cgroup { + struct cgroup_subsys_state css; + struct list_head exceptions; + enum devcg_behavior behavior; + long: 32; +}; + +struct comp_alg_common { + struct crypto_alg base; +}; + +struct crypto_scomp { + struct crypto_tfm base; +}; + +struct scomp_alg { + void * (*alloc_ctx)(struct crypto_scomp *); + void (*free_ctx)(struct crypto_scomp *, void *); + int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + union { + struct { + struct crypto_alg base; 
+ }; + struct comp_alg_common calg; + }; +}; + +struct lzorle_ctx { + void *lzorle_comp_mem; +}; + +struct bvec_iter_all { + struct bio_vec bv; + int idx; + unsigned int done; +}; + +struct bio_integrity_payload { + struct bio *bip_bio; + struct bvec_iter bip_iter; + short unsigned int bip_vcnt; + short unsigned int bip_max_vcnt; + short unsigned int bip_flags; + struct bvec_iter bio_iter; + struct work_struct bip_work; + struct bio_vec *bip_vec; + struct bio_vec bip_inline_vecs[0]; +}; + +struct bio_map_data { + bool is_our_pages: 1; + bool is_null_mapped: 1; + long: 32; + struct iov_iter iter; + struct iovec iov[0]; +}; + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + long: 32; + u64 batch; +}; + +struct rq_wait { + wait_queue_head_t wait; + atomic_t inflight; +}; + +typedef bool acquire_inflight_cb_t(struct rq_wait *, void *); + +typedef void cleanup_cb_t(struct rq_wait *, void *); + +struct blk_iolatency { + struct rq_qos rqos; + struct timer_list timer; + bool enabled; + atomic_t enable_cnt; + struct work_struct enable_work; +}; + +struct iolatency_grp; + +struct child_latency_info { + spinlock_t lock; + long: 32; + u64 last_scale_event; + u64 scale_lat; + u64 nr_samples; + struct iolatency_grp *scale_grp; + atomic_t scale_cookie; +}; + +struct percentile_stats { + u64 total; + u64 missed; +}; + +struct latency_stat { + union { + struct percentile_stats ps; + struct blk_rq_stat rqs; + }; +}; + +struct iolatency_grp { + struct blkg_policy_data pd; + struct latency_stat *stats; + struct latency_stat cur_stat; + struct blk_iolatency *blkiolat; + unsigned int max_depth; + struct rq_wait rq_wait; + atomic64_t window_start; + atomic_t scale_cookie; + long: 32; + u64 min_lat_nsec; + u64 cur_win_nsec; + u64 lat_avg; + u64 nr_samples; + bool ssd; + long: 32; + struct child_latency_info child_lat; +}; + +enum io_uring_msg_ring_flags { + IORING_MSG_DATA = 0, + IORING_MSG_SEND_FD = 1, +}; + +struct io_msg { + struct file *file; + struct file *src_file; + struct callback_head tw; + u64 user_data; + u32 len; + u32 cmd; + u32 src_fd; + union { + u32 dst_fd; + u32 cqe_flags; + }; + u32 flags; + long: 32; +}; + +typedef ZSTD_ErrorCode ERR_enum; + +enum dim_tune_state { + DIM_PARKING_ON_TOP = 0, + DIM_PARKING_TIRED = 1, + DIM_GOING_RIGHT = 2, + DIM_GOING_LEFT = 3, +}; + +enum dim_stats_state { + DIM_STATS_WORSE = 0, + DIM_STATS_SAME = 1, + DIM_STATS_BETTER = 2, +}; + +enum dim_step_result { + DIM_STEPPED = 0, + DIM_TOO_TIRED = 1, + DIM_ON_EDGE = 2, +}; + +struct of_dev_auxdata { + char *compatible; + resource_size_t phys_addr; + char *name; + void *platform_data; +}; + +struct simple_pm_bus { + struct clk_bulk_data *clks; + int num_clks; +}; + +struct pcie_link_state { + struct pci_dev *pdev; + struct pci_dev *downstream; + struct pcie_link_state *root; + struct pcie_link_state *parent; + struct list_head sibling; + u32 aspm_support: 7; + u32 aspm_enabled: 7; + u32 aspm_capable: 7; + u32 aspm_default: 7; + long: 4; + u32 aspm_disable: 7; + u32 clkpm_capable: 1; + u32 clkpm_enabled: 1; + u32 clkpm_default: 1; + u32 clkpm_disable: 1; +}; + +struct clk_hw; + +struct clk_rate_request { + struct clk_core *core; + long unsigned int rate; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int best_parent_rate; + struct clk_hw *best_parent_hw; +}; + +struct clk_init_data; + +struct clk_hw { + struct clk_core *core; + struct clk *clk; + const struct clk_init_data *init; +}; + +struct clk_duty { + unsigned int num; + unsigned int den; +}; + +struct clk_ops { 
+ int (*prepare)(struct clk_hw *); + void (*unprepare)(struct clk_hw *); + int (*is_prepared)(struct clk_hw *); + void (*unprepare_unused)(struct clk_hw *); + int (*enable)(struct clk_hw *); + void (*disable)(struct clk_hw *); + int (*is_enabled)(struct clk_hw *); + void (*disable_unused)(struct clk_hw *); + int (*save_context)(struct clk_hw *); + void (*restore_context)(struct clk_hw *); + long unsigned int (*recalc_rate)(struct clk_hw *, long unsigned int); + long int (*round_rate)(struct clk_hw *, long unsigned int, long unsigned int *); + int (*determine_rate)(struct clk_hw *, struct clk_rate_request *); + int (*set_parent)(struct clk_hw *, u8); + u8 (*get_parent)(struct clk_hw *); + int (*set_rate)(struct clk_hw *, long unsigned int, long unsigned int); + int (*set_rate_and_parent)(struct clk_hw *, long unsigned int, long unsigned int, u8); + long unsigned int (*recalc_accuracy)(struct clk_hw *, long unsigned int); + int (*get_phase)(struct clk_hw *); + int (*set_phase)(struct clk_hw *, int); + int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*init)(struct clk_hw *); + void (*terminate)(struct clk_hw *); + void (*debug_init)(struct clk_hw *, struct dentry *); +}; + +struct clk_parent_data { + const struct clk_hw *hw; + const char *fw_name; + const char *name; + int index; +}; + +struct clk_init_data { + const char *name; + const struct clk_ops *ops; + const char * const *parent_names; + const struct clk_parent_data *parent_data; + const struct clk_hw **parent_hws; + u8 num_parents; + long unsigned int flags; +}; + +struct clk_div_table { + unsigned int val; + unsigned int div; +}; + +struct clk_divider { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u16 flags; + const struct clk_div_table *table; + spinlock_t *lock; +}; + +struct kbentry { + unsigned char kb_table; + unsigned char kb_index; + short unsigned int kb_value; +}; + +struct kbsentry { + unsigned char kb_func; + unsigned char kb_string[512]; +}; + +struct kbdiacr { + unsigned char diacr; + unsigned char base; + unsigned char result; +}; + +struct kbdiacrs { + unsigned int kb_cnt; + struct kbdiacr kbdiacr[256]; +}; + +struct kbdiacruc { + unsigned int diacr; + unsigned int base; + unsigned int result; +}; + +struct kbdiacrsuc { + unsigned int kb_cnt; + struct kbdiacruc kbdiacruc[256]; +}; + +struct kbkeycode { + unsigned int scancode; + unsigned int keycode; +}; + +struct kbd_repeat { + int delay; + int period; +}; + +struct keyboard_notifier_param { + struct vc_data *vc; + int down; + int shift; + int ledstate; + unsigned int value; +}; + +struct kbd_struct { + unsigned char lockstate; + unsigned char slockstate; + unsigned char ledmode: 1; + unsigned char ledflagstate: 4; + char: 3; + unsigned char default_ledflagstate: 4; + unsigned char kbdmode: 3; + long: 1; + unsigned char modeflags: 5; +}; + +struct vt_spawn_console { + spinlock_t lock; + struct pid *pid; + int sig; +}; + +typedef void k_handler_fn(struct vc_data *, unsigned char, char); + +typedef void fn_handler_fn(struct vc_data *); + +struct getset_keycode_data { + struct input_keymap_entry ke; + int error; +}; + +struct eeprom_93cx6 { + void *data; + void (*register_read)(struct eeprom_93cx6 *); + void (*register_write)(struct eeprom_93cx6 *); + int width; + unsigned int quirks; + char drive_data; + char reg_data_in; + char reg_data_out; + char reg_data_clock; + char reg_chip_select; +}; + +enum scsi_prot_operations { + SCSI_PROT_NORMAL = 0, + SCSI_PROT_READ_INSERT = 1, + 
SCSI_PROT_WRITE_STRIP = 2, + SCSI_PROT_READ_STRIP = 3, + SCSI_PROT_WRITE_INSERT = 4, + SCSI_PROT_READ_PASS = 5, + SCSI_PROT_WRITE_PASS = 6, +}; + +struct scsi_eh_save { + int result; + unsigned int resid_len; + int eh_eflags; + enum dma_data_direction data_direction; + unsigned int underflow; + unsigned char cmd_len; + unsigned char prot_op; + unsigned char cmnd[32]; + struct scsi_data_buffer sdb; + struct scatterlist sense_sgl; +}; + +enum scsi_ml_status { + SCSIML_STAT_OK = 0, + SCSIML_STAT_RESV_CONFLICT = 1, + SCSIML_STAT_NOSPC = 2, + SCSIML_STAT_MED_ERROR = 3, + SCSIML_STAT_TGT_FAILURE = 4, + SCSIML_STAT_DL_TIMEOUT = 5, +}; + +struct phy_setting { + u32 speed; + u8 duplex; + u8 bit; +}; + +struct usbdevfs_ctrltransfer { + __u8 bRequestType; + __u8 bRequest; + __u16 wValue; + __u16 wIndex; + __u16 wLength; + __u32 timeout; + void *data; +}; + +struct usbdevfs_bulktransfer { + unsigned int ep; + unsigned int len; + unsigned int timeout; + void *data; +}; + +struct usbdevfs_setinterface { + unsigned int interface; + unsigned int altsetting; +}; + +struct usbdevfs_disconnectsignal { + unsigned int signr; + void *context; +}; + +struct usbdevfs_getdriver { + unsigned int interface; + char driver[256]; +}; + +struct usbdevfs_connectinfo { + unsigned int devnum; + unsigned char slow; +}; + +struct usbdevfs_conninfo_ex { + __u32 size; + __u32 busnum; + __u32 devnum; + __u32 speed; + __u8 num_ports; + __u8 ports[7]; +}; + +struct usbdevfs_iso_packet_desc { + unsigned int length; + unsigned int actual_length; + unsigned int status; +}; + +struct usbdevfs_urb { + unsigned char type; + unsigned char endpoint; + int status; + unsigned int flags; + void *buffer; + int buffer_length; + int actual_length; + int start_frame; + union { + int number_of_packets; + unsigned int stream_id; + }; + int error_count; + unsigned int signr; + void *usercontext; + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; +}; + +struct usbdevfs_ioctl { + int ifno; + int ioctl_code; + void *data; +}; + +struct usbdevfs_disconnect_claim { + unsigned int interface; + unsigned int flags; + char driver[256]; +}; + +struct usbdevfs_streams { + unsigned int num_streams; + unsigned int num_eps; + unsigned char eps[0]; +}; + +struct usb_dev_state { + struct list_head list; + struct usb_device *dev; + struct file *file; + spinlock_t lock; + struct list_head async_pending; + struct list_head async_completed; + struct list_head memory_list; + wait_queue_head_t wait; + wait_queue_head_t wait_for_resume; + unsigned int discsignr; + struct pid *disc_pid; + const struct cred *cred; + sigval_t disccontext; + long unsigned int ifclaimed; + u32 disabled_bulk_eps; + long unsigned int interface_allowed_mask; + int not_yet_resumed; + bool suspend_allowed; + bool privileges_dropped; +}; + +struct usb_memory { + struct list_head memlist; + int vma_use_count; + int urb_use_count; + u32 size; + void *mem; + dma_addr_t dma_handle; + long unsigned int vm_start; + struct usb_dev_state *ps; +}; + +struct async { + struct list_head asynclist; + struct usb_dev_state *ps; + struct pid *pid; + const struct cred *cred; + unsigned int signr; + unsigned int ifnum; + void *userbuffer; + void *userurb; + sigval_t userurb_sigval; + struct urb *urb; + struct usb_memory *usbm; + unsigned int mem_usage; + int status; + u8 bulk_addr; + u8 bulk_status; +}; + +enum snoop_when { + SUBMIT = 0, + COMPLETE = 1, +}; + +struct serio_device_id { + __u8 type; + __u8 extra; + __u8 id; + __u8 proto; +}; + +struct serio_driver; + +struct serio { + void *port_data; + char 
name[32]; + char phys[32]; + char firmware_id[128]; + bool manual_bind; + struct serio_device_id id; + spinlock_t lock; + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); + void (*close)(struct serio *); + int (*start)(struct serio *); + void (*stop)(struct serio *); + struct serio *parent; + struct list_head child_node; + struct list_head children; + unsigned int depth; + struct serio_driver *drv; + struct mutex drv_mutex; + long: 32; + struct device dev; + struct list_head node; + struct mutex *ps2_cmd_mutex; + long: 32; +}; + +struct serio_driver { + const char *description; + const struct serio_device_id *id_table; + bool manual_bind; + void (*write_wakeup)(struct serio *); + irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); + int (*connect)(struct serio *, struct serio_driver *); + int (*reconnect)(struct serio *); + int (*fast_reconnect)(struct serio *); + void (*disconnect)(struct serio *); + void (*cleanup)(struct serio *); + struct device_driver driver; +}; + +typedef struct serio *class_serio_pause_rx_t; + +enum ps2_disposition { + PS2_PROCESS = 0, + PS2_IGNORE = 1, + PS2_ERROR = 2, +}; + +struct ps2dev; + +typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8, unsigned int); + +typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8); + +struct ps2dev { + struct serio *serio; + struct mutex cmd_mutex; + wait_queue_head_t wait; + long unsigned int flags; + u8 cmdbuf[8]; + u8 cmdcnt; + u8 nak; + ps2_pre_receive_handler_t pre_receive_handler; + ps2_receive_handler_t receive_handler; +}; + +struct ir_raw_timings_pd { + unsigned int header_pulse; + unsigned int header_space; + unsigned int bit_pulse; + unsigned int bit_space[2]; + unsigned int trailer_pulse; + unsigned int trailer_space; + unsigned int msb_first: 1; +}; + +enum nec_state { + STATE_INACTIVE___3 = 0, + STATE_HEADER_SPACE___2 = 1, + STATE_BIT_PULSE___2 = 2, + STATE_BIT_SPACE___2 = 3, + STATE_TRAILER_PULSE = 4, + STATE_TRAILER_SPACE = 5, +}; + +struct hid_debug_list { + struct { + union { + struct __kfifo kfifo; + char *type; + const char *const_type; + char (*rectype)[0]; + char *ptr; + const char *ptr_const; + }; + char buf[0]; + } hid_debug_fifo; + struct fasync_struct *fasync; + struct hid_device *hdev; + struct list_head node; + struct mutex read_mutex; +}; + +struct hid_usage_entry { + unsigned int page; + unsigned int usage; + const char *description; +}; + +enum sk_action { + SK_DROP = 0, + SK_PASS = 1, +}; + +struct strp_msg { + int full_len; + int offset; +}; + +struct sk_psock_link { + struct list_head list; + struct bpf_map *map; + void *link_raw; +}; + +enum { + ETHTOOL_A_CABLE_PAIR_A = 0, + ETHTOOL_A_CABLE_PAIR_B = 1, + ETHTOOL_A_CABLE_PAIR_C = 2, + ETHTOOL_A_CABLE_PAIR_D = 3, +}; + +enum { + ETHTOOL_A_CABLE_INF_SRC_UNSPEC = 0, + ETHTOOL_A_CABLE_INF_SRC_TDR = 1, + ETHTOOL_A_CABLE_INF_SRC_ALCD = 2, +}; + +enum { + ETHTOOL_A_CABLE_RESULT_UNSPEC = 0, + ETHTOOL_A_CABLE_RESULT_PAIR = 1, + ETHTOOL_A_CABLE_RESULT_CODE = 2, + ETHTOOL_A_CABLE_RESULT_SRC = 3, + __ETHTOOL_A_CABLE_RESULT_CNT = 4, + ETHTOOL_A_CABLE_RESULT_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0, + ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1, + ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2, + ETHTOOL_A_CABLE_FAULT_LENGTH_SRC = 3, + __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 4, + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2, +}; + 
+enum { + ETHTOOL_A_CABLE_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_NEST_RESULT = 1, + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2, + __ETHTOOL_A_CABLE_NEST_CNT = 3, + ETHTOOL_A_CABLE_NEST_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2, + ETHTOOL_A_CABLE_TEST_NTF_NEST = 3, + __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4, + ETHTOOL_A_CABLE_TEST_NTF_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2, + ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3, + ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4, + __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5, + ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4, +}; + +enum { + ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0, + ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1, + ETHTOOL_A_CABLE_AMPLITUDE_mV = 2, + __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3, + ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_PULSE_UNSPEC = 0, + ETHTOOL_A_CABLE_PULSE_mV = 1, + __ETHTOOL_A_CABLE_PULSE_CNT = 2, + ETHTOOL_A_CABLE_PULSE_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_STEP_UNSPEC = 0, + ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1, + ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2, + ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3, + __ETHTOOL_A_CABLE_STEP_CNT = 4, + ETHTOOL_A_CABLE_STEP_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TDR_NEST_STEP = 1, + ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2, + ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3, + __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4, + ETHTOOL_A_CABLE_TDR_NEST_MAX = 3, +}; + +enum nft_objref_attributes { + NFTA_OBJREF_UNSPEC = 0, + NFTA_OBJREF_IMM_TYPE = 1, + NFTA_OBJREF_IMM_NAME = 2, + NFTA_OBJREF_SET_SREG = 3, + NFTA_OBJREF_SET_NAME = 4, + NFTA_OBJREF_SET_ID = 5, + __NFTA_OBJREF_MAX = 6, +}; + +struct nft_objref_map { + struct nft_set *set; + u8 sreg; + struct nft_set_binding binding; +}; + +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + __be16 sport; + u16 dport; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + struct sock *selected_sk; + u32 ingress_ifindex; + bool no_reuseport; +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + union { + __be32 imsf_slist[1]; + struct { + struct {} __empty_imsf_slist_flex; + __be32 imsf_slist_flex[0]; + }; + }; +}; + +struct igmphdr { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; +}; + +struct igmp_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *in_dev; +}; + +struct igmp_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *idev; + struct ip_mc_list *im; +}; + +enum { + BTF_KIND_UNKN = 0, + BTF_KIND_INT = 1, + BTF_KIND_PTR = 2, + BTF_KIND_ARRAY = 3, + BTF_KIND_STRUCT = 4, + BTF_KIND_UNION = 5, + BTF_KIND_ENUM = 6, + BTF_KIND_FWD = 7, + BTF_KIND_TYPEDEF = 8, + BTF_KIND_VOLATILE = 9, + BTF_KIND_CONST = 10, + BTF_KIND_RESTRICT = 11, + BTF_KIND_FUNC = 12, + BTF_KIND_FUNC_PROTO = 13, + BTF_KIND_VAR = 14, + BTF_KIND_DATASEC = 15, + BTF_KIND_FLOAT = 16, + BTF_KIND_DECL_TAG = 17, + BTF_KIND_TYPE_TAG = 18, + BTF_KIND_ENUM64 = 19, + NR_BTF_KINDS = 20, + BTF_KIND_MAX = 19, +}; + +typedef u64 (*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32); + +struct bpf_struct_ops_tcp_congestion_ops { + struct bpf_struct_ops_common_value 
common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct tcp_congestion_ops data; +}; + +enum netevent_notif_type { + NETEVENT_NEIGH_UPDATE = 1, + NETEVENT_REDIRECT = 2, + NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, + NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, + NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, + NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, +}; + +struct rtas_t { + long unsigned int entry; + long unsigned int base; + long unsigned int size; + struct device_node *dev; +}; + +struct rtas_error_log { + u8 byte0; + u8 byte1; + u8 byte2; + u8 byte3; + __be32 extended_log_length; + unsigned char buffer[1]; +}; + +enum rtas_function_index { + RTAS_FNIDX__CHECK_EXCEPTION = 0, + RTAS_FNIDX__DISPLAY_CHARACTER = 1, + RTAS_FNIDX__EVENT_SCAN = 2, + RTAS_FNIDX__FREEZE_TIME_BASE = 3, + RTAS_FNIDX__GET_POWER_LEVEL = 4, + RTAS_FNIDX__GET_SENSOR_STATE = 5, + RTAS_FNIDX__GET_TERM_CHAR = 6, + RTAS_FNIDX__GET_TIME_OF_DAY = 7, + RTAS_FNIDX__IBM_ACTIVATE_FIRMWARE = 8, + RTAS_FNIDX__IBM_CBE_START_PTCAL = 9, + RTAS_FNIDX__IBM_CBE_STOP_PTCAL = 10, + RTAS_FNIDX__IBM_CHANGE_MSI = 11, + RTAS_FNIDX__IBM_CLOSE_ERRINJCT = 12, + RTAS_FNIDX__IBM_CONFIGURE_BRIDGE = 13, + RTAS_FNIDX__IBM_CONFIGURE_CONNECTOR = 14, + RTAS_FNIDX__IBM_CONFIGURE_KERNEL_DUMP = 15, + RTAS_FNIDX__IBM_CONFIGURE_PE = 16, + RTAS_FNIDX__IBM_CREATE_PE_DMA_WINDOW = 17, + RTAS_FNIDX__IBM_DISPLAY_MESSAGE = 18, + RTAS_FNIDX__IBM_ERRINJCT = 19, + RTAS_FNIDX__IBM_EXTI2C = 20, + RTAS_FNIDX__IBM_GET_CONFIG_ADDR_INFO = 21, + RTAS_FNIDX__IBM_GET_CONFIG_ADDR_INFO2 = 22, + RTAS_FNIDX__IBM_GET_DYNAMIC_SENSOR_STATE = 23, + RTAS_FNIDX__IBM_GET_INDICES = 24, + RTAS_FNIDX__IBM_GET_RIO_TOPOLOGY = 25, + RTAS_FNIDX__IBM_GET_SYSTEM_PARAMETER = 26, + RTAS_FNIDX__IBM_GET_VPD = 27, + RTAS_FNIDX__IBM_GET_XIVE = 28, + RTAS_FNIDX__IBM_INT_OFF = 29, + RTAS_FNIDX__IBM_INT_ON = 30, + RTAS_FNIDX__IBM_IO_QUIESCE_ACK = 31, + RTAS_FNIDX__IBM_LPAR_PERFTOOLS = 32, + RTAS_FNIDX__IBM_MANAGE_FLASH_IMAGE = 33, + RTAS_FNIDX__IBM_MANAGE_STORAGE_PRESERVATION = 34, + RTAS_FNIDX__IBM_NMI_INTERLOCK = 35, + RTAS_FNIDX__IBM_NMI_REGISTER = 36, + RTAS_FNIDX__IBM_OPEN_ERRINJCT = 37, + RTAS_FNIDX__IBM_OPEN_SRIOV_ALLOW_UNFREEZE = 38, + RTAS_FNIDX__IBM_OPEN_SRIOV_MAP_PE_NUMBER = 39, + RTAS_FNIDX__IBM_OS_TERM = 40, + RTAS_FNIDX__IBM_PARTNER_CONTROL = 41, + RTAS_FNIDX__IBM_PHYSICAL_ATTESTATION = 42, + RTAS_FNIDX__IBM_PLATFORM_DUMP = 43, + RTAS_FNIDX__IBM_POWER_OFF_UPS = 44, + RTAS_FNIDX__IBM_QUERY_INTERRUPT_SOURCE_NUMBER = 45, + RTAS_FNIDX__IBM_QUERY_PE_DMA_WINDOW = 46, + RTAS_FNIDX__IBM_READ_PCI_CONFIG = 47, + RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE = 48, + RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2 = 49, + RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW = 50, + RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW = 51, + RTAS_FNIDX__IBM_SCAN_LOG_DUMP = 52, + RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR = 53, + RTAS_FNIDX__IBM_SET_EEH_OPTION = 54, + RTAS_FNIDX__IBM_SET_SLOT_RESET = 55, + RTAS_FNIDX__IBM_SET_SYSTEM_PARAMETER = 56, + RTAS_FNIDX__IBM_SET_XIVE = 57, + RTAS_FNIDX__IBM_SLOT_ERROR_DETAIL = 58, + RTAS_FNIDX__IBM_SUSPEND_ME = 59, + RTAS_FNIDX__IBM_TUNE_DMA_PARMS = 60, + RTAS_FNIDX__IBM_UPDATE_FLASH_64_AND_REBOOT = 61, + RTAS_FNIDX__IBM_UPDATE_NODES = 62, + RTAS_FNIDX__IBM_UPDATE_PROPERTIES = 63, + RTAS_FNIDX__IBM_VALIDATE_FLASH_IMAGE = 64, + RTAS_FNIDX__IBM_WRITE_PCI_CONFIG = 65, + RTAS_FNIDX__NVRAM_FETCH = 66, + RTAS_FNIDX__NVRAM_STORE = 67, + RTAS_FNIDX__POWER_OFF = 68, + RTAS_FNIDX__PUT_TERM_CHAR = 69, + RTAS_FNIDX__QUERY_CPU_STOPPED_STATE = 70, + RTAS_FNIDX__READ_PCI_CONFIG = 71, + RTAS_FNIDX__RTAS_LAST_ERROR = 
72, + RTAS_FNIDX__SET_INDICATOR = 73, + RTAS_FNIDX__SET_POWER_LEVEL = 74, + RTAS_FNIDX__SET_TIME_FOR_POWER_ON = 75, + RTAS_FNIDX__SET_TIME_OF_DAY = 76, + RTAS_FNIDX__START_CPU = 77, + RTAS_FNIDX__STOP_SELF = 78, + RTAS_FNIDX__SYSTEM_REBOOT = 79, + RTAS_FNIDX__THAW_TIME_BASE = 80, + RTAS_FNIDX__WRITE_PCI_CONFIG = 81, +}; + +typedef struct { + const enum rtas_function_index index; +} rtas_fn_handle_t; + +typedef const struct cpumask * (*sched_domain_mask_f)(int); + +typedef int (*sched_domain_flags_f)(); + +struct sd_data { + struct sched_domain **sd; + struct sched_domain_shared **sds; + struct sched_group **sg; + struct sched_group_capacity **sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; + char *name; +}; + +struct thread_groups { + unsigned int property; + unsigned int nr_groups; + unsigned int threads_per_group; + unsigned int thread_list[8]; +}; + +struct thread_groups_list { + unsigned int nr_properties; + struct thread_groups property_tgs[2]; +}; + +struct cpu_messages { + long int messages; +}; + +enum { + IRQ_STARTUP_NORMAL = 0, + IRQ_STARTUP_MANAGED = 1, + IRQ_STARTUP_ABORT = 2, +}; + +struct timens_offset { + s64 sec; + u64 nsec; +}; + +struct arch_vdso_time_data {}; + +struct vdso_timestamp { + u64 sec; + u64 nsec; +}; + +struct vdso_data { + u32 seq; + s32 clock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; + union { + struct vdso_timestamp basetime[12]; + struct timens_offset offset[12]; + }; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 hrtimer_res; + u32 __unused; + struct arch_vdso_time_data arch_data; +}; + +enum event_trigger_type { + ETT_NONE = 0, + ETT_TRACE_ONOFF = 1, + ETT_SNAPSHOT = 2, + ETT_STACKTRACE = 4, + ETT_EVENT_ENABLE = 8, + ETT_EVENT_HIST = 16, + ETT_HIST_ENABLE = 32, + ETT_EVENT_EPROBE = 64, +}; + +enum { + EVENT_TRIGGER_FL_PROBE = 1, +}; + +struct event_trigger_ops; + +struct event_command; + +struct event_trigger_data { + long unsigned int count; + int ref; + int flags; + struct event_trigger_ops *ops; + struct event_command *cmd_ops; + struct event_filter *filter; + char *filter_str; + void *private_data; + bool paused; + bool paused_tmp; + struct list_head list; + char *name; + struct list_head named_list; + struct event_trigger_data *named_data; +}; + +struct event_trigger_ops { + void (*trigger)(struct event_trigger_data *, struct trace_buffer *, void *, struct ring_buffer_event *); + int (*init)(struct event_trigger_data *); + void (*free)(struct event_trigger_data *); + int (*print)(struct seq_file *, struct event_trigger_data *); +}; + +struct event_command { + struct list_head list; + char *name; + enum event_trigger_type trigger_type; + int flags; + int (*parse)(struct event_command *, struct trace_event_file *, char *, char *, char *); + int (*reg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg)(char *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg_all)(struct trace_event_file *); + int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *); + struct event_trigger_ops * (*get_trigger_ops)(char *, char *); +}; + +struct enable_trigger_data { + struct trace_event_file *file; + bool enable; + bool hist; +}; + +enum event_command_flags { + EVENT_CMD_FL_POST_TRIGGER = 1, + EVENT_CMD_FL_NEEDS_REC = 2, +}; + +struct mlock_fbatch { + local_lock_t lock; + struct folio_batch fbatch; +}; + +enum lru_status { + LRU_REMOVED = 0, + LRU_REMOVED_RETRY 
= 1, + LRU_ROTATE = 2, + LRU_SKIP = 3, + LRU_RETRY = 4, + LRU_STOP = 5, +}; + +typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, void *); + +struct inodes_stat_t { + long int nr_inodes; + long int nr_unused; + long int dummy[5]; +}; + +struct trace_event_raw_ctime { + struct trace_entry ent; + dev_t dev; + ino_t ino; + time64_t ctime_s; + u32 ctime_ns; + u32 gen; + char __data[0]; +}; + +struct trace_event_raw_ctime_ns_xchg { + struct trace_entry ent; + dev_t dev; + ino_t ino; + u32 gen; + u32 old; + u32 new; + u32 cur; + char __data[0]; +}; + +struct trace_event_raw_fill_mg_cmtime { + struct trace_entry ent; + dev_t dev; + ino_t ino; + time64_t ctime_s; + time64_t mtime_s; + u32 ctime_ns; + u32 mtime_ns; + u32 gen; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_ctime {}; + +struct trace_event_data_offsets_ctime_ns_xchg {}; + +struct trace_event_data_offsets_fill_mg_cmtime {}; + +typedef void (*btf_trace_inode_set_ctime_to_ts)(void *, struct inode *, struct timespec64 *); + +typedef void (*btf_trace_ctime_xchg_skip)(void *, struct inode *, struct timespec64 *); + +typedef void (*btf_trace_ctime_ns_xchg)(void *, struct inode *, u32, u32, u32); + +typedef void (*btf_trace_fill_mg_cmtime)(void *, struct inode *, struct timespec64 *, struct timespec64 *); + +struct kernfs_root { + struct kernfs_node *kn; + unsigned int flags; + struct idr ino_idr; + u32 last_id_lowbits; + u32 id_highbits; + struct kernfs_syscall_ops *syscall_ops; + struct list_head supers; + wait_queue_head_t deactivate_waitq; + struct rw_semaphore kernfs_rwsem; + struct rw_semaphore kernfs_iattr_rwsem; + struct rw_semaphore kernfs_supers_rwsem; + struct callback_head rcu; +}; + +struct simple_xattrs { + struct rb_root rb_root; + rwlock_t lock; +}; + +struct kernfs_iattrs { + kuid_t ia_uid; + kgid_t ia_gid; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct simple_xattrs xattrs; + atomic_t nr_user_xattrs; + atomic_t user_xattr_size; +}; + +struct kernfs_super_info { + struct super_block *sb; + struct kernfs_root *root; + const void *ns; + struct list_head node; +}; + +struct ext4_inode { + __le16 i_mode; + __le16 i_uid; + __le32 i_size_lo; + __le32 i_atime; + __le32 i_ctime; + __le32 i_mtime; + __le32 i_dtime; + __le16 i_gid; + __le16 i_links_count; + __le32 i_blocks_lo; + __le32 i_flags; + union { + struct { + __le32 l_i_version; + } linux1; + struct { + __u32 h_i_translator; + } hurd1; + struct { + __u32 m_i_reserved1; + } masix1; + } osd1; + __le32 i_block[15]; + __le32 i_generation; + __le32 i_file_acl_lo; + __le32 i_size_high; + __le32 i_obso_faddr; + union { + struct { + __le16 l_i_blocks_high; + __le16 l_i_file_acl_high; + __le16 l_i_uid_high; + __le16 l_i_gid_high; + __le16 l_i_checksum_lo; + __le16 l_i_reserved; + } linux2; + struct { + __le16 h_i_reserved1; + __u16 h_i_mode_high; + __u16 h_i_uid_high; + __u16 h_i_gid_high; + __u32 h_i_author; + } hurd2; + struct { + __le16 h_i_reserved1; + __le16 m_i_file_acl_high; + __u32 m_i_reserved2[2]; + } masix2; + } osd2; + __le16 i_extra_isize; + __le16 i_checksum_hi; + __le32 i_ctime_extra; + __le32 i_mtime_extra; + __le32 i_atime_extra; + __le32 i_crtime; + __le32 i_crtime_extra; + __le32 i_version_hi; + __le32 i_projid; +}; + +struct ext4_fc_tl { + __le16 fc_tag; + __le16 fc_len; +}; + +struct ext4_fc_head { + __le32 fc_features; + __le32 fc_tid; +}; + +struct ext4_fc_add_range { + __le32 fc_ino; + __u8 fc_ex[12]; +}; + +struct ext4_fc_del_range { + __le32 fc_ino; + 
__le32 fc_lblk; + __le32 fc_len; +}; + +struct ext4_fc_dentry_info { + __le32 fc_parent_ino; + __le32 fc_ino; + __u8 fc_dname[0]; +}; + +struct ext4_fc_inode { + __le32 fc_ino; + __u8 fc_raw_inode[0]; +}; + +struct ext4_fc_tail { + __le32 fc_tid; + __le32 fc_crc; +}; + +enum { + EXT4_FC_STATUS_OK = 0, + EXT4_FC_STATUS_INELIGIBLE = 1, + EXT4_FC_STATUS_SKIPPED = 2, + EXT4_FC_STATUS_FAILED = 3, +}; + +enum { + EXT4_FC_REASON_XATTR = 0, + EXT4_FC_REASON_CROSS_RENAME = 1, + EXT4_FC_REASON_JOURNAL_FLAG_CHANGE = 2, + EXT4_FC_REASON_NOMEM = 3, + EXT4_FC_REASON_SWAP_BOOT = 4, + EXT4_FC_REASON_RESIZE = 5, + EXT4_FC_REASON_RENAME_DIR = 6, + EXT4_FC_REASON_FALLOC_RANGE = 7, + EXT4_FC_REASON_INODE_JOURNAL_DATA = 8, + EXT4_FC_REASON_ENCRYPTED_FILENAME = 9, + EXT4_FC_REASON_MAX = 10, +}; + +struct ext4_fc_dentry_update { + int fcd_op; + int fcd_parent; + int fcd_ino; + long: 32; + struct qstr fcd_name; + unsigned char fcd_iname[36]; + struct list_head fcd_list; + struct list_head fcd_dilist; + long: 32; +}; + +struct ext4_locality_group { + struct mutex lg_mutex; + struct list_head lg_prealloc_list[10]; + spinlock_t lg_prealloc_lock; +}; + +enum { + EXT4_MF_MNTDIR_SAMPLED = 0, + EXT4_MF_FC_INELIGIBLE = 1, +}; + +struct ext4_iloc { + struct buffer_head *bh; + long unsigned int offset; + ext4_group_t block_group; +}; + +typedef enum { + EXT4_IGET_NORMAL = 0, + EXT4_IGET_SPECIAL = 1, + EXT4_IGET_HANDLE = 2, + EXT4_IGET_BAD = 4, + EXT4_IGET_EA_INODE = 8, +} ext4_iget_flags; + +struct __track_dentry_update_args { + struct dentry *dentry; + int op; +}; + +struct __track_range_args { + ext4_lblk_t start; + ext4_lblk_t end; +}; + +struct dentry_info_args { + int parent_ino; + int dname_len; + int ino; + int inode_len; + char *dname; +}; + +struct ext4_fc_tl_mem { + u16 fc_tag; + u16 fc_len; +}; + +struct btrfs_ioctl_balance_args { + __u64 flags; + __u64 state; + struct btrfs_balance_args data; + struct btrfs_balance_args meta; + struct btrfs_balance_args sys; + struct btrfs_balance_progress stat; + __u64 unused[72]; +}; + +struct btrfs_ioctl_get_dev_stats { + __u64 devid; + __u64 nr_items; + __u64 flags; + __u64 values[5]; + __u64 unused[121]; +}; + +enum btrfs_err_code { + BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1, + BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET = 2, + BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET = 3, + BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET = 4, + BTRFS_ERROR_DEV_TGT_REPLACE = 5, + BTRFS_ERROR_DEV_MISSING_NOT_FOUND = 6, + BTRFS_ERROR_DEV_ONLY_WRITABLE = 7, + BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS = 8, + BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET = 9, + BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET = 10, +}; + +struct btrfs_disk_balance_args { + __le64 profiles; + union { + __le64 usage; + struct { + __le32 usage_min; + __le32 usage_max; + }; + }; + __le64 devid; + __le64 pstart; + __le64 pend; + __le64 vstart; + __le64 vend; + __le64 target; + __le64 flags; + union { + __le64 limit; + struct { + __le32 limit_min; + __le32 limit_max; + }; + }; + __le32 stripes_min; + __le32 stripes_max; + __le64 unused[6]; +}; + +struct btrfs_balance_item { + __le64 flags; + struct btrfs_disk_balance_args data; + struct btrfs_disk_balance_args meta; + struct btrfs_disk_balance_args sys; + __le64 unused[4]; +}; + +struct btrfs_dev_stats_item { + __le64 values[5]; +}; + +enum { + IO_TREE_FS_PINNED_EXTENTS = 0, + IO_TREE_FS_EXCLUDED_EXTENTS = 1, + IO_TREE_BTREE_INODE_IO = 2, + IO_TREE_INODE_IO = 3, + IO_TREE_RELOC_BLOCKS = 4, + IO_TREE_TRANS_DIRTY_PAGES = 5, + IO_TREE_ROOT_DIRTY_LOG_PAGES = 6, + IO_TREE_INODE_FILE_EXTENT = 7, + IO_TREE_LOG_CSUM_RANGE = 8, + 
IO_TREE_SELFTEST = 9, + IO_TREE_DEVICE_ALLOC_STATE = 10, +}; + +struct btrfs_swapfile_pin { + struct rb_node node; + void *ptr; + struct inode *inode; + bool is_block_group; + int bg_extent_count; +}; + +struct btrfs_discard_stripe { + struct btrfs_device *dev; + long: 32; + u64 physical; + u64 length; +}; + +struct btrfs_device_info { + struct btrfs_device *dev; + long: 32; + u64 dev_offset; + u64 max_avail; + u64 total_avail; +}; + +struct btrfs_dev_lookup_args { + u64 devid; + u8 *uuid; + u8 *fsid; + bool missing; + long: 32; +}; + +struct btrfs_io_geometry { + u32 stripe_index; + u32 stripe_nr; + int mirror_num; + int num_stripes; + u64 stripe_offset; + u64 raid56_full_stripe_start; + int max_errors; + enum btrfs_map_op op; +}; + +struct alloc_chunk_ctl { + u64 start; + u64 type; + int num_stripes; + int sub_stripes; + int dev_stripes; + int devs_max; + int devs_min; + int devs_increment; + int ncopies; + int nparity; + u64 max_stripe_size; + u64 max_chunk_size; + u64 dev_extent_min; + u64 stripe_size; + u64 chunk_size; + int ndevs; + long: 32; +}; + +struct request_key_auth { + struct callback_head rcu; + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +}; + +enum asn1_class { + ASN1_UNIV = 0, + ASN1_APPL = 1, + ASN1_CONT = 2, + ASN1_PRIV = 3, +}; + +enum rsaprivkey_actions { + ACT_rsa_get_d = 0, + ACT_rsa_get_dp = 1, + ACT_rsa_get_dq = 2, + ACT_rsa_get_e = 3, + ACT_rsa_get_n = 4, + ACT_rsa_get_p = 5, + ACT_rsa_get_q = 6, + ACT_rsa_get_qinv = 7, + NR__rsaprivkey_actions = 8, +}; + +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[0]; +}; + +struct crypto_akcipher { + unsigned int reqsize; + long: 32; + struct crypto_tfm base; +}; + +struct akcipher_alg { + int (*encrypt)(struct akcipher_request *); + int (*decrypt)(struct akcipher_request *); + int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int); + unsigned int (*max_size)(struct crypto_akcipher *); + int (*init)(struct crypto_akcipher *); + void (*exit)(struct crypto_akcipher *); + long: 32; + struct crypto_alg base; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +struct hash_prefix { + const char *name; + const u8 *data; + size_t size; +}; + +struct rsassa_pkcs1_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct rsassa_pkcs1_inst_ctx { + struct crypto_akcipher_spawn spawn; + const struct hash_prefix *hash_prefix; +}; + +struct crypto_aes_ctx { + u32 key_enc[60]; + u32 key_dec[60]; + u32 key_length; +}; + +enum x509_akid_actions { + ACT_x509_akid_note_kid = 0, + ACT_x509_akid_note_name = 1, + ACT_x509_akid_note_serial = 2, + ACT_x509_extract_name_segment = 3, + ACT_x509_note_OID = 4, + NR__x509_akid_actions = 5, +}; + +enum { + DIO_SHOULD_DIRTY = 1, + DIO_IS_SYNC = 2, +}; + +struct blkdev_dio { + union { + struct kiocb *iocb; + struct task_struct *waiter; + }; + size_t size; + atomic_t ref; + unsigned int flags; + long: 32; + long: 32; + long: 32; + long: 32; + struct bio bio; +}; + +enum bfqq_state_flags { + BFQQF_just_created = 0, + BFQQF_busy = 1, + BFQQF_wait_request = 2, + BFQQF_non_blocking_wait_rq = 3, + BFQQF_fifo_expire = 4, + BFQQF_has_short_ttime = 5, + BFQQF_sync = 6, + BFQQF_IO_bound = 7, + BFQQF_in_large_burst = 8, + BFQQF_softrt_update = 9, + 
BFQQF_coop = 10, + BFQQF_split_coop = 11, +}; + +typedef struct { + __u8 b[16]; +} guid_t; + +struct lwq { + spinlock_t lock; + struct llist_node *ready; + struct llist_head new; +}; + +struct once_work { + struct work_struct work; + struct static_key_true *key; + struct module *module; +}; + +typedef unsigned char Byte; + +typedef long unsigned int uLong; + +struct internal_state; + +struct z_stream_s { + const Byte *next_in; + uLong avail_in; + uLong total_in; + Byte *next_out; + uLong avail_out; + uLong total_out; + char *msg; + struct internal_state *state; + void *workspace; + int data_type; + uLong adler; + uLong reserved; +}; + +struct internal_state { + int dummy; +}; + +typedef struct z_stream_s z_stream; + +typedef z_stream *z_streamp; + +enum { + FB_BLANK_UNBLANK = 0, + FB_BLANK_NORMAL = 1, + FB_BLANK_VSYNC_SUSPEND = 2, + FB_BLANK_HSYNC_SUSPEND = 3, + FB_BLANK_POWERDOWN = 4, +}; + +struct fb_cmap_user { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *, char *); + ssize_t (*store)(struct device_driver *, const char *, size_t); +}; + +enum bus_notifier_event { + BUS_NOTIFY_ADD_DEVICE = 0, + BUS_NOTIFY_DEL_DEVICE = 1, + BUS_NOTIFY_REMOVED_DEVICE = 2, + BUS_NOTIFY_BIND_DRIVER = 3, + BUS_NOTIFY_BOUND_DRIVER = 4, + BUS_NOTIFY_UNBIND_DRIVER = 5, + BUS_NOTIFY_UNBOUND_DRIVER = 6, + BUS_NOTIFY_DRIVER_NOT_BOUND = 7, +}; + +struct pm_clk_notifier_block { + struct notifier_block nb; + struct dev_pm_domain *pm_domain; + char *con_ids[0]; +}; + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED = 1, + PCE_STATUS_PREPARED = 2, + PCE_STATUS_ENABLED = 3, + PCE_STATUS_ERROR = 4, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; + bool enabled_when_prepared; +}; + +enum ata_xfer_mask { + ATA_MASK_PIO = 127, + ATA_MASK_MWDMA = 3968, + ATA_MASK_UDMA = 1044480, +}; + +enum ata_dev_iter_mode { + ATA_DITER_ENABLED = 0, + ATA_DITER_ENABLED_REVERSE = 1, + ATA_DITER_ALL = 2, + ATA_DITER_ALL_REVERSE = 3, +}; + +struct trace_event_raw_ata_qc_issue_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char cmd; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char feature; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + unsigned char proto; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_qc_complete_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char status; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char error; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_tf_load { + struct trace_entry ent; + unsigned int ata_port; + unsigned char cmd; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char feature; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char 
hob_feature; + unsigned char proto; + char __data[0]; +}; + +struct trace_event_raw_ata_exec_command_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int tag; + unsigned char cmd; + unsigned char feature; + unsigned char hob_nsect; + unsigned char proto; + char __data[0]; +}; + +struct trace_event_raw_ata_bmdma_status { + struct trace_entry ent; + unsigned int ata_port; + unsigned int tag; + unsigned char host_stat; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int eh_action; + unsigned int eh_err_mask; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy_qc { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int qc_flags; + unsigned int eh_err_mask; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_action_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int eh_action; + char __data[0]; +}; + +struct trace_event_raw_ata_link_reset_begin_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int class[2]; + long unsigned int deadline; + char __data[0]; +}; + +struct trace_event_raw_ata_link_reset_end_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int class[2]; + int rc; + char __data[0]; +}; + +struct trace_event_raw_ata_port_eh_begin_template { + struct trace_entry ent; + unsigned int ata_port; + char __data[0]; +}; + +struct trace_event_raw_ata_sff_hsm_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int qc_flags; + unsigned int protocol; + unsigned int hsm_state; + unsigned char dev_state; + char __data[0]; +}; + +struct trace_event_raw_ata_transfer_data_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int flags; + unsigned int offset; + unsigned int bytes; + char __data[0]; +}; + +struct trace_event_raw_ata_sff_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned char hsm_state; + char __data[0]; +}; + +struct trace_event_data_offsets_ata_qc_issue_template {}; + +struct trace_event_data_offsets_ata_qc_complete_template {}; + +struct trace_event_data_offsets_ata_tf_load {}; + +struct trace_event_data_offsets_ata_exec_command_template {}; + +struct trace_event_data_offsets_ata_bmdma_status {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy_qc {}; + +struct trace_event_data_offsets_ata_eh_action_template {}; + +struct trace_event_data_offsets_ata_link_reset_begin_template {}; + +struct trace_event_data_offsets_ata_link_reset_end_template {}; + +struct trace_event_data_offsets_ata_port_eh_begin_template {}; + +struct trace_event_data_offsets_ata_sff_hsm_template {}; + +struct trace_event_data_offsets_ata_transfer_data_template {}; + +struct trace_event_data_offsets_ata_sff_template {}; + +typedef void (*btf_trace_ata_qc_prep)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_issue)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_internal)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_failed)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_done)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_tf_load)(void *, struct ata_port *, const struct 
ata_taskfile *); + +typedef void (*btf_trace_ata_exec_command)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_setup)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_start)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_stop)(void *, struct ata_port *, const struct ata_taskfile *, unsigned int); + +typedef void (*btf_trace_ata_bmdma_status)(void *, struct ata_port *, unsigned int); + +typedef void (*btf_trace_ata_eh_link_autopsy)(void *, struct ata_device *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_eh_link_autopsy_qc)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_eh_about_to_do)(void *, struct ata_link *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_eh_done)(void *, struct ata_link *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_link_hardreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_slave_hardreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_link_softreset_begin)(void *, struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*btf_trace_ata_link_hardreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_slave_hardreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_link_softreset_end)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_link_postreset)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_slave_postreset)(void *, struct ata_link *, unsigned int *, int); + +typedef void (*btf_trace_ata_std_sched_eh)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_port_freeze)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_port_thaw)(void *, struct ata_port *); + +typedef void (*btf_trace_ata_sff_hsm_state)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_hsm_command_complete)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_port_intr)(void *, struct ata_queued_cmd *, unsigned char); + +typedef void (*btf_trace_ata_sff_pio_transfer_data)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_atapi_pio_transfer_data)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_atapi_send_cdb)(void *, struct ata_queued_cmd *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_sff_flush_pio_task)(void *, struct ata_port *); + +enum { + ATA_READID_POSTRESET = 1, + ATA_DNXFER_PIO = 0, + ATA_DNXFER_DMA = 1, + ATA_DNXFER_40C = 2, + ATA_DNXFER_FORCE_PIO = 3, + ATA_DNXFER_FORCE_PIO0 = 4, + ATA_DNXFER_QUIET = -2147483648, +}; + +struct ata_force_param { + const char *name; + u8 cbl; + u8 spd_limit; + unsigned int xfer_mask; + unsigned int quirk_on; + unsigned int quirk_off; + u16 lflags_on; + u16 lflags_off; +}; + +struct ata_force_ent { + int port; + int device; + struct ata_force_param param; +}; + +struct ata_xfer_ent { + int shift; + int bits; + u8 base; +}; + +struct ata_dev_quirks_entry { + const char *model_num; + const char *model_rev; + unsigned int quirks; +}; + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + 
bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 1, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 2, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 3, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 4, + E1000_INVM_INVALIDATED_STRUCTURE = 15, +}; + +struct input_event { + __kernel_ulong_t __sec; + __kernel_ulong_t __usec; + __u16 type; + __u16 code; + __s32 value; +}; + +struct posix_clock; + +struct posix_clock_context; + +struct posix_clock_operations { + struct module *owner; + int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *); + int (*clock_gettime)(struct posix_clock *, struct timespec64 *); + int (*clock_getres)(struct posix_clock *, struct timespec64 *); + int (*clock_settime)(struct posix_clock *, const struct timespec64 *); + long int (*ioctl)(struct posix_clock_context *, unsigned int, long unsigned int); + int (*open)(struct posix_clock_context *, fmode_t); + __poll_t (*poll)(struct posix_clock_context *, struct file *, poll_table *); + int (*release)(struct posix_clock_context *); + ssize_t (*read)(struct posix_clock_context *, uint, char *, size_t); +}; + +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct device *dev; + struct rw_semaphore rwsem; + bool zombie; +}; + +struct posix_clock_context { + struct posix_clock *clk; + void *private_clkdata; +}; + +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + +struct kthread_delayed_work { + struct kthread_work work; + struct timer_list timer; +}; + +struct ptp_clock_caps { + int max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int pps; + int n_pins; + int cross_timestamping; + int adjust_phase; + int max_phase_adj; + int rsv[11]; +}; + +struct ptp_sys_offset { + unsigned int n_samples; + unsigned int rsv[3]; + struct ptp_clock_time ts[51]; +}; + +struct ptp_sys_offset_extended { + unsigned int n_samples; + __kernel_clockid_t clockid; + unsigned int rsv[2]; + struct ptp_clock_time ts[75]; +}; + +struct ptp_sys_offset_precise { + struct ptp_clock_time device; + struct ptp_clock_time sys_realtime; + struct ptp_clock_time sys_monoraw; + unsigned int rsv[4]; +}; + +struct ptp_extts_event { + struct ptp_clock_time t; + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct pps_ktime { + __s64 sec; + __s32 nsec; + __u32 flags; +}; + +struct pps_kparams { + int api_version; + int mode; + struct pps_ktime assert_off_tu; + struct pps_ktime clear_off_tu; +}; + +struct pps_device; + +struct pps_source_info { + char name[32]; + char path[32]; + int mode; + void (*echo)(struct pps_device *, int, void *); + struct module *owner; + struct device *dev; +}; + +struct pps_device { + struct pps_source_info info; + struct pps_kparams params; + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; + unsigned int last_ev; + wait_queue_head_t queue; + unsigned int id; + const void *lookup_cookie; + struct cdev cdev; + struct device *dev; + struct fasync_struct *async_queue; + spinlock_t lock; + long: 32; +}; + +struct timestamp_event_queue { + struct ptp_extts_event buf[128]; + int head; + int tail; + spinlock_t lock; + struct list_head qlist; + long unsigned int *mask; + struct dentry *debugfs_instance; + struct debugfs_u32_array dfs_bitmap; + long: 32; +}; + +struct ptp_clock { + struct posix_clock clock; + long: 32; + struct device dev; + struct ptp_clock_info 
*info; + dev_t devid; + int index; + struct pps_device *pps_source; + long int dialed_frequency; + struct list_head tsevqs; + spinlock_t tsevqs_lock; + struct mutex pincfg_mux; + wait_queue_head_t tsev_wq; + int defunct; + struct device_attribute *pin_dev_attr; + struct attribute **pin_attr; + struct attribute_group pin_attr_group; + const struct attribute_group *pin_attr_groups[2]; + struct kthread_worker *kworker; + struct kthread_delayed_work aux_work; + unsigned int max_vclocks; + unsigned int n_vclocks; + int *vclock_index; + struct mutex n_vclocks_mux; + bool is_virtual_clock; + bool has_cycles; + struct dentry *debugfs_root; +}; + +struct of_phandle_iterator { + const char *cells_name; + int cell_count; + const struct device_node *parent; + const __be32 *list_end; + const __be32 *phandle_end; + const __be32 *cur; + uint32_t cur_count; + phandle phandle; + struct device_node *node; +}; + +struct alias_prop { + struct list_head link; + const char *alias; + struct device_node *np; + int id; + char stem[0]; +}; + +struct net_device_devres { + struct net_device *ndev; +}; + +enum netdev_xdp_rx_metadata { + NETDEV_XDP_RX_METADATA_TIMESTAMP = 1, + NETDEV_XDP_RX_METADATA_HASH = 2, + NETDEV_XDP_RX_METADATA_VLAN_TAG = 4, +}; + +enum netdev_xsk_flags { + NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, + NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, +}; + +enum netdev_qstats_scope { + NETDEV_QSTATS_SCOPE_QUEUE = 1, +}; + +enum { + NETDEV_A_DEV_IFINDEX = 1, + NETDEV_A_DEV_PAD = 2, + NETDEV_A_DEV_XDP_FEATURES = 3, + NETDEV_A_DEV_XDP_ZC_MAX_SEGS = 4, + NETDEV_A_DEV_XDP_RX_METADATA_FEATURES = 5, + NETDEV_A_DEV_XSK_FEATURES = 6, + __NETDEV_A_DEV_MAX = 7, + NETDEV_A_DEV_MAX = 6, +}; + +enum { + NETDEV_A_NAPI_IFINDEX = 1, + NETDEV_A_NAPI_ID = 2, + NETDEV_A_NAPI_IRQ = 3, + NETDEV_A_NAPI_PID = 4, + NETDEV_A_NAPI_DEFER_HARD_IRQS = 5, + NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT = 6, + NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT = 7, + __NETDEV_A_NAPI_MAX = 8, + NETDEV_A_NAPI_MAX = 7, +}; + +enum { + NETDEV_A_QSTATS_IFINDEX = 1, + NETDEV_A_QSTATS_QUEUE_TYPE = 2, + NETDEV_A_QSTATS_QUEUE_ID = 3, + NETDEV_A_QSTATS_SCOPE = 4, + NETDEV_A_QSTATS_RX_PACKETS = 8, + NETDEV_A_QSTATS_RX_BYTES = 9, + NETDEV_A_QSTATS_TX_PACKETS = 10, + NETDEV_A_QSTATS_TX_BYTES = 11, + NETDEV_A_QSTATS_RX_ALLOC_FAIL = 12, + NETDEV_A_QSTATS_RX_HW_DROPS = 13, + NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS = 14, + NETDEV_A_QSTATS_RX_CSUM_COMPLETE = 15, + NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY = 16, + NETDEV_A_QSTATS_RX_CSUM_NONE = 17, + NETDEV_A_QSTATS_RX_CSUM_BAD = 18, + NETDEV_A_QSTATS_RX_HW_GRO_PACKETS = 19, + NETDEV_A_QSTATS_RX_HW_GRO_BYTES = 20, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS = 21, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES = 22, + NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS = 23, + NETDEV_A_QSTATS_TX_HW_DROPS = 24, + NETDEV_A_QSTATS_TX_HW_DROP_ERRORS = 25, + NETDEV_A_QSTATS_TX_CSUM_NONE = 26, + NETDEV_A_QSTATS_TX_NEEDS_CSUM = 27, + NETDEV_A_QSTATS_TX_HW_GSO_PACKETS = 28, + NETDEV_A_QSTATS_TX_HW_GSO_BYTES = 29, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS = 30, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES = 31, + NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS = 32, + NETDEV_A_QSTATS_TX_STOP = 33, + NETDEV_A_QSTATS_TX_WAKE = 34, + __NETDEV_A_QSTATS_MAX = 35, + NETDEV_A_QSTATS_MAX = 34, +}; + +enum { + NETDEV_A_DMABUF_IFINDEX = 1, + NETDEV_A_DMABUF_QUEUES = 2, + NETDEV_A_DMABUF_FD = 3, + NETDEV_A_DMABUF_ID = 4, + __NETDEV_A_DMABUF_MAX = 5, + NETDEV_A_DMABUF_MAX = 4, +}; + +struct netdev_nl_dump_ctx { + long unsigned int ifindex; + unsigned int rxq_idx; + unsigned int txq_idx; + unsigned int napi_id; +}; + 
+struct bpf_dummy_ops_state { + int val; +}; + +struct bpf_dummy_ops { + int (*test_1)(struct bpf_dummy_ops_state *); + int (*test_2)(struct bpf_dummy_ops_state *, int, short unsigned int, char, long unsigned int); + int (*test_sleepable)(struct bpf_dummy_ops_state *); +}; + +typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *, ...); + +struct bpf_dummy_ops_test_args { + u64 args[12]; + struct bpf_dummy_ops_state state; + long: 32; +}; + +struct bpf_struct_ops_bpf_dummy_ops { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct bpf_dummy_ops data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum ethtool_module_fw_flash_status { + ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED = 1, + ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS = 2, + ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED = 3, + ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR = 4, +}; + +struct firmware { + size_t size; + const u8 *data; + void *priv; +}; + +enum { + SFF8024_ID_UNK = 0, + SFF8024_ID_SFF_8472 = 2, + SFF8024_ID_SFP = 3, + SFF8024_ID_DWDM_SFP = 11, + SFF8024_ID_QSFP_8438 = 12, + SFF8024_ID_QSFP_8436_8636 = 13, + SFF8024_ID_QSFP28_8636 = 17, + SFF8024_ID_QSFP_DD = 24, + SFF8024_ID_OSFP = 25, + SFF8024_ID_DSFP = 27, + SFF8024_ID_QSFP_PLUS_CMIS = 30, + SFF8024_ID_SFP_DD_CMIS = 31, + SFF8024_ID_SFP_PLUS_CMIS = 32, + SFF8024_ENCODING_UNSPEC = 0, + SFF8024_ENCODING_8B10B = 1, + SFF8024_ENCODING_4B5B = 2, + SFF8024_ENCODING_NRZ = 3, + SFF8024_ENCODING_8472_MANCHESTER = 4, + SFF8024_ENCODING_8472_SONET = 5, + SFF8024_ENCODING_8472_64B66B = 6, + SFF8024_ENCODING_8436_MANCHESTER = 6, + SFF8024_ENCODING_8436_SONET = 4, + SFF8024_ENCODING_8436_64B66B = 5, + SFF8024_ENCODING_256B257B = 7, + SFF8024_ENCODING_PAM4 = 8, + SFF8024_CONNECTOR_UNSPEC = 0, + SFF8024_CONNECTOR_SC = 1, + SFF8024_CONNECTOR_FIBERJACK = 6, + SFF8024_CONNECTOR_LC = 7, + SFF8024_CONNECTOR_MT_RJ = 8, + SFF8024_CONNECTOR_MU = 9, + SFF8024_CONNECTOR_SG = 10, + SFF8024_CONNECTOR_OPTICAL_PIGTAIL = 11, + SFF8024_CONNECTOR_MPO_1X12 = 12, + SFF8024_CONNECTOR_MPO_2X16 = 13, + SFF8024_CONNECTOR_HSSDC_II = 32, + SFF8024_CONNECTOR_COPPER_PIGTAIL = 33, + SFF8024_CONNECTOR_RJ45 = 34, + SFF8024_CONNECTOR_NOSEPARATE = 35, + SFF8024_CONNECTOR_MXC_2X16 = 36, + SFF8024_ECC_UNSPEC = 0, + SFF8024_ECC_100G_25GAUI_C2M_AOC = 1, + SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 2, + SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 3, + SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 4, + SFF8024_ECC_100GBASE_SR10 = 5, + SFF8024_ECC_100GBASE_CR4 = 11, + SFF8024_ECC_25GBASE_CR_S = 12, + SFF8024_ECC_25GBASE_CR_N = 13, + SFF8024_ECC_10GBASE_T_SFI = 22, + SFF8024_ECC_10GBASE_T_SR = 28, + SFF8024_ECC_5GBASE_T = 29, + SFF8024_ECC_2_5GBASE_T = 30, +}; + +enum { + SFP_PHYS_ID = 0, + SFP_PHYS_EXT_ID = 1, + SFP_PHYS_EXT_ID_SFP = 4, + SFP_CONNECTOR = 2, + SFP_COMPLIANCE = 3, + SFP_ENCODING = 11, + SFP_BR_NOMINAL = 12, + SFP_RATE_ID = 13, + SFF_RID_8079 = 1, + SFF_RID_8431_RX_ONLY = 2, + SFF_RID_8431_TX_ONLY = 4, + SFF_RID_8431 = 6, + SFF_RID_10G8G = 14, + SFP_LINK_LEN_SM_KM = 14, + SFP_LINK_LEN_SM_100M = 15, + SFP_LINK_LEN_50UM_OM2_10M = 16, + SFP_LINK_LEN_62_5UM_OM1_10M = 17, + SFP_LINK_LEN_COPPER_1M = 18, + SFP_LINK_LEN_50UM_OM4_10M = 18, + SFP_LINK_LEN_50UM_OM3_10M = 19, + SFP_VENDOR_NAME = 20, + SFP_VENDOR_OUI = 37, + SFP_VENDOR_PN = 40, + SFP_VENDOR_REV = 56, + SFP_OPTICAL_WAVELENGTH_MSB = 60, + SFP_OPTICAL_WAVELENGTH_LSB = 61, + SFP_CABLE_SPEC = 60, + SFP_CC_BASE = 63, + SFP_OPTIONS = 64, + SFP_OPTIONS_HIGH_POWER_LEVEL = 8192, + 
SFP_OPTIONS_PAGING_A2 = 4096, + SFP_OPTIONS_RETIMER = 2048, + SFP_OPTIONS_COOLED_XCVR = 1024, + SFP_OPTIONS_POWER_DECL = 512, + SFP_OPTIONS_RX_LINEAR_OUT = 256, + SFP_OPTIONS_RX_DECISION_THRESH = 128, + SFP_OPTIONS_TUNABLE_TX = 64, + SFP_OPTIONS_RATE_SELECT = 32, + SFP_OPTIONS_TX_DISABLE = 16, + SFP_OPTIONS_TX_FAULT = 8, + SFP_OPTIONS_LOS_INVERTED = 4, + SFP_OPTIONS_LOS_NORMAL = 2, + SFP_BR_MAX = 66, + SFP_BR_MIN = 67, + SFP_VENDOR_SN = 68, + SFP_DATECODE = 84, + SFP_DIAGMON = 92, + SFP_DIAGMON_DDM = 64, + SFP_DIAGMON_INT_CAL = 32, + SFP_DIAGMON_EXT_CAL = 16, + SFP_DIAGMON_RXPWR_AVG = 8, + SFP_DIAGMON_ADDRMODE = 4, + SFP_ENHOPTS = 93, + SFP_ENHOPTS_ALARMWARN = 128, + SFP_ENHOPTS_SOFT_TX_DISABLE = 64, + SFP_ENHOPTS_SOFT_TX_FAULT = 32, + SFP_ENHOPTS_SOFT_RX_LOS = 16, + SFP_ENHOPTS_SOFT_RATE_SELECT = 8, + SFP_ENHOPTS_APP_SELECT_SFF8079 = 4, + SFP_ENHOPTS_SOFT_RATE_SFF8431 = 2, + SFP_SFF8472_COMPLIANCE = 94, + SFP_SFF8472_COMPLIANCE_NONE = 0, + SFP_SFF8472_COMPLIANCE_REV9_3 = 1, + SFP_SFF8472_COMPLIANCE_REV9_5 = 2, + SFP_SFF8472_COMPLIANCE_REV10_2 = 3, + SFP_SFF8472_COMPLIANCE_REV10_4 = 4, + SFP_SFF8472_COMPLIANCE_REV11_0 = 5, + SFP_SFF8472_COMPLIANCE_REV11_3 = 6, + SFP_SFF8472_COMPLIANCE_REV11_4 = 7, + SFP_SFF8472_COMPLIANCE_REV12_0 = 8, + SFP_CC_EXT = 95, +}; + +enum ethnl_sock_type { + ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH = 0, +}; + +struct ethnl_sock_priv { + struct net_device *dev; + u32 portid; + enum ethnl_sock_type type; +}; + +struct ethnl_module_fw_flash_ntf_params { + u32 portid; + u32 seq; + bool closed_sock; +}; + +struct ethtool_module_fw_flash_params { + __be32 password; + u8 password_valid: 1; +}; + +struct ethtool_cmis_fw_update_params { + struct net_device *dev; + struct ethtool_module_fw_flash_params params; + struct ethnl_module_fw_flash_ntf_params ntf_params; + const struct firmware *fw; +}; + +struct ethtool_module_fw_flash { + struct list_head list; + netdevice_tracker dev_tracker; + struct work_struct work; + struct ethtool_cmis_fw_update_params fw_update; +}; + +struct module_reply_data { + struct ethnl_reply_data base; + struct ethtool_module_power_mode_params power; +}; + +struct nf_ct_helper_expectfn { + struct list_head head; + const char *name; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); +}; + +struct nf_ct_seqadj { + u32 correction_pos; + s32 offset_before; + s32 offset_after; +}; + +struct nf_conn_seqadj { + struct nf_ct_seqadj seq[2]; +}; + +enum cntl_msg_types { + IPCTNL_MSG_CT_NEW = 0, + IPCTNL_MSG_CT_GET = 1, + IPCTNL_MSG_CT_DELETE = 2, + IPCTNL_MSG_CT_GET_CTRZERO = 3, + IPCTNL_MSG_CT_GET_STATS_CPU = 4, + IPCTNL_MSG_CT_GET_STATS = 5, + IPCTNL_MSG_CT_GET_DYING = 6, + IPCTNL_MSG_CT_GET_UNCONFIRMED = 7, + IPCTNL_MSG_MAX = 8, +}; + +enum ctnl_exp_msg_types { + IPCTNL_MSG_EXP_NEW = 0, + IPCTNL_MSG_EXP_GET = 1, + IPCTNL_MSG_EXP_DELETE = 2, + IPCTNL_MSG_EXP_GET_STATS_CPU = 3, + IPCTNL_MSG_EXP_MAX = 4, +}; + +enum ctattr_type { + CTA_UNSPEC = 0, + CTA_TUPLE_ORIG = 1, + CTA_TUPLE_REPLY = 2, + CTA_STATUS = 3, + CTA_PROTOINFO = 4, + CTA_HELP = 5, + CTA_NAT_SRC = 6, + CTA_TIMEOUT = 7, + CTA_MARK = 8, + CTA_COUNTERS_ORIG = 9, + CTA_COUNTERS_REPLY = 10, + CTA_USE = 11, + CTA_ID = 12, + CTA_NAT_DST = 13, + CTA_TUPLE_MASTER = 14, + CTA_SEQ_ADJ_ORIG = 15, + CTA_NAT_SEQ_ADJ_ORIG = 15, + CTA_SEQ_ADJ_REPLY = 16, + CTA_NAT_SEQ_ADJ_REPLY = 16, + CTA_SECMARK = 17, + CTA_ZONE = 18, + CTA_SECCTX = 19, + CTA_TIMESTAMP = 20, + CTA_MARK_MASK = 21, + CTA_LABELS = 22, + CTA_LABELS_MASK = 23, + CTA_SYNPROXY = 24, + CTA_FILTER = 25, + CTA_STATUS_MASK = 26, + __CTA_MAX 
= 27, +}; + +enum ctattr_tuple { + CTA_TUPLE_UNSPEC = 0, + CTA_TUPLE_IP = 1, + CTA_TUPLE_PROTO = 2, + CTA_TUPLE_ZONE = 3, + __CTA_TUPLE_MAX = 4, +}; + +enum ctattr_ip { + CTA_IP_UNSPEC = 0, + CTA_IP_V4_SRC = 1, + CTA_IP_V4_DST = 2, + CTA_IP_V6_SRC = 3, + CTA_IP_V6_DST = 4, + __CTA_IP_MAX = 5, +}; + +enum ctattr_counters { + CTA_COUNTERS_UNSPEC = 0, + CTA_COUNTERS_PACKETS = 1, + CTA_COUNTERS_BYTES = 2, + CTA_COUNTERS32_PACKETS = 3, + CTA_COUNTERS32_BYTES = 4, + CTA_COUNTERS_PAD = 5, + __CTA_COUNTERS_MAX = 6, +}; + +enum ctattr_tstamp { + CTA_TIMESTAMP_UNSPEC = 0, + CTA_TIMESTAMP_START = 1, + CTA_TIMESTAMP_STOP = 2, + CTA_TIMESTAMP_PAD = 3, + __CTA_TIMESTAMP_MAX = 4, +}; + +enum ctattr_seqadj { + CTA_SEQADJ_UNSPEC = 0, + CTA_SEQADJ_CORRECTION_POS = 1, + CTA_SEQADJ_OFFSET_BEFORE = 2, + CTA_SEQADJ_OFFSET_AFTER = 3, + __CTA_SEQADJ_MAX = 4, +}; + +enum ctattr_synproxy { + CTA_SYNPROXY_UNSPEC = 0, + CTA_SYNPROXY_ISN = 1, + CTA_SYNPROXY_ITS = 2, + CTA_SYNPROXY_TSOFF = 3, + __CTA_SYNPROXY_MAX = 4, +}; + +enum ctattr_expect { + CTA_EXPECT_UNSPEC = 0, + CTA_EXPECT_MASTER = 1, + CTA_EXPECT_TUPLE = 2, + CTA_EXPECT_MASK = 3, + CTA_EXPECT_TIMEOUT = 4, + CTA_EXPECT_ID = 5, + CTA_EXPECT_HELP_NAME = 6, + CTA_EXPECT_ZONE = 7, + CTA_EXPECT_FLAGS = 8, + CTA_EXPECT_CLASS = 9, + CTA_EXPECT_NAT = 10, + CTA_EXPECT_FN = 11, + __CTA_EXPECT_MAX = 12, +}; + +enum ctattr_expect_nat { + CTA_EXPECT_NAT_UNSPEC = 0, + CTA_EXPECT_NAT_DIR = 1, + CTA_EXPECT_NAT_TUPLE = 2, + __CTA_EXPECT_NAT_MAX = 3, +}; + +enum ctattr_help { + CTA_HELP_UNSPEC = 0, + CTA_HELP_NAME = 1, + CTA_HELP_INFO = 2, + __CTA_HELP_MAX = 3, +}; + +enum ctattr_stats_cpu { + CTA_STATS_UNSPEC = 0, + CTA_STATS_SEARCHED = 1, + CTA_STATS_FOUND = 2, + CTA_STATS_NEW = 3, + CTA_STATS_INVALID = 4, + CTA_STATS_IGNORE = 5, + CTA_STATS_DELETE = 6, + CTA_STATS_DELETE_LIST = 7, + CTA_STATS_INSERT = 8, + CTA_STATS_INSERT_FAILED = 9, + CTA_STATS_DROP = 10, + CTA_STATS_EARLY_DROP = 11, + CTA_STATS_ERROR = 12, + CTA_STATS_SEARCH_RESTART = 13, + CTA_STATS_CLASH_RESOLVE = 14, + CTA_STATS_CHAIN_TOOLONG = 15, + __CTA_STATS_MAX = 16, +}; + +enum ctattr_stats_global { + CTA_STATS_GLOBAL_UNSPEC = 0, + CTA_STATS_GLOBAL_ENTRIES = 1, + CTA_STATS_GLOBAL_MAX_ENTRIES = 2, + __CTA_STATS_GLOBAL_MAX = 3, +}; + +enum ctattr_expect_stats { + CTA_STATS_EXP_UNSPEC = 0, + CTA_STATS_EXP_NEW = 1, + CTA_STATS_EXP_CREATE = 2, + CTA_STATS_EXP_DELETE = 3, + __CTA_STATS_EXP_MAX = 4, +}; + +enum ctattr_filter { + CTA_FILTER_UNSPEC = 0, + CTA_FILTER_ORIG_FLAGS = 1, + CTA_FILTER_REPLY_FLAGS = 2, + __CTA_FILTER_MAX = 3, +}; + +struct ctnetlink_list_dump_ctx { + struct nf_conn *last; + unsigned int cpu; + bool done; +}; + +struct ctnetlink_filter_u32 { + u32 val; + u32 mask; +}; + +struct ctnetlink_filter { + u8 family; + bool zone_filter; + u_int32_t orig_flags; + u_int32_t reply_flags; + struct nf_conntrack_tuple orig; + struct nf_conntrack_tuple reply; + struct nf_conntrack_zone zone; + struct ctnetlink_filter_u32 mark; + struct ctnetlink_filter_u32 status; +}; + +struct nft_rhash { + struct rhashtable ht; + struct delayed_work gc_work; +}; + +struct nft_rhash_elem { + struct nft_elem_priv priv; + struct rhash_head node; + struct nft_set_ext ext; +}; + +struct nft_rhash_cmp_arg { + const struct nft_set *set; + const u32 *key; + u8 genmask; + long: 32; + u64 tstamp; +}; + +struct nft_rhash_ctx { + const struct nft_ctx ctx; + const struct nft_set *set; +}; + +struct nft_hash { + u32 seed; + u32 buckets; + struct hlist_head table[0]; +}; + +struct nft_hash_elem { + struct nft_elem_priv priv; + struct 
hlist_node node; + struct nft_set_ext ext; +}; + +typedef union { + __be32 a4; + __be32 a6[4]; + struct in6_addr in6; +} xfrm_address_t; + +struct xfrm_id { + xfrm_address_t daddr; + __be32 spi; + __u8 proto; +}; + +struct xfrm_sec_ctx { + __u8 ctx_doi; + __u8 ctx_alg; + __u16 ctx_len; + __u32 ctx_sid; + char ctx_str[0]; +}; + +struct xfrm_selector { + xfrm_address_t daddr; + xfrm_address_t saddr; + __be16 dport; + __be16 dport_mask; + __be16 sport; + __be16 sport_mask; + __u16 family; + __u8 prefixlen_d; + __u8 prefixlen_s; + __u8 proto; + int ifindex; + __kernel_uid32_t user; +}; + +struct xfrm_lifetime_cfg { + __u64 soft_byte_limit; + __u64 hard_byte_limit; + __u64 soft_packet_limit; + __u64 hard_packet_limit; + __u64 soft_add_expires_seconds; + __u64 hard_add_expires_seconds; + __u64 soft_use_expires_seconds; + __u64 hard_use_expires_seconds; +}; + +struct xfrm_lifetime_cur { + __u64 bytes; + __u64 packets; + __u64 add_time; + __u64 use_time; +}; + +struct xfrm_replay_state { + __u32 oseq; + __u32 seq; + __u32 bitmap; +}; + +struct xfrm_replay_state_esn { + unsigned int bmp_len; + __u32 oseq; + __u32 seq; + __u32 oseq_hi; + __u32 seq_hi; + __u32 replay_window; + __u32 bmp[0]; +}; + +struct xfrm_algo { + char alg_name[64]; + unsigned int alg_key_len; + char alg_key[0]; +}; + +struct xfrm_algo_auth { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_trunc_len; + char alg_key[0]; +}; + +struct xfrm_algo_aead { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_icv_len; + char alg_key[0]; +}; + +struct xfrm_stats { + __u32 replay_window; + __u32 replay; + __u32 integrity_failed; +}; + +struct xfrm_encap_tmpl { + __u16 encap_type; + __be16 encap_sport; + __be16 encap_dport; + xfrm_address_t encap_oa; +}; + +struct xfrm_mark { + __u32 v; + __u32 m; +}; + +struct xfrm_address_filter { + xfrm_address_t saddr; + xfrm_address_t daddr; + __u16 family; + __u8 splen; + __u8 dplen; +}; + +enum { + XFRM_LOOKUP_ICMP = 1, + XFRM_LOOKUP_QUEUE = 2, + XFRM_LOOKUP_KEEP_DST_REF = 4, +}; + +struct inet_peer { + struct rb_node rb_node; + struct inetpeer_addr daddr; + u32 metrics[17]; + u32 rate_tokens; + u32 n_redirects; + long unsigned int rate_last; + union { + struct { + atomic_t rid; + }; + struct callback_head rcu; + }; + __u32 dtime; + refcount_t refcnt; +}; + +struct icmp_ext_hdr { + __u8 version: 4; + __u8 reserved1: 4; + __u8 reserved2; + __sum16 checksum; +}; + +struct icmp_extobj_hdr { + __be16 length; + __u8 class_num; + __u8 class_type; +}; + +struct icmp_ext_echo_ctype3_hdr { + __be16 afi; + __u8 addrlen; + __u8 reserved; +}; + +struct icmp_ext_echo_iio { + struct icmp_extobj_hdr extobj_hdr; + union { + char name[16]; + __be32 ifindex; + struct { + struct icmp_ext_echo_ctype3_hdr ctype3_hdr; + union { + __be32 ipv4_addr; + struct in6_addr ipv6_addr; + } ip_addr; + } addr; + } ident; +}; + +struct sock_ee_data_rfc4884 { + __u16 len; + __u8 flags; + __u8 reserved; +}; + +struct xfrm_state_walk { + struct list_head all; + u8 state; + u8 dying; + u8 proto; + u32 seq; + struct xfrm_address_filter *filter; +}; + +struct xfrm_dev_offload { + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net_device *real_dev; + long unsigned int offload_handle; + u8 dir: 2; + u8 type: 2; + u8 flags: 2; +}; + +struct xfrm_mode { + u8 encap; + u8 family; + u8 flags; +}; + +enum xfrm_replay_mode { + XFRM_REPLAY_MODE_LEGACY = 0, + XFRM_REPLAY_MODE_BMP = 1, + XFRM_REPLAY_MODE_ESN = 2, +}; + +struct xfrm_type; + +struct xfrm_type_offload; + +struct xfrm_state { + 
possible_net_t xs_net; + union { + struct hlist_node gclist; + struct hlist_node bydst; + }; + union { + struct hlist_node dev_gclist; + struct hlist_node bysrc; + }; + struct hlist_node byspi; + struct hlist_node byseq; + struct hlist_node state_cache; + struct hlist_node state_cache_input; + refcount_t refcnt; + spinlock_t lock; + u32 pcpu_num; + struct xfrm_id id; + struct xfrm_selector sel; + struct xfrm_mark mark; + u32 if_id; + u32 tfcpad; + u32 genid; + struct xfrm_state_walk km; + struct { + u32 reqid; + u8 mode; + u8 replay_window; + u8 aalgo; + u8 ealgo; + u8 calgo; + u8 flags; + u16 family; + xfrm_address_t saddr; + int header_len; + int trailer_len; + u32 extra_flags; + struct xfrm_mark smark; + } props; + struct xfrm_lifetime_cfg lft; + struct xfrm_algo_auth *aalg; + struct xfrm_algo *ealg; + struct xfrm_algo *calg; + struct xfrm_algo_aead *aead; + const char *geniv; + __be16 new_mapping_sport; + u32 new_mapping; + u32 mapping_maxage; + struct xfrm_encap_tmpl *encap; + struct sock *encap_sk; + u32 nat_keepalive_interval; + long: 32; + time64_t nat_keepalive_expiration; + xfrm_address_t *coaddr; + struct xfrm_state *tunnel; + atomic_t tunnel_users; + struct xfrm_replay_state replay; + struct xfrm_replay_state_esn *replay_esn; + struct xfrm_replay_state preplay; + struct xfrm_replay_state_esn *preplay_esn; + enum xfrm_replay_mode repl_mode; + u32 xflags; + u32 replay_maxage; + u32 replay_maxdiff; + struct timer_list rtimer; + struct xfrm_stats stats; + long: 32; + struct xfrm_lifetime_cur curlft; + struct hrtimer mtimer; + struct xfrm_dev_offload xso; + long int saved_tmo; + long: 32; + time64_t lastused; + struct page_frag xfrag; + const struct xfrm_type *type; + struct xfrm_mode inner_mode; + struct xfrm_mode inner_mode_iaf; + struct xfrm_mode outer_mode; + const struct xfrm_type_offload *type_offload; + struct xfrm_sec_ctx *security; + void *data; + u8 dir; +}; + +struct xfrm_type { + struct module *owner; + u8 proto; + u8 flags; + int (*init_state)(struct xfrm_state *, struct netlink_ext_ack *); + void (*destructor)(struct xfrm_state *); + int (*input)(struct xfrm_state *, struct sk_buff *); + int (*output)(struct xfrm_state *, struct sk_buff *); + int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); +}; + +struct xfrm_type_offload { + struct module *owner; + u8 proto; + void (*encap)(struct xfrm_state *, struct sk_buff *); + int (*input_tail)(struct xfrm_state *, struct sk_buff *); + int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t); +}; + +struct xfrm_offload { + struct { + __u32 low; + __u32 hi; + } seq; + __u32 flags; + __u32 status; + __u32 orig_mac_len; + __u8 proto; + __u8 inner_ipproto; +}; + +struct sec_path { + int len; + int olen; + int verified_cnt; + struct xfrm_state *xvec[6]; + struct xfrm_offload ovec[1]; +}; + +struct trace_event_raw_icmp_send { + struct trace_entry ent; + const void *skbaddr; + int type; + int code; + __u8 saddr[4]; + __u8 daddr[4]; + __u16 sport; + __u16 dport; + short unsigned int ulen; + char __data[0]; +}; + +struct trace_event_data_offsets_icmp_send {}; + +typedef void (*btf_trace_icmp_send)(void *, const struct sk_buff *, int, int); + +struct icmp_bxm { + struct sk_buff *skb; + int offset; + int data_len; + struct { + struct icmphdr icmph; + __be32 times[3]; + } data; + int head_len; + struct ip_options_data replyopts; +}; + +struct icmp_control { + enum skb_drop_reason (*handler)(struct sk_buff *); + short int error; +}; + +enum flow_dissector_ctrl_flags { + FLOW_DIS_IS_FRAGMENT = 1, + 
FLOW_DIS_FIRST_FRAG = 2, + FLOW_DIS_F_TUNNEL_CSUM = 4, + FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = 8, + FLOW_DIS_F_TUNNEL_OAM = 16, + FLOW_DIS_F_TUNNEL_CRIT_OPT = 32, + FLOW_DIS_ENCAPSULATION = 64, +}; + +struct flow_dissector_key_tags { + u32 flow_label; +}; + +struct flow_dissector_key_tipc { + __be32 key; +}; + +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc tipckey; + }; +}; + +struct flow_dissector_key_icmp { + struct { + u8 type; + u8 code; + }; + u16 id; +}; + +struct flow_keys { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_icmp icmp; + struct flow_dissector_key_addrs addrs; + long: 32; +}; + +struct uncached_list { + spinlock_t lock; + struct list_head head; +}; + +struct fib_dump_filter { + u32 table_id; + bool filter_set; + bool dump_routes; + bool dump_exceptions; + bool rtnl_held; + unsigned char protocol; + unsigned char rt_type; + unsigned int flags; + struct net_device *dev; +}; + +struct in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + __u32 rtmsg_type; + __u16 rtmsg_dst_len; + __u16 rtmsg_src_len; + __u32 rtmsg_metric; + long unsigned int rtmsg_info; + __u32 rtmsg_flags; + int rtmsg_ifindex; +}; + +struct fib6_gc_args { + int timeout; + int more; +}; + +struct rt6_exception { + struct hlist_node hlist; + struct rt6_info *rt6i; + long unsigned int stamp; + struct callback_head rcu; +}; + +typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); + +struct rd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + struct in6_addr dest; + __u8 opt[0]; +}; + +struct rt6_rtnl_dump_arg { + struct sk_buff *skb; + struct netlink_callback *cb; + struct net *net; + struct fib_dump_filter filter; +}; + +struct netevent_redirect { + struct dst_entry *old; + struct dst_entry *new; + struct neighbour *neigh; + const void *daddr; +}; + +struct trace_event_raw_fib6_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[16]; + __u8 dst[16]; + u16 sport; + u16 dport; + u8 proto; + u8 rt_type; + char name[16]; + __u8 gw[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib6_table_lookup {}; + +typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, const struct fib6_result *, struct fib6_table *, const struct flowi6 *); + +enum rt6_nud_state { + RT6_NUD_FAIL_HARD = -3, + RT6_NUD_FAIL_PROBE = -2, + RT6_NUD_FAIL_DO_RR = -1, + RT6_NUD_SUCCEED = 1, +}; + +struct fib6_nh_dm_arg { + struct net *net; + const struct in6_addr *saddr; + int oif; + int flags; + struct fib6_nh *nh; +}; + +struct fib6_nh_frl_arg { + u32 flags; + int oif; + int strict; + int *mpri; + bool *do_rr; + struct fib6_nh *nh; +}; + +struct fib6_nh_excptn_arg { + struct rt6_info *rt; + int plen; +}; + +struct fib6_nh_match_arg { + const struct net_device *dev; + const struct in6_addr *gw; + struct fib6_nh *match; +}; + +struct fib6_nh_age_excptn_arg { + struct fib6_gc_args *gc_args; + long unsigned int now; +}; + +struct fib6_nh_rd_arg { + struct fib6_result *res; + struct flowi6 *fl6; + const struct in6_addr *gw; + 
struct rt6_info **ret; +}; + +struct ip6rd_flowi { + struct flowi6 fl6; + struct in6_addr gateway; +}; + +struct fib6_nh_del_cached_rt_arg { + struct fib6_config *cfg; + struct fib6_info *f6i; +}; + +struct arg_dev_net_ip { + struct net *net; + struct in6_addr *addr; +}; + +struct arg_netdev_event { + const struct net_device *dev; + union { + unsigned char nh_flags; + long unsigned int event; + }; +}; + +struct rt6_mtu_change_arg { + struct net_device *dev; + unsigned int mtu; + struct fib6_info *f6i; +}; + +struct rt6_nh { + struct fib6_info *fib6_info; + struct fib6_config r_cfg; + struct list_head next; +}; + +struct fib6_nh_exception_dump_walker { + struct rt6_rtnl_dump_arg *dump; + struct fib6_info *rt; + unsigned int flags; + unsigned int skip; + unsigned int count; +}; + +typedef u32 ihandle; + +struct prom_args { + __be32 service; + __be32 nargs; + __be32 nret; + __be32 args[10]; +}; + +struct prom_t { + ihandle root; + phandle chosen; + int cpu; + ihandle stdout; + ihandle mmumap; + ihandle memory; +}; + +struct mem_map_entry { + __be64 base; + __be64 size; +}; + +typedef __be32 cell_t; + +typedef u32 pci_bus_addr_t; + +struct pci_bus_region { + pci_bus_addr_t start; + pci_bus_addr_t end; +}; + +struct drmem_lmb { + u64 base_addr; + u32 drc_index; + u32 aa_index; + u32 flags; + long: 32; +}; + +struct drmem_lmb_info { + struct drmem_lmb *lmbs; + int n_lmbs; + u64 lmb_size; +}; + +struct of_drconf_cell_v1 { + __be64 base_addr; + __be32 drc_index; + __be32 reserved; + __be32 aa_index; + __be32 flags; +}; + +struct of_drconf_cell_v2 { + u32 seq_lmbs; + u64 base_addr; + u32 drc_index; + u32 aa_index; + u32 flags; +}; + +struct pmf_device { + struct list_head link; + struct device_node *node; + struct pmf_handlers *handlers; + struct list_head functions; + struct kref ref; +}; + +struct pmf_irq_client { + void (*handler)(void *); + void *data; + struct module *owner; + struct list_head link; + struct pmf_function *func; +}; + +struct pmf_cmd { + const void *cmdptr; + const void *cmdend; + struct pmf_function *func; + void *instdata; + struct pmf_args *args; + int error; +}; + +typedef int (*pmf_cmd_parser_t)(struct pmf_cmd *, struct pmf_handlers *); + +enum perf_callchain_context { + PERF_CONTEXT_HV = 18446744073709551584ULL, + PERF_CONTEXT_KERNEL = 18446744073709551488ULL, + PERF_CONTEXT_USER = 18446744073709551104ULL, + PERF_CONTEXT_GUEST = 18446744073709549568ULL, + PERF_CONTEXT_GUEST_KERNEL = 18446744073709549440ULL, + PERF_CONTEXT_GUEST_USER = 18446744073709549056ULL, + PERF_CONTEXT_MAX = 18446744073709547521ULL, +}; + +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short int contexts; + bool contexts_maxed; +}; + +typedef __kernel_long_t __kernel_suseconds_t; + +typedef __kernel_suseconds_t suseconds_t; + +struct __kernel_itimerspec { + struct __kernel_timespec it_interval; + struct __kernel_timespec it_value; +}; + +struct timezone { + int tz_minuteswest; + int tz_dsttime; +}; + +struct old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 it_value; +}; + +struct action_cache { + long unsigned int allow_native[15]; +}; + +struct notification; + +struct seccomp_filter { + refcount_t refs; + refcount_t users; + bool log; + bool wait_killable_recv; + struct action_cache cache; + struct seccomp_filter *prev; + struct bpf_prog *prog; + struct notification *notif; + struct mutex notify_lock; + wait_queue_head_t wqh; +}; + +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + 
__u16 seccomp_data; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +struct seccomp_notif_addfd { + __u64 id; + __u32 flags; + __u32 srcfd; + __u32 newfd; + __u32 newfd_flags; +}; + +struct sock_fprog { + short unsigned int len; + struct sock_filter *filter; +}; + +typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); + +enum notify_state { + SECCOMP_NOTIFY_INIT = 0, + SECCOMP_NOTIFY_SENT = 1, + SECCOMP_NOTIFY_REPLIED = 2, +}; + +struct seccomp_knotif { + struct task_struct *task; + long: 32; + u64 id; + const struct seccomp_data *data; + enum notify_state state; + int error; + long int val; + u32 flags; + struct completion ready; + struct list_head list; + struct list_head addfd; + long: 32; +}; + +struct seccomp_kaddfd { + struct file *file; + int fd; + unsigned int flags; + __u32 ioctl_flags; + union { + bool setfd; + int ret; + }; + struct completion completion; + struct list_head list; +}; + +struct notification { + atomic_t requests; + u32 flags; + u64 next_id; + struct list_head notifications; +}; + +struct seccomp_log_name { + u32 log; + const char *name; +}; + +enum bpf_cond_pseudo_jmp { + BPF_MAY_GOTO = 0, +}; + +enum bpf_addr_space_cast { + BPF_ADDR_SPACE_CAST = 1, +}; + +typedef void (*bpf_insn_print_t)(void *, const char *, ...); + +typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); + +typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); + +struct bpf_insn_cbs { + bpf_insn_print_t cb_print; + bpf_insn_revmap_call_t cb_call; + bpf_insn_print_imm_t cb_imm; + void *private_data; +}; + +struct bpf_struct_ops_arg_info { + struct bpf_ctx_arg_aux *info; + u32 cnt; +}; + +struct bpf_struct_ops_desc { + struct bpf_struct_ops *st_ops; + const struct btf_type *type; + const struct btf_type *value_type; + u32 type_id; + u32 value_id; + struct bpf_struct_ops_arg_info *arg_info; +}; + +struct bpf_struct_ops_value { + struct bpf_struct_ops_common_value common; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + char data[0]; +}; + +struct bpf_struct_ops_map { + struct bpf_map map; + const struct bpf_struct_ops_desc *st_ops_desc; + struct mutex lock; + struct bpf_link **links; + struct bpf_ksym **ksyms; + u32 funcs_cnt; + u32 image_pages_cnt; + void *image_pages[8]; + struct btf *btf; + struct bpf_struct_ops_value *uvalue; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct bpf_struct_ops_value kvalue; +}; + +struct bpf_struct_ops_link { + struct bpf_link link; + struct bpf_map *map; + wait_queue_head_t wait_hup; +}; + +enum { + IDX_MODULE_ID = 0, + IDX_ST_OPS_COMMON_VALUE_ID = 1, +}; + +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; + struct rb_node rb; + long unsigned int rb_subtree_last; +}; + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *, struct rb_node *); + void (*copy)(struct rb_node *, struct rb_node *); + void (*rotate)(struct rb_node *, struct rb_node *); +}; + +enum tlb_flush_reason { + TLB_FLUSH_ON_TASK_SWITCH = 0, + TLB_REMOTE_SHOOTDOWN = 1, + TLB_LOCAL_SHOOTDOWN = 2, + TLB_LOCAL_MM_SHOOTDOWN = 3, + TLB_REMOTE_SEND_IPI = 4, + NR_TLB_FLUSH_REASONS = 5, +}; + +struct trace_event_raw_tlb_flush { + struct trace_entry ent; + int reason; + long unsigned int pages; + char __data[0]; +}; + +struct 
trace_event_data_offsets_tlb_flush {}; + +typedef void (*btf_trace_tlb_flush)(void *, int, long unsigned int); + +struct trace_event_raw_mm_migrate_pages { + struct trace_entry ent; + long unsigned int succeeded; + long unsigned int failed; + long unsigned int thp_succeeded; + long unsigned int thp_failed; + long unsigned int thp_split; + long unsigned int large_folio_split; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_mm_migrate_pages_start { + struct trace_entry ent; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_migration_pte { + struct trace_entry ent; + long unsigned int addr; + long unsigned int pte; + int order; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_migrate_pages {}; + +struct trace_event_data_offsets_mm_migrate_pages_start {}; + +struct trace_event_data_offsets_migration_pte {}; + +typedef void (*btf_trace_mm_migrate_pages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, enum migrate_mode, int); + +typedef void (*btf_trace_mm_migrate_pages_start)(void *, enum migrate_mode, int); + +typedef void (*btf_trace_set_migration_pte)(void *, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_remove_migration_pte)(void *, long unsigned int, long unsigned int, int); + +struct folio_referenced_arg { + int mapcount; + int referenced; + long unsigned int vm_flags; + struct mem_cgroup *memcg; +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + long unsigned int private; +}; + +struct splice_pipe_desc { + struct page **pages; + struct partial_page *partial; + int nr_pages; + unsigned int nr_pages_max; + const struct pipe_buf_operations *ops; + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); + +enum siginfo_layout { + SIL_KILL = 0, + SIL_TIMER = 1, + SIL_POLL = 2, + SIL_FAULT = 3, + SIL_FAULT_TRAPNO = 4, + SIL_FAULT_MCEERR = 5, + SIL_FAULT_BNDERR = 6, + SIL_FAULT_PKUERR = 7, + SIL_FAULT_PERF_EVENT = 8, + SIL_CHLD = 9, + SIL_RT = 10, + SIL_SYS = 11, +}; + +struct signalfd_siginfo { + __u32 ssi_signo; + __s32 ssi_errno; + __s32 ssi_code; + __u32 ssi_pid; + __u32 ssi_uid; + __s32 ssi_fd; + __u32 ssi_tid; + __u32 ssi_band; + __u32 ssi_overrun; + __u32 ssi_trapno; + __s32 ssi_status; + __s32 ssi_int; + __u64 ssi_ptr; + __u64 ssi_utime; + __u64 ssi_stime; + __u64 ssi_addr; + __u16 ssi_addr_lsb; + __u16 __pad2; + __s32 ssi_syscall; + __u64 ssi_call_addr; + __u32 ssi_arch; + __u8 __pad[28]; +}; + +struct signalfd_ctx { + sigset_t sigmask; +}; + +struct ext4_group_desc { + __le32 bg_block_bitmap_lo; + __le32 bg_inode_bitmap_lo; + __le32 bg_inode_table_lo; + __le16 bg_free_blocks_count_lo; + __le16 bg_free_inodes_count_lo; + __le16 bg_used_dirs_count_lo; + __le16 bg_flags; + __le32 bg_exclude_bitmap_lo; + __le16 bg_block_bitmap_csum_lo; + __le16 bg_inode_bitmap_csum_lo; + __le16 bg_itable_unused_lo; + __le16 bg_checksum; + __le32 bg_block_bitmap_hi; + __le32 bg_inode_bitmap_hi; + __le32 bg_inode_table_hi; + __le16 bg_free_blocks_count_hi; + __le16 bg_free_inodes_count_hi; + __le16 bg_used_dirs_count_hi; + __le16 bg_itable_unused_hi; + __le32 bg_exclude_bitmap_hi; + __le16 bg_block_bitmap_csum_hi; + __le16 bg_inode_bitmap_csum_hi; + __u32 bg_reserved; +}; + +struct ext4_system_zone { + struct rb_node node; + long: 32; + ext4_fsblk_t start_blk; + unsigned int count; + u32 ino; +}; + +struct 
jbd2_journal_revoke_header_s { + journal_header_t r_header; + __be32 r_count; +}; + +typedef struct jbd2_journal_revoke_header_s jbd2_journal_revoke_header_t; + +struct jbd2_revoke_table_s { + int hash_size; + int hash_shift; + struct list_head *hash_table; +}; + +struct jbd2_revoke_record_s { + struct list_head hash; + tid_t sequence; + long: 32; + long long unsigned int blocknr; +}; + +struct fuse_file_lock { + uint64_t start; + uint64_t end; + uint32_t type; + uint32_t pid; +}; + +struct fuse_open_in { + uint32_t flags; + uint32_t open_flags; +}; + +struct fuse_flush_in { + uint64_t fh; + uint32_t unused; + uint32_t padding; + uint64_t lock_owner; +}; + +struct fuse_read_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t read_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t write_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_out { + uint32_t size; + uint32_t padding; +}; + +struct fuse_fsync_in { + uint64_t fh; + uint32_t fsync_flags; + uint32_t padding; +}; + +struct fuse_lk_in { + uint64_t fh; + uint64_t owner; + struct fuse_file_lock lk; + uint32_t lk_flags; + uint32_t padding; +}; + +struct fuse_lk_out { + struct fuse_file_lock lk; +}; + +struct fuse_bmap_in { + uint64_t block; + uint32_t blocksize; + uint32_t padding; +}; + +struct fuse_bmap_out { + uint64_t block; +}; + +struct fuse_poll_in { + uint64_t fh; + uint64_t kh; + uint32_t flags; + uint32_t events; +}; + +struct fuse_poll_out { + uint32_t revents; + uint32_t padding; +}; + +struct fuse_notify_poll_wakeup_out { + uint64_t kh; +}; + +struct fuse_fallocate_in { + uint64_t fh; + uint64_t offset; + uint64_t length; + uint32_t mode; + uint32_t padding; +}; + +struct fuse_lseek_in { + uint64_t fh; + uint64_t offset; + uint32_t whence; + uint32_t padding; +}; + +struct fuse_lseek_out { + uint64_t offset; +}; + +struct fuse_copy_file_range_in { + uint64_t fh_in; + uint64_t off_in; + uint64_t nodeid_out; + uint64_t fh_out; + uint64_t off_out; + uint64_t len; + uint64_t flags; +}; + +struct fuse_io_priv { + struct kref refcnt; + int async; + spinlock_t lock; + unsigned int reqs; + ssize_t bytes; + size_t size; + __u64 offset; + bool write; + bool should_dirty; + int err; + struct kiocb *iocb; + struct completion *done; + bool blocking; + long: 32; +}; + +struct fuse_io_args { + union { + struct { + struct fuse_read_in in; + u64 attr_ver; + } read; + struct { + struct fuse_write_in in; + struct fuse_write_out out; + bool folio_locked; + long: 32; + } write; + }; + struct fuse_args_pages ap; + struct fuse_io_priv *io; + struct fuse_file *ff; +}; + +struct fuse_writepage_args { + struct fuse_io_args ia; + struct rb_node writepages_entry; + struct list_head queue_entry; + struct fuse_writepage_args *next; + struct inode *inode; + struct fuse_sync_bucket *bucket; +}; + +struct fuse_fill_wb_data { + struct fuse_writepage_args *wpa; + struct fuse_file *ff; + struct inode *inode; + struct folio **orig_folios; + unsigned int max_folios; +}; + +struct btrfs_root_ref { + __le64 dirid; + __le64 sequence; + __le16 name_len; +} __attribute__((packed)); + +struct btrfs_fid { + u64 objectid; + u64 root_objectid; + u32 gen; + u64 parent_objectid; + u32 parent_gen; + u64 parent_root_objectid; +}; + +struct hmac_ctx { + struct crypto_shash *hash; + u8 pads[0]; +}; + +typedef long unsigned int cycles_t; + +struct crypto_rng; + +struct rng_alg { + int (*generate)(struct crypto_rng 
*, const u8 *, unsigned int, u8 *, unsigned int); + int (*seed)(struct crypto_rng *, const u8 *, unsigned int); + void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); + unsigned int seedsize; + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +struct rand_data; + +struct jitterentropy { + spinlock_t jent_lock; + struct rand_data *entropy_collector; + struct crypto_shash *tfm; + struct shash_desc *sdesc; +}; + +enum { + MILLION = 1000000, + MIN_PERIOD = 1000, + MAX_PERIOD = 1000000, + MARGIN_MIN_PCT = 10, + MARGIN_LOW_PCT = 20, + MARGIN_TARGET_PCT = 50, + INUSE_ADJ_STEP_PCT = 25, + TIMER_SLACK_PCT = 1, + WEIGHT_ONE = 65536, +}; + +enum { + VTIME_PER_SEC_SHIFT = 37ULL, + VTIME_PER_SEC = 137438953472ULL, + VTIME_PER_USEC = 137438ULL, + VTIME_PER_NSEC = 137ULL, + VRATE_MIN_PPM = 10000ULL, + VRATE_MAX_PPM = 100000000ULL, + VRATE_MIN = 1374ULL, + VRATE_CLAMP_ADJ_PCT = 4ULL, + AUTOP_CYCLE_NSEC = 10000000000ULL, +}; + +enum { + RQ_WAIT_BUSY_PCT = 5, + UNBUSY_THR_PCT = 75, + MIN_DELAY_THR_PCT = 500, + MAX_DELAY_THR_PCT = 25000, + MIN_DELAY = 250, + MAX_DELAY = 250000, + DFGV_USAGE_PCT = 50, + DFGV_PERIOD = 100000, + MAX_LAGGING_PERIODS = 10, + IOC_PAGE_SHIFT = 12, + IOC_PAGE_SIZE = 4096, + IOC_SECT_TO_PAGE_SHIFT = 3, + LCOEF_RANDIO_PAGES = 4096, +}; + +enum ioc_running { + IOC_IDLE = 0, + IOC_RUNNING = 1, + IOC_STOP = 2, +}; + +enum { + QOS_ENABLE = 0, + QOS_CTRL = 1, + NR_QOS_CTRL_PARAMS = 2, +}; + +enum { + QOS_RPPM = 0, + QOS_RLAT = 1, + QOS_WPPM = 2, + QOS_WLAT = 3, + QOS_MIN = 4, + QOS_MAX = 5, + NR_QOS_PARAMS = 6, +}; + +enum { + COST_CTRL = 0, + COST_MODEL = 1, + NR_COST_CTRL_PARAMS = 2, +}; + +enum { + I_LCOEF_RBPS = 0, + I_LCOEF_RSEQIOPS = 1, + I_LCOEF_RRANDIOPS = 2, + I_LCOEF_WBPS = 3, + I_LCOEF_WSEQIOPS = 4, + I_LCOEF_WRANDIOPS = 5, + NR_I_LCOEFS = 6, +}; + +enum { + LCOEF_RPAGE = 0, + LCOEF_RSEQIO = 1, + LCOEF_RRANDIO = 2, + LCOEF_WPAGE = 3, + LCOEF_WSEQIO = 4, + LCOEF_WRANDIO = 5, + NR_LCOEFS = 6, +}; + +enum { + AUTOP_INVALID = 0, + AUTOP_HDD = 1, + AUTOP_SSD_QD1 = 2, + AUTOP_SSD_DFL = 3, + AUTOP_SSD_FAST = 4, +}; + +struct ioc_params { + u32 qos[6]; + u64 i_lcoefs[6]; + u64 lcoefs[6]; + u32 too_fast_vrate_pct; + u32 too_slow_vrate_pct; +}; + +struct ioc_margins { + s64 min; + s64 low; + s64 target; +}; + +struct ioc_missed { + local_t nr_met; + local_t nr_missed; + u32 last_met; + u32 last_missed; +}; + +struct ioc_pcpu_stat { + struct ioc_missed missed[2]; + local64_t rq_wait_ns; + u64 last_rq_wait_ns; +}; + +struct ioc { + struct rq_qos rqos; + bool enabled; + struct ioc_params params; + struct ioc_margins margins; + u32 period_us; + u32 timer_slack_ns; + u64 vrate_min; + u64 vrate_max; + spinlock_t lock; + struct timer_list timer; + struct list_head active_iocgs; + struct ioc_pcpu_stat *pcpu_stat; + enum ioc_running running; + atomic64_t vtime_rate; + u64 vtime_base_rate; + s64 vtime_err; + seqcount_spinlock_t period_seqcount; + long: 32; + u64 period_at; + u64 period_at_vtime; + atomic64_t cur_period; + int busy_level; + bool weights_updated; + atomic_t hweight_gen; + long: 32; + u64 dfgv_period_at; + u64 dfgv_period_rem; + u64 dfgv_usage_us_sum; + u64 autop_too_fast_at; + u64 autop_too_slow_at; + int autop_idx; + bool user_qos_params: 1; + bool user_cost_model: 1; +}; + +struct iocg_pcpu_stat { + local64_t abs_vusage; +}; + +struct iocg_stat { + u64 usage_us; + u64 wait_us; + u64 indebt_us; + u64 indelay_us; +}; + +struct ioc_gq { + struct blkg_policy_data pd; + struct ioc *ioc; + u32 cfg_weight; + u32 weight; + u32 active; + u32 
inuse; + u32 last_inuse; + long: 32; + s64 saved_margin; + sector_t cursor; + atomic64_t vtime; + atomic64_t done_vtime; + u64 abs_vdebt; + u64 delay; + u64 delay_at; + atomic64_t active_period; + struct list_head active_list; + u64 child_active_sum; + u64 child_inuse_sum; + u64 child_adjusted_sum; + int hweight_gen; + u32 hweight_active; + u32 hweight_inuse; + u32 hweight_donating; + u32 hweight_after_donation; + struct list_head walk_list; + struct list_head surplus_list; + struct wait_queue_head waitq; + struct hrtimer waitq_timer; + u64 activated_at; + struct iocg_pcpu_stat *pcpu_stat; + long: 32; + struct iocg_stat stat; + struct iocg_stat last_stat; + u64 last_stat_abs_vusage; + u64 usage_delta_us; + u64 wait_since; + u64 indebt_since; + u64 indelay_since; + int level; + struct ioc_gq *ancestors[0]; + long: 32; +}; + +struct ioc_cgrp { + struct blkcg_policy_data cpd; + unsigned int dfl_weight; +}; + +struct ioc_now { + u64 now_ns; + u64 now; + u64 vnow; +}; + +struct iocg_wait { + struct wait_queue_entry wait; + struct bio *bio; + u64 abs_cost; + bool committed; + long: 32; +}; + +struct iocg_wake_ctx { + struct ioc_gq *iocg; + u32 hw_inuse; + s64 vbudget; +}; + +struct trace_event_raw_iocost_iocg_state { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u64 vrate; + u64 last_period; + u64 cur_period; + u64 vtime; + u32 weight; + u32 inuse; + u64 hweight_active; + u64 hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocg_inuse_update { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u32 old_inuse; + u32 new_inuse; + u64 old_hweight_inuse; + u64 new_hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocost_ioc_vrate_adj { + struct trace_entry ent; + u32 __data_loc_devname; + long: 32; + u64 old_vrate; + u64 new_vrate; + int busy_level; + u32 read_missed_ppm; + u32 write_missed_ppm; + u32 rq_wait_pct; + int nr_lagging; + int nr_shortages; + char __data[0]; +}; + +struct trace_event_raw_iocost_iocg_forgive_debt { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u32 usage_pct; + long: 32; + u64 old_debt; + u64 new_debt; + u64 old_delay; + u64 new_delay; + char __data[0]; +}; + +struct trace_event_data_offsets_iocost_iocg_state { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +struct trace_event_data_offsets_iocg_inuse_update { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +struct trace_event_data_offsets_iocost_ioc_vrate_adj { + u32 devname; + const void *devname_ptr_; +}; + +struct trace_event_data_offsets_iocost_iocg_forgive_debt { + u32 devname; + const void *devname_ptr_; + u32 cgroup; + const void *cgroup_ptr_; +}; + +typedef void (*btf_trace_iocost_iocg_activate)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_iocg_idle)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_inuse_shortage)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_transfer)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_adjust)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_ioc_vrate_adj)(void *, struct ioc *, u64, u32 *, u32, int, int); 
+ +typedef void (*btf_trace_iocost_iocg_forgive_debt)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u64, u64, u64, u64); + +typedef struct { + __le64 b; + __le64 a; +} le128; + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +struct resource_entry { + struct list_head node; + struct resource *res; + resource_size_t offset; + struct resource __res; +}; + +struct miscdevice { + int minor; + const char *name; + const struct file_operations *fops; + struct list_head list; + struct device *parent; + struct device *this_device; + const struct attribute_group **groups; + const char *nodename; + umode_t mode; +}; + +struct vga_device { + struct list_head list; + struct pci_dev *pdev; + unsigned int decodes; + unsigned int owns; + unsigned int locks; + unsigned int io_lock_cnt; + unsigned int mem_lock_cnt; + unsigned int io_norm_cnt; + unsigned int mem_norm_cnt; + bool bridge_has_one_vga; + bool is_firmware_default; + unsigned int (*set_decode)(struct pci_dev *, bool); +}; + +struct vga_arb_user_card { + struct pci_dev *pdev; + unsigned int mem_cnt; + unsigned int io_cnt; +}; + +struct vga_arb_private { + struct list_head list; + struct pci_dev *target; + struct vga_arb_user_card cards[16]; + spinlock_t lock; +}; + +struct clk_composite { + struct clk_hw hw; + struct clk_ops ops; + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + const struct clk_ops *mux_ops; + const struct clk_ops *rate_ops; + const struct clk_ops *gate_ops; +}; + +struct termios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_cc[19]; + cc_t c_line; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct sgttyb { + char sg_ispeed; + char sg_ospeed; + char sg_erase; + char sg_kill; + short int sg_flags; +}; + +struct tchars { + char t_intrc; + char t_quitc; + char t_startc; + char t_stopc; + char t_eofc; + char t_brkc; +}; + +struct ltchars { + char t_suspc; + char t_dsuspc; + char t_rprntc; + char t_flushc; + char t_werasc; + char t_lnextc; +}; + +struct termio { + short unsigned int c_iflag; + short unsigned int c_oflag; + short unsigned int c_cflag; + short unsigned int c_lflag; + unsigned char c_line; + unsigned char c_cc[10]; +}; + +enum tty_flow_change { + TTY_FLOW_NO_CHANGE = 0, + TTY_THROTTLE_SAFE = 1, + TTY_UNTHROTTLE_SAFE = 2, +}; + +typedef void * (*devcon_match_fn_t)(const struct fwnode_handle *, const char *, void *); + +enum { + BLK_MQ_REQ_NOWAIT = 1, + BLK_MQ_REQ_RESERVED = 2, + BLK_MQ_REQ_PM = 4, +}; + +enum scsi_devinfo_key { + SCSI_DEVINFO_GLOBAL = 0, + SCSI_DEVINFO_SPI = 1, +}; + +struct transport_class { + struct class class; + int (*setup)(struct transport_container *, struct device *, struct device *); + int (*configure)(struct transport_container *, struct device *, struct device *); + int (*remove)(struct transport_container *, struct device *, struct device *); +}; + +struct anon_transport_class { + struct transport_class tclass; + struct attribute_container container; +}; + +struct spi_transport_attrs { + int period; + int min_period; + int offset; + int max_offset; + unsigned int width: 1; + unsigned int max_width: 1; + unsigned int iu: 1; + unsigned int max_iu: 1; + unsigned int dt: 1; + unsigned int qas: 1; + unsigned int max_qas: 1; + unsigned int wr_flow: 1; + unsigned int rd_strm: 1; + unsigned int rti: 1; + unsigned int pcomp_en: 1; + unsigned int hold_mcs: 1; + unsigned int initial_dv: 1; + long unsigned int flags; + unsigned int support_sync: 1; + unsigned int support_wide: 1; + unsigned int support_dt: 
1; + unsigned int support_dt_only; + unsigned int support_ius; + unsigned int support_qas; + unsigned int dv_pending: 1; + unsigned int dv_in_progress: 1; + struct mutex dv_mutex; +}; + +enum spi_signal_type { + SPI_SIGNAL_UNKNOWN = 1, + SPI_SIGNAL_SE = 2, + SPI_SIGNAL_LVD = 3, + SPI_SIGNAL_HVD = 4, +}; + +struct spi_host_attrs { + enum spi_signal_type signalling; +}; + +struct spi_function_template { + void (*get_period)(struct scsi_target *); + void (*set_period)(struct scsi_target *, int); + void (*get_offset)(struct scsi_target *); + void (*set_offset)(struct scsi_target *, int); + void (*get_width)(struct scsi_target *); + void (*set_width)(struct scsi_target *, int); + void (*get_iu)(struct scsi_target *); + void (*set_iu)(struct scsi_target *, int); + void (*get_dt)(struct scsi_target *); + void (*set_dt)(struct scsi_target *, int); + void (*get_qas)(struct scsi_target *); + void (*set_qas)(struct scsi_target *, int); + void (*get_wr_flow)(struct scsi_target *); + void (*set_wr_flow)(struct scsi_target *, int); + void (*get_rd_strm)(struct scsi_target *); + void (*set_rd_strm)(struct scsi_target *, int); + void (*get_rti)(struct scsi_target *); + void (*set_rti)(struct scsi_target *, int); + void (*get_pcomp_en)(struct scsi_target *); + void (*set_pcomp_en)(struct scsi_target *, int); + void (*get_hold_mcs)(struct scsi_target *); + void (*set_hold_mcs)(struct scsi_target *, int); + void (*get_signalling)(struct Scsi_Host *); + void (*set_signalling)(struct Scsi_Host *, enum spi_signal_type); + int (*deny_binding)(struct scsi_target *); + long unsigned int show_period: 1; + long unsigned int show_offset: 1; + long unsigned int show_width: 1; + long unsigned int show_iu: 1; + long unsigned int show_dt: 1; + long unsigned int show_qas: 1; + long unsigned int show_wr_flow: 1; + long unsigned int show_rd_strm: 1; + long unsigned int show_rti: 1; + long unsigned int show_pcomp_en: 1; + long unsigned int show_hold_mcs: 1; +}; + +enum { + SPI_BLIST_NOIUS = 1, +}; + +struct spi_internal { + struct scsi_transport_template t; + struct spi_function_template *f; +}; + +enum spi_compare_returns { + SPI_COMPARE_SUCCESS = 0, + SPI_COMPARE_FAILURE = 1, + SPI_COMPARE_SKIP_TEST = 2, +}; + +struct work_queue_wrapper { + struct work_struct work; + struct scsi_device *sdev; +}; + +struct mii_ioctl_data { + __u16 phy_id; + __u16 reg_num; + __u16 val_in; + __u16 val_out; +}; + +struct mii_if_info { + int phy_id; + int advertising; + int phy_id_mask; + int reg_num_mask; + unsigned int full_duplex: 1; + unsigned int force_media: 1; + unsigned int supports_gmii: 1; + struct net_device *dev; + int (*mdio_read)(struct net_device *, int, int); + void (*mdio_write)(struct net_device *, int, int, int); +}; + +struct usb_device_driver { + const char *name; + bool (*match)(struct usb_device *); + int (*probe)(struct usb_device *); + void (*disconnect)(struct usb_device *); + int (*suspend)(struct usb_device *, pm_message_t); + int (*resume)(struct usb_device *, pm_message_t); + int (*choose_configuration)(struct usb_device *); + const struct attribute_group **dev_groups; + struct device_driver driver; + const struct usb_device_id *id_table; + unsigned int supports_autosuspend: 1; + unsigned int generic_subclass: 1; +}; + +typedef u32 acpi_size; + +typedef u64 acpi_io_address; + +typedef u32 acpi_status; + +typedef char *acpi_string; + +typedef void *acpi_handle; + +typedef u32 acpi_object_type; + +union acpi_object { + acpi_object_type type; + struct { + acpi_object_type type; + long: 32; + u64 value; + } integer; + 
struct { + acpi_object_type type; + u32 length; + char *pointer; + } string; + struct { + acpi_object_type type; + u32 length; + u8 *pointer; + } buffer; + struct { + acpi_object_type type; + u32 count; + union acpi_object *elements; + } package; + struct { + acpi_object_type type; + acpi_object_type actual_type; + acpi_handle handle; + } reference; + struct { + acpi_object_type type; + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + long: 32; + } processor; + struct { + acpi_object_type type; + u32 system_level; + u32 resource_order; + } power_resource; +}; + +struct acpi_object_list { + u32 count; + union acpi_object *pointer; +}; + +struct acpi_buffer { + acpi_size length; + void *pointer; +}; + +enum spd_duplex { + NWAY_10M_HALF = 0, + NWAY_10M_FULL = 1, + NWAY_100M_HALF = 2, + NWAY_100M_FULL = 3, + NWAY_1000M_FULL = 4, + FORCE_10M_HALF = 5, + FORCE_10M_FULL = 6, + FORCE_100M_HALF = 7, + FORCE_100M_FULL = 8, + FORCE_1000M_FULL = 9, + NWAY_2500M_FULL = 10, +}; + +enum rtl_register_content { + _2500bps = 1024, + _1250bps = 512, + _500bps = 256, + _tx_flow = 64, + _rx_flow = 32, + _1000bps = 16, + _100bps = 8, + _10bps = 4, + LINK_STATUS = 2, + FULL_DUP = 1, +}; + +enum rtl8152_flags { + RTL8152_INACCESSIBLE = 0, + RTL8152_SET_RX_MODE = 1, + WORK_ENABLE = 2, + RTL8152_LINK_CHG = 3, + SELECTIVE_SUSPEND = 4, + PHY_RESET = 5, + SCHEDULE_TASKLET = 6, + GREEN_ETHERNET = 7, + RX_EPROTO = 8, + IN_PRE_RESET = 9, + PROBED_WITH_NO_ERRORS = 10, + PROBE_SHOULD_RETRY = 11, +}; + +struct tally_counter { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underrun; +}; + +struct rx_desc { + __le32 opts1; + __le32 opts2; + __le32 opts3; + __le32 opts4; + __le32 opts5; + __le32 opts6; +}; + +struct tx_desc { + __le32 opts1; + __le32 opts2; +}; + +struct r8152; + +struct rx_agg { + struct list_head list; + struct list_head info_list; + struct urb *urb; + struct r8152 *context; + struct page *page; + void *buffer; +}; + +struct tx_agg { + struct list_head list; + struct urb *urb; + struct r8152 *context; + void *buffer; + void *head; + u32 skb_num; + u32 skb_len; +}; + +struct rtl_ops { + void (*init)(struct r8152 *); + int (*enable)(struct r8152 *); + void (*disable)(struct r8152 *); + void (*up)(struct r8152 *); + void (*down)(struct r8152 *); + void (*unload)(struct r8152 *); + int (*eee_get)(struct r8152 *, struct ethtool_keee *); + int (*eee_set)(struct r8152 *, struct ethtool_keee *); + bool (*in_nway)(struct r8152 *); + void (*hw_phy_cfg)(struct r8152 *); + void (*autosuspend_en)(struct r8152 *, bool); + void (*change_mtu)(struct r8152 *); +}; + +struct ups_info { + u32 r_tune: 1; + u32 _10m_ckdiv: 1; + u32 _250m_ckdiv: 1; + u32 aldps: 1; + u32 lite_mode: 2; + u32 speed_duplex: 4; + u32 eee: 1; + u32 eee_lite: 1; + u32 eee_ckdiv: 1; + u32 eee_plloff_100: 1; + u32 eee_plloff_giga: 1; + u32 eee_cmod_lv: 1; + u32 green: 1; + u32 flow_control: 1; + u32 ctap_short_off: 1; +}; + +struct rtl_fw { + const char *fw_name; + const struct firmware *fw; + char version[32]; + int (*pre_fw)(struct r8152 *); + int (*post_fw)(struct r8152 *); + bool retry; +}; + +struct r8152 { + long unsigned int flags; + struct usb_device *udev; + struct napi_struct napi; + struct usb_interface *intf; + struct net_device *netdev; + struct urb *intr_urb; + struct tx_agg tx_info[4]; + struct 
list_head rx_info; + struct list_head rx_used; + struct list_head rx_done; + struct list_head tx_free; + struct sk_buff_head tx_queue; + struct sk_buff_head rx_queue; + spinlock_t rx_lock; + spinlock_t tx_lock; + struct delayed_work schedule; + struct delayed_work hw_phy_work; + struct mii_if_info mii; + struct mutex control; + struct tasklet_struct tx_tl; + struct rtl_ops rtl_ops; + struct ups_info ups_info; + struct rtl_fw rtl_fw; + atomic_t rx_count; + bool eee_en; + int intr_interval; + u32 saved_wolopts; + u32 msg_enable; + u32 tx_qlen; + u32 coalesce; + u32 advertising; + u32 rx_buf_sz; + u32 rx_copybreak; + u32 rx_pending; + u32 fc_pause_on; + u32 fc_pause_off; + unsigned int pipe_in; + unsigned int pipe_out; + unsigned int pipe_intr; + unsigned int pipe_ctrl_in; + unsigned int pipe_ctrl_out; + u32 support_2500full: 1; + u32 lenovo_macpassthru: 1; + u32 dell_tb_rx_agg_bug: 1; + u16 ocp_base; + u16 speed; + u16 eee_adv; + u8 *intr_buff; + u8 version; + u8 duplex; + u8 autoneg; + unsigned int reg_access_reset_count; + long: 32; +}; + +struct fw_block { + __le32 type; + __le32 length; +}; + +struct fw_header { + u8 checksum[32]; + char version[32]; + struct fw_block blocks[0]; +}; + +enum rtl8152_fw_flags { + FW_FLAGS_USB = 0, + FW_FLAGS_PLA = 1, + FW_FLAGS_START = 2, + FW_FLAGS_STOP = 3, + FW_FLAGS_NC = 4, + FW_FLAGS_NC1 = 5, + FW_FLAGS_NC2 = 6, + FW_FLAGS_UC2 = 7, + FW_FLAGS_UC = 8, + FW_FLAGS_SPEED_UP = 9, + FW_FLAGS_VER = 10, +}; + +enum rtl8152_fw_fixup_cmd { + FW_FIXUP_AND = 0, + FW_FIXUP_OR = 1, + FW_FIXUP_NOT = 2, + FW_FIXUP_XOR = 3, +}; + +struct fw_phy_set { + __le16 addr; + __le16 data; +}; + +struct fw_phy_speed_up { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 version; + __le16 fw_reg; + __le16 reserved; + char info[0]; +}; + +struct fw_phy_ver { + struct fw_block blk_hdr; + struct fw_phy_set ver; + __le32 reserved; +}; + +struct fw_phy_fixup { + struct fw_block blk_hdr; + struct fw_phy_set setting; + __le16 bit_cmd; + __le16 reserved; +}; + +struct fw_phy_union { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + struct fw_phy_set pre_set[2]; + struct fw_phy_set bp[8]; + struct fw_phy_set bp_en; + u8 pre_num; + u8 bp_num; + char info[0]; +} __attribute__((packed)); + +struct fw_mac { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + __le16 bp_ba_addr; + __le16 bp_ba_value; + __le16 bp_en_addr; + __le16 bp_en_value; + __le16 bp_start; + __le16 bp_num; + __le16 bp[16]; + __le32 reserved; + __le16 fw_ver_reg; + u8 fw_ver_data; + char info[0]; +} __attribute__((packed)); + +struct fw_phy_patch_key { + struct fw_block blk_hdr; + __le16 key_reg; + __le16 key_data; + __le32 reserved; +}; + +struct fw_phy_nc { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + __le16 ba_reg; + __le16 ba_data; + __le16 patch_en_addr; + __le16 patch_en_value; + __le16 mode_reg; + __le16 mode_pre; + __le16 mode_post; + __le16 reserved; + __le16 bp_start; + __le16 bp_num; + __le16 bp[4]; + char info[0]; +}; + +enum rtl_fw_type { + RTL_FW_END = 0, + RTL_FW_PLA = 1, + RTL_FW_USB = 2, + RTL_FW_PHY_START = 3, + RTL_FW_PHY_STOP = 4, + RTL_FW_PHY_NC = 5, + RTL_FW_PHY_FIXUP = 6, + RTL_FW_PHY_UNION_NC = 7, + RTL_FW_PHY_UNION_NC1 = 8, + RTL_FW_PHY_UNION_NC2 = 9, + RTL_FW_PHY_UNION_UC2 = 10, + RTL_FW_PHY_UNION_UC = 11, + RTL_FW_PHY_UNION_MISC = 12, + RTL_FW_PHY_SPEED_UP = 13, + RTL_FW_PHY_VER = 14, +}; + +enum rtl_version { + RTL_VER_UNKNOWN = 0, + RTL_VER_01 = 1, + RTL_VER_02 = 2, + RTL_VER_03 = 3, + RTL_VER_04 = 4, + RTL_VER_05 = 5, + RTL_VER_06 = 6, + 
RTL_VER_07 = 7, + RTL_VER_08 = 8, + RTL_VER_09 = 9, + RTL_TEST_01 = 10, + RTL_VER_10 = 11, + RTL_VER_11 = 12, + RTL_VER_12 = 13, + RTL_VER_13 = 14, + RTL_VER_14 = 15, + RTL_VER_15 = 16, + RTL_VER_MAX = 17, +}; + +enum tx_csum_stat { + TX_CSUM_SUCCESS = 0, + TX_CSUM_TSO = 1, + TX_CSUM_NONE = 2, +}; + +enum sanyo_state { + STATE_INACTIVE___4 = 0, + STATE_HEADER_SPACE___3 = 1, + STATE_BIT_PULSE___3 = 2, + STATE_BIT_SPACE___3 = 3, + STATE_TRAILER_PULSE___2 = 4, + STATE_TRAILER_SPACE___2 = 5, +}; + +struct dst_cache_pcpu { + long unsigned int refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; + +enum phy_upstream { + PHY_UPSTREAM_MAC = 0, + PHY_UPSTREAM_PHY = 1, +}; + +enum ethtool_multicast_groups { + ETHNL_MCGRP_MONITOR = 0, +}; + +struct phy_device_node { + enum phy_upstream upstream_type; + union { + struct net_device *netdev; + struct phy_device *phydev; + } upstream; + struct sfp_bus *parent_sfp_bus; + struct phy_device *phy; +}; + +struct ethnl_dump_ctx { + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct ethnl_reply_data *reply_data; + long unsigned int pos_ifindex; +}; + +typedef void (*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *); + +enum nfnl_acct_msg_types { + NFNL_MSG_ACCT_NEW = 0, + NFNL_MSG_ACCT_GET = 1, + NFNL_MSG_ACCT_GET_CTRZERO = 2, + NFNL_MSG_ACCT_DEL = 3, + NFNL_MSG_ACCT_OVERQUOTA = 4, + NFNL_MSG_ACCT_MAX = 5, +}; + +enum nfnl_acct_flags { + NFACCT_F_QUOTA_PKTS = 1, + NFACCT_F_QUOTA_BYTES = 2, + NFACCT_F_OVERQUOTA = 4, +}; + +enum nfnl_acct_type { + NFACCT_UNSPEC = 0, + NFACCT_NAME = 1, + NFACCT_PKTS = 2, + NFACCT_BYTES = 3, + NFACCT_USE = 4, + NFACCT_FLAGS = 5, + NFACCT_QUOTA = 6, + NFACCT_FILTER = 7, + NFACCT_PAD = 8, + __NFACCT_MAX = 9, +}; + +enum nfnl_attr_filter_type { + NFACCT_FILTER_UNSPEC = 0, + NFACCT_FILTER_MASK = 1, + NFACCT_FILTER_VALUE = 2, + __NFACCT_FILTER_MAX = 3, +}; + +enum { + NFACCT_NO_QUOTA = -1, + NFACCT_UNDERQUOTA = 0, + NFACCT_OVERQUOTA = 1, +}; + +struct nf_acct { + atomic64_t pkts; + atomic64_t bytes; + long unsigned int flags; + struct list_head head; + refcount_t refcnt; + char name[32]; + struct callback_head callback_head; + char data[0]; +}; + +struct nfacct_filter { + u32 value; + u32 mask; +}; + +struct nfnl_acct_net { + struct list_head nfnl_acct_list; +}; + +struct tm { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long int tm_year; + int tm_wday; + int tm_yday; +}; + +enum nft_meta_keys { + NFT_META_LEN = 0, + NFT_META_PROTOCOL = 1, + NFT_META_PRIORITY = 2, + NFT_META_MARK = 3, + NFT_META_IIF = 4, + NFT_META_OIF = 5, + NFT_META_IIFNAME = 6, + NFT_META_OIFNAME = 7, + NFT_META_IFTYPE = 8, + NFT_META_OIFTYPE = 9, + NFT_META_SKUID = 10, + NFT_META_SKGID = 11, + NFT_META_NFTRACE = 12, + NFT_META_RTCLASSID = 13, + NFT_META_SECMARK = 14, + NFT_META_NFPROTO = 15, + NFT_META_L4PROTO = 16, + NFT_META_BRI_IIFNAME = 17, + NFT_META_BRI_OIFNAME = 18, + NFT_META_PKTTYPE = 19, + NFT_META_CPU = 20, + NFT_META_IIFGROUP = 21, + NFT_META_OIFGROUP = 22, + NFT_META_CGROUP = 23, + NFT_META_PRANDOM = 24, + NFT_META_SECPATH = 25, + NFT_META_IIFKIND = 26, + NFT_META_OIFKIND = 27, + NFT_META_BRI_IIFPVID = 28, + NFT_META_BRI_IIFVPROTO = 29, + NFT_META_TIME_NS = 30, + NFT_META_TIME_DAY = 31, + NFT_META_TIME_HOUR = 32, + NFT_META_SDIF = 33, + NFT_META_SDIFNAME = 34, + NFT_META_BRI_BROUTE = 35, + __NFT_META_IIFTYPE = 36, +}; + +enum nft_meta_attributes { + NFTA_META_UNSPEC = 0, + NFTA_META_DREG = 1, + 
NFTA_META_KEY = 2, + NFTA_META_SREG = 3, + __NFTA_META_MAX = 4, +}; + +enum { + NFT_PKTINFO_L4PROTO = 1, + NFT_PKTINFO_INNER = 2, + NFT_PKTINFO_INNER_FULL = 4, +}; + +enum { + NFT_PAYLOAD_CTX_INNER_TUN = 1, + NFT_PAYLOAD_CTX_INNER_LL = 2, + NFT_PAYLOAD_CTX_INNER_NH = 4, + NFT_PAYLOAD_CTX_INNER_TH = 8, +}; + +struct nft_inner_tun_ctx { + u16 type; + u16 inner_tunoff; + u16 inner_lloff; + u16 inner_nhoff; + u16 inner_thoff; + __be16 llproto; + u8 l4proto; + u8 flags; +}; + +struct nft_meta { + enum nft_meta_keys key: 8; + u8 len; + union { + u8 dreg; + u8 sreg; + }; +}; + +struct udp_seq_afinfo { + sa_family_t family; + struct udp_table *udp_table; +}; + +struct xt_entry_match { + union { + struct { + __u16 match_size; + char name[29]; + __u8 revision; + } user; + struct { + __u16 match_size; + struct xt_match *match; + } kernel; + __u16 match_size; + } u; + unsigned char data[0]; +}; + +struct xt_entry_target { + union { + struct { + __u16 target_size; + char name[29]; + __u8 revision; + } user; + struct { + __u16 target_size; + struct xt_target *target; + } kernel; + __u16 target_size; + } u; + unsigned char data[0]; +}; + +struct xt_standard_target { + struct xt_entry_target target; + int verdict; +}; + +struct xt_error_target { + struct xt_entry_target target; + char errorname[30]; +}; + +struct xt_get_revision { + char name[29]; + __u8 revision; +}; + +struct xt_counters_info { + char name[32]; + unsigned int num_counters; + long: 32; + struct xt_counters counters[0]; +}; + +struct xt_percpu_counter_alloc_state { + unsigned int off; + const char *mem; +}; + +struct ipt_getinfo { + char name[32]; + unsigned int valid_hooks; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int num_entries; + unsigned int size; +}; + +struct ipt_get_entries { + char name[32]; + unsigned int size; + long: 32; + struct ipt_entry entrytable[0]; +}; + +struct ipt_standard { + struct ipt_entry entry; + struct xt_standard_target target; + long: 32; +}; + +struct ipt_error { + struct ipt_entry entry; + struct xt_error_target target; +}; + +enum { + UDP_FLAGS_CORK = 0, + UDP_FLAGS_NO_CHECK6_TX = 1, + UDP_FLAGS_NO_CHECK6_RX = 2, + UDP_FLAGS_GRO_ENABLED = 3, + UDP_FLAGS_ACCEPT_FRAGLIST = 4, + UDP_FLAGS_ACCEPT_L4 = 5, + UDP_FLAGS_ENCAP_ENABLED = 6, + UDP_FLAGS_UDPLITE_SEND_CC = 7, + UDP_FLAGS_UDPLITE_RECV_CC = 8, +}; + +struct static_key_false_deferred { + struct static_key_false key; + long unsigned int timeout; + struct delayed_work work; +}; + +struct ip6_tnl_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); +}; + +typedef u32 inet6_ehashfn_t(const struct net *, const struct in6_addr *, const u16, const struct in6_addr *, const __be16); + +struct udp_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + __u16 cscov; + __u8 partial_cov; +}; + +struct udp_iter_state { + struct seq_net_private p; + int bucket; +}; + +enum switchdev_attr_id { + SWITCHDEV_ATTR_ID_UNDEFINED = 0, + SWITCHDEV_ATTR_ID_PORT_STP_STATE = 1, + SWITCHDEV_ATTR_ID_PORT_MST_STATE = 2, + SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 3, + SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS = 4, + SWITCHDEV_ATTR_ID_PORT_MROUTER = 5, + SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 6, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 7, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL = 8, + SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 9, + 
SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 10, + SWITCHDEV_ATTR_ID_BRIDGE_MST = 11, + SWITCHDEV_ATTR_ID_MRP_PORT_ROLE = 12, + SWITCHDEV_ATTR_ID_VLAN_MSTI = 13, +}; + +struct switchdev_mst_state { + u16 msti; + u8 state; +}; + +struct switchdev_brport_flags { + long unsigned int val; + long unsigned int mask; +}; + +struct switchdev_vlan_msti { + u16 vid; + u16 msti; +}; + +struct switchdev_attr { + struct net_device *orig_dev; + enum switchdev_attr_id id; + u32 flags; + void *complete_priv; + void (*complete)(struct net_device *, int, void *); + union { + u8 stp_state; + struct switchdev_mst_state mst_state; + struct switchdev_brport_flags brport_flags; + bool mrouter; + clock_t ageing_time; + bool vlan_filtering; + u16 vlan_protocol; + bool mst; + bool mc_disabled; + u8 mrp_port_role; + struct switchdev_vlan_msti vlan_msti; + } u; +}; + +struct br_config_bpdu { + unsigned int topology_change: 1; + unsigned int topology_change_ack: 1; + bridge_id root; + int root_path_cost; + bridge_id bridge_id; + port_id port_id; + int message_age; + int max_age; + int hello_time; + int forward_delay; +}; + +struct ppc_debug_info { + __u32 version; + __u32 num_instruction_bps; + __u32 num_data_bps; + __u32 num_condition_regs; + __u32 data_bp_alignment; + __u32 sizeof_condition; + __u64 features; +}; + +struct ppc_hw_breakpoint { + __u32 version; + __u32 trigger_type; + __u32 addr_mode; + __u32 condition_mode; + __u64 addr; + __u64 addr2; + __u64 condition_value; +}; + +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + +typedef struct { + void *lock; +} class_rcu_tasks_trace_t; + +struct trace_event_raw_sys_enter { + struct trace_entry ent; + long int id; + long unsigned int args[6]; + char __data[0]; +}; + +struct trace_event_raw_sys_exit { + struct trace_entry ent; + long int id; + long int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_sys_enter {}; + +struct trace_event_data_offsets_sys_exit {}; + +typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long int); + +typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long int); + +enum powerpc_regset { + REGSET_GPR = 0, + REGSET_FPR = 1, +}; + +typedef __u32 Elf32_Off; + +typedef __s32 Elf32_Sword; + +struct elf32_rela { + Elf32_Addr r_offset; + Elf32_Word r_info; + Elf32_Sword r_addend; +}; + +typedef struct elf32_rela Elf32_Rela; + +struct elf32_hdr { + unsigned char e_ident[16]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; + Elf32_Off e_phoff; + Elf32_Off e_shoff; + Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; +}; + +typedef struct elf32_hdr Elf32_Ehdr; + +struct elf32_shdr { + Elf32_Word sh_name; + Elf32_Word sh_type; + Elf32_Word sh_flags; + Elf32_Addr sh_addr; + Elf32_Off sh_offset; + Elf32_Word sh_size; + Elf32_Word sh_link; + Elf32_Word sh_info; + Elf32_Word sh_addralign; + Elf32_Word sh_entsize; +}; + +typedef struct elf32_shdr Elf32_Shdr; + +struct ppc_plt_entry { + unsigned int jump[4]; +}; + +struct syscore_ops { + struct list_head node; + int (*suspend)(); + void (*resume)(); + void (*shutdown)(); +}; + +struct pmac_irq_hw { + unsigned int event; + unsigned int enable; + unsigned int ack; + unsigned int level; +}; + +struct siginfo { + union { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; + int 
_si_pad[32]; + }; +}; + +struct waitid_info { + pid_t pid; + uid_t uid; + int status; + int cause; +}; + +struct wait_opts { + enum pid_type wo_type; + int wo_flags; + struct pid *wo_pid; + struct waitid_info *wo_info; + int wo_stat; + struct rusage *wo_rusage; + wait_queue_entry_t child_wait; + int notask_error; +}; + +struct trace_event_raw_rcu_utilization { + struct trace_entry ent; + const char *s; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_future_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + long int gp_seq_req; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_grace_period_init { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + u8 level; + int grplo; + int grphi; + long unsigned int qsmask; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_grace_period { + struct trace_entry ent; + const char *rcuname; + long int gpseq; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_exp_funnel_lock { + struct trace_entry ent; + const char *rcuname; + u8 level; + int grplo; + int grphi; + const char *gpevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_preempt_task { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_unlock_preempted_task { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int pid; + char __data[0]; +}; + +struct trace_event_raw_rcu_quiescent_state_report { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + long unsigned int mask; + long unsigned int qsmask; + u8 level; + int grplo; + int grphi; + u8 gp_tasks; + char __data[0]; +}; + +struct trace_event_raw_rcu_fqs { + struct trace_entry ent; + const char *rcuname; + long int gp_seq; + int cpu; + const char *qsevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_stall_warning { + struct trace_entry ent; + const char *rcuname; + const char *msg; + char __data[0]; +}; + +struct trace_event_raw_rcu_watching { + struct trace_entry ent; + const char *polarity; + long int oldnesting; + long int newnesting; + int counter; + char __data[0]; +}; + +struct trace_event_raw_rcu_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + long int qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_segcb_stats { + struct trace_entry ent; + const char *ctx; + long unsigned int gp_seq[4]; + long int seglen[4]; + char __data[0]; +}; + +struct trace_event_raw_rcu_kvfree_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + long unsigned int offset; + long int qlen; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_start { + struct trace_entry ent; + const char *rcuname; + long int qlen; + long int blimit; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + void *func; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kvfree_callback { + struct trace_entry ent; + const char *rcuname; + void *rhp; + long unsigned int offset; + char __data[0]; +}; + +struct trace_event_raw_rcu_invoke_kfree_bulk_callback { + struct trace_entry ent; + const char *rcuname; + long unsigned int nr_records; + void **p; + char __data[0]; +}; + +struct 
trace_event_raw_rcu_sr_normal { + struct trace_entry ent; + const char *rcuname; + void *rhp; + const char *srevent; + char __data[0]; +}; + +struct trace_event_raw_rcu_batch_end { + struct trace_entry ent; + const char *rcuname; + int callbacks_invoked; + char cb; + char nr; + char iit; + char risk; + char __data[0]; +}; + +struct trace_event_raw_rcu_torture_read { + struct trace_entry ent; + char rcutorturename[8]; + struct callback_head *rhp; + long unsigned int secs; + long unsigned int c_old; + long unsigned int c; + char __data[0]; +}; + +struct trace_event_raw_rcu_barrier { + struct trace_entry ent; + const char *rcuname; + const char *s; + int cpu; + int cnt; + long unsigned int done; + char __data[0]; +}; + +struct trace_event_data_offsets_rcu_utilization {}; + +struct trace_event_data_offsets_rcu_grace_period {}; + +struct trace_event_data_offsets_rcu_future_grace_period {}; + +struct trace_event_data_offsets_rcu_grace_period_init {}; + +struct trace_event_data_offsets_rcu_exp_grace_period {}; + +struct trace_event_data_offsets_rcu_exp_funnel_lock {}; + +struct trace_event_data_offsets_rcu_preempt_task {}; + +struct trace_event_data_offsets_rcu_unlock_preempted_task {}; + +struct trace_event_data_offsets_rcu_quiescent_state_report {}; + +struct trace_event_data_offsets_rcu_fqs {}; + +struct trace_event_data_offsets_rcu_stall_warning {}; + +struct trace_event_data_offsets_rcu_watching {}; + +struct trace_event_data_offsets_rcu_callback {}; + +struct trace_event_data_offsets_rcu_segcb_stats {}; + +struct trace_event_data_offsets_rcu_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_batch_start {}; + +struct trace_event_data_offsets_rcu_invoke_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kvfree_callback {}; + +struct trace_event_data_offsets_rcu_invoke_kfree_bulk_callback {}; + +struct trace_event_data_offsets_rcu_sr_normal {}; + +struct trace_event_data_offsets_rcu_batch_end {}; + +struct trace_event_data_offsets_rcu_torture_read {}; + +struct trace_event_data_offsets_rcu_barrier {}; + +typedef void (*btf_trace_rcu_utilization)(void *, const char *); + +typedef void (*btf_trace_rcu_grace_period)(void *, const char *, long unsigned int, const char *); + +typedef void (*btf_trace_rcu_future_grace_period)(void *, const char *, long unsigned int, long unsigned int, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_grace_period_init)(void *, const char *, long unsigned int, u8, int, int, long unsigned int); + +typedef void (*btf_trace_rcu_exp_grace_period)(void *, const char *, long unsigned int, const char *); + +typedef void (*btf_trace_rcu_exp_funnel_lock)(void *, const char *, u8, int, int, const char *); + +typedef void (*btf_trace_rcu_preempt_task)(void *, const char *, int, long unsigned int); + +typedef void (*btf_trace_rcu_unlock_preempted_task)(void *, const char *, long unsigned int, int); + +typedef void (*btf_trace_rcu_quiescent_state_report)(void *, const char *, long unsigned int, long unsigned int, long unsigned int, u8, int, int, int); + +typedef void (*btf_trace_rcu_fqs)(void *, const char *, long unsigned int, int, const char *); + +typedef void (*btf_trace_rcu_stall_warning)(void *, const char *, const char *); + +typedef void (*btf_trace_rcu_watching)(void *, const char *, long int, long int, int); + +typedef void (*btf_trace_rcu_callback)(void *, const char *, struct callback_head *, long int); + +typedef void (*btf_trace_rcu_segcb_stats)(void *, struct rcu_segcblist *, const char *); + +typedef void 
(*btf_trace_rcu_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int, long int); + +typedef void (*btf_trace_rcu_batch_start)(void *, const char *, long int, long int); + +typedef void (*btf_trace_rcu_invoke_callback)(void *, const char *, struct callback_head *); + +typedef void (*btf_trace_rcu_invoke_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int); + +typedef void (*btf_trace_rcu_invoke_kfree_bulk_callback)(void *, const char *, long unsigned int, void **); + +typedef void (*btf_trace_rcu_sr_normal)(void *, const char *, struct callback_head *, const char *); + +typedef void (*btf_trace_rcu_batch_end)(void *, const char *, int, char, char, char, char); + +typedef void (*btf_trace_rcu_torture_read)(void *, const char *, struct callback_head *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_rcu_barrier)(void *, const char *, const char *, int, int, long unsigned int); + +struct rcu_tasks; + +typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *); + +typedef void (*pregp_func_t)(struct list_head *); + +typedef void (*pertask_func_t)(struct task_struct *, struct list_head *); + +typedef void (*postscan_func_t)(struct list_head *); + +typedef void (*holdouts_func_t)(struct list_head *, bool, bool *); + +typedef void (*postgp_func_t)(struct rcu_tasks *); + +struct rcu_tasks_percpu; + +struct rcu_tasks { + struct rcuwait cbs_wait; + raw_spinlock_t cbs_gbl_lock; + struct mutex tasks_gp_mutex; + int gp_state; + int gp_sleep; + int init_fract; + long unsigned int gp_jiffies; + long unsigned int gp_start; + long unsigned int tasks_gp_seq; + long unsigned int n_ipis; + long unsigned int n_ipis_fails; + struct task_struct *kthread_ptr; + long unsigned int lazy_jiffies; + rcu_tasks_gp_func_t gp_func; + pregp_func_t pregp_func; + pertask_func_t pertask_func; + postscan_func_t postscan_func; + holdouts_func_t holdouts_func; + postgp_func_t postgp_func; + call_rcu_func_t call_func; + unsigned int wait_state; + struct rcu_tasks_percpu *rtpcpu; + struct rcu_tasks_percpu **rtpcp_array; + int percpu_enqueue_shift; + int percpu_enqueue_lim; + int percpu_dequeue_lim; + long unsigned int percpu_dequeue_gpseq; + struct mutex barrier_q_mutex; + atomic_t barrier_q_count; + struct completion barrier_q_completion; + long unsigned int barrier_q_seq; + long unsigned int barrier_q_start; + char *name; + char *kname; +}; + +struct rcu_tasks_percpu { + struct rcu_segcblist cblist; + raw_spinlock_t lock; + long unsigned int rtp_jiffies; + long unsigned int rtp_n_lock_retries; + struct timer_list lazy_timer; + unsigned int urgent_gp; + struct work_struct rtp_work; + struct irq_work rtp_irq_work; + struct callback_head barrier_q_head; + struct list_head rtp_blkd_tasks; + struct list_head rtp_exit_list; + int cpu; + int index; + struct rcu_tasks *rtpp; +}; + +struct trc_stall_chk_rdr { + int nesting; + int ipi_to_cpu; + u8 needqs; +}; + +struct eprobe_trace_entry_head { + struct trace_entry ent; +}; + +struct trace_eprobe { + const char *event_system; + const char *event_name; + char *filter_str; + struct trace_event_call *event; + struct dyn_event devent; + struct trace_probe tp; +}; + +struct eprobe_data { + struct trace_event_file *file; + struct trace_eprobe *ep; +}; + +struct bpf_iter_num { + __u64 __opaque[1]; +}; + +enum { + BPF_MAX_LOOPS = 8388608, +}; + +struct bpf_iter_target_info { + struct list_head list; + const struct bpf_iter_reg *reg_info; + u32 btf_id; +}; + +struct bpf_iter_link { + struct bpf_link link; + 
struct bpf_iter_aux_info aux; + struct bpf_iter_target_info *tinfo; +}; + +struct bpf_iter_priv_data { + struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; + struct bpf_prog *prog; + long: 32; + u64 session_id; + u64 seq_num; + bool done_stop; + long: 32; + u8 target_private[0]; +}; + +typedef u64 (*btf_bpf_for_each_map_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_loop)(u32, void *, void *, u64); + +struct bpf_iter_num_kern { + int cur; + int end; +}; + +enum vm_stat_item { + NR_DIRTY_THRESHOLD = 0, + NR_DIRTY_BG_THRESHOLD = 1, + NR_MEMMAP_PAGES = 2, + NR_MEMMAP_BOOT_PAGES = 3, + NR_VM_STAT_ITEMS = 4, +}; + +struct contig_page_info { + long unsigned int free_pages; + long unsigned int free_blocks_total; + long unsigned int free_blocks_suitable; +}; + +struct vma_prepare { + struct vm_area_struct *vma; + struct vm_area_struct *adj_next; + struct file *file; + struct address_space *mapping; + struct anon_vma *anon_vma; + struct vm_area_struct *insert; + struct vm_area_struct *remove; + struct vm_area_struct *remove2; +}; + +struct vma_munmap_struct { + struct vma_iterator *vmi; + struct vm_area_struct *vma; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct list_head *uf; + long unsigned int start; + long unsigned int end; + long unsigned int unmap_start; + long unsigned int unmap_end; + int vma_count; + bool unlock; + bool clear_ptes; + long unsigned int nr_pages; + long unsigned int locked_vm; + long unsigned int nr_accounted; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int data_vm; +}; + +struct mmap_state { + struct mm_struct *mm; + struct vma_iterator *vmi; + long unsigned int addr; + long unsigned int end; + long unsigned int pgoff; + long unsigned int pglen; + long unsigned int flags; + struct file *file; + long unsigned int charged; + bool retry_merge; + struct vm_area_struct *prev; + struct vm_area_struct *next; + struct vma_munmap_struct vms; + struct ma_state mas_detach; + struct maple_tree mt_detach; +}; + +typedef int class_get_unused_fd_t; + +struct mnt_ns_info { + __u32 size; + __u32 nr_mounts; + __u64 mnt_ns_id; +}; + +struct ns_get_path_task_args { + const struct proc_ns_operations *ns_ops; + struct task_struct *task; +}; + +struct eventfd_ctx { + struct kref kref; + wait_queue_head_t wqh; + __u64 count; + unsigned int flags; + int id; +}; + +struct simple_xattr { + struct rb_node rb_node; + char *name; + size_t size; + char value[0]; +}; + +struct jbd2_journal_block_tail { + __be32 t_checksum; +}; + +struct trace_event_raw_jbd2_checkpoint { + struct trace_entry ent; + dev_t dev; + int result; + char __data[0]; +}; + +struct trace_event_raw_jbd2_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + tid_t transaction; + char __data[0]; +}; + +struct trace_event_raw_jbd2_end_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + tid_t transaction; + tid_t head; + char __data[0]; +}; + +struct trace_event_raw_jbd2_submit_inode_data { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_start_class { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_extend { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int buffer_credits; + int requested_blocks; + char __data[0]; +}; + +struct 
trace_event_raw_jbd2_handle_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + unsigned int type; + unsigned int line_no; + int interval; + int sync; + int requested_blocks; + int dirtied_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_run_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + long unsigned int wait; + long unsigned int request_delay; + long unsigned int running; + long unsigned int locked; + long unsigned int flushing; + long unsigned int logging; + __u32 handle_count; + __u32 blocks; + __u32 blocks_logged; + char __data[0]; +}; + +struct trace_event_raw_jbd2_checkpoint_stats { + struct trace_entry ent; + dev_t dev; + tid_t tid; + long unsigned int chp_time; + __u32 forced_to_close; + __u32 written; + __u32 dropped; + char __data[0]; +}; + +struct trace_event_raw_jbd2_update_log_tail { + struct trace_entry ent; + dev_t dev; + tid_t tail_sequence; + tid_t first_tid; + long unsigned int block_nr; + long unsigned int freed; + char __data[0]; +}; + +struct trace_event_raw_jbd2_write_superblock { + struct trace_entry ent; + dev_t dev; + blk_opf_t write_flags; + char __data[0]; +}; + +struct trace_event_raw_jbd2_lock_buffer_stall { + struct trace_entry ent; + dev_t dev; + long unsigned int stall_ms; + char __data[0]; +}; + +struct trace_event_raw_jbd2_journal_shrink { + struct trace_entry ent; + dev_t dev; + long unsigned int nr_to_scan; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_jbd2_shrink_scan_exit { + struct trace_entry ent; + dev_t dev; + long unsigned int nr_to_scan; + long unsigned int nr_shrunk; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_jbd2_shrink_checkpoint_list { + struct trace_entry ent; + dev_t dev; + tid_t first_tid; + tid_t tid; + tid_t last_tid; + long unsigned int nr_freed; + tid_t next_tid; + char __data[0]; +}; + +struct trace_event_data_offsets_jbd2_checkpoint {}; + +struct trace_event_data_offsets_jbd2_commit {}; + +struct trace_event_data_offsets_jbd2_end_commit {}; + +struct trace_event_data_offsets_jbd2_submit_inode_data {}; + +struct trace_event_data_offsets_jbd2_handle_start_class {}; + +struct trace_event_data_offsets_jbd2_handle_extend {}; + +struct trace_event_data_offsets_jbd2_handle_stats {}; + +struct trace_event_data_offsets_jbd2_run_stats {}; + +struct trace_event_data_offsets_jbd2_checkpoint_stats {}; + +struct trace_event_data_offsets_jbd2_update_log_tail {}; + +struct trace_event_data_offsets_jbd2_write_superblock {}; + +struct trace_event_data_offsets_jbd2_lock_buffer_stall {}; + +struct trace_event_data_offsets_jbd2_journal_shrink {}; + +struct trace_event_data_offsets_jbd2_shrink_scan_exit {}; + +struct trace_event_data_offsets_jbd2_shrink_checkpoint_list {}; + +typedef void (*btf_trace_jbd2_checkpoint)(void *, journal_t *, int); + +typedef void (*btf_trace_jbd2_start_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_locking)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_flushing)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_logging)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_drop_transaction)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_end_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_submit_inode_data)(void *, struct inode *); + +typedef void (*btf_trace_jbd2_handle_start)(void *, dev_t, tid_t, unsigned int, unsigned int, int); + +typedef void 
(*btf_trace_jbd2_handle_restart)(void *, dev_t, tid_t, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_extend)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int); + +typedef void (*btf_trace_jbd2_handle_stats)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int, int, int); + +typedef void (*btf_trace_jbd2_run_stats)(void *, dev_t, tid_t, struct transaction_run_stats_s *); + +typedef void (*btf_trace_jbd2_checkpoint_stats)(void *, dev_t, tid_t, struct transaction_chp_stats_s *); + +typedef void (*btf_trace_jbd2_update_log_tail)(void *, journal_t *, tid_t, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_write_superblock)(void *, journal_t *, blk_opf_t); + +typedef void (*btf_trace_jbd2_lock_buffer_stall)(void *, dev_t, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_count)(void *, journal_t *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_scan_enter)(void *, journal_t *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_scan_exit)(void *, journal_t *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_shrink_checkpoint_list)(void *, journal_t *, tid_t, tid_t, tid_t, long unsigned int, tid_t); + +struct jbd2_stats_proc_session { + journal_t *journal; + struct transaction_stats_s *stats; + int start; + int max; +}; + +struct btrfs_compr_pool { + struct shrinker *shrinker; + spinlock_t lock; + struct list_head list; + int count; + int thresh; +}; + +struct bucket_item { + u32 count; +}; + +struct heuristic_ws { + u8 *sample; + u32 sample_size; + struct bucket_item *bucket; + struct bucket_item *bucket_b; + struct list_head list; +}; + +enum key_lookup_flag { + KEY_LOOKUP_CREATE = 1, + KEY_LOOKUP_PARTIAL = 2, + KEY_LOOKUP_ALL = 3, +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); + int (*encrypt)(struct skcipher_request *); + int (*decrypt)(struct skcipher_request *); + int (*export)(struct skcipher_request *, void *); + int (*import)(struct skcipher_request *, const void *); + int (*init)(struct crypto_skcipher *); + void (*exit)(struct crypto_skcipher *); + unsigned int walksize; + union { + struct { + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int statesize; + long: 32; + struct crypto_alg base; + }; + struct skcipher_alg_common co; + }; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } src; + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } dst; + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + unsigned int ivsize; + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *); + long: 32; + union { + struct { + char head[56]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct essiv_instance_ctx { + union { + struct crypto_skcipher_spawn skcipher_spawn; + struct 
crypto_aead_spawn aead_spawn; + } u; + char essiv_cipher_name[128]; + char shash_driver_name[128]; +}; + +struct essiv_tfm_ctx { + union { + struct crypto_skcipher *skcipher; + struct crypto_aead *aead; + } u; + struct crypto_cipher *essiv_cipher; + struct crypto_shash *hash; + int ivoffset; +}; + +struct essiv_aead_request_ctx { + struct scatterlist sg[4]; + u8 *assoc; + long: 32; + struct aead_request aead_req; +}; + +struct badblocks_context { + sector_t start; + sector_t len; + int ack; + long: 32; +}; + +enum dd_data_dir { + DD_READ = 0, + DD_WRITE = 1, +}; + +enum { + DD_DIR_COUNT = 2, +}; + +enum dd_prio { + DD_RT_PRIO = 0, + DD_BE_PRIO = 1, + DD_IDLE_PRIO = 2, + DD_PRIO_MAX = 2, +}; + +enum { + DD_PRIO_COUNT = 3, +}; + +struct io_stats_per_prio { + uint32_t inserted; + uint32_t merged; + uint32_t dispatched; + atomic_t completed; +}; + +struct dd_per_prio { + struct list_head dispatch; + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + sector_t latest_pos[2]; + struct io_stats_per_prio stats; +}; + +struct deadline_data { + struct dd_per_prio per_prio[3]; + enum dd_data_dir last_dir; + unsigned int batching; + unsigned int starved; + int fifo_expire[2]; + int fifo_batch; + int writes_starved; + int front_merges; + u32 async_depth; + int prio_aging_expire; + spinlock_t lock; + long: 32; +}; + +struct io_cancel_data { + struct io_ring_ctx *ctx; + long: 32; + union { + u64 data; + struct file *file; + }; + u8 opcode; + u32 flags; + int seq; + long: 32; +}; + +struct io_timeout_data { + struct io_kiocb *req; + long: 32; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; + u32 flags; +}; + +struct io_timeout { + struct file *file; + u32 off; + u32 target_seq; + u32 repeats; + struct list_head list; + struct io_kiocb *head; + struct io_kiocb *prev; +}; + +struct io_timeout_rem { + struct file *file; + long: 32; + u64 addr; + struct timespec64 ts; + u32 flags; + bool ltimeout; +}; + +typedef struct { + size_t bitContainer; + unsigned int bitPos; + char *startPtr; + char *ptr; + char *endPtr; +} BIT_CStream_t; + +typedef struct { + ptrdiff_t value; + const void *stateTable; + const void *symbolTT; + unsigned int stateLog; +} FSE_CState_t; + +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; + +typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1, +} ZSTD_defaultPolicy_e; + +typedef struct { + S16 norm[53]; + U32 wksp[285]; +} ZSTD_BuildCTableWksp; + +struct node_groups { + unsigned int id; + union { + unsigned int ngroups; + unsigned int ncpus; + }; +}; + +struct fb_cvt_data { + u32 xres; + u32 yres; + u32 refresh; + u32 f_refresh; + u32 pixclock; + u32 hperiod; + u32 hblank; + u32 hfreq; + u32 htotal; + u32 vtotal; + u32 vsync; + u32 hsync; + u32 h_front_porch; + u32 h_back_porch; + u32 v_front_porch; + u32 v_back_porch; + u32 h_margin; + u32 v_margin; + u32 interlace; + u32 aspect_ratio; + u32 active_pixels; + u32 flags; + u32 status; +}; + +struct ww_class { + atomic_long_t stamp; + struct lock_class_key acquire_key; + struct lock_class_key mutex_key; + const char *acquire_name; + const char *mutex_name; + unsigned int is_wait_die; +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT = 0, + DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, + DMA_FENCE_FLAG_USER_BITS = 3, +}; + +struct dma_resv_list { + struct callback_head rcu; + u32 num_fences; + u32 max_fences; + struct dma_fence *table[0]; +}; + +struct dma_fence_array; + +struct dma_fence_array_cb { + 
struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +struct dma_fence_array { + struct dma_fence base; + spinlock_t lock; + unsigned int num_fences; + atomic_t num_pending; + struct dma_fence **fences; + struct irq_work work; + struct dma_fence_array_cb callbacks[0]; +}; + +enum { + AHCI_PCI_BAR_STA2X11 = 0, + AHCI_PCI_BAR_CAVIUM = 0, + AHCI_PCI_BAR_LOONGSON = 0, + AHCI_PCI_BAR_ENMOTUS = 2, + AHCI_PCI_BAR_CAVIUM_GEN5 = 4, + AHCI_PCI_BAR_STANDARD = 5, +}; + +enum board_ids { + board_ahci = 0, + board_ahci_43bit_dma = 1, + board_ahci_ign_iferr = 2, + board_ahci_no_debounce_delay = 3, + board_ahci_no_msi = 4, + board_ahci_pcs_quirk = 5, + board_ahci_pcs_quirk_no_devslp = 6, + board_ahci_pcs_quirk_no_sntf = 7, + board_ahci_yes_fbs = 8, + board_ahci_al = 9, + board_ahci_avn = 10, + board_ahci_mcp65 = 11, + board_ahci_mcp77 = 12, + board_ahci_mcp89 = 13, + board_ahci_mv = 14, + board_ahci_sb600 = 15, + board_ahci_sb700 = 16, + board_ahci_vt8251 = 17, + board_ahci_mcp_linux = 11, + board_ahci_mcp67 = 11, + board_ahci_mcp73 = 11, + board_ahci_mcp79 = 12, +}; + +struct e1000_opt_list { + int i; + char *str; +}; + +struct e1000_option { + enum { + enable_option = 0, + range_option = 1, + list_option = 2, + } type; + const char *name; + const char *err; + int def; + union { + struct { + int min; + int max; + } r; + struct { + int nr; + struct e1000_opt_list *p; + } l; + } arg; +}; + +struct usb_class_driver { + char *name; + char * (*devnode)(const struct device *, umode_t *); + const struct file_operations *fops; + int minor_base; +}; + +struct quirk_entry { + u16 vid; + u16 pid; + u32 flags; +}; + +struct xhci_slot_ctx { + __le32 dev_info; + __le32 dev_info2; + __le32 tt_info; + __le32 dev_state; + __le32 reserved[4]; +}; + +struct input_seq_state { + short unsigned int pos; + bool mutex_acquired; + int input_devices_state; +}; + +struct input_devres { + struct input_dev *input; +}; + +enum jvc_state { + STATE_INACTIVE___5 = 0, + STATE_HEADER_SPACE___4 = 1, + STATE_BIT_PULSE___4 = 2, + STATE_BIT_SPACE___4 = 3, + STATE_TRAILER_PULSE___3 = 4, + STATE_TRAILER_SPACE___3 = 5, + STATE_CHECK_REPEAT = 6, +}; + +enum { + DISK_EVENT_MEDIA_CHANGE = 1, + DISK_EVENT_EJECT_REQUEST = 2, +}; + +struct hd_geometry { + unsigned char heads; + unsigned char sectors; + short unsigned int cylinders; + long unsigned int start; +}; + +struct mdp_device_descriptor_s { + __u32 number; + __u32 major; + __u32 minor; + __u32 raid_disk; + __u32 state; + __u32 reserved[27]; +}; + +typedef struct mdp_device_descriptor_s mdp_disk_t; + +struct mdp_superblock_s { + __u32 md_magic; + __u32 major_version; + __u32 minor_version; + __u32 patch_version; + __u32 gvalid_words; + __u32 set_uuid0; + __u32 ctime; + __u32 level; + __u32 size; + __u32 nr_disks; + __u32 raid_disks; + __u32 md_minor; + __u32 not_persistent; + __u32 set_uuid1; + __u32 set_uuid2; + __u32 set_uuid3; + __u32 gstate_creserved[16]; + __u32 utime; + __u32 state; + __u32 active_disks; + __u32 working_disks; + __u32 failed_disks; + __u32 spare_disks; + __u32 sb_csum; + __u32 events_hi; + __u32 events_lo; + __u32 cp_events_hi; + __u32 cp_events_lo; + __u32 recovery_cp; + __u64 reshape_position; + __u32 new_level; + __u32 delta_disks; + __u32 new_layout; + __u32 new_chunk; + __u32 gstate_sreserved[14]; + __u32 layout; + __u32 chunk_size; + __u32 root_pv; + __u32 root_block; + __u32 pstate_reserved[60]; + mdp_disk_t disks[27]; + __u32 reserved[0]; + mdp_disk_t this_disk; +}; + +typedef struct mdp_superblock_s mdp_super_t; + +struct mdp_superblock_1 { + __le32 
magic; + __le32 major_version; + __le32 feature_map; + __le32 pad0; + __u8 set_uuid[16]; + char set_name[32]; + __le64 ctime; + __le32 level; + __le32 layout; + __le64 size; + __le32 chunksize; + __le32 raid_disks; + union { + __le32 bitmap_offset; + struct { + __le16 offset; + __le16 size; + } ppl; + }; + __le32 new_level; + __le64 reshape_position; + __le32 delta_disks; + __le32 new_layout; + __le32 new_chunk; + __le32 new_offset; + __le64 data_offset; + __le64 data_size; + __le64 super_offset; + union { + __le64 recovery_offset; + __le64 journal_tail; + }; + __le32 dev_number; + __le32 cnt_corrected_read; + __u8 device_uuid[16]; + __u8 devflags; + __u8 bblog_shift; + __le16 bblog_size; + __le32 bblog_offset; + __le64 utime; + __le64 events; + __le64 resync_offset; + __le32 sb_csum; + __le32 max_dev; + __u8 pad3[32]; + __le16 dev_roles[0]; +}; + +struct mdu_version_s { + int major; + int minor; + int patchlevel; +}; + +typedef struct mdu_version_s mdu_version_t; + +struct mdu_array_info_s { + int major_version; + int minor_version; + int patch_version; + unsigned int ctime; + int level; + int size; + int nr_disks; + int raid_disks; + int md_minor; + int not_persistent; + unsigned int utime; + int state; + int active_disks; + int working_disks; + int failed_disks; + int spare_disks; + int layout; + int chunk_size; +}; + +typedef struct mdu_array_info_s mdu_array_info_t; + +struct mdu_disk_info_s { + int number; + int major; + int minor; + int raid_disk; + int state; +}; + +typedef struct mdu_disk_info_s mdu_disk_info_t; + +struct mdu_bitmap_file_s { + char pathname[4096]; +}; + +typedef struct mdu_bitmap_file_s mdu_bitmap_file_t; + +struct mddev; + +struct md_rdev; + +struct md_cluster_operations { + int (*join)(struct mddev *, int); + int (*leave)(struct mddev *); + int (*slot_number)(struct mddev *); + int (*resync_info_update)(struct mddev *, sector_t, sector_t); + int (*resync_start_notify)(struct mddev *); + int (*resync_status_get)(struct mddev *); + void (*resync_info_get)(struct mddev *, sector_t *, sector_t *); + int (*metadata_update_start)(struct mddev *); + int (*metadata_update_finish)(struct mddev *); + void (*metadata_update_cancel)(struct mddev *); + int (*resync_start)(struct mddev *); + int (*resync_finish)(struct mddev *); + int (*area_resyncing)(struct mddev *, int, sector_t, sector_t); + int (*add_new_disk)(struct mddev *, struct md_rdev *); + void (*add_new_disk_cancel)(struct mddev *); + int (*new_disk_ack)(struct mddev *, bool); + int (*remove_disk)(struct mddev *, struct md_rdev *); + void (*load_bitmaps)(struct mddev *, int); + int (*gather_bitmaps)(struct md_rdev *); + int (*resize_bitmaps)(struct mddev *, sector_t, sector_t); + int (*lock_all_bitmaps)(struct mddev *); + void (*unlock_all_bitmaps)(struct mddev *); + void (*update_size)(struct mddev *, sector_t); +}; + +enum sync_action { + ACTION_RESYNC = 0, + ACTION_RECOVER = 1, + ACTION_CHECK = 2, + ACTION_REPAIR = 3, + ACTION_RESHAPE = 4, + ACTION_FROZEN = 5, + ACTION_IDLE = 6, + NR_SYNC_ACTIONS = 7, +}; + +struct md_cluster_info; + +struct md_personality; + +struct md_thread; + +struct bitmap_operations; + +struct mddev { + void *private; + struct md_personality *pers; + dev_t unit; + int md_minor; + struct list_head disks; + long unsigned int flags; + long unsigned int sb_flags; + int suspended; + struct mutex suspend_mutex; + struct percpu_ref active_io; + int ro; + int sysfs_active; + struct gendisk *gendisk; + struct kobject kobj; + int hold_active; + int major_version; + int minor_version; + int 
patch_version; + int persistent; + int external; + char metadata_type[17]; + int chunk_sectors; + time64_t ctime; + time64_t utime; + int level; + int layout; + char clevel[16]; + int raid_disks; + int max_disks; + sector_t dev_sectors; + sector_t array_sectors; + int external_size; + long: 32; + __u64 events; + int can_decrease_events; + char uuid[16]; + long: 32; + sector_t reshape_position; + int delta_disks; + int new_level; + int new_layout; + int new_chunk_sectors; + int reshape_backwards; + struct md_thread *thread; + struct md_thread *sync_thread; + enum sync_action last_sync_action; + sector_t curr_resync; + sector_t curr_resync_completed; + long unsigned int resync_mark; + long: 32; + sector_t resync_mark_cnt; + sector_t curr_mark_cnt; + sector_t resync_max_sectors; + atomic64_t resync_mismatches; + sector_t suspend_lo; + sector_t suspend_hi; + int sync_speed_min; + int sync_speed_max; + int parallel_resync; + int ok_start_degraded; + long unsigned int recovery; + int recovery_disabled; + int in_sync; + struct mutex open_mutex; + struct mutex reconfig_mutex; + atomic_t active; + atomic_t openers; + int changed; + int degraded; + atomic_t recovery_active; + wait_queue_head_t recovery_wait; + long: 32; + sector_t recovery_cp; + sector_t resync_min; + sector_t resync_max; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_action; + struct kernfs_node *sysfs_completed; + struct kernfs_node *sysfs_degraded; + struct kernfs_node *sysfs_level; + struct work_struct del_work; + struct work_struct sync_work; + spinlock_t lock; + wait_queue_head_t sb_wait; + atomic_t pending_writes; + unsigned int safemode; + unsigned int safemode_delay; + struct timer_list safemode_timer; + struct percpu_ref writes_pending; + int sync_checkers; + void *bitmap; + struct bitmap_operations *bitmap_ops; + struct { + struct file *file; + long: 32; + loff_t offset; + long unsigned int space; + long: 32; + loff_t default_offset; + long unsigned int default_space; + struct mutex mutex; + long unsigned int chunksize; + long unsigned int daemon_sleep; + long unsigned int max_write_behind; + int external; + int nodes; + char cluster_name[64]; + long: 32; + } bitmap_info; + atomic_t max_corr_read_errors; + struct list_head all_mddevs; + const struct attribute_group *to_remove; + struct bio_set bio_set; + struct bio_set sync_set; + struct bio_set io_clone_set; + struct work_struct event_work; + mempool_t *serial_info_pool; + void (*sync_super)(struct mddev *, struct md_rdev *); + struct md_cluster_info *cluster_info; + unsigned int good_device_nr; + unsigned int noio_flag; + struct list_head deleting; + atomic_t sync_seq; + bool has_superblocks: 1; + bool fail_last_dev: 1; + bool serialize_policy: 1; + long: 32; +}; + +struct serial_in_rdev; + +struct md_rdev { + struct list_head same_set; + sector_t sectors; + struct mddev *mddev; + int last_events; + struct block_device *meta_bdev; + struct block_device *bdev; + struct file *bdev_file; + struct page *sb_page; + struct page *bb_page; + int sb_loaded; + __u64 sb_events; + sector_t data_offset; + sector_t new_data_offset; + sector_t sb_start; + int sb_size; + int preferred_minor; + struct kobject kobj; + long unsigned int flags; + wait_queue_head_t blocked_wait; + int desc_nr; + int raid_disk; + int new_raid_disk; + int saved_raid_disk; + long: 32; + union { + sector_t recovery_offset; + sector_t journal_tail; + }; + atomic_t nr_pending; + atomic_t read_errors; + time64_t last_read_error; + atomic_t corrected_errors; + struct serial_in_rdev *serial; + struct 
kernfs_node *sysfs_state; + struct kernfs_node *sysfs_unack_badblocks; + struct kernfs_node *sysfs_badblocks; + long: 32; + struct badblocks badblocks; + struct { + short int offset; + unsigned int size; + sector_t sector; + } ppl; +}; + +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + +enum flag_bits { + Faulty = 0, + In_sync = 1, + Bitmap_sync = 2, + WriteMostly = 3, + AutoDetected = 4, + Blocked = 5, + WriteErrorSeen = 6, + FaultRecorded = 7, + BlockedBadBlocks = 8, + WantReplacement = 9, + Replacement = 10, + Candidate = 11, + Journal = 12, + ClusterRemove = 13, + ExternalBbl = 14, + FailFast = 15, + LastDev = 16, + CollisionCheck = 17, + Nonrot = 18, +}; + +enum mddev_flags { + MD_ARRAY_FIRST_USE = 0, + MD_CLOSING = 1, + MD_JOURNAL_CLEAN = 2, + MD_HAS_JOURNAL = 3, + MD_CLUSTER_RESYNC_LOCKED = 4, + MD_FAILFAST_SUPPORTED = 5, + MD_HAS_PPL = 6, + MD_HAS_MULTIPLE_PPLS = 7, + MD_NOT_READY = 8, + MD_BROKEN = 9, + MD_DELETED = 10, +}; + +enum mddev_sb_flags { + MD_SB_CHANGE_DEVS = 0, + MD_SB_CHANGE_CLEAN = 1, + MD_SB_CHANGE_PENDING = 2, + MD_SB_NEED_REWRITE = 3, +}; + +enum { + MD_RESYNC_NONE = 0, + MD_RESYNC_YIELDED = 1, + MD_RESYNC_DELAYED = 2, + MD_RESYNC_ACTIVE = 3, +}; + +struct md_personality { + char *name; + int level; + struct list_head list; + struct module *owner; + bool (*make_request)(struct mddev *, struct bio *); + int (*run)(struct mddev *); + int (*start)(struct mddev *); + void (*free)(struct mddev *, void *); + void (*status)(struct seq_file *, struct mddev *); + void (*error_handler)(struct mddev *, struct md_rdev *); + int (*hot_add_disk)(struct mddev *, struct md_rdev *); + int (*hot_remove_disk)(struct mddev *, struct md_rdev *); + int (*spare_active)(struct mddev *); + sector_t (*sync_request)(struct mddev *, sector_t, sector_t, int *); + int (*resize)(struct mddev *, sector_t); + sector_t (*size)(struct mddev *, sector_t, int); + int (*check_reshape)(struct mddev *); + int (*start_reshape)(struct mddev *); + void (*finish_reshape)(struct mddev *); + void (*update_reshape_pos)(struct mddev *); + void (*prepare_suspend)(struct mddev *); + void (*quiesce)(struct mddev *, int); + void * (*takeover)(struct mddev *); + int (*change_consistency_policy)(struct mddev *, const char *); +}; + +struct md_thread { + void (*run)(struct md_thread *); + struct mddev *mddev; + wait_queue_head_t wqueue; + long unsigned int flags; + struct task_struct *tsk; + long unsigned int timeout; + void *private; +}; + +struct md_bitmap_stats; + +struct bitmap_operations { + bool (*enabled)(struct mddev *); + int (*create)(struct mddev *, int); + int (*resize)(struct mddev *, sector_t, int, bool); + int (*load)(struct mddev *); + void (*destroy)(struct mddev *); + void (*flush)(struct mddev *); + void (*write_all)(struct mddev *); + void (*dirty_bits)(struct mddev *, long unsigned int, long unsigned int); + void (*unplug)(struct mddev *, bool); + void (*daemon_work)(struct mddev *); + void (*wait_behind_writes)(struct mddev *); + int (*startwrite)(struct mddev *, sector_t, long unsigned int, bool); + void (*endwrite)(struct mddev *, sector_t, long unsigned int, bool, bool); + bool (*start_sync)(struct mddev *, sector_t, sector_t *, bool); + void (*end_sync)(struct mddev *, sector_t, sector_t *); + void (*cond_end_sync)(struct mddev *, sector_t, bool); + void (*close_sync)(struct mddev *); + void (*update_sb)(void *); + int (*get_stats)(void *, struct md_bitmap_stats *); + void (*sync_with_cluster)(struct mddev *, 
sector_t, sector_t, sector_t, sector_t); + void * (*get_from_slot)(struct mddev *, int); + int (*copy_from_slot)(struct mddev *, int, sector_t *, sector_t *, bool); + void (*set_pages)(void *, long unsigned int); + void (*free)(void *); +}; + +enum recovery_flags { + MD_RECOVERY_NEEDED = 0, + MD_RECOVERY_RUNNING = 1, + MD_RECOVERY_INTR = 2, + MD_RECOVERY_DONE = 3, + MD_RECOVERY_FROZEN = 4, + MD_RECOVERY_WAIT = 5, + MD_RECOVERY_ERROR = 6, + MD_RECOVERY_SYNC = 7, + MD_RECOVERY_REQUESTED = 8, + MD_RECOVERY_CHECK = 9, + MD_RECOVERY_RECOVER = 10, + MD_RECOVERY_RESHAPE = 11, + MD_RESYNCING_REMOTE = 12, +}; + +enum md_ro_state { + MD_RDWR = 0, + MD_RDONLY = 1, + MD_AUTO_READ = 2, + MD_MAX_STATE = 3, +}; + +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct mddev *, char *); + ssize_t (*store)(struct mddev *, const char *, size_t); +}; + +struct md_io_clone { + struct mddev *mddev; + struct bio *orig_bio; + long unsigned int start_time; + long: 32; + struct bio bio_clone; +}; + +struct md_bitmap_stats { + u64 events_cleared; + int behind_writes; + bool behind_wait; + long unsigned int missing_pages; + long unsigned int file_pages; + long unsigned int sync_size; + long unsigned int pages; + struct file *file; + long: 32; +}; + +struct super_type { + char *name; + struct module *owner; + int (*load_super)(struct md_rdev *, struct md_rdev *, int); + int (*validate_super)(struct mddev *, struct md_rdev *, struct md_rdev *); + void (*sync_super)(struct mddev *, struct md_rdev *); + long long unsigned int (*rdev_size_change)(struct md_rdev *, sector_t); + int (*allow_new_offset)(struct md_rdev *, long long unsigned int); +}; + +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct md_rdev *, char *); + ssize_t (*store)(struct md_rdev *, const char *, size_t); +}; + +enum array_state { + clear = 0, + inactive = 1, + suspended = 2, + readonly = 3, + read_auto = 4, + clean = 5, + active = 6, + write_pending = 7, + active_idle = 8, + broken = 9, + bad_word = 10, +}; + +struct detected_devices_node { + struct list_head list; + dev_t dev; +}; + +enum { + CTRL_CMD_UNSPEC = 0, + CTRL_CMD_NEWFAMILY = 1, + CTRL_CMD_DELFAMILY = 2, + CTRL_CMD_GETFAMILY = 3, + CTRL_CMD_NEWOPS = 4, + CTRL_CMD_DELOPS = 5, + CTRL_CMD_GETOPS = 6, + CTRL_CMD_NEWMCAST_GRP = 7, + CTRL_CMD_DELMCAST_GRP = 8, + CTRL_CMD_GETMCAST_GRP = 9, + CTRL_CMD_GETPOLICY = 10, + __CTRL_CMD_MAX = 11, +}; + +enum { + CTRL_ATTR_UNSPEC = 0, + CTRL_ATTR_FAMILY_ID = 1, + CTRL_ATTR_FAMILY_NAME = 2, + CTRL_ATTR_VERSION = 3, + CTRL_ATTR_HDRSIZE = 4, + CTRL_ATTR_MAXATTR = 5, + CTRL_ATTR_OPS = 6, + CTRL_ATTR_MCAST_GROUPS = 7, + CTRL_ATTR_POLICY = 8, + CTRL_ATTR_OP_POLICY = 9, + CTRL_ATTR_OP = 10, + __CTRL_ATTR_MAX = 11, +}; + +enum { + CTRL_ATTR_OP_UNSPEC = 0, + CTRL_ATTR_OP_ID = 1, + CTRL_ATTR_OP_FLAGS = 2, + __CTRL_ATTR_OP_MAX = 3, +}; + +enum { + CTRL_ATTR_MCAST_GRP_UNSPEC = 0, + CTRL_ATTR_MCAST_GRP_NAME = 1, + CTRL_ATTR_MCAST_GRP_ID = 2, + __CTRL_ATTR_MCAST_GRP_MAX = 3, +}; + +enum { + CTRL_ATTR_POLICY_UNSPEC = 0, + CTRL_ATTR_POLICY_DO = 1, + CTRL_ATTR_POLICY_DUMP = 2, + __CTRL_ATTR_POLICY_DUMP_MAX = 3, + CTRL_ATTR_POLICY_DUMP_MAX = 2, +}; + +struct genl_op_iter { + const struct genl_family *family; + struct genl_split_ops doit; + struct genl_split_ops dumpit; + int cmd_idx; + int entry_idx; + u32 cmd; + u8 flags; +}; + +struct genl_start_context { + const struct genl_family *family; + struct nlmsghdr *nlh; + struct netlink_ext_ack *extack; + const struct genl_split_ops *ops; + int hdrlen; +}; + +struct 
ctrl_dump_policy_ctx { + struct netlink_policy_dump_state *state; + const struct genl_family *rt; + struct genl_op_iter *op_iter; + u32 op; + u16 fam_id; + u8 dump_map: 1; + u8 single_op: 1; +}; + +struct phc_vclocks_reply_data { + struct ethnl_reply_data base; + int num; + int *index; +}; + +enum { + BPF_F_CURRENT_NETNS = -1, +}; + +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; + __u32 ipv6_dst[4]; + }; + }; + __u32 flags; + __be32 flow_label; +}; + +struct bpf_sock { + __u32 bound_dev_if; + __u32 family; + __u32 type; + __u32 protocol; + __u32 mark; + __u32 priority; + __u32 src_ip4; + __u32 src_ip6[4]; + __u32 src_port; + __be16 dst_port; + __u32 dst_ip4; + __u32 dst_ip6[4]; + __u32 state; + __s32 rx_queue_mapping; +}; + +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + __u32 vlan_tci; + __u32 vlan_proto; + __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; + __u32 hash; + __u32 tc_classid; + __u32 data; + __u32 data_end; + __u32 napi_id; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 data_meta; + union { + struct bpf_flow_keys *flow_keys; + }; + __u64 tstamp; + __u32 wire_len; + __u32 gso_segs; + union { + struct bpf_sock *sk; + }; + __u32 gso_size; + __u8 tstamp_type; + __u64 hwtstamp; +}; + +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + +struct nf_conn___init { + struct nf_conn ct; +}; + +struct bpf_ct_opts { + s32 netns_id; + s32 error; + u8 l4proto; + u8 dir; + u16 ct_zone_id; + u8 ct_zone_dir; + u8 reserved[3]; +}; + +enum { + NF_BPF_CT_OPTS_SZ = 16, +}; + +struct xt_ecn_info { + __u8 operation; + __u8 invert; + __u8 ip_ect; + union { + struct { + __u8 ect; + } tcp; + } proto; +}; + +struct ip6t_ip6 { + struct in6_addr src; + struct in6_addr dst; + struct in6_addr smsk; + struct in6_addr dmsk; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u16 proto; + __u8 tos; + __u8 flags; + __u8 invflags; +}; + +struct arpreq { + struct sockaddr arp_pa; + struct sockaddr arp_ha; + int arp_flags; + struct sockaddr arp_netmask; + char arp_dev[16]; +}; + +struct neigh_seq_state { + struct seq_net_private p; + struct neigh_table *tbl; + struct neigh_hash_table *nht; + void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); + unsigned int bucket; + unsigned int flags; +}; + +struct neighbour_cb { + long unsigned int sched_next; + unsigned int flags; +}; + +enum { + AX25_VALUES_IPDEFMODE = 0, + AX25_VALUES_AXDEFMODE = 1, + AX25_VALUES_BACKOFF = 2, + AX25_VALUES_CONMODE = 3, + AX25_VALUES_WINDOW = 4, + AX25_VALUES_EWINDOW = 5, + AX25_VALUES_T1 = 6, + AX25_VALUES_T2 = 7, + AX25_VALUES_T3 = 8, + AX25_VALUES_IDLE = 9, + AX25_VALUES_N2 = 10, + AX25_VALUES_PACLEN = 11, + AX25_VALUES_PROTOCOL = 12, + AX25_MAX_VALUES = 13, +}; + +struct cmsghdr { + __kernel_size_t cmsg_len; + int cmsg_level; + int cmsg_type; +}; + +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + 
struct in_addr ipi_addr; +}; + +struct sock_extended_err { + __u32 ee_errno; + __u8 ee_origin; + __u8 ee_type; + __u8 ee_code; + __u8 ee_pad; + __u32 ee_info; + union { + __u32 ee_data; + struct sock_ee_data_rfc4884 ee_rfc4884; + }; +}; + +struct sock_exterr_skb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + struct sock_extended_err ee; + u16 addr_offset; + __be16 port; + u8 opt_stats: 1; + u8 unused: 7; +}; + +struct mld_msg { + struct icmp6hdr mld_hdr; + struct in6_addr mld_mca; +}; + +struct elf32_phdr { + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; +}; + +typedef struct elf32_phdr Elf32_Phdr; + +typedef struct elf32_note Elf32_Nhdr; + +struct freader { + void *buf; + u32 buf_sz; + int err; + long: 32; + union { + struct { + struct file *file; + struct folio *folio; + void *addr; + long: 32; + loff_t folio_off; + bool may_fault; + long: 32; + }; + struct { + const char *data; + long: 32; + u64 data_sz; + }; + }; +}; + +struct ptdesc { + long unsigned int __page_flags; + union { + struct callback_head pt_rcu_head; + struct list_head pt_list; + struct { + long unsigned int _pt_pad_1; + pgtable_t pmd_huge_pte; + }; + }; + long unsigned int __page_mapping; + union { + long unsigned int pt_index; + struct mm_struct *pt_mm; + atomic_t pt_frag_refcount; + }; + union { + long unsigned int _pt_pad_2; + spinlock_t ptl; + }; + unsigned int __page_type; + atomic_t __page_refcount; + long unsigned int pt_memcg_data; +}; + +typedef void (*crash_shutdown_t)(); + +enum { + KTW_FREEZABLE = 1, +}; + +struct kthread_create_info { + char *full_name; + int (*threadfn)(void *); + void *data; + int node; + struct task_struct *result; + struct completion *done; + struct list_head list; +}; + +struct kthread { + long unsigned int flags; + unsigned int cpu; + int result; + int (*threadfn)(void *); + void *data; + struct completion parked; + struct completion exited; + struct cgroup_subsys_state *blkcg_css; + char *full_name; +}; + +enum KTHREAD_BITS { + KTHREAD_IS_PER_CPU = 0, + KTHREAD_SHOULD_STOP = 1, + KTHREAD_SHOULD_PARK = 2, +}; + +struct kthread_flush_work { + struct kthread_work work; + struct completion done; +}; + +struct irq_devres { + unsigned int irq; + void *dev_id; +}; + +struct irq_desc_devres { + unsigned int from; + unsigned int cnt; +}; + +struct tracer_stat { + const char *name; + void * (*stat_start)(struct tracer_stat *); + void * (*stat_next)(void *, int); + cmp_func_t stat_cmp; + int (*stat_show)(struct seq_file *, void *); + void (*stat_release)(void *); + int (*stat_headers)(struct seq_file *); +}; + +struct stat_node { + struct rb_node node; + void *stat; +}; + +struct stat_session { + struct list_head session_list; + struct tracer_stat *ts; + struct rb_root stat_root; + struct mutex stat_mutex; + struct dentry *file; +}; + +struct objpool_slot { + uint32_t head; + uint32_t tail; + uint32_t last; + uint32_t mask; + void *entries[0]; +}; + +typedef int (*objpool_init_obj_cb)(void *, void *); + +struct objpool_head; + +typedef int (*objpool_fini_cb)(struct objpool_head *, void *); + +struct objpool_head { + int obj_size; + int nr_objs; + int nr_possible_cpus; + int capacity; + gfp_t gfp; + refcount_t ref; + long unsigned int flags; + struct objpool_slot **cpu_slots; + objpool_fini_cb release; + void *context; +}; + +struct rethook_node; + +typedef void (*rethook_handler_t)(struct rethook_node *, void *, long unsigned int, 
struct pt_regs *); + +struct rethook; + +struct rethook_node { + struct callback_head rcu; + struct llist_node llist; + struct rethook *rethook; + long unsigned int ret_addr; + long unsigned int frame; +}; + +struct rethook { + void *data; + void (*handler)(struct rethook_node *, void *, long unsigned int, struct pt_regs *); + struct objpool_head pool; + struct callback_head rcu; +}; + +struct bpf_iter_seq_link_info { + u32 link_id; +}; + +struct bpf_iter__bpf_link { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_link *link; + }; +}; + +struct bpf_tuple { + struct bpf_prog *prog; + struct bpf_link *link; +}; + +struct tcx_link { + struct bpf_link link; + struct net_device *dev; + u32 location; +}; + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +struct kstatfs { + long int f_type; + long int f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + __kernel_fsid_t f_fsid; + long int f_namelen; + long int f_frsize; + long int f_flags; + long int f_spare[4]; + long: 32; +}; + +struct shared_policy {}; + +struct shmem_inode_info { + spinlock_t lock; + unsigned int seals; + long unsigned int flags; + long unsigned int alloced; + long unsigned int swapped; + union { + struct offset_ctx dir_offsets; + struct { + struct list_head shrinklist; + struct list_head swaplist; + }; + }; + long: 32; + struct timespec64 i_crtime; + struct shared_policy policy; + struct simple_xattrs xattrs; + long unsigned int fallocend; + unsigned int fsflags; + atomic_t stop_eviction; + long: 32; + struct inode vfs_inode; +}; + +struct shmem_quota_limits { + qsize_t usrquota_bhardlimit; + qsize_t usrquota_ihardlimit; + qsize_t grpquota_bhardlimit; + qsize_t grpquota_ihardlimit; +}; + +struct shmem_sb_info { + long unsigned int max_blocks; + long: 32; + struct percpu_counter used_blocks; + long unsigned int max_inodes; + long unsigned int free_ispace; + raw_spinlock_t stat_lock; + umode_t mode; + unsigned char huge; + kuid_t uid; + kgid_t gid; + bool full_inums; + bool noswap; + ino_t next_ino; + ino_t *ino_batch; + struct mempolicy *mpol; + spinlock_t shrinklist_lock; + struct list_head shrinklist; + long unsigned int shrinklist_len; + struct shmem_quota_limits qlimits; +}; + +enum sgp_type { + SGP_READ = 0, + SGP_NOALLOC = 1, + SGP_CACHE = 2, + SGP_WRITE = 3, + SGP_FALLOC = 4, +}; + +struct shmem_falloc { + wait_queue_head_t *waitq; + long unsigned int start; + long unsigned int next; + long unsigned int nr_falloced; + long unsigned int nr_unswapped; +}; + +struct shmem_options { + long long unsigned int blocks; + long long unsigned int inodes; + struct mempolicy *mpol; + kuid_t uid; + kgid_t gid; + umode_t mode; + bool full_inums; + int huge; + int seen; + bool noswap; + short unsigned int quota_types; + long: 32; + struct shmem_quota_limits qlimits; +}; + +enum shmem_param { + Opt_gid___5 = 0, + Opt_huge = 1, + Opt_mode___4 = 2, + Opt_mpol = 3, + Opt_nr_blocks = 4, + Opt_nr_inodes = 5, + Opt_size = 6, + Opt_uid___4 = 7, + Opt_inode32 = 8, + Opt_inode64 = 9, + Opt_noswap = 10, + Opt_quota = 11, + Opt_usrquota = 12, + Opt_grpquota = 13, + Opt_usrquota_block_hardlimit = 14, + Opt_usrquota_inode_hardlimit = 15, + Opt_grpquota_block_hardlimit = 16, + Opt_grpquota_inode_hardlimit = 17, + Opt_casefold_version = 18, + Opt_casefold = 19, + Opt_strict_encoding = 20, +}; + +struct trace_event_raw_test_pages_isolated { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int fin_pfn; + char __data[0]; +}; + +struct 
trace_event_data_offsets_test_pages_isolated {}; + +typedef void (*btf_trace_test_pages_isolated)(void *, long unsigned int, long unsigned int, long unsigned int); + +struct dnotify_struct { + struct dnotify_struct *dn_next; + __u32 dn_mask; + int dn_fd; + struct file *dn_filp; + fl_owner_t dn_owner; +}; + +struct dnotify_mark { + struct fsnotify_mark fsn_mark; + struct dnotify_struct *dn; +}; + +struct sysctl_alias { + const char *kernel_param; + const char *sysctl_param; +}; + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + +struct fat_entry { + int entry; + union { + u8 *ent12_p[2]; + __le16 *ent16_p; + __le32 *ent32_p; + } u; + int nr_bhs; + struct buffer_head *bhs[2]; + struct inode *fat_inode; +}; + +struct fatent_ra { + sector_t cur; + sector_t limit; + unsigned int ra_blocks; + long: 32; + sector_t ra_advance; + sector_t ra_next; + sector_t ra_limit; +}; + +struct fuse_backing_map { + int32_t fd; + uint32_t flags; + uint64_t padding; +}; + +struct backing_file_ctx { + const struct cred *cred; + void (*accessed)(struct file *); + void (*end_write)(struct kiocb *, ssize_t); +}; + +enum btrfs_feature_set { + FEAT_COMPAT = 0, + FEAT_COMPAT_RO = 1, + FEAT_INCOMPAT = 2, + FEAT_MAX = 3, +}; + +struct btrfs_qgroup_rsv { + u64 values[3]; +}; + +struct btrfs_qgroup { + u64 qgroupid; + u64 rfer; + u64 rfer_cmpr; + u64 excl; + u64 excl_cmpr; + u64 lim_flags; + u64 max_rfer; + u64 max_excl; + u64 rsv_rfer; + u64 rsv_excl; + struct btrfs_qgroup_rsv rsv; + struct list_head groups; + struct list_head members; + struct list_head dirty; + struct list_head iterator; + struct list_head nested_iterator; + struct rb_node node; + long: 32; + u64 old_refcnt; + u64 new_refcnt; + struct kobject kobj; + long: 32; +}; + +enum btrfs_qgroup_mode { + BTRFS_QGROUP_MODE_DISABLED = 0, + BTRFS_QGROUP_MODE_FULL = 1, + BTRFS_QGROUP_MODE_SIMPLE = 2, +}; + +struct btrfs_feature_attr { + struct kobj_attribute kobj_attr; + enum btrfs_feature_set feature_set; + long: 32; + u64 feature_bit; +}; + +struct raid_kobject { + u64 flags; + struct kobject kobj; + long: 32; +}; + +struct btrfs_lru_cache_entry { + struct list_head lru_list; + u64 key; + u64 gen; + struct list_head list; +}; + +struct btrfs_lru_cache { + struct list_head lru_list; + struct maple_tree entries; + unsigned int size; + unsigned int max_size; +}; + +typedef int __kernel_key_t; + +typedef __kernel_key_t key_t; + +struct msgbuf; + +struct ipc_kludge { + struct msgbuf *msgp; + long int msgtyp; +}; + +struct sembuf { + short unsigned int sem_num; + short int sem_op; + short int sem_flg; +}; + +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + long: 32; + void *__ctx[0]; +}; + +struct crypto_acomp { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct scomp_scratch { + spinlock_t lock; + void *src; + void *dst; +}; + +struct blk_plug_cb; + +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); + +struct blk_plug_cb { + struct list_head list; + blk_plug_cb_fn callback; + void *data; +}; + +struct trace_event_raw_block_buffer { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + size_t size; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_block_rq_requeue { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + 
short unsigned int ioprio; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_block_rq_completion { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + int error; + short unsigned int ioprio; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + unsigned int bytes; + short unsigned int ioprio; + char rwbs[8]; + char comm[16]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_bio_complete { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_bio { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_block_plug { + struct trace_entry ent; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_unplug { + struct trace_entry ent; + int nr_rq; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_split { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + sector_t new_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_remap { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_rq_remap { + struct trace_entry ent; + dev_t dev; + long: 32; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + unsigned int nr_bios; + char rwbs[8]; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_block_buffer {}; + +struct trace_event_data_offsets_block_rq_requeue { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_rq_completion { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_rq { + u32 cmd; + const void *cmd_ptr_; +}; + +struct trace_event_data_offsets_block_bio_complete {}; + +struct trace_event_data_offsets_block_bio {}; + +struct trace_event_data_offsets_block_plug {}; + +struct trace_event_data_offsets_block_unplug {}; + +struct trace_event_data_offsets_block_split {}; + +struct trace_event_data_offsets_block_bio_remap {}; + +struct trace_event_data_offsets_block_rq_remap {}; + +typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_rq_requeue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_complete)(void *, struct request *, blk_status_t, unsigned int); + +typedef void (*btf_trace_block_rq_error)(void *, struct request *, blk_status_t, unsigned int); + +typedef void (*btf_trace_block_rq_insert)(void *, struct request *); + +typedef void (*btf_trace_block_rq_issue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_merge)(void *, struct request *); + +typedef void (*btf_trace_block_io_start)(void *, struct request *); + +typedef void (*btf_trace_block_io_done)(void *, struct request *); + +typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *); + +typedef void (*btf_trace_block_bio_bounce)(void *, struct bio *); + +typedef void 
(*btf_trace_block_bio_backmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_frontmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_queue)(void *, struct bio *); + +typedef void (*btf_trace_block_getrq)(void *, struct bio *); + +typedef void (*btf_trace_block_plug)(void *, struct request_queue *); + +typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool); + +typedef void (*btf_trace_block_split)(void *, struct bio *, unsigned int); + +typedef void (*btf_trace_block_bio_remap)(void *, struct bio *, dev_t, sector_t); + +typedef void (*btf_trace_block_rq_remap)(void *, struct request *, dev_t, sector_t); + +struct io_nop { + struct file *file; + int result; + int fd; + int buffer; + unsigned int flags; +}; + +struct reciprocal_value_adv { + u32 m; + u8 sh; + u8 exp; + bool is_wide_m; +}; + +typedef resource_size_t (*resource_alignf)(void *, const struct resource *, resource_size_t, resource_size_t); + +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +struct clk_gated_fixed { + struct clk_gpio clk_gpio; + struct regulator *supply; + long unsigned int rate; +}; + +struct dev_printk_info { + char subsystem[16]; + char device[48]; +}; + +struct wake_irq { + struct device *dev; + unsigned int status; + int irq; + const char *name; +}; + +enum dpm_order { + DPM_ORDER_NONE = 0, + DPM_ORDER_DEV_AFTER_PARENT = 1, + DPM_ORDER_PARENT_BEFORE_DEV = 2, + DPM_ORDER_DEV_LAST = 3, +}; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +struct fwnode_link { + struct fwnode_handle *supplier; + struct list_head s_hook; + struct fwnode_handle *consumer; + struct list_head c_hook; + u8 flags; +}; + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +struct class_dir { + struct kobject kobj; + const struct class *class; +}; + +struct root_device { + struct device dev; + struct module *owner; + long: 32; +}; + +typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *); + +struct xhci_input_control_ctx { + __le32 drop_flags; + __le32 add_flags; + __le32 rsvd2[6]; +}; + +enum xhci_ep_reset_type { + EP_HARD_RESET = 0, + EP_SOFT_RESET = 1, +}; + +enum xhci_setup_dev { + SETUP_CONTEXT_ONLY = 0, + SETUP_CONTEXT_ADDRESS = 1, +}; + +enum xhci_cancelled_td_status { + TD_DIRTY = 0, + TD_HALTED = 1, + TD_CLEARING_CACHE = 2, + TD_CLEARING_CACHE_DEFERRED = 3, + TD_CLEARED = 4, +}; + +struct xhci_td { + struct list_head td_list; + struct list_head cancelled_td_list; + int status; + enum xhci_cancelled_td_status cancel_status; + struct urb *urb; + struct xhci_segment *start_seg; + union xhci_trb *start_trb; + struct xhci_segment *end_seg; + union xhci_trb *end_trb; + struct xhci_segment *bounce_seg; + bool urb_length_set; + bool error_mid_td; +}; + +struct urb_priv { + int num_tds; + int num_tds_done; + struct xhci_td td[0]; +}; + +struct ptp_vclock { + struct ptp_clock *pclock; + struct ptp_clock_info info; + struct ptp_clock *clock; + struct hlist_node vclock_hash_node; + struct cyclecounter cc; + struct timecounter tc; + struct mutex lock; + long: 32; +}; + +enum { + SOCK_WAKE_IO = 0, + SOCK_WAKE_WAITD = 1, + SOCK_WAKE_SPACE = 2, + SOCK_WAKE_URG = 3, +}; + +struct bpf_sk_lookup { + union { + union { + struct bpf_sock *sk; + }; + __u64 cookie; + }; + __u32 family; + __u32 protocol; + __u32 remote_ip4; + __u32 remote_ip6[4]; + __be16 remote_port; + __u32 local_ip4; + __u32 local_ip6[4]; + __u32 local_port; + __u32 ingress_ifindex; + 
long: 32; +}; + +struct btf_id_dtor_kfunc { + u32 btf_id; + u32 kfunc_btf_id; +}; + +struct bpf_cg_run_ctx { + struct bpf_run_ctx run_ctx; + const struct bpf_prog_array_item *prog_item; + int retval; +}; + +struct bpf_flow_dissector { + struct bpf_flow_keys *flow_keys; + const struct sk_buff *skb; + const void *data; + const void *data_end; +}; + +struct bpf_nf_ctx { + const struct nf_hook_state *state; + struct sk_buff *skb; +}; + +struct trace_event_raw_bpf_trigger_tp { + struct trace_entry ent; + int nonce; + char __data[0]; +}; + +struct trace_event_raw_bpf_test_finish { + struct trace_entry ent; + int err; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trigger_tp {}; + +struct trace_event_data_offsets_bpf_test_finish {}; + +typedef void (*btf_trace_bpf_trigger_tp)(void *, int); + +typedef void (*btf_trace_bpf_test_finish)(void *, int *); + +struct bpf_test_timer { + enum { + NO_PREEMPT = 0, + NO_MIGRATE = 1, + } mode; + u32 i; + u64 time_start; + u64 time_spent; +}; + +struct xdp_page_head { + struct xdp_buff orig_ctx; + struct xdp_buff ctx; + union { + struct { + struct {} __empty_frame; + struct xdp_frame frame[0]; + }; + struct { + struct {} __empty_data; + u8 data[0]; + }; + }; +}; + +struct xdp_test_data { + struct xdp_buff *orig_ctx; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct xdp_rxq_info rxq; + struct net_device *dev; + struct page_pool *pp; + struct xdp_frame **frames; + struct sk_buff **skbs; + struct xdp_mem_info mem; + u32 batch_size; + u32 frame_cnt; +}; + +struct bpf_fentry_test_t { + struct bpf_fentry_test_t *a; +}; + +struct prog_test_member1 { + int a; +}; + +struct prog_test_member { + struct prog_test_member1 m; + int c; +}; + +struct prog_test_ref_kfunc { + int a; + int b; + struct prog_test_member memb; + struct prog_test_ref_kfunc *next; + refcount_t cnt; +}; + +struct bpf_raw_tp_test_run_info { + struct bpf_prog *prog; + void *ctx; + u32 retval; +}; + +struct nf_bridge_info { + enum { + BRNF_PROTO_UNCHANGED = 0, + BRNF_PROTO_8021Q = 1, + BRNF_PROTO_PPPOE = 2, + } orig_proto: 8; + u8 pkt_otherhost: 1; + u8 in_prerouting: 1; + u8 bridged_dnat: 1; + u8 sabotage_in_done: 1; + __u16 frag_max_size; + int physinif; + struct net_device *physoutdev; + union { + __be32 ipv4_daddr; + struct in6_addr ipv6_daddr; + char neigh_header[8]; + }; +}; + +enum nfulnl_msg_types { + NFULNL_MSG_PACKET = 0, + NFULNL_MSG_CONFIG = 1, + NFULNL_MSG_MAX = 2, +}; + +struct nfulnl_msg_packet_hdr { + __be16 hw_protocol; + __u8 hook; + __u8 _pad; +}; + +struct nfulnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfulnl_msg_packet_timestamp { + __be64 sec; + __be64 usec; +}; + +enum nfulnl_vlan_attr { + NFULA_VLAN_UNSPEC = 0, + NFULA_VLAN_PROTO = 1, + NFULA_VLAN_TCI = 2, + __NFULA_VLAN_MAX = 3, +}; + +enum nfulnl_attr_type { + NFULA_UNSPEC = 0, + NFULA_PACKET_HDR = 1, + NFULA_MARK = 2, + NFULA_TIMESTAMP = 3, + NFULA_IFINDEX_INDEV = 4, + NFULA_IFINDEX_OUTDEV = 5, + NFULA_IFINDEX_PHYSINDEV = 6, + NFULA_IFINDEX_PHYSOUTDEV = 7, + NFULA_HWADDR = 8, + NFULA_PAYLOAD = 9, + NFULA_PREFIX = 10, + NFULA_UID = 11, + NFULA_SEQ = 12, + NFULA_SEQ_GLOBAL = 13, + NFULA_GID = 14, + NFULA_HWTYPE = 15, + NFULA_HWHEADER = 16, + NFULA_HWLEN = 17, + NFULA_CT = 18, + NFULA_CT_INFO = 19, + NFULA_VLAN = 20, + NFULA_L2HDR = 21, + __NFULA_MAX = 22, +}; + +enum nfulnl_msg_config_cmds { + NFULNL_CFG_CMD_NONE = 0, + NFULNL_CFG_CMD_BIND = 1, + NFULNL_CFG_CMD_UNBIND = 2, + NFULNL_CFG_CMD_PF_BIND = 3, + NFULNL_CFG_CMD_PF_UNBIND = 
4, +}; + +struct nfulnl_msg_config_cmd { + __u8 command; +}; + +struct nfulnl_msg_config_mode { + __be32 copy_range; + __u8 copy_mode; + __u8 _pad; +} __attribute__((packed)); + +enum nfulnl_attr_config { + NFULA_CFG_UNSPEC = 0, + NFULA_CFG_CMD = 1, + NFULA_CFG_MODE = 2, + NFULA_CFG_NLBUFSIZ = 3, + NFULA_CFG_TIMEOUT = 4, + NFULA_CFG_QTHRESH = 5, + NFULA_CFG_FLAGS = 6, + __NFULA_CFG_MAX = 7, +}; + +struct nfulnl_instance { + struct hlist_node hlist; + spinlock_t lock; + refcount_t use; + unsigned int qlen; + struct sk_buff *skb; + struct timer_list timer; + struct net *net; + netns_tracker ns_tracker; + struct user_namespace *peer_user_ns; + u32 peer_portid; + unsigned int flushtimeout; + unsigned int nlbufsiz; + unsigned int qthreshold; + u_int32_t copy_range; + u_int32_t seq; + u_int16_t group_num; + u_int16_t flags; + u_int8_t copy_mode; + struct callback_head rcu; +}; + +struct nfnl_log_net { + spinlock_t instances_lock; + struct hlist_head instance_table[16]; + atomic_t global_seq; +}; + +struct iter_state { + struct seq_net_private p; + unsigned int bucket; +}; + +enum nft_trace_attributes { + NFTA_TRACE_UNSPEC = 0, + NFTA_TRACE_TABLE = 1, + NFTA_TRACE_CHAIN = 2, + NFTA_TRACE_RULE_HANDLE = 3, + NFTA_TRACE_TYPE = 4, + NFTA_TRACE_VERDICT = 5, + NFTA_TRACE_ID = 6, + NFTA_TRACE_LL_HEADER = 7, + NFTA_TRACE_NETWORK_HEADER = 8, + NFTA_TRACE_TRANSPORT_HEADER = 9, + NFTA_TRACE_IIF = 10, + NFTA_TRACE_IIFTYPE = 11, + NFTA_TRACE_OIF = 12, + NFTA_TRACE_OIFTYPE = 13, + NFTA_TRACE_MARK = 14, + NFTA_TRACE_NFPROTO = 15, + NFTA_TRACE_POLICY = 16, + NFTA_TRACE_PAD = 17, + __NFTA_TRACE_MAX = 18, +}; + +enum nft_trace_types { + NFT_TRACETYPE_UNSPEC = 0, + NFT_TRACETYPE_POLICY = 1, + NFT_TRACETYPE_RETURN = 2, + NFT_TRACETYPE_RULE = 3, + __NFT_TRACETYPE_MAX = 4, +}; + +struct nft_traceinfo { + bool trace; + bool nf_trace; + bool packet_dumped; + enum nft_trace_types type: 8; + u32 skbid; + const struct nft_base_chain *basechain; +}; + +struct fib_entry_notifier_info { + struct fib_notifier_info info; + u32 dst; + int dst_len; + struct fib_info *fi; + dscp_t dscp; + u8 type; + u32 tb_id; +}; + +typedef unsigned int t_key; + +struct key_vector { + t_key key; + unsigned char pos; + unsigned char bits; + unsigned char slen; + union { + struct hlist_head leaf; + struct { + struct {} __empty_tnode; + struct key_vector *tnode[0]; + }; + }; +}; + +struct tnode { + struct callback_head rcu; + t_key empty_children; + t_key full_children; + struct key_vector *parent; + struct key_vector kv[1]; +}; + +struct trie_stat { + unsigned int totdepth; + unsigned int maxdepth; + unsigned int tnodes; + unsigned int leaves; + unsigned int nullpointers; + unsigned int prefixes; + unsigned int nodesizes[32]; +}; + +struct trie { + struct key_vector kv[1]; +}; + +struct fib_trie_iter { + struct seq_net_private p; + struct fib_table *tb; + struct key_vector *tnode; + unsigned int index; + unsigned int depth; +}; + +struct fib_route_iter { + struct seq_net_private p; + struct fib_table *main_tb; + struct key_vector *tnode; + long: 32; + loff_t pos; + t_key key; + long: 32; +}; + +struct rtgenmsg { + unsigned char rtgen_family; +}; + +enum fib6_walk_state { + FWS_L = 0, + FWS_R = 1, + FWS_C = 2, + FWS_U = 3, +}; + +struct fib6_walker { + struct list_head lh; + struct fib6_node *root; + struct fib6_node *node; + struct fib6_info *leaf; + enum fib6_walk_state state; + unsigned int skip; + unsigned int count; + unsigned int skip_in_node; + int (*func)(struct fib6_walker *); + void *args; +}; + +struct fib6_entry_notifier_info { + struct 
fib_notifier_info info; + struct fib6_info *rt; + unsigned int nsiblings; +}; + +struct ipv6_route_iter { + struct seq_net_private p; + struct fib6_walker w; + loff_t skip; + struct fib6_table *tbl; + int sernum; +}; + +struct bpf_iter__ipv6_route { + union { + struct bpf_iter_meta *meta; + }; + union { + struct fib6_info *rt; + }; +}; + +struct fib6_cleaner { + struct fib6_walker w; + struct net *net; + int (*func)(struct fib6_info *, void *); + int sernum; + void *arg; + bool skip_notify; +}; + +enum { + FIB6_NO_SERNUM_CHANGE = 0, +}; + +struct fib6_dump_arg { + struct net *net; + struct notifier_block *nb; + struct netlink_ext_ack *extack; +}; + +struct fib6_nh_pcpu_arg { + struct fib6_info *from; + const struct fib6_table *table; +}; + +struct lookup_args { + int offset; + const struct in6_addr *addr; +}; + +typedef __be32 rtas_arg_t; + +struct rtas_args { + __be32 token; + __be32 nargs; + __be32 nret; + rtas_arg_t args[16]; + rtas_arg_t *rets; +}; + +struct trace_event_raw_ppc64_interrupt_class { + struct trace_entry ent; + struct pt_regs *regs; + char __data[0]; +}; + +struct trace_event_raw_rtas_input { + struct trace_entry ent; + __u32 nargs; + u32 __data_loc_name; + u32 __data_loc_inputs; + char __data[0]; +}; + +struct trace_event_raw_rtas_output { + struct trace_entry ent; + __u32 nr_other; + __s32 status; + u32 __data_loc_name; + u32 __data_loc_other_outputs; + char __data[0]; +}; + +struct trace_event_raw_rtas_parameter_block { + struct trace_entry ent; + u32 token; + u32 nargs; + u32 nret; + __u32 params[16]; + char __data[0]; +}; + +struct trace_event_raw_tlbie { + struct trace_entry ent; + long unsigned int lpid; + long unsigned int local; + long unsigned int rb; + long unsigned int rs; + long unsigned int ric; + long unsigned int prs; + long unsigned int r; + char __data[0]; +}; + +struct trace_event_raw_tlbia { + struct trace_entry ent; + long unsigned int id; + char __data[0]; +}; + +struct trace_event_data_offsets_ppc64_interrupt_class {}; + +struct trace_event_data_offsets_rtas_input { + u32 name; + const void *name_ptr_; + u32 inputs; + const void *inputs_ptr_; +}; + +struct trace_event_data_offsets_rtas_output { + u32 name; + const void *name_ptr_; + u32 other_outputs; + const void *other_outputs_ptr_; +}; + +struct trace_event_data_offsets_rtas_parameter_block {}; + +struct trace_event_data_offsets_tlbie {}; + +struct trace_event_data_offsets_tlbia {}; + +typedef void (*btf_trace_irq_entry)(void *, struct pt_regs *); + +typedef void (*btf_trace_irq_exit)(void *, struct pt_regs *); + +typedef void (*btf_trace_timer_interrupt_entry)(void *, struct pt_regs *); + +typedef void (*btf_trace_timer_interrupt_exit)(void *, struct pt_regs *); + +typedef void (*btf_trace_rtas_input)(void *, struct rtas_args *, const char *); + +typedef void (*btf_trace_rtas_output)(void *, struct rtas_args *, const char *); + +typedef void (*btf_trace_rtas_ll_entry)(void *, struct rtas_args *); + +typedef void (*btf_trace_rtas_ll_exit)(void *, struct rtas_args *); + +typedef void (*btf_trace_tlbie)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_tlbia)(void *, long unsigned int); + +struct boot_param_header { + __be32 magic; + __be32 totalsize; + __be32 off_dt_struct; + __be32 off_dt_strings; + __be32 off_mem_rsvmap; + __be32 version; + __be32 last_comp_version; + __be32 boot_cpuid_phys; + __be32 dt_strings_size; + __be32 dt_struct_size; +}; + +struct boot_info_map_entry { 
+ __u32 physAddr; + __u32 size; +}; + +typedef struct boot_info_map_entry boot_info_map_entry_t; + +struct boot_infos { + __u32 version; + __u32 compatible_version; + __u8 *logicalDisplayBase; + __u32 machineID; + __u32 architecture; + __u32 deviceTreeOffset; + __u32 deviceTreeSize; + __u32 dispDeviceRect[4]; + __u32 dispDeviceDepth; + __u8 *dispDeviceBase; + __u32 dispDeviceRowBytes; + __u32 dispDeviceColorsOffset; + __u32 dispDeviceRegEntryOffset; + __u32 ramDisk; + __u32 ramDiskSize; + __u32 kernelParamsOffset; + boot_info_map_entry_t physMemoryMap[26]; + __u32 physMemoryMapSize; + __u32 frameBufferSize; + __u32 totalParamsSize; +}; + +typedef struct boot_infos boot_infos_t; + +struct bootx_dt_prop { + u32 name; + int length; + u32 value; + u32 next; +}; + +struct bootx_dt_node { + u32 unused0; + u32 unused1; + u32 phandle; + u32 unused2; + u32 unused3; + u32 unused4; + u32 unused5; + u32 full_name; + u32 properties; + u32 parent; + u32 child; + u32 sibling; + u32 next; + u32 allnext; +}; + +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING = 1, + UMH_DISABLED = 2, +}; + +enum reboot_type { + BOOT_TRIPLE = 116, + BOOT_KBD = 107, + BOOT_BIOS = 98, + BOOT_ACPI = 97, + BOOT_EFI = 101, + BOOT_CF9_FORCE = 112, + BOOT_CF9_SAFE = 113, +}; + +enum sys_off_mode { + SYS_OFF_MODE_POWER_OFF_PREPARE = 0, + SYS_OFF_MODE_POWER_OFF = 1, + SYS_OFF_MODE_RESTART_PREPARE = 2, + SYS_OFF_MODE_RESTART = 3, +}; + +struct sys_off_data { + int mode; + void *cb_data; + const char *cmd; + struct device *dev; +}; + +struct sys_off_handler { + struct notifier_block nb; + int (*sys_off_cb)(struct sys_off_data *); + void *cb_data; + enum sys_off_mode mode; + bool blocking; + void *list; + struct device *dev; +}; + +struct printk_info { + u64 seq; + u64 ts_nsec; + u16 text_len; + u8 facility; + u8 flags: 5; + u8 level: 3; + u32 caller_id; + struct dev_printk_info dev_info; +}; + +struct printk_record { + struct printk_info *info; + char *text_buf; + unsigned int text_buf_size; +}; + +struct prb_data_blk_lpos { + long unsigned int begin; + long unsigned int next; +}; + +struct prb_desc { + atomic_long_t state_var; + struct prb_data_blk_lpos text_blk_lpos; +}; + +struct prb_data_ring { + unsigned int size_bits; + char *data; + atomic_long_t head_lpos; + atomic_long_t tail_lpos; +}; + +struct prb_desc_ring { + unsigned int count_bits; + struct prb_desc *descs; + struct printk_info *infos; + atomic_long_t head_id; + atomic_long_t tail_id; + atomic_long_t last_finalized_seq; +}; + +struct printk_ringbuffer { + struct prb_desc_ring desc_ring; + struct prb_data_ring text_data_ring; + atomic_long_t fail; +}; + +struct prb_reserved_entry { + struct printk_ringbuffer *rb; + long unsigned int irqflags; + long unsigned int id; + unsigned int text_space; +}; + +enum desc_state { + desc_miss = -1, + desc_reserved = 0, + desc_committed = 1, + desc_finalized = 2, + desc_reusable = 3, +}; + +struct prb_data_block { + long unsigned int id; + char data[0]; +}; + +struct load_info { + const char *name; + struct module *mod; + Elf32_Ehdr *hdr; + long unsigned int len; + Elf32_Shdr *sechdrs; + char *secstrings; + char *strtab; + long unsigned int symoffs; + long unsigned int stroffs; + long unsigned int init_typeoffs; + long unsigned int core_typeoffs; + bool sig_ok; + long unsigned int mod_kallsyms_init_off; + struct { + unsigned int sym; + unsigned int str; + unsigned int mod; + unsigned int vers; + unsigned int info; + unsigned int pcpu; + } index; +}; + +struct sigevent { + sigval_t sigev_value; + int sigev_signo; + int 
sigev_notify; + union { + int _pad[13]; + int _tid; + struct { + void (*_function)(sigval_t); + void *_attribute; + } _sigev_thread; + } _sigev_un; +}; + +typedef struct sigevent sigevent_t; + +enum posix_timer_state { + POSIX_TIMER_DISARMED = 0, + POSIX_TIMER_ARMED = 1, + POSIX_TIMER_REQUEUE_PENDING = 2, +}; + +struct btf_anon_stack { + u32 tid; + u32 offset; +}; + +enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + BPF_PROG_BIND_MAP = 35, + BPF_TOKEN_CREATE = 36, + __MAX_BPF_CMD = 37, +}; + +struct btf_enum { + __u32 name_off; + __s32 val; +}; + +struct bpf_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; + long: 32; + u64 delegate_cmds; + u64 delegate_maps; + u64 delegate_progs; + u64 delegate_attachs; +}; + +struct bpf_preload_info { + char link_name[16]; + struct bpf_link *link; +}; + +struct bpf_preload_ops { + int (*preload)(struct bpf_preload_info *); + struct module *owner; +}; + +enum bpf_type { + BPF_TYPE_UNSPEC = 0, + BPF_TYPE_PROG = 1, + BPF_TYPE_MAP = 2, + BPF_TYPE_LINK = 3, +}; + +struct map_iter { + void *key; + bool done; +}; + +struct bpffs_btf_enums { + const struct btf *btf; + const struct btf_type *cmd_t; + const struct btf_type *map_t; + const struct btf_type *prog_t; + const struct btf_type *attach_t; +}; + +enum { + OPT_UID = 0, + OPT_GID = 1, + OPT_MODE = 2, + OPT_DELEGATE_CMDS = 3, + OPT_DELEGATE_MAPS = 4, + OPT_DELEGATE_PROGS = 5, + OPT_DELEGATE_ATTACHS = 6, +}; + +enum bpf_stack_build_id_status { + BPF_STACK_BUILD_ID_EMPTY = 0, + BPF_STACK_BUILD_ID_VALID = 1, + BPF_STACK_BUILD_ID_IP = 2, +}; + +struct bpf_stack_build_id { + __s32 status; + unsigned char build_id[20]; + union { + __u64 offset; + __u64 ip; + }; +}; + +enum { + BPF_F_SKIP_FIELD_MASK = 255, + BPF_F_USER_STACK = 256, + BPF_F_FAST_STACK_CMP = 512, + BPF_F_REUSE_STACKID = 1024, + BPF_F_USER_BUILD_ID = 2048, +}; + +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_WEIGHT_STRUCT = 
16777216, + PERF_SAMPLE_MAX = 33554432, +}; + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +struct bpf_perf_event_data_kern { + bpf_user_pt_regs_t *regs; + struct perf_sample_data *data; + struct perf_event *event; +}; + +struct pcpu_freelist_node; + +struct pcpu_freelist_head { + struct pcpu_freelist_node *first; + raw_spinlock_t lock; +}; + +struct pcpu_freelist_node { + struct pcpu_freelist_node *next; +}; + +struct pcpu_freelist { + struct pcpu_freelist_head *freelist; + struct pcpu_freelist_head extralist; +}; + +struct mmap_unlock_irq_work { + struct irq_work irq_work; + struct mm_struct *mm; +}; + +struct stack_map_bucket { + struct pcpu_freelist_node fnode; + u32 hash; + u32 nr; + long: 32; + u64 data[0]; +}; + +struct bpf_stack_map { + struct bpf_map map; + void *elems; + struct pcpu_freelist freelist; + u32 n_buckets; + struct stack_map_bucket *buckets[0]; + long: 32; +}; + +typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stackid_pe)(struct bpf_perf_event_data_kern *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_sleepable)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack_sleepable)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_pe)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct mpage_readpage_args { + struct bio *bio; + struct folio *folio; + unsigned int nr_pages; + bool is_readahead; + sector_t last_block_in_bio; + struct buffer_head map_bh; + long unsigned int first_logical_block; + get_block_t *get_block; +}; + +struct mpage_data { + struct bio *bio; + long: 32; + sector_t last_block_in_bio; + get_block_t *get_block; + long: 32; +}; + +struct ext4_new_group_data { + __u32 group; + long: 32; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 mdata_blocks; + __u32 free_clusters_count; + long: 32; +}; + +enum { + BLOCK_BITMAP = 0, + INODE_BITMAP = 1, + INODE_TABLE = 2, + GROUP_TABLE_COUNT = 3, +}; + +struct ext4_rcu_ptr { + struct callback_head rcu; + void *ptr; +}; + +struct ext4_new_flex_group_data { + struct ext4_new_group_data *groups; + __u16 *bg_flags; + ext4_group_t resize_bg; + ext4_group_t count; +}; + +struct fuse_ioctl_in { + uint64_t fh; + uint32_t flags; + uint32_t cmd; + uint64_t arg; + uint32_t in_size; + uint32_t out_size; +}; + +struct fuse_ioctl_iovec { + uint64_t base; + uint64_t len; +}; + +struct fuse_ioctl_out { + int32_t result; + uint32_t flags; + uint32_t in_iovs; + uint32_t out_iovs; +}; + +enum hash_algo { + HASH_ALGO_MD4 = 0, + HASH_ALGO_MD5 = 1, + HASH_ALGO_SHA1 = 2, + HASH_ALGO_RIPE_MD_160 = 3, + HASH_ALGO_SHA256 = 4, + HASH_ALGO_SHA384 = 5, + HASH_ALGO_SHA512 = 6, + HASH_ALGO_SHA224 = 7, + HASH_ALGO_RIPE_MD_128 = 8, + HASH_ALGO_RIPE_MD_256 = 9, + HASH_ALGO_RIPE_MD_320 = 10, + HASH_ALGO_WP_256 = 11, + HASH_ALGO_WP_384 = 12, + HASH_ALGO_WP_512 = 13, + HASH_ALGO_TGR_128 = 14, + HASH_ALGO_TGR_160 = 15, + HASH_ALGO_TGR_192 = 16, + HASH_ALGO_SM3_256 = 17, + HASH_ALGO_STREEBOG_256 = 18, + HASH_ALGO_STREEBOG_512 = 19, + HASH_ALGO_SHA3_256 = 20, + HASH_ALGO_SHA3_384 = 21, + HASH_ALGO_SHA3_512 = 22, + HASH_ALGO__LAST = 23, +}; + +struct fsverity_enable_arg { + __u32 version; + __u32 hash_algorithm; + __u32 block_size; + __u32 salt_size; + __u64 salt_ptr; + __u32 sig_size; + __u32 
__reserved1; + __u64 sig_ptr; + __u64 __reserved2[11]; +}; + +struct fsverity_digest { + __u16 digest_algorithm; + __u16 digest_size; + __u8 digest[0]; +}; + +enum { + __PAGE_UNLOCK_BIT = 0, + PAGE_UNLOCK = 1, + __PAGE_UNLOCK_SEQ = 0, + __PAGE_START_WRITEBACK_BIT = 1, + PAGE_START_WRITEBACK = 2, + __PAGE_START_WRITEBACK_SEQ = 1, + __PAGE_END_WRITEBACK_BIT = 2, + PAGE_END_WRITEBACK = 4, + __PAGE_END_WRITEBACK_SEQ = 2, + __PAGE_SET_ORDERED_BIT = 3, + PAGE_SET_ORDERED = 8, + __PAGE_SET_ORDERED_SEQ = 3, +}; + +struct btrfs_eb_write_context { + struct writeback_control *wbc; + struct extent_buffer *eb; + struct btrfs_block_group *zoned_bg; +}; + +struct btrfs_bio_ctrl { + struct btrfs_bio *bbio; + enum btrfs_compression_type compress_type; + u32 len_to_oe_boundary; + blk_opf_t opf; + btrfs_bio_end_io_t end_io_func; + struct writeback_control *wbc; + long unsigned int submit_bitmap; +}; + +struct tree_mod_root { + u64 logical; + u8 level; + long: 32; +}; + +struct tree_mod_elem { + struct rb_node node; + long: 32; + u64 logical; + u64 seq; + enum btrfs_mod_log_op op; + int slot; + u64 generation; + struct btrfs_disk_key key; + long: 32; + u64 blockptr; + struct { + int dst_slot; + int nr_items; + } move; + struct tree_mod_root old_root; +}; + +struct keyctl_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; + __u32 __spare[10]; +}; + +struct keyctl_pkey_params { + __s32 key_id; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + __u32 __spare[7]; +}; + +enum { + Opt_err___2 = 0, + Opt_enc = 1, + Opt_hash = 2, +}; + +struct ahash_alg { + int (*init)(struct ahash_request *); + int (*update)(struct ahash_request *); + int (*final)(struct ahash_request *); + int (*finup)(struct ahash_request *); + int (*digest)(struct ahash_request *); + int (*export)(struct ahash_request *, void *); + int (*import)(struct ahash_request *, const void *); + int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_ahash *); + void (*exit_tfm)(struct crypto_ahash *); + int (*clone_tfm)(struct crypto_ahash *, struct crypto_ahash *); + long: 32; + struct hash_alg_common halg; +}; + +struct crypto_hash_walk { + char *data; + unsigned int offset; + unsigned int flags; + struct page *pg; + unsigned int entrylen; + unsigned int total; + struct scatterlist *sg; +}; + +struct ahash_instance { + void (*free)(struct ahash_instance *); + long: 32; + union { + struct { + char head[56]; + struct crypto_instance base; + } s; + struct ahash_alg alg; + }; +}; + +typedef union { + long unsigned int addr; + struct page *page; + dma_addr_t dma; +} addr_conv_t; + +struct blk_ia_range_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_independent_access_range *, char *); +}; + +struct iov_iter_state { + size_t iov_offset; + size_t count; + long unsigned int nr_segs; +}; + +struct io_async_rw { + size_t bytes_done; + long: 32; + struct iov_iter iter; + struct iov_iter_state iter_state; + struct iovec fast_iov; + struct iovec *free_iovec; + int free_iov_nr; + struct wait_page_queue wpq; +}; + +struct io_rw { + struct kiocb kiocb; + u64 addr; + u32 len; + rwf_t flags; +}; + +typedef struct { + unsigned char op; + unsigned char bits; + short unsigned int val; +} code; + +typedef enum { + CODES = 0, + LENS = 1, + DISTS = 2, +} codetype; + +struct resource_constraint { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_alignf alignf; + void *alignf_data; 
+}; + +struct pci_dev_resource { + struct list_head list; + struct resource *res; + struct pci_dev *dev; + resource_size_t start; + resource_size_t end; + resource_size_t add_size; + resource_size_t min_align; + long unsigned int flags; +}; + +enum release_type { + leaf_only = 0, + whole_subtree = 1, +}; + +enum enable_type { + undefined = -1, + user_disabled = 0, + auto_disabled = 1, + user_enabled = 2, + auto_enabled = 3, +}; + +struct clk_mux { + struct clk_hw hw; + void *reg; + const u32 *table; + u32 mask; + u8 shift; + u8 flags; + spinlock_t *lock; +}; + +enum oom_constraint { + CONSTRAINT_NONE = 0, + CONSTRAINT_CPUSET = 1, + CONSTRAINT_MEMORY_POLICY = 2, + CONSTRAINT_MEMCG = 3, +}; + +struct oom_control { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct mem_cgroup *memcg; + const gfp_t gfp_mask; + const int order; + long unsigned int totalpages; + struct task_struct *chosen; + long int chosen_points; + enum oom_constraint constraint; +}; + +struct sysrq_state { + struct input_handle handle; + struct work_struct reinject_work; + long unsigned int key_down[24]; + unsigned int alt; + unsigned int alt_use; + unsigned int shift; + unsigned int shift_use; + bool active; + bool need_reinject; + bool reinjecting; + bool reset_canceled; + bool reset_requested; + long unsigned int reset_keybit[24]; + int reset_seq_len; + int reset_seq_cnt; + int reset_seq_version; + struct timer_list keyreset_timer; +}; + +typedef struct { + spinlock_t *lock; +} class_spinlock_irq_t; + +enum phy_state_work { + PHY_STATE_WORK_NONE = 0, + PHY_STATE_WORK_ANEG = 1, + PHY_STATE_WORK_SUSPEND = 2, +}; + +enum reset_control_flags { + RESET_CONTROL_EXCLUSIVE = 4, + RESET_CONTROL_EXCLUSIVE_DEASSERTED = 12, + RESET_CONTROL_EXCLUSIVE_RELEASED = 0, + RESET_CONTROL_SHARED = 1, + RESET_CONTROL_SHARED_DEASSERTED = 9, + RESET_CONTROL_OPTIONAL_EXCLUSIVE = 6, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED = 14, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED = 2, + RESET_CONTROL_OPTIONAL_SHARED = 3, + RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED = 11, +}; + +struct xhci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); +}; + +typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); + +struct serial_info { + struct rb_node node; + long: 32; + sector_t start; + sector_t last; + sector_t _subtree_last; +}; + +struct raid1_info { + struct md_rdev *rdev; + long: 32; + sector_t head_position; + sector_t next_seq_sect; + sector_t seq_start; +}; + +struct pool_info { + struct mddev *mddev; + int raid_disks; +}; + +struct r1conf { + struct mddev *mddev; + struct raid1_info *mirrors; + int raid_disks; + int nonrot_disks; + spinlock_t device_lock; + struct list_head retry_list; + struct list_head bio_end_io_list; + struct bio_list pending_bio_list; + wait_queue_head_t wait_barrier; + spinlock_t resync_lock; + atomic_t nr_sync_pending; + atomic_t *nr_pending; + atomic_t *nr_waiting; + atomic_t *nr_queued; + atomic_t *barrier; + int array_frozen; + int fullsync; + int recovery_disabled; + struct 
pool_info *poolinfo; + mempool_t r1bio_pool; + mempool_t r1buf_pool; + struct bio_set bio_split; + struct page *tmppage; + struct md_thread *thread; + sector_t cluster_sync_low; + sector_t cluster_sync_high; +}; + +struct r1bio { + atomic_t remaining; + atomic_t behind_remaining; + sector_t sector; + int sectors; + long unsigned int state; + struct mddev *mddev; + struct bio *master_bio; + int read_disk; + struct list_head retry_list; + struct bio *behind_master_bio; + struct bio *bios[0]; +}; + +enum r1bio_state { + R1BIO_Uptodate = 0, + R1BIO_IsSync = 1, + R1BIO_Degraded = 2, + R1BIO_BehindIO = 3, + R1BIO_ReadError = 4, + R1BIO_Returned = 5, + R1BIO_MadeGood = 6, + R1BIO_WriteError = 7, + R1BIO_FailFast = 8, +}; + +struct resync_pages { + void *raid_bio; + struct page *pages[16]; +}; + +struct raid1_plug_cb { + struct blk_plug_cb cb; + struct bio_list pending; + unsigned int count; +}; + +struct read_balance_ctl { + sector_t closest_dist; + int closest_dist_disk; + int min_pending; + int min_pending_disk; + int sequential_disk; + int readable_disks; + long: 32; +}; + +struct ethtool_cmd { + __u32 cmd; + __u32 supported; + __u32 advertising; + __u16 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 transceiver; + __u8 autoneg; + __u8 mdio_support; + __u32 maxtxpkt; + __u32 maxrxpkt; + __u16 speed_hi; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __u32 lp_advertising; + __u32 reserved[2]; +}; + +struct ethtool_forced_speed_map { + u32 speed; + long unsigned int caps[4]; + const u32 *cap_arr; + u32 arr_size; +}; + +struct link_mode_info { + int speed; + u8 lanes; + u8 duplex; +}; + +struct bpf_nf_link { + struct bpf_link link; + struct nf_hook_ops hook_ops; + netns_tracker ns_tracker; + struct net *net; + u32 dead; + const struct nf_defrag_hook *defrag_hook; + long: 32; +}; + +enum nft_payload_bases { + NFT_PAYLOAD_LL_HEADER = 0, + NFT_PAYLOAD_NETWORK_HEADER = 1, + NFT_PAYLOAD_TRANSPORT_HEADER = 2, + NFT_PAYLOAD_INNER_HEADER = 3, + NFT_PAYLOAD_TUN_HEADER = 4, +}; + +struct nft_bitwise_fast_expr { + u32 mask; + u32 xor; + u8 sreg; + u8 dreg; +}; + +struct nft_cmp_fast_expr { + u32 data; + u32 mask; + u8 sreg; + u8 len; + bool inv; +}; + +struct nft_cmp16_fast_expr { + struct nft_data data; + struct nft_data mask; + u8 sreg; + u8 len; + bool inv; + long: 32; +}; + +struct nft_payload { + enum nft_payload_bases base: 8; + u8 offset; + u8 len; + u8 dreg; +}; + +struct nft_jumpstack { + const struct nft_rule_dp *rule; +}; + +enum lwtunnel_ip_t { + LWTUNNEL_IP_UNSPEC = 0, + LWTUNNEL_IP_ID = 1, + LWTUNNEL_IP_DST = 2, + LWTUNNEL_IP_SRC = 3, + LWTUNNEL_IP_TTL = 4, + LWTUNNEL_IP_TOS = 5, + LWTUNNEL_IP_FLAGS = 6, + LWTUNNEL_IP_PAD = 7, + LWTUNNEL_IP_OPTS = 8, + __LWTUNNEL_IP_MAX = 9, +}; + +enum lwtunnel_ip6_t { + LWTUNNEL_IP6_UNSPEC = 0, + LWTUNNEL_IP6_ID = 1, + LWTUNNEL_IP6_DST = 2, + LWTUNNEL_IP6_SRC = 3, + LWTUNNEL_IP6_HOPLIMIT = 4, + LWTUNNEL_IP6_TC = 5, + LWTUNNEL_IP6_FLAGS = 6, + LWTUNNEL_IP6_PAD = 7, + LWTUNNEL_IP6_OPTS = 8, + __LWTUNNEL_IP6_MAX = 9, +}; + +enum { + LWTUNNEL_IP_OPTS_UNSPEC = 0, + LWTUNNEL_IP_OPTS_GENEVE = 1, + LWTUNNEL_IP_OPTS_VXLAN = 2, + LWTUNNEL_IP_OPTS_ERSPAN = 3, + __LWTUNNEL_IP_OPTS_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0, + LWTUNNEL_IP_OPT_GENEVE_CLASS = 1, + LWTUNNEL_IP_OPT_GENEVE_TYPE = 2, + LWTUNNEL_IP_OPT_GENEVE_DATA = 3, + __LWTUNNEL_IP_OPT_GENEVE_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_VXLAN_GBP = 1, + __LWTUNNEL_IP_OPT_VXLAN_MAX = 2, +}; + +enum { + LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0, + 
LWTUNNEL_IP_OPT_ERSPAN_VER = 1, + LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2, + LWTUNNEL_IP_OPT_ERSPAN_DIR = 3, + LWTUNNEL_IP_OPT_ERSPAN_HWID = 4, + __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5, +}; + +struct lwtunnel_encap_ops { + int (*build_state)(struct net *, struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *); + void (*destroy_state)(struct lwtunnel_state *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + int (*input)(struct sk_buff *); + int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); + int (*get_encap_size)(struct lwtunnel_state *); + int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); + int (*xmit)(struct sk_buff *); + struct module *owner; +}; + +enum { + IFLA_IPTUN_UNSPEC = 0, + IFLA_IPTUN_LINK = 1, + IFLA_IPTUN_LOCAL = 2, + IFLA_IPTUN_REMOTE = 3, + IFLA_IPTUN_TTL = 4, + IFLA_IPTUN_TOS = 5, + IFLA_IPTUN_ENCAP_LIMIT = 6, + IFLA_IPTUN_FLOWINFO = 7, + IFLA_IPTUN_FLAGS = 8, + IFLA_IPTUN_PROTO = 9, + IFLA_IPTUN_PMTUDISC = 10, + IFLA_IPTUN_6RD_PREFIX = 11, + IFLA_IPTUN_6RD_RELAY_PREFIX = 12, + IFLA_IPTUN_6RD_PREFIXLEN = 13, + IFLA_IPTUN_6RD_RELAY_PREFIXLEN = 14, + IFLA_IPTUN_ENCAP_TYPE = 15, + IFLA_IPTUN_ENCAP_FLAGS = 16, + IFLA_IPTUN_ENCAP_SPORT = 17, + IFLA_IPTUN_ENCAP_DPORT = 18, + IFLA_IPTUN_COLLECT_METADATA = 19, + IFLA_IPTUN_FWMARK = 20, + __IFLA_IPTUN_MAX = 21, +}; + +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); + int (*err_handler)(struct sk_buff *, u32); +}; + +struct geneve_opt { + __be16 opt_class; + u8 type; + u8 r1: 1; + u8 r2: 1; + u8 r3: 1; + u8 length: 5; + u8 opt_data[0]; +}; + +struct vxlan_metadata { + u32 gbp; +}; + +struct erspan_md2 { + __be32 timestamp; + __be16 sgt; + __u8 p: 1; + __u8 ft: 5; + __u8 hwid_upper: 2; + __u8 hwid: 4; + __u8 dir: 1; + __u8 gra: 2; + __u8 o: 1; +}; + +struct erspan_metadata { + int version; + union { + __be32 index; + struct erspan_md2 md2; + } u; +}; + +enum { + UDP_BPF_IPV4 = 0, + UDP_BPF_IPV6 = 1, + UDP_BPF_NUM_PROTS = 2, +}; + +enum br_pkt_type { + BR_PKT_UNICAST = 0, + BR_PKT_MULTICAST = 1, + BR_PKT_BROADCAST = 2, +}; + +struct maple_metadata { + unsigned char end; + unsigned char gap; +}; + +struct maple_pnode; + +struct maple_range_64 { + struct maple_pnode *parent; + long unsigned int pivot[31]; + union { + void *slot[32]; + struct { + void *pad[31]; + struct maple_metadata meta; + }; + }; +}; + +struct maple_arange_64 { + struct maple_pnode *parent; + long unsigned int pivot[20]; + void *slot[21]; + long unsigned int gap[21]; + struct maple_metadata meta; +}; + +struct maple_topiary { + struct maple_pnode *parent; + struct maple_enode *next; +}; + +enum maple_type { + maple_dense = 0, + maple_leaf_64 = 1, + maple_range_64 = 2, + maple_arange_64 = 3, +}; + +struct maple_node { + union { + struct { + struct maple_pnode *parent; + void *slot[63]; + }; + struct { + void *pad; + struct callback_head rcu; + struct maple_enode *piv_parent; + unsigned char parent_slot; + enum maple_type type; + unsigned char slot_len; + unsigned int ma_flags; + }; + struct maple_range_64 mr64; + struct maple_arange_64 ma64; + struct maple_alloc alloc; + }; +}; + +struct ma_topiary { + struct maple_enode *head; + struct maple_enode *tail; + struct maple_tree *mtree; +}; + +struct ma_wr_state { + struct ma_state *mas; + struct maple_node *node; + long unsigned int r_min; + long unsigned int r_max; + enum maple_type type; + unsigned char offset_end; + long 
unsigned int *pivots; + long unsigned int end_piv; + void **slots; + void *entry; + void *content; +}; + +struct trace_event_raw_ma_op { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_read { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + void *node; + char __data[0]; +}; + +struct trace_event_raw_ma_write { + struct trace_entry ent; + const char *fn; + long unsigned int min; + long unsigned int max; + long unsigned int index; + long unsigned int last; + long unsigned int piv; + void *val; + void *node; + char __data[0]; +}; + +struct trace_event_data_offsets_ma_op {}; + +struct trace_event_data_offsets_ma_read {}; + +struct trace_event_data_offsets_ma_write {}; + +typedef void (*btf_trace_ma_op)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_read)(void *, const char *, struct ma_state *); + +typedef void (*btf_trace_ma_write)(void *, const char *, struct ma_state *, long unsigned int, void *); + +struct maple_big_node { + long unsigned int pivot[65]; + union { + struct maple_enode *slot[66]; + struct { + long unsigned int padding[43]; + long unsigned int gap[43]; + }; + }; + unsigned char b_end; + enum maple_type type; +}; + +struct maple_subtree_state { + struct ma_state *orig_l; + struct ma_state *orig_r; + struct ma_state *l; + struct ma_state *m; + struct ma_state *r; + struct ma_topiary *free; + struct ma_topiary *destroy; + struct maple_big_node *bn; +}; + +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY = 1, + IRQ_SET_MASK_OK_DONE = 2, +}; + +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED = 1, +}; + +struct ce_unbind { + struct clock_event_device *ce; + int res; +}; + +union futex_key { + struct { + u64 i_seq; + long unsigned int pgoff; + unsigned int offset; + } shared; + struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; + long unsigned int address; + unsigned int offset; + } private; + struct { + u64 ptr; + long unsigned int word; + unsigned int offset; + } both; +}; + +struct futex_pi_state { + struct list_head list; + struct rt_mutex_base pi_mutex; + struct task_struct *owner; + refcount_t refcount; + union futex_key key; +}; + +struct futex_hash_bucket { + atomic_t waiters; + spinlock_t lock; + struct plist_head chain; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct futex_q; + +typedef void futex_wake_fn(struct wake_q_head *, struct futex_q *); + +struct futex_q { + struct plist_node list; + struct task_struct *task; + spinlock_t *lock_ptr; + futex_wake_fn *wake; + void *wake_data; + long: 32; + union futex_key key; + struct futex_pi_state *pi_state; + struct rt_mutex_waiter *rt_waiter; + union futex_key *requeue_pi_key; + u32 bitset; + atomic_t requeue_state; + long: 32; +}; + +enum futex_access { + FUTEX_READ = 0, + FUTEX_WRITE = 1, +}; + +enum { + Q_REQUEUE_PI_NONE = 0, + Q_REQUEUE_PI_IGNORE = 1, + Q_REQUEUE_PI_IN_PROGRESS = 2, + Q_REQUEUE_PI_WAIT = 3, + Q_REQUEUE_PI_DONE = 4, + Q_REQUEUE_PI_LOCKED = 5, +}; + +enum dynevent_type { + DYNEVENT_TYPE_SYNTH = 1, + DYNEVENT_TYPE_KPROBE = 2, + DYNEVENT_TYPE_NONE = 3, +}; + +struct dynevent_cmd; + +typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *); + +struct dynevent_cmd { + struct seq_buf seq; + const char *event_name; + unsigned int n_fields; + enum dynevent_type type; + dynevent_create_fn_t 
run_command; + void *private_data; +}; + +typedef int (*dynevent_check_arg_fn_t)(void *); + +struct dynevent_arg { + const char *str; + char separator; +}; + +struct dynevent_arg_pair { + const char *lhs; + const char *rhs; + char operator; + char separator; +}; + +enum { + BPF_RB_NO_WAKEUP = 1, + BPF_RB_FORCE_WAKEUP = 2, +}; + +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + +enum { + BPF_RINGBUF_BUSY_BIT = 2147483648, + BPF_RINGBUF_DISCARD_BIT = 1073741824, + BPF_RINGBUF_HDR_SZ = 8, +}; + +struct bpf_dynptr_kern { + void *data; + u32 size; + u32 offset; + long: 32; +}; + +struct bpf_ringbuf { + wait_queue_head_t waitq; + struct irq_work work; + long: 32; + u64 mask; + struct page **pages; + int nr_pages; + long: 32; + long: 32; + long: 32; + long: 32; + raw_spinlock_t spinlock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_t busy; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int consumer_pos; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; 
+ long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long unsigned int producer_pos; + long unsigned int pending_pos; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 
32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + char data[0]; +}; + +struct bpf_ringbuf_map { + struct bpf_map map; + struct bpf_ringbuf *rb; + long: 32; +}; + +struct bpf_ringbuf_hdr { + u32 len; + u32 pg_off; +}; + +typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_ringbuf_reserve_dynptr)(struct bpf_map *, u32, u64, struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_ringbuf_submit_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard_dynptr)(struct bpf_dynptr_kern *, u64); + +typedef u64 (*btf_bpf_user_ringbuf_drain)(struct bpf_map *, void *, void *, u64); + +struct perf_event_mmap_page { + __u32 version; + __u32 compat_version; + __u32 lock; + __u32 index; + __s64 offset; + __u64 time_enabled; + __u64 time_running; + union { + __u64 capabilities; + struct { + __u64 cap_bit0: 1; + __u64 cap_bit0_is_deprecated: 1; + __u64 cap_user_rdpmc: 1; + __u64 cap_user_time: 1; + __u64 cap_user_time_zero: 1; + __u64 cap_user_time_short: 1; + __u64 cap_____res: 58; + }; + }; + __u16 pmc_width; + __u16 time_shift; + __u32 time_mult; + __u64 time_offset; + __u64 time_zero; + __u32 size; + __u32 __reserved_1; + __u64 time_cycles; + __u64 time_mask; + __u8 __reserved[928]; + __u64 data_head; + __u64 data_tail; + 
__u64 data_offset; + __u64 data_size; + __u64 aux_head; + __u64 aux_tail; + __u64 aux_offset; + __u64 aux_size; +}; + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_AUX_OUTPUT_HW_ID = 21, + PERF_RECORD_MAX = 22, +}; + +struct perf_buffer { + refcount_t refcount; + struct callback_head callback_head; + int nr_pages; + int overwrite; + int paused; + atomic_t poll; + local_t head; + unsigned int nest; + local_t events; + local_t wakeup; + local_t lost; + long int watermark; + long int aux_watermark; + spinlock_t event_lock; + struct list_head event_list; + atomic_t mmap_count; + long unsigned int mmap_locked; + struct user_struct *mmap_user; + struct mutex aux_mutex; + long int aux_head; + unsigned int aux_nest; + long int aux_wakeup; + long unsigned int aux_pgoff; + int aux_nr_pages; + int aux_overwrite; + atomic_t aux_mmap_count; + long unsigned int aux_mmap_locked; + void (*free_aux)(void *); + refcount_t aux_refcount; + int aux_in_sampling; + int aux_in_pause_resume; + void **aux_pages; + void *aux_priv; + struct perf_event_mmap_page *user_page; + void *data_pages[0]; +}; + +struct list_lru_memcg { + struct callback_head rcu; + struct list_lru_one node[0]; +}; + +typedef void (*swap_r_func_t)(void *, void *, int, const void *); + +typedef int (*cmp_r_func_t)(const void *, const void *, const void *); + +struct slabobj_ext { + struct obj_cgroup *objcg; + long: 32; +}; + +typedef u64 freelist_full_t; + +typedef union { + struct { + void *freelist; + long unsigned int counter; + }; + freelist_full_t full; +} freelist_aba_t; + +struct kmem_cache_cpu { + union { + struct { + void **freelist; + long unsigned int tid; + }; + freelist_aba_t freelist_tid; + }; + struct slab *slab; + struct slab *partial; + local_lock_t lock; +}; + +struct kmem_cache_node { + spinlock_t list_lock; + long unsigned int nr_partial; + struct list_head partial; + atomic_long_t nr_slabs; + atomic_long_t total_objects; + struct list_head full; +}; + +typedef u32 depot_stack_handle_t; + +struct memory_notify { + long unsigned int altmap_start_pfn; + long unsigned int altmap_nr_pages; + long unsigned int start_pfn; + long unsigned int nr_pages; + int status_change_nid_normal; + int status_change_nid; +}; + +struct partial_context { + gfp_t flags; + unsigned int orig_size; + void *object; +}; + +struct track { + long unsigned int addr; + depot_stack_handle_t handle; + int cpu; + int pid; + long unsigned int when; +}; + +enum track_item { + TRACK_ALLOC = 0, + TRACK_FREE = 1, +}; + +enum stat_item { + ALLOC_FASTPATH = 0, + ALLOC_SLOWPATH = 1, + FREE_FASTPATH = 2, + FREE_SLOWPATH = 3, + FREE_FROZEN = 4, + FREE_ADD_PARTIAL = 5, + FREE_REMOVE_PARTIAL = 6, + ALLOC_FROM_PARTIAL = 7, + ALLOC_SLAB = 8, + ALLOC_REFILL = 9, + ALLOC_NODE_MISMATCH = 10, + FREE_SLAB = 11, + CPUSLAB_FLUSH = 12, + DEACTIVATE_FULL = 13, + DEACTIVATE_EMPTY = 14, + DEACTIVATE_TO_HEAD = 15, + DEACTIVATE_TO_TAIL = 16, + DEACTIVATE_REMOTE_FREES = 17, + 
DEACTIVATE_BYPASS = 18, + ORDER_FALLBACK = 19, + CMPXCHG_DOUBLE_CPU_FAIL = 20, + CMPXCHG_DOUBLE_FAIL = 21, + CPU_PARTIAL_ALLOC = 22, + CPU_PARTIAL_FREE = 23, + CPU_PARTIAL_NODE = 24, + CPU_PARTIAL_DRAIN = 25, + NR_SLUB_STAT_ITEMS = 26, +}; + +struct slub_flush_work { + struct work_struct work; + struct kmem_cache *s; + bool skip; +}; + +struct detached_freelist { + struct slab *slab; + void *tail; + void *freelist; + int cnt; + struct kmem_cache *s; +}; + +struct location { + depot_stack_handle_t handle; + long unsigned int count; + long unsigned int addr; + long unsigned int waste; + long long int sum_time; + long int min_time; + long int max_time; + long int min_pid; + long int max_pid; + long unsigned int cpus[1]; + nodemask_t nodes; +}; + +struct loc_track { + long unsigned int max; + long unsigned int count; + struct location *loc; + long: 32; + loff_t idx; +}; + +enum slab_stat_type { + SL_ALL = 0, + SL_PARTIAL = 1, + SL_CPU = 2, + SL_OBJECTS = 3, + SL_TOTAL = 4, +}; + +struct slab_attribute { + struct attribute attr; + ssize_t (*show)(struct kmem_cache *, char *); + ssize_t (*store)(struct kmem_cache *, const char *, size_t); +}; + +struct saved_alias { + struct kmem_cache *s; + const char *name; + struct saved_alias *next; +}; + +struct wb_writeback_work { + long int nr_pages; + struct super_block *sb; + enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages: 1; + unsigned int for_kupdate: 1; + unsigned int range_cyclic: 1; + unsigned int for_background: 1; + unsigned int for_sync: 1; + unsigned int auto_free: 1; + enum wb_reason reason; + struct list_head list; + struct wb_completion *done; +}; + +struct trace_event_raw_writeback_folio_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_writeback_dirty_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_inode_foreign_history { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t cgroup_ino; + unsigned int history; + char __data[0]; +}; + +struct trace_event_raw_inode_switch_wbs { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t old_cgroup_ino; + ino_t new_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_track_foreign_dirty { + struct trace_entry ent; + char name[32]; + u64 bdi_id; + ino_t ino; + unsigned int memcg_id; + ino_t cgroup_ino; + ino_t page_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_flush_foreign { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + unsigned int frn_bdi_id; + unsigned int frn_memcg_id; + char __data[0]; +}; + +struct trace_event_raw_writeback_write_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + int sync_mode; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_work_class { + struct trace_entry ent; + char name[32]; + long int nr_pages; + dev_t sb_dev; + int sync_mode; + int for_kupdate; + int range_cyclic; + int for_background; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_pages_written { + struct trace_entry ent; + long int pages; + char __data[0]; +}; + +struct trace_event_raw_writeback_class { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_bdi_register { + struct trace_entry ent; + char name[32]; + char __data[0]; +}; + +struct 
trace_event_raw_wbc_class { + struct trace_entry ent; + char name[32]; + long int nr_to_write; + long int pages_skipped; + int sync_mode; + int for_kupdate; + int for_background; + int for_reclaim; + int range_cyclic; + long int range_start; + long int range_end; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_queue_io { + struct trace_entry ent; + char name[32]; + long unsigned int older; + long int age; + int moved; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_global_dirty_state { + struct trace_entry ent; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int background_thresh; + long unsigned int dirty_thresh; + long unsigned int dirty_limit; + long unsigned int nr_dirtied; + long unsigned int nr_written; + char __data[0]; +}; + +struct trace_event_raw_bdi_dirty_ratelimit { + struct trace_entry ent; + char bdi[32]; + long unsigned int write_bw; + long unsigned int avg_write_bw; + long unsigned int dirty_rate; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + long unsigned int balanced_dirty_ratelimit; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_balance_dirty_pages { + struct trace_entry ent; + char bdi[32]; + long unsigned int limit; + long unsigned int setpoint; + long unsigned int dirty; + long unsigned int bdi_setpoint; + long unsigned int bdi_dirty; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + unsigned int dirtied; + unsigned int dirtied_pause; + long unsigned int paused; + long int pause; + long unsigned int period; + long int think; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_sb_inodes_requeue { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_single_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + long unsigned int writeback_index; + long int nr_to_write; + long unsigned int wrote; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_inode_template { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int state; + __u16 mode; + long unsigned int dirtied_when; + char __data[0]; +}; + +struct trace_event_data_offsets_writeback_folio_template {}; + +struct trace_event_data_offsets_writeback_dirty_inode_template {}; + +struct trace_event_data_offsets_inode_foreign_history {}; + +struct trace_event_data_offsets_inode_switch_wbs {}; + +struct trace_event_data_offsets_track_foreign_dirty {}; + +struct trace_event_data_offsets_flush_foreign {}; + +struct trace_event_data_offsets_writeback_write_inode_template {}; + +struct trace_event_data_offsets_writeback_work_class {}; + +struct trace_event_data_offsets_writeback_pages_written {}; + +struct trace_event_data_offsets_writeback_class {}; + +struct trace_event_data_offsets_writeback_bdi_register {}; + +struct trace_event_data_offsets_wbc_class {}; + +struct trace_event_data_offsets_writeback_queue_io {}; + +struct trace_event_data_offsets_global_dirty_state {}; + +struct trace_event_data_offsets_bdi_dirty_ratelimit {}; + +struct trace_event_data_offsets_balance_dirty_pages {}; + +struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; + +struct trace_event_data_offsets_writeback_single_inode_template {}; + +struct 
trace_event_data_offsets_writeback_inode_template {}; + +typedef void (*btf_trace_writeback_dirty_folio)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_folio_wait_writeback)(void *, struct folio *, struct address_space *); + +typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, struct writeback_control *, unsigned int); + +typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, struct bdi_writeback *); + +typedef void (*btf_trace_track_foreign_dirty)(void *, struct folio *, struct bdi_writeback *); + +typedef void (*btf_trace_flush_foreign)(void *, struct bdi_writeback *, unsigned int, unsigned int); + +typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_queue)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_pages_written)(void *, long int); + +typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *); + +typedef void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *); + +typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, struct backing_dev_info *); + +typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, struct wb_writeback_work *, long unsigned int, int); + +typedef void (*btf_trace_global_dirty_state)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long int, long unsigned int); + +typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *); + +typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *); + +typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *); + +typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *); + +typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *); + +typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *); + +struct inode_switch_wbs_context { + struct rcu_work work; + struct bdi_writeback *new_wb; + struct inode *inodes[0]; +}; + +struct fsuuid { + __u32 
fsu_len; + __u32 fsu_flags; + __u8 fsu_uuid[0]; +}; + +struct move_extent { + __u32 reserved; + __u32 donor_fd; + __u64 orig_start; + __u64 donor_start; + __u64 len; + __u64 moved_len; +}; + +struct ext4_new_group_input { + __u32 group; + long: 32; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 unused; +}; + +struct fsmap { + __u32 fmr_device; + __u32 fmr_flags; + __u64 fmr_physical; + __u64 fmr_owner; + __u64 fmr_offset; + __u64 fmr_length; + __u64 fmr_reserved[3]; +}; + +struct fsmap_head { + __u32 fmh_iflags; + __u32 fmh_oflags; + __u32 fmh_count; + __u32 fmh_entries; + __u64 fmh_reserved[6]; + struct fsmap fmh_keys[2]; + struct fsmap fmh_recs[0]; +}; + +struct ext4_fsmap { + struct list_head fmr_list; + dev_t fmr_device; + uint32_t fmr_flags; + uint64_t fmr_physical; + uint64_t fmr_owner; + uint64_t fmr_length; +}; + +struct ext4_fsmap_head { + uint32_t fmh_iflags; + uint32_t fmh_oflags; + unsigned int fmh_count; + unsigned int fmh_entries; + struct ext4_fsmap fmh_keys[2]; +}; + +typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *); + +typedef void ext4_update_sb_callback(struct ext4_super_block *, const void *); + +struct getfsmap_info { + struct super_block *gi_sb; + struct fsmap_head *gi_data; + unsigned int gi_idx; + __u32 gi_last_flags; +}; + +struct isofs_sb_info { + long unsigned int s_ninodes; + long unsigned int s_nzones; + long unsigned int s_firstdatazone; + long unsigned int s_log_zone_size; + long unsigned int s_max_size; + int s_rock_offset; + s32 s_sbsector; + unsigned char s_joliet_level; + unsigned char s_mapping; + unsigned char s_check; + unsigned char s_session; + unsigned int s_high_sierra: 1; + unsigned int s_rock: 2; + unsigned int s_cruft: 1; + unsigned int s_nocompress: 1; + unsigned int s_hide: 1; + unsigned int s_showassoc: 1; + unsigned int s_overriderockperm: 1; + unsigned int s_uid_set: 1; + unsigned int s_gid_set: 1; + umode_t s_fmode; + umode_t s_dmode; + kgid_t s_gid; + kuid_t s_uid; + struct nls_table *s_nls_iocharset; +}; + +struct fuse_kstatfs { + uint64_t blocks; + uint64_t bfree; + uint64_t bavail; + uint64_t files; + uint64_t ffree; + uint32_t bsize; + uint32_t namelen; + uint32_t frsize; + uint32_t padding; + uint32_t spare[6]; +}; + +struct fuse_statfs_out { + struct fuse_kstatfs st; +}; + +struct fuse_init_in { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint32_t flags2; + uint32_t unused[11]; +}; + +struct fuse_init_out { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint16_t max_background; + uint16_t congestion_threshold; + uint32_t max_write; + uint32_t time_gran; + uint16_t max_pages; + uint16_t map_alignment; + uint32_t flags2; + uint32_t max_stack_depth; + uint32_t unused[6]; +}; + +struct fuse_syncfs_in { + uint64_t padding; +}; + +struct fuse_pqueue { + unsigned int connected; + spinlock_t lock; + struct list_head *processing; + struct list_head io; +}; + +struct fuse_dev { + struct fuse_conn *fc; + struct fuse_pqueue pq; + struct list_head entry; +}; + +enum fuse_dax_mode { + FUSE_DAX_INODE_DEFAULT = 0, + FUSE_DAX_ALWAYS = 1, + FUSE_DAX_NEVER = 2, + FUSE_DAX_INODE_USER = 3, +}; + +struct fuse_fs_context { + int fd; + struct file *file; + unsigned int rootmode; + kuid_t user_id; + kgid_t group_id; + bool is_bdev: 1; + bool fd_present: 1; + bool rootmode_present: 1; + bool user_id_present: 1; + bool group_id_present: 1; + bool default_permissions: 1; + bool allow_other: 1; + bool 
destroy: 1; + bool no_control: 1; + bool no_force_umount: 1; + bool legacy_opts_show: 1; + enum fuse_dax_mode dax_mode; + unsigned int max_read; + unsigned int blksize; + const char *subtype; + struct dax_device *dax_dev; + void **fudptr; +}; + +enum { + OPT_SOURCE = 0, + OPT_SUBTYPE = 1, + OPT_FD = 2, + OPT_ROOTMODE = 3, + OPT_USER_ID = 4, + OPT_GROUP_ID = 5, + OPT_DEFAULT_PERMISSIONS = 6, + OPT_ALLOW_OTHER = 7, + OPT_MAX_READ = 8, + OPT_BLKSIZE = 9, + OPT_ERR = 10, +}; + +struct fuse_inode_handle { + u64 nodeid; + u32 generation; + long: 32; +}; + +struct fuse_init_args { + struct fuse_args args; + struct fuse_init_in in; + struct fuse_init_out out; +}; + +struct btrfs_qgroup_extent_record { + u64 num_bytes; + u32 data_rsv; + long: 32; + u64 data_rsv_refroot; + struct ulist *old_roots; + long: 32; +}; + +typedef unsigned int __kernel_mode_t; + +typedef unsigned int __kernel_uid_t; + +typedef unsigned int __kernel_gid_t; + +typedef __kernel_long_t __kernel_old_time_t; + +struct ipc_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + short unsigned int seq; +}; + +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned int seq; + unsigned int __pad1; + long long unsigned int __unused1; + long long unsigned int __unused2; +}; + +struct kern_ipc_perm { + spinlock_t lock; + bool deleted; + int id; + key_t key; + kuid_t uid; + kgid_t gid; + kuid_t cuid; + kgid_t cgid; + umode_t mode; + long unsigned int seq; + void *security; + struct rhash_head khtnode; + struct callback_head rcu; + refcount_t refcount; + long: 32; +}; + +struct sem; + +struct sem_queue; + +struct sem_undo; + +struct semid_ds { + struct ipc_perm sem_perm; + __kernel_old_time_t sem_otime; + __kernel_old_time_t sem_ctime; + struct sem *sem_base; + struct sem_queue *sem_pending; + struct sem_queue **sem_pending_last; + struct sem_undo *undo; + short unsigned int sem_nsems; +}; + +struct sem { + int semval; + struct pid *sempid; + spinlock_t lock; + struct list_head pending_alter; + struct list_head pending_const; + long: 32; + time64_t sem_otime; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct sem_queue { + struct list_head list; + struct task_struct *sleeper; + struct sem_undo *undo; + struct pid *pid; + int status; + struct sembuf *sops; + struct sembuf *blocking; + int nsops; + bool alter; + bool dupsop; +}; + +struct sem_undo { + struct list_head list_proc; + struct callback_head rcu; + struct sem_undo_list *ulp; + struct list_head list_id; + int semid; + short int semadj[0]; +}; + +struct semid64_ds { + struct ipc64_perm sem_perm; + long unsigned int sem_otime_high; + long unsigned int sem_otime; + long unsigned int sem_ctime_high; + long unsigned int sem_ctime; + long unsigned int sem_nsems; + long unsigned int __unused3; + long unsigned int __unused4; + long: 32; +}; + +struct seminfo { + int semmap; + int semmni; + int semmns; + int semmnu; + int semmsl; + int semopm; + int semume; + int semusz; + int semvmx; + int semaem; +}; + +struct sem_undo_list { + refcount_t refcnt; + spinlock_t lock; + struct list_head list_proc; +}; + +struct ipc_params { + key_t key; + int flg; + union { + size_t size; + int nsems; + } u; +}; + +struct ipc_ops { + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct 
kern_ipc_perm *, struct ipc_params *); +}; + +struct sem_array { + struct kern_ipc_perm sem_perm; + time64_t sem_ctime; + struct list_head pending_alter; + struct list_head pending_const; + struct list_head list_id; + int sem_nsems; + int complex_count; + unsigned int use_global_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct sem sems[0]; +}; + +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +struct rsa_mpi_key { + MPI n; + MPI e; + MPI d; + MPI p; + MPI q; + MPI dp; + MPI dq; + MPI qinv; +}; + +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; + __u8 statelen; + __u8 blocklen_bytes; + char cra_name[128]; + char backend_cra_name[128]; +}; + +struct drbg_state; + +struct drbg_state_ops { + int (*update)(struct drbg_state *, struct list_head *, int); + int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); + int (*crypto_init)(struct drbg_state *); + int (*crypto_fini)(struct drbg_state *); +}; + +enum drbg_seed_state { + DRBG_SEED_STATE_UNSEEDED = 0, + DRBG_SEED_STATE_PARTIAL = 1, + DRBG_SEED_STATE_FULL = 2, +}; + +struct drbg_state { + struct mutex drbg_mutex; + unsigned char *V; + unsigned char *Vbuf; + unsigned char *C; + unsigned char *Cbuf; + size_t reseed_ctr; + size_t reseed_threshold; + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; + struct crypto_skcipher *ctr_handle; + struct skcipher_request *ctr_req; + __u8 *outscratchpadbuf; + __u8 *outscratchpad; + struct crypto_wait ctr_wait; + struct scatterlist sg_in; + struct scatterlist sg_out; + enum drbg_seed_state seeded; + long unsigned int last_seed_time; + bool pr; + bool fips_primed; + unsigned char *prev; + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; +}; + +enum drbg_prefixes { + DRBG_PREFIX0 = 0, + DRBG_PREFIX1 = 1, + DRBG_PREFIX2 = 2, + DRBG_PREFIX3 = 3, +}; + +struct sdesc { + struct shash_desc shash; + char ctx[0]; +}; + +struct blkpg_ioctl_arg { + int op; + int flags; + int datalen; + void *data; +}; + +struct blkpg_partition { + long long int start; + long long int length; + int pno; + char devname[64]; + char volname[64]; + long: 32; +}; + +struct pr_reservation { + __u64 key; + __u32 type; + __u32 flags; +}; + +struct pr_registration { + __u64 old_key; + __u64 new_key; + __u32 flags; + __u32 __pad; +}; + +struct pr_preempt { + __u64 old_key; + __u64 new_key; + __u32 type; + __u32 flags; +}; + +struct pr_clear { + __u64 key; + __u32 flags; + __u32 __pad; +}; + +struct pr_keys { + u32 generation; + u32 num_keys; + u64 keys[0]; +}; + +struct pr_held_reservation { + u64 key; + u32 generation; + enum pr_type type; +}; + +struct blk_iou_cmd { + int res; + bool nowait; +}; + +struct io_notif_data { + struct file *file; + struct ubuf_info uarg; + struct io_notif_data *next; + struct io_notif_data *head; + unsigned int account_pages; + bool zc_report; + bool zc_used; + bool zc_copied; +}; + +enum msi_desc_filter { + MSI_DESC_ALL = 0, + MSI_DESC_NOTASSOCIATED = 1, + MSI_DESC_ASSOCIATED = 2, +}; + +struct ldsem_waiter { + struct list_head list; + struct task_struct *task; +}; + +struct trace_event_raw_devres { + struct 
trace_entry ent; + u32 __data_loc_devname; + struct device *dev; + const char *op; + void *node; + u32 __data_loc_name; + size_t size; + char __data[0]; +}; + +struct trace_event_data_offsets_devres { + u32 devname; + const void *devname_ptr_; + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_devres_log)(void *, struct device *, const char *, void *, const char *, size_t); + +enum { + NVME_AEN_BIT_NS_ATTR = 8, + NVME_AEN_BIT_FW_ACT = 9, + NVME_AEN_BIT_ANA_CHANGE = 11, + NVME_AEN_BIT_DISC_CHANGE = 31, +}; + +enum nvme_opcode { + nvme_cmd_flush = 0, + nvme_cmd_write = 1, + nvme_cmd_read = 2, + nvme_cmd_write_uncor = 4, + nvme_cmd_compare = 5, + nvme_cmd_write_zeroes = 8, + nvme_cmd_dsm = 9, + nvme_cmd_verify = 12, + nvme_cmd_resv_register = 13, + nvme_cmd_resv_report = 14, + nvme_cmd_resv_acquire = 17, + nvme_cmd_resv_release = 21, + nvme_cmd_zone_mgmt_send = 121, + nvme_cmd_zone_mgmt_recv = 122, + nvme_cmd_zone_append = 125, + nvme_cmd_vendor_start = 128, +}; + +enum nvme_admin_opcode { + nvme_admin_delete_sq = 0, + nvme_admin_create_sq = 1, + nvme_admin_get_log_page = 2, + nvme_admin_delete_cq = 4, + nvme_admin_create_cq = 5, + nvme_admin_identify = 6, + nvme_admin_abort_cmd = 8, + nvme_admin_set_features = 9, + nvme_admin_get_features = 10, + nvme_admin_async_event = 12, + nvme_admin_ns_mgmt = 13, + nvme_admin_activate_fw = 16, + nvme_admin_download_fw = 17, + nvme_admin_dev_self_test = 20, + nvme_admin_ns_attach = 21, + nvme_admin_keep_alive = 24, + nvme_admin_directive_send = 25, + nvme_admin_directive_recv = 26, + nvme_admin_virtual_mgmt = 28, + nvme_admin_nvme_mi_send = 29, + nvme_admin_nvme_mi_recv = 30, + nvme_admin_dbbuf = 124, + nvme_admin_format_nvm = 128, + nvme_admin_security_send = 129, + nvme_admin_security_recv = 130, + nvme_admin_sanitize_nvm = 132, + nvme_admin_get_lba_status = 134, + nvme_admin_vendor_start = 192, +}; + +enum nvmf_capsule_command { + nvme_fabrics_type_property_set = 0, + nvme_fabrics_type_connect = 1, + nvme_fabrics_type_property_get = 4, + nvme_fabrics_type_auth_send = 5, + nvme_fabrics_type_auth_receive = 6, +}; + +typedef __u32 nvme_submit_flags_t; + +struct fixed_phy_status { + int link; + int speed; + int duplex; + int pause; + int asym_pause; +}; + +struct usb_qualifier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __u8 bNumConfigurations; + __u8 bRESERVED; +}; + +struct usb_set_sel_req { + __u8 u1_sel; + __u8 u1_pel; + __le16 u2_sel; + __le16 u2_pel; +}; + +enum usb_port_connect_type { + USB_PORT_CONNECT_TYPE_UNKNOWN = 0, + USB_PORT_CONNECT_TYPE_HOT_PLUG = 1, + USB_PORT_CONNECT_TYPE_HARD_WIRED = 2, + USB_PORT_NOT_USED = 3, +}; + +struct usbdevfs_hub_portinfo { + char nports; + char port[127]; +}; + +struct usb_port_status { + __le16 wPortStatus; + __le16 wPortChange; + __le32 dwExtPortStatus; +}; + +struct usb_hub_status { + __le16 wHubStatus; + __le16 wHubChange; +}; + +enum hub_led_mode { + INDICATOR_AUTO = 0, + INDICATOR_CYCLE = 1, + INDICATOR_GREEN_BLINK = 2, + INDICATOR_GREEN_BLINK_OFF = 3, + INDICATOR_AMBER_BLINK = 4, + INDICATOR_AMBER_BLINK_OFF = 5, + INDICATOR_ALT_BLINK = 6, + INDICATOR_ALT_BLINK_OFF = 7, +} __attribute__((mode(byte))); + +struct usb_tt_clear { + struct list_head clear_list; + unsigned int tt; + u16 devinfo; + struct usb_hcd *hcd; + struct usb_host_endpoint *ep; +}; + +struct typec_connector { + void (*attach)(struct typec_connector *, struct device *); + void (*deattach)(struct 
typec_connector *, struct device *); +}; + +typedef u32 usb_port_location_t; + +struct usb_port; + +struct usb_hub { + struct device *intfdev; + struct usb_device *hdev; + struct kref kref; + struct urb *urb; + u8 (*buffer)[8]; + union { + struct usb_hub_status hub; + struct usb_port_status port; + } *status; + struct mutex status_mutex; + int error; + int nerrors; + long unsigned int event_bits[1]; + long unsigned int change_bits[1]; + long unsigned int removed_bits[1]; + long unsigned int wakeup_bits[1]; + long unsigned int power_bits[1]; + long unsigned int child_usage_bits[1]; + long unsigned int warm_reset_bits[1]; + struct usb_hub_descriptor *descriptor; + struct usb_tt tt; + unsigned int mA_per_port; + unsigned int wakeup_enabled_descendants; + unsigned int limited_power: 1; + unsigned int quiescing: 1; + unsigned int disconnected: 1; + unsigned int in_reset: 1; + unsigned int quirk_disable_autosuspend: 1; + unsigned int quirk_check_port_auto_suspend: 1; + unsigned int has_indicators: 1; + u8 indicator[31]; + struct delayed_work leds; + struct delayed_work init_work; + struct work_struct events; + spinlock_t irq_urb_lock; + struct timer_list irq_urb_retry; + struct usb_port **ports; + struct list_head onboard_devs; +}; + +struct usb_port { + struct usb_device *child; + long: 32; + struct device dev; + struct usb_dev_state *port_owner; + struct usb_port *peer; + struct typec_connector *connector; + struct dev_pm_qos_request *req; + enum usb_port_connect_type connect_type; + enum usb_device_state state; + struct kernfs_node *state_kn; + usb_port_location_t location; + struct mutex status_lock; + u32 over_current_count; + u8 portnum; + u32 quirks; + unsigned int early_stop: 1; + unsigned int ignore_event: 1; + unsigned int is_superspeed: 1; + unsigned int usb3_lpm_u1_permit: 1; + unsigned int usb3_lpm_u2_permit: 1; + long: 32; +}; + +enum hub_activation_type { + HUB_INIT = 0, + HUB_INIT2 = 1, + HUB_INIT3 = 2, + HUB_POST_RESET = 3, + HUB_RESUME = 4, + HUB_RESET_RESUME = 5, +}; + +enum hub_quiescing_type { + HUB_DISCONNECT = 0, + HUB_PRE_RESET = 1, + HUB_SUSPEND = 2, +}; + +struct dm_kobject_holder { + struct kobject kobj; + struct completion completion; +}; + +struct mbox_client { + struct device *dev; + bool tx_block; + long unsigned int tx_tout; + bool knows_txdone; + void (*rx_callback)(struct mbox_client *, void *); + void (*tx_prepare)(struct mbox_client *, void *); + void (*tx_done)(struct mbox_client *, void *, int); +}; + +struct mbox_chan; + +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *, void *); + int (*flush)(struct mbox_chan *, long unsigned int); + int (*startup)(struct mbox_chan *); + void (*shutdown)(struct mbox_chan *); + bool (*last_tx_done)(struct mbox_chan *); + bool (*peek_data)(struct mbox_chan *); +}; + +struct mbox_controller; + +struct mbox_chan { + struct mbox_controller *mbox; + unsigned int txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned int msg_count; + unsigned int msg_free; + void *msg_data[20]; + spinlock_t lock; + void *con_priv; +}; + +struct mbox_controller { + struct device *dev; + const struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned int txpoll_period; + struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); + long: 32; + struct hrtimer poll_hrt; + spinlock_t poll_hrt_lock; + struct list_head node; + long: 32; +}; + +struct fib_notifier_net { + struct list_head 
fib_notifier_ops; + struct atomic_notifier_head fib_chain; +}; + +struct clock_identity { + u8 id[8]; +}; + +struct port_identity { + struct clock_identity clock_identity; + __be16 port_number; +}; + +struct ptp_header { + u8 tsmt; + u8 ver; + __be16 message_length; + u8 domain_number; + u8 reserved1; + u8 flag_field[2]; + __be64 correction; + __be32 reserved2; + struct port_identity source_port_identity; + __be16 sequence_id; + u8 control; + u8 log_message_interval; +} __attribute__((packed)); + +struct rss_req_info { + struct ethnl_req_info base; + u32 rss_context; +}; + +struct rss_reply_data { + struct ethnl_reply_data base; + bool no_key_fields; + u32 indir_size; + u32 hkey_size; + u32 hfunc; + u32 input_xfrm; + u32 *indir_table; + u8 *hkey; +}; + +struct rss_nl_dump_ctx { + long unsigned int ifindex; + long unsigned int ctx_idx; + unsigned int match_ifindex; + unsigned int start_ctx; +}; + +enum ethtool_c33_pse_ext_state { + ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION = 1, + ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID = 2, + ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE = 3, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED = 4, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM = 5, + ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED = 6, + ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE = 7, + ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE = 8, + ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED = 9, +}; + +enum ethtool_c33_pse_ext_substate_error_condition { + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT = 3, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON = 4, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS = 5, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF = 6, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN = 7, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE = 8, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP = 9, +}; + +enum ethtool_c33_pse_ext_substate_mr_pse_enable { + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE = 1, +}; + +enum ethtool_c33_pse_ext_substate_option_detect_ted { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR = 2, +}; + +enum ethtool_c33_pse_ext_substate_option_vport_lim { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION = 3, +}; + +enum ethtool_c33_pse_ext_substate_ovld_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD = 1, +}; + +enum ethtool_c33_pse_ext_substate_power_not_available { + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET = 2, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT = 3, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT = 4, +}; + +enum ethtool_c33_pse_ext_substate_short_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION = 1, +}; + +enum ethtool_c33_pse_admin_state { + ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED = 2, + ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED = 3, +}; + +enum ethtool_c33_pse_pw_d_status { + 
ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED = 2, + ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING = 3, + ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING = 4, + ETHTOOL_C33_PSE_PW_D_STATUS_TEST = 5, + ETHTOOL_C33_PSE_PW_D_STATUS_FAULT = 6, + ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT = 7, +}; + +enum ethtool_podl_pse_admin_state { + ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED = 2, + ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED = 3, +}; + +enum ethtool_podl_pse_pw_d_status { + ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED = 2, + ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING = 3, + ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING = 4, + ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP = 5, + ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE = 6, + ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR = 7, +}; + +struct ethtool_c33_pse_ext_state_info { + enum ethtool_c33_pse_ext_state c33_pse_ext_state; + union { + enum ethtool_c33_pse_ext_substate_error_condition error_condition; + enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable; + enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted; + enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim; + enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected; + enum ethtool_c33_pse_ext_substate_power_not_available power_not_available; + enum ethtool_c33_pse_ext_substate_short_detected short_detected; + u32 __c33_pse_ext_substate; + }; +}; + +struct ethtool_c33_pse_pw_limit_range { + u32 min; + u32 max; +}; + +struct pse_control_config { + enum ethtool_podl_pse_admin_state podl_admin_control; + enum ethtool_c33_pse_admin_state c33_admin_control; +}; + +struct pse_control_status { + enum ethtool_podl_pse_admin_state podl_admin_state; + enum ethtool_podl_pse_pw_d_status podl_pw_status; + enum ethtool_c33_pse_admin_state c33_admin_state; + enum ethtool_c33_pse_pw_d_status c33_pw_status; + u32 c33_pw_class; + u32 c33_actual_pw; + struct ethtool_c33_pse_ext_state_info c33_ext_state_info; + u32 c33_avail_pw_limit; + struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges; + u32 c33_pw_limit_nb_ranges; +}; + +enum { + ETHTOOL_A_C33_PSE_PW_LIMIT_UNSPEC = 0, + ETHTOOL_A_C33_PSE_PW_LIMIT_MIN = 1, + ETHTOOL_A_C33_PSE_PW_LIMIT_MAX = 2, +}; + +struct pse_reply_data { + struct ethnl_reply_data base; + struct pse_control_status status; +}; + +enum nft_bitwise_ops { + NFT_BITWISE_MASK_XOR = 0, + NFT_BITWISE_LSHIFT = 1, + NFT_BITWISE_RSHIFT = 2, + NFT_BITWISE_AND = 3, + NFT_BITWISE_OR = 4, + NFT_BITWISE_XOR = 5, +}; + +enum nft_bitwise_attributes { + NFTA_BITWISE_UNSPEC = 0, + NFTA_BITWISE_SREG = 1, + NFTA_BITWISE_DREG = 2, + NFTA_BITWISE_LEN = 3, + NFTA_BITWISE_MASK = 4, + NFTA_BITWISE_XOR = 5, + NFTA_BITWISE_OP = 6, + NFTA_BITWISE_DATA = 7, + NFTA_BITWISE_SREG2 = 8, + __NFTA_BITWISE_MAX = 9, +}; + +struct nft_bitwise { + u8 sreg; + u8 sreg2; + u8 dreg; + enum nft_bitwise_ops op: 8; + u8 len; + struct nft_data mask; + struct nft_data xor; + struct nft_data data; +}; + +struct rt_cache_stat { + unsigned int in_slow_tot; + unsigned int in_slow_mc; + unsigned int in_no_route; + unsigned int in_brd; + unsigned int in_martian_dst; + unsigned int in_martian_src; + unsigned int out_slow_tot; + unsigned int out_slow_mc; +}; + +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +}; + +typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *); + +typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *, const struct inet6_skb_parm *); + +enum 
flowlabel_reflect { + FLOWLABEL_REFLECT_ESTABLISHED = 1, + FLOWLABEL_REFLECT_TCP_RESET = 2, + FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, +}; + +struct icmpv6_msg { + struct sk_buff *skb; + int offset; + uint8_t type; +}; + +struct icmp6_err { + int err; + int fatal; +}; + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +typedef struct { + __u32 u[4]; +} __vector128; + +struct sigaltstack { + void *ss_sp; + int ss_flags; + __kernel_size_t ss_size; +}; + +typedef struct sigaltstack stack_t; + +struct sig_dbg_op { + int dbg_type; + long unsigned int dbg_value; +}; + +typedef struct siginfo siginfo_t; + +typedef double elf_fpreg_t; + +typedef elf_fpreg_t elf_fpregset_t[33]; + +typedef __vector128 elf_vrreg_t; + +typedef elf_vrreg_t elf_vrregset_t[33]; + +struct mcontext { + elf_gregset_t mc_gregs; + elf_fpregset_t mc_fregs; + long unsigned int mc_pad[2]; + elf_vrregset_t mc_vregs; +}; + +struct ucontext { + long unsigned int uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + int uc_pad[7]; + struct mcontext *uc_regs; + sigset_t uc_sigmask; + int uc_maskext[30]; + int uc_pad2[3]; + struct mcontext uc_mcontext; +}; + +struct sigcontext { + long unsigned int _unused[4]; + int signal; + long unsigned int handler; + long unsigned int oldmask; + struct user_pt_regs *regs; +}; + +struct sigframe { + struct sigcontext sctx; + struct mcontext mctx; + int abigap[56]; +}; + +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; + int abigap[56]; +}; + +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + PERF_TYPE_MAX = 6, +}; + +enum { + pmac_nvram_OF = 0, + pmac_nvram_XPRAM = 1, + pmac_nvram_NR = 2, +}; + +enum sys_ctrler_kind { + SYS_CTRLER_UNKNOWN = 0, + SYS_CTRLER_CUDA = 1, + SYS_CTRLER_PMU = 2, + SYS_CTRLER_SMU = 3, +}; + +typedef enum sys_ctrler_kind sys_ctrler_t; + +struct chrp_header { + u8 signature; + u8 cksum; + u16 len; + char name[12]; + u8 data[0]; +}; + +struct core99_header { + struct chrp_header hdr; + u32 adler; + u32 generation; + u32 reserved[2]; +}; + +enum die_val { + DIE_OOPS = 1, + DIE_IABR_MATCH = 2, + DIE_DABR_MATCH = 3, + DIE_BPT = 4, + DIE_SSTEP = 5, +}; + +struct die_args { + struct pt_regs *regs; + const char *str; + long int err; + int trapnr; + int signr; +}; + +struct trace_event_raw_notifier_info { + struct trace_entry ent; + void *cb; + char __data[0]; +}; + +struct trace_event_data_offsets_notifier_info {}; + +typedef void (*btf_trace_notifier_register)(void *, void *); + +typedef void (*btf_trace_notifier_unregister)(void *, void *); + +typedef void (*btf_trace_notifier_run)(void *, void *); + +struct rt_wake_q_head { + struct wake_q_head head; + struct task_struct *rtlock_task; +}; + +enum kernel_load_data_id { + LOADING_UNKNOWN = 0, + LOADING_FIRMWARE = 1, + LOADING_MODULE = 2, + LOADING_KEXEC_IMAGE = 3, + LOADING_KEXEC_INITRAMFS = 4, + LOADING_POLICY = 5, + LOADING_X509_CERTIFICATE = 6, + LOADING_MAX_ID = 7, +}; + +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED = 0, + CPUFREQ_TABLE_SORTED_ASCENDING = 1, + CPUFREQ_TABLE_SORTED_DESCENDING = 2, +}; + +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + unsigned int transition_latency; +}; + +struct cpufreq_stats; + +struct cpufreq_governor; + +struct cpufreq_frequency_table; + +struct cpufreq_policy { + cpumask_var_t cpus; + cpumask_var_t related_cpus; + cpumask_var_t real_cpus; + 
unsigned int shared_type; + unsigned int cpu; + struct clk *clk; + struct cpufreq_cpuinfo cpuinfo; + unsigned int min; + unsigned int max; + unsigned int cur; + unsigned int suspend_freq; + unsigned int policy; + unsigned int last_policy; + struct cpufreq_governor *governor; + void *governor_data; + char last_governor[16]; + struct work_struct update; + struct freq_constraints constraints; + struct freq_qos_request *min_freq_req; + struct freq_qos_request *max_freq_req; + struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; + struct list_head policy_list; + struct kobject kobj; + struct completion kobj_unregister; + struct rw_semaphore rwsem; + bool fast_switch_possible; + bool fast_switch_enabled; + bool strict_target; + bool efficiencies_available; + unsigned int transition_delay_us; + bool dvfs_possible_from_any_cpu; + bool boost_enabled; + unsigned int cached_target_freq; + unsigned int cached_resolved_idx; + bool transition_ongoing; + spinlock_t transition_lock; + wait_queue_head_t transition_wait; + struct task_struct *transition_task; + struct cpufreq_stats *stats; + void *driver_data; + struct thermal_cooling_device *cdev; + struct notifier_block nb_min; + struct notifier_block nb_max; +}; + +struct cpufreq_governor { + char name[16]; + int (*init)(struct cpufreq_policy *); + void (*exit)(struct cpufreq_policy *); + int (*start)(struct cpufreq_policy *); + void (*stop)(struct cpufreq_policy *); + void (*limits)(struct cpufreq_policy *); + ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); + int (*store_setspeed)(struct cpufreq_policy *, unsigned int); + struct list_head governor_list; + struct module *owner; + u8 flags; +}; + +struct cpufreq_frequency_table { + unsigned int flags; + unsigned int driver_data; + unsigned int frequency; +}; + +struct trace_event_raw_cpu { + struct trace_entry ent; + u32 state; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_idle_miss { + struct trace_entry ent; + u32 cpu_id; + u32 state; + bool below; + char __data[0]; +}; + +struct trace_event_raw_powernv_throttle { + struct trace_entry ent; + int chip_id; + u32 __data_loc_reason; + int pmax; + char __data[0]; +}; + +struct trace_event_raw_pstate_sample { + struct trace_entry ent; + u32 core_busy; + u32 scaled_busy; + u32 from; + u32 to; + u64 mperf; + u64 aperf; + u64 tsc; + u32 freq; + u32 io_boost; + char __data[0]; +}; + +struct trace_event_raw_cpu_frequency_limits { + struct trace_entry ent; + u32 min_freq; + u32 max_freq; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_start { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u32 __data_loc_parent; + u32 __data_loc_pm_ops; + int event; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_end { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + int error; + char __data[0]; +}; + +struct trace_event_raw_suspend_resume { + struct trace_entry ent; + const char *action; + int val; + bool start; + char __data[0]; +}; + +struct trace_event_raw_wakeup_source { + struct trace_entry ent; + u32 __data_loc_name; + long: 32; + u64 state; + char __data[0]; +}; + +struct trace_event_raw_clock { + struct trace_entry ent; + u32 __data_loc_name; + long: 32; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_power_domain { + struct trace_entry ent; + u32 __data_loc_name; + long: 32; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct 
trace_event_raw_cpu_latency_qos_request { + struct trace_entry ent; + s32 value; + char __data[0]; +}; + +struct trace_event_raw_pm_qos_update { + struct trace_entry ent; + enum pm_qos_req_action action; + int prev_value; + int curr_value; + char __data[0]; +}; + +struct trace_event_raw_dev_pm_qos_request { + struct trace_entry ent; + u32 __data_loc_name; + enum dev_pm_qos_req_type type; + s32 new_value; + char __data[0]; +}; + +struct trace_event_raw_guest_halt_poll_ns { + struct trace_entry ent; + bool grow; + unsigned int new; + unsigned int old; + char __data[0]; +}; + +struct trace_event_data_offsets_cpu {}; + +struct trace_event_data_offsets_cpu_idle_miss {}; + +struct trace_event_data_offsets_powernv_throttle { + u32 reason; + const void *reason_ptr_; +}; + +struct trace_event_data_offsets_pstate_sample {}; + +struct trace_event_data_offsets_cpu_frequency_limits {}; + +struct trace_event_data_offsets_device_pm_callback_start { + u32 device; + const void *device_ptr_; + u32 driver; + const void *driver_ptr_; + u32 parent; + const void *parent_ptr_; + u32 pm_ops; + const void *pm_ops_ptr_; +}; + +struct trace_event_data_offsets_device_pm_callback_end { + u32 device; + const void *device_ptr_; + u32 driver; + const void *driver_ptr_; +}; + +struct trace_event_data_offsets_suspend_resume {}; + +struct trace_event_data_offsets_wakeup_source { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clock { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_power_domain { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cpu_latency_qos_request {}; + +struct trace_event_data_offsets_pm_qos_update {}; + +struct trace_event_data_offsets_dev_pm_qos_request { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_guest_halt_poll_ns {}; + +typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_idle_miss)(void *, unsigned int, unsigned int, bool); + +typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int); + +typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32); + +typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *); + +typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int); + +typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int); + +typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool); + +typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_pm_qos_add_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_remove_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int); 
+ +typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_guest_halt_poll_ns)(void *, bool, unsigned int, unsigned int); + +struct bpf_devmap_val { + __u32 ifindex; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct xdp_dev_bulk_queue { + struct xdp_frame *q[16]; + struct list_head flush_node; + struct net_device *dev; + struct net_device *dev_rx; + struct bpf_prog *xdp_prog; + unsigned int count; +}; + +struct bpf_dtab_netdev { + struct net_device *dev; + struct hlist_node index_hlist; + struct bpf_prog *xdp_prog; + struct callback_head rcu; + unsigned int idx; + struct bpf_devmap_val val; +}; + +struct bpf_dtab { + struct bpf_map map; + struct bpf_dtab_netdev **netdev_map; + struct list_head list; + struct hlist_head *dev_index_head; + spinlock_t index_lock; + unsigned int items; + u32 n_buckets; + long: 32; +}; + +struct reserve_mem_table { + char name[16]; + phys_addr_t start; + phys_addr_t size; +}; + +typedef __kernel_ulong_t __kernel_ino_t; + +struct __old_kernel_stat { + short unsigned int st_dev; + short unsigned int st_ino; + short unsigned int st_mode; + short unsigned int st_nlink; + short unsigned int st_uid; + short unsigned int st_gid; + short unsigned int st_rdev; + long unsigned int st_size; + long unsigned int st_atime; + long unsigned int st_mtime; + long unsigned int st_ctime; +}; + +struct stat { + long unsigned int st_dev; + __kernel_ino_t st_ino; + __kernel_mode_t st_mode; + short unsigned int st_nlink; + __kernel_uid32_t st_uid; + __kernel_gid32_t st_gid; + long unsigned int st_rdev; + long int st_size; + long unsigned int st_blksize; + long unsigned int st_blocks; + long unsigned int st_atime; + long unsigned int st_atime_nsec; + long unsigned int st_mtime; + long unsigned int st_mtime_nsec; + long unsigned int st_ctime; + long unsigned int st_ctime_nsec; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct stat64 { + long long unsigned int st_dev; + long long unsigned int st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + long long unsigned int st_rdev; + short unsigned int __pad2; + long: 32; + long long int st_size; + int st_blksize; + long: 32; + long long int st_blocks; + int st_atime; + unsigned int st_atime_nsec; + int st_mtime; + unsigned int st_mtime_nsec; + int st_ctime; + unsigned int st_ctime_nsec; + unsigned int __unused4; + unsigned int __unused5; +}; + +struct statx_timestamp { + __s64 tv_sec; + __u32 tv_nsec; + __s32 __reserved; +}; + +struct statx { + __u32 stx_mask; + __u32 stx_blksize; + __u64 stx_attributes; + __u32 stx_nlink; + __u32 stx_uid; + __u32 stx_gid; + __u16 stx_mode; + __u16 __spare0[1]; + __u64 stx_ino; + __u64 stx_size; + __u64 stx_blocks; + __u64 stx_attributes_mask; + struct statx_timestamp stx_atime; + struct statx_timestamp stx_btime; + struct statx_timestamp stx_ctime; + struct statx_timestamp stx_mtime; + __u32 stx_rdev_major; + __u32 stx_rdev_minor; + __u32 stx_dev_major; + __u32 stx_dev_minor; + __u64 stx_mnt_id; + __u32 stx_dio_mem_align; + __u32 stx_dio_offset_align; + __u64 stx_subvol; + __u32 stx_atomic_write_unit_min; + __u32 stx_atomic_write_unit_max; + __u32 stx_atomic_write_segments_max; + __u32 __spare1[1]; + __u64 __spare3[9]; +}; + +typedef 
struct fd class_fd_raw_t; + +enum fsconfig_command { + FSCONFIG_SET_FLAG = 0, + FSCONFIG_SET_STRING = 1, + FSCONFIG_SET_BINARY = 2, + FSCONFIG_SET_PATH = 3, + FSCONFIG_SET_PATH_EMPTY = 4, + FSCONFIG_SET_FD = 5, + FSCONFIG_CMD_CREATE = 6, + FSCONFIG_CMD_RECONFIGURE = 7, + FSCONFIG_CMD_CREATE_EXCL = 8, +}; + +typedef int (*ext4_mballoc_query_range_fn)(struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t, void *); + +struct ext4_getfsmap_info { + struct ext4_fsmap_head *gfi_head; + ext4_fsmap_format_t gfi_formatter; + void *gfi_format_arg; + long: 32; + ext4_fsblk_t gfi_next_fsblk; + u32 gfi_dev; + ext4_group_t gfi_agno; + struct ext4_fsmap gfi_low; + struct ext4_fsmap gfi_high; + struct ext4_fsmap gfi_lastfree; + struct list_head gfi_meta_list; + bool gfi_last; + long: 32; +}; + +struct ext4_getfsmap_dev { + int (*gfd_fn)(struct super_block *, struct ext4_fsmap *, struct ext4_getfsmap_info *); + u32 gfd_dev; +}; + +struct fuse_setxattr_in { + uint32_t size; + uint32_t flags; + uint32_t setxattr_flags; + uint32_t padding; +}; + +struct fuse_getxattr_in { + uint32_t size; + uint32_t padding; +}; + +struct fuse_getxattr_out { + uint32_t size; + uint32_t padding; +}; + +struct btrfs_new_inode_args { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool orphan; + bool subvol; + struct posix_acl *default_acl; + struct posix_acl *acl; + struct fscrypt_name fname; +}; + +struct btrfs_iget_args { + u64 ino; + struct btrfs_root *root; + long: 32; +}; + +struct btrfs_rename_ctx { + u64 index; +}; + +struct data_reloc_warn { + struct btrfs_path path; + struct btrfs_fs_info *fs_info; + u64 extent_item_size; + u64 logical; + int mirror_num; + long: 32; +}; + +struct async_extent { + u64 start; + u64 ram_size; + u64 compressed_size; + struct folio **folios; + long unsigned int nr_folios; + int compress_type; + struct list_head list; + long: 32; +}; + +struct async_cow; + +struct async_chunk { + struct btrfs_inode *inode; + struct folio *locked_folio; + u64 start; + u64 end; + blk_opf_t write_flags; + struct list_head extents; + struct cgroup_subsys_state *blkcg_css; + struct btrfs_work work; + struct async_cow *async_cow; + long: 32; +}; + +struct async_cow { + atomic_t num_chunks; + long: 32; + struct async_chunk chunks[0]; +}; + +struct can_nocow_file_extent_args { + u64 start; + u64 end; + bool writeback_path; + bool strict; + bool free_path; + long: 32; + struct btrfs_file_extent file_extent; +}; + +struct btrfs_writepage_fixup { + struct folio *folio; + struct btrfs_inode *inode; + struct btrfs_work work; +}; + +struct dir_entry { + u64 ino; + u64 offset; + unsigned int type; + int name_len; +}; + +struct btrfs_delalloc_work { + struct inode *inode; + struct completion completion; + struct list_head list; + struct btrfs_work work; +}; + +struct btrfs_encoded_read_private { + wait_queue_head_t wait; + void *uring_ctx; + atomic_t pending; + blk_status_t status; +}; + +struct btrfs_swap_info { + u64 start; + u64 block_start; + u64 block_len; + u64 lowest_ppage; + u64 highest_ppage; + long unsigned int nr_pages; + int nr_extents; +}; + +struct user_key_payload { + struct callback_head rcu; + short unsigned int datalen; + long: 32; + char data[0]; +}; + +struct bpf_crypto_type { + void * (*alloc_tfm)(const char *); + void (*free_tfm)(void *); + int (*has_algo)(const char *); + int (*setkey)(void *, const u8 *, unsigned int); + int (*setauthsize)(void *, unsigned int); + int (*encrypt)(void *, const u8 *, u8 *, unsigned int, u8 *); + int (*decrypt)(void *, const u8 *, u8 *, 
unsigned int, u8 *); + unsigned int (*ivsize)(void *); + unsigned int (*statesize)(void *); + u32 (*get_flags)(void *); + struct module *owner; + char name[14]; +}; + +struct crypto_lskcipher { + struct crypto_tfm base; +}; + +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *, const u8 *, unsigned int); + int (*encrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); + int (*decrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); + int (*init)(struct crypto_lskcipher *); + void (*exit)(struct crypto_lskcipher *); + long: 32; + struct skcipher_alg_common co; +}; + +struct acomp_alg { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + int (*init)(struct crypto_acomp *); + void (*exit)(struct crypto_acomp *); + unsigned int reqsize; + union { + struct { + struct crypto_alg base; + }; + struct comp_alg_common calg; + }; +}; + +enum OID { + OID_id_dsa_with_sha1 = 0, + OID_id_dsa = 1, + OID_id_ecPublicKey = 2, + OID_id_prime192v1 = 3, + OID_id_prime256v1 = 4, + OID_id_ecdsa_with_sha1 = 5, + OID_id_ecdsa_with_sha224 = 6, + OID_id_ecdsa_with_sha256 = 7, + OID_id_ecdsa_with_sha384 = 8, + OID_id_ecdsa_with_sha512 = 9, + OID_rsaEncryption = 10, + OID_sha1WithRSAEncryption = 11, + OID_sha256WithRSAEncryption = 12, + OID_sha384WithRSAEncryption = 13, + OID_sha512WithRSAEncryption = 14, + OID_sha224WithRSAEncryption = 15, + OID_data = 16, + OID_signed_data = 17, + OID_email_address = 18, + OID_contentType = 19, + OID_messageDigest = 20, + OID_signingTime = 21, + OID_smimeCapabilites = 22, + OID_smimeAuthenticatedAttrs = 23, + OID_mskrb5 = 24, + OID_krb5 = 25, + OID_krb5u2u = 26, + OID_msIndirectData = 27, + OID_msStatementType = 28, + OID_msSpOpusInfo = 29, + OID_msPeImageDataObjId = 30, + OID_msIndividualSPKeyPurpose = 31, + OID_msOutlookExpress = 32, + OID_ntlmssp = 33, + OID_negoex = 34, + OID_spnego = 35, + OID_IAKerb = 36, + OID_PKU2U = 37, + OID_Scram = 38, + OID_certAuthInfoAccess = 39, + OID_sha1 = 40, + OID_id_ansip384r1 = 41, + OID_id_ansip521r1 = 42, + OID_sha256 = 43, + OID_sha384 = 44, + OID_sha512 = 45, + OID_sha224 = 46, + OID_commonName = 47, + OID_surname = 48, + OID_countryName = 49, + OID_locality = 50, + OID_stateOrProvinceName = 51, + OID_organizationName = 52, + OID_organizationUnitName = 53, + OID_title = 54, + OID_description = 55, + OID_name = 56, + OID_givenName = 57, + OID_initials = 58, + OID_generationalQualifier = 59, + OID_subjectKeyIdentifier = 60, + OID_keyUsage = 61, + OID_subjectAltName = 62, + OID_issuerAltName = 63, + OID_basicConstraints = 64, + OID_crlDistributionPoints = 65, + OID_certPolicies = 66, + OID_authorityKeyIdentifier = 67, + OID_extKeyUsage = 68, + OID_NetlogonMechanism = 69, + OID_appleLocalKdcSupported = 70, + OID_gostCPSignA = 71, + OID_gostCPSignB = 72, + OID_gostCPSignC = 73, + OID_gost2012PKey256 = 74, + OID_gost2012PKey512 = 75, + OID_gost2012Digest256 = 76, + OID_gost2012Digest512 = 77, + OID_gost2012Signature256 = 78, + OID_gost2012Signature512 = 79, + OID_gostTC26Sign256A = 80, + OID_gostTC26Sign256B = 81, + OID_gostTC26Sign256C = 82, + OID_gostTC26Sign256D = 83, + OID_gostTC26Sign512A = 84, + OID_gostTC26Sign512B = 85, + OID_gostTC26Sign512C = 86, + OID_sm2 = 87, + OID_sm3 = 88, + OID_SM2_with_SM3 = 89, + OID_sm3WithRSAEncryption = 90, + OID_TPMLoadableKey = 91, + OID_TPMImportableKey = 92, + OID_TPMSealedData = 93, + OID_sha3_256 = 94, + OID_sha3_384 = 95, + OID_sha3_512 = 96, + OID_id_ecdsa_with_sha3_256 = 
97, + OID_id_ecdsa_with_sha3_384 = 98, + OID_id_ecdsa_with_sha3_512 = 99, + OID_id_rsassa_pkcs1_v1_5_with_sha3_256 = 100, + OID_id_rsassa_pkcs1_v1_5_with_sha3_384 = 101, + OID_id_rsassa_pkcs1_v1_5_with_sha3_512 = 102, + OID__NR = 103, +}; + +struct public_key { + void *key; + u32 keylen; + enum OID algo; + void *params; + u32 paramlen; + bool key_is_private; + const char *id_type; + const char *pkey_algo; + long unsigned int key_eflags; +}; + +struct asymmetric_key_id; + +struct public_key_signature { + struct asymmetric_key_id *auth_ids[3]; + u8 *s; + u8 *digest; + u32 s_size; + u32 digest_size; + const char *pkey_algo; + const char *hash_algo; + const char *encoding; +}; + +struct asymmetric_key_id { + short unsigned int len; + unsigned char data[0]; +}; + +struct x509_certificate { + struct x509_certificate *next; + struct x509_certificate *signer; + struct public_key *pub; + struct public_key_signature *sig; + char *issuer; + char *subject; + struct asymmetric_key_id *id; + struct asymmetric_key_id *skid; + time64_t valid_from; + time64_t valid_to; + const void *tbs; + unsigned int tbs_size; + unsigned int raw_sig_size; + const void *raw_sig; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_subject; + unsigned int raw_subject_size; + unsigned int raw_skid_size; + const void *raw_skid; + unsigned int index; + bool seen; + bool verified; + bool self_signed; + bool unsupported_sig; + bool blacklisted; + long: 32; +}; + +struct pkcs7_signed_info { + struct pkcs7_signed_info *next; + struct x509_certificate *signer; + unsigned int index; + bool unsupported_crypto; + bool blacklisted; + const void *msgdigest; + unsigned int msgdigest_len; + unsigned int authattrs_len; + const void *authattrs; + long unsigned int aa_set; + long: 32; + time64_t signing_time; + struct public_key_signature *sig; + long: 32; +}; + +struct pkcs7_message { + struct x509_certificate *certs; + struct x509_certificate *crl; + struct pkcs7_signed_info *signed_infos; + u8 version; + bool have_authattrs; + enum OID data_type; + size_t data_len; + size_t data_hdrlen; + const void *data; +}; + +struct pkcs7_parse_context { + struct pkcs7_message *msg; + struct pkcs7_signed_info *sinfo; + struct pkcs7_signed_info **ppsinfo; + struct x509_certificate *certs; + struct x509_certificate **ppcerts; + long unsigned int data; + enum OID last_oid; + unsigned int x509_index; + unsigned int sinfo_index; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_skid; + unsigned int raw_skid_size; + bool expect_skid; +}; + +struct blk_queue_stats { + struct list_head callbacks; + spinlock_t lock; + int accounting; +}; + +struct blk_stat_callback { + struct list_head list; + struct timer_list timer; + struct blk_rq_stat *cpu_stat; + int (*bucket_fn)(const struct request *); + unsigned int buckets; + struct blk_rq_stat *stat; + void (*timer_fn)(struct blk_stat_callback *); + void *data; + struct callback_head rcu; +}; + +struct io_uring_rsrc_update { + __u32 offset; + __u32 resv; + __u64 data; +}; + +typedef struct io_wq_work *free_work_fn(struct io_wq_work *); + +typedef void io_wq_work_fn(struct io_wq_work *); + +struct io_wq_data { + struct io_wq_hash *hash; + struct task_struct *task; + io_wq_work_fn *do_work; + free_work_fn *free_work; +}; + +struct io_uring_sync_cancel_reg { + __u64 addr; + __s32 fd; + __u32 flags; + struct __kernel_timespec timeout; + __u8 opcode; + __u8 
pad[7]; + __u64 pad2[3]; +}; + +struct io_cancel { + struct file *file; + long: 32; + u64 addr; + u32 flags; + s32 fd; + u8 opcode; + long: 32; +}; + +struct component_ops { + int (*bind)(struct device *, struct device *, void *); + void (*unbind)(struct device *, struct device *, void *); +}; + +struct component_master_ops { + int (*bind)(struct device *); + void (*unbind)(struct device *); +}; + +struct component; + +struct component_match_array { + void *data; + int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + +struct aggregate_device; + +struct component { + struct list_head node; + struct aggregate_device *adev; + bool bound; + const struct component_ops *ops; + int subcomponent; + struct device *dev; +}; + +struct component_match { + size_t alloc; + size_t num; + struct component_match_array *compare; +}; + +struct aggregate_device { + struct list_head node; + bool bound; + const struct component_master_ops *ops; + struct device *parent; + struct component_match *match; +}; + +enum mac_version { + RTL_GIGA_MAC_VER_02 = 0, + RTL_GIGA_MAC_VER_03 = 1, + RTL_GIGA_MAC_VER_04 = 2, + RTL_GIGA_MAC_VER_05 = 3, + RTL_GIGA_MAC_VER_06 = 4, + RTL_GIGA_MAC_VER_07 = 5, + RTL_GIGA_MAC_VER_08 = 6, + RTL_GIGA_MAC_VER_09 = 7, + RTL_GIGA_MAC_VER_10 = 8, + RTL_GIGA_MAC_VER_11 = 9, + RTL_GIGA_MAC_VER_14 = 10, + RTL_GIGA_MAC_VER_17 = 11, + RTL_GIGA_MAC_VER_18 = 12, + RTL_GIGA_MAC_VER_19 = 13, + RTL_GIGA_MAC_VER_20 = 14, + RTL_GIGA_MAC_VER_21 = 15, + RTL_GIGA_MAC_VER_22 = 16, + RTL_GIGA_MAC_VER_23 = 17, + RTL_GIGA_MAC_VER_24 = 18, + RTL_GIGA_MAC_VER_25 = 19, + RTL_GIGA_MAC_VER_26 = 20, + RTL_GIGA_MAC_VER_28 = 21, + RTL_GIGA_MAC_VER_29 = 22, + RTL_GIGA_MAC_VER_30 = 23, + RTL_GIGA_MAC_VER_31 = 24, + RTL_GIGA_MAC_VER_32 = 25, + RTL_GIGA_MAC_VER_33 = 26, + RTL_GIGA_MAC_VER_34 = 27, + RTL_GIGA_MAC_VER_35 = 28, + RTL_GIGA_MAC_VER_36 = 29, + RTL_GIGA_MAC_VER_37 = 30, + RTL_GIGA_MAC_VER_38 = 31, + RTL_GIGA_MAC_VER_39 = 32, + RTL_GIGA_MAC_VER_40 = 33, + RTL_GIGA_MAC_VER_42 = 34, + RTL_GIGA_MAC_VER_43 = 35, + RTL_GIGA_MAC_VER_44 = 36, + RTL_GIGA_MAC_VER_46 = 37, + RTL_GIGA_MAC_VER_48 = 38, + RTL_GIGA_MAC_VER_51 = 39, + RTL_GIGA_MAC_VER_52 = 40, + RTL_GIGA_MAC_VER_53 = 41, + RTL_GIGA_MAC_VER_61 = 42, + RTL_GIGA_MAC_VER_63 = 43, + RTL_GIGA_MAC_VER_64 = 44, + RTL_GIGA_MAC_VER_65 = 45, + RTL_GIGA_MAC_VER_66 = 46, + RTL_GIGA_MAC_NONE = 47, +}; + +struct rtl8169_private; + +typedef void (*rtl_fw_write_t)(struct rtl8169_private *, int, int); + +enum rtl_dash_type { + RTL_DASH_NONE = 0, + RTL_DASH_DP = 1, + RTL_DASH_EP = 2, +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_tc_offsets { + bool inited; + long: 32; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +struct r8169_led_classdev; + +struct TxDesc; + +struct RxDesc; + +struct rtl8169_counters; + +struct rtl_fw___2; + +struct rtl8169_private { + void *mmio_addr; + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; + u32 cur_tx; + u32 dirty_tx; + struct TxDesc *TxDescArray; + struct RxDesc *RxDescArray; + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[256]; + struct ring_info tx_skb[256]; + u16 cp_cmd; + u16 tx_lpi_timer; + u32 irq_mask; + int irq; + struct clk *clk; + struct { + long unsigned 
int flags[1]; + struct work_struct work; + } wk; + raw_spinlock_t mac_ocp_lock; + struct mutex led_lock; + unsigned int supports_gmii: 1; + unsigned int aspm_manageable: 1; + unsigned int dash_enabled: 1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + long: 32; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + const char *fw_name; + struct rtl_fw___2 *rtl_fw; + struct r8169_led_classdev *leds; + u32 ocp_base; + long: 32; +}; + +typedef int (*rtl_fw_read_t)(struct rtl8169_private *, int); + +struct rtl_fw_phy_action { + __le32 *code; + size_t size; +}; + +struct rtl_fw___2 { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + char version[32]; + struct rtl_fw_phy_action phy_action; +}; + +enum rtl_registers { + MAC0 = 0, + MAC4 = 4, + MAR0 = 8, + CounterAddrLow = 16, + CounterAddrHigh = 20, + TxDescStartAddrLow = 32, + TxDescStartAddrHigh = 36, + TxHDescStartAddrLow = 40, + TxHDescStartAddrHigh = 44, + FLASH = 48, + ERSR = 54, + ChipCmd = 55, + TxPoll = 56, + IntrMask = 60, + IntrStatus = 62, + TxConfig = 64, + RxConfig = 68, + Cfg9346 = 80, + Config0 = 81, + Config1 = 82, + Config2 = 83, + Config3 = 84, + Config4 = 85, + Config5 = 86, + PHYAR = 96, + PHYstatus = 108, + RxMaxSize = 218, + CPlusCmd = 224, + IntrMitigate = 226, + RxDescAddrLow = 228, + RxDescAddrHigh = 232, + EarlyTxThres = 236, + MaxTxPacketSize = 236, + FuncEvent = 240, + FuncEventMask = 244, + FuncPresetState = 248, + IBCR0 = 248, + IBCR2 = 249, + IBIMR0 = 250, + IBISR0 = 251, + FuncForceEvent = 252, +}; + +enum rtl8168_8101_registers { + CSIDR = 100, + CSIAR = 104, + PMCH = 111, + EPHYAR = 128, + DLLPR = 208, + DBG_REG = 209, + TWSI = 210, + MCU = 211, + EFUSEAR = 220, + MISC_1 = 242, +}; + +enum rtl8168_registers { + LED_CTRL = 24, + LED_FREQ = 26, + EEE_LED = 27, + ERIDR = 112, + ERIAR = 116, + EPHY_RXER_NUM = 124, + OCPDR = 176, + OCPAR = 180, + GPHY_OCP = 184, + RDSAR1 = 208, + MISC = 240, +}; + +enum rtl8125_registers { + LEDSEL0 = 24, + INT_CFG0_8125 = 52, + IntrMask_8125 = 56, + IntrStatus_8125 = 60, + INT_CFG1_8125 = 122, + LEDSEL2 = 132, + LEDSEL1 = 134, + TxPoll_8125 = 144, + LEDSEL3 = 150, + MAC0_BKP = 6624, + RSS_CTRL_8125 = 17664, + Q_NUM_CTRL_8125 = 18432, + EEE_TXIDLE_TIMER_8125 = 24648, +}; + +enum rtl_register_content___2 { + SYSErr = 32768, + PCSTimeout = 16384, + SWInt = 256, + TxDescUnavail = 128, + RxFIFOOver = 64, + LinkChg = 32, + RxOverflow = 16, + TxErr = 8, + TxOK = 4, + RxErr = 2, + RxOK = 1, + RxRWT = 4194304, + RxRES = 2097152, + RxRUNT = 1048576, + RxCRC = 524288, + StopReq = 128, + CmdReset = 16, + CmdRxEnb = 8, + CmdTxEnb = 4, + RxBufEmpty = 1, + HPQ = 128, + NPQ = 64, + FSWInt = 1, + Cfg9346_Lock = 0, + Cfg9346_Unlock = 192, + AcceptErr = 32, + AcceptRunt = 16, + AcceptBroadcast = 8, + AcceptMulticast = 4, + AcceptMyPhys = 2, + AcceptAllPhys = 1, + TxInterFrameGapShift = 24, + TxDMAShift = 8, + LEDS1 = 128, + LEDS0 = 64, + Speed_down = 16, + MEMMAP = 8, + IOMAP = 4, + VPD = 2, + PMEnable = 1, + ClkReqEn = 128, + MSIEnable = 32, + PCI_Clock_66MHz = 1, + PCI_Clock_33MHz = 0, + MagicPacket = 32, + LinkUp = 16, + Jumbo_En0 = 4, + Rdy_to_L23 = 2, + Beacon_en = 1, + Jumbo_En1 = 2, + BWF = 64, + MWF = 32, + UWF = 16, + Spi_en = 8, + LanWake = 2, + PMEStatus = 1, + ASPM_en = 1, + EnableBist = 32768, + Mac_dbgo_oe = 16384, + EnAnaPLL = 16384, + Normal_mode = 8192, + Force_half_dup = 4096, + Force_rxflow_en = 2048, + 
Force_txflow_en = 1024, + Cxpl_dbg_sel = 512, + ASF = 256, + PktCntrDisable = 128, + Mac_dbgo_sel = 28, + RxVlan = 64, + RxChkSum = 32, + PCIDAC = 16, + PCIMulRW = 8, + TBI_Enable = 128, + TxFlowCtrl = 64, + RxFlowCtrl = 32, + _1000bpsF = 16, + _100bps___2 = 8, + _10bps___2 = 4, + LinkStatus = 2, + FullDup = 1, + CounterReset = 1, + CounterDump = 8, + MagicPacket_v2 = 65536, +}; + +enum rtl_desc_bit { + DescOwn = -2147483648, + RingEnd = 1073741824, + FirstFrag = 536870912, + LastFrag = 268435456, +}; + +enum rtl_tx_desc_bit { + TD_LSO = 134217728, + TxVlanTag = 131072, +}; + +enum rtl_tx_desc_bit_0 { + TD0_TCP_CS = 65536, + TD0_UDP_CS = 131072, + TD0_IP_CS = 262144, +}; + +enum rtl_tx_desc_bit_1 { + TD1_GTSENV4 = 67108864, + TD1_GTSENV6 = 33554432, + TD1_IPv6_CS = 268435456, + TD1_IPv4_CS = 536870912, + TD1_TCP_CS = 1073741824, + TD1_UDP_CS = -2147483648, +}; + +enum rtl_rx_desc_bit { + PID1 = 262144, + PID0 = 131072, + IPFail = 65536, + UDPFail = 32768, + TCPFail = 16384, + RxVlanTag = 65536, +}; + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underrun; + __le64 tx_octets; + __le64 rx_octets; + __le64 rx_multicast64; + __le64 tx_unicast64; + __le64 tx_broadcast64; + __le64 tx_multicast64; + __le32 tx_pause_on; + __le32 tx_pause_off; + __le32 tx_pause_all; + __le32 tx_deferred; + __le32 tx_late_collision; + __le32 tx_all_collision; + __le32 tx_aborted32; + __le32 align_errors32; + __le32 rx_frame_too_long; + __le32 rx_runt; + __le32 rx_pause_on; + __le32 rx_pause_off; + __le32 rx_pause_all; + __le32 rx_unknown_opcode; + __le32 rx_mac_error; + __le32 tx_underrun32; + __le32 rx_mac_missed; + __le32 rx_tcam_dropped; + __le32 tdu; + __le32 rdu; +}; + +enum rtl_flag { + RTL_FLAG_TASK_RESET_PENDING = 0, + RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE = 1, + RTL_FLAG_TASK_TX_TIMEOUT = 2, + RTL_FLAG_MAX = 3, +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *); + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; +}; + +enum mce_kbd_mode { + MCIR2_MODE_KEYBOARD = 0, + MCIR2_MODE_MOUSE = 1, + MCIR2_MODE_UNKNOWN = 2, +}; + +enum mce_kbd_state { + STATE_INACTIVE___6 = 0, + STATE_HEADER_BIT_START___2 = 1, + STATE_HEADER_BIT_END___2 = 2, + STATE_BODY_BIT_START___2 = 3, + STATE_BODY_BIT_END___2 = 4, + STATE_FINISHED___3 = 5, +}; + +struct raid10_info { + struct md_rdev *rdev; + struct md_rdev *replacement; + sector_t head_position; + int recovery_disabled; + long: 32; +}; + +struct geom { + int raid_disks; + int near_copies; + int far_copies; + int far_offset; + sector_t stride; + int far_set_size; + int chunk_shift; + sector_t chunk_mask; +}; + +struct r10conf { + struct mddev *mddev; + struct raid10_info *mirrors; + struct raid10_info *mirrors_new; + struct raid10_info *mirrors_old; + spinlock_t device_lock; + long: 32; + struct geom prev; + struct geom geo; + int copies; + long: 32; + sector_t dev_sectors; + sector_t reshape_progress; + sector_t reshape_safe; + 
long unsigned int reshape_checkpoint; + long: 32; + sector_t offset_diff; + struct list_head retry_list; + struct list_head bio_end_io_list; + struct bio_list pending_bio_list; + seqlock_t resync_lock; + atomic_t nr_pending; + int nr_waiting; + int nr_queued; + int barrier; + int array_freeze_pending; + long: 32; + sector_t next_resync; + int fullsync; + int have_replacement; + wait_queue_head_t wait_barrier; + mempool_t r10bio_pool; + mempool_t r10buf_pool; + struct page *tmppage; + struct bio_set bio_split; + struct md_thread *thread; + long: 32; + sector_t cluster_sync_low; + sector_t cluster_sync_high; +}; + +struct r10dev { + struct bio *bio; + union { + struct bio *repl_bio; + struct md_rdev *rdev; + }; + sector_t addr; + int devnum; + long: 32; +}; + +struct r10bio { + atomic_t remaining; + long: 32; + sector_t sector; + int sectors; + long unsigned int state; + struct mddev *mddev; + struct bio *master_bio; + int read_slot; + struct list_head retry_list; + long: 32; + struct r10dev devs[0]; +}; + +enum r10bio_state { + R10BIO_Uptodate = 0, + R10BIO_IsSync = 1, + R10BIO_IsRecover = 2, + R10BIO_IsReshape = 3, + R10BIO_Degraded = 4, + R10BIO_ReadError = 5, + R10BIO_MadeGood = 6, + R10BIO_WriteError = 7, + R10BIO_Previous = 8, + R10BIO_FailFast = 9, + R10BIO_Discard = 10, +}; + +struct strip_zone { + sector_t zone_end; + sector_t dev_start; + int nb_dev; + int disk_shift; +}; + +enum r0layout { + RAID0_ORIG_LAYOUT = 1, + RAID0_ALT_MULTIZONE_LAYOUT = 2, +}; + +struct r0conf { + struct strip_zone *strip_zone; + struct md_rdev **devlist; + int nr_strip_zones; + enum r0layout layout; +}; + +enum geo_type { + geo_new = 0, + geo_old = 1, + geo_start = 2, +}; + +struct rsync_pages; + +enum { + BPF_F_RECOMPUTE_CSUM = 1, + BPF_F_INVALIDATE_HASH = 2, +}; + +enum { + BPF_F_HDR_FIELD_MASK = 15, +}; + +enum { + BPF_F_PSEUDO_HDR = 16, + BPF_F_MARK_MANGLED_0 = 32, + BPF_F_MARK_ENFORCE = 64, +}; + +enum { + BPF_F_TUNINFO_IPV6 = 1, +}; + +enum { + BPF_F_ZERO_CSUM_TX = 2, + BPF_F_DONT_FRAGMENT = 4, + BPF_F_SEQ_NUMBER = 8, + BPF_F_NO_TUNNEL_KEY = 16, +}; + +enum { + BPF_F_TUNINFO_FLAGS = 16, +}; + +enum { + BPF_F_INDEX_MASK = 4294967295ULL, + BPF_F_CURRENT_CPU = 4294967295ULL, + BPF_F_CTXLEN_MASK = 4503595332403200ULL, +}; + +enum { + BPF_CSUM_LEVEL_QUERY = 0, + BPF_CSUM_LEVEL_INC = 1, + BPF_CSUM_LEVEL_DEC = 2, + BPF_CSUM_LEVEL_RESET = 3, +}; + +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = 1, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4, + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8, + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16, + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32, + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64, + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128, + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256, +}; + +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; + +enum { + BPF_SK_LOOKUP_F_REPLACE = 1, + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2, +}; + +enum bpf_adj_room_mode { + BPF_ADJ_ROOM_NET = 0, + BPF_ADJ_ROOM_MAC = 1, +}; + +enum bpf_hdr_start_off { + BPF_HDR_START_MAC = 0, + BPF_HDR_START_NET = 1, +}; + +enum { + BPF_SKB_TSTAMP_UNSPEC = 0, + BPF_SKB_TSTAMP_DELIVERY_MONO = 1, + BPF_SKB_CLOCK_REALTIME = 0, + BPF_SKB_CLOCK_MONOTONIC = 1, + BPF_SKB_CLOCK_TAI = 2, +}; + +struct bpf_tunnel_key { + __u32 tunnel_id; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; + __u8 tunnel_tos; + __u8 tunnel_ttl; + union { + __u16 tunnel_ext; + __be16 tunnel_flags; + }; + __u32 tunnel_label; + union { + __u32 local_ipv4; + __u32 local_ipv6[4]; + }; +}; + +struct bpf_tcp_sock { + __u32 snd_cwnd; + __u32 srtt_us; + 
__u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u64 bytes_received; + __u64 bytes_acked; + __u32 dsack_dups; + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +struct bpf_xdp_sock { + __u32 queue_id; +}; + +struct sk_msg_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 size; + union { + struct bpf_sock *sk; + }; +}; + +struct sk_reuseport_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 len; + __u32 eth_protocol; + __u32 ip_protocol; + __u32 bind_inany; + __u32 hash; + long: 32; + union { + struct bpf_sock *sk; + }; + union { + struct bpf_sock *migrating_sk; + }; +}; + +struct bpf_sock_addr { + __u32 user_family; + __u32 user_ip4; + __u32 user_ip6[4]; + __u32 user_port; + __u32 family; + __u32 type; + __u32 protocol; + __u32 msg_src_ip4; + __u32 msg_src_ip6[4]; + long: 32; + union { + struct bpf_sock *sk; + }; +}; + +struct bpf_sock_ops { + __u32 op; + union { + __u32 args[4]; + __u32 reply; + __u32 replylong[4]; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 is_fullsock; + __u32 snd_cwnd; + __u32 srtt_us; + __u32 bpf_sock_ops_cb_flags; + __u32 state; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u32 sk_txhash; + __u64 bytes_received; + __u64 bytes_acked; + union { + struct bpf_sock *sk; + }; + union { + void *skb_data; + }; + union { + void *skb_data_end; + }; + __u32 skb_len; + __u32 skb_tcp_flags; + __u64 skb_hwtstamp; +}; + +enum { + BPF_SOCK_OPS_RTO_CB_FLAG = 1, + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2, + BPF_SOCK_OPS_STATE_CB_FLAG = 4, + BPF_SOCK_OPS_RTT_CB_FLAG = 8, + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16, + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64, + BPF_SOCK_OPS_ALL_CB_FLAGS = 127, +}; + +enum { + TCP_BPF_IW = 1001, + TCP_BPF_SNDCWND_CLAMP = 1002, + TCP_BPF_DELACK_MAX = 1003, + TCP_BPF_RTO_MIN = 1004, + TCP_BPF_SYN = 1005, + TCP_BPF_SYN_IP = 1006, + TCP_BPF_SYN_MAC = 1007, + TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, +}; + +enum { + BPF_LOAD_HDR_OPT_TCP_SYN = 1, +}; + +enum { + BPF_FIB_LOOKUP_DIRECT = 1, + BPF_FIB_LOOKUP_OUTPUT = 2, + BPF_FIB_LOOKUP_SKIP_NEIGH = 4, + BPF_FIB_LOOKUP_TBID = 8, + BPF_FIB_LOOKUP_SRC = 16, + BPF_FIB_LOOKUP_MARK = 32, +}; + +enum { + BPF_FIB_LKUP_RET_SUCCESS = 0, + BPF_FIB_LKUP_RET_BLACKHOLE = 1, + BPF_FIB_LKUP_RET_UNREACHABLE = 2, + BPF_FIB_LKUP_RET_PROHIBIT = 3, + BPF_FIB_LKUP_RET_NOT_FWDED = 4, + BPF_FIB_LKUP_RET_FWD_DISABLED = 5, + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, + BPF_FIB_LKUP_RET_NO_NEIGH = 7, + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, + BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9, +}; + +struct bpf_fib_lookup { + __u8 family; + __u8 l4_protocol; + __be16 sport; + __be16 dport; + union 
{ + __u16 tot_len; + __u16 mtu_result; + }; + __u32 ifindex; + union { + __u8 tos; + __be32 flowinfo; + __u32 rt_metric; + }; + union { + __be32 ipv4_src; + __u32 ipv6_src[4]; + }; + union { + __be32 ipv4_dst; + __u32 ipv6_dst[4]; + }; + union { + struct { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + }; + __u32 tbid; + }; + union { + struct { + __u32 mark; + }; + struct { + __u8 smac[6]; + __u8 dmac[6]; + }; + }; +}; + +struct bpf_redir_neigh { + __u32 nh_family; + union { + __be32 ipv4_nh; + __u32 ipv6_nh[4]; + }; +}; + +enum bpf_check_mtu_flags { + BPF_MTU_CHK_SEGS = 1, +}; + +enum bpf_check_mtu_ret { + BPF_MTU_CHK_RET_SUCCESS = 0, + BPF_MTU_CHK_RET_FRAG_NEEDED = 1, + BPF_MTU_CHK_RET_SEGS_TOOBIG = 2, +}; + +struct bpf_dynptr { + __u64 __opaque[2]; +}; + +struct bpf_dispatcher_prog { + struct bpf_prog *prog; + refcount_t users; +}; + +struct bpf_dispatcher { + struct mutex mutex; + void *func; + struct bpf_dispatcher_prog progs[48]; + int num_progs; + void *image; + void *rw_image; + u32 image_off; + struct bpf_ksym ksym; + struct static_call_key *sc_key; + void *sc_tramp; +}; + +typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int); + +struct bpf_empty_prog_array { + struct bpf_prog_array hdr; + struct bpf_prog *null_prog; + long: 32; +}; + +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + struct sock *migrating_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; + +struct compat_sock_fprog { + u16 len; + compat_uptr_t filter; +}; + +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + u64 tmp_reg; + void *t_ctx; + u32 uaddrlen; +}; + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct ipv6_bpf_stub { + int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32); + struct sock * (*udp6_lib_lookup)(const struct net *, const struct in6_addr *, __be16, const struct in6_addr *, __be16, int, int, struct udp_table *, struct sk_buff *); + int (*ipv6_setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*ipv6_getsockopt)(struct sock *, int, int, sockptr_t, sockptr_t); + int (*ipv6_dev_get_saddr)(struct net *, const struct net_device *, const struct in6_addr *, unsigned int, struct in6_addr *); +}; + +struct mptcp_sock {}; + +struct bpf_tcp_req_attrs { + u32 rcv_tsval; + u32 rcv_tsecr; + u16 mss; + u8 rcv_wscale; + u8 snd_wscale; + u8 ecn_ok; + u8 wscale_ok; + u8 sack_ok; + u8 tstamp_ok; + u8 usec_ts_ok; + u8 reserved[3]; +}; + +struct _strp_msg { + struct strp_msg strp; + int accum_len; +}; + +struct tls_msg { + u8 control; +}; + +struct sk_skb_cb { + unsigned char data[20]; + unsigned char pad[4]; + struct _strp_msg strp; + struct tls_msg tls; + u64 temp_reg; +}; + +struct xdp_sock { + struct sock sk; + struct xsk_queue *rx; + struct net_device *dev; + struct xdp_umem *umem; + struct list_head flush_node; + struct xsk_buff_pool *pool; + u16 queue_id; + bool zc; + bool sg; + enum { + XSK_READY = 0, + XSK_BOUND = 1, + XSK_UNBOUND = 2, + } state; + struct xsk_queue *tx; + struct list_head tx_list; + u32 tx_budget_spent; + spinlock_t rx_lock; + long: 32; + u64 rx_dropped; + u64 rx_queue_full; + struct sk_buff *skb; + struct list_head map_list; + spinlock_t map_list_lock; + struct mutex mutex; + struct xsk_queue 
*fq_tmp; + struct xsk_queue *cq_tmp; + long: 32; + long: 32; + long: 32; +}; + +struct tls_strparser { + struct sock *sk; + u32 mark: 8; + u32 stopped: 1; + u32 copy_mode: 1; + u32 mixed_decrypted: 1; + bool msg_ready; + struct strp_msg stm; + struct sk_buff *anchor; + struct work_struct work; +}; + +struct tls_sw_context_rx { + struct crypto_aead *aead_recv; + struct crypto_wait async_wait; + struct sk_buff_head rx_list; + void (*saved_data_ready)(struct sock *); + u8 reader_present; + u8 async_capable: 1; + u8 zc_capable: 1; + u8 reader_contended: 1; + struct tls_strparser strp; + atomic_t decrypt_pending; + struct sk_buff_head async_hold; + struct wait_queue_head wq; +}; + +struct sockaddr_un { + __kernel_sa_family_t sun_family; + char sun_path[108]; +}; + +typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64); + +typedef u64 (*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, void *, u32); + +typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_sk_fullsock)(struct sock *); + +typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum); + +typedef u64 (*btf_bpf_csum_update)(struct sk_buff *, __wsum); + +typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64); + +enum { + BPF_F_NEIGH = 65536, + BPF_F_PEER = 131072, + BPF_F_NEXTHOP = 262144, +}; + +typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_redirect)(u32, u64); + +typedef u64 (*btf_bpf_redirect_peer)(u32, u64); + +typedef u64 (*btf_bpf_redirect_neigh)(u32, struct bpf_redir_neigh *, int, u64); + +typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_get_cgroup_classid_curr)(); + +typedef u64 (*btf_bpf_skb_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_hash_recalc)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash)(struct sk_buff *, 
u32); + +typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16); + +typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64); + +typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32); + +typedef u64 (*btf_sk_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_xdp_get_buff_len)(struct xdp_buff *); + +typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_load_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_store_bytes)(struct xdp_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_redirect)(u32, u64); + +typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct sk_buff *, u8 *, u32); + +typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct sk_buff *, const struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32); + +typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32); + +typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *); + +typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *); + +typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int); + +typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_ptr_cookie)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sk_msg)(struct sk_msg *); + +typedef u64 (*btf_bpf_get_socket_uid)(struct sk_buff *); + +typedef u64 (*btf_bpf_sk_setsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_unlocked_sk_setsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_unlocked_sk_getsockopt)(struct sock *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct bpf_sock_ops_kern *, int, 
int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int); + +typedef u64 (*btf_bpf_bind)(struct bpf_sock_addr_kern *, struct sockaddr *, int); + +typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_check_mtu)(struct sk_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_xdp_check_mtu)(struct xdp_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tc_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_release)(struct sock *); + +typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_listener_sock)(struct sock *); + +typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *); + +typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64); + +typedef u64 (*btf_bpf_sock_ops_load_hdr_opt)(struct bpf_sock_ops_kern *, void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_store_hdr_opt)(struct bpf_sock_ops_kern *, const void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_reserve_hdr_opt)(struct bpf_sock_ops_kern *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tstamp)(struct sk_buff *, u64, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *, struct tcphdr *); + +typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *); + +typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, u32, void *, u32); + +typedef u64 
(*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_sk_lookup_assign)(struct bpf_sk_lookup_kern *, struct sock *, u64); + +typedef u64 (*btf_bpf_skc_to_tcp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_timewait_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_request_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_udp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_unix_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_mptcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_sock_from_file)(struct file *); + +enum { + ETHTOOL_A_STATS_GRP_UNSPEC = 0, + ETHTOOL_A_STATS_GRP_PAD = 1, + ETHTOOL_A_STATS_GRP_ID = 2, + ETHTOOL_A_STATS_GRP_SS_ID = 3, + ETHTOOL_A_STATS_GRP_STAT = 4, + ETHTOOL_A_STATS_GRP_HIST_RX = 5, + ETHTOOL_A_STATS_GRP_HIST_TX = 6, + ETHTOOL_A_STATS_GRP_HIST_BKT_LOW = 7, + ETHTOOL_A_STATS_GRP_HIST_BKT_HI = 8, + ETHTOOL_A_STATS_GRP_HIST_VAL = 9, + __ETHTOOL_A_STATS_GRP_CNT = 10, + ETHTOOL_A_STATS_GRP_MAX = 9, +}; + +struct stats_req_info { + struct ethnl_req_info base; + long unsigned int stat_mask[1]; + enum ethtool_mac_stats_src src; +}; + +struct stats_reply_data { + struct ethnl_reply_data base; + long: 32; + union { + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + }; + struct { + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + } stats; + }; + const struct ethtool_rmon_hist_range *rmon_ranges; + long: 32; +}; + +struct ip_auth_hdr { + __u8 nexthdr; + __u8 hdrlen; + __be16 reserved; + __be32 spi; + __be32 seq_no; + __u8 auth_data[0]; +}; + +struct ip_esp_hdr { + __be32 spi; + __be32 seq_no; + __u8 enc_data[0]; +}; + +struct arppayload { + unsigned char mac_src[6]; + unsigned char ip_src[4]; + unsigned char mac_dst[6]; + unsigned char ip_dst[4]; +}; + +struct __kernel_old_timespec { + __kernel_old_time_t tv_sec; + long int tv_nsec; +}; + +struct __kernel_sock_timeval { + __s64 tv_sec; + __s64 tv_usec; +}; + +struct dmabuf_cmsg { + __u64 frag_offset; + __u32 frag_size; + __u32 frag_token; + __u32 dmabuf_id; + __u32 flags; +}; + +struct scm_timestamping_internal { + struct timespec64 ts[3]; +}; + +struct dmabuf_genpool_chunk_owner { + long unsigned int base_virtual; + dma_addr_t base_dma_addr; + struct net_iov *niovs; + size_t num_niovs; + struct net_devmem_dmabuf_binding *binding; +}; + +struct mmpin { + struct user_struct *user; + unsigned int num_pg; +}; + +struct ubuf_info_msgzc { + struct ubuf_info ubuf; + union { + struct { + long unsigned int desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy: 1; + u32 bytelen; + }; + }; + struct mmpin mmp; +}; + +enum { + BPF_TCP_ESTABLISHED = 1, + BPF_TCP_SYN_SENT = 2, + BPF_TCP_SYN_RECV = 3, + BPF_TCP_FIN_WAIT1 = 4, + BPF_TCP_FIN_WAIT2 = 5, + BPF_TCP_TIME_WAIT = 6, + BPF_TCP_CLOSE = 7, + BPF_TCP_CLOSE_WAIT = 8, + BPF_TCP_LAST_ACK = 9, + BPF_TCP_LISTEN = 10, + BPF_TCP_CLOSING = 11, + BPF_TCP_NEW_SYN_RECV = 12, + BPF_TCP_BOUND_INACTIVE = 13, + BPF_TCP_MAX_STATES = 14, +}; + +struct tcpvegas_info { + __u32 tcpv_enabled; + __u32 tcpv_rttcnt; + __u32 tcpv_rtt; + __u32 tcpv_minrtt; +}; + +struct tcp_dctcp_info { + __u16 dctcp_enabled; + __u16 dctcp_ce_state; + __u32 dctcp_alpha; + __u32 dctcp_ab_ecn; + __u32 
dctcp_ab_tot; +}; + +struct tcp_bbr_info { + __u32 bbr_bw_lo; + __u32 bbr_bw_hi; + __u32 bbr_min_rtt; + __u32 bbr_pacing_gain; + __u32 bbr_cwnd_gain; +}; + +union tcp_cc_info { + struct tcpvegas_info vegas; + struct tcp_dctcp_info dctcp; + struct tcp_bbr_info bbr; +}; + +struct tcp_repair_opt { + __u32 opt_code; + __u32 opt_val; +}; + +struct tcp_repair_window { + __u32 snd_wl1; + __u32 snd_wnd; + __u32 max_window; + __u32 rcv_wnd; + __u32 rcv_wup; +}; + +enum { + TCP_NO_QUEUE = 0, + TCP_RECV_QUEUE = 1, + TCP_SEND_QUEUE = 2, + TCP_QUEUES_NR = 3, +}; + +struct tcp_info { + __u8 tcpi_state; + __u8 tcpi_ca_state; + __u8 tcpi_retransmits; + __u8 tcpi_probes; + __u8 tcpi_backoff; + __u8 tcpi_options; + __u8 tcpi_snd_wscale: 4; + __u8 tcpi_rcv_wscale: 4; + __u8 tcpi_delivery_rate_app_limited: 1; + __u8 tcpi_fastopen_client_fail: 2; + __u32 tcpi_rto; + __u32 tcpi_ato; + __u32 tcpi_snd_mss; + __u32 tcpi_rcv_mss; + __u32 tcpi_unacked; + __u32 tcpi_sacked; + __u32 tcpi_lost; + __u32 tcpi_retrans; + __u32 tcpi_fackets; + __u32 tcpi_last_data_sent; + __u32 tcpi_last_ack_sent; + __u32 tcpi_last_data_recv; + __u32 tcpi_last_ack_recv; + __u32 tcpi_pmtu; + __u32 tcpi_rcv_ssthresh; + __u32 tcpi_rtt; + __u32 tcpi_rttvar; + __u32 tcpi_snd_ssthresh; + __u32 tcpi_snd_cwnd; + __u32 tcpi_advmss; + __u32 tcpi_reordering; + __u32 tcpi_rcv_rtt; + __u32 tcpi_rcv_space; + __u32 tcpi_total_retrans; + __u64 tcpi_pacing_rate; + __u64 tcpi_max_pacing_rate; + __u64 tcpi_bytes_acked; + __u64 tcpi_bytes_received; + __u32 tcpi_segs_out; + __u32 tcpi_segs_in; + __u32 tcpi_notsent_bytes; + __u32 tcpi_min_rtt; + __u32 tcpi_data_segs_in; + __u32 tcpi_data_segs_out; + __u64 tcpi_delivery_rate; + __u64 tcpi_busy_time; + __u64 tcpi_rwnd_limited; + __u64 tcpi_sndbuf_limited; + __u32 tcpi_delivered; + __u32 tcpi_delivered_ce; + __u64 tcpi_bytes_sent; + __u64 tcpi_bytes_retrans; + __u32 tcpi_dsack_dups; + __u32 tcpi_reord_seen; + __u32 tcpi_rcv_ooopack; + __u32 tcpi_snd_wnd; + __u32 tcpi_rcv_wnd; + __u32 tcpi_rehash; + __u16 tcpi_total_rto; + __u16 tcpi_total_rto_recoveries; + __u32 tcpi_total_rto_time; +}; + +enum { + TCP_NLA_PAD = 0, + TCP_NLA_BUSY = 1, + TCP_NLA_RWND_LIMITED = 2, + TCP_NLA_SNDBUF_LIMITED = 3, + TCP_NLA_DATA_SEGS_OUT = 4, + TCP_NLA_TOTAL_RETRANS = 5, + TCP_NLA_PACING_RATE = 6, + TCP_NLA_DELIVERY_RATE = 7, + TCP_NLA_SND_CWND = 8, + TCP_NLA_REORDERING = 9, + TCP_NLA_MIN_RTT = 10, + TCP_NLA_RECUR_RETRANS = 11, + TCP_NLA_DELIVERY_RATE_APP_LMT = 12, + TCP_NLA_SNDQ_SIZE = 13, + TCP_NLA_CA_STATE = 14, + TCP_NLA_SND_SSTHRESH = 15, + TCP_NLA_DELIVERED = 16, + TCP_NLA_DELIVERED_CE = 17, + TCP_NLA_BYTES_SENT = 18, + TCP_NLA_BYTES_RETRANS = 19, + TCP_NLA_DSACK_DUPS = 20, + TCP_NLA_REORD_SEEN = 21, + TCP_NLA_SRTT = 22, + TCP_NLA_TIMEOUT_REHASH = 23, + TCP_NLA_BYTES_NOTSENT = 24, + TCP_NLA_EDT = 25, + TCP_NLA_TTL = 26, + TCP_NLA_REHASH = 27, +}; + +struct tcp_zerocopy_receive { + __u64 address; + __u32 length; + __u32 recv_skip_hint; + __u32 inq; + __s32 err; + __u64 copybuf_address; + __s32 copybuf_len; + __u32 flags; + __u64 msg_control; + __u64 msg_controllen; + __u32 msg_flags; + __u32 reserved; +}; + +enum tcp_skb_cb_sacked_flags { + TCPCB_SACKED_ACKED = 1, + TCPCB_SACKED_RETRANS = 2, + TCPCB_LOST = 4, + TCPCB_TAGBITS = 7, + TCPCB_REPAIRED = 16, + TCPCB_EVER_RETRANS = 128, + TCPCB_RETRANS = 146, +}; + +enum tcp_chrono { + TCP_CHRONO_UNSPEC = 0, + TCP_CHRONO_BUSY = 1, + TCP_CHRONO_RWND_LIMITED = 2, + TCP_CHRONO_SNDBUF_LIMITED = 3, + __TCP_CHRONO_MAX = 4, +}; + +enum { + TCP_CMSG_INQ = 1, + TCP_CMSG_TS = 2, +}; + +struct 
tcp_splice_state { + struct pipe_inode_info *pipe; + size_t len; + unsigned int flags; +}; + +struct tcp_xa_pool { + u8 max; + u8 idx; + __u32 tokens[17]; + netmem_ref netmems[17]; +}; + +enum ip_defrag_users { + IP_DEFRAG_LOCAL_DELIVER = 0, + IP_DEFRAG_CALL_RA_CHAIN = 1, + IP_DEFRAG_CONNTRACK_IN = 2, + __IP_DEFRAG_CONNTRACK_IN_END = 65537, + IP_DEFRAG_CONNTRACK_OUT = 65538, + __IP_DEFRAG_CONNTRACK_OUT_END = 131073, + IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, + __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, + IP_DEFRAG_VS_IN = 196610, + IP_DEFRAG_VS_OUT = 196611, + IP_DEFRAG_VS_FWD = 196612, + IP_DEFRAG_AF_PACKET = 196613, + IP_DEFRAG_MACVLAN = 196614, +}; + +struct __bridge_info { + __u64 designated_root; + __u64 bridge_id; + __u32 root_path_cost; + __u32 max_age; + __u32 hello_time; + __u32 forward_delay; + __u32 bridge_max_age; + __u32 bridge_hello_time; + __u32 bridge_forward_delay; + __u8 topology_change; + __u8 topology_change_detected; + __u8 root_port; + __u8 stp_enabled; + __u32 ageing_time; + __u32 gc_interval; + __u32 hello_timer_value; + __u32 tcn_timer_value; + __u32 topology_change_timer_value; + __u32 gc_timer_value; +}; + +struct __port_info { + __u64 designated_root; + __u64 designated_bridge; + __u16 port_id; + __u16 designated_port; + __u32 path_cost; + __u32 designated_cost; + __u8 state; + __u8 top_change_ack; + __u8 config_pending; + __u8 unused0; + __u32 message_age_timer_value; + __u32 forward_delay_timer_value; + __u32 hold_timer_value; + long: 32; +}; + +typedef void (*perf_irq_t)(struct pt_regs *); + +enum { + IRQCHIP_FWNODE_REAL = 0, + IRQCHIP_FWNODE_NAMED = 1, + IRQCHIP_FWNODE_NAMED_ID = 2, +}; + +struct irqchip_fwid { + struct fwnode_handle fwnode; + unsigned int type; + char *name; + phys_addr_t *pa; +}; + +struct trace_event_raw_csd_queue_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *func; + void *csd; + char __data[0]; +}; + +struct trace_event_raw_csd_function { + struct trace_entry ent; + void *func; + void *csd; + char __data[0]; +}; + +struct trace_event_data_offsets_csd_queue_cpu {}; + +struct trace_event_data_offsets_csd_function {}; + +typedef void (*btf_trace_csd_queue_cpu)(void *, const unsigned int, long unsigned int, smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_entry)(void *, smp_call_func_t, call_single_data_t *); + +typedef void (*btf_trace_csd_function_exit)(void *, smp_call_func_t, call_single_data_t *); + +struct call_function_data { + call_single_data_t *csd; + cpumask_var_t cpumask; + cpumask_var_t cpumask_ipi; +}; + +struct smp_call_on_cpu_struct { + struct work_struct work; + struct completion done; + int (*func)(void *); + void *data; + int ret; + int cpu; +}; + +enum bpf_perf_event_type { + BPF_PERF_EVENT_UNSPEC = 0, + BPF_PERF_EVENT_UPROBE = 1, + BPF_PERF_EVENT_URETPROBE = 2, + BPF_PERF_EVENT_KPROBE = 3, + BPF_PERF_EVENT_KRETPROBE = 4, + BPF_PERF_EVENT_TRACEPOINT = 5, + BPF_PERF_EVENT_EVENT = 6, +}; + +enum bpf_stats_type { + BPF_STATS_RUN_TIME = 0, +}; + +struct bpf_btf_info { + __u64 btf; + __u32 btf_size; + __u32 id; + __u64 name; + __u32 name_len; + __u32 kernel_btf; +}; + +struct bpf_spin_lock { + __u32 val; +}; + +struct bpf_tracing_link { + struct bpf_tramp_link link; + enum bpf_attach_type attach_type; + struct bpf_trampoline *trampoline; + struct bpf_prog *tgt_prog; + long: 32; +}; + +enum perf_bpf_event_type { + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX = 3, +}; + +enum bpf_audit { + 
BPF_AUDIT_LOAD = 0, + BPF_AUDIT_UNLOAD = 1, + BPF_AUDIT_MAX = 2, +}; + +struct bpf_prog_kstats { + u64 nsecs; + u64 cnt; + u64 misses; +}; + +struct bpf_perf_link { + struct bpf_link link; + struct file *perf_file; + long: 32; +}; + +typedef u64 (*btf_bpf_sys_bpf)(int, union bpf_attr *, u32); + +typedef u64 (*btf_bpf_sys_close)(u32); + +typedef u64 (*btf_bpf_kallsyms_lookup_name)(const char *, int, int, u64 *); + +struct audit_buffer; + +struct trace_event_raw_oom_score_adj_update { + struct trace_entry ent; + pid_t pid; + char comm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_reclaim_retry_zone { + struct trace_entry ent; + int node; + int zone_idx; + int order; + long unsigned int reclaimable; + long unsigned int available; + long unsigned int min_wmark; + int no_progress_loops; + bool wmark_check; + char __data[0]; +}; + +struct trace_event_raw_mark_victim { + struct trace_entry ent; + int pid; + u32 __data_loc_comm; + long unsigned int total_vm; + long unsigned int anon_rss; + long unsigned int file_rss; + long unsigned int shmem_rss; + uid_t uid; + long unsigned int pgtables; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_wake_reaper { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_start_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_finish_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_skip_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_compact_retry { + struct trace_entry ent; + int order; + int priority; + int result; + int retries; + int max_retries; + bool ret; + char __data[0]; +}; + +struct trace_event_data_offsets_oom_score_adj_update {}; + +struct trace_event_data_offsets_reclaim_retry_zone {}; + +struct trace_event_data_offsets_mark_victim { + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_wake_reaper {}; + +struct trace_event_data_offsets_start_task_reaping {}; + +struct trace_event_data_offsets_finish_task_reaping {}; + +struct trace_event_data_offsets_skip_task_reaping {}; + +struct trace_event_data_offsets_compact_retry {}; + +typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *); + +typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, long unsigned int, long unsigned int, long unsigned int, int, bool); + +typedef void (*btf_trace_mark_victim)(void *, struct task_struct *, uid_t); + +typedef void (*btf_trace_wake_reaper)(void *, int); + +typedef void (*btf_trace_start_task_reaping)(void *, int); + +typedef void (*btf_trace_finish_task_reaping)(void *, int); + +typedef void (*btf_trace_skip_task_reaping)(void *, int); + +typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, enum compact_result, int, int, bool); + +struct vmap_area { + long unsigned int va_start; + long unsigned int va_end; + struct rb_node rb_node; + struct list_head list; + union { + long unsigned int subtree_max_size; + struct vm_struct *vm; + }; + long unsigned int flags; +}; + +typedef unsigned int kasan_vmalloc_flags_t; + +enum memcg_stat_item { + MEMCG_SWAP = 45, + MEMCG_SOCK = 46, + MEMCG_PERCPU_B = 47, + MEMCG_VMALLOC = 48, + MEMCG_KMEM = 49, + MEMCG_ZSWAP_B = 50, + MEMCG_ZSWAPPED = 51, + MEMCG_NR_STAT = 52, +}; + +struct trace_event_raw_alloc_vmap_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int size; + long unsigned 
int align; + long unsigned int vstart; + long unsigned int vend; + int failed; + char __data[0]; +}; + +struct trace_event_raw_purge_vmap_area_lazy { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + unsigned int npurged; + char __data[0]; +}; + +struct trace_event_raw_free_vmap_area_noflush { + struct trace_entry ent; + long unsigned int va_start; + long unsigned int nr_lazy; + long unsigned int nr_lazy_max; + char __data[0]; +}; + +struct trace_event_data_offsets_alloc_vmap_area {}; + +struct trace_event_data_offsets_purge_vmap_area_lazy {}; + +struct trace_event_data_offsets_free_vmap_area_noflush {}; + +typedef void (*btf_trace_alloc_vmap_area)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_purge_vmap_area_lazy)(void *, long unsigned int, long unsigned int, unsigned int); + +typedef void (*btf_trace_free_vmap_area_noflush)(void *, long unsigned int, long unsigned int, long unsigned int); + +struct vfree_deferred { + struct llist_head list; + struct work_struct wq; +}; + +struct rb_list { + struct rb_root root; + struct list_head head; + spinlock_t lock; +}; + +struct vmap_pool { + struct list_head head; + long unsigned int len; +}; + +struct vmap_node { + struct vmap_pool pool[256]; + spinlock_t pool_lock; + bool skip_populate; + struct rb_list busy; + struct rb_list lazy; + struct list_head purge_list; + struct work_struct purge_work; + long unsigned int nr_purged; +}; + +enum fit_type { + NOTHING_FIT = 0, + FL_FIT_TYPE = 1, + LE_FIT_TYPE = 2, + RE_FIT_TYPE = 3, + NE_FIT_TYPE = 4, +}; + +struct vmap_block_queue { + spinlock_t lock; + struct list_head free; + struct xarray vmap_blocks; +}; + +struct vmap_block { + spinlock_t lock; + struct vmap_area *va; + long unsigned int free; + long unsigned int dirty; + long unsigned int used_map[2]; + long unsigned int dirty_min; + long unsigned int dirty_max; + struct list_head free_list; + struct callback_head callback_head; + struct list_head purge; + unsigned int cpu; +}; + +typedef __kernel_mode_t mode_t; + +struct dx_hash_info { + u32 hash; + u32 minor_hash; + int hash_version; + u32 *seed; +}; + +struct orlov_stats { + __u64 free_clusters; + __u32 free_inodes; + __u32 used_dirs; +}; + +struct fat_cache { + struct list_head cache_list; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct fat_cache_id { + unsigned int id; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct eventfs_root_inode { + struct eventfs_inode ei; + struct dentry *events_dir; +}; + +enum { + EVENTFS_SAVE_MODE = 65536, + EVENTFS_SAVE_UID = 131072, + EVENTFS_SAVE_GID = 262144, +}; + +struct workspace___2 { + z_stream strm; + char *buf; + unsigned int buf_size; + struct list_head list; + int level; +}; + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; +}; + +enum { + SKCIPHER_WALK_PHYS = 1, + SKCIPHER_WALK_SLOW = 2, + SKCIPHER_WALK_COPY = 4, + SKCIPHER_WALK_DIFF = 8, + SKCIPHER_WALK_SLEEP = 16, +}; + +struct skcipher_walk_buffer { + struct list_head entry; + struct scatter_walk dst; + unsigned int len; + u8 *data; + u8 buffer[0]; +}; + +struct fat_boot_sector { + __u8 ignored[3]; + __u8 system_id[8]; + __u8 sector_size[2]; + __u8 sec_per_clus; + __le16 reserved; + __u8 fats; + __u8 dir_entries[2]; + __u8 sectors[2]; + __u8 media; + __le16 fat_length; + __le16 secs_track; + __le16 heads; + __le32 hidden; + __le32 total_sect; + union { + struct { + __u8 
drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat16; + struct { + __le32 length; + __le16 flags; + __u8 version[2]; + __le32 root_cluster; + __le16 info_sector; + __le16 backup_boot; + __le16 reserved2[6]; + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat32; + }; +}; + +struct msdos_partition { + u8 boot_ind; + u8 head; + u8 sector; + u8 cyl; + u8 sys_ind; + u8 end_head; + u8 end_sector; + u8 end_cyl; + __le32 start_sect; + __le32 nr_sects; +}; + +enum msdos_sys_ind { + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 133, + WIN98_EXTENDED_PARTITION = 15, + LINUX_DATA_PARTITION = 131, + LINUX_LVM_PARTITION = 142, + LINUX_RAID_PARTITION = 253, + SOLARIS_X86_PARTITION = 130, + NEW_SOLARIS_X86_PARTITION = 191, + DM6_AUX1PARTITION = 81, + DM6_AUX3PARTITION = 83, + DM6_PARTITION = 84, + EZD_PARTITION = 85, + FREEBSD_PARTITION = 165, + OPENBSD_PARTITION = 166, + NETBSD_PARTITION = 169, + BSDI_PARTITION = 183, + MINIX_PARTITION = 129, + UNIXWARE_PARTITION = 99, +}; + +struct rq_depth { + unsigned int max_depth; + int scale_step; + bool scaled_max; + unsigned int queue_depth; + unsigned int default_depth; +}; + +struct trace_event_raw_wbt_stat { + struct trace_entry ent; + char name[32]; + s64 rmean; + u64 rmin; + u64 rmax; + s64 rnr_samples; + s64 rtime; + s64 wmean; + u64 wmin; + u64 wmax; + s64 wnr_samples; + s64 wtime; + char __data[0]; +}; + +struct trace_event_raw_wbt_lat { + struct trace_entry ent; + char name[32]; + long unsigned int lat; + char __data[0]; +}; + +struct trace_event_raw_wbt_step { + struct trace_entry ent; + char name[32]; + const char *msg; + int step; + long unsigned int window; + unsigned int bg; + unsigned int normal; + unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_wbt_timer { + struct trace_entry ent; + char name[32]; + unsigned int status; + int step; + unsigned int inflight; + char __data[0]; +}; + +struct trace_event_data_offsets_wbt_stat {}; + +struct trace_event_data_offsets_wbt_lat {}; + +struct trace_event_data_offsets_wbt_step {}; + +struct trace_event_data_offsets_wbt_timer {}; + +typedef void (*btf_trace_wbt_stat)(void *, struct backing_dev_info *, struct blk_rq_stat *); + +typedef void (*btf_trace_wbt_lat)(void *, struct backing_dev_info *, long unsigned int); + +typedef void (*btf_trace_wbt_step)(void *, struct backing_dev_info *, const char *, int, long unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_wbt_timer)(void *, struct backing_dev_info *, unsigned int, int, unsigned int); + +enum wbt_flags { + WBT_TRACKED = 1, + WBT_READ = 2, + WBT_SWAP = 4, + WBT_DISCARD = 8, + WBT_NR_BITS = 4, +}; + +enum { + WBT_RWQ_BG = 0, + WBT_RWQ_SWAP = 1, + WBT_RWQ_DISCARD = 2, + WBT_NUM_RWQ = 3, +}; + +enum { + WBT_STATE_ON_DEFAULT = 1, + WBT_STATE_ON_MANUAL = 2, + WBT_STATE_OFF_DEFAULT = 3, + WBT_STATE_OFF_MANUAL = 4, +}; + +struct rq_wb { + unsigned int wb_background; + unsigned int wb_normal; + short int enable_state; + unsigned int unknown_cnt; + u64 win_nsec; + u64 cur_win_nsec; + struct blk_stat_callback *cb; + long: 32; + u64 sync_issue; + void *sync_cookie; + long unsigned int last_issue; + long unsigned int last_comp; + long unsigned int min_lat_nsec; + struct rq_qos rqos; + struct rq_wait rq_wait[3]; + struct rq_depth rq_depth; +}; + +enum { + RWB_DEF_DEPTH = 16, + RWB_WINDOW_NSEC = 100000000, + RWB_MIN_WRITE_SAMPLES = 3, + RWB_UNKNOWN_BUMP = 5, +}; + +enum { + LAT_OK = 
1, + LAT_UNKNOWN = 2, + LAT_UNKNOWN_WRITES = 3, + LAT_EXCEEDED = 4, +}; + +struct wbt_wait_data { + struct rq_wb *rwb; + enum wbt_flags wb_acct; + blk_opf_t opf; +}; + +struct sg_append_table { + struct sg_table sgt; + struct scatterlist *prv; + unsigned int total_nents; +}; + +struct sg_dma_page_iter { + struct sg_page_iter base; +}; + +struct ZSTD_DDict_s { + void *dictBuffer; + const void *dictContent; + size_t dictSize; + ZSTD_entropyDTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; + +typedef enum { + ZSTD_dlm_byCopy = 0, + ZSTD_dlm_byRef = 1, +} ZSTD_dictLoadMethod_e; + +typedef ZSTD_customMem zstd_custom_mem; + +typedef ZSTD_DCtx zstd_dctx; + +typedef ZSTD_DDict zstd_ddict; + +typedef ZSTD_frameHeader zstd_frame_header; + +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + __le16 queue_notify_data; + __le16 queue_reset; + __le16 admin_queue_index; + __le16 admin_queue_num; +}; + +struct device_attach_data { + struct device *dev; + bool check_async; + bool want_async; + bool have_async; +}; + +struct macio_chip; + +struct macio_bus { + struct macio_chip *chip; + int index; + struct pci_dev *pdev; +}; + +struct macio_chip { + struct device_node *of_node; + int type; + const char *name; + int rev; + volatile u32 *base; + long unsigned int flags; + struct macio_bus lbus; +}; + +struct macio_dev { + struct macio_bus *bus; + struct macio_dev *media_bay; + struct platform_device ofdev; + struct device_dma_parameters dma_parms; + int n_resources; + struct resource resource[8]; + int n_interrupts; + struct resource interrupt[8]; + long: 32; +}; + +struct macio_driver { + int (*probe)(struct macio_dev *, const struct of_device_id *); + void (*remove)(struct macio_dev *); + int (*suspend)(struct macio_dev *, pm_message_t); + int (*resume)(struct macio_dev *); + int (*shutdown)(struct macio_dev *); + struct device_driver driver; +}; + +enum { + macio_unknown = 0, + macio_grand_central = 1, + macio_ohare = 2, + macio_ohareII = 3, + macio_heathrow = 4, + macio_gatwick = 5, + macio_paddington = 6, + macio_keylargo = 7, + macio_pangea = 8, + macio_intrepid = 9, + macio_keylargo2 = 10, + macio_shasta = 11, +}; + +struct macio_devres { + u32 res_mask; +}; + +struct ata_internal { + struct scsi_transport_template t; + struct device_attribute private_port_attrs[3]; + struct device_attribute private_link_attrs[3]; + struct device_attribute private_dev_attrs[9]; + struct transport_container link_attr_cont; + struct transport_container dev_attr_cont; + struct device_attribute *link_attrs[4]; + struct device_attribute *port_attrs[4]; + struct device_attribute *dev_attrs[10]; +}; + +struct ata_show_ering_arg { + char *buf; + int written; +}; + +struct bulk_cb_wrap { + __le32 Signature; + __u32 Tag; + __le32 DataTransferLength; + __u8 Flags; + __u8 Lun; + __u8 Length; + __u8 CDB[16]; +}; + +struct bulk_cs_wrap { + __le32 Signature; + __u32 Tag; + __le32 Residue; + __u8 Status; +}; + +struct i2c_smbus_alert_setup { + int irq; +}; + +struct trace_event_raw_smbus_write { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_read { + struct trace_entry ent; + int adapter_nr; + __u16 flags; + __u16 addr; + __u8 command; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_reply { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 
protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_result { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 read_write; + __u8 command; + __s16 res; + __u32 protocol; + char __data[0]; +}; + +struct trace_event_data_offsets_smbus_write {}; + +struct trace_event_data_offsets_smbus_read {}; + +struct trace_event_data_offsets_smbus_reply {}; + +struct trace_event_data_offsets_smbus_result {}; + +typedef void (*btf_trace_smbus_write)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *); + +typedef void (*btf_trace_smbus_read)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int); + +typedef void (*btf_trace_smbus_reply)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *, int); + +typedef void (*btf_trace_smbus_result)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, int); + +enum check_states { + check_state_idle = 0, + check_state_run = 1, + check_state_run_q = 2, + check_state_run_pq = 3, + check_state_check_result = 4, + check_state_compute_run = 5, + check_state_compute_result = 6, +}; + +enum reconstruct_states { + reconstruct_state_idle = 0, + reconstruct_state_prexor_drain_run = 1, + reconstruct_state_drain_run = 2, + reconstruct_state_run = 3, + reconstruct_state_prexor_drain_result = 4, + reconstruct_state_drain_result = 5, + reconstruct_state_result = 6, +}; + +struct stripe_operations { + int target; + int target2; + enum sum_check_flags zero_sum_result; +}; + +struct r5dev { + struct bio req; + struct bio rreq; + struct bio_vec vec; + struct bio_vec rvec; + struct page *page; + struct page *orig_page; + unsigned int offset; + struct bio *toread; + struct bio *read; + struct bio *towrite; + struct bio *written; + long: 32; + sector_t sector; + long unsigned int flags; + u32 log_checksum; + short unsigned int write_hint; + long: 32; +}; + +struct r5conf; + +struct r5worker_group; + +struct r5l_io_unit; + +struct ppl_io_unit; + +struct stripe_head { + struct hlist_node hash; + struct list_head lru; + struct llist_node release_list; + struct r5conf *raid_conf; + short int generation; + long: 32; + sector_t sector; + short int pd_idx; + short int qd_idx; + short int ddf_layout; + short int hash_lock_index; + long unsigned int state; + atomic_t count; + int bm_seq; + int disks; + int overwrite_disks; + enum check_states check_state; + enum reconstruct_states reconstruct_state; + spinlock_t stripe_lock; + int cpu; + struct r5worker_group *group; + struct stripe_head *batch_head; + spinlock_t batch_lock; + struct list_head batch_list; + union { + struct r5l_io_unit *log_io; + struct ppl_io_unit *ppl_io; + }; + struct list_head log_list; + long: 32; + sector_t log_start; + struct list_head r5c; + struct page *ppl_page; + struct stripe_operations ops; + struct r5dev dev[0]; +}; + +struct raid5_percpu; + +struct disk_info; + +struct r5l_log; + +struct r5pending_data; + +struct r5conf { + struct hlist_head *stripe_hashtbl; + spinlock_t hash_locks[8]; + struct mddev *mddev; + int chunk_sectors; + int level; + int algorithm; + int rmw_level; + int max_degraded; + int raid_disks; + int max_nr_stripes; + int min_nr_stripes; + sector_t reshape_progress; + sector_t reshape_safe; + int previous_raid_disks; + int prev_chunk_sectors; + int prev_algo; + short int generation; + seqcount_spinlock_t gen_lock; + long unsigned int reshape_checkpoint; + long long int min_offset_diff; + struct 
list_head handle_list; + struct list_head loprio_list; + struct list_head hold_list; + struct list_head delayed_list; + struct list_head bitmap_list; + struct bio *retry_read_aligned; + unsigned int retry_read_offset; + struct bio *retry_read_aligned_list; + atomic_t preread_active_stripes; + atomic_t active_aligned_reads; + atomic_t pending_full_writes; + int bypass_count; + int bypass_threshold; + int skip_copy; + struct list_head *last_hold; + atomic_t reshape_stripes; + int active_name; + char cache_name[96]; + struct kmem_cache *slab_cache; + struct mutex cache_size_mutex; + int seq_flush; + int seq_write; + int quiesce; + int fullsync; + int recovery_disabled; + struct raid5_percpu *percpu; + int scribble_disks; + int scribble_sectors; + struct hlist_node node; + atomic_t active_stripes; + struct list_head inactive_list[8]; + atomic_t r5c_cached_full_stripes; + struct list_head r5c_full_stripe_list; + atomic_t r5c_cached_partial_stripes; + struct list_head r5c_partial_stripe_list; + atomic_t r5c_flushing_full_stripes; + atomic_t r5c_flushing_partial_stripes; + atomic_t empty_inactive_list_nr; + struct llist_head released_stripes; + wait_queue_head_t wait_for_quiescent; + wait_queue_head_t wait_for_stripe; + wait_queue_head_t wait_for_reshape; + long unsigned int cache_state; + struct shrinker *shrinker; + int pool_size; + spinlock_t device_lock; + struct disk_info *disks; + struct bio_set bio_split; + struct md_thread *thread; + struct list_head temp_inactive_list[8]; + struct r5worker_group *worker_groups; + int group_cnt; + int worker_cnt_per_group; + struct r5l_log *log; + void *log_private; + spinlock_t pending_bios_lock; + bool batch_bio_dispatch; + struct r5pending_data *pending_data; + struct list_head free_list; + struct list_head pending_list; + int pending_data_cnt; + struct r5pending_data *next_pending_data; +}; + +struct r5worker; + +struct r5worker_group { + struct list_head handle_list; + struct list_head loprio_list; + struct r5conf *conf; + struct r5worker *workers; + int stripes_cnt; +}; + +struct stripe_head_state { + int syncing; + int expanding; + int expanded; + int replacing; + int locked; + int uptodate; + int to_read; + int to_write; + int failed; + int written; + int to_fill; + int compute; + int req_compute; + int non_overwrite; + int injournal; + int just_cached; + int failed_num[2]; + int p_failed; + int q_failed; + int dec_preread_active; + long unsigned int ops_request; + struct md_rdev *blocked_rdev; + int handle_bad_blocks; + int log_failed; + int waiting_extra_page; +}; + +enum r5dev_flags { + R5_UPTODATE = 0, + R5_LOCKED = 1, + R5_DOUBLE_LOCKED = 2, + R5_OVERWRITE = 3, + R5_Insync = 4, + R5_Wantread = 5, + R5_Wantwrite = 6, + R5_Overlap = 7, + R5_ReadNoMerge = 8, + R5_ReadError = 9, + R5_ReWrite = 10, + R5_Expanded = 11, + R5_Wantcompute = 12, + R5_Wantfill = 13, + R5_Wantdrain = 14, + R5_WantFUA = 15, + R5_SyncIO = 16, + R5_WriteError = 17, + R5_MadeGood = 18, + R5_ReadRepl = 19, + R5_MadeGoodRepl = 20, + R5_NeedReplace = 21, + R5_WantReplace = 22, + R5_Discard = 23, + R5_SkipCopy = 24, + R5_InJournal = 25, + R5_OrigPageUPTDODATE = 26, +}; + +enum { + STRIPE_ACTIVE = 0, + STRIPE_HANDLE = 1, + STRIPE_SYNC_REQUESTED = 2, + STRIPE_SYNCING = 3, + STRIPE_INSYNC = 4, + STRIPE_REPLACED = 5, + STRIPE_PREREAD_ACTIVE = 6, + STRIPE_DELAYED = 7, + STRIPE_DEGRADED = 8, + STRIPE_BIT_DELAY = 9, + STRIPE_EXPANDING = 10, + STRIPE_EXPAND_SOURCE = 11, + STRIPE_EXPAND_READY = 12, + STRIPE_IO_STARTED = 13, + STRIPE_FULL_WRITE = 14, + STRIPE_BIOFILL_RUN = 15, + 
STRIPE_COMPUTE_RUN = 16, + STRIPE_ON_UNPLUG_LIST = 17, + STRIPE_DISCARD = 18, + STRIPE_ON_RELEASE_LIST = 19, + STRIPE_BATCH_READY = 20, + STRIPE_BATCH_ERR = 21, + STRIPE_BITMAP_PENDING = 22, + STRIPE_LOG_TRAPPED = 23, + STRIPE_R5C_CACHING = 24, + STRIPE_R5C_PARTIAL_STRIPE = 25, + STRIPE_R5C_FULL_STRIPE = 26, + STRIPE_R5C_PREFLUSH = 27, +}; + +enum { + STRIPE_OP_BIOFILL = 0, + STRIPE_OP_COMPUTE_BLK = 1, + STRIPE_OP_PREXOR = 2, + STRIPE_OP_BIODRAIN = 3, + STRIPE_OP_RECONSTRUCT = 4, + STRIPE_OP_CHECK = 5, + STRIPE_OP_PARTIAL_PARITY = 6, +}; + +enum { + PARITY_DISABLE_RMW = 0, + PARITY_ENABLE_RMW = 1, + PARITY_PREFER_RMW = 2, +}; + +enum { + SYNDROME_SRC_ALL = 0, + SYNDROME_SRC_WANT_DRAIN = 1, + SYNDROME_SRC_WRITTEN = 2, +}; + +struct disk_info { + struct md_rdev *rdev; + struct md_rdev *replacement; + struct page *extra_page; +}; + +struct r5worker { + struct work_struct work; + struct r5worker_group *group; + struct list_head temp_inactive_list[8]; + bool working; +}; + +enum r5_cache_state { + R5_INACTIVE_BLOCKED = 0, + R5_ALLOC_MORE = 1, + R5_DID_ALLOC = 2, + R5C_LOG_TIGHT = 3, + R5C_LOG_CRITICAL = 4, + R5C_EXTRA_PAGE_IN_USE = 5, +}; + +struct r5pending_data { + struct list_head sibling; + sector_t sector; + struct bio_list bios; +}; + +struct raid5_percpu { + struct page *spare_page; + void *scribble; + int scribble_obj_size; + local_lock_t lock; +}; + +enum stripe_result { + STRIPE_SUCCESS = 0, + STRIPE_RETRY = 1, + STRIPE_SCHEDULE_AND_RETRY = 2, + STRIPE_FAIL = 3, + STRIPE_WAIT_RESHAPE = 4, +}; + +struct stripe_request_ctx { + struct stripe_head *batch_last; + long: 32; + sector_t first_sector; + sector_t last_sector; + long unsigned int sectors_to_do[9]; + bool do_flush; +}; + +struct raid5_plug_cb { + struct blk_plug_cb cb; + struct list_head list; + struct list_head temp_inactive_list[8]; +}; + +enum reshape_loc { + LOC_NO_RESHAPE = 0, + LOC_AHEAD_OF_RESHAPE = 1, + LOC_INSIDE_RESHAPE = 2, + LOC_BEHIND_RESHAPE = 3, +}; + +struct sch_frag_data { + long unsigned int dst; + struct qdisc_skb_cb cb; + __be16 inner_protocol; + u16 vlan_tci; + __be16 vlan_proto; + unsigned int l2_len; + u8 l2_data[18]; + int (*xmit)(struct sk_buff *); +}; + +enum { + ETHTOOL_A_MM_STAT_UNSPEC = 0, + ETHTOOL_A_MM_STAT_PAD = 1, + ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS = 2, + ETHTOOL_A_MM_STAT_SMD_ERRORS = 3, + ETHTOOL_A_MM_STAT_REASSEMBLY_OK = 4, + ETHTOOL_A_MM_STAT_RX_FRAG_COUNT = 5, + ETHTOOL_A_MM_STAT_TX_FRAG_COUNT = 6, + ETHTOOL_A_MM_STAT_HOLD_COUNT = 7, + __ETHTOOL_A_MM_STAT_CNT = 8, + ETHTOOL_A_MM_STAT_MAX = 7, +}; + +struct mm_reply_data { + struct ethnl_reply_data base; + struct ethtool_mm_state state; + long: 32; + struct ethtool_mm_stats stats; +}; + +struct nf_nat_lookup_hook_priv { + struct nf_hook_entries *entries; + struct callback_head callback_head; +}; + +struct nf_nat_hooks_net { + struct nf_hook_ops *nat_hook_ops; + unsigned int users; +}; + +struct nat_net { + struct nf_nat_hooks_net nat_proto_net[11]; +}; + +struct nf_nat_proto_clean { + u8 l3proto; + u8 l4proto; +}; + +enum ctattr_nat { + CTA_NAT_UNSPEC = 0, + CTA_NAT_V4_MINIP = 1, + CTA_NAT_V4_MAXIP = 2, + CTA_NAT_PROTO = 3, + CTA_NAT_V6_MINIP = 4, + CTA_NAT_V6_MAXIP = 5, + __CTA_NAT_MAX = 6, +}; + +enum ctattr_protonat { + CTA_PROTONAT_UNSPEC = 0, + CTA_PROTONAT_PORT_MIN = 1, + CTA_PROTONAT_PORT_MAX = 2, + __CTA_PROTONAT_MAX = 3, +}; + +struct xt_tcp { + __u16 spts[2]; + __u16 dpts[2]; + __u8 option; + __u8 flg_mask; + __u8 flg_cmp; + __u8 invflags; +}; + +struct xt_udp { + __u16 spts[2]; + __u16 dpts[2]; + __u8 invflags; +}; + +struct 
ipt_icmp { + __u8 type; + __u8 code[2]; + __u8 invflags; +}; + +struct ip6t_icmp { + __u8 type; + __u8 code[2]; + __u8 invflags; +}; + +enum tcp_queue { + TCP_FRAG_IN_WRITE_QUEUE = 0, + TCP_FRAG_IN_RTX_QUEUE = 1, +}; + +struct ip_tunnel_parm { + char name[16]; + int link; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + struct iphdr iph; +}; + +enum tunnel_encap_types { + TUNNEL_ENCAP_NONE = 0, + TUNNEL_ENCAP_FOU = 1, + TUNNEL_ENCAP_GUE = 2, + TUNNEL_ENCAP_MPLS = 3, +}; + +struct gro_cell; + +struct gro_cells { + struct gro_cell *cells; +}; + +struct ip_tunnel_prl_entry { + struct ip_tunnel_prl_entry *next; + __be32 addr; + u16 flags; + struct callback_head callback_head; +}; + +struct ip_tunnel { + struct ip_tunnel *next; + struct hlist_node hash_node; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net *net; + long unsigned int err_time; + int err_count; + u32 i_seqno; + atomic_t o_seqno; + int tun_hlen; + u32 index; + u8 erspan_ver; + u8 dir; + u16 hwid; + struct dst_cache dst_cache; + struct ip_tunnel_parm_kern parms; + int mlink; + int encap_hlen; + int hlen; + struct ip_tunnel_encap encap; + struct ip_tunnel_prl_entry *prl; + unsigned int prl_count; + unsigned int ip_tnl_net_id; + struct gro_cells gro_cells; + __u32 fwmark; + bool collect_md; + bool ignore_df; +}; + +struct tnl_ptk_info { + long unsigned int flags[1]; + __be16 proto; + __be32 key; + __be32 seq; + int hdr_len; +}; + +struct ip_tunnel_net { + struct net_device *fb_tunnel_dev; + struct rtnl_link_ops *rtnl_link_ops; + struct hlist_head tunnels[128]; + struct ip_tunnel *collect_md_tun; + int type; +}; + +struct ioam6_pernet_data { + struct mutex lock; + struct rhashtable namespaces; + struct rhashtable schemas; +}; + +struct rt0_hdr { + struct ipv6_rt_hdr rt_hdr; + __u32 reserved; + struct in6_addr addr[0]; +}; + +struct ioam6_hdr { + __u8 opt_type; + __u8 opt_len; + char: 8; + __u8 type; +}; + +struct ioam6_trace_hdr { + __be16 namespace_id; + __u8 nodelen: 5; + __u8 overflow: 1; + char: 2; + char: 1; + __u8 remlen: 7; + union { + __be32 type_be32; + struct { + __u32 bit0: 1; + __u32 bit1: 1; + __u32 bit2: 1; + __u32 bit3: 1; + __u32 bit4: 1; + __u32 bit5: 1; + __u32 bit6: 1; + __u32 bit7: 1; + __u32 bit8: 1; + __u32 bit9: 1; + __u32 bit10: 1; + __u32 bit11: 1; + __u32 bit12: 1; + __u32 bit13: 1; + __u32 bit14: 1; + __u32 bit15: 1; + __u32 bit16: 1; + __u32 bit17: 1; + __u32 bit18: 1; + __u32 bit19: 1; + __u32 bit20: 1; + __u32 bit21: 1; + __u32 bit22: 1; + __u32 bit23: 1; + } type; + }; + __u8 data[0]; +}; + +enum ioam6_event_type { + IOAM6_EVENT_UNSPEC = 0, + IOAM6_EVENT_TRACE = 1, +}; + +struct ioam6_schema; + +struct ioam6_namespace { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_schema *schema; + __be16 id; + __be32 data; + __be64 data_wide; +}; + +struct ioam6_schema { + struct rhash_head head; + struct callback_head rcu; + struct ioam6_namespace *ns; + u32 id; + int len; + __be32 hdr; + u8 data[0]; +}; + +enum { + IFLA_BRIDGE_FLAGS = 0, + IFLA_BRIDGE_MODE = 1, + IFLA_BRIDGE_VLAN_INFO = 2, + IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, + IFLA_BRIDGE_MRP = 4, + IFLA_BRIDGE_CFM = 5, + IFLA_BRIDGE_MST = 6, + __IFLA_BRIDGE_MAX = 7, +}; + +enum { + IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC = 0, + IFLA_BRIDGE_VLAN_TUNNEL_ID = 1, + IFLA_BRIDGE_VLAN_TUNNEL_VID = 2, + IFLA_BRIDGE_VLAN_TUNNEL_FLAGS = 3, + __IFLA_BRIDGE_VLAN_TUNNEL_MAX = 4, +}; + +struct vtunnel_info { + u32 tunid; + u16 vid; + u16 flags; +}; + +struct vdso_rng_data { + u64 generation; + u8 is_ready; + long: 32; +}; + 
+struct vdso_arch_data { + __u64 tb_ticks_per_sec; + __u32 syscall_map[15]; + __u32 compat_syscall_map[0]; + long: 32; + struct vdso_rng_data rng_data; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct vdso_data data[2]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; 
+ long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + 
long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum vvar_pages { + VVAR_BASE_PAGE_OFFSET = 0, + VVAR_TIME_PAGE_OFFSET = 1, + VVAR_TIMENS_PAGE_OFFSET = 2, + VVAR_NR_PAGES = 3, +}; + +struct tk_read_base { + struct clocksource *clock; + long: 32; + u64 mask; + u64 cycle_last; + u32 mult; + u32 shift; + u64 xtime_nsec; + ktime_t base; + u64 base_real; +}; + +struct timekeeper { + struct tk_read_base tkr_mono; + u64 xtime_sec; + long unsigned int ktime_sec; + long: 32; + struct timespec64 wall_to_monotonic; + ktime_t offs_real; + ktime_t offs_boot; + ktime_t offs_tai; + s32 tai_offset; + long: 32; + struct tk_read_base tkr_raw; + u64 raw_sec; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + struct timespec64 monotonic_to_boot; + u64 cycle_interval; + u64 xtime_interval; + s64 xtime_remainder; + u64 raw_interval; + ktime_t next_leap_ktime; + u64 ntp_tick; + s64 ntp_error; + u32 ntp_error_shift; + u32 ntp_err_mult; + u32 skip_second_overflow; + long: 32; +}; + +struct filter_pred; + +struct prog_entry { + int target; + int when_to_branch; + struct filter_pred *pred; +}; + +struct regex; + +typedef int (*regex_match_func)(char *, struct regex *, int); + +struct regex { + char pattern[256]; + int len; + int field_len; + regex_match_func match; +}; + +enum regex_type { + MATCH_FULL = 0, + MATCH_FRONT_ONLY = 1, + MATCH_MIDDLE_ONLY = 2, + MATCH_END_ONLY = 3, + MATCH_GLOB = 4, + MATCH_INDEX = 5, +}; + +enum filter_op_ids { + OP_GLOB = 0, + OP_NE = 1, + OP_EQ = 2, + OP_LE = 3, + OP_LT = 4, + OP_GE = 5, + OP_GT = 6, + OP_BAND = 7, + OP_MAX = 8, +}; + +enum filter_pred_fn { + FILTER_PRED_FN_NOP = 0, + FILTER_PRED_FN_64 = 1, + FILTER_PRED_FN_64_CPUMASK = 2, + FILTER_PRED_FN_S64 = 3, + FILTER_PRED_FN_U64 = 4, + FILTER_PRED_FN_32 = 5, + FILTER_PRED_FN_32_CPUMASK = 6, + FILTER_PRED_FN_S32 = 7, + FILTER_PRED_FN_U32 = 8, + 
FILTER_PRED_FN_16 = 9, + FILTER_PRED_FN_16_CPUMASK = 10, + FILTER_PRED_FN_S16 = 11, + FILTER_PRED_FN_U16 = 12, + FILTER_PRED_FN_8 = 13, + FILTER_PRED_FN_8_CPUMASK = 14, + FILTER_PRED_FN_S8 = 15, + FILTER_PRED_FN_U8 = 16, + FILTER_PRED_FN_COMM = 17, + FILTER_PRED_FN_STRING = 18, + FILTER_PRED_FN_STRLOC = 19, + FILTER_PRED_FN_STRRELLOC = 20, + FILTER_PRED_FN_PCHAR_USER = 21, + FILTER_PRED_FN_PCHAR = 22, + FILTER_PRED_FN_CPU = 23, + FILTER_PRED_FN_CPU_CPUMASK = 24, + FILTER_PRED_FN_CPUMASK = 25, + FILTER_PRED_FN_CPUMASK_CPU = 26, + FILTER_PRED_FN_FUNCTION = 27, + FILTER_PRED_FN_ = 28, + FILTER_PRED_TEST_VISITED = 29, +}; + +struct filter_pred { + struct regex *regex; + struct cpumask *mask; + short unsigned int *ops; + struct ftrace_event_field *field; + u64 val; + u64 val2; + enum filter_pred_fn fn_num; + int offset; + int not; + int op; +}; + +enum { + FILT_ERR_NONE = 0, + FILT_ERR_INVALID_OP = 1, + FILT_ERR_TOO_MANY_OPEN = 2, + FILT_ERR_TOO_MANY_CLOSE = 3, + FILT_ERR_MISSING_QUOTE = 4, + FILT_ERR_MISSING_BRACE_OPEN = 5, + FILT_ERR_MISSING_BRACE_CLOSE = 6, + FILT_ERR_OPERAND_TOO_LONG = 7, + FILT_ERR_EXPECT_STRING = 8, + FILT_ERR_EXPECT_DIGIT = 9, + FILT_ERR_ILLEGAL_FIELD_OP = 10, + FILT_ERR_FIELD_NOT_FOUND = 11, + FILT_ERR_ILLEGAL_INTVAL = 12, + FILT_ERR_BAD_SUBSYS_FILTER = 13, + FILT_ERR_TOO_MANY_PREDS = 14, + FILT_ERR_INVALID_FILTER = 15, + FILT_ERR_INVALID_CPULIST = 16, + FILT_ERR_IP_FIELD_ONLY = 17, + FILT_ERR_INVALID_VALUE = 18, + FILT_ERR_NO_FUNCTION = 19, + FILT_ERR_ERRNO = 20, + FILT_ERR_NO_FILTER = 21, +}; + +struct filter_parse_error { + int lasterr; + int lasterr_pos; +}; + +typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **); + +enum { + INVERT = 1, + PROCESS_AND = 2, + PROCESS_OR = 4, +}; + +struct ustring_buffer { + char buffer[1024]; +}; + +enum { + TOO_MANY_CLOSE = -1, + TOO_MANY_OPEN = -2, + MISSING_QUOTE = -3, +}; + +struct filter_list { + struct list_head list; + struct event_filter *filter; +}; + +struct bpf_queue_stack { + struct bpf_map map; + raw_spinlock_t lock; + u32 head; + u32 tail; + u32 size; + char elements[0]; +}; + +struct btf_array { + __u32 type; + __u32 index_type; + __u32 nelems; +}; + +enum btf_field_iter_kind { + BTF_FIELD_ITER_IDS = 0, + BTF_FIELD_ITER_STRS = 1, +}; + +struct btf_field_desc { + int t_off_cnt; + int t_offs[2]; + int m_sz; + int m_off_cnt; + int m_offs[1]; +}; + +struct btf_field_iter { + struct btf_field_desc desc; + void *p; + int m_idx; + int off_idx; + int vlen; +}; + +struct btf_relocate { + struct btf *btf; + const struct btf *base_btf; + const struct btf *dist_base_btf; + unsigned int nr_base_types; + unsigned int nr_split_types; + unsigned int nr_dist_base_types; + int dist_str_len; + int base_str_len; + __u32 *id_map; + __u32 *str_map; +}; + +struct btf_name_info { + const char *name; + bool needs_size: 1; + unsigned int size: 31; + __u32 id; +}; + +typedef struct { + rwlock_t *lock; +} class_read_lock_t; + +typedef struct { + rwlock_t *lock; +} class_write_lock_t; + +typedef struct rw_semaphore *class_rwsem_read_t; + +struct mount_attr { + __u64 attr_set; + __u64 attr_clr; + __u64 propagation; + __u64 userns_fd; +}; + +struct statmount { + __u32 size; + __u32 mnt_opts; + __u64 mask; + __u32 sb_dev_major; + __u32 sb_dev_minor; + __u64 sb_magic; + __u32 sb_flags; + __u32 fs_type; + __u64 mnt_id; + __u64 mnt_parent_id; + __u32 mnt_id_old; + __u32 mnt_parent_id_old; + __u64 mnt_attr; + __u64 mnt_propagation; + __u64 mnt_peer_group; + __u64 mnt_master; + __u64 propagate_from; + 
__u32 mnt_root; + __u32 mnt_point; + __u64 mnt_ns_id; + __u32 fs_subtype; + __u32 sb_source; + __u32 opt_num; + __u32 opt_array; + __u32 opt_sec_num; + __u32 opt_sec_array; + __u64 __spare2[46]; + char str[0]; +}; + +struct mnt_id_req { + __u32 size; + __u32 spare; + __u64 mnt_id; + __u64 param; + __u64 mnt_ns_id; +}; + +struct proc_mounts { + struct mnt_namespace *ns; + struct path root; + int (*show)(struct seq_file *, struct vfsmount *); +}; + +struct mount_kattr { + unsigned int attr_set; + unsigned int attr_clr; + unsigned int propagation; + unsigned int lookup_flags; + bool recurse; + struct user_namespace *mnt_userns; + struct mnt_idmap *mnt_idmap; +}; + +enum umount_tree_flags { + UMOUNT_SYNC = 1, + UMOUNT_PROPAGATE = 2, + UMOUNT_CONNECTED = 4, +}; + +enum mnt_tree_flags_t { + MNT_TREE_MOVE = 1, + MNT_TREE_BENEATH = 2, +}; + +struct kstatmount { + struct statmount *buf; + size_t bufsize; + struct vfsmount *mnt; + long: 32; + u64 mask; + struct path root; + struct statmount sm; + struct seq_file seq; +}; + +enum criteria { + CR_POWER2_ALIGNED = 0, + CR_GOAL_LEN_FAST = 1, + CR_BEST_AVAIL_LEN = 2, + CR_GOAL_LEN_SLOW = 3, + CR_ANY_FREE = 4, + EXT4_MB_NUM_CRS = 5, +}; + +struct ext4_dir_entry_hash { + __le32 hash; + __le32 minor_hash; +}; + +struct ext4_dir_entry_2 { + __le32 inode; + __le16 rec_len; + __u8 name_len; + __u8 file_type; + char name[255]; +}; + +struct fname; + +struct dir_private_info { + struct rb_root root; + struct rb_node *curr_node; + struct fname *extra_fname; + long: 32; + loff_t last_pos; + __u32 curr_hash; + __u32 curr_minor_hash; + __u32 next_hash; + long: 32; + u64 cookie; + bool initialized; + long: 32; +}; + +struct fname { + __u32 hash; + __u32 minor_hash; + struct rb_node rb_hash; + struct fname *next; + __u32 inode; + __u8 name_len; + __u8 file_type; + char name[0]; +}; + +struct ext4_orphan_block_tail { + __le32 ob_magic; + __le32 ob_checksum; +}; + +struct fat_boot_fsinfo { + __le32 signature1; + __le32 reserved1[120]; + __le32 signature2; + __le32 free_clusters; + __le32 next_cluster; + __le32 reserved2[4]; +}; + +struct fat_bios_param_block { + u16 fat_sector_size; + u8 fat_sec_per_clus; + u16 fat_reserved; + u8 fat_fats; + u16 fat_dir_entries; + u16 fat_sectors; + u16 fat_fat_length; + u32 fat_total_sect; + u8 fat16_state; + u32 fat16_vol_id; + u32 fat32_length; + u32 fat32_root_cluster; + u16 fat32_info_sector; + u8 fat32_state; + u32 fat32_vol_id; +}; + +struct fat_floppy_defaults { + unsigned int nr_sectors; + unsigned int sec_per_clus; + unsigned int dir_entries; + unsigned int media; + unsigned int fat_length; +}; + +enum { + Opt_check = 0, + Opt_uid___5 = 1, + Opt_gid___6 = 2, + Opt_umask = 3, + Opt_dmask = 4, + Opt_fmask = 5, + Opt_allow_utime = 6, + Opt_codepage = 7, + Opt_usefree = 8, + Opt_nocase = 9, + Opt_quiet = 10, + Opt_showexec = 11, + Opt_debug = 12, + Opt_immutable = 13, + Opt_dots = 14, + Opt_dotsOK = 15, + Opt_charset = 16, + Opt_shortname = 17, + Opt_utf8 = 18, + Opt_utf8_bool = 19, + Opt_uni_xl = 20, + Opt_uni_xl_bool = 21, + Opt_nonumtail = 22, + Opt_nonumtail_bool = 23, + Opt_obsolete = 24, + Opt_flush = 25, + Opt_tz = 26, + Opt_rodir = 27, + Opt_errors = 28, + Opt_discard = 29, + Opt_nfs = 30, + Opt_nfs_enum = 31, + Opt_time_offset = 32, + Opt_dos1xfloppy = 33, +}; + +struct name_snapshot { + struct qstr name; + unsigned char inline_name[36]; + long: 32; +}; + +typedef struct vfsmount * (*debugfs_automount_t)(struct dentry *, void *); + +struct debugfs_short_fops { + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + 
ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + loff_t (*llseek)(struct file *, loff_t, int); +}; + +struct debugfs_cancellation { + struct list_head list; + void (*cancel)(struct dentry *, void *); + void *cancel_data; +}; + +struct debugfs_fsdata { + const struct file_operations *real_fops; + const struct debugfs_short_fops *short_fops; + union { + debugfs_automount_t automount; + struct { + refcount_t active_users; + struct completion active_users_drained; + struct mutex cancellations_mtx; + struct list_head cancellations; + }; + }; +}; + +struct debugfs_fs_info { + kuid_t uid; + kgid_t gid; + umode_t mode; + unsigned int opts; +}; + +enum { + Opt_uid___6 = 0, + Opt_gid___7 = 1, + Opt_mode___5 = 2, + Opt_source = 3, +}; + +struct prop_handler { + struct hlist_node node; + const char *xattr_name; + int (*validate)(const struct btrfs_inode *, const char *, size_t); + int (*apply)(struct inode *, const char *, size_t); + const char * (*extract)(const struct inode *); + bool (*ignore)(const struct btrfs_inode *); + int inheritable; +}; + +struct btrfs_raid_stride { + __le64 devid; + __le64 physical; +}; + +struct btrfs_stripe_extent { + struct { + struct {} __empty_strides; + struct btrfs_raid_stride strides[0]; + }; +}; + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[0]; +}; + +struct asymmetric_key_subtype { + struct module *owner; + const char *name; + short unsigned int name_len; + void (*describe)(const struct key *, struct seq_file *); + void (*destroy)(void *, void *); + int (*query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*verify_signature)(const struct key *, const struct public_key_signature *); +}; + +enum asymmetric_payload_bits { + asym_crypto = 0, + asym_subtype = 1, + asym_key_ids = 2, + asym_auth = 3, +}; + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP = 2, + IOPRIO_WHO_USER = 3, +}; + +struct io_poll_update { + struct file *file; + long: 32; + u64 old_user_data; + u64 new_user_data; + __poll_t events; + bool update_events; + bool update_user_data; +}; + +struct io_poll_table { + struct poll_table_struct pt; + struct io_kiocb *req; + int nr_entries; + int error; + bool owning; + __poll_t result_mask; +}; + +enum { + IOU_POLL_DONE = 0, + IOU_POLL_NO_ACTION = 1, + IOU_POLL_REMOVE_POLL_USE_RES = 2, + IOU_POLL_REISSUE = 3, + IOU_POLL_REQUEUE = 4, +}; + +enum assoc_array_walk_status { + assoc_array_walk_tree_empty = 0, + assoc_array_walk_found_terminal_node = 1, + assoc_array_walk_found_wrong_shortcut = 2, +}; + +struct assoc_array_walk_result { + struct { + struct assoc_array_node *node; + int level; + int slot; + } terminal_node; + struct { + struct assoc_array_shortcut *shortcut; + int level; + int sc_level; + long unsigned int sc_segments; + long unsigned int dissimilarity; + } wrong_shortcut; +}; + +struct assoc_array_delete_collapse_context { + struct assoc_array_node *node; + const void *skip_leaf; + int slot; +}; + +typedef enum { + ZSTD_dtlm_fast = 0, + ZSTD_dtlm_full = 1, +} ZSTD_dictTableLoadMethod_e; + +typedef size_t (*ZSTD_blockCompressor)(ZSTD_matchState_t *, seqStore_t *, U32 *, const void *, size_t); + +typedef struct { + U64 rolling; + U64 stopMask; +} ldmRollingHashState_t; + +struct pci_bus_resource { + struct list_head list; + struct resource *res; +}; + +typedef unsigned int u_int; + +enum { + KERNEL_PARAM_FL_UNSAFE = 1, + KERNEL_PARAM_FL_HWPARAM = 2, +}; + 
+struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +struct font_desc { + int idx; + const char *name; + unsigned int width; + unsigned int height; + unsigned int charcount; + const void *data; + int pref; +}; + +enum { + FBCON_LOGO_CANSHOW = -1, + FBCON_LOGO_DRAW = -2, + FBCON_LOGO_DONTSHOW = -3, +}; + +struct usb_dynid { + struct list_head node; + struct usb_device_id id; +}; + +typedef void (*companion_fn)(struct pci_dev *, struct usb_hcd *, struct pci_dev *, struct usb_hcd *); + +struct i2c_smbus_alert { + struct work_struct alert; + struct i2c_client *ara; +}; + +struct alert_data { + short unsigned int addr; + enum i2c_alert_protocol type; + unsigned int data; +}; + +typedef struct thermal_cooling_device *class_cooling_dev_t; + +struct thermal_instance { + int id; + char name[20]; + struct thermal_cooling_device *cdev; + const struct thermal_trip *trip; + bool initialized; + long unsigned int upper; + long unsigned int lower; + long unsigned int target; + char attr_name[20]; + struct device_attribute attr; + char weight_attr_name[20]; + struct device_attribute weight_attr; + struct list_head trip_node; + struct list_head cdev_node; + unsigned int weight; + bool upper_no_limit; +}; + +struct page_frag_cache { + long unsigned int encoded_page; + __u16 offset; + __u16 pagecnt_bias; +}; + +struct sk_buff_fclones { + struct sk_buff skb1; + struct sk_buff skb2; + refcount_t fclone_ref; + long: 32; +}; + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; + __u32 frag_off; +}; + +struct mpls_shim_hdr { + __be32 label_stack_entry; +}; + +enum skb_drop_reason_subsys { + SKB_DROP_REASON_SUBSYS_CORE = 0, + SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE = 1, + SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR = 2, + SKB_DROP_REASON_SUBSYS_OPENVSWITCH = 3, + SKB_DROP_REASON_SUBSYS_NUM = 4, +}; + +struct drop_reason_list { + const char * const *reasons; + size_t n_reasons; +}; + +struct ts_state { + unsigned int offset; + char cb[48]; +}; + +struct ts_config; + +struct ts_ops { + const char *name; + struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); + unsigned int (*find)(struct ts_config *, struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +struct ts_config { + struct ts_ops *ops; + int flags; + unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *); + void (*finish)(struct ts_config *, struct ts_state *); +}; + +struct page_frag_1k { + void *va; + u16 offset; + bool pfmemalloc; +}; + +struct napi_alloc_cache { + local_lock_t bh_lock; + struct page_frag_cache page; + struct page_frag_1k page_small; + unsigned int skb_count; + void *skb_cache[64]; +}; + +struct skb_free_array { + unsigned int skb_count; + void *skb_array[16]; +}; + +typedef int (*sendmsg_func)(struct sock *, struct msghdr *); + +struct tc_qopt_offload_stats { + struct gnet_stats_basic_sync *bstats; + struct gnet_stats_queue *qstats; +}; + +enum tc_mq_command { + TC_MQ_CREATE = 0, + TC_MQ_DESTROY = 1, + TC_MQ_STATS = 2, + TC_MQ_GRAFT = 3, +}; + +struct tc_mq_opt_offload_graft_params { + long unsigned int queue; + u32 child_handle; +}; + +struct tc_mq_qopt_offload { + enum tc_mq_command command; + u32 handle; + union { + struct tc_qopt_offload_stats stats; + struct 
tc_mq_opt_offload_graft_params graft_params; + }; +}; + +struct mq_sched { + struct Qdisc **qdiscs; +}; + +enum ethtool_fec_config_bits { + ETHTOOL_FEC_NONE_BIT = 0, + ETHTOOL_FEC_AUTO_BIT = 1, + ETHTOOL_FEC_OFF_BIT = 2, + ETHTOOL_FEC_RS_BIT = 3, + ETHTOOL_FEC_BASER_BIT = 4, + ETHTOOL_FEC_LLRS_BIT = 5, +}; + +enum { + ETHTOOL_A_FEC_STAT_UNSPEC = 0, + ETHTOOL_A_FEC_STAT_PAD = 1, + ETHTOOL_A_FEC_STAT_CORRECTED = 2, + ETHTOOL_A_FEC_STAT_UNCORR = 3, + ETHTOOL_A_FEC_STAT_CORR_BITS = 4, + __ETHTOOL_A_FEC_STAT_CNT = 5, + ETHTOOL_A_FEC_STAT_MAX = 4, +}; + +struct fec_stat_grp { + u64 stats[9]; + u8 cnt; + long: 32; +}; + +struct fec_reply_data { + struct ethnl_reply_data base; + long unsigned int fec_link_modes[4]; + u32 active_fec; + u8 fec_auto; + long: 32; + struct fec_stat_grp corr; + struct fec_stat_grp uncorr; + struct fec_stat_grp corr_bits; +}; + +enum nft_exthdr_flags { + NFT_EXTHDR_F_PRESENT = 1, +}; + +enum nft_exthdr_op { + NFT_EXTHDR_OP_IPV6 = 0, + NFT_EXTHDR_OP_TCPOPT = 1, + NFT_EXTHDR_OP_IPV4 = 2, + NFT_EXTHDR_OP_SCTP = 3, + NFT_EXTHDR_OP_DCCP = 4, + __NFT_EXTHDR_OP_MAX = 5, +}; + +enum nft_exthdr_attributes { + NFTA_EXTHDR_UNSPEC = 0, + NFTA_EXTHDR_DREG = 1, + NFTA_EXTHDR_TYPE = 2, + NFTA_EXTHDR_OFFSET = 3, + NFTA_EXTHDR_LEN = 4, + NFTA_EXTHDR_FLAGS = 5, + NFTA_EXTHDR_OP = 6, + NFTA_EXTHDR_SREG = 7, + __NFTA_EXTHDR_MAX = 8, +}; + +struct dccp_hdr { + __be16 dccph_sport; + __be16 dccph_dport; + __u8 dccph_doff; + __u8 dccph_ccval: 4; + __u8 dccph_cscov: 4; + __sum16 dccph_checksum; + __u8 dccph_reserved: 3; + __u8 dccph_type: 4; + __u8 dccph_x: 1; + __u8 dccph_seq2; + __be16 dccph_seq; +}; + +enum dccp_pkt_type { + DCCP_PKT_REQUEST = 0, + DCCP_PKT_RESPONSE = 1, + DCCP_PKT_DATA = 2, + DCCP_PKT_ACK = 3, + DCCP_PKT_DATAACK = 4, + DCCP_PKT_CLOSEREQ = 5, + DCCP_PKT_CLOSE = 6, + DCCP_PKT_RESET = 7, + DCCP_PKT_SYNC = 8, + DCCP_PKT_SYNCACK = 9, + DCCP_PKT_INVALID = 10, +}; + +enum { + DCCPO_PADDING = 0, + DCCPO_MANDATORY = 1, + DCCPO_MIN_RESERVED = 3, + DCCPO_MAX_RESERVED = 31, + DCCPO_CHANGE_L = 32, + DCCPO_CONFIRM_L = 33, + DCCPO_CHANGE_R = 34, + DCCPO_CONFIRM_R = 35, + DCCPO_NDP_COUNT = 37, + DCCPO_ACK_VECTOR_0 = 38, + DCCPO_ACK_VECTOR_1 = 39, + DCCPO_TIMESTAMP = 41, + DCCPO_TIMESTAMP_ECHO = 42, + DCCPO_ELAPSED_TIME = 43, + DCCPO_MAX = 45, + DCCPO_MIN_RX_CCID_SPECIFIC = 128, + DCCPO_MAX_RX_CCID_SPECIFIC = 191, + DCCPO_MIN_TX_CCID_SPECIFIC = 192, + DCCPO_MAX_TX_CCID_SPECIFIC = 255, +}; + +struct sctp_chunkhdr { + __u8 type; + __u8 flags; + __be16 length; +}; + +struct nft_exthdr { + u8 type; + u8 offset; + u8 len; + u8 op; + u8 dreg; + u8 sreg; + u8 flags; +}; + +struct ipq { + struct inet_frag_queue q; + u8 ecn; + u16 max_df_size; + int iif; + unsigned int rid; + struct inet_peer *peer; +}; + +struct icmp_filter { + __u32 data; +}; + +struct raw_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct raw_sock { + struct inet_sock inet; + struct icmp_filter filter; + u32 ipmr_table; +}; + +struct raw_frag_vec { + struct msghdr *msg; + union { + struct icmphdr icmph; + char c[1]; + } hdr; + int hlen; +}; + +struct nduseroptmsg { + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + short unsigned int nduseropt_opts_len; + int nduseropt_ifindex; + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + short unsigned int nduseropt_pad2; + unsigned int nduseropt_pad3; +}; + +enum { + NDUSEROPT_UNSPEC = 0, + NDUSEROPT_SRCADDR = 1, + __NDUSEROPT_MAX = 2, +}; + +struct rs_msg { + struct icmp6hdr icmph; + __u8 opt[0]; +}; + +struct ra_msg { + struct icmp6hdr icmph; + 
__be32 reachable_time; + __be32 retrans_timer; +}; + +union net_bridge_eht_addr { + __be32 ip4; + struct in6_addr ip6; +}; + +struct net_bridge_group_eht_host { + struct rb_node rb_node; + union net_bridge_eht_addr h_addr; + struct hlist_head set_entries; + unsigned int num_entries; + unsigned char filter_mode; + struct net_bridge_port_group *pg; +}; + +struct net_bridge_group_eht_set; + +struct net_bridge_group_eht_set_entry { + struct rb_node rb_node; + struct hlist_node host_list; + union net_bridge_eht_addr h_addr; + struct timer_list timer; + struct net_bridge *br; + struct net_bridge_group_eht_set *eht_set; + struct net_bridge_group_eht_host *h_parent; + struct net_bridge_mcast_gc mcast_gc; +}; + +struct net_bridge_group_eht_set { + struct rb_node rb_node; + union net_bridge_eht_addr src_addr; + struct rb_root entry_tree; + struct timer_list timer; + struct net_bridge_port_group *pg; + struct net_bridge *br; + struct net_bridge_mcast_gc mcast_gc; +}; + +struct regbit { + long unsigned int bit; + const char *name; +}; + +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, + MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, + MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, +}; + +enum { + input_adb_none = 0, + input_adb_pmu = 1, + input_adb_cuda = 2, +}; + +enum { + FW_FEATURE_POSSIBLE = 0, + FW_FEATURE_ALWAYS = 0, +}; + +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_sw_context = 1, + perf_nr_task_contexts = 2, +}; + +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1, + PERF_SAMPLE_BRANCH_KERNEL = 2, + PERF_SAMPLE_BRANCH_HV = 4, + PERF_SAMPLE_BRANCH_ANY = 8, + PERF_SAMPLE_BRANCH_ANY_CALL = 16, + PERF_SAMPLE_BRANCH_ANY_RETURN = 32, + PERF_SAMPLE_BRANCH_IND_CALL = 64, + PERF_SAMPLE_BRANCH_ABORT_TX = 128, + PERF_SAMPLE_BRANCH_IN_TX = 256, + PERF_SAMPLE_BRANCH_NO_TX = 512, + PERF_SAMPLE_BRANCH_COND = 1024, + PERF_SAMPLE_BRANCH_CALL_STACK = 2048, + PERF_SAMPLE_BRANCH_IND_JUMP = 4096, + PERF_SAMPLE_BRANCH_CALL = 8192, + PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, + PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, + PERF_SAMPLE_BRANCH_HW_INDEX = 131072, + PERF_SAMPLE_BRANCH_PRIV_SAVE = 262144, + PERF_SAMPLE_BRANCH_COUNTERS = 524288, + PERF_SAMPLE_BRANCH_MAX = 1048576, +}; + +struct mmcr_regs { + long unsigned int mmcr0; + long unsigned int mmcr1; + long unsigned int mmcr2; + long unsigned int mmcra; + long unsigned int mmcr3; +}; + +struct power_pmu { + const char *name; + int n_counter; + int max_alternatives; + long unsigned int add_fields; + long unsigned int test_adder; + int (*compute_mmcr)(u64 *, int, unsigned int *, struct mmcr_regs *, struct perf_event **, u32); + int (*get_constraint)(u64, long unsigned int *, long unsigned int *, u64); + int (*get_alternatives)(u64, unsigned int, u64 *); + void (*get_mem_data_src)(union perf_mem_data_src *, u32, struct pt_regs *); + void (*get_mem_weight)(u64 *, u64); + long unsigned int group_constraint_mask; + long unsigned int group_constraint_val; + u64 (*bhrb_filter_map)(u64); + void (*config_bhrb)(u64); + void (*disable_pmc)(unsigned int, struct mmcr_regs *); + int (*limited_pmc_event)(u64); + u32 flags; + const struct attribute_group **attr_groups; + int n_generic; + int *generic_events; + u64 (*cache_events)[42]; + int 
n_blacklist_ev; + int *blacklist_ev; + int bhrb_nr; + int capabilities; + int (*check_attr_config)(struct perf_event *); +}; + +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; + long: 32; +}; + +struct cpu_hw_events { + int n_events; + int n_percpu; + int disabled; + int n_added; + int n_limited; + u8 pmcs_enabled; + struct perf_event *event[8]; + u64 events[8]; + unsigned int flags[8]; + struct mmcr_regs mmcr; + struct perf_event *limited_counter[2]; + u8 limited_hwidx[2]; + u64 alternatives[64]; + long unsigned int amasks[64]; + long unsigned int avalues[64]; + unsigned int txn_flags; + int n_txn_start; + u64 bhrb_filter; + unsigned int bhrb_users; + void *bhrb_context; + struct perf_branch_stack bhrb_stack; + struct perf_branch_entry bhrb_entries[32]; + u64 ic_init; + long unsigned int pmcs[8]; +}; + +enum uclamp_id { + UCLAMP_MIN = 0, + UCLAMP_MAX = 1, + UCLAMP_CNT = 2, +}; + +enum cpu_idle_type { + __CPU_NOT_IDLE = 0, + CPU_IDLE = 1, + CPU_NEWLY_IDLE = 2, + CPU_MAX_IDLE_TYPES = 3, +}; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE = 0, + SCHED_TUNABLESCALING_LOG = 1, + SCHED_TUNABLESCALING_LINEAR = 2, + SCHED_TUNABLESCALING_END = 3, +}; + +struct asym_cap_data { + struct list_head link; + struct callback_head rcu; + long unsigned int capacity; + long unsigned int cpus[0]; +}; + +typedef int (*tg_visitor)(struct task_group *, void *); + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_irqsave_t; + +struct energy_env { + long unsigned int task_busy_time; + long unsigned int pd_busy_time; + long unsigned int cpu_cap; + long unsigned int pd_cap; +}; + +enum fbq_type { + regular = 0, + remote = 1, + all = 2, +}; + +enum group_type { + group_has_spare = 0, + group_fully_busy = 1, + group_misfit_task = 2, + group_smt_balance = 3, + group_asym_packing = 4, + group_imbalanced = 5, + group_overloaded = 6, +}; + +enum migration_type { + migrate_load = 0, + migrate_util = 1, + migrate_task = 2, + migrate_misfit = 3, +}; + +struct lb_env { + struct sched_domain *sd; + struct rq *src_rq; + int src_cpu; + int dst_cpu; + struct rq *dst_rq; + struct cpumask *dst_grpmask; + int new_dst_cpu; + enum cpu_idle_type idle; + long int imbalance; + struct cpumask *cpus; + unsigned int flags; + unsigned int loop; + unsigned int loop_break; + unsigned int loop_max; + enum fbq_type fbq_type; + enum migration_type migration_type; + struct list_head tasks; +}; + +struct sg_lb_stats { + long unsigned int avg_load; + long unsigned int group_load; + long unsigned int group_capacity; + long unsigned int group_util; + long unsigned int group_runnable; + unsigned int sum_nr_running; + unsigned int sum_h_nr_running; + unsigned int idle_cpus; + unsigned int group_weight; + enum group_type group_type; + unsigned int group_asym_packing; + unsigned int group_smt_balance; + long unsigned int group_misfit_task_load; +}; + +struct sd_lb_stats { + struct sched_group *busiest; + struct sched_group *local; + long unsigned int total_load; + long unsigned int total_capacity; + long unsigned int avg_load; + unsigned int prefer_sibling; + struct sg_lb_stats busiest_stat; + struct sg_lb_stats local_stat; +}; + +enum tick_broadcast_state { + TICK_BROADCAST_EXIT = 0, + TICK_BROADCAST_ENTER = 1, +}; + +struct trace_event_raw_error_report_template { + struct trace_entry ent; + enum error_detector error_detector; + long unsigned int id; + char __data[0]; +}; + +struct trace_event_data_offsets_error_report_template {}; + +typedef void 
(*btf_trace_error_report_end)(void *, enum error_detector, long unsigned int); + +struct bpf_lpm_trie_key_hdr { + __u32 prefixlen; +}; + +struct bpf_lpm_trie_key_u8 { + union { + struct bpf_lpm_trie_key_hdr hdr; + __u32 prefixlen; + }; + __u8 data[0]; +}; + +struct lpm_trie_node { + struct callback_head rcu; + struct lpm_trie_node *child[2]; + u32 prefixlen; + u32 flags; + u8 data[0]; +}; + +struct lpm_trie { + struct bpf_map map; + struct lpm_trie_node *root; + size_t n_entries; + size_t max_prefixlen; + size_t data_size; + spinlock_t lock; + long: 32; +}; + +enum pgt_entry { + NORMAL_PMD = 0, + HPAGE_PMD = 1, + NORMAL_PUD = 2, + HPAGE_PUD = 3, +}; + +enum dentry_d_lock_class { + DENTRY_D_LOCK_NORMAL = 0, + DENTRY_D_LOCK_NESTED = 1, +}; + +struct simple_transaction_argresp { + ssize_t size; + char data[0]; +}; + +enum { + DIR_OFFSET_MIN = 2, +}; + +struct simple_attr { + int (*get)(void *, u64 *); + int (*set)(void *, u64); + char get_buf[24]; + char set_buf[24]; + void *data; + const char *fmt; + struct mutex mutex; +}; + +struct epoll_params { + __u32 busy_poll_usecs; + __u16 busy_poll_budget; + __u8 prefer_busy_poll; + __u8 __pad; +}; + +struct wakeup_source { + const char *name; + int id; + struct list_head entry; + spinlock_t lock; + struct wake_irq *wakeirq; + struct timer_list timer; + long unsigned int timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + long unsigned int event_count; + long unsigned int active_count; + long unsigned int relax_count; + long unsigned int expire_count; + long unsigned int wakeup_count; + struct device *dev; + bool active: 1; + bool autosleep_enabled: 1; + long: 32; +}; + +struct epoll_filefd { + struct file *file; + int fd; +}; + +struct epitem; + +struct eppoll_entry { + struct eppoll_entry *next; + struct epitem *base; + wait_queue_entry_t wait; + wait_queue_head_t *whead; +}; + +struct eventpoll; + +struct epitem { + union { + struct rb_node rbn; + struct callback_head rcu; + }; + struct list_head rdllink; + struct epitem *next; + struct epoll_filefd ffd; + bool dying; + struct eppoll_entry *pwqlist; + struct eventpoll *ep; + struct hlist_node fllink; + struct wakeup_source *ws; + struct epoll_event event; +}; + +struct eventpoll { + struct mutex mtx; + wait_queue_head_t wq; + wait_queue_head_t poll_wait; + struct list_head rdllist; + rwlock_t lock; + struct rb_root_cached rbr; + struct epitem *ovflist; + struct wakeup_source *ws; + struct user_struct *user; + struct file *file; + u64 gen; + struct hlist_head refs; + refcount_t refcount; + unsigned int napi_id; + u32 busy_poll_usecs; + u16 busy_poll_budget; + bool prefer_busy_poll; + long: 32; +}; + +struct ep_pqueue { + poll_table pt; + struct epitem *epi; +}; + +struct epitems_head { + struct hlist_head epitems; + struct epitems_head *next; +}; + +enum { + ES_WRITTEN_B = 0, + ES_UNWRITTEN_B = 1, + ES_DELAYED_B = 2, + ES_HOLE_B = 3, + ES_REFERENCED_B = 4, + ES_FLAGS = 5, +}; + +struct pending_reservation { + struct rb_node rb_node; + ext4_lblk_t lclu; +}; + +struct rsvd_count { + int ndelayed; + bool first_do_lblk_found; + ext4_lblk_t first_do_lblk; + ext4_lblk_t last_do_lblk; + struct extent_status *left_es; + bool partial; + ext4_lblk_t lclu; +}; + +struct cdrom_msf0 { + __u8 minute; + __u8 second; + __u8 frame; +}; + +union cdrom_addr { + struct cdrom_msf0 msf; + int lba; +}; + +struct cdrom_tocentry { + __u8 cdte_track; + __u8 cdte_adr: 4; + __u8 cdte_ctrl: 4; + __u8 cdte_format; + union cdrom_addr 
cdte_addr; + __u8 cdte_datamode; +}; + +struct cdrom_multisession { + union cdrom_addr addr; + __u8 xa_flag; + __u8 addr_format; +}; + +struct cdrom_mcn { + __u8 medium_catalog_number[14]; +}; + +struct packet_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct scsi_sense_hdr *sshdr; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +struct cdrom_device_ops; + +struct cdrom_device_info { + const struct cdrom_device_ops *ops; + struct list_head list; + struct gendisk *disk; + void *handle; + int mask; + int speed; + int capacity; + unsigned int options: 30; + unsigned int mc_flags: 2; + unsigned int vfs_events; + unsigned int ioctl_events; + int use_count; + char name[20]; + __u8 sanyo_slot: 2; + __u8 keeplocked: 1; + __u8 reserved: 5; + int cdda_method; + __u8 last_sense; + __u8 media_written; + short unsigned int mmc3_profile; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; + bool opened_for_data; + long: 32; + __s64 last_media_change_ms; +}; + +struct cdrom_device_ops { + int (*open)(struct cdrom_device_info *, int); + void (*release)(struct cdrom_device_info *); + int (*drive_status)(struct cdrom_device_info *, int); + unsigned int (*check_events)(struct cdrom_device_info *, unsigned int, int); + int (*tray_move)(struct cdrom_device_info *, int); + int (*lock_door)(struct cdrom_device_info *, int); + int (*select_speed)(struct cdrom_device_info *, long unsigned int); + int (*get_last_session)(struct cdrom_device_info *, struct cdrom_multisession *); + int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *); + int (*reset)(struct cdrom_device_info *); + int (*audio_ioctl)(struct cdrom_device_info *, unsigned int, void *); + int (*generic_packet)(struct cdrom_device_info *, struct packet_command *); + int (*read_cdda_bpc)(struct cdrom_device_info *, void *, u32, u32, u8 *); + const int capability; +}; + +struct iso_volume_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 data[2041]; +}; + +struct iso_primary_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 unused1[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 unused3[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 opt_type_l_path_table[4]; + __u8 type_m_path_table[4]; + __u8 opt_type_m_path_table[4]; + __u8 root_directory_record[34]; + char volume_set_id[128]; + char publisher_id[128]; + char preparer_id[128]; + char application_id[128]; + char copyright_file_id[37]; + char abstract_file_id[37]; + char bibliographic_file_id[37]; + __u8 creation_date[17]; + __u8 modification_date[17]; + __u8 expiration_date[17]; + __u8 effective_date[17]; + __u8 file_structure_version[1]; + __u8 unused4[1]; + __u8 application_data[512]; + __u8 unused5[653]; +}; + +struct iso_supplementary_descriptor { + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 flags[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 escape[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 opt_type_l_path_table[4]; + __u8 type_m_path_table[4]; + __u8 opt_type_m_path_table[4]; + __u8 root_directory_record[34]; + char volume_set_id[128]; + char publisher_id[128]; + char preparer_id[128]; + char 
application_id[128]; + char copyright_file_id[37]; + char abstract_file_id[37]; + char bibliographic_file_id[37]; + __u8 creation_date[17]; + __u8 modification_date[17]; + __u8 expiration_date[17]; + __u8 effective_date[17]; + __u8 file_structure_version[1]; + __u8 unused4[1]; + __u8 application_data[512]; + __u8 unused5[653]; +}; + +struct hs_volume_descriptor { + __u8 foo[8]; + __u8 type[1]; + char id[5]; + __u8 version[1]; + __u8 data[2033]; +}; + +struct hs_primary_descriptor { + __u8 foo[8]; + __u8 type[1]; + __u8 id[5]; + __u8 version[1]; + __u8 unused1[1]; + char system_id[32]; + char volume_id[32]; + __u8 unused2[8]; + __u8 volume_space_size[8]; + __u8 unused3[32]; + __u8 volume_set_size[4]; + __u8 volume_sequence_number[4]; + __u8 logical_block_size[4]; + __u8 path_table_size[8]; + __u8 type_l_path_table[4]; + __u8 unused4[28]; + __u8 root_directory_record[34]; +}; + +enum isofs_file_format { + isofs_file_normal = 0, + isofs_file_sparse = 1, + isofs_file_compressed = 2, +}; + +struct isofs_options { + unsigned int rock: 1; + unsigned int joliet: 1; + unsigned int cruft: 1; + unsigned int hide: 1; + unsigned int showassoc: 1; + unsigned int nocompress: 1; + unsigned int overriderockperm: 1; + unsigned int uid_set: 1; + unsigned int gid_set: 1; + unsigned char map; + unsigned char check; + unsigned int blocksize; + umode_t fmode; + umode_t dmode; + kgid_t gid; + kuid_t uid; + char *iocharset; + s32 session; + s32 sbsector; +}; + +enum { + Opt_block = 0, + Opt_check___2 = 1, + Opt_cruft = 2, + Opt_gid___8 = 3, + Opt_ignore___2 = 4, + Opt_iocharset = 5, + Opt_map = 6, + Opt_mode___6 = 7, + Opt_nojoliet = 8, + Opt_norock = 9, + Opt_sb = 10, + Opt_session = 11, + Opt_uid___7 = 12, + Opt_unhide = 13, + Opt_utf8___2 = 14, + Opt_err___3 = 15, + Opt_nocompress = 16, + Opt_hide = 17, + Opt_showassoc = 18, + Opt_dmode = 19, + Opt_overriderockperm = 20, +}; + +struct isofs_iget5_callback_data { + long unsigned int block; + long unsigned int offset; +}; + +struct debugfs_blob_wrapper { + void *data; + long unsigned int size; +}; + +struct debugfs_reg32 { + char *name; + long unsigned int offset; +}; + +struct debugfs_regset32 { + const struct debugfs_reg32 *regs; + int nregs; + void *base; + struct device *dev; +}; + +struct debugfs_devm_entry { + int (*read)(struct seq_file *, void *); + struct device *dev; +}; + +struct btrfs_ioctl_dev_replace_start_params { + __u64 srcdevid; + __u64 cont_reading_from_srcdev_mode; + __u8 srcdev_name[1025]; + __u8 tgtdev_name[1025]; + long: 32; +}; + +struct btrfs_ioctl_dev_replace_status_params { + __u64 replace_state; + __u64 progress_1000; + __u64 time_started; + __u64 time_stopped; + __u64 num_write_errors; + __u64 num_uncorrectable_read_errors; +}; + +struct btrfs_ioctl_dev_replace_args { + __u64 cmd; + __u64 result; + union { + struct btrfs_ioctl_dev_replace_start_params start; + struct btrfs_ioctl_dev_replace_status_params status; + }; + __u64 spare[64]; +}; + +struct btrfs_dev_replace_item { + __le64 src_devid; + __le64 cursor_left; + __le64 cursor_right; + __le64 cont_reading_from_srcdev_mode; + __le64 replace_state; + __le64 time_started; + __le64 time_stopped; + __le64 num_write_errors; + __le64 num_uncorrectable_read_errors; +}; + +enum rsapubkey_actions { + ACT_rsa_get_e___2 = 0, + ACT_rsa_get_n___2 = 1, + NR__rsapubkey_actions = 2, +}; + +struct akcipher_instance { + void (*free)(struct akcipher_instance *); + long: 32; + union { + struct { + char head[32]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct 
pkcs1pad_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct pkcs1pad_inst_ctx { + struct crypto_akcipher_spawn spawn; +}; + +struct pkcs1pad_request { + struct scatterlist in_sg[2]; + struct scatterlist out_sg[1]; + uint8_t *in_buf; + uint8_t *out_buf; + long: 32; + struct akcipher_request child_req; +}; + +struct lzo_ctx { + void *lzo_comp_mem; +}; + +typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); + +struct sbq_wait { + struct sbitmap_queue *sbq; + struct wait_queue_entry wait; +}; + +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = 65535, +}; + +struct bt_iter_data { + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + busy_tag_iter_fn *fn; + void *data; + bool reserved; +}; + +struct bt_tags_iter_data { + struct blk_mq_tags *tags; + busy_tag_iter_fn *fn; + void *data; + unsigned int flags; +}; + +enum io_uring_register_pbuf_ring_flags { + IOU_PBUF_RING_MMAP = 1, + IOU_PBUF_RING_INC = 2, +}; + +struct io_uring_buf_reg { + __u64 ring_addr; + __u32 ring_entries; + __u16 bgid; + __u16 flags; + __u64 resv[3]; +}; + +struct io_uring_buf_status { + __u32 buf_group; + __u32 head; + __u32 resv[8]; +}; + +enum { + KBUF_MODE_EXPAND = 1, + KBUF_MODE_FREE = 2, +}; + +struct buf_sel_arg { + struct iovec *iovs; + size_t out_len; + size_t max_len; + short unsigned int nr_iovs; + short unsigned int mode; +}; + +struct io_provide_buf { + struct file *file; + long: 32; + __u64 addr; + __u32 len; + __u32 bgid; + __u32 nbufs; + __u16 bid; +}; + +typedef void sha256_block_fn(struct sha256_state *, const u8 *, int); + +enum devm_ioremap_type { + DEVM_IOREMAP = 0, + DEVM_IOREMAP_UC = 1, + DEVM_IOREMAP_WC = 2, + DEVM_IOREMAP_NP = 3, +}; + +struct arch_io_reserve_memtype_wc_devres { + resource_size_t start; + resource_size_t size; +}; + +struct word_at_a_time { + const long unsigned int high_bits; + const long unsigned int low_bits; +}; + +struct n_tty_data { + size_t read_head; + size_t commit_head; + size_t canon_head; + size_t echo_head; + size_t echo_commit; + size_t echo_mark; + long unsigned int char_map[8]; + long unsigned int overrun_time; + unsigned int num_overrun; + bool no_room; + unsigned char lnext: 1; + unsigned char erasing: 1; + unsigned char raw: 1; + unsigned char real_raw: 1; + unsigned char icanon: 1; + unsigned char push: 1; + u8 read_buf[4096]; + long unsigned int read_flags[128]; + u8 echo_buf[4096]; + size_t read_tail; + size_t line_start; + size_t lookahead_count; + unsigned int column; + unsigned int canon_column; + size_t echo_tail; + struct mutex atomic_read_lock; + struct mutex output_lock; +}; + +enum { + ERASE = 0, + WERASE = 1, + KILL = 2, +}; + +struct platform_device_info { + struct device *parent; + struct fwnode_handle *fwnode; + bool of_node_reused; + const char *name; + int id; + const struct resource *res; + unsigned int num_res; + const void *data; + size_t size_data; + long: 32; + u64 dma_mask; + const struct property_entry *properties; + long: 32; +}; + +struct irq_affinity_devres { + unsigned int count; + unsigned int irq[0]; +}; + +struct platform_object { + struct platform_device pdev; + char name[0]; +}; + +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1, + BIP_MAPPED_INTEGRITY = 2, + BIP_CTRL_NOCHECK = 4, + BIP_DISK_NOCHECK = 8, + BIP_IP_CHECKSUM = 16, + BIP_COPY_USER = 32, +}; + +enum { + DISK_EVENT_FLAG_POLL = 1, + DISK_EVENT_FLAG_UEVENT = 2, + DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 4, +}; + +enum string_size_units { + STRING_UNITS_10 = 0, + STRING_UNITS_2 = 1, + STRING_UNITS_MASK = 1, + 
STRING_UNITS_NO_SPACE = 1073741824, + STRING_UNITS_NO_BYTES = 2147483648, +}; + +enum pr_status { + PR_STS_SUCCESS = 0, + PR_STS_IOERR = 2, + PR_STS_RESERVATION_CONFLICT = 24, + PR_STS_RETRY_PATH_FAILURE = 917504, + PR_STS_PATH_FAST_FAILED = 983040, + PR_STS_PATH_FAILED = 65536, +}; + +enum t10_dif_type { + T10_PI_TYPE0_PROTECTION = 0, + T10_PI_TYPE1_PROTECTION = 1, + T10_PI_TYPE2_PROTECTION = 2, + T10_PI_TYPE3_PROTECTION = 3, +}; + +struct scsi_io_group_descriptor { + u8 io_advice_hints_mode: 2; + u8 reserved1: 3; + u8 st_enble: 1; + u8 cs_enble: 1; + u8 ic_enable: 1; + u8 reserved2[3]; + u8 acdlu: 1; + u8 reserved3: 1; + u8 rlbsr: 2; + u8 lbm_descriptor_type: 4; + u8 params[2]; + u8 reserved4; + u8 reserved5[8]; +}; + +struct scsi_stream_status { + u8 perm: 1; + u8 reserved1: 7; + u8 reserved2; + __be16 stream_identifier; + u8 reserved3: 2; + u8 rel_lifetime: 6; + u8 reserved4[3]; +}; + +struct scsi_stream_status_header { + __be32 len; + u16 reserved; + __be16 number_of_open_streams; + struct { + struct {} __empty_stream_status; + struct scsi_stream_status stream_status[0]; + }; +}; + +enum scsi_pr_type { + SCSI_PR_WRITE_EXCLUSIVE = 1, + SCSI_PR_EXCLUSIVE_ACCESS = 3, + SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 5, + SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 6, + SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 7, + SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 8, +}; + +struct scsi_mode_data { + __u32 length; + __u16 block_descriptor_length; + __u8 medium_type; + __u8 device_specific; + __u8 header_length; + __u8 longlba: 1; +}; + +enum scsi_prot_flags { + SCSI_PROT_TRANSFER_PI = 1, + SCSI_PROT_GUARD_CHECK = 2, + SCSI_PROT_REF_CHECK = 4, + SCSI_PROT_REF_INCREMENT = 8, + SCSI_PROT_IP_CHECKSUM = 16, +}; + +enum scsi_host_prot_capabilities { + SHOST_DIF_TYPE1_PROTECTION = 1, + SHOST_DIF_TYPE2_PROTECTION = 2, + SHOST_DIF_TYPE3_PROTECTION = 4, + SHOST_DIX_TYPE0_PROTECTION = 8, + SHOST_DIX_TYPE1_PROTECTION = 16, + SHOST_DIX_TYPE2_PROTECTION = 32, + SHOST_DIX_TYPE3_PROTECTION = 64, +}; + +enum { + SD_EXT_CDB_SIZE = 32, + SD_MEMPOOL_SIZE = 2, +}; + +enum { + SD_DEF_XFER_BLOCKS = 65535, + SD_MAX_XFER_BLOCKS = 4294967295, + SD_MAX_WS10_BLOCKS = 65535, + SD_MAX_WS16_BLOCKS = 8388607, +}; + +enum { + SD_LBP_FULL = 0, + SD_LBP_UNMAP = 1, + SD_LBP_WS16 = 2, + SD_LBP_WS10 = 3, + SD_LBP_ZERO = 4, + SD_LBP_DISABLE = 5, +}; + +enum { + SD_ZERO_WRITE = 0, + SD_ZERO_WS = 1, + SD_ZERO_WS16_UNMAP = 2, + SD_ZERO_WS10_UNMAP = 3, +}; + +struct opal_dev; + +struct scsi_disk { + struct scsi_device *device; + long: 32; + struct device disk_dev; + struct gendisk *disk; + struct opal_dev *opal_dev; + atomic_t openers; + long: 32; + sector_t capacity; + int max_retries; + u32 min_xfer_blocks; + u32 max_xfer_blocks; + u32 opt_xfer_blocks; + u32 max_ws_blocks; + u32 max_unmap_blocks; + u32 unmap_granularity; + u32 unmap_alignment; + u32 max_atomic; + u32 atomic_alignment; + u32 atomic_granularity; + u32 max_atomic_with_boundary; + u32 max_atomic_boundary; + u32 index; + unsigned int physical_block_size; + unsigned int max_medium_access_timeouts; + unsigned int medium_access_timed_out; + u16 permanent_stream_count; + u8 media_present; + u8 write_prot; + u8 protection_type; + u8 provisioning_mode; + u8 zeroing_mode; + u8 nr_actuators; + bool suspended; + unsigned int ATO: 1; + unsigned int cache_override: 1; + unsigned int WCE: 1; + unsigned int RCD: 1; + unsigned int DPOFUA: 1; + unsigned int first_scan: 1; + unsigned int lbpme: 1; + unsigned int lbprz: 1; + unsigned int lbpu: 1; + unsigned int lbpws: 1; + unsigned int lbpws10: 1; + unsigned int lbpvpd: 1; + 
unsigned int ws10: 1; + unsigned int ws16: 1; + unsigned int rc_basis: 2; + unsigned int zoned: 2; + unsigned int urswrz: 1; + unsigned int security: 1; + unsigned int ignore_medium_access_errors: 1; + unsigned int rscs: 1; + unsigned int use_atomic_write_boundary: 1; +}; + +enum pci_ers_result { + PCI_ERS_RESULT_NONE = 1, + PCI_ERS_RESULT_CAN_RECOVER = 2, + PCI_ERS_RESULT_NEED_RESET = 3, + PCI_ERS_RESULT_DISCONNECT = 4, + PCI_ERS_RESULT_RECOVERED = 5, + PCI_ERS_RESULT_NO_AER_DRIVER = 6, +}; + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 255, +}; + +enum phy___2 { + phy_100a = 992, + phy_100c = 55575208, + phy_82555_tx = 22020776, + phy_nsc_tx = 1543512064, + phy_82562_et = 53478056, + phy_82562_em = 52429480, + phy_82562_ek = 51380904, + phy_82562_eh = 24117928, + phy_82552_v = 3496017997, + phy_unknown = 4294967295, +}; + +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 8, + rus_ready = 16, + rus_mask = 60, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0, + stat_ack_sw_gen = 4, + stat_ack_rnr = 16, + stat_ack_cu_idle = 32, + stat_ack_frame_rx = 64, + stat_ack_cu_cmd_done = 128, + stat_ack_not_present = 255, + stat_ack_rx = 84, + stat_ack_tx = 160, +}; + +enum scb_cmd_hi { + irq_mask_none = 0, + irq_mask_all = 1, + irq_sw_gen = 2, +}; + +enum scb_cmd_lo { + cuc_nop = 0, + ruc_start = 1, + ruc_load_base = 6, + cuc_start = 16, + cuc_resume = 32, + cuc_dump_addr = 64, + cuc_dump_stats = 80, + cuc_load_base = 96, + cuc_dump_reset = 112, +}; + +enum cuc_dump { + cuc_dump_complete = 40965, + cuc_dump_reset_complete = 40967, +}; + +enum port { + software_reset = 0, + selftest = 1, + selective_reset = 2, +}; + +enum eeprom_ctrl_lo { + eesk = 1, + eecs = 2, + eedi = 4, + eedo = 8, +}; + +enum mdi_ctrl { + mdi_write = 67108864, + mdi_read = 134217728, + mdi_ready = 268435456, +}; + +enum eeprom_op { + op_write = 5, + op_read = 6, + op_ewds = 16, + op_ewen = 19, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 3, + eeprom_phy_iface = 6, + eeprom_id = 10, + eeprom_config_asf = 13, + eeprom_smbus_addr = 144, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 128, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB = 1, + I82553C = 2, + I82503 = 3, + DP83840 = 4, + S80C240 = 5, + S80C24 = 6, + I82555 = 7, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 32, +}; + +enum eeprom_config_asf { + eeprom_asf = 32768, + eeprom_gcl = 16384, +}; + +enum cb_status { + cb_complete = 32768, + cb_ok = 8192, +}; + +enum cb_command { + cb_nop = 0, + cb_iaaddr = 1, + cb_config = 2, + cb_multi = 3, + cb_tx = 4, + cb_ucode = 5, + cb_dump = 6, + cb_tx_sf = 8, + cb_tx_nc = 16, + cb_cid = 7936, + cb_i = 8192, + cb_s = 16384, + cb_el = 32768, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next; + struct rx *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +struct config { + u8 pad0: 2; + u8 byte_count: 6; + u8 pad1: 1; + u8 tx_fifo_limit: 3; + u8 
rx_fifo_limit: 4; + u8 adaptive_ifs; + u8 pad3: 4; + u8 term_write_cache_line: 1; + u8 read_align_enable: 1; + u8 type_enable: 1; + u8 mwi_enable: 1; + u8 pad4: 1; + u8 rx_dma_max_count: 7; + u8 dma_max_count_enable: 1; + u8 tx_dma_max_count: 7; + u8 rx_save_bad_frames: 1; + u8 rx_save_overruns: 1; + u8 standard_stat_counter: 1; + u8 standard_tcb: 1; + u8 cna_intr: 1; + u8 tno_intr: 1; + u8 direct_rx_dma: 1; + u8 late_scb_update: 1; + u8 tx_dynamic_tbd: 1; + u8 tx_two_frames_in_fifo: 1; + u8 rx_extended_rfd: 1; + u8 pad7: 2; + u8 tx_underrun_retry: 2; + u8 rx_discard_short_frames: 1; + u8 csma_disabled: 1; + u8 pad8: 6; + u8 mii_mode: 1; + u8 mcmatch_wake: 1; + u8 arp_wake: 1; + u8 link_status_wake: 1; + u8 vlan_arp_tco: 1; + u8 pad9: 3; + u8 rx_tcpudp_checksum: 1; + u8 loopback: 2; + u8 preamble_length: 2; + u8 no_source_addr_insertion: 1; + u8 pad10: 3; + u8 pad11: 5; + u8 linear_priority: 3; + u8 ifs: 4; + u8 pad12: 3; + u8 linear_priority_mode: 1; + u8 ip_addr_lo; + u8 ip_addr_hi; + u8 crs_or_cdt: 1; + u8 pad15_2: 1; + u8 crc_16_bit: 1; + u8 ignore_ul_bit: 1; + u8 pad15_1: 1; + u8 wait_after_win: 1; + u8 broadcast_disabled: 1; + u8 promiscuous_mode: 1; + u8 fc_delay_lo; + u8 fc_delay_hi; + u8 pad18: 1; + u8 fc_priority_threshold: 3; + u8 rx_long_ok: 1; + u8 rx_crc_transfer: 1; + u8 tx_padding: 1; + u8 rx_stripping: 1; + u8 full_duplex_pin: 1; + u8 full_duplex_force: 1; + u8 fc_reject: 1; + u8 fc_restart: 1; + u8 fc_restop: 1; + u8 fc_disable: 1; + u8 magic_packet_disable: 1; + u8 addr_wake: 1; + u8 pad20_2: 1; + u8 multi_ia: 1; + u8 fc_priority_location: 1; + u8 pad20_1: 5; + u8 pad21_2: 4; + u8 multicast_all: 1; + u8 pad21_1: 3; + u8 pad22: 6; + u8 rx_vlan_drop: 1; + u8 rx_d102_mode: 1; + u8 pad_d102[9]; +}; + +struct multi { + __le16 count; + u8 addr[386]; +}; + +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[6]; + __le32 ucode[134]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next; + struct cb *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, + lb_mac = 1, + lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames; + __le32 tx_max_collisions; + __le32 tx_late_collisions; + __le32 tx_underruns; + __le32 tx_lost_crs; + __le32 tx_deferred; + __le32 tx_single_collisions; + __le32 tx_multiple_collisions; + __le32 tx_total_collisions; + __le32 rx_good_frames; + __le32 rx_crc_errors; + __le32 rx_alignment_errors; + __le32 rx_resource_errors; + __le32 rx_overrun_errors; + __le32 rx_cdt_errors; + __le32 rx_short_frame_errors; + __le32 fc_xmt_pause; + __le32 fc_rcv_pause; + __le32 fc_rcv_unsupported; + __le16 xmt_tco_frames; + __le16 rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + u32 msg_enable; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *, u32, u32, u32, u16); + long: 32; + long: 32; + long: 32; + long: 32; + struct rx *rxs; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + spinlock_t cb_lock; + spinlock_t cmd_lock; + struct csr *csr; + enum scb_cmd_lo 
cuc_cmd; + unsigned int cbs_avail; + long: 32; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + enum { + ich = 1, + promiscuous = 2, + multicast_all = 4, + wol_magic = 8, + ich_10h_workaround = 16, + } flags; + enum mac mac; + enum phy___2 phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + struct mem *mem; + dma_addr_t dma_addr; + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + long: 32; +}; + +enum led_state { + led_on = 1, + led_off = 4, + led_on_559 = 5, + led_on_557 = 7, +}; + +enum usb_led_event { + USB_LED_EVENT_HOST = 0, + USB_LED_EVENT_GADGET = 1, +}; + +typedef class_mutex_t class_mutex_intr_t; + +struct vivaldi_data { + u32 function_row_physmap[24]; + unsigned int num_function_row_keys; +}; + +struct atkbd { + struct ps2dev ps2dev; + struct input_dev *dev; + char name[64]; + char phys[32]; + short unsigned int id; + short unsigned int keycode[512]; + long unsigned int force_release_mask[16]; + unsigned char set; + bool translated; + bool extra; + bool write; + bool softrepeat; + bool softraw; + bool scroll; + bool enabled; + unsigned char emul; + bool resend; + bool release; + long unsigned int xl_bit; + unsigned int last; + long unsigned int time; + long unsigned int err_count; + struct delayed_work event_work; + long unsigned int event_jiffies; + long unsigned int event_mask; + struct mutex mutex; + struct vivaldi_data vdata; +}; + +struct hidraw_devinfo { + __u32 bustype; + __s16 vendor; + __s16 product; +}; + +struct hidraw { + unsigned int minor; + int exist; + int open; + wait_queue_head_t wait; + struct hid_device *hid; + struct device *dev; + spinlock_t list_lock; + struct list_head list; +}; + +struct hidraw_report { + __u8 *value; + int len; +}; + +struct hidraw_list { + struct hidraw_report buffer[64]; + int head; + int tail; + struct fasync_struct *fasync; + struct hidraw *hidraw; + struct list_head node; + struct mutex read_mutex; + bool revoked; +}; + +struct netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *, char *); + ssize_t (*store)(struct netdev_queue *, const char *, size_t); +}; + +enum { + ETHTOOL_A_BITSET_BIT_UNSPEC = 0, + ETHTOOL_A_BITSET_BIT_INDEX = 1, + ETHTOOL_A_BITSET_BIT_NAME = 2, + ETHTOOL_A_BITSET_BIT_VALUE = 3, + __ETHTOOL_A_BITSET_BIT_CNT = 4, + ETHTOOL_A_BITSET_BIT_MAX = 3, +}; + +enum { + ETHTOOL_A_BITSET_BITS_UNSPEC = 0, + ETHTOOL_A_BITSET_BITS_BIT = 1, + __ETHTOOL_A_BITSET_BITS_CNT = 2, + ETHTOOL_A_BITSET_BITS_MAX = 1, +}; + +enum { + ETHTOOL_A_BITSET_UNSPEC = 0, + ETHTOOL_A_BITSET_NOMASK = 1, + ETHTOOL_A_BITSET_SIZE = 2, + ETHTOOL_A_BITSET_BITS = 3, + ETHTOOL_A_BITSET_VALUE = 4, + ETHTOOL_A_BITSET_MASK = 5, + __ETHTOOL_A_BITSET_CNT = 6, + ETHTOOL_A_BITSET_MAX = 5, +}; + +struct nf_log_buf { + unsigned int count; + char buf[1020]; +}; + +struct nft_bitmap_elem { + struct nft_elem_priv priv; + struct list_head head; + struct 
nft_set_ext ext; +}; + +struct nft_bitmap { + struct list_head list; + u16 bitmap_size; + u8 bitmap[0]; +}; + +enum { + LWTUNNEL_XMIT_DONE = 0, + LWTUNNEL_XMIT_CONTINUE = 256, +}; + +struct ip_fraglist_iter { + struct sk_buff *frag; + struct iphdr *iph; + int offset; + unsigned int hlen; +}; + +struct ip_frag_state { + bool DF; + unsigned int hlen; + unsigned int ll_rs; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + __be16 not_last_frag; +}; + +struct xfrm_tunnel { + int (*handler)(struct sk_buff *); + int (*cb_handler)(struct sk_buff *, int); + int (*err_handler)(struct sk_buff *, u32); + struct xfrm_tunnel *next; + int priority; +}; + +struct ip6fl_iter_state { + struct seq_net_private p; + struct pid_namespace *pid_ns; + int bucket; +}; + +struct nf_br_ops { + int (*br_dev_xmit_hook)(struct sk_buff *); +}; + +typedef struct { + long unsigned int key[2]; +} hsiphash_key_t; + +struct __user_cap_header_struct { + __u32 version; + int pid; +}; + +typedef struct __user_cap_header_struct *cap_user_header_t; + +struct __user_cap_data_struct { + __u32 effective; + __u32 permitted; + __u32 inheritable; +}; + +typedef struct __user_cap_data_struct *cap_user_data_t; + +enum proc_cn_event { + PROC_EVENT_NONE = 0, + PROC_EVENT_FORK = 1, + PROC_EVENT_EXEC = 2, + PROC_EVENT_UID = 4, + PROC_EVENT_GID = 64, + PROC_EVENT_SID = 128, + PROC_EVENT_PTRACE = 256, + PROC_EVENT_COMM = 512, + PROC_EVENT_NONZERO_EXIT = 536870912, + PROC_EVENT_COREDUMP = 1073741824, + PROC_EVENT_EXIT = 2147483648, +}; + +struct ktime_timestamps { + u64 mono; + u64 boot; + u64 real; +}; + +struct system_time_snapshot { + u64 cycles; + ktime_t real; + ktime_t boot; + ktime_t raw; + enum clocksource_ids cs_id; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + long: 32; +}; + +struct system_counterval_t { + u64 cycles; + enum clocksource_ids cs_id; + bool use_nsecs; +}; + +typedef struct { + seqcount_t seqcount; +} seqcount_latch_t; + +struct audit_ntp_data {}; + +enum timekeeping_adv_mode { + TK_ADV_TICK = 0, + TK_ADV_FREQ = 1, +}; + +struct tk_data { + seqcount_raw_spinlock_t seq; + long: 32; + struct timekeeper timekeeper; + struct timekeeper shadow_timekeeper; + raw_spinlock_t lock; + long: 32; +}; + +struct tk_fast { + seqcount_latch_t seq; + long: 32; + struct tk_read_base base[2]; +}; + +struct cgroup_of_peak { + long unsigned int value; + struct list_head list; +}; + +struct cgroup_fs_context { + struct kernfs_fs_context kfc; + struct cgroup_root *root; + struct cgroup_namespace *ns; + unsigned int flags; + bool cpuset_clone_children; + bool none; + bool all_ss; + u16 subsys_mask; + char *name; + char *release_agent; +}; + +enum cgroup_filetype { + CGROUP_FILE_PROCS = 0, + CGROUP_FILE_TASKS = 1, +}; + +struct cgroup_pidlist { + struct { + enum cgroup_filetype type; + struct pid_namespace *ns; + } key; + pid_t *list; + int length; + struct list_head links; + struct cgroup *owner; + struct delayed_work destroy_dwork; +}; + +struct cgroup_file_ctx { + struct cgroup_namespace *ns; + struct { + void *trigger; + } psi; + struct { + bool started; + struct css_task_iter iter; + } procs; + struct { + struct cgroup_pidlist *pidlist; + } procs1; + struct cgroup_of_peak peak; +}; + +struct cgrp_cset_link { + struct cgroup *cgrp; + struct css_set *cset; + struct list_head cset_link; + struct list_head cgrp_link; +}; + +struct cgroup_mgctx { + struct list_head preloaded_src_csets; + struct list_head preloaded_dst_csets; + struct cgroup_taskset tset; + u16 ss_mask; +}; + +enum cgroup1_param { + Opt_all = 0, + 
Opt_clone_children = 1, + Opt_cpuset_v2_mode = 2, + Opt_name = 3, + Opt_none = 4, + Opt_noprefix = 5, + Opt_release_agent = 6, + Opt_xattr = 7, + Opt_favordynmods = 8, + Opt_nofavordynmods = 9, +}; + +struct trace_dynamic_info { + u16 len; + u16 offset; +}; + +struct synth_field_desc { + const char *type; + const char *name; +}; + +struct synth_trace_event; + +struct synth_event; + +struct synth_event_trace_state { + struct trace_event_buffer fbuffer; + struct synth_trace_event *entry; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int cur_field; + unsigned int n_u64; + bool disabled; + bool add_next; + bool add_name; +}; + +union trace_synth_field { + u8 as_u8; + u16 as_u16; + u32 as_u32; + u64 as_u64; + struct trace_dynamic_info as_dynamic; +}; + +struct synth_trace_event { + struct trace_entry ent; + union trace_synth_field fields[0]; +}; + +struct synth_field; + +struct synth_event { + struct dyn_event devent; + int ref; + char *name; + struct synth_field **fields; + unsigned int n_fields; + struct synth_field **dynamic_fields; + unsigned int n_dynamic_fields; + unsigned int n_u64; + struct trace_event_class class; + struct trace_event_call call; + struct tracepoint *tp; + struct module *mod; +}; + +struct synth_field { + char *type; + char *name; + size_t size; + unsigned int offset; + unsigned int field_pos; + bool is_signed; + bool is_string; + bool is_dynamic; + bool is_stack; +}; + +enum { + SYNTH_ERR_BAD_NAME = 0, + SYNTH_ERR_INVALID_CMD = 1, + SYNTH_ERR_INVALID_DYN_CMD = 2, + SYNTH_ERR_EVENT_EXISTS = 3, + SYNTH_ERR_TOO_MANY_FIELDS = 4, + SYNTH_ERR_INCOMPLETE_TYPE = 5, + SYNTH_ERR_INVALID_TYPE = 6, + SYNTH_ERR_INVALID_FIELD = 7, + SYNTH_ERR_INVALID_ARRAY_SPEC = 8, +}; + +enum bpf_jit_poke_reason { + BPF_POKE_REASON_TAIL_CALL = 0, +}; + +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct callback_head rcu; +}; + +struct bpf_iter__bpf_map_elem { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + void *value; + }; +}; + +struct bpf_iter_seq_array_map_info { + struct bpf_map *map; + void *percpu_value_buf; + u32 index; +}; + +struct prog_poke_elem { + struct list_head list; + struct bpf_prog_aux *aux; +}; + +struct swap_slots_cache { + bool lock_initialized; + struct mutex alloc_lock; + swp_entry_t *slots; + int nr; + int cur; + spinlock_t free_lock; + swp_entry_t *slots_ret; + int n_ret; +}; + +struct dentry_stat_t { + long int nr_dentry; + long int nr_unused; + long int age_limit; + long int want_pages; + long int nr_negative; + long int dummy; +}; + +struct external_name { + union { + atomic_t count; + struct callback_head head; + } u; + unsigned char name[0]; +}; + +enum d_walk_ret { + D_WALK_CONTINUE = 0, + D_WALK_QUIT = 1, + D_WALK_NORETRY = 2, + D_WALK_SKIP = 3, +}; + +struct check_mount { + struct vfsmount *mnt; + unsigned int mounted; +}; + +struct select_data { + struct dentry *start; + union { + long int found; + struct dentry *victim; + }; + struct list_head dispose; +}; + +enum { + VERBOSE_STATUS = 1, +}; + +enum { + Enabled = 0, + Magic = 1, +}; + +typedef struct { + struct list_head list; + long unsigned int flags; + int offset; + int size; + char *magic; + char *mask; + const char *interpreter; + char *name; + struct dentry *dentry; + struct file *interp_file; + refcount_t users; +} Node; + +struct fscrypt_inode_info; + +struct btrfs_extent_owner_ref { + __le64 root_id; +}; + +struct root_name_map { + u64 id; + 
const char *name; + long: 32; +}; + +struct workspace___3 { + void *mem; + void *buf; + void *cbuf; + struct list_head list; +}; + +struct ccm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn mac; +}; + +struct crypto_ccm_ctx { + struct crypto_ahash *mac; + struct crypto_skcipher *ctr; +}; + +struct crypto_rfc4309_ctx { + struct crypto_aead *child; + u8 nonce[3]; +}; + +struct crypto_rfc4309_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct aead_request subreq; +}; + +struct crypto_ccm_req_priv_ctx { + u8 odata[16]; + u8 idata[16]; + u8 auth_tag[16]; + u32 flags; + struct scatterlist src[3]; + struct scatterlist dst[3]; + long: 32; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + }; +}; + +struct cbcmac_tfm_ctx { + struct crypto_cipher *child; +}; + +struct cbcmac_desc_ctx { + unsigned int len; + u8 dg[0]; +}; + +struct mq_inflight { + struct block_device *part; + unsigned int inflight[2]; +}; + +struct blk_rq_wait { + struct completion done; + blk_status_t ret; +}; + +struct blk_expired_data { + bool has_timedout_rq; + long unsigned int next; + long unsigned int timeout_start; +}; + +struct flush_busy_ctx_data { + struct blk_mq_hw_ctx *hctx; + struct list_head *list; +}; + +struct dispatch_rq_data { + struct blk_mq_hw_ctx *hctx; + struct request *rq; +}; + +enum prep_dispatch { + PREP_DISPATCH_OK = 0, + PREP_DISPATCH_NO_TAG = 1, + PREP_DISPATCH_NO_BUDGET = 2, +}; + +struct rq_iter_data { + struct blk_mq_hw_ctx *hctx; + bool has_rq; +}; + +struct blk_mq_qe_pair { + struct list_head node; + struct request_queue *q; + struct elevator_type *type; +}; + +enum { + IORING_MEM_REGION_TYPE_USER = 1, +}; + +struct io_uring_region_desc { + __u64 user_addr; + __u64 size; + __u32 flags; + __u32 id; + __u64 mmap_offset; + __u64 __resv[4]; +}; + +typedef enum { + trustInput = 0, + checkMaxSymbolValue = 1, +} HIST_checkInput_e; + +struct repcodes_s { + U32 rep[3]; +}; + +typedef struct repcodes_s repcodes_t; + +typedef U32 (*ZSTD_getAllMatchesFn)(ZSTD_match_t *, ZSTD_matchState_t *, U32 *, const BYTE *, const BYTE *, const U32 *, const U32, const U32); + +typedef struct { + rawSeqStore_t seqStore; + U32 startPosInBlock; + U32 endPosInBlock; + U32 offset; +} ZSTD_optLdm_t; + +enum pci_bar_type { + pci_bar_unknown = 0, + pci_bar_io = 1, + pci_bar_mem32 = 2, + pci_bar_mem64 = 3, +}; + +struct pci_domain_busn_res { + struct list_head list; + struct resource res; + int domain_nr; +}; + +struct devm_clk_state { + struct clk *clk; + void (*exit)(struct clk *); +}; + +struct clk_bulk_devres { + struct clk_bulk_data *clks; + int num_clks; +}; + +struct u32_fract { + __u32 numerator; + __u32 denominator; +}; + +struct clk_fractional_divider { + struct clk_hw hw; + void *reg; + u8 mshift; + u8 mwidth; + u8 nshift; + u8 nwidth; + u8 flags; + void (*approximation)(struct clk_hw *, long unsigned int, long unsigned int *, long unsigned int *, long unsigned int *); + spinlock_t *lock; +}; + +struct subsys_interface { + const char *name; + const struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *, struct subsys_interface *); + void (*remove_dev)(struct device *, struct subsys_interface *); +}; + +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +enum pm_qos_flags_status { + PM_QOS_FLAGS_UNDEFINED = -1, + PM_QOS_FLAGS_NONE = 0, + PM_QOS_FLAGS_SOME = 1, + PM_QOS_FLAGS_ALL = 2, +}; + +struct nvme_user_io { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + 
__u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; + long: 32; +}; + +struct nvme_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result; +}; + +struct nvme_passthru_cmd64 { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + union { + __u32 data_len; + __u32 vec_cnt; + }; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; + __u64 result; +}; + +struct nvme_uring_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; +}; + +enum nvme_subsys_type { + NVME_NQN_DISC = 1, + NVME_NQN_NVME = 2, + NVME_NQN_CURR = 3, +}; + +enum nvme_ctrl_type { + NVME_CTRL_IO = 1, + NVME_CTRL_DISC = 2, + NVME_CTRL_ADMIN = 3, +}; + +enum nvme_dctype { + NVME_DCTYPE_NOT_REPORTED = 0, + NVME_DCTYPE_DDC = 1, + NVME_DCTYPE_CDC = 2, +}; + +struct nvme_id_power_state { + __le16 max_power; + __u8 rsvd2; + __u8 flags; + __le32 entry_lat; + __le32 exit_lat; + __u8 read_tput; + __u8 read_lat; + __u8 write_tput; + __u8 write_lat; + __le16 idle_power; + __u8 idle_scale; + __u8 rsvd19; + __le16 active_power; + __u8 active_work_scale; + __u8 rsvd23[9]; +}; + +enum { + NVME_CTRL_CMIC_MULTI_PORT = 1, + NVME_CTRL_CMIC_MULTI_CTRL = 2, + NVME_CTRL_CMIC_ANA = 8, + NVME_CTRL_ONCS_COMPARE = 1, + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 2, + NVME_CTRL_ONCS_DSM = 4, + NVME_CTRL_ONCS_WRITE_ZEROES = 8, + NVME_CTRL_ONCS_RESERVATIONS = 32, + NVME_CTRL_ONCS_TIMESTAMP = 64, + NVME_CTRL_VWC_PRESENT = 1, + NVME_CTRL_OACS_SEC_SUPP = 1, + NVME_CTRL_OACS_NS_MNGT_SUPP = 8, + NVME_CTRL_OACS_DIRECTIVES = 32, + NVME_CTRL_OACS_DBBUF_SUPP = 256, + NVME_CTRL_LPA_CMD_EFFECTS_LOG = 2, + NVME_CTRL_CTRATT_128_ID = 1, + NVME_CTRL_CTRATT_NON_OP_PSP = 2, + NVME_CTRL_CTRATT_NVM_SETS = 4, + NVME_CTRL_CTRATT_READ_RECV_LVLS = 8, + NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 16, + NVME_CTRL_CTRATT_PREDICTABLE_LAT = 32, + NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 128, + NVME_CTRL_CTRATT_UUID_LIST = 512, + NVME_CTRL_SGLS_BYTE_ALIGNED = 1, + NVME_CTRL_SGLS_DWORD_ALIGNED = 2, + NVME_CTRL_SGLS_KSDBDS = 4, + NVME_CTRL_SGLS_MSDS = 524288, + NVME_CTRL_SGLS_SAOS = 1048576, +}; + +enum { + NVME_ID_CNS_NS = 0, + NVME_ID_CNS_CTRL = 1, + NVME_ID_CNS_NS_ACTIVE_LIST = 2, + NVME_ID_CNS_NS_DESC_LIST = 3, + NVME_ID_CNS_CS_NS = 5, + NVME_ID_CNS_CS_CTRL = 6, + NVME_ID_CNS_NS_ACTIVE_LIST_CS = 7, + NVME_ID_CNS_NS_CS_INDEP = 8, + NVME_ID_CNS_NS_PRESENT_LIST = 16, + NVME_ID_CNS_NS_PRESENT = 17, + NVME_ID_CNS_CTRL_NS_LIST = 18, + NVME_ID_CNS_CTRL_LIST = 19, + NVME_ID_CNS_SCNDRY_CTRL_LIST = 21, + NVME_ID_CNS_NS_GRANULARITY = 22, + NVME_ID_CNS_UUID_LIST = 23, + NVME_ID_CNS_ENDGRP_LIST = 25, +}; + +enum { + NVME_CMD_EFFECTS_CSUPP = 1, + NVME_CMD_EFFECTS_LBCC = 2, + NVME_CMD_EFFECTS_NCC = 4, + NVME_CMD_EFFECTS_NIC = 8, + NVME_CMD_EFFECTS_CCC = 16, + NVME_CMD_EFFECTS_CSER_MASK = 49152, + NVME_CMD_EFFECTS_CSE_MASK = 458752, + NVME_CMD_EFFECTS_UUID_SEL = 524288, + NVME_CMD_EFFECTS_SCOPE_MASK = 4293918720, +}; + 
+struct nvme_effects_log { + __le32 acs[256]; + __le32 iocs[256]; + __u8 resv[2048]; +}; + +struct nvme_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +struct nvme_keyed_sgl_desc { + __le64 addr; + __u8 length[3]; + __u8 key[4]; + __u8 type; +}; + +union nvme_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct nvme_sgl_desc sgl; + struct nvme_keyed_sgl_desc ksgl; +}; + +struct nvme_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + union { + struct { + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; + }; + struct { + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; + } cdws; + }; +}; + +struct nvme_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2; + __le32 cdw3; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 lbat; + __le16 lbatm; +}; + +enum { + NVME_RW_LR = 32768, + NVME_RW_FUA = 16384, + NVME_RW_APPEND_PIREMAP = 512, + NVME_RW_DSM_FREQ_UNSPEC = 0, + NVME_RW_DSM_FREQ_TYPICAL = 1, + NVME_RW_DSM_FREQ_RARE = 2, + NVME_RW_DSM_FREQ_READS = 3, + NVME_RW_DSM_FREQ_WRITES = 4, + NVME_RW_DSM_FREQ_RW = 5, + NVME_RW_DSM_FREQ_ONCE = 6, + NVME_RW_DSM_FREQ_PREFETCH = 7, + NVME_RW_DSM_FREQ_TEMP = 8, + NVME_RW_DSM_LATENCY_NONE = 0, + NVME_RW_DSM_LATENCY_IDLE = 16, + NVME_RW_DSM_LATENCY_NORM = 32, + NVME_RW_DSM_LATENCY_LOW = 48, + NVME_RW_DSM_SEQ_REQ = 64, + NVME_RW_DSM_COMPRESSED = 128, + NVME_RW_PRINFO_PRCHK_REF = 1024, + NVME_RW_PRINFO_PRCHK_APP = 2048, + NVME_RW_PRINFO_PRCHK_GUARD = 4096, + NVME_RW_PRINFO_PRACT = 8192, + NVME_RW_DTYPE_STREAMS = 16, + NVME_WZ_DEAC = 512, +}; + +struct nvme_dsm_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 nr; + __le32 attributes; + __u32 rsvd12[4]; +}; + +struct nvme_write_zeroes_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 lbat; + __le16 lbatm; +}; + +struct nvme_zone_mgmt_send_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le32 cdw12; + __u8 zsa; + __u8 select_all; + __u8 rsvd13[2]; + __le32 cdw14[2]; +}; + +struct nvme_zone_mgmt_recv_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd2[2]; + union nvme_data_ptr dptr; + __le64 slba; + __le32 numd; + __u8 zra; + __u8 zrasf; + __u8 pr; + __u8 rsvd13; + __le32 cdw14[2]; +}; + +struct nvme_identify { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 cns; + __u8 rsvd3; + __le16 ctrlid; + __le16 cnssid; + __u8 rsvd11; + __u8 csi; + __u32 rsvd12[4]; +}; + +struct nvme_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 fid; + __le32 dword11; + __le32 dword12; + __le32 dword13; + __le32 dword14; + __le32 dword15; +}; + +struct nvme_create_cq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + 
+struct nvme_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct nvme_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct nvme_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 sqid; + __u16 cid; + __u32 rsvd11[5]; +}; + +struct nvme_download_firmware { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + union nvme_data_ptr dptr; + __le32 numd; + __le32 offset; + __u32 rsvd12[4]; +}; + +struct nvme_format_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[4]; + __le32 cdw10; + __u32 rsvd11[5]; +}; + +struct nvme_get_log_page_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 lid; + __u8 lsp; + __le16 numdl; + __le16 numdu; + __le16 lsi; + union { + struct { + __le32 lpol; + __le32 lpou; + }; + __le64 lpo; + }; + __u8 rsvd14[3]; + __u8 csi; + __u32 rsvd15; +}; + +struct nvme_directive_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 numd; + __u8 doper; + __u8 dtype; + __le16 dspec; + __u8 endir; + __u8 tdtype; + __u16 rsvd15; + __u32 rsvd16[3]; +}; + +enum nvmf_fabrics_opcode { + nvme_fabrics_command = 127, +}; + +struct nvmf_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 ts[24]; +}; + +struct nvmf_connect_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __le16 recfmt; + __le16 qid; + __le16 sqsize; + __u8 cattr; + __u8 resv3; + __le32 kato; + __u8 resv4[12]; +}; + +struct nvmf_property_set_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __le64 value; + __u8 resv4[8]; +}; + +struct nvmf_property_get_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __u8 resv4[16]; +}; + +struct nvmf_auth_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al_tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_send_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_receive_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al; + __u8 resv4[16]; +}; + +struct nvme_dbbuf { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __le64 prp2; + __u32 rsvd12[6]; +}; + +struct nvme_command { + union { + struct nvme_common_command common; + struct nvme_rw_command rw; + struct nvme_identify identify; + struct nvme_features features; + struct nvme_create_cq create_cq; + struct nvme_create_sq create_sq; + struct nvme_delete_queue delete_queue; + struct nvme_download_firmware dlfw; + struct nvme_format_cmd format; + struct nvme_dsm_cmd dsm; + struct 
nvme_write_zeroes_cmd write_zeroes; + struct nvme_zone_mgmt_send_cmd zms; + struct nvme_zone_mgmt_recv_cmd zmr; + struct nvme_abort_cmd abort; + struct nvme_get_log_page_command get_log_page; + struct nvmf_common_command fabrics; + struct nvmf_connect_command connect; + struct nvmf_property_set_command prop_set; + struct nvmf_property_get_command prop_get; + struct nvmf_auth_common_command auth_common; + struct nvmf_auth_send_command auth_send; + struct nvmf_auth_receive_command auth_receive; + struct nvme_dbbuf dbbuf; + struct nvme_directive_cmd directive; + }; +}; + +union nvme_result { + __le16 u16; + __le32 u32; + __le64 u64; +}; + +struct nvme_ctrl; + +struct nvme_request { + struct nvme_command *cmd; + long: 32; + union nvme_result result; + u8 genctr; + u8 retries; + u8 flags; + u16 status; + struct nvme_ctrl *ctrl; + long: 32; +}; + +enum nvme_ctrl_state { + NVME_CTRL_NEW = 0, + NVME_CTRL_LIVE = 1, + NVME_CTRL_RESETTING = 2, + NVME_CTRL_CONNECTING = 3, + NVME_CTRL_DELETING = 4, + NVME_CTRL_DELETING_NOIO = 5, + NVME_CTRL_DEAD = 6, +}; + +struct nvme_fault_inject {}; + +struct nvme_ctrl_ops; + +struct nvme_subsystem; + +struct nvmf_ctrl_options; + +struct nvme_ctrl { + bool comp_seen; + bool identified; + bool passthru_err_log_enabled; + enum nvme_ctrl_state state; + spinlock_t lock; + struct mutex scan_lock; + const struct nvme_ctrl_ops *ops; + struct request_queue *admin_q; + struct request_queue *connect_q; + struct request_queue *fabrics_q; + struct device *dev; + int instance; + int numa_node; + struct blk_mq_tag_set *tagset; + struct blk_mq_tag_set *admin_tagset; + struct list_head namespaces; + struct mutex namespaces_lock; + struct srcu_struct srcu; + long: 32; + struct device ctrl_device; + struct device *device; + struct cdev cdev; + struct work_struct reset_work; + struct work_struct delete_work; + wait_queue_head_t state_wq; + struct nvme_subsystem *subsys; + struct list_head subsys_entry; + struct opal_dev *opal_dev; + u16 cntlid; + u16 mtfa; + u32 ctrl_config; + u32 queue_count; + u64 cap; + u32 max_hw_sectors; + u32 max_segments; + u32 max_integrity_segments; + u32 max_zeroes_sectors; + u16 crdt[3]; + u16 oncs; + u8 dmrl; + u32 dmrsl; + u16 oacs; + u16 sqsize; + u32 max_namespaces; + atomic_t abort_limit; + u8 vwc; + u32 vs; + u32 sgls; + u16 kas; + u8 npss; + u8 apsta; + u16 wctemp; + u16 cctemp; + u32 oaes; + u32 aen_result; + u32 ctratt; + unsigned int shutdown_timeout; + unsigned int kato; + bool subsystem; + long unsigned int quirks; + struct nvme_id_power_state psd[32]; + struct nvme_effects_log *effects; + struct xarray cels; + struct work_struct scan_work; + struct work_struct async_event_work; + struct delayed_work ka_work; + struct delayed_work failfast_work; + long: 32; + struct nvme_command ka_cmd; + long unsigned int ka_last_check_time; + struct work_struct fw_act_work; + long unsigned int events; + key_serial_t tls_pskid; + long: 32; + u64 ps_max_latency_us; + bool apst_enabled; + u16 hmmaxd; + u32 hmpre; + u32 hmmin; + u32 hmminds; + u32 ioccsz; + u32 iorcsz; + u16 icdoff; + u16 maxcmd; + int nr_reconnects; + long unsigned int flags; + struct nvmf_ctrl_options *opts; + struct page *discard_page; + long unsigned int discard_page_busy; + struct nvme_fault_inject fault_inject; + enum nvme_ctrl_type cntrltype; + enum nvme_dctype dctype; +}; + +enum { + NVME_REQ_CANCELLED = 1, + NVME_REQ_USERCMD = 2, + NVME_MPATH_IO_STATS = 4, + NVME_MPATH_CNT_ACTIVE = 8, +}; + +struct nvme_ctrl_ops { + const char *name; + struct module *module; + unsigned int flags; + const 
struct attribute_group **dev_attr_groups; + int (*reg_read32)(struct nvme_ctrl *, u32, u32 *); + int (*reg_write32)(struct nvme_ctrl *, u32, u32); + int (*reg_read64)(struct nvme_ctrl *, u32, u64 *); + void (*free_ctrl)(struct nvme_ctrl *); + void (*submit_async_event)(struct nvme_ctrl *); + int (*subsystem_reset)(struct nvme_ctrl *); + void (*delete_ctrl)(struct nvme_ctrl *); + void (*stop_ctrl)(struct nvme_ctrl *); + int (*get_address)(struct nvme_ctrl *, char *, int); + void (*print_device_info)(struct nvme_ctrl *); + bool (*supports_pci_p2pdma)(struct nvme_ctrl *); +}; + +struct nvme_subsystem { + int instance; + long: 32; + struct device dev; + struct kref ref; + struct list_head entry; + struct mutex lock; + struct list_head ctrls; + struct list_head nsheads; + char subnqn[223]; + char serial[20]; + char model[40]; + char firmware_rev[8]; + u8 cmic; + enum nvme_subsys_type subtype; + u16 vendor_id; + u16 awupf; + struct ida ns_ida; +}; + +struct nvme_ns_ids { + u8 eui64[8]; + u8 nguid[16]; + uuid_t uuid; + u8 csi; +}; + +struct nvme_ns_head { + struct list_head list; + struct srcu_struct srcu; + struct nvme_subsystem *subsys; + struct nvme_ns_ids ids; + u8 lba_shift; + u16 ms; + u16 pi_size; + u8 pi_type; + u8 guard_type; + struct list_head entry; + struct kref ref; + bool shared; + bool rotational; + bool passthru_err_log_enabled; + struct nvme_effects_log *effects; + long: 32; + u64 nuse; + unsigned int ns_id; + int instance; + long unsigned int features; + struct ratelimit_state rs_nuse; + struct cdev cdev; + long: 32; + struct device cdev_device; + struct gendisk *disk; + long: 32; +}; + +enum nvme_ns_features { + NVME_NS_EXT_LBAS = 1, + NVME_NS_METADATA_SUPPORTED = 2, + NVME_NS_DEAC = 4, +}; + +struct nvme_ns { + struct list_head list; + struct nvme_ctrl *ctrl; + struct request_queue *queue; + struct gendisk *disk; + struct list_head siblings; + struct kref kref; + struct nvme_ns_head *head; + long unsigned int flags; + struct cdev cdev; + long: 32; + struct device cdev_device; + struct nvme_fault_inject fault_inject; +}; + +enum { + NVME_IOCTL_VEC = 1, + NVME_IOCTL_PARTITION = 2, +}; + +struct nvme_uring_data { + __u64 metadata; + __u64 addr; + __u32 data_len; + __u32 metadata_len; + __u32 timeout_ms; + long: 32; +}; + +struct nvme_uring_cmd_pdu { + struct request *req; + struct bio *bio; + u64 result; + int status; + long: 32; +}; + +struct swmii_regs { + u16 bmsr; + u16 lpa; + u16 lpagb; + u16 estat; +}; + +enum { + SWMII_SPEED_10 = 0, + SWMII_SPEED_100 = 1, + SWMII_SPEED_1000 = 2, + SWMII_DUPLEX_HALF = 0, + SWMII_DUPLEX_FULL = 1, +}; + +struct dbc_regs { + __le32 capability; + __le32 doorbell; + __le32 ersts; + __le32 __reserved_0; + __le64 erstba; + __le64 erdp; + __le32 control; + __le32 status; + __le32 portsc; + __le32 __reserved_1; + __le64 dccp; + __le32 devinfo1; + __le32 devinfo2; +}; + +struct dbc_str_descs { + char string0[64]; + char manufacturer[64]; + char product[64]; + char serial[64]; +}; + +enum dbc_state { + DS_DISABLED = 0, + DS_INITIALIZED = 1, + DS_ENABLED = 2, + DS_CONNECTED = 3, + DS_CONFIGURED = 4, + DS_MAX = 5, +}; + +struct xhci_dbc; + +struct dbc_ep { + struct xhci_dbc *dbc; + struct list_head list_pending; + struct xhci_ring *ring; + unsigned int direction: 1; + unsigned int halted: 1; +}; + +struct dbc_driver; + +struct xhci_dbc { + spinlock_t lock; + struct device *dev; + struct xhci_hcd *xhci; + struct dbc_regs *regs; + struct xhci_ring *ring_evt; + struct xhci_ring *ring_in; + struct xhci_ring *ring_out; + struct xhci_erst erst; + struct 
xhci_container_ctx *ctx; + struct dbc_str_descs *string; + dma_addr_t string_dma; + size_t string_size; + u16 idVendor; + u16 idProduct; + u16 bcdDevice; + u8 bInterfaceProtocol; + enum dbc_state state; + struct delayed_work event_work; + unsigned int poll_interval; + unsigned int resume_required: 1; + struct dbc_ep eps[2]; + const struct dbc_driver *driver; + void *priv; +}; + +struct dbc_driver { + int (*configure)(struct xhci_dbc *); + void (*disconnect)(struct xhci_dbc *); +}; + +struct dbc_request { + void *buf; + unsigned int length; + dma_addr_t dma; + void (*complete)(struct xhci_dbc *, struct dbc_request *); + struct list_head list_pool; + int status; + unsigned int actual; + struct xhci_dbc *dbc; + struct list_head list_pending; + dma_addr_t trb_dma; + union xhci_trb *trb; + unsigned int direction: 1; +}; + +struct trace_event_raw_xhci_log_msg { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctx { + struct trace_entry ent; + int ctx_64; + unsigned int ctx_type; + dma_addr_t ctx_dma; + u8 *ctx_va; + unsigned int ctx_ep_num; + u32 __data_loc_ctx_data; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_trb { + struct trace_entry ent; + dma_addr_t dma; + u32 type; + u32 field0; + u32 field1; + u32 field2; + u32 field3; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_free_virt_dev { + struct trace_entry ent; + void *vdev; + long: 32; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + int slot_id; + u16 current_mel; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_virt_dev { + struct trace_entry ent; + void *vdev; + long: 32; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + int devnum; + int state; + int speed; + u8 portnum; + u8 level; + int slot_id; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_xhci_log_urb { + struct trace_entry ent; + u32 __data_loc_devname; + void *urb; + unsigned int pipe; + unsigned int stream; + int status; + unsigned int flags; + int num_mapped_sgs; + int num_sgs; + int length; + int actual; + int epnum; + int dir_in; + int type; + int slot_id; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_stream_ctx { + struct trace_entry ent; + unsigned int stream_id; + long: 32; + u64 stream_ring; + dma_addr_t ctx_array_dma; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_xhci_log_ep_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u64 deq; + u32 tx_info; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_xhci_log_slot_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u32 tt_info; + u32 state; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctrl_ctx { + struct trace_entry ent; + u32 drop; + u32 add; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ring { + struct trace_entry ent; + u32 type; + void *ring; + dma_addr_t enq; + dma_addr_t deq; + unsigned int num_segs; + unsigned int stream_id; + unsigned int cycle_state; + unsigned int bounce_buf_len; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_portsc { + struct trace_entry ent; + u32 busnum; + u32 portnum; + u32 portsc; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_doorbell { + struct trace_entry ent; + u32 slot; + u32 doorbell; + char __data[0]; +}; + +struct trace_event_raw_xhci_dbc_log_request { + struct trace_entry ent; + struct dbc_request *req; + bool dir; + unsigned int actual; + unsigned int length; + int status; + char __data[0]; +}; + +struct trace_event_data_offsets_xhci_log_msg { + u32 msg; + 
const void *msg_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_ctx { + u32 ctx_data; + const void *ctx_data_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_trb {}; + +struct trace_event_data_offsets_xhci_log_free_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_urb { + u32 devname; + const void *devname_ptr_; +}; + +struct trace_event_data_offsets_xhci_log_stream_ctx {}; + +struct trace_event_data_offsets_xhci_log_ep_ctx {}; + +struct trace_event_data_offsets_xhci_log_slot_ctx {}; + +struct trace_event_data_offsets_xhci_log_ctrl_ctx {}; + +struct trace_event_data_offsets_xhci_log_ring {}; + +struct trace_event_data_offsets_xhci_log_portsc {}; + +struct trace_event_data_offsets_xhci_log_doorbell {}; + +struct trace_event_data_offsets_xhci_dbc_log_request {}; + +typedef void (*btf_trace_xhci_dbg_address)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_context_change)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_quirks)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_reset_ep)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_cancel_urb)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_init)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_ring_expansion)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_address_ctx)(void *, struct xhci_hcd *, struct xhci_container_ctx *, unsigned int); + +typedef void (*btf_trace_xhci_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_handle_command)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_queue_trb)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_dbc_gadget_ep_queue)(void *, struct xhci_ring *, struct xhci_generic_trb *, dma_addr_t); + +typedef void (*btf_trace_xhci_free_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_alloc_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_addressable_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_stop_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_urb_enqueue)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_giveback)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_dequeue)(void *, struct urb *); + +typedef void (*btf_trace_xhci_alloc_stream_info_ctx)(void *, struct xhci_stream_info *, unsigned int); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq_stream)(void *, struct xhci_stream_info *, unsigned int); + +typedef void (*btf_trace_xhci_handle_cmd_stop_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_config_ep)(void *, struct xhci_ep_ctx *); + +typedef void 
(*btf_trace_xhci_add_endpoint)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_alloc_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_free_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_disable_slot)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_discover_or_reset_device)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_setup_device_slot)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_addr_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_address_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_ring_alloc)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_free)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_expansion)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_inc_enq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_inc_deq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_handle_port_status)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_get_port_status)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_hub_status_data)(void *, struct xhci_port *, u32); + +typedef void (*btf_trace_xhci_ring_ep_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_ring_host_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_dbc_alloc_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_free_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_queue_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_giveback_request)(void *, struct dbc_request *); + +typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_cell_post_process_t)(void *, const char *, int, unsigned int, void *, size_t); + +enum nvmem_type { + NVMEM_TYPE_UNKNOWN = 0, + NVMEM_TYPE_EEPROM = 1, + NVMEM_TYPE_OTP = 2, + NVMEM_TYPE_BATTERY_BACKED = 3, + NVMEM_TYPE_FRAM = 4, +}; + +struct nvmem_keepout { + unsigned int start; + unsigned int end; + unsigned char value; +}; + +struct nvmem_cell_info { + const char *name; + unsigned int offset; + size_t raw_len; + unsigned int bytes; + unsigned int bit_offset; + unsigned int nbits; + struct device_node *np; + nvmem_cell_post_process_t read_post_process; + void *priv; +}; + +struct nvmem_layout; + +struct nvmem_device { + struct module *owner; + long: 32; + struct device dev; + struct list_head node; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + struct nvmem_layout *layout; + void *priv; + bool sysfs_cells_populated; + 
long: 32; +}; + +struct nvmem_layout { + struct device dev; + struct nvmem_device *nvmem; + int (*add_cells)(struct nvmem_layout *); +}; + +struct nvmem_layout_driver { + struct device_driver driver; + int (*probe)(struct nvmem_layout *); + void (*remove)(struct nvmem_layout *); +}; + +struct flow_dissector_mpls_lse { + u32 mpls_ttl: 8; + u32 mpls_bos: 1; + u32 mpls_tc: 3; + u32 mpls_label: 20; +}; + +struct flow_dissector_key_mpls { + struct flow_dissector_mpls_lse ls[7]; + u8 used_lses; +}; + +struct flow_dissector_key_enc_opts { + u8 data[255]; + u8 len; + u32 dst_opt_type; +}; + +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[6]; + unsigned char tha[6]; +}; + +struct flow_dissector_key_ports_range { + union { + struct flow_dissector_key_ports tp; + struct { + struct flow_dissector_key_ports tp_min; + struct flow_dissector_key_ports tp_max; + }; + }; +}; + +struct flow_dissector_key_ct { + u16 ct_state; + u16 ct_zone; + u32 ct_mark; + u32 ct_labels[4]; +}; + +struct flow_dissector_key_pppoe { + __be16 session_id; + __be16 ppp_proto; + __be16 type; +}; + +struct flow_dissector_key_l2tpv3 { + __be32 session_id; +}; + +struct flow_dissector_key_ipsec { + __be32 spi; +}; + +struct flow_match_meta { + struct flow_dissector_key_meta *key; + struct flow_dissector_key_meta *mask; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key; + struct flow_dissector_key_basic *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key; + struct flow_dissector_key_control *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key; + struct flow_dissector_key_eth_addrs *mask; +}; + +struct flow_match_vlan { + struct flow_dissector_key_vlan *key; + struct flow_dissector_key_vlan *mask; +}; + +struct flow_match_arp { + struct flow_dissector_key_arp *key; + struct flow_dissector_key_arp *mask; +}; + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key; + struct flow_dissector_key_ipv4_addrs *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key; + struct flow_dissector_key_ipv6_addrs *mask; +}; + +struct flow_match_ip { + struct flow_dissector_key_ip *key; + struct flow_dissector_key_ip *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key; + struct flow_dissector_key_ports *mask; +}; + +struct flow_match_ports_range { + struct flow_dissector_key_ports_range *key; + struct flow_dissector_key_ports_range *mask; +}; + +struct flow_match_icmp { + struct flow_dissector_key_icmp *key; + struct flow_dissector_key_icmp *mask; +}; + +struct flow_match_tcp { + struct flow_dissector_key_tcp *key; + struct flow_dissector_key_tcp *mask; +}; + +struct flow_match_ipsec { + struct flow_dissector_key_ipsec *key; + struct flow_dissector_key_ipsec *mask; +}; + +struct flow_match_mpls { + struct flow_dissector_key_mpls *key; + struct flow_dissector_key_mpls *mask; +}; + +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key; + struct flow_dissector_key_keyid *mask; +}; + +struct flow_match_enc_opts { + struct flow_dissector_key_enc_opts *key; + struct flow_dissector_key_enc_opts *mask; +}; + +struct flow_match_ct { + struct flow_dissector_key_ct *key; + struct flow_dissector_key_ct *mask; +}; + +struct flow_match_pppoe { + struct flow_dissector_key_pppoe *key; + struct flow_dissector_key_pppoe *mask; +}; + +struct flow_match_l2tpv3 { + struct flow_dissector_key_l2tpv3 *key; + struct flow_dissector_key_l2tpv3 *mask; +}; + 
+enum offload_act_command { + FLOW_ACT_REPLACE = 0, + FLOW_ACT_DESTROY = 1, + FLOW_ACT_STATS = 2, +}; + +struct flow_offload_action { + struct netlink_ext_ack *extack; + enum offload_act_command command; + enum flow_action_id id; + u32 index; + long unsigned int cookie; + long: 32; + struct flow_stats stats; + struct flow_action action; +}; + +typedef int flow_indr_block_bind_cb_t(struct net_device *, struct Qdisc *, void *, enum tc_setup_type, void *, void *, void (*)(struct flow_block_cb *)); + +struct flow_indr_dev { + struct list_head list; + flow_indr_block_bind_cb_t *cb; + void *cb_priv; + refcount_t refcnt; +}; + +struct flow_indir_dev_info { + void *data; + struct net_device *dev; + struct Qdisc *sch; + enum tc_setup_type type; + void (*cleanup)(struct flow_block_cb *); + struct list_head list; + enum flow_block_command command; + enum flow_block_binder_type binder_type; + struct list_head *cb_list; +}; + +struct llc_pdu_sn { + u8 dsap; + u8 ssap; + u8 ctrl_1; + u8 ctrl_2; +}; + +struct llc_pdu_un { + u8 dsap; + u8 ssap; + u8 ctrl_1; +}; + +struct privflags_reply_data { + struct ethnl_reply_data base; + const char (*priv_flag_names)[32]; + unsigned int n_priv_flags; + u32 priv_flags; +}; + +struct phy_req_info { + struct ethnl_req_info base; + struct phy_device_node *pdn; +}; + +struct ethnl_phy_dump_ctx { + struct phy_req_info *phy_req_info; + long unsigned int ifindex; + long unsigned int phy_index; +}; + +enum nft_rule_compat_flags { + NFT_RULE_COMPAT_F_UNUSED = 1, + NFT_RULE_COMPAT_F_INV = 2, + NFT_RULE_COMPAT_F_MASK = 2, +}; + +enum nft_rule_compat_attributes { + NFTA_RULE_COMPAT_UNSPEC = 0, + NFTA_RULE_COMPAT_PROTO = 1, + NFTA_RULE_COMPAT_FLAGS = 2, + __NFTA_RULE_COMPAT_MAX = 3, +}; + +enum nft_target_attributes { + NFTA_TARGET_UNSPEC = 0, + NFTA_TARGET_NAME = 1, + NFTA_TARGET_REV = 2, + NFTA_TARGET_INFO = 3, + __NFTA_TARGET_MAX = 4, +}; + +enum nft_match_attributes { + NFTA_MATCH_UNSPEC = 0, + NFTA_MATCH_NAME = 1, + NFTA_MATCH_REV = 2, + NFTA_MATCH_INFO = 3, + __NFTA_MATCH_MAX = 4, +}; + +enum { + NFNL_MSG_COMPAT_GET = 0, + NFNL_MSG_COMPAT_MAX = 1, +}; + +enum { + NFTA_COMPAT_UNSPEC = 0, + NFTA_COMPAT_NAME = 1, + NFTA_COMPAT_REV = 2, + NFTA_COMPAT_TYPE = 3, + __NFTA_COMPAT_MAX = 4, +}; + +struct ip6t_entry { + struct ip6t_ip6 ipv6; + unsigned int nfcache; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + long: 32; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct ebt_entry { + unsigned int bitmask; + unsigned int invflags; + __be16 ethproto; + char in[16]; + char logical_in[16]; + char out[16]; + char logical_out[16]; + unsigned char sourcemac[6]; + unsigned char sourcemsk[6]; + unsigned char destmac[6]; + unsigned char destmsk[6]; + union { + struct { + unsigned int watchers_offset; + unsigned int target_offset; + unsigned int next_offset; + }; + struct { + unsigned int watchers_offset; + unsigned int target_offset; + unsigned int next_offset; + } offsets; + }; + unsigned char elems[0]; +}; + +struct arpt_devaddr_info { + char addr[16]; + char mask[16]; +}; + +struct arpt_arp { + struct in_addr src; + struct in_addr tgt; + struct in_addr smsk; + struct in_addr tmsk; + __u8 arhln; + __u8 arhln_mask; + struct arpt_devaddr_info src_devaddr; + struct arpt_devaddr_info tgt_devaddr; + __be16 arpop; + __be16 arpop_mask; + __be16 arhrd; + __be16 arhrd_mask; + __be16 arpro; + __be16 arpro_mask; + char iniface[16]; + char outiface[16]; + unsigned char iniface_mask[16]; + unsigned char outiface_mask[16]; + __u8 flags; + __u16 invflags; +}; + 
+struct arpt_entry { + struct arpt_arp arp; + __u16 target_offset; + __u16 next_offset; + unsigned int comefrom; + long: 32; + struct xt_counters counters; + unsigned char elems[0]; +}; + +struct nft_xt_match_priv { + void *info; +}; + +union nft_entry { + struct ipt_entry e4; + struct ip6t_entry e6; + struct ebt_entry ebt; + struct arpt_entry arp; +}; + +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER = 0, + IP6_DEFRAG_CONNTRACK_IN = 1, + __IP6_DEFRAG_CONNTRACK_IN = 65536, + IP6_DEFRAG_CONNTRACK_OUT = 65537, + __IP6_DEFRAG_CONNTRACK_OUT = 131072, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608, +}; + +enum idle_boot_override { + IDLE_NO_OVERRIDE = 0, + IDLE_POWERSAVE_OFF = 1, +}; + +enum { + NDD_UNARMED = 1, + NDD_LOCKED = 2, + NDD_SECURITY_OVERWRITE = 3, + NDD_WORK_PENDING = 4, + NDD_LABELING = 6, + NDD_INCOHERENT = 7, + NDD_REGISTER_SYNC = 8, + ND_IOCTL_MAX_BUFLEN = 4194304, + ND_CMD_MAX_ELEM = 5, + ND_CMD_MAX_ENVELOPE = 256, + ND_MAX_MAPPINGS = 32, + ND_REGION_PAGEMAP = 0, + ND_REGION_PERSIST_CACHE = 1, + ND_REGION_PERSIST_MEMCTRL = 2, + ND_REGION_ASYNC = 3, + ND_REGION_CXL = 4, + DPA_RESOURCE_ADJUSTED = 1, +}; + +enum { + __SD_BALANCE_NEWIDLE = 0, + __SD_BALANCE_EXEC = 1, + __SD_BALANCE_FORK = 2, + __SD_BALANCE_WAKE = 3, + __SD_WAKE_AFFINE = 4, + __SD_ASYM_CPUCAPACITY = 5, + __SD_ASYM_CPUCAPACITY_FULL = 6, + __SD_SHARE_CPUCAPACITY = 7, + __SD_CLUSTER = 8, + __SD_SHARE_LLC = 9, + __SD_SERIALIZE = 10, + __SD_ASYM_PACKING = 11, + __SD_PREFER_SIBLING = 12, + __SD_OVERLAP = 13, + __SD_NUMA = 14, + __SD_FLAG_CNT = 15, +}; + +struct sd_flag_debug { + unsigned int meta_flags; + char *name; +}; + +enum { + MEMBARRIER_FLAG_SYNC_CORE = 1, + MEMBARRIER_FLAG_RSEQ = 2, +}; + +enum membarrier_cmd { + MEMBARRIER_CMD_QUERY = 0, + MEMBARRIER_CMD_GLOBAL = 1, + MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, + MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, + MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = 128, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = 256, + MEMBARRIER_CMD_GET_REGISTRATIONS = 512, + MEMBARRIER_CMD_SHARED = 1, +}; + +enum membarrier_cmd_flag { + MEMBARRIER_CMD_FLAG_CPU = 1, +}; + +struct psi_window { + u64 size; + u64 start_time; + u64 start_value; + u64 prev_growth; +}; + +struct psi_trigger { + enum psi_states state; + long: 32; + u64 threshold; + struct list_head node; + struct psi_group *group; + wait_queue_head_t event_wait; + struct kernfs_open_file *of; + int event; + struct psi_window win; + u64 last_event_time; + bool pending_event; + enum psi_aggregators aggregator; +}; + +enum cpuacct_stat_index { + CPUACCT_STAT_USER = 0, + CPUACCT_STAT_SYSTEM = 1, + CPUACCT_STAT_NSTATS = 2, +}; + +struct cpuacct { + struct cgroup_subsys_state css; + u64 *cpuusage; + struct kernel_cpustat *cpustat; +}; + +enum dl_param { + DL_RUNTIME = 0, + DL_PERIOD = 1, +}; + +struct s_data { + struct sched_domain **sd; + struct root_domain *rd; +}; + +enum s_alloc { + sa_rootdomain = 0, + sa_sd = 1, + sa_sd_storage = 2, + sa_none = 3, +}; + +enum hk_flags { + HK_FLAG_TIMER = 1, + HK_FLAG_RCU = 2, + HK_FLAG_MISC = 4, + HK_FLAG_SCHED = 8, + HK_FLAG_TICK = 16, + HK_FLAG_DOMAIN = 32, + HK_FLAG_WQ = 64, + HK_FLAG_MANAGED_IRQ = 128, + HK_FLAG_KTHREAD = 256, +}; + +struct housekeeping { + struct cpumask cpumasks[9]; + long unsigned int flags; +}; + +enum { + CSS_TASK_ITER_PROCS = 1, + 
CSS_TASK_ITER_THREADED = 2, + CSS_TASK_ITER_SKIPPED = 65536, +}; + +struct trace_event_raw_cgroup_root { + struct trace_entry ent; + int root; + u16 ss_mask; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_cgroup { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cgroup_migrate { + struct trace_entry ent; + int dst_root; + int dst_level; + u64 dst_id; + int pid; + u32 __data_loc_dst_path; + u32 __data_loc_comm; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_cgroup_event { + struct trace_entry ent; + int root; + int level; + u64 id; + u32 __data_loc_path; + int val; + char __data[0]; +}; + +struct trace_event_raw_cgroup_rstat { + struct trace_entry ent; + int root; + int level; + u64 id; + int cpu; + bool contended; + char __data[0]; +}; + +struct trace_event_data_offsets_cgroup_root { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cgroup { + u32 path; + const void *path_ptr_; +}; + +struct trace_event_data_offsets_cgroup_migrate { + u32 dst_path; + const void *dst_path_ptr_; + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_cgroup_event { + u32 path; + const void *path_ptr_; +}; + +struct trace_event_data_offsets_cgroup_rstat {}; + +typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_rstat_lock_contended)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_locked)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_unlock)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_lock_contended)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_lock_contended_fastpath)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_locked)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_locked_fastpath)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_unlock)(void *, struct cgroup *, int, bool); + +typedef void (*btf_trace_cgroup_rstat_cpu_unlock_fastpath)(void *, struct cgroup *, int, bool); + +enum cgroup_opt_features { + OPT_FEATURE_PRESSURE = 0, + OPT_FEATURE_COUNT = 1, +}; + +enum cgroup2_param { + Opt_nsdelegate = 0, + Opt_favordynmods___2 = 1, + Opt_memory_localevents = 2, + 
Opt_memory_recursiveprot = 3, + Opt_memory_hugetlb_accounting = 4, + Opt_pids_localevents = 5, + nr__cgroup2_params = 6, +}; + +enum { + BPF_F_SYSCTL_BASE_NAME = 1, +}; + +struct bpf_cgroup_dev_ctx { + __u32 access_type; + __u32 major; + __u32 minor; +}; + +struct bpf_sysctl { + __u32 write; + __u32 file_pos; +}; + +struct bpf_sysctl_kern { + struct ctl_table_header *head; + const struct ctl_table *table; + void *cur_val; + size_t cur_len; + void *new_val; + size_t new_len; + int new_updated; + int write; + loff_t *ppos; + long: 32; + u64 tmp_reg; +}; + +struct bpf_sockopt_buf { + u8 data[32]; +}; + +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + struct task_struct *current_task; + long: 32; + u64 tmp_reg; +}; + +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum bpf_attach_type type; +}; + +struct bpf_prog_list { + struct hlist_node node; + struct bpf_prog *prog; + struct bpf_cgroup_link *link; + struct bpf_cgroup_storage *storage[2]; +}; + +typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_retval)(); + +typedef u64 (*btf_bpf_set_retval)(int); + +typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64); + +typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t); + +typedef u64 (*btf_bpf_get_netns_cookie_sockopt)(struct bpf_sockopt_kern *); + +enum { + FOLL_TOUCH = 65536, + FOLL_TRIED = 131072, + FOLL_REMOTE = 262144, + FOLL_PIN = 524288, + FOLL_FAST_ONLY = 1048576, + FOLL_UNLOCKABLE = 2097152, + FOLL_MADV_POPULATE = 4194304, +}; + +struct follow_page_context { + struct dev_pagemap *pgmap; + unsigned int page_mask; +}; + +struct pages_or_folios { + union { + struct page **pages; + struct folio **folios; + void **entries; + }; + bool has_folios; + long int nr_entries; +}; + +struct swap_iocb { + struct kiocb iocb; + struct bio_vec bvec[32]; + int pages; + int len; +}; + +typedef struct kobject *kobj_probe_t(dev_t, int *, void *); + +struct char_device_struct { + struct char_device_struct *next; + unsigned int major; + unsigned int baseminor; + int minorct; + char name[64]; + struct cdev *cdev; +}; + +struct prepend_buffer { + char *buf; + int len; +}; + +struct core_vma_metadata; + +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct file *file; + long unsigned int limit; + long unsigned int mm_flags; + int cpu; + long: 32; + loff_t written; + loff_t pos; + loff_t to_skip; + int vma_count; + size_t vma_data_size; + struct core_vma_metadata *vma_meta; + long: 32; +}; + +struct elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + long unsigned int pr_flag; + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +struct core_vma_metadata { + long unsigned int start; + long unsigned int end; + long unsigned int flags; + long unsigned int dump_size; + long unsigned int pgoff; + struct file *file; +}; + +struct arch_elf_state {}; + +struct memelfnote { + const char *name; + int type; + unsigned int datasz; + void *data; +}; + +struct elf_thread_core_info { + struct elf_thread_core_info *next; + struct task_struct *task; + struct elf_prstatus prstatus; + 
struct memelfnote notes[0]; +}; + +struct elf_note_info { + struct elf_thread_core_info *thread; + struct memelfnote psinfo; + struct memelfnote signote; + struct memelfnote auxv; + struct memelfnote files; + siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +enum { + attr_noop = 0, + attr_delayed_allocation_blocks = 1, + attr_session_write_kbytes = 2, + attr_lifetime_write_kbytes = 3, + attr_reserved_clusters = 4, + attr_sra_exceeded_retry_limit = 5, + attr_inode_readahead = 6, + attr_trigger_test_error = 7, + attr_first_error_time = 8, + attr_last_error_time = 9, + attr_clusters_in_group = 10, + attr_mb_order = 11, + attr_feature = 12, + attr_pointer_pi = 13, + attr_pointer_ui = 14, + attr_pointer_ul = 15, + attr_pointer_u64 = 16, + attr_pointer_u8 = 17, + attr_pointer_string = 18, + attr_pointer_atomic = 19, + attr_journal_task = 20, +}; + +enum { + ptr_explicit = 0, + ptr_ext4_sb_info_offset = 1, + ptr_ext4_super_block_offset = 2, +}; + +struct ext4_attr { + struct attribute attr; + short int attr_id; + short int attr_ptr; + short unsigned int attr_size; + union { + int offset; + void *explicit_ptr; + } u; +}; + +struct autofs_packet_hdr { + int proto_version; + int type; +}; + +struct autofs_packet_missing { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[256]; +}; + +struct autofs_packet_expire { + struct autofs_packet_hdr hdr; + int len; + char name[256]; +}; + +enum autofs_notify { + NFY_NONE = 0, + NFY_MOUNT = 1, + NFY_EXPIRE = 2, +}; + +struct autofs_packet_expire_multi { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[256]; +}; + +union autofs_packet_union { + struct autofs_packet_hdr hdr; + struct autofs_packet_missing missing; + struct autofs_packet_expire expire; + struct autofs_packet_expire_multi expire_multi; +}; + +struct autofs_v5_packet { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + __u32 dev; + __u64 ino; + __u32 uid; + __u32 gid; + __u32 pid; + __u32 tgid; + __u32 len; + char name[256]; + long: 32; +}; + +typedef struct autofs_v5_packet autofs_packet_missing_indirect_t; + +typedef struct autofs_v5_packet autofs_packet_expire_indirect_t; + +typedef struct autofs_v5_packet autofs_packet_missing_direct_t; + +typedef struct autofs_v5_packet autofs_packet_expire_direct_t; + +union autofs_v5_packet_union { + struct autofs_packet_hdr hdr; + struct autofs_v5_packet v5_packet; + autofs_packet_missing_indirect_t missing_indirect; + autofs_packet_expire_indirect_t expire_indirect; + autofs_packet_missing_direct_t missing_direct; + autofs_packet_expire_direct_t expire_direct; +}; + +struct btrfs_stripe_hash { + struct list_head hash_list; + spinlock_t lock; +}; + +struct btrfs_stripe_hash_table { + struct list_head stripe_cache; + spinlock_t cache_lock; + int cache_size; + struct btrfs_stripe_hash table[0]; +}; + +enum btrfs_rbio_ops { + BTRFS_RBIO_WRITE = 0, + BTRFS_RBIO_READ_REBUILD = 1, + BTRFS_RBIO_PARITY_SCRUB = 2, +}; + +struct sector_ptr; + +struct btrfs_raid_bio { + struct btrfs_io_context *bioc; + struct list_head hash_list; + struct list_head stripe_cache; + struct work_struct work; + struct bio_list bio_list; + spinlock_t bio_list_lock; + struct list_head plug_list; + long unsigned int flags; + enum btrfs_rbio_ops operation; + u16 nr_pages; + u16 nr_sectors; + u8 nr_data; + u8 real_stripes; + u8 stripe_npages; + u8 stripe_nsectors; + u8 scrubp; + int bio_list_bytes; + refcount_t refs; + atomic_t stripes_pending; + wait_queue_head_t io_wait; + long unsigned 
int dbitmap; + long unsigned int finish_pbitmap; + struct page **stripe_pages; + struct sector_ptr *bio_sectors; + struct sector_ptr *stripe_sectors; + void **finish_pointers; + long unsigned int *error_bitmap; + u8 *csum_buf; + long unsigned int *csum_bitmap; +}; + +struct sector_ptr { + struct page *page; + unsigned int pgoff: 24; + unsigned int uptodate: 8; +}; + +struct raid56_bio_trace_info { + u64 devid; + u32 offset; + u8 stripe_nr; +}; + +struct btrfs_plug_cb { + struct blk_plug_cb cb; + struct btrfs_fs_info *info; + struct list_head rbio_list; +}; + +typedef short int __kernel_ipc_pid_t; + +struct shmid_ds { + struct ipc_perm shm_perm; + int shm_segsz; + __kernel_old_time_t shm_atime; + __kernel_old_time_t shm_dtime; + __kernel_old_time_t shm_ctime; + __kernel_ipc_pid_t shm_cpid; + __kernel_ipc_pid_t shm_lpid; + short unsigned int shm_nattch; + short unsigned int shm_unused; + void *shm_unused2; + void *shm_unused3; +}; + +struct shmid64_ds { + struct ipc64_perm shm_perm; + long unsigned int shm_atime_high; + long unsigned int shm_atime; + long unsigned int shm_dtime_high; + long unsigned int shm_dtime; + long unsigned int shm_ctime_high; + long unsigned int shm_ctime; + long unsigned int __unused4; + __kernel_size_t shm_segsz; + __kernel_pid_t shm_cpid; + __kernel_pid_t shm_lpid; + long unsigned int shm_nattch; + long unsigned int __unused5; + long unsigned int __unused6; + long: 32; +}; + +struct shminfo64 { + long unsigned int shmmax; + long unsigned int shmmin; + long unsigned int shmmni; + long unsigned int shmseg; + long unsigned int shmall; + long unsigned int __unused1; + long unsigned int __unused2; + long unsigned int __unused3; + long unsigned int __unused4; +}; + +struct shminfo { + int shmmax; + int shmmin; + int shmmni; + int shmseg; + int shmall; +}; + +struct shm_info { + int used_ids; + __kernel_ulong_t shm_tot; + __kernel_ulong_t shm_rss; + __kernel_ulong_t shm_swp; + __kernel_ulong_t swap_attempts; + __kernel_ulong_t swap_successes; +}; + +struct shmid_kernel { + struct kern_ipc_perm shm_perm; + struct file *shm_file; + long unsigned int shm_nattch; + long unsigned int shm_segsz; + long: 32; + time64_t shm_atim; + time64_t shm_dtim; + time64_t shm_ctim; + struct pid *shm_cprid; + struct pid *shm_lprid; + struct ucounts *mlock_ucounts; + struct task_struct *shm_creator; + struct list_head shm_clist; + struct ipc_namespace *ns; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct shm_file_data { + int id; + struct ipc_namespace *ns; + struct file *file; + const struct vm_operations_struct *vm_ops; +}; + +struct lskcipher_instance { + void (*free)(struct lskcipher_instance *); + long: 32; + union { + struct { + char head[48]; + struct crypto_instance base; + } s; + struct lskcipher_alg alg; + }; +}; + +enum pkcs7_actions { + ACT_pkcs7_check_content_type = 0, + ACT_pkcs7_extract_cert = 1, + ACT_pkcs7_note_OID = 2, + ACT_pkcs7_note_certificate_list = 3, + ACT_pkcs7_note_content = 4, + ACT_pkcs7_note_data = 5, + ACT_pkcs7_note_signed_info = 6, + ACT_pkcs7_note_signeddata_version = 7, + ACT_pkcs7_note_signerinfo_version = 8, + ACT_pkcs7_sig_note_authenticated_attr = 9, + ACT_pkcs7_sig_note_digest_algo = 10, + ACT_pkcs7_sig_note_issuer = 11, + ACT_pkcs7_sig_note_pkey_algo = 12, + ACT_pkcs7_sig_note_serial = 13, + ACT_pkcs7_sig_note_set_of_authattrs = 14, + ACT_pkcs7_sig_note_signature = 15, + ACT_pkcs7_sig_note_skid = 16, + NR__pkcs7_actions = 17, +}; + +enum prio_policy { + POLICY_NO_CHANGE = 0, + POLICY_PROMOTE_TO_RT = 1, + 
POLICY_RESTRICT_TO_BE = 2, + POLICY_ALL_TO_IDLE = 3, + POLICY_NONE_TO_RT = 4, +}; + +struct ioprio_blkcg { + struct blkcg_policy_data cpd; + enum prio_policy prio_policy; +}; + +struct open_how { + __u64 flags; + __u64 mode; + __u64 resolve; +}; + +struct open_flags { + int open_flag; + umode_t mode; + int acc_mode; + int intent; + int lookup_flags; +}; + +struct io_open { + struct file *file; + int dfd; + u32 file_slot; + struct filename *filename; + struct open_how how; + long unsigned int nofile; + long: 32; +}; + +struct io_close { + struct file *file; + int fd; + u32 file_slot; +}; + +struct io_fixed_install { + struct file *file; + unsigned int o_flags; +}; + +enum io_uring_napi_op { + IO_URING_NAPI_REGISTER_OP = 0, + IO_URING_NAPI_STATIC_ADD_ID = 1, + IO_URING_NAPI_STATIC_DEL_ID = 2, +}; + +struct io_uring_napi { + __u32 busy_poll_to; + __u8 prefer_busy_poll; + __u8 opcode; + __u8 pad[2]; + __u32 op_param; + __u32 resv; +}; + +struct io_napi_entry { + unsigned int napi_id; + struct list_head list; + long unsigned int timeout; + struct hlist_node node; + struct callback_head rcu; +}; + +struct ZSTD_CDict_s { + const void *dictContent; + size_t dictContentSize; + ZSTD_dictContentType_e dictContentType; + U32 *entropyWorkspace; + ZSTD_cwksp workspace; + ZSTD_matchState_t matchState; + ZSTD_compressedBlockState_t cBlockState; + ZSTD_customMem customMem; + U32 dictID; + int compressionLevel; + ZSTD_paramSwitch_e useRowMatchFinder; +}; + +typedef struct { + U32 litLength; + U32 matchLength; +} ZSTD_sequenceLength; + +union handle_parts { + depot_stack_handle_t handle; + struct { + u32 pool_index_plus_1: 17; + u32 offset: 10; + u32 extra: 5; + }; +}; + +struct stack_record { + struct list_head hash_list; + u32 hash; + u32 size; + union handle_parts handle; + refcount_t count; + union { + long unsigned int entries[64]; + struct { + struct list_head free_list; + long unsigned int rcu_state; + }; + }; +}; + +typedef u32 depot_flags_t; + +enum depot_counter_id { + DEPOT_COUNTER_REFD_ALLOCS = 0, + DEPOT_COUNTER_REFD_FREES = 1, + DEPOT_COUNTER_REFD_INUSE = 2, + DEPOT_COUNTER_FREELIST_SIZE = 3, + DEPOT_COUNTER_PERSIST_COUNT = 4, + DEPOT_COUNTER_PERSIST_BYTES = 5, + DEPOT_COUNTER_COUNT = 6, +}; + +struct pci_slot_attribute { + struct attribute attr; + ssize_t (*show)(struct pci_slot *, char *); + ssize_t (*store)(struct pci_slot *, const char *, size_t); +}; + +struct clk_notifier { + struct clk *clk; + struct srcu_notifier_head notifier_head; + struct list_head node; +}; + +struct clk_notifier_data { + struct clk *clk; + long unsigned int old_rate; + long unsigned int new_rate; +}; + +struct clk_parent_map; + +struct clk_core { + const char *name; + const struct clk_ops *ops; + struct clk_hw *hw; + struct module *owner; + struct device *dev; + struct hlist_node rpm_node; + struct device_node *of_node; + struct clk_core *parent; + struct clk_parent_map *parents; + u8 num_parents; + u8 new_parent_index; + long unsigned int rate; + long unsigned int req_rate; + long unsigned int new_rate; + struct clk_core *new_parent; + struct clk_core *new_child; + long unsigned int flags; + bool orphan; + bool rpm_enabled; + unsigned int enable_count; + unsigned int prepare_count; + unsigned int protect_count; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int accuracy; + int phase; + struct clk_duty duty; + struct hlist_head children; + struct hlist_node child_node; + struct hlist_head clks; + unsigned int notifier_count; + struct dentry *dentry; + struct hlist_node debug_node; + 
struct kref ref; +}; + +struct clk_onecell_data { + struct clk **clks; + unsigned int clk_num; +}; + +struct clk_hw_onecell_data { + unsigned int num; + struct clk_hw *hws[0]; +}; + +struct clk_parent_map { + const struct clk_hw *hw; + struct clk_core *core; + const char *fw_name; + const char *name; + int index; +}; + +struct trace_event_raw_clk { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_clk_rate { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int rate; + char __data[0]; +}; + +struct trace_event_raw_clk_rate_range { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int min; + long unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_clk_parent { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + char __data[0]; +}; + +struct trace_event_raw_clk_phase { + struct trace_entry ent; + u32 __data_loc_name; + int phase; + char __data[0]; +}; + +struct trace_event_raw_clk_duty_cycle { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int num; + unsigned int den; + char __data[0]; +}; + +struct trace_event_raw_clk_rate_request { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + long unsigned int min; + long unsigned int max; + long unsigned int prate; + char __data[0]; +}; + +struct trace_event_data_offsets_clk { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate_range { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_parent { + u32 name; + const void *name_ptr_; + u32 pname; + const void *pname_ptr_; +}; + +struct trace_event_data_offsets_clk_phase { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_duty_cycle { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_clk_rate_request { + u32 name; + const void *name_ptr_; + u32 pname; + const void *pname_ptr_; +}; + +typedef void (*btf_trace_clk_enable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_enable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_set_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_complete)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_min_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_max_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_range)(void *, struct clk_core *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_clk_set_parent)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_parent_complete)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_phase)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_phase_complete)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_duty_cycle)(void *, struct clk_core 
*, struct clk_duty *); + +typedef void (*btf_trace_clk_set_duty_cycle_complete)(void *, struct clk_core *, struct clk_duty *); + +typedef void (*btf_trace_clk_rate_request_start)(void *, struct clk_rate_request *); + +typedef void (*btf_trace_clk_rate_request_done)(void *, struct clk_rate_request *); + +struct clk_notifier_devres { + struct clk *clk; + struct notifier_block *nb; +}; + +struct of_clk_provider { + struct list_head link; + struct device_node *node; + struct clk * (*get)(struct of_phandle_args *, void *); + struct clk_hw * (*get_hw)(struct of_phandle_args *, void *); + void *data; +}; + +struct clock_provider { + void (*clk_init_cb)(struct device_node *); + struct device_node *np; + struct list_head node; +}; + +typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); + +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; +}; + +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __u8 dlfeat; + __le16 nawun; + __le16 nawupf; + __le16 nacwu; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __le16 noiob; + __u8 nvmcap[16]; + __le16 npwg; + __le16 npwa; + __le16 npdg; + __le16 npda; + __le16 nows; + __u8 rsvd74[18]; + __le32 anagrpid; + __u8 rsvd96[3]; + __u8 nsattr; + __le16 nvmsetid; + __le16 endgid; + __u8 nguid[16]; + __u8 eui64[8]; + struct nvme_lbaf lbaf[64]; + __u8 vs[3712]; +}; + +enum nvme_ctrl_flags { + NVME_CTRL_FAILFAST_EXPIRED = 0, + NVME_CTRL_ADMIN_Q_STOPPED = 1, + NVME_CTRL_STARTED_ONCE = 2, + NVME_CTRL_STOPPED = 3, + NVME_CTRL_SKIP_ID_CNS_CS = 4, + NVME_CTRL_DIRTY_CAPABILITY = 5, + NVME_CTRL_FROZEN = 6, +}; + +struct nvmf_host; + +struct nvmf_ctrl_options { + unsigned int mask; + int max_reconnects; + char *transport; + char *subsysnqn; + char *traddr; + char *trsvcid; + char *host_traddr; + char *host_iface; + size_t queue_size; + unsigned int nr_io_queues; + unsigned int reconnect_delay; + bool discovery_nqn; + bool duplicate_connect; + unsigned int kato; + struct nvmf_host *host; + char *dhchap_secret; + char *dhchap_ctrl_secret; + struct key *keyring; + struct key *tls_key; + bool tls; + bool disable_sqflow; + bool hdr_digest; + bool data_digest; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; + int tos; + int fast_io_fail_tmo; +}; + +struct nvmf_host { + struct kref ref; + struct list_head list; + char nqn[223]; + uuid_t id; +}; + +enum rtl_fw_opcode { + PHY_READ = 0, + PHY_DATA_OR = 1, + PHY_DATA_AND = 2, + PHY_BJMPN = 3, + PHY_MDIO_CHG = 4, + PHY_CLEAR_READCOUNT = 7, + PHY_WRITE = 8, + PHY_READCOUNT_EQ_SKIP = 9, + PHY_COMP_EQ_SKIPN = 10, + PHY_COMP_NEQ_SKIPN = 11, + PHY_WRITE_PREVIOUS = 12, + PHY_SKIPN = 13, + PHY_DELAY_MS = 14, +}; + +struct fw_info { + u32 magic; + char version[32]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __attribute__((packed)); + +struct usb_descriptor_header { + __u8 bLength; + __u8 bDescriptorType; +}; + +struct find_interface_arg { + int minor; + struct device_driver *drv; +}; + +struct each_dev_arg { + void *data; + int (*fn)(struct usb_device *, void *); +}; + +struct class_info { + int class; + char *class_name; +}; + +enum serio_event_type { + SERIO_RESCAN_PORT = 0, + SERIO_RECONNECT_PORT = 1, + SERIO_RECONNECT_SUBTREE = 2, + SERIO_REGISTER_PORT = 3, + SERIO_ATTACH_DRIVER = 4, +}; + +struct serio_event { + enum serio_event_type type; + void *object; + struct module *owner; + struct list_head node; +}; + +struct power_supply_hwmon { + struct power_supply 
*psy; + long unsigned int *props; +}; + +struct hwmon_type_attr_list { + const u32 *attrs; + size_t n_attrs; +}; + +struct r5l_payload_header { + __le16 type; + __le16 flags; +}; + +enum r5l_payload_type { + R5LOG_PAYLOAD_DATA = 0, + R5LOG_PAYLOAD_PARITY = 1, + R5LOG_PAYLOAD_FLUSH = 2, +}; + +struct r5l_payload_data_parity { + struct r5l_payload_header header; + __le32 size; + __le64 location; + __le32 checksum[0]; +}; + +struct r5l_payload_flush { + struct r5l_payload_header header; + __le32 size; + __le64 flush_stripes[0]; +}; + +struct r5l_meta_block { + __le32 magic; + __le32 checksum; + __u8 version; + __u8 __zero_pading_1; + __le16 __zero_pading_2; + __le32 meta_size; + __le64 seq; + __le64 position; + struct r5l_payload_header payloads[0]; +}; + +struct r5l_io_unit { + struct r5l_log *log; + struct page *meta_page; + int meta_offset; + struct bio *current_bio; + atomic_t pending_stripe; + long: 32; + u64 seq; + sector_t log_start; + sector_t log_end; + struct list_head log_sibling; + struct list_head stripe_list; + int state; + bool need_split_bio; + struct bio *split_bio; + unsigned int has_flush: 1; + unsigned int has_fua: 1; + unsigned int has_null_flush: 1; + unsigned int has_flush_payload: 1; + unsigned int io_deferred: 1; + struct bio_list flush_barriers; +}; + +enum r5c_journal_mode { + R5C_JOURNAL_MODE_WRITE_THROUGH = 0, + R5C_JOURNAL_MODE_WRITE_BACK = 1, +}; + +struct r5l_log { + struct md_rdev *rdev; + u32 uuid_checksum; + sector_t device_size; + sector_t max_free_space; + sector_t last_checkpoint; + u64 last_cp_seq; + sector_t log_start; + u64 seq; + sector_t next_checkpoint; + struct mutex io_mutex; + struct r5l_io_unit *current_io; + spinlock_t io_list_lock; + struct list_head running_ios; + struct list_head io_end_ios; + struct list_head flushing_ios; + struct list_head finished_ios; + long: 32; + struct bio flush_bio; + struct list_head no_mem_stripes; + struct kmem_cache *io_kc; + mempool_t io_pool; + struct bio_set bs; + mempool_t meta_pool; + struct md_thread *reclaim_thread; + long unsigned int reclaim_target; + wait_queue_head_t iounit_wait; + struct list_head no_space_stripes; + spinlock_t no_space_stripes_lock; + bool need_cache_flush; + enum r5c_journal_mode r5c_journal_mode; + struct list_head stripe_in_journal_list; + spinlock_t stripe_in_journal_lock; + atomic_t stripe_in_journal_count; + struct work_struct deferred_io_work; + struct work_struct disable_writeback_work; + spinlock_t tree_lock; + struct xarray big_stripe_tree; + long: 32; +}; + +enum r5l_io_unit_state { + IO_UNIT_RUNNING = 0, + IO_UNIT_IO_START = 1, + IO_UNIT_IO_END = 2, + IO_UNIT_STRIPE_END = 3, +}; + +struct r5l_recovery_ctx { + struct page *meta_page; + long: 32; + sector_t meta_total_blocks; + sector_t pos; + u64 seq; + int data_parity_stripes; + int data_only_stripes; + struct list_head cached_list; + struct page *ra_pool[256]; + struct bio_vec ra_bvec[256]; + sector_t pool_offset; + int total_pages; + int valid_pages; +}; + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; + +struct unix_edge; + +struct scm_fp_list { + short int count; + short int count_unix; + short int max; + bool inflight; + bool dead; + struct list_head vertices; + struct unix_edge *edges; + struct user_struct *user; + struct file *fp[253]; +}; + +struct unix_sock; + +struct unix_edge { + struct unix_sock *predecessor; + struct unix_sock *successor; + struct list_head vertex_entry; + struct list_head stack_entry; +}; + +struct scm_cookie { + struct pid *pid; + struct scm_fp_list *fp; + struct scm_creds 
creds; +}; + +struct scm_timestamping { + struct __kernel_old_timespec ts[3]; +}; + +struct scm_timestamping64 { + struct __kernel_timespec ts[3]; +}; + +struct unix_vertex { + struct list_head edges; + struct list_head entry; + struct list_head scc_entry; + long unsigned int out_degree; + long unsigned int index; + long unsigned int scc_index; +}; + +struct scm_stat { + atomic_t nr_fds; + long unsigned int nr_unix_fds; +}; + +struct unix_address; + +struct unix_sock { + struct sock sk; + struct unix_address *addr; + struct path path; + struct mutex iolock; + struct mutex bindlock; + struct sock *peer; + struct sock *listener; + struct unix_vertex *vertex; + spinlock_t lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct socket_wq peer_wq; + wait_queue_entry_t peer_wake; + struct scm_stat scm_stat; + struct sk_buff *oob_skb; +}; + +struct unix_address { + refcount_t refcnt; + int len; + struct sockaddr_un name[0]; +}; + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; + short unsigned int overhead; + short int cell_align; + short unsigned int mpu; + __u32 rate; +}; + +struct tc_prio_qopt { + int bands; + __u8 priomap[16]; +}; + +struct skb_array { + struct ptr_ring ring; +}; + +struct psched_ratecfg { + u64 rate_bytes_ps; + u32 mult; + u16 overhead; + u16 mpu; + u8 linklayer; + u8 shift; + long: 32; +}; + +struct psched_pktrate { + u64 rate_pkts_ps; + u32 mult; + u8 shift; +}; + +struct mini_Qdisc_pair { + struct mini_Qdisc miniq1; + struct mini_Qdisc miniq2; + struct mini_Qdisc **p_miniq; +}; + +struct pfifo_fast_priv { + struct skb_array q[3]; +}; + +enum ethtool_reset_flags { + ETH_RESET_MGMT = 1, + ETH_RESET_IRQ = 2, + ETH_RESET_DMA = 4, + ETH_RESET_FILTER = 8, + ETH_RESET_OFFLOAD = 16, + ETH_RESET_MAC = 32, + ETH_RESET_PHY = 64, + ETH_RESET_RAM = 128, + ETH_RESET_AP = 256, + ETH_RESET_DEDICATED = 65535, + ETH_RESET_ALL = 4294967295, +}; + +struct ethtool_cmis_cdb { + u8 cmis_rev; + u8 read_write_len_ext; + u16 max_completion_time; +}; + +enum ethtool_cmis_cdb_cmd_id { + ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS = 0, + ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES = 64, + ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 65, + ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 257, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 259, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_EPL = 260, + ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 263, + ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 265, + ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 266, +}; + +struct ethtool_cmis_cdb_request { + __be16 id; + union { + struct { + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[120]; + }; + struct { + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[120]; + } body; + }; + u8 *epl; +}; + +struct ethtool_cmis_cdb_cmd_args { + struct ethtool_cmis_cdb_request req; + u16 max_duration; + u8 read_write_len_ext; + u8 msleep_pre_rpl; + u8 rpl_exp_len; + u8 flags; + char *err_msg; +}; + +struct cmis_fw_update_fw_mng_features { + u8 start_cmd_payload_size; + u8 write_mechanism; + u16 max_duration_start; + u16 max_duration_write; + u16 max_duration_complete; +}; + +struct cmis_cdb_fw_mng_features_rpl { + u8 resv1; + u8 resv2; + u8 start_cmd_payload_size; + u8 resv3; + u8 read_write_len_ext; + u8 write_mechanism; + u8 resv4; + u8 resv5; + __be16 max_duration_start; + __be16 resv6; + __be16 max_duration_write; + __be16 max_duration_complete; + __be16 resv7; +}; + +enum cmis_cdb_fw_write_mechanism { + CMIS_CDB_FW_WRITE_MECHANISM_NONE = 0, + 
CMIS_CDB_FW_WRITE_MECHANISM_LPL = 1, + CMIS_CDB_FW_WRITE_MECHANISM_EPL = 16, + CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 17, +}; + +struct cmis_cdb_start_fw_download_pl_h { + __be32 image_size; + __be32 resv1; +}; + +struct cmis_cdb_start_fw_download_pl { + union { + struct { + __be32 image_size; + __be32 resv1; + }; + struct cmis_cdb_start_fw_download_pl_h head; + }; + u8 vendor_data[112]; +}; + +struct cmis_cdb_write_fw_block_lpl_pl { + __be32 block_address; + u8 fw_block[116]; +}; + +struct cmis_cdb_write_fw_block_epl_pl { + u8 fw_block[2048]; +}; + +enum { + CMIS_MODULE_LOW_PWR = 1, + CMIS_MODULE_READY = 3, +}; + +struct cmis_cdb_run_fw_image_pl { + u8 resv1; + u8 image_to_run; + u16 delay_to_reset; +}; + +enum nft_cmp_ops { + NFT_CMP_EQ = 0, + NFT_CMP_NEQ = 1, + NFT_CMP_LT = 2, + NFT_CMP_LTE = 3, + NFT_CMP_GT = 4, + NFT_CMP_GTE = 5, +}; + +enum nft_cmp_attributes { + NFTA_CMP_UNSPEC = 0, + NFTA_CMP_SREG = 1, + NFTA_CMP_OP = 2, + NFTA_CMP_DATA = 3, + __NFTA_CMP_MAX = 4, +}; + +enum nft_offload_reg_flags { + NFT_OFFLOAD_F_NETWORK2HOST = 1, +}; + +struct nft_cmp_expr { + struct nft_data data; + u8 sreg; + u8 len; + enum nft_cmp_ops op: 8; + long: 32; +}; + +union nft_cmp_offload_data { + u16 val16; + u32 val32; + u64 val64; +}; + +struct hop_jumbo_hdr { + u8 nexthdr; + u8 hdrlen; + u8 tlv_type; + u8 tlv_len; + __be32 jumbo_payload_len; +}; + +struct ida_bitmap { + long unsigned int bitmap[32]; +}; + +struct Hydra { + char Pad1[48]; + u_int CachePD; + u_int IDs; + u_int Feature_Control; + char Pad2[32708]; + char SCSI_DMA[256]; + char Pad3[768]; + char SCCA_Tx_DMA[256]; + char SCCA_Rx_DMA[256]; + char SCCB_Tx_DMA[256]; + char SCCB_Rx_DMA[256]; + char Pad4[30720]; + char SCSI[4096]; + char ADB[4096]; + char SCC_Legacy[4096]; + char SCC[4096]; + char Pad9[8192]; + char VIA[8192]; + char Pad10[163840]; + char OpenPIC[262144]; +}; + +enum { + REGION_INTERSECTS = 0, + REGION_DISJOINT = 1, + REGION_MIXED = 2, +}; + +enum { + MAX_IORES_LEVEL = 5, +}; + +struct region_devres { + struct resource *parent; + resource_size_t start; + resource_size_t n; +}; + +enum pci_p2pdma_map_type { + PCI_P2PDMA_MAP_UNKNOWN = 0, + PCI_P2PDMA_MAP_NOT_SUPPORTED = 1, + PCI_P2PDMA_MAP_BUS_ADDR = 2, + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE = 3, +}; + +struct pci_p2pdma_map_state { + struct dev_pagemap *pgmap; + int map; + u64 bus_off; +}; + +struct io_tlb_pool; + +struct rchan_percpu_buf_dispatcher { + struct rchan_buf *buf; + struct dentry *dentry; +}; + +struct bpf_timer { + __u64 __opaque[2]; +}; + +struct bpf_wq { + __u64 __opaque[2]; +}; + +struct bpf_list_head { + __u64 __opaque[2]; +}; + +struct bpf_list_node { + __u64 __opaque[3]; +}; + +struct bpf_rb_root { + __u64 __opaque[2]; +}; + +struct bpf_rb_node { + __u64 __opaque[4]; +}; + +struct bpf_refcount { + __u32 __opaque[1]; +}; + +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; + +enum { + BPF_F_TIMER_ABS = 1, + BPF_F_TIMER_CPU_PIN = 2, +}; + +enum bpf_kfunc_flags { + BPF_F_PAD_ZEROS = 1, +}; + +struct bpf_rb_node_kern { + struct rb_node rb_node; + void *owner; +}; + +struct bpf_list_node_kern { + struct list_head list_head; + void *owner; + long: 32; +}; + +typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, 
void *); + +typedef u64 (*btf_bpf_map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + +typedef u64 (*btf_bpf_get_smp_processor_id)(); + +typedef u64 (*btf_bpf_get_numa_node_id)(); + +typedef u64 (*btf_bpf_ktime_get_ns)(); + +typedef u64 (*btf_bpf_ktime_get_boot_ns)(); + +typedef u64 (*btf_bpf_ktime_get_coarse_ns)(); + +typedef u64 (*btf_bpf_ktime_get_tai_ns)(); + +typedef u64 (*btf_bpf_get_current_pid_tgid)(); + +typedef u64 (*btf_bpf_get_current_uid_gid)(); + +typedef u64 (*btf_bpf_get_current_comm)(char *, u32); + +typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_spin_unlock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_jiffies64)(); + +typedef u64 (*btf_bpf_get_current_cgroup_id)(); + +typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int); + +typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, s64 *); + +typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, u64 *); + +typedef u64 (*btf_bpf_strncmp)(const char *, u32, const char *); + +typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, u32); + +typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_copy_from_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_copy_from_user_task)(void *, u32, const void *, struct task_struct *, u64); + +typedef u64 (*btf_bpf_per_cpu_ptr)(const void *, u32); + +typedef u64 (*btf_bpf_this_cpu_ptr)(const void *); + +struct bpf_bprintf_buffers { + char bin_args[512]; + char buf[1024]; +}; + +typedef u64 (*btf_bpf_snprintf)(char *, u32, char *, const void *, u32); + +struct bpf_async_cb { + struct bpf_map *map; + struct bpf_prog *prog; + void *callback_fn; + void *value; + union { + struct callback_head rcu; + struct work_struct delete_work; + }; + u64 flags; +}; + +struct bpf_hrtimer { + struct bpf_async_cb cb; + struct hrtimer timer; + atomic_t cancelling; + long: 32; +}; + +struct bpf_work { + struct bpf_async_cb cb; + struct work_struct work; + struct work_struct delete_work; +}; + +struct bpf_async_kern { + union { + struct bpf_async_cb *cb; + struct bpf_hrtimer *timer; + struct bpf_work *work; + }; + struct bpf_spin_lock lock; +}; + +enum bpf_async_type { + BPF_ASYNC_TYPE_TIMER = 0, + BPF_ASYNC_TYPE_WQ = 1, +}; + +typedef u64 (*btf_bpf_timer_init)(struct bpf_async_kern *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_timer_set_callback)(struct bpf_async_kern *, void *, struct bpf_prog_aux *); + +typedef u64 (*btf_bpf_timer_start)(struct bpf_async_kern *, u64, u64); + +typedef u64 (*btf_bpf_timer_cancel)(struct bpf_async_kern *); + +typedef u64 (*btf_bpf_kptr_xchg)(void *, void *); + +typedef u64 (*btf_bpf_dynptr_from_mem)(void *, u32, u64, struct bpf_dynptr_kern *); + +typedef u64 (*btf_bpf_dynptr_read)(void *, u32, const struct bpf_dynptr_kern *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_write)(const struct bpf_dynptr_kern *, u32, void *, u32, u64); + +typedef u64 (*btf_bpf_dynptr_data)(const struct bpf_dynptr_kern *, u32, u32); + +typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32); + +struct bpf_throw_ctx { + struct bpf_prog_aux *aux; + long: 32; + u64 sp; + u64 bp; + int cnt; + long: 32; +}; + +struct bpf_iter_bits { + __u64 __opaque[2]; +}; + +struct bpf_iter_bits_kern { + union { + __u64 *bits; + __u64 bits_copy; + }; + int nr_bits; + int bit; +}; + +enum lruvec_flags { + LRUVEC_CGROUP_CONGESTED = 0, + LRUVEC_NODE_CONGESTED = 1, +}; + +enum pgdat_flags { + PGDAT_DIRTY = 0, + PGDAT_WRITEBACK = 1, + PGDAT_RECLAIM_LOCKED = 2, 
+}; + +enum zone_flags { + ZONE_BOOSTED_WATERMARK = 0, + ZONE_RECLAIM_ACTIVE = 1, + ZONE_BELOW_HIGH = 2, +}; + +struct reclaim_stat { + unsigned int nr_dirty; + unsigned int nr_unqueued_dirty; + unsigned int nr_congested; + unsigned int nr_writeback; + unsigned int nr_immediate; + unsigned int nr_pageout; + unsigned int nr_activate[2]; + unsigned int nr_ref_keep; + unsigned int nr_unmap_fail; + unsigned int nr_lazyfree_fail; + unsigned int nr_demoted; +}; + +struct mem_cgroup_reclaim_cookie { + pg_data_t *pgdat; + int generation; +}; + +struct trace_event_raw_mm_vmscan_kswapd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_kswapd_wake { + struct trace_entry ent; + int nid; + int zid; + int order; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_wakeup_kswapd { + struct trace_entry ent; + int nid; + int zid; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { + struct trace_entry ent; + int order; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_end_template { + struct trace_entry ent; + long unsigned int nr_reclaimed; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_start { + struct trace_entry ent; + struct shrinker *shr; + void *shrink; + int nid; + long int nr_objects_to_shrink; + long unsigned int gfp_flags; + long unsigned int cache_items; + long long unsigned int delta; + long unsigned int total_scan; + int priority; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_end { + struct trace_entry ent; + struct shrinker *shr; + int nid; + void *shrink; + long int unused_scan; + long int new_scan; + int retval; + long int total_scan; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_isolate { + struct trace_entry ent; + int highest_zoneidx; + int order; + long unsigned int nr_requested; + long unsigned int nr_scanned; + long unsigned int nr_skipped; + long unsigned int nr_taken; + int lru; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_write_folio { + struct trace_entry ent; + long unsigned int pfn; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_reclaim_pages { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_inactive { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_active { + struct trace_entry ent; + int nid; + long unsigned int nr_taken; + long unsigned int nr_active; + long unsigned int nr_deactivated; + long unsigned int nr_referenced; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_node_reclaim_begin { + struct trace_entry ent; + int nid; + int order; + long unsigned int 
gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_throttled { + struct trace_entry ent; + int nid; + int usec_timeout; + int usec_delayed; + int reason; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; + +struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; + +struct trace_event_data_offsets_mm_shrink_slab_start {}; + +struct trace_event_data_offsets_mm_shrink_slab_end {}; + +struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; + +struct trace_event_data_offsets_mm_vmscan_write_folio {}; + +struct trace_event_data_offsets_mm_vmscan_reclaim_pages {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; + +struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {}; + +struct trace_event_data_offsets_mm_vmscan_throttled {}; + +typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int); + +typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int); + +typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, struct shrink_control *, long int, long unsigned int, long long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long int, long int, long int); + +typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_vmscan_write_folio)(void *, struct folio *); + +typedef void (*btf_trace_mm_vmscan_reclaim_pages)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *, int, int); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int, int); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_throttled)(void *, int, int, int, int); + +struct scan_control { + long unsigned int nr_to_reclaim; + nodemask_t *nodemask; + struct mem_cgroup *target_mem_cgroup; + long unsigned int anon_cost; + long unsigned int file_cost; + int *proactive_swappiness; + unsigned int may_deactivate: 2; + unsigned int force_deactivate: 1; + unsigned int skipped_deactivate: 1; + unsigned int may_writepage: 1; + unsigned int may_unmap: 1; + unsigned int may_swap: 1; + unsigned int no_cache_trim_mode: 1; + unsigned int cache_trim_mode_failed: 1; + unsigned int 
proactive: 1; + unsigned int memcg_low_reclaim: 1; + unsigned int memcg_low_skipped: 1; + unsigned int memcg_full_walk: 1; + unsigned int hibernation_mode: 1; + unsigned int compaction_ready: 1; + unsigned int cache_trim_mode: 1; + unsigned int file_is_tiny: 1; + unsigned int no_demotion: 1; + s8 order; + s8 priority; + s8 reclaim_idx; + gfp_t gfp_mask; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + struct { + unsigned int dirty; + unsigned int unqueued_dirty; + unsigned int congested; + unsigned int writeback; + unsigned int immediate; + unsigned int file_taken; + unsigned int taken; + } nr; + struct reclaim_state reclaim_state; +}; + +typedef enum { + PAGE_KEEP = 0, + PAGE_ACTIVATE = 1, + PAGE_SUCCESS = 2, + PAGE_CLEAN = 3, +} pageout_t; + +enum folio_references { + FOLIOREF_RECLAIM = 0, + FOLIOREF_RECLAIM_CLEAN = 1, + FOLIOREF_KEEP = 2, + FOLIOREF_ACTIVATE = 3, +}; + +enum scan_balance { + SCAN_EQUAL = 0, + SCAN_FRACT = 1, + SCAN_ANON = 2, + SCAN_FILE = 3, +}; + +struct proc_fs_opts { + int flag; + const char *str; +}; + +struct page_region { + __u64 start; + __u64 end; + __u64 categories; +}; + +struct pm_scan_arg { + __u64 size; + __u64 flags; + __u64 start; + __u64 end; + __u64 walk_end; + __u64 vec; + __u64 vec_len; + __u64 max_pages; + __u64 category_inverted; + __u64 category_mask; + __u64 category_anyof_mask; + __u64 return_mask; +}; + +enum procmap_query_flags { + PROCMAP_QUERY_VMA_READABLE = 1, + PROCMAP_QUERY_VMA_WRITABLE = 2, + PROCMAP_QUERY_VMA_EXECUTABLE = 4, + PROCMAP_QUERY_VMA_SHARED = 8, + PROCMAP_QUERY_COVERING_OR_NEXT_VMA = 16, + PROCMAP_QUERY_FILE_BACKED_VMA = 32, +}; + +struct procmap_query { + __u64 size; + __u64 query_flags; + __u64 query_addr; + __u64 vma_start; + __u64 vma_end; + __u64 vma_flags; + __u64 vma_page_size; + __u64 vma_offset; + __u64 inode; + __u32 dev_major; + __u32 dev_minor; + __u32 vma_name_size; + __u32 build_id_size; + __u64 vma_name_addr; + __u64 build_id_addr; +}; + +struct proc_maps_private { + struct inode *inode; + struct task_struct *task; + struct mm_struct *mm; + struct vma_iterator iter; +}; + +struct mem_size_stats { + long unsigned int resident; + long unsigned int shared_clean; + long unsigned int shared_dirty; + long unsigned int private_clean; + long unsigned int private_dirty; + long unsigned int referenced; + long unsigned int anonymous; + long unsigned int lazyfree; + long unsigned int anonymous_thp; + long unsigned int shmem_thp; + long unsigned int file_thp; + long unsigned int swap; + long unsigned int shared_hugetlb; + long unsigned int private_hugetlb; + long unsigned int ksm; + long: 32; + u64 pss; + u64 pss_anon; + u64 pss_file; + u64 pss_shmem; + u64 pss_dirty; + u64 pss_locked; + u64 swap_pss; +}; + +enum clear_refs_types { + CLEAR_REFS_ALL = 1, + CLEAR_REFS_ANON = 2, + CLEAR_REFS_MAPPED = 3, + CLEAR_REFS_SOFT_DIRTY = 4, + CLEAR_REFS_MM_HIWATER_RSS = 5, + CLEAR_REFS_LAST = 6, +}; + +struct clear_refs_private { + enum clear_refs_types type; +}; + +typedef struct { + u64 pme; +} pagemap_entry_t; + +struct pagemapread { + int pos; + int len; + pagemap_entry_t *buffer; + bool show_pfn; +}; + +struct pagemap_scan_private { + struct pm_scan_arg arg; + long unsigned int masks_of_interest; + long unsigned int cur_vma_category; + struct page_region *vec_buf; + long unsigned int vec_buf_len; + long unsigned int vec_buf_index; + long unsigned int found_pages; + struct page_region *vec_out; + long: 32; +}; + +enum jbd2_shrink_type { + JBD2_SHRINK_DESTROY = 0, + JBD2_SHRINK_BUSY_STOP = 1, + 
JBD2_SHRINK_BUSY_SKIP = 2, +}; + +struct getdents_callback___2 { + struct dir_context ctx; + char *name; + long: 32; + u64 ino; + int found; + int sequence; +}; + +struct trace_print_flags_u64 { + long long unsigned int mask; + const char *name; + long: 32; +}; + +struct btrfs_ioctl_vol_args { + __s64 fd; + char name[4088]; +}; + +struct btrfs_workqueue { + struct workqueue_struct *normal_wq; + struct btrfs_fs_info *fs_info; + struct list_head ordered_list; + spinlock_t list_lock; + atomic_t pending; + int limit_active; + int current_active; + int thresh; + unsigned int count; + spinlock_t thres_lock; +}; + +enum btrfs_flush_state { + FLUSH_DELAYED_ITEMS_NR = 1, + FLUSH_DELAYED_ITEMS = 2, + FLUSH_DELAYED_REFS_NR = 3, + FLUSH_DELAYED_REFS = 4, + FLUSH_DELALLOC = 5, + FLUSH_DELALLOC_WAIT = 6, + FLUSH_DELALLOC_FULL = 7, + ALLOC_CHUNK = 8, + ALLOC_CHUNK_FORCE = 9, + RUN_DELAYED_IPUTS = 10, + COMMIT_TRANS = 11, +}; + +enum { + __QGROUP_RESERVE_BIT = 0, + QGROUP_RESERVE = 1, + __QGROUP_RESERVE_SEQ = 0, + __QGROUP_RELEASE_BIT = 1, + QGROUP_RELEASE = 2, + __QGROUP_RELEASE_SEQ = 1, + __QGROUP_FREE_BIT = 2, + QGROUP_FREE = 4, + __QGROUP_FREE_SEQ = 2, +}; + +enum btrfs_extent_allocation_policy { + BTRFS_EXTENT_ALLOC_CLUSTERED = 0, + BTRFS_EXTENT_ALLOC_ZONED = 1, +}; + +struct find_free_extent_ctl { + u64 ram_bytes; + u64 num_bytes; + u64 min_alloc_size; + u64 empty_size; + u64 flags; + int delalloc; + long: 32; + u64 search_start; + u64 empty_cluster; + struct btrfs_free_cluster *last_ptr; + bool use_cluster; + bool have_caching_bg; + bool orig_have_caching_bg; + bool for_treelog; + bool for_data_reloc; + int index; + int loop; + bool retry_uncached; + int cached; + long: 32; + u64 max_extent_size; + u64 total_free_space; + u64 found_offset; + u64 hint_byte; + enum btrfs_extent_allocation_policy policy; + bool hinted; + enum btrfs_block_group_size_class size_class; + long: 32; +}; + +struct trace_event_raw_btrfs_transaction_commit { + struct trace_entry ent; + u8 fsid[16]; + u64 generation; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs__inode { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 blocks; + u64 disk_i_size; + u64 generation; + u64 last_trans; + u64 logged_trans; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_get_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 ino; + u64 start; + u64 len; + u32 flags; + int refs; + char __data[0]; +}; + +struct trace_event_raw_btrfs_handle_em_exist { + struct trace_entry ent; + u8 fsid[16]; + u64 e_start; + u64 e_len; + u64 map_start; + u64 map_len; + u64 start; + u64 len; + char __data[0]; +}; + +struct trace_event_raw_btrfs__file_extent_item_regular { + struct trace_entry ent; + u8 fsid[16]; + u64 root_obj; + u64 ino; + loff_t isize; + u64 disk_isize; + u64 num_bytes; + u64 ram_bytes; + u64 disk_bytenr; + u64 disk_num_bytes; + u64 extent_offset; + u8 extent_type; + u8 compression; + long: 32; + u64 extent_start; + u64 extent_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs__file_extent_item_inline { + struct trace_entry ent; + u8 fsid[16]; + u64 root_obj; + u64 ino; + loff_t isize; + u64 disk_isize; + u8 extent_type; + u8 compression; + long: 32; + u64 extent_start; + u64 extent_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs__ordered_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 file_offset; + u64 start; + u64 len; + u64 disk_len; + u64 bytes_left; + long unsigned int flags; + int compress_type; + int refs; + long: 32; 
+ u64 root_objectid; + u64 truncated_len; + char __data[0]; +}; + +struct trace_event_raw_btrfs_finish_ordered_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 start; + u64 len; + bool uptodate; + long: 32; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs__writepage { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + long unsigned int index; + long int nr_to_write; + long int pages_skipped; + long: 32; + loff_t range_start; + loff_t range_end; + char for_kupdate; + char for_reclaim; + char range_cyclic; + long unsigned int writeback_index; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_writepage_end_io_hook { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 start; + u64 end; + int uptodate; + long: 32; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sync_file { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 parent; + int datasync; + long: 32; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sync_fs { + struct trace_entry ent; + u8 fsid[16]; + int wait; + char __data[0]; +}; + +struct trace_event_raw_btrfs_add_block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 offset; + u64 size; + u64 flags; + u64 bytes_used; + u64 bytes_super; + int create; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_delayed_tree_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + long: 32; + u64 parent; + u64 ref_root; + int level; + int type; + u64 seq; + char __data[0]; +}; + +struct trace_event_raw_btrfs_delayed_data_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + long: 32; + u64 parent; + u64 ref_root; + u64 owner; + u64 offset; + int type; + long: 32; + u64 seq; + char __data[0]; +}; + +struct trace_event_raw_btrfs_delayed_ref_head { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + int action; + int is_data; + char __data[0]; +}; + +struct trace_event_raw_btrfs__chunk { + struct trace_entry ent; + u8 fsid[16]; + int num_stripes; + long: 32; + u64 type; + int sub_stripes; + long: 32; + u64 offset; + u64 size; + u64 root_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_cow_block { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 buf_start; + int refs; + long: 32; + u64 cow_start; + int buf_level; + int cow_level; + char __data[0]; +}; + +struct trace_event_raw_btrfs_space_reservation { + struct trace_entry ent; + u8 fsid[16]; + u32 __data_loc_type; + long: 32; + u64 val; + u64 bytes; + int reserve; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_trigger_flush { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 bytes; + int flush; + u32 __data_loc_reason; + char __data[0]; +}; + +struct trace_event_raw_btrfs_flush_space { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 num_bytes; + int state; + int ret; + bool for_preempt; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs__reserved_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 start; + u64 len; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_find_free_extent_search_loop { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + u64 loop; + char __data[0]; 
+}; + +struct trace_event_raw_find_free_extent_have_block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 num_bytes; + u64 empty_size; + u64 flags; + u64 loop; + bool hinted; + long: 32; + u64 bg_start; + u64 bg_flags; + char __data[0]; +}; + +struct trace_event_raw_btrfs__reserve_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + int bg_size_class; + long: 32; + u64 start; + u64 len; + u64 loop; + bool hinted; + int size_class; + char __data[0]; +}; + +struct trace_event_raw_btrfs_find_cluster { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + u64 start; + u64 bytes; + u64 empty_size; + u64 min_bytes; + char __data[0]; +}; + +struct trace_event_raw_btrfs_failed_cluster_setup { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_setup_cluster { + struct trace_entry ent; + u8 fsid[16]; + u64 bg_objectid; + u64 flags; + u64 start; + u64 max_size; + u64 size; + int bitmap; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_alloc_extent_state { + struct trace_entry ent; + const struct extent_state *state; + long unsigned int mask; + const void *ip; + char __data[0]; +}; + +struct trace_event_raw_free_extent_state { + struct trace_entry ent; + const struct extent_state *state; + const void *ip; + char __data[0]; +}; + +struct trace_event_raw_btrfs__work { + struct trace_entry ent; + u8 fsid[16]; + const void *work; + const void *wq; + const void *func; + const void *ordered_func; + const void *normal_work; + char __data[0]; +}; + +struct trace_event_raw_btrfs__work__done { + struct trace_entry ent; + u8 fsid[16]; + const void *wtag; + char __data[0]; +}; + +struct trace_event_raw_btrfs_workqueue { + struct trace_entry ent; + u8 fsid[16]; + const void *wq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_btrfs_workqueue_done { + struct trace_entry ent; + u8 fsid[16]; + const void *wq; + char __data[0]; +}; + +struct trace_event_raw_btrfs__qgroup_rsv_data { + struct trace_entry ent; + u8 fsid[16]; + u64 rootid; + u64 ino; + u64 start; + u64 len; + u64 reserved; + int op; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_qgroup_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 num_bytes; + char __data[0]; +}; + +struct trace_event_raw_qgroup_num_dirty_extents { + struct trace_entry ent; + u8 fsid[16]; + u64 transid; + u64 num_dirty_extents; + char __data[0]; +}; + +struct trace_event_raw_btrfs_qgroup_account_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 transid; + u64 bytenr; + u64 num_bytes; + u64 nr_old_roots; + u64 nr_new_roots; + char __data[0]; +}; + +struct trace_event_raw_qgroup_update_counters { + struct trace_entry ent; + u8 fsid[16]; + u64 qgid; + u64 old_rfer; + u64 old_excl; + u64 cur_old_count; + u64 cur_new_count; + char __data[0]; +}; + +struct trace_event_raw_qgroup_update_reserve { + struct trace_entry ent; + u8 fsid[16]; + u64 qgid; + u64 cur_reserved; + s64 diff; + int type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_qgroup_meta_reserve { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + int type; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_qgroup_meta_convert { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + char __data[0]; +}; + +struct trace_event_raw_qgroup_meta_free_all_pertrans { + struct trace_entry ent; + u8 fsid[16]; + u64 refroot; + s64 diff; + int type; + char __data[0]; + 
long: 32; +}; + +struct trace_event_raw_btrfs__prelim_ref { + struct trace_entry ent; + u8 fsid[16]; + u64 root_id; + u64 objectid; + u8 type; + long: 32; + u64 offset; + int level; + int old_count; + u64 parent; + u64 bytenr; + int mod_count; + long: 32; + u64 tree_size; + char __data[0]; +}; + +struct trace_event_raw_btrfs_inode_mod_outstanding_extents { + struct trace_entry ent; + u8 fsid[16]; + u64 root_objectid; + u64 ino; + int mod; + unsigned int outstanding; + char __data[0]; +}; + +struct trace_event_raw_btrfs__block_group { + struct trace_entry ent; + u8 fsid[16]; + u64 bytenr; + u64 len; + u64 used; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_btrfs_set_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + long: 32; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int set_bits; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_clear_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + long: 32; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int clear_bits; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_convert_extent_bit { + struct trace_entry ent; + u8 fsid[16]; + unsigned int owner; + long: 32; + u64 ino; + u64 rootid; + u64 start; + u64 len; + unsigned int set_bits; + unsigned int clear_bits; + char __data[0]; +}; + +struct trace_event_raw_btrfs_dump_space_info { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 total_bytes; + u64 bytes_used; + u64 bytes_pinned; + u64 bytes_reserved; + u64 bytes_may_use; + u64 bytes_readonly; + u64 reclaim_size; + int clamp; + long: 32; + u64 global_reserved; + u64 trans_reserved; + u64 delayed_refs_reserved; + u64 delayed_reserved; + u64 free_chunk_space; + u64 delalloc_bytes; + u64 ordered_bytes; + char __data[0]; +}; + +struct trace_event_raw_btrfs_reserve_ticket { + struct trace_entry ent; + u8 fsid[16]; + u64 flags; + u64 bytes; + u64 start_ns; + int flush; + int error; + char __data[0]; +}; + +struct trace_event_raw_btrfs_sleep_tree_lock { + struct trace_entry ent; + u8 fsid[16]; + u64 block; + u64 generation; + u64 start_ns; + u64 end_ns; + u64 diff_ns; + u64 owner; + int is_log_tree; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_locking_events { + struct trace_entry ent; + u8 fsid[16]; + u64 block; + u64 generation; + u64 owner; + int is_log_tree; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs__space_info_update { + struct trace_entry ent; + u8 fsid[16]; + u64 type; + u64 old; + s64 diff; + char __data[0]; +}; + +struct trace_event_raw_btrfs_raid56_bio { + struct trace_entry ent; + u8 fsid[16]; + u64 full_stripe; + u64 physical; + u64 devid; + u32 offset; + u32 len; + u8 opf; + u8 total_stripes; + u8 real_stripes; + u8 nr_data; + u8 stripe_nr; + char __data[0]; +}; + +struct trace_event_raw_btrfs_insert_one_raid_extent { + struct trace_entry ent; + u8 fsid[16]; + u64 logical; + u64 length; + int num_stripes; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_btrfs_raid_extent_delete { + struct trace_entry ent; + u8 fsid[16]; + u64 start; + u64 end; + u64 found_start; + u64 found_end; + char __data[0]; +}; + +struct trace_event_raw_btrfs_get_raid_extent_offset { + struct trace_entry ent; + u8 fsid[16]; + u64 logical; + u64 length; + u64 physical; + u64 devid; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_count { + struct trace_entry ent; + u8 fsid[16]; + long int nr; + char __data[0]; +}; + +struct 
trace_event_raw_btrfs_extent_map_shrinker_scan_enter { + struct trace_entry ent; + u8 fsid[16]; + long int nr_to_scan; + long int nr; + u64 last_root_id; + u64 last_ino; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_scan_exit { + struct trace_entry ent; + u8 fsid[16]; + long int nr_dropped; + long int nr; + u64 last_root_id; + u64 last_ino; + char __data[0]; +}; + +struct trace_event_raw_btrfs_extent_map_shrinker_remove_em { + struct trace_entry ent; + u8 fsid[16]; + u64 ino; + u64 root_id; + u64 start; + u64 len; + u32 flags; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_btrfs_transaction_commit {}; + +struct trace_event_data_offsets_btrfs__inode {}; + +struct trace_event_data_offsets_btrfs_get_extent {}; + +struct trace_event_data_offsets_btrfs_handle_em_exist {}; + +struct trace_event_data_offsets_btrfs__file_extent_item_regular {}; + +struct trace_event_data_offsets_btrfs__file_extent_item_inline {}; + +struct trace_event_data_offsets_btrfs__ordered_extent {}; + +struct trace_event_data_offsets_btrfs_finish_ordered_extent {}; + +struct trace_event_data_offsets_btrfs__writepage {}; + +struct trace_event_data_offsets_btrfs_writepage_end_io_hook {}; + +struct trace_event_data_offsets_btrfs_sync_file {}; + +struct trace_event_data_offsets_btrfs_sync_fs {}; + +struct trace_event_data_offsets_btrfs_add_block_group {}; + +struct trace_event_data_offsets_btrfs_delayed_tree_ref {}; + +struct trace_event_data_offsets_btrfs_delayed_data_ref {}; + +struct trace_event_data_offsets_btrfs_delayed_ref_head {}; + +struct trace_event_data_offsets_btrfs__chunk {}; + +struct trace_event_data_offsets_btrfs_cow_block {}; + +struct trace_event_data_offsets_btrfs_space_reservation { + u32 type; + const void *type_ptr_; +}; + +struct trace_event_data_offsets_btrfs_trigger_flush { + u32 reason; + const void *reason_ptr_; +}; + +struct trace_event_data_offsets_btrfs_flush_space {}; + +struct trace_event_data_offsets_btrfs__reserved_extent {}; + +struct trace_event_data_offsets_find_free_extent {}; + +struct trace_event_data_offsets_find_free_extent_search_loop {}; + +struct trace_event_data_offsets_find_free_extent_have_block_group {}; + +struct trace_event_data_offsets_btrfs__reserve_extent {}; + +struct trace_event_data_offsets_btrfs_find_cluster {}; + +struct trace_event_data_offsets_btrfs_failed_cluster_setup {}; + +struct trace_event_data_offsets_btrfs_setup_cluster {}; + +struct trace_event_data_offsets_alloc_extent_state {}; + +struct trace_event_data_offsets_free_extent_state {}; + +struct trace_event_data_offsets_btrfs__work {}; + +struct trace_event_data_offsets_btrfs__work__done {}; + +struct trace_event_data_offsets_btrfs_workqueue { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_btrfs_workqueue_done {}; + +struct trace_event_data_offsets_btrfs__qgroup_rsv_data {}; + +struct trace_event_data_offsets_btrfs_qgroup_extent {}; + +struct trace_event_data_offsets_qgroup_num_dirty_extents {}; + +struct trace_event_data_offsets_btrfs_qgroup_account_extent {}; + +struct trace_event_data_offsets_qgroup_update_counters {}; + +struct trace_event_data_offsets_qgroup_update_reserve {}; + +struct trace_event_data_offsets_qgroup_meta_reserve {}; + +struct trace_event_data_offsets_qgroup_meta_convert {}; + +struct trace_event_data_offsets_qgroup_meta_free_all_pertrans {}; + +struct trace_event_data_offsets_btrfs__prelim_ref {}; + +struct trace_event_data_offsets_btrfs_inode_mod_outstanding_extents {}; + +struct 
trace_event_data_offsets_btrfs__block_group {}; + +struct trace_event_data_offsets_btrfs_set_extent_bit {}; + +struct trace_event_data_offsets_btrfs_clear_extent_bit {}; + +struct trace_event_data_offsets_btrfs_convert_extent_bit {}; + +struct trace_event_data_offsets_btrfs_dump_space_info {}; + +struct trace_event_data_offsets_btrfs_reserve_ticket {}; + +struct trace_event_data_offsets_btrfs_sleep_tree_lock {}; + +struct trace_event_data_offsets_btrfs_locking_events {}; + +struct trace_event_data_offsets_btrfs__space_info_update {}; + +struct trace_event_data_offsets_btrfs_raid56_bio {}; + +struct trace_event_data_offsets_btrfs_insert_one_raid_extent {}; + +struct trace_event_data_offsets_btrfs_raid_extent_delete {}; + +struct trace_event_data_offsets_btrfs_get_raid_extent_offset {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_count {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_scan_enter {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_scan_exit {}; + +struct trace_event_data_offsets_btrfs_extent_map_shrinker_remove_em {}; + +typedef void (*btf_trace_btrfs_transaction_commit)(void *, const struct btrfs_fs_info *); + +typedef void (*btf_trace_btrfs_inode_new)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_inode_request)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_inode_evict)(void *, const struct inode *); + +typedef void (*btf_trace_btrfs_get_extent)(void *, const struct btrfs_root *, const struct btrfs_inode *, const struct extent_map *); + +typedef void (*btf_trace_btrfs_handle_em_exist)(void *, const struct btrfs_fs_info *, const struct extent_map *, const struct extent_map *, u64, u64); + +typedef void (*btf_trace_btrfs_get_extent_show_fi_regular)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, u64); + +typedef void (*btf_trace_btrfs_truncate_show_fi_regular)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, u64); + +typedef void (*btf_trace_btrfs_get_extent_show_fi_inline)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, int, u64); + +typedef void (*btf_trace_btrfs_truncate_show_fi_inline)(void *, const struct btrfs_inode *, const struct extent_buffer *, const struct btrfs_file_extent_item *, int, u64); + +typedef void (*btf_trace_btrfs_ordered_extent_add)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_remove)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_start)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_put)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_range)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_first_range)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_for_logging)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_lookup_first)(void *, const 
struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_split)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_dec_test_pending)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_ordered_extent_mark_finished)(void *, const struct btrfs_inode *, const struct btrfs_ordered_extent *); + +typedef void (*btf_trace_btrfs_finish_ordered_extent)(void *, const struct btrfs_inode *, u64, u64, bool); + +typedef void (*btf_trace_extent_writepage)(void *, const struct folio *, const struct inode *, const struct writeback_control *); + +typedef void (*btf_trace_btrfs_writepage_end_io_hook)(void *, const struct btrfs_inode *, u64, u64, int); + +typedef void (*btf_trace_btrfs_sync_file)(void *, const struct file *, int); + +typedef void (*btf_trace_btrfs_sync_fs)(void *, const struct btrfs_fs_info *, int); + +typedef void (*btf_trace_btrfs_add_block_group)(void *, const struct btrfs_fs_info *, const struct btrfs_block_group *, int); + +typedef void (*btf_trace_add_delayed_tree_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_run_delayed_tree_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_add_delayed_data_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_run_delayed_data_ref)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_node *); + +typedef void (*btf_trace_add_delayed_ref_head)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_head *, int); + +typedef void (*btf_trace_run_delayed_ref_head)(void *, const struct btrfs_fs_info *, const struct btrfs_delayed_ref_head *, int); + +typedef void (*btf_trace_btrfs_chunk_alloc)(void *, const struct btrfs_fs_info *, const struct btrfs_chunk_map *, u64, u64); + +typedef void (*btf_trace_btrfs_chunk_free)(void *, const struct btrfs_fs_info *, const struct btrfs_chunk_map *, u64, u64); + +typedef void (*btf_trace_btrfs_cow_block)(void *, const struct btrfs_root *, const struct extent_buffer *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_space_reservation)(void *, const struct btrfs_fs_info *, const char *, u64, u64, int); + +typedef void (*btf_trace_btrfs_trigger_flush)(void *, const struct btrfs_fs_info *, u64, u64, int, const char *); + +typedef void (*btf_trace_btrfs_flush_space)(void *, const struct btrfs_fs_info *, u64, u64, int, int, bool); + +typedef void (*btf_trace_btrfs_reserved_extent_alloc)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_btrfs_reserved_extent_free)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_find_free_extent)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_find_free_extent_search_loop)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_find_free_extent_have_block_group)(void *, const struct btrfs_root *, const struct find_free_extent_ctl *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_reserve_extent)(void *, const struct btrfs_block_group *, const struct find_free_extent_ctl *); + +typedef void (*btf_trace_btrfs_reserve_extent_cluster)(void *, const struct btrfs_block_group *, const struct find_free_extent_ctl *); + +typedef 
void (*btf_trace_btrfs_find_cluster)(void *, const struct btrfs_block_group *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_failed_cluster_setup)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_setup_cluster)(void *, const struct btrfs_block_group *, const struct btrfs_free_cluster *, u64, int); + +typedef void (*btf_trace_alloc_extent_state)(void *, const struct extent_state *, gfp_t, long unsigned int); + +typedef void (*btf_trace_free_extent_state)(void *, const struct extent_state *, long unsigned int); + +typedef void (*btf_trace_btrfs_work_queued)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_work_sched)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_all_work_done)(void *, const struct btrfs_fs_info *, const void *); + +typedef void (*btf_trace_btrfs_ordered_sched)(void *, const struct btrfs_work *); + +typedef void (*btf_trace_btrfs_workqueue_alloc)(void *, const struct btrfs_workqueue *, const char *); + +typedef void (*btf_trace_btrfs_workqueue_destroy)(void *, const struct btrfs_workqueue *); + +typedef void (*btf_trace_btrfs_qgroup_reserve_data)(void *, const struct inode *, u64, u64, u64, int); + +typedef void (*btf_trace_btrfs_qgroup_release_data)(void *, const struct inode *, u64, u64, u64, int); + +typedef void (*btf_trace_btrfs_qgroup_account_extents)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup_extent_record *, u64); + +typedef void (*btf_trace_btrfs_qgroup_trace_extent)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup_extent_record *, u64); + +typedef void (*btf_trace_qgroup_num_dirty_extents)(void *, const struct btrfs_fs_info *, u64, u64); + +typedef void (*btf_trace_btrfs_qgroup_account_extent)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64, u64); + +typedef void (*btf_trace_qgroup_update_counters)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup *, u64, u64); + +typedef void (*btf_trace_qgroup_update_reserve)(void *, const struct btrfs_fs_info *, const struct btrfs_qgroup *, s64, int); + +typedef void (*btf_trace_qgroup_meta_reserve)(void *, const struct btrfs_root *, s64, int); + +typedef void (*btf_trace_qgroup_meta_convert)(void *, const struct btrfs_root *, s64); + +typedef void (*btf_trace_qgroup_meta_free_all_pertrans)(void *, struct btrfs_root *); + +typedef void (*btf_trace_btrfs_prelim_ref_merge)(void *, const struct btrfs_fs_info *, const struct prelim_ref *, const struct prelim_ref *, u64); + +typedef void (*btf_trace_btrfs_prelim_ref_insert)(void *, const struct btrfs_fs_info *, const struct prelim_ref *, const struct prelim_ref *, u64); + +typedef void (*btf_trace_btrfs_inode_mod_outstanding_extents)(void *, const struct btrfs_root *, u64, int, unsigned int); + +typedef void (*btf_trace_btrfs_remove_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_add_unused_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_add_reclaim_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_reclaim_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_skip_unused_block_group)(void *, const struct btrfs_block_group *); + +typedef void (*btf_trace_btrfs_set_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int); + +typedef void (*btf_trace_btrfs_clear_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int); + +typedef void 
(*btf_trace_btrfs_convert_extent_bit)(void *, const struct extent_io_tree *, u64, u64, unsigned int, unsigned int); + +typedef void (*btf_trace_btrfs_done_preemptive_reclaim)(void *, struct btrfs_fs_info *, const struct btrfs_space_info *); + +typedef void (*btf_trace_btrfs_fail_all_tickets)(void *, struct btrfs_fs_info *, const struct btrfs_space_info *); + +typedef void (*btf_trace_btrfs_reserve_ticket)(void *, const struct btrfs_fs_info *, u64, u64, u64, int, int); + +typedef void (*btf_trace_btrfs_tree_read_lock)(void *, const struct extent_buffer *, u64); + +typedef void (*btf_trace_btrfs_tree_lock)(void *, const struct extent_buffer *, u64); + +typedef void (*btf_trace_btrfs_tree_unlock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_unlock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_unlock_blocking)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_set_lock_blocking_read)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_set_lock_blocking_write)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_try_tree_read_lock)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_btrfs_tree_read_lock_atomic)(void *, const struct extent_buffer *); + +typedef void (*btf_trace_update_bytes_may_use)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_update_bytes_pinned)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_update_bytes_zone_unusable)(void *, const struct btrfs_fs_info *, const struct btrfs_space_info *, u64, s64); + +typedef void (*btf_trace_raid56_read)(void *, const struct btrfs_raid_bio *, const struct bio *, const struct raid56_bio_trace_info *); + +typedef void (*btf_trace_raid56_write)(void *, const struct btrfs_raid_bio *, const struct bio *, const struct raid56_bio_trace_info *); + +typedef void (*btf_trace_btrfs_insert_one_raid_extent)(void *, const struct btrfs_fs_info *, u64, u64, int); + +typedef void (*btf_trace_btrfs_raid_extent_delete)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_get_raid_extent_offset)(void *, const struct btrfs_fs_info *, u64, u64, u64, u64); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_count)(void *, const struct btrfs_fs_info *, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_scan_enter)(void *, const struct btrfs_fs_info *, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_scan_exit)(void *, const struct btrfs_fs_info *, long int, long int); + +typedef void (*btf_trace_btrfs_extent_map_shrinker_remove_em)(void *, const struct btrfs_inode *, const struct extent_map *); + +struct btrfs_fs_context { + char *subvol_name; + long: 32; + u64 subvol_objectid; + u64 max_inline; + u32 commit_interval; + u32 metadata_ratio; + u32 thread_pool_size; + long: 32; + long long unsigned int mount_opt; + long unsigned int compress_type: 4; + unsigned int compress_level; + refcount_t refs; + long: 32; +}; + +enum { + Opt_acl = 0, + Opt_clear_cache = 1, + Opt_commit_interval = 2, + Opt_compress = 3, + Opt_compress_force = 4, + Opt_compress_force_type = 5, + Opt_compress_type = 6, + Opt_degraded = 7, + Opt_device = 8, + Opt_fatal_errors = 9, + Opt_flushoncommit = 10, + Opt_max_inline = 11, + Opt_barrier = 12, + Opt_datacow = 13, + Opt_datasum = 14, + Opt_defrag = 15, + Opt_discard___2 = 16, + Opt_discard_mode = 
17, + Opt_ratio = 18, + Opt_rescan_uuid_tree = 19, + Opt_skip_balance = 20, + Opt_space_cache = 21, + Opt_space_cache_version = 22, + Opt_ssd = 23, + Opt_ssd_spread = 24, + Opt_subvol = 25, + Opt_subvol_empty = 26, + Opt_subvolid = 27, + Opt_thread_pool = 28, + Opt_treelog = 29, + Opt_user_subvol_rm_allowed = 30, + Opt_norecovery = 31, + Opt_rescue = 32, + Opt_usebackuproot = 33, + Opt_nologreplay = 34, + Opt_enospc_debug = 35, + Opt_err___4 = 36, +}; + +enum { + Opt_fatal_errors_panic = 0, + Opt_fatal_errors_bug = 1, +}; + +enum { + Opt_discard_sync = 0, + Opt_discard_async = 1, +}; + +enum { + Opt_space_cache_v1 = 0, + Opt_space_cache_v2 = 1, +}; + +enum { + Opt_rescue_usebackuproot = 0, + Opt_rescue_nologreplay = 1, + Opt_rescue_ignorebadroots = 2, + Opt_rescue_ignoredatacsums = 3, + Opt_rescue_ignoremetacsums = 4, + Opt_rescue_ignoresuperflags = 5, + Opt_rescue_parameter_all = 6, +}; + +struct init_sequence { + int (*init_func)(); + void (*exit_func)(); +}; + +struct chksum_ctx { + u32 key; +}; + +struct chksum_desc_ctx { + u32 crc; +}; + +struct asymmetric_key_ids { + void *id[3]; +}; + +struct asymmetric_key_parser { + struct list_head link; + struct module *owner; + const char *name; + int (*parse)(struct key_preparsed_payload *); +}; + +typedef guid_t efi_guid_t; + +struct _gpt_header { + __le64 signature; + __le32 revision; + __le32 header_size; + __le32 header_crc32; + __le32 reserved1; + __le64 my_lba; + __le64 alternate_lba; + __le64 first_usable_lba; + __le64 last_usable_lba; + efi_guid_t disk_guid; + __le64 partition_entry_lba; + __le32 num_partition_entries; + __le32 sizeof_partition_entry; + __le32 partition_entry_array_crc32; +}; + +typedef struct _gpt_header gpt_header; + +struct _gpt_entry_attributes { + u64 required_to_function: 1; + u64 reserved: 47; + u64 type_guid_specific: 16; +}; + +typedef struct _gpt_entry_attributes gpt_entry_attributes; + +struct _gpt_entry { + efi_guid_t partition_type_guid; + efi_guid_t unique_partition_guid; + __le64 starting_lba; + __le64 ending_lba; + gpt_entry_attributes attributes; + __le16 partition_name[36]; +}; + +typedef struct _gpt_entry gpt_entry; + +struct _gpt_mbr_record { + u8 boot_indicator; + u8 start_head; + u8 start_sector; + u8 start_track; + u8 os_type; + u8 end_head; + u8 end_sector; + u8 end_track; + __le32 starting_lba; + __le32 size_in_lba; +}; + +typedef struct _gpt_mbr_record gpt_mbr_record; + +struct _legacy_mbr { + u8 boot_code[440]; + __le32 unique_mbr_signature; + __le16 unknown; + gpt_mbr_record partition_record[4]; + __le16 signature; +} __attribute__((packed)); + +typedef struct _legacy_mbr legacy_mbr; + +struct bd_holder_disk { + struct list_head list; + struct kobject *holder_dir; + int refcnt; +}; + +struct io_splice { + struct file *file_out; + long: 32; + loff_t off_out; + loff_t off_in; + u64 len; + int splice_fd_in; + unsigned int flags; + struct io_rsrc_node *rsrc_node; + long: 32; +}; + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32, + BLAKE2S_128_HASH_SIZE = 16, + BLAKE2S_160_HASH_SIZE = 20, + BLAKE2S_224_HASH_SIZE = 28, + BLAKE2S_256_HASH_SIZE = 32, +}; + +struct blake2s_state { + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[64]; + unsigned int buflen; + unsigned int outlen; +}; + +struct irq_glue { + struct irq_affinity_notify notify; + struct cpu_rmap *rmap; + u16 index; +}; + +struct pci_filp_private { + enum pci_mmap_state mmap_state; + int write_combine; +}; + +struct clk_gate { + struct clk_hw hw; + void *reg; + u8 bit_idx; + u8 flags; + 
spinlock_t *lock; +}; + +struct unipair { + short unsigned int unicode; + short unsigned int fontpos; +}; + +struct unimapdesc { + short unsigned int entry_ct; + struct unipair *entries; +}; + +struct vt_stat { + short unsigned int v_active; + short unsigned int v_signal; + short unsigned int v_state; +}; + +struct vt_sizes { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_scrollsize; +}; + +struct vt_consize { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_vlin; + short unsigned int v_clin; + short unsigned int v_vcol; + short unsigned int v_ccol; +}; + +struct vt_event { + unsigned int event; + unsigned int oldev; + unsigned int newev; + unsigned int pad[4]; +}; + +struct vt_setactivate { + unsigned int console; + struct vt_mode mode; +}; + +struct vt_event_wait { + struct list_head list; + struct vt_event event; + int done; +}; + +struct builtin_fw { + char *name; + void *data; + long unsigned int size; +}; + +struct trace_event_raw_dma_fence { + struct trace_entry ent; + u32 __data_loc_driver; + u32 __data_loc_timeline; + unsigned int context; + unsigned int seqno; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_fence { + u32 driver; + const void *driver_ptr_; + u32 timeline; + const void *timeline_ptr_; +}; + +typedef void (*btf_trace_dma_fence_emit)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_init)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_destroy)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_enable_signal)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_signaled)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_start)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_end)(void *, struct dma_fence *); + +struct default_wait_cb { + struct dma_fence_cb base; + struct task_struct *task; +}; + +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; + +union e1000_rx_desc_packet_split { + struct { + __le64 buffer_addr[4]; + } read; + struct { + struct { + __le32 mrq; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length0; + __le16 vlan; + } middle; + struct { + __le16 header_status; + __le16 length[3]; + } upper; + __le64 reserved; + } wb; +}; + +struct e1000_tx_desc { + __le64 buffer_addr; + union { + __le32 data; + struct { + __le16 length; + u8 cso; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; + u8 css; + __le16 special; + } fields; + } upper; +}; + +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; + u8 ipcso; + __le16 ipcse; + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; + u8 tucso; + __le16 tucse; + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; + u8 hdr_len; + __le16 mss; + } fields; + } tcp_seg_setup; +}; + +enum e1000_boards { + board_82571 = 0, + board_82572 = 1, + board_82573 = 2, + board_82574 = 3, + board_82583 = 4, + board_80003es2lan = 5, + board_ich8lan = 6, + board_ich9lan = 7, + board_ich10lan = 8, + board_pchlan = 9, + board_pch2lan = 10, 
+ board_pch_lpt = 11, + board_pch_spt = 12, + board_pch_cnp = 13, + board_pch_tgp = 14, + board_pch_adp = 15, + board_pch_mtp = 16, +}; + +enum e1000_state_t___2 { + __E1000_TESTING = 0, + __E1000_RESETTING = 1, + __E1000_ACCESS_SHARED_RESOURCE = 2, + __E1000_DOWN = 3, +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255, +}; + +struct trace_event_raw_e1000e_trace_mac_register { + struct trace_entry ent; + uint32_t reg; + char __data[0]; +}; + +struct trace_event_data_offsets_e1000e_trace_mac_register {}; + +typedef void (*btf_trace_e1000e_trace_mac_register)(void *, uint32_t); + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + +struct my_u0 { + __le64 a; + __le64 b; +}; + +struct my_u1 { + __le64 a; + __le64 b; + __le64 c; + __le64 d; +}; + +struct touchscreen_properties { + unsigned int max_x; + unsigned int max_y; + bool invert_x; + bool invert_y; + bool swap_x_y; +}; + +struct pps_kinfo { + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; + long: 32; +}; + +struct pps_fdata { + struct pps_kinfo info; + struct pps_ktime timeout; +}; + +struct pps_bind_args { + int tsformat; + int edge; + int consumer; +}; + +struct amba_cs_uci_id { + unsigned int devarch; + unsigned int devarch_mask; + unsigned int devtype; + void *data; +}; + +struct amba_device { + struct device dev; + struct resource res; + struct clk *pclk; + struct device_dma_parameters dma_parms; + unsigned int periphid; + struct mutex periphid_lock; + unsigned int cid; + struct amba_cs_uci_id uci; + unsigned int irq[9]; + const char *driver_override; + long: 32; +}; + +enum { + TCA_STATS_UNSPEC = 0, + TCA_STATS_BASIC = 1, + TCA_STATS_RATE_EST = 2, + TCA_STATS_QUEUE = 3, + TCA_STATS_APP = 4, + TCA_STATS_RATE_EST64 = 5, + TCA_STATS_PAD = 6, + TCA_STATS_BASIC_HW = 7, + TCA_STATS_PKT64 = 8, + __TCA_STATS_MAX = 9, +}; + +struct gnet_stats_basic { + __u64 bytes; + __u32 packets; + long: 32; +}; + +struct gnet_stats_rate_est { + __u32 bps; + __u32 pps; +}; + +struct gnet_stats_rate_est64 { + __u64 bps; + __u64 pps; +}; + +struct linkinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; +}; + +struct ethtool_cmis_cdb_rpl_hdr { + u8 rpl_len; + u8 rpl_chk_code; +}; + +struct ethtool_cmis_cdb_rpl { + struct ethtool_cmis_cdb_rpl_hdr hdr; + u8 payload[120]; +}; + +struct cmis_rev_rpl { + u8 rev; +}; + +struct cmis_cdb_advert_rpl { + u8 inst_supported; + u8 read_write_len_ext; + u8 resv1; + u8 resv2; +}; + +struct cmis_password_entry_pl { + __be32 password; +}; + +struct cmis_cdb_query_status_pl { + u16 response_delay; +}; + +struct cmis_cdb_query_status_rpl { + u8 length; + u8 status; +}; + +struct cmis_cdb_module_features_rpl { + u8 resv1[34]; + __be16 max_completion_time; +}; + +struct cmis_wait_for_cond_rpl { + u8 state; +}; + +enum { + IPT_TTL_EQ = 0, + IPT_TTL_NE = 1, + IPT_TTL_LT = 2, + IPT_TTL_GT = 3, +}; + +struct ipt_ttl_info { + __u8 mode; + __u8 ttl; +}; + +enum { + IP6T_HL_EQ = 0, + IP6T_HL_NE = 1, + IP6T_HL_LT = 2, + IP6T_HL_GT = 3, +}; + +struct ip6t_hl_info { + __u8 mode; + __u8 hop_limit; +}; + +struct tcp_plb_state { + u8 consec_cong_rounds: 5; + u8 unused: 3; + u32 pause_until; +}; + +struct mld2_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + struct in6_addr grec_mca; + struct in6_addr grec_src[0]; +}; + +struct mld2_report { + struct icmp6hdr mld2r_hdr; + struct mld2_grec 
mld2r_grec[0]; +}; + +struct mld2_query { + struct icmp6hdr mld2q_hdr; + struct in6_addr mld2q_mca; + __u8 mld2q_resv2: 4; + __u8 mld2q_suppress: 1; + __u8 mld2q_qrv: 3; + __u8 mld2q_qqic; + __be16 mld2q_nsrcs; + struct in6_addr mld2q_srcs[0]; +}; + +struct igmp6_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + +struct igmp6_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; + struct ifmcaddr6 *im; +}; + +enum { + MDB_RTR_TYPE_DISABLED = 0, + MDB_RTR_TYPE_TEMP_QUERY = 1, + MDB_RTR_TYPE_PERM = 2, + MDB_RTR_TYPE_TEMP = 3, +}; + +enum { + BR_FDB_LOCAL = 0, + BR_FDB_STATIC = 1, + BR_FDB_STICKY = 2, + BR_FDB_ADDED_BY_USER = 3, + BR_FDB_ADDED_BY_EXT_LEARN = 4, + BR_FDB_OFFLOADED = 5, + BR_FDB_NOTIFY = 6, + BR_FDB_NOTIFY_INACTIVE = 7, + BR_FDB_LOCKED = 8, + BR_FDB_DYNAMIC_LEARNED = 9, +}; + +struct br_frame_type { + __be16 type; + int (*frame_handler)(struct net_bridge_port *, struct sk_buff *); + struct hlist_node list; +}; + +typedef struct { + rwlock_t *lock; +} class_write_lock_irq_t; + +struct ptrace_peeksiginfo_args { + __u64 off; + __u32 flags; + __s32 nr; +}; + +struct ptrace_syscall_info { + __u8 op; + __u8 pad[3]; + __u32 arch; + __u64 instruction_pointer; + __u64 stack_pointer; + union { + struct { + __u64 nr; + __u64 args[6]; + } entry; + struct { + __s64 rval; + __u8 is_error; + long: 32; + } exit; + struct { + __u64 nr; + __u64 args[6]; + __u32 ret_data; + long: 32; + } seccomp; + }; +}; + +struct ptrace_rseq_configuration { + __u64 rseq_abi_pointer; + __u32 rseq_abi_size; + __u32 signature; + __u32 flags; + __u32 pad; +}; + +enum { + AFFINITY = 0, + AFFINITY_LIST = 1, + EFFECTIVE = 2, + EFFECTIVE_LIST = 3, +}; + +struct latch_tree_root { + seqcount_latch_t seq; + struct rb_root tree[2]; +}; + +enum kernel_read_file_id { + READING_UNKNOWN = 0, + READING_FIRMWARE = 1, + READING_MODULE = 2, + READING_KEXEC_IMAGE = 3, + READING_KEXEC_INITRAMFS = 4, + READING_POLICY = 5, + READING_X509_CERTIFICATE = 6, + READING_MAX_ID = 7, +}; + +enum mod_license { + NOT_GPL_ONLY = 0, + GPL_ONLY = 1, +}; + +struct find_symbol_arg { + const char *name; + bool gplok; + bool warn; + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + +enum fail_dup_mod_reason { + FAIL_DUP_MOD_BECOMING = 0, + FAIL_DUP_MOD_LOAD = 1, +}; + +struct mod_tree_root { + struct latch_tree_root root; + long unsigned int addr_min; + long unsigned int addr_max; + long unsigned int data_addr_min; + long unsigned int data_addr_max; +}; + +struct trace_event_raw_module_load { + struct trace_entry ent; + unsigned int taints; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_free { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_refcnt { + struct trace_entry ent; + long unsigned int ip; + int refcnt; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_request { + struct trace_entry ent; + long unsigned int ip; + bool wait; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_data_offsets_module_load { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_free { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_refcnt { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_module_request { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_module_load)(void *, 
struct module *); + +typedef void (*btf_trace_module_free)(void *, struct module *); + +typedef void (*btf_trace_module_get)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_put)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_request)(void *, char *, bool, long unsigned int); + +struct symsearch { + const struct kernel_symbol *start; + const struct kernel_symbol *stop; + const s32 *crcs; + enum mod_license license; +}; + +struct mod_initfree { + struct llist_node node; + void *init_text; + void *init_data; + void *init_rodata; +}; + +struct idempotent { + const void *cookie; + struct hlist_node entry; + struct completion complete; + int ret; +}; + +struct trace_bprintk_fmt { + struct list_head list; + const char *fmt; +}; + +struct reuseport_array { + struct bpf_map map; + struct sock *ptrs[0]; +}; + +enum rseq_cpu_id_state { + RSEQ_CPU_ID_UNINITIALIZED = -1, + RSEQ_CPU_ID_REGISTRATION_FAILED = -2, +}; + +enum rseq_flags { + RSEQ_FLAG_UNREGISTER = 1, +}; + +enum rseq_cs_flags { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4, +}; + +struct rseq_cs { + __u32 version; + __u32 flags; + __u64 start_ip; + __u64 post_commit_offset; + __u64 abort_ip; +}; + +struct trace_event_raw_rseq_update { + struct trace_entry ent; + s32 cpu_id; + s32 node_id; + s32 mm_cid; + char __data[0]; +}; + +struct trace_event_raw_rseq_ip_fixup { + struct trace_entry ent; + long unsigned int regs_ip; + long unsigned int start_ip; + long unsigned int post_commit_offset; + long unsigned int abort_ip; + char __data[0]; +}; + +struct trace_event_data_offsets_rseq_update {}; + +struct trace_event_data_offsets_rseq_ip_fixup {}; + +typedef void (*btf_trace_rseq_update)(void *, struct task_struct *); + +typedef void (*btf_trace_rseq_ip_fixup)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +struct f_owner_ex { + int type; + __kernel_pid_t pid; +}; + +struct fs_error_report { + int error; + struct inode *inode; + struct super_block *sb; +}; + +typedef struct fsnotify_mark_connector *fsnotify_connp_t; + +struct core_name { + char *corename; + int used; + int size; +}; + +struct mmp_struct { + __le32 mmp_magic; + __le32 mmp_seq; + __le64 mmp_time; + char mmp_nodename[64]; + char mmp_bdevname[32]; + __le16 mmp_check_interval; + __le16 mmp_pad1; + __le32 mmp_pad2[226]; + __le32 mmp_checksum; +}; + +struct btrfs_squota_delta { + u64 root; + u64 num_bytes; + u64 generation; + bool is_inc; + bool is_data; + long: 32; +}; + +enum btrfs_loop_type { + LOOP_CACHING_NOWAIT = 0, + LOOP_CACHING_WAIT = 1, + LOOP_UNSET_SIZE_CLASS = 2, + LOOP_ALLOC_CHUNK = 3, + LOOP_WRONG_SIZE_CLASS = 4, + LOOP_NO_EMPTY_SIZE = 5, +}; + +struct walk_control___2 { + u64 refs[8]; + u64 flags[8]; + struct btrfs_key update_progress; + struct btrfs_key drop_progress; + int drop_level; + int stage; + int level; + int shared_level; + int update_ref; + int keep_locks; + int reada_slot; + int reada_count; + int restarted; + int lookup_info; + long: 32; +}; + +struct msg_msgseg; + +struct msg_msg { + struct list_head m_list; + long int m_type; + size_t m_ts; + struct msg_msgseg *next; + void *security; +}; + +struct msg_msgseg { + struct msg_msgseg *next; +}; + +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[0]; +}; + +struct crypto_kpp { + unsigned int reqsize; + long: 
32; + struct crypto_tfm base; +}; + +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); + int (*generate_public_key)(struct kpp_request *); + int (*compute_shared_secret)(struct kpp_request *); + unsigned int (*max_size)(struct crypto_kpp *); + int (*init)(struct crypto_kpp *); + void (*exit)(struct crypto_kpp *); + struct crypto_alg base; +}; + +struct kpp_instance { + void (*free)(struct kpp_instance *); + long: 32; + union { + struct { + char head[24]; + struct crypto_instance base; + } s; + struct kpp_alg alg; + }; +}; + +struct crypto_kpp_spawn { + struct crypto_spawn base; +}; + +struct bio_alloc_cache { + struct bio *free_list; + struct bio *free_list_irq; + unsigned int nr; + unsigned int nr_irq; +}; + +enum xen_domain_type { + XEN_NATIVE = 0, + XEN_PV_DOMAIN = 1, + XEN_HVM_DOMAIN = 2, +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; + +struct io_uring_rsrc_register { + __u32 nr; + __u32 flags; + __u64 resv2; + __u64 data; + __u64 tags; +}; + +struct io_uring_rsrc_update2 { + __u32 offset; + __u32 resv; + __u64 data; + __u64 tags; + __u32 nr; + __u32 resv2; +}; + +enum { + IORING_REGISTER_SRC_REGISTERED = 1, + IORING_REGISTER_DST_REPLACE = 2, +}; + +struct io_uring_clone_buffers { + __u32 src_fd; + __u32 flags; + __u32 src_off; + __u32 dst_off; + __u32 nr; + __u32 pad[3]; +}; + +enum { + IORING_RSRC_FILE = 0, + IORING_RSRC_BUFFER = 1, +}; + +struct io_imu_folio_data { + unsigned int nr_pages_head; + unsigned int nr_pages_mid; + unsigned int folio_shift; +}; + +struct io_rsrc_update { + struct file *file; + long: 32; + u64 arg; + u32 nr_args; + u32 offset; +}; + +struct wrapper { + cmp_func_t cmp; + swap_func_t swap; +}; + +struct rnd_state { + __u32 s1; + __u32 s2; + __u32 s3; + __u32 s4; +}; + +typedef unsigned int uInt; + +typedef unsigned char uch; + +typedef short unsigned int ush; + +typedef long unsigned int ulg; + +struct ct_data_s { + union { + ush freq; + ush code; + } fc; + union { + ush dad; + ush len; + } dl; +}; + +typedef struct ct_data_s ct_data; + +struct static_tree_desc_s { + const ct_data *static_tree; + const int *extra_bits; + int extra_base; + int elems; + int max_length; +}; + +typedef struct static_tree_desc_s static_tree_desc; + +struct tree_desc_s { + ct_data *dyn_tree; + int max_code; + static_tree_desc *stat_desc; +}; + +typedef ush Pos; + +typedef unsigned int IPos; + +struct deflate_state { + z_streamp strm; + int status; + Byte *pending_buf; + ulg pending_buf_size; + Byte *pending_out; + int pending; + int noheader; + Byte data_type; + Byte method; + int last_flush; + uInt w_size; + uInt w_bits; + uInt w_mask; + Byte *window; + ulg window_size; + Pos *prev; + Pos *head; + uInt ins_h; + uInt hash_size; + uInt hash_bits; + uInt hash_mask; + uInt hash_shift; + long int block_start; + uInt match_length; + IPos prev_match; + int match_available; + uInt strstart; + uInt match_start; + uInt lookahead; + uInt prev_length; + uInt max_chain_length; + uInt max_lazy_match; + int level; + int strategy; + uInt good_match; + int nice_match; + struct ct_data_s dyn_ltree[573]; + struct ct_data_s dyn_dtree[61]; + struct ct_data_s bl_tree[39]; + struct tree_desc_s l_desc; + struct tree_desc_s d_desc; + struct tree_desc_s bl_desc; + ush bl_count[16]; + int heap[573]; + int heap_len; + int heap_max; + uch depth[573]; + uch *l_buf; + uInt lit_bufsize; + uInt last_lit; + ush *d_buf; 
+ ulg opt_len; + ulg static_len; + ulg compressed_len; + uInt matches; + int last_eob_len; + ush bi_buf; + int bi_valid; +}; + +typedef struct deflate_state deflate_state; + +typedef enum { + need_more = 0, + block_done = 1, + finish_started = 2, + finish_done = 3, +} block_state; + +typedef block_state (*compress_func)(deflate_state *, int); + +struct deflate_workspace { + deflate_state deflate_memory; + Byte *window_memory; + Pos *prev_memory; + Pos *head_memory; + char *overlay_memory; +}; + +typedef struct deflate_workspace deflate_workspace; + +struct config_s { + ush good_length; + ush max_lazy; + ush nice_length; + ush max_chain; + compress_func func; +}; + +typedef struct config_s config; + +struct fb_event { + struct fb_info *info; + void *data; +}; + +struct virtio_blk_geometry { + __virtio16 cylinders; + __u8 heads; + __u8 sectors; +}; + +struct virtio_blk_zoned_characteristics { + __virtio32 zone_sectors; + __virtio32 max_open_zones; + __virtio32 max_active_zones; + __virtio32 max_append_sectors; + __virtio32 write_granularity; + __u8 model; + __u8 unused2[3]; +}; + +struct virtio_blk_config { + __virtio64 capacity; + __virtio32 size_max; + __virtio32 seg_max; + struct virtio_blk_geometry geometry; + __virtio32 blk_size; + __u8 physical_block_exp; + __u8 alignment_offset; + __virtio16 min_io_size; + __virtio32 opt_io_size; + __u8 wce; + __u8 unused; + __virtio16 num_queues; + __virtio32 max_discard_sectors; + __virtio32 max_discard_seg; + __virtio32 discard_sector_alignment; + __virtio32 max_write_zeroes_sectors; + __virtio32 max_write_zeroes_seg; + __u8 write_zeroes_may_unmap; + __u8 unused1[3]; + __virtio32 max_secure_erase_sectors; + __virtio32 max_secure_erase_seg; + __virtio32 secure_erase_sector_alignment; + struct virtio_blk_zoned_characteristics zoned; +}; + +struct virtio_blk_outhdr { + __virtio32 type; + __virtio32 ioprio; + __virtio64 sector; +}; + +struct virtio_blk_discard_write_zeroes { + __le64 sector; + __le32 num_sectors; + __le32 flags; +}; + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[16]; + long: 32; + long: 32; +}; + +struct virtio_blk { + struct mutex vdev_mutex; + struct virtio_device *vdev; + struct gendisk *disk; + struct blk_mq_tag_set tag_set; + struct work_struct config_work; + int index; + int num_vqs; + int io_queues[3]; + struct virtio_blk_vq *vqs; + unsigned int zone_sectors; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + union { + u8 status; + struct { + __virtio64 sector; + u8 status; + long: 32; + } zone_append; + } in_hdr; + size_t in_hdr_len; + struct sg_table sg_table; + struct scatterlist sg[0]; +}; + +enum { + NVME_REG_CAP = 0, + NVME_REG_VS = 8, + NVME_REG_INTMS = 12, + NVME_REG_INTMC = 16, + NVME_REG_CC = 20, + NVME_REG_CSTS = 28, + NVME_REG_NSSR = 32, + NVME_REG_AQA = 36, + NVME_REG_ASQ = 40, + NVME_REG_ACQ = 48, + NVME_REG_CMBLOC = 56, + NVME_REG_CMBSZ = 60, + NVME_REG_BPINFO = 64, + NVME_REG_BPRSEL = 68, + NVME_REG_BPMBL = 72, + NVME_REG_CMBMSC = 80, + NVME_REG_CRTO = 104, + NVME_REG_PMRCAP = 3584, + NVME_REG_PMRCTL = 3588, + NVME_REG_PMRSTS = 3592, + NVME_REG_PMREBS = 3596, + NVME_REG_PMRSWTP = 3600, + NVME_REG_DBS = 4096, +}; + +enum { + NVME_CMBSZ_SQS = 1, + NVME_CMBSZ_CQS = 2, + NVME_CMBSZ_LISTS = 4, + NVME_CMBSZ_RDS = 8, + NVME_CMBSZ_WDS = 16, + NVME_CMBSZ_SZ_SHIFT = 12, + NVME_CMBSZ_SZ_MASK = 1048575, + NVME_CMBSZ_SZU_SHIFT = 8, + NVME_CMBSZ_SZU_MASK = 15, +}; + +enum { + NVME_CC_ENABLE = 1, + NVME_CC_EN_SHIFT = 0, + NVME_CC_CSS_SHIFT = 4, + NVME_CC_MPS_SHIFT = 7, + 
NVME_CC_AMS_SHIFT = 11, + NVME_CC_SHN_SHIFT = 14, + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_NVM = 0, + NVME_CC_CSS_CSI = 96, + NVME_CC_CSS_MASK = 112, + NVME_CC_AMS_RR = 0, + NVME_CC_AMS_WRRU = 2048, + NVME_CC_AMS_VS = 14336, + NVME_CC_SHN_NONE = 0, + NVME_CC_SHN_NORMAL = 16384, + NVME_CC_SHN_ABRUPT = 32768, + NVME_CC_SHN_MASK = 49152, + NVME_CC_IOSQES = 393216, + NVME_CC_IOCQES = 4194304, + NVME_CC_CRIME = 16777216, +}; + +enum { + NVME_CSTS_RDY = 1, + NVME_CSTS_CFS = 2, + NVME_CSTS_NSSRO = 16, + NVME_CSTS_PP = 32, + NVME_CSTS_SHST_NORMAL = 0, + NVME_CSTS_SHST_OCCUR = 4, + NVME_CSTS_SHST_CMPLT = 8, + NVME_CSTS_SHST_MASK = 12, +}; + +enum { + NVME_CMBMSC_CRE = 1, + NVME_CMBMSC_CMSE = 2, +}; + +enum { + NVME_SGL_FMT_DATA_DESC = 0, + NVME_SGL_FMT_SEG_DESC = 2, + NVME_SGL_FMT_LAST_SEG_DESC = 3, + NVME_KEY_SGL_FMT_DATA_DESC = 4, + NVME_TRANSPORT_SGL_DATA_DESC = 5, +}; + +enum { + NVME_CMD_FUSE_FIRST = 1, + NVME_CMD_FUSE_SECOND = 2, + NVME_CMD_SGL_METABUF = 64, + NVME_CMD_SGL_METASEG = 128, + NVME_CMD_SGL_ALL = 192, +}; + +enum { + NVME_HOST_MEM_ENABLE = 1, + NVME_HOST_MEM_RETURN = 2, +}; + +enum { + NVME_QUEUE_PHYS_CONTIG = 1, + NVME_CQ_IRQ_ENABLED = 2, + NVME_SQ_PRIO_URGENT = 0, + NVME_SQ_PRIO_HIGH = 2, + NVME_SQ_PRIO_MEDIUM = 4, + NVME_SQ_PRIO_LOW = 6, + NVME_FEAT_ARBITRATION = 1, + NVME_FEAT_POWER_MGMT = 2, + NVME_FEAT_LBA_RANGE = 3, + NVME_FEAT_TEMP_THRESH = 4, + NVME_FEAT_ERR_RECOVERY = 5, + NVME_FEAT_VOLATILE_WC = 6, + NVME_FEAT_NUM_QUEUES = 7, + NVME_FEAT_IRQ_COALESCE = 8, + NVME_FEAT_IRQ_CONFIG = 9, + NVME_FEAT_WRITE_ATOMIC = 10, + NVME_FEAT_ASYNC_EVENT = 11, + NVME_FEAT_AUTO_PST = 12, + NVME_FEAT_HOST_MEM_BUF = 13, + NVME_FEAT_TIMESTAMP = 14, + NVME_FEAT_KATO = 15, + NVME_FEAT_HCTM = 16, + NVME_FEAT_NOPSC = 17, + NVME_FEAT_RRL = 18, + NVME_FEAT_PLM_CONFIG = 19, + NVME_FEAT_PLM_WINDOW = 20, + NVME_FEAT_HOST_BEHAVIOR = 22, + NVME_FEAT_SANITIZE = 23, + NVME_FEAT_SW_PROGRESS = 128, + NVME_FEAT_HOST_ID = 129, + NVME_FEAT_RESV_MASK = 130, + NVME_FEAT_RESV_PERSIST = 131, + NVME_FEAT_WRITE_PROTECT = 132, + NVME_FEAT_VENDOR_START = 192, + NVME_FEAT_VENDOR_END = 255, + NVME_LOG_SUPPORTED = 0, + NVME_LOG_ERROR = 1, + NVME_LOG_SMART = 2, + NVME_LOG_FW_SLOT = 3, + NVME_LOG_CHANGED_NS = 4, + NVME_LOG_CMD_EFFECTS = 5, + NVME_LOG_DEVICE_SELF_TEST = 6, + NVME_LOG_TELEMETRY_HOST = 7, + NVME_LOG_TELEMETRY_CTRL = 8, + NVME_LOG_ENDURANCE_GROUP = 9, + NVME_LOG_ANA = 12, + NVME_LOG_FEATURES = 18, + NVME_LOG_RMI = 22, + NVME_LOG_DISC = 112, + NVME_LOG_RESERVATION = 128, + NVME_FWACT_REPL = 0, + NVME_FWACT_REPL_ACTV = 8, + NVME_FWACT_ACTV = 16, +}; + +struct nvme_host_mem_buf_desc { + __le64 addr; + __le32 size; + __u32 rsvd; +}; + +struct nvme_completion { + union nvme_result result; + __le16 sq_head; + __le16 sq_id; + __u16 command_id; + __le16 status; +}; + +enum nvme_quirks { + NVME_QUIRK_STRIPE_SIZE = 1, + NVME_QUIRK_IDENTIFY_CNS = 2, + NVME_QUIRK_DEALLOCATE_ZEROES = 4, + NVME_QUIRK_DELAY_BEFORE_CHK_RDY = 8, + NVME_QUIRK_NO_APST = 16, + NVME_QUIRK_NO_DEEPEST_PS = 32, + NVME_QUIRK_QDEPTH_ONE = 64, + NVME_QUIRK_MEDIUM_PRIO_SQ = 128, + NVME_QUIRK_IGNORE_DEV_SUBNQN = 256, + NVME_QUIRK_DISABLE_WRITE_ZEROES = 512, + NVME_QUIRK_SIMPLE_SUSPEND = 1024, + NVME_QUIRK_SINGLE_VECTOR = 2048, + NVME_QUIRK_128_BYTES_SQES = 4096, + NVME_QUIRK_SHARED_TAGS = 8192, + NVME_QUIRK_NO_TEMP_THRESH_CHANGE = 16384, + NVME_QUIRK_NO_NS_DESC_LIST = 32768, + NVME_QUIRK_DMA_ADDRESS_BITS_48 = 65536, + NVME_QUIRK_SKIP_CID_GEN = 131072, + NVME_QUIRK_BOGUS_NID = 262144, + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = 
524288, + NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = 1048576, + NVME_QUIRK_BROKEN_MSI = 2097152, +}; + +struct nvme_queue; + +struct nvme_dev { + struct nvme_queue *queues; + struct blk_mq_tag_set tagset; + struct blk_mq_tag_set admin_tagset; + u32 *dbs; + struct device *dev; + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool; + unsigned int online_queues; + unsigned int max_qid; + unsigned int io_queues[3]; + unsigned int num_vecs; + u32 q_depth; + int io_sqes; + u32 db_stride; + void *bar; + long unsigned int bar_mapped_size; + struct mutex shutdown_lock; + bool subsystem; + u64 cmb_size; + bool cmb_use_sqes; + u32 cmbsz; + u32 cmbloc; + long: 32; + struct nvme_ctrl ctrl; + u32 last_ps; + bool hmb; + struct sg_table *hmb_sgt; + mempool_t *iod_mempool; + mempool_t *iod_meta_mempool; + __le32 *dbbuf_dbs; + dma_addr_t dbbuf_dbs_dma_addr; + __le32 *dbbuf_eis; + dma_addr_t dbbuf_eis_dma_addr; + long: 32; + u64 host_mem_size; + u32 nr_host_mem_descs; + u32 host_mem_descs_size; + dma_addr_t host_mem_descs_dma; + struct nvme_host_mem_buf_desc *host_mem_descs; + void **host_mem_desc_bufs; + unsigned int nr_allocated_queues; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; +}; + +struct nvme_queue { + struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t cq_poll_lock; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 *q_db; + u32 q_depth; + u16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + long unsigned int flags; + __le32 *dbbuf_sq_db; + __le32 *dbbuf_cq_db; + __le32 *dbbuf_sq_ei; + __le32 *dbbuf_cq_ei; + struct completion delete_done; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +union nvme_descriptor { + struct nvme_sgl_desc *sg_list; + __le64 *prp_list; +}; + +struct nvme_iod { + struct nvme_request req; + struct nvme_command cmd; + bool aborted; + s8 nr_allocations; + unsigned int dma_len; + dma_addr_t first_dma; + dma_addr_t meta_dma; + struct sg_table sgt; + struct sg_table meta_sgt; + union nvme_descriptor meta_list; + union nvme_descriptor list[5]; +}; + +struct ich8_hsfsts { + u16 flcdone: 1; + u16 flcerr: 1; + u16 dael: 1; + u16 berasesz: 2; + u16 flcinprog: 1; + u16 reserved1: 2; + u16 reserved2: 6; + u16 fldesvalid: 1; + u16 flockdn: 1; +}; + +union ich8_hws_flash_status { + struct ich8_hsfsts hsf_status; + u16 regval; +}; + +struct ich8_hsflctl { + u16 flcgo: 1; + u16 flcycle: 2; + u16 reserved: 5; + u16 fldbcount: 2; + u16 flockdn: 6; +}; + +union ich8_hws_flash_ctrl { + struct ich8_hsflctl hsf_ctrl; + u16 regval; +}; + +struct ich8_pr { + u32 base: 13; + u32 reserved1: 2; + u32 rpe: 1; + u32 limit: 13; + u32 reserved2: 2; + u32 wpe: 1; +}; + +union ich8_flash_protected_range { + struct ich8_pr range; + u32 regval; +}; + +struct xhci_regset { + char name[32]; + struct debugfs_regset32 regset; + size_t nregs; + struct list_head list; +}; + +struct xhci_file_map { + const char *name; + int (*show)(struct seq_file *, void *); +}; + +struct xhci_ep_priv { + char name[32]; + struct dentry *root; + struct xhci_stream_info *stream_info; + struct xhci_ring *show_ring; + unsigned int stream_id; +}; + +struct xhci_slot_priv { + char name[32]; + struct dentry *root; + struct xhci_ep_priv *eps[31]; + struct xhci_virt_device *dev; +}; + +struct thermal_hwmon_device { + char type[20]; + struct device *device; + int count; + struct list_head tz_list; + struct list_head 
node; +}; + +struct thermal_hwmon_attr { + struct device_attribute attr; + char name[16]; +}; + +struct thermal_hwmon_temp { + struct list_head hwmon_node; + struct thermal_zone_device *tz; + struct thermal_hwmon_attr temp_input; + struct thermal_hwmon_attr temp_crit; +}; + +enum { + NETNSA_NONE = 0, + NETNSA_NSID = 1, + NETNSA_PID = 2, + NETNSA_FD = 3, + NETNSA_TARGET_NSID = 4, + NETNSA_CURRENT_NSID = 5, + __NETNSA_MAX = 6, +}; + +struct pcpu_gen_cookie { + local_t nesting; + long: 32; + u64 last; +}; + +struct gen_cookie { + struct pcpu_gen_cookie *local; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic64_t forward_last; + atomic64_t reverse_last; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct net_fill_args { + u32 portid; + u32 seq; + int flags; + int cmd; + int nsid; + bool add_ref; + int ref_nsid; +}; + +struct rtnl_net_dump_cb { + struct net *tgt_net; + struct net *ref_net; + struct sk_buff *skb; + struct net_fill_args fillargs; + int idx; + int s_idx; +}; + +struct gro_cell { + struct sk_buff_head napi_skbs; + struct napi_struct napi; +}; + +struct percpu_free_defer { + struct callback_head rcu; + void *ptr; +}; + +struct linkmodes_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; + bool peer_empty; +}; + +struct plca_reply_data { + struct ethnl_reply_data base; + struct phy_plca_cfg plca_cfg; + struct phy_plca_status plca_st; +}; + +enum nft_range_ops { + NFT_RANGE_EQ = 0, + NFT_RANGE_NEQ = 1, +}; + +enum nft_range_attributes { + NFTA_RANGE_UNSPEC = 0, + NFTA_RANGE_SREG = 1, + NFTA_RANGE_OP = 2, + NFTA_RANGE_FROM_DATA = 3, + NFTA_RANGE_TO_DATA = 4, + __NFTA_RANGE_MAX = 5, +}; + +struct nft_range_expr { + struct nft_data data_from; + struct nft_data data_to; + u8 sreg; + u8 len; + enum nft_range_ops op: 8; + long: 32; +}; + +struct xt_log_info { + unsigned char level; + unsigned char logflags; + char prefix[30]; +}; + +enum tcp_fastopen_client_fail { + TFO_STATUS_UNSPEC = 0, + TFO_COOKIE_UNAVAILABLE = 1, + TFO_DATA_NOT_ACKED = 2, + TFO_SYN_RETRANSMITTED = 3, +}; + +enum br_boolopt_id { + BR_BOOLOPT_NO_LL_LEARN = 0, + BR_BOOLOPT_MCAST_VLAN_SNOOPING = 1, + BR_BOOLOPT_MST_ENABLE = 2, + BR_BOOLOPT_MAX = 3, +}; + +enum { + BR_GROUPFWD_STP = 1, + BR_GROUPFWD_MACPAUSE = 2, + BR_GROUPFWD_LACP = 4, +}; + +struct net_bridge_fdb_flush_desc { + long unsigned int flags; + long unsigned int flags_mask; + int port_ifindex; + u16 vlan_id; +}; + +enum format_type { + FORMAT_TYPE_NONE = 0, + FORMAT_TYPE_WIDTH = 1, + FORMAT_TYPE_PRECISION = 2, + FORMAT_TYPE_CHAR = 3, + FORMAT_TYPE_STR = 4, + FORMAT_TYPE_PTR = 5, + FORMAT_TYPE_PERCENT_CHAR = 6, + FORMAT_TYPE_INVALID = 7, + FORMAT_TYPE_LONG_LONG = 8, + FORMAT_TYPE_ULONG = 9, + FORMAT_TYPE_LONG = 10, + FORMAT_TYPE_UBYTE = 11, + FORMAT_TYPE_BYTE = 12, + FORMAT_TYPE_USHORT = 13, + FORMAT_TYPE_SHORT = 14, + FORMAT_TYPE_UINT = 15, + FORMAT_TYPE_INT = 16, + FORMAT_TYPE_SIZE_T = 17, + FORMAT_TYPE_PTRDIFF = 18, +}; + +struct printf_spec { + unsigned int type: 8; + int field_width: 24; + unsigned int flags: 8; + unsigned int base: 8; + int precision: 16; +}; + +struct page_flags_fields { + int width; + int shift; + int mask; + const struct printf_spec *spec; + const char *name; +}; + +struct cache_index_dir; + +struct cache_dir { + struct kobject *kobj; + struct cache_index_dir *index; +}; + +struct cache; + +struct cache_index_dir { + struct kobject kobj; + struct cache_index_dir *next; + struct cache *cache; +}; + +struct 
cache { + struct device_node *ofnode; + struct cpumask shared_cpu_map; + int type; + int level; + int group_id; + struct list_head list; + struct cache *next_local; +}; + +struct cache_type_info { + const char *name; + const char *size_prop; + const char *line_size_props[2]; + const char *nr_sets_prop; +}; + +enum { + HW_BREAKPOINT_EMPTY = 0, + HW_BREAKPOINT_R = 1, + HW_BREAKPOINT_W = 2, + HW_BREAKPOINT_RW = 3, + HW_BREAKPOINT_X = 4, + HW_BREAKPOINT_INVALID = 7, +}; + +enum bp_type_idx { + TYPE_INST = 0, + TYPE_DATA = 1, + TYPE_MAX = 2, +}; + +struct dma_map_ops { + void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int); + void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int); + struct page * (*alloc_pages_op)(struct device *, size_t, dma_addr_t *, enum dma_data_direction, gfp_t); + void (*free_pages)(struct device *, size_t, struct page *, dma_addr_t, enum dma_data_direction); + int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int); + int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int); + dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); + int (*dma_supported)(struct device *, u64); + u64 (*get_required_mask)(struct device *); + size_t (*max_mapping_size)(struct device *); + size_t (*opt_mapping_size)(); + long unsigned int (*get_merge_boundary)(struct device *); +}; + +struct of_bus; + +struct of_pci_range_parser { + struct device_node *node; + const struct of_bus *bus; + const __be32 *range; + const __be32 *end; + int na; + int ns; + int pna; + bool dma; +}; + +struct of_bus { + const char *name; + const char *addresses; + int (*match)(struct device_node *); + void (*count_cells)(struct device_node *, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int, int); + int (*translate)(__be32 *, u64, int); + int flag_cells; + unsigned int (*get_flags)(const __be32 *); +}; + +struct of_pci_range { + union { + u64 pci_addr; + u64 bus_addr; + }; + u64 cpu_addr; + u64 size; + u32 flags; + long: 32; +}; + +struct pci_intx_virq { + int virq; + struct kref kref; + struct list_head list_node; +}; + +struct trace_event_raw_cpuhp_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_multi_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; 
+ +struct trace_event_raw_cpuhp_exit { + struct trace_entry ent; + unsigned int cpu; + int state; + int idx; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_cpuhp_enter {}; + +struct trace_event_data_offsets_cpuhp_multi_enter {}; + +struct trace_event_data_offsets_cpuhp_exit {}; + +typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int)); + +typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, int (*)(unsigned int, struct hlist_node *), struct hlist_node *); + +typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int); + +struct cpuhp_cpu_state { + enum cpuhp_state state; + enum cpuhp_state target; + enum cpuhp_state fail; + struct task_struct *thread; + bool should_run; + bool rollback; + bool single; + bool bringup; + struct hlist_node *node; + struct hlist_node *last; + enum cpuhp_state cb_state; + int result; + atomic_t ap_sync_state; + struct completion done_up; + struct completion done_down; +}; + +struct cpuhp_step { + const char *name; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } startup; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } teardown; + struct hlist_head list; + bool cant_stop; + bool multi_instance; +}; + +enum cpuhp_sync_state { + SYNC_STATE_DEAD = 0, + SYNC_STATE_KICKED = 1, + SYNC_STATE_SHOULD_DIE = 2, + SYNC_STATE_ALIVE = 3, + SYNC_STATE_SHOULD_ONLINE = 4, + SYNC_STATE_ONLINE = 5, +}; + +enum cpu_mitigations { + CPU_MITIGATIONS_OFF = 0, + CPU_MITIGATIONS_AUTO = 1, + CPU_MITIGATIONS_AUTO_NOSMT = 2, +}; + +enum rwsem_waiter_type { + RWSEM_WAITING_FOR_WRITE = 0, + RWSEM_WAITING_FOR_READ = 1, +}; + +struct rwsem_waiter { + struct list_head list; + struct task_struct *task; + enum rwsem_waiter_type type; + long unsigned int timeout; + bool handoff_set; +}; + +enum rwsem_wake_type { + RWSEM_WAKE_ANY = 0, + RWSEM_WAKE_READERS = 1, + RWSEM_WAKE_READ_OWNED = 2, +}; + +enum owner_state { + OWNER_NULL = 1, + OWNER_WRITER = 2, + OWNER_READER = 4, + OWNER_NONSPINNABLE = 8, +}; + +typedef int suspend_state_t; + +struct trace_event_raw_timer_class { + struct trace_entry ent; + void *timer; + char __data[0]; +}; + +struct trace_event_raw_timer_start { + struct trace_entry ent; + void *timer; + void *function; + long unsigned int expires; + long unsigned int bucket_expiry; + long unsigned int now; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_timer_expire_entry { + struct trace_entry ent; + void *timer; + long unsigned int now; + void *function; + long unsigned int baseclk; + char __data[0]; +}; + +struct trace_event_raw_timer_base_idle { + struct trace_entry ent; + bool is_idle; + unsigned int cpu; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_init { + struct trace_entry ent; + void *hrtimer; + clockid_t clockid; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_start { + struct trace_entry ent; + void *hrtimer; + void *function; + s64 expires; + s64 softexpires; + enum hrtimer_mode mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_hrtimer_expire_entry { + struct trace_entry ent; + void *hrtimer; + long: 32; + s64 now; + void *function; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_hrtimer_class { + struct trace_entry ent; + void *hrtimer; + char __data[0]; +}; + +struct trace_event_raw_itimer_state { + struct trace_entry ent; + int which; + long: 32; + long long unsigned int expires; + long int 
value_sec; + long int value_nsec; + long int interval_sec; + long int interval_nsec; + char __data[0]; +}; + +struct trace_event_raw_itimer_expire { + struct trace_entry ent; + int which; + pid_t pid; + long long unsigned int now; + char __data[0]; +}; + +struct trace_event_data_offsets_timer_class {}; + +struct trace_event_data_offsets_timer_start {}; + +struct trace_event_data_offsets_timer_expire_entry {}; + +struct trace_event_data_offsets_timer_base_idle {}; + +struct trace_event_data_offsets_hrtimer_init {}; + +struct trace_event_data_offsets_hrtimer_start {}; + +struct trace_event_data_offsets_hrtimer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_class {}; + +struct trace_event_data_offsets_itimer_state {}; + +struct trace_event_data_offsets_itimer_expire {}; + +typedef void (*btf_trace_timer_init)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_start)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_entry)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_cancel)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_base_idle)(void *, bool, unsigned int); + +typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *); + +typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *); + +typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *); + +typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 * const, long long unsigned int); + +typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, long long unsigned int); + +struct timer_base { + raw_spinlock_t lock; + struct timer_list *running_timer; + long unsigned int clk; + long unsigned int next_expiry; + unsigned int cpu; + bool next_expiry_recalc; + bool is_idle; + bool timers_pending; + long unsigned int pending_map[18]; + struct hlist_head vectors[576]; +}; + +typedef u32 kprobe_opcode_t; + +struct arch_specific_insn { + kprobe_opcode_t *insn; + int boostable; +}; + +struct kprobe; + +struct prev_kprobe { + struct kprobe *kp; + long unsigned int status; + long unsigned int saved_msr; +}; + +typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); + +typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int); + +struct kprobe { + struct hlist_node hlist; + struct list_head list; + long unsigned int nmissed; + kprobe_opcode_t *addr; + const char *symbol_name; + unsigned int offset; + kprobe_pre_handler_t pre_handler; + kprobe_post_handler_t post_handler; + kprobe_opcode_t opcode; + struct arch_specific_insn ainsn; + u32 flags; +}; + +struct kprobe_ctlblk { + long unsigned int kprobe_status; + long unsigned int kprobe_saved_msr; + struct prev_kprobe prev_kprobe; +}; + +struct arch_optimized_insn { + kprobe_opcode_t copied_insn[1]; + kprobe_opcode_t *insn; +}; + +struct kretprobe_instance; + +typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); + +struct kretprobe_instance { + struct rethook_node node; + char data[0]; +}; + +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + kretprobe_handler_t entry_handler; + int maxactive; + int nmissed; + size_t data_size; + struct rethook *rh; +}; + 
+struct kprobe_blacklist_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; +}; + +struct optimized_kprobe { + struct kprobe kp; + struct list_head list; + struct arch_optimized_insn optinsn; +}; + +struct kprobe_insn_page { + struct list_head list; + kprobe_opcode_t *insns; + struct kprobe_insn_cache *cache; + int nused; + int ngarbage; + char slot_used[0]; +}; + +enum kprobe_slot_state { + SLOT_CLEAN = 0, + SLOT_DIRTY = 1, + SLOT_USED = 2, +}; + +enum bpf_stack_slot_type { + STACK_INVALID = 0, + STACK_SPILL = 1, + STACK_MISC = 2, + STACK_ZERO = 3, + STACK_DYNPTR = 4, + STACK_ITER = 5, +}; + +typedef u64 (*btf_bpf_cgrp_storage_get)(struct bpf_map *, struct cgroup *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_cgrp_storage_delete)(struct bpf_map *, struct cgroup *); + +enum pageblock_bits { + PB_migrate = 0, + PB_migrate_end = 2, + PB_migrate_skip = 3, + NR_PAGEBLOCK_BITS = 4, +}; + +enum meminit_context { + MEMINIT_EARLY = 0, + MEMINIT_HOTPLUG = 1, +}; + +enum mminit_level { + MMINIT_WARNING = 0, + MMINIT_VERIFY = 1, + MMINIT_TRACE = 2, +}; + +union swap_header { + struct { + char reserved[4086]; + char magic[10]; + } magic; + struct { + char bootbits[1024]; + __u32 version; + __u32 last_page; + __u32 nr_badpages; + unsigned char sws_uuid[16]; + unsigned char sws_volume[16]; + __u32 padding[117]; + __u32 badpages[1]; + } info; +}; + +struct swap_extent { + struct rb_node rb_node; + long unsigned int start_page; + long unsigned int nr_pages; + long: 32; + sector_t start_block; +}; + +typedef int __kernel_daddr_t; + +struct ustat { + __kernel_daddr_t f_tfree; + long unsigned int f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +struct statfs { + __u32 f_type; + __u32 f_bsize; + __u32 f_blocks; + __u32 f_bfree; + __u32 f_bavail; + __u32 f_files; + __u32 f_ffree; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_frsize; + __u32 f_flags; + __u32 f_spare[4]; +}; + +struct statfs64 { + __u32 f_type; + __u32 f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_frsize; + __u32 f_flags; + __u32 f_spare[4]; + long: 32; +}; + +struct kernfs_global_locks { + struct mutex open_file_mutex[1024]; +}; + +struct kernfs_open_node { + struct callback_head callback_head; + atomic_t event; + wait_queue_head_t poll; + struct list_head files; + unsigned int nr_mmapped; + unsigned int nr_to_release; +}; + +struct dax_holder_operations { + int (*notify_failure)(struct dax_device *, u64, u64, int); +}; + +struct ext4_allocation_request { + struct inode *inode; + unsigned int len; + ext4_lblk_t logical; + ext4_lblk_t lleft; + ext4_lblk_t lright; + long: 32; + ext4_fsblk_t goal; + ext4_fsblk_t pleft; + ext4_fsblk_t pright; + unsigned int flags; + long: 32; +}; + +struct ext4_lazy_init { + long unsigned int li_state; + struct list_head li_request_list; + struct mutex li_list_mtx; +}; + +struct partial_cluster { + ext4_fsblk_t pclu; + ext4_lblk_t lblk; + enum { + initial = 0, + tofree = 1, + nofree = 2, + } state; +}; + +struct ext4_journal_cb_entry { + struct list_head jce_list; + void (*jce_func)(struct super_block *, struct ext4_journal_cb_entry *, int); +}; + +struct ext4_prealloc_space { + union { + struct rb_node inode_node; + struct list_head lg_list; + } pa_node; + struct list_head pa_group_list; + union { + struct list_head pa_tmp_list; + struct callback_head pa_rcu; + } u; + spinlock_t pa_lock; + atomic_t pa_count; + unsigned int pa_deleted; + ext4_fsblk_t 
pa_pstart; + ext4_lblk_t pa_lstart; + ext4_grpblk_t pa_len; + ext4_grpblk_t pa_free; + short unsigned int pa_type; + union { + rwlock_t *inode_lock; + spinlock_t *lg_lock; + } pa_node_lock; + struct inode *pa_inode; +}; + +struct ext4_free_extent { + ext4_lblk_t fe_logical; + ext4_grpblk_t fe_start; + ext4_group_t fe_group; + ext4_grpblk_t fe_len; +}; + +struct ext4_allocation_context { + struct inode *ac_inode; + struct super_block *ac_sb; + struct ext4_free_extent ac_o_ex; + struct ext4_free_extent ac_g_ex; + struct ext4_free_extent ac_b_ex; + struct ext4_free_extent ac_f_ex; + ext4_grpblk_t ac_orig_goal_len; + __u32 ac_flags; + __u32 ac_groups_linear_remaining; + __u16 ac_groups_scanned; + __u16 ac_found; + __u16 ac_cX_found[5]; + __u16 ac_tail; + __u16 ac_buddy; + __u8 ac_status; + __u8 ac_criteria; + __u8 ac_2order; + __u8 ac_op; + struct folio *ac_bitmap_folio; + struct folio *ac_buddy_folio; + struct ext4_prealloc_space *ac_pa; + struct ext4_locality_group *ac_lg; +}; + +struct trace_event_raw_ext4_other_inode_update_time { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t orig_ino; + uid_t uid; + gid_t gid; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_free_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + uid_t uid; + gid_t gid; + __u64 blocks; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_request_inode { + struct trace_entry ent; + dev_t dev; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_allocate_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_evict_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int nlink; + char __data[0]; +}; + +struct trace_event_raw_ext4_drop_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int drop; + char __data[0]; +}; + +struct trace_event_raw_ext4_nfs_commit_metadata { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_mark_inode_dirty { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_ext4_begin_ordered_truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t new_size; + char __data[0]; +}; + +struct trace_event_raw_ext4__write_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4__write_end { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + unsigned int copied; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + long unsigned int writeback_index; + int sync_mode; + char for_kupdate; + char range_cyclic; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_da_write_pages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int first_page; + long int nr_to_write; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_write_pages_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 lblk; + __u32 len; + __u32 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages_result { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + int pages_written; + long int pages_skipped; + long unsigned int 
writeback_index; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4__folio_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_ext4_invalidate_folio_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + size_t offset; + size_t length; + char __data[0]; +}; + +struct trace_event_raw_ext4_discard_blocks { + struct trace_entry ent; + dev_t dev; + long: 32; + __u64 blk; + __u64 count; + char __data[0]; +}; + +struct trace_event_raw_ext4__mb_new_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 pa_pstart; + __u64 pa_lstart; + __u32 pa_len; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_mb_release_inode_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + __u32 count; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_mb_release_group_pa { + struct trace_entry ent; + dev_t dev; + long: 32; + __u64 pa_pstart; + __u32 pa_len; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_discard_preallocations { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_discard_preallocations { + struct trace_entry ent; + dev_t dev; + int needed; + char __data[0]; +}; + +struct trace_event_raw_ext4_request_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_allocate_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_free_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + long unsigned int count; + int flags; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_sync_file_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + int datasync; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_file_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_fs { + struct trace_entry ent; + dev_t dev; + int wait; + char __data[0]; +}; + +struct trace_event_raw_ext4_alloc_da_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int data_blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_alloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 goal_logical; + int goal_start; + __u32 goal_group; + int goal_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + __u16 found; + __u16 groups; + __u16 buddy; + __u16 flags; + __u16 tail; + __u8 cr; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_prealloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4__mballoc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; 
+}; + +struct trace_event_raw_ext4_forget { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + int is_metadata; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_update_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int used_blocks; + int reserved_data_blocks; + int quota_claim; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int reserve_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_da_release_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int freed_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4__bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_read_block_bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + bool prefetch; + char __data[0]; +}; + +struct trace_event_raw_ext4__fallocate_mode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + int mode; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_fallocate_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int blocks; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + long: 32; + loff_t size; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4__truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_fastpath { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + ext4_lblk_t i_lblk; + unsigned int i_len; + ext4_fsblk_t i_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + unsigned int len; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int flags; + long: 32; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + unsigned int len; + unsigned int mflags; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_load_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_load_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_sb { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + int revoke_creds; + int type; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_inode { + struct trace_entry ent; + long unsigned int ino; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + 
int revoke_creds; + int type; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_reserved { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4__trim { + struct trace_entry ent; + int dev_major; + int dev_minor; + __u32 group; + int start; + int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_handle_unwritten_extents { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int flags; + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + unsigned int len; + unsigned int allocated; + ext4_fsblk_t newblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_get_implied_cluster_alloc_exit { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + ext4_lblk_t lblk; + long: 32; + ext4_fsblk_t pblk; + unsigned int len; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_show_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + short unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_remove_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t from; + ext4_lblk_t to; + ext4_fsblk_t ee_pblk; + ext4_lblk_t ee_lblk; + short unsigned int ee_len; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_leaf { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t ee_lblk; + ext4_fsblk_t ee_pblk; + short int ee_len; + long: 32; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_idx { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space_done { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + long: 32; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + short unsigned int eh_entries; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4__es_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_es_remove_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t lblk; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_es_lookup_extent_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_lookup_extent_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + int found; + char __data[0]; +}; + +struct trace_event_raw_ext4__es_shrink_enter { + struct trace_entry ent; + dev_t dev; + int nr_to_scan; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink_scan_exit { + struct 
trace_entry ent; + dev_t dev; + int nr_shrunk; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_collapse_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_insert_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + long long unsigned int scan_time; + int nr_skipped; + int retried; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_insert_delayed_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + bool lclu_allocated; + bool end_allocated; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_ext4_fsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u32 agno; + long: 32; + u64 bno; + u64 len; + u64 owner; + char __data[0]; +}; + +struct trace_event_raw_ext4_getfsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u64 block; + u64 len; + u64 owner; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_shutdown { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_error { + struct trace_entry ent; + dev_t dev; + const char *function; + unsigned int line; + char __data[0]; +}; + +struct trace_event_raw_ext4_prefetch_bitmaps { + struct trace_entry ent; + dev_t dev; + __u32 group; + __u32 next; + __u32 ios; + char __data[0]; +}; + +struct trace_event_raw_ext4_lazy_itable_init { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay_scan { + struct trace_entry ent; + dev_t dev; + int error; + int off; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay { + struct trace_entry ent; + dev_t dev; + int tag; + int ino; + int priv1; + int priv2; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_start { + struct trace_entry ent; + dev_t dev; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_stop { + struct trace_entry ent; + dev_t dev; + int nblks; + int reason; + int num_fc; + int num_fc_ineligible; + int nblks_agg; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_stats { + struct trace_entry ent; + dev_t dev; + unsigned int fc_ineligible_rc[10]; + long unsigned int fc_commits; + long unsigned int fc_ineligible_commits; + long unsigned int fc_numblks; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_dentry { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_inode { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_range { + struct trace_entry ent; + dev_t dev; + tid_t t_tid; + ino_t i_ino; + tid_t i_sync_tid; + long int start; + long int end; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_cleanup { + struct trace_entry ent; + dev_t dev; + int j_fc_off; + int full; + tid_t tid; + char __data[0]; +}; + +struct trace_event_raw_ext4_update_sb { + struct trace_entry ent; + dev_t dev; + long: 32; + ext4_fsblk_t fsblk; + unsigned int flags; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_ext4_other_inode_update_time {}; + 
+struct trace_event_data_offsets_ext4_free_inode {}; + +struct trace_event_data_offsets_ext4_request_inode {}; + +struct trace_event_data_offsets_ext4_allocate_inode {}; + +struct trace_event_data_offsets_ext4_evict_inode {}; + +struct trace_event_data_offsets_ext4_drop_inode {}; + +struct trace_event_data_offsets_ext4_nfs_commit_metadata {}; + +struct trace_event_data_offsets_ext4_mark_inode_dirty {}; + +struct trace_event_data_offsets_ext4_begin_ordered_truncate {}; + +struct trace_event_data_offsets_ext4__write_begin {}; + +struct trace_event_data_offsets_ext4__write_end {}; + +struct trace_event_data_offsets_ext4_writepages {}; + +struct trace_event_data_offsets_ext4_da_write_pages {}; + +struct trace_event_data_offsets_ext4_da_write_pages_extent {}; + +struct trace_event_data_offsets_ext4_writepages_result {}; + +struct trace_event_data_offsets_ext4__folio_op {}; + +struct trace_event_data_offsets_ext4_invalidate_folio_op {}; + +struct trace_event_data_offsets_ext4_discard_blocks {}; + +struct trace_event_data_offsets_ext4__mb_new_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_inode_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_group_pa {}; + +struct trace_event_data_offsets_ext4_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_mb_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_request_blocks {}; + +struct trace_event_data_offsets_ext4_allocate_blocks {}; + +struct trace_event_data_offsets_ext4_free_blocks {}; + +struct trace_event_data_offsets_ext4_sync_file_enter {}; + +struct trace_event_data_offsets_ext4_sync_file_exit {}; + +struct trace_event_data_offsets_ext4_sync_fs {}; + +struct trace_event_data_offsets_ext4_alloc_da_blocks {}; + +struct trace_event_data_offsets_ext4_mballoc_alloc {}; + +struct trace_event_data_offsets_ext4_mballoc_prealloc {}; + +struct trace_event_data_offsets_ext4__mballoc {}; + +struct trace_event_data_offsets_ext4_forget {}; + +struct trace_event_data_offsets_ext4_da_update_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_release_space {}; + +struct trace_event_data_offsets_ext4__bitmap_load {}; + +struct trace_event_data_offsets_ext4_read_block_bitmap_load {}; + +struct trace_event_data_offsets_ext4__fallocate_mode {}; + +struct trace_event_data_offsets_ext4_fallocate_exit {}; + +struct trace_event_data_offsets_ext4_unlink_enter {}; + +struct trace_event_data_offsets_ext4_unlink_exit {}; + +struct trace_event_data_offsets_ext4__truncate {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_enter {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_fastpath {}; + +struct trace_event_data_offsets_ext4__map_blocks_enter {}; + +struct trace_event_data_offsets_ext4__map_blocks_exit {}; + +struct trace_event_data_offsets_ext4_ext_load_extent {}; + +struct trace_event_data_offsets_ext4_load_inode {}; + +struct trace_event_data_offsets_ext4_journal_start_sb {}; + +struct trace_event_data_offsets_ext4_journal_start_inode {}; + +struct trace_event_data_offsets_ext4_journal_start_reserved {}; + +struct trace_event_data_offsets_ext4__trim {}; + +struct trace_event_data_offsets_ext4_ext_handle_unwritten_extents {}; + +struct trace_event_data_offsets_ext4_get_implied_cluster_alloc_exit {}; + +struct trace_event_data_offsets_ext4_ext_show_extent {}; + +struct trace_event_data_offsets_ext4_remove_blocks {}; + +struct trace_event_data_offsets_ext4_ext_rm_leaf {}; + +struct 
trace_event_data_offsets_ext4_ext_rm_idx {}; + +struct trace_event_data_offsets_ext4_ext_remove_space {}; + +struct trace_event_data_offsets_ext4_ext_remove_space_done {}; + +struct trace_event_data_offsets_ext4__es_extent {}; + +struct trace_event_data_offsets_ext4_es_remove_extent {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_enter {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_exit {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_enter {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_exit {}; + +struct trace_event_data_offsets_ext4__es_shrink_enter {}; + +struct trace_event_data_offsets_ext4_es_shrink_scan_exit {}; + +struct trace_event_data_offsets_ext4_collapse_range {}; + +struct trace_event_data_offsets_ext4_insert_range {}; + +struct trace_event_data_offsets_ext4_es_shrink {}; + +struct trace_event_data_offsets_ext4_es_insert_delayed_extent {}; + +struct trace_event_data_offsets_ext4_fsmap_class {}; + +struct trace_event_data_offsets_ext4_getfsmap_class {}; + +struct trace_event_data_offsets_ext4_shutdown {}; + +struct trace_event_data_offsets_ext4_error {}; + +struct trace_event_data_offsets_ext4_prefetch_bitmaps {}; + +struct trace_event_data_offsets_ext4_lazy_itable_init {}; + +struct trace_event_data_offsets_ext4_fc_replay_scan {}; + +struct trace_event_data_offsets_ext4_fc_replay {}; + +struct trace_event_data_offsets_ext4_fc_commit_start {}; + +struct trace_event_data_offsets_ext4_fc_commit_stop {}; + +struct trace_event_data_offsets_ext4_fc_stats {}; + +struct trace_event_data_offsets_ext4_fc_track_dentry {}; + +struct trace_event_data_offsets_ext4_fc_track_inode {}; + +struct trace_event_data_offsets_ext4_fc_track_range {}; + +struct trace_event_data_offsets_ext4_fc_cleanup {}; + +struct trace_event_data_offsets_ext4_update_sb {}; + +typedef void (*btf_trace_ext4_other_inode_update_time)(void *, struct inode *, ino_t); + +typedef void (*btf_trace_ext4_free_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_request_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_allocate_inode)(void *, struct inode *, struct inode *, int); + +typedef void (*btf_trace_ext4_evict_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_drop_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_nfs_commit_metadata)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mark_inode_dirty)(void *, struct inode *, long unsigned int); + +typedef void (*btf_trace_ext4_begin_ordered_truncate)(void *, struct inode *, loff_t); + +typedef void (*btf_trace_ext4_write_begin)(void *, struct inode *, loff_t, unsigned int); + +typedef void (*btf_trace_ext4_da_write_begin)(void *, struct inode *, loff_t, unsigned int); + +typedef void (*btf_trace_ext4_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_journalled_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_da_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_writepages)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages)(void *, struct inode *, long unsigned int, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages_extent)(void *, struct inode *, struct ext4_map_blocks *); + +typedef void (*btf_trace_ext4_writepages_result)(void *, struct inode *, struct writeback_control *, int, int); + +typedef 
void (*btf_trace_ext4_read_folio)(void *, struct inode *, struct folio *); + +typedef void (*btf_trace_ext4_release_folio)(void *, struct inode *, struct folio *); + +typedef void (*btf_trace_ext4_invalidate_folio)(void *, struct folio *, size_t, size_t); + +typedef void (*btf_trace_ext4_journalled_invalidate_folio)(void *, struct folio *, size_t, size_t); + +typedef void (*btf_trace_ext4_discard_blocks)(void *, struct super_block *, long long unsigned int, long long unsigned int); + +typedef void (*btf_trace_ext4_mb_new_inode_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_new_group_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_release_inode_pa)(void *, struct ext4_prealloc_space *, long long unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_mb_release_group_pa)(void *, struct super_block *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_discard_preallocations)(void *, struct inode *, unsigned int); + +typedef void (*btf_trace_ext4_mb_discard_preallocations)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_request_blocks)(void *, struct ext4_allocation_request *); + +typedef void (*btf_trace_ext4_allocate_blocks)(void *, struct ext4_allocation_request *, long long unsigned int); + +typedef void (*btf_trace_ext4_free_blocks)(void *, struct inode *, __u64, long unsigned int, int); + +typedef void (*btf_trace_ext4_sync_file_enter)(void *, struct file *, int); + +typedef void (*btf_trace_ext4_sync_file_exit)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_sync_fs)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_alloc_da_blocks)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mballoc_alloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_prealloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_discard)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_mballoc_free)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_forget)(void *, struct inode *, int, __u64); + +typedef void (*btf_trace_ext4_da_update_reserve_space)(void *, struct inode *, int, int); + +typedef void (*btf_trace_ext4_da_reserve_space)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_da_release_space)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_mb_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_mb_buddy_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_load_inode_bitmap)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_read_block_bitmap_load)(void *, struct super_block *, long unsigned int, bool); + +typedef void (*btf_trace_ext4_fallocate_enter)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_punch_hole)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_zero_range)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_fallocate_exit)(void *, struct inode *, loff_t, unsigned int, int); + +typedef void (*btf_trace_ext4_unlink_enter)(void *, struct inode *, struct dentry *); + +typedef void (*btf_trace_ext4_unlink_exit)(void *, struct dentry *, int); 
+ +typedef void (*btf_trace_ext4_truncate_enter)(void *, struct inode *); + +typedef void (*btf_trace_ext4_truncate_exit)(void *, struct inode *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_enter)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_fastpath)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ind_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ext_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ind_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_load_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_load_inode)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_sb)(void *, struct super_block *, int, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_inode)(void *, struct inode *, int, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_reserved)(void *, struct super_block *, int, long unsigned int); + +typedef void (*btf_trace_ext4_trim_extent)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_trim_all_free)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_ext_handle_unwritten_extents)(void *, struct inode *, struct ext4_map_blocks *, int, unsigned int, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_get_implied_cluster_alloc_exit)(void *, struct super_block *, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_show_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t, short unsigned int); + +typedef void (*btf_trace_ext4_remove_blocks)(void *, struct inode *, struct ext4_extent *, ext4_lblk_t, ext4_fsblk_t, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_leaf)(void *, struct inode *, ext4_lblk_t, struct ext4_extent *, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_idx)(void *, struct inode *, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_ext_remove_space)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int); + +typedef void (*btf_trace_ext4_ext_remove_space_done)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, struct partial_cluster *, __le16); + +typedef void (*btf_trace_ext4_es_insert_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_cache_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_remove_extent)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_exit)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_lookup_extent_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_lookup_extent_exit)(void *, struct inode *, struct extent_status *, int); + +typedef void (*btf_trace_ext4_es_shrink_count)(void *, struct super_block *, int, int); + 
+typedef void (*btf_trace_ext4_es_shrink_scan_enter)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_exit)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_collapse_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_insert_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_es_shrink)(void *, struct super_block *, int, u64, int, int); + +typedef void (*btf_trace_ext4_es_insert_delayed_extent)(void *, struct inode *, struct extent_status *, bool, bool); + +typedef void (*btf_trace_ext4_fsmap_low_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_high_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_mapping)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_getfsmap_low_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_high_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_mapping)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_shutdown)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_error)(void *, struct super_block *, const char *, unsigned int); + +typedef void (*btf_trace_ext4_prefetch_bitmaps)(void *, struct super_block *, ext4_group_t, ext4_group_t, unsigned int); + +typedef void (*btf_trace_ext4_lazy_itable_init)(void *, struct super_block *, ext4_group_t); + +typedef void (*btf_trace_ext4_fc_replay_scan)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_fc_replay)(void *, struct super_block *, int, int, int, int); + +typedef void (*btf_trace_ext4_fc_commit_start)(void *, struct super_block *, tid_t); + +typedef void (*btf_trace_ext4_fc_commit_stop)(void *, struct super_block *, int, int, tid_t); + +typedef void (*btf_trace_ext4_fc_stats)(void *, struct super_block *); + +typedef void (*btf_trace_ext4_fc_track_create)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_link)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_unlink)(void *, handle_t *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_inode)(void *, handle_t *, struct inode *, int); + +typedef void (*btf_trace_ext4_fc_track_range)(void *, handle_t *, struct inode *, long int, long int, int); + +typedef void (*btf_trace_ext4_fc_cleanup)(void *, journal_t *, int, tid_t); + +typedef void (*btf_trace_ext4_update_sb)(void *, struct super_block *, ext4_fsblk_t, unsigned int); + +struct ext4_err_translation { + int code; + int errno; +}; + +enum { + Opt_bsd_df = 0, + Opt_minix_df = 1, + Opt_grpid = 2, + Opt_nogrpid = 3, + Opt_resgid = 4, + Opt_resuid = 5, + Opt_sb___2 = 6, + Opt_nouid32 = 7, + Opt_debug___2 = 8, + Opt_removed = 9, + Opt_user_xattr = 10, + Opt_acl___2 = 11, + Opt_auto_da_alloc = 12, + Opt_noauto_da_alloc = 13, + Opt_noload = 14, + Opt_commit = 15, + Opt_min_batch_time = 16, + Opt_max_batch_time = 17, + Opt_journal_dev = 18, + Opt_journal_path = 19, + Opt_journal_checksum = 20, + Opt_journal_async_commit = 21, + Opt_abort = 22, + Opt_data_journal = 23, + Opt_data_ordered = 24, + Opt_data_writeback = 25, + Opt_data_err_abort = 26, + Opt_data_err_ignore = 27, + Opt_test_dummy_encryption = 28, + Opt_inlinecrypt = 29, + Opt_usrjquota = 
30, + Opt_grpjquota = 31, + Opt_quota___2 = 32, + Opt_noquota = 33, + Opt_barrier___2 = 34, + Opt_nobarrier = 35, + Opt_err___5 = 36, + Opt_usrquota___2 = 37, + Opt_grpquota___2 = 38, + Opt_prjquota = 39, + Opt_dax = 40, + Opt_dax_always = 41, + Opt_dax_inode = 42, + Opt_dax_never = 43, + Opt_stripe = 44, + Opt_delalloc = 45, + Opt_nodelalloc = 46, + Opt_warn_on_error = 47, + Opt_nowarn_on_error = 48, + Opt_mblk_io_submit = 49, + Opt_debug_want_extra_isize = 50, + Opt_nomblk_io_submit = 51, + Opt_block_validity = 52, + Opt_noblock_validity = 53, + Opt_inode_readahead_blks = 54, + Opt_journal_ioprio = 55, + Opt_dioread_nolock = 56, + Opt_dioread_lock = 57, + Opt_discard___3 = 58, + Opt_nodiscard = 59, + Opt_init_itable = 60, + Opt_noinit_itable = 61, + Opt_max_dir_size_kb = 62, + Opt_nojournal_checksum = 63, + Opt_nombcache = 64, + Opt_no_prefetch_block_bitmaps = 65, + Opt_mb_optimize_scan = 66, + Opt_errors___2 = 67, + Opt_data = 68, + Opt_data_err = 69, + Opt_jqfmt = 70, + Opt_dax_type = 71, +}; + +struct mount_opts { + int token; + int mount_opt; + int flags; +}; + +struct ext4_fs_context { + char *s_qf_names[3]; + struct fscrypt_dummy_policy dummy_enc_policy; + int s_jquota_fmt; + short unsigned int qname_spec; + long unsigned int vals_s_flags; + long unsigned int mask_s_flags; + long unsigned int journal_devnum; + long unsigned int s_commit_interval; + long unsigned int s_stripe; + unsigned int s_inode_readahead_blks; + unsigned int s_want_extra_isize; + unsigned int s_li_wait_mult; + unsigned int s_max_dir_size_kb; + unsigned int journal_ioprio; + unsigned int vals_s_mount_opt; + unsigned int mask_s_mount_opt; + unsigned int vals_s_mount_opt2; + unsigned int mask_s_mount_opt2; + unsigned int opt_flags; + unsigned int spec; + u32 s_max_batch_time; + u32 s_min_batch_time; + kuid_t s_resuid; + kgid_t s_resgid; + long: 32; + ext4_fsblk_t s_sb_block; +}; + +struct ext4_mount_options { + long unsigned int s_mount_opt; + long unsigned int s_mount_opt2; + kuid_t s_resuid; + kgid_t s_resgid; + long unsigned int s_commit_interval; + u32 s_min_batch_time; + u32 s_max_batch_time; +}; + +struct keyctl_dh_params { + union { + __s32 private; + __s32 priv; + }; + __s32 prime; + __s32 base; +}; + +struct keyctl_kdf_params { + char *hashname; + char *otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct xor_block_template { + struct xor_block_template *next; + const char *name; + int speed; + void (*do_2)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict); + void (*do_3)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); + void (*do_4)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); + void (*do_5)(long unsigned int, long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict, const long unsigned int * restrict); +}; + +struct bdev_inode { + struct block_device bdev; + struct inode vfs_inode; +}; + +struct io_fadvise { + struct file *file; + long: 32; + u64 offset; + u64 len; + u32 advice; + long: 32; +}; + +struct io_madvise { + struct file *file; + long: 32; + u64 addr; + u64 len; + u32 advice; + long: 32; +}; + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = 1, + CACHE_TYPE_DATA = 2, + CACHE_TYPE_SEPARATE = 3, + CACHE_TYPE_UNIFIED = 4, +}; + +struct cacheinfo { + unsigned int id; + 
enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; + void *fw_token; + bool disable_sysfs; + void *priv; +}; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int per_cpu_data_slice_size; + unsigned int num_levels; + unsigned int num_leaves; + bool cpu_map_populated; + bool early_ci_levels; +}; + +struct cache_type_info___2 { + const char *size_prop; + const char *line_size_props[2]; + const char *nr_sets_prop; +}; + +struct trace_event_raw_scsi_dispatch_cmd_start { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_dispatch_cmd_error { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + int rtn; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_cmd_done_timeout_template { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + int result; + unsigned int opcode; + unsigned int cmd_len; + int driver_tag; + int scheduler_tag; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + u8 sense_key; + u8 asc; + u8 ascq; + char __data[0]; +}; + +struct trace_event_raw_scsi_eh_wakeup { + struct trace_entry ent; + unsigned int host_no; + char __data[0]; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_start { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_error { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_cmd_done_timeout_template { + u32 cmnd; + const void *cmnd_ptr_; +}; + +struct trace_event_data_offsets_scsi_eh_wakeup {}; + +typedef void (*btf_trace_scsi_dispatch_cmd_start)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_error)(void *, struct scsi_cmnd *, int); + +typedef void (*btf_trace_scsi_dispatch_cmd_done)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_timeout)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_eh_wakeup)(void *, struct Scsi_Host *); + +enum scsi_vpd_parameters { + SCSI_VPD_HEADER_SIZE = 4, + SCSI_VPD_LIST_SIZE = 36, +}; + +struct fixed_mdio_bus { + struct mii_bus *mii_bus; + struct list_head phys; +}; + +struct fixed_phy { + int addr; + struct phy_device *phydev; + struct fixed_phy_status status; + bool no_carrier; + int (*link_update)(struct net_device *, struct fixed_phy_status *); + struct list_head node; + struct gpio_desc *link_gpiod; +}; + +struct usb_otg_caps { + u16 otg_rev; + bool hnp_support; + bool srp_support; + bool adp_support; +}; + +enum usb_dr_mode { + USB_DR_MODE_UNKNOWN = 0, + USB_DR_MODE_HOST = 1, + USB_DR_MODE_PERIPHERAL = 2, + USB_DR_MODE_OTG = 3, +}; + +struct input_dev_poller { + void (*poll)(struct input_dev *); + unsigned int poll_interval; + unsigned int poll_interval_max; + unsigned int poll_interval_min; + 
struct input_dev *input; + struct delayed_work work; +}; + +struct md_setup_args { + int minor; + int partitioned; + int level; + int chunk; + char *device_names; +}; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +struct dmabuf_token { + __u32 token_start; + __u32 token_count; +}; + +struct linger { + int l_onoff; + int l_linger; +}; + +struct so_timestamping { + int flags; + int bind_phc; +}; + +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = 1, + SOF_TXTIME_REPORT_ERRORS = 2, + SOF_TXTIME_FLAGS_LAST = 2, + SOF_TXTIME_FLAGS_MASK = 3, +}; + +struct sock_txtime { + __kernel_clockid_t clockid; + __u32 flags; +}; + +enum sk_pacing { + SK_PACING_NONE = 0, + SK_PACING_NEEDED = 1, + SK_PACING_FQ = 2, +}; + +struct sock_skb_cb { + u32 dropcount; +}; + +enum { + SK_MEMINFO_RMEM_ALLOC = 0, + SK_MEMINFO_RCVBUF = 1, + SK_MEMINFO_WMEM_ALLOC = 2, + SK_MEMINFO_SNDBUF = 3, + SK_MEMINFO_FWD_ALLOC = 4, + SK_MEMINFO_WMEM_QUEUED = 5, + SK_MEMINFO_OPTMEM = 6, + SK_MEMINFO_BACKLOG = 7, + SK_MEMINFO_DROPS = 8, + SK_MEMINFO_VARS = 9, +}; + +enum sknetlink_groups { + SKNLGRP_NONE = 0, + SKNLGRP_INET_TCP_DESTROY = 1, + SKNLGRP_INET_UDP_DESTROY = 2, + SKNLGRP_INET6_TCP_DESTROY = 3, + SKNLGRP_INET6_UDP_DESTROY = 4, + __SKNLGRP_MAX = 5, +}; + +enum { + ETHTOOL_A_TS_STAT_UNSPEC = 0, + ETHTOOL_A_TS_STAT_TX_PKTS = 1, + ETHTOOL_A_TS_STAT_TX_LOST = 2, + ETHTOOL_A_TS_STAT_TX_ERR = 3, + __ETHTOOL_A_TS_STAT_CNT = 4, + ETHTOOL_A_TS_STAT_MAX = 3, +}; + +struct tsinfo_reply_data { + struct ethnl_reply_data base; + struct kernel_ethtool_ts_info ts_info; + struct ethtool_ts_stats stats; +}; + +struct nf_conntrack_nat_helper { + struct list_head list; + char mod_name[16]; + struct module *module; +}; + +enum nft_byteorder_ops { + NFT_BYTEORDER_NTOH = 0, + NFT_BYTEORDER_HTON = 1, +}; + +enum nft_byteorder_attributes { + NFTA_BYTEORDER_UNSPEC = 0, + NFTA_BYTEORDER_SREG = 1, + NFTA_BYTEORDER_DREG = 2, + NFTA_BYTEORDER_OP = 3, + NFTA_BYTEORDER_LEN = 4, + NFTA_BYTEORDER_SIZE = 5, + __NFTA_BYTEORDER_MAX = 6, +}; + +struct nft_byteorder { + u8 sreg; + u8 dreg; + enum nft_byteorder_ops op: 8; + u8 len; + u8 size; +}; + +struct fib_result_nl { + __be32 fl_addr; + u32 fl_mark; + unsigned char fl_tos; + unsigned char fl_scope; + unsigned char tb_id_in; + unsigned char tb_id; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + int err; +}; + +struct iptable_nat_pernet { + struct nf_hook_ops *nf_nat_ops; +}; + +struct brport_attribute { + struct attribute attr; + ssize_t (*show)(struct net_bridge_port *, char *); + int (*store)(struct net_bridge_port *, long unsigned int); + int (*store_raw)(struct net_bridge_port *, char *); +}; + +struct div_result { + u64 result_high; + u64 result_low; +}; + +typedef long unsigned int old_sigset_t; + +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + long unsigned int sa_flags; + __sigrestore_t sa_restorer; +}; + +enum { + TRACE_SIGNAL_DELIVERED = 0, + TRACE_SIGNAL_IGNORED = 1, + TRACE_SIGNAL_ALREADY_PENDING = 2, + TRACE_SIGNAL_OVERFLOW_FAIL = 3, + TRACE_SIGNAL_LOSE_INFO = 4, +}; + +struct trace_event_raw_signal_generate { + struct trace_entry ent; + int sig; + int errno; + int code; + char comm[16]; + pid_t pid; + int group; + int result; + char __data[0]; +}; + +struct trace_event_raw_signal_deliver { + struct trace_entry ent; + int sig; + int errno; + int code; + long unsigned int sa_handler; + long unsigned int sa_flags; + char __data[0]; +}; + +struct trace_event_data_offsets_signal_generate {}; + +struct 
trace_event_data_offsets_signal_deliver {}; + +typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, struct task_struct *, int, int); + +typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, struct k_sigaction *); + +enum sig_handler { + HANDLER_CURRENT = 0, + HANDLER_SIG_DFL = 1, + HANDLER_EXIT = 2, +}; + +struct msi_dev_domain { + struct xarray store; + struct irq_domain *domain; +}; + +struct msi_device_data { + long unsigned int properties; + struct mutex mutex; + struct msi_dev_domain __domains[1]; + long unsigned int __iter_idx; +}; + +struct msi_ctrl { + unsigned int domid; + unsigned int first; + unsigned int last; + unsigned int nirqs; +}; + +enum tick_broadcast_mode { + TICK_BROADCAST_OFF = 0, + TICK_BROADCAST_ON = 1, + TICK_BROADCAST_FORCE = 2, +}; + +enum pidcg_event { + PIDCG_MAX = 0, + PIDCG_FORKFAIL = 1, + NR_PIDCG_EVENTS = 2, +}; + +struct pids_cgroup { + struct cgroup_subsys_state css; + atomic64_t counter; + atomic64_t limit; + int64_t watermark; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + atomic64_t events[2]; + atomic64_t events_local[2]; +}; + +struct trace_buffer_meta { + __u32 meta_page_size; + __u32 meta_struct_len; + __u32 subbuf_size; + __u32 nr_subbufs; + struct { + __u64 lost_events; + __u32 id; + __u32 read; + } reader; + __u64 flags; + __u64 entries; + __u64 overrun; + __u64 read; + __u64 Reserved1; + __u64 Reserved2; +}; + +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING = 29, + RINGBUF_TYPE_TIME_EXTEND = 30, + RINGBUF_TYPE_TIME_STAMP = 31, +}; + +typedef bool (*ring_buffer_cond_fn)(void *); + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1, +}; + +struct ring_buffer_per_cpu; + +struct buffer_page; + +struct ring_buffer_iter { + struct ring_buffer_per_cpu *cpu_buffer; + long unsigned int head; + long unsigned int next_event; + struct buffer_page *head_page; + struct buffer_page *cache_reader_page; + long unsigned int cache_read; + long unsigned int cache_pages_removed; + long: 32; + u64 read_stamp; + u64 page_stamp; + struct ring_buffer_event *event; + size_t event_size; + int missed_events; + long: 32; +}; + +struct rb_irq_work { + struct irq_work work; + wait_queue_head_t waiters; + wait_queue_head_t full_waiters; + atomic_t seq; + bool waiters_pending; + bool full_waiters_pending; + bool wakeup_full; +}; + +struct trace_buffer { + unsigned int flags; + int cpus; + atomic_t record_disabled; + atomic_t resizing; + cpumask_var_t cpumask; + struct lock_class_key *reader_lock_key; + struct mutex mutex; + struct ring_buffer_per_cpu **buffers; + struct hlist_node node; + u64 (*clock)(); + struct rb_irq_work irq_work; + bool time_stamp_abs; + long unsigned int range_addr_start; + long unsigned int range_addr_end; + long int last_text_delta; + long int last_data_delta; + unsigned int subbuf_size; + unsigned int subbuf_order; + unsigned int max_data_size; +}; + +struct ring_buffer_meta { + int magic; + int struct_size; + long unsigned int text_addr; + long unsigned int data_addr; + long unsigned int first_buffer; + long unsigned int head_buffer; + long unsigned int commit_buffer; + __u32 subbuf_size; + __u32 nr_subbufs; + int buffers[0]; +}; + +enum { + RB_LEN_TIME_EXTEND = 8, + RB_LEN_TIME_STAMP = 8, +}; + +struct buffer_data_page { + u64 time_stamp; + local_t commit; + unsigned char data[0]; + long: 32; +}; + +struct buffer_data_read_page { + unsigned int order; + struct buffer_data_page *data; +}; + +struct buffer_page { + struct list_head 
list; + local_t write; + unsigned int read; + local_t entries; + long unsigned int real_end; + unsigned int order; + u32 id: 30; + u32 range: 1; + struct buffer_data_page *page; +}; + +struct rb_event_info { + u64 ts; + u64 delta; + u64 before; + u64 after; + long unsigned int length; + struct buffer_page *tail_page; + int add_timestamp; + long: 32; +}; + +enum { + RB_ADD_STAMP_NONE = 0, + RB_ADD_STAMP_EXTEND = 2, + RB_ADD_STAMP_ABSOLUTE = 4, + RB_ADD_STAMP_FORCE = 8, +}; + +enum { + RB_CTX_TRANSITION = 0, + RB_CTX_NMI = 1, + RB_CTX_IRQ = 2, + RB_CTX_SOFTIRQ = 3, + RB_CTX_NORMAL = 4, + RB_CTX_MAX = 5, +}; + +struct rb_time_struct { + local64_t time; +}; + +typedef struct rb_time_struct rb_time_t; + +struct ring_buffer_per_cpu { + int cpu; + atomic_t record_disabled; + atomic_t resize_disabled; + struct trace_buffer *buffer; + raw_spinlock_t reader_lock; + arch_spinlock_t lock; + struct lock_class_key lock_key; + struct buffer_data_page *free_page; + long unsigned int nr_pages; + unsigned int current_context; + struct list_head *pages; + long unsigned int cnt; + struct buffer_page *head_page; + struct buffer_page *tail_page; + struct buffer_page *commit_page; + struct buffer_page *reader_page; + long unsigned int lost_events; + long unsigned int last_overrun; + long unsigned int nest; + local_t entries_bytes; + local_t entries; + local_t overrun; + local_t commit_overrun; + local_t dropped_events; + local_t committing; + local_t commits; + local_t pages_touched; + local_t pages_lost; + local_t pages_read; + long int last_pages_touch; + size_t shortest_full; + long unsigned int read; + long unsigned int read_bytes; + rb_time_t write_stamp; + rb_time_t before_stamp; + u64 event_stamp[5]; + u64 read_stamp; + long unsigned int pages_removed; + unsigned int mapped; + unsigned int user_mapped; + struct mutex mapping_lock; + long unsigned int *subbuf_ids; + struct trace_buffer_meta *meta_page; + struct ring_buffer_meta *ring_meta; + long int nr_pages_to_update; + struct list_head new_pages; + struct work_struct update_pages_work; + struct completion update_done; + struct rb_irq_work irq_work; +}; + +struct rb_wait_data { + struct rb_irq_work *irq_work; + int seq; +}; + +enum { + BTF_VAR_STATIC = 0, + BTF_VAR_GLOBAL_ALLOCATED = 1, + BTF_VAR_GLOBAL_EXTERN = 2, +}; + +enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + BTF_FUNC_EXTERN = 2, +}; + +struct btf_var { + __u32 linkage; +}; + +struct btf_var_secinfo { + __u32 type; + __u32 offset; + __u32 size; +}; + +struct btf_decl_tag { + __s32 component_idx; +}; + +struct btf_enum64 { + __u32 name_off; + __u32 val_lo32; + __u32 val_hi32; +}; + +struct bpf_raw_tracepoint_args { + __u64 args[0]; +}; + +struct bpf_sockopt { + union { + struct bpf_sock *sk; + }; + union { + void *optval; + }; + union { + void *optval_end; + }; + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + +enum { + BTF_F_COMPACT = 1, + BTF_F_NONAME = 2, + BTF_F_PTR_RAW = 4, + BTF_F_ZERO = 8, +}; + +enum bpf_core_relo_kind { + BPF_CORE_FIELD_BYTE_OFFSET = 0, + BPF_CORE_FIELD_BYTE_SIZE = 1, + BPF_CORE_FIELD_EXISTS = 2, + BPF_CORE_FIELD_SIGNED = 3, + BPF_CORE_FIELD_LSHIFT_U64 = 4, + BPF_CORE_FIELD_RSHIFT_U64 = 5, + BPF_CORE_TYPE_ID_LOCAL = 6, + BPF_CORE_TYPE_ID_TARGET = 7, + BPF_CORE_TYPE_EXISTS = 8, + BPF_CORE_TYPE_SIZE = 9, + BPF_CORE_ENUMVAL_EXISTS = 10, + BPF_CORE_ENUMVAL_VALUE = 11, + BPF_CORE_TYPE_MATCHES = 12, +}; + +struct bpf_core_relo { + __u32 insn_off; + __u32 type_id; + __u32 access_str_off; + enum bpf_core_relo_kind kind; +}; + +struct 
bpf_perf_event_data { + bpf_user_pt_regs_t regs; + __u64 sample_period; + __u64 addr; +}; + +struct btf_id_set { + u32 cnt; + u32 ids[0]; +}; + +struct btf_struct_metas { + u32 cnt; + struct btf_struct_meta types[0]; +}; + +enum { + BTF_FIELDS_MAX = 11, +}; + +struct bpf_core_ctx { + struct bpf_verifier_log *log; + const struct btf *btf; +}; + +struct bpf_core_cand { + const struct btf *btf; + __u32 id; +}; + +struct bpf_core_cand_list { + struct bpf_core_cand *cands; + int len; +}; + +struct bpf_core_accessor { + __u32 type_id; + __u32 idx; + const char *name; +}; + +struct bpf_core_spec { + const struct btf *btf; + struct bpf_core_accessor spec[64]; + __u32 root_type_id; + enum bpf_core_relo_kind relo_kind; + int len; + int raw_spec[64]; + int raw_len; + __u32 bit_offset; +}; + +struct bpf_core_relo_res { + __u64 orig_val; + __u64 new_val; + bool poison; + bool validate; + bool fail_memsz_adjust; + __u32 orig_sz; + __u32 orig_type_id; + __u32 new_sz; + __u32 new_type_id; + long: 32; +}; + +enum btf_kfunc_hook { + BTF_KFUNC_HOOK_COMMON = 0, + BTF_KFUNC_HOOK_XDP = 1, + BTF_KFUNC_HOOK_TC = 2, + BTF_KFUNC_HOOK_STRUCT_OPS = 3, + BTF_KFUNC_HOOK_TRACING = 4, + BTF_KFUNC_HOOK_SYSCALL = 5, + BTF_KFUNC_HOOK_FMODRET = 6, + BTF_KFUNC_HOOK_CGROUP = 7, + BTF_KFUNC_HOOK_SCHED_ACT = 8, + BTF_KFUNC_HOOK_SK_SKB = 9, + BTF_KFUNC_HOOK_SOCKET_FILTER = 10, + BTF_KFUNC_HOOK_LWT = 11, + BTF_KFUNC_HOOK_NETFILTER = 12, + BTF_KFUNC_HOOK_KPROBE = 13, + BTF_KFUNC_HOOK_MAX = 14, +}; + +enum { + BTF_KFUNC_SET_MAX_CNT = 256, + BTF_DTOR_KFUNC_MAX_CNT = 256, + BTF_KFUNC_FILTER_MAX_CNT = 16, +}; + +struct btf_kfunc_hook_filter { + btf_kfunc_filter_t filters[16]; + u32 nr_filters; +}; + +struct btf_kfunc_set_tab { + struct btf_id_set8 *sets[14]; + struct btf_kfunc_hook_filter hook_filters[14]; +}; + +struct btf_id_dtor_kfunc_tab { + u32 cnt; + struct btf_id_dtor_kfunc dtors[0]; +}; + +struct btf_struct_ops_tab { + u32 cnt; + u32 capacity; + struct bpf_struct_ops_desc ops[0]; +}; + +enum verifier_phase { + CHECK_META = 0, + CHECK_TYPE = 1, +}; + +struct resolve_vertex { + const struct btf_type *t; + u32 type_id; + u16 next_member; +}; + +enum visit_state { + NOT_VISITED = 0, + VISITED = 1, + RESOLVED = 2, +}; + +enum resolve_mode { + RESOLVE_TBD = 0, + RESOLVE_PTR = 1, + RESOLVE_STRUCT_OR_ARRAY = 2, +}; + +struct btf_sec_info { + u32 off; + u32 len; +}; + +struct btf_verifier_env { + struct btf *btf; + u8 *visit_states; + struct resolve_vertex stack[32]; + struct bpf_verifier_log log; + u32 log_type_id; + u32 top_stack; + enum verifier_phase phase; + enum resolve_mode resolve_mode; +}; + +struct btf_show { + u64 flags; + void *target; + void (*showfn)(struct btf_show *, const char *, struct __va_list_tag *); + const struct btf *btf; + struct { + u8 depth; + u8 depth_to_show; + u8 depth_check; + u8 array_member: 1; + u8 array_terminated: 1; + u16 array_encoding; + u32 type_id; + int status; + const struct btf_type *type; + const struct btf_member *member; + char name[80]; + } state; + struct { + u32 size; + void *head; + void *data; + u8 safe[32]; + } obj; +}; + +struct btf_kind_operations { + s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); + int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); + int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + void 
(*log_details)(struct btf_verifier_env *, const struct btf_type *); + void (*show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct btf_show *); +}; + +enum { + BTF_FIELD_IGNORE = 0, + BTF_FIELD_FOUND = 1, +}; + +struct btf_field_info { + enum btf_field_type type; + u32 off; + union { + struct { + u32 type_id; + } kptr; + struct { + const char *node_name; + u32 value_btf_id; + } graph_root; + }; +}; + +struct bpf_ctx_convert { + struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog; + struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern; + struct xdp_md BPF_PROG_TYPE_XDP_prog; + struct xdp_buff BPF_PROG_TYPE_XDP_kern; + struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog; + struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern; + struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog; + struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern; + struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog; + struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog; + struct sk_buff BPF_PROG_TYPE_LWT_IN_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog; + struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern; + struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog; + struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern; + struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog; + struct sk_buff BPF_PROG_TYPE_SK_SKB_kern; + struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog; + struct sk_msg BPF_PROG_TYPE_SK_MSG_kern; + long: 32; + struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog; + struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern; + bpf_user_pt_regs_t BPF_PROG_TYPE_KPROBE_prog; + struct pt_regs BPF_PROG_TYPE_KPROBE_kern; + __u64 BPF_PROG_TYPE_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_TRACEPOINT_kern; + struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog; + struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern; + long: 32; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern; + void *BPF_PROG_TYPE_TRACING_prog; + void *BPF_PROG_TYPE_TRACING_kern; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern; + struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog; + struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern; + struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog; + struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern; + struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog; + struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern; + struct bpf_sk_lookup BPF_PROG_TYPE_SK_LOOKUP_prog; + struct bpf_sk_lookup_kern BPF_PROG_TYPE_SK_LOOKUP_kern; + void *BPF_PROG_TYPE_STRUCT_OPS_prog; + void *BPF_PROG_TYPE_STRUCT_OPS_kern; + void *BPF_PROG_TYPE_EXT_prog; + void *BPF_PROG_TYPE_EXT_kern; + void *BPF_PROG_TYPE_SYSCALL_prog; + void *BPF_PROG_TYPE_SYSCALL_kern; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_prog; + struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_kern; + long: 32; +}; + +enum { + __ctx_convertBPF_PROG_TYPE_SOCKET_FILTER = 0, + 
__ctx_convertBPF_PROG_TYPE_SCHED_CLS = 1, + __ctx_convertBPF_PROG_TYPE_SCHED_ACT = 2, + __ctx_convertBPF_PROG_TYPE_XDP = 3, + __ctx_convertBPF_PROG_TYPE_CGROUP_SKB = 4, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK = 5, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK_ADDR = 6, + __ctx_convertBPF_PROG_TYPE_LWT_IN = 7, + __ctx_convertBPF_PROG_TYPE_LWT_OUT = 8, + __ctx_convertBPF_PROG_TYPE_LWT_XMIT = 9, + __ctx_convertBPF_PROG_TYPE_LWT_SEG6LOCAL = 10, + __ctx_convertBPF_PROG_TYPE_SOCK_OPS = 11, + __ctx_convertBPF_PROG_TYPE_SK_SKB = 12, + __ctx_convertBPF_PROG_TYPE_SK_MSG = 13, + __ctx_convertBPF_PROG_TYPE_FLOW_DISSECTOR = 14, + __ctx_convertBPF_PROG_TYPE_KPROBE = 15, + __ctx_convertBPF_PROG_TYPE_TRACEPOINT = 16, + __ctx_convertBPF_PROG_TYPE_PERF_EVENT = 17, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT = 18, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 19, + __ctx_convertBPF_PROG_TYPE_TRACING = 20, + __ctx_convertBPF_PROG_TYPE_CGROUP_DEVICE = 21, + __ctx_convertBPF_PROG_TYPE_CGROUP_SYSCTL = 22, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCKOPT = 23, + __ctx_convertBPF_PROG_TYPE_SK_REUSEPORT = 24, + __ctx_convertBPF_PROG_TYPE_SK_LOOKUP = 25, + __ctx_convertBPF_PROG_TYPE_STRUCT_OPS = 26, + __ctx_convertBPF_PROG_TYPE_EXT = 27, + __ctx_convertBPF_PROG_TYPE_SYSCALL = 28, + __ctx_convertBPF_PROG_TYPE_NETFILTER = 29, + __ctx_convert_unused = 30, +}; + +enum bpf_struct_walk_result { + WALK_SCALAR = 0, + WALK_PTR = 1, + WALK_STRUCT = 2, +}; + +struct bpf_cand_cache { + const char *name; + u32 name_len; + u16 kind; + u16 cnt; + struct { + const struct btf *btf; + u32 id; + } cands[0]; +}; + +enum btf_arg_tag { + ARG_TAG_CTX = 1, + ARG_TAG_NONNULL = 2, + ARG_TAG_TRUSTED = 4, + ARG_TAG_NULLABLE = 8, + ARG_TAG_ARENA = 16, +}; + +struct btf_show_snprintf { + struct btf_show show; + int len_left; + int len; +}; + +enum { + BTF_MODULE_F_LIVE = 1, +}; + +struct btf_module { + struct list_head list; + struct module *module; + struct btf *btf; + struct bin_attribute *sysfs_attr; + int flags; +}; + +typedef u64 (*btf_bpf_btf_find_by_name_kind)(char *, int, u32, int); + +struct dma_block { + struct dma_block *next_block; + dma_addr_t dma; +}; + +struct dma_pool { + struct list_head page_list; + spinlock_t lock; + struct dma_block *next_block; + size_t nr_blocks; + size_t nr_active; + size_t nr_pages; + struct device *dev; + unsigned int size; + unsigned int allocation; + unsigned int boundary; + char name[32]; + struct list_head pools; +}; + +struct dma_page { + struct list_head page_list; + void *vaddr; + dma_addr_t dma; +}; + +struct user_arg_ptr { + union { + const char * const *native; + } ptr; +}; + +struct iomap_swapfile_info { + struct iomap iomap; + struct swap_info_struct *sis; + long: 32; + uint64_t lowest_ppage; + uint64_t highest_ppage; + long unsigned int nr_pages; + int nr_extents; + struct file *file; + long: 32; +}; + +struct vmcore { + struct list_head list; + long long unsigned int paddr; + long long unsigned int size; + loff_t offset; +}; + +struct elf64_note { + Elf64_Word n_namesz; + Elf64_Word n_descsz; + Elf64_Word n_type; +}; + +typedef struct elf64_note Elf64_Nhdr; + +struct vmcore_cb { + bool (*pfn_is_ram)(struct vmcore_cb *, long unsigned int); + struct list_head next; +}; + +enum cc_attr { + CC_ATTR_MEM_ENCRYPT = 0, + CC_ATTR_HOST_MEM_ENCRYPT = 1, + CC_ATTR_GUEST_MEM_ENCRYPT = 2, + CC_ATTR_GUEST_STATE_ENCRYPT = 3, + CC_ATTR_GUEST_UNROLL_STRING_IO = 4, + CC_ATTR_GUEST_SEV_SNP = 5, + CC_ATTR_HOST_SEV_SNP = 6, +}; + +struct ext4_dir_entry { + __le32 inode; + __le16 rec_len; + __le16 name_len; + 
char name[255]; +}; + +struct ext4_dir_entry_tail { + __le32 det_reserved_zero1; + __le16 det_rec_len; + __u8 det_reserved_zero2; + __u8 det_reserved_ft; + __le32 det_checksum; +}; + +struct ext4_filename { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + struct dx_hash_info hinfo; +}; + +typedef enum { + EITHER = 0, + INDEX = 1, + DIRENT = 2, + DIRENT_HTREE = 3, +} dirblock_type_t; + +struct fake_dirent { + __le32 inode; + __le16 rec_len; + u8 name_len; + u8 file_type; +}; + +struct dx_countlimit { + __le16 limit; + __le16 count; +}; + +struct dx_entry { + __le32 hash; + __le32 block; +}; + +struct dx_root_info { + __le32 reserved_zero; + u8 hash_version; + u8 info_length; + u8 indirect_levels; + u8 unused_flags; +}; + +struct dx_root { + struct fake_dirent dot; + char dot_name[4]; + struct fake_dirent dotdot; + char dotdot_name[4]; + struct dx_root_info info; + struct dx_entry entries[0]; +}; + +struct dx_node { + struct fake_dirent fake; + struct dx_entry entries[0]; +}; + +struct dx_frame { + struct buffer_head *bh; + struct dx_entry *entries; + struct dx_entry *at; +}; + +struct dx_map_entry { + u32 hash; + u16 offs; + u16 size; +}; + +struct dx_tail { + u32 dt_reserved; + __le32 dt_checksum; +}; + +struct ext4_renament { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool is_dir; + int dir_nlink_delta; + struct buffer_head *bh; + struct ext4_dir_entry_2 *de; + int inlined; + struct buffer_head *dir_bh; + struct ext4_dir_entry_2 *parent_de; + int dir_inlined; +}; + +typedef u32 unicode_t; + +struct utf8_table { + int cmask; + int cval; + int shift; + long int lmask; + long int lval; +}; + +enum fuse_notify_code { + FUSE_NOTIFY_POLL = 1, + FUSE_NOTIFY_INVAL_INODE = 2, + FUSE_NOTIFY_INVAL_ENTRY = 3, + FUSE_NOTIFY_STORE = 4, + FUSE_NOTIFY_RETRIEVE = 5, + FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_RESEND = 7, + FUSE_NOTIFY_CODE_MAX = 8, +}; + +struct fuse_forget_in { + uint64_t nlookup; +}; + +struct fuse_batch_forget_in { + uint32_t count; + uint32_t dummy; +}; + +struct fuse_interrupt_in { + uint64_t unique; +}; + +struct fuse_notify_inval_inode_out { + uint64_t ino; + int64_t off; + int64_t len; +}; + +struct fuse_notify_inval_entry_out { + uint64_t parent; + uint32_t namelen; + uint32_t flags; +}; + +struct fuse_notify_delete_out { + uint64_t parent; + uint64_t child; + uint32_t namelen; + uint32_t padding; +}; + +struct fuse_notify_store_out { + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_out { + uint64_t notify_unique; + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_in { + uint64_t dummy1; + uint64_t offset; + uint32_t size; + uint32_t dummy2; + uint64_t dummy3; + uint64_t dummy4; +}; + +enum fuse_req_flag { + FR_ISREPLY = 0, + FR_FORCE = 1, + FR_BACKGROUND = 2, + FR_WAITING = 3, + FR_ABORTED = 4, + FR_INTERRUPTED = 5, + FR_LOCKED = 6, + FR_PENDING = 7, + FR_SENT = 8, + FR_FINISHED = 9, + FR_PRIVATE = 10, + FR_ASYNC = 11, +}; + +struct trace_event_raw_fuse_request_send { + struct trace_entry ent; + dev_t connection; + long: 32; + uint64_t unique; + enum fuse_opcode opcode; + uint32_t len; + char __data[0]; +}; + +struct trace_event_raw_fuse_request_end { + struct trace_entry ent; + dev_t connection; + long: 32; + uint64_t unique; + uint32_t len; + int32_t error; + char __data[0]; +}; + +struct trace_event_data_offsets_fuse_request_send {}; + +struct trace_event_data_offsets_fuse_request_end {}; + +typedef void 
(*btf_trace_fuse_request_send)(void *, const struct fuse_req *); + +typedef void (*btf_trace_fuse_request_end)(void *, const struct fuse_req *); + +struct fuse_copy_state { + int write; + struct fuse_req *req; + struct iov_iter *iter; + struct pipe_buffer *pipebufs; + struct pipe_buffer *currbuf; + struct pipe_inode_info *pipe; + long unsigned int nr_segs; + struct page *pg; + unsigned int len; + unsigned int offset; + unsigned int move_pages: 1; +}; + +struct fuse_retrieve_args { + struct fuse_args_pages ap; + struct fuse_notify_retrieve_in inarg; +}; + +struct scrub_sector_verification; + +struct scrub_stripe { + struct scrub_ctx *sctx; + struct btrfs_block_group *bg; + struct page *pages[16]; + struct scrub_sector_verification *sectors; + struct btrfs_device *dev; + u64 logical; + u64 physical; + u16 mirror_num; + u16 nr_sectors; + u16 nr_data_extents; + u16 nr_meta_extents; + atomic_t pending_io; + wait_queue_head_t io_wait; + wait_queue_head_t repair_wait; + long unsigned int state; + long unsigned int extent_sector_bitmap; + long unsigned int init_error_bitmap; + unsigned int init_nr_io_errors; + unsigned int init_nr_csum_errors; + unsigned int init_nr_meta_errors; + long unsigned int error_bitmap; + long unsigned int io_error_bitmap; + long unsigned int csum_error_bitmap; + long unsigned int meta_error_bitmap; + long unsigned int write_error_bitmap; + spinlock_t write_error_lock; + u8 *csums; + struct work_struct work; +}; + +struct scrub_ctx { + struct scrub_stripe stripes[128]; + struct scrub_stripe *raid56_data_stripes; + struct btrfs_fs_info *fs_info; + struct btrfs_path extent_path; + struct btrfs_path csum_path; + int first_free; + int cur_stripe; + atomic_t cancel_req; + int readonly; + ktime_t throttle_deadline; + u64 throttle_sent; + int is_dev_replace; + long: 32; + u64 write_pointer; + struct mutex wr_lock; + struct btrfs_device *wr_tgtdev; + struct btrfs_scrub_progress stat; + spinlock_t stat_lock; + refcount_t refs; +}; + +struct scrub_sector_verification { + bool is_metadata; + long: 32; + union { + u8 *csum; + u64 generation; + }; +}; + +enum scrub_stripe_flags { + SCRUB_STRIPE_FLAG_INITIALIZED = 0, + SCRUB_STRIPE_FLAG_REPAIR_DONE = 1, + SCRUB_STRIPE_FLAG_NO_REPORT = 2, +}; + +struct scrub_warning { + struct btrfs_path *path; + long: 32; + u64 extent_item_size; + const char *errstr; + long: 32; + u64 physical; + u64 logical; + struct btrfs_device *dev; + long: 32; +}; + +struct btrfs_dio_data { + ssize_t submitted; + struct extent_changeset *data_reserved; + struct btrfs_ordered_extent *ordered; + bool data_space_reserved; + bool nocow_done; +}; + +struct btrfs_dio_private { + u64 file_offset; + u32 bytes; + long: 32; + struct btrfs_bio bbio; +}; + +struct crypto_akcipher_sync_data { + struct crypto_akcipher *tfm; + const void *src; + void *dst; + unsigned int slen; + unsigned int dlen; + struct akcipher_request *req; + struct crypto_wait cwait; + struct scatterlist sg; + u8 *buf; +}; + +struct queue_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct gendisk *, char *); + ssize_t (*store)(struct gendisk *, const char *, size_t); + void (*load_module)(struct gendisk *, const char *, size_t); +}; + +enum { + IORING_MEM_REGION_REG_WAIT_ARG = 1, +}; + +struct io_uring_mem_region_reg { + __u64 region_uptr; + __u64 flags; + __u64 __resv[2]; +}; + +struct io_uring_probe_op { + __u8 op; + __u8 resv; + __u16 flags; + __u32 resv2; +}; + +struct io_uring_probe { + __u8 last_op; + __u8 ops_len; + __u16 resv; + __u32 resv2[3]; + struct io_uring_probe_op ops[0]; +}; + 
+struct io_uring_restriction { + __u16 opcode; + union { + __u8 register_op; + __u8 sqe_op; + __u8 sqe_flags; + }; + __u8 resv; + __u32 resv2[3]; +}; + +struct io_uring_clock_register { + __u32 clockid; + __u32 __resv[3]; +}; + +enum io_uring_register_restriction_op { + IORING_RESTRICTION_REGISTER_OP = 0, + IORING_RESTRICTION_SQE_OP = 1, + IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2, + IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3, + IORING_RESTRICTION_LAST = 4, +}; + +struct io_uring_file_index_range { + __u32 off; + __u32 len; + __u64 resv; +}; + +struct io_ring_ctx_rings { + short unsigned int n_ring_pages; + short unsigned int n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; + struct io_uring_sqe *sq_sqes; + struct io_rings *rings; +}; + +typedef struct { + FSE_CTable CTable[59]; + U32 scratchBuffer[41]; + unsigned int count[13]; + S16 norm[13]; +} HUF_CompressWeightsWksp; + +typedef struct { + HUF_CompressWeightsWksp wksp; + BYTE bitsToWeight[13]; + BYTE huffWeight[255]; +} HUF_WriteCTableWksp; + +struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +}; + +typedef struct nodeElt_s nodeElt; + +typedef struct { + U16 base; + U16 curr; +} rankPos; + +typedef nodeElt huffNodeTable[512]; + +typedef struct { + huffNodeTable huffNodeTbl; + rankPos rankPosition[192]; +} HUF_buildCTable_wksp_tables; + +typedef struct { + size_t bitContainer[2]; + size_t bitPos[2]; + BYTE *startPtr; + BYTE *ptr; + BYTE *endPtr; +} HUF_CStream_t; + +typedef enum { + HUF_singleStream = 0, + HUF_fourStreams = 1, +} HUF_nbStreams_e; + +typedef struct { + unsigned int count[256]; + HUF_CElt CTable[257]; + union { + HUF_buildCTable_wksp_tables buildCTable_wksp; + HUF_WriteCTableWksp writeCTable_wksp; + U32 hist_wksp[1024]; + } wksps; +} HUF_compress_tables_t; + +struct vc_selection { + struct mutex lock; + struct vc_data *cons; + char *buffer; + unsigned int buf_len; + volatile int start; + int end; +}; + +struct container_dev { + struct device dev; + int (*offline)(struct container_dev *); + long: 32; +}; + +enum { + ATA_EH_SPDN_NCQ_OFF = 1, + ATA_EH_SPDN_SPEED_DOWN = 2, + ATA_EH_SPDN_FALLBACK_TO_PIO = 4, + ATA_EH_SPDN_KEEP_ERRORS = 8, + ATA_EFLAG_IS_IO = 1, + ATA_EFLAG_DUBIOUS_XFER = 2, + ATA_EFLAG_OLD_ER = -2147483648, + ATA_ECAT_NONE = 0, + ATA_ECAT_ATA_BUS = 1, + ATA_ECAT_TOUT_HSM = 2, + ATA_ECAT_UNK_DEV = 3, + ATA_ECAT_DUBIOUS_NONE = 4, + ATA_ECAT_DUBIOUS_ATA_BUS = 5, + ATA_ECAT_DUBIOUS_TOUT_HSM = 6, + ATA_ECAT_DUBIOUS_UNK_DEV = 7, + ATA_ECAT_NR = 8, + ATA_EH_CMD_DFL_TIMEOUT = 5000, + ATA_EH_RESET_COOL_DOWN = 5000, + ATA_EH_PRERESET_TIMEOUT = 10000, + ATA_EH_FASTDRAIN_INTERVAL = 3000, + ATA_EH_UA_TRIES = 5, + ATA_EH_PROBE_TRIAL_INTERVAL = 60000, + ATA_EH_PROBE_TRIALS = 2, +}; + +struct ata_eh_cmd_timeout_ent { + const u8 *commands; + const unsigned int *timeouts; +}; + +struct speed_down_verdict_arg { + u64 since; + int xfer_ok; + int nr_errors[8]; + long: 32; +}; + +struct usb_dev_cap_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +enum power_supply_notifier_events { + PSY_EVENT_PROP_CHANGED = 0, +}; + +struct power_supply_config { + struct device_node *of_node; + struct fwnode_handle *fwnode; + void *drv_data; + const struct attribute_group **attr_grp; + char **supplied_to; + size_t num_supplicants; + bool no_wakeup_source; +}; + +struct psy_am_i_supplied_data { + struct power_supply *psy; + unsigned int count; +}; + +struct psy_get_supplier_prop_data { + struct power_supply *psy; + enum power_supply_property psp; + union power_supply_propval 
*val; +}; + +typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); + +struct of_intc_desc { + struct list_head list; + of_irq_init_cb_t irq_init_cb; + struct device_node *dev; + struct device_node *interrupt_parent; +}; + +enum { + NDA_UNSPEC = 0, + NDA_DST = 1, + NDA_LLADDR = 2, + NDA_CACHEINFO = 3, + NDA_PROBES = 4, + NDA_VLAN = 5, + NDA_PORT = 6, + NDA_VNI = 7, + NDA_IFINDEX = 8, + NDA_MASTER = 9, + NDA_LINK_NETNSID = 10, + NDA_SRC_VNI = 11, + NDA_PROTOCOL = 12, + NDA_NH_ID = 13, + NDA_FDB_EXT_ATTRS = 14, + NDA_FLAGS_EXT = 15, + NDA_NDM_STATE_MASK = 16, + NDA_NDM_FLAGS_MASK = 17, + __NDA_MAX = 18, +}; + +struct nda_cacheinfo { + __u32 ndm_confirmed; + __u32 ndm_used; + __u32 ndm_updated; + __u32 ndm_refcnt; +}; + +struct ndt_stats { + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; + __u64 ndts_table_fulls; +}; + +enum { + NDTPA_UNSPEC = 0, + NDTPA_IFINDEX = 1, + NDTPA_REFCNT = 2, + NDTPA_REACHABLE_TIME = 3, + NDTPA_BASE_REACHABLE_TIME = 4, + NDTPA_RETRANS_TIME = 5, + NDTPA_GC_STALETIME = 6, + NDTPA_DELAY_PROBE_TIME = 7, + NDTPA_QUEUE_LEN = 8, + NDTPA_APP_PROBES = 9, + NDTPA_UCAST_PROBES = 10, + NDTPA_MCAST_PROBES = 11, + NDTPA_ANYCAST_DELAY = 12, + NDTPA_PROXY_DELAY = 13, + NDTPA_PROXY_QLEN = 14, + NDTPA_LOCKTIME = 15, + NDTPA_QUEUE_LENBYTES = 16, + NDTPA_MCAST_REPROBES = 17, + NDTPA_PAD = 18, + NDTPA_INTERVAL_PROBE_TIME_MS = 19, + __NDTPA_MAX = 20, +}; + +struct ndtmsg { + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config { + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; + __u32 ndtc_last_rand; + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +enum { + NDTA_UNSPEC = 0, + NDTA_NAME = 1, + NDTA_THRESH1 = 2, + NDTA_THRESH2 = 3, + NDTA_THRESH3 = 4, + NDTA_CONFIG = 5, + NDTA_PARMS = 6, + NDTA_STATS = 7, + NDTA_GC_INTERVAL = 8, + NDTA_PAD = 9, + __NDTA_MAX = 10, +}; + +struct neigh_dump_filter { + int master_idx; + int dev_idx; +}; + +struct neigh_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table neigh_vars[21]; +}; + +struct sockaddr_nl { + __kernel_sa_family_t nl_family; + short unsigned int nl_pad; + __u32 nl_pid; + __u32 nl_groups; +}; + +struct nlmsgerr { + int error; + struct nlmsghdr msg; +}; + +enum nlmsgerr_attrs { + NLMSGERR_ATTR_UNUSED = 0, + NLMSGERR_ATTR_MSG = 1, + NLMSGERR_ATTR_OFFS = 2, + NLMSGERR_ATTR_COOKIE = 3, + NLMSGERR_ATTR_POLICY = 4, + NLMSGERR_ATTR_MISS_TYPE = 5, + NLMSGERR_ATTR_MISS_NEST = 6, + __NLMSGERR_ATTR_MAX = 7, + NLMSGERR_ATTR_MAX = 6, +}; + +struct nl_pktinfo { + __u32 group; +}; + +enum { + NETLINK_UNCONNECTED = 0, + NETLINK_CONNECTED = 1, +}; + +enum netlink_skb_flags { + NETLINK_SKB_DST = 8, +}; + +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +struct trace_event_raw_netlink_extack { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_netlink_extack { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_netlink_extack)(void *, const char *); + +enum { + NETLINK_F_KERNEL_SOCKET = 0, + NETLINK_F_RECV_PKTINFO = 1, + NETLINK_F_BROADCAST_SEND_ERROR = 2, + NETLINK_F_RECV_NO_ENOBUFS = 3, + NETLINK_F_LISTEN_ALL_NSID = 4, + NETLINK_F_CAP_ACK = 5, + 
NETLINK_F_EXT_ACK = 6, + NETLINK_F_STRICT_CHK = 7, +}; + +struct netlink_sock { + struct sock sk; + long unsigned int flags; + u32 portid; + u32 dst_portid; + u32 dst_group; + u32 subscriptions; + u32 ngroups; + long unsigned int *groups; + long unsigned int state; + size_t max_recvmsg_len; + wait_queue_head_t wait; + bool bound; + bool cb_running; + int dump_done_errno; + struct netlink_callback cb; + struct mutex nl_cb_mutex; + void (*netlink_rcv)(struct sk_buff *); + int (*netlink_bind)(struct net *, int); + void (*netlink_unbind)(struct net *, int); + void (*netlink_release)(struct sock *, long unsigned int *); + struct module *module; + struct rhash_head node; + struct callback_head rcu; +}; + +struct listeners; + +struct netlink_table { + struct rhashtable hash; + struct hlist_head mc_list; + struct listeners *listeners; + unsigned int flags; + unsigned int groups; + struct mutex *cb_mutex; + struct module *module; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + void (*release)(struct sock *, long unsigned int *); + int registered; +}; + +struct listeners { + struct callback_head rcu; + long unsigned int masks[0]; +}; + +struct netlink_tap_net { + struct list_head netlink_tap_all; + struct mutex netlink_tap_lock; +}; + +struct netlink_compare_arg { + possible_net_t pnet; + u32 portid; +}; + +struct netlink_broadcast_data { + struct sock *exclude_sk; + struct net *net; + u32 portid; + u32 group; + int failure; + int delivery_failure; + int congested; + int delivered; + gfp_t allocation; + struct sk_buff *skb; + struct sk_buff *skb2; + int (*tx_filter)(struct sock *, struct sk_buff *, void *); + void *tx_data; +}; + +struct netlink_set_err_data { + struct sock *exclude_sk; + u32 portid; + u32 group; + int code; +}; + +struct nl_seq_iter { + struct seq_net_private p; + struct rhashtable_iter hti; + int link; +}; + +struct bpf_iter__netlink { + union { + struct bpf_iter_meta *meta; + }; + union { + struct netlink_sock *sk; + }; +}; + +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_queue_handler { + int (*outfn)(struct nf_queue_entry *, unsigned int); + void (*nf_hook_drop)(struct net *); +}; + +enum { + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, +}; + +enum tsq_flags { + TSQF_THROTTLED = 1, + TSQF_QUEUED = 2, + TCPF_TSQ_DEFERRED = 4, + TCPF_WRITE_TIMER_DEFERRED = 8, + TCPF_DELACK_TIMER_DEFERRED = 16, + TCPF_MTU_REDUCED_DEFERRED = 32, + TCPF_ACK_DEFERRED = 64, +}; + +struct mptcp_out_options {}; + +struct tcp_out_options { + u16 options; + u16 mss; + u8 ws; + u8 num_sack_blocks; + u8 hash_size; + u8 bpf_opt_len; + __u8 *hash_location; + __u32 tsval; + __u32 tsecr; + struct tcp_fastopen_cookie *fastopen_cookie; + struct mptcp_out_options mptcp; +}; + +struct tsq_tasklet { + struct tasklet_struct tasklet; + struct list_head head; +}; + +enum ipt_reject_with { + IPT_ICMP_NET_UNREACHABLE = 0, + IPT_ICMP_HOST_UNREACHABLE = 1, + IPT_ICMP_PROT_UNREACHABLE = 2, + IPT_ICMP_PORT_UNREACHABLE = 3, + IPT_ICMP_ECHOREPLY = 4, + IPT_ICMP_NET_PROHIBITED = 5, + IPT_ICMP_HOST_PROHIBITED = 6, + IPT_TCP_RESET = 7, + IPT_ICMP_ADMIN_PROHIBITED = 8, +}; + +struct ipt_reject_info { + enum ipt_reject_with with; +}; + +struct sr6_tlv { + __u8 type; + __u8 len; + __u8 data[0]; +}; + +enum { + SEG6_ATTR_UNSPEC = 0, + SEG6_ATTR_DST = 1, + SEG6_ATTR_DSTLEN = 2, + SEG6_ATTR_HMACKEYID = 3, + SEG6_ATTR_SECRET = 4, + 
SEG6_ATTR_SECRETLEN = 5, + SEG6_ATTR_ALGID = 6, + SEG6_ATTR_HMACINFO = 7, + __SEG6_ATTR_MAX = 8, +}; + +enum { + SEG6_CMD_UNSPEC = 0, + SEG6_CMD_SETHMAC = 1, + SEG6_CMD_DUMPHMAC = 2, + SEG6_CMD_SET_TUNSRC = 3, + SEG6_CMD_GET_TUNSRC = 4, + __SEG6_CMD_MAX = 5, +}; + +struct stp_proto { + unsigned char group_address[6]; + void (*rcv)(const struct stp_proto *, struct sk_buff *, struct net_device *); + void *data; +}; + +struct switchdev_brport { + struct net_device *dev; + const void *ctx; + struct notifier_block *atomic_nb; + struct notifier_block *blocking_nb; + bool tx_fwd_offload; +}; + +enum switchdev_notifier_type { + SWITCHDEV_FDB_ADD_TO_BRIDGE = 1, + SWITCHDEV_FDB_DEL_TO_BRIDGE = 2, + SWITCHDEV_FDB_ADD_TO_DEVICE = 3, + SWITCHDEV_FDB_DEL_TO_DEVICE = 4, + SWITCHDEV_FDB_OFFLOADED = 5, + SWITCHDEV_FDB_FLUSH_TO_BRIDGE = 6, + SWITCHDEV_PORT_OBJ_ADD = 7, + SWITCHDEV_PORT_OBJ_DEL = 8, + SWITCHDEV_PORT_ATTR_SET = 9, + SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE = 10, + SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE = 11, + SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE = 12, + SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE = 13, + SWITCHDEV_VXLAN_FDB_OFFLOADED = 14, + SWITCHDEV_BRPORT_OFFLOADED = 15, + SWITCHDEV_BRPORT_UNOFFLOADED = 16, + SWITCHDEV_BRPORT_REPLAY = 17, +}; + +struct switchdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; + const void *ctx; +}; + +struct switchdev_notifier_fdb_info { + struct switchdev_notifier_info info; + const unsigned char *addr; + u16 vid; + u8 added_by_user: 1; + u8 is_local: 1; + u8 locked: 1; + u8 offloaded: 1; +}; + +struct switchdev_notifier_brport_info { + struct switchdev_notifier_info info; + const struct switchdev_brport brport; +}; + +struct br_boolopt_multi { + __u32 optval; + __u32 optmask; +}; + +enum { + OPAL_P7IOC_NUM_PEST_REGS = 128, + OPAL_PHB3_NUM_PEST_REGS = 256, + OPAL_PHB4_NUM_PEST_REGS = 512, +}; + +struct ibm_feature { + long unsigned int cpu_features; + long unsigned int mmu_features; + unsigned int cpu_user_ftrs; + unsigned int cpu_user_ftrs2; + unsigned char pabyte; + unsigned char pabit; + unsigned char clear; +}; + +struct feature_property { + const char *name; + u32 min_value; + long unsigned int cpu_feature; + long unsigned int cpu_user_ftr; +}; + +struct serial_rs485 { + __u32 flags; + __u32 delay_rts_before_send; + __u32 delay_rts_after_send; + union { + __u32 padding[5]; + struct { + __u8 addr_recv; + __u8 addr_dest; + __u8 padding0[2]; + __u32 padding1[4]; + }; + }; +}; + +struct serial_iso7816 { + __u32 flags; + __u32 tg; + __u32 sc_fi; + __u32 sc_di; + __u32 clk; + __u32 reserved[5]; +}; + +struct uart_port; + +struct uart_ops { + unsigned int (*tx_empty)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int (*get_mctrl)(struct uart_port *); + void (*stop_tx)(struct uart_port *); + void (*start_tx)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + void (*send_xchar)(struct uart_port *, char); + void (*stop_rx)(struct uart_port *); + void (*start_rx)(struct uart_port *); + void (*enable_ms)(struct uart_port *); + void (*break_ctl)(struct uart_port *, int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*flush_buffer)(struct uart_port *); + void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + const char * (*type)(struct uart_port *); + void 
(*release_port)(struct uart_port *); + int (*request_port)(struct uart_port *); + void (*config_port)(struct uart_port *, int); + int (*verify_port)(struct uart_port *, struct serial_struct *); + int (*ioctl)(struct uart_port *, unsigned int, long unsigned int); +}; + +struct uart_icount { + __u32 cts; + __u32 dsr; + __u32 rng; + __u32 dcd; + __u32 rx; + __u32 tx; + __u32 frame; + __u32 overrun; + __u32 parity; + __u32 brk; + __u32 buf_overrun; +}; + +typedef u64 upf_t; + +typedef unsigned int upstat_t; + +struct serial_port_device; + +struct uart_state; + +struct uart_port { + spinlock_t lock; + long unsigned int iobase; + unsigned char *membase; + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int (*get_divisor)(struct uart_port *, unsigned int, unsigned int *); + void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); + int (*rs485_config)(struct uart_port *, struct ktermios *, struct serial_rs485 *); + int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *); + unsigned int ctrl_id; + unsigned int port_id; + unsigned int irq; + long unsigned int irqflags; + unsigned int uartclk; + unsigned int fifosize; + unsigned char x_char; + unsigned char regshift; + unsigned char iotype; + unsigned char quirks; + unsigned int read_status_mask; + unsigned int ignore_status_mask; + struct uart_state *state; + struct uart_icount icount; + struct console *cons; + upf_t flags; + upstat_t status; + bool hw_stopped; + unsigned int mctrl; + unsigned int frame_time; + unsigned int type; + const struct uart_ops *ops; + unsigned int custom_divisor; + unsigned int line; + unsigned int minor; + resource_size_t mapbase; + resource_size_t mapsize; + struct device *dev; + struct serial_port_device *port_dev; + long unsigned int sysrq; + u8 sysrq_ch; + unsigned char has_sysrq; + unsigned char sysrq_seq; + unsigned char hub6; + unsigned char suspended; + unsigned char console_reinit; + const char *name; + struct attribute_group *attr_group; + const struct attribute_group **tty_groups; + struct serial_rs485 rs485; + struct serial_rs485 rs485_supported; + struct gpio_desc *rs485_term_gpio; + struct gpio_desc *rs485_rx_during_tx_gpio; + struct serial_iso7816 iso7816; + void *private_data; +}; + +enum uart_pm_state { + UART_PM_STATE_ON = 0, + UART_PM_STATE_OFF = 3, + UART_PM_STATE_UNDEFINED = 4, +}; + +struct uart_state { + struct tty_port port; + enum uart_pm_state pm_state; + atomic_t refcount; + wait_queue_head_t remove_wait; + struct uart_port *uart_port; +}; + +struct uart_8250_port; + +struct plat_serial8250_port { + long unsigned int iobase; + void *membase; + resource_size_t mapbase; + resource_size_t mapsize; + unsigned int uartclk; + unsigned int irq; + long unsigned int irqflags; + void *private_data; + unsigned char regshift; + unsigned char iotype; + unsigned char hub6; + unsigned char has_sysrq; + unsigned int type; + upf_t flags; + u16 bugs; + unsigned 
int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + u32 (*dl_read)(struct uart_8250_port *); + void (*dl_write)(struct uart_8250_port *, u32); + void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); + long: 32; +}; + +struct mctrl_gpios; + +struct uart_8250_dma; + +struct uart_8250_ops; + +struct uart_8250_em485; + +struct uart_8250_port { + struct uart_port port; + struct timer_list timer; + struct list_head list; + u32 capabilities; + u16 bugs; + unsigned int tx_loadsz; + unsigned char acr; + unsigned char fcr; + unsigned char ier; + unsigned char lcr; + unsigned char mcr; + unsigned char cur_iotype; + unsigned int rpm_tx_active; + unsigned char canary; + unsigned char probe; + struct mctrl_gpios *gpios; + u16 lsr_saved_flags; + u16 lsr_save_mask; + unsigned char msr_saved_flags; + struct uart_8250_dma *dma; + const struct uart_8250_ops *ops; + u32 (*dl_read)(struct uart_8250_port *); + void (*dl_write)(struct uart_8250_port *, u32); + struct uart_8250_em485 *em485; + void (*rs485_start_tx)(struct uart_8250_port *); + void (*rs485_stop_tx)(struct uart_8250_port *); + struct delayed_work overrun_backoff; + u32 overrun_backoff_time_ms; +}; + +enum { + PLAT8250_DEV_LEGACY = -1, + PLAT8250_DEV_PLATFORM = 0, + PLAT8250_DEV_PLATFORM1 = 1, + PLAT8250_DEV_PLATFORM2 = 2, + PLAT8250_DEV_FOURPORT = 3, + PLAT8250_DEV_ACCENT = 4, + PLAT8250_DEV_BOCA = 5, + PLAT8250_DEV_EXAR_ST16C554 = 6, + PLAT8250_DEV_HUB6 = 7, + PLAT8250_DEV_AU1X00 = 8, + PLAT8250_DEV_SM501 = 9, +}; + +struct uart_8250_ops { + int (*setup_irq)(struct uart_8250_port *); + void (*release_irq)(struct uart_8250_port *); + void (*setup_timer)(struct uart_8250_port *); +}; + +struct uart_8250_em485 { + struct hrtimer start_tx_timer; + struct hrtimer stop_tx_timer; + struct hrtimer *active_timer; + struct uart_8250_port *port; + unsigned int tx_stopped: 1; + long: 32; +}; + +struct legacy_serial_info { + struct device_node *np; + unsigned int speed; + unsigned int clock; + int irq_check_parent; + phys_addr_t taddr; + void *early_addr; +}; + +struct ppc_bat { + u32 batu; + u32 batl; +}; + +struct hash_pte { + long unsigned int v: 1; + long unsigned int vsid: 24; + long unsigned int h: 1; + long unsigned int api: 6; + long unsigned int rpn: 20; + long unsigned int xpn: 3; + long unsigned int r: 1; + long unsigned int c: 1; + long unsigned int w: 1; + long unsigned int i: 1; + long unsigned int m: 1; + long unsigned int g: 1; + long unsigned int x: 1; + long unsigned int pp: 2; +}; + +struct batrange { + long unsigned int start; + long unsigned int limit; + phys_addr_t phys; +}; + +enum rtmutex_chainwalk { + RT_MUTEX_MIN_CHAINWALK = 0, + RT_MUTEX_FULL_CHAINWALK = 1, +}; + +struct trace_event_raw_dma_map { + struct trace_entry ent; + u32 __data_loc_device; + long: 32; + u64 phys_addr; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_dma_unmap { + struct trace_entry ent; + u32 __data_loc_device; + long: 32; + u64 addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_dma_alloc_class { + struct trace_entry ent; + u32 __data_loc_device; + 
void *virt_addr; + u64 dma_addr; + size_t size; + gfp_t flags; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_alloc_sgt { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + gfp_t flags; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_free_class { + struct trace_entry ent; + u32 __data_loc_device; + void *virt_addr; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_dma_free_sgt { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_raw_dma_map_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + u32 __data_loc_dma_addrs; + u32 __data_loc_lengths; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_map_sg_err { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_phys_addrs; + int err; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_unmap_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_addrs; + enum dma_data_direction dir; + long unsigned int attrs; + char __data[0]; +}; + +struct trace_event_raw_dma_sync_single { + struct trace_entry ent; + u32 __data_loc_device; + long: 32; + u64 dma_addr; + size_t size; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_raw_dma_sync_sg { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_dma_addrs; + u32 __data_loc_lengths; + enum dma_data_direction dir; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_map { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_unmap { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_alloc_class { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_alloc_sgt { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_free_class { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_free_sgt { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_map_sg { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; + u32 dma_addrs; + const void *dma_addrs_ptr_; + u32 lengths; + const void *lengths_ptr_; +}; + +struct trace_event_data_offsets_dma_map_sg_err { + u32 device; + const void *device_ptr_; + u32 phys_addrs; + const void *phys_addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_unmap_sg { + u32 device; + const void *device_ptr_; + u32 addrs; + const void *addrs_ptr_; +}; + +struct trace_event_data_offsets_dma_sync_single { + u32 device; + const void *device_ptr_; +}; + +struct trace_event_data_offsets_dma_sync_sg { + u32 device; + const void *device_ptr_; + u32 dma_addrs; + const void *dma_addrs_ptr_; + u32 lengths; + const void *lengths_ptr_; +}; + +typedef void (*btf_trace_dma_map_page)(void *, struct device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_map_resource)(void *, struct 
device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_page)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_resource)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_alloc)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_pages)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_sgt_err)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_alloc_sgt)(void *, struct device *, struct sg_table *, size_t, enum dma_data_direction, gfp_t, long unsigned int); + +typedef void (*btf_trace_dma_free)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_free_pages)(void *, struct device *, void *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_free_sgt)(void *, struct device *, struct sg_table *, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_map_sg)(void *, struct device *, struct scatterlist *, int, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_map_sg_err)(void *, struct device *, struct scatterlist *, int, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_unmap_sg)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + +typedef void (*btf_trace_dma_sync_single_for_cpu)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_single_for_device)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_sg_for_cpu)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); + +typedef void (*btf_trace_dma_sync_sg_for_device)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); + +struct dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; + long unsigned int attrs; +}; + +enum freezer_state_flags { + CGROUP_FREEZER_ONLINE = 1, + CGROUP_FREEZING_SELF = 2, + CGROUP_FREEZING_PARENT = 4, + CGROUP_FROZEN = 8, + CGROUP_FREEZING = 6, +}; + +struct freezer { + struct cgroup_subsys_state css; + unsigned int state; + long: 32; +}; + +struct saved_cmdlines_buffer { + unsigned int map_pid_to_cmdline[32769]; + unsigned int *map_cmdline_to_pid; + unsigned int cmdline_num; + int cmdline_idx; + char saved_cmdlines[0]; +}; + +struct latch_tree_ops { + bool (*less)(struct latch_tree_node *, struct latch_tree_node *); + int (*comp)(void *, struct latch_tree_node *); +}; + +struct bpf_binary_header { + u32 size; + long: 32; + u8 image[0]; +}; + +typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); + +enum page_size_enum { + __PAGE_SIZE = 4096, +}; + +struct bpf_prog_pack { + struct list_head list; + void *ptr; + long unsigned int bitmap[0]; +}; + +struct bpf_prog_dummy { + struct bpf_prog prog; +}; + +typedef u64 (*btf_bpf_user_rnd_u32)(); + +typedef u64 (*btf_bpf_get_raw_cpu_id)(); + +struct _bpf_dtab_netdev { + struct net_device *dev; +}; + +struct trace_event_raw_xdp_exception { + struct trace_entry ent; + int 
prog_id; + u32 act; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_xdp_bulk_tx { + struct trace_entry ent; + int ifindex; + u32 act; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_xdp_redirect_template { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + int err; + int to_ifindex; + u32 map_id; + int map_index; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_kthread { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int sched; + unsigned int xdp_pass; + unsigned int xdp_drop; + unsigned int xdp_redirect; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_enqueue { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int to_cpu; + char __data[0]; +}; + +struct trace_event_raw_xdp_devmap_xmit { + struct trace_entry ent; + int from_ifindex; + u32 act; + int to_ifindex; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_mem_disconnect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + char __data[0]; +}; + +struct trace_event_raw_mem_connect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + const struct xdp_rxq_info *rxq; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_mem_return_failed { + struct trace_entry ent; + const struct page *page; + u32 mem_id; + u32 mem_type; + char __data[0]; +}; + +struct trace_event_raw_bpf_xdp_link_attach_failed { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_xdp_exception {}; + +struct trace_event_data_offsets_xdp_bulk_tx {}; + +struct trace_event_data_offsets_xdp_redirect_template {}; + +struct trace_event_data_offsets_xdp_cpumap_kthread {}; + +struct trace_event_data_offsets_xdp_cpumap_enqueue {}; + +struct trace_event_data_offsets_xdp_devmap_xmit {}; + +struct trace_event_data_offsets_mem_disconnect {}; + +struct trace_event_data_offsets_mem_connect {}; + +struct trace_event_data_offsets_mem_return_failed {}; + +struct trace_event_data_offsets_bpf_xdp_link_attach_failed { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, const struct bpf_prog *, u32); + +typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_cpumap_kthread)(void *, int, unsigned int, unsigned int, int, struct xdp_cpumap_stats *); + +typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_mem_disconnect)(void *, 
const struct xdp_mem_allocator *); + +typedef void (*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, const struct xdp_rxq_info *); + +typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, const struct page *); + +typedef void (*btf_trace_bpf_xdp_link_attach_failed)(void *, const char *); + +struct bp_slots_histogram { + atomic_t *count; +}; + +struct bp_cpuinfo { + unsigned int cpu_pinned; + struct bp_slots_histogram tsk_pinned; +}; + +struct crypto_acomp_ctx { + struct crypto_acomp *acomp; + struct acomp_req *req; + struct crypto_wait wait; + u8 *buffer; + struct mutex mutex; + bool is_sleepable; +}; + +struct zpool; + +struct zswap_pool { + struct zpool *zpool; + struct crypto_acomp_ctx *acomp_ctx; + struct percpu_ref ref; + struct list_head list; + struct work_struct release_work; + struct hlist_node node; + char tfm_name[128]; +}; + +struct zswap_entry { + swp_entry_t swpentry; + unsigned int length; + bool referenced; + struct zswap_pool *pool; + long unsigned int handle; + struct obj_cgroup *objcg; + struct list_head lru; +}; + +enum zswap_init_type { + ZSWAP_UNINIT = 0, + ZSWAP_INIT_SUCCEED = 1, + ZSWAP_INIT_FAILED = 2, +}; + +struct xattr_args { + __u64 value; + __u32 size; + __u32 flags; +}; + +struct kioctx; + +struct kioctx_table { + struct callback_head rcu; + unsigned int nr; + struct kioctx *table[0]; +}; + +typedef __kernel_ulong_t aio_context_t; + +enum { + IOCB_CMD_PREAD = 0, + IOCB_CMD_PWRITE = 1, + IOCB_CMD_FSYNC = 2, + IOCB_CMD_FDSYNC = 3, + IOCB_CMD_POLL = 5, + IOCB_CMD_NOOP = 6, + IOCB_CMD_PREADV = 7, + IOCB_CMD_PWRITEV = 8, +}; + +struct io_event { + __u64 data; + __u64 obj; + __s64 res; + __s64 res2; +}; + +struct iocb { + __u64 aio_data; + __kernel_rwf_t aio_rw_flags; + __u32 aio_key; + __u16 aio_lio_opcode; + __s16 aio_reqprio; + __u32 aio_fildes; + __u64 aio_buf; + __u64 aio_nbytes; + __s64 aio_offset; + __u64 aio_reserved2; + __u32 aio_flags; + __u32 aio_resfd; +}; + +typedef int kiocb_cancel_fn(struct kiocb *); + +struct aio_ring { + unsigned int id; + unsigned int nr; + unsigned int head; + unsigned int tail; + unsigned int magic; + unsigned int compat_features; + unsigned int incompat_features; + unsigned int header_length; + struct io_event io_events[0]; +}; + +struct kioctx_cpu; + +struct ctx_rq_wait; + +struct kioctx { + struct percpu_ref users; + atomic_t dead; + struct percpu_ref reqs; + long unsigned int user_id; + struct kioctx_cpu *cpu; + unsigned int req_batch; + unsigned int max_reqs; + unsigned int nr_events; + long unsigned int mmap_base; + long unsigned int mmap_size; + struct folio **ring_folios; + long int nr_pages; + struct rcu_work free_rwork; + struct ctx_rq_wait *rq_wait; + long: 32; + long: 32; + struct { + atomic_t reqs_available; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + spinlock_t ctx_lock; + struct list_head active_reqs; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct { + struct mutex ring_lock; + wait_queue_head_t wait; + }; + struct { + unsigned int tail; + unsigned int completed_events; + spinlock_t completion_lock; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + }; + struct folio *internal_folios[8]; + struct file *aio_ring_file; + unsigned int id; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct kioctx_cpu { + unsigned int reqs_available; +}; + +struct ctx_rq_wait { + struct completion comp; + atomic_t count; +}; + +struct fsync_iocb { + struct file 
*file; + struct work_struct work; + bool datasync; + struct cred *creds; +}; + +struct poll_iocb { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + bool cancelled; + bool work_scheduled; + bool work_need_resched; + struct wait_queue_entry wait; + struct work_struct work; +}; + +struct aio_kiocb { + union { + struct file *ki_filp; + struct kiocb rw; + struct fsync_iocb fsync; + struct poll_iocb poll; + }; + struct kioctx *ki_ctx; + kiocb_cancel_fn *ki_cancel; + struct io_event ki_res; + struct list_head ki_list; + refcount_t ki_refcnt; + struct eventfd_ctx *ki_eventfd; +}; + +struct aio_waiter { + struct wait_queue_entry w; + size_t min_nr; +}; + +struct aio_poll_table { + struct poll_table_struct pt; + struct aio_kiocb *iocb; + bool queued; + int error; +}; + +struct __aio_sigset { + const sigset_t *sigmask; + size_t sigsetsize; +}; + +struct ext4_free_data { + struct list_head efd_list; + struct rb_node efd_node; + ext4_group_t efd_group; + ext4_grpblk_t efd_start_cluster; + ext4_grpblk_t efd_count; + tid_t efd_tid; +}; + +enum { + MB_INODE_PA = 0, + MB_GROUP_PA = 1, +}; + +struct ext4_buddy { + struct folio *bd_buddy_folio; + void *bd_buddy; + struct folio *bd_bitmap_folio; + void *bd_bitmap; + struct ext4_group_info *bd_info; + struct super_block *bd_sb; + __u16 bd_blkbits; + ext4_group_t bd_group; +}; + +struct sg { + struct ext4_group_info info; + ext4_grpblk_t counters[18]; +}; + +struct btrfs_async_delayed_work { + struct btrfs_delayed_root *delayed_root; + int nr; + struct btrfs_work work; +}; + +typedef int __kernel_mqd_t; + +typedef __kernel_mqd_t mqd_t; + +struct mq_attr { + __kernel_long_t mq_flags; + __kernel_long_t mq_maxmsg; + __kernel_long_t mq_msgsize; + __kernel_long_t mq_curmsgs; + __kernel_long_t __reserved[4]; +}; + +struct mqueue_fs_context { + struct ipc_namespace *ipc_ns; + bool newns; +}; + +struct posix_msg_tree_node { + struct rb_node rb_node; + struct list_head msg_list; + int priority; +}; + +struct ext_wait_queue { + struct task_struct *task; + struct list_head list; + struct msg_msg *msg; + int state; +}; + +struct mqueue_inode_info { + spinlock_t lock; + long: 32; + struct inode vfs_inode; + wait_queue_head_t wait_q; + struct rb_root msg_tree; + struct rb_node *msg_tree_rightmost; + struct posix_msg_tree_node *node_cache; + struct mq_attr attr; + struct sigevent notify; + struct pid *notify_owner; + u32 notify_self_exec_id; + struct user_namespace *notify_user_ns; + struct ucounts *ucounts; + struct sock *notify_sock; + struct sk_buff *notify_cookie; + struct ext_wait_queue e_wait_q[2]; + long unsigned int qsize; + long: 32; +}; + +struct uuidcmp { + const char *uuid; + int len; +}; + +struct show_busy_params { + struct seq_file *m; + struct blk_mq_hw_ctx *hctx; +}; + +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + struct filename *filename; + struct statx *buffer; +}; + +union nested_table { + union nested_table *table; + struct rhash_lock_head *bucket; +}; + +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +struct nla_bitfield32 { + __u32 value; + __u32 selector; +}; + +enum { + SWITCHTEC_GAS_MRPC_OFFSET = 0, + SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, + SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, + SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, + SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, + SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, + SWITCHTEC_GAS_NTB_OFFSET = 65536, + 
SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, +}; + +enum { + SWITCHTEC_NTB_REG_INFO_OFFSET = 0, + SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, + SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, +}; + +struct nt_partition_info { + u32 xlink_enabled; + u32 target_part_low; + u32 target_part_high; + u32 reserved; +}; + +struct ntb_info_regs { + u8 partition_count; + u8 partition_id; + u16 reserved1; + u64 ep_map; + u16 requester_id; + u16 reserved2; + u32 reserved3[4]; + struct nt_partition_info ntp_info[48]; +}; + +struct ntb_ctrl_regs { + u32 partition_status; + u32 partition_op; + u32 partition_ctrl; + u32 bar_setup; + u32 bar_error; + u16 lut_table_entries; + u16 lut_table_offset; + u32 lut_error; + u16 req_id_table_size; + u16 req_id_table_offset; + u32 req_id_error; + u32 reserved1[7]; + struct { + u32 ctl; + u32 win_size; + u64 xlate_addr; + } bar_entry[6]; + struct { + u32 win_size; + u32 reserved[3]; + } bar_ext_entry[6]; + u32 reserved2[192]; + u32 req_id_table[512]; + u32 reserved3[256]; + u64 lut_entry[512]; +}; + +struct pci_dev_reset_methods { + u16 vendor; + u16 device; + int (*reset)(struct pci_dev *, bool); +}; + +struct pci_dev_acs_enabled { + u16 vendor; + u16 device; + int (*acs_enabled)(struct pci_dev *, u16); +}; + +struct pci_dev_acs_ops { + u16 vendor; + u16 device; + int (*enable_acs)(struct pci_dev *); + int (*disable_acs_redir)(struct pci_dev *); +}; + +typedef int (*pm_callback_t)(struct device *); + +struct scsi_dev_info_list { + struct list_head dev_info_list; + char vendor[8]; + char model[16]; + blist_flags_t flags; + unsigned int compatible; + long: 32; +}; + +struct scsi_dev_info_list_table { + struct list_head node; + struct list_head scsi_dev_info_list; + const char *name; + int key; +}; + +struct phy_fixup { + struct list_head list; + char bus_id[64]; + u32 phy_uid; + u32 phy_uid_mask; + int (*run)(struct phy_device *); +}; + +struct sfp_eeprom_base { + u8 phys_id; + u8 phys_ext_id; + u8 connector; + u8 e10g_base_er: 1; + u8 e10g_base_lrm: 1; + u8 e10g_base_lr: 1; + u8 e10g_base_sr: 1; + u8 if_1x_sx: 1; + u8 if_1x_lx: 1; + u8 if_1x_copper_active: 1; + u8 if_1x_copper_passive: 1; + u8 escon_mmf_1310_led: 1; + u8 escon_smf_1310_laser: 1; + u8 sonet_oc192_short_reach: 1; + u8 sonet_reach_bit1: 1; + u8 sonet_reach_bit2: 1; + u8 sonet_oc48_long_reach: 1; + u8 sonet_oc48_intermediate_reach: 1; + u8 sonet_oc48_short_reach: 1; + u8 unallocated_5_7: 1; + u8 sonet_oc12_smf_long_reach: 1; + u8 sonet_oc12_smf_intermediate_reach: 1; + u8 sonet_oc12_short_reach: 1; + u8 unallocated_5_3: 1; + u8 sonet_oc3_smf_long_reach: 1; + u8 sonet_oc3_smf_intermediate_reach: 1; + u8 sonet_oc3_short_reach: 1; + u8 e_base_px: 1; + u8 e_base_bx10: 1; + u8 e100_base_fx: 1; + u8 e100_base_lx: 1; + u8 e1000_base_t: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_sx: 1; + u8 fc_ll_v: 1; + u8 fc_ll_s: 1; + u8 fc_ll_i: 1; + u8 fc_ll_l: 1; + u8 fc_ll_m: 1; + u8 fc_tech_sa: 1; + u8 fc_tech_lc: 1; + u8 fc_tech_electrical_inter_enclosure: 1; + u8 fc_tech_electrical_intra_enclosure: 1; + u8 fc_tech_sn: 1; + u8 fc_tech_sl: 1; + u8 fc_tech_ll: 1; + u8 sfp_ct_active: 1; + u8 sfp_ct_passive: 1; + u8 unallocated_8_1: 1; + u8 unallocated_8_0: 1; + u8 fc_media_tw: 1; + u8 fc_media_tp: 1; + u8 fc_media_mi: 1; + u8 fc_media_tv: 1; + u8 fc_media_m6: 1; + u8 fc_media_m5: 1; + u8 unallocated_9_1: 1; + u8 fc_media_sm: 1; + u8 fc_speed_1200: 1; + u8 fc_speed_800: 1; + u8 fc_speed_1600: 1; + u8 fc_speed_400: 1; + u8 fc_speed_3200: 1; + u8 fc_speed_200: 1; + u8 unallocated_10_1: 1; + u8 fc_speed_100: 1; + u8 
encoding; + u8 br_nominal; + u8 rate_id; + u8 link_len[6]; + char vendor_name[16]; + u8 extended_cc; + char vendor_oui[3]; + char vendor_pn[16]; + char vendor_rev[4]; + union { + __be16 optical_wavelength; + __be16 cable_compliance; + struct { + u8 reserved60_2: 6; + u8 fc_pi_4_app_h: 1; + u8 sff8431_app_e: 1; + u8 reserved61: 8; + } passive; + struct { + u8 reserved60_4: 4; + u8 fc_pi_4_lim: 1; + u8 sff8431_lim: 1; + u8 fc_pi_4_app_h: 1; + u8 sff8431_app_e: 1; + u8 reserved61: 8; + } active; + }; + u8 reserved62; + u8 cc_base; +}; + +struct sfp_eeprom_ext { + __be16 options; + u8 br_max; + u8 br_min; + char vendor_sn[16]; + char datecode[8]; + u8 diagmon; + u8 enhopts; + u8 sff8472_compliance; + u8 cc_ext; +}; + +struct sfp_eeprom_id { + struct sfp_eeprom_base base; + struct sfp_eeprom_ext ext; +}; + +struct sfp_upstream_ops { + void (*attach)(void *, struct sfp_bus *); + void (*detach)(void *, struct sfp_bus *); + int (*module_insert)(void *, const struct sfp_eeprom_id *); + void (*module_remove)(void *); + int (*module_start)(void *); + void (*module_stop)(void *); + void (*link_down)(void *); + void (*link_up)(void *); + int (*connect_phy)(void *, struct phy_device *); + void (*disconnect_phy)(void *, struct phy_device *); +}; + +struct netdev_lag_lower_state_info { + u8 link_up: 1; + u8 tx_enabled: 1; +}; + +struct net_failover_info { + struct net_device *primary_dev; + struct net_device *standby_dev; + struct rtnl_link_stats64 primary_stats; + struct rtnl_link_stats64 standby_stats; + struct rtnl_link_stats64 failover_stats; + spinlock_t stats_lock; + long: 32; +}; + +enum rc5_state { + STATE_INACTIVE___7 = 0, + STATE_BIT_START = 1, + STATE_BIT_END = 2, + STATE_CHECK_RC5X = 3, + STATE_FINISHED___4 = 4, +}; + +struct ppl_header_entry { + __le64 data_sector; + __le32 pp_size; + __le32 data_size; + __le32 parity_disk; + __le32 checksum; +}; + +struct ppl_header { + __u8 reserved[512]; + __le32 signature; + __le32 padding; + __le64 generation; + __le32 entries_count; + __le32 checksum; + struct ppl_header_entry entries[148]; +}; + +struct ppl_log; + +struct ppl_io_unit { + struct ppl_log *log; + struct page *header_page; + unsigned int entries_count; + unsigned int pp_size; + u64 seq; + struct list_head log_sibling; + struct list_head stripe_list; + atomic_t pending_stripes; + atomic_t pending_flushes; + bool submitted; + long: 32; + struct bio bio; + struct bio_vec biovec[32]; +}; + +struct ppl_conf { + struct mddev *mddev; + struct ppl_log *child_logs; + int count; + int block_size; + u32 signature; + long: 32; + atomic64_t seq; + struct kmem_cache *io_kc; + mempool_t io_pool; + struct bio_set bs; + struct bio_set flush_bs; + int recovered_entries; + int mismatch_count; + struct list_head no_mem_stripes; + spinlock_t no_mem_stripes_lock; + short unsigned int write_hint; + long: 32; +}; + +struct ppl_log { + struct ppl_conf *ppl_conf; + struct md_rdev *rdev; + struct mutex io_mutex; + struct ppl_io_unit *current_io; + spinlock_t io_list_lock; + struct list_head io_list; + long: 32; + sector_t next_io_sector; + unsigned int entry_space; + bool use_multippl; + bool wb_cache_on; + long unsigned int disk_flush_bitmap; + long: 32; +}; + +struct net_packet_attrs { + const unsigned char *src; + const unsigned char *dst; + u32 ip_src; + u32 ip_dst; + bool tcp; + u16 sport; + u16 dport; + int timeout; + int size; + int max_size; + u8 id; + u16 queue_mapping; +}; + +struct net_test_priv { + struct net_packet_attrs *packet; + struct packet_type pt; + struct completion comp; + int double_vlan; + 
int vlan_id; + int ok; +}; + +struct netsfhdr { + __be32 version; + __be64 magic; + u8 id; +} __attribute__((packed)); + +struct net_test { + char name[32]; + int (*fn)(struct net_device *); +}; + +struct eeprom_req_info { + struct ethnl_req_info base; + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; +}; + +struct eeprom_reply_data { + struct ethnl_reply_data base; + u32 length; + u8 *data; +}; + +struct nft_pipapo_elem; + +union nft_pipapo_map_bucket { + struct { + long unsigned int to: 24; + long unsigned int n: 8; + }; + struct nft_pipapo_elem *e; +}; + +struct nft_pipapo_elem { + struct nft_elem_priv priv; + struct nft_set_ext ext; +}; + +struct nft_pipapo_field { + unsigned int rules; + unsigned int bsize; + unsigned int rules_alloc; + u8 groups; + u8 bb; + long unsigned int *lt; + union nft_pipapo_map_bucket *mt; +}; + +struct nft_pipapo_scratch { + u8 map_index; + u32 align_off; + long unsigned int map[0]; +}; + +struct nft_pipapo_match { + u8 field_count; + unsigned int bsize_max; + struct nft_pipapo_scratch **scratch; + struct callback_head rcu; + struct nft_pipapo_field f[0]; +}; + +struct nft_pipapo { + struct nft_pipapo_match *match; + struct nft_pipapo_match *clone; + int width; + long unsigned int last_gc; +}; + +struct ip_tunnel_prl { + __be32 addr; + __u16 flags; + __u16 __reserved; + __u32 datalen; + __u32 __reserved2; +}; + +struct sit_net { + struct ip_tunnel *tunnels_r_l[16]; + struct ip_tunnel *tunnels_r[16]; + struct ip_tunnel *tunnels_l[16]; + struct ip_tunnel *tunnels_wc[1]; + struct ip_tunnel **tunnels[4]; + struct net_device *fb_tunnel_dev; +}; + +struct interrupt_nmi_state {}; + +struct codegen_context { + unsigned int seen; + unsigned int idx; + unsigned int stack_size; + int b2p[14]; + unsigned int exentry_idx; + unsigned int alt_exit_addr; +}; + +struct umd_info { + const char *driver_name; + struct file *pipe_to_umh; + struct file *pipe_from_umh; + struct path wd; + struct pid *tgid; +}; + +enum audit_ntp_type { + AUDIT_NTP_OFFSET = 0, + AUDIT_NTP_FREQ = 1, + AUDIT_NTP_STATUS = 2, + AUDIT_NTP_TAI = 3, + AUDIT_NTP_TICK = 4, + AUDIT_NTP_ADJUST = 5, + AUDIT_NTP_NVALS = 6, +}; + +struct ntp_data { + long unsigned int tick_usec; + long: 32; + u64 tick_length; + u64 tick_length_base; + int time_state; + int time_status; + s64 time_offset; + long int time_constant; + long int time_maxerror; + long int time_esterror; + long: 32; + s64 time_freq; + time64_t time_reftime; + long int time_adjust; + long: 32; + s64 ntp_tick_adj; + time64_t ntp_next_leap_sec; +}; + +typedef __u16 comp_t; + +typedef __u32 comp2_t; + +struct acct { + char ac_flag; + char ac_version; + __u16 ac_uid16; + __u16 ac_gid16; + __u16 ac_tty; + __u32 ac_btime; + comp_t ac_utime; + comp_t ac_stime; + comp_t ac_etime; + comp_t ac_mem; + comp_t ac_io; + comp_t ac_rw; + comp_t ac_minflt; + comp_t ac_majflt; + comp_t ac_swaps; + __u16 ac_ahz; + __u32 ac_exitcode; + char ac_comm[17]; + __u8 ac_etime_hi; + __u16 ac_etime_lo; + __u32 ac_uid; + __u32 ac_gid; +}; + +typedef struct acct acct_t; + +struct bsd_acct_struct { + struct fs_pin pin; + atomic_long_t count; + struct callback_head rcu; + struct mutex lock; + int active; + long unsigned int needcheck; + struct file *file; + struct pid_namespace *ns; + struct work_struct work; + struct completion done; +}; + +struct bucket { + struct hlist_nulls_head head; + raw_spinlock_t raw_lock; +}; + +struct htab_elem; + +struct bpf_htab { + struct bpf_map map; + struct bpf_mem_alloc ma; + struct bpf_mem_alloc pcpu_ma; + struct bucket *buckets; + 
void *elems; + union { + struct pcpu_freelist freelist; + struct bpf_lru lru; + }; + struct htab_elem **extra_elems; + long: 32; + struct percpu_counter pcount; + atomic_t count; + bool use_percpu_counter; + u32 n_buckets; + u32 elem_size; + u32 hashrnd; + struct lock_class_key lockdep_key; + int *map_locked[8]; + long: 32; + long: 32; + long: 32; +}; + +struct htab_elem { + union { + struct hlist_nulls_node hash_node; + struct { + void *padding; + union { + struct pcpu_freelist_node fnode; + struct htab_elem *batch_flink; + }; + }; + }; + union { + void *ptr_to_pptr; + struct bpf_lru_node lru_node; + }; + u32 hash; + char key[0]; +}; + +struct bpf_iter_seq_hash_map_info { + struct bpf_map *map; + struct bpf_htab *htab; + void *percpu_value_buf; + u32 bucket_id; + u32 skip_elems; +}; + +struct trace_event_raw_mmap_lock { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + char __data[0]; +}; + +struct trace_event_raw_mmap_lock_acquire_returned { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + bool success; + char __data[0]; +}; + +struct trace_event_data_offsets_mmap_lock { + u32 memcg_path; + const void *memcg_path_ptr_; +}; + +struct trace_event_data_offsets_mmap_lock_acquire_returned { + u32 memcg_path; + const void *memcg_path_ptr_; +}; + +typedef void (*btf_trace_mmap_lock_start_locking)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_released)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_acquire_returned)(void *, struct mm_struct *, const char *, bool, bool); + +struct trace_event_raw_cma_release { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int pfn; + const struct page *page; + long unsigned int count; + char __data[0]; +}; + +struct trace_event_raw_cma_alloc_start { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int count; + unsigned int align; + char __data[0]; +}; + +struct trace_event_raw_cma_alloc_finish { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int pfn; + const struct page *page; + long unsigned int count; + unsigned int align; + int errorno; + char __data[0]; +}; + +struct trace_event_raw_cma_alloc_busy_retry { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int pfn; + const struct page *page; + long unsigned int count; + unsigned int align; + char __data[0]; +}; + +struct trace_event_data_offsets_cma_release { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cma_alloc_start { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cma_alloc_finish { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_cma_alloc_busy_retry { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_cma_release)(void *, const char *, long unsigned int, const struct page *, long unsigned int); + +typedef void (*btf_trace_cma_alloc_start)(void *, const char *, long unsigned int, unsigned int); + +typedef void (*btf_trace_cma_alloc_finish)(void *, const char *, long unsigned int, const struct page *, long unsigned int, unsigned int, int); + +typedef void (*btf_trace_cma_alloc_busy_retry)(void *, const char *, long unsigned int, const struct page *, long unsigned int, unsigned int); + +struct cma { + long unsigned int base_pfn; + long unsigned int count; + long unsigned int *bitmap; + unsigned int order_per_bit; + spinlock_t lock; + char name[64]; + bool 
reserve_pages_on_error; +}; + +typedef void (*iomap_punch_t)(struct inode *, loff_t, loff_t, struct iomap *); + +struct iomap_ioend { + struct list_head io_list; + u16 io_type; + u16 io_flags; + struct inode *io_inode; + size_t io_size; + long: 32; + loff_t io_offset; + sector_t io_sector; + struct bio io_bio; +}; + +struct iomap_writepage_ctx; + +struct iomap_writeback_ops { + int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t, unsigned int); + int (*prepare_ioend)(struct iomap_ioend *, int); + void (*discard_folio)(struct folio *, loff_t); +}; + +struct iomap_writepage_ctx { + struct iomap iomap; + struct iomap_ioend *ioend; + const struct iomap_writeback_ops *ops; + u32 nr_folios; + long: 32; +}; + +struct iomap_folio_state { + spinlock_t state_lock; + unsigned int read_bytes_pending; + atomic_t write_bytes_pending; + long unsigned int state[0]; +}; + +struct iomap_readpage_ctx { + struct folio *cur_folio; + bool cur_folio_in_bio; + struct bio *bio; + struct readahead_control *rac; +}; + +struct commit_header { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; + unsigned char h_chksum_type; + unsigned char h_chksum_size; + unsigned char h_padding[2]; + __be32 h_chksum[8]; + __be64 h_commit_sec; + __be32 h_commit_nsec; + long: 32; +}; + +struct journal_block_tag3_s { + __be32 t_blocknr; + __be32 t_flags; + __be32 t_blocknr_high; + __be32 t_checksum; +}; + +typedef struct journal_block_tag3_s journal_block_tag3_t; + +struct journal_block_tag_s { + __be32 t_blocknr; + __be16 t_checksum; + __be16 t_flags; + __be32 t_blocknr_high; +}; + +typedef struct journal_block_tag_s journal_block_tag_t; + +struct args_protover { + __u32 version; +}; + +struct args_protosubver { + __u32 sub_version; +}; + +struct args_openmount { + __u32 devid; +}; + +struct args_ready { + __u32 token; +}; + +struct args_fail { + __u32 token; + __s32 status; +}; + +struct args_setpipefd { + __s32 pipefd; +}; + +struct args_timeout { + __u64 timeout; +}; + +struct args_requester { + __u32 uid; + __u32 gid; +}; + +struct args_expire { + __u32 how; +}; + +struct args_askumount { + __u32 may_umount; +}; + +struct args_in { + __u32 type; +}; + +struct args_out { + __u32 devid; + __u32 magic; +}; + +struct args_ismountpoint { + union { + struct args_in in; + struct args_out out; + }; +}; + +struct autofs_dev_ioctl { + __u32 ver_major; + __u32 ver_minor; + __u32 size; + __s32 ioctlfd; + union { + struct args_protover protover; + struct args_protosubver protosubver; + struct args_openmount openmount; + struct args_ready ready; + struct args_fail fail; + struct args_setpipefd setpipefd; + struct args_timeout timeout; + struct args_requester requester; + struct args_expire expire; + struct args_askumount askumount; + struct args_ismountpoint ismountpoint; + }; + char path[0]; +}; + +enum { + AUTOFS_DEV_IOCTL_VERSION_CMD = 113, + AUTOFS_DEV_IOCTL_PROTOVER_CMD = 114, + AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD = 115, + AUTOFS_DEV_IOCTL_OPENMOUNT_CMD = 116, + AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD = 117, + AUTOFS_DEV_IOCTL_READY_CMD = 118, + AUTOFS_DEV_IOCTL_FAIL_CMD = 119, + AUTOFS_DEV_IOCTL_SETPIPEFD_CMD = 120, + AUTOFS_DEV_IOCTL_CATATONIC_CMD = 121, + AUTOFS_DEV_IOCTL_TIMEOUT_CMD = 122, + AUTOFS_DEV_IOCTL_REQUESTER_CMD = 123, + AUTOFS_DEV_IOCTL_EXPIRE_CMD = 124, + AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD = 125, + AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD = 126, +}; + +typedef int (*ioctl_fn)(struct file *, struct autofs_sb_info *, struct autofs_dev_ioctl *); + +enum { + WORK_DONE_BIT = 0, + WORK_ORDER_DONE_BIT = 1, +}; + +struct 
btrfs_ioctl_quota_ctl_args { + __u64 cmd; + __u64 status; +}; + +struct btrfs_qgroup_status_item { + __le64 version; + __le64 generation; + __le64 flags; + __le64 rescan; + __le64 enable_gen; +}; + +struct btrfs_qgroup_info_item { + __le64 generation; + __le64 rfer; + __le64 rfer_cmpr; + __le64 excl; + __le64 excl_cmpr; +}; + +struct btrfs_qgroup_limit_item { + __le64 flags; + __le64 max_rfer; + __le64 max_excl; + __le64 rsv_rfer; + __le64 rsv_excl; +}; + +struct btrfs_qgroup_swapped_block { + struct rb_node node; + int level; + bool trace_leaf; + long: 32; + u64 subvol_bytenr; + u64 subvol_generation; + u64 reloc_bytenr; + u64 reloc_generation; + u64 last_snapshot; + struct btrfs_key first_key; + long: 32; +}; + +struct btrfs_qgroup_list { + struct list_head next_group; + struct list_head next_member; + struct btrfs_qgroup *group; + struct btrfs_qgroup *member; +}; + +struct vfs_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; +}; + +struct vfs_ns_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; + __le32 rootid; +}; + +struct cpu_vfs_cap_data { + __u32 magic_etc; + kuid_t rootid; + kernel_cap_t permitted; + kernel_cap_t inheritable; +}; + +struct xts_tfm_ctx { + struct crypto_skcipher *child; + struct crypto_cipher *tweak; +}; + +struct xts_instance_ctx { + struct crypto_skcipher_spawn spawn; + struct crypto_cipher_spawn tweak_spawn; +}; + +struct xts_request_ctx { + le128 t; + struct scatterlist *tail; + struct scatterlist sg[2]; + long: 32; + struct skcipher_request subreq; +}; + +enum x509_actions { + ACT_x509_extract_key_data = 0, + ACT_x509_extract_name_segment___2 = 1, + ACT_x509_note_OID___2 = 2, + ACT_x509_note_issuer = 3, + ACT_x509_note_not_after = 4, + ACT_x509_note_not_before = 5, + ACT_x509_note_params = 6, + ACT_x509_note_serial = 7, + ACT_x509_note_sig_algo = 8, + ACT_x509_note_signature = 9, + ACT_x509_note_subject = 10, + ACT_x509_note_tbs_certificate = 11, + ACT_x509_process_extension = 12, + NR__x509_actions = 13, +}; + +struct blk_mq_hw_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_hw_ctx *, char *); +}; + +enum io_wq_type { + IO_WQ_BOUND = 0, + IO_WQ_UNBOUND = 1, +}; + +struct io_wq_acct { + unsigned int nr_workers; + unsigned int max_workers; + int index; + atomic_t nr_running; + raw_spinlock_t lock; + struct io_wq_work_list work_list; + long unsigned int flags; +}; + +struct io_wq { + long unsigned int state; + free_work_fn *free_work; + io_wq_work_fn *do_work; + struct io_wq_hash *hash; + atomic_t worker_refs; + struct completion worker_done; + struct hlist_node cpuhp_node; + struct task_struct *task; + struct io_wq_acct acct[2]; + raw_spinlock_t lock; + struct hlist_nulls_head free_list; + struct list_head all_list; + struct wait_queue_entry wait; + struct io_wq_work *hash_tail[32]; + cpumask_var_t cpu_mask; +}; + +enum { + IO_WORKER_F_UP = 0, + IO_WORKER_F_RUNNING = 1, + IO_WORKER_F_FREE = 2, + IO_WORKER_F_BOUND = 3, +}; + +enum { + IO_WQ_BIT_EXIT = 0, +}; + +enum { + IO_ACCT_STALLED_BIT = 0, +}; + +struct io_worker { + refcount_t ref; + int create_index; + long unsigned int flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + struct io_wq *wq; + struct io_wq_work *cur_work; + raw_spinlock_t lock; + struct completion ref_done; + long unsigned int create_state; + struct callback_head create_work; + int init_retries; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + 
+enum { + IO_WQ_ACCT_BOUND = 0, + IO_WQ_ACCT_UNBOUND = 1, + IO_WQ_ACCT_NR = 2, +}; + +struct io_cb_cancel_data { + work_cancel_fn *fn; + void *data; + int nr_running; + int nr_pending; + bool cancel_all; +}; + +struct online_data { + unsigned int cpu; + bool online; +}; + +struct pci_dynid { + struct list_head node; + struct pci_device_id id; +}; + +struct drv_dev_and_id { + struct pci_driver *drv; + struct pci_dev *dev; + const struct pci_device_id *id; +}; + +struct internal_container { + struct klist_node node; + struct attribute_container *cont; + long: 32; + struct device classdev; +}; + +typedef unsigned int __kernel_old_dev_t; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +enum { + LO_FLAGS_READ_ONLY = 1, + LO_FLAGS_AUTOCLEAR = 4, + LO_FLAGS_PARTSCAN = 8, + LO_FLAGS_DIRECT_IO = 16, +}; + +struct loop_info { + int lo_number; + __kernel_old_dev_t lo_device; + long unsigned int lo_inode; + __kernel_old_dev_t lo_rdevice; + int lo_offset; + int lo_encrypt_type; + int lo_encrypt_key_size; + int lo_flags; + char lo_name[64]; + unsigned char lo_encrypt_key[32]; + long unsigned int lo_init[2]; + char reserved[4]; +}; + +struct loop_info64 { + __u64 lo_device; + __u64 lo_inode; + __u64 lo_rdevice; + __u64 lo_offset; + __u64 lo_sizelimit; + __u32 lo_number; + __u32 lo_encrypt_type; + __u32 lo_encrypt_key_size; + __u32 lo_flags; + __u8 lo_file_name[64]; + __u8 lo_crypt_name[64]; + __u8 lo_encrypt_key[32]; + __u64 lo_init[2]; +}; + +struct loop_config { + __u32 fd; + __u32 block_size; + struct loop_info64 info; + __u64 __reserved[8]; +}; + +enum { + Lo_unbound = 0, + Lo_bound = 1, + Lo_rundown = 2, + Lo_deleting = 3, +}; + +struct loop_device { + int lo_number; + long: 32; + loff_t lo_offset; + loff_t lo_sizelimit; + int lo_flags; + char lo_file_name[64]; + struct file *lo_backing_file; + struct block_device *lo_device; + gfp_t old_gfp_mask; + spinlock_t lo_lock; + int lo_state; + spinlock_t lo_work_lock; + struct workqueue_struct *workqueue; + struct work_struct rootcg_work; + struct list_head rootcg_cmd_list; + struct list_head idle_worker_list; + struct rb_root worker_tree; + struct timer_list timer; + bool use_dio; + bool sysfs_inited; + struct request_queue *lo_queue; + struct blk_mq_tag_set tag_set; + struct gendisk *lo_disk; + struct mutex lo_mutex; + bool idr_visible; +}; + +struct loop_cmd { + struct list_head list_entry; + bool use_aio; + atomic_t ref; + long int ret; + long: 32; + struct kiocb iocb; + struct bio_vec *bvec; + struct cgroup_subsys_state *blkcg_css; + struct cgroup_subsys_state *memcg_css; + long: 32; +}; + +struct loop_worker { + struct rb_node rb_node; + struct work_struct work; + struct list_head cmd_list; + struct list_head idle_list; + struct loop_device *lo; + struct cgroup_subsys_state *blkcg_css; + long unsigned int last_ran_at; +}; + +struct mdio_driver { + struct mdio_driver_common mdiodrv; + int (*probe)(struct mdio_device *); + void (*remove)(struct mdio_device *); + void (*shutdown)(struct mdio_device *); +}; + +struct mdio_board_info { + const char *bus_id; + char modalias[32]; + int mdio_addr; + const void *platform_data; +}; + +struct trace_event_raw_mdio_access { + struct trace_entry ent; + char busid[61]; + char read; + u8 addr; + u16 val; + unsigned int regnum; + char __data[0]; +}; + +struct trace_event_data_offsets_mdio_access {}; + +typedef void (*btf_trace_mdio_access)(void *, struct mii_bus *, char, u8, unsigned int, u16, int); + +struct mdio_bus_stat_attr { + int addr; + unsigned int field_offset; +}; + +enum 
sharp_state { + STATE_INACTIVE___8 = 0, + STATE_BIT_PULSE___5 = 1, + STATE_BIT_SPACE___5 = 2, + STATE_TRAILER_PULSE___4 = 3, + STATE_ECHO_SPACE = 4, + STATE_TRAILER_SPACE___4 = 5, +}; + +struct hid_item { + unsigned int format; + __u8 size; + __u8 type; + __u8 tag; + union { + __u8 u8; + __s8 s8; + __u16 u16; + __s16 s16; + __u32 u32; + __s32 s32; + const __u8 *longdata; + } data; +}; + +struct hid_global { + unsigned int usage_page; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned int unit; + unsigned int report_id; + unsigned int report_size; + unsigned int report_count; +}; + +struct hid_local { + unsigned int usage[12288]; + u8 usage_size[12288]; + unsigned int collection_index[12288]; + unsigned int usage_index; + unsigned int usage_minimum; + unsigned int delimiter_depth; + unsigned int delimiter_branch; +}; + +struct hid_parser { + struct hid_global global; + struct hid_global global_stack[4]; + unsigned int global_stack_ptr; + struct hid_local local; + unsigned int *collection_stack; + unsigned int collection_stack_ptr; + unsigned int collection_stack_size; + struct hid_device *device; + unsigned int scan_flags; +}; + +struct hiddev { + int minor; + int exist; + int open; + struct mutex existancelock; + wait_queue_head_t wait; + struct hid_device *hid; + struct list_head list; + spinlock_t list_lock; + bool initialized; +}; + +struct hid_dynid { + struct list_head list; + struct hid_device_id id; +}; + +enum { + INET_DIAG_REQ_NONE = 0, + INET_DIAG_REQ_BYTECODE = 1, + INET_DIAG_REQ_SK_BPF_STORAGES = 2, + INET_DIAG_REQ_PROTOCOL = 3, + __INET_DIAG_REQ_MAX = 4, +}; + +struct sock_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; +}; + +struct sock_diag_handler { + struct module *owner; + __u8 family; + int (*dump)(struct sk_buff *, struct nlmsghdr *); + int (*get_info)(struct sk_buff *, struct sock *); + int (*destroy)(struct sk_buff *, struct nlmsghdr *); +}; + +struct sock_diag_inet_compat { + struct module *owner; + int (*fn)(struct sk_buff *, struct nlmsghdr *); +}; + +struct broadcast_sk { + struct sock *sk; + struct work_struct work; +}; + +struct wol_reply_data { + struct ethnl_reply_data base; + struct ethtool_wolinfo wol; + bool show_sopass; +}; + +struct xt_template { + struct list_head list; + int (*table_init)(struct net *); + struct module *me; + char name[32]; +}; + +struct xt_pernet { + struct list_head tables[11]; +}; + +struct xt_af { + struct mutex mutex; + struct list_head match; + struct list_head target; +}; + +struct nf_mttg_trav { + struct list_head *head; + struct list_head *curr; + uint8_t class; +}; + +enum { + MTTG_TRAV_INIT = 0, + MTTG_TRAV_NFP_UNSPEC = 1, + MTTG_TRAV_NFP_SPEC = 2, + MTTG_TRAV_DONE = 3, +}; + +struct ipfrag_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + }; + struct sk_buff *next_frag; + int frag_run_len; + int ip_defrag_offset; +}; + +struct unix_skb_parms { + struct pid *pid; + kuid_t uid; + kgid_t gid; + struct scm_fp_list *fp; + u32 consumed; +}; + +enum unix_vertex_index { + UNIX_VERTEX_INDEX_MARK1 = 0, + UNIX_VERTEX_INDEX_MARK2 = 1, + UNIX_VERTEX_INDEX_START = 2, +}; + +enum { + IOAM6_ATTR_UNSPEC = 0, + IOAM6_ATTR_NS_ID = 1, + IOAM6_ATTR_NS_DATA = 2, + IOAM6_ATTR_NS_DATA_WIDE = 3, + IOAM6_ATTR_SC_ID = 4, + IOAM6_ATTR_SC_DATA = 5, + IOAM6_ATTR_SC_NONE = 6, + IOAM6_ATTR_PAD = 7, + __IOAM6_ATTR_MAX = 8, +}; + +enum { + IOAM6_CMD_UNSPEC = 0, + IOAM6_CMD_ADD_NAMESPACE = 1, + IOAM6_CMD_DEL_NAMESPACE = 2, + 
IOAM6_CMD_DUMP_NAMESPACES = 3, + IOAM6_CMD_ADD_SCHEMA = 4, + IOAM6_CMD_DEL_SCHEMA = 5, + IOAM6_CMD_DUMP_SCHEMAS = 6, + IOAM6_CMD_NS_SET_SCHEMA = 7, + __IOAM6_CMD_MAX = 8, +}; + +enum ioam6_event_attr { + IOAM6_EVENT_ATTR_UNSPEC = 0, + IOAM6_EVENT_ATTR_TRACE_NAMESPACE = 1, + IOAM6_EVENT_ATTR_TRACE_NODELEN = 2, + IOAM6_EVENT_ATTR_TRACE_TYPE = 3, + IOAM6_EVENT_ATTR_TRACE_DATA = 4, + __IOAM6_EVENT_ATTR_MAX = 5, +}; + +enum { + IFLA_BR_UNSPEC = 0, + IFLA_BR_FORWARD_DELAY = 1, + IFLA_BR_HELLO_TIME = 2, + IFLA_BR_MAX_AGE = 3, + IFLA_BR_AGEING_TIME = 4, + IFLA_BR_STP_STATE = 5, + IFLA_BR_PRIORITY = 6, + IFLA_BR_VLAN_FILTERING = 7, + IFLA_BR_VLAN_PROTOCOL = 8, + IFLA_BR_GROUP_FWD_MASK = 9, + IFLA_BR_ROOT_ID = 10, + IFLA_BR_BRIDGE_ID = 11, + IFLA_BR_ROOT_PORT = 12, + IFLA_BR_ROOT_PATH_COST = 13, + IFLA_BR_TOPOLOGY_CHANGE = 14, + IFLA_BR_TOPOLOGY_CHANGE_DETECTED = 15, + IFLA_BR_HELLO_TIMER = 16, + IFLA_BR_TCN_TIMER = 17, + IFLA_BR_TOPOLOGY_CHANGE_TIMER = 18, + IFLA_BR_GC_TIMER = 19, + IFLA_BR_GROUP_ADDR = 20, + IFLA_BR_FDB_FLUSH = 21, + IFLA_BR_MCAST_ROUTER = 22, + IFLA_BR_MCAST_SNOOPING = 23, + IFLA_BR_MCAST_QUERY_USE_IFADDR = 24, + IFLA_BR_MCAST_QUERIER = 25, + IFLA_BR_MCAST_HASH_ELASTICITY = 26, + IFLA_BR_MCAST_HASH_MAX = 27, + IFLA_BR_MCAST_LAST_MEMBER_CNT = 28, + IFLA_BR_MCAST_STARTUP_QUERY_CNT = 29, + IFLA_BR_MCAST_LAST_MEMBER_INTVL = 30, + IFLA_BR_MCAST_MEMBERSHIP_INTVL = 31, + IFLA_BR_MCAST_QUERIER_INTVL = 32, + IFLA_BR_MCAST_QUERY_INTVL = 33, + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL = 34, + IFLA_BR_MCAST_STARTUP_QUERY_INTVL = 35, + IFLA_BR_NF_CALL_IPTABLES = 36, + IFLA_BR_NF_CALL_IP6TABLES = 37, + IFLA_BR_NF_CALL_ARPTABLES = 38, + IFLA_BR_VLAN_DEFAULT_PVID = 39, + IFLA_BR_PAD = 40, + IFLA_BR_VLAN_STATS_ENABLED = 41, + IFLA_BR_MCAST_STATS_ENABLED = 42, + IFLA_BR_MCAST_IGMP_VERSION = 43, + IFLA_BR_MCAST_MLD_VERSION = 44, + IFLA_BR_VLAN_STATS_PER_PORT = 45, + IFLA_BR_MULTI_BOOLOPT = 46, + IFLA_BR_MCAST_QUERIER_STATE = 47, + IFLA_BR_FDB_N_LEARNED = 48, + IFLA_BR_FDB_MAX_LEARNED = 49, + __IFLA_BR_MAX = 50, +}; + +enum { + IFLA_BRPORT_UNSPEC = 0, + IFLA_BRPORT_STATE = 1, + IFLA_BRPORT_PRIORITY = 2, + IFLA_BRPORT_COST = 3, + IFLA_BRPORT_MODE = 4, + IFLA_BRPORT_GUARD = 5, + IFLA_BRPORT_PROTECT = 6, + IFLA_BRPORT_FAST_LEAVE = 7, + IFLA_BRPORT_LEARNING = 8, + IFLA_BRPORT_UNICAST_FLOOD = 9, + IFLA_BRPORT_PROXYARP = 10, + IFLA_BRPORT_LEARNING_SYNC = 11, + IFLA_BRPORT_PROXYARP_WIFI = 12, + IFLA_BRPORT_ROOT_ID = 13, + IFLA_BRPORT_BRIDGE_ID = 14, + IFLA_BRPORT_DESIGNATED_PORT = 15, + IFLA_BRPORT_DESIGNATED_COST = 16, + IFLA_BRPORT_ID = 17, + IFLA_BRPORT_NO = 18, + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, + IFLA_BRPORT_CONFIG_PENDING = 20, + IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, + IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, + IFLA_BRPORT_HOLD_TIMER = 23, + IFLA_BRPORT_FLUSH = 24, + IFLA_BRPORT_MULTICAST_ROUTER = 25, + IFLA_BRPORT_PAD = 26, + IFLA_BRPORT_MCAST_FLOOD = 27, + IFLA_BRPORT_MCAST_TO_UCAST = 28, + IFLA_BRPORT_VLAN_TUNNEL = 29, + IFLA_BRPORT_BCAST_FLOOD = 30, + IFLA_BRPORT_GROUP_FWD_MASK = 31, + IFLA_BRPORT_NEIGH_SUPPRESS = 32, + IFLA_BRPORT_ISOLATED = 33, + IFLA_BRPORT_BACKUP_PORT = 34, + IFLA_BRPORT_MRP_RING_OPEN = 35, + IFLA_BRPORT_MRP_IN_OPEN = 36, + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 37, + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 38, + IFLA_BRPORT_LOCKED = 39, + IFLA_BRPORT_MAB = 40, + IFLA_BRPORT_MCAST_N_GROUPS = 41, + IFLA_BRPORT_MCAST_MAX_GROUPS = 42, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 43, + IFLA_BRPORT_BACKUP_NHID = 44, + __IFLA_BRPORT_MAX = 45, +}; + +enum { + IFLA_STATS_UNSPEC = 0, + IFLA_STATS_LINK_64 = 1, 
+ IFLA_STATS_LINK_XSTATS = 2, + IFLA_STATS_LINK_XSTATS_SLAVE = 3, + IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, + IFLA_STATS_AF_SPEC = 5, + __IFLA_STATS_MAX = 6, +}; + +enum { + LINK_XSTATS_TYPE_UNSPEC = 0, + LINK_XSTATS_TYPE_BRIDGE = 1, + LINK_XSTATS_TYPE_BOND = 2, + __LINK_XSTATS_TYPE_MAX = 3, +}; + +struct bridge_vlan_info { + __u16 flags; + __u16 vid; +}; + +struct bridge_vlan_xstats { + __u64 rx_bytes; + __u64 rx_packets; + __u64 tx_bytes; + __u64 tx_packets; + __u16 vid; + __u16 flags; + __u32 pad2; +}; + +enum { + BRIDGE_XSTATS_UNSPEC = 0, + BRIDGE_XSTATS_VLAN = 1, + BRIDGE_XSTATS_MCAST = 2, + BRIDGE_XSTATS_PAD = 3, + BRIDGE_XSTATS_STP = 4, + __BRIDGE_XSTATS_MAX = 5, +}; + +struct kretprobe_blackpoint { + const char *name; + void *addr; +}; + +struct powerpc_jit_data { + struct bpf_binary_header *hdr; + struct bpf_binary_header *fhdr; + u32 *addrs; + u8 *fimage; + u32 proglen; + struct codegen_context ctx; +}; + +struct nbcon_state { + union { + unsigned int atom; + struct { + unsigned int prio: 2; + unsigned int req_prio: 2; + unsigned int unsafe: 1; + unsigned int unsafe_takeover: 1; + unsigned int cpu: 24; + }; + }; +}; + +struct printk_buffers { + char outbuf[2048]; + char scratchbuf[1024]; +}; + +struct console_flush_type { + bool nbcon_atomic; + bool nbcon_offload; + bool legacy_direct; + bool legacy_offload; +}; + +struct printk_message { + struct printk_buffers *pbufs; + unsigned int outbuf_len; + u64 seq; + long unsigned int dropped; + long: 32; +}; + +struct tick_sched { + long unsigned int flags; + unsigned int stalled_jiffies; + long unsigned int last_tick_jiffies; + long: 32; + struct hrtimer sched_timer; + ktime_t last_tick; + ktime_t next_tick; + long unsigned int idle_jiffies; + long: 32; + ktime_t idle_waketime; + unsigned int got_idle_tick; + seqcount_t idle_sleeptime_seq; + ktime_t idle_entrytime; + long unsigned int last_jiffies; + long: 32; + u64 timer_expires_base; + u64 timer_expires; + u64 next_timer; + ktime_t idle_expires; + long unsigned int idle_calls; + long unsigned int idle_sleeps; + ktime_t idle_exittime; + ktime_t idle_sleeptime; + ktime_t iowait_sleeptime; + atomic_t tick_dep_mask; + long unsigned int check_clocks; +}; + +struct timer_list_iter { + int cpu; + bool second_pass; + u64 now; +}; + +struct kallsym_iter { + loff_t pos; + loff_t pos_mod_end; + loff_t pos_ftrace_mod_end; + loff_t pos_bpf_end; + long unsigned int value; + unsigned int nameoff; + char type; + char name[512]; + char module_name[60]; + int exported; + int show_value; +}; + +struct bpf_iter__ksym { + union { + struct bpf_iter_meta *meta; + }; + union { + struct kallsym_iter *ksym; + }; +}; + +struct kprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int ip; +}; + +struct kretprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int func; + long unsigned int ret_ip; +}; + +struct trace_kprobe { + struct dyn_event devent; + struct kretprobe rp; + long unsigned int *nhit; + const char *symbol; + struct trace_probe tp; +}; + +struct sym_count_ctx { + unsigned int count; + const char *name; +}; + +struct callchain_cpus_entries { + struct callback_head callback_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +struct alloc_context { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct zoneref *preferred_zoneref; + int migratetype; + enum zone_type highest_zoneidx; + bool spread_dirty_pages; +}; + +typedef int fpi_t; + +struct postprocess_bh_ctx { + struct work_struct work; + struct buffer_head *bh; +}; + +struct bh_lru { + struct buffer_head 
*bhs[16]; +}; + +struct bh_accounting { + int nr; + int ratelimit; +}; + +enum SHIFT_DIRECTION { + SHIFT_LEFT = 0, + SHIFT_RIGHT = 1, +}; + +struct ext4_extent_tail { + __le32 et_checksum; +}; + +struct btrfs_ioctl_defrag_range_args { + __u64 start; + __u64 len; + __u64 flags; + __u32 extent_thresh; + __u32 compress_type; + __u32 unused[4]; +}; + +struct inode_defrag { + struct rb_node rb_node; + long: 32; + u64 ino; + u64 transid; + u64 root; + u32 extent_thresh; + long: 32; +}; + +struct defrag_target_range { + struct list_head list; + u64 start; + u64 len; +}; + +struct btrfs_free_space_info { + __le32 extent_count; + __le32 flags; +}; + +struct msgbuf { + __kernel_long_t mtype; + char mtext[1]; +}; + +struct msg; + +struct msqid_ds { + struct ipc_perm msg_perm; + struct msg *msg_first; + struct msg *msg_last; + __kernel_old_time_t msg_stime; + __kernel_old_time_t msg_rtime; + __kernel_old_time_t msg_ctime; + long unsigned int msg_lcbytes; + long unsigned int msg_lqbytes; + short unsigned int msg_cbytes; + short unsigned int msg_qnum; + short unsigned int msg_qbytes; + __kernel_ipc_pid_t msg_lspid; + __kernel_ipc_pid_t msg_lrpid; +}; + +struct msqid64_ds { + struct ipc64_perm msg_perm; + long unsigned int msg_stime_high; + long unsigned int msg_stime; + long unsigned int msg_rtime_high; + long unsigned int msg_rtime; + long unsigned int msg_ctime_high; + long unsigned int msg_ctime; + long unsigned int msg_cbytes; + long unsigned int msg_qnum; + long unsigned int msg_qbytes; + __kernel_pid_t msg_lspid; + __kernel_pid_t msg_lrpid; + long unsigned int __unused4; + long unsigned int __unused5; + long: 32; +}; + +struct msginfo { + int msgpool; + int msgmap; + int msgmax; + int msgmnb; + int msgmni; + int msgssz; + int msgtql; + short unsigned int msgseg; +}; + +struct msg_queue { + struct kern_ipc_perm q_perm; + time64_t q_stime; + time64_t q_rtime; + time64_t q_ctime; + long unsigned int q_cbytes; + long unsigned int q_qnum; + long unsigned int q_qbytes; + struct pid *q_lspid; + struct pid *q_lrpid; + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct msg_receiver { + struct list_head r_list; + struct task_struct *r_tsk; + int r_mode; + long int r_msgtype; + long int r_maxsize; + struct msg_msg *r_msg; +}; + +struct msg_sender { + struct list_head list; + struct task_struct *tsk; + size_t msgsz; +}; + +struct crypto_lskcipher_spawn { + struct crypto_spawn base; +}; + +struct xxhash64_tfm_ctx { + u64 seed; +}; + +struct xxhash64_desc_ctx { + struct xxh64_state xxhstate; +}; + +enum blacklist_hash_type { + BLACKLIST_HASH_X509_TBS = 1, + BLACKLIST_HASH_BINARY = 2, +}; + +struct rq_qos_wait_data { + struct wait_queue_entry wq; + struct task_struct *task; + struct rq_wait *rqw; + acquire_inflight_cb_t *cb; + void *private_data; + bool got_token; +}; + +struct user_msghdr { + void *msg_name; + int msg_namelen; + struct iovec *msg_iov; + __kernel_size_t msg_iovlen; + void *msg_control; + __kernel_size_t msg_controllen; + unsigned int msg_flags; +}; + +struct compat_msghdr { + compat_uptr_t msg_name; + compat_int_t msg_namelen; + compat_uptr_t msg_iov; + compat_size_t msg_iovlen; + compat_uptr_t msg_control; + compat_size_t msg_controllen; + compat_uint_t msg_flags; +}; + +struct io_uring_recvmsg_out { + __u32 namelen; + __u32 controllen; + __u32 payloadlen; + __u32 flags; +}; + +struct io_async_msghdr { + struct iovec fast_iov; + struct iovec *free_iov; + int 
free_iov_nr; + int namelen; + __kernel_size_t controllen; + __kernel_size_t payloadlen; + struct sockaddr *uaddr; + struct msghdr msg; + struct __kernel_sockaddr_storage addr; +}; + +struct io_shutdown { + struct file *file; + int how; +}; + +struct io_accept { + struct file *file; + struct sockaddr *addr; + int *addr_len; + int flags; + int iou_flags; + u32 file_slot; + long unsigned int nofile; +}; + +struct io_socket { + struct file *file; + int domain; + int type; + int protocol; + int flags; + u32 file_slot; + long unsigned int nofile; +}; + +struct io_connect { + struct file *file; + struct sockaddr *addr; + int addr_len; + bool in_progress; + bool seen_econnaborted; +}; + +struct io_bind { + struct file *file; + int addr_len; +}; + +struct io_listen { + struct file *file; + int backlog; +}; + +struct io_sr_msg { + struct file *file; + union { + struct compat_msghdr *umsg_compat; + struct user_msghdr *umsg; + void *buf; + }; + int len; + unsigned int done_io; + unsigned int msg_flags; + unsigned int nr_multishot_loops; + u16 flags; + u16 buf_group; + u16 buf_index; + void *msg_control; + struct io_kiocb *notif; +}; + +struct io_recvmsg_multishot_hdr { + struct io_uring_recvmsg_out msg; + struct __kernel_sockaddr_storage addr; +}; + +typedef struct { + U32 f1c; + U32 f1d; + U32 f7b; + U32 f7c; +} ZSTD_cpuid_t; + +typedef enum { + ZSTD_c_compressionLevel = 100, + ZSTD_c_windowLog = 101, + ZSTD_c_hashLog = 102, + ZSTD_c_chainLog = 103, + ZSTD_c_searchLog = 104, + ZSTD_c_minMatch = 105, + ZSTD_c_targetLength = 106, + ZSTD_c_strategy = 107, + ZSTD_c_enableLongDistanceMatching = 160, + ZSTD_c_ldmHashLog = 161, + ZSTD_c_ldmMinMatch = 162, + ZSTD_c_ldmBucketSizeLog = 163, + ZSTD_c_ldmHashRateLog = 164, + ZSTD_c_contentSizeFlag = 200, + ZSTD_c_checksumFlag = 201, + ZSTD_c_dictIDFlag = 202, + ZSTD_c_nbWorkers = 400, + ZSTD_c_jobSize = 401, + ZSTD_c_overlapLog = 402, + ZSTD_c_experimentalParam1 = 500, + ZSTD_c_experimentalParam2 = 10, + ZSTD_c_experimentalParam3 = 1000, + ZSTD_c_experimentalParam4 = 1001, + ZSTD_c_experimentalParam5 = 1002, + ZSTD_c_experimentalParam6 = 1003, + ZSTD_c_experimentalParam7 = 1004, + ZSTD_c_experimentalParam8 = 1005, + ZSTD_c_experimentalParam9 = 1006, + ZSTD_c_experimentalParam10 = 1007, + ZSTD_c_experimentalParam11 = 1008, + ZSTD_c_experimentalParam12 = 1009, + ZSTD_c_experimentalParam13 = 1010, + ZSTD_c_experimentalParam14 = 1011, + ZSTD_c_experimentalParam15 = 1012, +} ZSTD_cParameter; + +typedef struct { + size_t error; + int lowerBound; + int upperBound; +} ZSTD_bounds; + +typedef enum { + ZSTD_reset_session_only = 1, + ZSTD_reset_parameters = 2, + ZSTD_reset_session_and_parameters = 3, +} ZSTD_ResetDirective; + +typedef enum { + ZSTD_e_continue = 0, + ZSTD_e_flush = 1, + ZSTD_e_end = 2, +} ZSTD_EndDirective; + +typedef struct { + long long unsigned int ingested; + long long unsigned int consumed; + long long unsigned int produced; + long long unsigned int flushed; + unsigned int currentJobID; + unsigned int nbActiveWorkers; +} ZSTD_frameProgression; + +typedef enum { + ZSTD_cpm_noAttachDict = 0, + ZSTD_cpm_attachDict = 1, + ZSTD_cpm_createCDict = 2, + ZSTD_cpm_unknown = 3, +} ZSTD_cParamMode_e; + +typedef enum { + ZSTDcrp_makeClean = 0, + ZSTDcrp_leaveDirty = 1, +} ZSTD_compResetPolicy_e; + +typedef enum { + ZSTDirp_continue = 0, + ZSTDirp_reset = 1, +} ZSTD_indexResetPolicy_e; + +typedef enum { + ZSTD_resetTarget_CDict = 0, + ZSTD_resetTarget_CCtx = 1, +} ZSTD_resetTarget_e; + +typedef struct { + U32 LLtype; + U32 Offtype; + U32 MLtype; + size_t size; + 
size_t lastCountSize; +} ZSTD_symbolEncodingTypeStats_t; + +enum { + ZSTDbss_compress = 0, + ZSTDbss_noCompress = 1, +}; + +typedef struct { + U32 *splitLocations; + size_t idx; +} seqStoreSplits; + +typedef struct { + U32 idx; + U32 posInSequence; + size_t posInSrc; +} ZSTD_sequencePosition; + +typedef size_t (*ZSTD_sequenceCopier)(ZSTD_CCtx *, ZSTD_sequencePosition *, const ZSTD_Sequence * const, size_t, const void *, size_t); + +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY = 0, + BACKLIGHT_UPDATE_SYSFS = 1, +}; + +enum backlight_type { + BACKLIGHT_RAW = 1, + BACKLIGHT_PLATFORM = 2, + BACKLIGHT_FIRMWARE = 3, + BACKLIGHT_TYPE_MAX = 4, +}; + +enum backlight_scale { + BACKLIGHT_SCALE_UNKNOWN = 0, + BACKLIGHT_SCALE_LINEAR = 1, + BACKLIGHT_SCALE_NON_LINEAR = 2, +}; + +struct backlight_device; + +struct backlight_ops { + unsigned int options; + int (*update_status)(struct backlight_device *); + int (*get_brightness)(struct backlight_device *); + bool (*controls_device)(struct backlight_device *, struct device *); +}; + +struct backlight_properties { + int brightness; + int max_brightness; + int power; + enum backlight_type type; + unsigned int state; + enum backlight_scale scale; +}; + +struct backlight_device { + struct backlight_properties props; + struct mutex update_lock; + struct mutex ops_lock; + const struct backlight_ops *ops; + struct notifier_block fb_notif; + struct list_head entry; + struct device dev; + bool fb_bl_on[32]; + int use_count; + long: 32; +}; + +struct virtio_admin_cmd { + __le16 opcode; + __le16 group_type; + long: 32; + __le64 group_member_id; + struct scatterlist *data_sg; + struct scatterlist *result_sg; + struct completion completion; + u32 result_sg_size; + int ret; +}; + +struct virtio_admin_cmd_hdr { + __le16 opcode; + __le16 group_type; + __u8 reserved1[12]; + __le64 group_member_id; +}; + +struct virtio_admin_cmd_status { + __le16 status; + __le16 status_qualifier; + __u8 reserved2[4]; +}; + +struct virtio_dev_parts_cap { + __u8 get_parts_resource_objects_limit; + __u8 set_parts_resource_objects_limit; +}; + +struct virtio_admin_cmd_query_cap_id_result { + __le64 supported_caps[1]; +}; + +struct virtio_admin_cmd_cap_get_data { + __le16 id; + __u8 reserved[6]; +}; + +struct virtio_admin_cmd_cap_set_data { + __le16 id; + __u8 reserved[6]; + __u8 cap_specific_data[0]; +}; + +struct virtio_admin_cmd_resource_obj_cmd_hdr { + __le16 type; + __u8 reserved[2]; + __le32 id; +}; + +struct virtio_admin_cmd_resource_obj_create_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __le64 flags; + __u8 resource_obj_specific_data[0]; +}; + +struct virtio_resource_obj_dev_parts { + __u8 type; + __u8 reserved[7]; +}; + +struct virtio_admin_cmd_dev_parts_metadata_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; +}; + +struct virtio_dev_part_hdr { + __le16 part_type; + __u8 flags; + __u8 reserved; + union { + struct { + __le32 offset; + __le32 reserved; + } pci_common_cfg; + struct { + __le16 index; + __u8 reserved[6]; + } vq_index; + } selector; + __le32 length; +}; + +struct virtio_admin_cmd_dev_parts_metadata_result { + union { + struct { + __le32 size; + __le32 reserved; + } parts_size; + struct { + __le32 count; + __le32 reserved; + } hdr_list_count; + struct { + __le32 count; + __le32 reserved; + struct virtio_dev_part_hdr hdrs[0]; + } hdr_list; + }; +}; + +struct virtio_admin_cmd_dev_parts_get_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; + struct 
virtio_dev_part_hdr hdr_list[0]; +}; + +struct virtio_admin_cmd_dev_mode_set_data { + __u8 flags; +}; + +struct devres_node { + struct list_head entry; + dr_release_t release; + const char *name; + size_t size; +}; + +struct devres { + struct devres_node node; + long: 32; + u8 data[0]; +}; + +struct devres_group { + struct devres_node node[2]; + void *id; + int color; +}; + +struct action_devres { + void *data; + void (*action)(void *); +}; + +struct pages_devres { + long unsigned int addr; + unsigned int order; +}; + +struct dma_fence_chain { + struct dma_fence base; + struct dma_fence *prev; + long: 32; + u64 prev_seqno; + struct dma_fence *fence; + union { + struct dma_fence_cb cb; + struct irq_work work; + }; + spinlock_t lock; +}; + +struct sg_io_v4 { + __s32 guard; + __u32 protocol; + __u32 subprotocol; + __u32 request_len; + __u64 request; + __u64 request_tag; + __u32 request_attr; + __u32 request_priority; + __u32 request_extra; + __u32 max_response_len; + __u64 response; + __u32 dout_iovec_count; + __u32 dout_xfer_len; + __u32 din_iovec_count; + __u32 din_xfer_len; + __u64 dout_xferp; + __u64 din_xferp; + __u32 timeout; + __u32 flags; + __u64 usr_ptr; + __u32 spare_in; + __u32 driver_status; + __u32 transport_status; + __u32 device_status; + __u32 retry_delay; + __u32 info; + __u32 duration; + __u32 response_len; + __s32 din_resid; + __s32 dout_resid; + __u64 generated_tag; + __u32 spare_out; + __u32 padding; +}; + +typedef int bsg_sg_io_fn(struct request_queue *, struct sg_io_v4 *, bool, unsigned int); + +struct mdio_board_entry { + struct list_head list; + struct mdio_board_info board_info; +}; + +struct tc_query_caps_base { + enum tc_setup_type type; + void *caps; +}; + +struct tc_cbs_qopt_offload { + u8 enable; + s32 queue; + s32 hicredit; + s32 locredit; + s32 idleslope; + s32 sendslope; +}; + +struct tc_etf_qopt_offload { + u8 enable; + s32 queue; +}; + +struct tc_taprio_caps { + bool supports_queue_max_sdu: 1; + bool gate_mask_per_txq: 1; + bool broken_mqprio: 1; +}; + +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +struct pps_event_time { + struct timespec64 ts_real; +}; + +enum ptp_clock_events { + PTP_CLOCK_ALARM = 0, + PTP_CLOCK_EXTTS = 1, + PTP_CLOCK_EXTOFF = 2, + PTP_CLOCK_PPS = 3, + PTP_CLOCK_PPSUSR = 4, +}; + +struct ptp_clock_event { + int type; + int index; + union { + u64 timestamp; + s64 offset; + struct pps_event_time pps_times; + }; +}; + +enum igb_tx_flags { + IGB_TX_FLAGS_VLAN = 1, + IGB_TX_FLAGS_TSO = 2, + IGB_TX_FLAGS_TSTAMP = 4, + IGB_TX_FLAGS_IPV4 = 16, + IGB_TX_FLAGS_CSUM = 32, +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_3K_BUFFER = 0, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED = 1, + IGB_RING_FLAG_RX_SCTP_CSUM = 2, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP = 3, + IGB_RING_FLAG_TX_CTX_IDX = 4, + IGB_RING_FLAG_TX_DETECT_HANG = 5, +}; + +enum igb_boards { + board_82575 = 0, +}; + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY = 0, + QUEUE_MODE_STREAM_RESERVATION = 1, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH = 0, + TX_QUEUE_PRIO_LOW = 1, +}; + +struct igb_reg_info { + u32 ofs; + char *name; +}; + +enum xmp_state { + STATE_INACTIVE___9 = 0, + STATE_LEADER_PULSE = 1, + STATE_NIBBLE_SPACE = 2, +}; + +struct quirks_list_struct { + struct hid_device_id hid_bl_item; + struct list_head node; +}; + +enum netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN = 0, + NETDEV_LAG_TX_TYPE_RANDOM = 1, + NETDEV_LAG_TX_TYPE_BROADCAST = 2, + NETDEV_LAG_TX_TYPE_ROUNDROBIN = 3, + 
NETDEV_LAG_TX_TYPE_ACTIVEBACKUP = 4, + NETDEV_LAG_TX_TYPE_HASH = 5, +}; + +enum netdev_lag_hash { + NETDEV_LAG_HASH_NONE = 0, + NETDEV_LAG_HASH_L2 = 1, + NETDEV_LAG_HASH_L34 = 2, + NETDEV_LAG_HASH_L23 = 3, + NETDEV_LAG_HASH_E23 = 4, + NETDEV_LAG_HASH_E34 = 5, + NETDEV_LAG_HASH_VLAN_SRCMAC = 6, + NETDEV_LAG_HASH_UNKNOWN = 7, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; +}; + +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC = 0, + ETHTOOL_TUNABLE_U8 = 1, + ETHTOOL_TUNABLE_U16 = 2, + ETHTOOL_TUNABLE_U32 = 3, + ETHTOOL_TUNABLE_U64 = 4, + ETHTOOL_TUNABLE_STRING = 5, + ETHTOOL_TUNABLE_S8 = 6, + ETHTOOL_TUNABLE_S16 = 7, + ETHTOOL_TUNABLE_S32 = 8, + ETHTOOL_TUNABLE_S64 = 9, +}; + +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +struct ethtool_gstrings { + __u32 cmd; + __u32 string_set; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; + __u32 reserved; + __u64 sset_mask; + __u32 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; + __u32 size; + __u8 data[0]; +}; + +enum ethtool_flags { + ETH_FLAG_TXVLAN = 128, + ETH_FLAG_RXVLAN = 256, + ETH_FLAG_LRO = 32768, + ETH_FLAG_NTUPLE = 134217728, + ETH_FLAG_RXHASH = 268435456, +}; + +struct ethtool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u8 hfunc; + __u8 input_xfrm; + __u8 rsvd8[2]; + __u32 rsvd32; + __u32 rss_config[0]; +}; + +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT = 0, + ETHTOOL_F_WISH__BIT = 1, + ETHTOOL_F_COMPAT__BIT = 2, +}; + +struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[128]; + char data[0]; +}; + +struct ethtool_rx_flow_rule { + struct flow_rule *rule; + long unsigned int priv[0]; +}; + +struct ethtool_rx_flow_spec_input { + const struct ethtool_rx_flow_spec *fs; + u32 rss_ctx; +}; + +struct ethtool_devlink_compat { + struct devlink *devlink; + union { + struct ethtool_flash efl; + struct ethtool_drvinfo info; + }; +}; + +struct ethtool_link_usettings { + struct ethtool_link_settings base; + struct { + __u32 supported[4]; + __u32 advertising[4]; + __u32 lp_advertising[4]; + } link_modes; +}; + +struct ethtool_rx_flow_key { + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_eth_addrs eth_addrs; +}; + +struct ethtool_rx_flow_match { + struct flow_dissector dissector; + struct ethtool_rx_flow_key key; + struct ethtool_rx_flow_key mask; +}; + +struct nft_rbtree { + struct rb_root root; + rwlock_t lock; + seqcount_rwlock_t 
count; + long unsigned int last_gc; +}; + +struct nft_rbtree_elem { + struct nft_elem_priv priv; + struct rb_node node; + struct nft_set_ext ext; +}; + +struct nhmsg { + unsigned char nh_family; + unsigned char nh_scope; + unsigned char nh_protocol; + unsigned char resvd; + unsigned int nh_flags; +}; + +struct nexthop_grp { + __u32 id; + __u8 weight; + __u8 weight_high; + __u16 resvd2; +}; + +enum { + NEXTHOP_GRP_TYPE_MPATH = 0, + NEXTHOP_GRP_TYPE_RES = 1, + __NEXTHOP_GRP_TYPE_MAX = 2, +}; + +enum { + NHA_UNSPEC = 0, + NHA_ID = 1, + NHA_GROUP = 2, + NHA_GROUP_TYPE = 3, + NHA_BLACKHOLE = 4, + NHA_OIF = 5, + NHA_GATEWAY = 6, + NHA_ENCAP_TYPE = 7, + NHA_ENCAP = 8, + NHA_GROUPS = 9, + NHA_MASTER = 10, + NHA_FDB = 11, + NHA_RES_GROUP = 12, + NHA_RES_BUCKET = 13, + NHA_OP_FLAGS = 14, + NHA_GROUP_STATS = 15, + NHA_HW_STATS_ENABLE = 16, + NHA_HW_STATS_USED = 17, + __NHA_MAX = 18, +}; + +enum { + NHA_RES_GROUP_UNSPEC = 0, + NHA_RES_GROUP_PAD = 0, + NHA_RES_GROUP_BUCKETS = 1, + NHA_RES_GROUP_IDLE_TIMER = 2, + NHA_RES_GROUP_UNBALANCED_TIMER = 3, + NHA_RES_GROUP_UNBALANCED_TIME = 4, + __NHA_RES_GROUP_MAX = 5, +}; + +enum { + NHA_RES_BUCKET_UNSPEC = 0, + NHA_RES_BUCKET_PAD = 0, + NHA_RES_BUCKET_INDEX = 1, + NHA_RES_BUCKET_IDLE_TIME = 2, + NHA_RES_BUCKET_NH_ID = 3, + __NHA_RES_BUCKET_MAX = 4, +}; + +enum { + NHA_GROUP_STATS_UNSPEC = 0, + NHA_GROUP_STATS_ENTRY = 1, + __NHA_GROUP_STATS_MAX = 2, +}; + +enum { + NHA_GROUP_STATS_ENTRY_UNSPEC = 0, + NHA_GROUP_STATS_ENTRY_ID = 1, + NHA_GROUP_STATS_ENTRY_PACKETS = 2, + NHA_GROUP_STATS_ENTRY_PACKETS_HW = 3, + __NHA_GROUP_STATS_ENTRY_MAX = 4, +}; + +struct nh_config { + u32 nh_id; + u8 nh_family; + u8 nh_protocol; + u8 nh_blackhole; + u8 nh_fdb; + u32 nh_flags; + int nh_ifindex; + struct net_device *dev; + union { + __be32 ipv4; + struct in6_addr ipv6; + } gw; + struct nlattr *nh_grp; + u16 nh_grp_type; + u16 nh_grp_res_num_buckets; + long unsigned int nh_grp_res_idle_timer; + long unsigned int nh_grp_res_unbalanced_timer; + bool nh_grp_res_has_num_buckets; + bool nh_grp_res_has_idle_timer; + bool nh_grp_res_has_unbalanced_timer; + bool nh_hw_stats; + struct nlattr *nh_encap; + u16 nh_encap_type; + u32 nlflags; + struct nl_info nlinfo; +}; + +enum nexthop_event_type { + NEXTHOP_EVENT_DEL = 0, + NEXTHOP_EVENT_REPLACE = 1, + NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE = 2, + NEXTHOP_EVENT_BUCKET_REPLACE = 3, + NEXTHOP_EVENT_HW_STATS_REPORT_DELTA = 4, +}; + +enum nh_notifier_info_type { + NH_NOTIFIER_INFO_TYPE_SINGLE = 0, + NH_NOTIFIER_INFO_TYPE_GRP = 1, + NH_NOTIFIER_INFO_TYPE_RES_TABLE = 2, + NH_NOTIFIER_INFO_TYPE_RES_BUCKET = 3, + NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS = 4, +}; + +struct nh_notifier_single_info { + struct net_device *dev; + u8 gw_family; + union { + __be32 ipv4; + struct in6_addr ipv6; + }; + u32 id; + u8 is_reject: 1; + u8 is_fdb: 1; + u8 has_encap: 1; +}; + +struct nh_notifier_grp_entry_info { + u16 weight; + struct nh_notifier_single_info nh; +}; + +struct nh_notifier_grp_info { + u16 num_nh; + bool is_fdb; + bool hw_stats; + struct nh_notifier_grp_entry_info nh_entries[0]; +}; + +struct nh_notifier_res_bucket_info { + u16 bucket_index; + unsigned int idle_timer_ms; + bool force; + struct nh_notifier_single_info old_nh; + struct nh_notifier_single_info new_nh; +}; + +struct nh_notifier_res_table_info { + u16 num_nh_buckets; + bool hw_stats; + struct nh_notifier_single_info nhs[0]; +}; + +struct nh_notifier_grp_hw_stats_entry_info { + u32 id; + long: 32; + u64 packets; +}; + +struct nh_notifier_grp_hw_stats_info { + u16 num_nh; + bool hw_stats_used; + 
long: 32; + struct nh_notifier_grp_hw_stats_entry_info stats[0]; +}; + +struct nh_notifier_info { + struct net *net; + struct netlink_ext_ack *extack; + u32 id; + enum nh_notifier_info_type type; + union { + struct nh_notifier_single_info *nh; + struct nh_notifier_grp_info *nh_grp; + struct nh_notifier_res_table_info *nh_res_table; + struct nh_notifier_res_bucket_info *nh_res_bucket; + struct nh_notifier_grp_hw_stats_info *nh_grp_hw_stats; + }; +}; + +struct nh_dump_filter { + u32 nh_id; + int dev_idx; + int master_idx; + bool group_filter; + bool fdb_filter; + u32 res_bucket_nh_id; + u32 op_flags; +}; + +struct rtm_dump_nh_ctx { + u32 idx; +}; + +struct rtm_dump_res_bucket_ctx { + struct rtm_dump_nh_ctx nh; + u16 bucket_index; +}; + +struct rtm_dump_nexthop_bucket_data { + struct rtm_dump_res_bucket_ctx *ctx; + struct nh_dump_filter filter; +}; + +struct sockaddr_pkt { + short unsigned int spkt_family; + unsigned char spkt_device[14]; + __be16 spkt_protocol; +}; + +struct sockaddr_ll { + short unsigned int sll_family; + __be16 sll_protocol; + int sll_ifindex; + short unsigned int sll_hatype; + unsigned char sll_pkttype; + unsigned char sll_halen; + unsigned char sll_addr[8]; +}; + +struct tpacket_stats { + unsigned int tp_packets; + unsigned int tp_drops; +}; + +struct tpacket_stats_v3 { + unsigned int tp_packets; + unsigned int tp_drops; + unsigned int tp_freeze_q_cnt; +}; + +struct tpacket_rollover_stats { + __u64 tp_all; + __u64 tp_huge; + __u64 tp_failed; +}; + +union tpacket_stats_u { + struct tpacket_stats stats1; + struct tpacket_stats_v3 stats3; +}; + +struct tpacket_auxdata { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; +}; + +struct tpacket_hdr { + long unsigned int tp_status; + unsigned int tp_len; + unsigned int tp_snaplen; + short unsigned int tp_mac; + short unsigned int tp_net; + unsigned int tp_sec; + unsigned int tp_usec; +}; + +struct tpacket2_hdr { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u32 tp_sec; + __u32 tp_nsec; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u8 tp_padding[4]; +}; + +struct tpacket_hdr_variant1 { + __u32 tp_rxhash; + __u32 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u16 tp_padding; +}; + +struct tpacket3_hdr { + __u32 tp_next_offset; + __u32 tp_sec; + __u32 tp_nsec; + __u32 tp_snaplen; + __u32 tp_len; + __u32 tp_status; + __u16 tp_mac; + __u16 tp_net; + union { + struct tpacket_hdr_variant1 hv1; + }; + __u8 tp_padding[8]; +}; + +struct tpacket_bd_ts { + unsigned int ts_sec; + union { + unsigned int ts_usec; + unsigned int ts_nsec; + }; +}; + +struct tpacket_hdr_v1 { + __u32 block_status; + __u32 num_pkts; + __u32 offset_to_first_pkt; + __u32 blk_len; + __u64 seq_num; + struct tpacket_bd_ts ts_first_pkt; + struct tpacket_bd_ts ts_last_pkt; +}; + +union tpacket_bd_header_u { + struct tpacket_hdr_v1 bh1; +}; + +struct tpacket_block_desc { + __u32 version; + __u32 offset_to_priv; + union tpacket_bd_header_u hdr; +}; + +enum tpacket_versions { + TPACKET_V1 = 0, + TPACKET_V2 = 1, + TPACKET_V3 = 2, +}; + +struct tpacket_req { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; +}; + +struct tpacket_req3 { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; + unsigned int tp_retire_blk_tov; + unsigned int tp_sizeof_priv; + unsigned int tp_feature_req_word; +}; + +union tpacket_req_u { + struct 
tpacket_req req; + struct tpacket_req3 req3; +}; + +struct fanout_args { + __u16 type_flags; + __u16 id; + __u32 max_num_members; +}; + +struct packet_mclist { + struct packet_mclist *next; + int ifindex; + int count; + short unsigned int type; + short unsigned int alen; + unsigned char addr[32]; +}; + +struct pgv; + +struct tpacket_kbdq_core { + struct pgv *pkbdq; + unsigned int feature_req_word; + unsigned int hdrlen; + unsigned char reset_pending_on_curr_blk; + unsigned char delete_blk_timer; + short unsigned int kactive_blk_num; + short unsigned int blk_sizeof_priv; + short unsigned int last_kactive_blk_num; + char *pkblk_start; + char *pkblk_end; + int kblk_size; + unsigned int max_frame_len; + unsigned int knum_blocks; + uint64_t knxt_seq_num; + char *prev; + char *nxt_offset; + struct sk_buff *skb; + rwlock_t blk_fill_in_prog_lock; + short unsigned int retire_blk_tov; + short unsigned int version; + long unsigned int tov_in_jiffies; + struct timer_list retire_blk_timer; + long: 32; +}; + +struct pgv { + char *buffer; +}; + +struct packet_ring_buffer { + struct pgv *pg_vec; + unsigned int head; + unsigned int frames_per_block; + unsigned int frame_size; + unsigned int frame_max; + unsigned int pg_vec_order; + unsigned int pg_vec_pages; + unsigned int pg_vec_len; + unsigned int *pending_refcnt; + long: 32; + union { + long unsigned int *rx_owner_map; + struct tpacket_kbdq_core prb_bdqc; + }; +}; + +struct packet_fanout { + possible_net_t net; + unsigned int num_members; + u32 max_num_members; + u16 id; + u8 type; + u8 flags; + union { + atomic_t rr_cur; + struct bpf_prog *bpf_prog; + }; + struct list_head list; + spinlock_t lock; + refcount_t sk_ref; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + struct packet_type prot_hook; + struct sock *arr[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct packet_rollover { + int sock; + atomic_long_t num; + atomic_long_t num_huge; + atomic_long_t num_failed; + long: 32; + long: 32; + long: 32; + long: 32; + u32 history[8]; +}; + +struct packet_sock { + struct sock sk; + struct packet_fanout *fanout; + union tpacket_stats_u stats; + struct packet_ring_buffer rx_ring; + struct packet_ring_buffer tx_ring; + int copy_thresh; + spinlock_t bind_lock; + struct mutex pg_vec_lock; + long unsigned int flags; + int ifindex; + u8 vnet_hdr_sz; + __be16 num; + struct packet_rollover *rollover; + struct packet_mclist *mclist; + atomic_long_t mapped; + enum tpacket_versions tp_version; + unsigned int tp_hdrlen; + unsigned int tp_reserve; + unsigned int tp_tstamp; + struct completion skb_completion; + struct net_device *cached_dev; + long: 32; + long: 32; + struct packet_type prot_hook; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + atomic_t tp_drops; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +enum packet_sock_flags { + PACKET_SOCK_ORIGDEV = 0, + PACKET_SOCK_AUXDATA = 1, + PACKET_SOCK_TX_HAS_OFF = 2, + PACKET_SOCK_TP_LOSS = 3, + PACKET_SOCK_RUNNING = 4, + PACKET_SOCK_PRESSURE = 5, + PACKET_SOCK_QDISC_BYPASS = 6, +}; + +struct packet_mreq_max { + int mr_ifindex; + short unsigned int mr_type; + short unsigned int mr_alen; + unsigned char mr_address[32]; +}; + +union tpacket_uhdr { + struct tpacket_hdr *h1; + struct tpacket2_hdr *h2; + struct tpacket3_hdr *h3; + void *raw; +}; + +struct packet_skb_cb { + union { + struct sockaddr_pkt pkt; + union { + unsigned int origlen; + struct 
sockaddr_ll ll; + }; + } sa; +}; + +struct pt_regs_offset { + const char *name; + int offset; +}; + +struct rtas_ext_event_log_v6 { + u8 byte0; + u8 byte1; + u8 byte2; + u8 byte3; + u8 reserved[8]; + __be32 company_id; + u8 vendor_log[1]; +}; + +struct pseries_errorlog { + __be16 id; + __be16 length; + u8 version; + u8 subtype; + __be16 creator_component; + u8 data[0]; +}; + +struct rtas_filter { + const int buf_idx1; + const int size_idx1; + const int buf_idx2; + const int size_idx2; + const int fixed_size; +}; + +struct rtas_function { + s32 token; + const bool banned_for_syscall_on_le: 1; + const char * const name; + const struct rtas_filter *filter; + struct mutex *lock; +}; + +struct indicator_elem { + __be32 token; + __be32 maxindex; +}; + +enum pcpu_fc { + PCPU_FC_AUTO = 0, + PCPU_FC_EMBED = 1, + PCPU_FC_PAGE = 2, + PCPU_FC_NR = 3, +}; + +struct optimistic_spin_node { + struct optimistic_spin_node *next; + struct optimistic_spin_node *prev; + int locked; + int cpu; +}; + +struct kmsg_dump_iter { + u64 cur_seq; + u64 next_seq; +}; + +struct kmsg_dump_detail { + enum kmsg_dump_reason reason; + const char *description; +}; + +struct kmsg_dumper { + struct list_head list; + void (*dump)(struct kmsg_dumper *, struct kmsg_dump_detail *); + enum kmsg_dump_reason max_reason; + bool registered; +}; + +struct trace_event_raw_console { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_console { + u32 msg; + const void *msg_ptr_; +}; + +typedef void (*btf_trace_console)(void *, const char *, size_t); + +struct console_cmdline { + char name[16]; + int index; + char devname[32]; + bool user_specified; + char *options; +}; + +enum printk_info_flags { + LOG_FORCE_CON = 1, + LOG_NEWLINE = 2, + LOG_CONT = 8, +}; + +enum devkmsg_log_bits { + __DEVKMSG_LOG_BIT_ON = 0, + __DEVKMSG_LOG_BIT_OFF = 1, + __DEVKMSG_LOG_BIT_LOCK = 2, +}; + +enum devkmsg_log_masks { + DEVKMSG_LOG_MASK_ON = 1, + DEVKMSG_LOG_MASK_OFF = 2, + DEVKMSG_LOG_MASK_LOCK = 4, +}; + +enum con_msg_format_flags { + MSG_FORMAT_DEFAULT = 0, + MSG_FORMAT_SYSLOG = 1, +}; + +struct latched_seq { + seqcount_latch_t latch; + long: 32; + u64 val[2]; +}; + +struct devkmsg_user { + atomic64_t seq; + struct ratelimit_state rs; + struct mutex lock; + struct printk_buffers pbufs; +}; + +typedef long unsigned int perf_trace_t[2048]; + +struct bpf_kfunc_desc { + struct btf_func_model func_model; + u32 func_id; + s32 imm; + u16 offset; + long unsigned int addr; +}; + +struct bpf_kfunc_desc_tab { + struct bpf_kfunc_desc descs[256]; + u32 nr_descs; +}; + +struct bpf_kfunc_btf { + struct btf *btf; + struct module *module; + u16 offset; +}; + +struct bpf_kfunc_btf_tab { + struct bpf_kfunc_btf descs[256]; + u32 nr_descs; +}; + +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + +struct bpf_verifier_stack_elem { + struct bpf_verifier_state st; + int insn_idx; + int prev_insn_idx; + struct bpf_verifier_stack_elem *next; + u32 log_pos; +}; + +struct bpf_call_arg_meta { + struct bpf_map *map_ptr; + bool raw_mode; + bool pkt_access; + u8 release_regno; + int regno; + int access_size; + int mem_size; + long: 32; + u64 msize_max_value; + int ref_obj_id; + int dynptr_id; + int map_uid; + int func_id; + struct btf *btf; + u32 btf_id; + struct btf *ret_btf; + u32 ret_btf_id; + u32 subprogno; + struct btf_field *kptr_field; +}; + +struct bpf_kfunc_call_arg_meta { + struct btf *btf; + u32 func_id; + u32 kfunc_flags; + const struct 
btf_type *func_proto; + const char *func_name; + u32 ref_obj_id; + u8 release_regno; + bool r0_rdonly; + u32 ret_btf_id; + u64 r0_size; + u32 subprogno; + long: 32; + struct { + u64 value; + bool found; + long: 32; + } arg_constant; + struct btf *arg_btf; + u32 arg_btf_id; + bool arg_owning_ref; + struct { + struct btf_field *field; + } arg_list_head; + struct { + struct btf_field *field; + } arg_rbtree_root; + struct { + enum bpf_dynptr_type type; + u32 id; + u32 ref_obj_id; + } initialized_dynptr; + struct { + u8 spi; + u8 frameno; + } iter; + struct { + struct bpf_map *ptr; + int uid; + } map; + long: 32; + u64 mem_size; +}; + +enum reg_arg_type { + SRC_OP = 0, + DST_OP = 1, + DST_OP_NO_MARK = 2, +}; + +struct linked_reg { + u8 frameno; + union { + u8 spi; + u8 regno; + }; + bool is_reg; +}; + +struct linked_regs { + int cnt; + struct linked_reg entries[6]; +}; + +enum bpf_access_src { + ACCESS_DIRECT = 1, + ACCESS_HELPER = 2, +}; + +struct task_struct__safe_rcu { + const cpumask_t *cpus_ptr; + struct css_set *cgroups; + struct task_struct *real_parent; + struct task_struct *group_leader; +}; + +struct cgroup__safe_rcu { + struct kernfs_node *kn; +}; + +struct css_set__safe_rcu { + struct cgroup *dfl_cgrp; +}; + +struct mm_struct__safe_rcu_or_null { + struct file *exe_file; +}; + +struct sk_buff__safe_rcu_or_null { + struct sock *sk; +}; + +struct request_sock__safe_rcu_or_null { + struct sock *sk; +}; + +struct bpf_iter_meta__safe_trusted { + struct seq_file *seq; +}; + +struct bpf_iter__task__safe_trusted { + struct bpf_iter_meta *meta; + struct task_struct *task; +}; + +struct linux_binprm__safe_trusted { + struct file *file; +}; + +struct file__safe_trusted { + struct inode *f_inode; +}; + +struct dentry__safe_trusted { + struct inode *d_inode; +}; + +struct socket__safe_trusted_or_null { + struct sock *sk; +}; + +struct bpf_reg_types { + const enum bpf_reg_type types[10]; + u32 *btf_id; +}; + +enum { + AT_PKT_END = -1, + BEYOND_PKT_END = -2, +}; + +typedef int (*set_callee_state_fn)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *, int); + +enum { + KF_ARG_DYNPTR_ID = 0, + KF_ARG_LIST_HEAD_ID = 1, + KF_ARG_LIST_NODE_ID = 2, + KF_ARG_RB_ROOT_ID = 3, + KF_ARG_RB_NODE_ID = 4, + KF_ARG_WORKQUEUE_ID = 5, +}; + +enum kfunc_ptr_arg_type { + KF_ARG_PTR_TO_CTX = 0, + KF_ARG_PTR_TO_ALLOC_BTF_ID = 1, + KF_ARG_PTR_TO_REFCOUNTED_KPTR = 2, + KF_ARG_PTR_TO_DYNPTR = 3, + KF_ARG_PTR_TO_ITER = 4, + KF_ARG_PTR_TO_LIST_HEAD = 5, + KF_ARG_PTR_TO_LIST_NODE = 6, + KF_ARG_PTR_TO_BTF_ID = 7, + KF_ARG_PTR_TO_MEM = 8, + KF_ARG_PTR_TO_MEM_SIZE = 9, + KF_ARG_PTR_TO_CALLBACK = 10, + KF_ARG_PTR_TO_RB_ROOT = 11, + KF_ARG_PTR_TO_RB_NODE = 12, + KF_ARG_PTR_TO_NULL = 13, + KF_ARG_PTR_TO_CONST_STR = 14, + KF_ARG_PTR_TO_MAP = 15, + KF_ARG_PTR_TO_WORKQUEUE = 16, +}; + +enum special_kfunc_type { + KF_bpf_obj_new_impl = 0, + KF_bpf_obj_drop_impl = 1, + KF_bpf_refcount_acquire_impl = 2, + KF_bpf_list_push_front_impl = 3, + KF_bpf_list_push_back_impl = 4, + KF_bpf_list_pop_front = 5, + KF_bpf_list_pop_back = 6, + KF_bpf_cast_to_kern_ctx = 7, + KF_bpf_rdonly_cast = 8, + KF_bpf_rcu_read_lock = 9, + KF_bpf_rcu_read_unlock = 10, + KF_bpf_rbtree_remove = 11, + KF_bpf_rbtree_add_impl = 12, + KF_bpf_rbtree_first = 13, + KF_bpf_dynptr_from_skb = 14, + KF_bpf_dynptr_from_xdp = 15, + KF_bpf_dynptr_slice = 16, + KF_bpf_dynptr_slice_rdwr = 17, + KF_bpf_dynptr_clone = 18, + KF_bpf_percpu_obj_new_impl = 19, + KF_bpf_percpu_obj_drop_impl = 20, + KF_bpf_throw = 21, + KF_bpf_wq_set_callback_impl = 22, + 
KF_bpf_preempt_disable = 23, + KF_bpf_preempt_enable = 24, + KF_bpf_iter_css_task_new = 25, + KF_bpf_session_cookie = 26, + KF_bpf_get_kmem_cache = 27, +}; + +enum { + REASON_BOUNDS = -1, + REASON_TYPE = -2, + REASON_PATHS = -3, + REASON_LIMIT = -4, + REASON_STACK = -5, +}; + +struct bpf_sanitize_info { + struct bpf_insn_aux_data aux; + bool mask_to_left; + long: 32; +}; + +enum { + DISCOVERED = 16, + EXPLORED = 32, + FALLTHROUGH = 1, + BRANCH___2 = 2, +}; + +enum { + DONE_EXPLORING = 0, + KEEP_EXPLORING = 1, +}; + +enum exact_level { + NOT_EXACT = 0, + EXACT = 1, + RANGE_WITHIN = 2, +}; + +struct bpf_iter; + +struct trace_event_raw_iomap_readpage_class { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + int nr_pages; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iomap_range_class { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + loff_t size; + loff_t offset; + u64 length; + char __data[0]; +}; + +struct trace_event_raw_iomap_class { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_writepage_map { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + u64 pos; + u64 dirty_len; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_iter { + struct trace_entry ent; + dev_t dev; + long: 32; + u64 ino; + loff_t pos; + u64 length; + s64 processed; + unsigned int flags; + const void *ops; + long unsigned int caller; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iomap_dio_rw_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + size_t count; + size_t done_before; + int ki_flags; + unsigned int dio_flags; + bool aio; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_iomap_dio_complete { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t isize; + loff_t pos; + int ki_flags; + bool aio; + int error; + ssize_t ret; + char __data[0]; +}; + +struct trace_event_data_offsets_iomap_readpage_class {}; + +struct trace_event_data_offsets_iomap_range_class {}; + +struct trace_event_data_offsets_iomap_class {}; + +struct trace_event_data_offsets_iomap_writepage_map {}; + +struct trace_event_data_offsets_iomap_iter {}; + +struct trace_event_data_offsets_iomap_dio_rw_begin {}; + +struct trace_event_data_offsets_iomap_dio_complete {}; + +typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_release_folio)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_invalidate_folio)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_invalidate_fail)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_dio_rw_queued)(void *, struct inode *, loff_t, u64); + +typedef void (*btf_trace_iomap_iter_dstmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_iter_srcmap)(void *, struct inode *, struct iomap *); + +typedef void (*btf_trace_iomap_writepage_map)(void *, struct inode *, u64, unsigned int, struct iomap *); + +typedef void (*btf_trace_iomap_iter)(void *, struct iomap_iter *, const void *, long unsigned int); + +typedef void (*btf_trace_iomap_dio_rw_begin)(void *, struct kiocb *, 
struct iov_iter *, unsigned int, size_t); + +typedef void (*btf_trace_iomap_dio_complete)(void *, struct kiocb *, int, ssize_t); + +struct fat_fid { + u32 i_gen; + u32 i_pos_low; + u16 i_pos_hi; + u16 parent_i_pos_hi; + u32 parent_i_pos_low; + u32 parent_i_gen; +}; + +enum btrfs_delayed_ref_flags { + BTRFS_DELAYED_REFS_FLUSHING = 0, +}; + +struct cmac_tfm_ctx { + struct crypto_cipher *child; + long: 32; + __be64 consts[0]; +}; + +struct cmac_desc_ctx { + unsigned int len; + u8 odds[0]; +}; + +struct gcm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn ghash; +}; + +struct crypto_gcm_ctx { + struct crypto_skcipher *ctr; + struct crypto_ahash *ghash; +}; + +struct crypto_rfc4106_ctx { + struct crypto_aead *child; + u8 nonce[4]; +}; + +struct crypto_rfc4106_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct aead_request subreq; +}; + +struct crypto_rfc4543_instance_ctx { + struct crypto_aead_spawn aead; +}; + +struct crypto_rfc4543_ctx { + struct crypto_aead *child; + struct crypto_sync_skcipher *null; + u8 nonce[4]; +}; + +struct crypto_rfc4543_req_ctx { + struct aead_request subreq; +}; + +struct crypto_gcm_ghash_ctx { + unsigned int cryptlen; + struct scatterlist *src; + int (*complete)(struct aead_request *, u32); +}; + +struct crypto_gcm_req_priv_ctx { + u8 iv[16]; + u8 auth_tag[16]; + u8 iauth_tag[16]; + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct scatterlist sg; + struct crypto_gcm_ghash_ctx ghash_ctx; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + } u; +}; + +enum bio_merge_status { + BIO_MERGE_OK = 0, + BIO_MERGE_NONE = 1, + BIO_MERGE_FAILED = 2, +}; + +struct io_ev_fd { + struct eventfd_ctx *cq_ev_fd; + unsigned int eventfd_async; + unsigned int last_cq_tail; + refcount_t refs; + atomic_t ops; + struct callback_head rcu; +}; + +enum { + IO_EVENTFD_OP_SIGNAL_BIT = 0, +}; + +typedef s32 compat_ssize_t; + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +typedef struct tree_desc_s tree_desc; + +typedef void (*of_init_fn_1)(struct device_node *); + +struct clk_fixed_rate { + struct clk_hw hw; + long unsigned int fixed_rate; + long unsigned int fixed_accuracy; + long unsigned int flags; +}; + +struct probe { + struct probe *next; + dev_t dev; + long unsigned int range; + struct module *owner; + kobj_probe_t *get; + int (*lock)(dev_t, void *); + void *data; +}; + +struct kobj_map { + struct probe *probes[255]; + struct mutex *lock; +}; + +enum fw_opt { + FW_OPT_UEVENT = 1, + FW_OPT_NOWAIT = 2, + FW_OPT_USERHELPER = 4, + FW_OPT_NO_WARN = 8, + FW_OPT_NOCACHE = 16, + FW_OPT_NOFALLBACK_SYSFS = 32, + FW_OPT_FALLBACK_PLATFORM = 64, + FW_OPT_PARTIAL = 128, +}; + +enum fw_status { + FW_STATUS_UNKNOWN = 0, + FW_STATUS_LOADING = 1, + FW_STATUS_DONE = 2, + FW_STATUS_ABORTED = 3, +}; + +struct fw_state { + struct completion completion; + enum fw_status status; +}; + +struct firmware_cache; + +struct fw_priv { + struct kref ref; + struct list_head list; + struct firmware_cache *fwc; + struct fw_state fw_st; + void *data; + size_t size; + size_t allocated_size; + size_t offset; + u32 opt_flags; + const char *fw_name; +}; + +struct firmware_cache { + spinlock_t lock; + struct list_head head; + int state; +}; + +struct firmware_work { + struct work_struct work; + struct module *module; + const char *name; + struct device *device; + void *context; + void (*cont)(const struct firmware *, void *); + u32 opt_flags; +}; + +enum nvme_pr_type { + NVME_PR_WRITE_EXCLUSIVE = 1, + 
NVME_PR_EXCLUSIVE_ACCESS = 2, + NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +enum nvme_eds { + NVME_EXTENDED_DATA_STRUCT = 1, +}; + +struct nvme_registered_ctrl { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 hostid; + __le64 rkey; +}; + +struct nvme_reservation_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[14]; + struct nvme_registered_ctrl regctl_ds[0]; +}; + +struct nvme_registered_ctrl_ext { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 rkey; + __u8 hostid[16]; + __u8 rsvd32[32]; +}; + +struct nvme_reservation_status_ext { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[14]; + __u8 rsvd24[40]; + struct nvme_registered_ctrl_ext regctl_eds[0]; +}; + +enum { + NVME_SCT_GENERIC = 0, + NVME_SC_SUCCESS = 0, + NVME_SC_INVALID_OPCODE = 1, + NVME_SC_INVALID_FIELD = 2, + NVME_SC_CMDID_CONFLICT = 3, + NVME_SC_DATA_XFER_ERROR = 4, + NVME_SC_POWER_LOSS = 5, + NVME_SC_INTERNAL = 6, + NVME_SC_ABORT_REQ = 7, + NVME_SC_ABORT_QUEUE = 8, + NVME_SC_FUSED_FAIL = 9, + NVME_SC_FUSED_MISSING = 10, + NVME_SC_INVALID_NS = 11, + NVME_SC_CMD_SEQ_ERROR = 12, + NVME_SC_SGL_INVALID_LAST = 13, + NVME_SC_SGL_INVALID_COUNT = 14, + NVME_SC_SGL_INVALID_DATA = 15, + NVME_SC_SGL_INVALID_METADATA = 16, + NVME_SC_SGL_INVALID_TYPE = 17, + NVME_SC_CMB_INVALID_USE = 18, + NVME_SC_PRP_INVALID_OFFSET = 19, + NVME_SC_ATOMIC_WU_EXCEEDED = 20, + NVME_SC_OP_DENIED = 21, + NVME_SC_SGL_INVALID_OFFSET = 22, + NVME_SC_RESERVED = 23, + NVME_SC_HOST_ID_INCONSIST = 24, + NVME_SC_KA_TIMEOUT_EXPIRED = 25, + NVME_SC_KA_TIMEOUT_INVALID = 26, + NVME_SC_ABORTED_PREEMPT_ABORT = 27, + NVME_SC_SANITIZE_FAILED = 28, + NVME_SC_SANITIZE_IN_PROGRESS = 29, + NVME_SC_SGL_INVALID_GRANULARITY = 30, + NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 31, + NVME_SC_NS_WRITE_PROTECTED = 32, + NVME_SC_CMD_INTERRUPTED = 33, + NVME_SC_TRANSIENT_TR_ERR = 34, + NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 36, + NVME_SC_INVALID_IO_CMD_SET = 44, + NVME_SC_LBA_RANGE = 128, + NVME_SC_CAP_EXCEEDED = 129, + NVME_SC_NS_NOT_READY = 130, + NVME_SC_RESERVATION_CONFLICT = 131, + NVME_SC_FORMAT_IN_PROGRESS = 132, + NVME_SCT_COMMAND_SPECIFIC = 256, + NVME_SC_CQ_INVALID = 256, + NVME_SC_QID_INVALID = 257, + NVME_SC_QUEUE_SIZE = 258, + NVME_SC_ABORT_LIMIT = 259, + NVME_SC_ABORT_MISSING = 260, + NVME_SC_ASYNC_LIMIT = 261, + NVME_SC_FIRMWARE_SLOT = 262, + NVME_SC_FIRMWARE_IMAGE = 263, + NVME_SC_INVALID_VECTOR = 264, + NVME_SC_INVALID_LOG_PAGE = 265, + NVME_SC_INVALID_FORMAT = 266, + NVME_SC_FW_NEEDS_CONV_RESET = 267, + NVME_SC_INVALID_QUEUE = 268, + NVME_SC_FEATURE_NOT_SAVEABLE = 269, + NVME_SC_FEATURE_NOT_CHANGEABLE = 270, + NVME_SC_FEATURE_NOT_PER_NS = 271, + NVME_SC_FW_NEEDS_SUBSYS_RESET = 272, + NVME_SC_FW_NEEDS_RESET = 273, + NVME_SC_FW_NEEDS_MAX_TIME = 274, + NVME_SC_FW_ACTIVATE_PROHIBITED = 275, + NVME_SC_OVERLAPPING_RANGE = 276, + NVME_SC_NS_INSUFFICIENT_CAP = 277, + NVME_SC_NS_ID_UNAVAILABLE = 278, + NVME_SC_NS_ALREADY_ATTACHED = 280, + NVME_SC_NS_IS_PRIVATE = 281, + NVME_SC_NS_NOT_ATTACHED = 282, + NVME_SC_THIN_PROV_NOT_SUPP = 283, + NVME_SC_CTRL_LIST_INVALID = 284, + NVME_SC_SELT_TEST_IN_PROGRESS = 285, + NVME_SC_BP_WRITE_PROHIBITED = 286, + NVME_SC_CTRL_ID_INVALID = 287, + NVME_SC_SEC_CTRL_STATE_INVALID = 288, + NVME_SC_CTRL_RES_NUM_INVALID = 289, + NVME_SC_RES_ID_INVALID = 290, + NVME_SC_PMR_SAN_PROHIBITED = 291, + NVME_SC_ANA_GROUP_ID_INVALID = 292, + 
NVME_SC_ANA_ATTACH_FAILED = 293, + NVME_SC_BAD_ATTRIBUTES = 384, + NVME_SC_INVALID_PI = 385, + NVME_SC_READ_ONLY = 386, + NVME_SC_ONCS_NOT_SUPPORTED = 387, + NVME_SC_CONNECT_FORMAT = 384, + NVME_SC_CONNECT_CTRL_BUSY = 385, + NVME_SC_CONNECT_INVALID_PARAM = 386, + NVME_SC_CONNECT_RESTART_DISC = 387, + NVME_SC_CONNECT_INVALID_HOST = 388, + NVME_SC_DISCOVERY_RESTART = 400, + NVME_SC_AUTH_REQUIRED = 401, + NVME_SC_ZONE_BOUNDARY_ERROR = 440, + NVME_SC_ZONE_FULL = 441, + NVME_SC_ZONE_READ_ONLY = 442, + NVME_SC_ZONE_OFFLINE = 443, + NVME_SC_ZONE_INVALID_WRITE = 444, + NVME_SC_ZONE_TOO_MANY_ACTIVE = 445, + NVME_SC_ZONE_TOO_MANY_OPEN = 446, + NVME_SC_ZONE_INVALID_TRANSITION = 447, + NVME_SCT_MEDIA_ERROR = 512, + NVME_SC_WRITE_FAULT = 640, + NVME_SC_READ_ERROR = 641, + NVME_SC_GUARD_CHECK = 642, + NVME_SC_APPTAG_CHECK = 643, + NVME_SC_REFTAG_CHECK = 644, + NVME_SC_COMPARE_FAILED = 645, + NVME_SC_ACCESS_DENIED = 646, + NVME_SC_UNWRITTEN_BLOCK = 647, + NVME_SCT_PATH = 768, + NVME_SC_INTERNAL_PATH_ERROR = 768, + NVME_SC_ANA_PERSISTENT_LOSS = 769, + NVME_SC_ANA_INACCESSIBLE = 770, + NVME_SC_ANA_TRANSITION = 771, + NVME_SC_CTRL_PATH_ERROR = 864, + NVME_SC_HOST_PATH_ERROR = 880, + NVME_SC_HOST_ABORTED_CMD = 881, + NVME_SC_MASK = 255, + NVME_SCT_MASK = 1792, + NVME_SCT_SC_MASK = 2047, + NVME_STATUS_CRD = 6144, + NVME_STATUS_MORE = 8192, + NVME_STATUS_DNR = 16384, +}; + +struct nvmet_pr_register_data { + __le64 crkey; + __le64 nrkey; +}; + +struct nvmet_pr_acquire_data { + __le64 crkey; + __le64 prkey; +}; + +struct nvmet_pr_release_data { + __le64 crkey; +}; + +enum nvme_pr_register_action { + NVME_PR_REGISTER_ACT_REG = 0, + NVME_PR_REGISTER_ACT_UNREG = 1, + NVME_PR_REGISTER_ACT_REPLACE = 2, +}; + +enum nvme_pr_acquire_action { + NVME_PR_ACQUIRE_ACT_ACQUIRE = 0, + NVME_PR_ACQUIRE_ACT_PREEMPT = 1, + NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 2, +}; + +enum nvme_pr_release_action { + NVME_PR_RELEASE_ACT_RELEASE = 0, + NVME_PR_RELEASE_ACT_CLEAR = 1, +}; + +enum nvme_pr_change_ptpl { + NVME_PR_CPTPL_NO_CHANGE = 0, + NVME_PR_CPTPL_RESV = 1073741824, + NVME_PR_CPTPL_CLEARED = -2147483648, + NVME_PR_CPTPL_PERSIST = -1073741824, +}; + +enum usb_phy_interface { + USBPHY_INTERFACE_MODE_UNKNOWN = 0, + USBPHY_INTERFACE_MODE_UTMI = 1, + USBPHY_INTERFACE_MODE_UTMIW = 2, + USBPHY_INTERFACE_MODE_ULPI = 3, + USBPHY_INTERFACE_MODE_SERIAL = 4, + USBPHY_INTERFACE_MODE_HSIC = 5, +}; + +struct ignore_entry { + u16 vid; + u16 pid; + u16 bcdmin; + u16 bcdmax; +}; + +struct firmware_map_entry { + u64 start; + u64 end; + const char *type; + struct list_head list; + struct kobject kobj; +}; + +struct memmap_attribute { + struct attribute attr; + ssize_t (*show)(struct firmware_map_entry *, char *); +}; + +struct of_bus___2 { + void (*count_cells)(const void *, int, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int); + int (*translate)(__be32 *, u64, int); +}; + +struct nvmem_cell_lookup { + const char *nvmem_name; + const char *cell_name; + const char *dev_id; + const char *con_id; + struct list_head node; +}; + +enum { + NVMEM_ADD = 1, + NVMEM_REMOVE = 2, + NVMEM_CELL_ADD = 3, + NVMEM_CELL_REMOVE = 4, + NVMEM_LAYOUT_ADD = 5, + NVMEM_LAYOUT_REMOVE = 6, +}; + +struct nvmem_config { + struct device *dev; + const char *name; + int id; + struct module *owner; + const struct nvmem_cell_info *cells; + int ncells; + bool add_legacy_fixed_of_cells; + void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + enum nvmem_type type; + bool 
read_only; + bool root_only; + bool ignore_wp; + struct nvmem_layout *layout; + struct device_node *of_node; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; + bool compat; + struct device *base_dev; +}; + +struct nvmem_cell_table { + const char *nvmem_name; + const struct nvmem_cell_info *cells; + size_t ncells; + struct list_head node; +}; + +struct nvmem_cell_entry { + const char *name; + int offset; + size_t raw_len; + int bytes; + int bit_offset; + int nbits; + nvmem_cell_post_process_t read_post_process; + void *priv; + struct device_node *np; + struct nvmem_device *nvmem; + struct list_head node; +}; + +struct nvmem_cell { + struct nvmem_cell_entry *entry; + const char *id; + int index; +}; + +enum { + SK_DIAG_BPF_STORAGE_REQ_NONE = 0, + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1, + __SK_DIAG_BPF_STORAGE_REQ_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_REP_NONE = 0, + SK_DIAG_BPF_STORAGE = 1, + __SK_DIAG_BPF_STORAGE_REP_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_NONE = 0, + SK_DIAG_BPF_STORAGE_PAD = 1, + SK_DIAG_BPF_STORAGE_MAP_ID = 2, + SK_DIAG_BPF_STORAGE_MAP_VALUE = 3, + __SK_DIAG_BPF_STORAGE_MAX = 4, +}; + +typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *); + +typedef u64 (*btf_bpf_sk_storage_get_tracing)(struct bpf_map *, struct sock *, void *, u64, gfp_t); + +typedef u64 (*btf_bpf_sk_storage_delete_tracing)(struct bpf_map *, struct sock *); + +struct bpf_sk_storage_diag { + u32 nr_maps; + struct bpf_map *maps[0]; +}; + +struct bpf_iter_seq_sk_storage_map_info { + struct bpf_map *map; + unsigned int bucket_id; + unsigned int skip_elems; +}; + +struct bpf_iter__bpf_sk_storage_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + struct sock *sk; + }; + union { + void *value; + }; +}; + +enum ethtool_supported_ring_param { + ETHTOOL_RING_USE_RX_BUF_LEN = 1, + ETHTOOL_RING_USE_CQE_SIZE = 2, + ETHTOOL_RING_USE_TX_PUSH = 4, + ETHTOOL_RING_USE_RX_PUSH = 8, + ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = 16, + ETHTOOL_RING_USE_TCP_DATA_SPLIT = 32, +}; + +enum { + ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, + ETHTOOL_TCP_DATA_SPLIT_DISABLED = 1, + ETHTOOL_TCP_DATA_SPLIT_ENABLED = 2, +}; + +struct rings_reply_data { + struct ethnl_reply_data base; + struct ethtool_ringparam ringparam; + struct kernel_ethtool_ringparam kernel_ringparam; + u32 supported_ring_params; +}; + +struct nft_counter { + u64_stats_t bytes; + u64_stats_t packets; +}; + +struct nft_counter_tot { + s64 bytes; + s64 packets; +}; + +struct nft_counter_percpu_priv { + struct nft_counter *counter; +}; + +struct ip6_fraglist_iter { + struct ipv6hdr *tmp_hdr; + struct sk_buff *frag; + int offset; + unsigned int hlen; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_frag_state { + u8 *prevhdr; + unsigned int hlen; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + int hroom; + int troom; + __be32 frag_id; + u8 nexthdr; +}; + +enum sysctl_writes_mode { + SYSCTL_WRITES_LEGACY = -1, + SYSCTL_WRITES_WARN = 0, + SYSCTL_WRITES_STRICT = 1, +}; + +struct do_proc_dointvec_minmax_conv_param { + int *min; + int *max; +}; + +struct do_proc_douintvec_minmax_conv_param { + unsigned int *min; + unsigned int *max; +}; + +struct module_sect_attr { + struct bin_attribute battr; + long unsigned int address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; + struct 
module_sect_attr attrs[0]; +}; + +struct module_notes_attrs { + struct kobject *dir; + unsigned int notes; + struct bin_attribute attrs[0]; +}; + +struct posix_clock_desc { + struct file *fp; + struct posix_clock *clk; +}; + +struct trace_export { + struct trace_export *next; + void (*write)(struct trace_export *, const void *, unsigned int); + int flags; +}; + +enum trace_iter_flags { + TRACE_FILE_LAT_FMT = 1, + TRACE_FILE_ANNOTATE = 2, + TRACE_FILE_TIME_IN_NS = 4, +}; + +struct ftrace_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; +}; + +struct stack_entry { + struct trace_entry ent; + int size; + long unsigned int caller[0]; +}; + +struct bprint_entry { + struct trace_entry ent; + long unsigned int ip; + const char *fmt; + u32 buf[0]; +}; + +struct print_entry { + struct trace_entry ent; + long unsigned int ip; + char buf[0]; +}; + +struct raw_data_entry { + struct trace_entry ent; + unsigned int id; + char buf[0]; +}; + +struct bputs_entry { + struct trace_entry ent; + long unsigned int ip; + const char *str; +}; + +struct func_repeats_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; + u16 count; + u16 top_delta_ts; + u32 bottom_delta_ts; +}; + +typedef bool (*cond_update_fn_t)(struct trace_array *, void *); + +struct trace_min_max_param { + struct mutex *lock; + u64 *val; + u64 *min; + u64 *max; +}; + +struct pipe_wait { + struct trace_iterator *iter; + int wait_index; +}; + +struct ftrace_stack { + long unsigned int calls[1024]; +}; + +struct ftrace_stacks { + struct ftrace_stack stacks[4]; +}; + +struct trace_buffer_struct { + int nesting; + char buffer[4096]; +}; + +struct ftrace_buffer_info { + struct trace_iterator iter; + void *spare; + unsigned int spare_cpu; + unsigned int spare_size; + unsigned int read; +}; + +struct err_info { + const char **errs; + u8 type; + u16 pos; + u64 ts; +}; + +struct tracing_log_err { + struct list_head list; + struct err_info info; + char loc[128]; + char *cmd; + long: 32; +}; + +struct buffer_ref { + struct trace_buffer *buffer; + void *page; + int cpu; + refcount_t refcount; +}; + +struct bpf_crypto_type_list { + const struct bpf_crypto_type *type; + struct list_head list; +}; + +struct bpf_crypto_params { + char type[14]; + u8 reserved[2]; + char algo[128]; + u8 key[256]; + u32 key_len; + u32 authsize; +}; + +struct bpf_crypto_ctx { + const struct bpf_crypto_type *type; + void *tfm; + u32 siv_len; + struct callback_head rcu; + refcount_t usage; +}; + +typedef int folio_walk_flags_t; + +enum folio_walk_level { + FW_LEVEL_PTE = 0, + FW_LEVEL_PMD = 1, + FW_LEVEL_PUD = 2, +}; + +struct folio_walk { + struct page *page; + enum folio_walk_level level; + union { + pte_t *ptep; + pud_t *pudp; + pmd_t *pmdp; + }; + union { + pte_t pte; + pud_t pud; + pmd_t pmd; + }; + struct vm_area_struct *vma; + spinlock_t *ptl; +}; + +struct zpool { + struct zpool_driver *driver; + void *pool; +}; + +struct saved { + struct path link; + struct delayed_call done; + const char *name; + unsigned int seq; +}; + +struct nameidata { + struct path path; + struct qstr last; + struct path root; + struct inode *inode; + unsigned int flags; + unsigned int state; + unsigned int seq; + unsigned int next_seq; + unsigned int m_seq; + unsigned int r_seq; + int last_type; + unsigned int depth; + int total_link_count; + struct saved *stack; + struct saved internal[2]; + struct filename *name; + const char *pathname; + struct nameidata *saved; + unsigned int root_seq; + int dfd; + vfsuid_t dir_vfsuid; + 
umode_t dir_mode; +}; + +struct renamedata { + struct mnt_idmap *old_mnt_idmap; + struct inode *old_dir; + struct dentry *old_dentry; + struct mnt_idmap *new_mnt_idmap; + struct inode *new_dir; + struct dentry *new_dentry; + struct inode **delegated_inode; + unsigned int flags; +}; + +enum { + LAST_NORM = 0, + LAST_ROOT = 1, + LAST_DOT = 2, + LAST_DOTDOT = 3, +}; + +enum { + WALK_TRAILING = 1, + WALK_MORE = 2, + WALK_NOFOLLOW = 4, +}; + +struct timerfd_ctx { + union { + struct hrtimer tmr; + struct alarm alarm; + } t; + ktime_t tintv; + ktime_t moffs; + wait_queue_head_t wqh; + long: 32; + u64 ticks; + int clockid; + short unsigned int expired; + short unsigned int settime_flags; + struct callback_head rcu; + struct list_head clist; + spinlock_t cancel_lock; + bool might_cancel; +}; + +typedef short unsigned int __kernel_uid16_t; + +typedef short unsigned int __kernel_gid16_t; + +typedef __kernel_uid16_t uid16_t; + +typedef __kernel_gid16_t gid16_t; + +struct ext4_xattr_ibody_header { + __le32 h_magic; +}; + +struct ext4_xattr_inode_array { + unsigned int count; + struct inode *inodes[0]; +}; + +struct mpage_da_data { + struct inode *inode; + struct writeback_control *wbc; + unsigned int can_map: 1; + long unsigned int first_page; + long unsigned int next_page; + long unsigned int last_page; + struct ext4_map_blocks map; + struct ext4_io_submit io_submit; + unsigned int do_map: 1; + unsigned int scanned_until_end: 1; + unsigned int journalled_more_data: 1; + long: 32; +}; + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC = 0, + CRYPTO_AUTHENC_KEYA_PARAM = 1, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +struct authenc_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; + unsigned int reqoff; +}; + +struct crypto_authenc_ctx { + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + struct crypto_sync_skcipher *null; +}; + +struct authenc_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[0]; +}; + +struct bsg_device { + struct request_queue *queue; + long: 32; + struct device device; + struct cdev cdev; + int max_queue; + unsigned int timeout; + unsigned int reserved_size; + bsg_sg_io_fn *sg_io_fn; + long: 32; +}; + +enum { + IO_SQ_THREAD_SHOULD_STOP = 0, + IO_SQ_THREAD_SHOULD_PARK = 1, +}; + +typedef ZSTD_CCtx zstd_cctx; + +typedef ZSTD_CDict zstd_cdict; + +struct ddebug_class_param { + union { + long unsigned int *bits; + unsigned int *lvl; + }; + char flags[8]; + const struct ddebug_class_map *map; +}; + +enum ib_uverbs_write_cmds { + IB_USER_VERBS_CMD_GET_CONTEXT = 0, + IB_USER_VERBS_CMD_QUERY_DEVICE = 1, + IB_USER_VERBS_CMD_QUERY_PORT = 2, + IB_USER_VERBS_CMD_ALLOC_PD = 3, + IB_USER_VERBS_CMD_DEALLOC_PD = 4, + IB_USER_VERBS_CMD_CREATE_AH = 5, + IB_USER_VERBS_CMD_MODIFY_AH = 6, + IB_USER_VERBS_CMD_QUERY_AH = 7, + IB_USER_VERBS_CMD_DESTROY_AH = 8, + IB_USER_VERBS_CMD_REG_MR = 9, + IB_USER_VERBS_CMD_REG_SMR = 10, + IB_USER_VERBS_CMD_REREG_MR = 11, + IB_USER_VERBS_CMD_QUERY_MR = 12, + IB_USER_VERBS_CMD_DEREG_MR = 13, + IB_USER_VERBS_CMD_ALLOC_MW = 14, + IB_USER_VERBS_CMD_BIND_MW = 15, + IB_USER_VERBS_CMD_DEALLOC_MW = 16, + IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = 17, + IB_USER_VERBS_CMD_CREATE_CQ = 18, + IB_USER_VERBS_CMD_RESIZE_CQ = 19, + IB_USER_VERBS_CMD_DESTROY_CQ = 20, + IB_USER_VERBS_CMD_POLL_CQ = 21, + IB_USER_VERBS_CMD_PEEK_CQ = 22, + IB_USER_VERBS_CMD_REQ_NOTIFY_CQ = 23, + IB_USER_VERBS_CMD_CREATE_QP = 24, + IB_USER_VERBS_CMD_QUERY_QP = 25, + IB_USER_VERBS_CMD_MODIFY_QP = 26, + 
IB_USER_VERBS_CMD_DESTROY_QP = 27, + IB_USER_VERBS_CMD_POST_SEND = 28, + IB_USER_VERBS_CMD_POST_RECV = 29, + IB_USER_VERBS_CMD_ATTACH_MCAST = 30, + IB_USER_VERBS_CMD_DETACH_MCAST = 31, + IB_USER_VERBS_CMD_CREATE_SRQ = 32, + IB_USER_VERBS_CMD_MODIFY_SRQ = 33, + IB_USER_VERBS_CMD_QUERY_SRQ = 34, + IB_USER_VERBS_CMD_DESTROY_SRQ = 35, + IB_USER_VERBS_CMD_POST_SRQ_RECV = 36, + IB_USER_VERBS_CMD_OPEN_XRCD = 37, + IB_USER_VERBS_CMD_CLOSE_XRCD = 38, + IB_USER_VERBS_CMD_CREATE_XSRQ = 39, + IB_USER_VERBS_CMD_OPEN_QP = 40, +}; + +enum ib_uverbs_wc_opcode { + IB_UVERBS_WC_SEND = 0, + IB_UVERBS_WC_RDMA_WRITE = 1, + IB_UVERBS_WC_RDMA_READ = 2, + IB_UVERBS_WC_COMP_SWAP = 3, + IB_UVERBS_WC_FETCH_ADD = 4, + IB_UVERBS_WC_BIND_MW = 5, + IB_UVERBS_WC_LOCAL_INV = 6, + IB_UVERBS_WC_TSO = 7, + IB_UVERBS_WC_FLUSH = 8, + IB_UVERBS_WC_ATOMIC_WRITE = 9, +}; + +enum ib_uverbs_create_qp_mask { + IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1, +}; + +enum ib_uverbs_wr_opcode { + IB_UVERBS_WR_RDMA_WRITE = 0, + IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, + IB_UVERBS_WR_SEND = 2, + IB_UVERBS_WR_SEND_WITH_IMM = 3, + IB_UVERBS_WR_RDMA_READ = 4, + IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, + IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, + IB_UVERBS_WR_LOCAL_INV = 7, + IB_UVERBS_WR_BIND_MW = 8, + IB_UVERBS_WR_SEND_WITH_INV = 9, + IB_UVERBS_WR_TSO = 10, + IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, + IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, + IB_UVERBS_WR_FLUSH = 14, + IB_UVERBS_WR_ATOMIC_WRITE = 15, +}; + +enum ib_uverbs_device_cap_flags { + IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1ULL, + IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 2ULL, + IB_UVERBS_DEVICE_BAD_QKEY_CNTR = 4ULL, + IB_UVERBS_DEVICE_RAW_MULTI = 8ULL, + IB_UVERBS_DEVICE_AUTO_PATH_MIG = 16ULL, + IB_UVERBS_DEVICE_CHANGE_PHY_PORT = 32ULL, + IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE = 64ULL, + IB_UVERBS_DEVICE_CURR_QP_STATE_MOD = 128ULL, + IB_UVERBS_DEVICE_SHUTDOWN_PORT = 256ULL, + IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT = 1024ULL, + IB_UVERBS_DEVICE_SYS_IMAGE_GUID = 2048ULL, + IB_UVERBS_DEVICE_RC_RNR_NAK_GEN = 4096ULL, + IB_UVERBS_DEVICE_SRQ_RESIZE = 8192ULL, + IB_UVERBS_DEVICE_N_NOTIFY_CQ = 16384ULL, + IB_UVERBS_DEVICE_MEM_WINDOW = 131072ULL, + IB_UVERBS_DEVICE_UD_IP_CSUM = 262144ULL, + IB_UVERBS_DEVICE_XRC = 1048576ULL, + IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS = 2097152ULL, + IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A = 8388608ULL, + IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B = 16777216ULL, + IB_UVERBS_DEVICE_RC_IP_CSUM = 33554432ULL, + IB_UVERBS_DEVICE_RAW_IP_CSUM = 67108864ULL, + IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING = 536870912ULL, + IB_UVERBS_DEVICE_RAW_SCATTER_FCS = 17179869184ULL, + IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING = 68719476736ULL, + IB_UVERBS_DEVICE_FLUSH_GLOBAL = 274877906944ULL, + IB_UVERBS_DEVICE_FLUSH_PERSISTENT = 549755813888ULL, + IB_UVERBS_DEVICE_ATOMIC_WRITE = 1099511627776ULL, +}; + +enum ib_uverbs_raw_packet_caps { + IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING = 1, + IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS = 2, + IB_UVERBS_RAW_PACKET_CAP_IP_CSUM = 4, + IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP = 8, +}; + +enum ib_uverbs_access_flags { + IB_UVERBS_ACCESS_LOCAL_WRITE = 1, + IB_UVERBS_ACCESS_REMOTE_WRITE = 2, + IB_UVERBS_ACCESS_REMOTE_READ = 4, + IB_UVERBS_ACCESS_REMOTE_ATOMIC = 8, + IB_UVERBS_ACCESS_MW_BIND = 16, + IB_UVERBS_ACCESS_ZERO_BASED = 32, + IB_UVERBS_ACCESS_ON_DEMAND = 64, + IB_UVERBS_ACCESS_HUGETLB = 128, + IB_UVERBS_ACCESS_FLUSH_GLOBAL = 256, + IB_UVERBS_ACCESS_FLUSH_PERSISTENT = 512, + IB_UVERBS_ACCESS_RELAXED_ORDERING = 1048576, + IB_UVERBS_ACCESS_OPTIONAL_RANGE = 
1072693248, +}; + +enum ib_uverbs_srq_type { + IB_UVERBS_SRQT_BASIC = 0, + IB_UVERBS_SRQT_XRC = 1, + IB_UVERBS_SRQT_TM = 2, +}; + +enum ib_uverbs_wq_type { + IB_UVERBS_WQT_RQ = 0, +}; + +enum ib_uverbs_wq_flags { + IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1, + IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 2, + IB_UVERBS_WQ_FLAGS_DELAY_DROP = 4, + IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 8, +}; + +enum ib_uverbs_qp_type { + IB_UVERBS_QPT_RC = 2, + IB_UVERBS_QPT_UC = 3, + IB_UVERBS_QPT_UD = 4, + IB_UVERBS_QPT_RAW_PACKET = 8, + IB_UVERBS_QPT_XRC_INI = 9, + IB_UVERBS_QPT_XRC_TGT = 10, + IB_UVERBS_QPT_DRIVER = 255, +}; + +enum ib_uverbs_qp_create_flags { + IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2, + IB_UVERBS_QP_CREATE_SCATTER_FCS = 256, + IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 512, + IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 2048, + IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 4096, +}; + +enum ib_uverbs_gid_type { + IB_UVERBS_GID_TYPE_IB = 0, + IB_UVERBS_GID_TYPE_ROCE_V1 = 1, + IB_UVERBS_GID_TYPE_ROCE_V2 = 2, +}; + +enum ib_poll_context { + IB_POLL_SOFTIRQ = 0, + IB_POLL_WORKQUEUE = 1, + IB_POLL_UNBOUND_WORKQUEUE = 2, + IB_POLL_LAST_POOL_TYPE = 2, + IB_POLL_DIRECT = 3, +}; + +struct ddebug_table { + struct list_head link; + struct list_head maps; + const char *mod_name; + unsigned int num_ddebugs; + struct _ddebug *ddebugs; +}; + +struct ddebug_query { + const char *filename; + const char *module; + const char *function; + const char *format; + const char *class_string; + unsigned int first_lineno; + unsigned int last_lineno; +}; + +struct ddebug_iter { + struct ddebug_table *table; + int idx; +}; + +struct flag_settings { + unsigned int flags; + unsigned int mask; +}; + +struct flagsbuf { + char buf[8]; +}; + +struct clk_fixed_factor { + struct clk_hw hw; + unsigned int mult; + unsigned int div; + long unsigned int acc; + unsigned int flags; +}; + +struct timer_rand_state { + long unsigned int last_time; + long int last_delta; + long int last_delta2; +}; + +enum chacha_constants { + CHACHA_CONSTANT_EXPA = 1634760805, + CHACHA_CONSTANT_ND_3 = 857760878, + CHACHA_CONSTANT_2_BY = 2036477234, + CHACHA_CONSTANT_TE_K = 1797285236, +}; + +enum blake2s_iv { + BLAKE2S_IV0 = 1779033703, + BLAKE2S_IV1 = 3144134277, + BLAKE2S_IV2 = 1013904242, + BLAKE2S_IV3 = 2773480762, + BLAKE2S_IV4 = 1359893119, + BLAKE2S_IV5 = 2600822924, + BLAKE2S_IV6 = 528734635, + BLAKE2S_IV7 = 1541459225, +}; + +enum { + CRNG_EMPTY = 0, + CRNG_EARLY = 1, + CRNG_READY = 2, +}; + +enum { + CRNG_RESEED_START_INTERVAL = 1000, + CRNG_RESEED_INTERVAL = 60000, +}; + +struct crng { + u8 key[32]; + long unsigned int generation; + local_lock_t lock; +}; + +struct batch_u8 { + u8 entropy[96]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u16 { + u16 entropy[48]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u32 { + u32 entropy[24]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +struct batch_u64 { + u64 entropy[12]; + local_lock_t lock; + long unsigned int generation; + unsigned int position; +}; + +enum { + POOL_BITS = 256, + POOL_READY_BITS = 256, + POOL_EARLY_BITS = 128, +}; + +struct fast_pool { + long unsigned int pool[4]; + long unsigned int last; + unsigned int count; + struct timer_list mix; +}; + +struct entropy_timer_state { + long unsigned int entropy; + struct timer_list timer; + atomic_t samples; + unsigned int samples_per_bit; +}; + +enum { + NUM_TRIAL_SAMPLES = 8192, + MAX_SAMPLES_PER_BIT = 66, +}; + 
+enum { + MIX_INFLIGHT = 2147483648, +}; + +struct scsi_event { + enum scsi_device_event evt_type; + struct list_head node; +}; + +struct iommu_fault_page_request { + u32 flags; + u32 pasid; + u32 grpid; + u32 perm; + u64 addr; + u64 private_data[2]; +}; + +struct iommu_fault { + u32 type; + long: 32; + struct iommu_fault_page_request prm; +}; + +struct iopf_fault { + struct iommu_fault fault; + struct list_head list; +}; + +struct iommu_attach_handle; + +struct iommu_fault_param; + +struct iopf_group { + struct iopf_fault last_fault; + struct list_head faults; + size_t fault_count; + struct list_head pending_node; + struct work_struct work; + struct iommu_attach_handle *attach_handle; + struct iommu_fault_param *fault_param; + struct list_head node; + u32 cookie; +}; + +struct iommu_fault_param {}; + +struct iommu_domain; + +typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, long unsigned int, int, void *); + +struct iommu_domain_ops; + +struct iommu_domain_geometry { + dma_addr_t aperture_start; + dma_addr_t aperture_end; + bool force_aperture; +}; + +struct iommu_dma_cookie; + +struct iommu_dirty_ops; + +struct iommu_ops; + +struct iommu_domain { + unsigned int type; + const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + const struct iommu_ops *owner; + long unsigned int pgsize_bitmap; + struct iommu_domain_geometry geometry; + struct iommu_dma_cookie *iova_cookie; + int (*iopf_handler)(struct iopf_group *); + void *fault_data; + union { + struct { + iommu_fault_handler_t handler; + void *handler_token; + }; + struct { + struct mm_struct *mm; + int users; + struct list_head next; + }; + }; +}; + +struct iommu_dirty_ops {}; + +struct iommu_ops {}; + +enum xhci_overhead_type { + LS_OVERHEAD_TYPE = 0, + FS_OVERHEAD_TYPE = 1, + HS_OVERHEAD_TYPE = 2, +}; + +typedef __u16 bitmap_counter_t; + +enum bitmap_state { + BITMAP_STALE = 1, + BITMAP_WRITE_ERROR = 2, + BITMAP_HOSTENDIAN = 15, +}; + +struct bitmap_super_s { + __le32 magic; + __le32 version; + __u8 uuid[16]; + __le64 events; + __le64 events_cleared; + __le64 sync_size; + __le32 state; + __le32 chunksize; + __le32 daemon_sleep; + __le32 write_behind; + __le32 sectors_reserved; + __le32 nodes; + __u8 cluster_name[64]; + __u8 pad[120]; +}; + +typedef struct bitmap_super_s bitmap_super_t; + +struct bitmap_page { + char *map; + unsigned int hijacked: 1; + unsigned int pending: 1; + unsigned int count: 30; +}; + +struct bitmap_counts { + spinlock_t lock; + struct bitmap_page *bp; + long unsigned int pages; + long unsigned int missing_pages; + long unsigned int chunkshift; + long unsigned int chunks; +}; + +struct bitmap_storage { + struct file *file; + struct page *sb_page; + long unsigned int sb_index; + struct page **filemap; + long unsigned int *filemap_attr; + long unsigned int file_pages; + long unsigned int bytes; +}; + +struct bitmap { + struct bitmap_counts counts; + struct mddev *mddev; + long: 32; + __u64 events_cleared; + int need_sync; + struct bitmap_storage storage; + long unsigned int flags; + int allclean; + atomic_t behind_writes; + long unsigned int behind_writes_used; + long unsigned int daemon_lastrun; + long unsigned int last_end_sync; + atomic_t pending_writes; + wait_queue_head_t write_wait; + wait_queue_head_t overflow_wait; + wait_queue_head_t behind_wait; + struct kernfs_node *sysfs_can_clear; + int cluster_slot; +}; + +enum bitmap_page_attr { + BITMAP_PAGE_DIRTY = 0, + BITMAP_PAGE_PENDING = 1, + BITMAP_PAGE_NEEDWRITE = 2, +}; + +struct bitmap_unplug_work { + 
struct work_struct work; + struct bitmap *bitmap; + struct completion *done; +}; + +struct rtnl_link_stats { + __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; + __u32 collisions; + __u32 rx_length_errors; + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; + __u32 tx_aborted_errors; + __u32 tx_carrier_errors; + __u32 tx_fifo_errors; + __u32 tx_heartbeat_errors; + __u32 tx_window_errors; + __u32 rx_compressed; + __u32 tx_compressed; + __u32 rx_nohandler; +}; + +struct rtnl_link_ifmap { + __u64 mem_start; + __u64 mem_end; + __u64 base_addr; + __u16 irq; + __u8 dma; + __u8 port; + long: 32; +}; + +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC = 0, + IFLA_PROTO_DOWN_REASON_MASK = 1, + IFLA_PROTO_DOWN_REASON_VALUE = 2, + __IFLA_PROTO_DOWN_REASON_CNT = 3, + IFLA_PROTO_DOWN_REASON_MAX = 2, +}; + +enum { + IFLA_INFO_UNSPEC = 0, + IFLA_INFO_KIND = 1, + IFLA_INFO_DATA = 2, + IFLA_INFO_XSTATS = 3, + IFLA_INFO_SLAVE_KIND = 4, + IFLA_INFO_SLAVE_DATA = 5, + __IFLA_INFO_MAX = 6, +}; + +enum { + IFLA_VF_INFO_UNSPEC = 0, + IFLA_VF_INFO = 1, + __IFLA_VF_INFO_MAX = 2, +}; + +enum { + IFLA_VF_UNSPEC = 0, + IFLA_VF_MAC = 1, + IFLA_VF_VLAN = 2, + IFLA_VF_TX_RATE = 3, + IFLA_VF_SPOOFCHK = 4, + IFLA_VF_LINK_STATE = 5, + IFLA_VF_RATE = 6, + IFLA_VF_RSS_QUERY_EN = 7, + IFLA_VF_STATS = 8, + IFLA_VF_TRUST = 9, + IFLA_VF_IB_NODE_GUID = 10, + IFLA_VF_IB_PORT_GUID = 11, + IFLA_VF_VLAN_LIST = 12, + IFLA_VF_BROADCAST = 13, + __IFLA_VF_MAX = 14, +}; + +struct ifla_vf_mac { + __u32 vf; + __u8 mac[32]; +}; + +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + +struct ifla_vf_vlan { + __u32 vf; + __u32 vlan; + __u32 qos; +}; + +enum { + IFLA_VF_VLAN_INFO_UNSPEC = 0, + IFLA_VF_VLAN_INFO = 1, + __IFLA_VF_VLAN_INFO_MAX = 2, +}; + +struct ifla_vf_vlan_info { + __u32 vf; + __u32 vlan; + __u32 qos; + __be16 vlan_proto; +}; + +struct ifla_vf_tx_rate { + __u32 vf; + __u32 rate; +}; + +struct ifla_vf_rate { + __u32 vf; + __u32 min_tx_rate; + __u32 max_tx_rate; +}; + +struct ifla_vf_spoofchk { + __u32 vf; + __u32 setting; +}; + +struct ifla_vf_link_state { + __u32 vf; + __u32 link_state; +}; + +struct ifla_vf_rss_query_en { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_STATS_RX_PACKETS = 0, + IFLA_VF_STATS_TX_PACKETS = 1, + IFLA_VF_STATS_RX_BYTES = 2, + IFLA_VF_STATS_TX_BYTES = 3, + IFLA_VF_STATS_BROADCAST = 4, + IFLA_VF_STATS_MULTICAST = 5, + IFLA_VF_STATS_PAD = 6, + IFLA_VF_STATS_RX_DROPPED = 7, + IFLA_VF_STATS_TX_DROPPED = 8, + __IFLA_VF_STATS_MAX = 9, +}; + +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_PORT_UNSPEC = 0, + IFLA_VF_PORT = 1, + __IFLA_VF_PORT_MAX = 2, +}; + +enum { + IFLA_PORT_UNSPEC = 0, + IFLA_PORT_VF = 1, + IFLA_PORT_PROFILE = 2, + IFLA_PORT_VSI_TYPE = 3, + IFLA_PORT_INSTANCE_UUID = 4, + IFLA_PORT_HOST_UUID = 5, + IFLA_PORT_REQUEST = 6, + IFLA_PORT_RESPONSE = 7, + __IFLA_PORT_MAX = 8, +}; + +struct if_stats_msg { + __u8 family; + __u8 pad1; + __u16 pad2; + __u32 ifindex; + __u32 filter_mask; +}; + +enum { + IFLA_STATS_GETSET_UNSPEC = 0, + IFLA_STATS_GET_FILTERS = 1, + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 2, + __IFLA_STATS_GETSET_MAX = 3, +}; + +enum { + IFLA_OFFLOAD_XSTATS_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 2, + IFLA_OFFLOAD_XSTATS_L3_STATS = 3, + __IFLA_OFFLOAD_XSTATS_MAX = 4, +}; + +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0, + 
IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 1, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 2, + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX = 3, +}; + +enum { + XDP_ATTACHED_NONE = 0, + XDP_ATTACHED_DRV = 1, + XDP_ATTACHED_SKB = 2, + XDP_ATTACHED_HW = 3, + XDP_ATTACHED_MULTI = 4, +}; + +enum { + IFLA_XDP_UNSPEC = 0, + IFLA_XDP_FD = 1, + IFLA_XDP_ATTACHED = 2, + IFLA_XDP_FLAGS = 3, + IFLA_XDP_PROG_ID = 4, + IFLA_XDP_DRV_PROG_ID = 5, + IFLA_XDP_SKB_PROG_ID = 6, + IFLA_XDP_HW_PROG_ID = 7, + IFLA_XDP_EXPECTED_FD = 8, + __IFLA_XDP_MAX = 9, +}; + +enum { + IFLA_EVENT_NONE = 0, + IFLA_EVENT_REBOOT = 1, + IFLA_EVENT_FEATURES = 2, + IFLA_EVENT_BONDING_FAILOVER = 3, + IFLA_EVENT_NOTIFY_PEERS = 4, + IFLA_EVENT_IGMP_RESEND = 5, + IFLA_EVENT_BONDING_OPTIONS = 6, +}; + +struct rta_cacheinfo { + __u32 rta_clntref; + __u32 rta_lastuse; + __s32 rta_expires; + __u32 rta_error; + __u32 rta_used; + __u32 rta_id; + __u32 rta_ts; + __u32 rta_tsage; +}; + +enum rtnl_kinds { + RTNL_KIND_NEW = 0, + RTNL_KIND_DEL = 1, + RTNL_KIND_GET = 2, + RTNL_KIND_SET = 3, +}; + +struct rtnl_link { + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + struct module *owner; + unsigned int flags; + struct callback_head rcu; +}; + +struct rtnl_nets { + struct net *net[3]; + unsigned char len; +}; + +struct rtnl_newlink_tbs { + struct nlattr *tb[67]; + struct nlattr *linkinfo[6]; + struct nlattr *attr[51]; + struct nlattr *slave_attr[45]; +}; + +struct rtnl_offload_xstats_request_used { + bool request; + bool used; +}; + +struct rtnl_stats_dump_filters { + u32 mask[6]; +}; + +struct rtnl_mdb_dump_ctx { + long int idx; +}; + +struct eee_reply_data { + struct ethnl_reply_data base; + struct ethtool_keee eee; +}; + +struct ct_iter_state { + struct seq_net_private p; + struct hlist_nulls_head *hash; + unsigned int htable_size; + unsigned int bucket; + u_int64_t time_now; +}; + +enum nf_ct_sysctl_index { + NF_SYSCTL_CT_MAX = 0, + NF_SYSCTL_CT_COUNT = 1, + NF_SYSCTL_CT_BUCKETS = 2, + NF_SYSCTL_CT_CHECKSUM = 3, + NF_SYSCTL_CT_LOG_INVALID = 4, + NF_SYSCTL_CT_EXPECT_MAX = 5, + NF_SYSCTL_CT_ACCT = 6, + NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC = 7, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT = 8, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV = 9, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED = 10, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT = 11, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT = 12, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK = 13, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT = 14, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE = 15, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS = 16, + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK = 17, + NF_SYSCTL_CT_PROTO_TCP_LOOSE = 18, + NF_SYSCTL_CT_PROTO_TCP_LIBERAL = 19, + NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST = 20, + NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS = 21, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP = 22, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM = 23, + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP = 24, + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6 = 25, + NF_SYSCTL_CT_LAST_SYSCTL = 26, +}; + +enum nft_payload_csum_types { + NFT_PAYLOAD_CSUM_NONE = 0, + NFT_PAYLOAD_CSUM_INET = 1, + NFT_PAYLOAD_CSUM_SCTP = 2, +}; + +enum nft_payload_csum_flags { + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 1, +}; + +enum nft_payload_attributes { + NFTA_PAYLOAD_UNSPEC = 0, + NFTA_PAYLOAD_DREG = 1, + NFTA_PAYLOAD_BASE = 2, + NFTA_PAYLOAD_OFFSET = 3, + NFTA_PAYLOAD_LEN = 4, + NFTA_PAYLOAD_SREG = 5, + NFTA_PAYLOAD_CSUM_TYPE = 6, + NFTA_PAYLOAD_CSUM_OFFSET = 7, + NFTA_PAYLOAD_CSUM_FLAGS = 8, + __NFTA_PAYLOAD_MAX = 9, +}; + +struct gre_full_hdr { + struct gre_base_hdr fixed_header; + __be16 csum; + __be16 
reserved1; + __be32 key; + __be32 seq; +}; + +struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +}; + +struct nft_payload_set { + enum nft_payload_bases base: 8; + u8 offset; + u8 len; + u8 sreg; + u8 csum_type; + u8 csum_offset; + u8 csum_flags; +}; + +struct nft_payload_vlan_hdr { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; +}; + +struct tcp_sack_block_wire { + __be32 start_seq; + __be32 end_seq; +}; + +enum tcp_ca_ack_event_flags { + CA_ACK_SLOWPATH = 1, + CA_ACK_WIN_UPDATE = 2, + CA_ACK_ECE = 4, +}; + +struct tcp_sacktag_state { + u64 first_sackt; + u64 last_sackt; + u32 reord; + u32 sack_delivered; + int flag; + unsigned int mss_now; + struct rate_sample *rate; + long: 32; +}; + +struct icmp6_filter { + __u32 data[8]; +}; + +struct raw6_sock { + struct inet_sock inet; + __u32 checksum; + __u32 offset; + struct icmp6_filter filter; + __u32 ip6mr_table; + struct ipv6_pinfo inet6; +}; + +struct raw6_frag_vec { + struct msghdr *msg; + int hlen; + char c[4]; +}; + +struct igmpv3_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + __be32 grec_mca; + __be32 grec_src[0]; +}; + +struct igmpv3_report { + __u8 type; + __u8 resv1; + __sum16 csum; + __be16 resv2; + __be16 ngrec; + struct igmpv3_grec grec[0]; +}; + +struct igmpv3_query { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; + __u8 resv: 4; + __u8 suppress: 1; + __u8 qrv: 3; + __u8 qqic; + __be16 nsrcs; + __be32 srcs[0]; +}; + +enum { + PIM_TYPE_HELLO = 0, + PIM_TYPE_REGISTER = 1, + PIM_TYPE_REGISTER_STOP = 2, + PIM_TYPE_JOIN_PRUNE = 3, + PIM_TYPE_BOOTSTRAP = 4, + PIM_TYPE_ASSERT = 5, + PIM_TYPE_GRAFT = 6, + PIM_TYPE_GRAFT_ACK = 7, + PIM_TYPE_CANDIDATE_RP_ADV = 8, +}; + +struct pimhdr { + __u8 type; + __u8 reserved; + __be16 csum; +}; + +enum { + BRIDGE_QUERIER_UNSPEC = 0, + BRIDGE_QUERIER_IP_ADDRESS = 1, + BRIDGE_QUERIER_IP_PORT = 2, + BRIDGE_QUERIER_IP_OTHER_TIMER = 3, + BRIDGE_QUERIER_PAD = 4, + BRIDGE_QUERIER_IPV6_ADDRESS = 5, + BRIDGE_QUERIER_IPV6_PORT = 6, + BRIDGE_QUERIER_IPV6_OTHER_TIMER = 7, + __BRIDGE_QUERIER_MAX = 8, +}; + +struct br_ip_list { + struct list_head list; + struct br_ip addr; +}; + +enum { + kExit = 0, + kSetAndTest = 1, + kTest = 2, +}; + +struct dbdma_regs { + unsigned int control; + unsigned int status; + unsigned int cmdptr_hi; + unsigned int cmdptr; + unsigned int intr_sel; + unsigned int br_sel; + unsigned int wait_sel; + unsigned int xfer_mode; + unsigned int data2ptr_hi; + unsigned int data2ptr; + unsigned int res1; + unsigned int address_hi; + unsigned int br_addr_hi; + unsigned int res2[3]; +}; + +typedef long int (*feature_call)(struct device_node *, long int, long int); + +struct feature_table_entry { + unsigned int selector; + feature_call function; +}; + +struct pmac_mb_def { + const char *model_string; + const char *model_name; + int model_id; + struct feature_table_entry *features; + long unsigned int board_flags; +}; + +struct slot_names_prop { + int count; + char name[1]; +}; + +struct semaphore_waiter { + struct list_head list; + struct task_struct *task; + bool up; +}; + +struct process_timer { + struct timer_list timer; + struct task_struct *task; +}; + +struct __kernel_old_itimerval { + struct __kernel_old_timeval it_interval; + struct __kernel_old_timeval it_value; +}; + +struct trace_event_raw_rpm_internal { + struct trace_entry ent; + u32 __data_loc_name; + int flags; + int usage_count; + int disable_depth; + int runtime_auto; + int request_pending; + int irq_safe; + int child_count; + char __data[0]; +}; + +struct 
trace_event_raw_rpm_return_int { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int ip; + int ret; + char __data[0]; +}; + +struct trace_event_raw_rpm_status { + struct trace_entry ent; + u32 __data_loc_name; + int status; + char __data[0]; +}; + +struct trace_event_data_offsets_rpm_internal { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_rpm_return_int { + u32 name; + const void *name_ptr_; +}; + +struct trace_event_data_offsets_rpm_status { + u32 name; + const void *name_ptr_; +}; + +typedef void (*btf_trace_rpm_suspend)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_resume)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_idle)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_usage)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_return_int)(void *, struct device *, long unsigned int, int); + +typedef void (*btf_trace_rpm_status)(void *, struct device *, enum rpm_status); + +struct bpf_bloom_filter { + struct bpf_map map; + u32 bitset_mask; + u32 hash_seed; + u32 nr_hash_funcs; + long unsigned int bitset[0]; + long: 32; +}; + +struct pcpu_group_info { + int nr_units; + long unsigned int base_offset; + unsigned int *cpu_map; +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; + int nr_groups; + struct pcpu_group_info groups[0]; +}; + +typedef int pcpu_fc_cpu_to_node_fn_t(int); + +typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); + +struct trace_event_raw_percpu_alloc_percpu { + struct trace_entry ent; + long unsigned int call_site; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + void *base_addr; + int off; + void *ptr; + size_t bytes_alloc; + long unsigned int gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_percpu_free_percpu { + struct trace_entry ent; + void *base_addr; + int off; + void *ptr; + char __data[0]; +}; + +struct trace_event_raw_percpu_alloc_percpu_fail { + struct trace_entry ent; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + char __data[0]; +}; + +struct trace_event_raw_percpu_create_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_raw_percpu_destroy_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_data_offsets_percpu_alloc_percpu {}; + +struct trace_event_data_offsets_percpu_free_percpu {}; + +struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; + +struct trace_event_data_offsets_percpu_create_chunk {}; + +struct trace_event_data_offsets_percpu_destroy_chunk {}; + +typedef void (*btf_trace_percpu_alloc_percpu)(void *, long unsigned int, bool, bool, size_t, size_t, void *, int, void *, size_t, gfp_t); + +typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, void *); + +typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, size_t); + +typedef void (*btf_trace_percpu_create_chunk)(void *, void *); + +typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *); + +struct pcpu_block_md { + int scan_hint; + int scan_hint_start; + int contig_hint; + int contig_hint_start; + int left_free; + int right_free; + int first_free; + int nr_bits; +}; + +struct pcpuobj_ext { + struct obj_cgroup *cgroup; +}; + +struct pcpu_chunk { + struct list_head list; + int free_bytes; + struct pcpu_block_md chunk_md; + long unsigned int *bound_map; + long: 32; + 
long: 32; + long: 32; + long: 32; + void *base_addr; + long unsigned int *alloc_map; + struct pcpu_block_md *md_blocks; + void *data; + bool immutable; + bool isolated; + int start_offset; + int end_offset; + struct pcpuobj_ext *obj_exts; + int nr_pages; + int nr_populated; + int nr_empty_pop_pages; + long unsigned int populated[0]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct lruvec_stats_percpu { + long int state[30]; + long int state_prev[30]; +}; + +struct lruvec_stats { + long int state[30]; + long int state_local[30]; + long int state_pending[30]; +}; + +struct memcg_vmstats { + long int state[37]; + long unsigned int events[20]; + long int state_local[37]; + long unsigned int events_local[20]; + long int state_pending[37]; + long unsigned int events_pending[20]; + long: 32; + atomic64_t stats_updates; +}; + +struct memcg_vmstats_percpu { + unsigned int stats_updates; + struct memcg_vmstats_percpu *parent; + struct memcg_vmstats *vmstats; + long int state[37]; + long unsigned int events[20]; + long int state_prev[37]; + long unsigned int events_prev[20]; + long: 32; + long: 32; + long: 32; +}; + +struct trace_event_raw_memcg_rstat_stats { + struct trace_entry ent; + u64 id; + int item; + int val; + char __data[0]; +}; + +struct trace_event_raw_memcg_rstat_events { + struct trace_entry ent; + u64 id; + int item; + long unsigned int val; + char __data[0]; +}; + +struct trace_event_raw_memcg_flush_stats { + struct trace_entry ent; + u64 id; + s64 stats_updates; + bool force; + bool needs_flush; + char __data[0]; + long: 32; +}; + +struct trace_event_data_offsets_memcg_rstat_stats {}; + +struct trace_event_data_offsets_memcg_rstat_events {}; + +struct trace_event_data_offsets_memcg_flush_stats {}; + +typedef void (*btf_trace_mod_memcg_state)(void *, struct mem_cgroup *, int, int); + +typedef void (*btf_trace_mod_memcg_lruvec_state)(void *, struct mem_cgroup *, int, int); + +typedef void (*btf_trace_count_memcg_events)(void *, struct mem_cgroup *, int, long unsigned int); + +typedef void (*btf_trace_memcg_flush_stats)(void *, struct mem_cgroup *, s64, bool, bool); + +struct memory_stat { + const char *name; + unsigned int idx; +}; + +struct memcg_stock_pcp { + local_lock_t stock_lock; + struct mem_cgroup *cached; + unsigned int nr_pages; + struct obj_cgroup *cached_objcg; + struct pglist_data *cached_pgdat; + unsigned int nr_bytes; + int nr_slab_reclaimable_b; + int nr_slab_unreclaimable_b; + struct work_struct work; + long unsigned int flags; +}; + +struct aggregate_control { + long int *aggregate; + long int *local; + long int *pending; + long int *ppending; + long int *cstat; + long int *cstat_prev; + int size; +}; + +enum { + MEMORY_RECLAIM_SWAPPINESS = 0, + MEMORY_RECLAIM_NULL = 1, +}; + +struct uncharge_gather { + struct mem_cgroup *memcg; + long unsigned int nr_memory; + long unsigned int pgpgout; + long unsigned int nr_kmem; + int nid; +}; + +enum handle_to_path_flags { + HANDLE_CHECK_PERMS = 1, + HANDLE_CHECK_SUBTREE = 2, +}; + +struct handle_to_path_ctx { + struct path root; + enum handle_to_path_flags flags; + unsigned int fh_flags; +}; + +struct ext4_xattr_header { + __le32 h_magic; + __le32 h_refcount; + __le32 h_blocks; + __le32 h_hash; + __le32 h_checksum; + __u32 h_reserved[3]; +}; + +struct ext4_xattr_entry { + __u8 e_name_len; + __u8 e_name_index; + __le16 e_value_offs; + __le32 e_value_inum; + __le32 e_value_size; + __le32 e_hash; + char e_name[0]; +}; + +struct ext4_xattr_info { + const char *name; + const void *value; + size_t value_len; + 
int name_index; + int in_inode; +}; + +struct ext4_xattr_search { + struct ext4_xattr_entry *first; + void *base; + void *end; + struct ext4_xattr_entry *here; + int not_found; +}; + +struct ext4_xattr_ibody_find { + struct ext4_xattr_search s; + struct ext4_iloc iloc; +}; + +struct ext4_xattr_block_find { + struct ext4_xattr_search s; + struct buffer_head *bh; +}; + +enum { + AUTOFS_IOC_READY_CMD = 96, + AUTOFS_IOC_FAIL_CMD = 97, + AUTOFS_IOC_CATATONIC_CMD = 98, + AUTOFS_IOC_PROTOVER_CMD = 99, + AUTOFS_IOC_SETTIMEOUT_CMD = 100, + AUTOFS_IOC_EXPIRE_CMD = 101, +}; + +enum { + AUTOFS_IOC_EXPIRE_MULTI_CMD = 102, + AUTOFS_IOC_PROTOSUBVER_CMD = 103, + AUTOFS_IOC_ASKUMOUNT_CMD = 112, +}; + +struct btrfs_ioctl_qgroup_limit_args { + __u64 qgroupid; + struct btrfs_qgroup_limit lim; +}; + +struct btrfs_ioctl_vol_args_v2 { + __s64 fd; + __u64 transid; + __u64 flags; + union { + struct { + __u64 size; + struct btrfs_qgroup_inherit *qgroup_inherit; + long: 32; + }; + __u64 unused[4]; + }; + union { + char name[4040]; + __u64 devid; + __u64 subvolid; + }; +}; + +struct btrfs_ioctl_scrub_args { + __u64 devid; + __u64 start; + __u64 end; + __u64 flags; + struct btrfs_scrub_progress progress; + __u64 unused[109]; +}; + +struct btrfs_ioctl_dev_info_args { + __u64 devid; + __u8 uuid[16]; + __u64 bytes_used; + __u64 total_bytes; + __u8 fsid[16]; + __u64 unused[377]; + __u8 path[1024]; +}; + +struct btrfs_ioctl_fs_info_args { + __u64 max_id; + __u64 num_devices; + __u8 fsid[16]; + __u32 nodesize; + __u32 sectorsize; + __u32 clone_alignment; + __u16 csum_type; + __u16 csum_size; + __u64 flags; + __u64 generation; + __u8 metadata_uuid[16]; + __u8 reserved[944]; +}; + +struct btrfs_ioctl_feature_flags { + __u64 compat_flags; + __u64 compat_ro_flags; + __u64 incompat_flags; +}; + +struct btrfs_ioctl_ino_lookup_args { + __u64 treeid; + __u64 objectid; + char name[4080]; +}; + +struct btrfs_ioctl_ino_lookup_user_args { + __u64 dirid; + __u64 treeid; + char name[256]; + char path[3824]; +}; + +struct btrfs_ioctl_search_key { + __u64 tree_id; + __u64 min_objectid; + __u64 max_objectid; + __u64 min_offset; + __u64 max_offset; + __u64 min_transid; + __u64 max_transid; + __u32 min_type; + __u32 max_type; + __u32 nr_items; + __u32 unused; + __u64 unused1; + __u64 unused2; + __u64 unused3; + __u64 unused4; +}; + +struct btrfs_ioctl_search_header { + __u64 transid; + __u64 objectid; + __u64 offset; + __u32 type; + __u32 len; +}; + +struct btrfs_ioctl_search_args { + struct btrfs_ioctl_search_key key; + char buf[3992]; +}; + +struct btrfs_ioctl_search_args_v2 { + struct btrfs_ioctl_search_key key; + __u64 buf_size; + __u64 buf[0]; +}; + +struct btrfs_ioctl_space_info { + __u64 flags; + __u64 total_bytes; + __u64 used_bytes; +}; + +struct btrfs_ioctl_space_args { + __u64 space_slots; + __u64 total_spaces; + struct btrfs_ioctl_space_info spaces[0]; +}; + +struct btrfs_ioctl_ino_path_args { + __u64 inum; + __u64 size; + __u64 reserved[4]; + __u64 fspath; +}; + +struct btrfs_ioctl_logical_ino_args { + __u64 logical; + __u64 size; + __u64 reserved[3]; + __u64 flags; + __u64 inodes; +}; + +struct btrfs_ioctl_quota_rescan_args { + __u64 flags; + __u64 progress; + __u64 reserved[6]; +}; + +struct btrfs_ioctl_qgroup_assign_args { + __u64 assign; + __u64 src; + __u64 dst; +}; + +struct btrfs_ioctl_qgroup_create_args { + __u64 create; + __u64 qgroupid; +}; + +struct btrfs_ioctl_timespec { + __u64 sec; + __u32 nsec; + long: 32; +}; + +struct btrfs_ioctl_received_subvol_args { + char uuid[16]; + __u64 stransid; + __u64 rtransid; + struct 
btrfs_ioctl_timespec stime; + struct btrfs_ioctl_timespec rtime; + __u64 flags; + __u64 reserved[16]; +}; + +struct btrfs_ioctl_send_args { + __s64 send_fd; + __u64 clone_sources_count; + __u64 *clone_sources; + long: 32; + __u64 parent_root; + __u64 flags; + __u32 version; + __u8 reserved[28]; +}; + +struct btrfs_ioctl_get_subvol_info_args { + __u64 treeid; + char name[256]; + __u64 parent_id; + __u64 dirid; + __u64 generation; + __u64 flags; + __u8 uuid[16]; + __u8 parent_uuid[16]; + __u8 received_uuid[16]; + __u64 ctransid; + __u64 otransid; + __u64 stransid; + __u64 rtransid; + struct btrfs_ioctl_timespec ctime; + struct btrfs_ioctl_timespec otime; + struct btrfs_ioctl_timespec stime; + struct btrfs_ioctl_timespec rtime; + __u64 reserved[8]; +}; + +struct btrfs_ioctl_get_subvol_rootref_args { + __u64 min_treeid; + struct { + __u64 treeid; + __u64 dirid; + } rootref[255]; + __u8 num_items; + __u8 align[7]; +}; + +struct btrfs_ioctl_subvol_wait { + __u64 subvolid; + __u32 mode; + __u32 count; +}; + +struct btrfs_uring_priv { + struct io_uring_cmd *cmd; + struct page **pages; + long unsigned int nr_pages; + long: 32; + struct kiocb iocb; + struct iovec *iov; + long: 32; + struct iov_iter iter; + struct extent_state *cached_state; + long: 32; + u64 count; + u64 start; + u64 lockend; + int err; + bool compressed; +}; + +struct io_btrfs_cmd { + struct btrfs_uring_priv *priv; +}; + +enum blake2b_lengths { + BLAKE2B_BLOCK_SIZE = 128, + BLAKE2B_HASH_SIZE = 64, + BLAKE2B_KEY_SIZE = 64, + BLAKE2B_160_HASH_SIZE = 20, + BLAKE2B_256_HASH_SIZE = 32, + BLAKE2B_384_HASH_SIZE = 48, + BLAKE2B_512_HASH_SIZE = 64, +}; + +struct blake2b_state { + u64 h[8]; + u64 t[2]; + u64 f[2]; + u8 buf[128]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2b_iv { + BLAKE2B_IV0 = 7640891576956012808ULL, + BLAKE2B_IV1 = 13503953896175478587ULL, + BLAKE2B_IV2 = 4354685564936845355ULL, + BLAKE2B_IV3 = 11912009170470909681ULL, + BLAKE2B_IV4 = 5840696475078001361ULL, + BLAKE2B_IV5 = 11170449401992604703ULL, + BLAKE2B_IV6 = 2270897969802886507ULL, + BLAKE2B_IV7 = 6620516959819538809ULL, +}; + +typedef void (*blake2b_compress_t)(struct blake2b_state *, const u8 *, size_t, u32); + +struct blake2b_tfm_ctx { + u8 key[64]; + unsigned int keylen; +}; + +typedef unsigned char u8___2; + +struct rand_data { + void *hash_state; + long: 32; + __u64 prev_time; + __u64 last_delta; + __s64 last_delta2; + unsigned int flags; + unsigned int osr; + unsigned char *mem; + unsigned int memlocation; + unsigned int memblocks; + unsigned int memblocksize; + unsigned int memaccessloops; + unsigned int rct_count; + unsigned int apt_cutoff; + unsigned int apt_cutoff_permanent; + unsigned int apt_observations; + unsigned int apt_count; + unsigned int apt_base; + unsigned int health_failure; + unsigned int apt_base_set: 1; + long: 32; +}; + +struct disk_events { + struct list_head node; + struct gendisk *disk; + spinlock_t lock; + struct mutex block_mutex; + int block; + unsigned int pending; + unsigned int clearing; + long int poll_msecs; + struct delayed_work dwork; +}; + +struct io_waitid_async { + struct io_kiocb *req; + struct wait_opts wo; +}; + +struct io_waitid { + struct file *file; + int which; + pid_t upid; + int options; + atomic_t refs; + struct wait_queue_head *head; + struct siginfo *infop; + struct waitid_info info; +}; + +struct strarray { + char **array; + size_t n; +}; + +typedef enum { + HEAD = 0, + FLAGS = 1, + TIME = 2, + OS = 3, + EXLEN = 4, + EXTRA = 5, + NAME = 6, + COMMENT = 7, + HCRC = 8, + DICTID = 9, + DICT = 
10, + TYPE = 11, + TYPEDO = 12, + STORED = 13, + COPY = 14, + TABLE = 15, + LENLENS = 16, + CODELENS = 17, + LEN = 18, + LENEXT = 19, + DIST = 20, + DISTEXT = 21, + MATCH = 22, + LIT = 23, + CHECK = 24, + LENGTH = 25, + DONE = 26, + BAD = 27, + MEM = 28, + SYNC = 29, +} inflate_mode; + +struct inflate_state { + inflate_mode mode; + int last; + int wrap; + int havedict; + int flags; + unsigned int dmax; + long unsigned int check; + long unsigned int total; + unsigned int wbits; + unsigned int wsize; + unsigned int whave; + unsigned int write; + unsigned char *window; + long unsigned int hold; + unsigned int bits; + unsigned int length; + unsigned int offset; + unsigned int extra; + const code *lencode; + const code *distcode; + unsigned int lenbits; + unsigned int distbits; + unsigned int ncode; + unsigned int nlen; + unsigned int ndist; + unsigned int have; + code *next; + short unsigned int lens[320]; + short unsigned int work[288]; + code codes[2048]; +}; + +union uu { + short unsigned int us; + unsigned char b[2]; +}; + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +typedef struct { + U32 fastMode; + U32 tableLog; +} ZSTD_seqSymbol_header; + +typedef enum { + not_streaming = 0, + is_streaming = 1, +} streaming_operation; + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; +} seq_t; + +typedef struct { + size_t state; + const ZSTD_seqSymbol *table; +} ZSTD_fseState; + +typedef struct { + BIT_DStream_t DStream; + ZSTD_fseState stateLL; + ZSTD_fseState stateOffb; + ZSTD_fseState stateML; + size_t prevOffset[3]; +} seqState_t; + +typedef enum { + ZSTD_lo_isRegularOffset = 0, + ZSTD_lo_isLongOffset = 1, +} ZSTD_longOffset_e; + +struct font_data { + unsigned int extra[4]; + const unsigned char data[0]; +}; + +struct scsi_varlen_cdb_hdr { + __u8 opcode; + __u8 control; + __u8 misc[5]; + __u8 additional_cdb_length; + __be16 service_action; +}; + +struct mdiobus_devres { + struct mii_bus *mii; +}; + +typedef struct thermal_zone_device *class_thermal_zone_reverse_t; + +struct trace_event_raw_thermal_temperature { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int temp_prev; + int temp; + char __data[0]; +}; + +struct trace_event_raw_cdev_update { + struct trace_entry ent; + u32 __data_loc_type; + long unsigned int target; + char __data[0]; +}; + +struct trace_event_raw_thermal_zone_trip { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int trip; + enum thermal_trip_type trip_type; + char __data[0]; +}; + +struct trace_event_data_offsets_thermal_temperature { + u32 thermal_zone; + const void *thermal_zone_ptr_; +}; + +struct trace_event_data_offsets_cdev_update { + u32 type; + const void *type_ptr_; +}; + +struct trace_event_data_offsets_thermal_zone_trip { + u32 thermal_zone; + const void *thermal_zone_ptr_; +}; + +typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *); + +typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, long unsigned int); + +typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, enum thermal_trip_type); + +enum { + TCA_FLOWER_KEY_CT_FLAGS_NEW = 1, + TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 2, + TCA_FLOWER_KEY_CT_FLAGS_RELATED = 4, + TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 8, + TCA_FLOWER_KEY_CT_FLAGS_INVALID = 16, + TCA_FLOWER_KEY_CT_FLAGS_REPLY = 32, + __TCA_FLOWER_KEY_CT_FLAGS_MAX = 33, +}; + +enum flow_dissect_ret { + FLOW_DISSECT_RET_OUT_GOOD = 0, + 
FLOW_DISSECT_RET_OUT_BAD = 1, + FLOW_DISSECT_RET_PROTO_AGAIN = 2, + FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, + FLOW_DISSECT_RET_CONTINUE = 4, +}; + +struct flow_dissector_key_hash { + u32 hash; +}; + +struct flow_dissector_key_num_of_vlans { + u8 num_of_vlans; +}; + +struct flow_dissector_key_cfm { + u8 mdl_ver; + u8 opcode; +}; + +struct flow_dissector_key { + enum flow_dissector_key_id key_id; + size_t offset; +}; + +struct flow_keys_digest { + u8 data[16]; +}; + +enum bpf_ret_code { + BPF_OK = 0, + BPF_DROP = 2, + BPF_REDIRECT = 7, + BPF_LWT_REROUTE = 128, + BPF_FLOW_DISSECTOR_CONTINUE = 129, +}; + +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1, + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2, + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4, +}; + +struct pptp_gre_header { + struct gre_base_hdr gre_hd; + __be16 payload_len; + __be16 call_id; + __be32 seq; + __be32 ack; +}; + +struct tipc_basic_hdr { + __be32 w[4]; +}; + +enum dccp_state { + DCCP_OPEN = 1, + DCCP_REQUESTING = 2, + DCCP_LISTEN = 10, + DCCP_RESPOND = 3, + DCCP_ACTIVE_CLOSEREQ = 4, + DCCP_PASSIVE_CLOSE = 8, + DCCP_CLOSING = 11, + DCCP_TIME_WAIT = 6, + DCCP_CLOSED = 7, + DCCP_NEW_SYN_RECV = 12, + DCCP_PARTOPEN = 14, + DCCP_PASSIVE_CLOSEREQ = 15, + DCCP_MAX_STATES = 16, +}; + +struct pppoe_tag { + __be16 tag_type; + __be16 tag_len; + char tag_data[0]; +}; + +struct pppoe_hdr { + __u8 ver: 4; + __u8 type: 4; + __u8 code; + __be16 sid; + __be16 length; + struct pppoe_tag tag[0]; +}; + +struct hsr_tag { + __be16 path_and_LSDU_size; + __be16 sequence_nr; + __be16 encap_proto; +}; + +struct mpls_label { + __be32 entry; +}; + +enum batadv_packettype { + BATADV_IV_OGM = 0, + BATADV_BCAST = 1, + BATADV_CODED = 2, + BATADV_ELP = 3, + BATADV_OGM2 = 4, + BATADV_MCAST = 5, + BATADV_UNICAST = 64, + BATADV_UNICAST_FRAG = 65, + BATADV_UNICAST_4ADDR = 66, + BATADV_ICMP = 67, + BATADV_UNICAST_TVLV = 68, +}; + +struct batadv_unicast_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 ttvn; + __u8 dest[6]; +}; + +struct _flow_keys_digest_data { + __be16 n_proto; + u8 ip_proto; + u8 padding; + __be32 ports; + __be32 src; + __be32 dst; +}; + +struct bpf_stab { + struct bpf_map map; + struct sock **sks; + struct sk_psock_progs progs; + spinlock_t lock; +}; + +typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_map)(struct sk_buff *, struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64); + +struct sock_map_seq_info { + struct bpf_map *map; + struct sock *sk; + u32 index; +}; + +struct bpf_iter__sockmap { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + struct sock *sk; + }; +}; + +struct bpf_shtab_elem { + struct callback_head rcu; + u32 hash; + struct sock *sk; + struct hlist_node node; + u8 key[0]; +}; + +struct bpf_shtab_bucket { + struct hlist_head head; + spinlock_t lock; +}; + +struct bpf_shtab { + struct bpf_map map; + struct bpf_shtab_bucket *buckets; + u32 buckets_num; + u32 elem_size; + struct sk_psock_progs progs; + atomic_t count; +}; + +typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64); + +struct sock_hash_seq_info { + struct bpf_map *map; + struct bpf_shtab *htab; + u32 bucket_id; +}; + 
+struct sockmap_link { + struct bpf_link link; + struct bpf_map *map; + enum bpf_attach_type attach_type; +}; + +enum { + ETHTOOL_A_PROFILE_UNSPEC = 0, + ETHTOOL_A_PROFILE_IRQ_MODERATION = 1, + __ETHTOOL_A_PROFILE_CNT = 2, + ETHTOOL_A_PROFILE_MAX = 1, +}; + +enum { + ETHTOOL_A_IRQ_MODERATION_UNSPEC = 0, + ETHTOOL_A_IRQ_MODERATION_USEC = 1, + ETHTOOL_A_IRQ_MODERATION_PKTS = 2, + ETHTOOL_A_IRQ_MODERATION_COMPS = 3, + __ETHTOOL_A_IRQ_MODERATION_CNT = 4, + ETHTOOL_A_IRQ_MODERATION_MAX = 3, +}; + +struct coalesce_reply_data { + struct ethnl_reply_data base; + struct ethtool_coalesce coalesce; + struct kernel_ethtool_coalesce kernel_coalesce; + u32 supported_params; +}; + +enum nfqnl_msg_types { + NFQNL_MSG_PACKET = 0, + NFQNL_MSG_VERDICT = 1, + NFQNL_MSG_CONFIG = 2, + NFQNL_MSG_VERDICT_BATCH = 3, + NFQNL_MSG_MAX = 4, +}; + +struct nfqnl_msg_packet_hdr { + __be32 packet_id; + __be16 hw_protocol; + __u8 hook; +} __attribute__((packed)); + +struct nfqnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfqnl_msg_packet_timestamp { + __be64 sec; + __be64 usec; +}; + +enum nfqnl_vlan_attr { + NFQA_VLAN_UNSPEC = 0, + NFQA_VLAN_PROTO = 1, + NFQA_VLAN_TCI = 2, + __NFQA_VLAN_MAX = 3, +}; + +enum nfqnl_attr_type { + NFQA_UNSPEC = 0, + NFQA_PACKET_HDR = 1, + NFQA_VERDICT_HDR = 2, + NFQA_MARK = 3, + NFQA_TIMESTAMP = 4, + NFQA_IFINDEX_INDEV = 5, + NFQA_IFINDEX_OUTDEV = 6, + NFQA_IFINDEX_PHYSINDEV = 7, + NFQA_IFINDEX_PHYSOUTDEV = 8, + NFQA_HWADDR = 9, + NFQA_PAYLOAD = 10, + NFQA_CT = 11, + NFQA_CT_INFO = 12, + NFQA_CAP_LEN = 13, + NFQA_SKB_INFO = 14, + NFQA_EXP = 15, + NFQA_UID = 16, + NFQA_GID = 17, + NFQA_SECCTX = 18, + NFQA_VLAN = 19, + NFQA_L2HDR = 20, + NFQA_PRIORITY = 21, + NFQA_CGROUP_CLASSID = 22, + __NFQA_MAX = 23, +}; + +struct nfqnl_msg_verdict_hdr { + __be32 verdict; + __be32 id; +}; + +enum nfqnl_msg_config_cmds { + NFQNL_CFG_CMD_NONE = 0, + NFQNL_CFG_CMD_BIND = 1, + NFQNL_CFG_CMD_UNBIND = 2, + NFQNL_CFG_CMD_PF_BIND = 3, + NFQNL_CFG_CMD_PF_UNBIND = 4, +}; + +struct nfqnl_msg_config_cmd { + __u8 command; + __u8 _pad; + __be16 pf; +}; + +enum nfqnl_config_mode { + NFQNL_COPY_NONE = 0, + NFQNL_COPY_META = 1, + NFQNL_COPY_PACKET = 2, +}; + +struct nfqnl_msg_config_params { + __be32 copy_range; + __u8 copy_mode; +} __attribute__((packed)); + +enum nfqnl_attr_config { + NFQA_CFG_UNSPEC = 0, + NFQA_CFG_CMD = 1, + NFQA_CFG_PARAMS = 2, + NFQA_CFG_QUEUE_MAXLEN = 3, + NFQA_CFG_MASK = 4, + NFQA_CFG_FLAGS = 5, + __NFQA_CFG_MAX = 6, +}; + +struct nfqnl_instance { + struct hlist_node hlist; + struct callback_head rcu; + u32 peer_portid; + unsigned int queue_maxlen; + unsigned int copy_range; + unsigned int queue_dropped; + unsigned int queue_user_dropped; + u_int16_t queue_num; + u_int8_t copy_mode; + u_int32_t flags; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + spinlock_t lock; + unsigned int queue_total; + unsigned int id_sequence; + struct list_head queue_list; + long: 32; + long: 32; + long: 32; +}; + +typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, long unsigned int); + +struct nfnl_queue_net { + spinlock_t instances_lock; + struct hlist_head instance_table[16]; +}; + +enum nft_inner_type { + NFT_INNER_UNSPEC = 0, + NFT_INNER_VXLAN = 1, + NFT_INNER_GENEVE = 2, +}; + +enum nft_inner_flags { + NFT_INNER_HDRSIZE = 1, + NFT_INNER_LL = 2, + NFT_INNER_NH = 4, + NFT_INNER_TH = 8, +}; + +enum nft_inner_attributes { + NFTA_INNER_UNSPEC = 0, + NFTA_INNER_NUM = 1, + NFTA_INNER_TYPE = 2, + NFTA_INNER_FLAGS = 3, + NFTA_INNER_HDRSIZE = 4, + NFTA_INNER_EXPR = 5, + 
__NFTA_INNER_MAX = 6, +}; + +struct genevehdr { + u8 ver: 2; + u8 opt_len: 6; + u8 oam: 1; + u8 critical: 1; + u8 rsvd1: 6; + __be16 proto_type; + u8 vni[3]; + u8 rsvd2; + u8 options[0]; +}; + +struct __nft_expr { + const struct nft_expr_ops *ops; + long: 32; + union { + struct nft_payload payload; + struct nft_meta meta; + }; +}; + +enum { + NFT_INNER_EXPR_PAYLOAD = 0, + NFT_INNER_EXPR_META = 1, +}; + +struct nft_inner { + u8 flags; + u8 hdrsize; + u8 type; + u8 expr_type; + long: 32; + struct __nft_expr expr; +}; + +enum { + IFLA_INET_UNSPEC = 0, + IFLA_INET_CONF = 1, + __IFLA_INET_MAX = 2, +}; + +struct in_validator_info { + __be32 ivi_addr; + struct in_device *ivi_dev; + struct netlink_ext_ack *extack; +}; + +struct inet_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; +}; + +struct devinet_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table devinet_vars[33]; +}; + +struct fixup_entry { + long unsigned int mask; + long unsigned int value; + long int start_off; + long int end_off; + long int alt_start_off; + long int alt_end_off; +}; + +enum perf_hw_id { + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + PERF_COUNT_HW_MAX = 10, +}; + +enum { + KERNEL_PARAM_OPS_FL_NOARG = 1, +}; + +struct param_attribute { + struct module_attribute mattr; + const struct kernel_param *param; +}; + +struct module_param_attrs { + unsigned int num; + struct attribute_group grp; + struct param_attribute attrs[0]; +}; + +struct kmalloced_param { + struct list_head list; + char val[0]; +}; + +struct futex_waitv { + __u64 val; + __u64 uaddr; + __u32 flags; + __u32 __reserved; +}; + +struct futex_vector { + struct futex_waitv w; + struct futex_q q; +}; + +enum misc_res_type { + MISC_CG_RES_TYPES = 0, +}; + +struct misc_res { + u64 max; + atomic64_t watermark; + atomic64_t usage; + atomic64_t events; + atomic64_t events_local; +}; + +struct misc_cg { + struct cgroup_subsys_state css; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct misc_res res[0]; +}; + +enum { + BPF_F_UPROBE_MULTI_RETURN = 1, +}; + +enum { + BPF_F_GET_BRANCH_RECORDS_SIZE = 1, +}; + +struct bpf_perf_event_value { + __u64 counter; + __u64 enabled; + __u64 running; +}; + +struct btf_ptr { + void *ptr; + __u32 type_id; + __u32 flags; +}; + +struct bpf_key { + struct key *key; + bool has_ref; +}; + +struct perf_event_query_bpf { + __u32 ids_len; + __u32 prog_cnt; + __u32 ids[0]; +}; + +struct trace_event_raw_bpf_trace_printk { + struct trace_entry ent; + u32 __data_loc_bpf_string; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trace_printk { + u32 bpf_string; + const void *bpf_string_ptr_; +}; + +typedef void (*btf_trace_bpf_trace_printk)(void *, const char *); + +struct bpf_trace_module { + struct module *module; + struct list_head list; +}; + +typedef u64 (*btf_bpf_probe_read_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_user_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *); + +typedef 
u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_write_user)(void *, const void *, u32); + +typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_trace_vprintk)(char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf_btf)(struct seq_file *, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, struct bpf_perf_event_value *, u32); + +struct bpf_trace_sample_data { + struct perf_sample_data sds[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64); + +struct bpf_nested_pt_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_get_current_task)(); + +typedef u64 (*btf_bpf_get_current_task_btf)(); + +typedef u64 (*btf_bpf_task_pt_regs)(struct task_struct *); + +struct send_signal_irq_work { + struct irq_work irq_work; + struct task_struct *task; + u32 sig; + enum pid_type type; + bool has_siginfo; + struct kernel_siginfo info; +}; + +typedef u64 (*btf_bpf_send_signal)(u32); + +typedef u64 (*btf_bpf_send_signal_thread)(u32); + +typedef u64 (*btf_bpf_d_path)(struct path *, char *, u32); + +typedef u64 (*btf_bpf_snprintf_btf)(char *, u32, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_get_func_ip_tracing)(void *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_kprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_func_ip_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_uprobe_multi)(struct pt_regs *); + +typedef u64 (*btf_bpf_get_attach_cookie_trace)(void *); + +typedef u64 (*btf_bpf_get_attach_cookie_pe)(struct bpf_perf_event_data_kern *); + +typedef u64 (*btf_bpf_get_attach_cookie_tracing)(void *); + +typedef u64 (*btf_bpf_get_branch_snapshot)(void *, u32, u64); + +typedef u64 (*btf_get_func_arg)(void *, u32, u64 *); + +typedef u64 (*btf_get_func_ret)(void *, u64 *); + +typedef u64 (*btf_get_func_arg_cnt)(void *); + +typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64); + +typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, struct bpf_perf_event_value *, u32); + +typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct bpf_raw_tp_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64); + +struct bpf_session_run_ctx { + struct bpf_run_ctx run_ctx; + bool is_return; + void *data; +}; + +struct bpf_uprobe_multi_link; + +struct bpf_uprobe { + struct bpf_uprobe_multi_link *link; + long: 32; + loff_t offset; + long unsigned int ref_ctr_offset; + long: 32; + u64 cookie; + struct uprobe *uprobe; + long: 32; + struct uprobe_consumer consumer; + bool session; + long: 32; 
+}; + +struct bpf_uprobe_multi_link { + struct path path; + struct bpf_link link; + u32 cnt; + u32 flags; + struct bpf_uprobe *uprobes; + struct task_struct *task; +}; + +struct bpf_uprobe_multi_run_ctx { + struct bpf_session_run_ctx session_ctx; + long unsigned int entry_ip; + struct bpf_uprobe *uprobe; +}; + +struct bpf_cpumask { + cpumask_t cpumask; + refcount_t usage; +}; + +struct trace_event_raw_mm_compaction_isolate_template { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int nr_scanned; + long unsigned int nr_taken; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_migratepages { + struct trace_entry ent; + long unsigned int nr_migrated; + long unsigned int nr_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_begin { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_end { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_try_to_compact_pages { + struct trace_entry ent; + int order; + long unsigned int gfp_mask; + int prio; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_suitable_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + int ret; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_defer_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + unsigned int considered; + unsigned int defer_shift; + int order_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_kcompactd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_kcompactd_wake_template { + struct trace_entry ent; + int nid; + int order; + enum zone_type highest_zoneidx; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_compaction_isolate_template {}; + +struct trace_event_data_offsets_mm_compaction_migratepages {}; + +struct trace_event_data_offsets_mm_compaction_begin {}; + +struct trace_event_data_offsets_mm_compaction_end {}; + +struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; + +struct trace_event_data_offsets_mm_compaction_suitable_template {}; + +struct trace_event_data_offsets_mm_compaction_defer_template {}; + +struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; + +struct trace_event_data_offsets_kcompactd_wake_template {}; + +typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_fast_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_migratepages)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_mm_compaction_begin)(void *, struct compact_control *, long unsigned int, long unsigned int, bool); + +typedef void (*btf_trace_mm_compaction_end)(void *, struct compact_control *, long unsigned int, long unsigned int, bool, int); + +typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, 
int, gfp_t, int); + +typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, int); + +typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum zone_type); + +typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type); + +typedef enum { + ISOLATE_ABORT = 0, + ISOLATE_NONE = 1, + ISOLATE_SUCCESS = 2, +} isolate_migrate_t; + +struct swap_cgroup_ctrl { + struct page **map; + long unsigned int length; + spinlock_t lock; +}; + +struct swap_cgroup { + short unsigned int id; +}; + +typedef struct { + long unsigned int fds_bits[32]; +} __kernel_fd_set; + +typedef __kernel_fd_set fd_set; + +struct poll_table_entry { + struct file *filp; + __poll_t key; + wait_queue_entry_t wait; + wait_queue_head_t *wait_address; +}; + +struct poll_table_page; + +struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[18]; +}; + +struct poll_table_page { + struct poll_table_page *next; + struct poll_table_entry *entry; + struct poll_table_entry entries[0]; +}; + +enum poll_time_type { + PT_TIMEVAL = 0, + PT_OLD_TIMEVAL = 1, + PT_TIMESPEC = 2, + PT_OLD_TIMESPEC = 3, +}; + +typedef struct { + long unsigned int *in; + long unsigned int *out; + long unsigned int *ex; + long unsigned int *res_in; + long unsigned int *res_out; + long unsigned int *res_ex; +} fd_set_bits; + +struct sigset_argpack { + sigset_t *p; + size_t size; +}; + +struct sel_arg_struct { + long unsigned int n; + fd_set *inp; + fd_set *outp; + fd_set *exp; + struct __kernel_old_timeval *tvp; +}; + +struct poll_list { + struct poll_list *next; + unsigned int len; + struct pollfd entries[0]; +}; + +struct backing_aio { + struct kiocb iocb; + refcount_t ref; + struct kiocb *orig_iocb; + void (*end_write)(struct kiocb *, ssize_t); + struct work_struct work; + long int res; +}; + +struct fd_data { + fmode_t mode; + unsigned int fd; +}; + +typedef struct { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +} ext4_acl_entry; + +typedef struct { + __le32 a_version; +} ext4_acl_header; + +struct btrfs_em_shrink_ctx { + long int nr_to_scan; + long int scanned; +}; + +struct posix_acl_xattr_header { + __le32 a_version; +}; + +struct fsverity_descriptor { + __u8 version; + __u8 hash_algorithm; + __u8 log_blocksize; + __u8 salt_size; + __le32 sig_size; + __le64 data_size; + __u8 root_hash[64]; + __u8 salt[32]; + __u8 __reserved[144]; + __u8 signature[0]; +}; + +struct btrfs_stream_header { + char magic[13]; + __le32 version; +} __attribute__((packed)); + +struct btrfs_cmd_header { + __le32 len; + __le16 cmd; + __le32 crc; +} __attribute__((packed)); + +struct btrfs_tlv_header { + __le16 tlv_type; + __le16 tlv_len; +}; + +enum btrfs_send_cmd { + BTRFS_SEND_C_UNSPEC = 0, + BTRFS_SEND_C_SUBVOL = 1, + BTRFS_SEND_C_SNAPSHOT = 2, + BTRFS_SEND_C_MKFILE = 3, + BTRFS_SEND_C_MKDIR = 4, + BTRFS_SEND_C_MKNOD = 5, + BTRFS_SEND_C_MKFIFO = 6, + BTRFS_SEND_C_MKSOCK = 7, + BTRFS_SEND_C_SYMLINK = 8, + BTRFS_SEND_C_RENAME = 9, + BTRFS_SEND_C_LINK = 10, + 
BTRFS_SEND_C_UNLINK = 11, + BTRFS_SEND_C_RMDIR = 12, + BTRFS_SEND_C_SET_XATTR = 13, + BTRFS_SEND_C_REMOVE_XATTR = 14, + BTRFS_SEND_C_WRITE = 15, + BTRFS_SEND_C_CLONE = 16, + BTRFS_SEND_C_TRUNCATE = 17, + BTRFS_SEND_C_CHMOD = 18, + BTRFS_SEND_C_CHOWN = 19, + BTRFS_SEND_C_UTIMES = 20, + BTRFS_SEND_C_END = 21, + BTRFS_SEND_C_UPDATE_EXTENT = 22, + BTRFS_SEND_C_MAX_V1 = 22, + BTRFS_SEND_C_FALLOCATE = 23, + BTRFS_SEND_C_FILEATTR = 24, + BTRFS_SEND_C_ENCODED_WRITE = 25, + BTRFS_SEND_C_MAX_V2 = 25, + BTRFS_SEND_C_ENABLE_VERITY = 26, + BTRFS_SEND_C_MAX_V3 = 26, + BTRFS_SEND_C_MAX = 26, +}; + +enum { + BTRFS_SEND_A_UNSPEC = 0, + BTRFS_SEND_A_UUID = 1, + BTRFS_SEND_A_CTRANSID = 2, + BTRFS_SEND_A_INO = 3, + BTRFS_SEND_A_SIZE = 4, + BTRFS_SEND_A_MODE = 5, + BTRFS_SEND_A_UID = 6, + BTRFS_SEND_A_GID = 7, + BTRFS_SEND_A_RDEV = 8, + BTRFS_SEND_A_CTIME = 9, + BTRFS_SEND_A_MTIME = 10, + BTRFS_SEND_A_ATIME = 11, + BTRFS_SEND_A_OTIME = 12, + BTRFS_SEND_A_XATTR_NAME = 13, + BTRFS_SEND_A_XATTR_DATA = 14, + BTRFS_SEND_A_PATH = 15, + BTRFS_SEND_A_PATH_TO = 16, + BTRFS_SEND_A_PATH_LINK = 17, + BTRFS_SEND_A_FILE_OFFSET = 18, + BTRFS_SEND_A_DATA = 19, + BTRFS_SEND_A_CLONE_UUID = 20, + BTRFS_SEND_A_CLONE_CTRANSID = 21, + BTRFS_SEND_A_CLONE_PATH = 22, + BTRFS_SEND_A_CLONE_OFFSET = 23, + BTRFS_SEND_A_CLONE_LEN = 24, + BTRFS_SEND_A_MAX_V1 = 24, + BTRFS_SEND_A_FALLOCATE_MODE = 25, + BTRFS_SEND_A_FILEATTR = 26, + BTRFS_SEND_A_UNENCODED_FILE_LEN = 27, + BTRFS_SEND_A_UNENCODED_LEN = 28, + BTRFS_SEND_A_UNENCODED_OFFSET = 29, + BTRFS_SEND_A_COMPRESSION = 30, + BTRFS_SEND_A_ENCRYPTION = 31, + BTRFS_SEND_A_MAX_V2 = 31, + BTRFS_SEND_A_VERITY_ALGORITHM = 32, + BTRFS_SEND_A_VERITY_BLOCK_SIZE = 33, + BTRFS_SEND_A_VERITY_SALT_DATA = 34, + BTRFS_SEND_A_VERITY_SIG_DATA = 35, + BTRFS_SEND_A_MAX_V3 = 35, + __BTRFS_SEND_A_MAX = 35, +}; + +struct fs_path { + union { + struct { + char *start; + char *end; + char *buf; + short unsigned int buf_len: 15; + short unsigned int reversed: 1; + char inline_buf[0]; + }; + char pad[256]; + }; +}; + +struct clone_root { + struct btrfs_root *root; + long: 32; + u64 ino; + u64 offset; + u64 num_bytes; + bool found_ref; + long: 32; +}; + +struct backref_cache_entry { + struct btrfs_lru_cache_entry entry; + u64 root_ids[17]; + int num_roots; + long: 32; +}; + +struct send_ctx { + struct file *send_filp; + long: 32; + loff_t send_off; + char *send_buf; + u32 send_size; + u32 send_max_size; + bool put_data; + struct page **send_buf_pages; + long: 32; + u64 flags; + u32 proto; + struct btrfs_root *send_root; + struct btrfs_root *parent_root; + struct clone_root *clone_roots; + int clone_roots_cnt; + struct btrfs_path *left_path; + struct btrfs_path *right_path; + struct btrfs_key *cmp_key; + u64 last_reloc_trans; + u64 cur_ino; + u64 cur_inode_gen; + u64 cur_inode_size; + u64 cur_inode_mode; + u64 cur_inode_rdev; + u64 cur_inode_last_extent; + u64 cur_inode_next_write_offset; + bool cur_inode_new; + bool cur_inode_new_gen; + bool cur_inode_deleted; + bool ignore_cur_inode; + bool cur_inode_needs_verity; + void *verity_descriptor; + long: 32; + u64 send_progress; + struct list_head new_refs; + struct list_head deleted_refs; + struct btrfs_lru_cache name_cache; + struct inode *cur_inode; + struct file_ra_state ra; + u64 page_cache_clear_start; + bool clean_page_cache; + struct rb_root pending_dir_moves; + struct rb_root waiting_dir_moves; + struct rb_root orphan_dirs; + struct rb_root rbtree_new_refs; + struct rb_root rbtree_deleted_refs; + struct btrfs_lru_cache backref_cache; + long: 32; + u64 
backref_cache_last_reloc_trans; + struct btrfs_lru_cache dir_created_cache; + struct btrfs_lru_cache dir_utimes_cache; +}; + +struct pending_dir_move { + struct rb_node node; + struct list_head list; + long: 32; + u64 parent_ino; + u64 ino; + u64 gen; + struct list_head update_refs; +}; + +struct waiting_dir_move { + struct rb_node node; + long: 32; + u64 ino; + u64 rmdir_ino; + u64 rmdir_gen; + bool orphanized; + long: 32; +}; + +struct orphan_dir_info { + struct rb_node node; + long: 32; + u64 ino; + u64 gen; + u64 last_dir_index_offset; + u64 dir_high_seq_ino; +}; + +struct name_cache_entry { + struct btrfs_lru_cache_entry entry; + u64 parent_ino; + u64 parent_gen; + int ret; + int need_later_update; + int name_len; + char name[0]; + long: 32; +}; + +enum btrfs_compare_tree_result { + BTRFS_COMPARE_TREE_NEW = 0, + BTRFS_COMPARE_TREE_DELETED = 1, + BTRFS_COMPARE_TREE_CHANGED = 2, + BTRFS_COMPARE_TREE_SAME = 3, +}; + +struct btrfs_inode_info { + u64 size; + u64 gen; + u64 mode; + u64 uid; + u64 gid; + u64 rdev; + u64 fileattr; + u64 nlink; +}; + +typedef int (*iterate_inode_ref_t)(u64, struct fs_path *, void *); + +typedef int (*iterate_dir_item_t)(int, struct btrfs_key *, const char *, int, const char *, int, void *); + +struct backref_ctx { + struct send_ctx *sctx; + long: 32; + u64 found; + u64 cur_objectid; + u64 cur_offset; + u64 extent_len; + u64 bytenr; + u64 backref_owner; + u64 backref_offset; +}; + +enum inode_state { + inode_state_no_change = 0, + inode_state_will_create = 1, + inode_state_did_create = 2, + inode_state_will_delete = 3, + inode_state_did_delete = 4, +}; + +struct recorded_ref { + struct list_head list; + char *name; + struct fs_path *full_path; + u64 dir; + u64 dir_gen; + int name_len; + struct rb_node node; + struct rb_root *root; + long: 32; +}; + +struct find_xattr_ctx { + const char *name; + int name_len; + int found_idx; + char *found_data; + int found_data_len; +}; + +enum { + REQ_FSEQ_PREFLUSH = 1, + REQ_FSEQ_DATA = 2, + REQ_FSEQ_POSTFLUSH = 4, + REQ_FSEQ_DONE = 8, + REQ_FSEQ_ACTIONS = 7, + FLUSH_PENDING_TIMEOUT = 5000, +}; + +enum io_uring_socket_op { + SOCKET_URING_OP_SIOCINQ = 0, + SOCKET_URING_OP_SIOCOUTQ = 1, + SOCKET_URING_OP_GETSOCKOPT = 2, + SOCKET_URING_OP_SETSOCKOPT = 3, +}; + +struct uring_cache { + struct io_uring_sqe sqes[2]; +}; + +struct inflate_workspace { + struct inflate_state inflate_state; + unsigned char working_window[32768]; +}; + +typedef enum { + ZSTD_d_windowLogMax = 100, + ZSTD_d_experimentalParam1 = 1000, + ZSTD_d_experimentalParam2 = 1001, + ZSTD_d_experimentalParam3 = 1002, + ZSTD_d_experimentalParam4 = 1003, +} ZSTD_dParameter; + +typedef enum { + ZSTDnit_frameHeader = 0, + ZSTDnit_blockHeader = 1, + ZSTDnit_block = 2, + ZSTDnit_lastBlock = 3, + ZSTDnit_checksum = 4, + ZSTDnit_skippableFrame = 5, +} ZSTD_nextInputType_e; + +typedef struct { + size_t compressedSize; + long: 32; + long long unsigned int decompressedBound; +} ZSTD_frameSizeInfo; + +struct clk_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct clk *clk; + struct clk_hw *clk_hw; +}; + +struct clk_lookup_alloc { + struct clk_lookup cl; + char dev_id[24]; + char con_id[16]; +}; + +struct vring_desc { + __virtio64 addr; + __virtio32 len; + __virtio16 flags; + __virtio16 next; +}; + +struct vring_avail { + __virtio16 flags; + __virtio16 idx; + __virtio16 ring[0]; +}; + +struct vring_used_elem { + __virtio32 id; + __virtio32 len; +}; + +typedef struct vring_used_elem vring_used_elem_t; + +struct vring_used { + __virtio16 flags; + 
__virtio16 idx; + vring_used_elem_t ring[0]; +}; + +typedef struct vring_desc vring_desc_t; + +typedef struct vring_avail vring_avail_t; + +typedef struct vring_used vring_used_t; + +struct vring { + unsigned int num; + vring_desc_t *desc; + vring_avail_t *avail; + vring_used_t *used; +}; + +struct vring_packed_desc_event { + __le16 off_wrap; + __le16 flags; +}; + +struct vring_packed_desc { + __le64 addr; + __le32 len; + __le16 id; + __le16 flags; +}; + +struct vring_desc_state_split { + void *data; + struct vring_desc *indir_desc; +}; + +struct vring_desc_state_packed { + void *data; + struct vring_packed_desc *indir_desc; + u16 num; + u16 last; +}; + +struct vring_desc_extra { + dma_addr_t addr; + u32 len; + u16 flags; + u16 next; +}; + +struct vring_virtqueue_split { + struct vring vring; + u16 avail_flags_shadow; + u16 avail_idx_shadow; + struct vring_desc_state_split *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + u32 vring_align; + bool may_reduce_num; +}; + +struct vring_virtqueue_packed { + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + bool avail_wrap_counter; + u16 avail_used_flags; + u16 next_avail_idx; + u16 event_flags_shadow; + struct vring_desc_state_packed *desc_state; + struct vring_desc_extra *desc_extra; + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; +}; + +struct vring_virtqueue { + struct virtqueue vq; + bool packed_ring; + bool use_dma_api; + bool weak_barriers; + bool broken; + bool indirect; + bool event; + unsigned int free_head; + unsigned int num_added; + u16 last_used_idx; + bool event_triggered; + union { + struct vring_virtqueue_split split; + struct vring_virtqueue_packed packed; + }; + bool (*notify)(struct virtqueue *); + bool we_own_ring; + struct device *dma_dev; +}; + +struct serport { + struct tty_struct *tty; + wait_queue_head_t wait; + struct serio *serio; + struct serio_device_id id; + spinlock_t lock; + long unsigned int flags; +}; + +struct tso_t { + int next_frag_idx; + int size; + void *data; + u16 ip_id; + u8 tlen; + bool ipv6; + u32 tcp_seq; +}; + +enum { + ETHTOOL_A_PAUSE_STAT_UNSPEC = 0, + ETHTOOL_A_PAUSE_STAT_PAD = 1, + ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 2, + ETHTOOL_A_PAUSE_STAT_RX_FRAMES = 3, + __ETHTOOL_A_PAUSE_STAT_CNT = 4, + ETHTOOL_A_PAUSE_STAT_MAX = 3, +}; + +struct pause_req_info { + struct ethnl_req_info base; + enum ethtool_mac_stats_src src; +}; + +struct pause_reply_data { + struct ethnl_reply_data base; + struct ethtool_pauseparam pauseparam; + long: 32; + struct ethtool_pause_stats pausestat; +}; + +enum nft_rt_keys { + NFT_RT_CLASSID = 0, + NFT_RT_NEXTHOP4 = 1, + NFT_RT_NEXTHOP6 = 2, + NFT_RT_TCPMSS = 3, + NFT_RT_XFRM = 4, + __NFT_RT_MAX = 5, +}; + +enum nft_rt_attributes { + NFTA_RT_UNSPEC = 0, + NFTA_RT_DREG = 1, + NFTA_RT_KEY = 2, + __NFTA_RT_MAX = 3, +}; + +struct nft_rt { + enum nft_rt_keys key: 8; + u8 dreg; +}; + +typedef struct sock * (*udp_lookup_t)(const struct sk_buff *, __be16, __be16); + +typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); + +struct bictcp { + u32 cnt; + u32 last_max_cwnd; + u32 last_cwnd; + u32 last_time; + u32 bic_origin_point; + u32 bic_K; + u32 delay_min; + u32 epoch_start; + u32 ack_cnt; + u32 tcp_cwnd; + u16 unused; + u8 sample_cnt; + u8 found; + u32 
round_start; + u32 end_seq; + u32 last_ack; + u32 curr_rtt; +}; + +enum { + FDB_NOTIFY_BIT = 1, + FDB_NOTIFY_INACTIVE_BIT = 2, +}; + +enum { + NFEA_UNSPEC = 0, + NFEA_ACTIVITY_NOTIFY = 1, + NFEA_DONT_REFRESH = 2, + __NFEA_MAX = 3, +}; + +struct __fdb_entry { + __u8 mac_addr[6]; + __u8 port_no; + __u8 is_local; + __u32 ageing_timer_value; + __u8 port_hi; + __u8 pad0; + __u16 unused; +}; + +struct klist_waiter { + struct list_head list; + struct klist_node *node; + struct task_struct *process; + int woken; +}; + +struct signal_frame_32 { + char dummy[64]; + struct sigcontext sctx; + struct mcontext mctx; + int abigap[56]; +}; + +struct rt_signal_frame_32 { + char dummy[80]; + struct siginfo info; + struct ucontext uc; + int abigap[56]; +}; + +struct async_domain { + struct list_head pending; + unsigned int registered: 1; +}; + +struct async_entry { + struct list_head domain_list; + struct list_head global_list; + struct work_struct work; + async_cookie_t cookie; + async_func_t func; + void *data; + struct async_domain *domain; + long: 32; +}; + +struct trace_event_raw_contention_begin { + struct trace_entry ent; + void *lock_addr; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_contention_end { + struct trace_entry ent; + void *lock_addr; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_contention_begin {}; + +struct trace_event_data_offsets_contention_end {}; + +typedef void (*btf_trace_contention_begin)(void *, void *, unsigned int); + +typedef void (*btf_trace_contention_end)(void *, void *, int); + +struct mutex_waiter { + struct list_head list; + struct task_struct *task; + struct ww_acquire_ctx *ww_ctx; +}; + +struct kexec_load_limit { + struct mutex mutex; + int limit; +}; + +struct ctx_switch_entry { + struct trace_entry ent; + unsigned int prev_pid; + unsigned int next_pid; + unsigned int next_cpu; + unsigned char prev_prio; + unsigned char prev_state; + unsigned char next_prio; + unsigned char next_state; +}; + +struct userstack_entry { + struct trace_entry ent; + unsigned int tgid; + long unsigned int caller[8]; +}; + +struct hwlat_entry { + struct trace_entry ent; + u64 duration; + u64 outer_duration; + u64 nmi_total_ts; + struct timespec64 timestamp; + unsigned int nmi_count; + unsigned int seqnum; + unsigned int count; + long: 32; +}; + +struct osnoise_entry { + struct trace_entry ent; + u64 noise; + u64 runtime; + u64 max_sample; + unsigned int hw_count; + unsigned int nmi_count; + unsigned int irq_count; + unsigned int softirq_count; + unsigned int thread_count; + long: 32; +}; + +struct timerlat_entry { + struct trace_entry ent; + unsigned int seqnum; + int context; + u64 timer_latency; +}; + +struct trace_mark { + long long unsigned int val; + char sym; + long: 32; +}; + +struct bpf_iter_seq_task_common { + struct pid_namespace *ns; + enum bpf_iter_task_type type; + u32 pid; + u32 pid_visiting; +}; + +struct bpf_iter_seq_task_info { + struct bpf_iter_seq_task_common common; + u32 tid; +}; + +struct bpf_iter__task { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; +}; + +struct bpf_iter_seq_task_file_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + u32 tid; + u32 fd; +}; + +struct bpf_iter__task_file { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + u32 fd; + long: 32; + union { + struct file *file; + }; +}; + +struct bpf_iter_seq_task_vma_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + struct 
mm_struct *mm; + struct vm_area_struct *vma; + u32 tid; + long unsigned int prev_vm_start; + long unsigned int prev_vm_end; +}; + +enum bpf_task_vma_iter_find_op { + task_vma_iter_first_vma = 0, + task_vma_iter_next_vma = 1, + task_vma_iter_find_vma = 2, +}; + +struct bpf_iter__task_vma { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + union { + struct vm_area_struct *vma; + }; +}; + +typedef u64 (*btf_bpf_find_vma)(struct task_struct *, u64, bpf_callback_t, void *, u64); + +struct bpf_iter_task_vma_kern_data { + struct task_struct *task; + struct mm_struct *mm; + struct mmap_unlock_irq_work *work; + struct vma_iterator vmi; +}; + +struct bpf_iter_task_vma { + __u64 __opaque[1]; +}; + +struct bpf_iter_task_vma_kern { + struct bpf_iter_task_vma_kern_data *data; + long: 32; +}; + +struct bpf_iter_css_task { + __u64 __opaque[1]; +}; + +struct bpf_iter_css_task_kern { + struct css_task_iter *css_it; + long: 32; +}; + +struct bpf_iter_task { + __u64 __opaque[3]; +}; + +struct bpf_iter_task_kern { + struct task_struct *task; + struct task_struct *pos; + unsigned int flags; + long: 32; +}; + +enum { + BPF_TASK_ITER_ALL_PROCS = 0, + BPF_TASK_ITER_ALL_THREADS = 1, + BPF_TASK_ITER_PROC_THREADS = 2, +}; + +struct bpf_iter_kmem_cache { + __u64 __opaque[1]; +}; + +struct bpf_iter_kmem_cache_kern { + struct kmem_cache *pos; + long: 32; +}; + +struct bpf_iter__kmem_cache { + union { + struct bpf_iter_meta *meta; + }; + union { + struct kmem_cache *s; + }; +}; + +union kmem_cache_iter_priv { + struct bpf_iter_kmem_cache it; + struct bpf_iter_kmem_cache_kern kit; +}; + +struct wb_stats { + long unsigned int nr_dirty; + long unsigned int nr_io; + long unsigned int nr_more_io; + long unsigned int nr_dirty_time; + long unsigned int nr_writeback; + long unsigned int nr_reclaimable; + long unsigned int nr_dirtied; + long unsigned int nr_written; + long unsigned int dirty_thresh; + long unsigned int wb_thresh; +}; + +typedef int cydp_t; + +struct madvise_walk_private { + struct mmu_gather *tlb; + bool pageout; +}; + +struct posix_acl_xattr_entry { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +}; + +typedef struct { + __le32 *p; + __le32 key; + struct buffer_head *bh; +} Indirect; + +struct recovery_info { + tid_t start_transaction; + tid_t end_transaction; + long unsigned int head_block; + int nr_replays; + int nr_revokes; + int nr_revoke_hits; +}; + +struct SU_SP_s { + __u8 magic[2]; + __u8 skip; +}; + +struct SU_CE_s { + __u8 extent[8]; + __u8 offset[8]; + __u8 size[8]; +}; + +struct SU_ER_s { + __u8 len_id; + __u8 len_des; + __u8 len_src; + __u8 ext_ver; + __u8 data[0]; +}; + +struct RR_RR_s { + __u8 flags[1]; +}; + +struct RR_PX_s { + __u8 mode[8]; + __u8 n_links[8]; + __u8 uid[8]; + __u8 gid[8]; +}; + +struct RR_PN_s { + __u8 dev_high[8]; + __u8 dev_low[8]; +}; + +struct SL_component { + __u8 flags; + __u8 len; + __u8 text[0]; +}; + +struct RR_SL_s { + __u8 flags; + struct SL_component link; +}; + +struct RR_NM_s { + __u8 flags; + char name[0]; +}; + +struct RR_CL_s { + __u8 location[8]; +}; + +struct RR_PL_s { + __u8 location[8]; +}; + +struct stamp { + __u8 time[7]; +}; + +struct RR_TF_s { + __u8 flags; + struct stamp times[0]; +}; + +struct RR_ZF_s { + __u8 algorithm[2]; + __u8 parms[2]; + __u8 real_size[8]; +}; + +struct rock_ridge { + __u8 signature[2]; + __u8 len; + __u8 version; + union { + struct SU_SP_s SP; + struct SU_CE_s CE; + struct SU_ER_s ER; + struct RR_RR_s RR; + struct RR_PX_s PX; + struct RR_PN_s PN; + struct RR_SL_s SL; + struct RR_NM_s NM; + 
struct RR_CL_s CL; + struct RR_PL_s PL; + struct RR_TF_s TF; + struct RR_ZF_s ZF; + } u; +}; + +struct rock_state { + void *buffer; + unsigned char *chr; + int len; + int cont_size; + int cont_extent; + int cont_offset; + int cont_loops; + struct inode *inode; +}; + +struct file_extent_cluster { + u64 start; + u64 end; + u64 boundary[128]; + unsigned int nr; + long: 32; + u64 owning_root; +}; + +struct mapping_tree { + struct rb_root rb_root; + spinlock_t lock; +}; + +enum reloc_stage { + MOVE_DATA_EXTENTS = 0, + UPDATE_DATA_PTRS = 1, +}; + +struct reloc_control { + struct btrfs_block_group *block_group; + struct btrfs_root *extent_root; + struct inode *data_inode; + struct btrfs_block_rsv *block_rsv; + struct btrfs_backref_cache backref_cache; + struct file_extent_cluster cluster; + struct extent_io_tree processed_blocks; + struct mapping_tree reloc_root_tree; + struct list_head reloc_roots; + struct list_head dirty_subvol_roots; + u64 merging_rsv_size; + u64 nodes_relocated; + u64 reserved_bytes; + u64 search_start; + u64 extents_found; + enum reloc_stage stage; + bool create_reloc_tree; + bool merge_reloc_tree; + bool found_file_extent; +}; + +struct mapping_node { + struct { + struct rb_node rb_node; + long: 32; + u64 bytenr; + }; + void *data; + long: 32; +}; + +struct tree_block { + struct { + struct rb_node rb_node; + long: 32; + u64 bytenr; + }; + u64 owner; + struct btrfs_key key; + u8 level; + bool key_ready; + long: 32; +}; + +struct ipc_proc_iface { + const char *path; + const char *header; + int ids; + int (*show)(struct seq_file *, void *); +}; + +struct ipc_proc_iter { + struct ipc_namespace *ns; + struct pid_namespace *pid_ns; + struct ipc_proc_iface *iface; +}; + +struct crypto_rfc3686_ctx { + struct crypto_skcipher *child; + u8 nonce[4]; +}; + +struct crypto_rfc3686_req_ctx { + u8 iv[16]; + struct skcipher_request subreq; +}; + +struct x509_parse_context { + struct x509_certificate *cert; + long unsigned int data; + const void *key; + size_t key_size; + const void *params; + size_t params_size; + enum OID key_algo; + enum OID last_oid; + enum OID sig_algo; + u8 o_size; + u8 cn_size; + u8 email_size; + u16 o_offset; + u16 cn_offset; + u16 email_offset; + unsigned int raw_akid_size; + const void *raw_akid; + const void *akid_raw_issuer; + unsigned int akid_raw_issuer_size; +}; + +struct io_rename { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct io_unlink { + struct file *file; + int dfd; + int flags; + struct filename *filename; +}; + +struct io_mkdir { + struct file *file; + int dfd; + umode_t mode; + struct filename *filename; +}; + +struct io_link { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct genradix_iter { + size_t offset; + size_t pos; +}; + +typedef struct { + BYTE maxTableLog; + BYTE tableType; + BYTE tableLog; + BYTE reserved; +} DTableDesc; + +typedef struct { + BYTE nbBits; + BYTE byte; +} HUF_DEltX1; + +typedef struct { + U32 rankVal[13]; + U32 rankStart[13]; + U32 statsWksp[218]; + BYTE symbols[256]; + BYTE huffWeight[256]; +} HUF_ReadDTableX1_Workspace; + +typedef struct { + U16 sequence; + BYTE nbBits; + BYTE length; +} HUF_DEltX2; + +typedef struct { + BYTE symbol; +} sortedSymbol_t; + +typedef U32 rankValCol_t[13]; + +typedef struct { + U32 rankVal[156]; + U32 rankStats[13]; + U32 rankStart0[15]; + sortedSymbol_t sortedSymbol[256]; + BYTE weightList[256]; + U32 calleeWksp[218]; +} 
HUF_ReadDTableX2_Workspace; + +typedef struct { + U32 tableTime; + U32 decode256Time; +} algo_time_t; + +struct clk_multiplier { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +struct vcs_poll_data { + struct notifier_block notifier; + unsigned int cons_num; + int event; + wait_queue_head_t waitq; + struct fasync_struct *fasync; +}; + +enum { + ACTION_FAIL = 0, + ACTION_REPREP = 1, + ACTION_DELAYED_REPREP = 2, + ACTION_RETRY = 3, + ACTION_DELAYED_RETRY = 4, +}; + +enum { + NETDEV_STATS = 0, + E1000_STATS = 1, +}; + +struct e1000_stats { + char stat_string[32]; + int type; + int sizeof_stat; + int stat_offset; +}; + +struct mmsghdr { + struct user_msghdr msg_hdr; + unsigned int msg_len; +}; + +enum sock_shutdown_cmd { + SHUT_RD = 0, + SHUT_WR = 1, + SHUT_RDWR = 2, +}; + +struct compat_if_settings { + unsigned int type; + unsigned int size; + compat_uptr_t ifs_ifsu; +}; + +struct compat_ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct compat_mmsghdr { + struct compat_msghdr msg_hdr; + compat_uint_t msg_len; +}; + +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; +}; + +struct used_address { + struct __kernel_sockaddr_storage name; + unsigned int name_len; +}; + +struct features_reply_data { + struct ethnl_reply_data base; + u32 hw[2]; + u32 wanted[2]; + u32 active[2]; + u32 nochange[2]; + u32 all[2]; +}; + +enum ip_conntrack_expect_events { + IPEXP_NEW = 0, + IPEXP_DESTROY = 1, +}; + +struct ct_expect_iter_state { + struct seq_net_private p; + unsigned int bucket; +}; + +enum nft_lookup_flags { + NFT_LOOKUP_F_INV = 1, +}; + +enum nft_lookup_attributes { + NFTA_LOOKUP_UNSPEC = 0, + NFTA_LOOKUP_SET = 1, + NFTA_LOOKUP_SREG = 2, + NFTA_LOOKUP_DREG = 3, + NFTA_LOOKUP_SET_ID = 4, + NFTA_LOOKUP_FLAGS = 5, + __NFTA_LOOKUP_MAX = 6, +}; + +struct nft_lookup { + struct nft_set *set; + u8 sreg; + u8 dreg; + bool dreg_set; + bool invert; + struct nft_set_binding binding; +}; + +struct unix_stream_read_state { + int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); + struct socket *socket; + struct msghdr *msg; + struct pipe_inode_info *pipe; + size_t size; + int flags; + unsigned int splice_flags; +}; + +struct bpf_unix_iter_state { + struct seq_net_private p; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; +}; + +struct bpf_iter__unix { + union { + struct bpf_iter_meta *meta; + }; + union { + struct unix_sock *unix_sk; + }; + uid_t uid; + long: 32; +}; + +enum nf_br_hook_priorities { + NF_BR_PRI_FIRST = -2147483648, + NF_BR_PRI_NAT_DST_BRIDGED = -300, + NF_BR_PRI_FILTER_BRIDGED = -200, + NF_BR_PRI_BRNF = 0, + NF_BR_PRI_NAT_DST_OTHER = 100, + NF_BR_PRI_FILTER_OTHER = 200, + NF_BR_PRI_NAT_SRC = 300, + NF_BR_PRI_LAST = 2147483647, +}; + +struct brnf_net { + bool enabled; + struct ctl_table_header *ctl_hdr; + int call_iptables; + int call_ip6tables; + int call_arptables; + int filter_vlan_tagged; + int filter_pppoe_tagged; + int pass_vlan_indev; +}; + +struct brnf_frag_data { + local_lock_t bh_lock; + char mac[22]; 
+ u8 encap_size; + u8 size; + u16 vlan_tci; + __be16 vlan_proto; +}; + +struct individual_sensor { + unsigned int token; + unsigned int quant; +}; + +struct rtas_sensors { + struct individual_sensor sensor[17]; + unsigned int quant; +}; + +enum perf_sample_regs_abi { + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, +}; + +enum perf_event_powerpc_regs { + PERF_REG_POWERPC_R0 = 0, + PERF_REG_POWERPC_R1 = 1, + PERF_REG_POWERPC_R2 = 2, + PERF_REG_POWERPC_R3 = 3, + PERF_REG_POWERPC_R4 = 4, + PERF_REG_POWERPC_R5 = 5, + PERF_REG_POWERPC_R6 = 6, + PERF_REG_POWERPC_R7 = 7, + PERF_REG_POWERPC_R8 = 8, + PERF_REG_POWERPC_R9 = 9, + PERF_REG_POWERPC_R10 = 10, + PERF_REG_POWERPC_R11 = 11, + PERF_REG_POWERPC_R12 = 12, + PERF_REG_POWERPC_R13 = 13, + PERF_REG_POWERPC_R14 = 14, + PERF_REG_POWERPC_R15 = 15, + PERF_REG_POWERPC_R16 = 16, + PERF_REG_POWERPC_R17 = 17, + PERF_REG_POWERPC_R18 = 18, + PERF_REG_POWERPC_R19 = 19, + PERF_REG_POWERPC_R20 = 20, + PERF_REG_POWERPC_R21 = 21, + PERF_REG_POWERPC_R22 = 22, + PERF_REG_POWERPC_R23 = 23, + PERF_REG_POWERPC_R24 = 24, + PERF_REG_POWERPC_R25 = 25, + PERF_REG_POWERPC_R26 = 26, + PERF_REG_POWERPC_R27 = 27, + PERF_REG_POWERPC_R28 = 28, + PERF_REG_POWERPC_R29 = 29, + PERF_REG_POWERPC_R30 = 30, + PERF_REG_POWERPC_R31 = 31, + PERF_REG_POWERPC_NIP = 32, + PERF_REG_POWERPC_MSR = 33, + PERF_REG_POWERPC_ORIG_R3 = 34, + PERF_REG_POWERPC_CTR = 35, + PERF_REG_POWERPC_LINK = 36, + PERF_REG_POWERPC_XER = 37, + PERF_REG_POWERPC_CCR = 38, + PERF_REG_POWERPC_SOFTE = 39, + PERF_REG_POWERPC_TRAP = 40, + PERF_REG_POWERPC_DAR = 41, + PERF_REG_POWERPC_DSISR = 42, + PERF_REG_POWERPC_SIER = 43, + PERF_REG_POWERPC_MMCRA = 44, + PERF_REG_POWERPC_MMCR0 = 45, + PERF_REG_POWERPC_MMCR1 = 46, + PERF_REG_POWERPC_MMCR2 = 47, + PERF_REG_POWERPC_MMCR3 = 48, + PERF_REG_POWERPC_SIER2 = 49, + PERF_REG_POWERPC_SIER3 = 50, + PERF_REG_POWERPC_PMC1 = 51, + PERF_REG_POWERPC_PMC2 = 52, + PERF_REG_POWERPC_PMC3 = 53, + PERF_REG_POWERPC_PMC4 = 54, + PERF_REG_POWERPC_PMC5 = 55, + PERF_REG_POWERPC_PMC6 = 56, + PERF_REG_POWERPC_SDAR = 57, + PERF_REG_POWERPC_SIAR = 58, + PERF_REG_POWERPC_MAX = 45, + PERF_REG_EXTENDED_MAX = 59, +}; + +typedef struct { + void *lock; +} class_preempt_t; + +typedef struct { + raw_spinlock_t *lock; +} class_raw_spinlock_irq_t; + +typedef struct { + void *lock; +} class_cpus_read_lock_t; + +struct trace_event_raw_sched_kthread_stop { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_stop_ret { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_queue_work { + struct trace_entry ent; + void *work; + void *function; + void *worker; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_wakeup_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int target_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_switch { + struct trace_entry ent; + char prev_comm[16]; + pid_t prev_pid; + int prev_prio; + long int prev_state; + char next_comm[16]; + pid_t next_pid; + int next_prio; + char __data[0]; +}; + +struct trace_event_raw_sched_migrate_task { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int 
orig_cpu; + int dest_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_process_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_wait { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_fork { + struct trace_entry ent; + char parent_comm[16]; + pid_t parent_pid; + char child_comm[16]; + pid_t child_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_process_exec { + struct trace_entry ent; + u32 __data_loc_filename; + pid_t pid; + pid_t old_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_prepare_exec { + struct trace_entry ent; + u32 __data_loc_interp; + u32 __data_loc_filename; + pid_t pid; + u32 __data_loc_comm; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_runtime { + struct trace_entry ent; + char comm[16]; + pid_t pid; + long: 32; + u64 runtime; + char __data[0]; +}; + +struct trace_event_raw_sched_pi_setprio { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int oldprio; + int newprio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_hang { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_move_numa { + struct trace_entry ent; + pid_t pid; + pid_t tgid; + pid_t ngid; + int src_cpu; + int src_nid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_numa_pair_template { + struct trace_entry ent; + pid_t src_pid; + pid_t src_tgid; + pid_t src_ngid; + int src_cpu; + int src_nid; + pid_t dst_pid; + pid_t dst_tgid; + pid_t dst_ngid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_wake_idle_without_ipi { + struct trace_entry ent; + int cpu; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_kthread_stop {}; + +struct trace_event_data_offsets_sched_kthread_stop_ret {}; + +struct trace_event_data_offsets_sched_kthread_work_queue_work {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_start {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_end {}; + +struct trace_event_data_offsets_sched_wakeup_template {}; + +struct trace_event_data_offsets_sched_switch {}; + +struct trace_event_data_offsets_sched_migrate_task {}; + +struct trace_event_data_offsets_sched_process_template {}; + +struct trace_event_data_offsets_sched_process_wait {}; + +struct trace_event_data_offsets_sched_process_fork {}; + +struct trace_event_data_offsets_sched_process_exec { + u32 filename; + const void *filename_ptr_; +}; + +struct trace_event_data_offsets_sched_prepare_exec { + u32 interp; + const void *interp_ptr_; + u32 filename; + const void *filename_ptr_; + u32 comm; + const void *comm_ptr_; +}; + +struct trace_event_data_offsets_sched_stat_runtime {}; + +struct trace_event_data_offsets_sched_pi_setprio {}; + +struct trace_event_data_offsets_sched_process_hang {}; + +struct trace_event_data_offsets_sched_move_numa {}; + +struct trace_event_data_offsets_sched_numa_pair_template {}; + +struct trace_event_data_offsets_sched_wake_idle_without_ipi {}; + +typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int); + +typedef void (*btf_trace_sched_kthread_work_queue_work)(void *, struct kthread_worker *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_start)(void *, struct kthread_work *); + +typedef void 
(*btf_trace_sched_kthread_work_execute_end)(void *, struct kthread_work *, kthread_work_func_t); + +typedef void (*btf_trace_sched_waking)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, struct task_struct *, unsigned int); + +typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int); + +typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_wait)(void *, struct pid *); + +typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, struct linux_binprm *); + +typedef void (*btf_trace_sched_prepare_exec)(void *, struct task_struct *, struct linux_binprm *); + +typedef void (*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_hang)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int); + +typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int); + +typedef void (*btf_trace_pelt_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_pelt_rt_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_dl_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_hw_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_irq_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_cpu_capacity_tp)(void *, struct rq *); + +typedef void (*btf_trace_sched_overutilized_tp)(void *, struct root_domain *, bool); + +typedef void (*btf_trace_sched_util_est_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_sched_util_est_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_update_nr_running_tp)(void *, struct rq *, int); + +typedef void (*btf_trace_sched_compute_energy_tp)(void *, struct task_struct *, int, long unsigned int, long unsigned int, long unsigned int); + +struct trace_event_raw_ipi_raise { + struct trace_entry ent; + u32 __data_loc_target_cpus; + const char *reason; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpu { + struct trace_entry ent; + unsigned int cpu; + void *callsite; + void *callback; + char __data[0]; +}; + +struct trace_event_raw_ipi_send_cpumask { + struct trace_entry ent; + u32 __data_loc_cpumask; + void *callsite; + void *callback; + char __data[0]; +}; + +struct trace_event_raw_ipi_handler { + struct trace_entry ent; + const char *reason; + char __data[0]; +}; + +struct trace_event_data_offsets_ipi_raise { + u32 target_cpus; + const void *target_cpus_ptr_; +}; + +struct trace_event_data_offsets_ipi_send_cpu {}; + +struct trace_event_data_offsets_ipi_send_cpumask { + u32 cpumask; + const void *cpumask_ptr_; +}; + +struct 
trace_event_data_offsets_ipi_handler {}; + +typedef void (*btf_trace_ipi_raise)(void *, const struct cpumask *, const char *); + +typedef void (*btf_trace_ipi_send_cpu)(void *, const unsigned int, long unsigned int, void *); + +typedef void (*btf_trace_ipi_send_cpumask)(void *, const struct cpumask *, long unsigned int, void *); + +typedef void (*btf_trace_ipi_entry)(void *, const char *); + +typedef void (*btf_trace_ipi_exit)(void *, const char *); + +typedef struct { + struct rq *lock; + struct rq_flags rf; +} class_rq_lock_irq_t; + +struct set_affinity_pending; + +struct migration_arg { + struct task_struct *task; + int dest_cpu; + struct set_affinity_pending *pending; +}; + +struct set_affinity_pending { + refcount_t refs; + unsigned int stop_pending; + struct completion done; + struct cpu_stop_work stop_work; + struct migration_arg arg; +}; + +struct cfs_schedulable_data { + struct task_group *tg; + long: 32; + u64 period; + u64 quota; +}; + +enum { + cpuset = 0, + possible = 1, + fail = 2, +}; + +union cpumask_rcuhead { + cpumask_t cpumask; + struct callback_head rcu; +}; + +struct trace_probe_log { + const char *subsystem; + const char **argv; + int argc; + int index; +}; + +struct bpf_cgroup_storage_map { + struct bpf_map map; + spinlock_t lock; + struct rb_root root; + struct list_head list; +}; + +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1, + PERF_FORMAT_TOTAL_TIME_RUNNING = 2, + PERF_FORMAT_ID = 4, + PERF_FORMAT_GROUP = 8, + PERF_FORMAT_LOST = 16, + PERF_FORMAT_MAX = 32, +}; + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1, +}; + +struct perf_ns_link_info { + __u64 dev; + __u64 ino; +}; + +enum { + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + NR_NAMESPACES = 7, +}; + +enum perf_pmu_scope { + PERF_PMU_SCOPE_NONE = 0, + PERF_PMU_SCOPE_CORE = 1, + PERF_PMU_SCOPE_DIE = 2, + PERF_PMU_SCOPE_CLUSTER = 3, + PERF_PMU_SCOPE_PKG = 4, + PERF_PMU_SCOPE_SYS_WIDE = 5, + PERF_PMU_MAX_SCOPE = 6, +}; + +enum perf_addr_filter_action_t { + PERF_ADDR_FILTER_ACTION_STOP = 0, + PERF_ADDR_FILTER_ACTION_START = 1, + PERF_ADDR_FILTER_ACTION_FILTER = 2, +}; + +struct perf_addr_filter { + struct list_head entry; + struct path path; + long unsigned int offset; + long unsigned int size; + enum perf_addr_filter_action_t action; +}; + +struct swevent_hlist { + struct hlist_head heads[256]; + struct callback_head callback_head; +}; + +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int online; + struct perf_cgroup *cgrp; + int heap_size; + struct perf_event **heap; + struct perf_event *heap_default[2]; + long: 32; +}; + +struct min_heap_char { + int nr; + int size; + char *data; + char preallocated[0]; +}; + +typedef struct min_heap_char min_heap_char; + +struct min_heap_callbacks { + bool (*less)(const void *, const void *, void *); + void (*swp)(void *, void *, void *); +}; + +typedef int (*remote_function_f)(void *); + +struct remote_function_call { + struct task_struct *p; + remote_function_f func; + void *info; + int ret; +}; + +enum event_type_t { + EVENT_FLEXIBLE = 1, + EVENT_PINNED = 2, + EVENT_TIME = 4, + EVENT_FROZEN = 8, + EVENT_CPU = 16, + EVENT_CGROUP = 32, + EVENT_ALL = 3, + EVENT_TIME_FROZEN = 12, +}; + +typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *); + +struct 
event_function_struct { + struct perf_event *event; + event_f func; + void *data; +}; + +struct __group_key { + int cpu; + struct pmu *pmu; + struct cgroup *cgroup; +}; + +struct stop_event_data { + struct perf_event *event; + unsigned int restart; +}; + +struct perf_event_min_heap { + int nr; + int size; + struct perf_event **data; + struct perf_event *preallocated[0]; +}; + +struct perf_read_data { + struct perf_event *event; + bool group; + int ret; +}; + +struct perf_read_event { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +typedef void perf_iterate_f(struct perf_event *, void *); + +struct remote_output { + struct perf_buffer *rb; + int err; +}; + +struct perf_task_event { + struct task_struct *task; + struct perf_event_context *task_ctx; + struct { + struct perf_event_header header; + u32 pid; + u32 ppid; + u32 tid; + u32 ptid; + u64 time; + } event_id; +}; + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + } event_id; +}; + +struct perf_namespaces_event { + struct task_struct *task; + long: 32; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 nr_namespaces; + struct perf_ns_link_info link_info[7]; + } event_id; +}; + +struct perf_cgroup_event { + char *path; + int path_size; + struct { + struct perf_event_header header; + u64 id; + char path[0]; + } event_id; +}; + +struct perf_mmap_event { + struct vm_area_struct *vma; + const char *file_name; + int file_size; + int maj; + int min; + long: 32; + u64 ino; + u64 ino_generation; + u32 prot; + u32 flags; + u8 build_id[20]; + u32 build_id_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event_id; +}; + +struct perf_switch_event { + struct task_struct *task; + struct task_struct *next_prev; + struct { + struct perf_event_header header; + u32 next_prev_pid; + u32 next_prev_tid; + } event_id; +}; + +struct perf_ksymbol_event { + const char *name; + int name_len; + struct { + struct perf_event_header header; + u64 addr; + u32 len; + u16 ksym_type; + u16 flags; + } event_id; +}; + +struct perf_bpf_event { + struct bpf_prog *prog; + struct { + struct perf_event_header header; + u16 type; + u16 flags; + u32 id; + u8 tag[8]; + } event_id; +}; + +struct perf_text_poke_event { + const void *old_bytes; + const void *new_bytes; + size_t pad; + u16 old_len; + u16 new_len; + struct { + struct perf_event_header header; + u64 addr; + } event_id; +}; + +struct swevent_htable { + struct swevent_hlist *swevent_hlist; + struct mutex hlist_mutex; + int hlist_refcount; +}; + +enum perf_probe_config { + PERF_PROBE_CONFIG_IS_RETPROBE = 1, + PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, + PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32, +}; + +enum { + IF_ACT_NONE = -1, + IF_ACT_FILTER = 0, + IF_ACT_START = 1, + IF_ACT_STOP = 2, + IF_SRC_FILE = 3, + IF_SRC_KERNEL = 4, + IF_SRC_FILEADDR = 5, + IF_SRC_KERNELADDR = 6, +}; + +enum { + IF_STATE_ACTION = 0, + IF_STATE_SOURCE = 1, + IF_STATE_END = 2, +}; + +struct perf_aux_event { + struct perf_event_header header; + u64 hw_id; +}; + +struct perf_aux_event___2 { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +struct perf_aux_event___3 { + struct perf_event_header header; + u64 offset; + u64 size; + u64 flags; +}; + +struct files_stat_struct { + long unsigned int nr_files; + long unsigned int nr_free_files; + long unsigned int max_files; +}; + +struct backing_file { + struct file file; + union { + struct path 
user_path; + freeptr_t bf_freeptr; + }; +}; + +struct pde_opener { + struct list_head lh; + struct file *file; + bool closing; + struct completion *c; +}; + +enum { + BIAS = 2147483648, +}; + +struct fuse_dirent { + uint64_t ino; + uint64_t off; + uint32_t namelen; + uint32_t type; + char name[0]; +}; + +struct fuse_direntplus { + struct fuse_entry_out entry_out; + struct fuse_dirent dirent; +}; + +enum fuse_parse_result { + FOUND_ERR = -1, + FOUND_NONE = 0, + FOUND_SOME = 1, + FOUND_ALL = 2, +}; + +struct reserve_ticket { + u64 bytes; + int error; + bool steal; + struct list_head list; + wait_queue_head_t wait; + long: 32; +}; + +struct sha512_state { + u64 state[8]; + u64 count[2]; + u8 buf[128]; +}; + +typedef void sha512_block_fn(struct sha512_state *, const u8 *, int); + +struct trace_event_raw_kyber_latency { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char type[8]; + u8 percentile; + u8 numerator; + u8 denominator; + unsigned int samples; + char __data[0]; +}; + +struct trace_event_raw_kyber_adjust { + struct trace_entry ent; + dev_t dev; + char domain[16]; + unsigned int depth; + char __data[0]; +}; + +struct trace_event_raw_kyber_throttled { + struct trace_entry ent; + dev_t dev; + char domain[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_kyber_latency {}; + +struct trace_event_data_offsets_kyber_adjust {}; + +struct trace_event_data_offsets_kyber_throttled {}; + +typedef void (*btf_trace_kyber_latency)(void *, dev_t, const char *, const char *, unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_kyber_adjust)(void *, dev_t, const char *, unsigned int); + +typedef void (*btf_trace_kyber_throttled)(void *, dev_t, const char *); + +enum { + KYBER_READ = 0, + KYBER_WRITE = 1, + KYBER_DISCARD = 2, + KYBER_OTHER = 3, + KYBER_NUM_DOMAINS = 4, +}; + +enum { + KYBER_ASYNC_PERCENT = 75, +}; + +enum { + KYBER_LATENCY_SHIFT = 2, + KYBER_GOOD_BUCKETS = 4, + KYBER_LATENCY_BUCKETS = 8, +}; + +enum { + KYBER_TOTAL_LATENCY = 0, + KYBER_IO_LATENCY = 1, +}; + +struct kyber_cpu_latency { + atomic_t buckets[48]; +}; + +struct kyber_ctx_queue { + spinlock_t lock; + struct list_head rq_list[4]; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; + long: 32; +}; + +struct kyber_queue_data { + struct request_queue *q; + dev_t dev; + struct sbitmap_queue domain_tokens[4]; + unsigned int async_depth; + struct kyber_cpu_latency *cpu_latency; + struct timer_list timer; + unsigned int latency_buckets[48]; + long unsigned int latency_timeout[3]; + int domain_p99[3]; + long: 32; + u64 latency_targets[3]; +}; + +struct kyber_hctx_data { + spinlock_t lock; + struct list_head rqs[4]; + unsigned int cur_domain; + unsigned int batching; + struct kyber_ctx_queue *kcqs; + struct sbitmap kcq_map[4]; + struct sbq_wait domain_wait[4]; + struct sbq_wait_state *domain_ws[4]; + atomic_t wait_index[4]; +}; + +struct flush_kcq_data { + struct kyber_hctx_data *khd; + unsigned int sched_domain; + struct list_head *list; +}; + +struct io_futex { + struct file *file; + union { + u32 *uaddr; + struct futex_waitv *uwaitv; + }; + long unsigned int futex_val; + long unsigned int futex_mask; + long unsigned int futexv_owned; + u32 futex_flags; + unsigned int futex_nr; + bool futexv_unqueued; +}; + +struct io_futex_data { + struct futex_q q; + struct io_kiocb *req; + long: 32; +}; + +struct pcim_iomap_devres { + void *table[6]; +}; + +struct pcim_intx_devres { + int orig_intx; +}; + +enum pcim_addr_devres_type { + PCIM_ADDR_DEVRES_TYPE_INVALID = 0, + 
PCIM_ADDR_DEVRES_TYPE_REGION = 1, + PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING = 2, + PCIM_ADDR_DEVRES_TYPE_MAPPING = 3, +}; + +struct pcim_addr_devres { + enum pcim_addr_devres_type type; + void *baseaddr; + long unsigned int offset; + long unsigned int len; + int bar; +}; + +struct broken_edid { + u8 manufacturer[4]; + u32 model; + u32 fix; +}; + +struct __fb_timings { + u32 dclk; + u32 hfreq; + u32 vfreq; + u32 hactive; + u32 vactive; + u32 hblank; + u32 vblank; + u32 htotal; + u32 vtotal; +}; + +struct uni_pagedict { + u16 **uni_pgdir[32]; + long unsigned int refcount; + long unsigned int sum; + unsigned char *inverse_translations[4]; + u16 *inverse_trans_unicode; +}; + +struct dev_pm_domain_attach_data { + const char * const *pd_names; + const u32 num_pd_names; + const u32 pd_flags; +}; + +struct dev_pm_domain_list { + struct device **pd_devs; + struct device_link **pd_links; + u32 *opp_tokens; + u32 num_pds; +}; + +struct opp_table; + +struct dev_pm_opp; + +typedef int (*config_regulators_t)(struct device *, struct dev_pm_opp *, struct dev_pm_opp *, struct regulator **, unsigned int); + +typedef int (*config_clks_t)(struct device *, struct opp_table *, struct dev_pm_opp *, void *, bool); + +struct dev_pm_opp_config { + const char * const *clk_names; + config_clks_t config_clks; + const char *prop_name; + config_regulators_t config_regulators; + const unsigned int *supported_hw; + unsigned int supported_hw_count; + const char * const *regulator_names; + struct device *required_dev; + unsigned int required_dev_index; +}; + +struct sync_merge_data { + char name[32]; + __s32 fd2; + __s32 fence; + __u32 flags; + __u32 pad; +}; + +struct sync_fence_info { + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u32 flags; + __u64 timestamp_ns; +}; + +struct sync_file_info { + char name[32]; + __s32 status; + __u32 flags; + __u32 num_fences; + __u32 pad; + __u64 sync_fence_info; +}; + +struct sync_set_deadline { + __u64 deadline_ns; + __u64 pad; +}; + +enum { + NVME_CAP_CSS_NVM = 1, + NVME_CAP_CSS_CSI = 64, +}; + +enum { + NVME_CAP_CRMS_CRWMS = 576460752303423488ULL, + NVME_CAP_CRMS_CRIMS = 1152921504606846976ULL, +}; + +enum { + NVME_PS_FLAGS_MAX_POWER_SCALE = 1, + NVME_PS_FLAGS_NON_OP_STATE = 2, +}; + +enum nvme_ctrl_attr { + NVME_CTRL_ATTR_HID_128_BIT = 1, + NVME_CTRL_ATTR_TBKAS = 64, + NVME_CTRL_ATTR_ELBAS = 32768, +}; + +struct nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 cmic; + __u8 mdts; + __le16 cntlid; + __le32 ver; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __u8 rsvd100[11]; + __u8 cntrltype; + __u8 fguid[16]; + __le16 crdt1; + __le16 crdt2; + __le16 crdt3; + __u8 rsvd134[122]; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 avscc; + __u8 apsta; + __le16 wctemp; + __le16 cctemp; + __le16 mtfa; + __le32 hmpre; + __le32 hmmin; + __u8 tnvmcap[16]; + __u8 unvmcap[16]; + __le32 rpmbs; + __le16 edstt; + __u8 dsto; + __u8 fwug; + __le16 kas; + __le16 hctma; + __le16 mntmt; + __le16 mxtmt; + __le32 sanicap; + __le32 hmminds; + __le16 hmmaxd; + __le16 nvmsetidmax; + __le16 endgidmax; + __u8 anatt; + __u8 anacap; + __le32 anagrpmax; + __le32 nanagrpid; + __u8 rsvd352[160]; + __u8 sqes; + __u8 cqes; + __le16 maxcmd; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 nvscc; + __u8 nwpc; + __le16 acwu; + __u8 rsvd534[2]; + __le32 sgls; + __le32 mnan; + __u8 rsvd544[224]; + char 
subnqn[256]; + __u8 rsvd1024[768]; + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 ctrattr; + __u8 msdbd; + __u8 rsvd1804[2]; + __u8 dctype; + __u8 rsvd1807[241]; + struct nvme_id_power_state psd[32]; + __u8 vs[1024]; +}; + +struct nvme_id_ns_cs_indep { + __u8 nsfeat; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __le32 anagrpid; + __u8 nsattr; + __u8 rsvd9; + __le16 nvmsetid; + __le16 endgid; + __u8 nstat; + __u8 rsvd15[4081]; +}; + +struct nvme_id_ns_nvm { + __le64 lbstm; + __u8 pic; + __u8 rsvd9[3]; + __le32 elbaf[64]; + __u8 rsvd268[3828]; +}; + +enum { + NVME_ID_NS_NVM_STS_MASK = 127, + NVME_ID_NS_NVM_GUARD_SHIFT = 7, + NVME_ID_NS_NVM_GUARD_MASK = 3, + NVME_ID_NS_NVM_QPIF_SHIFT = 9, + NVME_ID_NS_NVM_QPIF_MASK = 15, + NVME_ID_NS_NVM_QPIFS = 8, +}; + +struct nvme_id_ctrl_nvm { + __u8 vsl; + __u8 wzsl; + __u8 wusl; + __u8 dmrl; + __le32 dmrsl; + __le64 dmsl; + __u8 rsvd16[4080]; +}; + +enum { + NVME_CSI_NVM = 0, + NVME_CSI_ZNS = 2, +}; + +enum { + NVME_NS_FEAT_THIN = 1, + NVME_NS_FEAT_ATOMICS = 2, + NVME_NS_FEAT_IO_OPT = 16, + NVME_NS_ATTR_RO = 1, + NVME_NS_FLBAS_LBA_MASK = 15, + NVME_NS_FLBAS_LBA_UMASK = 96, + NVME_NS_FLBAS_LBA_SHIFT = 1, + NVME_NS_FLBAS_META_EXT = 16, + NVME_NS_NMIC_SHARED = 1, + NVME_NS_ROTATIONAL = 16, + NVME_NS_VWC_NOT_PRESENT = 32, + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, + NVME_NS_DPC_PI_LAST = 16, + NVME_NS_DPC_PI_FIRST = 8, + NVME_NS_DPC_PI_TYPE3 = 4, + NVME_NS_DPC_PI_TYPE2 = 2, + NVME_NS_DPC_PI_TYPE1 = 1, + NVME_NS_DPS_PI_FIRST = 8, + NVME_NS_DPS_PI_MASK = 7, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, +}; + +enum { + NVME_NSTAT_NRDY = 1, +}; + +enum { + NVME_NVM_NS_16B_GUARD = 0, + NVME_NVM_NS_32B_GUARD = 1, + NVME_NVM_NS_64B_GUARD = 2, + NVME_NVM_NS_QTYPE_GUARD = 3, +}; + +struct nvme_ns_id_desc { + __u8 nidt; + __u8 nidl; + __le16 reserved; +}; + +enum { + NVME_NIDT_EUI64 = 1, + NVME_NIDT_NGUID = 2, + NVME_NIDT_UUID = 3, + NVME_NIDT_CSI = 4, +}; + +struct nvme_fw_slot_info_log { + __u8 afi; + __u8 rsvd1[7]; + __le64 frs[7]; + __u8 rsvd64[448]; +}; + +enum { + NVME_AER_ERROR = 0, + NVME_AER_SMART = 1, + NVME_AER_NOTICE = 2, + NVME_AER_CSS = 6, + NVME_AER_VS = 7, +}; + +enum { + NVME_AER_ERROR_PERSIST_INT_ERR = 3, +}; + +enum { + NVME_AER_NOTICE_NS_CHANGED = 0, + NVME_AER_NOTICE_FW_ACT_STARTING = 1, + NVME_AER_NOTICE_ANA = 3, + NVME_AER_NOTICE_DISC_CHANGED = 240, +}; + +enum { + NVME_AEN_CFG_NS_ATTR = 256, + NVME_AEN_CFG_FW_ACT = 512, + NVME_AEN_CFG_ANA_CHANGE = 2048, + NVME_AEN_CFG_DISC_CHANGE = -2147483648, +}; + +enum { + NVME_DSMGMT_IDR = 1, + NVME_DSMGMT_IDW = 2, + NVME_DSMGMT_AD = 4, +}; + +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +enum nvme_zone_mgmt_action { + NVME_ZONE_CLOSE = 1, + NVME_ZONE_FINISH = 2, + NVME_ZONE_OPEN = 3, + NVME_ZONE_RESET = 4, + NVME_ZONE_OFFLINE = 5, + NVME_ZONE_SET_DESC_EXT = 16, +}; + +struct nvme_feat_auto_pst { + __le64 entries[32]; +}; + +struct nvme_feat_host_behavior { + __u8 acre; + __u8 etdas; + __u8 lbafee; + __u8 resv1[509]; +}; + +enum { + NVME_ENABLE_ACRE = 1, + NVME_ENABLE_LBAFEE = 1, +}; + +enum { + NVME_SUBMIT_AT_HEAD = 1, + NVME_SUBMIT_NOWAIT = 2, + NVME_SUBMIT_RESERVED = 4, + NVME_SUBMIT_RETRY = 8, +}; + +struct nvme_zone_info { + u64 zone_size; + unsigned int max_open_zones; + unsigned int max_active_zones; +}; + +struct trace_event_raw_nvme_setup_cmd { + struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + u8 opcode; + u8 flags; + u8 fctype; + u16 cid; 
+ u32 nsid; + bool metadata; + u8 cdw10[24]; + char __data[0]; +}; + +struct trace_event_raw_nvme_complete_rq { + struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + int cid; + long: 32; + u64 result; + u8 retries; + u8 flags; + u16 status; + char __data[0]; + long: 32; +}; + +struct trace_event_raw_nvme_async_event { + struct trace_entry ent; + int ctrl_id; + u32 result; + char __data[0]; +}; + +struct trace_event_raw_nvme_sq { + struct trace_entry ent; + int ctrl_id; + char disk[32]; + int qid; + u16 sq_head; + u16 sq_tail; + char __data[0]; +}; + +struct trace_event_data_offsets_nvme_setup_cmd {}; + +struct trace_event_data_offsets_nvme_complete_rq {}; + +struct trace_event_data_offsets_nvme_async_event {}; + +struct trace_event_data_offsets_nvme_sq {}; + +typedef void (*btf_trace_nvme_setup_cmd)(void *, struct request *, struct nvme_command *); + +typedef void (*btf_trace_nvme_complete_rq)(void *, struct request *); + +typedef void (*btf_trace_nvme_async_event)(void *, struct nvme_ctrl *, u32); + +typedef void (*btf_trace_nvme_sq)(void *, struct request *, __le16, int); + +struct nvme_ns_info { + struct nvme_ns_ids ids; + u32 nsid; + __le32 anagrpid; + u8 pi_offset; + bool is_shared; + bool is_readonly; + bool is_ready; + bool is_removed; + bool is_rotational; + bool no_vwc; +}; + +enum nvme_disposition { + COMPLETE___2 = 0, + RETRY = 1, + FAILOVER = 2, + AUTHENTICATE = 3, +}; + +struct nvme_core_quirk_entry { + u16 vid; + const char *mn; + const char *fr; + long unsigned int quirks; +}; + +struct async_scan_info { + struct nvme_ctrl *ctrl; + atomic_t next_nsid; + __le32 *ns_list; +}; + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *, struct phy_device *); + +struct phy_reg { + u16 reg; + u16 val; +}; + +enum i8042_controller_reset_mode { + I8042_RESET_NEVER = 0, + I8042_RESET_ALWAYS = 1, + I8042_RESET_ON_S2RAM = 2, +}; + +struct i8042_port { + struct serio *serio; + int irq; + bool exists; + bool driver_bound; + signed char mux; +}; + +struct of_endpoint { + unsigned int port; + unsigned int id; + const struct device_node *local_node; +}; + +struct supplier_bindings { + struct device_node * (*parse_prop)(struct device_node *, const char *, int); + struct device_node * (*get_con_dev)(struct device_node *); + bool optional; + u8 fwlink_flags; +}; + +struct gnet_estimator { + signed char interval; + unsigned char ewma_log; +}; + +struct net_rate_estimator { + struct gnet_stats_basic_sync *bstats; + spinlock_t *stats_lock; + bool running; + struct gnet_stats_basic_sync *cpu_bstats; + u8 ewma_log; + u8 intvl_log; + seqcount_t seq; + u64 last_packets; + u64 last_bytes; + u64 avpps; + u64 avbps; + long unsigned int next_jiffies; + struct timer_list timer; + struct callback_head rcu; +}; + +struct debug_reply_data { + struct ethnl_reply_data base; + u32 msg_mask; +}; + +enum nfnl_cthelper_msg_types { + NFNL_MSG_CTHELPER_NEW = 0, + NFNL_MSG_CTHELPER_GET = 1, + NFNL_MSG_CTHELPER_DEL = 2, + NFNL_MSG_CTHELPER_MAX = 3, +}; + +enum nfnl_cthelper_type { + NFCTH_UNSPEC = 0, + NFCTH_NAME = 1, + NFCTH_TUPLE = 2, + NFCTH_QUEUE_NUM = 3, + NFCTH_POLICY = 4, + NFCTH_PRIV_DATA_LEN = 5, + NFCTH_STATUS = 6, + __NFCTH_MAX = 7, +}; + +enum nfnl_cthelper_policy_type { + NFCTH_POLICY_SET_UNSPEC = 0, + NFCTH_POLICY_SET_NUM = 1, + NFCTH_POLICY_SET = 2, + NFCTH_POLICY_SET1 = 2, + NFCTH_POLICY_SET2 = 3, + NFCTH_POLICY_SET3 = 4, + NFCTH_POLICY_SET4 = 5, + __NFCTH_POLICY_SET_MAX = 6, +}; + +enum nfnl_cthelper_pol_type { + NFCTH_POLICY_UNSPEC = 0, + NFCTH_POLICY_NAME = 1, + 
NFCTH_POLICY_EXPECT_MAX = 2, + NFCTH_POLICY_EXPECT_TIMEOUT = 3, + __NFCTH_POLICY_MAX = 4, +}; + +enum nfnl_cthelper_tuple_type { + NFCTH_TUPLE_UNSPEC = 0, + NFCTH_TUPLE_L3PROTONUM = 1, + NFCTH_TUPLE_L4PROTONUM = 2, + __NFCTH_TUPLE_MAX = 3, +}; + +struct nfnl_cthelper { + struct list_head list; + struct nf_conntrack_helper helper; +}; + +enum nft_ct_keys { + NFT_CT_STATE = 0, + NFT_CT_DIRECTION = 1, + NFT_CT_STATUS = 2, + NFT_CT_MARK = 3, + NFT_CT_SECMARK = 4, + NFT_CT_EXPIRATION = 5, + NFT_CT_HELPER = 6, + NFT_CT_L3PROTOCOL = 7, + NFT_CT_SRC = 8, + NFT_CT_DST = 9, + NFT_CT_PROTOCOL = 10, + NFT_CT_PROTO_SRC = 11, + NFT_CT_PROTO_DST = 12, + NFT_CT_LABELS = 13, + NFT_CT_PKTS = 14, + NFT_CT_BYTES = 15, + NFT_CT_AVGPKT = 16, + NFT_CT_ZONE = 17, + NFT_CT_EVENTMASK = 18, + NFT_CT_SRC_IP = 19, + NFT_CT_DST_IP = 20, + NFT_CT_SRC_IP6 = 21, + NFT_CT_DST_IP6 = 22, + NFT_CT_ID = 23, + __NFT_CT_MAX = 24, +}; + +enum nft_ct_attributes { + NFTA_CT_UNSPEC = 0, + NFTA_CT_DREG = 1, + NFTA_CT_KEY = 2, + NFTA_CT_DIRECTION = 3, + NFTA_CT_SREG = 4, + __NFTA_CT_MAX = 5, +}; + +enum nft_ct_helper_attributes { + NFTA_CT_HELPER_UNSPEC = 0, + NFTA_CT_HELPER_NAME = 1, + NFTA_CT_HELPER_L3PROTO = 2, + NFTA_CT_HELPER_L4PROTO = 3, + __NFTA_CT_HELPER_MAX = 4, +}; + +enum nft_ct_expectation_attributes { + NFTA_CT_EXPECT_UNSPEC = 0, + NFTA_CT_EXPECT_L3PROTO = 1, + NFTA_CT_EXPECT_L4PROTO = 2, + NFTA_CT_EXPECT_DPORT = 3, + NFTA_CT_EXPECT_TIMEOUT = 4, + NFTA_CT_EXPECT_SIZE = 5, + __NFTA_CT_EXPECT_MAX = 6, +}; + +struct nft_ct { + enum nft_ct_keys key: 8; + enum ip_conntrack_dir dir: 8; + u8 len; + union { + u8 dreg; + u8 sreg; + }; +}; + +struct nft_ct_helper_obj { + struct nf_conntrack_helper *helper4; + struct nf_conntrack_helper *helper6; + u8 l4proto; +}; + +struct nft_ct_expect_obj { + u16 l3num; + __be16 dport; + u8 l4proto; + u8 size; + u32 timeout; +}; + +struct udp_dev_scratch { + u32 _tsize_state; +}; + +struct bpf_iter__udp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct udp_sock *udp_sk; + }; + uid_t uid; + long: 32; + int bucket; + long: 32; +}; + +struct bpf_udp_iter_state { + struct udp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + int offset; + struct sock **batch; + bool st_bucket_done; +}; + +struct ifaddrlblmsg { + __u8 ifal_family; + __u8 __ifal_reserved; + __u8 ifal_prefixlen; + __u8 ifal_flags; + __u32 ifal_index; + __u32 ifal_seq; +}; + +enum { + IFAL_ADDRESS = 1, + IFAL_LABEL = 2, + __IFAL_MAX = 3, +}; + +struct ip6addrlbl_entry { + struct in6_addr prefix; + int prefixlen; + int ifindex; + int addrtype; + u32 label; + struct hlist_node list; + struct callback_head rcu; +}; + +struct ip6addrlbl_init_table { + const struct in6_addr *prefix; + int prefixlen; + u32 label; +}; + +struct nf_bridge_frag_data; + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute pop +#endif + +#endif /* __VMLINUX_H__ */ diff --git a/selftest/struct-ops/dep/include/arch/riscv/vmlinux-v6.13-rc1-g353b2a7de2a3.h b/selftest/struct-ops/dep/include/arch/riscv/vmlinux-v6.13-rc1-g353b2a7de2a3.h new file mode 100644 index 00000000..84e853cd --- /dev/null +++ b/selftest/struct-ops/dep/include/arch/riscv/vmlinux-v6.13-rc1-g353b2a7de2a3.h @@ -0,0 +1,100680 @@ +#ifndef __VMLINUX_H__ +#define __VMLINUX_H__ + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record) +#endif + +typedef unsigned char __u8; + +typedef short unsigned int __u16; + +typedef int __s32; + +typedef unsigned int __u32; + +typedef 
long long int __s64; + +typedef long long unsigned int __u64; + +typedef __u8 u8; + +typedef __u16 u16; + +typedef __s32 s32; + +typedef __u32 u32; + +typedef __s64 s64; + +typedef __u64 u64; + +enum { + false = 0, + true = 1, +}; + +typedef long int __kernel_long_t; + +typedef long unsigned int __kernel_ulong_t; + +typedef int __kernel_pid_t; + +typedef unsigned int __kernel_uid32_t; + +typedef __kernel_ulong_t __kernel_size_t; + +typedef long long int __kernel_time64_t; + +typedef __kernel_long_t __kernel_clock_t; + +typedef int __kernel_timer_t; + +typedef int __kernel_clockid_t; + +typedef __kernel_pid_t pid_t; + +typedef __kernel_clockid_t clockid_t; + +typedef _Bool bool; + +typedef __kernel_size_t size_t; + +typedef s64 ktime_t; + +typedef struct { + int counter; +} atomic_t; + +typedef struct { + s64 counter; +} atomic64_t; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct hlist_node; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +struct __kernel_timespec { + __kernel_time64_t tv_sec; + long long int tv_nsec; +}; + +typedef long unsigned int cycles_t; + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +struct qspinlock { + union { + atomic_t val; + struct { + u8 locked; + u8 pending; + }; + struct { + u16 locked_pending; + u16 tail; + }; + }; +}; + +typedef struct qspinlock arch_spinlock_t; + +struct raw_spinlock { + arch_spinlock_t raw_lock; +}; + +typedef struct raw_spinlock raw_spinlock_t; + +struct thread_info { + long unsigned int flags; + int preempt_count; + long int kernel_sp; + long int user_sp; + int cpu; + long unsigned int syscall_work; + long unsigned int a0; + long unsigned int a1; + long unsigned int a2; +}; + +struct refcount_struct { + atomic_t refs; +}; + +typedef struct refcount_struct refcount_t; + +struct llist_node { + struct llist_node *next; +}; + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; + u16 src; + u16 dst; +}; + +struct load_weight { + long unsigned int weight; + u32 inv_weight; +}; + +struct rb_node { + long unsigned int __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + long unsigned int load_avg; + long unsigned int runnable_avg; + long unsigned int util_avg; + unsigned int util_est; +}; + +struct cfs_rq; + +struct sched_entity { + struct load_weight load; + struct rb_node run_node; + u64 deadline; + u64 min_vruntime; + u64 min_slice; + struct list_head group_node; + unsigned char on_rq; + unsigned char sched_delayed; + unsigned char rel_deadline; + unsigned char custom_slice; + u64 exec_start; + u64 sum_exec_runtime; + u64 prev_sum_exec_runtime; + u64 vruntime; + s64 vlag; + u64 slice; + u64 nr_migrations; + int depth; + struct sched_entity *parent; + struct cfs_rq *cfs_rq; + struct cfs_rq *my_q; + long unsigned int runnable_weight; + long: 64; + struct sched_avg avg; +}; + +struct sched_rt_entity { + struct list_head run_list; + long unsigned int timeout; + long unsigned int watchdog_stamp; + unsigned int time_slice; + short unsigned int on_rq; + short unsigned 
int on_list; + struct sched_rt_entity *back; +}; + +struct timerqueue_node { + struct rb_node node; + ktime_t expires; +}; + +enum hrtimer_restart { + HRTIMER_NORESTART = 0, + HRTIMER_RESTART = 1, +}; + +struct hrtimer_clock_base; + +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; +}; + +struct sched_dl_entity; + +typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); + +struct task_struct; + +typedef struct task_struct * (*dl_server_pick_f)(struct sched_dl_entity *); + +struct rq; + +struct sched_dl_entity { + struct rb_node rb_node; + u64 dl_runtime; + u64 dl_deadline; + u64 dl_period; + u64 dl_bw; + u64 dl_density; + s64 runtime; + u64 deadline; + unsigned int flags; + unsigned int dl_throttled: 1; + unsigned int dl_yielded: 1; + unsigned int dl_non_contending: 1; + unsigned int dl_overrun: 1; + unsigned int dl_server: 1; + unsigned int dl_defer: 1; + unsigned int dl_defer_armed: 1; + unsigned int dl_defer_running: 1; + struct hrtimer dl_timer; + struct hrtimer inactive_timer; + struct rq *rq; + dl_server_has_tasks_f server_has_tasks; + dl_server_pick_f server_pick_task; + struct sched_dl_entity *pi_se; +}; + +struct scx_dsq_list_node { + struct list_head node; + u32 flags; + u32 priv; +}; + +typedef atomic64_t atomic_long_t; + +struct scx_dispatch_q; + +struct cgroup; + +struct sched_ext_entity { + struct scx_dispatch_q *dsq; + struct scx_dsq_list_node dsq_list; + struct rb_node dsq_priq; + u32 dsq_seq; + u32 dsq_flags; + u32 flags; + u32 weight; + s32 sticky_cpu; + s32 holding_cpu; + u32 kf_mask; + struct task_struct *kf_tasks[2]; + atomic_long_t ops_state; + struct list_head runnable_node; + long unsigned int runnable_at; + u64 ddsp_dsq_id; + u64 ddsp_enq_flags; + u64 slice; + u64 dsq_vtime; + bool disallow; + struct cgroup *cgrp_moving_from; + struct list_head tasks_node; +}; + +struct sched_statistics {}; + +struct cpumask { + long unsigned int bits[128]; +}; + +typedef struct cpumask cpumask_t; + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; + u8 need_mb; + } b; + u32 s; +}; + +struct sched_info { + long unsigned int pcount; + long long unsigned int run_delay; + long long unsigned int last_arrival; + long long unsigned int last_queued; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + +struct pollfd; + +struct restart_block { + long unsigned int arch_data; + long int (*fn)(struct restart_block *); + union { + struct { + u32 *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 *uaddr2; + } futex; + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec *rmtp; + struct old_timespec32 *compat_rmtp; + }; + u64 expires; + } nanosleep; + struct { + struct pollfd *ufds; + int nfds; + int has_timeout; + long unsigned int tv_sec; + long unsigned int tv_nsec; + } poll; + }; +}; + +struct prev_cputime { + u64 utime; + u64 stime; + raw_spinlock_t lock; +}; + +struct rb_root { + struct rb_node *rb_node; +}; + +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +struct posix_cputimers { + struct posix_cputimer_base bases[3]; + 
unsigned int timers_active; + unsigned int expiry_active; +}; + +struct optimistic_spin_queue { + atomic_t tail; +}; + +struct mutex { + atomic_long_t owner; + raw_spinlock_t wait_lock; + struct optimistic_spin_queue osq; + struct list_head wait_list; +}; + +struct posix_cputimers_work { + struct callback_head work; + struct mutex mutex; + unsigned int scheduled; +}; + +struct sem_undo_list; + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +struct sysv_shm { + struct list_head shm_clist; +}; + +typedef struct { + long unsigned int sig[1]; +} sigset_t; + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +struct seccomp_filter; + +struct seccomp { + int mode; + atomic_t filter_count; + struct seccomp_filter *filter; +}; + +struct syscall_user_dispatch { + char *selector; + long unsigned int offset; + long unsigned int len; + bool on_dispatch; +}; + +struct spinlock { + union { + struct raw_spinlock rlock; + }; +}; + +typedef struct spinlock spinlock_t; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_io_accounting { + u64 rchar; + u64 wchar; + u64 syscr; + u64 syscw; + u64 read_bytes; + u64 write_bytes; + u64 cancelled_write_bytes; +}; + +typedef struct { + long unsigned int bits[1]; +} nodemask_t; + +struct seqcount { + unsigned int sequence; +}; + +typedef struct seqcount seqcount_t; + +struct seqcount_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_spinlock seqcount_spinlock_t; + +struct arch_tlbflush_unmap_batch { + struct cpumask cpumask; +}; + +struct tlbflush_unmap_batch { + struct arch_tlbflush_unmap_batch arch; + bool flush_required; + bool writable; +}; + +struct page; + +struct page_frag { + struct page *page; + __u32 offset; + __u32 size; +}; + +struct kmap_ctrl {}; + +struct timer_list { + struct hlist_node entry; + long unsigned int expires; + void (*function)(struct timer_list *); + u32 flags; +}; + +struct llist_head { + struct llist_node *first; +}; + +struct __riscv_d_ext_state { + __u64 f[32]; + __u32 fcsr; +}; + +struct __riscv_v_ext_state { + long unsigned int vstart; + long unsigned int vl; + long unsigned int vtype; + long unsigned int vcsr; + long unsigned int vlenb; + void *datap; +}; + +struct thread_struct { + long unsigned int ra; + long unsigned int sp; + long unsigned int s[12]; + struct __riscv_d_ext_state fstate; + long unsigned int bad_cause; + long unsigned int envcfg; + u32 riscv_v_flags; + u32 vstate_ctrl; + struct __riscv_v_ext_state vstate; + long unsigned int align_ctl; + struct __riscv_v_ext_state kernel_vstate; + bool force_icache_flush; + unsigned int prev_cpu; +}; + +struct sched_class; + +struct task_group; + +struct rcu_node; + +struct mm_struct; + +struct address_space; + +struct pid; + +struct completion; + +struct cred; + +struct key; + +struct nameidata; + +struct fs_struct; + +struct files_struct; + +struct io_uring_task; + +struct nsproxy; + +struct signal_struct; + +struct sighand_struct; + +struct rt_mutex_waiter; + +struct bio_list; + +struct blk_plug; + +struct reclaim_state; + +struct io_context; + +struct capture_control; + +struct kernel_siginfo; + +typedef struct kernel_siginfo kernel_siginfo_t; + +struct css_set; + +struct robust_list_head; + +struct futex_pi_state; + +struct perf_event_context; + +struct rseq; + +struct pipe_inode_info; + +struct task_delay_info; + +struct mem_cgroup; + +struct obj_cgroup; + +struct gendisk; + +struct uprobe_task; + +struct vm_struct; + +struct bpf_local_storage; + +struct bpf_run_ctx; + +struct bpf_net_context; + +struct 
task_struct { + struct thread_info thread_info; + unsigned int __state; + unsigned int saved_state; + void *stack; + refcount_t usage; + unsigned int flags; + unsigned int ptrace; + int on_cpu; + struct __call_single_node wake_entry; + unsigned int wakee_flips; + long unsigned int wakee_flip_decay_ts; + struct task_struct *last_wakee; + int recent_used_cpu; + int wake_cpu; + int on_rq; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + long: 64; + long: 64; + struct sched_entity se; + struct sched_rt_entity rt; + struct sched_dl_entity dl; + struct sched_dl_entity *dl_server; + struct sched_ext_entity scx; + const struct sched_class *sched_class; + struct task_group *sched_task_group; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct sched_statistics stats; + struct hlist_head preempt_notifiers; + unsigned int btrace_seq; + unsigned int policy; + long unsigned int max_allowed_capacity; + int nr_cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t *user_cpus_ptr; + cpumask_t cpus_mask; + void *migration_pending; + short unsigned int migration_disabled; + short unsigned int migration_flags; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; + long unsigned int rcu_tasks_nvcsw; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; + int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_exit_cpu; + struct list_head rcu_tasks_exit_list; + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + struct list_head trc_holdout_list; + struct list_head trc_blkd_node; + int trc_blkd_cpu; + struct sched_info sched_info; + struct list_head tasks; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct address_space *faults_disabled_mapping; + int exit_state; + int exit_code; + int exit_signal; + int pdeath_signal; + long unsigned int jobctl; + unsigned int personality; + unsigned int sched_reset_on_fork: 1; + unsigned int sched_contributes_to_load: 1; + unsigned int sched_migrated: 1; + long: 29; + unsigned int sched_remote_wakeup: 1; + unsigned int sched_rt_mutex: 1; + unsigned int in_execve: 1; + unsigned int in_iowait: 1; + unsigned int no_cgroup_migration: 1; + unsigned int frozen: 1; + unsigned int use_memdelay: 1; + unsigned int in_memstall: 1; + unsigned int in_eventfd: 1; + unsigned int in_thrashing: 1; + long unsigned int atomic_flags; + struct restart_block restart_block; + pid_t pid; + pid_t tgid; + long unsigned int stack_canary; + struct task_struct *real_parent; + struct task_struct *parent; + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + struct list_head ptraced; + struct list_head ptrace_entry; + struct pid *thread_pid; + struct hlist_node pid_links[4]; + struct list_head thread_node; + struct completion *vfork_done; + int *set_child_tid; + int *clear_child_tid; + void *worker_private; + u64 utime; + u64 stime; + u64 gtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + u64 start_time; + u64 start_boottime; + long unsigned int min_flt; + long unsigned int maj_flt; + struct posix_cputimers posix_cputimers; + struct posix_cputimers_work posix_cputimers_work; + const struct cred *ptracer_cred; + const struct cred *real_cred; + const struct cred *cred; + struct key *cached_requested_key; + char comm[16]; + 
struct nameidata *nameidata; + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; + long unsigned int last_switch_count; + long unsigned int last_switch_time; + struct fs_struct *fs; + struct files_struct *files; + struct io_uring_task *io_uring; + struct nsproxy *nsproxy; + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + sigset_t saved_sigmask; + struct sigpending pending; + long unsigned int sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + struct callback_head *task_works; + struct seccomp seccomp; + struct syscall_user_dispatch syscall_dispatch; + u64 parent_exec_id; + u64 self_exec_id; + spinlock_t alloc_lock; + raw_spinlock_t pi_lock; + struct wake_q_node wake_q; + struct rb_root_cached pi_waiters; + struct task_struct *pi_top_task; + struct rt_mutex_waiter *pi_blocked_on; + void *journal_info; + struct bio_list *bio_list; + struct blk_plug *plug; + struct reclaim_state *reclaim_state; + struct io_context *io_context; + struct capture_control *capture_control; + long unsigned int ptrace_message; + kernel_siginfo_t *last_siginfo; + struct task_io_accounting ioac; + unsigned int psi_flags; + u64 acct_rss_mem1; + u64 acct_vm_mem1; + u64 acct_timexpd; + nodemask_t mems_allowed; + seqcount_spinlock_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + struct css_set *cgroups; + struct list_head cg_list; + struct robust_list_head *robust_list; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; + u8 perf_recursion[4]; + struct perf_event_context *perf_event_ctxp; + struct mutex perf_event_mutex; + struct list_head perf_event_list; + struct rseq *rseq; + u32 rseq_len; + u32 rseq_sig; + long unsigned int rseq_event_mask; + int mm_cid; + int last_mm_cid; + int migrate_from_cpu; + int mm_cid_active; + struct callback_head cid_work; + struct tlbflush_unmap_batch tlb_ubc; + struct pipe_inode_info *splice_pipe; + struct page_frag task_frag; + struct task_delay_info *delays; + int nr_dirtied; + int nr_dirtied_pause; + long unsigned int dirty_paused_when; + u64 timer_slack_ns; + u64 default_timer_slack_ns; + unsigned int kasan_depth; + long unsigned int trace_recursion; + unsigned int memcg_nr_pages_over_high; + struct mem_cgroup *active_memcg; + struct obj_cgroup *objcg; + struct gendisk *throttle_disk; + struct uprobe_task *utask; + struct kmap_ctrl kmap_ctrl; + struct callback_head rcu; + refcount_t rcu_users; + int pagefault_disabled; + struct task_struct *oom_reaper_list; + struct timer_list oom_reaper_timer; + struct vm_struct *stack_vm_area; + refcount_t stack_refcount; + struct bpf_local_storage *bpf_storage; + struct bpf_run_ctx *bpf_ctx; + struct bpf_net_context *bpf_net_context; + struct llist_head kretprobe_instances; + struct llist_head rethooks; + struct thread_struct thread; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +union sigval { + int sival_int; + void *sival_ptr; +}; + +typedef union sigval sigval_t; + +union __sifields { + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + } _kill; + struct { + __kernel_timer_t _tid; + int _overrun; + sigval_t _sigval; + int _sys_private; + } _timer; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + sigval_t _sigval; + } _rt; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + int _status; + __kernel_clock_t _utime; + __kernel_clock_t _stime; + } _sigchld; + struct { + void *_addr; + union { + int _trapno; + short int _addr_lsb; + struct { + char 
_dummy_bnd[8]; + void *_lower; + void *_upper; + } _addr_bnd; + struct { + char _dummy_pkey[8]; + __u32 _pkey; + } _addr_pkey; + struct { + long unsigned int _data; + __u32 _type; + __u32 _flags; + } _perf; + }; + } _sigfault; + struct { + long int _band; + int _fd; + } _sigpoll; + struct { + void *_call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; +}; + +struct kernel_siginfo { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; +}; + +struct rseq { + __u32 cpu_id_start; + __u32 cpu_id; + __u64 rseq_cs; + __u32 flags; + __u32 node_id; + __u32 mm_cid; + char end[0]; +}; + +struct rhash_head { + struct rhash_head *next; +}; + +struct scx_dispatch_q { + raw_spinlock_t lock; + struct list_head list; + struct rb_root priq; + u32 nr; + u32 seq; + u64 id; + struct rhash_head hash_node; + struct llist_node free_node; + struct callback_head rcu; +}; + +struct rq_flags; + +struct affinity_context; + +struct sched_class { + void (*enqueue_task)(struct rq *, struct task_struct *, int); + bool (*dequeue_task)(struct rq *, struct task_struct *, int); + void (*yield_task)(struct rq *); + bool (*yield_to_task)(struct rq *, struct task_struct *); + void (*wakeup_preempt)(struct rq *, struct task_struct *, int); + int (*balance)(struct rq *, struct task_struct *, struct rq_flags *); + struct task_struct * (*pick_task)(struct rq *); + struct task_struct * (*pick_next_task)(struct rq *, struct task_struct *); + void (*put_prev_task)(struct rq *, struct task_struct *, struct task_struct *); + void (*set_next_task)(struct rq *, struct task_struct *, bool); + int (*select_task_rq)(struct task_struct *, int, int); + void (*migrate_task_rq)(struct task_struct *, int); + void (*task_woken)(struct rq *, struct task_struct *); + void (*set_cpus_allowed)(struct task_struct *, struct affinity_context *); + void (*rq_online)(struct rq *); + void (*rq_offline)(struct rq *); + struct rq * (*find_lock_rq)(struct task_struct *, struct rq *); + void (*task_tick)(struct rq *, struct task_struct *, int); + void (*task_fork)(struct task_struct *); + void (*task_dead)(struct task_struct *); + void (*switching_to)(struct rq *, struct task_struct *); + void (*switched_from)(struct rq *, struct task_struct *); + void (*switched_to)(struct rq *, struct task_struct *); + void (*reweight_task)(struct rq *, struct task_struct *, const struct load_weight *); + void (*prio_changed)(struct rq *, struct task_struct *, int); + unsigned int (*get_rr_interval)(struct rq *, struct task_struct *); + void (*update_curr)(struct rq *); + void (*task_change_group)(struct task_struct *); +}; + +typedef __kernel_uid32_t uid_t; + +typedef struct { + uid_t val; +} kuid_t; + +typedef unsigned int __kernel_gid32_t; + +typedef __kernel_gid32_t gid_t; + +typedef struct { + gid_t val; +} kgid_t; + +typedef struct { + u64 val; +} kernel_cap_t; + +struct user_struct; + +struct user_namespace; + +struct ucounts; + +struct group_info; + +struct cred { + atomic_long_t usage; + kuid_t uid; + kgid_t gid; + kuid_t suid; + kgid_t sgid; + kuid_t euid; + kgid_t egid; + kuid_t fsuid; + kgid_t fsgid; + unsigned int securebits; + kernel_cap_t cap_inheritable; + kernel_cap_t cap_permitted; + kernel_cap_t cap_effective; + kernel_cap_t cap_bset; + kernel_cap_t cap_ambient; + unsigned char jit_keyring; + struct key *session_keyring; + struct key *process_keyring; + struct key *thread_keyring; + struct key *request_key_auth; + struct user_struct *user; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct 
group_info *group_info; + union { + int non_rcu; + struct callback_head rcu; + }; +}; + +enum pcpu_fc { + PCPU_FC_AUTO = 0, + PCPU_FC_EMBED = 1, + PCPU_FC_PAGE = 2, + PCPU_FC_NR = 3, +}; + +typedef signed char __s8; + +typedef __s8 s8; + +typedef __kernel_long_t __kernel_ssize_t; + +typedef long long int __kernel_loff_t; + +typedef unsigned int __poll_t; + +typedef u32 __kernel_dev_t; + +typedef __kernel_dev_t dev_t; + +typedef short unsigned int umode_t; + +typedef __kernel_loff_t loff_t; + +typedef __kernel_ssize_t ssize_t; + +typedef s32 int32_t; + +typedef u32 uint32_t; + +typedef u64 sector_t; + +typedef u64 blkcnt_t; + +typedef unsigned int gfp_t; + +typedef unsigned int fmode_t; + +typedef void (*ctor_fn_t)(); + +struct lock_class_key {}; + +struct fs_context; + +struct fs_parameter_spec; + +struct dentry; + +struct super_block; + +struct module; + +struct file_system_type { + const char *name; + int fs_flags; + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); + void (*kill_sb)(struct super_block *); + struct module *owner; + struct file_system_type *next; + struct hlist_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[3]; + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; + struct lock_class_key i_mutex_dir_key; +}; + +struct qrwlock { + union { + atomic_t cnts; + struct { + u8 wlocked; + u8 __lstate[3]; + }; + }; + arch_spinlock_t wait_lock; +}; + +typedef struct qrwlock arch_rwlock_t; + +struct lockdep_map {}; + +struct ratelimit_state { + raw_spinlock_t lock; + int interval; + int burst; + int printed; + int missed; + unsigned int flags; + long unsigned int begin; +}; + +struct jump_entry { + s32 code; + s32 target; + long int key; +}; + +struct static_key_mod; + +struct static_key { + atomic_t enabled; + union { + long unsigned int type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; + +struct static_key_true { + struct static_key key; +}; + +struct static_key_false { + struct static_key key; +}; + +struct _ddebug { + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno: 18; + unsigned int class_id: 6; + unsigned int flags: 8; + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; +}; + +enum class_map_type { + DD_CLASS_TYPE_DISJOINT_BITS = 0, + DD_CLASS_TYPE_LEVEL_NUM = 1, + DD_CLASS_TYPE_DISJOINT_NAMES = 2, + DD_CLASS_TYPE_LEVEL_NAMES = 3, +}; + +struct ddebug_class_map { + struct list_head link; + struct module *mod; + const char *mod_name; + const char **class_names; + const int length; + const int base; + enum class_map_type map_type; +}; + +enum module_state { + MODULE_STATE_LIVE = 0, + MODULE_STATE_COMING = 1, + MODULE_STATE_GOING = 2, + MODULE_STATE_UNFORMED = 3, +}; + +struct kref { + refcount_t refcount; +}; + +struct kset; + +struct kobj_type; + +struct kernfs_node; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + const struct kobj_type *ktype; + struct kernfs_node *sd; + struct kref kref; + unsigned int state_initialized: 1; + unsigned int state_in_sysfs: 1; + unsigned int state_add_uevent_sent: 1; + unsigned int state_remove_uevent_sent: 1; + unsigned int uevent_suppress: 
1; +}; + +struct module_param_attrs; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +}; + +struct latch_tree_node { + struct rb_node node[2]; +}; + +struct mod_tree_node { + struct module *mod; + struct latch_tree_node node; +}; + +struct module_memory { + void *base; + void *rw_copy; + bool is_rox; + unsigned int size; + struct mod_tree_node mtn; +}; + +struct elf64_shdr; + +typedef struct elf64_shdr Elf64_Shdr; + +struct mod_section { + Elf64_Shdr *shdr; + int num_entries; + int max_entries; +}; + +struct mod_arch_specific { + struct mod_section got; + struct mod_section plt; + struct mod_section got_plt; +}; + +struct elf64_sym; + +typedef struct elf64_sym Elf64_Sym; + +struct mod_kallsyms { + Elf64_Sym *symtab; + unsigned int num_symtab; + char *strtab; + char *typetab; +}; + +struct _ddebug_info { + struct _ddebug *descs; + struct ddebug_class_map *classes; + unsigned int num_descs; + unsigned int num_classes; +}; + +struct module_attribute; + +struct kernel_symbol; + +struct kernel_param; + +struct exception_table_entry; + +struct bug_entry; + +struct module_sect_attrs; + +struct module_notes_attrs; + +struct tracepoint; + +typedef struct tracepoint * const tracepoint_ptr_t; + +struct srcu_struct; + +struct bpf_raw_event_map; + +struct trace_event_call; + +struct trace_eval_map; + +struct error_injection_entry; + +struct module { + enum module_state state; + struct list_head list; + char name[56]; + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + const struct kernel_symbol *syms; + const s32 *crcs; + unsigned int num_syms; + struct mutex param_lock; + struct kernel_param *kp; + unsigned int num_kp; + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const s32 *gpl_crcs; + bool using_gplonly_symbols; + bool async_probe_requested; + unsigned int num_exentries; + struct exception_table_entry *extable; + int (*init)(); + struct module_memory mem[7]; + struct mod_arch_specific arch; + long unsigned int taints; + unsigned int num_bugs; + struct list_head bug_list; + struct bug_entry *bug_table; + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + struct module_sect_attrs *sect_attrs; + struct module_notes_attrs *notes_attrs; + char *args; + void *percpu; + unsigned int percpu_size; + void *noinstr_text_start; + unsigned int noinstr_text_size; + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; + unsigned int num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; + unsigned int btf_data_size; + unsigned int btf_base_data_size; + void *btf_data; + void *btf_base_data; + struct jump_entry *jump_entries; + unsigned int num_jump_entries; + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; + struct trace_event_call **trace_events; + unsigned int num_trace_events; + struct trace_eval_map **trace_evals; + unsigned int num_trace_evals; + void *kprobes_text_start; + unsigned int kprobes_text_size; + long unsigned int *kprobe_blacklist; + unsigned int num_kprobe_blacklist; + struct list_head source_list; + struct list_head target_list; + void (*exit)(); + atomic_t refcnt; + ctor_fn_t *ctors; + unsigned int num_ctors; + struct error_injection_entry *ei_funcs; + unsigned int num_ei_funcs; + struct _ddebug_info 
dyndbg_info; + long: 64; + long: 64; + long: 64; +}; + +struct kernel_param_ops { + unsigned int flags; + int (*set)(const char *, const struct kernel_param *); + int (*get)(char *, const struct kernel_param *); + void (*free)(void *); +}; + +typedef unsigned int fop_flags_t; + +typedef void *fl_owner_t; + +struct file; + +struct kiocb; + +struct iov_iter; + +struct io_comp_batch; + +struct dir_context; + +struct poll_table_struct; + +struct vm_area_struct; + +struct inode; + +struct file_lock; + +struct file_lease; + +struct seq_file; + +struct io_uring_cmd; + +struct file_operations { + struct module *owner; + fop_flags_t fop_flags; + loff_t (*llseek)(struct file *, loff_t, int); + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *, struct io_comp_batch *, unsigned int); + int (*iterate_shared)(struct file *, struct dir_context *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); + long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int); + int (*mmap)(struct file *, struct vm_area_struct *); + int (*open)(struct inode *, struct file *); + int (*flush)(struct file *, fl_owner_t); + int (*release)(struct inode *, struct file *); + int (*fsync)(struct file *, loff_t, loff_t, int); + int (*fasync)(int, struct file *, int); + int (*lock)(struct file *, int, struct file_lock *); + long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*check_flags)(int); + int (*flock)(struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct file *); + int (*setlease)(struct file *, int, struct file_lease **, void **); + long int (*fallocate)(struct file *, int, loff_t, loff_t); + void (*show_fdinfo)(struct seq_file *, struct file *); + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); + loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int); + int (*fadvise)(struct file *, loff_t, loff_t, int); + int (*uring_cmd)(struct io_uring_cmd *, unsigned int); + int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int); +}; + +struct bug_entry { + int bug_addr_disp; + int file_disp; + short unsigned int line; + short unsigned int flags; +}; + +struct static_call_key { + void *func; +}; + +typedef struct { + arch_rwlock_t raw_lock; +} rwlock_t; + +typedef struct { + long unsigned int pgd; +} pgd_t; + +typedef struct { + long unsigned int pte; +} pte_t; + +typedef struct { + long unsigned int pgprot; +} pgprot_t; + +typedef struct page *pgtable_t; + +struct page_pool; + +struct dev_pagemap; + +struct page { + long unsigned int flags; + union { + struct { + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + struct list_head buddy_list; + struct list_head pcp_list; + }; + struct address_space *mapping; + union { + long unsigned int index; + long unsigned int share; + }; + long unsigned int private; + }; + struct { + long unsigned int pp_magic; + struct 
page_pool *pp; + long unsigned int _pp_mapping_pad; + long unsigned int dma_addr; + atomic_long_t pp_ref_count; + }; + struct { + long unsigned int compound_head; + }; + struct { + struct dev_pagemap *pgmap; + void *zone_device_data; + }; + struct callback_head callback_head; + }; + union { + unsigned int page_type; + atomic_t _mapcount; + }; + atomic_t _refcount; + long unsigned int memcg_data; +}; + +struct pt_regs { + long unsigned int epc; + long unsigned int ra; + long unsigned int sp; + long unsigned int gp; + long unsigned int tp; + long unsigned int t0; + long unsigned int t1; + long unsigned int t2; + long unsigned int s0; + long unsigned int s1; + long unsigned int a0; + long unsigned int a1; + long unsigned int a2; + long unsigned int a3; + long unsigned int a4; + long unsigned int a5; + long unsigned int a6; + long unsigned int a7; + long unsigned int s2; + long unsigned int s3; + long unsigned int s4; + long unsigned int s5; + long unsigned int s6; + long unsigned int s7; + long unsigned int s8; + long unsigned int s9; + long unsigned int s10; + long unsigned int s11; + long unsigned int t3; + long unsigned int t4; + long unsigned int t5; + long unsigned int t6; + long unsigned int status; + long unsigned int badaddr; + long unsigned int cause; + long unsigned int orig_a0; +}; + +struct rw_semaphore { + atomic_long_t count; + atomic_long_t owner; + struct optimistic_spin_queue osq; + raw_spinlock_t wait_lock; + struct list_head wait_list; +}; + +typedef __s64 time64_t; + +struct timespec64 { + time64_t tv_sec; + long int tv_nsec; +}; + +struct work_struct; + +typedef void (*work_func_t)(struct work_struct *); + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +}; + +struct workqueue_struct; + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + struct workqueue_struct *wq; + int cpu; +}; + +struct rcu_segcblist { + struct callback_head *head; + struct callback_head **tails[4]; + long unsigned int gp_seq[4]; + long int len; + long int seglen[4]; + u8 flags; +}; + +struct wait_queue_head { + spinlock_t lock; + struct list_head head; +}; + +typedef struct wait_queue_head wait_queue_head_t; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + +struct srcu_node; + +struct srcu_data { + atomic_long_t srcu_lock_count[2]; + atomic_long_t srcu_unlock_count[2]; + int srcu_reader_flavor; + long: 64; + long: 64; + long: 64; + spinlock_t lock; + struct rcu_segcblist srcu_cblist; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + bool srcu_cblist_invoking; + struct timer_list delay_work; + struct work_struct work; + struct callback_head srcu_barrier_head; + struct srcu_node *mynode; + long unsigned int grpmask; + int cpu; + struct srcu_struct *ssp; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct srcu_node { + spinlock_t lock; + long unsigned int srcu_have_cbs[4]; + long unsigned int srcu_data_have_cbs[4]; + long unsigned int srcu_gp_seq_needed_exp; + struct srcu_node *srcu_parent; + int grplo; + int grphi; +}; + +struct srcu_usage; + +struct srcu_struct { + unsigned int srcu_idx; + struct srcu_data *sda; + struct lockdep_map dep_map; + struct srcu_usage *srcu_sup; +}; + +struct srcu_usage { + struct srcu_node *node; + struct srcu_node *level[3]; + int srcu_size_state; + struct mutex srcu_cb_mutex; + spinlock_t lock; + struct mutex 
srcu_gp_mutex; + long unsigned int srcu_gp_seq; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + long unsigned int srcu_gp_start; + long unsigned int srcu_last_gp_end; + long unsigned int srcu_size_jiffies; + long unsigned int srcu_n_lock_retries; + long unsigned int srcu_n_exp_nodelay; + bool sda_is_static; + long unsigned int srcu_barrier_seq; + struct mutex srcu_barrier_mutex; + struct completion srcu_barrier_completion; + atomic_t srcu_barrier_cpu_cnt; + long unsigned int reschedule_jiffies; + long unsigned int reschedule_count; + struct delayed_work work; + struct srcu_struct *srcu_ssp; +}; + +enum pid_type { + PIDTYPE_PID = 0, + PIDTYPE_TGID = 1, + PIDTYPE_PGID = 2, + PIDTYPE_SID = 3, + PIDTYPE_MAX = 4, +}; + +struct xarray { + spinlock_t xa_lock; + gfp_t xa_flags; + void *xa_head; +}; + +struct idr { + struct xarray idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +struct proc_ns_operations; + +struct ns_common { + struct dentry *stashed; + const struct proc_ns_operations *ops; + unsigned int inum; + refcount_t count; +}; + +struct kmem_cache; + +struct fs_pin; + +struct pid_namespace { + struct idr idr; + struct callback_head rcu; + unsigned int pid_allocated; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; + struct fs_pin *bacct; + struct user_namespace *user_ns; + struct ucounts *ucounts; + int reboot; + struct ns_common ns; + int memfd_noexec_scope; +}; + +struct seqcount_raw_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; + +struct hrtimer_cpu_base; + +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_raw_spinlock_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(); + ktime_t offset; +}; + +struct rlimit { + __kernel_ulong_t rlim_cur; + __kernel_ulong_t rlim_max; +}; + +typedef void __signalfn_t(int); + +typedef __signalfn_t *__sighandler_t; + +struct sigaction { + __sighandler_t sa_handler; + long unsigned int sa_flags; + sigset_t sa_mask; +}; + +struct k_sigaction { + struct sigaction sa; +}; + +typedef struct { + seqcount_spinlock_t seqcount; + spinlock_t lock; +} seqlock_t; + +typedef struct {} lockdep_map_p; + +struct maple_tree { + union { + spinlock_t ma_lock; + lockdep_map_p ma_external_lock; + }; + unsigned int ma_flags; + void *ma_root; +}; + +struct percpu_counter { + raw_spinlock_t lock; + s64 count; + struct list_head list; + s32 *counters; +}; + +typedef struct { + atomic_long_t id; + void *vdso; + cpumask_t icache_stale_mask; + bool force_icache_flush; + long unsigned int flags; + u8 pmlen; +} mm_context_t; + +struct xol_area; + +struct uprobes_state { + struct xol_area *xol_area; +}; + +struct mm_cid; + +struct linux_binfmt; + +struct kioctx_table; + +struct mmu_notifier_subscriptions; + +struct mm_struct { + struct { + struct { + atomic_t mm_count; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct maple_tree mm_mt; + long unsigned int mmap_base; + long unsigned int mmap_legacy_base; + long unsigned int task_size; + pgd_t *pgd; + atomic_t membarrier_state; + atomic_t mm_users; + struct mm_cid *pcpu_cid; + long unsigned int mm_cid_next_scan; + unsigned int nr_cpus_allowed; + atomic_t max_nr_cid; + raw_spinlock_t cpus_allowed_lock; + atomic_long_t pgtables_bytes; + int map_count; + spinlock_t page_table_lock; + struct rw_semaphore 
mmap_lock; + struct list_head mmlist; + int mm_lock_seq; + long unsigned int hiwater_rss; + long unsigned int hiwater_vm; + long unsigned int total_vm; + long unsigned int locked_vm; + atomic64_t pinned_vm; + long unsigned int data_vm; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int def_flags; + seqcount_t write_protect_seq; + spinlock_t arg_lock; + long unsigned int start_code; + long unsigned int end_code; + long unsigned int start_data; + long unsigned int end_data; + long unsigned int start_brk; + long unsigned int brk; + long unsigned int start_stack; + long unsigned int arg_start; + long unsigned int arg_end; + long unsigned int env_start; + long unsigned int env_end; + long unsigned int saved_auxv[66]; + struct percpu_counter rss_stat[4]; + struct linux_binfmt *binfmt; + mm_context_t context; + long unsigned int flags; + spinlock_t ioctx_lock; + struct kioctx_table *ioctx_table; + struct task_struct *owner; + struct user_namespace *user_ns; + struct file *exe_file; + struct mmu_notifier_subscriptions *notifier_subscriptions; + atomic_t tlb_flush_pending; + atomic_t tlb_flush_batched; + struct uprobes_state uprobes_state; + struct work_struct async_put_work; + long: 64; + long: 64; + long: 64; + long: 64; + }; + long unsigned int cpu_bitmap[0]; +}; + +typedef u32 errseq_t; + +struct address_space_operations; + +struct address_space { + struct inode *host; + struct xarray i_pages; + struct rw_semaphore invalidate_lock; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + struct rb_root_cached i_mmap; + long unsigned int nrpages; + long unsigned int writeback_index; + const struct address_space_operations *a_ops; + long unsigned int flags; + errseq_t wb_err; + spinlock_t i_private_lock; + struct list_head i_private_list; + struct rw_semaphore i_mmap_rwsem; + void *i_private_data; +}; + +struct upid { + int nr; + struct pid_namespace *ns; +}; + +struct pid { + refcount_t count; + unsigned int level; + spinlock_t lock; + struct dentry *stashed; + u64 ino; + struct hlist_head tasks[4]; + struct hlist_head inodes; + wait_queue_head_t wait_pidfd; + struct callback_head rcu; + struct upid numbers[0]; +}; + +typedef int32_t key_serial_t; + +typedef uint32_t key_perm_t; + +struct key_type; + +struct key_tag; + +struct keyring_index_key { + long unsigned int hash; + union { + struct { + u16 desc_len; + char desc[6]; + }; + long unsigned int x; + }; + struct key_type *type; + struct key_tag *domain_tag; + const char *description; +}; + +union key_payload { + void *rcu_data0; + void *data[4]; +}; + +struct assoc_array_ptr; + +struct assoc_array { + struct assoc_array_ptr *root; + long unsigned int nr_leaves_on_tree; +}; + +struct key_user; + +struct key_restriction; + +struct key { + refcount_t usage; + key_serial_t serial; + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + struct rw_semaphore sem; + struct key_user *user; + void *security; + union { + time64_t expiry; + time64_t revoked_at; + }; + time64_t last_used_at; + kuid_t uid; + kgid_t gid; + key_perm_t perm; + short unsigned int quotalen; + short unsigned int datalen; + short int state; + long unsigned int flags; + union { + struct keyring_index_key index_key; + struct { + long unsigned int hash; + long unsigned int len_desc; + struct key_type *type; + struct key_tag *domain_tag; + char *description; + }; + }; + union { + union key_payload payload; + struct { + struct list_head name_link; + struct assoc_array keys; + }; + }; + struct key_restriction *restrict_link; +}; + +struct 
uts_namespace; + +struct ipc_namespace; + +struct mnt_namespace; + +struct net; + +struct time_namespace; + +struct cgroup_namespace; + +struct nsproxy { + refcount_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct time_namespace *time_ns; + struct time_namespace *time_ns_for_children; + struct cgroup_namespace *cgroup_ns; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; +}; + +struct pacct_struct { + int ac_flag; + long int ac_exitcode; + long unsigned int ac_mem; + u64 ac_utime; + u64 ac_stime; + long unsigned int ac_minflt; + long unsigned int ac_majflt; +}; + +struct core_state; + +struct tty_struct; + +struct taskstats; + +struct signal_struct { + refcount_t sigcnt; + atomic_t live; + int nr_threads; + int quick_threads; + struct list_head thread_head; + wait_queue_head_t wait_chldexit; + struct task_struct *curr_target; + struct sigpending shared_pending; + struct hlist_head multiprocess; + int group_exit_code; + int notify_count; + struct task_struct *group_exec_task; + int group_stop_count; + unsigned int flags; + struct core_state *core_state; + unsigned int is_child_subreaper: 1; + unsigned int has_child_subreaper: 1; + unsigned int next_posix_timer_id; + struct hlist_head posix_timers; + struct hlist_head ignored_posix_timers; + struct hrtimer real_timer; + ktime_t it_real_incr; + struct cpu_itimer it[2]; + struct thread_group_cputimer cputimer; + struct posix_cputimers posix_cputimers; + struct pid *pids[4]; + struct pid *tty_old_pgrp; + int leader; + struct tty_struct *tty; + seqlock_t stats_lock; + u64 utime; + u64 stime; + u64 cutime; + u64 cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + long unsigned int cnvcsw; + long unsigned int cnivcsw; + long unsigned int min_flt; + long unsigned int maj_flt; + long unsigned int cmin_flt; + long unsigned int cmaj_flt; + long unsigned int inblock; + long unsigned int oublock; + long unsigned int cinblock; + long unsigned int coublock; + long unsigned int maxrss; + long unsigned int cmaxrss; + struct task_io_accounting ioac; + long long unsigned int sum_sched_runtime; + struct rlimit rlim[16]; + struct pacct_struct pacct; + struct taskstats *stats; + bool oom_flag_origin; + short int oom_score_adj; + short int oom_score_adj_min; + struct mm_struct *oom_mm; + struct mutex cred_guard_mutex; + struct rw_semaphore exec_update_lock; +}; + +struct sighand_struct { + spinlock_t siglock; + refcount_t count; + wait_queue_head_t signalfd_wqh; + struct k_sigaction action[64]; +}; + +struct io_cq; + +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + short unsigned int ioprio; + spinlock_t lock; + struct xarray icq_tree; + struct io_cq *icq_hint; + struct hlist_head icq_list; + struct work_struct release_work; +}; + +enum uprobe_task_state { + UTASK_RUNNING = 0, + UTASK_SSTEP = 1, + UTASK_SSTEP_ACK = 2, + UTASK_SSTEP_TRAPPED = 3, +}; + +struct arch_uprobe_task { + long unsigned int saved_cause; +}; + +struct return_instance; + +struct uprobe; + +struct arch_uprobe; + +struct uprobe_task { + enum uprobe_task_state state; + unsigned int depth; + struct return_instance *return_instances; + union { + struct { + struct arch_uprobe_task 
autask; + long unsigned int vaddr; + }; + struct { + struct callback_head dup_xol_work; + long unsigned int dup_xol_addr; + }; + }; + struct uprobe *active_uprobe; + struct timer_list ri_timer; + long unsigned int xol_vaddr; + struct arch_uprobe *auprobe; +}; + +typedef u32 probe_opcode_t; + +typedef bool probes_handler_t(u32, long unsigned int, struct pt_regs *); + +struct arch_probe_insn { + probe_opcode_t *insn; + probes_handler_t *handler; + long unsigned int restore; +}; + +struct arch_uprobe { + union { + u8 insn[8]; + u8 ixol[8]; + }; + struct arch_probe_insn api; + long unsigned int insn_size; + bool simulate; +}; + +enum hprobe_state { + HPROBE_LEASED = 0, + HPROBE_STABLE = 1, + HPROBE_GONE = 2, + HPROBE_CONSUMED = 3, +}; + +struct hprobe { + enum hprobe_state state; + int srcu_idx; + struct uprobe *uprobe; +}; + +struct return_consumer { + __u64 cookie; + __u64 id; +}; + +struct return_instance { + struct hprobe hprobe; + long unsigned int func; + long unsigned int stack; + long unsigned int orig_ret_vaddr; + bool chained; + int consumers_cnt; + struct return_instance *next; + struct callback_head rcu; + struct return_consumer consumers[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct vmem_altmap { + long unsigned int base_pfn; + const long unsigned int end_pfn; + const long unsigned int reserve; + long unsigned int free; + long unsigned int align; + long unsigned int alloc; + bool inaccessible; +}; + +struct percpu_ref_data; + +struct percpu_ref { + long unsigned int percpu_count_ptr; + struct percpu_ref_data *data; +}; + +enum memory_type { + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_COHERENT = 2, + MEMORY_DEVICE_FS_DAX = 3, + MEMORY_DEVICE_GENERIC = 4, + MEMORY_DEVICE_PCI_P2PDMA = 5, +}; + +struct range { + u64 start; + u64 end; +}; + +struct dev_pagemap_ops; + +struct dev_pagemap { + struct vmem_altmap altmap; + struct percpu_ref ref; + struct completion done; + enum memory_type type; + unsigned int flags; + long unsigned int vmemmap_shift; + const struct dev_pagemap_ops *ops; + void *owner; + int nr_range; + union { + struct range range; + struct { + struct {} __empty_ranges; + struct range ranges[0]; + }; + }; +}; + +typedef struct { + long unsigned int val; +} swp_entry_t; + +struct folio { + union { + struct { + long unsigned int flags; + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + }; + struct address_space *mapping; + long unsigned int index; + union { + void *private; + swp_entry_t swap; + }; + atomic_t _mapcount; + atomic_t _refcount; + long unsigned int memcg_data; + }; + struct page page; + }; + union { + struct { + long unsigned int _flags_1; + long unsigned int _head_1; + atomic_t _large_mapcount; + atomic_t _entire_mapcount; + atomic_t _nr_pages_mapped; + atomic_t _pincount; + unsigned int _folio_nr_pages; + }; + struct page __page_1; + }; + union { + struct { + long unsigned int _flags_2; + long unsigned int _head_2; + void *_hugetlb_subpool; + void *_hugetlb_cgroup; + void *_hugetlb_cgroup_rsvd; + void *_hugetlb_hwpoison; + }; + struct { + long unsigned int _flags_2a; + long unsigned int _head_2a; + struct list_head _deferred_list; + }; + struct page __page_2; + }; +}; + +typedef long unsigned int vm_flags_t; + +typedef struct { + atomic64_t refcnt; +} file_ref_t; + +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +}; + +struct file_ra_state { + long unsigned int start; + unsigned int size; + unsigned int async_size; + 
unsigned int ra_pages; + unsigned int mmap_miss; + loff_t prev_pos; +}; + +typedef struct { + long unsigned int v; +} freeptr_t; + +struct fown_struct; + +struct file { + file_ref_t f_ref; + spinlock_t f_lock; + fmode_t f_mode; + const struct file_operations *f_op; + struct address_space *f_mapping; + void *private_data; + struct inode *f_inode; + unsigned int f_flags; + unsigned int f_iocb_flags; + const struct cred *f_cred; + struct path f_path; + union { + struct mutex f_pos_lock; + u64 f_pipe; + }; + loff_t f_pos; + struct fown_struct *f_owner; + errseq_t f_wb_err; + errseq_t f_sb_err; + struct hlist_head *f_ep; + union { + struct callback_head f_task_work; + struct llist_node f_llist; + struct file_ra_state f_ra; + freeptr_t f_freeptr; + }; +}; + +struct vm_userfaultfd_ctx {}; + +struct vma_lock { + struct rw_semaphore lock; +}; + +struct anon_vma; + +struct vm_operations_struct; + +struct vm_area_struct { + union { + struct { + long unsigned int vm_start; + long unsigned int vm_end; + }; + struct callback_head vm_rcu; + }; + struct mm_struct *vm_mm; + pgprot_t vm_page_prot; + union { + const vm_flags_t vm_flags; + vm_flags_t __vm_flags; + }; + bool detached; + int vm_lock_seq; + struct vma_lock *vm_lock; + struct { + struct rb_node rb; + long unsigned int rb_subtree_last; + } shared; + struct list_head anon_vma_chain; + struct anon_vma *anon_vma; + const struct vm_operations_struct *vm_ops; + long unsigned int vm_pgoff; + struct file *vm_file; + void *vm_private_data; + atomic_long_t swap_readahead_info; + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +}; + +typedef unsigned int vm_fault_t; + +struct vm_fault; + +struct vm_operations_struct { + void (*open)(struct vm_area_struct *); + void (*close)(struct vm_area_struct *); + int (*may_split)(struct vm_area_struct *, long unsigned int); + int (*mremap)(struct vm_area_struct *); + int (*mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int, long unsigned int); + vm_fault_t (*fault)(struct vm_fault *); + vm_fault_t (*huge_fault)(struct vm_fault *, unsigned int); + vm_fault_t (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int); + long unsigned int (*pagesize)(struct vm_area_struct *); + vm_fault_t (*page_mkwrite)(struct vm_fault *); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *); + int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int); + const char * (*name)(struct vm_area_struct *); + struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int); +}; + +struct mm_cid { + u64 time; + int cid; + int recent_cid; +}; + +enum fault_flag { + FAULT_FLAG_WRITE = 1, + FAULT_FLAG_MKWRITE = 2, + FAULT_FLAG_ALLOW_RETRY = 4, + FAULT_FLAG_RETRY_NOWAIT = 8, + FAULT_FLAG_KILLABLE = 16, + FAULT_FLAG_TRIED = 32, + FAULT_FLAG_USER = 64, + FAULT_FLAG_REMOTE = 128, + FAULT_FLAG_INSTRUCTION = 256, + FAULT_FLAG_INTERRUPTIBLE = 512, + FAULT_FLAG_UNSHARE = 1024, + FAULT_FLAG_ORIG_PTE_VALID = 2048, + FAULT_FLAG_VMA_LOCK = 4096, +}; + +typedef struct { + long unsigned int pmd; +} pmd_t; + +typedef struct { + long unsigned int pud; +} pud_t; + +struct vm_fault { + const struct { + struct vm_area_struct *vma; + gfp_t gfp_mask; + long unsigned int pgoff; + long unsigned int address; + long unsigned int real_address; + }; + enum fault_flag flags; + pmd_t *pmd; + pud_t *pud; + union { + pte_t orig_pte; + pmd_t orig_pmd; + }; + struct page *cow_page; + struct page *page; + pte_t *pte; + spinlock_t *ptl; + pgtable_t prealloc_pte; +}; + +typedef void percpu_ref_func_t(struct percpu_ref *); + 
+struct percpu_ref_data { + atomic_long_t count; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic: 1; + bool allow_reinit: 1; + struct callback_head rcu; + struct percpu_ref *ref; +}; + +struct list_lru_node; + +struct list_lru { + struct list_lru_node *node; + struct list_head list; + int shrinker_id; + bool memcg_aware; + struct xarray xa; +}; + +struct kernfs_root; + +struct kernfs_elem_dir { + long unsigned int subdirs; + struct rb_root children; + struct kernfs_root *root; + long unsigned int rev; +}; + +struct kernfs_elem_symlink { + struct kernfs_node *target_kn; +}; + +struct kernfs_ops; + +struct kernfs_open_node; + +struct kernfs_elem_attr { + const struct kernfs_ops *ops; + struct kernfs_open_node *open; + loff_t size; + struct kernfs_node *notify_next; +}; + +struct kernfs_iattrs; + +struct kernfs_node { + atomic_t count; + atomic_t active; + struct kernfs_node *parent; + const char *name; + struct rb_node rb; + const void *ns; + unsigned int hash; + short unsigned int flags; + umode_t mode; + union { + struct kernfs_elem_dir dir; + struct kernfs_elem_symlink symlink; + struct kernfs_elem_attr attr; + }; + u64 id; + void *priv; + struct kernfs_iattrs *iattr; + struct callback_head rcu; +}; + +struct kernfs_open_file; + +struct kernfs_ops { + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); + size_t atomic_write_len; + bool prealloc; + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); + loff_t (*llseek)(struct kernfs_open_file *, loff_t, int); +}; + +struct kernfs_open_file { + struct kernfs_node *kn; + struct file *file; + struct seq_file *seq_file; + void *priv; + struct mutex mutex; + struct mutex prealloc_mutex; + int event; + struct list_head list; + char *prealloc_buf; + size_t atomic_write_len; + bool mmapped: 1; + bool released: 1; + const struct vm_operations_struct *vm_ops; +}; + +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET = 1, + KOBJ_NS_TYPES = 2, +}; + +struct sock; + +struct kobj_ns_type_operations { + enum kobj_ns_type type; + bool (*current_may_mount)(); + void * (*grab_current_ns)(); + const void * (*netlink_ns)(struct sock *); + const void * (*initial_ns)(); + void (*drop_ns)(void *); +}; + +struct kstat { + u32 result_mask; + umode_t mode; + unsigned int nlink; + uint32_t blksize; + u64 attributes; + u64 attributes_mask; + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; + u64 blocks; + u64 mnt_id; + u32 dio_mem_align; + u32 dio_offset_align; + u64 change_cookie; + u64 subvol; + u32 atomic_write_unit_min; + u32 atomic_write_unit_max; + u32 atomic_write_segments_max; +}; + +struct attribute { + const char *name; + umode_t mode; +}; + +struct bin_attribute { + struct attribute attr; + size_t size; + void *private; + struct address_space * (*f_mapping)(); + ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*read_new)(struct file *, struct kobject *, 
const struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, loff_t, int); + int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *, struct vm_area_struct *); +}; + +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, struct attribute *, int); + umode_t (*is_bin_visible)(struct kobject *, const struct bin_attribute *, int); + size_t (*bin_size)(struct kobject *, const struct bin_attribute *, int); + struct attribute **attrs; + union { + struct bin_attribute **bin_attrs; + const struct bin_attribute * const *bin_attrs_new; + }; +}; + +struct sysfs_ops { + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); +}; + +struct kset_uevent_ops; + +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +}; + +struct kobj_type { + void (*release)(struct kobject *); + const struct sysfs_ops *sysfs_ops; + const struct attribute_group **default_groups; + const struct kobj_ns_type_operations * (*child_ns_type)(const struct kobject *); + const void * (*namespace)(const struct kobject *); + void (*get_ownership)(const struct kobject *, kuid_t *, kgid_t *); +}; + +struct kobj_uevent_env { + char *argv[3]; + char *envp[64]; + int envp_idx; + char buf[2048]; + int buflen; +}; + +struct kset_uevent_ops { + int (* const filter)(const struct kobject *); + const char * (* const name)(const struct kobject *); + int (* const uevent)(const struct kobject *, struct kobj_uevent_env *); +}; + +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active: 1; + unsigned int in_hrtirq: 1; + unsigned int hang_detected: 1; + unsigned int softirq_activated: 1; + unsigned int online: 1; + unsigned int nr_events; + short unsigned int nr_retries; + short unsigned int nr_hangs; + unsigned int max_hang_time; + ktime_t expires_next; + struct hrtimer *next_timer; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + struct hrtimer_clock_base clock_base[8]; +}; + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct tracepoint_ext { + int (*regfunc)(); + void (*unregfunc)(); + unsigned int faultable: 1; +}; + +struct tracepoint { + const char *name; + struct static_key_false key; + struct static_call_key *static_call_key; + void *static_call_tramp; + void *iterator; + void *probestub; + struct tracepoint_func *funcs; + struct tracepoint_ext *ext; +}; + +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; + u32 writable_size; + long: 64; +}; + +struct shrink_control { + gfp_t gfp_mask; + int nid; + long unsigned int nr_to_scan; + long unsigned int nr_scanned; + struct mem_cgroup *memcg; +}; + +struct shrinker { + long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *); + long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *); + long int batch; + int seeks; + unsigned int flags; + refcount_t refcount; + struct completion done; + struct callback_head rcu; + void *private_data; + struct list_head list; + int id; + 
atomic_long_t *nr_deferred; +}; + +struct dev_pagemap_ops { + void (*page_free)(struct page *); + vm_fault_t (*migrate_to_ram)(struct vm_fault *); + int (*memory_failure)(struct dev_pagemap *, long unsigned int, long unsigned int, int); +}; + +struct hlist_bl_node; + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next; + struct hlist_bl_node **pprev; +}; + +struct lockref { + union { + __u64 lock_count; + struct { + spinlock_t lock; + int count; + }; + }; +}; + +struct qstr { + union { + struct { + u32 hash; + u32 len; + }; + u64 hash_len; + }; + const unsigned char *name; +}; + +struct dentry_operations; + +struct dentry { + unsigned int d_flags; + seqcount_spinlock_t d_seq; + struct hlist_bl_node d_hash; + struct dentry *d_parent; + struct qstr d_name; + struct inode *d_inode; + unsigned char d_iname[40]; + const struct dentry_operations *d_op; + struct super_block *d_sb; + long unsigned int d_time; + void *d_fsdata; + struct lockref d_lockref; + union { + struct list_head d_lru; + wait_queue_head_t *d_wait; + }; + struct hlist_node d_sib; + struct hlist_head d_children; + union { + struct hlist_node d_alias; + struct hlist_bl_node d_in_lookup_hash; + struct callback_head d_rcu; + } d_u; +}; + +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = 1, + WRITE_LIFE_SHORT = 2, + WRITE_LIFE_MEDIUM = 3, + WRITE_LIFE_LONG = 4, + WRITE_LIFE_EXTREME = 5, +} __attribute__((mode(byte))); + +struct posix_acl; + +struct inode_operations; + +struct bdi_writeback; + +struct file_lock_context; + +struct cdev; + +struct fsnotify_mark_connector; + +struct inode { + umode_t i_mode; + short unsigned int i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + long unsigned int i_ino; + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + time64_t i_atime_sec; + time64_t i_mtime_sec; + time64_t i_ctime_sec; + u32 i_atime_nsec; + u32 i_mtime_nsec; + u32 i_ctime_nsec; + u32 i_generation; + spinlock_t i_lock; + short unsigned int i_bytes; + u8 i_blkbits; + enum rw_hint i_write_hint; + blkcnt_t i_blocks; + u32 i_state; + struct rw_semaphore i_rwsem; + long unsigned int dirtied_when; + long unsigned int dirtied_time_when; + struct hlist_node i_hash; + struct list_head i_io_list; + struct bdi_writeback *i_wb; + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + struct list_head i_lru; + struct list_head i_sb_list; + struct list_head i_wb_list; + union { + struct hlist_head i_dentry; + struct callback_head i_rcu; + }; + atomic64_t i_version; + atomic64_t i_sequence; + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; + atomic_t i_readcount; + union { + const struct file_operations *i_fop; + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct cdev *i_cdev; + char *i_link; + unsigned int i_dir_seq; + }; + __u32 i_fsnotify_mask; + struct fsnotify_mark_connector *i_fsnotify_marks; + void *i_private; +}; + +enum d_real_type { + D_REAL_DATA = 0, + D_REAL_METADATA = 1, +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, 
struct qstr *); + int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char * (*d_dname)(struct dentry *, char *, int); + struct vfsmount * (*d_automount)(struct path *); + int (*d_manage)(const struct path *, bool); + struct dentry * (*d_real)(struct dentry *, enum d_real_type); + long: 64; + long: 64; + long: 64; +}; + +struct mtd_info; + +typedef long long int qsize_t; + +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; + struct list_head dqi_dirty_list; + long unsigned int dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; + void *dqi_priv; +}; + +struct quota_format_ops; + +struct quota_info { + unsigned int flags; + struct rw_semaphore dqio_sem; + struct inode *files[3]; + struct mem_dqinfo info[3]; + const struct quota_format_ops *ops[3]; +}; + +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + struct callback_head cb_head; +}; + +struct rcuwait { + struct task_struct *task; +}; + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int *read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +}; + +struct sb_writers { + short unsigned int frozen; + int freeze_kcount; + int freeze_ucount; + struct percpu_rw_semaphore rw_sem[3]; +}; + +typedef struct { + __u8 b[16]; +} uuid_t; + +struct super_operations; + +struct dquot_operations; + +struct quotactl_ops; + +struct export_operations; + +struct xattr_handler; + +struct block_device; + +struct backing_dev_info; + +struct fsnotify_sb_info; + +struct super_block { + struct list_head s_list; + dev_t s_dev; + unsigned char s_blocksize_bits; + long unsigned int s_blocksize; + loff_t s_maxbytes; + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + long unsigned int s_flags; + long unsigned int s_iflags; + long unsigned int s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; + const struct xattr_handler * const *s_xattr; + struct hlist_bl_head s_roots; + struct list_head s_mounts; + struct block_device *s_bdev; + struct file *s_bdev_file; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; + struct quota_info s_dquot; + struct sb_writers s_writers; + void *s_fs_info; + u32 s_time_gran; + time64_t s_time_min; + time64_t s_time_max; + u32 s_fsnotify_mask; + struct fsnotify_sb_info *s_fsnotify_info; + char s_id[32]; + uuid_t s_uuid; + u8 s_uuid_len; + char s_sysfs_name[37]; + unsigned int s_max_links; + struct mutex s_vfs_rename_mutex; + const char *s_subtype; + const struct dentry_operations *s_d_op; + struct shrinker *s_shrink; + atomic_long_t s_remove_count; + int s_readonly_remount; + errseq_t s_wb_err; + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + struct user_namespace *s_user_ns; + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; + struct callback_head rcu; + struct work_struct destroy_work; + struct mutex s_sync_lock; + int s_stack_depth; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + spinlock_t s_inode_list_lock; + struct list_head s_inodes; + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; + long: 64; + long: 64; +}; + +struct mnt_idmap; + +struct vfsmount { + struct dentry *mnt_root; + struct super_block *mnt_sb; + int mnt_flags; + struct mnt_idmap *mnt_idmap; +}; + +struct list_lru_one { + struct list_head list; + long int nr_items; + spinlock_t lock; +}; + +struct list_lru_node { + struct list_lru_one lru; + atomic_long_t nr_items; + long: 64; + long: 64; + long: 64; +}; + +enum migrate_mode { + MIGRATE_ASYNC = 0, + MIGRATE_SYNC_LIGHT = 1, + MIGRATE_SYNC = 2, +}; + +struct exception_table_entry { + int insn; + int fixup; + short int type; + short int data; +}; + +struct key_tag { + struct callback_head rcu; + refcount_t usage; + bool removed; +}; + +typedef int (*request_key_actor_t)(struct key *, void *); + +struct key_preparsed_payload; + +struct key_match_data; + +struct kernel_pkey_params; + +struct kernel_pkey_query; + +struct key_type { + const char *name; + size_t def_datalen; + unsigned int flags; + int (*vet_description)(const char *); + int (*preparse)(struct key_preparsed_payload *); + void (*free_preparse)(struct key_preparsed_payload *); + int (*instantiate)(struct key *, struct key_preparsed_payload *); + int (*update)(struct key *, struct key_preparsed_payload *); + int (*match_preparse)(struct key_match_data *); + void (*match_free)(struct key_match_data *); + void (*revoke)(struct key *); + void (*destroy)(struct key *); + void (*describe)(const struct key *, struct seq_file *); + long int (*read)(const struct key *, char *, size_t); + request_key_actor_t request_key; + struct key_restriction * (*lookup_restriction)(const char *); + int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); + struct list_head link; + struct lock_class_key lock_class; +}; + +typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +struct user_struct { + refcount_t __count; + struct percpu_counter epoll_watches; + long unsigned int unix_inflight; + atomic_long_t pipe_bufs; + struct hlist_node uidhash_node; + kuid_t uid; + atomic_long_t locked_vm; + struct ratelimit_state ratelimit; +}; + +struct group_info { + refcount_t usage; + int ngroups; + kgid_t gid[0]; +}; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +struct request_queue; + +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct callback_head __rcu_head; + }; + unsigned int flags; +}; + +typedef struct { + uid_t val; +} vfsuid_t; + +typedef struct { + gid_t val; +} vfsgid_t; + +struct wait_page_queue; + +struct kiocb { + struct file *ki_filp; + loff_t ki_pos; + void (*ki_complete)(struct kiocb *, long int); + void *private; + int ki_flags; + u16 ki_ioprio; + union { + struct wait_page_queue *ki_waitq; + ssize_t (*dio_complete)(void *); + }; +}; + +struct iattr { + 
unsigned int ia_valid; + umode_t ia_mode; + union { + kuid_t ia_uid; + vfsuid_t ia_vfsuid; + }; + union { + kgid_t ia_gid; + vfsgid_t ia_vfsgid; + }; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct file *ia_file; +}; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +enum quota_type { + USRQUOTA = 0, + GRPQUOTA = 1, + PRJQUOTA = 2, +}; + +struct kqid { + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; +}; + +struct mem_dqblk { + qsize_t dqb_bhardlimit; + qsize_t dqb_bsoftlimit; + qsize_t dqb_curspace; + qsize_t dqb_rsvspace; + qsize_t dqb_ihardlimit; + qsize_t dqb_isoftlimit; + qsize_t dqb_curinodes; + time64_t dqb_btime; + time64_t dqb_itime; +}; + +struct dquot { + struct hlist_node dq_hash; + struct list_head dq_inuse; + struct list_head dq_free; + struct list_head dq_dirty; + struct mutex dq_lock; + spinlock_t dq_dqb_lock; + atomic_t dq_count; + struct super_block *dq_sb; + struct kqid dq_id; + loff_t dq_off; + long unsigned int dq_flags; + struct mem_dqblk dq_dqb; +}; + +struct quota_format_type { + int qf_fmt_id; + const struct quota_format_ops *qf_ops; + struct module *qf_owner; + struct quota_format_type *qf_next; +}; + +struct quota_format_ops { + int (*check_quota_file)(struct super_block *, int); + int (*read_file_info)(struct super_block *, int); + int (*write_file_info)(struct super_block *, int); + int (*free_file_info)(struct super_block *, int); + int (*read_dqblk)(struct dquot *); + int (*commit_dqblk)(struct dquot *); + int (*release_dqblk)(struct dquot *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct dquot_operations { + int (*write_dquot)(struct dquot *); + struct dquot * (*alloc_dquot)(struct super_block *, int); + void (*destroy_dquot)(struct dquot *); + int (*acquire_dquot)(struct dquot *); + int (*release_dquot)(struct dquot *); + int (*mark_dirty)(struct dquot *); + int (*write_info)(struct super_block *, int); + qsize_t * (*get_reserved_space)(struct inode *); + int (*get_projid)(struct inode *, kprojid_t *); + int (*get_inode_usage)(struct inode *, qsize_t *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct qc_dqblk { + int d_fieldmask; + u64 d_spc_hardlimit; + u64 d_spc_softlimit; + u64 d_ino_hardlimit; + u64 d_ino_softlimit; + u64 d_space; + u64 d_ino_count; + s64 d_ino_timer; + s64 d_spc_timer; + int d_ino_warns; + int d_spc_warns; + u64 d_rt_spc_hardlimit; + u64 d_rt_spc_softlimit; + u64 d_rt_space; + s64 d_rt_spc_timer; + int d_rt_spc_warns; +}; + +struct qc_type_state { + unsigned int flags; + unsigned int spc_timelimit; + unsigned int ino_timelimit; + unsigned int rt_spc_timelimit; + unsigned int spc_warnlimit; + unsigned int ino_warnlimit; + unsigned int rt_spc_warnlimit; + long long unsigned int ino; + blkcnt_t blocks; + blkcnt_t nextents; +}; + +struct qc_state { + unsigned int s_incoredqs; + struct qc_type_state s_state[3]; +}; + +struct qc_info { + int i_fieldmask; + unsigned int i_flags; + unsigned int i_spc_timelimit; + unsigned int i_ino_timelimit; + unsigned int i_rt_spc_timelimit; + unsigned int i_spc_warnlimit; + unsigned int i_ino_warnlimit; + unsigned int i_rt_spc_warnlimit; +}; + +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int (*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int 
(*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct writeback_control; + +struct readahead_control; + +struct swap_info_struct; + +struct address_space_operations { + int (*writepage)(struct page *, struct writeback_control *); + int (*read_folio)(struct file *, struct folio *); + int (*writepages)(struct address_space *, struct writeback_control *); + bool (*dirty_folio)(struct address_space *, struct folio *); + void (*readahead)(struct readahead_control *); + int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, struct folio **, void **); + int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct folio *, void *); + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidate_folio)(struct folio *, size_t, size_t); + bool (*release_folio)(struct folio *, gfp_t); + void (*free_folio)(struct folio *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); + int (*migrate_folio)(struct address_space *, struct folio *, struct folio *, enum migrate_mode); + int (*launder_folio)(struct folio *); + bool (*is_partially_uptodate)(struct folio *, size_t, size_t); + void (*is_dirty_writeback)(struct folio *, bool *, bool *); + int (*error_remove_folio)(struct address_space *, struct folio *); + int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); + void (*swap_deactivate)(struct file *); + int (*swap_rw)(struct kiocb *, struct iov_iter *); +}; + +struct iovec { + void *iov_base; + __kernel_size_t iov_len; +}; + +struct kvec; + +struct bio_vec; + +struct folio_queue; + +struct iov_iter { + u8 iter_type; + bool nofault; + bool data_source; + size_t iov_offset; + union { + struct iovec __ubuf_iovec; + struct { + union { + const struct iovec *__iov; + const struct kvec *kvec; + const struct bio_vec *bvec; + const struct folio_queue *folioq; + struct xarray *xarray; + void *ubuf; + }; + size_t count; + }; + }; + union { + long unsigned int nr_segs; + u8 folioq_slot; + loff_t xarray_start; + }; +}; + +struct fiemap_extent_info; + +struct fileattr; + +struct offset_ctx; + +struct inode_operations { + struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); + const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); + int (*permission)(struct mnt_idmap *, struct inode *, int); + struct posix_acl * (*get_inode_acl)(struct inode *, int, bool); + int (*readlink)(struct dentry *, char *, int); + int (*create)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, bool); + int (*link)(struct dentry *, struct inode *, struct dentry *); + int (*unlink)(struct inode *, struct dentry *); + int (*symlink)(struct mnt_idmap *, struct inode *, struct dentry *, const char *); + int (*mkdir)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t); + int (*rmdir)(struct inode *, struct dentry *); + int (*mknod)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, dev_t); + int (*rename)(struct mnt_idmap *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); + int (*setattr)(struct mnt_idmap *, struct dentry *, struct 
iattr *); + int (*getattr)(struct mnt_idmap *, const struct path *, struct kstat *, u32, unsigned int); + ssize_t (*listxattr)(struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); + int (*update_time)(struct inode *, int); + int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); + int (*tmpfile)(struct mnt_idmap *, struct inode *, struct file *, umode_t); + struct posix_acl * (*get_acl)(struct mnt_idmap *, struct dentry *, int); + int (*set_acl)(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); + int (*fileattr_set)(struct mnt_idmap *, struct dentry *, struct fileattr *); + int (*fileattr_get)(struct dentry *, struct fileattr *); + struct offset_ctx * (*get_offset_ctx)(struct inode *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct fown_struct { + struct file *file; + rwlock_t lock; + struct pid *pid; + enum pid_type pid_type; + kuid_t uid; + kuid_t euid; + int signum; +}; + +enum freeze_holder { + FREEZE_HOLDER_KERNEL = 1, + FREEZE_HOLDER_USERSPACE = 2, + FREEZE_MAY_NEST = 4, +}; + +struct kstatfs; + +struct super_operations { + struct inode * (*alloc_inode)(struct super_block *); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + void (*dirty_inode)(struct inode *, int); + int (*write_inode)(struct inode *, struct writeback_control *); + int (*drop_inode)(struct inode *); + void (*evict_inode)(struct inode *); + void (*put_super)(struct super_block *); + int (*sync_fs)(struct super_block *, int); + int (*freeze_super)(struct super_block *, enum freeze_holder); + int (*freeze_fs)(struct super_block *); + int (*thaw_super)(struct super_block *, enum freeze_holder); + int (*unfreeze_fs)(struct super_block *); + int (*statfs)(struct dentry *, struct kstatfs *); + int (*remount_fs)(struct super_block *, int *, char *); + void (*umount_begin)(struct super_block *); + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); + long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); + long int (*free_cached_objects)(struct super_block *, struct shrink_control *); + void (*shutdown)(struct super_block *); +}; + +struct fid; + +struct iomap; + +struct export_operations { + int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); + struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); + struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); + int (*get_name)(struct dentry *, char *, struct dentry *); + struct dentry * (*get_parent)(struct dentry *); + int (*commit_metadata)(struct inode *); + int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); + int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); + int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); + long unsigned int flags; +}; + +struct xattr_handler { + const char *name; + const char *prefix; + int flags; + bool (*list)(struct dentry *); + int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); + int (*set)(const struct xattr_handler *, struct mnt_idmap *, struct dentry *, struct inode *, const char *, const void *, size_t, int); +}; + +typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, 
unsigned int); + +struct dir_context { + filldir_t actor; + loff_t pos; +}; + +struct offset_ctx { + struct maple_tree mt; + long unsigned int next_offset; +}; + +struct p_log; + +struct fs_parameter; + +struct fs_parse_result; + +typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *); + +struct fs_parameter_spec { + const char *name; + fs_param_type *type; + u8 opt; + short unsigned int flags; + const void *data; +}; + +typedef __u64 Elf64_Addr; + +typedef __u16 Elf64_Half; + +typedef __u64 Elf64_Off; + +typedef __u32 Elf64_Word; + +typedef __u64 Elf64_Xword; + +struct elf64_sym { + Elf64_Word st_name; + unsigned char st_info; + unsigned char st_other; + Elf64_Half st_shndx; + Elf64_Addr st_value; + Elf64_Xword st_size; +}; + +struct elf64_shdr { + Elf64_Word sh_name; + Elf64_Word sh_type; + Elf64_Xword sh_flags; + Elf64_Addr sh_addr; + Elf64_Off sh_offset; + Elf64_Xword sh_size; + Elf64_Word sh_link; + Elf64_Word sh_info; + Elf64_Xword sh_addralign; + Elf64_Xword sh_entsize; +}; + +struct kvec { + void *iov_base; + size_t iov_len; +}; + +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct folio_batch { + unsigned char nr; + unsigned char i; + bool percpu_pvec_drained; + struct folio *folios[31]; +}; + +struct folio_queue { + struct folio_batch vec; + u8 orders[31]; + struct folio_queue *next; + struct folio_queue *prev; + long unsigned int marks; + long unsigned int marks2; + long unsigned int marks3; +}; + +struct kparam_string; + +struct kparam_array; + +struct kernel_param { + const char *name; + struct module *mod; + const struct kernel_param_ops *ops; + const u16 perm; + s8 level; + u8 flags; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +struct kparam_array { + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +struct error_injection_entry { + long unsigned int addr; + int etype; +}; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +struct kernel_symbol { + long unsigned int value; + const char *name; + const char *namespace; +}; + +struct nsset; + +struct proc_ns_operations { + const char *name; + const char *real_ns_name; + int type; + struct ns_common * (*get)(struct task_struct *); + void (*put)(struct ns_common *); + int (*install)(struct nsset *, struct ns_common *); + struct user_namespace * (*owner)(struct ns_common *); + struct ns_common * (*get_parent)(struct ns_common *); +}; + +enum mod_mem_type { + MOD_TEXT = 0, + MOD_DATA = 1, + MOD_RODATA = 2, + MOD_RO_AFTER_INIT = 3, + MOD_INIT_TEXT = 4, + MOD_INIT_DATA = 5, + MOD_INIT_RODATA = 6, + MOD_MEM_NUM_TYPES = 7, + MOD_INVALID = -1, +}; + +struct objpool_slot { + uint32_t head; + uint32_t tail; + uint32_t last; + uint32_t mask; + void *entries[0]; +}; + +struct objpool_head; + +typedef int (*objpool_fini_cb)(struct objpool_head *, void *); + +struct objpool_head { + int obj_size; + int nr_objs; + int nr_possible_cpus; + int capacity; + gfp_t gfp; + refcount_t ref; + long unsigned int flags; + struct objpool_slot 
**cpu_slots; + objpool_fini_cb release; + void *context; +}; + +struct rethook; + +struct rethook_node { + struct callback_head rcu; + struct llist_node llist; + struct rethook *rethook; + long unsigned int ret_addr; + long unsigned int frame; +}; + +struct rethook { + void *data; + void (*handler)(struct rethook_node *, void *, long unsigned int, struct pt_regs *); + struct objpool_head pool; + struct callback_head rcu; +}; + +typedef long unsigned int uintptr_t; + +typedef u64 phys_addr_t; + +struct kernel_mapping { + long unsigned int page_offset; + long unsigned int virt_addr; + long unsigned int virt_offset; + uintptr_t phys_addr; + uintptr_t size; + long unsigned int va_pa_offset; + long unsigned int va_kernel_pa_offset; +}; + +typedef u32 uprobe_opcode_t; + +enum rp_check { + RP_CHECK_CALL = 0, + RP_CHECK_CHAIN_CALL = 1, + RP_CHECK_RET = 2, +}; + +enum pageflags { + PG_locked = 0, + PG_writeback = 1, + PG_referenced = 2, + PG_uptodate = 3, + PG_dirty = 4, + PG_lru = 5, + PG_head = 6, + PG_waiters = 7, + PG_active = 8, + PG_workingset = 9, + PG_owner_priv_1 = 10, + PG_owner_2 = 11, + PG_arch_1 = 12, + PG_reserved = 13, + PG_private = 14, + PG_private_2 = 15, + PG_reclaim = 16, + PG_swapbacked = 17, + PG_unevictable = 18, + PG_mlocked = 19, + __NR_PAGEFLAGS = 20, + PG_readahead = 16, + PG_swapcache = 10, + PG_checked = 10, + PG_anon_exclusive = 11, + PG_mappedtodisk = 11, + PG_fscache = 15, + PG_pinned = 10, + PG_savepinned = 4, + PG_foreign = 10, + PG_xen_remapped = 10, + PG_isolated = 16, + PG_reported = 3, + PG_has_hwpoisoned = 8, + PG_large_rmappable = 9, + PG_partially_mapped = 16, +}; + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *); + +struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block *next; + int priority; +}; + +enum probe_insn { + INSN_REJECTED = 0, + INSN_GOOD_NO_SLOT = 1, + INSN_GOOD = 2, +}; + +struct timens_offset { + s64 sec; + u64 nsec; +}; + +typedef int (*initcall_t)(); + +struct codetag { + unsigned int flags; + unsigned int lineno; + const char *modname; + const char *function; + const char *filename; +}; + +enum { + ___GFP_DMA_BIT = 0, + ___GFP_HIGHMEM_BIT = 1, + ___GFP_DMA32_BIT = 2, + ___GFP_MOVABLE_BIT = 3, + ___GFP_RECLAIMABLE_BIT = 4, + ___GFP_HIGH_BIT = 5, + ___GFP_IO_BIT = 6, + ___GFP_FS_BIT = 7, + ___GFP_ZERO_BIT = 8, + ___GFP_UNUSED_BIT = 9, + ___GFP_DIRECT_RECLAIM_BIT = 10, + ___GFP_KSWAPD_RECLAIM_BIT = 11, + ___GFP_WRITE_BIT = 12, + ___GFP_NOWARN_BIT = 13, + ___GFP_RETRY_MAYFAIL_BIT = 14, + ___GFP_NOFAIL_BIT = 15, + ___GFP_NORETRY_BIT = 16, + ___GFP_MEMALLOC_BIT = 17, + ___GFP_COMP_BIT = 18, + ___GFP_NOMEMALLOC_BIT = 19, + ___GFP_HARDWALL_BIT = 20, + ___GFP_THISNODE_BIT = 21, + ___GFP_ACCOUNT_BIT = 22, + ___GFP_ZEROTAGS_BIT = 23, + ___GFP_NO_OBJ_EXT_BIT = 24, + ___GFP_LAST_BIT = 25, +}; + +struct alloc_tag_counters { + u64 bytes; + u64 calls; +}; + +struct alloc_tag { + struct codetag ct; + struct alloc_tag_counters *counters; +}; + +struct maple_alloc { + long unsigned int total; + unsigned char node_count; + unsigned int request_count; + struct maple_alloc *slot[30]; +}; + +struct maple_enode; + +enum store_type { + wr_invalid = 0, + wr_new_root = 1, + wr_store_root = 2, + wr_exact_fit = 3, + wr_spanning_store = 4, + wr_split_store = 5, + wr_rebalance = 6, + wr_append = 7, + wr_node_store = 8, + wr_slot_store = 9, +}; + +enum maple_status { + ma_active = 0, + ma_start = 1, + ma_root = 2, + ma_none = 3, + ma_pause = 4, + ma_overflow = 5, + ma_underflow = 6, + 
ma_error = 7, +}; + +struct ma_state { + struct maple_tree *tree; + long unsigned int index; + long unsigned int last; + struct maple_enode *node; + long unsigned int min; + long unsigned int max; + struct maple_alloc *alloc; + enum maple_status status; + unsigned char depth; + unsigned char offset; + unsigned char mas_flags; + unsigned char end; + enum store_type store_type; +}; + +struct linux_binprm; + +struct coredump_params; + +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *); + long unsigned int min_coredump; +}; + +struct vma_iterator { + struct ma_state mas; +}; + +enum vm_fault_reason { + VM_FAULT_OOM = 1, + VM_FAULT_SIGBUS = 2, + VM_FAULT_MAJOR = 4, + VM_FAULT_HWPOISON = 16, + VM_FAULT_HWPOISON_LARGE = 32, + VM_FAULT_SIGSEGV = 64, + VM_FAULT_NOPAGE = 256, + VM_FAULT_LOCKED = 512, + VM_FAULT_RETRY = 1024, + VM_FAULT_FALLBACK = 2048, + VM_FAULT_DONE_COW = 4096, + VM_FAULT_NEEDDSYNC = 8192, + VM_FAULT_COMPLETED = 16384, + VM_FAULT_HINDEX_MASK = 983040, +}; + +struct vm_special_mapping { + const char *name; + struct page **pages; + vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); + int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); + void (*close)(const struct vm_special_mapping *, struct vm_area_struct *); +}; + +typedef unsigned int zap_flags_t; + +enum zone_stat_item { + NR_FREE_PAGES = 0, + NR_ZONE_LRU_BASE = 1, + NR_ZONE_INACTIVE_ANON = 1, + NR_ZONE_ACTIVE_ANON = 2, + NR_ZONE_INACTIVE_FILE = 3, + NR_ZONE_ACTIVE_FILE = 4, + NR_ZONE_UNEVICTABLE = 5, + NR_ZONE_WRITE_PENDING = 6, + NR_MLOCK = 7, + NR_BOUNCE = 8, + NR_FREE_CMA_PAGES = 9, + NR_VM_ZONE_STAT_ITEMS = 10, +}; + +enum node_stat_item { + NR_LRU_BASE = 0, + NR_INACTIVE_ANON = 0, + NR_ACTIVE_ANON = 1, + NR_INACTIVE_FILE = 2, + NR_ACTIVE_FILE = 3, + NR_UNEVICTABLE = 4, + NR_SLAB_RECLAIMABLE_B = 5, + NR_SLAB_UNRECLAIMABLE_B = 6, + NR_ISOLATED_ANON = 7, + NR_ISOLATED_FILE = 8, + WORKINGSET_NODES = 9, + WORKINGSET_REFAULT_BASE = 10, + WORKINGSET_REFAULT_ANON = 10, + WORKINGSET_REFAULT_FILE = 11, + WORKINGSET_ACTIVATE_BASE = 12, + WORKINGSET_ACTIVATE_ANON = 12, + WORKINGSET_ACTIVATE_FILE = 13, + WORKINGSET_RESTORE_BASE = 14, + WORKINGSET_RESTORE_ANON = 14, + WORKINGSET_RESTORE_FILE = 15, + WORKINGSET_NODERECLAIM = 16, + NR_ANON_MAPPED = 17, + NR_FILE_MAPPED = 18, + NR_FILE_PAGES = 19, + NR_FILE_DIRTY = 20, + NR_WRITEBACK = 21, + NR_WRITEBACK_TEMP = 22, + NR_SHMEM = 23, + NR_SHMEM_THPS = 24, + NR_SHMEM_PMDMAPPED = 25, + NR_FILE_THPS = 26, + NR_FILE_PMDMAPPED = 27, + NR_ANON_THPS = 28, + NR_VMSCAN_WRITE = 29, + NR_VMSCAN_IMMEDIATE = 30, + NR_DIRTIED = 31, + NR_WRITTEN = 32, + NR_THROTTLED_WRITTEN = 33, + NR_KERNEL_MISC_RECLAIMABLE = 34, + NR_FOLL_PIN_ACQUIRED = 35, + NR_FOLL_PIN_RELEASED = 36, + NR_KERNEL_STACK_KB = 37, + NR_PAGETABLE = 38, + NR_SECONDARY_PAGETABLE = 39, + NR_IOMMU_PAGES = 40, + NR_SWAPCACHE = 41, + PGDEMOTE_KSWAPD = 42, + PGDEMOTE_DIRECT = 43, + PGDEMOTE_KHUGEPAGED = 44, + NR_VM_NODE_STAT_ITEMS = 45, +}; + +enum kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_DMA = 0, + KMALLOC_RANDOM_START = 0, + KMALLOC_RANDOM_END = 0, + KMALLOC_RECLAIM = 1, + KMALLOC_CGROUP = 2, + NR_KMALLOC_TYPES = 3, +}; + +enum vm_event_item { + PGPGIN = 0, + PGPGOUT = 1, + PSWPIN = 2, + PSWPOUT = 3, + PGALLOC_DMA32 = 4, + PGALLOC_NORMAL = 5, + PGALLOC_MOVABLE = 6, + ALLOCSTALL_DMA32 = 7, + ALLOCSTALL_NORMAL = 8, + 
ALLOCSTALL_MOVABLE = 9, + PGSCAN_SKIP_DMA32 = 10, + PGSCAN_SKIP_NORMAL = 11, + PGSCAN_SKIP_MOVABLE = 12, + PGFREE = 13, + PGACTIVATE = 14, + PGDEACTIVATE = 15, + PGLAZYFREE = 16, + PGFAULT = 17, + PGMAJFAULT = 18, + PGLAZYFREED = 19, + PGREFILL = 20, + PGREUSE = 21, + PGSTEAL_KSWAPD = 22, + PGSTEAL_DIRECT = 23, + PGSTEAL_KHUGEPAGED = 24, + PGSCAN_KSWAPD = 25, + PGSCAN_DIRECT = 26, + PGSCAN_KHUGEPAGED = 27, + PGSCAN_DIRECT_THROTTLE = 28, + PGSCAN_ANON = 29, + PGSCAN_FILE = 30, + PGSTEAL_ANON = 31, + PGSTEAL_FILE = 32, + PGINODESTEAL = 33, + SLABS_SCANNED = 34, + KSWAPD_INODESTEAL = 35, + KSWAPD_LOW_WMARK_HIT_QUICKLY = 36, + KSWAPD_HIGH_WMARK_HIT_QUICKLY = 37, + PAGEOUTRUN = 38, + PGROTATED = 39, + DROP_PAGECACHE = 40, + DROP_SLAB = 41, + OOM_KILL = 42, + PGMIGRATE_SUCCESS = 43, + PGMIGRATE_FAIL = 44, + THP_MIGRATION_SUCCESS = 45, + THP_MIGRATION_FAIL = 46, + THP_MIGRATION_SPLIT = 47, + COMPACTMIGRATE_SCANNED = 48, + COMPACTFREE_SCANNED = 49, + COMPACTISOLATED = 50, + COMPACTSTALL = 51, + COMPACTFAIL = 52, + COMPACTSUCCESS = 53, + KCOMPACTD_WAKE = 54, + KCOMPACTD_MIGRATE_SCANNED = 55, + KCOMPACTD_FREE_SCANNED = 56, + CMA_ALLOC_SUCCESS = 57, + CMA_ALLOC_FAIL = 58, + UNEVICTABLE_PGCULLED = 59, + UNEVICTABLE_PGSCANNED = 60, + UNEVICTABLE_PGRESCUED = 61, + UNEVICTABLE_PGMLOCKED = 62, + UNEVICTABLE_PGMUNLOCKED = 63, + UNEVICTABLE_PGCLEARED = 64, + UNEVICTABLE_PGSTRANDED = 65, + SWAP_RA = 66, + SWAP_RA_HIT = 67, + SWPIN_ZERO = 68, + SWPOUT_ZERO = 69, + ZSWPIN = 70, + ZSWPOUT = 71, + ZSWPWB = 72, + NR_VM_EVENT_ITEMS = 73, +}; + +struct zap_details { + struct folio *single_folio; + bool even_cows; + zap_flags_t zap_flags; +}; + +struct linux_binprm { + struct vm_area_struct *vma; + long unsigned int vma_pages; + long unsigned int argmin; + struct mm_struct *mm; + long unsigned int p; + unsigned int have_execfd: 1; + unsigned int execfd_creds: 1; + unsigned int secureexec: 1; + unsigned int point_of_no_return: 1; + struct file *executable; + struct file *interpreter; + struct file *file; + struct cred *cred; + int unsafe; + unsigned int per_clear; + int argc; + int envc; + const char *filename; + const char *interp; + const char *fdpath; + unsigned int interp_flags; + int execfd; + long unsigned int loader; + long unsigned int exec; + struct rlimit rlim_stack; + char buf[256]; +}; + +struct timens_offsets { + struct timespec64 monotonic; + struct timespec64 boottime; +}; + +struct time_namespace { + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; + struct timens_offsets offsets; + struct page *vvar_page; + bool frozen_offsets; +}; + +struct arch_vdso_time_data { + __u64 all_cpu_hwprobe_values[11]; + __u8 homogeneous_cpus; +}; + +struct vdso_timestamp { + u64 sec; + u64 nsec; +}; + +struct vdso_data { + u32 seq; + s32 clock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; + union { + struct vdso_timestamp basetime[12]; + struct timens_offset offset[12]; + }; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 hrtimer_res; + u32 __unused; + struct arch_vdso_time_data arch_data; +}; + +union vdso_data_store { + struct vdso_data data[2]; + u8 page[4096]; +}; + +enum vvar_pages { + VVAR_DATA_PAGE_OFFSET = 0, + VVAR_TIMENS_PAGE_OFFSET = 1, + VVAR_NR_PAGES = 2, +}; + +struct __vdso_info { + const char *name; + const char *vdso_code_start; + const char *vdso_code_end; + long unsigned int vdso_pages; + struct vm_special_mapping *cm; +}; + +typedef short int __s16; + +typedef long unsigned int ulong; + +struct cacheline_padding { + char x[0]; +}; + +struct rcu_work { + 
struct work_struct work; + struct callback_head rcu; + struct workqueue_struct *wq; +}; + +struct task_cputime { + u64 stime; + u64 utime; + long long unsigned int sum_exec_runtime; +}; + +struct ucounts { + struct hlist_node node; + struct user_namespace *ns; + kuid_t uid; + atomic_t count; + atomic_long_t ucount[10]; + atomic_long_t rlimit[4]; +}; + +struct cgroup_subsys; + +struct cgroup_subsys_state { + struct cgroup *cgroup; + struct cgroup_subsys *ss; + struct percpu_ref refcnt; + struct list_head sibling; + struct list_head children; + struct list_head rstat_css_node; + int id; + unsigned int flags; + u64 serial_nr; + atomic_t online_cnt; + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + struct cgroup_subsys_state *parent; + int nr_descendants; +}; + +struct cgroup_file { + struct kernfs_node *kn; + long unsigned int notified_at; + struct timer_list notify_timer; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; + u64 ntime; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + struct bpf_prog_array *effective[28]; + struct hlist_head progs[28]; + u8 flags[28]; + struct list_head storages; + struct bpf_prog_array *inactive; + struct percpu_ref refcnt; + struct work_struct release_work; +}; + +struct cgroup_freezer_state { + bool freeze; + bool e_freeze; + int nr_frozen_descendants; + int nr_frozen_tasks; +}; + +struct cgroup_root; + +struct cgroup_rstat_cpu; + +struct psi_group; + +struct cgroup { + struct cgroup_subsys_state self; + long unsigned int flags; + int level; + int max_depth; + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + int nr_populated_csets; + int nr_populated_domain_children; + int nr_populated_threaded_children; + int nr_threaded_children; + struct kernfs_node *kn; + struct cgroup_file procs_file; + struct cgroup_file events_file; + struct cgroup_file psi_files[3]; + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + struct cgroup_subsys_state *subsys[13]; + int nr_dying_subsys[13]; + struct cgroup_root *root; + struct list_head cset_links; + struct list_head e_csets[13]; + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; + struct cgroup_rstat_cpu *rstat_cpu; + struct list_head rstat_css_list; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad_; + struct cgroup *rstat_flush_next; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; + struct list_head pidlists; + struct mutex pidlist_mutex; + wait_queue_head_t offline_waitq; + struct work_struct release_agent_work; + struct psi_group *psi; + struct cgroup_bpf bpf; + struct cgroup_freezer_state freezer; + struct bpf_local_storage *bpf_cgrp_storage; + struct cgroup *ancestors[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bio; + +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +struct css_set { + struct cgroup_subsys_state *subsys[13]; + refcount_t refcount; + struct css_set *dom_cset; + struct cgroup *dfl_cgrp; + int nr_tasks; + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + struct list_head task_iters; + struct list_head e_cset_node[13]; + struct list_head threaded_csets; + struct list_head threaded_csets_node; + struct hlist_node hlist; + struct list_head cgrp_links; + struct list_head mg_src_preload_node; + struct list_head mg_dst_preload_node; + struct list_head mg_node; + struct cgroup *mg_src_cgrp; + 
struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + bool dead; + struct callback_head callback_head; +}; + +struct perf_event_groups { + struct rb_root tree; + u64 index; +}; + +typedef struct { + atomic_long_t a; +} local_t; + +struct perf_event_context { + raw_spinlock_t lock; + struct mutex mutex; + struct list_head pmu_ctx_list; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; + struct list_head event_list; + int nr_events; + int nr_user; + int is_active; + int nr_task_data; + int nr_stat; + int nr_freq; + int rotate_disable; + refcount_t refcount; + struct task_struct *task; + u64 time; + u64 timestamp; + u64 timeoffset; + struct perf_event_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; + int nr_cgroups; + struct callback_head callback_head; + local_t nr_no_switch_fast; +}; + +struct mem_cgroup_id { + int id; + refcount_t ref; +}; + +struct page_counter { + atomic_long_t usage; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad1_; + long unsigned int emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + long unsigned int elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + long unsigned int watermark; + long unsigned int local_watermark; + long unsigned int failcnt; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad2_; + bool protection_support; + long unsigned int min; + long unsigned int low; + long unsigned int high; + long unsigned int max; + struct page_counter *parent; + long: 64; + long: 64; +}; + +struct vmpressure { + long unsigned int scanned; + long unsigned int reclaimed; + long unsigned int tree_scanned; + long unsigned int tree_reclaimed; + spinlock_t sr_lock; + struct list_head events; + struct mutex events_lock; + struct work_struct work; +}; + +struct fprop_global { + struct percpu_counter events; + unsigned int period; + seqcount_t sequence; +}; + +struct wb_domain { + spinlock_t lock; + struct fprop_global completions; + struct timer_list period_timer; + long unsigned int period_time; + long unsigned int dirty_limit_tstamp; + long unsigned int dirty_limit; +}; + +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; + +struct memcg_cgwb_frn { + u64 bdi_id; + int memcg_id; + u64 at; + struct wb_completion done; +}; + +struct memcg_vmstats; + +struct memcg_vmstats_percpu; + +struct mem_cgroup_per_node; + +struct mem_cgroup { + struct cgroup_subsys_state css; + struct mem_cgroup_id id; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct page_counter memory; + union { + struct page_counter swap; + struct page_counter memsw; + }; + struct list_head memory_peaks; + struct list_head swap_peaks; + spinlock_t peaks_lock; + struct work_struct high_work; + long unsigned int zswap_max; + bool zswap_writeback; + struct vmpressure vmpressure; + bool oom_group; + int swappiness; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct cgroup_file swap_events_file; + struct memcg_vmstats *vmstats; + atomic_long_t memory_events[9]; + atomic_long_t memory_events_local[9]; + long unsigned int socket_pressure; + int kmemcg_id; + struct obj_cgroup *objcg; + struct obj_cgroup *orig_objcg; + struct list_head objcg_list; + struct memcg_vmstats_percpu *vmstats_percpu; + struct list_head cgwb_list; + struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[4]; + struct mem_cgroup_per_node 
*nodeinfo[0]; + long: 64; +}; + +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct callback_head rcu; + }; +}; + +struct bpf_run_ctx {}; + +struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; +}; + +struct uid_gid_map { + union { + struct { + struct uid_gid_extent extent[5]; + u32 nr_extents; + }; + struct { + struct uid_gid_extent *forward; + struct uid_gid_extent *reverse; + }; + }; +}; + +struct ctl_table; + +struct ctl_table_root; + +struct ctl_table_set; + +struct ctl_dir; + +struct ctl_node; + +struct ctl_table_header { + union { + struct { + const struct ctl_table *ctl_table; + int ctl_table_size; + int used; + int count; + int nreg; + }; + struct callback_head rcu; + }; + struct completion *unregistering; + const struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; + struct hlist_head inodes; + enum { + SYSCTL_TABLE_TYPE_DEFAULT = 0, + SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY = 1, + } type; +}; + +struct ctl_dir { + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct binfmt_misc; + +struct user_namespace { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + struct uid_gid_map projid_map; + struct user_namespace *parent; + int level; + kuid_t owner; + kgid_t group; + struct ns_common ns; + long unsigned int flags; + bool parent_could_setfcap; + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; + struct work_struct work; + struct ctl_table_set set; + struct ctl_table_header *sysctls; + struct ucounts *ucounts; + long int ucount_max[10]; + long int rlimit_max[4]; + struct binfmt_misc *binfmt_misc; +}; + +struct zswap_lruvec_state { + atomic_long_t nr_disk_swapins; +}; + +struct free_area { + struct list_head free_list[6]; + long unsigned int nr_free; +}; + +struct pglist_data; + +struct lruvec { + struct list_head lists[5]; + spinlock_t lru_lock; + long unsigned int anon_cost; + long unsigned int file_cost; + atomic_long_t nonresident_age; + long unsigned int refaults[2]; + long unsigned int flags; + struct pglist_data *pgdat; + struct zswap_lruvec_state zswap_lruvec_state; +}; + +struct per_cpu_pages; + +struct per_cpu_zonestat; + +struct zone { + long unsigned int _watermark[4]; + long unsigned int watermark_boost; + long unsigned int nr_reserved_highatomic; + long unsigned int nr_free_highatomic; + long int lowmem_reserve[3]; + struct pglist_data *zone_pgdat; + struct per_cpu_pages *per_cpu_pageset; + struct per_cpu_zonestat *per_cpu_zonestats; + int pageset_high_min; + int pageset_high_max; + int pageset_batch; + long unsigned int *pageblock_flags; + long unsigned int zone_start_pfn; + atomic_long_t managed_pages; + long unsigned int spanned_pages; + long unsigned int present_pages; + long unsigned int cma_pages; + const char *name; + long unsigned int nr_isolate_pageblock; + int initialized; + long: 0; + struct cacheline_padding _pad1_; + struct free_area free_area[11]; + long unsigned int flags; + spinlock_t lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad2_; + long unsigned int percpu_drift_mark; + long unsigned int compact_cached_free_pfn; + long unsigned int compact_cached_migrate_pfn[2]; + long unsigned int 
compact_init_migrate_pfn; + long unsigned int compact_init_free_pfn; + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; + bool compact_blockskip_flush; + bool contiguous; + long: 0; + struct cacheline_padding _pad3_; + atomic_long_t vm_stat[10]; + atomic_long_t vm_numa_event[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct zoneref { + struct zone *zone; + int zone_idx; +}; + +struct zonelist { + struct zoneref _zonerefs[4]; +}; + +enum zone_type { + ZONE_DMA32 = 0, + ZONE_NORMAL = 1, + ZONE_MOVABLE = 2, + __MAX_NR_ZONES = 3, +}; + +struct per_cpu_nodestat; + +struct pglist_data { + struct zone node_zones[3]; + struct zonelist node_zonelists[1]; + int nr_zones; + struct page *node_mem_map; + long unsigned int node_start_pfn; + long unsigned int node_present_pages; + long unsigned int node_spanned_pages; + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + wait_queue_head_t reclaim_wait[4]; + atomic_t nr_writeback_throttled; + long unsigned int nr_reclaim_start; + struct task_struct *kswapd; + int kswapd_order; + enum zone_type kswapd_highest_zoneidx; + int kswapd_failures; + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; + bool proactive_compact_trigger; + long unsigned int totalreserve_pages; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad1_; + struct lruvec __lruvec; + long unsigned int flags; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad2_; + struct per_cpu_nodestat *per_cpu_nodestats; + atomic_long_t vm_stat[45]; + long: 64; + long: 64; +}; + +struct per_cpu_pages { + spinlock_t lock; + int count; + int high; + int high_min; + int high_max; + int batch; + u8 flags; + u8 alloc_factor; + short int free_count; + struct list_head lists[12]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct per_cpu_zonestat { + s8 vm_stat_diff[10]; + s8 stat_threshold; +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[45]; +}; + +struct shrinker_info_unit { + atomic_long_t nr_deferred[64]; + long unsigned int map[1]; +}; + +struct shrinker_info { + struct callback_head rcu; + int map_nr_max; + struct shrinker_info_unit *unit[0]; +}; + +typedef int proc_handler(const struct ctl_table *, int, void *, size_t *, loff_t *); + +struct ctl_table_poll; + +struct ctl_table { + const char *procname; + void *data; + int maxlen; + umode_t mode; + proc_handler *proc_handler; + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +}; + +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set * (*lookup)(struct ctl_table_root *); + void (*set_ownership)(struct ctl_table_header *, kuid_t *, kgid_t *); + int (*permissions)(struct ctl_table_header *, const struct ctl_table *); +}; + +struct taskstats { + __u16 version; + __u32 ac_exitcode; + __u8 ac_flag; + __u8 ac_nice; + __u64 cpu_count; + __u64 cpu_delay_total; + __u64 blkio_count; + __u64 blkio_delay_total; + __u64 swapin_count; + __u64 swapin_delay_total; + __u64 cpu_run_real_total; + __u64 cpu_run_virtual_total; + char ac_comm[32]; + __u8 ac_sched; + __u8 ac_pad[3]; + long: 0; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + __u64 
ac_etime; + __u64 ac_utime; + __u64 ac_stime; + __u64 ac_minflt; + __u64 ac_majflt; + __u64 coremem; + __u64 virtmem; + __u64 hiwater_rss; + __u64 hiwater_vm; + __u64 read_char; + __u64 write_char; + __u64 read_syscalls; + __u64 write_syscalls; + __u64 read_bytes; + __u64 write_bytes; + __u64 cancelled_write_bytes; + __u64 nvcsw; + __u64 nivcsw; + __u64 ac_utimescaled; + __u64 ac_stimescaled; + __u64 cpu_scaled_run_real_total; + __u64 freepages_count; + __u64 freepages_delay_total; + __u64 thrashing_count; + __u64 thrashing_delay_total; + __u64 ac_btime64; + __u64 compact_count; + __u64 compact_delay_total; + __u32 ac_tgid; + __u64 ac_tgetime; + __u64 ac_exe_dev; + __u64 ac_exe_inode; + __u64 wpcopy_count; + __u64 wpcopy_delay_total; + __u64 irq_count; + __u64 irq_delay_total; +}; + +enum writeback_sync_modes { + WB_SYNC_NONE = 0, + WB_SYNC_ALL = 1, +}; + +struct swap_iocb; + +struct writeback_control { + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + enum writeback_sync_modes sync_mode; + unsigned int for_kupdate: 1; + unsigned int for_background: 1; + unsigned int tagged_writepages: 1; + unsigned int for_reclaim: 1; + unsigned int range_cyclic: 1; + unsigned int for_sync: 1; + unsigned int unpinned_netfs_wb: 1; + unsigned int no_cgroup_owner: 1; + struct swap_iocb **swap_plug; + struct list_head *list; + struct folio_batch fbatch; + long unsigned int index; + int saved_err; + struct bdi_writeback *wb; + struct inode *inode; + int wb_id; + int wb_lcand_id; + int wb_tcand_id; + size_t wb_bytes; + size_t wb_lcand_bytes; + size_t wb_tcand_bytes; +}; + +struct fprop_local_percpu { + struct percpu_counter events; + unsigned int period; + raw_spinlock_t lock; +}; + +enum wb_reason { + WB_REASON_BACKGROUND = 0, + WB_REASON_VMSCAN = 1, + WB_REASON_SYNC = 2, + WB_REASON_PERIODIC = 3, + WB_REASON_LAPTOP_TIMER = 4, + WB_REASON_FS_FREE_SPACE = 5, + WB_REASON_FORKER_THREAD = 6, + WB_REASON_FOREIGN_FLUSH = 7, + WB_REASON_MAX = 8, +}; + +struct bdi_writeback { + struct backing_dev_info *bdi; + long unsigned int state; + long unsigned int last_old_flush; + struct list_head b_dirty; + struct list_head b_io; + struct list_head b_more_io; + struct list_head b_dirty_time; + spinlock_t list_lock; + atomic_t writeback_inodes; + struct percpu_counter stat[4]; + long unsigned int bw_time_stamp; + long unsigned int dirtied_stamp; + long unsigned int written_stamp; + long unsigned int write_bandwidth; + long unsigned int avg_write_bandwidth; + long unsigned int dirty_ratelimit; + long unsigned int balanced_dirty_ratelimit; + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + spinlock_t work_lock; + struct list_head work_list; + struct delayed_work dwork; + struct delayed_work bw_dwork; + struct list_head bdi_node; + struct percpu_ref refcnt; + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; + struct list_head memcg_node; + struct list_head blkcg_node; + struct list_head b_attached; + struct list_head offline_node; + union { + struct work_struct release_work; + struct callback_head rcu; + }; +}; + +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING = 1, + DL_DEV_DRIVER_BOUND = 2, + DL_DEV_UNBINDING = 3, +}; + +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + struct list_head defer_sync; + enum dl_dev_state status; +}; + +struct pm_message { + int event; +}; + +typedef struct pm_message pm_message_t; + 
+enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE = 1, + RPM_REQ_SUSPEND = 2, + RPM_REQ_AUTOSUSPEND = 3, + RPM_REQ_RESUME = 4, +}; + +enum rpm_status { + RPM_INVALID = -1, + RPM_ACTIVE = 0, + RPM_RESUMING = 1, + RPM_SUSPENDED = 2, + RPM_SUSPENDING = 3, +}; + +struct wakeup_source; + +struct wake_irq; + +struct pm_subsys_data; + +struct device; + +struct dev_pm_qos; + +struct dev_pm_info { + pm_message_t power_state; + bool can_wakeup: 1; + bool async_suspend: 1; + bool in_dpm_list: 1; + bool is_prepared: 1; + bool is_suspended: 1; + bool is_noirq_suspended: 1; + bool is_late_suspended: 1; + bool no_pm: 1; + bool early_init: 1; + bool direct_complete: 1; + u32 driver_flags; + spinlock_t lock; + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path: 1; + bool syscore: 1; + bool no_pm_callbacks: 1; + bool async_in_progress: 1; + bool must_resume: 1; + bool may_skip_resume: 1; + struct hrtimer suspend_timer; + u64 timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth: 3; + bool idle_notification: 1; + bool request_pending: 1; + bool deferred_resume: 1; + bool needs_force_resume: 1; + bool runtime_auto: 1; + bool ignore_children: 1; + bool no_callbacks: 1; + bool irq_safe: 1; + bool use_autosuspend: 1; + bool timer_autosuspends: 1; + bool memalloc_noio: 1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + enum rpm_status last_status; + int runtime_error; + int autosuspend_delay; + u64 last_busy; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; + struct pm_subsys_data *subsys_data; + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; +}; + +struct irq_domain; + +struct msi_device_data; + +struct dev_msi_info { + struct irq_domain *domain; + struct msi_device_data *data; +}; + +struct dev_archdata {}; + +enum device_removable { + DEVICE_REMOVABLE_NOT_SUPPORTED = 0, + DEVICE_REMOVABLE_UNKNOWN = 1, + DEVICE_FIXED = 2, + DEVICE_REMOVABLE = 3, +}; + +struct device_private; + +struct device_type; + +struct bus_type; + +struct device_driver; + +struct dev_pm_domain; + +struct bus_dma_region; + +struct device_dma_parameters; + +struct dma_coherent_mem; + +struct io_tlb_mem; + +struct device_node; + +struct fwnode_handle; + +struct class; + +struct iommu_group; + +struct dev_iommu; + +struct device_physical_location; + +struct device { + struct kobject kobj; + struct device *parent; + struct device_private *p; + const char *init_name; + const struct device_type *type; + const struct bus_type *bus; + struct device_driver *driver; + void *platform_data; + void *driver_data; + struct mutex mutex; + struct dev_links_info links; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + struct dev_msi_info msi; + u64 *dma_mask; + u64 coherent_dma_mask; + u64 bus_dma_limit; + const struct bus_dma_region *dma_range_map; + struct device_dma_parameters *dma_parms; + struct list_head dma_pools; + struct dma_coherent_mem *dma_mem; + struct io_tlb_mem *dma_io_tlb_mem; + struct dev_archdata archdata; + struct device_node *of_node; + struct fwnode_handle *fwnode; + dev_t devt; + u32 id; + spinlock_t devres_lock; + struct list_head devres_head; + const struct class *class; + const struct attribute_group **groups; + void (*release)(struct device *); + struct iommu_group *iommu_group; + struct dev_iommu *iommu; + struct device_physical_location 
*physical_location; + enum device_removable removable; + bool offline_disabled: 1; + bool offline: 1; + bool of_node_reused: 1; + bool state_synced: 1; + bool can_match: 1; + bool dma_coherent: 1; + bool dma_skip_sync: 1; +}; + +struct disk_stats; + +struct blk_holder_ops; + +struct partition_meta_info; + +struct block_device { + sector_t bd_start_sect; + sector_t bd_nr_sectors; + struct gendisk *bd_disk; + struct request_queue *bd_queue; + struct disk_stats *bd_stats; + long unsigned int bd_stamp; + atomic_t __bd_flags; + dev_t bd_dev; + struct address_space *bd_mapping; + atomic_t bd_openers; + spinlock_t bd_size_lock; + void *bd_claiming; + void *bd_holder; + const struct blk_holder_ops *bd_holder_ops; + struct mutex bd_holder_lock; + int bd_holders; + struct kobject *bd_holder_dir; + atomic_t bd_fsfreeze_count; + struct mutex bd_fsfreeze_mutex; + struct partition_meta_info *bd_meta_info; + int bd_writers; + struct device bd_device; +}; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; + struct list_head bdi_list; + long unsigned int ra_pages; + long unsigned int io_pages; + struct kref refcnt; + unsigned int capabilities; + unsigned int min_ratio; + unsigned int max_ratio; + unsigned int max_prop_frac; + atomic_long_t tot_write_bandwidth; + long unsigned int last_bdp_sleep; + struct bdi_writeback wb; + struct list_head wb_list; + struct xarray cgwb_tree; + struct mutex cgwb_release_mutex; + struct rw_semaphore wb_switch_rwsem; + wait_queue_head_t wb_waitq; + struct device *dev; + char dev_name[64]; + struct device *owner; + struct timer_list laptop_mode_wb_timer; + struct dentry *debug_dir; +}; + +struct seq_operations; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + loff_t index; + loff_t read_pos; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; +}; + +typedef __u32 blk_opf_t; + +typedef u8 blk_status_t; + +struct bvec_iter { + sector_t bi_sector; + unsigned int bi_size; + unsigned int bi_idx; + unsigned int bi_bvec_done; +} __attribute__((packed)); + +typedef unsigned int blk_qc_t; + +typedef void bio_end_io_t(struct bio *); + +struct bio_issue { + u64 value; +}; + +struct blkcg_gq; + +struct bio_set; + +struct bio { + struct bio *bi_next; + struct block_device *bi_bdev; + blk_opf_t bi_opf; + short unsigned int bi_flags; + short unsigned int bi_ioprio; + enum rw_hint bi_write_hint; + blk_status_t bi_status; + atomic_t __bi_remaining; + struct bvec_iter bi_iter; + union { + blk_qc_t bi_cookie; + unsigned int __bi_nr_segments; + }; + bio_end_io_t *bi_end_io; + void *bi_private; + struct blkcg_gq *bi_blkg; + struct bio_issue bi_issue; + u64 bi_iocost_cost; + short unsigned int bi_vcnt; + short unsigned int bi_max_vecs; + atomic_t __bi_cnt; + struct bio_vec *bi_io_vec; + struct bio_set *bi_pool; + struct bio_vec bi_inline_vecs[0]; +}; + +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, + PERF_COUNT_SW_MAX = 12, +}; + +struct cgroup_namespace { + struct ns_common ns; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct css_set *root_cset; +}; + +enum 
dev_dma_attr { + DEV_DMA_NOT_SUPPORTED = 0, + DEV_DMA_NON_COHERENT = 1, + DEV_DMA_COHERENT = 2, +}; + +struct fwnode_operations; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; + struct device *dev; + struct list_head suppliers; + struct list_head consumers; + u8 flags; +}; + +struct fwnode_reference_args; + +struct fwnode_endpoint; + +struct fwnode_operations { + struct fwnode_handle * (*get)(struct fwnode_handle *); + void (*put)(struct fwnode_handle *); + bool (*device_is_available)(const struct fwnode_handle *); + const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); + bool (*device_dma_supported)(const struct fwnode_handle *); + enum dev_dma_attr (*device_get_dma_attr)(const struct fwnode_handle *); + bool (*property_present)(const struct fwnode_handle *, const char *); + int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); + int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); + const char * (*get_name)(const struct fwnode_handle *); + const char * (*get_name_prefix)(const struct fwnode_handle *); + struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); + struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); + int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); + struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *); + struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); + int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); + void * (*iomap)(struct fwnode_handle *, int); + int (*irq_get)(const struct fwnode_handle *, unsigned int); + int (*add_links)(struct fwnode_handle *); +}; + +struct fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct fwnode_handle *local_fwnode; +}; + +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[8]; +}; + +struct dev_pm_ops { + int (*prepare)(struct device *); + void (*complete)(struct device *); + int (*suspend)(struct device *); + int (*resume)(struct device *); + int (*freeze)(struct device *); + int (*thaw)(struct device *); + int (*poweroff)(struct device *); + int (*restore)(struct device *); + int (*suspend_late)(struct device *); + int (*resume_early)(struct device *); + int (*freeze_late)(struct device *); + int (*thaw_early)(struct device *); + int (*poweroff_late)(struct device *); + int (*restore_early)(struct device *); + int (*suspend_noirq)(struct device *); + int (*resume_noirq)(struct device *); + int (*freeze_noirq)(struct device *); + int (*thaw_noirq)(struct device *); + int (*poweroff_noirq)(struct device *); + int (*restore_noirq)(struct device *); + int (*runtime_suspend)(struct device *); + int (*runtime_resume)(struct device *); + int (*runtime_idle)(struct device *); +}; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; + unsigned int clock_op_might_sleep; + struct mutex clock_mutex; + struct list_head clock_list; +}; + +struct wakeup_source { + const char *name; + int id; + struct list_head entry; + spinlock_t lock; + struct 
wake_irq *wakeirq; + struct timer_list timer; + long unsigned int timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + long unsigned int event_count; + long unsigned int active_count; + long unsigned int relax_count; + long unsigned int expire_count; + long unsigned int wakeup_count; + struct device *dev; + bool active: 1; + bool autosleep_enabled: 1; +}; + +struct dev_pm_domain { + struct dev_pm_ops ops; + int (*start)(struct device *); + void (*detach)(struct device *, bool); + int (*activate)(struct device *); + void (*sync)(struct device *); + void (*dismiss)(struct device *); + int (*set_performance_state)(struct device *, unsigned int); +}; + +struct bus_type { + const char *name; + const char *dev_name; + const struct attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + int (*match)(struct device *, const struct device_driver *); + int (*uevent)(const struct device *, struct kobj_uevent_env *); + int (*probe)(struct device *); + void (*sync_state)(struct device *); + void (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*online)(struct device *); + int (*offline)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + int (*num_vf)(struct device *); + int (*dma_configure)(struct device *); + void (*dma_cleanup)(struct device *); + const struct dev_pm_ops *pm; + bool need_parent_lock; +}; + +enum probe_type { + PROBE_DEFAULT_STRATEGY = 0, + PROBE_PREFER_ASYNCHRONOUS = 1, + PROBE_FORCE_SYNCHRONOUS = 2, +}; + +struct of_device_id; + +struct acpi_device_id; + +struct driver_private; + +struct device_driver { + const char *name; + const struct bus_type *bus; + struct module *owner; + const char *mod_name; + bool suppress_bind_attrs; + enum probe_type probe_type; + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + const struct dev_pm_ops *pm; + void (*coredump)(struct device *); + struct driver_private *p; +}; + +struct class { + const char *name; + const struct attribute_group **class_groups; + const struct attribute_group **dev_groups; + int (*dev_uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *); + void (*class_release)(const struct class *); + void (*dev_release)(struct device *); + int (*shutdown_pre)(struct device *); + const struct kobj_ns_type_operations *ns_type; + const void * (*namespace)(const struct device *); + void (*get_ownership)(const struct device *, kuid_t *, kgid_t *); + const struct dev_pm_ops *pm; +}; + +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(const struct device *, struct kobj_uevent_env *); + char * (*devnode)(const struct device *, umode_t *, kuid_t *, kgid_t *); + void (*release)(struct device *); + const struct dev_pm_ops *pm; +}; + +struct of_device_id { + char name[32]; + char type[32]; + char compatible[128]; + const void *data; +}; + +typedef long unsigned int kernel_ulong_t; + +struct acpi_device_id { + __u8 id[16]; + kernel_ulong_t driver_data; + __u32 cls; + __u32 
cls_msk; +}; + +struct device_dma_parameters { + unsigned int max_segment_size; + unsigned int min_align_mask; + long unsigned int segment_boundary_mask; +}; + +enum device_physical_location_panel { + DEVICE_PANEL_TOP = 0, + DEVICE_PANEL_BOTTOM = 1, + DEVICE_PANEL_LEFT = 2, + DEVICE_PANEL_RIGHT = 3, + DEVICE_PANEL_FRONT = 4, + DEVICE_PANEL_BACK = 5, + DEVICE_PANEL_UNKNOWN = 6, +}; + +enum device_physical_location_vertical_position { + DEVICE_VERT_POS_UPPER = 0, + DEVICE_VERT_POS_CENTER = 1, + DEVICE_VERT_POS_LOWER = 2, +}; + +enum device_physical_location_horizontal_position { + DEVICE_HORI_POS_LEFT = 0, + DEVICE_HORI_POS_CENTER = 1, + DEVICE_HORI_POS_RIGHT = 2, +}; + +struct device_physical_location { + enum device_physical_location_panel panel; + enum device_physical_location_vertical_position vertical_position; + enum device_physical_location_horizontal_position horizontal_position; + bool dock; + bool lid; +}; + +typedef u64 dma_addr_t; + +struct bus_dma_region { + phys_addr_t cpu_start; + dma_addr_t dma_start; + u64 size; +}; + +struct seq_operations { + void * (*start)(struct seq_file *, loff_t *); + void (*stop)(struct seq_file *, void *); + void * (*next)(struct seq_file *, void *, loff_t *); + int (*show)(struct seq_file *, void *); +}; + +struct u64_stats_sync {}; + +struct bpf_prog; + +struct bpf_cgroup_storage; + +struct bpf_prog_array_item { + struct bpf_prog *prog; + union { + struct bpf_cgroup_storage *cgroup_storage[2]; + u64 bpf_cookie; + }; +}; + +struct bpf_prog_array { + struct callback_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct psi_group_cpu { + seqcount_t seq; + unsigned int tasks[4]; + u32 state_mask; + u32 times[7]; + u64 state_start; + u32 times_prev[14]; + long: 64; +}; + +struct psi_group { + struct psi_group *parent; + bool enabled; + struct mutex avgs_lock; + struct psi_group_cpu *pcpu; + u64 avg_total[6]; + u64 avg_last_update; + u64 avg_next_update; + struct delayed_work avgs_work; + struct list_head avg_triggers; + u32 avg_nr_triggers[6]; + u64 total[12]; + long unsigned int avg[18]; + struct task_struct *rtpoll_task; + struct timer_list rtpoll_timer; + wait_queue_head_t rtpoll_wait; + atomic_t rtpoll_wakeup; + atomic_t rtpoll_scheduled; + struct mutex rtpoll_trigger_lock; + struct list_head rtpoll_triggers; + u32 rtpoll_nr_triggers[6]; + u32 rtpoll_states; + u64 rtpoll_min_period; + u64 rtpoll_total[6]; + u64 rtpoll_next_update; + u64 rtpoll_until; +}; + +struct cgroup_taskset; + +struct cftype; + +struct cgroup_subsys { + struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); + int (*css_online)(struct cgroup_subsys_state *); + void (*css_offline)(struct cgroup_subsys_state *); + void (*css_released)(struct cgroup_subsys_state *); + void (*css_free)(struct cgroup_subsys_state *); + void (*css_reset)(struct cgroup_subsys_state *); + void (*css_rstat_flush)(struct cgroup_subsys_state *, int); + int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*css_local_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*can_attach)(struct cgroup_taskset *); + void (*cancel_attach)(struct cgroup_taskset *); + void (*attach)(struct cgroup_taskset *); + void (*post_attach)(); + int (*can_fork)(struct task_struct *, struct css_set *); + void (*cancel_fork)(struct task_struct *, struct css_set *); + void (*fork)(struct task_struct *); + void (*exit)(struct task_struct *); + void (*release)(struct task_struct *); + void (*bind)(struct cgroup_subsys_state *); + bool early_init: 1; + bool 
implicit_on_dfl: 1; + bool threaded: 1; + int id; + const char *name; + const char *legacy_name; + struct cgroup_root *root; + struct idr css_idr; + struct list_head cfts; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + unsigned int depends_on; +}; + +struct cgroup_rstat_cpu { + struct u64_stats_sync bsync; + struct cgroup_base_stat bstat; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat subtree_bstat; + struct cgroup_base_stat last_subtree_bstat; + struct cgroup *updated_children; + struct cgroup *updated_next; +}; + +struct cgroup_root { + struct kernfs_root *kf_root; + unsigned int subsys_mask; + int hierarchy_id; + struct list_head root_list; + struct callback_head rcu; + long: 64; + long: 64; + struct cgroup cgrp; + struct cgroup *cgrp_ancestor_storage; + atomic_t nr_cgrps; + unsigned int flags; + char release_agent_path[4096]; + char name[64]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct cftype { + char name[64]; + long unsigned int private; + size_t max_write_len; + unsigned int flags; + unsigned int file_offset; + struct cgroup_subsys *ss; + struct list_head node; + struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); + s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); + int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + struct lock_class_key lockdep_key; +}; + +struct bpf_insn { + __u8 code; + __u8 dst_reg: 4; + __u8 src_reg: 4; + __s16 off; + __s32 imm; +}; + +enum bpf_cgroup_iter_order { + BPF_CGROUP_ITER_ORDER_UNSPEC = 0, + BPF_CGROUP_ITER_SELF_ONLY = 1, + BPF_CGROUP_ITER_DESCENDANTS_PRE = 2, + BPF_CGROUP_ITER_DESCENDANTS_POST = 3, + BPF_CGROUP_ITER_ANCESTORS_UP = 4, +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, + BPF_MAP_TYPE_BLOOM_FILTER = 30, + BPF_MAP_TYPE_USER_RINGBUF = 31, + BPF_MAP_TYPE_CGRP_STORAGE = 32, + BPF_MAP_TYPE_ARENA = 33, + __MAX_BPF_MAP_TYPE = 34, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 
0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, + BPF_PROG_TYPE_NETFILTER = 32, + __MAX_BPF_PROG_TYPE = 33, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + BPF_PERF_EVENT = 41, + BPF_TRACE_KPROBE_MULTI = 42, + BPF_LSM_CGROUP = 43, + BPF_STRUCT_OPS = 44, + BPF_NETFILTER = 45, + BPF_TCX_INGRESS = 46, + BPF_TCX_EGRESS = 47, + BPF_TRACE_UPROBE_MULTI = 48, + BPF_CGROUP_UNIX_CONNECT = 49, + BPF_CGROUP_UNIX_SENDMSG = 50, + BPF_CGROUP_UNIX_RECVMSG = 51, + BPF_CGROUP_UNIX_GETPEERNAME = 52, + BPF_CGROUP_UNIX_GETSOCKNAME = 53, + BPF_NETKIT_PRIMARY = 54, + BPF_NETKIT_PEER = 55, + BPF_TRACE_KPROBE_SESSION = 56, + BPF_TRACE_UPROBE_SESSION = 57, + __MAX_BPF_ATTACH_TYPE = 58, +}; + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 inner_map_fd; + __u32 numa_node; + char map_name[16]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + __u64 map_extra; + __s32 value_type_btf_obj_fd; + __s32 map_token_fd; + }; + struct { + __u32 map_fd; + __u64 key; + union { + __u64 value; + __u64 next_key; + }; + __u64 flags; + }; + struct { + __u64 in_batch; + __u64 out_batch; + __u64 keys; + __u64 values; + __u32 count; + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 insns; + __u64 
license; + __u32 log_level; + __u32 log_size; + __u64 log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[16]; + __u32 prog_ifindex; + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + union { + __u32 attach_prog_fd; + __u32 attach_btf_obj_fd; + }; + __u32 core_relo_cnt; + __u64 fd_array; + __u64 core_relos; + __u32 core_relo_rec_size; + __u32 log_true_size; + __s32 prog_token_fd; + }; + struct { + __u64 pathname; + __u32 bpf_fd; + __u32 file_flags; + __s32 path_fd; + }; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + }; + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __u64 data_in; + __u64 data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + __u64 ctx_in; + __u64 ctx_out; + __u32 flags; + __u32 cpu; + __u32 batch_size; + } test; + struct { + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 info; + } info; + struct { + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 prog_ids; + union { + __u32 prog_cnt; + __u32 count; + }; + __u64 prog_attach_flags; + __u64 link_ids; + __u64 link_attach_flags; + __u64 revision; + } query; + struct { + __u64 name; + __u32 prog_fd; + __u64 cookie; + } raw_tracepoint; + struct { + __u64 btf; + __u64 btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + __u32 btf_log_true_size; + __u32 btf_flags; + __s32 btf_token_fd; + }; + struct { + __u32 pid; + __u32 fd; + __u32 flags; + __u32 buf_len; + __u64 buf; + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + struct { + union { + __u32 prog_fd; + __u32 map_fd; + }; + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 flags; + union { + __u32 target_btf_id; + struct { + __u64 iter_info; + __u32 iter_info_len; + }; + struct { + __u64 bpf_cookie; + } perf_event; + struct { + __u32 flags; + __u32 cnt; + __u64 syms; + __u64 addrs; + __u64 cookies; + } kprobe_multi; + struct { + __u32 target_btf_id; + __u64 cookie; + } tracing; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } tcx; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 cnt; + __u32 flags; + __u32 pid; + } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } netkit; + }; + } link_create; + struct { + __u32 link_fd; + union { + __u32 new_prog_fd; + __u32 new_map_fd; + }; + __u32 flags; + union { + __u32 old_prog_fd; + __u32 old_map_fd; + }; + } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { + __u32 type; + } enable_stats; + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + struct { + __u32 prog_fd; + __u32 map_fd; + __u32 flags; + } prog_bind_map; + struct { + __u32 flags; + __u32 bpffs_fd; + } token_create; +}; + +struct 
bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + +struct sock_filter { + __u16 code; + __u8 jt; + __u8 jf; + __u32 k; +}; + +struct btf_type { + __u32 name_off; + __u32 info; + union { + __u32 size; + __u32 type; + }; +}; + +struct bpf_prog_stats; + +struct bpf_prog_aux; + +struct sock_fprog_kern; + +struct bpf_prog { + u16 pages; + u16 jited: 1; + u16 jit_requested: 1; + u16 gpl_compatible: 1; + u16 cb_access: 1; + u16 dst_needed: 1; + u16 blinding_requested: 1; + u16 blinded: 1; + u16 is_func: 1; + u16 kprobe_override: 1; + u16 has_callchain_buf: 1; + u16 enforce_expected_attach_type: 1; + u16 call_get_stack: 1; + u16 call_get_func_ip: 1; + u16 tstamp_type_access: 1; + u16 sleepable: 1; + enum bpf_prog_type type; + enum bpf_attach_type expected_attach_type; + u32 len; + u32 jited_len; + u8 tag[8]; + struct bpf_prog_stats *stats; + int *active; + unsigned int (*bpf_func)(const void *, const struct bpf_insn *); + struct bpf_prog_aux *aux; + struct sock_fprog_kern *orig_prog; + union { + struct { + struct {} __empty_insns; + struct sock_filter insns[0]; + }; + struct { + struct {} __empty_insnsi; + struct bpf_insn insnsi[0]; + }; + }; +}; + +enum btf_field_type { + BPF_SPIN_LOCK = 1, + BPF_TIMER = 2, + BPF_KPTR_UNREF = 4, + BPF_KPTR_REF = 8, + BPF_KPTR_PERCPU = 16, + BPF_KPTR = 28, + BPF_LIST_HEAD = 32, + BPF_LIST_NODE = 64, + BPF_RB_ROOT = 128, + BPF_RB_NODE = 256, + BPF_GRAPH_NODE = 320, + BPF_GRAPH_ROOT = 160, + BPF_REFCOUNT = 512, + BPF_WORKQUEUE = 1024, + BPF_UPTR = 2048, +}; + +typedef void (*btf_dtor_kfunc_t)(void *); + +struct btf; + +struct btf_field_kptr { + struct btf *btf; + struct module *module; + btf_dtor_kfunc_t dtor; + u32 btf_id; +}; + +struct btf_record; + +struct btf_field_graph_root { + struct btf *btf; + u32 value_btf_id; + u32 node_offset; + struct btf_record *value_rec; +}; + +struct btf_field { + u32 offset; + u32 size; + enum btf_field_type type; + union { + struct btf_field_kptr kptr; + struct btf_field_graph_root graph_root; + }; +}; + +struct btf_record { + u32 cnt; + u32 field_mask; + int spin_lock_off; + int timer_off; + int wq_off; + int refcount_off; + struct btf_field fields[0]; +}; + +struct blk_holder_ops { + void (*mark_dead)(struct block_device *, bool); + void (*sync)(struct block_device *); + int (*freeze)(struct block_device *); + int (*thaw)(struct block_device *); +}; + +typedef void *mempool_alloc_t(gfp_t, void *); + +typedef void mempool_free_t(void *, void *); + +struct mempool_s { + spinlock_t lock; + int min_nr; + int curr_nr; + void **elements; + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +typedef struct mempool_s mempool_t; + +struct bio_alloc_cache; + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + struct bio_alloc_cache *cache; + mempool_t bio_pool; + mempool_t bvec_pool; + unsigned int back_pad; + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; + struct hlist_node cpuhp_dead; +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + atomic_t generation; +}; + +struct lruvec_stats_percpu; + +struct lruvec_stats; + +struct mem_cgroup_per_node { + struct mem_cgroup *memcg; + struct lruvec_stats_percpu *lruvec_stats_percpu; + struct lruvec_stats *lruvec_stats; + struct shrinker_info *shrinker_info; + long: 64; + long: 64; + long: 64; + long: 64; + 
struct cacheline_padding _pad1_; + struct lruvec lruvec; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct cacheline_padding _pad2_; + long unsigned int lru_zone_size[15]; + struct mem_cgroup_reclaim_iter iter; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); + +struct bpf_iter_aux_info; + +typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); + +enum bpf_iter_task_type { + BPF_TASK_ITER_ALL = 0, + BPF_TASK_ITER_TID = 1, + BPF_TASK_ITER_TGID = 2, +}; + +struct bpf_map; + +struct bpf_iter_aux_info { + struct bpf_map *map; + struct { + struct cgroup *start; + enum bpf_cgroup_iter_order order; + } cgroup; + struct { + enum bpf_iter_task_type type; + u32 pid; + } task; +}; + +typedef void (*bpf_iter_fini_seq_priv_t)(void *); + +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + +struct bpf_local_storage_map; + +struct bpf_verifier_env; + +struct bpf_func_state; + +struct bpf_map_ops { + int (*map_alloc_check)(union bpf_attr *); + struct bpf_map * (*map_alloc)(union bpf_attr *); + void (*map_release)(struct bpf_map *, struct file *); + void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *, void *, void *); + void (*map_release_uref)(struct bpf_map *); + void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *); + int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_lookup_and_delete_elem)(struct bpf_map *, void *, void *, u64); + int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_update_batch)(struct bpf_map *, struct file *, const union bpf_attr *, union bpf_attr *); + int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + void * (*map_lookup_elem)(struct bpf_map *, void *); + long int (*map_update_elem)(struct bpf_map *, void *, void *, u64); + long int (*map_delete_elem)(struct bpf_map *, void *); + long int (*map_push_elem)(struct bpf_map *, void *, u64); + long int (*map_pop_elem)(struct bpf_map *, void *); + long int (*map_peek_elem)(struct bpf_map *, void *); + void * (*map_lookup_percpu_elem)(struct bpf_map *, void *, u32); + void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); + void (*map_fd_put_ptr)(struct bpf_map *, void *, bool); + int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); + u32 (*map_fd_sys_lookup_elem)(void *); + void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); + int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *); + int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); + int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32); + int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); + int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); + __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); + long unsigned int (*map_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); + void 
(*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); + struct bpf_local_storage ** (*map_owner_storage_ptr)(void *); + long int (*map_redirect)(struct bpf_map *, u64, u64); + bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); + int (*map_set_for_each_callback_args)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *); + long int (*map_for_each_callback)(struct bpf_map *, bpf_callback_t, void *, u64); + u64 (*map_mem_usage)(const struct bpf_map *); + int *map_btf_id; + const struct bpf_iter_seq_info *iter_seq_info; +}; + +struct bpf_map { + const struct bpf_map_ops *ops; + struct bpf_map *inner_map_meta; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u64 map_extra; + u32 map_flags; + u32 id; + struct btf_record *record; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + u32 btf_vmlinux_value_type_id; + struct btf *btf; + struct obj_cgroup *objcg; + char name[16]; + struct mutex freeze_mutex; + atomic64_t refcnt; + atomic64_t usercnt; + union { + struct work_struct work; + struct callback_head rcu; + }; + atomic64_t writecnt; + struct { + const struct btf_type *attach_func_proto; + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + } owner; + bool bypass_spec_v1; + bool frozen; + bool free_after_mult_rcu_gp; + bool free_after_rcu_gp; + atomic64_t sleepable_refcnt; + s64 *elem_count; +}; + +struct btf_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + __u32 type_off; + __u32 type_len; + __u32 str_off; + __u32 str_len; +}; + +struct btf_kfunc_set_tab; + +struct btf_id_dtor_kfunc_tab; + +struct btf_struct_metas; + +struct btf_struct_ops_tab; + +struct btf { + void *data; + struct btf_type **types; + u32 *resolved_ids; + u32 *resolved_sizes; + const char *strings; + void *nohdr_data; + struct btf_header hdr; + u32 nr_types; + u32 types_size; + u32 data_size; + refcount_t refcnt; + u32 id; + struct callback_head rcu; + struct btf_kfunc_set_tab *kfunc_set_tab; + struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; + struct btf_struct_metas *struct_meta_tab; + struct btf_struct_ops_tab *struct_ops_tab; + struct btf *base_btf; + u32 start_id; + u32 start_str_off; + char name[56]; + bool kernel_btf; + __u32 *base_id_map; +}; + +struct bpf_ksym { + long unsigned int start; + long unsigned int end; + char name[512]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + +struct bpf_ctx_arg_aux; + +struct bpf_trampoline; + +struct bpf_arena; + +struct bpf_jit_poke_descriptor; + +struct bpf_kfunc_desc_tab; + +struct bpf_kfunc_btf_tab; + +struct bpf_prog_ops; + +struct btf_mod_pair; + +struct bpf_token; + +struct bpf_prog_offload; + +struct bpf_func_info_aux; + +struct bpf_prog_aux { + atomic64_t refcnt; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; + u32 real_func_cnt; + u32 func_idx; + u32 attach_btf_id; + u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; + struct btf *attach_btf; + const struct bpf_ctx_arg_aux *ctx_arg_info; + void *priv_stack_ptr; + struct mutex dst_mutex; + struct bpf_prog *dst_prog; + struct bpf_trampoline *dst_trampoline; + enum bpf_prog_type saved_dst_prog_type; + enum bpf_attach_type saved_dst_attach_type; + bool verifier_zext; + bool dev_bound; + bool offload_requested; + bool attach_btf_trace; + bool attach_tracing_prog; + bool func_proto_unreliable; + bool 
tail_call_reachable; + bool xdp_has_frags; + bool exception_cb; + bool exception_boundary; + bool is_extended; + bool jits_use_priv_stack; + bool priv_stack_requested; + u64 prog_array_member_cnt; + struct mutex ext_mutex; + struct bpf_arena *arena; + void (*recursion_detected)(struct bpf_prog *); + const struct btf_type *attach_func_proto; + const char *attach_func_name; + struct bpf_prog **func; + void *jit_data; + struct bpf_jit_poke_descriptor *poke_tab; + struct bpf_kfunc_desc_tab *kfunc_tab; + struct bpf_kfunc_btf_tab *kfunc_btf_tab; + u32 size_poke_tab; + struct bpf_ksym ksym; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct mutex used_maps_mutex; + struct btf_mod_pair *used_btfs; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; + u32 verified_insns; + int cgroup_atype; + struct bpf_map *cgroup_storage[2]; + char name[16]; + u64 (*bpf_exception_cb)(u64, u64, u64, u64, u64); + struct bpf_token *token; + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; + struct bpf_line_info *linfo; + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + u32 linfo_idx; + struct module *mod; + u32 num_exentries; + struct exception_table_entry *extable; + union { + struct work_struct work; + struct callback_head rcu; + }; +}; + +enum bpf_reg_type { + NOT_INIT = 0, + SCALAR_VALUE = 1, + PTR_TO_CTX = 2, + CONST_PTR_TO_MAP = 3, + PTR_TO_MAP_VALUE = 4, + PTR_TO_MAP_KEY = 5, + PTR_TO_STACK = 6, + PTR_TO_PACKET_META = 7, + PTR_TO_PACKET = 8, + PTR_TO_PACKET_END = 9, + PTR_TO_FLOW_KEYS = 10, + PTR_TO_SOCKET = 11, + PTR_TO_SOCK_COMMON = 12, + PTR_TO_TCP_SOCK = 13, + PTR_TO_TP_BUFFER = 14, + PTR_TO_XDP_SOCK = 15, + PTR_TO_BTF_ID = 16, + PTR_TO_MEM = 17, + PTR_TO_ARENA = 18, + PTR_TO_BUF = 19, + PTR_TO_FUNC = 20, + CONST_PTR_TO_DYNPTR = 21, + __BPF_REG_TYPE_MAX = 22, + PTR_TO_MAP_VALUE_OR_NULL = 260, + PTR_TO_SOCKET_OR_NULL = 267, + PTR_TO_SOCK_COMMON_OR_NULL = 268, + PTR_TO_TCP_SOCK_OR_NULL = 269, + PTR_TO_BTF_ID_OR_NULL = 272, + __BPF_REG_TYPE_LIMIT = 134217727, +}; + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *); +}; + +struct net_device; + +struct bpf_offload_dev; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +struct btf_func_model { + u8 ret_size; + u8 ret_flags; + u8 nr_args; + u8 arg_size[12]; + u8 arg_flags[12]; +}; + +struct bpf_tramp_image { + void *image; + int size; + struct bpf_ksym ksym; + struct percpu_ref pcref; + void *ip_after_call; + void *ip_epilogue; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct ftrace_ops; + +struct bpf_trampoline { + struct hlist_node hlist; + struct ftrace_ops *fops; + struct mutex mutex; + refcount_t refcnt; + u32 flags; + u64 key; + struct { + struct btf_func_model model; + void *addr; + bool ftrace_managed; + } func; + struct bpf_prog *extension_prog; + struct hlist_head progs_hlist[3]; + int progs_cnt[3]; + struct bpf_tramp_image *cur_image; +}; + +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; + bool called: 1; + bool verified: 1; +}; + +struct bpf_jit_poke_descriptor { + void *tailcall_target; + void *tailcall_bypass; + void *bypass_addr; + void *aux; + union { + struct { + struct bpf_map *map; + u32 key; + } tail_call; + }; + bool 
tailcall_target_stable; + u8 adj_off; + u16 reason; + u32 insn_idx; +}; + +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + struct btf *btf; + u32 btf_id; +}; + +struct btf_mod_pair { + struct btf *btf; + struct module *module; +}; + +struct bpf_token { + struct work_struct work; + atomic64_t refcnt; + struct user_namespace *userns; + u64 allowed_cmds; + u64 allowed_maps; + u64 allowed_progs; + u64 allowed_attachs; +}; + +union reg_data { + u8 data_bytes[8]; + ulong data_ulong; + u64 data_u64; +}; + +typedef void (*smp_call_func_t)(void *); + +typedef bool (*smp_cond_func_t)(int, void *); + +enum node_states { + N_POSSIBLE = 0, + N_ONLINE = 1, + N_NORMAL_MEMORY = 2, + N_HIGH_MEMORY = 2, + N_MEMORY = 3, + N_CPU = 4, + N_GENERIC_INITIATOR = 5, + NR_NODE_STATES = 6, +}; + +enum cpuhp_state { + CPUHP_INVALID = -1, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS = 1, + CPUHP_PERF_PREPARE = 2, + CPUHP_PERF_X86_PREPARE = 3, + CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, + CPUHP_PERF_POWER = 5, + CPUHP_PERF_SUPERH = 6, + CPUHP_X86_HPET_DEAD = 7, + CPUHP_X86_MCE_DEAD = 8, + CPUHP_VIRT_NET_DEAD = 9, + CPUHP_IBMVNIC_DEAD = 10, + CPUHP_SLUB_DEAD = 11, + CPUHP_DEBUG_OBJ_DEAD = 12, + CPUHP_MM_WRITEBACK_DEAD = 13, + CPUHP_MM_VMSTAT_DEAD = 14, + CPUHP_SOFTIRQ_DEAD = 15, + CPUHP_NET_MVNETA_DEAD = 16, + CPUHP_CPUIDLE_DEAD = 17, + CPUHP_ARM64_FPSIMD_DEAD = 18, + CPUHP_ARM_OMAP_WAKE_DEAD = 19, + CPUHP_IRQ_POLL_DEAD = 20, + CPUHP_BLOCK_SOFTIRQ_DEAD = 21, + CPUHP_BIO_DEAD = 22, + CPUHP_ACPI_CPUDRV_DEAD = 23, + CPUHP_S390_PFAULT_DEAD = 24, + CPUHP_BLK_MQ_DEAD = 25, + CPUHP_FS_BUFF_DEAD = 26, + CPUHP_PRINTK_DEAD = 27, + CPUHP_MM_MEMCQ_DEAD = 28, + CPUHP_PERCPU_CNT_DEAD = 29, + CPUHP_RADIX_DEAD = 30, + CPUHP_PAGE_ALLOC = 31, + CPUHP_NET_DEV_DEAD = 32, + CPUHP_PCI_XGENE_DEAD = 33, + CPUHP_IOMMU_IOVA_DEAD = 34, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 35, + CPUHP_PADATA_DEAD = 36, + CPUHP_AP_DTPM_CPU_DEAD = 37, + CPUHP_RANDOM_PREPARE = 38, + CPUHP_WORKQUEUE_PREP = 39, + CPUHP_POWER_NUMA_PREPARE = 40, + CPUHP_HRTIMERS_PREPARE = 41, + CPUHP_X2APIC_PREPARE = 42, + CPUHP_SMPCFD_PREPARE = 43, + CPUHP_RELAY_PREPARE = 44, + CPUHP_MD_RAID5_PREPARE = 45, + CPUHP_RCUTREE_PREP = 46, + CPUHP_CPUIDLE_COUPLED_PREPARE = 47, + CPUHP_POWERPC_PMAC_PREPARE = 48, + CPUHP_POWERPC_MMU_CTX_PREPARE = 49, + CPUHP_XEN_PREPARE = 50, + CPUHP_XEN_EVTCHN_PREPARE = 51, + CPUHP_ARM_SHMOBILE_SCU_PREPARE = 52, + CPUHP_SH_SH3X_PREPARE = 53, + CPUHP_TOPOLOGY_PREPARE = 54, + CPUHP_NET_IUCV_PREPARE = 55, + CPUHP_ARM_BL_PREPARE = 56, + CPUHP_TRACE_RB_PREPARE = 57, + CPUHP_MM_ZS_PREPARE = 58, + CPUHP_MM_ZSWP_POOL_PREPARE = 59, + CPUHP_KVM_PPC_BOOK3S_PREPARE = 60, + CPUHP_ZCOMP_PREPARE = 61, + CPUHP_TIMERS_PREPARE = 62, + CPUHP_TMIGR_PREPARE = 63, + CPUHP_MIPS_SOC_PREPARE = 64, + CPUHP_BP_PREPARE_DYN = 65, + CPUHP_BP_PREPARE_DYN_END = 85, + CPUHP_BP_KICK_AP = 86, + CPUHP_BRINGUP_CPU = 87, + CPUHP_AP_IDLE_DEAD = 88, + CPUHP_AP_OFFLINE = 89, + CPUHP_AP_CACHECTRL_STARTING = 90, + CPUHP_AP_SCHED_STARTING = 91, + CPUHP_AP_RCUTREE_DYING = 92, + CPUHP_AP_CPU_PM_STARTING = 93, + CPUHP_AP_IRQ_GIC_STARTING = 94, + CPUHP_AP_IRQ_HIP04_STARTING = 95, + CPUHP_AP_IRQ_APPLE_AIC_STARTING = 96, + CPUHP_AP_IRQ_ARMADA_XP_STARTING = 97, + CPUHP_AP_IRQ_BCM2836_STARTING = 98, + CPUHP_AP_IRQ_MIPS_GIC_STARTING = 99, + CPUHP_AP_IRQ_EIOINTC_STARTING = 100, + CPUHP_AP_IRQ_AVECINTC_STARTING = 101, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 102, + CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING = 103, + CPUHP_AP_IRQ_RISCV_IMSIC_STARTING = 104, + CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING = 105, + 
CPUHP_AP_ARM_MVEBU_COHERENCY = 106, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 107, + CPUHP_AP_PERF_X86_STARTING = 108, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 109, + CPUHP_AP_PERF_XTENSA_STARTING = 110, + CPUHP_AP_ARM_VFP_STARTING = 111, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 112, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 113, + CPUHP_AP_PERF_ARM_ACPI_STARTING = 114, + CPUHP_AP_PERF_ARM_STARTING = 115, + CPUHP_AP_PERF_RISCV_STARTING = 116, + CPUHP_AP_ARM_L2X0_STARTING = 117, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 118, + CPUHP_AP_ARM_ARCH_TIMER_STARTING = 119, + CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING = 120, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 121, + CPUHP_AP_JCORE_TIMER_STARTING = 122, + CPUHP_AP_ARM_TWD_STARTING = 123, + CPUHP_AP_QCOM_TIMER_STARTING = 124, + CPUHP_AP_TEGRA_TIMER_STARTING = 125, + CPUHP_AP_ARMADA_TIMER_STARTING = 126, + CPUHP_AP_MIPS_GIC_TIMER_STARTING = 127, + CPUHP_AP_ARC_TIMER_STARTING = 128, + CPUHP_AP_REALTEK_TIMER_STARTING = 129, + CPUHP_AP_RISCV_TIMER_STARTING = 130, + CPUHP_AP_CLINT_TIMER_STARTING = 131, + CPUHP_AP_CSKY_TIMER_STARTING = 132, + CPUHP_AP_TI_GP_TIMER_STARTING = 133, + CPUHP_AP_HYPERV_TIMER_STARTING = 134, + CPUHP_AP_DUMMY_TIMER_STARTING = 135, + CPUHP_AP_ARM_XEN_STARTING = 136, + CPUHP_AP_ARM_XEN_RUNSTATE_STARTING = 137, + CPUHP_AP_ARM_CORESIGHT_STARTING = 138, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 139, + CPUHP_AP_ARM64_ISNDEP_STARTING = 140, + CPUHP_AP_SMPCFD_DYING = 141, + CPUHP_AP_HRTIMERS_DYING = 142, + CPUHP_AP_TICK_DYING = 143, + CPUHP_AP_X86_TBOOT_DYING = 144, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 145, + CPUHP_AP_ONLINE = 146, + CPUHP_TEARDOWN_CPU = 147, + CPUHP_AP_ONLINE_IDLE = 148, + CPUHP_AP_HYPERV_ONLINE = 149, + CPUHP_AP_KVM_ONLINE = 150, + CPUHP_AP_SCHED_WAIT_EMPTY = 151, + CPUHP_AP_SMPBOOT_THREADS = 152, + CPUHP_AP_IRQ_AFFINITY_ONLINE = 153, + CPUHP_AP_BLK_MQ_ONLINE = 154, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 155, + CPUHP_AP_X86_INTEL_EPB_ONLINE = 156, + CPUHP_AP_PERF_ONLINE = 157, + CPUHP_AP_PERF_X86_ONLINE = 158, + CPUHP_AP_PERF_X86_UNCORE_ONLINE = 159, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 160, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 161, + CPUHP_AP_PERF_S390_CF_ONLINE = 162, + CPUHP_AP_PERF_S390_SF_ONLINE = 163, + CPUHP_AP_PERF_ARM_CCI_ONLINE = 164, + CPUHP_AP_PERF_ARM_CCN_ONLINE = 165, + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE = 166, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 167, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 168, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 169, + CPUHP_AP_PERF_ARM_HISI_PA_ONLINE = 170, + CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE = 171, + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE = 172, + CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE = 173, + CPUHP_AP_PERF_ARM_L2X0_ONLINE = 174, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 175, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 176, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 177, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 178, + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE = 179, + CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE = 180, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 181, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 182, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 183, + CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 184, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 185, + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 186, + CPUHP_AP_PERF_CSKY_ONLINE = 187, + CPUHP_AP_TMIGR_ONLINE = 188, + CPUHP_AP_WATCHDOG_ONLINE = 189, + CPUHP_AP_WORKQUEUE_ONLINE = 190, + CPUHP_AP_RANDOM_ONLINE = 191, + CPUHP_AP_RCUTREE_ONLINE = 192, + CPUHP_AP_BASE_CACHEINFO_ONLINE = 193, + CPUHP_AP_ONLINE_DYN = 194, + CPUHP_AP_ONLINE_DYN_END = 234, + 
CPUHP_AP_X86_HPET_ONLINE = 235, + CPUHP_AP_X86_KVM_CLK_ONLINE = 236, + CPUHP_AP_ACTIVE = 237, + CPUHP_ONLINE = 238, +}; + +typedef unsigned int slab_flags_t; + +enum _slab_flag_bits { + _SLAB_CONSISTENCY_CHECKS = 0, + _SLAB_RED_ZONE = 1, + _SLAB_POISON = 2, + _SLAB_KMALLOC = 3, + _SLAB_HWCACHE_ALIGN = 4, + _SLAB_CACHE_DMA = 5, + _SLAB_CACHE_DMA32 = 6, + _SLAB_STORE_USER = 7, + _SLAB_PANIC = 8, + _SLAB_TYPESAFE_BY_RCU = 9, + _SLAB_TRACE = 10, + _SLAB_NOLEAKTRACE = 11, + _SLAB_NO_MERGE = 12, + _SLAB_ACCOUNT = 13, + _SLAB_KASAN = 14, + _SLAB_NO_USER_FLAGS = 15, + _SLAB_RECLAIM_ACCOUNT = 16, + _SLAB_OBJECT_POISON = 17, + _SLAB_CMPXCHG_DOUBLE = 18, + _SLAB_NO_OBJ_EXT = 19, + _SLAB_FLAGS_LAST_BIT = 20, +}; + +struct kmem_cache_args { + unsigned int align; + unsigned int useroffset; + unsigned int usersize; + unsigned int freeptr_offset; + bool use_freeptr_offset; + void (*ctor)(void *); +}; + +typedef u32 phandle; + +struct property; + +struct device_node { + const char *name; + phandle phandle; + const char *full_name; + struct fwnode_handle fwnode; + struct property *properties; + struct property *deadprops; + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; + struct kobject kobj; + long unsigned int _flags; + void *data; +}; + +struct property { + char *name; + int length; + void *value; + struct property *next; + struct bin_attribute attr; +}; + +struct cpu_operations { + int (*cpu_start)(unsigned int, struct task_struct *); + void (*cpu_stop)(); + int (*cpu_is_stopped)(unsigned int); +}; + +typedef long unsigned int irq_hw_number_t; + +enum system_states { + SYSTEM_BOOTING = 0, + SYSTEM_SCHEDULING = 1, + SYSTEM_FREEING_INITMEM = 2, + SYSTEM_RUNNING = 3, + SYSTEM_HALT = 4, + SYSTEM_POWER_OFF = 5, + SYSTEM_RESTART = 6, + SYSTEM_SUSPEND = 7, +}; + +typedef struct cpumask cpumask_var_t[1]; + +enum irqreturn { + IRQ_NONE = 0, + IRQ_HANDLED = 1, + IRQ_WAKE_THREAD = 2, +}; + +typedef enum irqreturn irqreturn_t; + +struct irq_desc; + +typedef void (*irq_flow_handler_t)(struct irq_desc *); + +struct msi_desc; + +struct irq_common_data { + unsigned int state_use_accessors; + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; + cpumask_var_t effective_affinity; + unsigned int ipi_offset; +}; + +struct irq_chip; + +struct irq_data { + u32 mask; + unsigned int irq; + irq_hw_number_t hwirq; + struct irq_common_data *common; + struct irq_chip *chip; + struct irq_domain *domain; + struct irq_data *parent_data; + void *chip_data; +}; + +struct irqstat; + +struct irqaction; + +struct irq_affinity_notify; + +struct proc_dir_entry; + +struct irq_desc { + struct irq_common_data irq_common_data; + struct irq_data irq_data; + struct irqstat *kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; + unsigned int wake_depth; + unsigned int tot_count; + unsigned int irq_count; + long unsigned int last_unhandled; + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; + const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; + long unsigned int threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; + unsigned int nr_actions; + unsigned int no_suspend_depth; + unsigned int cond_suspend_depth; + unsigned int force_resume_depth; + struct 
proc_dir_entry *dir; + struct callback_head rcu; + struct kobject kobj; + struct mutex request_mutex; + int parent_irq; + struct module *owner; + const char *name; + struct hlist_node resend_node; + long: 64; + long: 64; + long: 64; +}; + +enum { + IRQ_TYPE_NONE = 0, + IRQ_TYPE_EDGE_RISING = 1, + IRQ_TYPE_EDGE_FALLING = 2, + IRQ_TYPE_EDGE_BOTH = 3, + IRQ_TYPE_LEVEL_HIGH = 4, + IRQ_TYPE_LEVEL_LOW = 8, + IRQ_TYPE_LEVEL_MASK = 12, + IRQ_TYPE_SENSE_MASK = 15, + IRQ_TYPE_DEFAULT = 15, + IRQ_TYPE_PROBE = 16, + IRQ_LEVEL = 256, + IRQ_PER_CPU = 512, + IRQ_NOPROBE = 1024, + IRQ_NOREQUEST = 2048, + IRQ_NOAUTOEN = 4096, + IRQ_NO_BALANCING = 8192, + IRQ_MOVE_PCNTXT = 16384, + IRQ_NESTED_THREAD = 32768, + IRQ_NOTHREAD = 65536, + IRQ_PER_CPU_DEVID = 131072, + IRQ_IS_POLLED = 262144, + IRQ_DISABLE_UNLAZY = 524288, + IRQ_HIDDEN = 1048576, + IRQ_NO_DEBUG = 2097152, +}; + +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING = 0, + IRQCHIP_STATE_ACTIVE = 1, + IRQCHIP_STATE_MASKED = 2, + IRQCHIP_STATE_LINE_LEVEL = 3, +}; + +struct msi_msg; + +struct irq_chip { + const char *name; + unsigned int (*irq_startup)(struct irq_data *); + void (*irq_shutdown)(struct irq_data *); + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_ack)(struct irq_data *); + void (*irq_mask)(struct irq_data *); + void (*irq_mask_ack)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_eoi)(struct irq_data *); + int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); + int (*irq_retrigger)(struct irq_data *); + int (*irq_set_type)(struct irq_data *, unsigned int); + int (*irq_set_wake)(struct irq_data *, unsigned int); + void (*irq_bus_lock)(struct irq_data *); + void (*irq_bus_sync_unlock)(struct irq_data *); + void (*irq_suspend)(struct irq_data *); + void (*irq_resume)(struct irq_data *); + void (*irq_pm_shutdown)(struct irq_data *); + void (*irq_calc_mask)(struct irq_data *); + void (*irq_print_chip)(struct irq_data *, struct seq_file *); + int (*irq_request_resources)(struct irq_data *); + void (*irq_release_resources)(struct irq_data *); + void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); + void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); + int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); + int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); + int (*irq_set_vcpu_affinity)(struct irq_data *, void *); + void (*ipi_send_single)(struct irq_data *, unsigned int); + void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); + int (*irq_nmi_setup)(struct irq_data *); + void (*irq_nmi_teardown)(struct irq_data *); + long unsigned int flags; +}; + +struct irqstat { + unsigned int cnt; +}; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +struct irqaction { + irq_handler_t handler; + void *dev_id; + void *percpu_dev_id; + struct irqaction *next; + irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + long unsigned int thread_flags; + long unsigned int thread_mask; + const char *name; + struct proc_dir_entry *dir; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *); + void (*release)(struct kref *); +}; + +enum ipi_message_type { + IPI_RESCHEDULE = 0, + IPI_CALL_FUNC = 1, + IPI_CPU_STOP = 2, + IPI_CPU_CRASH_STOP = 3, + 
IPI_IRQ_WORK = 4, + IPI_TIMER = 5, + IPI_CPU_BACKTRACE = 6, + IPI_KGDB_ROUNDUP = 7, + IPI_MAX = 8, +}; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC = 0, + HRTIMER_BASE_REALTIME = 1, + HRTIMER_BASE_BOOTTIME = 2, + HRTIMER_BASE_TAI = 3, + HRTIMER_BASE_MONOTONIC_SOFT = 4, + HRTIMER_BASE_REALTIME_SOFT = 5, + HRTIMER_BASE_BOOTTIME_SOFT = 6, + HRTIMER_BASE_TAI_SOFT = 7, + HRTIMER_MAX_CLOCK_BASES = 8, +}; + +enum utf8_normalization { + UTF8_NFDI = 0, + UTF8_NFDICF = 1, + UTF8_NMAX = 2, +}; + +enum { + DQF_ROOT_SQUASH_B = 0, + DQF_SYS_FILE_B = 16, + DQF_PRIVATE = 17, +}; + +enum { + DQST_LOOKUPS = 0, + DQST_DROPS = 1, + DQST_READS = 2, + DQST_WRITES = 3, + DQST_CACHE_HITS = 4, + DQST_ALLOC_DQUOTS = 5, + DQST_FREE_DQUOTS = 6, + DQST_SYNCS = 7, + _DQST_DQSTAT_LAST = 8, +}; + +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_PAGEFAULT = 2, + SB_FREEZE_FS = 3, + SB_FREEZE_COMPLETE = 4, +}; + +enum sbi_ext_id { + SBI_EXT_BASE = 16, + SBI_EXT_TIME = 1414090053, + SBI_EXT_IPI = 7557193, + SBI_EXT_RFENCE = 1380339267, + SBI_EXT_HSM = 4739917, + SBI_EXT_SRST = 1397904212, + SBI_EXT_SUSP = 1398100816, + SBI_EXT_PMU = 5262677, + SBI_EXT_DBCN = 1145193294, + SBI_EXT_STA = 5461057, + SBI_EXT_NACL = 1312899916, + SBI_EXT_EXPERIMENTAL_START = 134217728, + SBI_EXT_EXPERIMENTAL_END = 150994943, + SBI_EXT_VENDOR_START = 150994944, + SBI_EXT_VENDOR_END = 167772159, +}; + +typedef __kernel_long_t __kernel_ptrdiff_t; + +typedef __u16 __le16; + +typedef __kernel_ptrdiff_t ptrdiff_t; + +typedef __s64 Elf64_Sxword; + +struct elf64_rela { + Elf64_Addr r_offset; + Elf64_Xword r_info; + Elf64_Sxword r_addend; +}; + +typedef struct elf64_rela Elf64_Rela; + +struct elf64_hdr { + unsigned char e_ident[16]; + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; + Elf64_Off e_phoff; + Elf64_Off e_shoff; + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +}; + +typedef struct elf64_hdr Elf64_Ehdr; + +struct used_bucket { + struct list_head head; + struct hlist_head *bucket; +}; + +struct relocation_head { + struct hlist_node node; + struct list_head *rel_entry; + void *location; +}; + +struct relocation_entry { + struct list_head head; + Elf64_Addr value; + unsigned int type; +}; + +struct relocation_handlers { + int (*reloc_handler)(struct module *, void *, Elf64_Addr); + int (*accumulate_handler)(struct module *, void *, long int); +}; + +struct got_entry { + long unsigned int symbol_addr; +}; + +struct plt_entry { + u32 insn_auipc; + u32 insn_ld; + u32 insn_jr; +}; + +struct reclaim_state { + long unsigned int reclaimed; +}; + +struct wait_queue_entry; + +typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); + +struct wait_queue_entry { + unsigned int flags; + void *private; + wait_queue_func_t func; + struct list_head entry; +}; + +typedef struct wait_queue_entry wait_queue_entry_t; + +enum lru_list { + LRU_INACTIVE_ANON = 0, + LRU_ACTIVE_ANON = 1, + LRU_INACTIVE_FILE = 2, + LRU_ACTIVE_FILE = 3, + LRU_UNEVICTABLE = 4, + NR_LRU_LISTS = 5, +}; + +struct wait_page_queue { + struct folio *folio; + int bit_nr; + wait_queue_entry_t wait; +}; + +struct readahead_control { + struct file *file; + struct address_space *mapping; + struct file_ra_state *ra; + long unsigned int _index; + unsigned int _nr_pages; + unsigned int _batch_count; + bool _workingset; + long unsigned int _pflags; +}; + +struct swap_cluster_info; + +struct 
percpu_cluster; + +struct swap_info_struct { + struct percpu_ref users; + long unsigned int flags; + short int prio; + struct plist_node list; + signed char type; + unsigned int max; + unsigned char *swap_map; + long unsigned int *zeromap; + struct swap_cluster_info *cluster_info; + struct list_head free_clusters; + struct list_head full_clusters; + struct list_head nonfull_clusters[1]; + struct list_head frag_clusters[1]; + unsigned int frag_cluster_nr[1]; + unsigned int lowest_bit; + unsigned int highest_bit; + unsigned int pages; + unsigned int inuse_pages; + unsigned int cluster_next; + unsigned int cluster_nr; + unsigned int *cluster_next_cpu; + struct percpu_cluster *percpu_cluster; + struct rb_root swap_extent_root; + struct block_device *bdev; + struct file *swap_file; + struct completion comp; + spinlock_t lock; + spinlock_t cont_lock; + struct work_struct discard_work; + struct work_struct reclaim_work; + struct list_head discard_clusters; + struct plist_node avail_lists[0]; +}; + +enum memcg_memory_event { + MEMCG_LOW = 0, + MEMCG_HIGH = 1, + MEMCG_MAX = 2, + MEMCG_OOM = 3, + MEMCG_OOM_KILL = 4, + MEMCG_OOM_GROUP_KILL = 5, + MEMCG_SWAP_HIGH = 6, + MEMCG_SWAP_MAX = 7, + MEMCG_SWAP_FAIL = 8, + MEMCG_NR_MEMORY_EVENTS = 9, +}; + +enum page_memcg_data_flags { + MEMCG_DATA_OBJEXTS = 1, + MEMCG_DATA_KMEM = 2, + __NR_MEMCG_DATA_FLAGS = 4, +}; + +struct swap_cluster_info { + spinlock_t lock; + u16 count; + u8 flags; + u8 order; + struct list_head list; +}; + +struct percpu_cluster { + unsigned int next[1]; +}; + +typedef int suspend_state_t; + +struct platform_suspend_ops { + int (*valid)(suspend_state_t); + int (*begin)(suspend_state_t); + int (*prepare)(); + int (*prepare_late)(); + int (*enter)(suspend_state_t); + void (*wake)(); + void (*finish)(); + bool (*suspend_again)(); + void (*end)(); + void (*recover)(); +}; + +enum sbi_ext_hsm_fid { + SBI_EXT_HSM_HART_START = 0, + SBI_EXT_HSM_HART_STOP = 1, + SBI_EXT_HSM_HART_STATUS = 2, + SBI_EXT_HSM_HART_SUSPEND = 3, +}; + +enum sbi_ext_susp_fid { + SBI_EXT_SUSP_SYSTEM_SUSPEND = 0, +}; + +enum sbi_ext_susp_sleep_type { + SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0, +}; + +struct sbiret { + long int error; + long int value; +}; + +struct suspend_context { + struct pt_regs regs; + long unsigned int envcfg; + long unsigned int tvec; + long unsigned int ie; + long unsigned int satp; +}; + +enum perf_branch_sample_type_shift { + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, + PERF_SAMPLE_BRANCH_COND_SHIFT = 10, + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, + PERF_SAMPLE_BRANCH_MAX_SHIFT = 20, +}; + +typedef bool (*stack_trace_consume_fn)(void *, long unsigned int); + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[0]; +}; + +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short int contexts; + bool contexts_maxed; +}; + +struct 
pollfd { + int fd; + short int events; + short int revents; +}; + +typedef struct { + void *lock; +} class_preempt_notrace_t; + +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +}; + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; + struct file *fa_file; + struct callback_head fa_rcu; +}; + +struct trace_event_functions; + +struct trace_event { + struct hlist_node node; + int type; + struct trace_event_functions *funcs; +}; + +struct trace_event_class; + +struct perf_event; + +struct trace_event_call { + struct list_head list; + struct trace_event_class *class; + union { + char *name; + struct tracepoint *tp; + }; + struct trace_event event; + char *print_fmt; + union { + void *module; + atomic_t refcnt; + }; + void *data; + int flags; + int perf_refcount; + struct hlist_head *perf_events; + struct bpf_prog_array *prog_array; + int (*perf_perm)(struct trace_event_call *, struct perf_event *); +}; + +struct trace_eval_map { + const char *system; + const char *eval_string; + long unsigned int eval_value; +}; + +struct ring_buffer_event { + u32 type_len: 5; + u32 time_delta: 27; + u32 array[0]; +}; + +struct seq_buf { + char *buffer; + size_t size; + size_t len; +}; + +struct trace_seq { + char buffer[8156]; + struct seq_buf seq; + size_t readpos; + int full; +}; + +struct perf_event_attr { + __u32 type; + __u32 size; + __u64 config; + union { + __u64 sample_period; + __u64 sample_freq; + }; + __u64 sample_type; + __u64 read_format; + __u64 disabled: 1; + __u64 inherit: 1; + __u64 pinned: 1; + __u64 exclusive: 1; + __u64 exclude_user: 1; + __u64 exclude_kernel: 1; + __u64 exclude_hv: 1; + __u64 exclude_idle: 1; + __u64 mmap: 1; + __u64 comm: 1; + __u64 freq: 1; + __u64 inherit_stat: 1; + __u64 enable_on_exec: 1; + __u64 task: 1; + __u64 watermark: 1; + __u64 precise_ip: 2; + __u64 mmap_data: 1; + __u64 sample_id_all: 1; + __u64 exclude_host: 1; + __u64 exclude_guest: 1; + __u64 exclude_callchain_kernel: 1; + __u64 exclude_callchain_user: 1; + __u64 mmap2: 1; + __u64 comm_exec: 1; + __u64 use_clockid: 1; + __u64 context_switch: 1; + __u64 write_backward: 1; + __u64 namespaces: 1; + __u64 ksymbol: 1; + __u64 bpf_event: 1; + __u64 aux_output: 1; + __u64 cgroup: 1; + __u64 text_poke: 1; + __u64 build_id: 1; + __u64 inherit_thread: 1; + __u64 remove_on_exec: 1; + __u64 sigtrap: 1; + __u64 __reserved_1: 26; + union { + __u32 wakeup_events; + __u32 wakeup_watermark; + }; + __u32 bp_type; + union { + __u64 bp_addr; + __u64 kprobe_func; + __u64 uprobe_path; + __u64 config1; + }; + union { + __u64 bp_len; + __u64 kprobe_addr; + __u64 probe_offset; + __u64 config2; + }; + __u64 branch_sample_type; + __u64 sample_regs_user; + __u32 sample_stack_user; + __s32 clockid; + __u64 sample_regs_intr; + __u32 aux_watermark; + __u16 sample_max_stack; + __u16 __reserved_2; + __u32 aux_sample_size; + union { + __u32 aux_action; + struct { + __u32 aux_start_paused: 1; + __u32 aux_pause: 1; + __u32 aux_resume: 1; + __u32 __reserved_3: 29; + }; + }; + __u64 sig_data; + __u64 config3; +}; + +union perf_mem_data_src { + __u64 val; + struct { + __u64 mem_op: 5; + __u64 mem_lvl: 14; + __u64 mem_snoop: 5; + __u64 mem_lock: 2; + __u64 mem_dtlb: 7; + __u64 mem_lvl_num: 4; + __u64 mem_remote: 1; + __u64 mem_snoopx: 2; + __u64 mem_blk: 3; + __u64 mem_hops: 3; + __u64 mem_rsvd: 18; + }; +}; + +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 
mispred: 1; + __u64 predicted: 1; + __u64 in_tx: 1; + __u64 abort: 1; + __u64 cycles: 16; + __u64 type: 4; + __u64 spec: 2; + __u64 new_type: 4; + __u64 priv: 3; + __u64 reserved: 31; +}; + +union perf_sample_weight { + __u64 full; + struct { + __u32 var1_dw; + __u16 var2_w; + __u16 var3_w; + }; +}; + +typedef struct { + local_t a; +} local64_t; + +struct irq_work { + struct __call_single_node node; + void (*func)(struct irq_work *); + struct rcuwait irqwait; +}; + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; +}; + +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, + BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, + BPF_LINK_TYPE_STRUCT_OPS = 9, + BPF_LINK_TYPE_NETFILTER = 10, + BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SOCKMAP = 14, + __MAX_BPF_LINK_TYPE = 15, +}; + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + union { + struct { + __u64 tp_name; + __u32 tp_name_len; + } raw_tracepoint; + struct { + __u32 attach_type; + __u32 target_obj_id; + __u32 target_btf_id; + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + } cgroup; + struct { + __u64 target_name; + __u32 target_name_len; + union { + struct { + __u32 map_id; + } map; + }; + union { + struct { + __u64 cgroup_id; + __u32 order; + } cgroup; + struct { + __u32 tid; + __u32 pid; + } task; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + struct { + __u32 map_id; + } struct_ops; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + struct { + __u64 addrs; + __u32 count; + __u32 flags; + __u64 missed; + __u64 cookies; + } kprobe_multi; + struct { + __u64 path; + __u64 offsets; + __u64 ref_ctr_offsets; + __u64 cookies; + __u32 path_size; + __u32 count; + __u32 flags; + __u32 pid; + } uprobe_multi; + struct { + __u32 type; + union { + struct { + __u64 file_name; + __u32 name_len; + __u32 offset; + __u64 cookie; + } uprobe; + struct { + __u64 func_name; + __u32 name_len; + __u32 offset; + __u64 addr; + __u64 missed; + __u64 cookie; + } kprobe; + struct { + __u64 tp_name; + __u32 name_len; + __u64 cookie; + } tracepoint; + struct { + __u64 config; + __u32 type; + __u64 cookie; + } event; + }; + } perf_event; + struct { + __u32 ifindex; + __u32 attach_type; + } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } sockmap; + }; +}; + +struct bpf_link_ops; + +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + bool sleepable; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *); + void (*dealloc)(struct bpf_link *); + void (*dealloc_deferred)(struct bpf_link *); + int (*detach)(struct bpf_link *); + int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); + void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); + int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); + int (*update_map)(struct bpf_link *, struct bpf_map *, struct bpf_map *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); +}; + +struct bpf_raw_tp_link { + struct bpf_link link; + struct 
bpf_raw_event_map *btp; + u64 cookie; +}; + +enum perf_event_state { + PERF_EVENT_STATE_DEAD = -4, + PERF_EVENT_STATE_EXIT = -3, + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +struct hw_perf_event_extra { + u64 config; + unsigned int reg; + int alloc; + int idx; +}; + +struct hw_perf_event { + union { + struct { + u64 config; + u64 last_tag; + long unsigned int config_base; + long unsigned int event_base; + int event_base_rdpmc; + int idx; + int last_cpu; + int flags; + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { + u64 aux_config; + unsigned int aux_paused; + }; + struct { + struct hrtimer hrtimer; + }; + struct { + struct list_head tp_list; + }; + struct { + u64 pwr_acc; + u64 ptsc; + }; + struct { + u8 iommu_bank; + u8 iommu_cntr; + u16 padding; + u64 conf; + u64 conf1; + }; + }; + struct task_struct *target; + void *addr_filters; + long unsigned int addr_filters_gen; + int state; + local64_t prev_count; + u64 sample_period; + union { + struct { + u64 last_period; + local64_t period_left; + }; + struct { + u64 saved_metric; + u64 saved_slots; + }; + }; + u64 interrupts_seq; + u64 interrupts; + u64 freq_time_stamp; + u64 freq_count_stamp; +}; + +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; +}; + +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); + +struct pmu; + +struct perf_event_pmu_context; + +struct perf_buffer; + +struct perf_addr_filter_range; + +struct event_filter; + +struct perf_cgroup; + +struct perf_event { + struct list_head event_entry; + struct list_head sibling_list; + struct list_head active_list; + struct rb_node group_node; + u64 group_index; + struct list_head migrate_entry; + struct hlist_node hlist_entry; + struct list_head active_entry; + int nr_siblings; + int event_caps; + int group_caps; + unsigned int group_generation; + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; + enum perf_event_state state; + unsigned int attach_state; + local64_t count; + atomic64_t child_count; + u64 total_time_enabled; + u64 total_time_running; + u64 tstamp; + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + struct perf_event_context *ctx; + struct perf_event_pmu_context *pmu_ctx; + atomic_long_t refcount; + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + int oncpu; + int cpu; + struct list_head owner_entry; + struct task_struct *owner; + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_buffer *rb; + struct list_head rb_entry; + long unsigned int rcu_batches; + int rcu_pending; + wait_queue_head_t waitq; + struct fasync_struct *fasync; + unsigned int pending_wakeup; + unsigned int pending_kill; + unsigned int pending_disable; + long unsigned int pending_addr; + struct irq_work pending_irq; + struct irq_work pending_disable_irq; + struct callback_head pending_task; + unsigned int pending_work; + struct rcuwait pending_work_wait; + atomic_t event_limit; + struct perf_addr_filters_head addr_filters; + struct perf_addr_filter_range *addr_filter_ranges; + long unsigned int addr_filters_gen; + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); + struct callback_head 
callback_head; + struct pid_namespace *ns; + u64 id; + atomic64_t lost_samples; + u64 (*clock)(); + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; + struct bpf_prog *prog; + u64 bpf_cookie; + struct trace_event_call *tp_event; + struct event_filter *filter; + struct perf_cgroup *cgrp; + struct list_head sb_list; + __u32 orig_type; +}; + +typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + long unsigned int pad; + }; + perf_copy_f copy; + void *data; + u32 size; +} __attribute__((packed)); + +struct perf_raw_record { + struct perf_raw_frag frag; + u32 size; +}; + +struct perf_branch_stack { + __u64 nr; + __u64 hw_idx; + struct perf_branch_entry entries[0]; +}; + +struct perf_cpu_pmu_context; + +struct perf_output_handle; + +struct pmu { + struct list_head entry; + struct module *module; + struct device *dev; + struct device *parent; + const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; + const char *name; + int type; + int capabilities; + unsigned int scope; + int *pmu_disable_count; + struct perf_cpu_pmu_context *cpu_pmu_context; + atomic_t exclusive_cnt; + int task_ctx_nr; + int hrtimer_interval_ms; + unsigned int nr_addr_filters; + void (*pmu_enable)(struct pmu *); + void (*pmu_disable)(struct pmu *); + int (*event_init)(struct perf_event *); + void (*event_mapped)(struct perf_event *, struct mm_struct *); + void (*event_unmapped)(struct perf_event *, struct mm_struct *); + int (*add)(struct perf_event *, int); + void (*del)(struct perf_event *, int); + void (*start)(struct perf_event *, int); + void (*stop)(struct perf_event *, int); + void (*read)(struct perf_event *); + void (*start_txn)(struct pmu *, unsigned int); + int (*commit_txn)(struct pmu *); + void (*cancel_txn)(struct pmu *); + int (*event_idx)(struct perf_event *); + void (*sched_task)(struct perf_event_pmu_context *, bool); + struct kmem_cache *task_ctx_cache; + void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); + void * (*setup_aux)(struct perf_event *, void **, int, bool); + void (*free_aux)(void *); + long int (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, long unsigned int); + int (*addr_filters_validate)(struct list_head *); + void (*addr_filters_sync)(struct perf_event *); + int (*aux_output_match)(struct perf_event *); + bool (*filter)(struct pmu *, int); + int (*check_period)(struct perf_event *, u64); +}; + +struct perf_event_pmu_context { + struct pmu *pmu; + struct perf_event_context *ctx; + struct list_head pmu_ctx_entry; + struct list_head pinned_active; + struct list_head flexible_active; + unsigned int embedded: 1; + unsigned int nr_events; + unsigned int nr_cgroups; + unsigned int nr_freq; + atomic_t refcount; + struct callback_head callback_head; + void *task_ctx_data; + int rotate_necessary; +}; + +struct perf_cpu_pmu_context { + struct perf_event_pmu_context epc; + struct perf_event_pmu_context *task_epc; + struct list_head sched_cb_entry; + int sched_cb_usage; + int active_oncpu; + int exclusive; + raw_spinlock_t hrtimer_lock; + struct hrtimer hrtimer; + ktime_t hrtimer_interval; + unsigned int hrtimer_active; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_buffer *rb; + long unsigned int wakeup; + long unsigned int size; + u64 aux_flags; + union { + void *addr; + long unsigned int head; + }; + int page; +}; + +struct 
perf_addr_filter_range { + long unsigned int start; + long unsigned int size; +}; + +struct perf_sample_data { + u64 sample_flags; + u64 period; + u64 dyn_size; + u64 type; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + u64 ip; + struct perf_callchain_entry *callchain; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; + union perf_sample_weight weight; + union perf_mem_data_src data_src; + u64 txn; + struct perf_regs regs_user; + struct perf_regs regs_intr; + u64 stack_user_size; + u64 stream_id; + u64 cgroup; + u64 addr; + u64 phys_addr; + u64 data_page_size; + u64 code_page_size; + u64 aux_size; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct perf_cgroup_info; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; +}; + +struct perf_cgroup_info { + u64 time; + u64 timestamp; + u64 timeoffset; + int active; +}; + +struct trace_entry { + short unsigned int type; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; + +struct trace_array; + +struct tracer; + +struct array_buffer; + +struct ring_buffer_iter; + +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + struct array_buffer *array_buffer; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter **buffer_iter; + long unsigned int iter_flags; + void *temp; + unsigned int temp_size; + char *fmt; + unsigned int fmt_size; + atomic_t wait_index; + struct trace_seq tmp_seq; + cpumask_var_t started; + bool closed; + bool snapshot; + struct trace_seq seq; + struct trace_entry *ent; + long unsigned int lost_events; + int leftover; + int ent_size; + int cpu; + u64 ts; + loff_t pos; + long int idx; +}; + +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, + TRACE_TYPE_NO_CONSUME = 3, +}; + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +enum trace_reg { + TRACE_REG_REGISTER = 0, + TRACE_REG_UNREGISTER = 1, + TRACE_REG_PERF_REGISTER = 2, + TRACE_REG_PERF_UNREGISTER = 3, + TRACE_REG_PERF_OPEN = 4, + TRACE_REG_PERF_CLOSE = 5, + TRACE_REG_PERF_ADD = 6, + TRACE_REG_PERF_DEL = 7, +}; + +struct trace_event_fields { + const char *type; + union { + struct { + const char *name; + const int size; + const int align; + const int is_signed; + const int filter_type; + const int len; + }; + int (*define_fields)(struct trace_event_call *); + }; +}; + +struct trace_event_class { + const char *system; + void *probe; + void *perf_probe; + int (*reg)(struct trace_event_call *, enum trace_reg, void *); + struct trace_event_fields *fields_array; + struct list_head * (*get_fields)(struct trace_event_call *); + struct list_head fields; + int (*raw_init)(struct trace_event_call *); +}; + +struct trace_buffer; + +struct trace_event_file; + +struct trace_event_buffer { + struct trace_buffer *buffer; + struct ring_buffer_event *event; + struct trace_event_file *trace_file; + void *entry; + unsigned int trace_ctx; + struct pt_regs *regs; +}; + +struct eventfs_inode; + +struct trace_subsystem_dir; + +struct trace_event_file { + struct list_head list; + struct trace_event_call *event_call; + struct event_filter *filter; + struct eventfs_inode *ei; + struct trace_array *tr; + struct trace_subsystem_dir 
*system; + struct list_head triggers; + long unsigned int flags; + refcount_t ref; + atomic_t sm_ref; + atomic_t tm_ref; +}; + +enum { + TRACE_EVENT_FL_CAP_ANY_BIT = 0, + TRACE_EVENT_FL_NO_SET_FILTER_BIT = 1, + TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 2, + TRACE_EVENT_FL_TRACEPOINT_BIT = 3, + TRACE_EVENT_FL_DYNAMIC_BIT = 4, + TRACE_EVENT_FL_KPROBE_BIT = 5, + TRACE_EVENT_FL_UPROBE_BIT = 6, + TRACE_EVENT_FL_EPROBE_BIT = 7, + TRACE_EVENT_FL_FPROBE_BIT = 8, + TRACE_EVENT_FL_CUSTOM_BIT = 9, +}; + +enum { + TRACE_EVENT_FL_CAP_ANY = 1, + TRACE_EVENT_FL_NO_SET_FILTER = 2, + TRACE_EVENT_FL_IGNORE_ENABLE = 4, + TRACE_EVENT_FL_TRACEPOINT = 8, + TRACE_EVENT_FL_DYNAMIC = 16, + TRACE_EVENT_FL_KPROBE = 32, + TRACE_EVENT_FL_UPROBE = 64, + TRACE_EVENT_FL_EPROBE = 128, + TRACE_EVENT_FL_FPROBE = 256, + TRACE_EVENT_FL_CUSTOM = 512, +}; + +enum { + EVENT_FILE_FL_ENABLED_BIT = 0, + EVENT_FILE_FL_RECORDED_CMD_BIT = 1, + EVENT_FILE_FL_RECORDED_TGID_BIT = 2, + EVENT_FILE_FL_FILTERED_BIT = 3, + EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, + EVENT_FILE_FL_SOFT_MODE_BIT = 5, + EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, + EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, + EVENT_FILE_FL_TRIGGER_COND_BIT = 8, + EVENT_FILE_FL_PID_FILTER_BIT = 9, + EVENT_FILE_FL_WAS_ENABLED_BIT = 10, + EVENT_FILE_FL_FREED_BIT = 11, +}; + +enum { + EVENT_FILE_FL_ENABLED = 1, + EVENT_FILE_FL_RECORDED_CMD = 2, + EVENT_FILE_FL_RECORDED_TGID = 4, + EVENT_FILE_FL_FILTERED = 8, + EVENT_FILE_FL_NO_SET_FILTER = 16, + EVENT_FILE_FL_SOFT_MODE = 32, + EVENT_FILE_FL_SOFT_DISABLED = 64, + EVENT_FILE_FL_TRIGGER_MODE = 128, + EVENT_FILE_FL_TRIGGER_COND = 256, + EVENT_FILE_FL_PID_FILTER = 512, + EVENT_FILE_FL_WAS_ENABLED = 1024, + EVENT_FILE_FL_FREED = 2048, +}; + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING = 1, + FILTER_DYN_STRING = 2, + FILTER_RDYN_STRING = 3, + FILTER_PTR_STRING = 4, + FILTER_TRACE_FN = 5, + FILTER_CPUMASK = 6, + FILTER_COMM = 7, + FILTER_CPU = 8, + FILTER_STACKTRACE = 9, +}; + +struct trace_event_raw_sbi_call { + struct trace_entry ent; + int ext; + int fid; + char __data[0]; +}; + +struct trace_event_raw_sbi_return { + struct trace_entry ent; + long int error; + long int value; + char __data[0]; +}; + +struct trace_event_data_offsets_sbi_call {}; + +struct trace_event_data_offsets_sbi_return {}; + +typedef void (*btf_trace_sbi_call)(void *, int, int); + +typedef void (*btf_trace_sbi_return)(void *, int, long int, long int); + +typedef __u32 __be32; + +enum { + IRQD_TRIGGER_MASK = 15, + IRQD_SETAFFINITY_PENDING = 256, + IRQD_ACTIVATED = 512, + IRQD_NO_BALANCING = 1024, + IRQD_PER_CPU = 2048, + IRQD_AFFINITY_SET = 4096, + IRQD_LEVEL = 8192, + IRQD_WAKEUP_STATE = 16384, + IRQD_MOVE_PCNTXT = 32768, + IRQD_IRQ_DISABLED = 65536, + IRQD_IRQ_MASKED = 131072, + IRQD_IRQ_INPROGRESS = 262144, + IRQD_WAKEUP_ARMED = 524288, + IRQD_FORWARDED_TO_VCPU = 1048576, + IRQD_AFFINITY_MANAGED = 2097152, + IRQD_IRQ_STARTED = 4194304, + IRQD_MANAGED_SHUTDOWN = 8388608, + IRQD_SINGLE_TARGET = 16777216, + IRQD_DEFAULT_TRIGGER_SET = 33554432, + IRQD_CAN_RESERVE = 67108864, + IRQD_HANDLE_ENFORCE_IRQCTX = 134217728, + IRQD_AFFINITY_ON_ACTIVATE = 268435456, + IRQD_IRQ_ENABLED_ON_SUSPEND = 536870912, + IRQD_RESEND_WHEN_IN_PROGRESS = 1073741824, +}; + +struct kimage_arch { + void *fdt; + long unsigned int fdt_addr; +}; + +typedef void (*riscv_kexec_method)(long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef long unsigned int kimage_entry_t; + +struct kexec_segment { + union { + void *buf; + void *kbuf; + }; + size_t bufsz; + long 
unsigned int mem; + size_t memsz; +}; + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + long unsigned int start; + struct page *control_code_page; + struct page *swap_page; + void *vmcoreinfo_data_copy; + long unsigned int nr_segments; + struct kexec_segment segment[16]; + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unusable_pages; + long unsigned int control_page; + unsigned int type: 1; + unsigned int preserve_context: 1; + unsigned int file_mode: 1; + struct kimage_arch arch; + void *elf_headers; + long unsigned int elf_headers_sz; + long unsigned int elf_load_addr; +}; + +typedef __be32 fdt32_t; + +struct fdt_header { + fdt32_t magic; + fdt32_t totalsize; + fdt32_t off_dt_struct; + fdt32_t off_dt_strings; + fdt32_t off_mem_rsvmap; + fdt32_t version; + fdt32_t last_comp_version; + fdt32_t boot_cpuid_phys; + fdt32_t size_dt_strings; + fdt32_t size_dt_struct; +}; + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP = 1, +}; + +typedef __u64 __be64; + +typedef __be64 fdt64_t; + +typedef u16 uint16_t; + +typedef u64 uint64_t; + +struct word_at_a_time { + const long unsigned int one_bits; + const long unsigned int high_bits; +}; + +typedef u8 uint8_t; + +enum { + ASSUME_PERFECT = 255, + ASSUME_VALID_DTB = 1, + ASSUME_VALID_INPUT = 2, + ASSUME_LATEST = 4, + ASSUME_NO_ROLLBACK = 8, + ASSUME_LIBFDT_ORDER = 16, + ASSUME_LIBFDT_FLAWLESS = 32, +}; + +struct fdt_reserve_entry { + fdt64_t address; + fdt64_t size; +}; + +struct fdt_node_header { + fdt32_t tag; + char name[0]; +}; + +struct fdt_property { + fdt32_t tag; + fdt32_t len; + fdt32_t nameoff; + char data[0]; +}; + +struct ptdesc { + long unsigned int __page_flags; + union { + struct callback_head pt_rcu_head; + struct list_head pt_list; + struct { + long unsigned int _pt_pad_1; + pgtable_t pmd_huge_pte; + }; + }; + long unsigned int __page_mapping; + union { + long unsigned int pt_index; + struct mm_struct *pt_mm; + atomic_t pt_frag_refcount; + }; + union { + long unsigned int _pt_pad_2; + spinlock_t ptl; + }; + unsigned int __page_type; + atomic_t __page_refcount; + long unsigned int pt_memcg_data; +}; + +enum pagetype { + PGTY_buddy = 240, + PGTY_offline = 241, + PGTY_table = 242, + PGTY_guard = 243, + PGTY_hugetlb = 244, + PGTY_slab = 245, + PGTY_zsmalloc = 246, + PGTY_unaccepted = 247, + PGTY_mapcount_underflow = 255, +}; + +typedef struct { + long unsigned int p4d; +} p4d_t; + +struct pt_alloc_ops { + pte_t * (*get_pte_virt)(phys_addr_t); + phys_addr_t (*alloc_pte)(uintptr_t); + pmd_t * (*get_pmd_virt)(phys_addr_t); + phys_addr_t (*alloc_pmd)(uintptr_t); + pud_t * (*get_pud_virt)(phys_addr_t); + phys_addr_t (*alloc_pud)(uintptr_t); + p4d_t * (*get_p4d_virt)(phys_addr_t); + phys_addr_t (*alloc_p4d)(uintptr_t); +}; + +enum memblock_flags { + MEMBLOCK_NONE = 0, + MEMBLOCK_HOTPLUG = 1, + MEMBLOCK_MIRROR = 2, + MEMBLOCK_NOMAP = 4, + MEMBLOCK_DRIVER_MANAGED = 8, + MEMBLOCK_RSRV_NOINIT = 16, +}; + +struct memblock_region { + phys_addr_t base; + phys_addr_t size; + enum memblock_flags flags; +}; + +struct memblock_type { + long unsigned int cnt; + long unsigned int max; + phys_addr_t total_size; + struct memblock_region *regions; + char *name; +}; + +struct memblock { + bool bottom_up; + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +}; + +struct io_tlb_area; + +struct io_tlb_slot; + +struct io_tlb_pool { + phys_addr_t start; + phys_addr_t end; + void *vaddr; + long unsigned int nslabs; + bool 
late_alloc; + unsigned int nareas; + unsigned int area_nslabs; + struct io_tlb_area *areas; + struct io_tlb_slot *slots; +}; + +struct io_tlb_mem { + struct io_tlb_pool defpool; + long unsigned int nslabs; + struct dentry *debugfs; + bool force_bounce; + bool for_alloc; + atomic_long_t total_used; + atomic_long_t used_hiwater; + atomic_long_t transient_nslabs; +}; + +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE = 0, + VERIFYING_FIRMWARE_SIGNATURE = 1, + VERIFYING_KEXEC_PE_SIGNATURE = 2, + VERIFYING_KEY_SIGNATURE = 3, + VERIFYING_KEY_SELF_SIGNATURE = 4, + VERIFYING_UNSPECIFIED_SIGNATURE = 5, + NR__KEY_BEING_USED_FOR = 6, +}; + +enum execmem_type { + EXECMEM_DEFAULT = 0, + EXECMEM_MODULE_TEXT = 0, + EXECMEM_KPROBES = 1, + EXECMEM_FTRACE = 2, + EXECMEM_BPF = 3, + EXECMEM_MODULE_DATA = 4, + EXECMEM_TYPE_MAX = 5, +}; + +enum execmem_range_flags { + EXECMEM_KASAN_SHADOW = 1, + EXECMEM_ROX_CACHE = 2, +}; + +struct execmem_range { + long unsigned int start; + long unsigned int end; + long unsigned int fallback_start; + long unsigned int fallback_end; + pgprot_t pgprot; + unsigned int alignment; + enum execmem_range_flags flags; +}; + +struct execmem_info { + struct execmem_range ranges[5]; +}; + +enum fixed_addresses { + FIX_HOLE = 0, + FIX_FDT_END = 1, + FIX_FDT = 1024, + FIX_PTE = 1025, + FIX_PMD = 1026, + FIX_PUD = 1027, + FIX_P4D = 1028, + FIX_TEXT_POKE1 = 1029, + FIX_TEXT_POKE0 = 1030, + FIX_EARLYCON_MEM_BASE = 1031, + __end_of_permanent_fixed_addresses = 1032, + FIX_BTMAP_END = 1032, + FIX_BTMAP_BEGIN = 1479, + __end_of_fixed_addresses = 1480, +}; + +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, + MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, + MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, +}; + +typedef __u16 __be16; + +typedef __u32 __wsum; + +struct rhashtable; + +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *, u32, u32); + +typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); + +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); + +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +struct bucket_table; + +struct rhashtable { + struct bucket_table *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +struct new_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; + char domainname[65]; +}; + +struct uts_namespace { + struct new_utsname name; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; +}; + +struct ref_tracker_dir {}; + +struct raw_notifier_head { + struct notifier_block *head; +}; + +struct prot_inuse; + +struct netns_core { + struct ctl_table_header *sysctl_hdr; + int sysctl_somaxconn; + int sysctl_optmem_max; + u8 sysctl_txrehash; + u8 sysctl_tstamp_allow_data; + struct prot_inuse *prot_inuse; + struct cpumask *rps_default_mask; +}; + +struct ipstats_mib; + +struct tcp_mib; + 
+struct linux_mib; + +struct udp_mib; + +struct icmp_mib; + +struct icmpmsg_mib; + +struct icmpv6_mib; + +struct icmpv6msg_mib; + +struct netns_mib { + struct ipstats_mib *ip_statistics; + struct ipstats_mib *ipv6_statistics; + struct tcp_mib *tcp_statistics; + struct linux_mib *net_statistics; + struct udp_mib *udp_statistics; + struct udp_mib *udp_stats_in6; + struct udp_mib *udplite_statistics; + struct udp_mib *udplite_stats_in6; + struct icmp_mib *icmp_statistics; + struct icmpmsg_mib *icmpmsg_statistics; + struct icmpv6_mib *icmpv6_statistics; + struct icmpv6msg_mib *icmpv6msg_statistics; + struct proc_dir_entry *proc_net_devsnmp6; +}; + +struct netns_packet { + struct mutex sklist_lock; + struct hlist_head sklist; +}; + +struct unix_table { + spinlock_t *locks; + struct hlist_head *buckets; +}; + +struct netns_unix { + struct unix_table table; + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block *head; +}; + +struct netns_nexthop { + struct rb_root rb_root; + struct hlist_head *devhash; + unsigned int seq; + u32 last_id_allocated; + struct blocking_notifier_head notifier_chain; +}; + +struct inet_hashinfo; + +struct inet_timewait_death_row { + refcount_t tw_refcount; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct inet_hashinfo *hashinfo; + int sysctl_max_tw_buckets; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct local_ports { + u32 range; + bool warned; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + +typedef struct { + u64 key[2]; +} siphash_key_t; + +struct udp_table; + +struct ipv4_devconf; + +struct ip_ra_chain; + +struct inet_peer_base; + +struct fqdir; + +struct tcp_congestion_ops; + +struct tcp_fastopen_context; + +struct fib_notifier_ops; + +struct netns_ipv4 { + __u8 __cacheline_group_begin__netns_ipv4_read_tx[0]; + u8 sysctl_tcp_early_retrans; + u8 sysctl_tcp_tso_win_divisor; + u8 sysctl_tcp_tso_rtt_log; + u8 sysctl_tcp_autocorking; + int sysctl_tcp_min_snd_mss; + unsigned int sysctl_tcp_notsent_lowat; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_min_rtt_wlen; + int sysctl_tcp_wmem[3]; + u8 sysctl_ip_fwd_use_pmtu; + __u8 __cacheline_group_end__netns_ipv4_read_tx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_txrx[0]; + u8 sysctl_tcp_moderate_rcvbuf; + __u8 __cacheline_group_end__netns_ipv4_read_txrx[0]; + __u8 __cacheline_group_begin__netns_ipv4_read_rx[0]; + u8 sysctl_ip_early_demux; + u8 sysctl_tcp_early_demux; + u8 sysctl_tcp_l3mdev_accept; + int sysctl_tcp_reordering; + int sysctl_tcp_rmem[3]; + __u8 __cacheline_group_end__netns_ipv4_read_rx[0]; + long: 64; + struct inet_timewait_death_row tcp_death_row; + struct udp_table *udp_table; + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; + struct ip_ra_chain *ra_chain; + struct mutex ra_mutex; + bool fib_has_custom_local_routes; + bool fib_offload_disabled; + u8 sysctl_tcp_shrink_window; + struct hlist_head *fib_table_hash; + struct sock *fibnl; + struct sock *mc_autojoin_sk; + struct inet_peer_base *peers; + struct fqdir *fqdir; + u8 sysctl_icmp_echo_ignore_all; + u8 sysctl_icmp_echo_enable_probe; + u8 sysctl_icmp_echo_ignore_broadcasts; + u8 sysctl_icmp_ignore_bogus_error_responses; + u8 
sysctl_icmp_errors_use_inbound_ifaddr; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + int sysctl_icmp_msgs_per_sec; + int sysctl_icmp_msgs_burst; + atomic_t icmp_global_credit; + u32 icmp_global_stamp; + u32 ip_rt_min_pmtu; + int ip_rt_mtu_expires; + int ip_rt_min_advmss; + struct local_ports ip_local_ports; + u8 sysctl_tcp_ecn; + u8 sysctl_tcp_ecn_fallback; + u8 sysctl_ip_default_ttl; + u8 sysctl_ip_no_pmtu_disc; + u8 sysctl_ip_fwd_update_priority; + u8 sysctl_ip_nonlocal_bind; + u8 sysctl_ip_autobind_reuse; + u8 sysctl_ip_dynaddr; + u8 sysctl_udp_early_demux; + u8 sysctl_nexthop_compat_mode; + u8 sysctl_fwmark_reflect; + u8 sysctl_tcp_fwmark_accept; + u8 sysctl_tcp_mtu_probing; + int sysctl_tcp_mtu_probe_floor; + int sysctl_tcp_base_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; + int sysctl_tcp_keepalive_time; + int sysctl_tcp_keepalive_intvl; + u8 sysctl_tcp_keepalive_probes; + u8 sysctl_tcp_syn_retries; + u8 sysctl_tcp_synack_retries; + u8 sysctl_tcp_syncookies; + u8 sysctl_tcp_migrate_req; + u8 sysctl_tcp_comp_sack_nr; + u8 sysctl_tcp_backlog_ack_defer; + u8 sysctl_tcp_pingpong_thresh; + u8 sysctl_tcp_retries1; + u8 sysctl_tcp_retries2; + u8 sysctl_tcp_orphan_retries; + u8 sysctl_tcp_tw_reuse; + int sysctl_tcp_fin_timeout; + u8 sysctl_tcp_sack; + u8 sysctl_tcp_window_scaling; + u8 sysctl_tcp_timestamps; + int sysctl_tcp_rto_min_us; + u8 sysctl_tcp_recovery; + u8 sysctl_tcp_thin_linear_timeouts; + u8 sysctl_tcp_slow_start_after_idle; + u8 sysctl_tcp_retrans_collapse; + u8 sysctl_tcp_stdurg; + u8 sysctl_tcp_rfc1337; + u8 sysctl_tcp_abort_on_overflow; + u8 sysctl_tcp_fack; + int sysctl_tcp_max_reordering; + int sysctl_tcp_adv_win_scale; + u8 sysctl_tcp_dsack; + u8 sysctl_tcp_app_win; + u8 sysctl_tcp_frto; + u8 sysctl_tcp_nometrics_save; + u8 sysctl_tcp_no_ssthresh_metrics_save; + u8 sysctl_tcp_workaround_signed_windows; + int sysctl_tcp_challenge_ack_limit; + u8 sysctl_tcp_min_tso_segs; + u8 sysctl_tcp_reflect_tos; + int sysctl_tcp_invalid_ratelimit; + int sysctl_tcp_pacing_ss_ratio; + int sysctl_tcp_pacing_ca_ratio; + unsigned int sysctl_tcp_child_ehash_entries; + long unsigned int sysctl_tcp_comp_sack_delay_ns; + long unsigned int sysctl_tcp_comp_sack_slack_ns; + int sysctl_max_syn_backlog; + int sysctl_tcp_fastopen; + const struct tcp_congestion_ops *tcp_congestion_control; + struct tcp_fastopen_context *tcp_fastopen_ctx; + unsigned int sysctl_tcp_fastopen_blackhole_timeout; + atomic_t tfo_active_disable_times; + long unsigned int tfo_active_disable_stamp; + u32 tcp_challenge_timestamp; + u32 tcp_challenge_count; + u8 sysctl_tcp_plb_enabled; + u8 sysctl_tcp_plb_idle_rehash_rounds; + u8 sysctl_tcp_plb_rehash_rounds; + u8 sysctl_tcp_plb_suspend_rto_sec; + int sysctl_tcp_plb_cong_thresh; + int sysctl_udp_wmem_min; + int sysctl_udp_rmem_min; + u8 sysctl_fib_notify_on_flag_change; + u8 sysctl_tcp_syn_linear_timeouts; + u8 sysctl_igmp_llm_reports; + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; + atomic_t dev_addr_genid; + unsigned int sysctl_udp_child_hash_entries; + long unsigned int *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; + struct fib_notifier_ops *notifier_ops; + unsigned int fib_seq; + struct fib_notifier_ops *ipmr_notifier_ops; + unsigned int ipmr_seq; + atomic_t rt_genid; + siphash_key_t ip_id_key; + struct hlist_head *inet_addr_lst; + struct delayed_work addr_chk_work; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct 
dst_entry; + +struct sk_buff; + +struct neighbour; + +struct dst_ops { + short unsigned int family; + unsigned int gc_thresh; + void (*gc)(struct dst_ops *); + struct dst_entry * (*check)(struct dst_entry *, __u32); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); + void (*destroy)(struct dst_entry *); + void (*ifdown)(struct dst_entry *, struct net_device *); + void (*negative_advice)(struct sock *, struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); + void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); + int (*local_out)(struct net *, struct sock *, struct sk_buff *); + struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); + void (*confirm_neigh)(const struct dst_entry *, const void *); + struct kmem_cache *kmem_cachep; + struct percpu_counter pcpuc_entries; + long: 64; + long: 64; + long: 64; +}; + +struct netns_sysctl_ipv6 { + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; + int flush_delay; + int ip6_rt_max_size; + int ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int ip6_rt_min_advmss; + u32 multipath_hash_fields; + u8 multipath_hash_policy; + u8 bindv6only; + u8 flowlabel_consistency; + u8 auto_flowlabels; + int icmpv6_time; + u8 icmpv6_echo_ignore_all; + u8 icmpv6_echo_ignore_multicast; + u8 icmpv6_echo_ignore_anycast; + long unsigned int icmpv6_ratemask[4]; + long unsigned int *icmpv6_ratemask_ptr; + u8 anycast_src_echo_reply; + u8 ip_nonlocal_bind; + u8 fwmark_reflect; + u8 flowlabel_state_ranges; + int idgen_retries; + int idgen_delay; + int flowlabel_reflect; + int max_dst_opts_cnt; + int max_hbh_opts_cnt; + int max_dst_opts_len; + int max_hbh_opts_len; + int seg6_flowlabel; + u32 ioam6_id; + u64 ioam6_id_wide; + u8 skip_notify_on_dev_down; + u8 fib_notify_on_flag_change; + u8 icmpv6_error_anycast_as_unicast; +}; + +struct ipv6_devconf; + +struct fib6_info; + +struct rt6_info; + +struct rt6_statistics; + +struct fib6_table; + +struct seg6_pernet_data; + +struct ioam6_pernet_data; + +struct netns_ipv6 { + struct dst_ops ip6_dst_ops; + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; + struct fqdir *fqdir; + struct fib6_info *fib6_null_entry; + struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; + atomic_t ip6_rt_gc_expire; + long unsigned int ip6_rt_last_gc; + unsigned char flowlabel_has_excl; + struct sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; + struct sock *mc_autojoin_sk; + struct hlist_head *inet6_addr_lst; + spinlock_t addrconf_hash_lock; + struct delayed_work addr_chk_work; + atomic_t dev_addr_genid; + atomic_t fib6_sernum; + struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; + struct fib_notifier_ops *ip6mr_notifier_ops; + unsigned int ipmr_seq; + struct { + struct hlist_head head; + spinlock_t lock; + u32 seq; + } ip6addrlbl_table; + 
struct ioam6_pernet_data *ioam6_data; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct nf_logger; + +struct nf_hook_entries; + +struct netns_nf { + struct proc_dir_entry *proc_netfilter; + const struct nf_logger *nf_loggers[11]; + struct ctl_table_header *nf_log_dir_header; + struct nf_hook_entries *hooks_ipv4[5]; + struct nf_hook_entries *hooks_ipv6[5]; + struct nf_hook_entries *hooks_bridge[5]; + unsigned int defrag_ipv4_users; + unsigned int defrag_ipv6_users; +}; + +struct nf_ct_event_notifier; + +struct nf_generic_net { + unsigned int timeout; +}; + +struct nf_tcp_net { + unsigned int timeouts[14]; + u8 tcp_loose; + u8 tcp_be_liberal; + u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; +}; + +struct nf_udp_net { + unsigned int timeouts[2]; +}; + +struct nf_icmp_net { + unsigned int timeout; +}; + +struct nf_ip_net { + struct nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; +}; + +struct ip_conntrack_stat; + +struct netns_ct { + u8 sysctl_log_invalid; + u8 sysctl_events; + u8 sysctl_acct; + u8 sysctl_tstamp; + u8 sysctl_checksum; + struct ip_conntrack_stat *stat; + struct nf_ct_event_notifier *nf_conntrack_event_cb; + struct nf_ip_net nf_ct_proto; +}; + +struct netns_nftables { + u8 gencursor; +}; + +struct netns_bpf { + struct bpf_prog_array *run_array[2]; + struct bpf_prog *progs[2]; + struct list_head links[2]; +}; + +struct uevent_sock; + +struct net_generic; + +struct net { + refcount_t passive; + spinlock_t rules_mod_lock; + unsigned int dev_base_seq; + u32 ifindex; + spinlock_t nsid_lock; + atomic_t fnhe_genid; + struct list_head list; + struct list_head exit_list; + struct llist_node cleanup_list; + struct key_tag *key_domain; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct idr netns_ids; + struct ns_common ns; + struct ref_tracker_dir refcnt_tracker; + struct ref_tracker_dir notrefcnt_tracker; + struct list_head dev_base_head; + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct ctl_table_set sysctls; + struct sock *rtnl; + struct sock *genl_sock; + struct uevent_sock *uevent_sock; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; + struct xarray dev_by_index; + struct raw_notifier_head netdev_chain; + u32 hash_mix; + struct net_device *loopback_dev; + struct list_head rules_ops; + struct netns_core core; + struct netns_mib mib; + struct netns_packet packet; + struct netns_unix unx; + struct netns_nexthop nexthop; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct netns_ipv4 ipv4; + struct netns_ipv6 ipv6; + struct netns_nf nf; + struct netns_ct ct; + struct netns_nftables nft; + struct net_generic *gen; + struct netns_bpf bpf; + u64 net_cookie; + struct sock *diag_nlsk; + long: 64; + long: 64; +}; + +enum { + HI_SOFTIRQ = 0, + TIMER_SOFTIRQ = 1, + NET_TX_SOFTIRQ = 2, + NET_RX_SOFTIRQ = 3, + BLOCK_SOFTIRQ = 4, + IRQ_POLL_SOFTIRQ = 5, + TASKLET_SOFTIRQ = 6, + SCHED_SOFTIRQ = 7, + HRTIMER_SOFTIRQ = 8, + RCU_SOFTIRQ = 9, + NR_SOFTIRQS = 10, +}; + +typedef unsigned int sk_buff_data_t; + +struct skb_ext; + +struct sk_buff { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + union { + struct net_device *dev; + long unsigned int dev_scratch; + }; + }; + struct rb_node rbnode; + struct list_head list; + struct llist_node ll_node; + }; + struct sock *sk; + union { + ktime_t tstamp; + u64 skb_mstamp_ns; + }; + char cb[48]; + union { + struct { + long unsigned int 
_skb_refdst; + void (*destructor)(struct sk_buff *); + }; + struct list_head tcp_tsorted_anchor; + long unsigned int _sk_redir; + }; + long unsigned int _nfct; + unsigned int len; + unsigned int data_len; + __u16 mac_len; + __u16 hdr_len; + __u16 queue_mapping; + __u8 __cloned_offset[0]; + __u8 cloned: 1; + __u8 nohdr: 1; + __u8 fclone: 2; + __u8 peeked: 1; + __u8 head_frag: 1; + __u8 pfmemalloc: 1; + __u8 pp_recycle: 1; + __u8 active_extensions; + union { + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + }; + struct { + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 dst_pending_confirm: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 __mono_tc_offset[0]; + __u8 tstamp_type: 2; + __u8 tc_at_ingress: 1; + __u8 tc_skip_classify: 1; + __u8 remcsum_offload: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 inner_protocol_type: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 ndisc_nodetype: 2; + __u8 nf_trace: 1; + __u8 redirected: 1; + __u8 slow_gro: 1; + __u8 unreadable: 1; + __u16 tc_index; + u16 alloc_cpu; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + union { + u32 vlan_all; + struct { + __be16 vlan_proto; + __u16 vlan_tci; + }; + }; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + } headers; + }; + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head; + unsigned char *data; + unsigned int truesize; + refcount_t users; + struct skb_ext *extensions; +}; + +struct in6_addr { + union { + __u8 u6_addr8[16]; + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + } in6_u; +}; + +struct ipstats_mib { + u64 mibs[38]; + struct u64_stats_sync syncp; +}; + +struct icmp_mib { + long unsigned int mibs[30]; +}; + +struct icmpmsg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6_mib { + long unsigned int mibs[7]; +}; + 
+struct icmpv6msg_mib { + atomic_long_t mibs[512]; +}; + +struct tcp_mib { + long unsigned int mibs[16]; +}; + +struct udp_mib { + long unsigned int mibs[10]; +}; + +struct linux_mib { + long unsigned int mibs[132]; +}; + +struct inet_frags; + +struct fqdir { + long int high_thresh; + long int low_thresh; + int timeout; + int max_dist; + struct inet_frags *f; + struct net *net; + bool dead; + long: 64; + long: 64; + struct rhashtable rhashtable; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + atomic_long_t mem; + struct work_struct destroy_work; + struct llist_node free_list; + long: 64; + long: 64; +}; + +struct inet_frag_queue; + +struct inet_frags { + unsigned int qsize; + void (*constructor)(struct inet_frag_queue *, const void *); + void (*destructor)(struct inet_frag_queue *); + void (*frag_expire)(struct timer_list *); + struct kmem_cache *frags_cachep; + const char *frags_cache_name; + struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; +}; + +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; + +struct inet_frag_queue { + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; + struct timer_list timer; + spinlock_t lock; + refcount_t refcnt; + struct rb_root rb_fragments; + struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; + ktime_t stamp; + int len; + int meat; + u8 tstamp_type; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; + struct callback_head rcu; +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, +}; + +struct ack_sample; + +struct rate_sample; + +union tcp_cc_info; + +struct tcp_congestion_ops { + u32 (*ssthresh)(struct sock *); + void (*cong_avoid)(struct sock *, u32, u32); + void (*set_state)(struct sock *, u8); + void (*cwnd_event)(struct sock *, enum tcp_ca_event); + void (*in_ack_event)(struct sock *, u32); + void (*pkts_acked)(struct sock *, const struct ack_sample *); + u32 (*min_tso_segs)(struct sock *); + void (*cong_control)(struct sock *, u32, int, const struct rate_sample *); + u32 (*undo_cwnd)(struct sock *); + u32 (*sndbuf_expand)(struct sock *); + size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); + char name[16]; + struct module *owner; + struct list_head list; + u32 key; + u32 flags; + void (*init)(struct sock *); + void (*release)(struct sock *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef struct { + atomic_t refcnt; +} rcuref_t; + +typedef struct {} netdevice_tracker; + +struct uncached_list; + +struct lwtunnel_state; + +struct dst_entry { + struct net_device *dev; + struct dst_ops *ops; + long unsigned int _metrics; + long unsigned int expires; + void *__pad1; + int (*input)(struct sk_buff *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + short unsigned int flags; + short int obsolete; + short unsigned int header_len; + short unsigned int trailer_len; + rcuref_t __rcuref; + int __use; + long unsigned int lastuse; + struct callback_head callback_head; + short int error; + short int __pad; + __u32 tclassid; + netdevice_tracker dev_tracker; + struct list_head rt_uncached; + struct uncached_list *rt_uncached_list; + struct 
lwtunnel_state *lwtstate; +}; + +enum nf_log_type { + NF_LOG_TYPE_LOG = 0, + NF_LOG_TYPE_ULOG = 1, + NF_LOG_TYPE_MAX = 2, +}; + +typedef u8 u_int8_t; + +struct nf_loginfo; + +typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); + +struct nf_logger { + char *name; + enum nf_log_type type; + nf_logfn *logfn; + struct module *me; +}; + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int insert; + unsigned int insert_failed; + unsigned int clash_resolve; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; + unsigned int chaintoolong; +}; + +enum { + TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = 1, + TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = 2, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM = 4, + TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT = 8, + TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM = 16, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT = 32, + __TCA_FLOWER_KEY_FLAGS_MAX = 33, +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL = 0, + FLOW_DISSECTOR_KEY_BASIC = 1, + FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, + FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, + FLOW_DISSECTOR_KEY_PORTS = 4, + FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, + FLOW_DISSECTOR_KEY_ICMP = 6, + FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, + FLOW_DISSECTOR_KEY_TIPC = 8, + FLOW_DISSECTOR_KEY_ARP = 9, + FLOW_DISSECTOR_KEY_VLAN = 10, + FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, + FLOW_DISSECTOR_KEY_GRE_KEYID = 12, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, + FLOW_DISSECTOR_KEY_ENC_KEYID = 14, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, + FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, + FLOW_DISSECTOR_KEY_ENC_PORTS = 18, + FLOW_DISSECTOR_KEY_MPLS = 19, + FLOW_DISSECTOR_KEY_TCP = 20, + FLOW_DISSECTOR_KEY_IP = 21, + FLOW_DISSECTOR_KEY_CVLAN = 22, + FLOW_DISSECTOR_KEY_ENC_IP = 23, + FLOW_DISSECTOR_KEY_ENC_OPTS = 24, + FLOW_DISSECTOR_KEY_META = 25, + FLOW_DISSECTOR_KEY_CT = 26, + FLOW_DISSECTOR_KEY_HASH = 27, + FLOW_DISSECTOR_KEY_NUM_OF_VLANS = 28, + FLOW_DISSECTOR_KEY_PPPOE = 29, + FLOW_DISSECTOR_KEY_L2TPV3 = 30, + FLOW_DISSECTOR_KEY_CFM = 31, + FLOW_DISSECTOR_KEY_IPSEC = 32, + FLOW_DISSECTOR_KEY_MAX = 33, +}; + +struct skb_ext { + refcount_t refcnt; + u8 offset[1]; + u8 chunks; + long: 0; + char data[0]; +}; + +enum skb_ext_id { + SKB_EXT_BRIDGE_NF = 0, + SKB_EXT_NUM = 1, +}; + +enum { + TASKSTATS_CMD_UNSPEC = 0, + TASKSTATS_CMD_GET = 1, + TASKSTATS_CMD_NEW = 2, + __TASKSTATS_CMD_MAX = 3, +}; + +enum cpu_usage_stat { + CPUTIME_USER = 0, + CPUTIME_NICE = 1, + CPUTIME_SYSTEM = 2, + CPUTIME_SOFTIRQ = 3, + CPUTIME_IRQ = 4, + CPUTIME_IDLE = 5, + CPUTIME_IOWAIT = 6, + CPUTIME_STEAL = 7, + CPUTIME_GUEST = 8, + CPUTIME_GUEST_NICE = 9, + NR_STATS = 10, +}; + +enum cgroup_bpf_attach_type { + CGROUP_BPF_ATTACH_TYPE_INVALID = -1, + CGROUP_INET_INGRESS = 0, + CGROUP_INET_EGRESS = 1, + CGROUP_INET_SOCK_CREATE = 2, + CGROUP_SOCK_OPS = 3, + CGROUP_DEVICE = 4, + CGROUP_INET4_BIND = 5, + CGROUP_INET6_BIND = 6, + CGROUP_INET4_CONNECT = 7, + CGROUP_INET6_CONNECT = 8, + CGROUP_UNIX_CONNECT = 9, + CGROUP_INET4_POST_BIND = 10, + CGROUP_INET6_POST_BIND = 11, + CGROUP_UDP4_SENDMSG = 12, + CGROUP_UDP6_SENDMSG = 13, + CGROUP_UNIX_SENDMSG = 14, + CGROUP_SYSCTL = 15, + CGROUP_UDP4_RECVMSG = 16, + CGROUP_UDP6_RECVMSG = 17, + CGROUP_UNIX_RECVMSG = 18, + CGROUP_GETSOCKOPT = 19, + CGROUP_SETSOCKOPT = 20, + 
CGROUP_INET4_GETPEERNAME = 21, + CGROUP_INET6_GETPEERNAME = 22, + CGROUP_UNIX_GETPEERNAME = 23, + CGROUP_INET4_GETSOCKNAME = 24, + CGROUP_INET6_GETSOCKNAME = 25, + CGROUP_UNIX_GETSOCKNAME = 26, + CGROUP_INET_SOCK_RELEASE = 27, + CGROUP_LSM_START = 28, + CGROUP_LSM_END = 27, + MAX_CGROUP_BPF_ATTACH_TYPE = 28, +}; + +enum psi_task_count { + NR_IOWAIT = 0, + NR_MEMSTALL = 1, + NR_RUNNING = 2, + NR_MEMSTALL_RUNNING = 3, + NR_PSI_TASK_COUNTS = 4, +}; + +enum psi_res { + PSI_IO = 0, + PSI_MEM = 1, + PSI_CPU = 2, + NR_PSI_RESOURCES = 3, +}; + +enum psi_states { + PSI_IO_SOME = 0, + PSI_IO_FULL = 1, + PSI_MEM_SOME = 2, + PSI_MEM_FULL = 3, + PSI_CPU_SOME = 4, + PSI_CPU_FULL = 5, + PSI_NONIDLE = 6, + NR_PSI_STATES = 7, +}; + +enum psi_aggregators { + PSI_AVGS = 0, + PSI_POLL = 1, + NR_PSI_AGGREGATORS = 2, +}; + +enum cgroup_subsys_id { + cpuset_cgrp_id = 0, + cpu_cgrp_id = 1, + cpuacct_cgrp_id = 2, + io_cgrp_id = 3, + memory_cgrp_id = 4, + devices_cgrp_id = 5, + freezer_cgrp_id = 6, + net_cls_cgrp_id = 7, + perf_event_cgrp_id = 8, + pids_cgrp_id = 9, + rdma_cgrp_id = 10, + misc_cgrp_id = 11, + debug_cgrp_id = 12, + CGROUP_SUBSYS_COUNT = 13, +}; + +enum wb_stat_item { + WB_RECLAIMABLE = 0, + WB_WRITEBACK = 1, + WB_DIRTIED = 2, + WB_WRITTEN = 3, + NR_WB_STAT_ITEMS = 4, +}; + +typedef phys_addr_t resource_size_t; + +typedef int (*cmp_func_t)(const void *, const void *); + +struct __riscv_f_ext_state { + __u32 f[32]; + __u32 fcsr; +}; + +struct __riscv_q_ext_state { + __u64 f[64]; + __u32 fcsr; + __u32 reserved[3]; +}; + +union __riscv_fp_state { + struct __riscv_f_ext_state f; + struct __riscv_d_ext_state d; + struct __riscv_q_ext_state q; +}; + +struct preempt_notifier; + +struct preempt_ops { + void (*sched_in)(struct preempt_notifier *, int); + void (*sched_out)(struct preempt_notifier *, struct task_struct *); +}; + +struct preempt_notifier { + struct hlist_node link; + struct preempt_ops *ops; +}; + +typedef struct mutex *class_mutex_t; + +typedef u64 gpa_t; + +typedef u64 gfn_t; + +typedef u64 hpa_t; + +typedef u64 hfn_t; + +typedef hfn_t kvm_pfn_t; + +struct kvm_memory_slot; + +struct gfn_to_hva_cache { + u64 generation; + gpa_t gpa; + long unsigned int hva; + long unsigned int len; + struct kvm_memory_slot *memslot; +}; + +struct interval_tree_node { + struct rb_node rb; + long unsigned int start; + long unsigned int last; + long unsigned int __subtree_last; +}; + +struct kvm_arch_memory_slot {}; + +struct kvm_memory_slot { + struct hlist_node id_node[2]; + struct interval_tree_node hva_node[2]; + struct rb_node gfn_node[2]; + gfn_t base_gfn; + long unsigned int npages; + long unsigned int *dirty_bitmap; + struct kvm_arch_memory_slot arch; + long unsigned int userspace_addr; + u32 flags; + short int id; + u16 as_id; +}; + +struct kvm_memslots { + u64 generation; + atomic_long_t last_used_slot; + struct rb_root_cached hva_tree; + struct rb_root gfn_tree; + struct hlist_head id_hash[128]; + int node_idx; +}; + +struct kvm_vm_stat_generic { + u64 remote_tlb_flush; + u64 remote_tlb_flush_requests; +}; + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; +}; + +struct kvm_vmid { + long unsigned int vmid_version; + long unsigned int vmid; +}; + +struct kvm_guest_timer { + u32 nsec_mult; + u32 nsec_shift; + u64 time_delta; +}; + +struct kvm_aia { + bool in_kernel; + bool initialized; + u32 mode; + u32 nr_ids; + u32 nr_sources; + u32 nr_group_bits; + u32 nr_group_shift; + u32 nr_hart_bits; + u32 nr_guest_bits; + gpa_t aplic_addr; + void *aplic_state; +}; + +struct kvm_arch { + struct kvm_vmid 
vmid; + pgd_t *pgd; + phys_addr_t pgd_phys; + struct kvm_guest_timer timer; + struct kvm_aia aia; +}; + +struct mmu_notifier_ops; + +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; + struct mm_struct *mm; + struct callback_head rcu; + unsigned int users; +}; + +struct kvm_io_bus; + +struct kvm_coalesced_mmio_ring; + +struct kvm_irq_routing_table; + +struct kvm_stat_data; + +struct kvm { + spinlock_t mmu_lock; + struct mutex slots_lock; + struct mutex slots_arch_lock; + struct mm_struct *mm; + long unsigned int nr_memslot_pages; + struct kvm_memslots __memslots[2]; + struct kvm_memslots *memslots[1]; + struct xarray vcpu_array; + atomic_t nr_memslots_dirty_logging; + spinlock_t mn_invalidate_lock; + long unsigned int mn_active_invalidate_count; + struct rcuwait mn_memslots_update_rcuwait; + spinlock_t gpc_lock; + struct list_head gpc_list; + atomic_t online_vcpus; + int max_vcpus; + int created_vcpus; + int last_boosted_vcpu; + struct list_head vm_list; + struct mutex lock; + struct kvm_io_bus *buses[5]; + struct { + spinlock_t lock; + struct list_head items; + struct list_head resampler_list; + struct mutex resampler_lock; + } irqfds; + struct list_head ioeventfds; + struct kvm_vm_stat stat; + struct kvm_arch arch; + refcount_t users_count; + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + spinlock_t ring_lock; + struct list_head coalesced_zones; + struct mutex irq_lock; + struct kvm_irq_routing_table *irq_routing; + struct hlist_head irq_ack_notifier_list; + struct mmu_notifier mmu_notifier; + long unsigned int mmu_invalidate_seq; + long int mmu_invalidate_in_progress; + gfn_t mmu_invalidate_range_start; + gfn_t mmu_invalidate_range_end; + struct list_head devices; + u64 manual_dirty_log_protect; + struct dentry *debugfs_dentry; + struct kvm_stat_data **debugfs_stat_data; + struct srcu_struct srcu; + struct srcu_struct irq_srcu; + pid_t userspace_pid; + bool override_halt_poll_ns; + unsigned int max_halt_poll_ns; + u32 dirty_ring_size; + bool dirty_ring_with_bitmap; + bool vm_bugged; + bool vm_dead; + char stats_id[48]; +}; + +struct kvm_mmu_memory_cache { + gfp_t gfp_zero; + gfp_t gfp_custom; + u64 init_value; + struct kmem_cache *kmem_cache; + int capacity; + int nobjs; + void **objects; +}; + +struct kvm_vcpu_stat_generic { + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_poll_invalid; + u64 halt_wakeup; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 halt_wait_ns; + u64 halt_poll_success_hist[32]; + u64 halt_poll_fail_hist[32]; + u64 halt_wait_hist[32]; + u64 blocking; +}; + +struct kvm_vcpu; + +struct kvm_io_device; + +struct kvm_io_device_ops { + int (*read)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, void *); + int (*write)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, const void *); + void (*destructor)(struct kvm_io_device *); +}; + +struct kvm_mmio_fragment { + gpa_t gpa; + void *data; + unsigned int len; +}; + +struct kvm_cpu_context { + long unsigned int zero; + long unsigned int ra; + long unsigned int sp; + long unsigned int gp; + long unsigned int tp; + long unsigned int t0; + long unsigned int t1; + long unsigned int t2; + long unsigned int s0; + long unsigned int s1; + long unsigned int a0; + long unsigned int a1; + long unsigned int a2; + long unsigned int a3; + long unsigned int a4; + long unsigned int a5; + long unsigned int a6; + long unsigned int a7; + long unsigned int s2; + long unsigned int s3; + long unsigned int s4; + long unsigned int s5; + long unsigned int s6; + long 
unsigned int s7; + long unsigned int s8; + long unsigned int s9; + long unsigned int s10; + long unsigned int s11; + long unsigned int t3; + long unsigned int t4; + long unsigned int t5; + long unsigned int t6; + long unsigned int sepc; + long unsigned int sstatus; + long unsigned int hstatus; + long: 64; + union __riscv_fp_state fp; + struct __riscv_v_ext_state vector; +}; + +struct kvm_vcpu_csr { + long unsigned int vsstatus; + long unsigned int vsie; + long unsigned int vstvec; + long unsigned int vsscratch; + long unsigned int vsepc; + long unsigned int vscause; + long unsigned int vstval; + long unsigned int hvip; + long unsigned int vsatp; + long unsigned int scounteren; + long unsigned int senvcfg; +}; + +struct kvm_vcpu_smstateen_csr { + long unsigned int sstateen0; +}; + +struct kvm_vcpu_timer { + bool init_done; + bool next_set; + u64 next_cycles; + struct hrtimer hrt; + bool sstc_enabled; + int (*timer_next_event)(struct kvm_vcpu *, u64); +}; + +enum kvm_riscv_hfence_type { + KVM_RISCV_HFENCE_UNKNOWN = 0, + KVM_RISCV_HFENCE_GVMA_VMID_GPA = 1, + KVM_RISCV_HFENCE_VVMA_ASID_GVA = 2, + KVM_RISCV_HFENCE_VVMA_ASID_ALL = 3, + KVM_RISCV_HFENCE_VVMA_GVA = 4, +}; + +struct kvm_riscv_hfence { + enum kvm_riscv_hfence_type type; + long unsigned int asid; + long unsigned int order; + gpa_t addr; + gpa_t size; +}; + +struct kvm_mmio_decode { + long unsigned int insn; + int insn_len; + int len; + int shift; + int return_handled; +}; + +struct kvm_csr_decode { + long unsigned int insn; + int return_handled; +}; + +enum kvm_riscv_sbi_ext_status { + KVM_RISCV_SBI_EXT_STATUS_UNINITIALIZED = 0, + KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE = 1, + KVM_RISCV_SBI_EXT_STATUS_ENABLED = 2, + KVM_RISCV_SBI_EXT_STATUS_DISABLED = 3, +}; + +struct kvm_vcpu_sbi_context { + int return_handled; + enum kvm_riscv_sbi_ext_status ext_status[11]; +}; + +struct kvm_vcpu_aia_csr { + long unsigned int vsiselect; + long unsigned int hviprio1; + long unsigned int hviprio2; + long unsigned int vsieh; + long unsigned int hviph; + long unsigned int hviprio1h; + long unsigned int hviprio2h; +}; + +struct kvm_vcpu_aia { + struct kvm_vcpu_aia_csr guest_csr; + struct kvm_vcpu_aia_csr guest_reset_csr; + gpa_t imsic_addr; + u32 hart_index; + void *imsic_state; +}; + +struct kvm_mp_state { + __u32 mp_state; +}; + +union sbi_pmu_ctr_info { + long unsigned int value; + struct { + long unsigned int csr: 12; + long unsigned int width: 6; + long unsigned int reserved: 45; + long unsigned int type: 1; + }; +}; + +struct kvm_pmc { + u8 idx; + struct perf_event *perf_event; + u64 counter_val; + union sbi_pmu_ctr_info cinfo; + bool started; + long unsigned int event_idx; + struct kvm_vcpu *vcpu; +}; + +struct kvm_fw_event { + u64 value; + bool started; +}; + +struct riscv_pmu_snapshot_data; + +struct kvm_pmu { + struct kvm_pmc pmc[64]; + struct kvm_fw_event fw_event[32]; + int num_fw_ctrs; + int num_hw_ctrs; + bool init_done; + long unsigned int pmc_in_use[1]; + long unsigned int pmc_overflown[1]; + gpa_t snapshot_addr; + struct riscv_pmu_snapshot_data *sdata; +}; + +struct kvm_vcpu_config { + u64 henvcfg; + u64 hstateen0; + long unsigned int hedeleg; +}; + +struct kvm_vcpu_arch { + bool ran_atle